cam_xpt.c: diff between revisions 115562 and 116161
1/*
2 * Implementation of the Common Access Method Transport (XPT) layer.
3 *
4 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
5 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD: head/sys/cam/cam_xpt.c 115562 2003-05-31 20:46:21Z phk $
30 */
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/cam/cam_xpt.c 116161 2003-06-10 17:50:20Z obrien $");
32
31#include <sys/param.h>
32#include <sys/bus.h>
33#include <sys/systm.h>
34#include <sys/types.h>
35#include <sys/malloc.h>
36#include <sys/kernel.h>
37#include <sys/time.h>
38#include <sys/conf.h>
39#include <sys/fcntl.h>
40#include <sys/md5.h>
41#include <sys/interrupt.h>
42#include <sys/sbuf.h>
43
44#ifdef PC98
45#include <pc98/pc98/pc98_machdep.h> /* geometry translation */
46#endif
47
48#include <cam/cam.h>
49#include <cam/cam_ccb.h>
50#include <cam/cam_periph.h>
51#include <cam/cam_sim.h>
52#include <cam/cam_xpt.h>
53#include <cam/cam_xpt_sim.h>
54#include <cam/cam_xpt_periph.h>
55#include <cam/cam_debug.h>
56
57#include <cam/scsi/scsi_all.h>
58#include <cam/scsi/scsi_message.h>
59#include <cam/scsi/scsi_pass.h>
60#include "opt_cam.h"
61
62/* Datastructures internal to the xpt layer */
63
64/*
65 * Definition of an async handler callback block. These are used to add
66 * SIMs and peripherals to the async callback lists.
67 */
68struct async_node {
69 SLIST_ENTRY(async_node) links;
70 u_int32_t event_enable; /* Async Event enables */
71 void (*callback)(void *arg, u_int32_t code,
72 struct cam_path *path, void *args);
73 void *callback_arg;
74};
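
In practice a peripheral driver doesn't fill one of these nodes in by hand; it registers its callback by sending an XPT_SASYNC_CB CCB through xpt_action(). A minimal sketch, assuming a hypothetical callback myasync() (the registration helper name is made up as well):

static void	myasync(void *arg, u_int32_t code, struct cam_path *path,
			void *args);	/* hypothetical callback */

/* Sketch only: register myasync() for bus-reset and device-loss events. */
static void
myasync_register(struct cam_periph *periph)
{
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_BUS_RESET | AC_LOST_DEVICE;
	csa.callback = myasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);
}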
75
76SLIST_HEAD(async_list, async_node);
77SLIST_HEAD(periph_list, cam_periph);
78static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
79
80/*
81 * This is the maximum number of high powered commands (e.g. start unit)
82 * that can be outstanding at a particular time.
83 */
84#ifndef CAM_MAX_HIGHPOWER
85#define CAM_MAX_HIGHPOWER 4
86#endif
87
88/* number of high powered commands that can go through right now */
89static int num_highpower = CAM_MAX_HIGHPOWER;
90
91/*
92 * Structure for queueing a device in a run queue.
93 * There is one run queue for allocating new ccbs,
94 * and another for sending ccbs to the controller.
95 */
96struct cam_ed_qinfo {
97 cam_pinfo pinfo;
98 struct cam_ed *device;
99};
100
101/*
102 * The CAM EDT (Existing Device Table) contains the device information for
103 * all devices for all busses in the system. The table contains a
104 * cam_ed structure for each device on the bus.
105 */
106struct cam_ed {
107 TAILQ_ENTRY(cam_ed) links;
108 struct cam_ed_qinfo alloc_ccb_entry;
109 struct cam_ed_qinfo send_ccb_entry;
110 struct cam_et *target;
111 lun_id_t lun_id;
112 struct camq drvq; /*
113 * Queue of type drivers wanting to do
114 * work on this device.
115 */
116 struct cam_ccbq ccbq; /* Queue of pending ccbs */
117 struct async_list asyncs; /* Async callback info for this B/T/L */
 118	struct periph_list periphs;	/* All attached peripheral drivers */
119 u_int generation; /* Generation number */
120 struct cam_periph *owner; /* Peripheral driver's ownership tag */
121 struct xpt_quirk_entry *quirk; /* Oddities about this device */
122 /* Storage for the inquiry data */
123#ifdef CAM_NEW_TRAN_CODE
124 cam_proto protocol;
125 u_int protocol_version;
126 cam_xport transport;
127 u_int transport_version;
128#endif /* CAM_NEW_TRAN_CODE */
129 struct scsi_inquiry_data inq_data;
130 u_int8_t inq_flags; /*
131 * Current settings for inquiry flags.
132 * This allows us to override settings
133 * like disconnection and tagged
134 * queuing for a device.
135 */
136 u_int8_t queue_flags; /* Queue flags from the control page */
137 u_int8_t serial_num_len;
138 u_int8_t *serial_num;
139 u_int32_t qfrozen_cnt;
140 u_int32_t flags;
141#define CAM_DEV_UNCONFIGURED 0x01
142#define CAM_DEV_REL_TIMEOUT_PENDING 0x02
143#define CAM_DEV_REL_ON_COMPLETE 0x04
144#define CAM_DEV_REL_ON_QUEUE_EMPTY 0x08
145#define CAM_DEV_RESIZE_QUEUE_NEEDED 0x10
146#define CAM_DEV_TAG_AFTER_COUNT 0x20
147#define CAM_DEV_INQUIRY_DATA_VALID 0x40
148 u_int32_t tag_delay_count;
149#define CAM_TAG_DELAY_COUNT 5
150 u_int32_t refcount;
151 struct callout_handle c_handle;
152};
153
154/*
155 * Each target is represented by an ET (Existing Target). These
156 * entries are created when a target is successfully probed with an
157 * identify, and removed when a device fails to respond after a number
158 * of retries, or a bus rescan finds the device missing.
159 */
160struct cam_et {
161 TAILQ_HEAD(, cam_ed) ed_entries;
162 TAILQ_ENTRY(cam_et) links;
163 struct cam_eb *bus;
164 target_id_t target_id;
165 u_int32_t refcount;
166 u_int generation;
167 struct timeval last_reset;
168};
169
170/*
171 * Each bus is represented by an EB (Existing Bus). These entries
172 * are created by calls to xpt_bus_register and deleted by calls to
173 * xpt_bus_deregister.
174 */
175struct cam_eb {
176 TAILQ_HEAD(, cam_et) et_entries;
177 TAILQ_ENTRY(cam_eb) links;
178 path_id_t path_id;
179 struct cam_sim *sim;
180 struct timeval last_reset;
181 u_int32_t flags;
182#define CAM_EB_RUNQ_SCHEDULED 0x01
183 u_int32_t refcount;
184 u_int generation;
185};
186
187struct cam_path {
188 struct cam_periph *periph;
189 struct cam_eb *bus;
190 struct cam_et *target;
191 struct cam_ed *device;
192};
193
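A path therefore pins one node at each level of the hierarchy above, and the rest of this file navigates it by plain pointer chasing, e.g. path->target->target_id. An illustrative helper (hypothetical, not part of the file):

/* Illustration only: report the bus/target/lun tuple a path points at. */
static void
mypath_report(struct cam_path *path)
{
	printf("bus %d target %d lun %d\n",
	       path->bus->path_id,
	       path->target->target_id,
	       path->device->lun_id);
}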
194struct xpt_quirk_entry {
195 struct scsi_inquiry_pattern inq_pat;
196 u_int8_t quirks;
197#define CAM_QUIRK_NOLUNS 0x01
198#define CAM_QUIRK_NOSERIAL 0x02
199#define CAM_QUIRK_HILUNS 0x04
200 u_int mintags;
201 u_int maxtags;
202};
203#define CAM_SCSI2_MAXLUN 8
204
205typedef enum {
206 XPT_FLAG_OPEN = 0x01
207} xpt_flags;
208
209struct xpt_softc {
210 xpt_flags flags;
211 u_int32_t generation;
212};
213
214static const char quantum[] = "QUANTUM";
215static const char sony[] = "SONY";
216static const char west_digital[] = "WDIGTL";
217static const char samsung[] = "SAMSUNG";
218static const char seagate[] = "SEAGATE";
219static const char microp[] = "MICROP";
220
221static struct xpt_quirk_entry xpt_quirk_table[] =
222{
223 {
224 /* Reports QUEUE FULL for temporary resource shortages */
225 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
226 /*quirks*/0, /*mintags*/24, /*maxtags*/32
227 },
228 {
229 /* Reports QUEUE FULL for temporary resource shortages */
230 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
231 /*quirks*/0, /*mintags*/24, /*maxtags*/32
232 },
233 {
234 /* Reports QUEUE FULL for temporary resource shortages */
235 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
236 /*quirks*/0, /*mintags*/24, /*maxtags*/32
237 },
238 {
239 /* Broken tagged queuing drive */
240 { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
241 /*quirks*/0, /*mintags*/0, /*maxtags*/0
242 },
243 {
244 /* Broken tagged queuing drive */
245 { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
246 /*quirks*/0, /*mintags*/0, /*maxtags*/0
247 },
248 {
249 /* Broken tagged queuing drive */
250 { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
251 /*quirks*/0, /*mintags*/0, /*maxtags*/0
252 },
253 {
254 /*
255 * Unfortunately, the Quantum Atlas III has the same
256 * problem as the Atlas II drives above.
257 * Reported by: "Johan Granlund" <johan@granlund.nu>
258 *
259 * For future reference, the drive with the problem was:
260 * QUANTUM QM39100TD-SW N1B0
261 *
262 * It's possible that Quantum will fix the problem in later
263 * firmware revisions. If that happens, the quirk entry
264 * will need to be made specific to the firmware revisions
265 * with the problem.
266 *
267 */
268 /* Reports QUEUE FULL for temporary resource shortages */
269 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
270 /*quirks*/0, /*mintags*/24, /*maxtags*/32
271 },
272 {
273 /*
274 * 18 Gig Atlas III, same problem as the 9G version.
275 * Reported by: Andre Albsmeier
276 * <andre.albsmeier@mchp.siemens.de>
277 *
278 * For future reference, the drive with the problem was:
279 * QUANTUM QM318000TD-S N491
280 */
281 /* Reports QUEUE FULL for temporary resource shortages */
282 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
283 /*quirks*/0, /*mintags*/24, /*maxtags*/32
284 },
285 {
286 /*
287 * Broken tagged queuing drive
288 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
289 * and: Martin Renters <martin@tdc.on.ca>
290 */
291 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
292 /*quirks*/0, /*mintags*/0, /*maxtags*/0
293 },
294 /*
295 * The Seagate Medalist Pro drives have very poor write
296 * performance with anything more than 2 tags.
297 *
298 * Reported by: Paul van der Zwan <paulz@trantor.xs4all.nl>
299 * Drive: <SEAGATE ST36530N 1444>
300 *
301 * Reported by: Jeremy Lea <reg@shale.csir.co.za>
302 * Drive: <SEAGATE ST34520W 1281>
303 *
304 * No one has actually reported that the 9G version
305 * (ST39140*) of the Medalist Pro has the same problem, but
306 * we're assuming that it does because the 4G and 6.5G
307 * versions of the drive are broken.
308 */
309 {
310 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
311 /*quirks*/0, /*mintags*/2, /*maxtags*/2
312 },
313 {
314 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
315 /*quirks*/0, /*mintags*/2, /*maxtags*/2
316 },
317 {
318 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
319 /*quirks*/0, /*mintags*/2, /*maxtags*/2
320 },
321 {
322 /*
323 * Slow when tagged queueing is enabled. Write performance
324 * steadily drops off with more and more concurrent
325 * transactions. Best sequential write performance with
326 * tagged queueing turned off and write caching turned on.
327 *
328 * PR: kern/10398
329 * Submitted by: Hideaki Okada <hokada@isl.melco.co.jp>
330 * Drive: DCAS-34330 w/ "S65A" firmware.
331 *
332 * The drive with the problem had the "S65A" firmware
333 * revision, and has also been reported (by Stephen J.
334 * Roznowski <sjr@home.net>) for a drive with the "S61A"
335 * firmware revision.
336 *
337 * Although no one has reported problems with the 2 gig
338 * version of the DCAS drive, the assumption is that it
339 * has the same problems as the 4 gig version. Therefore
 340	 * this quirk entry disables tagged queueing for all
341 * DCAS drives.
342 */
343 { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
344 /*quirks*/0, /*mintags*/0, /*maxtags*/0
345 },
346 {
347 /* Broken tagged queuing drive */
348 { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
349 /*quirks*/0, /*mintags*/0, /*maxtags*/0
350 },
351 {
352 /* Broken tagged queuing drive */
353 { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
354 /*quirks*/0, /*mintags*/0, /*maxtags*/0
355 },
356 {
357 /*
358 * Broken tagged queuing drive.
359 * Submitted by:
360 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
361 * in PR kern/9535
362 */
363 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
364 /*quirks*/0, /*mintags*/0, /*maxtags*/0
365 },
366 {
367 /*
368 * Slow when tagged queueing is enabled. (1.5MB/sec versus
369 * 8MB/sec.)
370 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
371 * Best performance with these drives is achieved with
372 * tagged queueing turned off, and write caching turned on.
373 */
374 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
375 /*quirks*/0, /*mintags*/0, /*maxtags*/0
376 },
377 {
378 /*
379 * Slow when tagged queueing is enabled. (1.5MB/sec versus
380 * 8MB/sec.)
381 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
382 * Best performance with these drives is achieved with
383 * tagged queueing turned off, and write caching turned on.
384 */
385 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
386 /*quirks*/0, /*mintags*/0, /*maxtags*/0
387 },
388 {
389 /*
390 * Doesn't handle queue full condition correctly,
391 * so we need to limit maxtags to what the device
392 * can handle instead of determining this automatically.
393 */
394 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
395 /*quirks*/0, /*mintags*/2, /*maxtags*/32
396 },
397 {
398 /* Really only one LUN */
399 { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
400 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
401 },
402 {
403 /* I can't believe we need a quirk for DPT volumes. */
404 { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
405 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
406 /*mintags*/0, /*maxtags*/255
407 },
408 {
409 /*
410 * Many Sony CDROM drives don't like multi-LUN probing.
411 */
412 { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
413 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
414 },
415 {
416 /*
417 * This drive doesn't like multiple LUN probing.
418 * Submitted by: Parag Patel <parag@cgt.com>
419 */
420 { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
421 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
422 },
423 {
424 { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
425 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
426 },
427 {
428 /*
429 * The 8200 doesn't like multi-lun probing, and probably
 430	 * doesn't like serial number requests either.
431 */
432 {
433 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
434 "EXB-8200*", "*"
435 },
436 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
437 },
438 {
439 /*
440 * Let's try the same as above, but for a drive that says
441 * it's an IPL-6860 but is actually an EXB 8200.
442 */
443 {
444 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
445 "IPL-6860*", "*"
446 },
447 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
448 },
449 {
450 /*
451 * These Hitachi drives don't like multi-lun probing.
452 * The PR submitter has a DK319H, but says that the Linux
453 * kernel has a similar work-around for the DK312 and DK314,
454 * so all DK31* drives are quirked here.
455 * PR: misc/18793
456 * Submitted by: Paul Haddad <paul@pth.com>
457 */
458 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
459 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
460 },
461 {
462 /*
 463	 * The Hitachi CJ series with J8A8 firmware apparently has
464 * problems with tagged commands.
465 * PR: 23536
466 * Reported by: amagai@nue.org
467 */
468 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
469 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
470 },
471 {
472 /*
473 * These are the large storage arrays.
474 * Submitted by: William Carrel <william.carrel@infospace.com>
475 */
476 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
477 CAM_QUIRK_HILUNS, 2, 1024
478 },
479 {
480 /*
481 * This old revision of the TDC3600 is also SCSI-1, and
482 * hangs upon serial number probing.
483 */
484 {
485 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
486 " TDC 3600", "U07:"
487 },
488 CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
489 },
490 {
491 /*
492 * Maxtor Personal Storage 3000XT (Firewire)
493 * hangs upon serial number probing.
494 */
495 {
496 T_DIRECT, SIP_MEDIA_FIXED, "Maxtor",
497 "1394 storage", "*"
498 },
499 CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
500 },
501 {
502 /*
 503	 * Would respond to all LUNs if asked for.
504 */
505 {
506 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
507 "CP150", "*"
508 },
509 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
510 },
511 {
512 /*
 513	 * Would respond to all LUNs if asked for.
514 */
515 {
516 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
517 "96X2*", "*"
518 },
519 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
520 },
521 {
522 /* Submitted by: Matthew Dodd <winter@jurai.net> */
523 { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
524 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
525 },
526 {
527 /* Submitted by: Matthew Dodd <winter@jurai.net> */
528 { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
529 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
530 },
531 {
532 /* TeraSolutions special settings for TRC-22 RAID */
533 { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
534 /*quirks*/0, /*mintags*/55, /*maxtags*/255
535 },
536 {
537 /* Veritas Storage Appliance */
538 { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
539 CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
540 },
541 {
542 /*
543 * Would respond to all LUNs. Device type and removable
544 * flag are jumper-selectable.
545 */
546 { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
547 "Tahiti 1", "*"
548 },
549 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
550 },
551 {
552 /* Default tagged queuing parameters for all devices */
553 {
554 T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
555 /*vendor*/"*", /*product*/"*", /*revision*/"*"
556 },
557 /*quirks*/0, /*mintags*/2, /*maxtags*/255
558 },
559};
560
561static const int xpt_quirk_table_size =
562 sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
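
Each inq_pat is matched against the device's INQUIRY vendor/product/revision strings with shell-style wildcards, and the first entry that hits wins, which is why the wide-open default entry sits at the bottom of the table. A hedged sketch of what a new entry would look like, for a made-up drive (it would have to be placed above that catch-all default):

	{
		/*
		 * Hypothetical example: an "ACME RM1200" disk with broken
		 * tagged queueing, any firmware revision.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ACME", "RM1200*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},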
563
564typedef enum {
565 DM_RET_COPY = 0x01,
566 DM_RET_FLAG_MASK = 0x0f,
567 DM_RET_NONE = 0x00,
568 DM_RET_STOP = 0x10,
569 DM_RET_DESCEND = 0x20,
570 DM_RET_ERROR = 0x30,
571 DM_RET_ACTION_MASK = 0xf0
572} dev_match_ret;
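
The low nibble carries independent flags and the high nibble exactly one action, so a single return value can both request a copy of the current node and steer the traversal. A small illustration of how the two masks are meant to be used (the helper is hypothetical; the real decoding is spread through the xpt*match functions below):

/* Illustration: decode a dev_match_ret with the two masks. */
static int
mymatch_decode(dev_match_ret retval, int *copy)
{
	/* Low nibble: flag bits. */
	*copy = (retval & DM_RET_COPY) != 0;

	/* High nibble: exactly one action code. */
	switch (retval & DM_RET_ACTION_MASK) {
	case DM_RET_ERROR:
		return (-1);	/* abort the walk */
	case DM_RET_STOP:
		return (0);	/* don't descend below this node */
	default:
		return (1);	/* DM_RET_DESCEND or DM_RET_NONE */
	}
}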
573
574typedef enum {
575 XPT_DEPTH_BUS,
576 XPT_DEPTH_TARGET,
577 XPT_DEPTH_DEVICE,
578 XPT_DEPTH_PERIPH
579} xpt_traverse_depth;
580
581struct xpt_traverse_config {
582 xpt_traverse_depth depth;
583 void *tr_func;
584 void *tr_arg;
585};
586
587typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg);
588typedef int xpt_targetfunc_t (struct cam_et *target, void *arg);
589typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg);
590typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg);
591typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
592
593/* Transport layer configuration information */
594static struct xpt_softc xsoftc;
595
596/* Queues for our software interrupt handler */
597typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
598static cam_isrq_t cam_bioq;
599static cam_isrq_t cam_netq;
600
601/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
602static SLIST_HEAD(,ccb_hdr) ccb_freeq;
603static u_int xpt_max_ccbs; /*
604 * Maximum size of ccb pool. Modified as
605 * devices are added/removed or have their
606 * opening counts changed.
607 */
608static u_int xpt_ccb_count; /* Current count of allocated ccbs */
609
610struct cam_periph *xpt_periph;
611
612static periph_init_t xpt_periph_init;
613
614static periph_init_t probe_periph_init;
615
616static struct periph_driver xpt_driver =
617{
618 xpt_periph_init, "xpt",
619 TAILQ_HEAD_INITIALIZER(xpt_driver.units)
620};
621
622static struct periph_driver probe_driver =
623{
624 probe_periph_init, "probe",
625 TAILQ_HEAD_INITIALIZER(probe_driver.units)
626};
627
628PERIPHDRIVER_DECLARE(xpt, xpt_driver);
629PERIPHDRIVER_DECLARE(probe, probe_driver);
630
631#define XPT_CDEV_MAJOR 104
632
633static d_open_t xptopen;
634static d_close_t xptclose;
635static d_ioctl_t xptioctl;
636
637static struct cdevsw xpt_cdevsw = {
638 .d_open = xptopen,
639 .d_close = xptclose,
640 .d_ioctl = xptioctl,
641 .d_name = "xpt",
642 .d_maj = XPT_CDEV_MAJOR,
643};
644
645static struct intr_config_hook *xpt_config_hook;
646
647/* Registered busses */
648static TAILQ_HEAD(,cam_eb) xpt_busses;
649static u_int bus_generation;
650
651/* Storage for debugging datastructures */
652#ifdef CAMDEBUG
653struct cam_path *cam_dpath;
654u_int32_t cam_dflags;
655u_int32_t cam_debug_delay;
656#endif
657
658/* Pointers to software interrupt handlers */
659static void *camnet_ih;
660static void *cambio_ih;
661
662#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
663#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
664#endif
665
666/*
667 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
668 * enabled. Also, the user must have either none, or all of CAM_DEBUG_BUS,
669 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
670 */
671#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
672 || defined(CAM_DEBUG_LUN)
673#ifdef CAMDEBUG
674#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
675 || !defined(CAM_DEBUG_LUN)
676#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
677 and CAM_DEBUG_LUN"
678#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
679#else /* !CAMDEBUG */
680#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
681#endif /* CAMDEBUG */
682#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
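
In kernel-config terms: CAMDEBUG alone is legal, but the bus/target/lun options are all-or-nothing and require CAMDEBUG. A config fragment that would presumably satisfy these checks (the bus/target/lun values here are only examples):

	options 	CAMDEBUG
	options 	CAM_DEBUG_BUS=0
	options 	CAM_DEBUG_TARGET=1
	options 	CAM_DEBUG_LUN=0
	options 	CAM_DEBUG_FLAGS=CAM_DEBUG_CDB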
683
684/* Our boot-time initialization hook */
685static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
686
687static moduledata_t cam_moduledata = {
688 "cam",
689 cam_module_event_handler,
690 NULL
691};
692
693static void xpt_init(void *);
694
695DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
696MODULE_VERSION(cam, 1);
697
698
699static cam_status xpt_compile_path(struct cam_path *new_path,
700 struct cam_periph *perph,
701 path_id_t path_id,
702 target_id_t target_id,
703 lun_id_t lun_id);
704
705static void xpt_release_path(struct cam_path *path);
706
707static void xpt_async_bcast(struct async_list *async_head,
708 u_int32_t async_code,
709 struct cam_path *path,
710 void *async_arg);
711static void xpt_dev_async(u_int32_t async_code,
712 struct cam_eb *bus,
713 struct cam_et *target,
714 struct cam_ed *device,
715 void *async_arg);
716static path_id_t xptnextfreepathid(void);
717static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
718static union ccb *xpt_get_ccb(struct cam_ed *device);
719static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
720 u_int32_t new_priority);
721static void xpt_run_dev_allocq(struct cam_eb *bus);
722static void xpt_run_dev_sendq(struct cam_eb *bus);
723static timeout_t xpt_release_devq_timeout;
724static timeout_t xpt_release_simq_timeout;
725static void xpt_release_bus(struct cam_eb *bus);
726static void xpt_release_devq_device(struct cam_ed *dev, u_int count,
727 int run_queue);
728static struct cam_et*
729 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
730static void xpt_release_target(struct cam_eb *bus, struct cam_et *target);
731static struct cam_ed*
732 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
733 lun_id_t lun_id);
734static void xpt_release_device(struct cam_eb *bus, struct cam_et *target,
735 struct cam_ed *device);
736static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
737static struct cam_eb*
738 xpt_find_bus(path_id_t path_id);
739static struct cam_et*
740 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
741static struct cam_ed*
742 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
743static void xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
744static void xpt_scan_lun(struct cam_periph *periph,
745 struct cam_path *path, cam_flags flags,
746 union ccb *ccb);
747static void xptscandone(struct cam_periph *periph, union ccb *done_ccb);
748static xpt_busfunc_t xptconfigbuscountfunc;
749static xpt_busfunc_t xptconfigfunc;
750static void xpt_config(void *arg);
751static xpt_devicefunc_t xptpassannouncefunc;
752static void xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
753static void xptaction(struct cam_sim *sim, union ccb *work_ccb);
754static void xptpoll(struct cam_sim *sim);
755static void camisr(void *);
756#if 0
757static void xptstart(struct cam_periph *periph, union ccb *work_ccb);
758static void xptasync(struct cam_periph *periph,
759 u_int32_t code, cam_path *path);
760#endif
761static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns,
762 u_int num_patterns, struct cam_eb *bus);
763static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns,
764 u_int num_patterns,
765 struct cam_ed *device);
766static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns,
767 u_int num_patterns,
768 struct cam_periph *periph);
769static xpt_busfunc_t xptedtbusfunc;
770static xpt_targetfunc_t xptedttargetfunc;
771static xpt_devicefunc_t xptedtdevicefunc;
772static xpt_periphfunc_t xptedtperiphfunc;
773static xpt_pdrvfunc_t xptplistpdrvfunc;
774static xpt_periphfunc_t xptplistperiphfunc;
775static int xptedtmatch(struct ccb_dev_match *cdm);
776static int xptperiphlistmatch(struct ccb_dev_match *cdm);
777static int xptbustraverse(struct cam_eb *start_bus,
778 xpt_busfunc_t *tr_func, void *arg);
779static int xpttargettraverse(struct cam_eb *bus,
780 struct cam_et *start_target,
781 xpt_targetfunc_t *tr_func, void *arg);
782static int xptdevicetraverse(struct cam_et *target,
783 struct cam_ed *start_device,
784 xpt_devicefunc_t *tr_func, void *arg);
785static int xptperiphtraverse(struct cam_ed *device,
786 struct cam_periph *start_periph,
787 xpt_periphfunc_t *tr_func, void *arg);
788static int xptpdrvtraverse(struct periph_driver **start_pdrv,
789 xpt_pdrvfunc_t *tr_func, void *arg);
790static int xptpdperiphtraverse(struct periph_driver **pdrv,
791 struct cam_periph *start_periph,
792 xpt_periphfunc_t *tr_func,
793 void *arg);
794static xpt_busfunc_t xptdefbusfunc;
795static xpt_targetfunc_t xptdeftargetfunc;
796static xpt_devicefunc_t xptdefdevicefunc;
797static xpt_periphfunc_t xptdefperiphfunc;
798static int xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
799#ifdef notusedyet
800static int xpt_for_all_targets(xpt_targetfunc_t *tr_func,
801 void *arg);
802#endif
803static int xpt_for_all_devices(xpt_devicefunc_t *tr_func,
804 void *arg);
805#ifdef notusedyet
806static int xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
807 void *arg);
808#endif
809static xpt_devicefunc_t xptsetasyncfunc;
810static xpt_busfunc_t xptsetasyncbusfunc;
811static cam_status xptregister(struct cam_periph *periph,
812 void *arg);
813static cam_status proberegister(struct cam_periph *periph,
814 void *arg);
815static void probeschedule(struct cam_periph *probe_periph);
816static void probestart(struct cam_periph *periph, union ccb *start_ccb);
817static void proberequestdefaultnegotiation(struct cam_periph *periph);
818static void probedone(struct cam_periph *periph, union ccb *done_ccb);
819static void probecleanup(struct cam_periph *periph);
820static void xpt_find_quirk(struct cam_ed *device);
821#ifdef CAM_NEW_TRAN_CODE
822static void xpt_devise_transport(struct cam_path *path);
823#endif /* CAM_NEW_TRAN_CODE */
824static void xpt_set_transfer_settings(struct ccb_trans_settings *cts,
825 struct cam_ed *device,
826 int async_update);
827static void xpt_toggle_tags(struct cam_path *path);
828static void xpt_start_tags(struct cam_path *path);
829static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
830 struct cam_ed *dev);
831static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
832 struct cam_ed *dev);
833static __inline int periph_is_queued(struct cam_periph *periph);
834static __inline int device_is_alloc_queued(struct cam_ed *device);
835static __inline int device_is_send_queued(struct cam_ed *device);
836static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
837
838static __inline int
839xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
840{
841 int retval;
842
843 if (dev->ccbq.devq_openings > 0) {
844 if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
845 cam_ccbq_resize(&dev->ccbq,
846 dev->ccbq.dev_openings
847 + dev->ccbq.dev_active);
848 dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
849 }
850 /*
851 * The priority of a device waiting for CCB resources
 852	 * is that of the highest priority peripheral driver
853 * enqueued.
854 */
855 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
856 &dev->alloc_ccb_entry.pinfo,
857 CAMQ_GET_HEAD(&dev->drvq)->priority);
858 } else {
859 retval = 0;
860 }
861
862 return (retval);
863}
864
865static __inline int
866xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
867{
868 int retval;
869
870 if (dev->ccbq.dev_openings > 0) {
871 /*
872 * The priority of a device waiting for controller
 873	 * resources is that of the highest priority CCB
874 * enqueued.
875 */
876 retval =
877 xpt_schedule_dev(&bus->sim->devq->send_queue,
878 &dev->send_ccb_entry.pinfo,
879 CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
880 } else {
881 retval = 0;
882 }
883 return (retval);
884}
885
886static __inline int
887periph_is_queued(struct cam_periph *periph)
888{
889 return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
890}
891
892static __inline int
893device_is_alloc_queued(struct cam_ed *device)
894{
895 return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
896}
897
898static __inline int
899device_is_send_queued(struct cam_ed *device)
900{
901 return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
902}
903
904static __inline int
905dev_allocq_is_runnable(struct cam_devq *devq)
906{
907 /*
908 * Have work to do.
909 * Have space to do more work.
910 * Allowed to do work.
911 */
912 return ((devq->alloc_queue.qfrozen_cnt == 0)
913 && (devq->alloc_queue.entries > 0)
914 && (devq->alloc_openings > 0));
915}
916
917static void
918xpt_periph_init()
919{
920 make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
921}
922
923static void
924probe_periph_init()
925{
926}
927
928
929static void
930xptdone(struct cam_periph *periph, union ccb *done_ccb)
931{
932 /* Caller will release the CCB */
933 wakeup(&done_ccb->ccb_h.cbfcnp);
934}
935
936static int
937xptopen(dev_t dev, int flags, int fmt, struct thread *td)
938{
939 int unit;
940
941 unit = minor(dev) & 0xff;
942
943 /*
944 * Only allow read-write access.
945 */
946 if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
947 return(EPERM);
948
949 /*
950 * We don't allow nonblocking access.
951 */
952 if ((flags & O_NONBLOCK) != 0) {
953 printf("xpt%d: can't do nonblocking access\n", unit);
954 return(ENODEV);
955 }
956
957 /*
958 * We only have one transport layer right now. If someone accesses
 959	 * us via something other than minor number 0, point out their
960 * mistake.
961 */
962 if (unit != 0) {
963 printf("xptopen: got invalid xpt unit %d\n", unit);
964 return(ENXIO);
965 }
966
967 /* Mark ourselves open */
968 xsoftc.flags |= XPT_FLAG_OPEN;
969
970 return(0);
971}
972
973static int
974xptclose(dev_t dev, int flag, int fmt, struct thread *td)
975{
976 int unit;
977
978 unit = minor(dev) & 0xff;
979
980 /*
981 * We only have one transport layer right now. If someone accesses
 982	 * us via something other than minor number 0, point out their
983 * mistake.
984 */
985 if (unit != 0) {
986 printf("xptclose: got invalid xpt unit %d\n", unit);
987 return(ENXIO);
988 }
989
990 /* Mark ourselves closed */
991 xsoftc.flags &= ~XPT_FLAG_OPEN;
992
993 return(0);
994}
995
996static int
997xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
998{
999 int unit, error;
1000
1001 error = 0;
1002 unit = minor(dev) & 0xff;
1003
1004 /*
1005 * We only have one transport layer right now. If someone accesses
 1006	 * us via something other than minor number 0, point out their
1007 * mistake.
1008 */
1009 if (unit != 0) {
1010 printf("xptioctl: got invalid xpt unit %d\n", unit);
1011 return(ENXIO);
1012 }
1013
1014 switch(cmd) {
1015 /*
1016 * For the transport layer CAMIOCOMMAND ioctl, we really only want
1017 * to accept CCB types that don't quite make sense to send through a
1018 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
1019 * in the CAM spec.
1020 */
1021 case CAMIOCOMMAND: {
1022 union ccb *ccb;
1023 union ccb *inccb;
1024
1025 inccb = (union ccb *)addr;
1026
1027 switch(inccb->ccb_h.func_code) {
1028 case XPT_SCAN_BUS:
1029 case XPT_RESET_BUS:
1030 if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
1031 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
1032 error = EINVAL;
1033 break;
1034 }
1035 /* FALLTHROUGH */
1036 case XPT_PATH_INQ:
1037 case XPT_ENG_INQ:
1038 case XPT_SCAN_LUN:
1039
1040 ccb = xpt_alloc_ccb();
1041
1042 /*
1043 * Create a path using the bus, target, and lun the
1044 * user passed in.
1045 */
1046 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
1047 inccb->ccb_h.path_id,
1048 inccb->ccb_h.target_id,
1049 inccb->ccb_h.target_lun) !=
1050 CAM_REQ_CMP){
1051 error = EINVAL;
1052 xpt_free_ccb(ccb);
1053 break;
1054 }
1055 /* Ensure all of our fields are correct */
1056 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
1057 inccb->ccb_h.pinfo.priority);
1058 xpt_merge_ccb(ccb, inccb);
1059 ccb->ccb_h.cbfcnp = xptdone;
1060 cam_periph_runccb(ccb, NULL, 0, 0, NULL);
1061 bcopy(ccb, inccb, sizeof(union ccb));
1062 xpt_free_path(ccb->ccb_h.path);
1063 xpt_free_ccb(ccb);
1064 break;
1065
1066 case XPT_DEBUG: {
1067 union ccb ccb;
1068
1069 /*
1070 * This is an immediate CCB, so it's okay to
1071 * allocate it on the stack.
1072 */
1073
1074 /*
1075 * Create a path using the bus, target, and lun the
1076 * user passed in.
1077 */
1078 if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
1079 inccb->ccb_h.path_id,
1080 inccb->ccb_h.target_id,
1081 inccb->ccb_h.target_lun) !=
1082 CAM_REQ_CMP){
1083 error = EINVAL;
1084 break;
1085 }
1086 /* Ensure all of our fields are correct */
1087 xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
1088 inccb->ccb_h.pinfo.priority);
1089 xpt_merge_ccb(&ccb, inccb);
1090 ccb.ccb_h.cbfcnp = xptdone;
1091 xpt_action(&ccb);
1092 bcopy(&ccb, inccb, sizeof(union ccb));
1093 xpt_free_path(ccb.ccb_h.path);
1094 break;
1095
1096 }
1097 case XPT_DEV_MATCH: {
1098 struct cam_periph_map_info mapinfo;
1099 struct cam_path *old_path;
1100
1101 /*
1102 * We can't deal with physical addresses for this
1103 * type of transaction.
1104 */
1105 if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
1106 error = EINVAL;
1107 break;
1108 }
1109
1110 /*
1111 * Save this in case the caller had it set to
1112 * something in particular.
1113 */
1114 old_path = inccb->ccb_h.path;
1115
1116 /*
1117 * We really don't need a path for the matching
1118 * code. The path is needed because of the
1119 * debugging statements in xpt_action(). They
1120 * assume that the CCB has a valid path.
1121 */
1122 inccb->ccb_h.path = xpt_periph->path;
1123
1124 bzero(&mapinfo, sizeof(mapinfo));
1125
1126 /*
1127 * Map the pattern and match buffers into kernel
1128 * virtual address space.
1129 */
1130 error = cam_periph_mapmem(inccb, &mapinfo);
1131
1132 if (error) {
1133 inccb->ccb_h.path = old_path;
1134 break;
1135 }
1136
1137 /*
1138 * This is an immediate CCB, we can send it on directly.
1139 */
1140 xpt_action(inccb);
1141
1142 /*
1143 * Map the buffers back into user space.
1144 */
1145 cam_periph_unmapmem(inccb, &mapinfo);
1146
1147 inccb->ccb_h.path = old_path;
1148
1149 error = 0;
1150 break;
1151 }
1152 default:
1153 error = ENOTSUP;
1154 break;
1155 }
1156 break;
1157 }
1158 /*
 1159	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
 1160	 * with the peripheral driver name and unit name filled in. The other
 1161	 * fields don't really matter as input. The passthrough driver name
 1162	 * ("pass") and unit number are passed back in the ccb. The current
 1163	 * device generation number, the index into the device peripheral
 1164	 * driver list, and the status are also passed back. Note that
1165 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
1166 * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is
1167 * (or rather should be) impossible for the device peripheral driver
1168 * list to change since we look at the whole thing in one pass, and
1169 * we do it with splcam protection.
1170 *
1171 */
1172 case CAMGETPASSTHRU: {
1173 union ccb *ccb;
1174 struct cam_periph *periph;
1175 struct periph_driver **p_drv;
1176 char *name;
1177 u_int unit;
1178 u_int cur_generation;
1179 int base_periph_found;
1180 int splbreaknum;
1181 int s;
1182
1183 ccb = (union ccb *)addr;
1184 unit = ccb->cgdl.unit_number;
1185 name = ccb->cgdl.periph_name;
1186 /*
1187 * Every 100 devices, we want to drop our spl protection to
1188 * give the software interrupt handler a chance to run.
1189 * Most systems won't run into this check, but this should
1190 * avoid starvation in the software interrupt handler in
1191 * large systems.
1192 */
1193 splbreaknum = 100;
1194
1195 ccb = (union ccb *)addr;
1196
1197 base_periph_found = 0;
1198
1199 /*
1200 * Sanity check -- make sure we don't get a null peripheral
1201 * driver name.
1202 */
1203 if (*ccb->cgdl.periph_name == '\0') {
1204 error = EINVAL;
1205 break;
1206 }
1207
1208 /* Keep the list from changing while we traverse it */
1209 s = splcam();
1210ptstartover:
1211 cur_generation = xsoftc.generation;
1212
1213 /* first find our driver in the list of drivers */
1214 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
1215 if (strcmp((*p_drv)->driver_name, name) == 0)
1216 break;
1217
1218 if (*p_drv == NULL) {
1219 splx(s);
1220 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1221 ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1222 *ccb->cgdl.periph_name = '\0';
1223 ccb->cgdl.unit_number = 0;
1224 error = ENOENT;
1225 break;
1226 }
1227
1228 /*
1229 * Run through every peripheral instance of this driver
1230 * and check to see whether it matches the unit passed
1231 * in by the user. If it does, get out of the loops and
1232 * find the passthrough driver associated with that
1233 * peripheral driver.
1234 */
1235 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
1236 periph = TAILQ_NEXT(periph, unit_links)) {
1237
1238 if (periph->unit_number == unit) {
1239 break;
1240 } else if (--splbreaknum == 0) {
1241 splx(s);
1242 s = splcam();
1243 splbreaknum = 100;
1244 if (cur_generation != xsoftc.generation)
1245 goto ptstartover;
1246 }
1247 }
1248 /*
1249 * If we found the peripheral driver that the user passed
1250 * in, go through all of the peripheral drivers for that
1251 * particular device and look for a passthrough driver.
1252 */
1253 if (periph != NULL) {
1254 struct cam_ed *device;
1255 int i;
1256
1257 base_periph_found = 1;
1258 device = periph->path->device;
1259 for (i = 0, periph = SLIST_FIRST(&device->periphs);
1260 periph != NULL;
1261 periph = SLIST_NEXT(periph, periph_links), i++) {
1262 /*
1263 * Check to see whether we have a
1264 * passthrough device or not.
1265 */
1266 if (strcmp(periph->periph_name, "pass") == 0) {
1267 /*
1268 * Fill in the getdevlist fields.
1269 */
1270 strcpy(ccb->cgdl.periph_name,
1271 periph->periph_name);
1272 ccb->cgdl.unit_number =
1273 periph->unit_number;
1274 if (SLIST_NEXT(periph, periph_links))
1275 ccb->cgdl.status =
1276 CAM_GDEVLIST_MORE_DEVS;
1277 else
1278 ccb->cgdl.status =
1279 CAM_GDEVLIST_LAST_DEVICE;
1280 ccb->cgdl.generation =
1281 device->generation;
1282 ccb->cgdl.index = i;
1283 /*
1284 * Fill in some CCB header fields
1285 * that the user may want.
1286 */
1287 ccb->ccb_h.path_id =
1288 periph->path->bus->path_id;
1289 ccb->ccb_h.target_id =
1290 periph->path->target->target_id;
1291 ccb->ccb_h.target_lun =
1292 periph->path->device->lun_id;
1293 ccb->ccb_h.status = CAM_REQ_CMP;
1294 break;
1295 }
1296 }
1297 }
1298
1299 /*
1300 * If the periph is null here, one of two things has
1301 * happened. The first possibility is that we couldn't
1302 * find the unit number of the particular peripheral driver
1303 * that the user is asking about. e.g. the user asks for
1304 * the passthrough driver for "da11". We find the list of
1305 * "da" peripherals all right, but there is no unit 11.
1306 * The other possibility is that we went through the list
1307 * of peripheral drivers attached to the device structure,
1308 * but didn't find one with the name "pass". Either way,
1309 * we return ENOENT, since we couldn't find something.
1310 */
1311 if (periph == NULL) {
1312 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1313 ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1314 *ccb->cgdl.periph_name = '\0';
1315 ccb->cgdl.unit_number = 0;
1316 error = ENOENT;
1317 /*
1318 * It is unfortunate that this is even necessary,
1319 * but there are many, many clueless users out there.
1320 * If this is true, the user is looking for the
1321 * passthrough driver, but doesn't have one in his
1322 * kernel.
1323 */
1324 if (base_periph_found == 1) {
1325 printf("xptioctl: pass driver is not in the "
1326 "kernel\n");
1327 printf("xptioctl: put \"device pass0\" in "
1328 "your kernel config file\n");
1329 }
1330 }
1331 splx(s);
1332 break;
1333 }
1334 default:
1335 error = ENOTTY;
1336 break;
1337 }
1338
1339 return(error);
1340}
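
From userland, the ioctls above are reached through /dev/xpt0, opened read-write and blocking as xptopen() requires. A minimal hypothetical sketch of a CAMGETPASSTHRU lookup (finding the pass(4) instance behind da0, assuming such a periph exists):

#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

int
main(void)
{
	union ccb ccb;
	int fd;

	if ((fd = open("/dev/xpt0", O_RDWR)) == -1)
		return (1);
	memset(&ccb, 0, sizeof(ccb));
	ccb.ccb_h.func_code = XPT_GDEVLIST;
	strcpy(ccb.cgdl.periph_name, "da");	/* hypothetical periph */
	ccb.cgdl.unit_number = 0;
	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == -1)
		return (1);
	if ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
		printf("pass device: %s%d\n", ccb.cgdl.periph_name,
		    ccb.cgdl.unit_number);
	return (0);
}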
1341
1342static int
1343cam_module_event_handler(module_t mod, int what, void *arg)
1344{
1345 if (what == MOD_LOAD) {
1346 xpt_init(NULL);
1347 } else if (what == MOD_UNLOAD) {
1348 return EBUSY;
1349 }
1350
1351 return 0;
1352}
1353
1354/* Functions accessed by the peripheral drivers */
1355static void
1356xpt_init(dummy)
1357 void *dummy;
1358{
1359 struct cam_sim *xpt_sim;
1360 struct cam_path *path;
1361 struct cam_devq *devq;
1362 cam_status status;
1363
1364 TAILQ_INIT(&xpt_busses);
1365 TAILQ_INIT(&cam_bioq);
1366 TAILQ_INIT(&cam_netq);
1367 SLIST_INIT(&ccb_freeq);
1368 STAILQ_INIT(&highpowerq);
1369
1370 /*
 1371	 * The xpt layer is, itself, the equivalent of a SIM.
1372 * Allow 16 ccbs in the ccb pool for it. This should
1373 * give decent parallelism when we probe busses and
1374 * perform other XPT functions.
1375 */
1376 devq = cam_simq_alloc(16);
1377 xpt_sim = cam_sim_alloc(xptaction,
1378 xptpoll,
1379 "xpt",
1380 /*softc*/NULL,
1381 /*unit*/0,
1382 /*max_dev_transactions*/0,
1383 /*max_tagged_dev_transactions*/0,
1384 devq);
1385 xpt_max_ccbs = 16;
1386
1387 xpt_bus_register(xpt_sim, /*bus #*/0);
1388
1389 /*
1390 * Looking at the XPT from the SIM layer, the XPT is
 1391	 * the equivalent of a peripheral driver. Allocate
1392 * a peripheral driver entry for us.
1393 */
1394 if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1395 CAM_TARGET_WILDCARD,
1396 CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1397 printf("xpt_init: xpt_create_path failed with status %#x,"
1398 " failing attach\n", status);
1399 return;
1400 }
1401
1402 cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1403 path, NULL, 0, NULL);
1404 xpt_free_path(path);
1405
1406 xpt_sim->softc = xpt_periph;
1407
1408 /*
1409 * Register a callback for when interrupts are enabled.
1410 */
1411 xpt_config_hook =
1412 (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
1413 M_TEMP, M_NOWAIT | M_ZERO);
1414 if (xpt_config_hook == NULL) {
1415 printf("xpt_init: Cannot malloc config hook "
1416 "- failing attach\n");
1417 return;
1418 }
1419
1420 xpt_config_hook->ich_func = xpt_config;
1421 if (config_intrhook_establish(xpt_config_hook) != 0) {
1422 free (xpt_config_hook, M_TEMP);
1423 printf("xpt_init: config_intrhook_establish failed "
1424 "- failing attach\n");
1425 }
1426
1427 /* Install our software interrupt handlers */
1428 swi_add(NULL, "camnet", camisr, &cam_netq, SWI_CAMNET, 0, &camnet_ih);
1429 swi_add(NULL, "cambio", camisr, &cam_bioq, SWI_CAMBIO, 0, &cambio_ih);
1430}
1431
1432static cam_status
1433xptregister(struct cam_periph *periph, void *arg)
1434{
1435 if (periph == NULL) {
1436 printf("xptregister: periph was NULL!!\n");
1437 return(CAM_REQ_CMP_ERR);
1438 }
1439
1440 periph->softc = NULL;
1441
1442 xpt_periph = periph;
1443
1444 return(CAM_REQ_CMP);
1445}
1446
1447int32_t
1448xpt_add_periph(struct cam_periph *periph)
1449{
1450 struct cam_ed *device;
1451 int32_t status;
1452 struct periph_list *periph_head;
1453
1454 device = periph->path->device;
1455
1456 periph_head = &device->periphs;
1457
1458 status = CAM_REQ_CMP;
1459
1460 if (device != NULL) {
1461 int s;
1462
1463 /*
1464 * Make room for this peripheral
1465 * so it will fit in the queue
1466 * when it's scheduled to run
1467 */
1468 s = splsoftcam();
1469 status = camq_resize(&device->drvq,
1470 device->drvq.array_size + 1);
1471
1472 device->generation++;
1473
1474 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1475
1476 splx(s);
1477 }
1478
1479 xsoftc.generation++;
1480
1481 return (status);
1482}
1483
1484void
1485xpt_remove_periph(struct cam_periph *periph)
1486{
1487 struct cam_ed *device;
1488
1489 device = periph->path->device;
1490
1491 if (device != NULL) {
1492 int s;
1493 struct periph_list *periph_head;
1494
1495 periph_head = &device->periphs;
1496
1497 /* Release the slot for this peripheral */
1498 s = splsoftcam();
1499 camq_resize(&device->drvq, device->drvq.array_size - 1);
1500
1501 device->generation++;
1502
1503 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1504
1505 splx(s);
1506 }
1507
1508 xsoftc.generation++;
1509
1510}
1511
1512#ifdef CAM_NEW_TRAN_CODE
1513
1514void
1515xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1516{
1517 struct ccb_pathinq cpi;
1518 struct ccb_trans_settings cts;
1519 struct cam_path *path;
1520 u_int speed;
1521 u_int freq;
1522 u_int mb;
1523 int s;
1524
1525 path = periph->path;
1526 /*
1527 * To ensure that this is printed in one piece,
1528 * mask out CAM interrupts.
1529 */
1530 s = splsoftcam();
1531 printf("%s%d at %s%d bus %d target %d lun %d\n",
1532 periph->periph_name, periph->unit_number,
1533 path->bus->sim->sim_name,
1534 path->bus->sim->unit_number,
1535 path->bus->sim->bus_id,
1536 path->target->target_id,
1537 path->device->lun_id);
1538 printf("%s%d: ", periph->periph_name, periph->unit_number);
1539 scsi_print_inquiry(&path->device->inq_data);
1540 if (bootverbose && path->device->serial_num_len > 0) {
1541 /* Don't wrap the screen - print only the first 60 chars */
1542 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1543 periph->unit_number, path->device->serial_num);
1544 }
1545 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1546 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1547 cts.type = CTS_TYPE_CURRENT_SETTINGS;
1548 xpt_action((union ccb*)&cts);
1549
1550 /* Ask the SIM for its base transfer speed */
1551 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1552 cpi.ccb_h.func_code = XPT_PATH_INQ;
1553 xpt_action((union ccb *)&cpi);
1554
1555 speed = cpi.base_transfer_speed;
1556 freq = 0;
1557 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1558 struct ccb_trans_settings_spi *spi;
1559
1560 spi = &cts.xport_specific.spi;
1561 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
1562 && spi->sync_offset != 0) {
1563 freq = scsi_calc_syncsrate(spi->sync_period);
1564 speed = freq;
1565 }
1566
1567 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
1568 speed *= (0x01 << spi->bus_width);
1569 }
1570
1571 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1572 struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
1573 if (fc->valid & CTS_FC_VALID_SPEED) {
1574 speed = fc->bitrate;
1575 }
1576 }
1577
1578 mb = speed / 1000;
1579 if (mb > 0)
1580 printf("%s%d: %d.%03dMB/s transfers",
1581 periph->periph_name, periph->unit_number,
1582 mb, speed % 1000);
1583 else
1584 printf("%s%d: %dKB/s transfers", periph->periph_name,
1585 periph->unit_number, speed);
1586 /* Report additional information about SPI connections */
1587 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1588 struct ccb_trans_settings_spi *spi;
1589
1590 spi = &cts.xport_specific.spi;
1591 if (freq != 0) {
1592 printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
1593 freq % 1000,
1594 (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
1595 ? " DT" : "",
1596 spi->sync_offset);
1597 }
1598 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
1599 && spi->bus_width > 0) {
1600 if (freq != 0) {
1601 printf(", ");
1602 } else {
1603 printf(" (");
1604 }
1605 printf("%dbit)", 8 * (0x01 << spi->bus_width));
1606 } else if (freq != 0) {
1607 printf(")");
1608 }
1609 }
1610 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1611 struct ccb_trans_settings_fc *fc;
1612
1613 fc = &cts.xport_specific.fc;
1614 if (fc->valid & CTS_FC_VALID_WWNN)
1615 printf(" WWNN 0x%llx", (long long) fc->wwnn);
1616 if (fc->valid & CTS_FC_VALID_WWPN)
1617 printf(" WWPN 0x%llx", (long long) fc->wwpn);
1618 if (fc->valid & CTS_FC_VALID_PORT)
1619 printf(" PortID 0x%x", fc->port);
1620 }
1621
1622 if (path->device->inq_flags & SID_CmdQue
1623 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1624 printf("\n%s%d: Tagged Queueing Enabled",
1625 periph->periph_name, periph->unit_number);
1626 }
1627 printf("\n");
1628
1629 /*
1630 * We only want to print the caller's announce string if they've
1631 * passed one in..
1632 */
1633 if (announce_string != NULL)
1634 printf("%s%d: %s\n", periph->periph_name,
1635 periph->unit_number, announce_string);
1636 splx(s);
1637}
1638#else /* CAM_NEW_TRAN_CODE */
1639void
1640xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1641{
1642 int s;
1643 u_int mb;
1644 struct cam_path *path;
1645 struct ccb_trans_settings cts;
1646
1647 path = periph->path;
1648 /*
1649 * To ensure that this is printed in one piece,
1650 * mask out CAM interrupts.
1651 */
1652 s = splsoftcam();
1653 printf("%s%d at %s%d bus %d target %d lun %d\n",
1654 periph->periph_name, periph->unit_number,
1655 path->bus->sim->sim_name,
1656 path->bus->sim->unit_number,
1657 path->bus->sim->bus_id,
1658 path->target->target_id,
1659 path->device->lun_id);
1660 printf("%s%d: ", periph->periph_name, periph->unit_number);
1661 scsi_print_inquiry(&path->device->inq_data);
1662 if ((bootverbose)
1663 && (path->device->serial_num_len > 0)) {
1664 /* Don't wrap the screen - print only the first 60 chars */
1665 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1666 periph->unit_number, path->device->serial_num);
1667 }
1668 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1669 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1670 cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1671 xpt_action((union ccb*)&cts);
1672 if (cts.ccb_h.status == CAM_REQ_CMP) {
1673 u_int speed;
1674 u_int freq;
1675
1676 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1677 && cts.sync_offset != 0) {
1678 freq = scsi_calc_syncsrate(cts.sync_period);
1679 speed = freq;
1680 } else {
1681 struct ccb_pathinq cpi;
1682
1683 /* Ask the SIM for its base transfer speed */
1684 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1685 cpi.ccb_h.func_code = XPT_PATH_INQ;
1686 xpt_action((union ccb *)&cpi);
1687
1688 speed = cpi.base_transfer_speed;
1689 freq = 0;
1690 }
1691 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
1692 speed *= (0x01 << cts.bus_width);
1693 mb = speed / 1000;
1694 if (mb > 0)
1695 printf("%s%d: %d.%03dMB/s transfers",
1696 periph->periph_name, periph->unit_number,
1697 mb, speed % 1000);
1698 else
1699 printf("%s%d: %dKB/s transfers", periph->periph_name,
1700 periph->unit_number, speed);
1701 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1702 && cts.sync_offset != 0) {
1703 printf(" (%d.%03dMHz, offset %d", freq / 1000,
1704 freq % 1000, cts.sync_offset);
1705 }
1706 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
1707 && cts.bus_width > 0) {
1708 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1709 && cts.sync_offset != 0) {
1710 printf(", ");
1711 } else {
1712 printf(" (");
1713 }
1714 printf("%dbit)", 8 * (0x01 << cts.bus_width));
1715 } else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1716 && cts.sync_offset != 0) {
1717 printf(")");
1718 }
1719
1720 if (path->device->inq_flags & SID_CmdQue
1721 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1722 printf(", Tagged Queueing Enabled");
1723 }
1724
1725 printf("\n");
1726 } else if (path->device->inq_flags & SID_CmdQue
1727 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1728 printf("%s%d: Tagged Queueing Enabled\n",
1729 periph->periph_name, periph->unit_number);
1730 }
1731
1732 /*
1733 * We only want to print the caller's announce string if they've
1734 * passed one in..
1735 */
1736 if (announce_string != NULL)
1737 printf("%s%d: %s\n", periph->periph_name,
1738 periph->unit_number, announce_string);
1739 splx(s);
1740}
1741
1742#endif /* CAM_NEW_TRAN_CODE */
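
Both variants of xpt_announce_periph() share the same arithmetic: scsi_calc_syncsrate() returns the sync frequency in kHz, a narrow (8-bit) SPI bus moves one byte per cycle so speed starts out equal to freq in KB/s, each step of bus_width doubles it, and the result is split into whole MB and thousandths for printing. Worked through for a hypothetical Ultra2 wide disk:

/*
 * Hypothetical Ultra2 wide example:
 *   sync period factor 10      -> scsi_calc_syncsrate() == 40000 (kHz)
 *   speed = freq               -> 40000 KB/s on an 8-bit bus
 *   bus_width == 1 (16 bits)   -> speed *= (0x01 << 1) == 80000 KB/s
 *   mb = speed / 1000 == 80    -> printf("80.000MB/s transfers")
 */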
1743
1744static dev_match_ret
1745xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1746 struct cam_eb *bus)
1747{
1748 dev_match_ret retval;
1749 int i;
1750
1751 retval = DM_RET_NONE;
1752
1753 /*
1754 * If we aren't given something to match against, that's an error.
1755 */
1756 if (bus == NULL)
1757 return(DM_RET_ERROR);
1758
1759 /*
1760 * If there are no match entries, then this bus matches no
1761 * matter what.
1762 */
1763 if ((patterns == NULL) || (num_patterns == 0))
1764 return(DM_RET_DESCEND | DM_RET_COPY);
1765
1766 for (i = 0; i < num_patterns; i++) {
1767 struct bus_match_pattern *cur_pattern;
1768
1769 /*
1770 * If the pattern in question isn't for a bus node, we
1771 * aren't interested. However, we do indicate to the
1772 * calling routine that we should continue descending the
1773 * tree, since the user wants to match against lower-level
1774 * EDT elements.
1775 */
1776 if (patterns[i].type != DEV_MATCH_BUS) {
1777 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1778 retval |= DM_RET_DESCEND;
1779 continue;
1780 }
1781
1782 cur_pattern = &patterns[i].pattern.bus_pattern;
1783
1784 /*
1785 * If they want to match any bus node, we give them any
 1786	 * bus node.
1787 */
1788 if (cur_pattern->flags == BUS_MATCH_ANY) {
1789 /* set the copy flag */
1790 retval |= DM_RET_COPY;
1791
1792 /*
1793 * If we've already decided on an action, go ahead
1794 * and return.
1795 */
1796 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1797 return(retval);
1798 }
1799
1800 /*
1801 * Not sure why someone would do this...
1802 */
1803 if (cur_pattern->flags == BUS_MATCH_NONE)
1804 continue;
1805
1806 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1807 && (cur_pattern->path_id != bus->path_id))
1808 continue;
1809
1810 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1811 && (cur_pattern->bus_id != bus->sim->bus_id))
1812 continue;
1813
1814 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1815 && (cur_pattern->unit_number != bus->sim->unit_number))
1816 continue;
1817
1818 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1819 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1820 DEV_IDLEN) != 0))
1821 continue;
1822
1823 /*
1824 * If we get to this point, the user definitely wants
1825 * information on this bus. So tell the caller to copy the
1826 * data out.
1827 */
1828 retval |= DM_RET_COPY;
1829
1830 /*
1831 * If the return action has been set to descend, then we
1832 * know that we've already seen a non-bus matching
1833 * expression, therefore we need to further descend the tree.
1834 * This won't change by continuing around the loop, so we
1835 * go ahead and return. If we haven't seen a non-bus
1836 * matching expression, we keep going around the loop until
1837 * we exhaust the matching expressions. We'll set the stop
1838 * flag once we fall out of the loop.
1839 */
1840 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1841 return(retval);
1842 }
1843
1844 /*
1845 * If the return action hasn't been set to descend yet, that means
1846 * we haven't seen anything other than bus matching patterns. So
1847 * tell the caller to stop descending the tree -- the user doesn't
1848 * want to match against lower level tree elements.
1849 */
1850 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1851 retval |= DM_RET_STOP;
1852
1853 return(retval);
1854}
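/*
 * Illustrative sketch only (the XPT_MATCH_EXAMPLE guard and the function
 * below are hypothetical, not part of this file): how a caller might fill
 * in a single pattern so that xptbusmatch() above reports DM_RET_COPY only
 * for unit 0 of a SIM named "ahc". The name and unit are made-up values.
 */
#ifdef XPT_MATCH_EXAMPLE
static void
xpt_example_bus_pattern(struct dev_match_pattern *pattern)
{
	bzero(pattern, sizeof(*pattern));
	pattern->type = DEV_MATCH_BUS;
	pattern->pattern.bus_pattern.flags =
	    BUS_MATCH_NAME | BUS_MATCH_UNIT;
	strncpy(pattern->pattern.bus_pattern.dev_name, "ahc", DEV_IDLEN);
	pattern->pattern.bus_pattern.unit_number = 0;
}
#endif /* XPT_MATCH_EXAMPLE */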
1855
1856static dev_match_ret
1857xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1858 struct cam_ed *device)
1859{
1860 dev_match_ret retval;
1861 int i;
1862
1863 retval = DM_RET_NONE;
1864
1865 /*
1866 * If we aren't given something to match against, that's an error.
1867 */
1868 if (device == NULL)
1869 return(DM_RET_ERROR);
1870
1871 /*
1872 * If there are no match entries, then this device matches no
1873 * matter what.
1874 */
1875	if ((patterns == NULL) || (num_patterns == 0))
1876 return(DM_RET_DESCEND | DM_RET_COPY);
1877
1878 for (i = 0; i < num_patterns; i++) {
1879 struct device_match_pattern *cur_pattern;
1880
1881 /*
1882 * If the pattern in question isn't for a device node, we
1883 * aren't interested.
1884 */
1885 if (patterns[i].type != DEV_MATCH_DEVICE) {
1886 if ((patterns[i].type == DEV_MATCH_PERIPH)
1887 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1888 retval |= DM_RET_DESCEND;
1889 continue;
1890 }
1891
1892 cur_pattern = &patterns[i].pattern.device_pattern;
1893
1894 /*
1895 * If they want to match any device node, we give them any
1896 * device node.
1897 */
1898 if (cur_pattern->flags == DEV_MATCH_ANY) {
1899 /* set the copy flag */
1900 retval |= DM_RET_COPY;
1901
1902
1903 /*
1904 * If we've already decided on an action, go ahead
1905 * and return.
1906 */
1907 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1908 return(retval);
1909 }
1910
1911 /*
1912 * Not sure why someone would do this...
1913 */
1914 if (cur_pattern->flags == DEV_MATCH_NONE)
1915 continue;
1916
1917 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1918 && (cur_pattern->path_id != device->target->bus->path_id))
1919 continue;
1920
1921 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1922 && (cur_pattern->target_id != device->target->target_id))
1923 continue;
1924
1925 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1926 && (cur_pattern->target_lun != device->lun_id))
1927 continue;
1928
1929 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1930 && (cam_quirkmatch((caddr_t)&device->inq_data,
1931 (caddr_t)&cur_pattern->inq_pat,
1932 1, sizeof(cur_pattern->inq_pat),
1933 scsi_static_inquiry_match) == NULL))
1934 continue;
1935
1936 /*
1937 * If we get to this point, the user definitely wants
1938 * information on this device. So tell the caller to copy
1939 * the data out.
1940 */
1941 retval |= DM_RET_COPY;
1942
1943 /*
1944 * If the return action has been set to descend, then we
1945 * know that we've already seen a peripheral matching
1946 * expression, therefore we need to further descend the tree.
1947 * This won't change by continuing around the loop, so we
1948 * go ahead and return. If we haven't seen a peripheral
1949 * matching expression, we keep going around the loop until
1950 * we exhaust the matching expressions. We'll set the stop
1951 * flag once we fall out of the loop.
1952 */
1953 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1954 return(retval);
1955 }
1956
1957 /*
1958 * If the return action hasn't been set to descend yet, that means
1959 * we haven't seen any peripheral matching patterns. So tell the
1960 * caller to stop descending the tree -- the user doesn't want to
1961 * match against lower level tree elements.
1962 */
1963 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1964 retval |= DM_RET_STOP;
1965
1966 return(retval);
1967}
1968
1969/*
1970 * Match a single peripheral against any number of match patterns.
1971 */
1972static dev_match_ret
1973xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1974 struct cam_periph *periph)
1975{
1976 dev_match_ret retval;
1977 int i;
1978
1979 /*
1980 * If we aren't given something to match against, that's an error.
1981 */
1982 if (periph == NULL)
1983 return(DM_RET_ERROR);
1984
1985 /*
1986 * If there are no match entries, then this peripheral matches no
1987 * matter what.
1988 */
1989 if ((patterns == NULL) || (num_patterns == 0))
1990 return(DM_RET_STOP | DM_RET_COPY);
1991
1992 /*
1993 * There aren't any nodes below a peripheral node, so there's no
1994 * reason to descend the tree any further.
1995 */
1996 retval = DM_RET_STOP;
1997
1998 for (i = 0; i < num_patterns; i++) {
1999 struct periph_match_pattern *cur_pattern;
2000
2001 /*
2002 * If the pattern in question isn't for a peripheral, we
2003 * aren't interested.
2004 */
2005 if (patterns[i].type != DEV_MATCH_PERIPH)
2006 continue;
2007
2008 cur_pattern = &patterns[i].pattern.periph_pattern;
2009
2010 /*
2011 * If they want to match on anything, then we will do so.
2012 */
2013 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
2014 /* set the copy flag */
2015 retval |= DM_RET_COPY;
2016
2017 /*
2018 * We've already set the return action to stop,
2019 * since there are no nodes below peripherals in
2020 * the tree.
2021 */
2022 return(retval);
2023 }
2024
2025 /*
2026 * Not sure why someone would do this...
2027 */
2028 if (cur_pattern->flags == PERIPH_MATCH_NONE)
2029 continue;
2030
2031 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
2032 && (cur_pattern->path_id != periph->path->bus->path_id))
2033 continue;
2034
2035 /*
2036 * For the target and lun id's, we have to make sure the
2037 * target and lun pointers aren't NULL. The xpt peripheral
2038 * has a wildcard target and device.
2039 */
2040 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
2041 && ((periph->path->target == NULL)
2042 ||(cur_pattern->target_id != periph->path->target->target_id)))
2043 continue;
2044
2045 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
2046 && ((periph->path->device == NULL)
2047 || (cur_pattern->target_lun != periph->path->device->lun_id)))
2048 continue;
2049
2050 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
2051 && (cur_pattern->unit_number != periph->unit_number))
2052 continue;
2053
2054 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
2055 && (strncmp(cur_pattern->periph_name, periph->periph_name,
2056 DEV_IDLEN) != 0))
2057 continue;
2058
2059 /*
2060 * If we get to this point, the user definitely wants
2061 * information on this peripheral. So tell the caller to
2062 * copy the data out.
2063 */
2064 retval |= DM_RET_COPY;
2065
2066 /*
2067 * The return action has already been set to stop, since
2068 * peripherals don't have any nodes below them in the EDT.
2069 */
2070 return(retval);
2071 }
2072
2073 /*
2074 * If we get to this point, the peripheral that was passed in
2075 * doesn't match any of the patterns.
2076 */
2077 return(retval);
2078}
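/*
 * Illustrative sketch only (hypothetical guard and function): a pattern
 * that makes xptperiphmatch() above copy out every instance of the "da"
 * driver, regardless of bus, target, lun, or unit number.
 */
#ifdef XPT_MATCH_EXAMPLE
static void
xpt_example_periph_pattern(struct dev_match_pattern *pattern)
{
	bzero(pattern, sizeof(*pattern));
	pattern->type = DEV_MATCH_PERIPH;
	pattern->pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
	strncpy(pattern->pattern.periph_pattern.periph_name, "da",
		DEV_IDLEN);
}
#endif /* XPT_MATCH_EXAMPLE */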
2079
2080static int
2081xptedtbusfunc(struct cam_eb *bus, void *arg)
2082{
2083 struct ccb_dev_match *cdm;
2084 dev_match_ret retval;
2085
2086 cdm = (struct ccb_dev_match *)arg;
2087
2088 /*
2089 * If our position is for something deeper in the tree, that means
2090 * that we've already seen this node. So, we keep going down.
2091 */
2092 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2093 && (cdm->pos.cookie.bus == bus)
2094 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2095 && (cdm->pos.cookie.target != NULL))
2096 retval = DM_RET_DESCEND;
2097 else
2098 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
2099
2100 /*
2101 * If we got an error, bail out of the search.
2102 */
2103 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2104 cdm->status = CAM_DEV_MATCH_ERROR;
2105 return(0);
2106 }
2107
2108 /*
2109 * If the copy flag is set, copy this bus out.
2110 */
2111 if (retval & DM_RET_COPY) {
2112 int spaceleft, j;
2113
2114 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2115 sizeof(struct dev_match_result));
2116
2117 /*
2118 * If we don't have enough space to put in another
2119 * match result, save our position and tell the
2120 * user there are more devices to check.
2121 */
2122 if (spaceleft < sizeof(struct dev_match_result)) {
2123 bzero(&cdm->pos, sizeof(cdm->pos));
2124 cdm->pos.position_type =
2125 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
2126
2127 cdm->pos.cookie.bus = bus;
2128			cdm->pos.generations[CAM_BUS_GENERATION] =
2129 bus_generation;
2130 cdm->status = CAM_DEV_MATCH_MORE;
2131 return(0);
2132 }
2133 j = cdm->num_matches;
2134 cdm->num_matches++;
2135 cdm->matches[j].type = DEV_MATCH_BUS;
2136 cdm->matches[j].result.bus_result.path_id = bus->path_id;
2137 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
2138 cdm->matches[j].result.bus_result.unit_number =
2139 bus->sim->unit_number;
2140 strncpy(cdm->matches[j].result.bus_result.dev_name,
2141 bus->sim->sim_name, DEV_IDLEN);
2142 }
2143
2144 /*
2145 * If the user is only interested in busses, there's no
2146 * reason to descend to the next level in the tree.
2147 */
2148 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2149 return(1);
2150
2151 /*
2152 * If there is a target generation recorded, check it to
2153 * make sure the target list hasn't changed.
2154 */
2155 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2156 && (bus == cdm->pos.cookie.bus)
2157 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2158 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
2159 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
2160 bus->generation)) {
2161 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2162 return(0);
2163 }
2164
2165 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2166 && (cdm->pos.cookie.bus == bus)
2167 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2168 && (cdm->pos.cookie.target != NULL))
2169 return(xpttargettraverse(bus,
2170 (struct cam_et *)cdm->pos.cookie.target,
2171 xptedttargetfunc, arg));
2172 else
2173 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
2174}
2175
2176static int
2177xptedttargetfunc(struct cam_et *target, void *arg)
2178{
2179 struct ccb_dev_match *cdm;
2180
2181 cdm = (struct ccb_dev_match *)arg;
2182
2183 /*
2184 * If there is a device list generation recorded, check it to
2185 * make sure the device list hasn't changed.
2186 */
2187 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2188 && (cdm->pos.cookie.bus == target->bus)
2189 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2190 && (cdm->pos.cookie.target == target)
2191 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2192 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2193 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2194 target->generation)) {
2195 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2196 return(0);
2197 }
2198
2199 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2200 && (cdm->pos.cookie.bus == target->bus)
2201 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2202 && (cdm->pos.cookie.target == target)
2203 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2204 && (cdm->pos.cookie.device != NULL))
2205 return(xptdevicetraverse(target,
2206 (struct cam_ed *)cdm->pos.cookie.device,
2207 xptedtdevicefunc, arg));
2208 else
2209 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2210}
2211
2212static int
2213xptedtdevicefunc(struct cam_ed *device, void *arg)
2214{
2215
2216 struct ccb_dev_match *cdm;
2217 dev_match_ret retval;
2218
2219 cdm = (struct ccb_dev_match *)arg;
2220
2221 /*
2222 * If our position is for something deeper in the tree, that means
2223 * that we've already seen this node. So, we keep going down.
2224 */
2225 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2226 && (cdm->pos.cookie.device == device)
2227 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2228 && (cdm->pos.cookie.periph != NULL))
2229 retval = DM_RET_DESCEND;
2230 else
2231 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2232 device);
2233
2234 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2235 cdm->status = CAM_DEV_MATCH_ERROR;
2236 return(0);
2237 }
2238
2239 /*
2240 * If the copy flag is set, copy this device out.
2241 */
2242 if (retval & DM_RET_COPY) {
2243 int spaceleft, j;
2244
2245 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2246 sizeof(struct dev_match_result));
2247
2248 /*
2249 * If we don't have enough space to put in another
2250 * match result, save our position and tell the
2251 * user there are more devices to check.
2252 */
2253 if (spaceleft < sizeof(struct dev_match_result)) {
2254 bzero(&cdm->pos, sizeof(cdm->pos));
2255 cdm->pos.position_type =
2256 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2257 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2258
2259 cdm->pos.cookie.bus = device->target->bus;
2260			cdm->pos.generations[CAM_BUS_GENERATION] =
2261 bus_generation;
2262 cdm->pos.cookie.target = device->target;
2263 cdm->pos.generations[CAM_TARGET_GENERATION] =
2264 device->target->bus->generation;
2265 cdm->pos.cookie.device = device;
2266 cdm->pos.generations[CAM_DEV_GENERATION] =
2267 device->target->generation;
2268 cdm->status = CAM_DEV_MATCH_MORE;
2269 return(0);
2270 }
2271 j = cdm->num_matches;
2272 cdm->num_matches++;
2273 cdm->matches[j].type = DEV_MATCH_DEVICE;
2274 cdm->matches[j].result.device_result.path_id =
2275 device->target->bus->path_id;
2276 cdm->matches[j].result.device_result.target_id =
2277 device->target->target_id;
2278 cdm->matches[j].result.device_result.target_lun =
2279 device->lun_id;
2280 bcopy(&device->inq_data,
2281 &cdm->matches[j].result.device_result.inq_data,
2282 sizeof(struct scsi_inquiry_data));
2283
2284 /* Let the user know whether this device is unconfigured */
2285 if (device->flags & CAM_DEV_UNCONFIGURED)
2286 cdm->matches[j].result.device_result.flags =
2287 DEV_RESULT_UNCONFIGURED;
2288 else
2289 cdm->matches[j].result.device_result.flags =
2290 DEV_RESULT_NOFLAG;
2291 }
2292
2293 /*
2294 * If the user isn't interested in peripherals, don't descend
2295 * the tree any further.
2296 */
2297 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2298 return(1);
2299
2300 /*
2301 * If there is a peripheral list generation recorded, make sure
2302 * it hasn't changed.
2303 */
2304 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2305 && (device->target->bus == cdm->pos.cookie.bus)
2306 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2307 && (device->target == cdm->pos.cookie.target)
2308 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2309 && (device == cdm->pos.cookie.device)
2310 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2311 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2312 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2313 device->generation)){
2314 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2315 return(0);
2316 }
2317
2318 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2319 && (cdm->pos.cookie.bus == device->target->bus)
2320 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2321 && (cdm->pos.cookie.target == device->target)
2322 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2323 && (cdm->pos.cookie.device == device)
2324 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2325 && (cdm->pos.cookie.periph != NULL))
2326 return(xptperiphtraverse(device,
2327 (struct cam_periph *)cdm->pos.cookie.periph,
2328 xptedtperiphfunc, arg));
2329 else
2330 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2331}
2332
2333static int
2334xptedtperiphfunc(struct cam_periph *periph, void *arg)
2335{
2336 struct ccb_dev_match *cdm;
2337 dev_match_ret retval;
2338
2339 cdm = (struct ccb_dev_match *)arg;
2340
2341 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2342
2343 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2344 cdm->status = CAM_DEV_MATCH_ERROR;
2345 return(0);
2346 }
2347
2348 /*
2349 * If the copy flag is set, copy this peripheral out.
2350 */
2351 if (retval & DM_RET_COPY) {
2352 int spaceleft, j;
2353
2354 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2355 sizeof(struct dev_match_result));
2356
2357 /*
2358 * If we don't have enough space to put in another
2359 * match result, save our position and tell the
2360 * user there are more devices to check.
2361 */
2362 if (spaceleft < sizeof(struct dev_match_result)) {
2363 bzero(&cdm->pos, sizeof(cdm->pos));
2364 cdm->pos.position_type =
2365 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2366 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2367 CAM_DEV_POS_PERIPH;
2368
2369 cdm->pos.cookie.bus = periph->path->bus;
2370			cdm->pos.generations[CAM_BUS_GENERATION] =
2371 bus_generation;
2372 cdm->pos.cookie.target = periph->path->target;
2373 cdm->pos.generations[CAM_TARGET_GENERATION] =
2374 periph->path->bus->generation;
2375 cdm->pos.cookie.device = periph->path->device;
2376 cdm->pos.generations[CAM_DEV_GENERATION] =
2377 periph->path->target->generation;
2378 cdm->pos.cookie.periph = periph;
2379 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2380 periph->path->device->generation;
2381 cdm->status = CAM_DEV_MATCH_MORE;
2382 return(0);
2383 }
2384
2385 j = cdm->num_matches;
2386 cdm->num_matches++;
2387 cdm->matches[j].type = DEV_MATCH_PERIPH;
2388 cdm->matches[j].result.periph_result.path_id =
2389 periph->path->bus->path_id;
2390 cdm->matches[j].result.periph_result.target_id =
2391 periph->path->target->target_id;
2392 cdm->matches[j].result.periph_result.target_lun =
2393 periph->path->device->lun_id;
2394 cdm->matches[j].result.periph_result.unit_number =
2395 periph->unit_number;
2396 strncpy(cdm->matches[j].result.periph_result.periph_name,
2397 periph->periph_name, DEV_IDLEN);
2398 }
2399
2400 return(1);
2401}
2402
2403static int
2404xptedtmatch(struct ccb_dev_match *cdm)
2405{
2406 int ret;
2407
2408 cdm->num_matches = 0;
2409
2410 /*
2411 * Check the bus list generation. If it has changed, the user
2412 * needs to reset everything and start over.
2413 */
2414 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2415 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2416 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2417 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2418 return(0);
2419 }
2420
2421 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2422 && (cdm->pos.cookie.bus != NULL))
2423 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2424 xptedtbusfunc, cdm);
2425 else
2426 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2427
2428 /*
2429 * If we get back 0, that means that we had to stop before fully
2430 * traversing the EDT. It also means that one of the subroutines
2431 * has set the status field to the proper value. If we get back 1,
2432 * we've fully traversed the EDT and copied out any matching entries.
2433 */
2434 if (ret == 1)
2435 cdm->status = CAM_DEV_MATCH_LAST;
2436
2437 return(ret);
2438}
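/*
 * Illustrative sketch only (hypothetical guard and function): the consumer
 * side of the contract implemented above. It assumes the caller has
 * already set ccb_h.func_code to XPT_DEV_MATCH and filled in the patterns
 * and match buffer. The same ccb is resubmitted until the traversal
 * reports CAM_DEV_MATCH_LAST; the position saved in cdm->pos lets each
 * pass resume where the previous one ran out of buffer space. Error and
 * LIST_CHANGED handling are elided.
 */
#ifdef XPT_MATCH_EXAMPLE
static void
xpt_example_match_loop(union ccb *ccb)
{
	do {
		xpt_action(ccb);
		/* ccb->cdm.matches[0..num_matches-1] are valid here. */
	} while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
	      && ccb->cdm.status == CAM_DEV_MATCH_MORE);
}
#endif /* XPT_MATCH_EXAMPLE */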
2439
2440static int
2441xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2442{
2443 struct ccb_dev_match *cdm;
2444
2445 cdm = (struct ccb_dev_match *)arg;
2446
2447 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2448 && (cdm->pos.cookie.pdrv == pdrv)
2449 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2450 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2451 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2452 (*pdrv)->generation)) {
2453 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2454 return(0);
2455 }
2456
2457 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2458 && (cdm->pos.cookie.pdrv == pdrv)
2459 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2460 && (cdm->pos.cookie.periph != NULL))
2461 return(xptpdperiphtraverse(pdrv,
2462 (struct cam_periph *)cdm->pos.cookie.periph,
2463 xptplistperiphfunc, arg));
2464 else
2465		return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
2466}
2467
2468static int
2469xptplistperiphfunc(struct cam_periph *periph, void *arg)
2470{
2471 struct ccb_dev_match *cdm;
2472 dev_match_ret retval;
2473
2474 cdm = (struct ccb_dev_match *)arg;
2475
2476 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2477
2478 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2479 cdm->status = CAM_DEV_MATCH_ERROR;
2480 return(0);
2481 }
2482
2483 /*
2484 * If the copy flag is set, copy this peripheral out.
2485 */
2486 if (retval & DM_RET_COPY) {
2487 int spaceleft, j;
2488
2489 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2490 sizeof(struct dev_match_result));
2491
2492 /*
2493 * If we don't have enough space to put in another
2494 * match result, save our position and tell the
2495 * user there are more devices to check.
2496 */
2497 if (spaceleft < sizeof(struct dev_match_result)) {
2498 struct periph_driver **pdrv;
2499
2500 pdrv = NULL;
2501 bzero(&cdm->pos, sizeof(cdm->pos));
2502 cdm->pos.position_type =
2503 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2504 CAM_DEV_POS_PERIPH;
2505
2506 /*
2507			 * This may look a bit nonsensical, but it is
2508 * actually quite logical. There are very few
2509 * peripheral drivers, and bloating every peripheral
2510 * structure with a pointer back to its parent
2511 * peripheral driver linker set entry would cost
2512 * more in the long run than doing this quick lookup.
2513 */
2514 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2515 if (strcmp((*pdrv)->driver_name,
2516 periph->periph_name) == 0)
2517 break;
2518 }
2519
2520			if (*pdrv == NULL) {
2521 cdm->status = CAM_DEV_MATCH_ERROR;
2522 return(0);
2523 }
2524
2525 cdm->pos.cookie.pdrv = pdrv;
2526 /*
2527 * The periph generation slot does double duty, as
2528 * does the periph pointer slot. They are used for
2529 * both edt and pdrv lookups and positioning.
2530 */
2531 cdm->pos.cookie.periph = periph;
2532 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2533 (*pdrv)->generation;
2534 cdm->status = CAM_DEV_MATCH_MORE;
2535 return(0);
2536 }
2537
2538 j = cdm->num_matches;
2539 cdm->num_matches++;
2540 cdm->matches[j].type = DEV_MATCH_PERIPH;
2541 cdm->matches[j].result.periph_result.path_id =
2542 periph->path->bus->path_id;
2543
2544 /*
2545 * The transport layer peripheral doesn't have a target or
2546 * lun.
2547 */
2548 if (periph->path->target)
2549 cdm->matches[j].result.periph_result.target_id =
2550 periph->path->target->target_id;
2551 else
2552 cdm->matches[j].result.periph_result.target_id = -1;
2553
2554 if (periph->path->device)
2555 cdm->matches[j].result.periph_result.target_lun =
2556 periph->path->device->lun_id;
2557 else
2558 cdm->matches[j].result.periph_result.target_lun = -1;
2559
2560 cdm->matches[j].result.periph_result.unit_number =
2561 periph->unit_number;
2562 strncpy(cdm->matches[j].result.periph_result.periph_name,
2563 periph->periph_name, DEV_IDLEN);
2564 }
2565
2566 return(1);
2567}
2568
2569static int
2570xptperiphlistmatch(struct ccb_dev_match *cdm)
2571{
2572 int ret;
2573
2574 cdm->num_matches = 0;
2575
2576 /*
2577 * At this point in the edt traversal function, we check the bus
2578 * list generation to make sure that no busses have been added or
2579 * removed since the user last sent a XPT_DEV_MATCH ccb through.
2580 * For the peripheral driver list traversal function, however, we
2581 * don't have to worry about new peripheral driver types coming or
2582 * going; they're in a linker set, and therefore can't change
2583 * without a recompile.
2584 */
2585
2586 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2587 && (cdm->pos.cookie.pdrv != NULL))
2588 ret = xptpdrvtraverse(
2589 (struct periph_driver **)cdm->pos.cookie.pdrv,
2590 xptplistpdrvfunc, cdm);
2591 else
2592 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2593
2594 /*
2595 * If we get back 0, that means that we had to stop before fully
2596 * traversing the peripheral driver tree. It also means that one of
2597 * the subroutines has set the status field to the proper value. If
2598	 * we get back 1, we've fully traversed the peripheral driver list
2599	 * and copied out any matching entries.
2600 */
2601 if (ret == 1)
2602 cdm->status = CAM_DEV_MATCH_LAST;
2603
2604 return(ret);
2605}
2606
2607static int
2608xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2609{
2610 struct cam_eb *bus, *next_bus;
2611 int retval;
2612
2613 retval = 1;
2614
2615 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2616 bus != NULL;
2617 bus = next_bus) {
2618 next_bus = TAILQ_NEXT(bus, links);
2619
2620 retval = tr_func(bus, arg);
2621 if (retval == 0)
2622 return(retval);
2623 }
2624
2625 return(retval);
2626}
2627
2628static int
2629xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2630 xpt_targetfunc_t *tr_func, void *arg)
2631{
2632 struct cam_et *target, *next_target;
2633 int retval;
2634
2635 retval = 1;
2636 for (target = (start_target ? start_target :
2637 TAILQ_FIRST(&bus->et_entries));
2638 target != NULL; target = next_target) {
2639
2640 next_target = TAILQ_NEXT(target, links);
2641
2642 retval = tr_func(target, arg);
2643
2644 if (retval == 0)
2645 return(retval);
2646 }
2647
2648 return(retval);
2649}
2650
2651static int
2652xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2653 xpt_devicefunc_t *tr_func, void *arg)
2654{
2655 struct cam_ed *device, *next_device;
2656 int retval;
2657
2658 retval = 1;
2659 for (device = (start_device ? start_device :
2660 TAILQ_FIRST(&target->ed_entries));
2661 device != NULL;
2662 device = next_device) {
2663
2664 next_device = TAILQ_NEXT(device, links);
2665
2666 retval = tr_func(device, arg);
2667
2668 if (retval == 0)
2669 return(retval);
2670 }
2671
2672 return(retval);
2673}
2674
2675static int
2676xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2677 xpt_periphfunc_t *tr_func, void *arg)
2678{
2679 struct cam_periph *periph, *next_periph;
2680 int retval;
2681
2682 retval = 1;
2683
2684 for (periph = (start_periph ? start_periph :
2685 SLIST_FIRST(&device->periphs));
2686 periph != NULL;
2687 periph = next_periph) {
2688
2689 next_periph = SLIST_NEXT(periph, periph_links);
2690
2691 retval = tr_func(periph, arg);
2692 if (retval == 0)
2693 return(retval);
2694 }
2695
2696 return(retval);
2697}
2698
2699static int
2700xptpdrvtraverse(struct periph_driver **start_pdrv,
2701 xpt_pdrvfunc_t *tr_func, void *arg)
2702{
2703 struct periph_driver **pdrv;
2704 int retval;
2705
2706 retval = 1;
2707
2708 /*
2709 * We don't traverse the peripheral driver list like we do the
2710 * other lists, because it is a linker set, and therefore cannot be
2711 * changed during runtime. If the peripheral driver list is ever
2712 * re-done to be something other than a linker set (i.e. it can
2713 * change while the system is running), the list traversal should
2714 * be modified to work like the other traversal functions.
2715 */
2716 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2717 *pdrv != NULL; pdrv++) {
2718 retval = tr_func(pdrv, arg);
2719
2720 if (retval == 0)
2721 return(retval);
2722 }
2723
2724 return(retval);
2725}
2726
2727static int
2728xptpdperiphtraverse(struct periph_driver **pdrv,
2729 struct cam_periph *start_periph,
2730 xpt_periphfunc_t *tr_func, void *arg)
2731{
2732 struct cam_periph *periph, *next_periph;
2733 int retval;
2734
2735 retval = 1;
2736
2737 for (periph = (start_periph ? start_periph :
2738 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2739 periph = next_periph) {
2740
2741 next_periph = TAILQ_NEXT(periph, unit_links);
2742
2743 retval = tr_func(periph, arg);
2744 if (retval == 0)
2745 return(retval);
2746 }
2747 return(retval);
2748}
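/*
 * Note on the traversal routines above: each one caches the next list
 * element before invoking tr_func, presumably so that a callback may
 * unlink the node it was handed without derailing the walk. The
 * linker-set walk in xptpdrvtraverse() gets away without this, since
 * that list never changes at runtime.
 */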
2749
2750static int
2751xptdefbusfunc(struct cam_eb *bus, void *arg)
2752{
2753 struct xpt_traverse_config *tr_config;
2754
2755 tr_config = (struct xpt_traverse_config *)arg;
2756
2757 if (tr_config->depth == XPT_DEPTH_BUS) {
2758 xpt_busfunc_t *tr_func;
2759
2760 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2761
2762 return(tr_func(bus, tr_config->tr_arg));
2763 } else
2764 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2765}
2766
2767static int
2768xptdeftargetfunc(struct cam_et *target, void *arg)
2769{
2770 struct xpt_traverse_config *tr_config;
2771
2772 tr_config = (struct xpt_traverse_config *)arg;
2773
2774 if (tr_config->depth == XPT_DEPTH_TARGET) {
2775 xpt_targetfunc_t *tr_func;
2776
2777 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2778
2779 return(tr_func(target, tr_config->tr_arg));
2780 } else
2781 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2782}
2783
2784static int
2785xptdefdevicefunc(struct cam_ed *device, void *arg)
2786{
2787 struct xpt_traverse_config *tr_config;
2788
2789 tr_config = (struct xpt_traverse_config *)arg;
2790
2791 if (tr_config->depth == XPT_DEPTH_DEVICE) {
2792 xpt_devicefunc_t *tr_func;
2793
2794 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2795
2796 return(tr_func(device, tr_config->tr_arg));
2797 } else
2798 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2799}
2800
2801static int
2802xptdefperiphfunc(struct cam_periph *periph, void *arg)
2803{
2804 struct xpt_traverse_config *tr_config;
2805 xpt_periphfunc_t *tr_func;
2806
2807 tr_config = (struct xpt_traverse_config *)arg;
2808
2809 tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2810
2811 /*
2812 * Unlike the other default functions, we don't check for depth
2813 * here. The peripheral driver level is the last level in the EDT,
2814 * so if we're here, we should execute the function in question.
2815 */
2816 return(tr_func(periph, tr_config->tr_arg));
2817}
2818
2819/*
2820 * Execute the given function for every bus in the EDT.
2821 */
2822static int
2823xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2824{
2825 struct xpt_traverse_config tr_config;
2826
2827 tr_config.depth = XPT_DEPTH_BUS;
2828 tr_config.tr_func = tr_func;
2829 tr_config.tr_arg = arg;
2830
2831 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2832}
2833
2834#ifdef notusedyet
2835/*
2836 * Execute the given function for every target in the EDT.
2837 */
2838static int
2839xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2840{
2841 struct xpt_traverse_config tr_config;
2842
2843 tr_config.depth = XPT_DEPTH_TARGET;
2844 tr_config.tr_func = tr_func;
2845 tr_config.tr_arg = arg;
2846
2847 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2848}
2849#endif /* notusedyet */
2850
2851/*
2852 * Execute the given function for every device in the EDT.
2853 */
2854static int
2855xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2856{
2857 struct xpt_traverse_config tr_config;
2858
2859 tr_config.depth = XPT_DEPTH_DEVICE;
2860 tr_config.tr_func = tr_func;
2861 tr_config.tr_arg = arg;
2862
2863 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2864}
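/*
 * Illustrative sketch only (hypothetical guard and functions): a trivial
 * client of the iterator above that counts every device in the EDT. A
 * non-zero return from the callback continues the traversal; returning 0
 * would stop it early.
 */
#ifdef XPT_MATCH_EXAMPLE
static int
xpt_example_count_one(struct cam_ed *device, void *arg)
{
	(*(int *)arg)++;
	return (1);
}

static int
xpt_example_count_devices(void)
{
	int count;

	count = 0;
	xpt_for_all_devices(xpt_example_count_one, &count);
	return (count);
}
#endif /* XPT_MATCH_EXAMPLE */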
2865
2866#ifdef notusedyet
2867/*
2868 * Execute the given function for every peripheral in the EDT.
2869 */
2870static int
2871xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2872{
2873 struct xpt_traverse_config tr_config;
2874
2875 tr_config.depth = XPT_DEPTH_PERIPH;
2876 tr_config.tr_func = tr_func;
2877 tr_config.tr_arg = arg;
2878
2879 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2880}
2881#endif /* notusedyet */
2882
2883static int
2884xptsetasyncfunc(struct cam_ed *device, void *arg)
2885{
2886 struct cam_path path;
2887 struct ccb_getdev cgd;
2888 struct async_node *cur_entry;
2889
2890 cur_entry = (struct async_node *)arg;
2891
2892 /*
2893 * Don't report unconfigured devices (Wildcard devs,
2894 * devices only for target mode, device instances
2895 * that have been invalidated but are waiting for
2896 * their last reference count to be released).
2897 */
2898 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2899 return (1);
2900
2901 xpt_compile_path(&path,
2902 NULL,
2903 device->target->bus->path_id,
2904 device->target->target_id,
2905 device->lun_id);
2906 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2907 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2908 xpt_action((union ccb *)&cgd);
2909 cur_entry->callback(cur_entry->callback_arg,
2910 AC_FOUND_DEVICE,
2911 &path, &cgd);
2912 xpt_release_path(&path);
2913
2914 return(1);
2915}
2916
2917static int
2918xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2919{
2920 struct cam_path path;
2921 struct ccb_pathinq cpi;
2922 struct async_node *cur_entry;
2923
2924 cur_entry = (struct async_node *)arg;
2925
2926 xpt_compile_path(&path, /*periph*/NULL,
2927 bus->sim->path_id,
2928 CAM_TARGET_WILDCARD,
2929 CAM_LUN_WILDCARD);
2930 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2931 cpi.ccb_h.func_code = XPT_PATH_INQ;
2932 xpt_action((union ccb *)&cpi);
2933 cur_entry->callback(cur_entry->callback_arg,
2934 AC_PATH_REGISTERED,
2935 &path, &cpi);
2936 xpt_release_path(&path);
2937
2938 return(1);
2939}
2940
2941void
2942xpt_action(union ccb *start_ccb)
2943{
2944 int iopl;
2945
2946 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2947
2948 start_ccb->ccb_h.status = CAM_REQ_INPROG;
2949
2950 iopl = splsoftcam();
2951 switch (start_ccb->ccb_h.func_code) {
2952 case XPT_SCSI_IO:
2953 {
2954#ifdef CAM_NEW_TRAN_CODE
2955 struct cam_ed *device;
2956#endif /* CAM_NEW_TRAN_CODE */
2957#ifdef CAMDEBUG
2958 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2959 struct cam_path *path;
2960
2961 path = start_ccb->ccb_h.path;
2962#endif
2963
2964 /*
2965 * For the sake of compatibility with SCSI-1
2966 * devices that may not understand the identify
2967 * message, we include lun information in the
2968 * second byte of all commands. SCSI-1 specifies
2969 * that luns are a 3 bit value and reserves only 3
2970 * bits for lun information in the CDB. Later
2971 * revisions of the SCSI spec allow for more than 8
2972 * luns, but have deprecated lun information in the
2973		 * CDB. So, if the lun won't fit, we must omit it.
2974 *
2975 * Also be aware that during initial probing for devices,
2976 * the inquiry information is unknown but initialized to 0.
2977 * This means that this code will be exercised while probing
2978		 * devices, even ones that report an ANSI revision greater
2978		 * than 2.
2979 */
2980#ifdef CAM_NEW_TRAN_CODE
2981 device = start_ccb->ccb_h.path->device;
2982 if (device->protocol_version <= SCSI_REV_2
2983#else /* CAM_NEW_TRAN_CODE */
2984 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2985#endif /* CAM_NEW_TRAN_CODE */
2986 && start_ccb->ccb_h.target_lun < 8
2987 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2988
2989 start_ccb->csio.cdb_io.cdb_bytes[1] |=
2990 start_ccb->ccb_h.target_lun << 5;
2991 }
2992 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2993 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2994 scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2995 &path->device->inq_data),
2996 scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2997 cdb_str, sizeof(cdb_str))));
2998 }
2999 /* FALLTHROUGH */
3000 case XPT_TARGET_IO:
3001 case XPT_CONT_TARGET_IO:
3002 start_ccb->csio.sense_resid = 0;
3003 start_ccb->csio.resid = 0;
3004 /* FALLTHROUGH */
3005 case XPT_RESET_DEV:
3006 case XPT_ENG_EXEC:
3007 {
3008 struct cam_path *path;
3009 int s;
3010 int runq;
3011
3012 path = start_ccb->ccb_h.path;
3013 s = splsoftcam();
3014
3015 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
3016 if (path->device->qfrozen_cnt == 0)
3017 runq = xpt_schedule_dev_sendq(path->bus, path->device);
3018 else
3019 runq = 0;
3020 splx(s);
3021 if (runq != 0)
3022 xpt_run_dev_sendq(path->bus);
3023 break;
3024 }
3025 case XPT_SET_TRAN_SETTINGS:
3026 {
3027 xpt_set_transfer_settings(&start_ccb->cts,
3028 start_ccb->ccb_h.path->device,
3029 /*async_update*/FALSE);
3030 break;
3031 }
3032 case XPT_CALC_GEOMETRY:
3033 {
3034 struct cam_sim *sim;
3035
3036 /* Filter out garbage */
3037 if (start_ccb->ccg.block_size == 0
3038 || start_ccb->ccg.volume_size == 0) {
3039 start_ccb->ccg.cylinders = 0;
3040 start_ccb->ccg.heads = 0;
3041 start_ccb->ccg.secs_per_track = 0;
3042 start_ccb->ccb_h.status = CAM_REQ_CMP;
3043 break;
3044 }
3045#ifdef PC98
3046 /*
3047		 * In a PC-98 system, geometry translation depends on
3048		 * the "real" device geometry obtained from mode page 4.
3049		 * SCSI geometry translation is performed in the
3050		 * initialization routine of the SCSI BIOS and the result is
3051		 * stored in host memory. If the translation is available
3052 * in host memory, use it. If not, rely on the default
3053 * translation the device driver performs.
3054 */
3055 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
3056 start_ccb->ccb_h.status = CAM_REQ_CMP;
3057 break;
3058 }
3059#endif
3060 sim = start_ccb->ccb_h.path->bus->sim;
3061 (*(sim->sim_action))(sim, start_ccb);
3062 break;
3063 }
3064 case XPT_ABORT:
3065 {
3066 union ccb* abort_ccb;
3067 int s;
3068
3069 abort_ccb = start_ccb->cab.abort_ccb;
3070 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
3071
3072 if (abort_ccb->ccb_h.pinfo.index >= 0) {
3073 struct cam_ccbq *ccbq;
3074
3075 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
3076 cam_ccbq_remove_ccb(ccbq, abort_ccb);
3077 abort_ccb->ccb_h.status =
3078 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3079 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3080 s = splcam();
3081 xpt_done(abort_ccb);
3082 splx(s);
3083 start_ccb->ccb_h.status = CAM_REQ_CMP;
3084 break;
3085 }
3086 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
3087 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
3088 /*
3089 * We've caught this ccb en route to
3090 * the SIM. Flag it for abort and the
3091 * SIM will do so just before starting
3092 * real work on the CCB.
3093 */
3094 abort_ccb->ccb_h.status =
3095 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3096 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3097 start_ccb->ccb_h.status = CAM_REQ_CMP;
3098 break;
3099 }
3100 }
3101 if (XPT_FC_IS_QUEUED(abort_ccb)
3102 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
3103 /*
3104 * It's already completed but waiting
3105 * for our SWI to get to it.
3106 */
3107 start_ccb->ccb_h.status = CAM_UA_ABORT;
3108 break;
3109 }
3110 /*
3111 * If we weren't able to take care of the abort request
3112 * in the XPT, pass the request down to the SIM for processing.
3113 */
3114 }
3115 /* FALLTHROUGH */
3116 case XPT_ACCEPT_TARGET_IO:
3117 case XPT_EN_LUN:
3118 case XPT_IMMED_NOTIFY:
3119 case XPT_NOTIFY_ACK:
3120 case XPT_GET_TRAN_SETTINGS:
3121 case XPT_RESET_BUS:
3122 {
3123 struct cam_sim *sim;
3124
3125 sim = start_ccb->ccb_h.path->bus->sim;
3126 (*(sim->sim_action))(sim, start_ccb);
3127 break;
3128 }
3129 case XPT_PATH_INQ:
3130 {
3131 struct cam_sim *sim;
3132
3133 sim = start_ccb->ccb_h.path->bus->sim;
3134 (*(sim->sim_action))(sim, start_ccb);
3135 break;
3136 }
3137 case XPT_PATH_STATS:
3138 start_ccb->cpis.last_reset =
3139 start_ccb->ccb_h.path->bus->last_reset;
3140 start_ccb->ccb_h.status = CAM_REQ_CMP;
3141 break;
3142 case XPT_GDEV_TYPE:
3143 {
3144 struct cam_ed *dev;
3145 int s;
3146
3147 dev = start_ccb->ccb_h.path->device;
3148 s = splcam();
3149 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3150 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3151 } else {
3152 struct ccb_getdev *cgd;
3153 struct cam_eb *bus;
3154 struct cam_et *tar;
3155
3156 cgd = &start_ccb->cgd;
3157 bus = cgd->ccb_h.path->bus;
3158 tar = cgd->ccb_h.path->target;
3159 cgd->inq_data = dev->inq_data;
3160 cgd->ccb_h.status = CAM_REQ_CMP;
3161 cgd->serial_num_len = dev->serial_num_len;
3162 if ((dev->serial_num_len > 0)
3163 && (dev->serial_num != NULL))
3164 bcopy(dev->serial_num, cgd->serial_num,
3165 dev->serial_num_len);
3166 }
3167 splx(s);
3168 break;
3169 }
3170 case XPT_GDEV_STATS:
3171 {
3172 struct cam_ed *dev;
3173 int s;
3174
3175 dev = start_ccb->ccb_h.path->device;
3176 s = splcam();
3177 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3178 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3179 } else {
3180 struct ccb_getdevstats *cgds;
3181 struct cam_eb *bus;
3182 struct cam_et *tar;
3183
3184 cgds = &start_ccb->cgds;
3185 bus = cgds->ccb_h.path->bus;
3186 tar = cgds->ccb_h.path->target;
3187 cgds->dev_openings = dev->ccbq.dev_openings;
3188 cgds->dev_active = dev->ccbq.dev_active;
3189 cgds->devq_openings = dev->ccbq.devq_openings;
3190 cgds->devq_queued = dev->ccbq.queue.entries;
3191 cgds->held = dev->ccbq.held;
3192 cgds->last_reset = tar->last_reset;
3193 cgds->maxtags = dev->quirk->maxtags;
3194 cgds->mintags = dev->quirk->mintags;
3195 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3196 cgds->last_reset = bus->last_reset;
3197 cgds->ccb_h.status = CAM_REQ_CMP;
3198 }
3199 splx(s);
3200 break;
3201 }
3202 case XPT_GDEVLIST:
3203 {
3204 struct cam_periph *nperiph;
3205 struct periph_list *periph_head;
3206 struct ccb_getdevlist *cgdl;
3207 u_int i;
3208 int s;
3209 struct cam_ed *device;
3210 int found;
3211
3212
3213 found = 0;
3214
3215 /*
3216 * Don't want anyone mucking with our data.
3217 */
3218 s = splcam();
3219 device = start_ccb->ccb_h.path->device;
3220 periph_head = &device->periphs;
3221 cgdl = &start_ccb->cgdl;
3222
3223 /*
3224 * Check and see if the list has changed since the user
3225 * last requested a list member. If so, tell them that the
3226 * list has changed, and therefore they need to start over
3227 * from the beginning.
3228 */
3229 if ((cgdl->index != 0) &&
3230 (cgdl->generation != device->generation)) {
3231 cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3232 splx(s);
3233 break;
3234 }
3235
3236 /*
3237 * Traverse the list of peripherals and attempt to find
3238 * the requested peripheral.
3239 */
3240 for (nperiph = SLIST_FIRST(periph_head), i = 0;
3241 (nperiph != NULL) && (i <= cgdl->index);
3242 nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3243 if (i == cgdl->index) {
3244 strncpy(cgdl->periph_name,
3245 nperiph->periph_name,
3246 DEV_IDLEN);
3247 cgdl->unit_number = nperiph->unit_number;
3248 found = 1;
3249 }
3250 }
3251 if (found == 0) {
3252 cgdl->status = CAM_GDEVLIST_ERROR;
3253 splx(s);
3254 break;
3255 }
3256
3257 if (nperiph == NULL)
3258 cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3259 else
3260 cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3261
3262 cgdl->index++;
3263 cgdl->generation = device->generation;
3264
3265 splx(s);
3266 cgdl->ccb_h.status = CAM_REQ_CMP;
3267 break;
3268 }
3269 case XPT_DEV_MATCH:
3270 {
3271 int s;
3272 dev_pos_type position_type;
3273 struct ccb_dev_match *cdm;
3274
3275 cdm = &start_ccb->cdm;
3276
3277 /*
3278 * Prevent EDT changes while we traverse it.
3279 */
3280 s = splcam();
3281 /*
3282 * There are two ways of getting at information in the EDT.
3283 * The first way is via the primary EDT tree. It starts
3284 * with a list of busses, then a list of targets on a bus,
3285 * then devices/luns on a target, and then peripherals on a
3286 * device/lun. The "other" way is by the peripheral driver
3287 * lists. The peripheral driver lists are organized by
3288		 * peripheral driver (obviously), so it makes sense to
3289 * use the peripheral driver list if the user is looking
3290 * for something like "da1", or all "da" devices. If the
3291 * user is looking for something on a particular bus/target
3292 * or lun, it's generally better to go through the EDT tree.
3293 */
3294
3295 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3296 position_type = cdm->pos.position_type;
3297 else {
3298 u_int i;
3299
3300 position_type = CAM_DEV_POS_NONE;
3301
3302 for (i = 0; i < cdm->num_patterns; i++) {
3303 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3304 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3305 position_type = CAM_DEV_POS_EDT;
3306 break;
3307 }
3308 }
3309
3310 if (cdm->num_patterns == 0)
3311 position_type = CAM_DEV_POS_EDT;
3312 else if (position_type == CAM_DEV_POS_NONE)
3313 position_type = CAM_DEV_POS_PDRV;
3314 }
3315
3316 switch(position_type & CAM_DEV_POS_TYPEMASK) {
3317 case CAM_DEV_POS_EDT:
3318 xptedtmatch(cdm);
3319 break;
3320 case CAM_DEV_POS_PDRV:
3321 xptperiphlistmatch(cdm);
3322 break;
3323 default:
3324 cdm->status = CAM_DEV_MATCH_ERROR;
3325 break;
3326 }
3327
3328 splx(s);
3329
3330 if (cdm->status == CAM_DEV_MATCH_ERROR)
3331 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3332 else
3333 start_ccb->ccb_h.status = CAM_REQ_CMP;
3334
3335 break;
3336 }
3337 case XPT_SASYNC_CB:
3338 {
3339 struct ccb_setasync *csa;
3340 struct async_node *cur_entry;
3341 struct async_list *async_head;
3342 u_int32_t added;
3343 int s;
3344
3345 csa = &start_ccb->csa;
3346 added = csa->event_enable;
3347 async_head = &csa->ccb_h.path->device->asyncs;
3348
3349 /*
3350 * If there is already an entry for us, simply
3351 * update it.
3352 */
3353 s = splcam();
3354 cur_entry = SLIST_FIRST(async_head);
3355 while (cur_entry != NULL) {
3356 if ((cur_entry->callback_arg == csa->callback_arg)
3357 && (cur_entry->callback == csa->callback))
3358 break;
3359 cur_entry = SLIST_NEXT(cur_entry, links);
3360 }
3361
3362 if (cur_entry != NULL) {
3363 /*
3364 * If the request has no flags set,
3365 * remove the entry.
3366 */
3367 added &= ~cur_entry->event_enable;
3368 if (csa->event_enable == 0) {
3369 SLIST_REMOVE(async_head, cur_entry,
3370 async_node, links);
3371 csa->ccb_h.path->device->refcount--;
3372 free(cur_entry, M_DEVBUF);
3373 } else {
3374 cur_entry->event_enable = csa->event_enable;
3375 }
3376 } else {
3377 cur_entry = malloc(sizeof(*cur_entry), M_DEVBUF,
3378 M_NOWAIT);
3379 if (cur_entry == NULL) {
3380 splx(s);
3381 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3382 break;
3383 }
3384 cur_entry->event_enable = csa->event_enable;
3385 cur_entry->callback_arg = csa->callback_arg;
3386 cur_entry->callback = csa->callback;
3387 SLIST_INSERT_HEAD(async_head, cur_entry, links);
3388 csa->ccb_h.path->device->refcount++;
3389 }
3390
3391 if ((added & AC_FOUND_DEVICE) != 0) {
3392 /*
3393 * Get this peripheral up to date with all
3394 * the currently existing devices.
3395 */
3396 xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3397 }
3398 if ((added & AC_PATH_REGISTERED) != 0) {
3399 /*
3400 * Get this peripheral up to date with all
3401 * the currently existing busses.
3402 */
3403 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3404 }
3405 splx(s);
3406 start_ccb->ccb_h.status = CAM_REQ_CMP;
3407 break;
3408 }
3409 case XPT_REL_SIMQ:
3410 {
3411 struct ccb_relsim *crs;
3412 struct cam_ed *dev;
3413 int s;
3414
3415 crs = &start_ccb->crs;
3416 dev = crs->ccb_h.path->device;
3417 if (dev == NULL) {
3418
3419 crs->ccb_h.status = CAM_DEV_NOT_THERE;
3420 break;
3421 }
3422
3423 s = splcam();
3424
3425 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3426
3427 if ((dev->inq_data.flags & SID_CmdQue) != 0) {
3428
3429 /* Don't ever go below one opening */
3430 if (crs->openings > 0) {
3431 xpt_dev_ccbq_resize(crs->ccb_h.path,
3432 crs->openings);
3433
3434 if (bootverbose) {
3435 xpt_print_path(crs->ccb_h.path);
3436 printf("tagged openings "
3437 "now %d\n",
3438 crs->openings);
3439 }
3440 }
3441 }
3442 }
3443
3444 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3445
3446 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3447
3448 /*
3449 * Just extend the old timeout and decrement
3450 * the freeze count so that a single timeout
3451 * is sufficient for releasing the queue.
3452 */
3453 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3454 untimeout(xpt_release_devq_timeout,
3455 dev, dev->c_handle);
3456 } else {
3457
3458 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3459 }
3460
3461 dev->c_handle =
3462 timeout(xpt_release_devq_timeout,
3463 dev,
3464 (crs->release_timeout * hz) / 1000);
3465
3466 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3467
3468 }
3469
3470 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3471
3472 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3473 /*
3474 * Decrement the freeze count so that a single
3475 * completion is still sufficient to unfreeze
3476 * the queue.
3477 */
3478 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3479 } else {
3480
3481 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3482 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3483 }
3484 }
3485
3486 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3487
3488 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3489 || (dev->ccbq.dev_active == 0)) {
3490
3491 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3492 } else {
3493
3494 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3495 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3496 }
3497 }
3498 splx(s);
3499
3500 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3501
3502 xpt_release_devq(crs->ccb_h.path, /*count*/1,
3503 /*run_queue*/TRUE);
3504 }
3505 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3506 start_ccb->ccb_h.status = CAM_REQ_CMP;
3507 break;
3508 }
3509 case XPT_SCAN_BUS:
3510 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3511 break;
3512 case XPT_SCAN_LUN:
3513 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3514 start_ccb->ccb_h.path, start_ccb->crcn.flags,
3515 start_ccb);
3516 break;
3517 case XPT_DEBUG: {
3518#ifdef CAMDEBUG
3519 int s;
3520
3521 s = splcam();
3522#ifdef CAM_DEBUG_DELAY
3523 cam_debug_delay = CAM_DEBUG_DELAY;
3524#endif
3525 cam_dflags = start_ccb->cdbg.flags;
3526 if (cam_dpath != NULL) {
3527 xpt_free_path(cam_dpath);
3528 cam_dpath = NULL;
3529 }
3530
3531 if (cam_dflags != CAM_DEBUG_NONE) {
3532 if (xpt_create_path(&cam_dpath, xpt_periph,
3533 start_ccb->ccb_h.path_id,
3534 start_ccb->ccb_h.target_id,
3535 start_ccb->ccb_h.target_lun) !=
3536 CAM_REQ_CMP) {
3537 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3538 cam_dflags = CAM_DEBUG_NONE;
3539 } else {
3540 start_ccb->ccb_h.status = CAM_REQ_CMP;
3541 xpt_print_path(cam_dpath);
3542 printf("debugging flags now %x\n", cam_dflags);
3543 }
3544 } else {
3545 cam_dpath = NULL;
3546 start_ccb->ccb_h.status = CAM_REQ_CMP;
3547 }
3548 splx(s);
3549#else /* !CAMDEBUG */
3550 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3551#endif /* CAMDEBUG */
3552 break;
3553 }
3554 case XPT_NOOP:
3555 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3556 xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3557 start_ccb->ccb_h.status = CAM_REQ_CMP;
3558 break;
3559 default:
3560 case XPT_SDEV_TYPE:
3561 case XPT_TERM_IO:
3562 case XPT_ENG_INQ:
3563 /* XXX Implement */
3564 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3565 break;
3566 }
3567 splx(iopl);
3568}
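/*
 * Illustrative sketch only (hypothetical guard and function): the calling
 * convention for an immediate ccb. XPT_PATH_INQ is serviced synchronously
 * by xpt_action() and the SIM action routine, so the results are valid as
 * soon as the call returns; xptsetasyncbusfunc() above uses this same
 * pattern.
 */
#ifdef XPT_ACTION_EXAMPLE
static void
xpt_example_path_inq(struct cam_path *path)
{
	struct ccb_pathinq cpi;

	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	if ((cpi.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
		printf("controller: %s%d\n", cpi.dev_name, cpi.unit_number);
}
#endif /* XPT_ACTION_EXAMPLE */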
3569
3570void
3571xpt_polled_action(union ccb *start_ccb)
3572{
3573 int s;
3574 u_int32_t timeout;
3575 struct cam_sim *sim;
3576 struct cam_devq *devq;
3577 struct cam_ed *dev;
3578
3579 timeout = start_ccb->ccb_h.timeout;
3580 sim = start_ccb->ccb_h.path->bus->sim;
3581 devq = sim->devq;
3582 dev = start_ccb->ccb_h.path->device;
3583
3584 s = splcam();
3585
3586 /*
3587 * Steal an opening so that no other queued requests
3588 * can get it before us while we simulate interrupts.
3589 */
3590 dev->ccbq.devq_openings--;
3591 dev->ccbq.dev_openings--;
3592
3593 while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
3594 && (--timeout > 0)) {
3595 DELAY(1000);
3596 (*(sim->sim_poll))(sim);
3597 camisr(&cam_netq);
3598 camisr(&cam_bioq);
3599 }
3600
3601 dev->ccbq.devq_openings++;
3602 dev->ccbq.dev_openings++;
3603
3604 if (timeout != 0) {
3605 xpt_action(start_ccb);
3606 while(--timeout > 0) {
3607 (*(sim->sim_poll))(sim);
3608 camisr(&cam_netq);
3609 camisr(&cam_bioq);
3610 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3611 != CAM_REQ_INPROG)
3612 break;
3613 DELAY(1000);
3614 }
3615 if (timeout == 0) {
3616 /*
3617 * XXX Is it worth adding a sim_timeout entry
3618 * point so we can attempt recovery? If
3619 * this is only used for dumps, I don't think
3620 * it is.
3621 */
3622 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3623 }
3624 } else {
3625 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3626 }
3627 splx(s);
3628}
3629
3630/*
3631 * Schedule a peripheral driver to receive a ccb when its
3632 * target device has space for more transactions.
3633 */
3634void
3635xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3636{
3637 struct cam_ed *device;
3638 int s;
3639 int runq;
3640
3641 CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3642 device = perph->path->device;
3643 s = splsoftcam();
3644 if (periph_is_queued(perph)) {
3645 /* Simply reorder based on new priority */
3646 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3647 (" change priority to %d\n", new_priority));
3648 if (new_priority < perph->pinfo.priority) {
3649 camq_change_priority(&device->drvq,
3650 perph->pinfo.index,
3651 new_priority);
3652 }
3653 runq = 0;
3654 } else {
3655 /* New entry on the queue */
3656 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3657 (" added periph to queue\n"));
3658 perph->pinfo.priority = new_priority;
3659 perph->pinfo.generation = ++device->drvq.generation;
3660 camq_insert(&device->drvq, &perph->pinfo);
3661 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3662 }
3663 splx(s);
3664 if (runq != 0) {
3665 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3666 (" calling xpt_run_devq\n"));
3667 xpt_run_dev_allocq(perph->path->bus);
3668 }
3669}
3670
3671
3672/*
3673 * Schedule a device to run on a given queue.
3674 * If the device was inserted as a new entry on the queue,
3675 * return 1 meaning the device queue should be run. If we
3676 * were already queued, implying someone else has already
3677 * started the queue, return 0 so the caller doesn't attempt
3678 * to run the queue. Must be run at splsoftcam (or splcam,
3679 * since that encompasses splsoftcam).
3680 */
3681static int
3682xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3683 u_int32_t new_priority)
3684{
3685 int retval;
3686 u_int32_t old_priority;
3687
3688 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3689
3690 old_priority = pinfo->priority;
3691
3692 /*
3693 * Are we already queued?
3694 */
3695 if (pinfo->index != CAM_UNQUEUED_INDEX) {
3696 /* Simply reorder based on new priority */
3697 if (new_priority < old_priority) {
3698 camq_change_priority(queue, pinfo->index,
3699 new_priority);
3700 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3701 ("changed priority to %d\n",
3702 new_priority));
3703 }
3704 retval = 0;
3705 } else {
3706 /* New entry on the queue */
3707 if (new_priority < old_priority)
3708 pinfo->priority = new_priority;
3709
3710 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3711 ("Inserting onto queue\n"));
3712 pinfo->generation = ++queue->generation;
3713 camq_insert(queue, pinfo);
3714 retval = 1;
3715 }
3716 return (retval);
3717}
3718
3719static void
3720xpt_run_dev_allocq(struct cam_eb *bus)
3721{
3722 struct cam_devq *devq;
3723 int s;
3724
3725 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3726 devq = bus->sim->devq;
3727
3728 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3729 (" qfrozen_cnt == 0x%x, entries == %d, "
3730 "openings == %d, active == %d\n",
3731 devq->alloc_queue.qfrozen_cnt,
3732 devq->alloc_queue.entries,
3733 devq->alloc_openings,
3734 devq->alloc_active));
3735
3736 s = splsoftcam();
3737 devq->alloc_queue.qfrozen_cnt++;
3738 while ((devq->alloc_queue.entries > 0)
3739 && (devq->alloc_openings > 0)
3740 && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3741 struct cam_ed_qinfo *qinfo;
3742 struct cam_ed *device;
3743 union ccb *work_ccb;
3744 struct cam_periph *drv;
3745 struct camq *drvq;
3746
3747 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3748 CAMQ_HEAD);
3749 device = qinfo->device;
3750
3751 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3752 ("running device %p\n", device));
3753
3754 drvq = &device->drvq;
3755
3756#ifdef CAMDEBUG
3757 if (drvq->entries <= 0) {
3758 panic("xpt_run_dev_allocq: "
3759 "Device on queue without any work to do");
3760 }
3761#endif
3762 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3763 devq->alloc_openings--;
3764 devq->alloc_active++;
3765 drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3766 splx(s);
3767 xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3768 drv->pinfo.priority);
3769 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3770 ("calling periph start\n"));
3771 drv->periph_start(drv, work_ccb);
3772 } else {
3773 /*
3774 * Malloc failure in alloc_ccb
3775 */
3776 /*
3777 * XXX add us to a list to be run from free_ccb
3778 * if we don't have any ccbs active on this
3779 * device queue otherwise we may never get run
3780 * again.
3781 */
3782 break;
3783 }
3784
3785 /* Raise IPL for possible insertion and test at top of loop */
3786 s = splsoftcam();
3787
3788 if (drvq->entries > 0) {
3789 /* We have more work. Attempt to reschedule */
3790 xpt_schedule_dev_allocq(bus, device);
3791 }
3792 }
3793 devq->alloc_queue.qfrozen_cnt--;
3794 splx(s);
3795}
3796
3797static void
3798xpt_run_dev_sendq(struct cam_eb *bus)
3799{
3800 struct cam_devq *devq;
3801 int s;
3802
3803 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3804
3805 devq = bus->sim->devq;
3806
3807 s = splcam();
3808 devq->send_queue.qfrozen_cnt++;
3809 splx(s);
3810 s = splsoftcam();
3811 while ((devq->send_queue.entries > 0)
3812 && (devq->send_openings > 0)) {
3813 struct cam_ed_qinfo *qinfo;
3814 struct cam_ed *device;
3815 union ccb *work_ccb;
3816 struct cam_sim *sim;
3817 int ospl;
3818
3819 ospl = splcam();
3820 if (devq->send_queue.qfrozen_cnt > 1) {
3821 splx(ospl);
3822 break;
3823 }
3824
3825 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3826 CAMQ_HEAD);
3827 device = qinfo->device;
3828
3829 /*
3830 * If the device has been "frozen", don't attempt
3831 * to run it.
3832 */
3833 if (device->qfrozen_cnt > 0) {
3834 splx(ospl);
3835 continue;
3836 }
3837
3838 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3839 ("running device %p\n", device));
3840
3841 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3842 if (work_ccb == NULL) {
3843 printf("device on run queue with no ccbs???\n");
3844 splx(ospl);
3845 continue;
3846 }
3847
3848 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3849
3850 if (num_highpower <= 0) {
3851 /*
3852 * We got a high power command, but we
3853 * don't have any available slots. Freeze
3854 * the device queue until we have a slot
3855 * available.
3856 */
3857 device->qfrozen_cnt++;
3858 STAILQ_INSERT_TAIL(&highpowerq,
3859 &work_ccb->ccb_h,
3860 xpt_links.stqe);
3861
3862 splx(ospl);
3863 continue;
3864 } else {
3865 /*
3866 * Consume a high power slot while
3867 * this ccb runs.
3868 */
3869 num_highpower--;
3870 }
3871 }
3872 devq->active_dev = device;
3873 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3874
3875 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3876 splx(ospl);
3877
3878 devq->send_openings--;
3879 devq->send_active++;
3880
3881 if (device->ccbq.queue.entries > 0)
3882 xpt_schedule_dev_sendq(bus, device);
3883
3884 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3885 /*
3886 * The client wants to freeze the queue
3887 * after this CCB is sent.
3888 */
3889 ospl = splcam();
3890 device->qfrozen_cnt++;
3891 splx(ospl);
3892 }
3893
3894 splx(s);
3895
3896 /* In Target mode, the peripheral driver knows best... */
3897 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3898 if ((device->inq_flags & SID_CmdQue) != 0
3899 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3900 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3901 else
3902 /*
3903 * Clear this in case of a retried CCB that
3904 * failed due to a rejected tag.
3905 */
3906 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3907 }
3908
3909 /*
3910 * Device queues can be shared among multiple sim instances
3911 * that reside on different busses. Use the SIM in the queue
3912 * CCB's path, rather than the one in the bus that was passed
3913 * into this function.
3914 */
3915 sim = work_ccb->ccb_h.path->bus->sim;
3916 (*(sim->sim_action))(sim, work_ccb);
3917
3918 ospl = splcam();
3919 devq->active_dev = NULL;
3920 splx(ospl);
3921 /* Raise IPL for possible insertion and test at top of loop */
3922 s = splsoftcam();
3923 }
3924 splx(s);
3925 s = splcam();
3926 devq->send_queue.qfrozen_cnt--;
3927 splx(s);
3928}
3929
3930/*
3931 * This function merges fields from the slave ccb into the master ccb, while
3932 * keeping important fields in the master ccb constant.
3933 */
3934void
3935xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3936{
3937 /*
3938 * Pull fields that are valid for peripheral drivers to set
3939 * into the master CCB along with the CCB "payload".
3940 */
3941 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3942 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3943 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3944 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3945 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3946 sizeof(union ccb) - sizeof(struct ccb_hdr));
3947}
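
/*
 * xpt_merge_ccb() lets a peripheral driver that receives a fully formed
 * CCB from elsewhere (the pass driver, for instance) graft that request
 * onto a CCB the XPT allocated for it.  Illustrative sketch only:
 *
 *	xpt_setup_ccb(&xpt_ccb->ccb_h, periph->path, priority);
 *	xpt_merge_ccb(xpt_ccb, user_ccb);
 */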
3948
3949void
3950xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3951{
3952 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3953 ccb_h->pinfo.priority = priority;
3954 ccb_h->path = path;
3955 ccb_h->path_id = path->bus->path_id;
3956 if (path->target)
3957 ccb_h->target_id = path->target->target_id;
3958 else
3959 ccb_h->target_id = CAM_TARGET_WILDCARD;
3960 if (path->device) {
3961 ccb_h->target_lun = path->device->lun_id;
3962 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3963 } else {
3964 ccb_h->target_lun = CAM_TARGET_WILDCARD;
3965 }
3966 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3967 ccb_h->flags = 0;
3968}
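
/*
 * A typical use of xpt_setup_ccb() is preparing an "immediate" CCB on
 * the stack, mirroring the XPT_PATH_INQ calls made elsewhere in this
 * file.  Sketch, at priority 1, with error handling elided:
 *
 *	struct ccb_pathinq cpi;
 *
 *	xpt_setup_ccb(&cpi.ccb_h, path, 1);
 *	cpi.ccb_h.func_code = XPT_PATH_INQ;
 *	xpt_action((union ccb *)&cpi);
 *	if (cpi.ccb_h.status == CAM_REQ_CMP)
 *		... consult cpi.hba_inquiry, cpi.max_target, etc ...
 */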
3969
3970/* Path manipulation functions */
3971cam_status
3972xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3973 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3974{
3975 struct cam_path *path;
3976 cam_status status;
3977
3978 path = (struct cam_path *)malloc(sizeof(*path), M_DEVBUF, M_NOWAIT);
3979
3980 if (path == NULL) {
3981 status = CAM_RESRC_UNAVAIL;
3982 return(status);
3983 }
3984 status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3985 if (status != CAM_REQ_CMP) {
3986 free(path, M_DEVBUF);
3987 path = NULL;
3988 }
3989 *new_path_ptr = path;
3990 return (status);
3991}
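
/*
 * Callers pair xpt_create_path() with xpt_free_path().  A minimal
 * sketch, where the three ids are placeholders:
 *
 *	struct cam_path *path;
 *	cam_status status;
 *
 *	status = xpt_create_path(&path, NULL, path_id, target_id, lun_id);
 *	if (status != CAM_REQ_CMP)
 *		return (status);
 *	... use the path ...
 *	xpt_free_path(path);
 */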
3992
3993static cam_status
3994xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3995 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3996{
3997 struct cam_eb *bus;
3998 struct cam_et *target;
3999 struct cam_ed *device;
4000 cam_status status;
4001 int s;
4002
4003 status = CAM_REQ_CMP; /* Completed without error */
4004 target = NULL; /* Wildcarded */
4005 device = NULL; /* Wildcarded */
4006
4007 /*
4008 * We will potentially modify the EDT, so block interrupts
4009 * that may attempt to create cam paths.
4010 */
4011 s = splcam();
4012 bus = xpt_find_bus(path_id);
4013 if (bus == NULL) {
4014 status = CAM_PATH_INVALID;
4015 } else {
4016 target = xpt_find_target(bus, target_id);
4017 if (target == NULL) {
4018 /* Create one */
4019 struct cam_et *new_target;
4020
4021 new_target = xpt_alloc_target(bus, target_id);
4022 if (new_target == NULL) {
4023 status = CAM_RESRC_UNAVAIL;
4024 } else {
4025 target = new_target;
4026 }
4027 }
4028 if (target != NULL) {
4029 device = xpt_find_device(target, lun_id);
4030 if (device == NULL) {
4031 /* Create one */
4032 struct cam_ed *new_device;
4033
4034 new_device = xpt_alloc_device(bus,
4035 target,
4036 lun_id);
4037 if (new_device == NULL) {
4038 status = CAM_RESRC_UNAVAIL;
4039 } else {
4040 device = new_device;
4041 }
4042 }
4043 }
4044 }
4045 splx(s);
4046
4047 /*
4048 * Only touch the user's data if we are successful.
4049 */
4050 if (status == CAM_REQ_CMP) {
4051 new_path->periph = perph;
4052 new_path->bus = bus;
4053 new_path->target = target;
4054 new_path->device = device;
4055 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
4056 } else {
4057 if (device != NULL)
4058 xpt_release_device(bus, target, device);
4059 if (target != NULL)
4060 xpt_release_target(bus, target);
4061 if (bus != NULL)
4062 xpt_release_bus(bus);
4063 }
4064 return (status);
4065}
4066
4067static void
4068xpt_release_path(struct cam_path *path)
4069{
4070 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
4071 if (path->device != NULL) {
4072 xpt_release_device(path->bus, path->target, path->device);
4073 path->device = NULL;
4074 }
4075 if (path->target != NULL) {
4076 xpt_release_target(path->bus, path->target);
4077 path->target = NULL;
4078 }
4079 if (path->bus != NULL) {
4080 xpt_release_bus(path->bus);
4081 path->bus = NULL;
4082 }
4083}
4084
4085void
4086xpt_free_path(struct cam_path *path)
4087{
4088 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
4089 xpt_release_path(path);
4090 free(path, M_DEVBUF);
4091}
4092
4093
4094/*
4095 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
4096 * in path1, 2 for match with wildcards in path2.
4097 */
4098int
4099xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
4100{
4101 int retval = 0;
4102
4103 if (path1->bus != path2->bus) {
4104 if (path1->bus->path_id == CAM_BUS_WILDCARD)
4105 retval = 1;
4106 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
4107 retval = 2;
4108 else
4109 return (-1);
4110 }
4111 if (path1->target != path2->target) {
4112 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
4113 if (retval == 0)
4114 retval = 1;
4115 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
4116 retval = 2;
4117 else
4118 return (-1);
4119 }
4120 if (path1->device != path2->device) {
4121 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
4122 if (retval == 0)
4123 retval = 1;
4124 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
4125 retval = 2;
4126 else
4127 return (-1);
4128 }
4129 return (retval);
4130}
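
/*
 * Example: if path1 names (bus 0, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
 * and path2 names (bus 0, target 3, lun 0), xpt_path_comp(path1, path2)
 * returns 1 (a match, with the wildcards in path1), while swapping the
 * arguments returns 2.  Two different non-wildcard busses yield -1.
 */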
4131
4132void
4133xpt_print_path(struct cam_path *path)
4134{
4135 if (path == NULL)
4136 printf("(nopath): ");
4137 else {
4138 if (path->periph != NULL)
4139 printf("(%s%d:", path->periph->periph_name,
4140 path->periph->unit_number);
4141 else
4142 printf("(noperiph:");
4143
4144 if (path->bus != NULL)
4145 printf("%s%d:%d:", path->bus->sim->sim_name,
4146 path->bus->sim->unit_number,
4147 path->bus->sim->bus_id);
4148 else
4149 printf("nobus:");
4150
4151 if (path->target != NULL)
4152 printf("%d:", path->target->target_id);
4153 else
4154 printf("X:");
4155
4156 if (path->device != NULL)
4157 printf("%d): ", path->device->lun_id);
4158 else
4159 printf("X): ");
4160 }
4161}
4162
4163int
4164xpt_path_string(struct cam_path *path, char *str, size_t str_len)
4165{
4166 struct sbuf sb;
4167
4168 sbuf_new(&sb, str, str_len, 0);
4169
4170 if (path == NULL)
4171 sbuf_printf(&sb, "(nopath): ");
4172 else {
4173 if (path->periph != NULL)
4174 sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
4175 path->periph->unit_number);
4176 else
4177 sbuf_printf(&sb, "(noperiph:");
4178
4179 if (path->bus != NULL)
4180 sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
4181 path->bus->sim->unit_number,
4182 path->bus->sim->bus_id);
4183 else
4184 sbuf_printf(&sb, "nobus:");
4185
4186 if (path->target != NULL)
4187 sbuf_printf(&sb, "%d:", path->target->target_id);
4188 else
4189 sbuf_printf(&sb, "X:");
4190
4191 if (path->device != NULL)
4192 sbuf_printf(&sb, "%d): ", path->device->lun_id);
4193 else
4194 sbuf_printf(&sb, "X): ");
4195 }
4196 sbuf_finish(&sb);
4197
4198 return(sbuf_len(&sb));
4199}
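
/*
 * xpt_path_string() produces the same prefix as xpt_print_path(), but
 * into a caller-supplied buffer.  Sketch, with an arbitrary buffer size:
 *
 *	char pathstr[64];
 *
 *	xpt_path_string(path, pathstr, sizeof(pathstr));
 *	printf("%sdevice is ready\n", pathstr);
 */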
4200
4201path_id_t
4202xpt_path_path_id(struct cam_path *path)
4203{
4204 return(path->bus->path_id);
4205}
4206
4207target_id_t
4208xpt_path_target_id(struct cam_path *path)
4209{
4210 if (path->target != NULL)
4211 return (path->target->target_id);
4212 else
4213 return (CAM_TARGET_WILDCARD);
4214}
4215
4216lun_id_t
4217xpt_path_lun_id(struct cam_path *path)
4218{
4219 if (path->device != NULL)
4220 return (path->device->lun_id);
4221 else
4222 return (CAM_LUN_WILDCARD);
4223}
4224
4225struct cam_sim *
4226xpt_path_sim(struct cam_path *path)
4227{
4228 return (path->bus->sim);
4229}
4230
4231struct cam_periph*
4232xpt_path_periph(struct cam_path *path)
4233{
4234 return (path->periph);
4235}
4236
4237/*
4238 * Release a CAM control block for the caller. Remit the cost of the structure
4239 * to the device referenced by the path.  If this device had no 'credits'
4240 * and peripheral drivers have registered async callbacks for this
4241 * notification, call them now.
4242 */
4243void
4244xpt_release_ccb(union ccb *free_ccb)
4245{
4246 int s;
4247 struct cam_path *path;
4248 struct cam_ed *device;
4249 struct cam_eb *bus;
4250
4251 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4252 path = free_ccb->ccb_h.path;
4253 device = path->device;
4254 bus = path->bus;
4255 s = splsoftcam();
4256 cam_ccbq_release_opening(&device->ccbq);
4257 if (xpt_ccb_count > xpt_max_ccbs) {
4258 xpt_free_ccb(free_ccb);
4259 xpt_ccb_count--;
4260 } else {
4261 SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
4262 }
4263 bus->sim->devq->alloc_openings++;
4264 bus->sim->devq->alloc_active--;
4265 /* XXX Turn this into an inline function - xpt_run_device?? */
4266 if ((device_is_alloc_queued(device) == 0)
4267 && (device->drvq.entries > 0)) {
4268 xpt_schedule_dev_allocq(bus, device);
4269 }
4270 splx(s);
4271 if (dev_allocq_is_runnable(bus->sim->devq))
4272 xpt_run_dev_allocq(bus);
4273}
4274
4275/* Functions accessed by SIM drivers */
4276
4277/*
4278 * A sim structure, listing the SIM entry points and instance
4279 * identification info, is passed to xpt_bus_register to hook the SIM
4280 * into the CAM framework. xpt_bus_register creates a cam_eb entry
4281 * for this new bus and places it in the array of busses and assigns
4282 * it a path_id. The path_id may be influenced by "hard wiring"
4283 * information specified by the user. Once interrupt services are
4284 * available, the bus will be probed.
4285 */
4286int32_t
4287xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4288{
4289 struct cam_eb *new_bus;
4290 struct cam_eb *old_bus;
4291 struct ccb_pathinq cpi;
4292 int s;
4293
4294 sim->bus_id = bus;
4295 new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
4296 M_DEVBUF, M_NOWAIT);
4297 if (new_bus == NULL) {
4298 /* Couldn't satisfy request */
4299 return (CAM_RESRC_UNAVAIL);
4300 }
4301
4302 if (strcmp(sim->sim_name, "xpt") != 0) {
4303
4304 sim->path_id =
4305 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4306 }
4307
4308 TAILQ_INIT(&new_bus->et_entries);
4309 new_bus->path_id = sim->path_id;
4310 new_bus->sim = sim;
4311 timevalclear(&new_bus->last_reset);
4312 new_bus->flags = 0;
4313 new_bus->refcount = 1; /* Held until a bus_deregister event */
4314 new_bus->generation = 0;
4315 s = splcam();
4316 old_bus = TAILQ_FIRST(&xpt_busses);
4317 while (old_bus != NULL
4318 && old_bus->path_id < new_bus->path_id)
4319 old_bus = TAILQ_NEXT(old_bus, links);
4320 if (old_bus != NULL)
4321 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4322 else
4323 TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
4324 bus_generation++;
4325 splx(s);
4326
4327 /* Notify interested parties */
4328 if (sim->path_id != CAM_XPT_PATH_ID) {
4329 struct cam_path path;
4330
4331 xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4332 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4333 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4334 cpi.ccb_h.func_code = XPT_PATH_INQ;
4335 xpt_action((union ccb *)&cpi);
4336 xpt_async(AC_PATH_REGISTERED, &path, &cpi);
4337 xpt_release_path(&path);
4338 }
4339 return (CAM_SUCCESS);
4340}
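
/*
 * A SIM driver typically registers its bus from its attach routine.
 * Sketch; the "xxx" names, openings, and devq are illustrative only:
 *
 *	sim = cam_sim_alloc(xxx_action, xxx_poll, "xxx", softc, unit,
 *			    1, 255, devq);
 *	if (sim == NULL || xpt_bus_register(sim, 0) != CAM_SUCCESS)
 *		... fail the attach ...
 */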
4341
4342int32_t
4343xpt_bus_deregister(path_id_t pathid)
4344{
4345 struct cam_path bus_path;
4346 cam_status status;
4347
4348 status = xpt_compile_path(&bus_path, NULL, pathid,
4349 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4350 if (status != CAM_REQ_CMP)
4351 return (status);
4352
4353 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4354 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4355
4356 /* Release the reference count held while registered. */
4357 xpt_release_bus(bus_path.bus);
4358 xpt_release_path(&bus_path);
4359
4360 return (CAM_REQ_CMP);
4361}
4362
4363static path_id_t
4364xptnextfreepathid(void)
4365{
4366 struct cam_eb *bus;
4367 path_id_t pathid;
4368 const char *strval;
4369
4370 pathid = 0;
4371 bus = TAILQ_FIRST(&xpt_busses);
4372retry:
4373 /* Find an unoccupied pathid */
4374 while (bus != NULL
4375 && bus->path_id <= pathid) {
4376 if (bus->path_id == pathid)
4377 pathid++;
4378 bus = TAILQ_NEXT(bus, links);
4379 }
4380
4381 /*
4382 * Ensure that this pathid is not reserved for
4383 * a bus that may be registered in the future.
4384 */
4385 if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4386 ++pathid;
4387 /* Start the search over */
4388 goto retry;
4389 }
4390 return (pathid);
4391}
4392
4393static path_id_t
4394xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4395{
4396 path_id_t pathid;
4397 int i, dunit, val;
4398 char buf[32];
4399 const char *dname;
4400
4401 pathid = CAM_XPT_PATH_ID;
4402 snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4403 i = 0;
4404 while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4405 if (strcmp(dname, "scbus")) {
4406 /* Avoid a bit of foot shooting. */
4407 continue;
4408 }
4409 if (dunit < 0) /* unwired?! */
4410 continue;
4411 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4412 if (sim_bus == val) {
4413 pathid = dunit;
4414 break;
4415 }
4416 } else if (sim_bus == 0) {
4417 /* Unspecified matches bus 0 */
4418 pathid = dunit;
4419 break;
4420 } else {
4421 printf("Ambiguous scbus configuration for %s%d "
4422 "bus %d, cannot wire down. The kernel "
4423 "config entry for scbus%d should "
4424 "specify a controller bus.\n"
4425 "Scbus will be assigned dynamically.\n",
4426 sim_name, sim_unit, sim_bus, dunit);
4427 break;
4428 }
4429 }
4430
4431 if (pathid == CAM_XPT_PATH_ID)
4432 pathid = xptnextfreepathid();
4433 return (pathid);
4434}
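
/*
 * The "scbus" resource entries consulted above come from bus wiring in
 * the kernel configuration or hints file.  For example (illustrative),
 * either the config line
 *
 *	device	scbus0	at ahc0 bus 0
 *
 * or the equivalent /boot/device.hints entries
 *
 *	hint.scbus.0.at="ahc0"
 *	hint.scbus.0.bus="0"
 *
 * wire path id 0 to bus 0 of the ahc0 controller.
 */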
4435
4436void
4437xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4438{
4439 struct cam_eb *bus;
4440 struct cam_et *target, *next_target;
4441 struct cam_ed *device, *next_device;
4442 int s;
4443
4444 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4445
4446 /*
4447 * Most async events come from a CAM interrupt context. In
4448 * a few cases, the error recovery code at the peripheral layer,
4449 * which may run from our SWI or a process context, may signal
4450 * deferred events with a call to xpt_async. Ensure async
4451 * notifications are serialized by blocking cam interrupts.
4452 */
4453 s = splcam();
4454
4455 bus = path->bus;
4456
4457 if (async_code == AC_BUS_RESET) {
4458 int s;
4459
4460 s = splclock();
4461 /* Update our notion of when the last reset occurred */
4462 microtime(&bus->last_reset);
4463 splx(s);
4464 }
4465
4466 for (target = TAILQ_FIRST(&bus->et_entries);
4467 target != NULL;
4468 target = next_target) {
4469
4470 next_target = TAILQ_NEXT(target, links);
4471
4472 if (path->target != target
4473 && path->target->target_id != CAM_TARGET_WILDCARD
4474 && target->target_id != CAM_TARGET_WILDCARD)
4475 continue;
4476
4477 if (async_code == AC_SENT_BDR) {
4478 int s;
4479
4480 /* Update our notion of when the last reset occurred */
4481 s = splclock();
4482 microtime(&path->target->last_reset);
4483 splx(s);
4484 }
4485
4486 for (device = TAILQ_FIRST(&target->ed_entries);
4487 device != NULL;
4488 device = next_device) {
4489
4490 next_device = TAILQ_NEXT(device, links);
4491
4492 if (path->device != device
4493 && path->device->lun_id != CAM_LUN_WILDCARD
4494 && device->lun_id != CAM_LUN_WILDCARD)
4495 continue;
4496
4497 xpt_dev_async(async_code, bus, target,
4498 device, async_arg);
4499
4500 xpt_async_bcast(&device->asyncs, async_code,
4501 path, async_arg);
4502 }
4503 }
4504
4505 /*
4506 * If this wasn't a fully wildcarded async, tell all
4507 * clients that want all async events.
4508 */
4509 if (bus != xpt_periph->path->bus)
4510 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4511 path, async_arg);
4512 splx(s);
4513}
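
/*
 * SIMs and error recovery code post events through xpt_async().  For
 * example, a SIM that has just reset its bus would do (sketch):
 *
 *	xpt_async(AC_BUS_RESET, sim_path, NULL);
 *
 * where sim_path is a wildcard path for that bus.
 */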
4514
4515static void
4516xpt_async_bcast(struct async_list *async_head,
4517 u_int32_t async_code,
4518 struct cam_path *path, void *async_arg)
4519{
4520 struct async_node *cur_entry;
4521
4522 cur_entry = SLIST_FIRST(async_head);
4523 while (cur_entry != NULL) {
4524 struct async_node *next_entry;
4525 /*
4526 * Grab the next list entry before we call the current
4527 * entry's callback. This is because the callback function
4528 * can delete its async callback entry.
4529 */
4530 next_entry = SLIST_NEXT(cur_entry, links);
4531 if ((cur_entry->event_enable & async_code) != 0)
4532 cur_entry->callback(cur_entry->callback_arg,
4533 async_code, path,
4534 async_arg);
4535 cur_entry = next_entry;
4536 }
4537}
4538
4539/*
4540 * Handle any per-device event notifications that require action by the XPT.
4541 */
4542static void
4543xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
4544 struct cam_ed *device, void *async_arg)
4545{
4546 cam_status status;
4547 struct cam_path newpath;
4548
4549 /*
4550 * We only need to handle events for real devices.
4551 */
4552 if (target->target_id == CAM_TARGET_WILDCARD
4553 || device->lun_id == CAM_LUN_WILDCARD)
4554 return;
4555
4556 /*
4557 * We need our own path with wildcards expanded to
4558 * handle certain types of events.
4559 */
4560 if ((async_code == AC_SENT_BDR)
4561 || (async_code == AC_BUS_RESET)
4562 || (async_code == AC_INQ_CHANGED))
4563 status = xpt_compile_path(&newpath, NULL,
4564 bus->path_id,
4565 target->target_id,
4566 device->lun_id);
4567 else
4568 status = CAM_REQ_CMP_ERR;
4569
4570 if (status == CAM_REQ_CMP) {
4571
4572 /*
4573 * Allow transfer negotiation to occur in a
4574 * tag free environment.
4575 */
4576 if (async_code == AC_SENT_BDR
4577 || async_code == AC_BUS_RESET)
4578 xpt_toggle_tags(&newpath);
4579
4580 if (async_code == AC_INQ_CHANGED) {
4581 /*
4582 * We've sent a start unit command, or
4583 * something similar to a device that
4584 * may have caused its inquiry data to
4585 * change. So we re-scan the device to
4586 * refresh the inquiry data for it.
4587 */
4588 xpt_scan_lun(newpath.periph, &newpath,
4589 CAM_EXPECT_INQ_CHANGE, NULL);
4590 }
4591 xpt_release_path(&newpath);
4592 } else if (async_code == AC_LOST_DEVICE) {
4593 device->flags |= CAM_DEV_UNCONFIGURED;
4594 } else if (async_code == AC_TRANSFER_NEG) {
4595 struct ccb_trans_settings *settings;
4596
4597 settings = (struct ccb_trans_settings *)async_arg;
4598 xpt_set_transfer_settings(settings, device,
4599 /*async_update*/TRUE);
4600 }
4601}
4602
4603u_int32_t
4604xpt_freeze_devq(struct cam_path *path, u_int count)
4605{
4606 int s;
4607 struct ccb_hdr *ccbh;
4608
4609 s = splcam();
4610 path->device->qfrozen_cnt += count;
4611
4612 /*
4613 * Mark the last CCB in the queue as needing
4614 * to be requeued if the driver hasn't
4615	 * changed its state yet. This fixes a race
4616	 * where a ccb is just about to be queued to
4617	 * a controller driver when its interrupt routine
4618	 * freezes the queue. To completely close the
4619	 * hole, controller drivers must check to see
4620 * if a ccb's status is still CAM_REQ_INPROG
4621 * under spl protection just before they queue
4622 * the CCB. See ahc_action/ahc_freeze_devq for
4623 * an example.
4624 */
4625 ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4626 if (ccbh && ccbh->status == CAM_REQ_INPROG)
4627 ccbh->status = CAM_REQUEUE_REQ;
4628 splx(s);
4629 return (path->device->qfrozen_cnt);
4630}
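
/*
 * The controller driver's side of the race described above looks
 * roughly like this (sketch; see ahc_action for a real example):
 *
 *	s = splcam();
 *	if (ccb->ccb_h.status == CAM_REQ_INPROG) {
 *		... commit the CCB to the hardware ...
 *	}
 *	splx(s);
 *
 * If the status was already changed to CAM_REQUEUE_REQ, the driver must
 * not start the CCB and instead returns it, letting the XPT requeue it
 * once the device queue is released.
 */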
4631
4632u_int32_t
4633xpt_freeze_simq(struct cam_sim *sim, u_int count)
4634{
4635 sim->devq->send_queue.qfrozen_cnt += count;
4636 if (sim->devq->active_dev != NULL) {
4637 struct ccb_hdr *ccbh;
4638
4639 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4640 ccb_hdr_tailq);
4641 if (ccbh && ccbh->status == CAM_REQ_INPROG)
4642 ccbh->status = CAM_REQUEUE_REQ;
4643 }
4644 return (sim->devq->send_queue.qfrozen_cnt);
4645}
4646
4647static void
4648xpt_release_devq_timeout(void *arg)
4649{
4650 struct cam_ed *device;
4651
4652 device = (struct cam_ed *)arg;
4653
4654 xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4655}
4656
4657void
4658xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4659{
4660 xpt_release_devq_device(path->device, count, run_queue);
4661}
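
/*
 * xpt_freeze_devq() and xpt_release_devq() are used as a pair around
 * recovery actions, e.g. (sketch):
 *
 *	xpt_freeze_devq(path, 1);
 *	... sort the device out ...
 *	xpt_release_devq(path, 1, TRUE);
 *
 * Passing run_queue == TRUE restarts the send queue as soon as the
 * frozen count drops back to zero.
 */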
4662
4663static void
4664xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4665{
4666 int rundevq;
4667 int s0, s1;
4668
4669 rundevq = 0;
4670 s0 = splsoftcam();
4671 s1 = splcam();
4672 if (dev->qfrozen_cnt > 0) {
4673
4674 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4675 dev->qfrozen_cnt -= count;
4676 if (dev->qfrozen_cnt == 0) {
4677
4678 /*
4679 * No longer need to wait for a successful
4680 * command completion.
4681 */
4682 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4683
4684 /*
4685 * Remove any timeouts that might be scheduled
4686 * to release this queue.
4687 */
4688 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4689 untimeout(xpt_release_devq_timeout, dev,
4690 dev->c_handle);
4691 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4692 }
4693
4694 /*
4695			 * Now that we are unfrozen, schedule the
4696 * device so any pending transactions are
4697 * run.
4698 */
4699 if ((dev->ccbq.queue.entries > 0)
4700 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4701 && (run_queue != 0)) {
4702 rundevq = 1;
4703 }
4704 }
4705 }
4706 splx(s1);
4707 if (rundevq != 0)
4708 xpt_run_dev_sendq(dev->target->bus);
4709 splx(s0);
4710}
4711
4712void
4713xpt_release_simq(struct cam_sim *sim, int run_queue)
4714{
4715 int s;
4716 struct camq *sendq;
4717
4718 sendq = &(sim->devq->send_queue);
4719 s = splcam();
4720 if (sendq->qfrozen_cnt > 0) {
4721
4722 sendq->qfrozen_cnt--;
4723 if (sendq->qfrozen_cnt == 0) {
4724 struct cam_eb *bus;
4725
4726 /*
4727 * If there is a timeout scheduled to release this
4728 * sim queue, remove it. The queue frozen count is
4729 * already at 0.
4730 */
4731 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4732 untimeout(xpt_release_simq_timeout, sim,
4733 sim->c_handle);
4734 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4735 }
4736 bus = xpt_find_bus(sim->path_id);
4737 splx(s);
4738
4739 if (run_queue) {
4740 /*
4741				 * Now that we are unfrozen, run the send queue.
4742 */
4743 xpt_run_dev_sendq(bus);
4744 }
4745 xpt_release_bus(bus);
4746 } else
4747 splx(s);
4748 } else
4749 splx(s);
4750}
4751
4752static void
4753xpt_release_simq_timeout(void *arg)
4754{
4755 struct cam_sim *sim;
4756
4757 sim = (struct cam_sim *)arg;
4758 xpt_release_simq(sim, /* run_queue */ TRUE);
4759}
4760
4761void
4762xpt_done(union ccb *done_ccb)
4763{
4764 int s;
4765
4766 s = splcam();
4767
4768 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4769 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4770 /*
4771		 * Queue up any of the "non-immediate" type of ccbs
4772		 * for handling by our SWI handler.
4773 */
4774 switch (done_ccb->ccb_h.path->periph->type) {
4775 case CAM_PERIPH_BIO:
4776 TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
4777 sim_links.tqe);
4778 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4779 swi_sched(cambio_ih, 0);
4780 break;
4781 case CAM_PERIPH_NET:
4782 TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h,
4783 sim_links.tqe);
4784 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4785 swi_sched(camnet_ih, 0);
4786 break;
4787 }
4788 }
4789 splx(s);
4790}
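
/*
 * SIMs call xpt_done() from their interrupt handlers to hand completed
 * CCBs back to the XPT.  A common completion sketch:
 *
 *	ccb->ccb_h.status = CAM_REQ_CMP;
 *	xpt_done(ccb);
 *
 * or, for an error that should hold off further I/O to the device:
 *
 *	xpt_freeze_devq(ccb->ccb_h.path, 1);
 *	ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_DEV_QFRZN;
 *	xpt_done(ccb);
 */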
4791
4792union ccb *
4793xpt_alloc_ccb()
4794{
4795 union ccb *new_ccb;
4796
4797 new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_WAITOK);
4798 return (new_ccb);
4799}
4800
4801void
4802xpt_free_ccb(union ccb *free_ccb)
4803{
4804 free(free_ccb, M_DEVBUF);
4805}
4806
4807
4808
4809/* Private XPT functions */
4810
4811/*
4812 * Get a CAM control block for the caller. Charge the structure to the device
4813 * referenced by the path.  If this device has no 'credits' then the
4814 * device already has the maximum number of outstanding operations under way
4815 * and we return NULL. If we don't have sufficient resources to allocate more
4816 * ccbs, we also return NULL.
4817 */
4818static union ccb *
4819xpt_get_ccb(struct cam_ed *device)
4820{
4821 union ccb *new_ccb;
4822 int s;
4823
4824 s = splsoftcam();
4825 if ((new_ccb = (union ccb *)SLIST_FIRST(&ccb_freeq)) == NULL) {
4826 new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_NOWAIT);
4827 if (new_ccb == NULL) {
4828 splx(s);
4829 return (NULL);
4830 }
4831 callout_handle_init(&new_ccb->ccb_h.timeout_ch);
4832 SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
4833 xpt_links.sle);
4834 xpt_ccb_count++;
4835 }
4836 cam_ccbq_take_opening(&device->ccbq);
4837 SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
4838 splx(s);
4839 return (new_ccb);
4840}
4841
4842static void
4843xpt_release_bus(struct cam_eb *bus)
4844{
4845 int s;
4846
4847 s = splcam();
4848 if ((--bus->refcount == 0)
4849 && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
4850 TAILQ_REMOVE(&xpt_busses, bus, links);
4851 bus_generation++;
4852 splx(s);
4853 free(bus, M_DEVBUF);
4854 } else
4855 splx(s);
4856}
4857
4858static struct cam_et *
4859xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4860{
4861 struct cam_et *target;
4862
4863 target = (struct cam_et *)malloc(sizeof(*target), M_DEVBUF, M_NOWAIT);
4864 if (target != NULL) {
4865 struct cam_et *cur_target;
4866
4867 TAILQ_INIT(&target->ed_entries);
4868 target->bus = bus;
4869 target->target_id = target_id;
4870 target->refcount = 1;
4871 target->generation = 0;
4872 timevalclear(&target->last_reset);
4873 /*
4874 * Hold a reference to our parent bus so it
4875 * will not go away before we do.
4876 */
4877 bus->refcount++;
4878
4879 /* Insertion sort into our bus's target list */
4880 cur_target = TAILQ_FIRST(&bus->et_entries);
4881 while (cur_target != NULL && cur_target->target_id < target_id)
4882 cur_target = TAILQ_NEXT(cur_target, links);
4883
4884 if (cur_target != NULL) {
4885 TAILQ_INSERT_BEFORE(cur_target, target, links);
4886 } else {
4887 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4888 }
4889 bus->generation++;
4890 }
4891 return (target);
4892}
4893
4894static void
4895xpt_release_target(struct cam_eb *bus, struct cam_et *target)
4896{
4897 int s;
4898
4899 s = splcam();
4900 if ((--target->refcount == 0)
4901 && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
4902 TAILQ_REMOVE(&bus->et_entries, target, links);
4903 bus->generation++;
4904 splx(s);
4905 free(target, M_DEVBUF);
4906 xpt_release_bus(bus);
4907 } else
4908 splx(s);
4909}
4910
4911static struct cam_ed *
4912xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4913{
4914#ifdef CAM_NEW_TRAN_CODE
4915 struct cam_path path;
4916#endif /* CAM_NEW_TRAN_CODE */
4917 struct cam_ed *device;
4918 struct cam_devq *devq;
4919 cam_status status;
4920
4921 /* Make space for us in the device queue on our bus */
4922 devq = bus->sim->devq;
4923 status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
4924
4925 if (status != CAM_REQ_CMP) {
4926 device = NULL;
4927 } else {
4928 device = (struct cam_ed *)malloc(sizeof(*device),
4929 M_DEVBUF, M_NOWAIT);
4930 }
4931
4932 if (device != NULL) {
4933 struct cam_ed *cur_device;
4934
4935 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
4936 device->alloc_ccb_entry.device = device;
4937 cam_init_pinfo(&device->send_ccb_entry.pinfo);
4938 device->send_ccb_entry.device = device;
4939 device->target = target;
4940 device->lun_id = lun_id;
4941 /* Initialize our queues */
4942 if (camq_init(&device->drvq, 0) != 0) {
4943 free(device, M_DEVBUF);
4944 return (NULL);
4945 }
4946 if (cam_ccbq_init(&device->ccbq,
4947 bus->sim->max_dev_openings) != 0) {
4948 camq_fini(&device->drvq);
4949 free(device, M_DEVBUF);
4950 return (NULL);
4951 }
4952 SLIST_INIT(&device->asyncs);
4953 SLIST_INIT(&device->periphs);
4954 device->generation = 0;
4955 device->owner = NULL;
4956 /*
4957 * Take the default quirk entry until we have inquiry
4958 * data and can determine a better quirk to use.
4959 */
4960 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
4961 bzero(&device->inq_data, sizeof(device->inq_data));
4962 device->inq_flags = 0;
4963 device->queue_flags = 0;
4964 device->serial_num = NULL;
4965 device->serial_num_len = 0;
4966 device->qfrozen_cnt = 0;
4967 device->flags = CAM_DEV_UNCONFIGURED;
4968 device->tag_delay_count = 0;
4969 device->refcount = 1;
4970 callout_handle_init(&device->c_handle);
4971
4972 /*
4973 * Hold a reference to our parent target so it
4974 * will not go away before we do.
4975 */
4976 target->refcount++;
4977
4978 /*
4979 * XXX should be limited by number of CCBs this bus can
4980 * do.
4981 */
4982 xpt_max_ccbs += device->ccbq.devq_openings;
4983 /* Insertion sort into our target's device list */
4984 cur_device = TAILQ_FIRST(&target->ed_entries);
4985 while (cur_device != NULL && cur_device->lun_id < lun_id)
4986 cur_device = TAILQ_NEXT(cur_device, links);
4987 if (cur_device != NULL) {
4988 TAILQ_INSERT_BEFORE(cur_device, device, links);
4989 } else {
4990 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4991 }
4992 target->generation++;
4993#ifdef CAM_NEW_TRAN_CODE
4994 if (lun_id != CAM_LUN_WILDCARD) {
4995 xpt_compile_path(&path,
4996 NULL,
4997 bus->path_id,
4998 target->target_id,
4999 lun_id);
5000 xpt_devise_transport(&path);
5001 xpt_release_path(&path);
5002 }
5003#endif /* CAM_NEW_TRAN_CODE */
5004 }
5005 return (device);
5006}
5007
5008static void
5009xpt_release_device(struct cam_eb *bus, struct cam_et *target,
5010 struct cam_ed *device)
5011{
5012 int s;
5013
5014 s = splcam();
5015 if ((--device->refcount == 0)
5016 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
5017 struct cam_devq *devq;
5018
5019 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
5020 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
5021 panic("Removing device while still queued for ccbs");
5022
5023 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
5024 untimeout(xpt_release_devq_timeout, device,
5025 device->c_handle);
5026
5027 TAILQ_REMOVE(&target->ed_entries, device,links);
5028 target->generation++;
5029 xpt_max_ccbs -= device->ccbq.devq_openings;
5030 /* Release our slot in the devq */
5031 devq = bus->sim->devq;
5032 cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
5033 splx(s);
5034 free(device, M_DEVBUF);
5035 xpt_release_target(bus, target);
5036 } else
5037 splx(s);
5038}
5039
5040static u_int32_t
5041xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
5042{
5043 int s;
5044 int diff;
5045 int result;
5046 struct cam_ed *dev;
5047
5048 dev = path->device;
5049 s = splsoftcam();
5050
5051 diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
5052 result = cam_ccbq_resize(&dev->ccbq, newopenings);
5053 if (result == CAM_REQ_CMP && (diff < 0)) {
5054 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
5055 }
5056 /* Adjust the global limit */
5057 xpt_max_ccbs += diff;
5058 splx(s);
5059 return (result);
5060}
5061
5062static struct cam_eb *
5063xpt_find_bus(path_id_t path_id)
5064{
5065 struct cam_eb *bus;
5066
5067 for (bus = TAILQ_FIRST(&xpt_busses);
5068 bus != NULL;
5069 bus = TAILQ_NEXT(bus, links)) {
5070 if (bus->path_id == path_id) {
5071 bus->refcount++;
5072 break;
5073 }
5074 }
5075 return (bus);
5076}
5077
5078static struct cam_et *
5079xpt_find_target(struct cam_eb *bus, target_id_t target_id)
5080{
5081 struct cam_et *target;
5082
5083 for (target = TAILQ_FIRST(&bus->et_entries);
5084 target != NULL;
5085 target = TAILQ_NEXT(target, links)) {
5086 if (target->target_id == target_id) {
5087 target->refcount++;
5088 break;
5089 }
5090 }
5091 return (target);
5092}
5093
5094static struct cam_ed *
5095xpt_find_device(struct cam_et *target, lun_id_t lun_id)
5096{
5097 struct cam_ed *device;
5098
5099 for (device = TAILQ_FIRST(&target->ed_entries);
5100 device != NULL;
5101 device = TAILQ_NEXT(device, links)) {
5102 if (device->lun_id == lun_id) {
5103 device->refcount++;
5104 break;
5105 }
5106 }
5107 return (device);
5108}
5109
5110typedef struct {
5111 union ccb *request_ccb;
5112 struct ccb_pathinq *cpi;
5113 int pending_count;
5114} xpt_scan_bus_info;
5115
5116/*
5117 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
5118 * As the scan progresses, xpt_scan_bus is used as the
5119 * callback on completion function.
5120 */
5121static void
5122xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
5123{
5124 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5125 ("xpt_scan_bus\n"));
5126 switch (request_ccb->ccb_h.func_code) {
5127 case XPT_SCAN_BUS:
5128 {
5129 xpt_scan_bus_info *scan_info;
5130 union ccb *work_ccb;
5131 struct cam_path *path;
5132 u_int i;
5133 u_int max_target;
5134 u_int initiator_id;
5135
5136 /* Find out the characteristics of the bus */
5137 work_ccb = xpt_alloc_ccb();
5138 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
5139 request_ccb->ccb_h.pinfo.priority);
5140 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
5141 xpt_action(work_ccb);
5142 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
5143 request_ccb->ccb_h.status = work_ccb->ccb_h.status;
5144 xpt_free_ccb(work_ccb);
5145 xpt_done(request_ccb);
5146 return;
5147 }
5148
5149 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5150 /*
5151 * Can't scan the bus on an adapter that
5152 * cannot perform the initiator role.
5153 */
5154 request_ccb->ccb_h.status = CAM_REQ_CMP;
5155 xpt_free_ccb(work_ccb);
5156 xpt_done(request_ccb);
5157 return;
5158 }
5159
5160 /* Save some state for use while we probe for devices */
5161 scan_info = (xpt_scan_bus_info *)
5162 malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK);
5163 scan_info->request_ccb = request_ccb;
5164 scan_info->cpi = &work_ccb->cpi;
5165
5166 /* Cache on our stack so we can work asynchronously */
5167 max_target = scan_info->cpi->max_target;
5168 initiator_id = scan_info->cpi->initiator_id;
5169
5170 /*
5171 * Don't count the initiator if the
5172 * initiator is addressable.
5173 */
5174 scan_info->pending_count = max_target + 1;
5175 if (initiator_id <= max_target)
5176 scan_info->pending_count--;
5177
5178 for (i = 0; i <= max_target; i++) {
5179 cam_status status;
5180 if (i == initiator_id)
5181 continue;
5182
5183 status = xpt_create_path(&path, xpt_periph,
5184 request_ccb->ccb_h.path_id,
5185 i, 0);
5186 if (status != CAM_REQ_CMP) {
5187 printf("xpt_scan_bus: xpt_create_path failed"
5188 " with status %#x, bus scan halted\n",
5189 status);
5190 break;
5191 }
5192 work_ccb = xpt_alloc_ccb();
5193 xpt_setup_ccb(&work_ccb->ccb_h, path,
5194 request_ccb->ccb_h.pinfo.priority);
5195 work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5196 work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5197 work_ccb->ccb_h.ppriv_ptr0 = scan_info;
5198 work_ccb->crcn.flags = request_ccb->crcn.flags;
5199 xpt_action(work_ccb);
5200 }
5201 break;
5202 }
5203 case XPT_SCAN_LUN:
5204 {
5205 xpt_scan_bus_info *scan_info;
5206 path_id_t path_id;
5207 target_id_t target_id;
5208 lun_id_t lun_id;
5209
5210 /* Reuse the same CCB to query if a device was really found */
5211 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
5212 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
5213 request_ccb->ccb_h.pinfo.priority);
5214 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5215
5216 path_id = request_ccb->ccb_h.path_id;
5217 target_id = request_ccb->ccb_h.target_id;
5218 lun_id = request_ccb->ccb_h.target_lun;
5219 xpt_action(request_ccb);
5220
5221 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
5222 struct cam_ed *device;
5223 struct cam_et *target;
5224 int s, phl;
5225
5226 /*
5227 * If we already probed lun 0 successfully, or
5228 * we have additional configured luns on this
5229 * target that might have "gone away", go onto
5230 * the next lun.
5231 */
5232 target = request_ccb->ccb_h.path->target;
5233 /*
5234 * We may touch devices that we don't
5235			 * hold references to, so ensure they
5236 * don't disappear out from under us.
5237 * The target above is referenced by the
5238 * path in the request ccb.
5239 */
5240 phl = 0;
5241 s = splcam();
5242 device = TAILQ_FIRST(&target->ed_entries);
5243 if (device != NULL) {
5244 phl = device->quirk->quirks & CAM_QUIRK_HILUNS;
5245 if (device->lun_id == 0)
5246 device = TAILQ_NEXT(device, links);
5247 }
5248 splx(s);
5249 if ((lun_id != 0) || (device != NULL)) {
5250 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
5251 lun_id++;
5252 }
5253 } else {
5254 struct cam_ed *device;
5255
5256 device = request_ccb->ccb_h.path->device;
5257
5258 if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
5259 /* Try the next lun */
5260 if (lun_id < (CAM_SCSI2_MAXLUN-1) ||
5261 (device->quirk->quirks & CAM_QUIRK_HILUNS))
5262 lun_id++;
5263 }
5264 }
5265
5266 xpt_free_path(request_ccb->ccb_h.path);
5267
5268 /* Check Bounds */
5269 if ((lun_id == request_ccb->ccb_h.target_lun)
5270 || lun_id > scan_info->cpi->max_lun) {
5271 /* We're done */
5272
5273 xpt_free_ccb(request_ccb);
5274 scan_info->pending_count--;
5275 if (scan_info->pending_count == 0) {
5276 xpt_free_ccb((union ccb *)scan_info->cpi);
5277 request_ccb = scan_info->request_ccb;
5278 free(scan_info, M_TEMP);
5279 request_ccb->ccb_h.status = CAM_REQ_CMP;
5280 xpt_done(request_ccb);
5281 }
5282 } else {
5283 /* Try the next device */
5284 struct cam_path *path;
5285 cam_status status;
5286
5287 path = request_ccb->ccb_h.path;
5288 status = xpt_create_path(&path, xpt_periph,
5289 path_id, target_id, lun_id);
5290 if (status != CAM_REQ_CMP) {
5291 printf("xpt_scan_bus: xpt_create_path failed "
5292 "with status %#x, halting LUN scan\n",
5293 status);
5294 xpt_free_ccb(request_ccb);
5295 scan_info->pending_count--;
5296 if (scan_info->pending_count == 0) {
5297 xpt_free_ccb(
5298 (union ccb *)scan_info->cpi);
5299 request_ccb = scan_info->request_ccb;
5300 free(scan_info, M_TEMP);
5301 request_ccb->ccb_h.status = CAM_REQ_CMP;
5302 xpt_done(request_ccb);
5303 break;
5304 }
5305 }
5306 xpt_setup_ccb(&request_ccb->ccb_h, path,
5307 request_ccb->ccb_h.pinfo.priority);
5308 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5309 request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5310 request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5311 request_ccb->crcn.flags =
5312 scan_info->request_ccb->crcn.flags;
5313 xpt_action(request_ccb);
5314 }
5315 break;
5316 }
5317 default:
5318 break;
5319 }
5320}
5321
5322typedef enum {
5323 PROBE_TUR,
5324 PROBE_INQUIRY,
5325 PROBE_FULL_INQUIRY,
5326 PROBE_MODE_SENSE,
5327 PROBE_SERIAL_NUM,
5328 PROBE_TUR_FOR_NEGOTIATION
5329} probe_action;
5330
5331typedef enum {
5332 PROBE_INQUIRY_CKSUM = 0x01,
5333 PROBE_SERIAL_CKSUM = 0x02,
5334 PROBE_NO_ANNOUNCE = 0x04
5335} probe_flags;
5336
5337typedef struct {
5338 TAILQ_HEAD(, ccb_hdr) request_ccbs;
5339 probe_action action;
5340 union ccb saved_ccb;
5341 probe_flags flags;
5342 MD5_CTX context;
5343 u_int8_t digest[16];
5344} probe_softc;
5345
5346static void
5347xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5348 cam_flags flags, union ccb *request_ccb)
5349{
5350 struct ccb_pathinq cpi;
5351 cam_status status;
5352 struct cam_path *new_path;
5353 struct cam_periph *old_periph;
5354 int s;
5355
5356 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5357 ("xpt_scan_lun\n"));
5358
5359 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5360 cpi.ccb_h.func_code = XPT_PATH_INQ;
5361 xpt_action((union ccb *)&cpi);
5362
5363 if (cpi.ccb_h.status != CAM_REQ_CMP) {
5364 if (request_ccb != NULL) {
5365 request_ccb->ccb_h.status = cpi.ccb_h.status;
5366 xpt_done(request_ccb);
5367 }
5368 return;
5369 }
5370
5371 if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5372 /*
5373 * Can't scan the bus on an adapter that
5374 * cannot perform the initiator role.
5375 */
5376 if (request_ccb != NULL) {
5377 request_ccb->ccb_h.status = CAM_REQ_CMP;
5378 xpt_done(request_ccb);
5379 }
5380 return;
5381 }
5382
5383 if (request_ccb == NULL) {
5384 request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
5385 if (request_ccb == NULL) {
5386 xpt_print_path(path);
5387 printf("xpt_scan_lun: can't allocate CCB, can't "
5388 "continue\n");
5389 return;
5390 }
5391 new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
5392 if (new_path == NULL) {
5393 xpt_print_path(path);
5394 printf("xpt_scan_lun: can't allocate path, can't "
5395 "continue\n");
5396 free(request_ccb, M_TEMP);
5397 return;
5398 }
5399 status = xpt_compile_path(new_path, xpt_periph,
5400 path->bus->path_id,
5401 path->target->target_id,
5402 path->device->lun_id);
5403
5404 if (status != CAM_REQ_CMP) {
5405 xpt_print_path(path);
5406 printf("xpt_scan_lun: can't compile path, can't "
5407 "continue\n");
5408 free(request_ccb, M_TEMP);
5409 free(new_path, M_TEMP);
5410 return;
5411 }
5412 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5413 request_ccb->ccb_h.cbfcnp = xptscandone;
5414 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5415 request_ccb->crcn.flags = flags;
5416 }
5417
5418 s = splsoftcam();
5419 if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5420 probe_softc *softc;
5421
5422 softc = (probe_softc *)old_periph->softc;
5423 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5424 periph_links.tqe);
5425 } else {
5426 status = cam_periph_alloc(proberegister, NULL, probecleanup,
5427 probestart, "probe",
5428 CAM_PERIPH_BIO,
5429 request_ccb->ccb_h.path, NULL, 0,
5430 request_ccb);
5431
5432 if (status != CAM_REQ_CMP) {
5433 xpt_print_path(path);
5434			printf("xpt_scan_lun: cam_periph_alloc returned an "
5435 "error, can't continue probe\n");
5436 request_ccb->ccb_h.status = status;
5437 xpt_done(request_ccb);
5438 }
5439 }
5440 splx(s);
5441}
5442
5443static void
5444xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5445{
5446 xpt_release_path(done_ccb->ccb_h.path);
5447 free(done_ccb->ccb_h.path, M_TEMP);
5448 free(done_ccb, M_TEMP);
5449}
5450
5451static cam_status
5452proberegister(struct cam_periph *periph, void *arg)
5453{
5454 union ccb *request_ccb; /* CCB representing the probe request */
5455 probe_softc *softc;
5456
5457 request_ccb = (union ccb *)arg;
5458 if (periph == NULL) {
5459 printf("proberegister: periph was NULL!!\n");
5460 return(CAM_REQ_CMP_ERR);
5461 }
5462
5463 if (request_ccb == NULL) {
5464 printf("proberegister: no probe CCB, "
5465 "can't register device\n");
5466 return(CAM_REQ_CMP_ERR);
5467 }
5468
5469 softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);
5470
5471 if (softc == NULL) {
5472 printf("proberegister: Unable to probe new device. "
5473 "Unable to allocate softc\n");
5474 return(CAM_REQ_CMP_ERR);
5475 }
5476 TAILQ_INIT(&softc->request_ccbs);
5477 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5478 periph_links.tqe);
5479 softc->flags = 0;
5480 periph->softc = softc;
5481 cam_periph_acquire(periph);
5482 /*
5483 * Ensure we've waited at least a bus settle
5484 * delay before attempting to probe the device.
5485 * For HBAs that don't do bus resets, this won't make a difference.
5486 */
5487 cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5488 scsi_delay);
5489 probeschedule(periph);
5490 return(CAM_REQ_CMP);
5491}
5492
5493static void
5494probeschedule(struct cam_periph *periph)
5495{
5496 struct ccb_pathinq cpi;
5497 union ccb *ccb;
5498 probe_softc *softc;
5499
5500 softc = (probe_softc *)periph->softc;
5501 ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5502
5503 xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5504 cpi.ccb_h.func_code = XPT_PATH_INQ;
5505 xpt_action((union ccb *)&cpi);
5506
5507 /*
5508 * If a device has gone away and another device, or the same one,
5509 * is back in the same place, it should have a unit attention
5510 * condition pending. It will not report the unit attention in
5511 * response to an inquiry, which may leave invalid transfer
5512 * negotiations in effect. The TUR will reveal the unit attention
5513 * condition. Only send the TUR for lun 0, since some devices
5514 * will get confused by commands other than inquiry to non-existent
5515	 * luns.  If you think a device has gone away, start your scan from
5516	 * lun 0.  This will ensure that any bogus transfer settings are
5517 * invalidated.
5518 *
5519 * If we haven't seen the device before and the controller supports
5520 * some kind of transfer negotiation, negotiate with the first
5521 * sent command if no bus reset was performed at startup. This
5522 * ensures that the device is not confused by transfer negotiation
5523 * settings left over by loader or BIOS action.
5524 */
5525 if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5526 && (ccb->ccb_h.target_lun == 0)) {
5527 softc->action = PROBE_TUR;
5528 } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5529 && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5530 proberequestdefaultnegotiation(periph);
5531 softc->action = PROBE_INQUIRY;
5532 } else {
5533 softc->action = PROBE_INQUIRY;
5534 }
5535
5536 if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5537 softc->flags |= PROBE_NO_ANNOUNCE;
5538 else
5539 softc->flags &= ~PROBE_NO_ANNOUNCE;
5540
5541 xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5542}
5543
5544static void
5545probestart(struct cam_periph *periph, union ccb *start_ccb)
5546{
5547 /* Probe the device that our peripheral driver points to */
5548 struct ccb_scsiio *csio;
5549 probe_softc *softc;
5550
5551 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5552
5553 softc = (probe_softc *)periph->softc;
5554 csio = &start_ccb->csio;
5555
5556 switch (softc->action) {
5557 case PROBE_TUR:
5558 case PROBE_TUR_FOR_NEGOTIATION:
5559 {
5560 scsi_test_unit_ready(csio,
5561 /*retries*/4,
5562 probedone,
5563 MSG_SIMPLE_Q_TAG,
5564 SSD_FULL_SIZE,
5565 /*timeout*/60000);
5566 break;
5567 }
5568 case PROBE_INQUIRY:
5569 case PROBE_FULL_INQUIRY:
5570 {
5571 u_int inquiry_len;
5572 struct scsi_inquiry_data *inq_buf;
5573
5574 inq_buf = &periph->path->device->inq_data;
5575 /*
5576 * If the device is currently configured, we calculate an
5577 * MD5 checksum of the inquiry data, and if the serial number
5578 * length is greater than 0, add the serial number data
5579 * into the checksum as well. Once the inquiry and the
5580 * serial number check finish, we attempt to figure out
5581 * whether we still have the same device.
5582 */
5583 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5584
5585 MD5Init(&softc->context);
5586 MD5Update(&softc->context, (unsigned char *)inq_buf,
5587 sizeof(struct scsi_inquiry_data));
5588 softc->flags |= PROBE_INQUIRY_CKSUM;
5589 if (periph->path->device->serial_num_len > 0) {
5590 MD5Update(&softc->context,
5591 periph->path->device->serial_num,
5592 periph->path->device->serial_num_len);
5593 softc->flags |= PROBE_SERIAL_CKSUM;
5594 }
5595 MD5Final(softc->digest, &softc->context);
5596 }
5597
5598 if (softc->action == PROBE_INQUIRY)
5599 inquiry_len = SHORT_INQUIRY_LENGTH;
5600 else
5601 inquiry_len = inq_buf->additional_length + 4;
5602
5603 scsi_inquiry(csio,
5604 /*retries*/4,
5605 probedone,
5606 MSG_SIMPLE_Q_TAG,
5607 (u_int8_t *)inq_buf,
5608 inquiry_len,
5609 /*evpd*/FALSE,
5610 /*page_code*/0,
5611 SSD_MIN_SIZE,
5612 /*timeout*/60 * 1000);
5613 break;
5614 }
5615 case PROBE_MODE_SENSE:
5616 {
5617 void *mode_buf;
5618 int mode_buf_len;
5619
5620 mode_buf_len = sizeof(struct scsi_mode_header_6)
5621 + sizeof(struct scsi_mode_blk_desc)
5622 + sizeof(struct scsi_control_page);
5623 mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
5624 if (mode_buf != NULL) {
5625 scsi_mode_sense(csio,
5626 /*retries*/4,
5627 probedone,
5628 MSG_SIMPLE_Q_TAG,
5629 /*dbd*/FALSE,
5630 SMS_PAGE_CTRL_CURRENT,
5631 SMS_CONTROL_MODE_PAGE,
5632 mode_buf,
5633 mode_buf_len,
5634 SSD_FULL_SIZE,
5635 /*timeout*/60000);
5636 break;
5637 }
5638 xpt_print_path(periph->path);
5639 printf("Unable to mode sense control page - malloc failure\n");
5640 softc->action = PROBE_SERIAL_NUM;
5641 }
5642 /* FALLTHROUGH */
5643 case PROBE_SERIAL_NUM:
5644 {
5645 struct scsi_vpd_unit_serial_number *serial_buf;
5646 struct cam_ed* device;
5647
5648 serial_buf = NULL;
5649 device = periph->path->device;
5650 device->serial_num = NULL;
5651 device->serial_num_len = 0;
5652
5653 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
5654 serial_buf = (struct scsi_vpd_unit_serial_number *)
5655 malloc(sizeof(*serial_buf), M_TEMP,
5656 M_NOWAIT | M_ZERO);
5657
5658 if (serial_buf != NULL) {
5659 scsi_inquiry(csio,
5660 /*retries*/4,
5661 probedone,
5662 MSG_SIMPLE_Q_TAG,
5663 (u_int8_t *)serial_buf,
5664 sizeof(*serial_buf),
5665 /*evpd*/TRUE,
5666 SVPD_UNIT_SERIAL_NUMBER,
5667 SSD_MIN_SIZE,
5668 /*timeout*/60 * 1000);
5669 break;
5670 }
5671 /*
5672 * We'll have to do without, let our probedone
5673 * routine finish up for us.
5674 */
5675 start_ccb->csio.data_ptr = NULL;
5676 probedone(periph, start_ccb);
5677 return;
5678 }
5679 }
5680 xpt_action(start_ccb);
5681}
5682
5683static void
5684proberequestdefaultnegotiation(struct cam_periph *periph)
5685{
5686 struct ccb_trans_settings cts;
5687
5688 xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5689 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5690#ifdef CAM_NEW_TRAN_CODE
5691 cts.type = CTS_TYPE_USER_SETTINGS;
5692#else /* CAM_NEW_TRAN_CODE */
5693 cts.flags = CCB_TRANS_USER_SETTINGS;
5694#endif /* CAM_NEW_TRAN_CODE */
5695 xpt_action((union ccb *)&cts);
5696 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5697#ifdef CAM_NEW_TRAN_CODE
5698 cts.type = CTS_TYPE_CURRENT_SETTINGS;
5699#else /* CAM_NEW_TRAN_CODE */
5700 cts.flags &= ~CCB_TRANS_USER_SETTINGS;
5701 cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
5702#endif /* CAM_NEW_TRAN_CODE */
5703 xpt_action((union ccb *)&cts);
5704}
5705
5706static void
5707probedone(struct cam_periph *periph, union ccb *done_ccb)
5708{
5709 probe_softc *softc;
5710 struct cam_path *path;
5711 u_int32_t priority;
5712
5713 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
5714
5715 softc = (probe_softc *)periph->softc;
5716 path = done_ccb->ccb_h.path;
5717 priority = done_ccb->ccb_h.pinfo.priority;
5718
5719 switch (softc->action) {
5720 case PROBE_TUR:
5721 {
5722 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5723
5724 if (cam_periph_error(done_ccb, 0,
5725 SF_NO_PRINT, NULL) == ERESTART)
5726 return;
5727 else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5728 /* Don't wedge the queue */
5729 xpt_release_devq(done_ccb->ccb_h.path,
5730 /*count*/1,
5731 /*run_queue*/TRUE);
5732 }
5733 softc->action = PROBE_INQUIRY;
5734 xpt_release_ccb(done_ccb);
5735 xpt_schedule(periph, priority);
5736 return;
5737 }
5738 case PROBE_INQUIRY:
5739 case PROBE_FULL_INQUIRY:
5740 {
5741 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5742 struct scsi_inquiry_data *inq_buf;
5743 u_int8_t periph_qual;
5744
5745 path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
5746 inq_buf = &path->device->inq_data;
5747
5748 periph_qual = SID_QUAL(inq_buf);
5749
5750 switch(periph_qual) {
5751 case SID_QUAL_LU_CONNECTED:
5752 {
5753 u_int8_t alen;
5754
5755 /*
5756 * We conservatively request only
5757				 * SHORT_INQUIRY_LENGTH bytes of inquiry
5758 * information during our first try
5759 * at sending an INQUIRY. If the device
5760 * has more information to give,
5761 * perform a second request specifying
5762 * the amount of information the device
5763 * is willing to give.
5764 */
5765 alen = inq_buf->additional_length;
5766 if (softc->action == PROBE_INQUIRY
5767 && alen > (SHORT_INQUIRY_LENGTH - 4)) {
5768 softc->action = PROBE_FULL_INQUIRY;
5769 xpt_release_ccb(done_ccb);
5770 xpt_schedule(periph, priority);
5771 return;
5772 }
5773
5774 xpt_find_quirk(path->device);
5775
5776#ifdef CAM_NEW_TRAN_CODE
5777 xpt_devise_transport(path);
5778#endif /* CAM_NEW_TRAN_CODE */
5779 if ((inq_buf->flags & SID_CmdQue) != 0)
5780 softc->action = PROBE_MODE_SENSE;
5781 else
5782 softc->action = PROBE_SERIAL_NUM;
5783
5784 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5785
5786 xpt_release_ccb(done_ccb);
5787 xpt_schedule(periph, priority);
5788 return;
5789 }
5790 default:
5791 break;
5792 }
5793 } else if (cam_periph_error(done_ccb, 0,
5794 done_ccb->ccb_h.target_lun > 0
5795 ? SF_RETRY_UA|SF_QUIET_IR
5796 : SF_RETRY_UA,
5797 &softc->saved_ccb) == ERESTART) {
5798 return;
5799 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5800 /* Don't wedge the queue */
5801 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5802 /*run_queue*/TRUE);
5803 }
5804 /*
5805 * If we get to this point, we got an error status back
5806 * from the inquiry and the error status doesn't require
5807 * automatically retrying the command. Therefore, the
5808 * inquiry failed. If we had inquiry information before
5809 * for this device, but this latest inquiry command failed,
5810 * the device has probably gone away. If this device isn't
5811 * already marked unconfigured, notify the peripheral
5812 * drivers that this device is no more.
5813 */
5814 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5815 /* Send the async notification. */
5816 xpt_async(AC_LOST_DEVICE, path, NULL);
5817
5818 xpt_release_ccb(done_ccb);
5819 break;
5820 }
5821 case PROBE_MODE_SENSE:
5822 {
5823 struct ccb_scsiio *csio;
5824 struct scsi_mode_header_6 *mode_hdr;
5825
5826 csio = &done_ccb->csio;
5827 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
5828 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5829 struct scsi_control_page *page;
5830 u_int8_t *offset;
5831
5832 offset = ((u_int8_t *)&mode_hdr[1])
5833 + mode_hdr->blk_desc_len;
5834 page = (struct scsi_control_page *)offset;
5835 path->device->queue_flags = page->queue_flags;
5836 } else if (cam_periph_error(done_ccb, 0,
5837 SF_RETRY_UA|SF_NO_PRINT,
5838 &softc->saved_ccb) == ERESTART) {
5839 return;
5840 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5841 /* Don't wedge the queue */
5842 xpt_release_devq(done_ccb->ccb_h.path,
5843 /*count*/1, /*run_queue*/TRUE);
5844 }
5845 xpt_release_ccb(done_ccb);
5846 free(mode_hdr, M_TEMP);
5847 softc->action = PROBE_SERIAL_NUM;
5848 xpt_schedule(periph, priority);
5849 return;
5850 }
5851 case PROBE_SERIAL_NUM:
5852 {
5853 struct ccb_scsiio *csio;
5854 struct scsi_vpd_unit_serial_number *serial_buf;
5855 u_int32_t priority;
5856 int changed;
5857 int have_serialnum;
5858
5859 changed = 1;
5860 have_serialnum = 0;
5861 csio = &done_ccb->csio;
5862 priority = done_ccb->ccb_h.pinfo.priority;
5863 serial_buf =
5864 (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
5865
5866 /* Clean up from previous instance of this device */
5867 if (path->device->serial_num != NULL) {
5868 free(path->device->serial_num, M_DEVBUF);
5869 path->device->serial_num = NULL;
5870 path->device->serial_num_len = 0;
5871 }
5872
5873 if (serial_buf == NULL) {
5874 /*
5875 * Don't process the command as it was never sent
5876 */
5877 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
5878 && (serial_buf->length > 0)) {
5879
5880 have_serialnum = 1;
5881 path->device->serial_num =
5882 (u_int8_t *)malloc((serial_buf->length + 1),
5883 M_DEVBUF, M_NOWAIT);
5884 if (path->device->serial_num != NULL) {
5885 bcopy(serial_buf->serial_num,
5886 path->device->serial_num,
5887 serial_buf->length);
5888 path->device->serial_num_len =
5889 serial_buf->length;
5890 path->device->serial_num[serial_buf->length]
5891 = '\0';
5892 }
5893 } else if (cam_periph_error(done_ccb, 0,
5894 SF_RETRY_UA|SF_NO_PRINT,
5895 &softc->saved_ccb) == ERESTART) {
5896 return;
5897 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5898 /* Don't wedge the queue */
5899 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5900 /*run_queue*/TRUE);
5901 }
5902
5903 /*
5904 * Let's see if we have seen this device before.
5905 */
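		/*
		 * The probe softc holds an MD5 digest of the inquiry data
		 * and serial number recorded when this device was first
		 * seen; if the digest computed now differs, a different
		 * device has appeared at this B/T/L.
		 */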
5906 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
5907 MD5_CTX context;
5908 u_int8_t digest[16];
5909
5910 MD5Init(&context);
5911
5912 MD5Update(&context,
5913 (unsigned char *)&path->device->inq_data,
5914 sizeof(struct scsi_inquiry_data));
5915
5916 if (have_serialnum)
5917 MD5Update(&context, serial_buf->serial_num,
5918 serial_buf->length);
5919
5920 MD5Final(digest, &context);
5921 if (bcmp(softc->digest, digest, 16) == 0)
5922 changed = 0;
5923
5924 /*
5925 * XXX Do we need to do a TUR in order to ensure
5926 * that the device really hasn't changed???
5927 */
5928 if ((changed != 0)
5929 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
5930 xpt_async(AC_LOST_DEVICE, path, NULL);
5931 }
5932 if (serial_buf != NULL)
5933 free(serial_buf, M_TEMP);
5934
5935 if (changed != 0) {
 5936			/*
 5937			 * We now have all the information necessary
 5938			 * to safely perform transfer negotiation.
 5939			 * Controllers don't perform any negotiation
 5940			 * or tagged queuing until after the first
 5941			 * XPT_SET_TRAN_SETTINGS ccb is received.  So,
 5942			 * on a new device, just retrieve the user
 5943			 * settings, and set them as the current
 5944			 * settings to set the device up.
 5945			 */
5946 proberequestdefaultnegotiation(periph);
5947 xpt_release_ccb(done_ccb);
5948
5949 /*
5950 * Perform a TUR to allow the controller to
5951 * perform any necessary transfer negotiation.
5952 */
5953 softc->action = PROBE_TUR_FOR_NEGOTIATION;
5954 xpt_schedule(periph, priority);
5955 return;
5956 }
5957 xpt_release_ccb(done_ccb);
5958 break;
5959 }
5960 case PROBE_TUR_FOR_NEGOTIATION:
5961 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5962 /* Don't wedge the queue */
5963 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5964 /*run_queue*/TRUE);
5965 }
5966
5967 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5968
5969 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
5970 /* Inform the XPT that a new device has been found */
5971 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5972 xpt_action(done_ccb);
5973
5974 xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb);
5975 }
5976 xpt_release_ccb(done_ccb);
5977 break;
5978 }
5979 done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5980 TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
5981 done_ccb->ccb_h.status = CAM_REQ_CMP;
5982 xpt_done(done_ccb);
5983 if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
5984 cam_periph_invalidate(periph);
5985 cam_periph_release(periph);
5986 } else {
5987 probeschedule(periph);
5988 }
5989}
5990
5991static void
5992probecleanup(struct cam_periph *periph)
5993{
5994 free(periph->softc, M_TEMP);
5995}
5996
5997static void
5998xpt_find_quirk(struct cam_ed *device)
5999{
6000 caddr_t match;
6001
6002 match = cam_quirkmatch((caddr_t)&device->inq_data,
6003 (caddr_t)xpt_quirk_table,
6004 sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
6005 sizeof(*xpt_quirk_table), scsi_inquiry_match);
6006
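	/*
	 * The quirk table ends with a wildcard entry that matches any
	 * device, so a failed lookup means the table itself is broken.
	 */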
6007 if (match == NULL)
6008 panic("xpt_find_quirk: device didn't match wildcard entry!!");
6009
6010 device->quirk = (struct xpt_quirk_entry *)match;
6011}
6012
6013#ifdef CAM_NEW_TRAN_CODE
6014
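/*
 * Deduce the protocol and transport for a device from its inquiry
 * data, the SIM's path inquiry results and, failing those, the
 * settings already established for other LUNs on the same target.
 * The result is pushed to the SIM via an XPT_SET_TRAN_SETTINGS ccb
 * so that the controller and the EDT agree.
 */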
6015static void
6016xpt_devise_transport(struct cam_path *path)
6017{
6018 struct ccb_pathinq cpi;
6019 struct ccb_trans_settings cts;
6020 struct scsi_inquiry_data *inq_buf;
6021
6022 /* Get transport information from the SIM */
6023 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
6024 cpi.ccb_h.func_code = XPT_PATH_INQ;
6025 xpt_action((union ccb *)&cpi);
6026
6027 inq_buf = NULL;
6028 if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
6029 inq_buf = &path->device->inq_data;
6030 path->device->protocol = PROTO_SCSI;
6031 path->device->protocol_version =
6032 inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
6033 path->device->transport = cpi.transport;
6034 path->device->transport_version = cpi.transport_version;
6035
6036 /*
6037 * Any device not using SPI3 features should
6038 * be considered SPI2 or lower.
6039 */
6040 if (inq_buf != NULL) {
6041 if (path->device->transport == XPORT_SPI
6042 && (inq_buf->spi3data & SID_SPI_MASK) == 0
6043 && path->device->transport_version > 2)
6044 path->device->transport_version = 2;
6045 } else {
6046 struct cam_ed* otherdev;
6047
6048 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
6049 otherdev != NULL;
6050 otherdev = TAILQ_NEXT(otherdev, links)) {
6051 if (otherdev != path->device)
6052 break;
6053 }
6054
6055 if (otherdev != NULL) {
6056 /*
6057 * Initially assume the same versioning as
6058 * prior luns for this target.
6059 */
6060 path->device->protocol_version =
6061 otherdev->protocol_version;
6062 path->device->transport_version =
6063 otherdev->transport_version;
6064 } else {
 6065			/* Until we know better, opt for safety */
6066 path->device->protocol_version = 2;
6067 if (path->device->transport == XPORT_SPI)
6068 path->device->transport_version = 2;
6069 else
6070 path->device->transport_version = 0;
6071 }
6072 }
6073
6074 /*
6075 * XXX
6076 * For a device compliant with SPC-2 we should be able
6077 * to determine the transport version supported by
6078 * scrutinizing the version descriptors in the
6079 * inquiry buffer.
6080 */
6081
6082 /* Tell the controller what we think */
6083 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
6084 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
6085 cts.type = CTS_TYPE_CURRENT_SETTINGS;
6086 cts.transport = path->device->transport;
6087 cts.transport_version = path->device->transport_version;
6088 cts.protocol = path->device->protocol;
6089 cts.protocol_version = path->device->protocol_version;
6090 cts.proto_specific.valid = 0;
6091 cts.xport_specific.valid = 0;
6092 xpt_action((union ccb *)&cts);
6093}
6094
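/*
 * Sanity check and complete a transfer settings request before it is
 * passed to the SIM.  Unspecified protocol and transport fields are
 * filled in from the device's own settings, version numbers are
 * clamped to what the device reported, and tagged queuing and SPI
 * options are pared down to what the device, the controller, and the
 * current settings jointly allow.
 *
 * Illustrative sketch (not code from this file): a caller would
 * typically reach this routine through xpt_action(), leaving any
 * fields it does not care about unspecified, e.g. to turn off
 * tagged queuing on a path:
 *
 *	struct ccb_trans_settings cts;
 *
 *	xpt_setup_ccb(&cts.ccb_h, path, 1);
 *	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 *	cts.type = CTS_TYPE_CURRENT_SETTINGS;
 *	cts.protocol = PROTO_UNSPECIFIED;
 *	cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
 *	cts.transport = XPORT_UNSPECIFIED;
 *	cts.transport_version = XPORT_VERSION_UNSPECIFIED;
 *	cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
 *	cts.proto_specific.scsi.flags = 0;
 *	cts.xport_specific.valid = 0;
 *	xpt_action((union ccb *)&cts);
 */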
6095static void
6096xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6097 int async_update)
6098{
6099 struct ccb_pathinq cpi;
6100 struct ccb_trans_settings cur_cts;
6101 struct ccb_trans_settings_scsi *scsi;
6102 struct ccb_trans_settings_scsi *cur_scsi;
6103 struct cam_sim *sim;
6104 struct scsi_inquiry_data *inq_data;
6105
6106 if (device == NULL) {
6107 cts->ccb_h.status = CAM_PATH_INVALID;
6108 xpt_done((union ccb *)cts);
6109 return;
6110 }
6111
6112 if (cts->protocol == PROTO_UNKNOWN
6113 || cts->protocol == PROTO_UNSPECIFIED) {
6114 cts->protocol = device->protocol;
6115 cts->protocol_version = device->protocol_version;
6116 }
6117
6118 if (cts->protocol_version == PROTO_VERSION_UNKNOWN
6119 || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
6120 cts->protocol_version = device->protocol_version;
6121
6122 if (cts->protocol != device->protocol) {
6123 xpt_print_path(cts->ccb_h.path);
6124 printf("Uninitialized Protocol %x:%x?\n",
6125 cts->protocol, device->protocol);
6126 cts->protocol = device->protocol;
6127 }
6128
6129 if (cts->protocol_version > device->protocol_version) {
6130 if (bootverbose) {
6131 xpt_print_path(cts->ccb_h.path);
 6132			printf("Down revving Protocol Version from %d to %d?\n",
6133 cts->protocol_version, device->protocol_version);
6134 }
6135 cts->protocol_version = device->protocol_version;
6136 }
6137
6138 if (cts->transport == XPORT_UNKNOWN
6139 || cts->transport == XPORT_UNSPECIFIED) {
6140 cts->transport = device->transport;
6141 cts->transport_version = device->transport_version;
6142 }
6143
6144 if (cts->transport_version == XPORT_VERSION_UNKNOWN
6145 || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
6146 cts->transport_version = device->transport_version;
6147
6148 if (cts->transport != device->transport) {
6149 xpt_print_path(cts->ccb_h.path);
6150 printf("Uninitialized Transport %x:%x?\n",
6151 cts->transport, device->transport);
6152 cts->transport = device->transport;
6153 }
6154
6155 if (cts->transport_version > device->transport_version) {
6156 if (bootverbose) {
6157 xpt_print_path(cts->ccb_h.path);
 6158			printf("Down revving Transport Version from %d to %d?\n",
6159 cts->transport_version,
6160 device->transport_version);
6161 }
6162 cts->transport_version = device->transport_version;
6163 }
6164
6165 sim = cts->ccb_h.path->bus->sim;
6166
6167 /*
6168 * Nothing more of interest to do unless
6169 * this is a device connected via the
6170 * SCSI protocol.
6171 */
6172 if (cts->protocol != PROTO_SCSI) {
6173 if (async_update == FALSE)
6174 (*(sim->sim_action))(sim, (union ccb *)cts);
6175 return;
6176 }
6177
6178 inq_data = &device->inq_data;
6179 scsi = &cts->proto_specific.scsi;
6180 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6181 cpi.ccb_h.func_code = XPT_PATH_INQ;
6182 xpt_action((union ccb *)&cpi);
6183
6184 /* SCSI specific sanity checking */
6185 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6186 || (inq_data->flags & SID_CmdQue) == 0
6187 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6188 || (device->quirk->mintags == 0)) {
6189 /*
6190 * Can't tag on hardware that doesn't support tags,
6191 * doesn't have it enabled, or has broken tag support.
6192 */
6193 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6194 }
6195
6196 if (async_update == FALSE) {
6197 /*
6198 * Perform sanity checking against what the
6199 * controller and device can do.
6200 */
6201 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6202 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6203 cur_cts.type = cts->type;
6204 xpt_action((union ccb *)&cur_cts);
6205
6206 cur_scsi = &cur_cts.proto_specific.scsi;
6207 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
6208 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6209 scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
6210 }
6211 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
6212 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6213 }
6214
6215 /* SPI specific sanity checking */
6216 if (cts->transport == XPORT_SPI && async_update == FALSE) {
6217 u_int spi3caps;
6218 struct ccb_trans_settings_spi *spi;
6219 struct ccb_trans_settings_spi *cur_spi;
6220
6221 spi = &cts->xport_specific.spi;
6222
6223 cur_spi = &cur_cts.xport_specific.spi;
6224
6225 /* Fill in any gaps in what the user gave us */
6226 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6227 spi->sync_period = cur_spi->sync_period;
6228 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6229 spi->sync_period = 0;
6230 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6231 spi->sync_offset = cur_spi->sync_offset;
6232 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6233 spi->sync_offset = 0;
6234 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6235 spi->ppr_options = cur_spi->ppr_options;
6236 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6237 spi->ppr_options = 0;
6238 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6239 spi->bus_width = cur_spi->bus_width;
6240 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6241 spi->bus_width = 0;
6242 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
6243 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6244 spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
6245 }
6246 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
6247 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6248 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6249 && (inq_data->flags & SID_Sync) == 0
6250 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6251 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6252 || (cur_spi->sync_offset == 0)
6253 || (cur_spi->sync_period == 0)) {
6254 /* Force async */
6255 spi->sync_period = 0;
6256 spi->sync_offset = 0;
6257 }
6258
6259 switch (spi->bus_width) {
6260 case MSG_EXT_WDTR_BUS_32_BIT:
6261 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6262 || (inq_data->flags & SID_WBus32) != 0
6263 || cts->type == CTS_TYPE_USER_SETTINGS)
6264 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6265 break;
6266 /* Fall Through to 16-bit */
6267 case MSG_EXT_WDTR_BUS_16_BIT:
6268 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6269 || (inq_data->flags & SID_WBus16) != 0
6270 || cts->type == CTS_TYPE_USER_SETTINGS)
6271 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6272 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6273 break;
6274 }
6275 /* Fall Through to 8-bit */
6276 default: /* New bus width?? */
6277 case MSG_EXT_WDTR_BUS_8_BIT:
6278 /* All targets can do this */
6279 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6280 break;
6281 }
6282
6283 spi3caps = cpi.xport_specific.spi.ppr_options;
6284 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6285 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6286 spi3caps &= inq_data->spi3data;
6287
6288 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
6289 spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
6290
6291 if ((spi3caps & SID_SPI_IUS) == 0)
6292 spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
6293
6294 if ((spi3caps & SID_SPI_QAS) == 0)
6295 spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
6296
 6297		/* PPR protocol options are only valid on a wide bus */
6298 if (spi->bus_width == 0)
6299 spi->ppr_options = 0;
6300
6301 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
6302 /*
6303 * Can't tag queue without disconnection.
6304 */
6305 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6306 scsi->valid |= CTS_SCSI_VALID_TQ;
6307 }
6308
6309 /*
6310 * If we are currently performing tagged transactions to
6311 * this device and want to change its negotiation parameters,
6312 * go non-tagged for a bit to give the controller a chance to
6313 * negotiate unhampered by tag messages.
6314 */
6315 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6316 && (device->inq_flags & SID_CmdQue) != 0
6317 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6318 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
6319 CTS_SPI_VALID_SYNC_OFFSET|
6320 CTS_SPI_VALID_BUS_WIDTH)) != 0)
6321 xpt_toggle_tags(cts->ccb_h.path);
6322 }
6323
6324 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6325 && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
6326 int device_tagenb;
6327
6328 /*
6329 * If we are transitioning from tags to no-tags or
6330 * vice-versa, we need to carefully freeze and restart
6331 * the queue so that we don't overlap tagged and non-tagged
6332 * commands. We also temporarily stop tags if there is
6333 * a change in transfer negotiation settings to allow
6334 * "tag-less" negotiation.
6335 */
6336 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6337 || (device->inq_flags & SID_CmdQue) != 0)
6338 device_tagenb = TRUE;
6339 else
6340 device_tagenb = FALSE;
6341
6342 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6343 && device_tagenb == FALSE)
6344 || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
6345 && device_tagenb == TRUE)) {
6346
6347 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
6348 /*
6349 * Delay change to use tags until after a
6350 * few commands have gone to this device so
6351 * the controller has time to perform transfer
6352 * negotiations without tagged messages getting
6353 * in the way.
6354 */
6355 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6356 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6357 } else {
6358 struct ccb_relsim crs;
6359
6360 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6361 device->inq_flags &= ~SID_CmdQue;
6362 xpt_dev_ccbq_resize(cts->ccb_h.path,
6363 sim->max_dev_openings);
6364 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6365 device->tag_delay_count = 0;
6366
6367 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6368 /*priority*/1);
6369 crs.ccb_h.func_code = XPT_REL_SIMQ;
6370 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6371 crs.openings
6372 = crs.release_timeout
6373 = crs.qfrozen_cnt
6374 = 0;
6375 xpt_action((union ccb *)&crs);
6376 }
6377 }
6378 }
6379 if (async_update == FALSE)
6380 (*(sim->sim_action))(sim, (union ccb *)cts);
6381}
6382
6383#else /* CAM_NEW_TRAN_CODE */
6384
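/*
 * Pre-CAM_NEW_TRAN_CODE version of the settings filter: fill in any
 * fields the caller left invalid from the current settings, force
 * async or narrow transfers where the device or controller cannot do
 * better, and bracket a tagged/untagged transition with a temporary
 * device queue freeze so the two command styles never overlap.
 */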
6385static void
6386xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6387 int async_update)
6388{
6389 struct cam_sim *sim;
6390 int qfrozen;
6391
6392 sim = cts->ccb_h.path->bus->sim;
6393 if (async_update == FALSE) {
6394 struct scsi_inquiry_data *inq_data;
6395 struct ccb_pathinq cpi;
6396 struct ccb_trans_settings cur_cts;
6397
6398 if (device == NULL) {
6399 cts->ccb_h.status = CAM_PATH_INVALID;
6400 xpt_done((union ccb *)cts);
6401 return;
6402 }
6403
6404 /*
6405 * Perform sanity checking against what the
6406 * controller and device can do.
6407 */
6408 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6409 cpi.ccb_h.func_code = XPT_PATH_INQ;
6410 xpt_action((union ccb *)&cpi);
6411 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6412 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6413 cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS;
6414 xpt_action((union ccb *)&cur_cts);
6415 inq_data = &device->inq_data;
6416
6417 /* Fill in any gaps in what the user gave us */
6418 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
6419 cts->sync_period = cur_cts.sync_period;
6420 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
6421 cts->sync_offset = cur_cts.sync_offset;
6422 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0)
6423 cts->bus_width = cur_cts.bus_width;
6424 if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) {
6425 cts->flags &= ~CCB_TRANS_DISC_ENB;
6426 cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB;
6427 }
6428 if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) {
6429 cts->flags &= ~CCB_TRANS_TAG_ENB;
6430 cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
6431 }
6432
6433 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6434 && (inq_data->flags & SID_Sync) == 0)
6435 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6436 || (cts->sync_offset == 0)
6437 || (cts->sync_period == 0)) {
6438 /* Force async */
6439 cts->sync_period = 0;
6440 cts->sync_offset = 0;
6441 } else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6442 && (inq_data->spi3data & SID_SPI_CLOCK_DT) == 0
6443 && cts->sync_period <= 0x9) {
6444 /*
6445 * Don't allow DT transmission rates if the
6446 * device does not support it.
6447 */
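			 * Sync period factors of 0x9 and below imply DT
			 * (dual-transition) clocking; 0xa is the fastest
			 * single-transition factor.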
6448 cts->sync_period = 0xa;
6449 }
6450
6451 switch (cts->bus_width) {
6452 case MSG_EXT_WDTR_BUS_32_BIT:
6453 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6454 || (inq_data->flags & SID_WBus32) != 0)
6455 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6456 break;
6457 /* FALLTHROUGH to 16-bit */
6458 case MSG_EXT_WDTR_BUS_16_BIT:
6459 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6460 || (inq_data->flags & SID_WBus16) != 0)
6461 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6462 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6463 break;
6464 }
6465 /* FALLTHROUGH to 8-bit */
6466 default: /* New bus width?? */
6467 case MSG_EXT_WDTR_BUS_8_BIT:
6468 /* All targets can do this */
6469 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6470 break;
6471 }
6472
6473 if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
6474 /*
6475 * Can't tag queue without disconnection.
6476 */
6477 cts->flags &= ~CCB_TRANS_TAG_ENB;
6478 cts->valid |= CCB_TRANS_TQ_VALID;
6479 }
6480
6481 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6482 || (inq_data->flags & SID_CmdQue) == 0
6483 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6484 || (device->quirk->mintags == 0)) {
6485 /*
 6486			 * Can't tag on hardware that doesn't support tags,
6487 * doesn't have it enabled, or has broken tag support.
6488 */
6489 cts->flags &= ~CCB_TRANS_TAG_ENB;
6490 }
6491 }
6492
6493 qfrozen = FALSE;
6494 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
6495 int device_tagenb;
6496
6497 /*
6498 * If we are transitioning from tags to no-tags or
6499 * vice-versa, we need to carefully freeze and restart
6500 * the queue so that we don't overlap tagged and non-tagged
6501 * commands. We also temporarily stop tags if there is
6502 * a change in transfer negotiation settings to allow
6503 * "tag-less" negotiation.
6504 */
6505 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6506 || (device->inq_flags & SID_CmdQue) != 0)
6507 device_tagenb = TRUE;
6508 else
6509 device_tagenb = FALSE;
6510
6511 if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
6512 && device_tagenb == FALSE)
6513 || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
6514 && device_tagenb == TRUE)) {
6515
6516 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
6517 /*
6518 * Delay change to use tags until after a
6519 * few commands have gone to this device so
6520 * the controller has time to perform transfer
6521 * negotiations without tagged messages getting
6522 * in the way.
6523 */
6524 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6525 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6526 } else {
6527 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6528 qfrozen = TRUE;
6529 device->inq_flags &= ~SID_CmdQue;
6530 xpt_dev_ccbq_resize(cts->ccb_h.path,
6531 sim->max_dev_openings);
6532 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6533 device->tag_delay_count = 0;
6534 }
6535 }
6536 }
6537
6538 if (async_update == FALSE) {
6539 /*
6540 * If we are currently performing tagged transactions to
6541 * this device and want to change its negotiation parameters,
6542 * go non-tagged for a bit to give the controller a chance to
6543 * negotiate unhampered by tag messages.
6544 */
6545 if ((device->inq_flags & SID_CmdQue) != 0
6546 && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
6547 CCB_TRANS_SYNC_OFFSET_VALID|
6548 CCB_TRANS_BUS_WIDTH_VALID)) != 0)
6549 xpt_toggle_tags(cts->ccb_h.path);
6550
6551 (*(sim->sim_action))(sim, (union ccb *)cts);
6552 }
6553
6554 if (qfrozen) {
6555 struct ccb_relsim crs;
6556
6557 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6558 /*priority*/1);
6559 crs.ccb_h.func_code = XPT_REL_SIMQ;
6560 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6561 crs.openings
6562 = crs.release_timeout
6563 = crs.qfrozen_cnt
6564 = 0;
6565 xpt_action((union ccb *)&crs);
6566 }
6567}
6568
6569
6570#endif /* CAM_NEW_TRAN_CODE */
6571
6572static void
6573xpt_toggle_tags(struct cam_path *path)
6574{
6575 struct cam_ed *dev;
6576
6577 /*
6578 * Give controllers a chance to renegotiate
6579 * before starting tag operations. We
6580 * "toggle" tagged queuing off then on
6581 * which causes the tag enable command delay
6582 * counter to come into effect.
6583 */
6584 dev = path->device;
6585 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6586 || ((dev->inq_flags & SID_CmdQue) != 0
6587 && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
6588 struct ccb_trans_settings cts;
6589
6590 xpt_setup_ccb(&cts.ccb_h, path, 1);
6591#ifdef CAM_NEW_TRAN_CODE
6592 cts.protocol = PROTO_SCSI;
6593 cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
6594 cts.transport = XPORT_UNSPECIFIED;
6595 cts.transport_version = XPORT_VERSION_UNSPECIFIED;
6596 cts.proto_specific.scsi.flags = 0;
6597 cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
6598#else /* CAM_NEW_TRAN_CODE */
6599 cts.flags = 0;
6600 cts.valid = CCB_TRANS_TQ_VALID;
6601#endif /* CAM_NEW_TRAN_CODE */
6602 xpt_set_transfer_settings(&cts, path->device,
6603 /*async_update*/TRUE);
6604#ifdef CAM_NEW_TRAN_CODE
6605 cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
6606#else /* CAM_NEW_TRAN_CODE */
6607 cts.flags = CCB_TRANS_TAG_ENB;
6608#endif /* CAM_NEW_TRAN_CODE */
6609 xpt_set_transfer_settings(&cts, path->device,
6610 /*async_update*/TRUE);
6611 }
6612}
6613
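/*
 * Turn tagged queuing on for a device: size its ccb queue to the
 * lesser of the quirk table's maxtags and the SIM's tagged openings,
 * then release the frozen device queue once outstanding commands
 * have drained so tagged and untagged commands do not mix.
 */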
6614static void
6615xpt_start_tags(struct cam_path *path)
6616{
6617 struct ccb_relsim crs;
6618 struct cam_ed *device;
6619 struct cam_sim *sim;
6620 int newopenings;
6621
6622 device = path->device;
6623 sim = path->bus->sim;
6624 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6625 xpt_freeze_devq(path, /*count*/1);
6626 device->inq_flags |= SID_CmdQue;
6627 newopenings = min(device->quirk->maxtags, sim->max_tagged_dev_openings);
6628 xpt_dev_ccbq_resize(path, newopenings);
6629 xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
6630 crs.ccb_h.func_code = XPT_REL_SIMQ;
6631 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6632 crs.openings
6633 = crs.release_timeout
6634 = crs.qfrozen_cnt
6635 = 0;
6636 xpt_action((union ccb *)&crs);
6637}
6638
6639static int busses_to_config;
6640static int busses_to_reset;
6641
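/*
 * First pass of initial bus configuration: count the busses to be
 * configured and, of those, the ones that both permit bus resets
 * and can negotiate transfer settings, so xpt_config() knows
 * whether a settle delay is warranted.
 */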
6642static int
6643xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
6644{
6645 if (bus->path_id != CAM_XPT_PATH_ID) {
6646 struct cam_path path;
6647 struct ccb_pathinq cpi;
6648 int can_negotiate;
6649
6650 busses_to_config++;
6651 xpt_compile_path(&path, NULL, bus->path_id,
6652 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
6653 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
6654 cpi.ccb_h.func_code = XPT_PATH_INQ;
6655 xpt_action((union ccb *)&cpi);
6656 can_negotiate = cpi.hba_inquiry;
6657 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6658 if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
6659 && can_negotiate)
6660 busses_to_reset++;
6661 xpt_release_path(&path);
6662 }
6663
6664 return(1);
6665}
6666
6667static int
6668xptconfigfunc(struct cam_eb *bus, void *arg)
6669{
6670 struct cam_path *path;
6671 union ccb *work_ccb;
6672
6673 if (bus->path_id != CAM_XPT_PATH_ID) {
6674 cam_status status;
6675 int can_negotiate;
6676
6677 work_ccb = xpt_alloc_ccb();
6678 if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
6679 CAM_TARGET_WILDCARD,
6680 CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
6681 printf("xptconfigfunc: xpt_create_path failed with "
6682 "status %#x for bus %d\n", status, bus->path_id);
6683 printf("xptconfigfunc: halting bus configuration\n");
6684 xpt_free_ccb(work_ccb);
6685 busses_to_config--;
6686 xpt_finishconfig(xpt_periph, NULL);
6687 return(0);
6688 }
6689 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6690 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
6691 xpt_action(work_ccb);
6692 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
6693 printf("xptconfigfunc: CPI failed on bus %d "
6694 "with status %d\n", bus->path_id,
6695 work_ccb->ccb_h.status);
6696 xpt_finishconfig(xpt_periph, work_ccb);
6697 return(1);
6698 }
6699
6700 can_negotiate = work_ccb->cpi.hba_inquiry;
6701 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6702 if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
6703 && (can_negotiate != 0)) {
6704 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6705 work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6706 work_ccb->ccb_h.cbfcnp = NULL;
6707 CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
6708 ("Resetting Bus\n"));
6709 xpt_action(work_ccb);
6710 xpt_finishconfig(xpt_periph, work_ccb);
6711 } else {
6712 /* Act as though we performed a successful BUS RESET */
6713 work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6714 xpt_finishconfig(xpt_periph, work_ccb);
6715 }
6716 }
6717
6718 return(1);
6719}
6720
6721static void
6722xpt_config(void *arg)
6723{
6724 /*
6725 * Now that interrupts are enabled, go find our devices
6726 */
6727
6728#ifdef CAMDEBUG
6729 /* Setup debugging flags and path */
6730#ifdef CAM_DEBUG_FLAGS
6731 cam_dflags = CAM_DEBUG_FLAGS;
6732#else /* !CAM_DEBUG_FLAGS */
6733 cam_dflags = CAM_DEBUG_NONE;
6734#endif /* CAM_DEBUG_FLAGS */
6735#ifdef CAM_DEBUG_BUS
6736 if (cam_dflags != CAM_DEBUG_NONE) {
6737 if (xpt_create_path(&cam_dpath, xpt_periph,
6738 CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
6739 CAM_DEBUG_LUN) != CAM_REQ_CMP) {
6740 printf("xpt_config: xpt_create_path() failed for debug"
6741 " target %d:%d:%d, debugging disabled\n",
6742 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
6743 cam_dflags = CAM_DEBUG_NONE;
6744 }
6745 } else
6746 cam_dpath = NULL;
6747#else /* !CAM_DEBUG_BUS */
6748 cam_dpath = NULL;
6749#endif /* CAM_DEBUG_BUS */
6750#endif /* CAMDEBUG */
6751
6752 /*
6753 * Scan all installed busses.
6754 */
6755 xpt_for_all_busses(xptconfigbuscountfunc, NULL);
6756
6757 if (busses_to_config == 0) {
6758 /* Call manually because we don't have any busses */
6759 xpt_finishconfig(xpt_periph, NULL);
6760 } else {
6761 if (busses_to_reset > 0 && scsi_delay >= 2000) {
6762 printf("Waiting %d seconds for SCSI "
6763 "devices to settle\n", scsi_delay/1000);
6764 }
6765 xpt_for_all_busses(xptconfigfunc, NULL);
6766 }
6767}
6768
6769/*
6770 * If the given device only has one peripheral attached to it, and if that
 6771 * peripheral is the passthrough driver, announce it.  This ensures that the
6772 * user sees some sort of announcement for every peripheral in their system.
6773 */
6774static int
6775xptpassannouncefunc(struct cam_ed *device, void *arg)
6776{
6777 struct cam_periph *periph;
6778 int i;
6779
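	/* Count the peripheral instances attached to this device. */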
6780 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
6781 periph = SLIST_NEXT(periph, periph_links), i++);
6782
6783 periph = SLIST_FIRST(&device->periphs);
6784 if ((i == 1)
6785 && (strncmp(periph->periph_name, "pass", 4) == 0))
6786 xpt_announce_periph(periph, NULL);
6787
6788 return(1);
6789}
6790
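/*
 * Completion handler for initial bus configuration.  A successful
 * bus reset is chased with a full bus scan; once every bus has
 * checked in, the peripheral drivers are registered, devices with
 * no driver attached are announced via the passthrough driver, and
 * the boot-time config hook is released so the boot can proceed.
 */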
6791static void
6792xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
6793{
6794 struct periph_driver **p_drv;
6795 int i;
6796
6797 if (done_ccb != NULL) {
6798 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
6799 ("xpt_finishconfig\n"));
6800 switch(done_ccb->ccb_h.func_code) {
6801 case XPT_RESET_BUS:
6802 if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
6803 done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
6804 done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
6805 xpt_action(done_ccb);
6806 return;
6807 }
6808 /* FALLTHROUGH */
6809 case XPT_SCAN_BUS:
6810 default:
6811 xpt_free_path(done_ccb->ccb_h.path);
6812 busses_to_config--;
6813 break;
6814 }
6815 }
6816
6817 if (busses_to_config == 0) {
6818 /* Register all the peripheral drivers */
6819 /* XXX This will have to change when we have loadable modules */
6820 p_drv = periph_drivers;
6821 for (i = 0; p_drv[i] != NULL; i++) {
6822 (*p_drv[i]->init)();
6823 }
6824
6825 /*
6826 * Check for devices with no "standard" peripheral driver
6827 * attached. For any devices like that, announce the
6828 * passthrough driver so the user will see something.
6829 */
6830 xpt_for_all_devices(xptpassannouncefunc, NULL);
6831
6832 /* Release our hook so that the boot can continue. */
6833 config_intrhook_disestablish(xpt_config_hook);
6834 free(xpt_config_hook, M_TEMP);
6835 xpt_config_hook = NULL;
6836 }
6837 if (done_ccb != NULL)
6838 xpt_free_ccb(done_ccb);
6839}
6840
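/*
 * Action routine for the xpt's own virtual SIM.  It only needs to
 * answer path inquiries about itself; all other requests are
 * rejected as invalid.
 */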
6841static void
6842xptaction(struct cam_sim *sim, union ccb *work_ccb)
6843{
6844 CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
6845
6846 switch (work_ccb->ccb_h.func_code) {
6847 /* Common cases first */
6848 case XPT_PATH_INQ: /* Path routing inquiry */
6849 {
6850 struct ccb_pathinq *cpi;
6851
6852 cpi = &work_ccb->cpi;
6853 cpi->version_num = 1; /* XXX??? */
6854 cpi->hba_inquiry = 0;
6855 cpi->target_sprt = 0;
6856 cpi->hba_misc = 0;
6857 cpi->hba_eng_cnt = 0;
6858 cpi->max_target = 0;
6859 cpi->max_lun = 0;
6860 cpi->initiator_id = 0;
6861 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
6862 strncpy(cpi->hba_vid, "", HBA_IDLEN);
6863 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
6864 cpi->unit_number = sim->unit_number;
6865 cpi->bus_id = sim->bus_id;
6866 cpi->base_transfer_speed = 0;
6867#ifdef CAM_NEW_TRAN_CODE
6868 cpi->protocol = PROTO_UNSPECIFIED;
6869 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
6870 cpi->transport = XPORT_UNSPECIFIED;
6871 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
6872#endif /* CAM_NEW_TRAN_CODE */
6873 cpi->ccb_h.status = CAM_REQ_CMP;
6874 xpt_done(work_ccb);
6875 break;
6876 }
6877 default:
6878 work_ccb->ccb_h.status = CAM_REQ_INVALID;
6879 xpt_done(work_ccb);
6880 break;
6881 }
6882}
6883
6884/*
6885 * The xpt as a "controller" has no interrupt sources, so polling
6886 * is a no-op.
6887 */
6888static void
6889xptpoll(struct cam_sim *sim)
6890{
6891}
6892
6893static void
6894camisr(void *V_queue)
6895{
6896 cam_isrq_t *queue = V_queue;
6897 int s;
6898 struct ccb_hdr *ccb_h;
6899
6900 s = splcam();
6901 while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
6902 int runq;
6903
6904 TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
6905 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
6906 splx(s);
6907
6908 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
6909 ("camisr\n"));
6910
6911 runq = FALSE;
6912
6913 if (ccb_h->flags & CAM_HIGH_POWER) {
6914 struct highpowerlist *hphead;
6915 union ccb *send_ccb;
6916
6917 hphead = &highpowerq;
6918
6919 send_ccb = (union ccb *)STAILQ_FIRST(hphead);
6920
 6921			/*
 6922			 * This command is done; free up its high power slot.
 6923			 */
6924 num_highpower++;
6925
6926 /*
6927 * Any high powered commands queued up?
6928 */
6929 if (send_ccb != NULL) {
6930
6931 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
6932
6933 xpt_release_devq(send_ccb->ccb_h.path,
6934 /*count*/1, /*runqueue*/TRUE);
6935 }
6936 }
6937 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
6938 struct cam_ed *dev;
6939
6940 dev = ccb_h->path->device;
6941
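			/*
			 * Return this ccb's controller queue slot and
			 * update the device ccb queue accounting at
			 * splcam so the counts stay consistent.
			 */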
6942 s = splcam();
6943 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
6944
6945 ccb_h->path->bus->sim->devq->send_active--;
6946 ccb_h->path->bus->sim->devq->send_openings++;
6947 splx(s);
6948
6949 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
6950 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
6951 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
6952 && (dev->ccbq.dev_active == 0))) {
6953
6954 xpt_release_devq(ccb_h->path, /*count*/1,
6955 /*run_queue*/TRUE);
6956 }
6957
6958 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6959 && (--dev->tag_delay_count == 0))
6960 xpt_start_tags(ccb_h->path);
6961
6962 if ((dev->ccbq.queue.entries > 0)
6963 && (dev->qfrozen_cnt == 0)
6964 && (device_is_send_queued(dev) == 0)) {
6965 runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
6966 dev);
6967 }
6968 }
6969
6970 if (ccb_h->status & CAM_RELEASE_SIMQ) {
6971 xpt_release_simq(ccb_h->path->bus->sim,
6972 /*run_queue*/TRUE);
6973 ccb_h->status &= ~CAM_RELEASE_SIMQ;
6974 runq = FALSE;
6975 }
6976
6977 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
6978 && (ccb_h->status & CAM_DEV_QFRZN)) {
6979 xpt_release_devq(ccb_h->path, /*count*/1,
6980 /*run_queue*/TRUE);
6981 ccb_h->status &= ~CAM_DEV_QFRZN;
6982 } else if (runq) {
6983 xpt_run_dev_sendq(ccb_h->path->bus);
6984 }
6985
6986 /* Call the peripheral driver's callback */
6987 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
6988
 6989		/* Raise IPL for the while-loop condition test */
6990 s = splcam();
6991 }
6992 splx(s);
6993}
823#ifdef CAM_NEW_TRAN_CODE
824static void xpt_devise_transport(struct cam_path *path);
825#endif /* CAM_NEW_TRAN_CODE */
826static void xpt_set_transfer_settings(struct ccb_trans_settings *cts,
827 struct cam_ed *device,
828 int async_update);
829static void xpt_toggle_tags(struct cam_path *path);
830static void xpt_start_tags(struct cam_path *path);
831static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
832 struct cam_ed *dev);
833static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
834 struct cam_ed *dev);
835static __inline int periph_is_queued(struct cam_periph *periph);
836static __inline int device_is_alloc_queued(struct cam_ed *device);
837static __inline int device_is_send_queued(struct cam_ed *device);
838static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
839
840static __inline int
841xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
842{
843 int retval;
844
845 if (dev->ccbq.devq_openings > 0) {
846 if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
847 cam_ccbq_resize(&dev->ccbq,
848 dev->ccbq.dev_openings
849 + dev->ccbq.dev_active);
850 dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
851 }
852 /*
853 * The priority of a device waiting for CCB resources
854		 * is that of the highest priority peripheral driver
855 * enqueued.
856 */
857 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
858 &dev->alloc_ccb_entry.pinfo,
859 CAMQ_GET_HEAD(&dev->drvq)->priority);
860 } else {
861 retval = 0;
862 }
863
864 return (retval);
865}
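
/*
 * Explanatory note (our reading of the code, not a behavior change):
 * CAM_DEV_RESIZE_QUEUE_NEEDED marks a device whose opening count
 * changed -- e.g. after tag-depth throttling -- at a point where the
 * CCB queue could not be resized immediately.  The deferred resize is
 * applied above, the next time the device is scheduled for allocation.
 */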
866
867static __inline int
868xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
869{
870 int retval;
871
872 if (dev->ccbq.dev_openings > 0) {
873 /*
874 * The priority of a device waiting for controller
875		 * resources is that of the highest priority CCB
876 * enqueued.
877 */
878 retval =
879 xpt_schedule_dev(&bus->sim->devq->send_queue,
880 &dev->send_ccb_entry.pinfo,
881 CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
882 } else {
883 retval = 0;
884 }
885 return (retval);
886}
887
888static __inline int
889periph_is_queued(struct cam_periph *periph)
890{
891 return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
892}
893
894static __inline int
895device_is_alloc_queued(struct cam_ed *device)
896{
897 return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
898}
899
900static __inline int
901device_is_send_queued(struct cam_ed *device)
902{
903 return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
904}
905
906static __inline int
907dev_allocq_is_runnable(struct cam_devq *devq)
908{
909 /*
910 * Have work to do.
911 * Have space to do more work.
912 * Allowed to do work.
913 */
914 return ((devq->alloc_queue.qfrozen_cnt == 0)
915 && (devq->alloc_queue.entries > 0)
916 && (devq->alloc_openings > 0));
917}
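
/*
 * For example, a devq whose alloc_queue holds pending entries is still
 * not runnable while qfrozen_cnt is nonzero (the queue was frozen, for
 * instance during error recovery) or while alloc_openings is exhausted.
 */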
918
919static void
920xpt_periph_init(void)
921{
922 make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
923}
924
925static void
926probe_periph_init(void)
927{
928}
929
930
931static void
932xptdone(struct cam_periph *periph, union ccb *done_ccb)
933{
934 /* Caller will release the CCB */
935 wakeup(&done_ccb->ccb_h.cbfcnp);
936}
937
938static int
939xptopen(dev_t dev, int flags, int fmt, struct thread *td)
940{
941 int unit;
942
943 unit = minor(dev) & 0xff;
944
945 /*
946 * Only allow read-write access.
947 */
948 if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
949 return(EPERM);
950
951 /*
952 * We don't allow nonblocking access.
953 */
954 if ((flags & O_NONBLOCK) != 0) {
955 printf("xpt%d: can't do nonblocking access\n", unit);
956 return(ENODEV);
957 }
958
959 /*
960 * We only have one transport layer right now. If someone accesses
961	 * us via something other than minor number 0, point out their
962 * mistake.
963 */
964 if (unit != 0) {
965 printf("xptopen: got invalid xpt unit %d\n", unit);
966 return(ENXIO);
967 }
968
969 /* Mark ourselves open */
970 xsoftc.flags |= XPT_FLAG_OPEN;
971
972 return(0);
973}
974
975static int
976xptclose(dev_t dev, int flag, int fmt, struct thread *td)
977{
978 int unit;
979
980 unit = minor(dev) & 0xff;
981
982 /*
983 * We only have one transport layer right now. If someone accesses
984	 * us via something other than minor number 0, point out their
985 * mistake.
986 */
987 if (unit != 0) {
988 printf("xptclose: got invalid xpt unit %d\n", unit);
989 return(ENXIO);
990 }
991
992 /* Mark ourselves closed */
993 xsoftc.flags &= ~XPT_FLAG_OPEN;
994
995 return(0);
996}
997
998static int
999xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
1000{
1001 int unit, error;
1002
1003 error = 0;
1004 unit = minor(dev) & 0xff;
1005
1006 /*
1007 * We only have one transport layer right now. If someone accesses
1008	 * us via something other than minor number 0, point out their
1009 * mistake.
1010 */
1011 if (unit != 0) {
1012 printf("xptioctl: got invalid xpt unit %d\n", unit);
1013 return(ENXIO);
1014 }
1015
1016 switch(cmd) {
1017 /*
1018	 * For the transport layer CAMIOCOMMAND ioctl, we only want to
1019	 * accept CCB types that make no sense to send through the
1020 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
1021 * in the CAM spec.
1022 */
1023 case CAMIOCOMMAND: {
1024 union ccb *ccb;
1025 union ccb *inccb;
1026
1027 inccb = (union ccb *)addr;
1028
1029 switch(inccb->ccb_h.func_code) {
1030 case XPT_SCAN_BUS:
1031 case XPT_RESET_BUS:
1032 if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
1033 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
1034 error = EINVAL;
1035 break;
1036 }
1037 /* FALLTHROUGH */
1038 case XPT_PATH_INQ:
1039 case XPT_ENG_INQ:
1040 case XPT_SCAN_LUN:
1041
1042 ccb = xpt_alloc_ccb();
1043
1044 /*
1045 * Create a path using the bus, target, and lun the
1046 * user passed in.
1047 */
1048 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
1049 inccb->ccb_h.path_id,
1050 inccb->ccb_h.target_id,
1051 inccb->ccb_h.target_lun) !=
1052 CAM_REQ_CMP){
1053 error = EINVAL;
1054 xpt_free_ccb(ccb);
1055 break;
1056 }
1057 /* Ensure all of our fields are correct */
1058 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
1059 inccb->ccb_h.pinfo.priority);
1060 xpt_merge_ccb(ccb, inccb);
1061 ccb->ccb_h.cbfcnp = xptdone;
1062 cam_periph_runccb(ccb, NULL, 0, 0, NULL);
1063 bcopy(ccb, inccb, sizeof(union ccb));
1064 xpt_free_path(ccb->ccb_h.path);
1065 xpt_free_ccb(ccb);
1066 break;
1067
1068 case XPT_DEBUG: {
1069 union ccb ccb;
1070
1071 /*
1072 * This is an immediate CCB, so it's okay to
1073 * allocate it on the stack.
1074 */
1075
1076 /*
1077 * Create a path using the bus, target, and lun the
1078 * user passed in.
1079 */
1080 if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
1081 inccb->ccb_h.path_id,
1082 inccb->ccb_h.target_id,
1083 inccb->ccb_h.target_lun) !=
1084 CAM_REQ_CMP){
1085 error = EINVAL;
1086 break;
1087 }
1088 /* Ensure all of our fields are correct */
1089 xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
1090 inccb->ccb_h.pinfo.priority);
1091 xpt_merge_ccb(&ccb, inccb);
1092 ccb.ccb_h.cbfcnp = xptdone;
1093 xpt_action(&ccb);
1094 bcopy(&ccb, inccb, sizeof(union ccb));
1095 xpt_free_path(ccb.ccb_h.path);
1096 break;
1097
1098 }
1099 case XPT_DEV_MATCH: {
1100 struct cam_periph_map_info mapinfo;
1101 struct cam_path *old_path;
1102
1103 /*
1104 * We can't deal with physical addresses for this
1105 * type of transaction.
1106 */
1107 if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
1108 error = EINVAL;
1109 break;
1110 }
1111
1112 /*
1113 * Save this in case the caller had it set to
1114 * something in particular.
1115 */
1116 old_path = inccb->ccb_h.path;
1117
1118 /*
1119 * We really don't need a path for the matching
1120 * code. The path is needed because of the
1121 * debugging statements in xpt_action(). They
1122 * assume that the CCB has a valid path.
1123 */
1124 inccb->ccb_h.path = xpt_periph->path;
1125
1126 bzero(&mapinfo, sizeof(mapinfo));
1127
1128 /*
1129 * Map the pattern and match buffers into kernel
1130 * virtual address space.
1131 */
1132 error = cam_periph_mapmem(inccb, &mapinfo);
1133
1134 if (error) {
1135 inccb->ccb_h.path = old_path;
1136 break;
1137 }
1138
1139 /*
1140 * This is an immediate CCB, we can send it on directly.
1141 */
1142 xpt_action(inccb);
1143
1144 /*
1145 * Map the buffers back into user space.
1146 */
1147 cam_periph_unmapmem(inccb, &mapinfo);
1148
1149 inccb->ccb_h.path = old_path;
1150
1151 error = 0;
1152 break;
1153 }
1154 default:
1155 error = ENOTSUP;
1156 break;
1157 }
1158 break;
1159 }
1160 /*
1161	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
1162	 * with the peripheral driver name and unit number filled in.  The other
1163	 * fields don't really matter as input.  The passthrough driver name
1164	 * ("pass") and unit number are passed back in the ccb.  The current
1165	 * device generation number, the index into the device peripheral
1166	 * driver list, and the status are also passed back.  Note that
1167 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
1168 * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is
1169 * (or rather should be) impossible for the device peripheral driver
1170 * list to change since we look at the whole thing in one pass, and
1171 * we do it with splcam protection.
1172 *
1173 */
1174 case CAMGETPASSTHRU: {
1175 union ccb *ccb;
1176 struct cam_periph *periph;
1177 struct periph_driver **p_drv;
1178 char *name;
1179 u_int unit;
1180 u_int cur_generation;
1181 int base_periph_found;
1182 int splbreaknum;
1183 int s;
1184
1185 ccb = (union ccb *)addr;
1186 unit = ccb->cgdl.unit_number;
1187 name = ccb->cgdl.periph_name;
1188 /*
1189 * Every 100 devices, we want to drop our spl protection to
1190 * give the software interrupt handler a chance to run.
1191 * Most systems won't run into this check, but this should
1192 * avoid starvation in the software interrupt handler in
1193 * large systems.
1194 */
1195 splbreaknum = 100;
1196
1199 base_periph_found = 0;
1200
1201 /*
1202 * Sanity check -- make sure we don't get a null peripheral
1203 * driver name.
1204 */
1205 if (*ccb->cgdl.periph_name == '\0') {
1206 error = EINVAL;
1207 break;
1208 }
1209
1210 /* Keep the list from changing while we traverse it */
1211 s = splcam();
1212ptstartover:
1213 cur_generation = xsoftc.generation;
1214
1215 /* first find our driver in the list of drivers */
1216 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
1217 if (strcmp((*p_drv)->driver_name, name) == 0)
1218 break;
1219
1220 if (*p_drv == NULL) {
1221 splx(s);
1222 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1223 ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1224 *ccb->cgdl.periph_name = '\0';
1225 ccb->cgdl.unit_number = 0;
1226 error = ENOENT;
1227 break;
1228 }
1229
1230 /*
1231 * Run through every peripheral instance of this driver
1232 * and check to see whether it matches the unit passed
1233 * in by the user. If it does, get out of the loops and
1234 * find the passthrough driver associated with that
1235 * peripheral driver.
1236 */
1237 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
1238 periph = TAILQ_NEXT(periph, unit_links)) {
1239
1240 if (periph->unit_number == unit) {
1241 break;
1242 } else if (--splbreaknum == 0) {
1243 splx(s);
1244 s = splcam();
1245 splbreaknum = 100;
1246 if (cur_generation != xsoftc.generation)
1247 goto ptstartover;
1248 }
1249 }
1250 /*
1251 * If we found the peripheral driver that the user passed
1252 * in, go through all of the peripheral drivers for that
1253 * particular device and look for a passthrough driver.
1254 */
1255 if (periph != NULL) {
1256 struct cam_ed *device;
1257 int i;
1258
1259 base_periph_found = 1;
1260 device = periph->path->device;
1261 for (i = 0, periph = SLIST_FIRST(&device->periphs);
1262 periph != NULL;
1263 periph = SLIST_NEXT(periph, periph_links), i++) {
1264 /*
1265 * Check to see whether we have a
1266 * passthrough device or not.
1267 */
1268 if (strcmp(periph->periph_name, "pass") == 0) {
1269 /*
1270 * Fill in the getdevlist fields.
1271 */
1272 strcpy(ccb->cgdl.periph_name,
1273 periph->periph_name);
1274 ccb->cgdl.unit_number =
1275 periph->unit_number;
1276 if (SLIST_NEXT(periph, periph_links))
1277 ccb->cgdl.status =
1278 CAM_GDEVLIST_MORE_DEVS;
1279 else
1280 ccb->cgdl.status =
1281 CAM_GDEVLIST_LAST_DEVICE;
1282 ccb->cgdl.generation =
1283 device->generation;
1284 ccb->cgdl.index = i;
1285 /*
1286 * Fill in some CCB header fields
1287 * that the user may want.
1288 */
1289 ccb->ccb_h.path_id =
1290 periph->path->bus->path_id;
1291 ccb->ccb_h.target_id =
1292 periph->path->target->target_id;
1293 ccb->ccb_h.target_lun =
1294 periph->path->device->lun_id;
1295 ccb->ccb_h.status = CAM_REQ_CMP;
1296 break;
1297 }
1298 }
1299 }
1300
1301 /*
1302 * If the periph is null here, one of two things has
1303 * happened. The first possibility is that we couldn't
1304 * find the unit number of the particular peripheral driver
1305 * that the user is asking about. e.g. the user asks for
1306 * the passthrough driver for "da11". We find the list of
1307 * "da" peripherals all right, but there is no unit 11.
1308 * The other possibility is that we went through the list
1309 * of peripheral drivers attached to the device structure,
1310 * but didn't find one with the name "pass". Either way,
1311 * we return ENOENT, since we couldn't find something.
1312 */
1313 if (periph == NULL) {
1314 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1315 ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1316 *ccb->cgdl.periph_name = '\0';
1317 ccb->cgdl.unit_number = 0;
1318 error = ENOENT;
1319 /*
1320 * It is unfortunate that this is even necessary,
1321 * but there are many, many clueless users out there.
1322			 * If we found the base peripheral driver, the
1323			 * user is looking for the passthrough driver, but
1324			 * doesn't have one in his kernel.
1325 */
1326 if (base_periph_found == 1) {
1327 printf("xptioctl: pass driver is not in the "
1328 "kernel\n");
1329 printf("xptioctl: put \"device pass0\" in "
1330 "your kernel config file\n");
1331 }
1332 }
1333 splx(s);
1334 break;
1335 }
1336 default:
1337 error = ENOTTY;
1338 break;
1339 }
1340
1341 return(error);
1342}
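
/*
 * A minimal userland sketch of driving the CAMIOCOMMAND ioctl above to
 * rescan a bus, similar in spirit to what camcontrol(8) does.  This is
 * an illustrative assumption, not code from this file; headers and
 * error handling are elided, and the priority value is a guess at a
 * run-of-the-mill priority:
 *
 *	union ccb ccb;
 *	int fd = open("/dev/xpt0", O_RDWR);
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_SCAN_BUS;
 *	ccb.ccb_h.path_id = 0;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	ccb.ccb_h.pinfo.priority = 5;
 *	ccb.crcn.flags = CAM_FLAG_NONE;
 *	if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
 *		err(1, "CAMIOCOMMAND");
 *
 * XPT_SCAN_BUS requires wildcard target and lun ids, as enforced by
 * the checks at the top of the switch above.
 */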
1343
1344static int
1345cam_module_event_handler(module_t mod, int what, void *arg)
1346{
1347 if (what == MOD_LOAD) {
1348 xpt_init(NULL);
1349 } else if (what == MOD_UNLOAD) {
1350 return EBUSY;
1351 }
1352
1353 return 0;
1354}
1355
1356/* Functions accessed by the peripheral drivers */
1357static void
1358xpt_init(void *dummy)
1360{
1361 struct cam_sim *xpt_sim;
1362 struct cam_path *path;
1363 struct cam_devq *devq;
1364 cam_status status;
1365
1366 TAILQ_INIT(&xpt_busses);
1367 TAILQ_INIT(&cam_bioq);
1368 TAILQ_INIT(&cam_netq);
1369 SLIST_INIT(&ccb_freeq);
1370 STAILQ_INIT(&highpowerq);
1371
1372 /*
1373	 * The xpt layer is, itself, the equivalent of a SIM.
1374 * Allow 16 ccbs in the ccb pool for it. This should
1375 * give decent parallelism when we probe busses and
1376 * perform other XPT functions.
1377 */
1378 devq = cam_simq_alloc(16);
1379 xpt_sim = cam_sim_alloc(xptaction,
1380 xptpoll,
1381 "xpt",
1382 /*softc*/NULL,
1383 /*unit*/0,
1384 /*max_dev_transactions*/0,
1385 /*max_tagged_dev_transactions*/0,
1386 devq);
1387 xpt_max_ccbs = 16;
1388
1389 xpt_bus_register(xpt_sim, /*bus #*/0);
1390
1391 /*
1392 * Looking at the XPT from the SIM layer, the XPT is
1393	 * the equivalent of a peripheral driver.  Allocate
1394 * a peripheral driver entry for us.
1395 */
1396 if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1397 CAM_TARGET_WILDCARD,
1398 CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1399 printf("xpt_init: xpt_create_path failed with status %#x,"
1400 " failing attach\n", status);
1401 return;
1402 }
1403
1404 cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1405 path, NULL, 0, NULL);
1406 xpt_free_path(path);
1407
1408 xpt_sim->softc = xpt_periph;
1409
1410 /*
1411 * Register a callback for when interrupts are enabled.
1412 */
1413 xpt_config_hook =
1414 (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
1415 M_TEMP, M_NOWAIT | M_ZERO);
1416 if (xpt_config_hook == NULL) {
1417 printf("xpt_init: Cannot malloc config hook "
1418 "- failing attach\n");
1419 return;
1420 }
1421
1422 xpt_config_hook->ich_func = xpt_config;
1423 if (config_intrhook_establish(xpt_config_hook) != 0) {
1424		free(xpt_config_hook, M_TEMP);
1425 printf("xpt_init: config_intrhook_establish failed "
1426 "- failing attach\n");
1427 }
1428
1429 /* Install our software interrupt handlers */
1430 swi_add(NULL, "camnet", camisr, &cam_netq, SWI_CAMNET, 0, &camnet_ih);
1431 swi_add(NULL, "cambio", camisr, &cam_bioq, SWI_CAMBIO, 0, &cambio_ih);
1432}
1433
1434static cam_status
1435xptregister(struct cam_periph *periph, void *arg)
1436{
1437 if (periph == NULL) {
1438 printf("xptregister: periph was NULL!!\n");
1439 return(CAM_REQ_CMP_ERR);
1440 }
1441
1442 periph->softc = NULL;
1443
1444 xpt_periph = periph;
1445
1446 return(CAM_REQ_CMP);
1447}
1448
1449int32_t
1450xpt_add_periph(struct cam_periph *periph)
1451{
1452 struct cam_ed *device;
1453 int32_t status;
1454 struct periph_list *periph_head;
1455
1456 device = periph->path->device;
1457
1458 periph_head = &device->periphs;
1459
1460 status = CAM_REQ_CMP;
1461
1462 if (device != NULL) {
1463 int s;
1464
1465 /*
1466 * Make room for this peripheral
1467 * so it will fit in the queue
1468 * when it's scheduled to run
1469 */
1470 s = splsoftcam();
1471 status = camq_resize(&device->drvq,
1472 device->drvq.array_size + 1);
1473
1474 device->generation++;
1475
1476 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1477
1478 splx(s);
1479 }
1480
1481 xsoftc.generation++;
1482
1483 return (status);
1484}
1485
1486void
1487xpt_remove_periph(struct cam_periph *periph)
1488{
1489 struct cam_ed *device;
1490
1491 device = periph->path->device;
1492
1493 if (device != NULL) {
1494 int s;
1495 struct periph_list *periph_head;
1496
1497 periph_head = &device->periphs;
1498
1499 /* Release the slot for this peripheral */
1500 s = splsoftcam();
1501 camq_resize(&device->drvq, device->drvq.array_size - 1);
1502
1503 device->generation++;
1504
1505 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1506
1507 splx(s);
1508 }
1509
1510 xsoftc.generation++;
1512}
1513
1514#ifdef CAM_NEW_TRAN_CODE
1515
1516void
1517xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1518{
1519 struct ccb_pathinq cpi;
1520 struct ccb_trans_settings cts;
1521 struct cam_path *path;
1522 u_int speed;
1523 u_int freq;
1524 u_int mb;
1525 int s;
1526
1527 path = periph->path;
1528 /*
1529 * To ensure that this is printed in one piece,
1530 * mask out CAM interrupts.
1531 */
1532 s = splsoftcam();
1533 printf("%s%d at %s%d bus %d target %d lun %d\n",
1534 periph->periph_name, periph->unit_number,
1535 path->bus->sim->sim_name,
1536 path->bus->sim->unit_number,
1537 path->bus->sim->bus_id,
1538 path->target->target_id,
1539 path->device->lun_id);
1540 printf("%s%d: ", periph->periph_name, periph->unit_number);
1541 scsi_print_inquiry(&path->device->inq_data);
1542 if (bootverbose && path->device->serial_num_len > 0) {
1543 /* Don't wrap the screen - print only the first 60 chars */
1544 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1545 periph->unit_number, path->device->serial_num);
1546 }
1547 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1548 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1549 cts.type = CTS_TYPE_CURRENT_SETTINGS;
1550 xpt_action((union ccb*)&cts);
1551
1552 /* Ask the SIM for its base transfer speed */
1553 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1554 cpi.ccb_h.func_code = XPT_PATH_INQ;
1555 xpt_action((union ccb *)&cpi);
1556
1557 speed = cpi.base_transfer_speed;
1558 freq = 0;
1559 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1560 struct ccb_trans_settings_spi *spi;
1561
1562 spi = &cts.xport_specific.spi;
1563 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
1564 && spi->sync_offset != 0) {
1565 freq = scsi_calc_syncsrate(spi->sync_period);
1566 speed = freq;
1567 }
1568
1569 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
1570 speed *= (0x01 << spi->bus_width);
1571 }
1572
1573 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1574 struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
1575 if (fc->valid & CTS_FC_VALID_SPEED) {
1576 speed = fc->bitrate;
1577 }
1578 }
1579
1580 mb = speed / 1000;
1581 if (mb > 0)
1582 printf("%s%d: %d.%03dMB/s transfers",
1583 periph->periph_name, periph->unit_number,
1584 mb, speed % 1000);
1585 else
1586 printf("%s%d: %dKB/s transfers", periph->periph_name,
1587 periph->unit_number, speed);
1588 /* Report additional information about SPI connections */
1589 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1590 struct ccb_trans_settings_spi *spi;
1591
1592 spi = &cts.xport_specific.spi;
1593 if (freq != 0) {
1594 printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
1595 freq % 1000,
1596 (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
1597 ? " DT" : "",
1598 spi->sync_offset);
1599 }
1600 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
1601 && spi->bus_width > 0) {
1602 if (freq != 0) {
1603 printf(", ");
1604 } else {
1605 printf(" (");
1606 }
1607 printf("%dbit)", 8 * (0x01 << spi->bus_width));
1608 } else if (freq != 0) {
1609 printf(")");
1610 }
1611 }
1612 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1613 struct ccb_trans_settings_fc *fc;
1614
1615 fc = &cts.xport_specific.fc;
1616 if (fc->valid & CTS_FC_VALID_WWNN)
1617 printf(" WWNN 0x%llx", (long long) fc->wwnn);
1618 if (fc->valid & CTS_FC_VALID_WWPN)
1619 printf(" WWPN 0x%llx", (long long) fc->wwpn);
1620 if (fc->valid & CTS_FC_VALID_PORT)
1621 printf(" PortID 0x%x", fc->port);
1622 }
1623
1624 if (path->device->inq_flags & SID_CmdQue
1625 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1626 printf("\n%s%d: Tagged Queueing Enabled",
1627 periph->periph_name, periph->unit_number);
1628 }
1629 printf("\n");
1630
1631 /*
1632 * We only want to print the caller's announce string if they've
1633	 * passed one in.
1634 */
1635 if (announce_string != NULL)
1636 printf("%s%d: %s\n", periph->periph_name,
1637 periph->unit_number, announce_string);
1638 splx(s);
1639}
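
/*
 * For reference, the announcement printed above comes out looking
 * roughly like this (an illustrative example, not captured output):
 *
 *	da0 at ahc0 bus 0 target 0 lun 0
 *	da0: <SEAGATE ST318406LW 0109> Fixed Direct Access SCSI-3 device
 *	da0: 160.000MB/s transfers (80.000MHz DT, offset 63, 16bit)
 *	da0: Tagged Queueing Enabled
 */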
1640#else /* CAM_NEW_TRAN_CODE */
1641void
1642xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1643{
1644 int s;
1645 u_int mb;
1646 struct cam_path *path;
1647 struct ccb_trans_settings cts;
1648
1649 path = periph->path;
1650 /*
1651 * To ensure that this is printed in one piece,
1652 * mask out CAM interrupts.
1653 */
1654 s = splsoftcam();
1655 printf("%s%d at %s%d bus %d target %d lun %d\n",
1656 periph->periph_name, periph->unit_number,
1657 path->bus->sim->sim_name,
1658 path->bus->sim->unit_number,
1659 path->bus->sim->bus_id,
1660 path->target->target_id,
1661 path->device->lun_id);
1662 printf("%s%d: ", periph->periph_name, periph->unit_number);
1663 scsi_print_inquiry(&path->device->inq_data);
1664 if ((bootverbose)
1665 && (path->device->serial_num_len > 0)) {
1666 /* Don't wrap the screen - print only the first 60 chars */
1667 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1668 periph->unit_number, path->device->serial_num);
1669 }
1670 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1671 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1672 cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1673 xpt_action((union ccb*)&cts);
1674 if (cts.ccb_h.status == CAM_REQ_CMP) {
1675 u_int speed;
1676 u_int freq;
1677
1678 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1679 && cts.sync_offset != 0) {
1680 freq = scsi_calc_syncsrate(cts.sync_period);
1681 speed = freq;
1682 } else {
1683 struct ccb_pathinq cpi;
1684
1685 /* Ask the SIM for its base transfer speed */
1686 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1687 cpi.ccb_h.func_code = XPT_PATH_INQ;
1688 xpt_action((union ccb *)&cpi);
1689
1690 speed = cpi.base_transfer_speed;
1691 freq = 0;
1692 }
1693 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
1694 speed *= (0x01 << cts.bus_width);
1695 mb = speed / 1000;
1696 if (mb > 0)
1697 printf("%s%d: %d.%03dMB/s transfers",
1698 periph->periph_name, periph->unit_number,
1699 mb, speed % 1000);
1700 else
1701 printf("%s%d: %dKB/s transfers", periph->periph_name,
1702 periph->unit_number, speed);
1703 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1704 && cts.sync_offset != 0) {
1705 printf(" (%d.%03dMHz, offset %d", freq / 1000,
1706 freq % 1000, cts.sync_offset);
1707 }
1708 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
1709 && cts.bus_width > 0) {
1710 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1711 && cts.sync_offset != 0) {
1712 printf(", ");
1713 } else {
1714 printf(" (");
1715 }
1716 printf("%dbit)", 8 * (0x01 << cts.bus_width));
1717 } else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1718 && cts.sync_offset != 0) {
1719 printf(")");
1720 }
1721
1722 if (path->device->inq_flags & SID_CmdQue
1723 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1724 printf(", Tagged Queueing Enabled");
1725 }
1726
1727 printf("\n");
1728 } else if (path->device->inq_flags & SID_CmdQue
1729 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1730 printf("%s%d: Tagged Queueing Enabled\n",
1731 periph->periph_name, periph->unit_number);
1732 }
1733
1734 /*
1735 * We only want to print the caller's announce string if they've
1736	 * passed one in.
1737 */
1738 if (announce_string != NULL)
1739 printf("%s%d: %s\n", periph->periph_name,
1740 periph->unit_number, announce_string);
1741 splx(s);
1742}
1743
1744#endif /* CAM_NEW_TRAN_CODE */
1745
1746static dev_match_ret
1747xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1748 struct cam_eb *bus)
1749{
1750 dev_match_ret retval;
1751 int i;
1752
1753 retval = DM_RET_NONE;
1754
1755 /*
1756 * If we aren't given something to match against, that's an error.
1757 */
1758 if (bus == NULL)
1759 return(DM_RET_ERROR);
1760
1761 /*
1762 * If there are no match entries, then this bus matches no
1763 * matter what.
1764 */
1765 if ((patterns == NULL) || (num_patterns == 0))
1766 return(DM_RET_DESCEND | DM_RET_COPY);
1767
1768 for (i = 0; i < num_patterns; i++) {
1769 struct bus_match_pattern *cur_pattern;
1770
1771 /*
1772 * If the pattern in question isn't for a bus node, we
1773 * aren't interested. However, we do indicate to the
1774 * calling routine that we should continue descending the
1775 * tree, since the user wants to match against lower-level
1776 * EDT elements.
1777 */
1778 if (patterns[i].type != DEV_MATCH_BUS) {
1779 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1780 retval |= DM_RET_DESCEND;
1781 continue;
1782 }
1783
1784 cur_pattern = &patterns[i].pattern.bus_pattern;
1785
1786 /*
1787		 * If they want to match any bus node, we give them
1788		 * this bus node.
1789 */
1790 if (cur_pattern->flags == BUS_MATCH_ANY) {
1791 /* set the copy flag */
1792 retval |= DM_RET_COPY;
1793
1794 /*
1795 * If we've already decided on an action, go ahead
1796 * and return.
1797 */
1798 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1799 return(retval);
1800 }
1801
1802 /*
1803 * Not sure why someone would do this...
1804 */
1805 if (cur_pattern->flags == BUS_MATCH_NONE)
1806 continue;
1807
1808 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1809 && (cur_pattern->path_id != bus->path_id))
1810 continue;
1811
1812 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1813 && (cur_pattern->bus_id != bus->sim->bus_id))
1814 continue;
1815
1816 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1817 && (cur_pattern->unit_number != bus->sim->unit_number))
1818 continue;
1819
1820 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1821 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1822 DEV_IDLEN) != 0))
1823 continue;
1824
1825 /*
1826 * If we get to this point, the user definitely wants
1827 * information on this bus. So tell the caller to copy the
1828 * data out.
1829 */
1830 retval |= DM_RET_COPY;
1831
1832 /*
1833 * If the return action has been set to descend, then we
1834 * know that we've already seen a non-bus matching
1835 * expression, therefore we need to further descend the tree.
1836 * This won't change by continuing around the loop, so we
1837 * go ahead and return. If we haven't seen a non-bus
1838 * matching expression, we keep going around the loop until
1839 * we exhaust the matching expressions. We'll set the stop
1840 * flag once we fall out of the loop.
1841 */
1842 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1843 return(retval);
1844 }
1845
1846 /*
1847 * If the return action hasn't been set to descend yet, that means
1848 * we haven't seen anything other than bus matching patterns. So
1849 * tell the caller to stop descending the tree -- the user doesn't
1850 * want to match against lower level tree elements.
1851 */
1852 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1853 retval |= DM_RET_STOP;
1854
1855 return(retval);
1856}
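
/*
 * Worked example of the return semantics above: given one BUS_MATCH_ANY
 * bus pattern together with one device pattern, the device pattern sets
 * DM_RET_DESCEND and the bus pattern sets DM_RET_COPY, so the caller
 * both copies this bus out and keeps descending toward its targets and
 * devices.  With only bus patterns present, DM_RET_STOP is set instead
 * and the traversal goes no deeper.
 */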
1857
1858static dev_match_ret
1859xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1860 struct cam_ed *device)
1861{
1862 dev_match_ret retval;
1863 int i;
1864
1865 retval = DM_RET_NONE;
1866
1867 /*
1868 * If we aren't given something to match against, that's an error.
1869 */
1870 if (device == NULL)
1871 return(DM_RET_ERROR);
1872
1873 /*
1874 * If there are no match entries, then this device matches no
1875 * matter what.
1876 */
1877	if ((patterns == NULL) || (num_patterns == 0))
1878 return(DM_RET_DESCEND | DM_RET_COPY);
1879
1880 for (i = 0; i < num_patterns; i++) {
1881 struct device_match_pattern *cur_pattern;
1882
1883 /*
1884 * If the pattern in question isn't for a device node, we
1885 * aren't interested.
1886 */
1887 if (patterns[i].type != DEV_MATCH_DEVICE) {
1888 if ((patterns[i].type == DEV_MATCH_PERIPH)
1889 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1890 retval |= DM_RET_DESCEND;
1891 continue;
1892 }
1893
1894 cur_pattern = &patterns[i].pattern.device_pattern;
1895
1896 /*
1897 * If they want to match any device node, we give them any
1898 * device node.
1899 */
1900 if (cur_pattern->flags == DEV_MATCH_ANY) {
1901 /* set the copy flag */
1902 retval |= DM_RET_COPY;
1903
1905 /*
1906 * If we've already decided on an action, go ahead
1907 * and return.
1908 */
1909 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1910 return(retval);
1911 }
1912
1913 /*
1914 * Not sure why someone would do this...
1915 */
1916 if (cur_pattern->flags == DEV_MATCH_NONE)
1917 continue;
1918
1919 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1920 && (cur_pattern->path_id != device->target->bus->path_id))
1921 continue;
1922
1923 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1924 && (cur_pattern->target_id != device->target->target_id))
1925 continue;
1926
1927 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1928 && (cur_pattern->target_lun != device->lun_id))
1929 continue;
1930
1931 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1932 && (cam_quirkmatch((caddr_t)&device->inq_data,
1933 (caddr_t)&cur_pattern->inq_pat,
1934 1, sizeof(cur_pattern->inq_pat),
1935 scsi_static_inquiry_match) == NULL))
1936 continue;
1937
1938 /*
1939 * If we get to this point, the user definitely wants
1940 * information on this device. So tell the caller to copy
1941 * the data out.
1942 */
1943 retval |= DM_RET_COPY;
1944
1945 /*
1946 * If the return action has been set to descend, then we
1947 * know that we've already seen a peripheral matching
1948 * expression, therefore we need to further descend the tree.
1949 * This won't change by continuing around the loop, so we
1950 * go ahead and return. If we haven't seen a peripheral
1951 * matching expression, we keep going around the loop until
1952 * we exhaust the matching expressions. We'll set the stop
1953 * flag once we fall out of the loop.
1954 */
1955 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1956 return(retval);
1957 }
1958
1959 /*
1960 * If the return action hasn't been set to descend yet, that means
1961 * we haven't seen any peripheral matching patterns. So tell the
1962 * caller to stop descending the tree -- the user doesn't want to
1963 * match against lower level tree elements.
1964 */
1965 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1966 retval |= DM_RET_STOP;
1967
1968 return(retval);
1969}
1970
1971/*
1972 * Match a single peripheral against any number of match patterns.
1973 */
1974static dev_match_ret
1975xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1976 struct cam_periph *periph)
1977{
1978 dev_match_ret retval;
1979 int i;
1980
1981 /*
1982 * If we aren't given something to match against, that's an error.
1983 */
1984 if (periph == NULL)
1985 return(DM_RET_ERROR);
1986
1987 /*
1988 * If there are no match entries, then this peripheral matches no
1989 * matter what.
1990 */
1991 if ((patterns == NULL) || (num_patterns == 0))
1992 return(DM_RET_STOP | DM_RET_COPY);
1993
1994 /*
1995 * There aren't any nodes below a peripheral node, so there's no
1996 * reason to descend the tree any further.
1997 */
1998 retval = DM_RET_STOP;
1999
2000 for (i = 0; i < num_patterns; i++) {
2001 struct periph_match_pattern *cur_pattern;
2002
2003 /*
2004 * If the pattern in question isn't for a peripheral, we
2005 * aren't interested.
2006 */
2007 if (patterns[i].type != DEV_MATCH_PERIPH)
2008 continue;
2009
2010 cur_pattern = &patterns[i].pattern.periph_pattern;
2011
2012 /*
2013 * If they want to match on anything, then we will do so.
2014 */
2015 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
2016 /* set the copy flag */
2017 retval |= DM_RET_COPY;
2018
2019 /*
2020 * We've already set the return action to stop,
2021 * since there are no nodes below peripherals in
2022 * the tree.
2023 */
2024 return(retval);
2025 }
2026
2027 /*
2028 * Not sure why someone would do this...
2029 */
2030 if (cur_pattern->flags == PERIPH_MATCH_NONE)
2031 continue;
2032
2033 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
2034 && (cur_pattern->path_id != periph->path->bus->path_id))
2035 continue;
2036
2037 /*
2038		 * For the target and lun IDs, we have to make sure the
2039 * target and lun pointers aren't NULL. The xpt peripheral
2040 * has a wildcard target and device.
2041 */
2042 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
2043 && ((periph->path->target == NULL)
2044 ||(cur_pattern->target_id != periph->path->target->target_id)))
2045 continue;
2046
2047 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
2048 && ((periph->path->device == NULL)
2049 || (cur_pattern->target_lun != periph->path->device->lun_id)))
2050 continue;
2051
2052 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
2053 && (cur_pattern->unit_number != periph->unit_number))
2054 continue;
2055
2056 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
2057 && (strncmp(cur_pattern->periph_name, periph->periph_name,
2058 DEV_IDLEN) != 0))
2059 continue;
2060
2061 /*
2062 * If we get to this point, the user definitely wants
2063 * information on this peripheral. So tell the caller to
2064 * copy the data out.
2065 */
2066 retval |= DM_RET_COPY;
2067
2068 /*
2069 * The return action has already been set to stop, since
2070 * peripherals don't have any nodes below them in the EDT.
2071 */
2072 return(retval);
2073 }
2074
2075 /*
2076 * If we get to this point, the peripheral that was passed in
2077 * doesn't match any of the patterns.
2078 */
2079 return(retval);
2080}
2081
2082static int
2083xptedtbusfunc(struct cam_eb *bus, void *arg)
2084{
2085 struct ccb_dev_match *cdm;
2086 dev_match_ret retval;
2087
2088 cdm = (struct ccb_dev_match *)arg;
2089
2090 /*
2091 * If our position is for something deeper in the tree, that means
2092 * that we've already seen this node. So, we keep going down.
2093 */
2094 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2095 && (cdm->pos.cookie.bus == bus)
2096 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2097 && (cdm->pos.cookie.target != NULL))
2098 retval = DM_RET_DESCEND;
2099 else
2100 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
2101
2102 /*
2103 * If we got an error, bail out of the search.
2104 */
2105 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2106 cdm->status = CAM_DEV_MATCH_ERROR;
2107 return(0);
2108 }
2109
2110 /*
2111 * If the copy flag is set, copy this bus out.
2112 */
2113 if (retval & DM_RET_COPY) {
2114 int spaceleft, j;
2115
2116 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2117 sizeof(struct dev_match_result));
2118
2119 /*
2120 * If we don't have enough space to put in another
2121 * match result, save our position and tell the
2122 * user there are more devices to check.
2123 */
2124 if (spaceleft < sizeof(struct dev_match_result)) {
2125 bzero(&cdm->pos, sizeof(cdm->pos));
2126 cdm->pos.position_type =
2127 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
2128
2129 cdm->pos.cookie.bus = bus;
2130 cdm->pos.generations[CAM_BUS_GENERATION]=
2131 bus_generation;
2132 cdm->status = CAM_DEV_MATCH_MORE;
2133 return(0);
2134 }
2135 j = cdm->num_matches;
2136 cdm->num_matches++;
2137 cdm->matches[j].type = DEV_MATCH_BUS;
2138 cdm->matches[j].result.bus_result.path_id = bus->path_id;
2139 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
2140 cdm->matches[j].result.bus_result.unit_number =
2141 bus->sim->unit_number;
2142 strncpy(cdm->matches[j].result.bus_result.dev_name,
2143 bus->sim->sim_name, DEV_IDLEN);
2144 }
2145
2146 /*
2147 * If the user is only interested in busses, there's no
2148 * reason to descend to the next level in the tree.
2149 */
2150 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2151 return(1);
2152
2153 /*
2154 * If there is a target generation recorded, check it to
2155 * make sure the target list hasn't changed.
2156 */
2157 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2158 && (bus == cdm->pos.cookie.bus)
2159 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2160 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
2161 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
2162 bus->generation)) {
2163 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2164 return(0);
2165 }
2166
2167 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2168 && (cdm->pos.cookie.bus == bus)
2169 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2170 && (cdm->pos.cookie.target != NULL))
2171 return(xpttargettraverse(bus,
2172 (struct cam_et *)cdm->pos.cookie.target,
2173 xptedttargetfunc, arg));
2174 else
2175 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
2176}
2177
2178static int
2179xptedttargetfunc(struct cam_et *target, void *arg)
2180{
2181 struct ccb_dev_match *cdm;
2182
2183 cdm = (struct ccb_dev_match *)arg;
2184
2185 /*
2186 * If there is a device list generation recorded, check it to
2187 * make sure the device list hasn't changed.
2188 */
2189 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2190 && (cdm->pos.cookie.bus == target->bus)
2191 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2192 && (cdm->pos.cookie.target == target)
2193 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2194 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2195 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2196 target->generation)) {
2197 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2198 return(0);
2199 }
2200
2201 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2202 && (cdm->pos.cookie.bus == target->bus)
2203 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2204 && (cdm->pos.cookie.target == target)
2205 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2206 && (cdm->pos.cookie.device != NULL))
2207 return(xptdevicetraverse(target,
2208 (struct cam_ed *)cdm->pos.cookie.device,
2209 xptedtdevicefunc, arg));
2210 else
2211 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2212}
2213
2214static int
2215xptedtdevicefunc(struct cam_ed *device, void *arg)
2216{
2218 struct ccb_dev_match *cdm;
2219 dev_match_ret retval;
2220
2221 cdm = (struct ccb_dev_match *)arg;
2222
2223 /*
2224 * If our position is for something deeper in the tree, that means
2225 * that we've already seen this node. So, we keep going down.
2226 */
2227 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2228 && (cdm->pos.cookie.device == device)
2229 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2230 && (cdm->pos.cookie.periph != NULL))
2231 retval = DM_RET_DESCEND;
2232 else
2233 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2234 device);
2235
2236 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2237 cdm->status = CAM_DEV_MATCH_ERROR;
2238 return(0);
2239 }
2240
2241 /*
2242 * If the copy flag is set, copy this device out.
2243 */
2244 if (retval & DM_RET_COPY) {
2245 int spaceleft, j;
2246
2247 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2248 sizeof(struct dev_match_result));
2249
2250 /*
2251 * If we don't have enough space to put in another
2252 * match result, save our position and tell the
2253 * user there are more devices to check.
2254 */
2255 if (spaceleft < sizeof(struct dev_match_result)) {
2256 bzero(&cdm->pos, sizeof(cdm->pos));
2257 cdm->pos.position_type =
2258 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2259 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2260
2261 cdm->pos.cookie.bus = device->target->bus;
2262 cdm->pos.generations[CAM_BUS_GENERATION]=
2263 bus_generation;
2264 cdm->pos.cookie.target = device->target;
2265 cdm->pos.generations[CAM_TARGET_GENERATION] =
2266 device->target->bus->generation;
2267 cdm->pos.cookie.device = device;
2268 cdm->pos.generations[CAM_DEV_GENERATION] =
2269 device->target->generation;
2270 cdm->status = CAM_DEV_MATCH_MORE;
2271 return(0);
2272 }
2273 j = cdm->num_matches;
2274 cdm->num_matches++;
2275 cdm->matches[j].type = DEV_MATCH_DEVICE;
2276 cdm->matches[j].result.device_result.path_id =
2277 device->target->bus->path_id;
2278 cdm->matches[j].result.device_result.target_id =
2279 device->target->target_id;
2280 cdm->matches[j].result.device_result.target_lun =
2281 device->lun_id;
2282 bcopy(&device->inq_data,
2283 &cdm->matches[j].result.device_result.inq_data,
2284 sizeof(struct scsi_inquiry_data));
2285
2286 /* Let the user know whether this device is unconfigured */
2287 if (device->flags & CAM_DEV_UNCONFIGURED)
2288 cdm->matches[j].result.device_result.flags =
2289 DEV_RESULT_UNCONFIGURED;
2290 else
2291 cdm->matches[j].result.device_result.flags =
2292 DEV_RESULT_NOFLAG;
2293 }
2294
2295 /*
2296 * If the user isn't interested in peripherals, don't descend
2297 * the tree any further.
2298 */
2299 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2300 return(1);
2301
2302 /*
2303 * If there is a peripheral list generation recorded, make sure
2304 * it hasn't changed.
2305 */
2306 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2307 && (device->target->bus == cdm->pos.cookie.bus)
2308 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2309 && (device->target == cdm->pos.cookie.target)
2310 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2311 && (device == cdm->pos.cookie.device)
2312 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2313 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2314 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2315 device->generation)){
2316 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2317 return(0);
2318 }
2319
2320 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2321 && (cdm->pos.cookie.bus == device->target->bus)
2322 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2323 && (cdm->pos.cookie.target == device->target)
2324 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2325 && (cdm->pos.cookie.device == device)
2326 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2327 && (cdm->pos.cookie.periph != NULL))
2328 return(xptperiphtraverse(device,
2329 (struct cam_periph *)cdm->pos.cookie.periph,
2330 xptedtperiphfunc, arg));
2331 else
2332 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2333}
2334
2335static int
2336xptedtperiphfunc(struct cam_periph *periph, void *arg)
2337{
2338 struct ccb_dev_match *cdm;
2339 dev_match_ret retval;
2340
2341 cdm = (struct ccb_dev_match *)arg;
2342
2343 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2344
2345 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2346 cdm->status = CAM_DEV_MATCH_ERROR;
2347 return(0);
2348 }
2349
2350 /*
2351 * If the copy flag is set, copy this peripheral out.
2352 */
2353 if (retval & DM_RET_COPY) {
2354 int spaceleft, j;
2355
2356 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2357 sizeof(struct dev_match_result));
2358
2359 /*
2360 * If we don't have enough space to put in another
2361 * match result, save our position and tell the
2362 * user there are more devices to check.
2363 */
2364 if (spaceleft < sizeof(struct dev_match_result)) {
2365 bzero(&cdm->pos, sizeof(cdm->pos));
2366 cdm->pos.position_type =
2367 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2368 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2369 CAM_DEV_POS_PERIPH;
2370
2371 cdm->pos.cookie.bus = periph->path->bus;
2372 cdm->pos.generations[CAM_BUS_GENERATION]=
2373 bus_generation;
2374 cdm->pos.cookie.target = periph->path->target;
2375 cdm->pos.generations[CAM_TARGET_GENERATION] =
2376 periph->path->bus->generation;
2377 cdm->pos.cookie.device = periph->path->device;
2378 cdm->pos.generations[CAM_DEV_GENERATION] =
2379 periph->path->target->generation;
2380 cdm->pos.cookie.periph = periph;
2381 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2382 periph->path->device->generation;
2383 cdm->status = CAM_DEV_MATCH_MORE;
2384 return(0);
2385 }
2386
2387 j = cdm->num_matches;
2388 cdm->num_matches++;
2389 cdm->matches[j].type = DEV_MATCH_PERIPH;
2390 cdm->matches[j].result.periph_result.path_id =
2391 periph->path->bus->path_id;
2392 cdm->matches[j].result.periph_result.target_id =
2393 periph->path->target->target_id;
2394 cdm->matches[j].result.periph_result.target_lun =
2395 periph->path->device->lun_id;
2396 cdm->matches[j].result.periph_result.unit_number =
2397 periph->unit_number;
2398 strncpy(cdm->matches[j].result.periph_result.periph_name,
2399 periph->periph_name, DEV_IDLEN);
2400 }
2401
2402 return(1);
2403}
2404
2405static int
2406xptedtmatch(struct ccb_dev_match *cdm)
2407{
2408 int ret;
2409
2410 cdm->num_matches = 0;
2411
2412 /*
2413 * Check the bus list generation. If it has changed, the user
2414 * needs to reset everything and start over.
2415 */
2416 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2417 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2418 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2419 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2420 return(0);
2421 }
2422
2423 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2424 && (cdm->pos.cookie.bus != NULL))
2425 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2426 xptedtbusfunc, cdm);
2427 else
2428 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2429
2430 /*
2431 * If we get back 0, that means that we had to stop before fully
2432 * traversing the EDT. It also means that one of the subroutines
2433 * has set the status field to the proper value. If we get back 1,
2434 * we've fully traversed the EDT and copied out any matching entries.
2435 */
2436 if (ret == 1)
2437 cdm->status = CAM_DEV_MATCH_LAST;
2438
2439 return(ret);
2440}
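
/*
 * Usage sketch for the resumable interface above (a summary of the
 * existing mechanism): a caller issues XPT_DEV_MATCH and, for as long
 * as the status comes back CAM_DEV_MATCH_MORE, consumes the
 * cdm->num_matches results and resubmits the same CCB; the position
 * saved in cdm->pos lets the traversal resume where it stopped.  On
 * CAM_DEV_MATCH_LIST_CHANGED the caller must zero cdm->pos and restart
 * the scan from the beginning.
 */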
2441
2442static int
2443xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2444{
2445 struct ccb_dev_match *cdm;
2446
2447 cdm = (struct ccb_dev_match *)arg;
2448
2449 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2450 && (cdm->pos.cookie.pdrv == pdrv)
2451 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2452 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2453 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2454 (*pdrv)->generation)) {
2455 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2456 return(0);
2457 }
2458
2459 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2460 && (cdm->pos.cookie.pdrv == pdrv)
2461 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2462 && (cdm->pos.cookie.periph != NULL))
2463 return(xptpdperiphtraverse(pdrv,
2464 (struct cam_periph *)cdm->pos.cookie.periph,
2465 xptplistperiphfunc, arg));
2466 else
2467 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
2468}
2469
2470static int
2471xptplistperiphfunc(struct cam_periph *periph, void *arg)
2472{
2473 struct ccb_dev_match *cdm;
2474 dev_match_ret retval;
2475
2476 cdm = (struct ccb_dev_match *)arg;
2477
2478 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2479
2480 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2481 cdm->status = CAM_DEV_MATCH_ERROR;
2482 return(0);
2483 }
2484
2485 /*
2486 * If the copy flag is set, copy this peripheral out.
2487 */
2488 if (retval & DM_RET_COPY) {
2489 int spaceleft, j;
2490
2491 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2492 sizeof(struct dev_match_result));
2493
2494 /*
2495 * If we don't have enough space to put in another
2496 * match result, save our position and tell the
2497 * user there are more devices to check.
2498 */
2499 if (spaceleft < sizeof(struct dev_match_result)) {
2500 struct periph_driver **pdrv;
2501
2502 pdrv = NULL;
2503 bzero(&cdm->pos, sizeof(cdm->pos));
2504 cdm->pos.position_type =
2505 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2506 CAM_DEV_POS_PERIPH;
2507
2508 /*
2509			 * This may look a bit nonsensical, but it is
2510 * actually quite logical. There are very few
2511 * peripheral drivers, and bloating every peripheral
2512 * structure with a pointer back to its parent
2513 * peripheral driver linker set entry would cost
2514 * more in the long run than doing this quick lookup.
2515 */
2516 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2517 if (strcmp((*pdrv)->driver_name,
2518 periph->periph_name) == 0)
2519 break;
2520 }
2521
2522			if (*pdrv == NULL) {
2523 cdm->status = CAM_DEV_MATCH_ERROR;
2524 return(0);
2525 }
2526
2527 cdm->pos.cookie.pdrv = pdrv;
2528 /*
2529 * The periph generation slot does double duty, as
2530 * does the periph pointer slot. They are used for
2531 * both edt and pdrv lookups and positioning.
2532 */
2533 cdm->pos.cookie.periph = periph;
2534 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2535 (*pdrv)->generation;
2536 cdm->status = CAM_DEV_MATCH_MORE;
2537 return(0);
2538 }
2539
2540 j = cdm->num_matches;
2541 cdm->num_matches++;
2542 cdm->matches[j].type = DEV_MATCH_PERIPH;
2543 cdm->matches[j].result.periph_result.path_id =
2544 periph->path->bus->path_id;
2545
2546 /*
2547 * The transport layer peripheral doesn't have a target or
2548 * lun.
2549 */
2550 if (periph->path->target)
2551 cdm->matches[j].result.periph_result.target_id =
2552 periph->path->target->target_id;
2553 else
2554 cdm->matches[j].result.periph_result.target_id = -1;
2555
2556 if (periph->path->device)
2557 cdm->matches[j].result.periph_result.target_lun =
2558 periph->path->device->lun_id;
2559 else
2560 cdm->matches[j].result.periph_result.target_lun = -1;
2561
2562 cdm->matches[j].result.periph_result.unit_number =
2563 periph->unit_number;
2564 strncpy(cdm->matches[j].result.periph_result.periph_name,
2565 periph->periph_name, DEV_IDLEN);
2566 }
2567
2568 return(1);
2569}
2570
2571static int
2572xptperiphlistmatch(struct ccb_dev_match *cdm)
2573{
2574 int ret;
2575
2576 cdm->num_matches = 0;
2577
2578 /*
2579 * At this point in the edt traversal function, we check the bus
2580 * list generation to make sure that no busses have been added or
2581	 * removed since the user last sent an XPT_DEV_MATCH ccb through.
2582 * For the peripheral driver list traversal function, however, we
2583 * don't have to worry about new peripheral driver types coming or
2584 * going; they're in a linker set, and therefore can't change
2585 * without a recompile.
2586 */
2587
2588 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2589 && (cdm->pos.cookie.pdrv != NULL))
2590 ret = xptpdrvtraverse(
2591 (struct periph_driver **)cdm->pos.cookie.pdrv,
2592 xptplistpdrvfunc, cdm);
2593 else
2594 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2595
2596 /*
2597 * If we get back 0, that means that we had to stop before fully
2598 * traversing the peripheral driver tree. It also means that one of
2599	 * we get back 1, we've fully traversed the peripheral driver list
2600	 * and copied out any matching entries.
2601 * matching entries.
2602 */
2603 if (ret == 1)
2604 cdm->status = CAM_DEV_MATCH_LAST;
2605
2606 return(ret);
2607}
2608
2609static int
2610xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2611{
2612 struct cam_eb *bus, *next_bus;
2613 int retval;
2614
2615 retval = 1;
2616
2617 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2618 bus != NULL;
2619 bus = next_bus) {
2620 next_bus = TAILQ_NEXT(bus, links);
2621
2622 retval = tr_func(bus, arg);
2623 if (retval == 0)
2624 return(retval);
2625 }
2626
2627 return(retval);
2628}
2629
2630static int
2631xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2632 xpt_targetfunc_t *tr_func, void *arg)
2633{
2634 struct cam_et *target, *next_target;
2635 int retval;
2636
2637 retval = 1;
2638 for (target = (start_target ? start_target :
2639 TAILQ_FIRST(&bus->et_entries));
2640 target != NULL; target = next_target) {
2641
2642 next_target = TAILQ_NEXT(target, links);
2643
2644 retval = tr_func(target, arg);
2645
2646 if (retval == 0)
2647 return(retval);
2648 }
2649
2650 return(retval);
2651}
2652
2653static int
2654xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2655 xpt_devicefunc_t *tr_func, void *arg)
2656{
2657 struct cam_ed *device, *next_device;
2658 int retval;
2659
2660 retval = 1;
2661 for (device = (start_device ? start_device :
2662 TAILQ_FIRST(&target->ed_entries));
2663 device != NULL;
2664 device = next_device) {
2665
2666 next_device = TAILQ_NEXT(device, links);
2667
2668 retval = tr_func(device, arg);
2669
2670 if (retval == 0)
2671 return(retval);
2672 }
2673
2674 return(retval);
2675}
2676
2677static int
2678xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2679 xpt_periphfunc_t *tr_func, void *arg)
2680{
2681 struct cam_periph *periph, *next_periph;
2682 int retval;
2683
2684 retval = 1;
2685
2686 for (periph = (start_periph ? start_periph :
2687 SLIST_FIRST(&device->periphs));
2688 periph != NULL;
2689 periph = next_periph) {
2690
2691 next_periph = SLIST_NEXT(periph, periph_links);
2692
2693 retval = tr_func(periph, arg);
2694 if (retval == 0)
2695 return(retval);
2696 }
2697
2698 return(retval);
2699}
2700
2701static int
2702xptpdrvtraverse(struct periph_driver **start_pdrv,
2703 xpt_pdrvfunc_t *tr_func, void *arg)
2704{
2705 struct periph_driver **pdrv;
2706 int retval;
2707
2708 retval = 1;
2709
2710 /*
2711 * We don't traverse the peripheral driver list like we do the
2712 * other lists, because it is a linker set, and therefore cannot be
2713 * changed during runtime. If the peripheral driver list is ever
2714 * re-done to be something other than a linker set (i.e. it can
2715 * change while the system is running), the list traversal should
2716 * be modified to work like the other traversal functions.
2717 */
2718 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2719 *pdrv != NULL; pdrv++) {
2720 retval = tr_func(pdrv, arg);
2721
2722 if (retval == 0)
2723 return(retval);
2724 }
2725
2726 return(retval);
2727}
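
/*
 * Example (illustrative sketch): peripheral drivers enter the
 * periph_drivers linker set at compile time.  A driver "foo" would do
 * roughly the following in its own source file; the exact field layout
 * of struct periph_driver is assumed here, not defined by this file:
 *
 *	static struct periph_driver foodriver =
 *	{
 *		fooinit, "foo",
 *		TAILQ_HEAD_INITIALIZER(foodriver.units), /* generation */ 0
 *	};
 *
 *	PERIPHDRIVER_DECLARE(foo, foodriver);
 */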
2728
2729static int
2730xptpdperiphtraverse(struct periph_driver **pdrv,
2731 struct cam_periph *start_periph,
2732 xpt_periphfunc_t *tr_func, void *arg)
2733{
2734 struct cam_periph *periph, *next_periph;
2735 int retval;
2736
2737 retval = 1;
2738
2739 for (periph = (start_periph ? start_periph :
2740 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2741 periph = next_periph) {
2742
2743 next_periph = TAILQ_NEXT(periph, unit_links);
2744
2745 retval = tr_func(periph, arg);
2746 if (retval == 0)
2747 return(retval);
2748 }
2749 return(retval);
2750}
2751
2752static int
2753xptdefbusfunc(struct cam_eb *bus, void *arg)
2754{
2755 struct xpt_traverse_config *tr_config;
2756
2757 tr_config = (struct xpt_traverse_config *)arg;
2758
2759 if (tr_config->depth == XPT_DEPTH_BUS) {
2760 xpt_busfunc_t *tr_func;
2761
2762 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2763
2764 return(tr_func(bus, tr_config->tr_arg));
2765 } else
2766 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2767}
2768
2769static int
2770xptdeftargetfunc(struct cam_et *target, void *arg)
2771{
2772 struct xpt_traverse_config *tr_config;
2773
2774 tr_config = (struct xpt_traverse_config *)arg;
2775
2776 if (tr_config->depth == XPT_DEPTH_TARGET) {
2777 xpt_targetfunc_t *tr_func;
2778
2779 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2780
2781 return(tr_func(target, tr_config->tr_arg));
2782 } else
2783 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2784}
2785
2786static int
2787xptdefdevicefunc(struct cam_ed *device, void *arg)
2788{
2789 struct xpt_traverse_config *tr_config;
2790
2791 tr_config = (struct xpt_traverse_config *)arg;
2792
2793 if (tr_config->depth == XPT_DEPTH_DEVICE) {
2794 xpt_devicefunc_t *tr_func;
2795
2796 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2797
2798 return(tr_func(device, tr_config->tr_arg));
2799 } else
2800 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2801}
2802
2803static int
2804xptdefperiphfunc(struct cam_periph *periph, void *arg)
2805{
2806 struct xpt_traverse_config *tr_config;
2807 xpt_periphfunc_t *tr_func;
2808
2809 tr_config = (struct xpt_traverse_config *)arg;
2810
2811 tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2812
2813 /*
2814 * Unlike the other default functions, we don't check for depth
2815 * here. The peripheral driver level is the last level in the EDT,
2816 * so if we're here, we should execute the function in question.
2817 */
2818 return(tr_func(periph, tr_config->tr_arg));
2819}
2820
2821/*
2822 * Execute the given function for every bus in the EDT.
2823 */
2824static int
2825xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2826{
2827 struct xpt_traverse_config tr_config;
2828
2829 tr_config.depth = XPT_DEPTH_BUS;
2830 tr_config.tr_func = tr_func;
2831 tr_config.tr_arg = arg;
2832
2833 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2834}
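
/*
 * Example (sketch): counting the busses in the EDT with
 * xpt_for_all_busses().  "xptcountbusfunc" and "xpt_count_busses" are
 * hypothetical and not part of this file.
 */
#ifdef notusedyet
static int
xptcountbusfunc(struct cam_eb *bus, void *arg)
{
	(*(int *)arg)++;
	return(1);	/* returning 1 continues the traversal */
}

static int
xpt_count_busses(void)
{
	int count;

	count = 0;
	xpt_for_all_busses(xptcountbusfunc, &count);
	return(count);
}
#endif /* notusedyet */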
2835
2836#ifdef notusedyet
2837/*
2838 * Execute the given function for every target in the EDT.
2839 */
2840static int
2841xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2842{
2843 struct xpt_traverse_config tr_config;
2844
2845 tr_config.depth = XPT_DEPTH_TARGET;
2846 tr_config.tr_func = tr_func;
2847 tr_config.tr_arg = arg;
2848
2849 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2850}
2851#endif /* notusedyet */
2852
2853/*
2854 * Execute the given function for every device in the EDT.
2855 */
2856static int
2857xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2858{
2859 struct xpt_traverse_config tr_config;
2860
2861 tr_config.depth = XPT_DEPTH_DEVICE;
2862 tr_config.tr_func = tr_func;
2863 tr_config.tr_arg = arg;
2864
2865 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2866}
2867
2868#ifdef notusedyet
2869/*
2870 * Execute the given function for every peripheral in the EDT.
2871 */
2872static int
2873xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2874{
2875 struct xpt_traverse_config tr_config;
2876
2877 tr_config.depth = XPT_DEPTH_PERIPH;
2878 tr_config.tr_func = tr_func;
2879 tr_config.tr_arg = arg;
2880
2881 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2882}
2883#endif /* notusedyet */
2884
2885static int
2886xptsetasyncfunc(struct cam_ed *device, void *arg)
2887{
2888 struct cam_path path;
2889 struct ccb_getdev cgd;
2890 struct async_node *cur_entry;
2891
2892 cur_entry = (struct async_node *)arg;
2893
2894 /*
2895 * Don't report unconfigured devices (Wildcard devs,
2896 * devices only for target mode, device instances
2897 * that have been invalidated but are waiting for
2898 * their last reference count to be released).
2899 */
2900 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2901 return (1);
2902
2903 xpt_compile_path(&path,
2904 NULL,
2905 device->target->bus->path_id,
2906 device->target->target_id,
2907 device->lun_id);
2908 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2909 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2910 xpt_action((union ccb *)&cgd);
2911 cur_entry->callback(cur_entry->callback_arg,
2912 AC_FOUND_DEVICE,
2913 &path, &cgd);
2914 xpt_release_path(&path);
2915
2916 return(1);
2917}
2918
2919static int
2920xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2921{
2922 struct cam_path path;
2923 struct ccb_pathinq cpi;
2924 struct async_node *cur_entry;
2925
2926 cur_entry = (struct async_node *)arg;
2927
2928 xpt_compile_path(&path, /*periph*/NULL,
2929 bus->sim->path_id,
2930 CAM_TARGET_WILDCARD,
2931 CAM_LUN_WILDCARD);
2932 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2933 cpi.ccb_h.func_code = XPT_PATH_INQ;
2934 xpt_action((union ccb *)&cpi);
2935 cur_entry->callback(cur_entry->callback_arg,
2936 AC_PATH_REGISTERED,
2937 &path, &cpi);
2938 xpt_release_path(&path);
2939
2940 return(1);
2941}
2942
2943void
2944xpt_action(union ccb *start_ccb)
2945{
2946 int iopl;
2947
2948 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2949
2950 start_ccb->ccb_h.status = CAM_REQ_INPROG;
2951
2952 iopl = splsoftcam();
2953 switch (start_ccb->ccb_h.func_code) {
2954 case XPT_SCSI_IO:
2955 {
2956#ifdef CAM_NEW_TRAN_CODE
2957 struct cam_ed *device;
2958#endif /* CAM_NEW_TRAN_CODE */
2959#ifdef CAMDEBUG
2960 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2961 struct cam_path *path;
2962
2963 path = start_ccb->ccb_h.path;
2964#endif
2965
2966 /*
2967 * For the sake of compatibility with SCSI-1
2968 * devices that may not understand the identify
2969 * message, we include lun information in the
2970 * second byte of all commands. SCSI-1 specifies
2971 * that luns are a 3 bit value and reserves only 3
2972 * bits for lun information in the CDB. Later
2973 * revisions of the SCSI spec allow for more than 8
2974 * luns, but have deprecated lun information in the
2975	 * CDB.  So, if the lun won't fit, we must omit it.
2976 *
2977 * Also be aware that during initial probing for devices,
2978 * the inquiry information is unknown but initialized to 0.
2979 * This means that this code will be exercised while probing
2980 * devices with an ANSI revision greater than 2.
2981 */
2982#ifdef CAM_NEW_TRAN_CODE
2983 device = start_ccb->ccb_h.path->device;
2984 if (device->protocol_version <= SCSI_REV_2
2985#else /* CAM_NEW_TRAN_CODE */
2986 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2987#endif /* CAM_NEW_TRAN_CODE */
2988 && start_ccb->ccb_h.target_lun < 8
2989 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2990
2991 start_ccb->csio.cdb_io.cdb_bytes[1] |=
2992 start_ccb->ccb_h.target_lun << 5;
2993 }
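		/*
		 * Example: for target_lun 3, the statement above sets
		 * bits 5-7 of CDB byte 1: 3 << 5 == 0x60.
		 */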
2994 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2995 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2996 scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2997 &path->device->inq_data),
2998 scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2999 cdb_str, sizeof(cdb_str))));
3000 }
3001 /* FALLTHROUGH */
3002 case XPT_TARGET_IO:
3003 case XPT_CONT_TARGET_IO:
3004 start_ccb->csio.sense_resid = 0;
3005 start_ccb->csio.resid = 0;
3006 /* FALLTHROUGH */
3007 case XPT_RESET_DEV:
3008 case XPT_ENG_EXEC:
3009 {
3010 struct cam_path *path;
3011 int s;
3012 int runq;
3013
3014 path = start_ccb->ccb_h.path;
3015 s = splsoftcam();
3016
3017 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
3018 if (path->device->qfrozen_cnt == 0)
3019 runq = xpt_schedule_dev_sendq(path->bus, path->device);
3020 else
3021 runq = 0;
3022 splx(s);
3023 if (runq != 0)
3024 xpt_run_dev_sendq(path->bus);
3025 break;
3026 }
3027 case XPT_SET_TRAN_SETTINGS:
3028 {
3029 xpt_set_transfer_settings(&start_ccb->cts,
3030 start_ccb->ccb_h.path->device,
3031 /*async_update*/FALSE);
3032 break;
3033 }
3034 case XPT_CALC_GEOMETRY:
3035 {
3036 struct cam_sim *sim;
3037
3038 /* Filter out garbage */
3039 if (start_ccb->ccg.block_size == 0
3040 || start_ccb->ccg.volume_size == 0) {
3041 start_ccb->ccg.cylinders = 0;
3042 start_ccb->ccg.heads = 0;
3043 start_ccb->ccg.secs_per_track = 0;
3044 start_ccb->ccb_h.status = CAM_REQ_CMP;
3045 break;
3046 }
3047#ifdef PC98
3048 /*
3049	 * In a PC-98 system, geometry translation depends on
3050	 * the "real" device geometry obtained from mode page 4.
3051	 * SCSI geometry translation is performed in the
3052	 * initialization routine of the SCSI BIOS and the result is
3053	 * stored in host memory.  If the translation is available
3054 * in host memory, use it. If not, rely on the default
3055 * translation the device driver performs.
3056 */
3057 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
3058 start_ccb->ccb_h.status = CAM_REQ_CMP;
3059 break;
3060 }
3061#endif
3062 sim = start_ccb->ccb_h.path->bus->sim;
3063 (*(sim->sim_action))(sim, start_ccb);
3064 break;
3065 }
3066 case XPT_ABORT:
3067 {
3068 union ccb* abort_ccb;
3069 int s;
3070
3071 abort_ccb = start_ccb->cab.abort_ccb;
3072 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
3073
3074 if (abort_ccb->ccb_h.pinfo.index >= 0) {
3075 struct cam_ccbq *ccbq;
3076
3077 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
3078 cam_ccbq_remove_ccb(ccbq, abort_ccb);
3079 abort_ccb->ccb_h.status =
3080 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3081 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3082 s = splcam();
3083 xpt_done(abort_ccb);
3084 splx(s);
3085 start_ccb->ccb_h.status = CAM_REQ_CMP;
3086 break;
3087 }
3088 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
3089 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
3090 /*
3091 * We've caught this ccb en route to
3092 * the SIM. Flag it for abort and the
3093 * SIM will do so just before starting
3094 * real work on the CCB.
3095 */
3096 abort_ccb->ccb_h.status =
3097 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3098 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3099 start_ccb->ccb_h.status = CAM_REQ_CMP;
3100 break;
3101 }
3102 }
3103 if (XPT_FC_IS_QUEUED(abort_ccb)
3104 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
3105 /*
3106 * It's already completed but waiting
3107 * for our SWI to get to it.
3108 */
3109 start_ccb->ccb_h.status = CAM_UA_ABORT;
3110 break;
3111 }
3112 /*
3113 * If we weren't able to take care of the abort request
3114 * in the XPT, pass the request down to the SIM for processing.
3115 */
3116 }
3117 /* FALLTHROUGH */
3118 case XPT_ACCEPT_TARGET_IO:
3119 case XPT_EN_LUN:
3120 case XPT_IMMED_NOTIFY:
3121 case XPT_NOTIFY_ACK:
3122 case XPT_GET_TRAN_SETTINGS:
3123 case XPT_RESET_BUS:
3124 {
3125 struct cam_sim *sim;
3126
3127 sim = start_ccb->ccb_h.path->bus->sim;
3128 (*(sim->sim_action))(sim, start_ccb);
3129 break;
3130 }
3131 case XPT_PATH_INQ:
3132 {
3133 struct cam_sim *sim;
3134
3135 sim = start_ccb->ccb_h.path->bus->sim;
3136 (*(sim->sim_action))(sim, start_ccb);
3137 break;
3138 }
3139 case XPT_PATH_STATS:
3140 start_ccb->cpis.last_reset =
3141 start_ccb->ccb_h.path->bus->last_reset;
3142 start_ccb->ccb_h.status = CAM_REQ_CMP;
3143 break;
3144 case XPT_GDEV_TYPE:
3145 {
3146 struct cam_ed *dev;
3147 int s;
3148
3149 dev = start_ccb->ccb_h.path->device;
3150 s = splcam();
3151 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3152 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3153 } else {
3154 struct ccb_getdev *cgd;
3155 struct cam_eb *bus;
3156 struct cam_et *tar;
3157
3158 cgd = &start_ccb->cgd;
3159 bus = cgd->ccb_h.path->bus;
3160 tar = cgd->ccb_h.path->target;
3161 cgd->inq_data = dev->inq_data;
3162 cgd->ccb_h.status = CAM_REQ_CMP;
3163 cgd->serial_num_len = dev->serial_num_len;
3164 if ((dev->serial_num_len > 0)
3165 && (dev->serial_num != NULL))
3166 bcopy(dev->serial_num, cgd->serial_num,
3167 dev->serial_num_len);
3168 }
3169 splx(s);
3170 break;
3171 }
3172 case XPT_GDEV_STATS:
3173 {
3174 struct cam_ed *dev;
3175 int s;
3176
3177 dev = start_ccb->ccb_h.path->device;
3178 s = splcam();
3179 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3180 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3181 } else {
3182 struct ccb_getdevstats *cgds;
3183 struct cam_eb *bus;
3184 struct cam_et *tar;
3185
3186 cgds = &start_ccb->cgds;
3187 bus = cgds->ccb_h.path->bus;
3188 tar = cgds->ccb_h.path->target;
3189 cgds->dev_openings = dev->ccbq.dev_openings;
3190 cgds->dev_active = dev->ccbq.dev_active;
3191 cgds->devq_openings = dev->ccbq.devq_openings;
3192 cgds->devq_queued = dev->ccbq.queue.entries;
3193 cgds->held = dev->ccbq.held;
3194 cgds->last_reset = tar->last_reset;
3195 cgds->maxtags = dev->quirk->maxtags;
3196 cgds->mintags = dev->quirk->mintags;
3197 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3198 cgds->last_reset = bus->last_reset;
3199 cgds->ccb_h.status = CAM_REQ_CMP;
3200 }
3201 splx(s);
3202 break;
3203 }
3204 case XPT_GDEVLIST:
3205 {
3206 struct cam_periph *nperiph;
3207 struct periph_list *periph_head;
3208 struct ccb_getdevlist *cgdl;
3209 u_int i;
3210 int s;
3211 struct cam_ed *device;
3212 int found;
3213
3214
3215 found = 0;
3216
3217 /*
3218 * Don't want anyone mucking with our data.
3219 */
3220 s = splcam();
3221 device = start_ccb->ccb_h.path->device;
3222 periph_head = &device->periphs;
3223 cgdl = &start_ccb->cgdl;
3224
3225 /*
3226 * Check and see if the list has changed since the user
3227 * last requested a list member. If so, tell them that the
3228 * list has changed, and therefore they need to start over
3229 * from the beginning.
3230 */
3231 if ((cgdl->index != 0) &&
3232 (cgdl->generation != device->generation)) {
3233 cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3234 splx(s);
3235 break;
3236 }
3237
3238 /*
3239 * Traverse the list of peripherals and attempt to find
3240 * the requested peripheral.
3241 */
3242 for (nperiph = SLIST_FIRST(periph_head), i = 0;
3243 (nperiph != NULL) && (i <= cgdl->index);
3244 nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3245 if (i == cgdl->index) {
3246 strncpy(cgdl->periph_name,
3247 nperiph->periph_name,
3248 DEV_IDLEN);
3249 cgdl->unit_number = nperiph->unit_number;
3250 found = 1;
3251 }
3252 }
3253 if (found == 0) {
3254 cgdl->status = CAM_GDEVLIST_ERROR;
3255 splx(s);
3256 break;
3257 }
3258
3259 if (nperiph == NULL)
3260 cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3261 else
3262 cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3263
3264 cgdl->index++;
3265 cgdl->generation = device->generation;
3266
3267 splx(s);
3268 cgdl->ccb_h.status = CAM_REQ_CMP;
3269 break;
3270 }
3271 case XPT_DEV_MATCH:
3272 {
3273 int s;
3274 dev_pos_type position_type;
3275 struct ccb_dev_match *cdm;
3276
3277 cdm = &start_ccb->cdm;
3278
3279 /*
3280 * Prevent EDT changes while we traverse it.
3281 */
3282 s = splcam();
3283 /*
3284 * There are two ways of getting at information in the EDT.
3285 * The first way is via the primary EDT tree. It starts
3286 * with a list of busses, then a list of targets on a bus,
3287 * then devices/luns on a target, and then peripherals on a
3288 * device/lun. The "other" way is by the peripheral driver
3289 * lists. The peripheral driver lists are organized by
3290	 * peripheral driver (obviously), so it makes sense to
3291 * use the peripheral driver list if the user is looking
3292 * for something like "da1", or all "da" devices. If the
3293 * user is looking for something on a particular bus/target
3294 * or lun, it's generally better to go through the EDT tree.
3295 */
3296
3297 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3298 position_type = cdm->pos.position_type;
3299 else {
3300 u_int i;
3301
3302 position_type = CAM_DEV_POS_NONE;
3303
3304 for (i = 0; i < cdm->num_patterns; i++) {
3305 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3306 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3307 position_type = CAM_DEV_POS_EDT;
3308 break;
3309 }
3310 }
3311
3312 if (cdm->num_patterns == 0)
3313 position_type = CAM_DEV_POS_EDT;
3314 else if (position_type == CAM_DEV_POS_NONE)
3315 position_type = CAM_DEV_POS_PDRV;
3316 }
3317
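		/*
		 * Example: a pattern naming only a peripheral, such as
		 * "da1" (DEV_MATCH_PERIPH), falls through to
		 * CAM_DEV_POS_PDRV above, while a DEV_MATCH_BUS or
		 * DEV_MATCH_DEVICE pattern selects CAM_DEV_POS_EDT.
		 */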
3318 switch(position_type & CAM_DEV_POS_TYPEMASK) {
3319 case CAM_DEV_POS_EDT:
3320 xptedtmatch(cdm);
3321 break;
3322 case CAM_DEV_POS_PDRV:
3323 xptperiphlistmatch(cdm);
3324 break;
3325 default:
3326 cdm->status = CAM_DEV_MATCH_ERROR;
3327 break;
3328 }
3329
3330 splx(s);
3331
3332 if (cdm->status == CAM_DEV_MATCH_ERROR)
3333 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3334 else
3335 start_ccb->ccb_h.status = CAM_REQ_CMP;
3336
3337 break;
3338 }
3339 case XPT_SASYNC_CB:
3340 {
3341 struct ccb_setasync *csa;
3342 struct async_node *cur_entry;
3343 struct async_list *async_head;
3344 u_int32_t added;
3345 int s;
3346
3347 csa = &start_ccb->csa;
3348 added = csa->event_enable;
3349 async_head = &csa->ccb_h.path->device->asyncs;
3350
3351 /*
3352 * If there is already an entry for us, simply
3353 * update it.
3354 */
3355 s = splcam();
3356 cur_entry = SLIST_FIRST(async_head);
3357 while (cur_entry != NULL) {
3358 if ((cur_entry->callback_arg == csa->callback_arg)
3359 && (cur_entry->callback == csa->callback))
3360 break;
3361 cur_entry = SLIST_NEXT(cur_entry, links);
3362 }
3363
3364 if (cur_entry != NULL) {
3365 /*
3366 * If the request has no flags set,
3367 * remove the entry.
3368 */
3369 added &= ~cur_entry->event_enable;
3370 if (csa->event_enable == 0) {
3371 SLIST_REMOVE(async_head, cur_entry,
3372 async_node, links);
3373 csa->ccb_h.path->device->refcount--;
3374 free(cur_entry, M_DEVBUF);
3375 } else {
3376 cur_entry->event_enable = csa->event_enable;
3377 }
3378 } else {
3379 cur_entry = malloc(sizeof(*cur_entry), M_DEVBUF,
3380 M_NOWAIT);
3381 if (cur_entry == NULL) {
3382 splx(s);
3383 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3384 break;
3385 }
3386 cur_entry->event_enable = csa->event_enable;
3387 cur_entry->callback_arg = csa->callback_arg;
3388 cur_entry->callback = csa->callback;
3389 SLIST_INSERT_HEAD(async_head, cur_entry, links);
3390 csa->ccb_h.path->device->refcount++;
3391 }
3392
3393 if ((added & AC_FOUND_DEVICE) != 0) {
3394 /*
3395 * Get this peripheral up to date with all
3396 * the currently existing devices.
3397 */
3398 xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3399 }
3400 if ((added & AC_PATH_REGISTERED) != 0) {
3401 /*
3402 * Get this peripheral up to date with all
3403 * the currently existing busses.
3404 */
3405 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3406 }
3407 splx(s);
3408 start_ccb->ccb_h.status = CAM_REQ_CMP;
3409 break;
3410 }
3411 case XPT_REL_SIMQ:
3412 {
3413 struct ccb_relsim *crs;
3414 struct cam_ed *dev;
3415 int s;
3416
3417 crs = &start_ccb->crs;
3418 dev = crs->ccb_h.path->device;
3419 if (dev == NULL) {
3420
3421 crs->ccb_h.status = CAM_DEV_NOT_THERE;
3422 break;
3423 }
3424
3425 s = splcam();
3426
3427 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3428
3429 if ((dev->inq_data.flags & SID_CmdQue) != 0) {
3430
3431 /* Don't ever go below one opening */
3432 if (crs->openings > 0) {
3433 xpt_dev_ccbq_resize(crs->ccb_h.path,
3434 crs->openings);
3435
3436 if (bootverbose) {
3437 xpt_print_path(crs->ccb_h.path);
3438 printf("tagged openings "
3439 "now %d\n",
3440 crs->openings);
3441 }
3442 }
3443 }
3444 }
3445
3446 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3447
3448 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3449
3450 /*
3451 * Just extend the old timeout and decrement
3452 * the freeze count so that a single timeout
3453 * is sufficient for releasing the queue.
3454 */
3455 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3456 untimeout(xpt_release_devq_timeout,
3457 dev, dev->c_handle);
3458 } else {
3459
3460 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3461 }
3462
3463 dev->c_handle =
3464 timeout(xpt_release_devq_timeout,
3465 dev,
3466 (crs->release_timeout * hz) / 1000);
3467
3468 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3469
3470 }
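		/*
		 * Example: release_timeout is in milliseconds, so with
		 * hz == 100, a 500ms timeout becomes
		 * (500 * 100) / 1000 == 50 clock ticks.
		 */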
3471
3472 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3473
3474 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3475 /*
3476 * Decrement the freeze count so that a single
3477 * completion is still sufficient to unfreeze
3478 * the queue.
3479 */
3480 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3481 } else {
3482
3483 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3484 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3485 }
3486 }
3487
3488 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3489
3490 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3491 || (dev->ccbq.dev_active == 0)) {
3492
3493 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3494 } else {
3495
3496 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3497 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3498 }
3499 }
3500 splx(s);
3501
3502 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3503
3504 xpt_release_devq(crs->ccb_h.path, /*count*/1,
3505 /*run_queue*/TRUE);
3506 }
3507 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3508 start_ccb->ccb_h.status = CAM_REQ_CMP;
3509 break;
3510 }
3511 case XPT_SCAN_BUS:
3512 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3513 break;
3514 case XPT_SCAN_LUN:
3515 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3516 start_ccb->ccb_h.path, start_ccb->crcn.flags,
3517 start_ccb);
3518 break;
3519 case XPT_DEBUG: {
3520#ifdef CAMDEBUG
3521 int s;
3522
3523 s = splcam();
3524#ifdef CAM_DEBUG_DELAY
3525 cam_debug_delay = CAM_DEBUG_DELAY;
3526#endif
3527 cam_dflags = start_ccb->cdbg.flags;
3528 if (cam_dpath != NULL) {
3529 xpt_free_path(cam_dpath);
3530 cam_dpath = NULL;
3531 }
3532
3533 if (cam_dflags != CAM_DEBUG_NONE) {
3534 if (xpt_create_path(&cam_dpath, xpt_periph,
3535 start_ccb->ccb_h.path_id,
3536 start_ccb->ccb_h.target_id,
3537 start_ccb->ccb_h.target_lun) !=
3538 CAM_REQ_CMP) {
3539 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3540 cam_dflags = CAM_DEBUG_NONE;
3541 } else {
3542 start_ccb->ccb_h.status = CAM_REQ_CMP;
3543 xpt_print_path(cam_dpath);
3544 printf("debugging flags now %x\n", cam_dflags);
3545 }
3546 } else {
3547 cam_dpath = NULL;
3548 start_ccb->ccb_h.status = CAM_REQ_CMP;
3549 }
3550 splx(s);
3551#else /* !CAMDEBUG */
3552 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3553#endif /* CAMDEBUG */
3554 break;
3555 }
3556 case XPT_NOOP:
3557 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3558 xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3559 start_ccb->ccb_h.status = CAM_REQ_CMP;
3560 break;
3561 default:
3562 case XPT_SDEV_TYPE:
3563 case XPT_TERM_IO:
3564 case XPT_ENG_INQ:
3565 /* XXX Implement */
3566 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3567 break;
3568 }
3569 splx(iopl);
3570}
3571
3572void
3573xpt_polled_action(union ccb *start_ccb)
3574{
3575 int s;
3576 u_int32_t timeout;
3577 struct cam_sim *sim;
3578 struct cam_devq *devq;
3579 struct cam_ed *dev;
3580
3581 timeout = start_ccb->ccb_h.timeout;
3582 sim = start_ccb->ccb_h.path->bus->sim;
3583 devq = sim->devq;
3584 dev = start_ccb->ccb_h.path->device;
3585
3586 s = splcam();
3587
3588 /*
3589 * Steal an opening so that no other queued requests
3590 * can get it before us while we simulate interrupts.
3591 */
3592 dev->ccbq.devq_openings--;
3593 dev->ccbq.dev_openings--;
3594
3595 while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
3596 && (--timeout > 0)) {
3597 DELAY(1000);
3598 (*(sim->sim_poll))(sim);
3599 camisr(&cam_netq);
3600 camisr(&cam_bioq);
3601 }
3602
3603 dev->ccbq.devq_openings++;
3604 dev->ccbq.dev_openings++;
3605
3606 if (timeout != 0) {
3607 xpt_action(start_ccb);
3608 while(--timeout > 0) {
3609 (*(sim->sim_poll))(sim);
3610 camisr(&cam_netq);
3611 camisr(&cam_bioq);
3612 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3613 != CAM_REQ_INPROG)
3614 break;
3615 DELAY(1000);
3616 }
3617 if (timeout == 0) {
3618 /*
3619 * XXX Is it worth adding a sim_timeout entry
3620 * point so we can attempt recovery? If
3621 * this is only used for dumps, I don't think
3622 * it is.
3623 */
3624 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3625 }
3626 } else {
3627 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3628 }
3629 splx(s);
3630}
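
/*
 * Note: xpt_polled_action() is what allows I/O such as crash dumps to
 * complete with interrupts disabled.  The CCB's timeout field is
 * treated as a count of milliseconds here, since each polling
 * iteration above delays for DELAY(1000) microseconds.
 */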
3631
3632/*
3633 * Schedule a peripheral driver to receive a ccb when its
3634 * target device has space for more transactions.
3635 */
3636void
3637xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3638{
3639 struct cam_ed *device;
3640 int s;
3641 int runq;
3642
3643 CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3644 device = perph->path->device;
3645 s = splsoftcam();
3646 if (periph_is_queued(perph)) {
3647 /* Simply reorder based on new priority */
3648 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3649 (" change priority to %d\n", new_priority));
3650 if (new_priority < perph->pinfo.priority) {
3651 camq_change_priority(&device->drvq,
3652 perph->pinfo.index,
3653 new_priority);
3654 }
3655 runq = 0;
3656 } else {
3657 /* New entry on the queue */
3658 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3659 (" added periph to queue\n"));
3660 perph->pinfo.priority = new_priority;
3661 perph->pinfo.generation = ++device->drvq.generation;
3662 camq_insert(&device->drvq, &perph->pinfo);
3663 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3664 }
3665 splx(s);
3666 if (runq != 0) {
3667 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3668 (" calling xpt_run_devq\n"));
3669 xpt_run_dev_allocq(perph->path->bus);
3670 }
3671}
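
/*
 * Example (sketch): a peripheral driver calls xpt_schedule() when new
 * work arrives (e.g. from its strategy routine); once the device has
 * an opening, the driver's periph_start() entry point is handed a
 * CCB.  "fooschedule" is hypothetical.
 */
#ifdef notusedyet
static void
fooschedule(struct cam_periph *periph)
{
	xpt_schedule(periph, /*new_priority*/1);
}
#endif /* notusedyet */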
3672
3673
3674/*
3675 * Schedule a device to run on a given queue.
3676 * If the device was inserted as a new entry on the queue,
3677 * return 1 meaning the device queue should be run. If we
3678 * were already queued, implying someone else has already
3679 * started the queue, return 0 so the caller doesn't attempt
3680 * to run the queue.  Must be called at splsoftcam or splcam
3681 * (splcam encompasses splsoftcam).
3682 */
3683static int
3684xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3685 u_int32_t new_priority)
3686{
3687 int retval;
3688 u_int32_t old_priority;
3689
3690 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3691
3692 old_priority = pinfo->priority;
3693
3694 /*
3695 * Are we already queued?
3696 */
3697 if (pinfo->index != CAM_UNQUEUED_INDEX) {
3698 /* Simply reorder based on new priority */
3699 if (new_priority < old_priority) {
3700 camq_change_priority(queue, pinfo->index,
3701 new_priority);
3702 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3703 ("changed priority to %d\n",
3704 new_priority));
3705 }
3706 retval = 0;
3707 } else {
3708 /* New entry on the queue */
3709 if (new_priority < old_priority)
3710 pinfo->priority = new_priority;
3711
3712 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3713 ("Inserting onto queue\n"));
3714 pinfo->generation = ++queue->generation;
3715 camq_insert(queue, pinfo);
3716 retval = 1;
3717 }
3718 return (retval);
3719}
3720
3721static void
3722xpt_run_dev_allocq(struct cam_eb *bus)
3723{
3724 struct cam_devq *devq;
3725 int s;
3726
3727 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3728 devq = bus->sim->devq;
3729
3730 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3731 (" qfrozen_cnt == 0x%x, entries == %d, "
3732 "openings == %d, active == %d\n",
3733 devq->alloc_queue.qfrozen_cnt,
3734 devq->alloc_queue.entries,
3735 devq->alloc_openings,
3736 devq->alloc_active));
3737
3738 s = splsoftcam();
3739 devq->alloc_queue.qfrozen_cnt++;
3740 while ((devq->alloc_queue.entries > 0)
3741 && (devq->alloc_openings > 0)
3742 && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3743 struct cam_ed_qinfo *qinfo;
3744 struct cam_ed *device;
3745 union ccb *work_ccb;
3746 struct cam_periph *drv;
3747 struct camq *drvq;
3748
3749 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3750 CAMQ_HEAD);
3751 device = qinfo->device;
3752
3753 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3754 ("running device %p\n", device));
3755
3756 drvq = &device->drvq;
3757
3758#ifdef CAMDEBUG
3759 if (drvq->entries <= 0) {
3760 panic("xpt_run_dev_allocq: "
3761 "Device on queue without any work to do");
3762 }
3763#endif
3764 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3765 devq->alloc_openings--;
3766 devq->alloc_active++;
3767 drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3768 splx(s);
3769 xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3770 drv->pinfo.priority);
3771 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3772 ("calling periph start\n"));
3773 drv->periph_start(drv, work_ccb);
3774 } else {
3775 /*
3776 * Malloc failure in alloc_ccb
3777 */
3778 /*
3779 * XXX add us to a list to be run from free_ccb
3780 * if we don't have any ccbs active on this
3781 * device queue otherwise we may never get run
3782 * again.
3783 */
3784 break;
3785 }
3786
3787 /* Raise IPL for possible insertion and test at top of loop */
3788 s = splsoftcam();
3789
3790 if (drvq->entries > 0) {
3791 /* We have more work. Attempt to reschedule */
3792 xpt_schedule_dev_allocq(bus, device);
3793 }
3794 }
3795 devq->alloc_queue.qfrozen_cnt--;
3796 splx(s);
3797}
3798
3799static void
3800xpt_run_dev_sendq(struct cam_eb *bus)
3801{
3802 struct cam_devq *devq;
3803 int s;
3804
3805 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3806
3807 devq = bus->sim->devq;
3808
3809 s = splcam();
3810 devq->send_queue.qfrozen_cnt++;
3811 splx(s);
3812 s = splsoftcam();
3813 while ((devq->send_queue.entries > 0)
3814 && (devq->send_openings > 0)) {
3815 struct cam_ed_qinfo *qinfo;
3816 struct cam_ed *device;
3817 union ccb *work_ccb;
3818 struct cam_sim *sim;
3819 int ospl;
3820
3821 ospl = splcam();
3822 if (devq->send_queue.qfrozen_cnt > 1) {
3823 splx(ospl);
3824 break;
3825 }
3826
3827 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3828 CAMQ_HEAD);
3829 device = qinfo->device;
3830
3831 /*
3832 * If the device has been "frozen", don't attempt
3833 * to run it.
3834 */
3835 if (device->qfrozen_cnt > 0) {
3836 splx(ospl);
3837 continue;
3838 }
3839
3840 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3841 ("running device %p\n", device));
3842
3843 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3844 if (work_ccb == NULL) {
3845 printf("device on run queue with no ccbs???\n");
3846 splx(ospl);
3847 continue;
3848 }
3849
3850 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3851
3852 if (num_highpower <= 0) {
3853 /*
3854 * We got a high power command, but we
3855 * don't have any available slots. Freeze
3856 * the device queue until we have a slot
3857 * available.
3858 */
3859 device->qfrozen_cnt++;
3860 STAILQ_INSERT_TAIL(&highpowerq,
3861 &work_ccb->ccb_h,
3862 xpt_links.stqe);
3863
3864 splx(ospl);
3865 continue;
3866 } else {
3867 /*
3868 * Consume a high power slot while
3869 * this ccb runs.
3870 */
3871 num_highpower--;
3872 }
3873 }
3874 devq->active_dev = device;
3875 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3876
3877 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3878 splx(ospl);
3879
3880 devq->send_openings--;
3881 devq->send_active++;
3882
3883 if (device->ccbq.queue.entries > 0)
3884 xpt_schedule_dev_sendq(bus, device);
3885
3886 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3887 /*
3888 * The client wants to freeze the queue
3889 * after this CCB is sent.
3890 */
3891 ospl = splcam();
3892 device->qfrozen_cnt++;
3893 splx(ospl);
3894 }
3895
3896 splx(s);
3897
3898 /* In Target mode, the peripheral driver knows best... */
3899 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3900 if ((device->inq_flags & SID_CmdQue) != 0
3901 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3902 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3903 else
3904 /*
3905 * Clear this in case of a retried CCB that
3906 * failed due to a rejected tag.
3907 */
3908 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3909 }
3910
3911 /*
3912 * Device queues can be shared among multiple sim instances
3913 * that reside on different busses. Use the SIM in the queue
3914 * CCB's path, rather than the one in the bus that was passed
3915 * into this function.
3916 */
3917 sim = work_ccb->ccb_h.path->bus->sim;
3918 (*(sim->sim_action))(sim, work_ccb);
3919
3920 ospl = splcam();
3921 devq->active_dev = NULL;
3922 splx(ospl);
3923 /* Raise IPL for possible insertion and test at top of loop */
3924 s = splsoftcam();
3925 }
3926 splx(s);
3927 s = splcam();
3928 devq->send_queue.qfrozen_cnt--;
3929 splx(s);
3930}
3931
3932/*
3933 * This function merges stuff from the slave ccb into the master ccb, while
3934 * keeping important fields in the master ccb constant.
3935 */
3936void
3937xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3938{
3939 /*
3940 * Pull fields that are valid for peripheral drivers to set
3941 * into the master CCB along with the CCB "payload".
3942 */
3943 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3944 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3945 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3946 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
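	/*
	 * &(&ccb_h)[1] points to the first byte after the CCB header,
	 * so this bcopy transfers the remainder of the CCB (the
	 * "payload") verbatim.
	 */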
3947 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3948 sizeof(union ccb) - sizeof(struct ccb_hdr));
3949}
3950
3951void
3952xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3953{
3954 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3955 ccb_h->pinfo.priority = priority;
3956 ccb_h->path = path;
3957 ccb_h->path_id = path->bus->path_id;
3958 if (path->target)
3959 ccb_h->target_id = path->target->target_id;
3960 else
3961 ccb_h->target_id = CAM_TARGET_WILDCARD;
3962 if (path->device) {
3963 ccb_h->target_lun = path->device->lun_id;
3964 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3965 } else {
3966		ccb_h->target_lun = CAM_LUN_WILDCARD;
3967 }
3968 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3969 ccb_h->flags = 0;
3970}
3971
3972/* Path manipulation functions */
3973cam_status
3974xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3975 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3976{
3977 struct cam_path *path;
3978 cam_status status;
3979
3980 path = (struct cam_path *)malloc(sizeof(*path), M_DEVBUF, M_NOWAIT);
3981
3982 if (path == NULL) {
3983 status = CAM_RESRC_UNAVAIL;
3984 return(status);
3985 }
3986 status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3987 if (status != CAM_REQ_CMP) {
3988 free(path, M_DEVBUF);
3989 path = NULL;
3990 }
3991 *new_path_ptr = path;
3992 return (status);
3993}
3994
3995static cam_status
3996xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3997 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3998{
3999 struct cam_eb *bus;
4000 struct cam_et *target;
4001 struct cam_ed *device;
4002 cam_status status;
4003 int s;
4004
4005 status = CAM_REQ_CMP; /* Completed without error */
4006 target = NULL; /* Wildcarded */
4007 device = NULL; /* Wildcarded */
4008
4009 /*
4010 * We will potentially modify the EDT, so block interrupts
4011 * that may attempt to create cam paths.
4012 */
4013 s = splcam();
4014 bus = xpt_find_bus(path_id);
4015 if (bus == NULL) {
4016 status = CAM_PATH_INVALID;
4017 } else {
4018 target = xpt_find_target(bus, target_id);
4019 if (target == NULL) {
4020 /* Create one */
4021 struct cam_et *new_target;
4022
4023 new_target = xpt_alloc_target(bus, target_id);
4024 if (new_target == NULL) {
4025 status = CAM_RESRC_UNAVAIL;
4026 } else {
4027 target = new_target;
4028 }
4029 }
4030 if (target != NULL) {
4031 device = xpt_find_device(target, lun_id);
4032 if (device == NULL) {
4033 /* Create one */
4034 struct cam_ed *new_device;
4035
4036 new_device = xpt_alloc_device(bus,
4037 target,
4038 lun_id);
4039 if (new_device == NULL) {
4040 status = CAM_RESRC_UNAVAIL;
4041 } else {
4042 device = new_device;
4043 }
4044 }
4045 }
4046 }
4047 splx(s);
4048
4049 /*
4050 * Only touch the user's data if we are successful.
4051 */
4052 if (status == CAM_REQ_CMP) {
4053 new_path->periph = perph;
4054 new_path->bus = bus;
4055 new_path->target = target;
4056 new_path->device = device;
4057 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
4058 } else {
4059 if (device != NULL)
4060 xpt_release_device(bus, target, device);
4061 if (target != NULL)
4062 xpt_release_target(bus, target);
4063 if (bus != NULL)
4064 xpt_release_bus(bus);
4065 }
4066 return (status);
4067}
4068
4069static void
4070xpt_release_path(struct cam_path *path)
4071{
4072 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
4073 if (path->device != NULL) {
4074 xpt_release_device(path->bus, path->target, path->device);
4075 path->device = NULL;
4076 }
4077 if (path->target != NULL) {
4078 xpt_release_target(path->bus, path->target);
4079 path->target = NULL;
4080 }
4081 if (path->bus != NULL) {
4082 xpt_release_bus(path->bus);
4083 path->bus = NULL;
4084 }
4085}
4086
4087void
4088xpt_free_path(struct cam_path *path)
4089{
4090 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
4091 xpt_release_path(path);
4092 free(path, M_DEVBUF);
4093}
4094
4095
4096/*
4097 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
4098 * in path1, 2 for match with wildcards in path2.
4099 */
4100int
4101xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
4102{
4103 int retval = 0;
4104
4105 if (path1->bus != path2->bus) {
4106 if (path1->bus->path_id == CAM_BUS_WILDCARD)
4107 retval = 1;
4108 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
4109 retval = 2;
4110 else
4111 return (-1);
4112 }
4113 if (path1->target != path2->target) {
4114 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
4115 if (retval == 0)
4116 retval = 1;
4117 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
4118 retval = 2;
4119 else
4120 return (-1);
4121 }
4122 if (path1->device != path2->device) {
4123 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
4124 if (retval == 0)
4125 retval = 1;
4126 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
4127 retval = 2;
4128 else
4129 return (-1);
4130 }
4131 return (retval);
4132}
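
/*
 * Example (sketch): a typical use is deciding whether a (possibly
 * wildcarded) async event path applies to a peripheral's own path;
 * any non-negative return indicates an overlap.  "xpt_path_overlaps"
 * is hypothetical.
 */
#ifdef notusedyet
static int
xpt_path_overlaps(struct cam_path *event_path, struct cam_path *my_path)
{
	return(xpt_path_comp(event_path, my_path) >= 0);
}
#endif /* notusedyet */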
4133
4134void
4135xpt_print_path(struct cam_path *path)
4136{
4137 if (path == NULL)
4138 printf("(nopath): ");
4139 else {
4140 if (path->periph != NULL)
4141 printf("(%s%d:", path->periph->periph_name,
4142 path->periph->unit_number);
4143 else
4144 printf("(noperiph:");
4145
4146 if (path->bus != NULL)
4147 printf("%s%d:%d:", path->bus->sim->sim_name,
4148 path->bus->sim->unit_number,
4149 path->bus->sim->bus_id);
4150 else
4151 printf("nobus:");
4152
4153 if (path->target != NULL)
4154 printf("%d:", path->target->target_id);
4155 else
4156 printf("X:");
4157
4158 if (path->device != NULL)
4159 printf("%d): ", path->device->lun_id);
4160 else
4161 printf("X): ");
4162 }
4163}
4164
4165int
4166xpt_path_string(struct cam_path *path, char *str, size_t str_len)
4167{
4168 struct sbuf sb;
4169
4170 sbuf_new(&sb, str, str_len, 0);
4171
4172 if (path == NULL)
4173 sbuf_printf(&sb, "(nopath): ");
4174 else {
4175 if (path->periph != NULL)
4176 sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
4177 path->periph->unit_number);
4178 else
4179 sbuf_printf(&sb, "(noperiph:");
4180
4181 if (path->bus != NULL)
4182 sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
4183 path->bus->sim->unit_number,
4184 path->bus->sim->bus_id);
4185 else
4186 sbuf_printf(&sb, "nobus:");
4187
4188 if (path->target != NULL)
4189 sbuf_printf(&sb, "%d:", path->target->target_id);
4190 else
4191 sbuf_printf(&sb, "X:");
4192
4193 if (path->device != NULL)
4194 sbuf_printf(&sb, "%d): ", path->device->lun_id);
4195 else
4196 sbuf_printf(&sb, "X): ");
4197 }
4198 sbuf_finish(&sb);
4199
4200 return(sbuf_len(&sb));
4201}
4202
4203path_id_t
4204xpt_path_path_id(struct cam_path *path)
4205{
4206 return(path->bus->path_id);
4207}
4208
4209target_id_t
4210xpt_path_target_id(struct cam_path *path)
4211{
4212 if (path->target != NULL)
4213 return (path->target->target_id);
4214 else
4215 return (CAM_TARGET_WILDCARD);
4216}
4217
4218lun_id_t
4219xpt_path_lun_id(struct cam_path *path)
4220{
4221 if (path->device != NULL)
4222 return (path->device->lun_id);
4223 else
4224 return (CAM_LUN_WILDCARD);
4225}
4226
4227struct cam_sim *
4228xpt_path_sim(struct cam_path *path)
4229{
4230 return (path->bus->sim);
4231}
4232
4233struct cam_periph*
4234xpt_path_periph(struct cam_path *path)
4235{
4236 return (path->periph);
4237}
4238
4239/*
4240 * Release a CAM control block for the caller. Remit the cost of the structure
4241 * to the device referenced by the path.  If this device had no 'credits'
4242 * and peripheral drivers have registered async callbacks for this
4243 * notification, call them now.
4244 */
4245void
4246xpt_release_ccb(union ccb *free_ccb)
4247{
4248 int s;
4249 struct cam_path *path;
4250 struct cam_ed *device;
4251 struct cam_eb *bus;
4252
4253 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4254 path = free_ccb->ccb_h.path;
4255 device = path->device;
4256 bus = path->bus;
4257 s = splsoftcam();
4258 cam_ccbq_release_opening(&device->ccbq);
4259 if (xpt_ccb_count > xpt_max_ccbs) {
4260 xpt_free_ccb(free_ccb);
4261 xpt_ccb_count--;
4262 } else {
4263 SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
4264 }
4265 bus->sim->devq->alloc_openings++;
4266 bus->sim->devq->alloc_active--;
4267 /* XXX Turn this into an inline function - xpt_run_device?? */
4268 if ((device_is_alloc_queued(device) == 0)
4269 && (device->drvq.entries > 0)) {
4270 xpt_schedule_dev_allocq(bus, device);
4271 }
4272 splx(s);
4273 if (dev_allocq_is_runnable(bus->sim->devq))
4274 xpt_run_dev_allocq(bus);
4275}
4276
4277/* Functions accessed by SIM drivers */
4278
4279/*
4280 * A sim structure, listing the SIM entry points and instance
4281 * identification info is passed to xpt_bus_register to hook the SIM
4282 * into the CAM framework. xpt_bus_register creates a cam_eb entry
4283 * for this new bus, places it in the list of busses, and assigns
4284 * it a path_id.  The path_id may be influenced by "hard wiring"
4285 * information specified by the user.  Once interrupt services are
4286 * available, the bus will be probed.
4287 */
4288int32_t
4289xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4290{
4291 struct cam_eb *new_bus;
4292 struct cam_eb *old_bus;
4293 struct ccb_pathinq cpi;
4294 int s;
4295
4296 sim->bus_id = bus;
4297 new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
4298 M_DEVBUF, M_NOWAIT);
4299 if (new_bus == NULL) {
4300 /* Couldn't satisfy request */
4301 return (CAM_RESRC_UNAVAIL);
4302 }
4303
4304 if (strcmp(sim->sim_name, "xpt") != 0) {
4305
4306 sim->path_id =
4307 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4308 }
4309
4310 TAILQ_INIT(&new_bus->et_entries);
4311 new_bus->path_id = sim->path_id;
4312 new_bus->sim = sim;
4313 timevalclear(&new_bus->last_reset);
4314 new_bus->flags = 0;
4315 new_bus->refcount = 1; /* Held until a bus_deregister event */
4316 new_bus->generation = 0;
4317 s = splcam();
4318 old_bus = TAILQ_FIRST(&xpt_busses);
4319 while (old_bus != NULL
4320 && old_bus->path_id < new_bus->path_id)
4321 old_bus = TAILQ_NEXT(old_bus, links);
4322 if (old_bus != NULL)
4323 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4324 else
4325 TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
4326 bus_generation++;
4327 splx(s);
4328
4329 /* Notify interested parties */
4330 if (sim->path_id != CAM_XPT_PATH_ID) {
4331 struct cam_path path;
4332
4333 xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4334 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4335 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4336 cpi.ccb_h.func_code = XPT_PATH_INQ;
4337 xpt_action((union ccb *)&cpi);
4338 xpt_async(AC_PATH_REGISTERED, &path, &cpi);
4339 xpt_release_path(&path);
4340 }
4341 return (CAM_SUCCESS);
4342}
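
/*
 * Example (sketch): a SIM driver's attach routine typically allocates
 * a devq and a sim before registering the bus.  The "foo" softc and
 * its fooaction/foopoll entry points are hypothetical, and the
 * cam_sim_alloc() argument list shown is assumed from cam_sim.h.
 */
#ifdef notusedyet
static int
foo_attach_cam(struct foo_softc *softc)
{
	struct cam_devq *devq;
	struct cam_sim *sim;

	/* Queue space for up to 32 simultaneous transactions. */
	devq = cam_simq_alloc(/*max_sim_transactions*/32);
	if (devq == NULL)
		return (ENOMEM);

	sim = cam_sim_alloc(fooaction, foopoll, "foo", softc,
			    /*unit*/0, /*max_dev_transactions*/1,
			    /*max_tagged_dev_transactions*/32, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		return (ENOMEM);
	}

	if (xpt_bus_register(sim, /*bus*/0) != CAM_SUCCESS) {
		cam_sim_free(sim, /*free_devq*/TRUE);
		return (ENXIO);
	}
	return (0);
}
#endif /* notusedyet */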
4343
4344int32_t
4345xpt_bus_deregister(path_id_t pathid)
4346{
4347 struct cam_path bus_path;
4348 cam_status status;
4349
4350 status = xpt_compile_path(&bus_path, NULL, pathid,
4351 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4352 if (status != CAM_REQ_CMP)
4353 return (status);
4354
4355 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4356 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4357
4358 /* Release the reference count held while registered. */
4359 xpt_release_bus(bus_path.bus);
4360 xpt_release_path(&bus_path);
4361
4362 return (CAM_REQ_CMP);
4363}
4364
4365static path_id_t
4366xptnextfreepathid(void)
4367{
4368 struct cam_eb *bus;
4369 path_id_t pathid;
4370 const char *strval;
4371
4372 pathid = 0;
4373 bus = TAILQ_FIRST(&xpt_busses);
4374retry:
4375 /* Find an unoccupied pathid */
4376 while (bus != NULL
4377 && bus->path_id <= pathid) {
4378 if (bus->path_id == pathid)
4379 pathid++;
4380 bus = TAILQ_NEXT(bus, links);
4381 }
4382
4383 /*
4384 * Ensure that this pathid is not reserved for
4385 * a bus that may be registered in the future.
4386 */
4387 if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4388 ++pathid;
4389 /* Start the search over */
4390 goto retry;
4391 }
4392 return (pathid);
4393}
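
/*
 * Example: a hints entry such as 'hint.scbus.2.at="ahc1"' makes the
 * resource_string_value() check above succeed for pathid 2, so
 * dynamic assignment skips that pathid.
 */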
4394
4395static path_id_t
4396xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4397{
4398 path_id_t pathid;
4399 int i, dunit, val;
4400 char buf[32];
4401 const char *dname;
4402
4403 pathid = CAM_XPT_PATH_ID;
4404 snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4405 i = 0;
4406 while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4407 if (strcmp(dname, "scbus")) {
4408 /* Avoid a bit of foot shooting. */
4409 continue;
4410 }
4411 if (dunit < 0) /* unwired?! */
4412 continue;
4413 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4414 if (sim_bus == val) {
4415 pathid = dunit;
4416 break;
4417 }
4418 } else if (sim_bus == 0) {
4419 /* Unspecified matches bus 0 */
4420 pathid = dunit;
4421 break;
4422 } else {
4423 printf("Ambiguous scbus configuration for %s%d "
4424 "bus %d, cannot wire down. The kernel "
4425 "config entry for scbus%d should "
4426 "specify a controller bus.\n"
4427 "Scbus will be assigned dynamically.\n",
4428 sim_name, sim_unit, sim_bus, dunit);
4429 break;
4430 }
4431 }
4432
4433 if (pathid == CAM_XPT_PATH_ID)
4434 pathid = xptnextfreepathid();
4435 return (pathid);
4436}
4437
4438void
4439xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4440{
4441 struct cam_eb *bus;
4442 struct cam_et *target, *next_target;
4443 struct cam_ed *device, *next_device;
4444 int s;
4445
4446 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4447
4448 /*
4449 * Most async events come from a CAM interrupt context. In
4450 * a few cases, the error recovery code at the peripheral layer,
4451 * which may run from our SWI or a process context, may signal
4452 * deferred events with a call to xpt_async. Ensure async
4453 * notifications are serialized by blocking cam interrupts.
4454 */
4455 s = splcam();
4456
4457 bus = path->bus;
4458
4459 if (async_code == AC_BUS_RESET) {
4460 int s;
4461
4462 s = splclock();
4463 /* Update our notion of when the last reset occurred */
4464 microtime(&bus->last_reset);
4465 splx(s);
4466 }
4467
4468 for (target = TAILQ_FIRST(&bus->et_entries);
4469 target != NULL;
4470 target = next_target) {
4471
4472 next_target = TAILQ_NEXT(target, links);
4473
4474 if (path->target != target
4475 && path->target->target_id != CAM_TARGET_WILDCARD
4476 && target->target_id != CAM_TARGET_WILDCARD)
4477 continue;
4478
4479 if (async_code == AC_SENT_BDR) {
4480 int s;
4481
4482 /* Update our notion of when the last reset occurred */
4483 s = splclock();
4484 microtime(&path->target->last_reset);
4485 splx(s);
4486 }
4487
4488 for (device = TAILQ_FIRST(&target->ed_entries);
4489 device != NULL;
4490 device = next_device) {
4491
4492 next_device = TAILQ_NEXT(device, links);
4493
4494 if (path->device != device
4495 && path->device->lun_id != CAM_LUN_WILDCARD
4496 && device->lun_id != CAM_LUN_WILDCARD)
4497 continue;
4498
4499 xpt_dev_async(async_code, bus, target,
4500 device, async_arg);
4501
4502 xpt_async_bcast(&device->asyncs, async_code,
4503 path, async_arg);
4504 }
4505 }
4506
4507 /*
4508 * If this wasn't a fully wildcarded async, tell all
4509 * clients that want all async events.
4510 */
4511 if (bus != xpt_periph->path->bus)
4512 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4513 path, async_arg);
4514 splx(s);
4515}
4516
4517static void
4518xpt_async_bcast(struct async_list *async_head,
4519 u_int32_t async_code,
4520 struct cam_path *path, void *async_arg)
4521{
4522 struct async_node *cur_entry;
4523
4524 cur_entry = SLIST_FIRST(async_head);
4525 while (cur_entry != NULL) {
4526 struct async_node *next_entry;
4527 /*
4528 * Grab the next list entry before we call the current
4529 * entry's callback. This is because the callback function
4530 * can delete its async callback entry.
4531 */
4532 next_entry = SLIST_NEXT(cur_entry, links);
4533 if ((cur_entry->event_enable & async_code) != 0)
4534 cur_entry->callback(cur_entry->callback_arg,
4535 async_code, path,
4536 async_arg);
4537 cur_entry = next_entry;
4538 }
4539}
4540
4541/*
4542 * Handle any per-device event notifications that require action by the XPT.
4543 */
4544static void
4545xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
4546 struct cam_ed *device, void *async_arg)
4547{
4548 cam_status status;
4549 struct cam_path newpath;
4550
4551 /*
4552 * We only need to handle events for real devices.
4553 */
4554 if (target->target_id == CAM_TARGET_WILDCARD
4555 || device->lun_id == CAM_LUN_WILDCARD)
4556 return;
4557
4558 /*
4559 * We need our own path with wildcards expanded to
4560 * handle certain types of events.
4561 */
4562 if ((async_code == AC_SENT_BDR)
4563 || (async_code == AC_BUS_RESET)
4564 || (async_code == AC_INQ_CHANGED))
4565 status = xpt_compile_path(&newpath, NULL,
4566 bus->path_id,
4567 target->target_id,
4568 device->lun_id);
4569 else
4570 status = CAM_REQ_CMP_ERR;
4571
4572 if (status == CAM_REQ_CMP) {
4573
4574 /*
4575 * Allow transfer negotiation to occur in a
4576 * tag free environment.
4577 */
4578 if (async_code == AC_SENT_BDR
4579 || async_code == AC_BUS_RESET)
4580 xpt_toggle_tags(&newpath);
4581
4582 if (async_code == AC_INQ_CHANGED) {
4583 /*
4584 * We've sent a start unit command, or
4585 * something similar to a device that
4586 * may have caused its inquiry data to
4587 * change. So we re-scan the device to
4588 * refresh the inquiry data for it.
4589 */
4590 xpt_scan_lun(newpath.periph, &newpath,
4591 CAM_EXPECT_INQ_CHANGE, NULL);
4592 }
4593 xpt_release_path(&newpath);
4594 } else if (async_code == AC_LOST_DEVICE) {
4595 device->flags |= CAM_DEV_UNCONFIGURED;
4596 } else if (async_code == AC_TRANSFER_NEG) {
4597 struct ccb_trans_settings *settings;
4598
4599 settings = (struct ccb_trans_settings *)async_arg;
4600 xpt_set_transfer_settings(settings, device,
4601 /*async_update*/TRUE);
4602 }
4603}
4604
4605u_int32_t
4606xpt_freeze_devq(struct cam_path *path, u_int count)
4607{
4608 int s;
4609 struct ccb_hdr *ccbh;
4610
4611 s = splcam();
4612 path->device->qfrozen_cnt += count;
4613
4614 /*
4615	 * Mark the last CCB in the queue as needing
4616	 * to be requeued if the driver hasn't
4617	 * changed its state yet.  This fixes a race
4618	 * where a ccb is just about to be queued to
4619	 * a controller driver when its interrupt routine
4620	 * freezes the queue.  To completely close the
4621	 * hole, controller drivers must check to see
4622	 * if a ccb's status is still CAM_REQ_INPROG
4623	 * under spl protection just before they queue
4624	 * the CCB.  See ahc_action/ahc_freeze_devq for
4625	 * an example.
4626 */
4627 ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4628 if (ccbh && ccbh->status == CAM_REQ_INPROG)
4629 ccbh->status = CAM_REQUEUE_REQ;
4630 splx(s);
4631 return (path->device->qfrozen_cnt);
4632}
4633
4634u_int32_t
4635xpt_freeze_simq(struct cam_sim *sim, u_int count)
4636{
4637 sim->devq->send_queue.qfrozen_cnt += count;
4638 if (sim->devq->active_dev != NULL) {
4639 struct ccb_hdr *ccbh;
4640
4641 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4642 ccb_hdr_tailq);
4643 if (ccbh && ccbh->status == CAM_REQ_INPROG)
4644 ccbh->status = CAM_REQUEUE_REQ;
4645 }
4646 return (sim->devq->send_queue.qfrozen_cnt);
4647}
4648
4649static void
4650xpt_release_devq_timeout(void *arg)
4651{
4652 struct cam_ed *device;
4653
4654 device = (struct cam_ed *)arg;
4655
4656 xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4657}
4658
4659void
4660xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4661{
4662 xpt_release_devq_device(path->device, count, run_queue);
4663}
4664
4665static void
4666xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4667{
4668 int rundevq;
4669 int s0, s1;
4670
4671 rundevq = 0;
4672 s0 = splsoftcam();
4673 s1 = splcam();
4674 if (dev->qfrozen_cnt > 0) {
4675
4676 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4677 dev->qfrozen_cnt -= count;
4678 if (dev->qfrozen_cnt == 0) {
4679
4680 /*
4681 * No longer need to wait for a successful
4682 * command completion.
4683 */
4684 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4685
4686 /*
4687 * Remove any timeouts that might be scheduled
4688 * to release this queue.
4689 */
4690 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4691 untimeout(xpt_release_devq_timeout, dev,
4692 dev->c_handle);
4693 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4694 }
4695
4696 /*
4697 * Now that we are unfrozen schedule the
4698 * device so any pending transactions are
4699 * run.
4700 */
4701 if ((dev->ccbq.queue.entries > 0)
4702 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4703 && (run_queue != 0)) {
4704 rundevq = 1;
4705 }
4706 }
4707 }
4708 splx(s1);
4709 if (rundevq != 0)
4710 xpt_run_dev_sendq(dev->target->bus);
4711 splx(s0);
4712}
4713
4714void
4715xpt_release_simq(struct cam_sim *sim, int run_queue)
4716{
4717 int s;
4718 struct camq *sendq;
4719
4720 sendq = &(sim->devq->send_queue);
4721 s = splcam();
4722 if (sendq->qfrozen_cnt > 0) {
4723
4724 sendq->qfrozen_cnt--;
4725 if (sendq->qfrozen_cnt == 0) {
4726 struct cam_eb *bus;
4727
4728 /*
4729 * If there is a timeout scheduled to release this
4730 * sim queue, remove it. The queue frozen count is
4731 * already at 0.
4732 */
4733 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4734 untimeout(xpt_release_simq_timeout, sim,
4735 sim->c_handle);
4736 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4737 }
4738 bus = xpt_find_bus(sim->path_id);
4739 splx(s);
4740
4741 if (run_queue) {
4742 /*
4743 * Now that we are unfrozen run the send queue.
4744 */
4745 xpt_run_dev_sendq(bus);
4746 }
4747 xpt_release_bus(bus);
4748 } else
4749 splx(s);
4750 } else
4751 splx(s);
4752}
4753
4754static void
4755xpt_release_simq_timeout(void *arg)
4756{
4757 struct cam_sim *sim;
4758
4759 sim = (struct cam_sim *)arg;
4760 xpt_release_simq(sim, /* run_queue */ TRUE);
4761}
4762
4763void
4764xpt_done(union ccb *done_ccb)
4765{
4766 int s;
4767
4768 s = splcam();
4769
4770 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4771 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4772 /*
4773 * Queue up the request for handling by our SWI handler
4774		 * for any of the "non-immediate" types of ccbs.
4775 */
4776 switch (done_ccb->ccb_h.path->periph->type) {
4777 case CAM_PERIPH_BIO:
4778 TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
4779 sim_links.tqe);
4780 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4781 swi_sched(cambio_ih, 0);
4782 break;
4783 case CAM_PERIPH_NET:
4784 TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h,
4785 sim_links.tqe);
4786 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4787 swi_sched(camnet_ih, 0);
4788 break;
4789 }
4790 }
4791 splx(s);
4792}
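
/*
 * Illustrative sketch (not compiled): a SIM completing an I/O from its
 * interrupt handler.  "foo_intr_complete" and "hw_error" are
 * hypothetical; the CAM_DEV_QFRZN/xpt_freeze_devq pairing mirrors the
 * recovery idiom used elsewhere in this file.
 */
#if 0
static void
foo_intr_complete(union ccb *ccb, int hw_error)
{
	if (hw_error == 0) {
		ccb->ccb_h.status = CAM_REQ_CMP;
	} else {
		/* Freeze the devq so error recovery can run first. */
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR | CAM_DEV_QFRZN;
	}
	xpt_done(ccb);	/* queue onto cam_bioq/cam_netq and kick the SWI */
}
#endif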
4793
4794union ccb *
4795xpt_alloc_ccb()
4796{
4797 union ccb *new_ccb;
4798
4799 new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_WAITOK);
4800 return (new_ccb);
4801}
4802
4803void
4804xpt_free_ccb(union ccb *free_ccb)
4805{
4806 free(free_ccb, M_DEVBUF);
4807}
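
/*
 * Illustrative sketch (not compiled): a one-shot, immediate XPT_PATH_INQ
 * issued with the ccb allocator above (mirrors the usage in
 * xpt_scan_bus() below).  XPT_PATH_INQ is not a queued function code, so
 * xpt_action() completes it synchronously.  "path" is assumed to be a
 * valid cam_path held by the caller.
 */
#if 0
	union ccb *ccb;

	ccb = xpt_alloc_ccb();		/* M_WAITOK; may sleep */
	xpt_setup_ccb(&ccb->ccb_h, path, /*priority*/1);
	ccb->ccb_h.func_code = XPT_PATH_INQ;
	xpt_action(ccb);
	if (ccb->ccb_h.status == CAM_REQ_CMP)
		printf("max_target %d\n", ccb->cpi.max_target);
	xpt_free_ccb(ccb);
#endif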
4808
4809
4810
4811/* Private XPT functions */
4812
4813/*
4814 * Get a CAM control block for the caller. Charge the structure to the device
4815 * referenced by the path. If this device has no 'credits' then the
4816 * device already has the maximum number of outstanding operations under way
4817 * and we return NULL. If we don't have sufficient resources to allocate more
4818 * ccbs, we also return NULL.
4819 */
4820static union ccb *
4821xpt_get_ccb(struct cam_ed *device)
4822{
4823 union ccb *new_ccb;
4824 int s;
4825
4826 s = splsoftcam();
4827 if ((new_ccb = (union ccb *)SLIST_FIRST(&ccb_freeq)) == NULL) {
4828 new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_NOWAIT);
4829 if (new_ccb == NULL) {
4830 splx(s);
4831 return (NULL);
4832 }
4833 callout_handle_init(&new_ccb->ccb_h.timeout_ch);
4834 SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
4835 xpt_links.sle);
4836 xpt_ccb_count++;
4837 }
4838 cam_ccbq_take_opening(&device->ccbq);
4839 SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
4840 splx(s);
4841 return (new_ccb);
4842}
4843
4844static void
4845xpt_release_bus(struct cam_eb *bus)
4846{
4847 int s;
4848
4849 s = splcam();
4850 if ((--bus->refcount == 0)
4851 && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
4852 TAILQ_REMOVE(&xpt_busses, bus, links);
4853 bus_generation++;
4854 splx(s);
4855 free(bus, M_DEVBUF);
4856 } else
4857 splx(s);
4858}
4859
4860static struct cam_et *
4861xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4862{
4863 struct cam_et *target;
4864
4865 target = (struct cam_et *)malloc(sizeof(*target), M_DEVBUF, M_NOWAIT);
4866 if (target != NULL) {
4867 struct cam_et *cur_target;
4868
4869 TAILQ_INIT(&target->ed_entries);
4870 target->bus = bus;
4871 target->target_id = target_id;
4872 target->refcount = 1;
4873 target->generation = 0;
4874 timevalclear(&target->last_reset);
4875 /*
4876 * Hold a reference to our parent bus so it
4877 * will not go away before we do.
4878 */
4879 bus->refcount++;
4880
4881 /* Insertion sort into our bus's target list */
4882 cur_target = TAILQ_FIRST(&bus->et_entries);
4883 while (cur_target != NULL && cur_target->target_id < target_id)
4884 cur_target = TAILQ_NEXT(cur_target, links);
4885
4886 if (cur_target != NULL) {
4887 TAILQ_INSERT_BEFORE(cur_target, target, links);
4888 } else {
4889 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4890 }
4891 bus->generation++;
4892 }
4893 return (target);
4894}
4895
4896static void
4897xpt_release_target(struct cam_eb *bus, struct cam_et *target)
4898{
4899 int s;
4900
4901 s = splcam();
4902 if ((--target->refcount == 0)
4903 && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
4904 TAILQ_REMOVE(&bus->et_entries, target, links);
4905 bus->generation++;
4906 splx(s);
4907 free(target, M_DEVBUF);
4908 xpt_release_bus(bus);
4909 } else
4910 splx(s);
4911}
4912
4913static struct cam_ed *
4914xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4915{
4916#ifdef CAM_NEW_TRAN_CODE
4917 struct cam_path path;
4918#endif /* CAM_NEW_TRAN_CODE */
4919 struct cam_ed *device;
4920 struct cam_devq *devq;
4921 cam_status status;
4922
4923 /* Make space for us in the device queue on our bus */
4924 devq = bus->sim->devq;
4925 status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
4926
4927 if (status != CAM_REQ_CMP) {
4928 device = NULL;
4929 } else {
4930 device = (struct cam_ed *)malloc(sizeof(*device),
4931 M_DEVBUF, M_NOWAIT);
4932 }
4933
4934 if (device != NULL) {
4935 struct cam_ed *cur_device;
4936
4937 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
4938 device->alloc_ccb_entry.device = device;
4939 cam_init_pinfo(&device->send_ccb_entry.pinfo);
4940 device->send_ccb_entry.device = device;
4941 device->target = target;
4942 device->lun_id = lun_id;
4943 /* Initialize our queues */
4944 if (camq_init(&device->drvq, 0) != 0) {
4945 free(device, M_DEVBUF);
4946 return (NULL);
4947 }
4948 if (cam_ccbq_init(&device->ccbq,
4949 bus->sim->max_dev_openings) != 0) {
4950 camq_fini(&device->drvq);
4951 free(device, M_DEVBUF);
4952 return (NULL);
4953 }
4954 SLIST_INIT(&device->asyncs);
4955 SLIST_INIT(&device->periphs);
4956 device->generation = 0;
4957 device->owner = NULL;
4958 /*
4959 * Take the default quirk entry until we have inquiry
4960 * data and can determine a better quirk to use.
4961 */
4962 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
4963 bzero(&device->inq_data, sizeof(device->inq_data));
4964 device->inq_flags = 0;
4965 device->queue_flags = 0;
4966 device->serial_num = NULL;
4967 device->serial_num_len = 0;
4968 device->qfrozen_cnt = 0;
4969 device->flags = CAM_DEV_UNCONFIGURED;
4970 device->tag_delay_count = 0;
4971 device->refcount = 1;
4972 callout_handle_init(&device->c_handle);
4973
4974 /*
4975 * Hold a reference to our parent target so it
4976 * will not go away before we do.
4977 */
4978 target->refcount++;
4979
4980 /*
4981 * XXX should be limited by number of CCBs this bus can
4982 * do.
4983 */
4984 xpt_max_ccbs += device->ccbq.devq_openings;
4985 /* Insertion sort into our target's device list */
4986 cur_device = TAILQ_FIRST(&target->ed_entries);
4987 while (cur_device != NULL && cur_device->lun_id < lun_id)
4988 cur_device = TAILQ_NEXT(cur_device, links);
4989 if (cur_device != NULL) {
4990 TAILQ_INSERT_BEFORE(cur_device, device, links);
4991 } else {
4992 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4993 }
4994 target->generation++;
4995#ifdef CAM_NEW_TRAN_CODE
4996 if (lun_id != CAM_LUN_WILDCARD) {
4997 xpt_compile_path(&path,
4998 NULL,
4999 bus->path_id,
5000 target->target_id,
5001 lun_id);
5002 xpt_devise_transport(&path);
5003 xpt_release_path(&path);
5004 }
5005#endif /* CAM_NEW_TRAN_CODE */
5006 }
5007 return (device);
5008}
5009
5010static void
5011xpt_release_device(struct cam_eb *bus, struct cam_et *target,
5012 struct cam_ed *device)
5013{
5014 int s;
5015
5016 s = splcam();
5017 if ((--device->refcount == 0)
5018 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
5019 struct cam_devq *devq;
5020
5021 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
5022 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
5023 panic("Removing device while still queued for ccbs");
5024
5025 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
5026 untimeout(xpt_release_devq_timeout, device,
5027 device->c_handle);
5028
5029		TAILQ_REMOVE(&target->ed_entries, device, links);
5030 target->generation++;
5031 xpt_max_ccbs -= device->ccbq.devq_openings;
5032 /* Release our slot in the devq */
5033 devq = bus->sim->devq;
5034 cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
5035 splx(s);
5036 free(device, M_DEVBUF);
5037 xpt_release_target(bus, target);
5038 } else
5039 splx(s);
5040}
5041
5042static u_int32_t
5043xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
5044{
5045 int s;
5046 int diff;
5047 int result;
5048 struct cam_ed *dev;
5049
5050 dev = path->device;
5051 s = splsoftcam();
5052
5053 diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
5054 result = cam_ccbq_resize(&dev->ccbq, newopenings);
5055 if (result == CAM_REQ_CMP && (diff < 0)) {
5056 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
5057 }
5058 /* Adjust the global limit */
5059 xpt_max_ccbs += diff;
5060 splx(s);
5061 return (result);
5062}
5063
5064static struct cam_eb *
5065xpt_find_bus(path_id_t path_id)
5066{
5067 struct cam_eb *bus;
5068
5069 for (bus = TAILQ_FIRST(&xpt_busses);
5070 bus != NULL;
5071 bus = TAILQ_NEXT(bus, links)) {
5072 if (bus->path_id == path_id) {
5073 bus->refcount++;
5074 break;
5075 }
5076 }
5077 return (bus);
5078}
5079
5080static struct cam_et *
5081xpt_find_target(struct cam_eb *bus, target_id_t target_id)
5082{
5083 struct cam_et *target;
5084
5085 for (target = TAILQ_FIRST(&bus->et_entries);
5086 target != NULL;
5087 target = TAILQ_NEXT(target, links)) {
5088 if (target->target_id == target_id) {
5089 target->refcount++;
5090 break;
5091 }
5092 }
5093 return (target);
5094}
5095
5096static struct cam_ed *
5097xpt_find_device(struct cam_et *target, lun_id_t lun_id)
5098{
5099 struct cam_ed *device;
5100
5101 for (device = TAILQ_FIRST(&target->ed_entries);
5102 device != NULL;
5103 device = TAILQ_NEXT(device, links)) {
5104 if (device->lun_id == lun_id) {
5105 device->refcount++;
5106 break;
5107 }
5108 }
5109 return (device);
5110}
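
/*
 * Illustrative sketch (not compiled): the lookup discipline for the
 * xpt_find_* routines above.  Each successful lookup takes a reference
 * that the caller must drop with the matching xpt_release_* routine,
 * exactly as xpt_release_simq() pairs with xpt_find_bus().
 */
#if 0
	struct cam_eb *bus;

	bus = xpt_find_bus(path_id);
	if (bus != NULL) {
		/* ... use the bus ... */
		xpt_release_bus(bus);
	}
#endif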
5111
5112typedef struct {
5113 union ccb *request_ccb;
5114 struct ccb_pathinq *cpi;
5115 int pending_count;
5116} xpt_scan_bus_info;
5117
5118/*
5119 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
5120 * As the scan progresses, xpt_scan_bus is used as the
5121 * callback on completion function.
5122 */
5123static void
5124xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
5125{
5126 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5127 ("xpt_scan_bus\n"));
5128 switch (request_ccb->ccb_h.func_code) {
5129 case XPT_SCAN_BUS:
5130 {
5131 xpt_scan_bus_info *scan_info;
5132 union ccb *work_ccb;
5133 struct cam_path *path;
5134 u_int i;
5135 u_int max_target;
5136 u_int initiator_id;
5137
5138 /* Find out the characteristics of the bus */
5139 work_ccb = xpt_alloc_ccb();
5140 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
5141 request_ccb->ccb_h.pinfo.priority);
5142 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
5143 xpt_action(work_ccb);
5144 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
5145 request_ccb->ccb_h.status = work_ccb->ccb_h.status;
5146 xpt_free_ccb(work_ccb);
5147 xpt_done(request_ccb);
5148 return;
5149 }
5150
5151 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5152 /*
5153 * Can't scan the bus on an adapter that
5154 * cannot perform the initiator role.
5155 */
5156 request_ccb->ccb_h.status = CAM_REQ_CMP;
5157 xpt_free_ccb(work_ccb);
5158 xpt_done(request_ccb);
5159 return;
5160 }
5161
5162 /* Save some state for use while we probe for devices */
5163 scan_info = (xpt_scan_bus_info *)
5164 malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK);
5165 scan_info->request_ccb = request_ccb;
5166 scan_info->cpi = &work_ccb->cpi;
5167
5168 /* Cache on our stack so we can work asynchronously */
5169 max_target = scan_info->cpi->max_target;
5170 initiator_id = scan_info->cpi->initiator_id;
5171
5172 /*
5173 * Don't count the initiator if the
5174 * initiator is addressable.
5175 */
5176 scan_info->pending_count = max_target + 1;
5177 if (initiator_id <= max_target)
5178 scan_info->pending_count--;
5179
5180 for (i = 0; i <= max_target; i++) {
5181 cam_status status;
5182 if (i == initiator_id)
5183 continue;
5184
5185 status = xpt_create_path(&path, xpt_periph,
5186 request_ccb->ccb_h.path_id,
5187 i, 0);
5188 if (status != CAM_REQ_CMP) {
5189 printf("xpt_scan_bus: xpt_create_path failed"
5190 " with status %#x, bus scan halted\n",
5191 status);
5192 break;
5193 }
5194 work_ccb = xpt_alloc_ccb();
5195 xpt_setup_ccb(&work_ccb->ccb_h, path,
5196 request_ccb->ccb_h.pinfo.priority);
5197 work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5198 work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5199 work_ccb->ccb_h.ppriv_ptr0 = scan_info;
5200 work_ccb->crcn.flags = request_ccb->crcn.flags;
5201 xpt_action(work_ccb);
5202 }
5203 break;
5204 }
5205 case XPT_SCAN_LUN:
5206 {
5207 xpt_scan_bus_info *scan_info;
5208 path_id_t path_id;
5209 target_id_t target_id;
5210 lun_id_t lun_id;
5211
5212 /* Reuse the same CCB to query if a device was really found */
5213 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
5214 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
5215 request_ccb->ccb_h.pinfo.priority);
5216 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5217
5218 path_id = request_ccb->ccb_h.path_id;
5219 target_id = request_ccb->ccb_h.target_id;
5220 lun_id = request_ccb->ccb_h.target_lun;
5221 xpt_action(request_ccb);
5222
5223 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
5224 struct cam_ed *device;
5225 struct cam_et *target;
5226 int s, phl;
5227
5228 /*
5229 * If we already probed lun 0 successfully, or
5230 * we have additional configured luns on this
5231 * target that might have "gone away", go onto
5232 * the next lun.
5233 */
5234 target = request_ccb->ccb_h.path->target;
5235 /*
5236 * We may touch devices that we don't
5237			 * hold references to, so ensure they
5238 * don't disappear out from under us.
5239 * The target above is referenced by the
5240 * path in the request ccb.
5241 */
5242 phl = 0;
5243 s = splcam();
5244 device = TAILQ_FIRST(&target->ed_entries);
5245 if (device != NULL) {
5246 phl = device->quirk->quirks & CAM_QUIRK_HILUNS;
5247 if (device->lun_id == 0)
5248 device = TAILQ_NEXT(device, links);
5249 }
5250 splx(s);
5251 if ((lun_id != 0) || (device != NULL)) {
5252 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
5253 lun_id++;
5254 }
5255 } else {
5256 struct cam_ed *device;
5257
5258 device = request_ccb->ccb_h.path->device;
5259
5260 if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
5261 /* Try the next lun */
5262 if (lun_id < (CAM_SCSI2_MAXLUN-1) ||
5263 (device->quirk->quirks & CAM_QUIRK_HILUNS))
5264 lun_id++;
5265 }
5266 }
5267
5268 xpt_free_path(request_ccb->ccb_h.path);
5269
5270 /* Check Bounds */
5271 if ((lun_id == request_ccb->ccb_h.target_lun)
5272 || lun_id > scan_info->cpi->max_lun) {
5273 /* We're done */
5274
5275 xpt_free_ccb(request_ccb);
5276 scan_info->pending_count--;
5277 if (scan_info->pending_count == 0) {
5278 xpt_free_ccb((union ccb *)scan_info->cpi);
5279 request_ccb = scan_info->request_ccb;
5280 free(scan_info, M_TEMP);
5281 request_ccb->ccb_h.status = CAM_REQ_CMP;
5282 xpt_done(request_ccb);
5283 }
5284 } else {
5285 /* Try the next device */
5286 struct cam_path *path;
5287 cam_status status;
5288
5289 path = request_ccb->ccb_h.path;
5290 status = xpt_create_path(&path, xpt_periph,
5291 path_id, target_id, lun_id);
5292 if (status != CAM_REQ_CMP) {
5293 printf("xpt_scan_bus: xpt_create_path failed "
5294 "with status %#x, halting LUN scan\n",
5295 status);
5296 xpt_free_ccb(request_ccb);
5297 scan_info->pending_count--;
5298 if (scan_info->pending_count == 0) {
5299 xpt_free_ccb(
5300 (union ccb *)scan_info->cpi);
5301 request_ccb = scan_info->request_ccb;
5302 free(scan_info, M_TEMP);
5303 request_ccb->ccb_h.status = CAM_REQ_CMP;
5304 xpt_done(request_ccb);
5305 break;
5306 }
5307 }
5308 xpt_setup_ccb(&request_ccb->ccb_h, path,
5309 request_ccb->ccb_h.pinfo.priority);
5310 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5311 request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5312 request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5313 request_ccb->crcn.flags =
5314 scan_info->request_ccb->crcn.flags;
5315 xpt_action(request_ccb);
5316 }
5317 break;
5318 }
5319 default:
5320 break;
5321 }
5322}
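
/*
 * Illustrative sketch (not compiled): kicking off a full bus scan.  A
 * wildcard-path XPT_SCAN_BUS ccb dispatched through xpt_action() lands
 * in the XPT_SCAN_BUS case above and completes through the ccb callback
 * via xpt_done().  "bus_path_id" and "foo_scan_done" are hypothetical,
 * and error handling is elided.
 */
#if 0
	union ccb *ccb;
	struct cam_path *scan_path;

	if (xpt_create_path(&scan_path, xpt_periph, bus_path_id,
			    CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
		ccb = xpt_alloc_ccb();
		xpt_setup_ccb(&ccb->ccb_h, scan_path, /*priority*/5);
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
		ccb->ccb_h.cbfcnp = foo_scan_done;	/* hypothetical */
		ccb->crcn.flags = CAM_FLAG_NONE;
		xpt_action(ccb);
	}
#endif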
5323
5324typedef enum {
5325 PROBE_TUR,
5326 PROBE_INQUIRY,
5327 PROBE_FULL_INQUIRY,
5328 PROBE_MODE_SENSE,
5329 PROBE_SERIAL_NUM,
5330 PROBE_TUR_FOR_NEGOTIATION
5331} probe_action;
5332
5333typedef enum {
5334 PROBE_INQUIRY_CKSUM = 0x01,
5335 PROBE_SERIAL_CKSUM = 0x02,
5336 PROBE_NO_ANNOUNCE = 0x04
5337} probe_flags;
5338
5339typedef struct {
5340 TAILQ_HEAD(, ccb_hdr) request_ccbs;
5341 probe_action action;
5342 union ccb saved_ccb;
5343 probe_flags flags;
5344 MD5_CTX context;
5345 u_int8_t digest[16];
5346} probe_softc;
5347
5348static void
5349xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5350 cam_flags flags, union ccb *request_ccb)
5351{
5352 struct ccb_pathinq cpi;
5353 cam_status status;
5354 struct cam_path *new_path;
5355 struct cam_periph *old_periph;
5356 int s;
5357
5358 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5359 ("xpt_scan_lun\n"));
5360
5361 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5362 cpi.ccb_h.func_code = XPT_PATH_INQ;
5363 xpt_action((union ccb *)&cpi);
5364
5365 if (cpi.ccb_h.status != CAM_REQ_CMP) {
5366 if (request_ccb != NULL) {
5367 request_ccb->ccb_h.status = cpi.ccb_h.status;
5368 xpt_done(request_ccb);
5369 }
5370 return;
5371 }
5372
5373 if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5374 /*
5375 * Can't scan the bus on an adapter that
5376 * cannot perform the initiator role.
5377 */
5378 if (request_ccb != NULL) {
5379 request_ccb->ccb_h.status = CAM_REQ_CMP;
5380 xpt_done(request_ccb);
5381 }
5382 return;
5383 }
5384
5385 if (request_ccb == NULL) {
5386 request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
5387 if (request_ccb == NULL) {
5388 xpt_print_path(path);
5389 printf("xpt_scan_lun: can't allocate CCB, can't "
5390 "continue\n");
5391 return;
5392 }
5393 new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
5394 if (new_path == NULL) {
5395 xpt_print_path(path);
5396 printf("xpt_scan_lun: can't allocate path, can't "
5397 "continue\n");
5398 free(request_ccb, M_TEMP);
5399 return;
5400 }
5401 status = xpt_compile_path(new_path, xpt_periph,
5402 path->bus->path_id,
5403 path->target->target_id,
5404 path->device->lun_id);
5405
5406 if (status != CAM_REQ_CMP) {
5407 xpt_print_path(path);
5408 printf("xpt_scan_lun: can't compile path, can't "
5409 "continue\n");
5410 free(request_ccb, M_TEMP);
5411 free(new_path, M_TEMP);
5412 return;
5413 }
5414 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5415 request_ccb->ccb_h.cbfcnp = xptscandone;
5416 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5417 request_ccb->crcn.flags = flags;
5418 }
5419
5420 s = splsoftcam();
5421 if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5422 probe_softc *softc;
5423
5424 softc = (probe_softc *)old_periph->softc;
5425 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5426 periph_links.tqe);
5427 } else {
5428 status = cam_periph_alloc(proberegister, NULL, probecleanup,
5429 probestart, "probe",
5430 CAM_PERIPH_BIO,
5431 request_ccb->ccb_h.path, NULL, 0,
5432 request_ccb);
5433
5434 if (status != CAM_REQ_CMP) {
5435 xpt_print_path(path);
5436			printf("xpt_scan_lun: cam_periph_alloc returned an "
5437 "error, can't continue probe\n");
5438 request_ccb->ccb_h.status = status;
5439 xpt_done(request_ccb);
5440 }
5441 }
5442 splx(s);
5443}
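
/*
 * Illustrative sketch (not compiled): rescanning a single lun.  Passing
 * request_ccb == NULL makes xpt_scan_lun() build its own ccb and path
 * and complete through xptscandone() below; CAM_EXPECT_INQ_CHANGE makes
 * the probe skip the lost/found announcements when the inquiry data is
 * expected to differ.  "path" is assumed to be held by the caller.
 */
#if 0
	xpt_scan_lun(xpt_periph, path, CAM_EXPECT_INQ_CHANGE,
		     /*request_ccb*/NULL);
#endif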
5444
5445static void
5446xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5447{
5448 xpt_release_path(done_ccb->ccb_h.path);
5449 free(done_ccb->ccb_h.path, M_TEMP);
5450 free(done_ccb, M_TEMP);
5451}
5452
5453static cam_status
5454proberegister(struct cam_periph *periph, void *arg)
5455{
5456 union ccb *request_ccb; /* CCB representing the probe request */
5457 probe_softc *softc;
5458
5459 request_ccb = (union ccb *)arg;
5460 if (periph == NULL) {
5461 printf("proberegister: periph was NULL!!\n");
5462 return(CAM_REQ_CMP_ERR);
5463 }
5464
5465 if (request_ccb == NULL) {
5466 printf("proberegister: no probe CCB, "
5467 "can't register device\n");
5468 return(CAM_REQ_CMP_ERR);
5469 }
5470
5471 softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);
5472
5473 if (softc == NULL) {
5474 printf("proberegister: Unable to probe new device. "
5475 "Unable to allocate softc\n");
5476 return(CAM_REQ_CMP_ERR);
5477 }
5478 TAILQ_INIT(&softc->request_ccbs);
5479 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5480 periph_links.tqe);
5481 softc->flags = 0;
5482 periph->softc = softc;
5483 cam_periph_acquire(periph);
5484 /*
5485 * Ensure we've waited at least a bus settle
5486 * delay before attempting to probe the device.
5487 * For HBAs that don't do bus resets, this won't make a difference.
5488 */
5489 cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5490 scsi_delay);
5491 probeschedule(periph);
5492 return(CAM_REQ_CMP);
5493}
5494
5495static void
5496probeschedule(struct cam_periph *periph)
5497{
5498 struct ccb_pathinq cpi;
5499 union ccb *ccb;
5500 probe_softc *softc;
5501
5502 softc = (probe_softc *)periph->softc;
5503 ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5504
5505 xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5506 cpi.ccb_h.func_code = XPT_PATH_INQ;
5507 xpt_action((union ccb *)&cpi);
5508
5509 /*
5510 * If a device has gone away and another device, or the same one,
5511 * is back in the same place, it should have a unit attention
5512 * condition pending. It will not report the unit attention in
5513 * response to an inquiry, which may leave invalid transfer
5514 * negotiations in effect. The TUR will reveal the unit attention
5515 * condition. Only send the TUR for lun 0, since some devices
5516 * will get confused by commands other than inquiry to non-existent
5517	 * luns. If you think a device has gone away, start your scan from
5518	 * lun 0. This will ensure that any bogus transfer settings are
5519 * invalidated.
5520 *
5521 * If we haven't seen the device before and the controller supports
5522 * some kind of transfer negotiation, negotiate with the first
5523 * sent command if no bus reset was performed at startup. This
5524 * ensures that the device is not confused by transfer negotiation
5525 * settings left over by loader or BIOS action.
5526 */
5527 if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5528 && (ccb->ccb_h.target_lun == 0)) {
5529 softc->action = PROBE_TUR;
5530 } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5531 && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5532 proberequestdefaultnegotiation(periph);
5533 softc->action = PROBE_INQUIRY;
5534 } else {
5535 softc->action = PROBE_INQUIRY;
5536 }
5537
5538 if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5539 softc->flags |= PROBE_NO_ANNOUNCE;
5540 else
5541 softc->flags &= ~PROBE_NO_ANNOUNCE;
5542
5543 xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5544}
5545
5546static void
5547probestart(struct cam_periph *periph, union ccb *start_ccb)
5548{
5549 /* Probe the device that our peripheral driver points to */
5550 struct ccb_scsiio *csio;
5551 probe_softc *softc;
5552
5553 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5554
5555 softc = (probe_softc *)periph->softc;
5556 csio = &start_ccb->csio;
5557
5558 switch (softc->action) {
5559 case PROBE_TUR:
5560 case PROBE_TUR_FOR_NEGOTIATION:
5561 {
5562 scsi_test_unit_ready(csio,
5563 /*retries*/4,
5564 probedone,
5565 MSG_SIMPLE_Q_TAG,
5566 SSD_FULL_SIZE,
5567 /*timeout*/60000);
5568 break;
5569 }
5570 case PROBE_INQUIRY:
5571 case PROBE_FULL_INQUIRY:
5572 {
5573 u_int inquiry_len;
5574 struct scsi_inquiry_data *inq_buf;
5575
5576 inq_buf = &periph->path->device->inq_data;
5577 /*
5578 * If the device is currently configured, we calculate an
5579 * MD5 checksum of the inquiry data, and if the serial number
5580 * length is greater than 0, add the serial number data
5581 * into the checksum as well. Once the inquiry and the
5582 * serial number check finish, we attempt to figure out
5583 * whether we still have the same device.
5584 */
5585 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5586
5587 MD5Init(&softc->context);
5588 MD5Update(&softc->context, (unsigned char *)inq_buf,
5589 sizeof(struct scsi_inquiry_data));
5590 softc->flags |= PROBE_INQUIRY_CKSUM;
5591 if (periph->path->device->serial_num_len > 0) {
5592 MD5Update(&softc->context,
5593 periph->path->device->serial_num,
5594 periph->path->device->serial_num_len);
5595 softc->flags |= PROBE_SERIAL_CKSUM;
5596 }
5597 MD5Final(softc->digest, &softc->context);
5598 }
5599
5600 if (softc->action == PROBE_INQUIRY)
5601 inquiry_len = SHORT_INQUIRY_LENGTH;
5602 else
5603 inquiry_len = inq_buf->additional_length + 4;
5604
5605 scsi_inquiry(csio,
5606 /*retries*/4,
5607 probedone,
5608 MSG_SIMPLE_Q_TAG,
5609 (u_int8_t *)inq_buf,
5610 inquiry_len,
5611 /*evpd*/FALSE,
5612 /*page_code*/0,
5613 SSD_MIN_SIZE,
5614 /*timeout*/60 * 1000);
5615 break;
5616 }
5617 case PROBE_MODE_SENSE:
5618 {
5619 void *mode_buf;
5620 int mode_buf_len;
5621
5622 mode_buf_len = sizeof(struct scsi_mode_header_6)
5623 + sizeof(struct scsi_mode_blk_desc)
5624 + sizeof(struct scsi_control_page);
5625 mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
5626 if (mode_buf != NULL) {
5627 scsi_mode_sense(csio,
5628 /*retries*/4,
5629 probedone,
5630 MSG_SIMPLE_Q_TAG,
5631 /*dbd*/FALSE,
5632 SMS_PAGE_CTRL_CURRENT,
5633 SMS_CONTROL_MODE_PAGE,
5634 mode_buf,
5635 mode_buf_len,
5636 SSD_FULL_SIZE,
5637 /*timeout*/60000);
5638 break;
5639 }
5640 xpt_print_path(periph->path);
5641 printf("Unable to mode sense control page - malloc failure\n");
5642 softc->action = PROBE_SERIAL_NUM;
5643 }
5644 /* FALLTHROUGH */
5645 case PROBE_SERIAL_NUM:
5646 {
5647 struct scsi_vpd_unit_serial_number *serial_buf;
5648 struct cam_ed* device;
5649
5650 serial_buf = NULL;
5651 device = periph->path->device;
5652 device->serial_num = NULL;
5653 device->serial_num_len = 0;
5654
5655 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
5656 serial_buf = (struct scsi_vpd_unit_serial_number *)
5657 malloc(sizeof(*serial_buf), M_TEMP,
5658 M_NOWAIT | M_ZERO);
5659
5660 if (serial_buf != NULL) {
5661 scsi_inquiry(csio,
5662 /*retries*/4,
5663 probedone,
5664 MSG_SIMPLE_Q_TAG,
5665 (u_int8_t *)serial_buf,
5666 sizeof(*serial_buf),
5667 /*evpd*/TRUE,
5668 SVPD_UNIT_SERIAL_NUMBER,
5669 SSD_MIN_SIZE,
5670 /*timeout*/60 * 1000);
5671 break;
5672 }
5673 /*
5674 * We'll have to do without, let our probedone
5675 * routine finish up for us.
5676 */
5677 start_ccb->csio.data_ptr = NULL;
5678 probedone(periph, start_ccb);
5679 return;
5680 }
5681 }
5682 xpt_action(start_ccb);
5683}
5684
5685static void
5686proberequestdefaultnegotiation(struct cam_periph *periph)
5687{
5688 struct ccb_trans_settings cts;
5689
5690 xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5691 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5692#ifdef CAM_NEW_TRAN_CODE
5693 cts.type = CTS_TYPE_USER_SETTINGS;
5694#else /* CAM_NEW_TRAN_CODE */
5695 cts.flags = CCB_TRANS_USER_SETTINGS;
5696#endif /* CAM_NEW_TRAN_CODE */
5697 xpt_action((union ccb *)&cts);
5698 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5699#ifdef CAM_NEW_TRAN_CODE
5700 cts.type = CTS_TYPE_CURRENT_SETTINGS;
5701#else /* CAM_NEW_TRAN_CODE */
5702 cts.flags &= ~CCB_TRANS_USER_SETTINGS;
5703 cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
5704#endif /* CAM_NEW_TRAN_CODE */
5705 xpt_action((union ccb *)&cts);
5706}
5707
5708static void
5709probedone(struct cam_periph *periph, union ccb *done_ccb)
5710{
5711 probe_softc *softc;
5712 struct cam_path *path;
5713 u_int32_t priority;
5714
5715 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
5716
5717 softc = (probe_softc *)periph->softc;
5718 path = done_ccb->ccb_h.path;
5719 priority = done_ccb->ccb_h.pinfo.priority;
5720
5721 switch (softc->action) {
5722 case PROBE_TUR:
5723 {
5724 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5725
5726 if (cam_periph_error(done_ccb, 0,
5727 SF_NO_PRINT, NULL) == ERESTART)
5728 return;
5729 else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5730 /* Don't wedge the queue */
5731 xpt_release_devq(done_ccb->ccb_h.path,
5732 /*count*/1,
5733 /*run_queue*/TRUE);
5734 }
5735 softc->action = PROBE_INQUIRY;
5736 xpt_release_ccb(done_ccb);
5737 xpt_schedule(periph, priority);
5738 return;
5739 }
5740 case PROBE_INQUIRY:
5741 case PROBE_FULL_INQUIRY:
5742 {
5743 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5744 struct scsi_inquiry_data *inq_buf;
5745 u_int8_t periph_qual;
5746
5747 path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
5748 inq_buf = &path->device->inq_data;
5749
5750 periph_qual = SID_QUAL(inq_buf);
5751
5752 switch(periph_qual) {
5753 case SID_QUAL_LU_CONNECTED:
5754 {
5755 u_int8_t alen;
5756
5757 /*
5758 * We conservatively request only
5759			 * SHORT_INQUIRY_LENGTH bytes of inquiry
5760 * information during our first try
5761 * at sending an INQUIRY. If the device
5762 * has more information to give,
5763 * perform a second request specifying
5764 * the amount of information the device
5765 * is willing to give.
5766 */
5767 alen = inq_buf->additional_length;
5768 if (softc->action == PROBE_INQUIRY
5769 && alen > (SHORT_INQUIRY_LENGTH - 4)) {
5770 softc->action = PROBE_FULL_INQUIRY;
5771 xpt_release_ccb(done_ccb);
5772 xpt_schedule(periph, priority);
5773 return;
5774 }
5775
5776 xpt_find_quirk(path->device);
5777
5778#ifdef CAM_NEW_TRAN_CODE
5779 xpt_devise_transport(path);
5780#endif /* CAM_NEW_TRAN_CODE */
5781 if ((inq_buf->flags & SID_CmdQue) != 0)
5782 softc->action = PROBE_MODE_SENSE;
5783 else
5784 softc->action = PROBE_SERIAL_NUM;
5785
5786 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5787
5788 xpt_release_ccb(done_ccb);
5789 xpt_schedule(periph, priority);
5790 return;
5791 }
5792 default:
5793 break;
5794 }
5795 } else if (cam_periph_error(done_ccb, 0,
5796 done_ccb->ccb_h.target_lun > 0
5797 ? SF_RETRY_UA|SF_QUIET_IR
5798 : SF_RETRY_UA,
5799 &softc->saved_ccb) == ERESTART) {
5800 return;
5801 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5802 /* Don't wedge the queue */
5803 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5804 /*run_queue*/TRUE);
5805 }
5806 /*
5807 * If we get to this point, we got an error status back
5808 * from the inquiry and the error status doesn't require
5809 * automatically retrying the command. Therefore, the
5810 * inquiry failed. If we had inquiry information before
5811 * for this device, but this latest inquiry command failed,
5812 * the device has probably gone away. If this device isn't
5813 * already marked unconfigured, notify the peripheral
5814 * drivers that this device is no more.
5815 */
5816 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5817 /* Send the async notification. */
5818 xpt_async(AC_LOST_DEVICE, path, NULL);
5819
5820 xpt_release_ccb(done_ccb);
5821 break;
5822 }
5823 case PROBE_MODE_SENSE:
5824 {
5825 struct ccb_scsiio *csio;
5826 struct scsi_mode_header_6 *mode_hdr;
5827
5828 csio = &done_ccb->csio;
5829 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
5830 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5831 struct scsi_control_page *page;
5832 u_int8_t *offset;
5833
5834 offset = ((u_int8_t *)&mode_hdr[1])
5835 + mode_hdr->blk_desc_len;
5836 page = (struct scsi_control_page *)offset;
5837 path->device->queue_flags = page->queue_flags;
5838 } else if (cam_periph_error(done_ccb, 0,
5839 SF_RETRY_UA|SF_NO_PRINT,
5840 &softc->saved_ccb) == ERESTART) {
5841 return;
5842 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5843 /* Don't wedge the queue */
5844 xpt_release_devq(done_ccb->ccb_h.path,
5845 /*count*/1, /*run_queue*/TRUE);
5846 }
5847 xpt_release_ccb(done_ccb);
5848 free(mode_hdr, M_TEMP);
5849 softc->action = PROBE_SERIAL_NUM;
5850 xpt_schedule(periph, priority);
5851 return;
5852 }
5853 case PROBE_SERIAL_NUM:
5854 {
5855 struct ccb_scsiio *csio;
5856 struct scsi_vpd_unit_serial_number *serial_buf;
5857 u_int32_t priority;
5858 int changed;
5859 int have_serialnum;
5860
5861 changed = 1;
5862 have_serialnum = 0;
5863 csio = &done_ccb->csio;
5864 priority = done_ccb->ccb_h.pinfo.priority;
5865 serial_buf =
5866 (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
5867
5868 /* Clean up from previous instance of this device */
5869 if (path->device->serial_num != NULL) {
5870 free(path->device->serial_num, M_DEVBUF);
5871 path->device->serial_num = NULL;
5872 path->device->serial_num_len = 0;
5873 }
5874
5875 if (serial_buf == NULL) {
5876 /*
5877 * Don't process the command as it was never sent
5878 */
5879 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
5880 && (serial_buf->length > 0)) {
5881
5882 have_serialnum = 1;
5883 path->device->serial_num =
5884 (u_int8_t *)malloc((serial_buf->length + 1),
5885 M_DEVBUF, M_NOWAIT);
5886 if (path->device->serial_num != NULL) {
5887 bcopy(serial_buf->serial_num,
5888 path->device->serial_num,
5889 serial_buf->length);
5890 path->device->serial_num_len =
5891 serial_buf->length;
5892 path->device->serial_num[serial_buf->length]
5893 = '\0';
5894 }
5895 } else if (cam_periph_error(done_ccb, 0,
5896 SF_RETRY_UA|SF_NO_PRINT,
5897 &softc->saved_ccb) == ERESTART) {
5898 return;
5899 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5900 /* Don't wedge the queue */
5901 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5902 /*run_queue*/TRUE);
5903 }
5904
5905 /*
5906 * Let's see if we have seen this device before.
5907 */
5908 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
5909 MD5_CTX context;
5910 u_int8_t digest[16];
5911
5912 MD5Init(&context);
5913
5914 MD5Update(&context,
5915 (unsigned char *)&path->device->inq_data,
5916 sizeof(struct scsi_inquiry_data));
5917
5918 if (have_serialnum)
5919 MD5Update(&context, serial_buf->serial_num,
5920 serial_buf->length);
5921
5922 MD5Final(digest, &context);
5923 if (bcmp(softc->digest, digest, 16) == 0)
5924 changed = 0;
5925
5926 /*
5927 * XXX Do we need to do a TUR in order to ensure
5928 * that the device really hasn't changed???
5929 */
5930 if ((changed != 0)
5931 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
5932 xpt_async(AC_LOST_DEVICE, path, NULL);
5933 }
5934 if (serial_buf != NULL)
5935 free(serial_buf, M_TEMP);
5936
5937 if (changed != 0) {
5938 /*
5939 * Now that we have all the necessary
5940 * information to safely perform transfer
5941 * negotiations... Controllers don't perform
5942 * any negotiation or tagged queuing until
5943 * after the first XPT_SET_TRAN_SETTINGS ccb is
5944			 * received. So, on a new device, just retrieve
5945 * the user settings, and set them as the current
5946 * settings to set the device up.
5947 */
5948 proberequestdefaultnegotiation(periph);
5949 xpt_release_ccb(done_ccb);
5950
5951 /*
5952 * Perform a TUR to allow the controller to
5953 * perform any necessary transfer negotiation.
5954 */
5955 softc->action = PROBE_TUR_FOR_NEGOTIATION;
5956 xpt_schedule(periph, priority);
5957 return;
5958 }
5959 xpt_release_ccb(done_ccb);
5960 break;
5961 }
5962 case PROBE_TUR_FOR_NEGOTIATION:
5963 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5964 /* Don't wedge the queue */
5965 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5966 /*run_queue*/TRUE);
5967 }
5968
5969 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5970
5971 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
5972 /* Inform the XPT that a new device has been found */
5973 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5974 xpt_action(done_ccb);
5975
5976 xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb);
5977 }
5978 xpt_release_ccb(done_ccb);
5979 break;
5980 }
5981 done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5982 TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
5983 done_ccb->ccb_h.status = CAM_REQ_CMP;
5984 xpt_done(done_ccb);
5985 if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
5986 cam_periph_invalidate(periph);
5987 cam_periph_release(periph);
5988 } else {
5989 probeschedule(periph);
5990 }
5991}
5992
5993static void
5994probecleanup(struct cam_periph *periph)
5995{
5996 free(periph->softc, M_TEMP);
5997}
5998
5999static void
6000xpt_find_quirk(struct cam_ed *device)
6001{
6002 caddr_t match;
6003
6004 match = cam_quirkmatch((caddr_t)&device->inq_data,
6005 (caddr_t)xpt_quirk_table,
6006 sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
6007 sizeof(*xpt_quirk_table), scsi_inquiry_match);
6008
6009 if (match == NULL)
6010 panic("xpt_find_quirk: device didn't match wildcard entry!!");
6011
6012 device->quirk = (struct xpt_quirk_entry *)match;
6013}
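
/*
 * Illustrative sketch (not compiled): the shape of an xpt_quirk_table
 * entry matched above: a scsi_inquiry_pattern followed by quirk flags
 * and tag-depth limits.  The vendor/product strings here are
 * hypothetical; the real table, ending in a catch-all wildcard entry,
 * is defined near the top of this file.
 */
#if 0
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, "VENDOR", "PRODUCT*", "*" },
		/*quirks*/CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/32
	},
#endif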
6014
6015#ifdef CAM_NEW_TRAN_CODE
6016
6017static void
6018xpt_devise_transport(struct cam_path *path)
6019{
6020 struct ccb_pathinq cpi;
6021 struct ccb_trans_settings cts;
6022 struct scsi_inquiry_data *inq_buf;
6023
6024 /* Get transport information from the SIM */
6025 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
6026 cpi.ccb_h.func_code = XPT_PATH_INQ;
6027 xpt_action((union ccb *)&cpi);
6028
6029 inq_buf = NULL;
6030 if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
6031 inq_buf = &path->device->inq_data;
6032 path->device->protocol = PROTO_SCSI;
6033 path->device->protocol_version =
6034 inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
6035 path->device->transport = cpi.transport;
6036 path->device->transport_version = cpi.transport_version;
6037
6038 /*
6039 * Any device not using SPI3 features should
6040 * be considered SPI2 or lower.
6041 */
6042 if (inq_buf != NULL) {
6043 if (path->device->transport == XPORT_SPI
6044 && (inq_buf->spi3data & SID_SPI_MASK) == 0
6045 && path->device->transport_version > 2)
6046 path->device->transport_version = 2;
6047 } else {
6048 struct cam_ed* otherdev;
6049
6050 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
6051 otherdev != NULL;
6052 otherdev = TAILQ_NEXT(otherdev, links)) {
6053 if (otherdev != path->device)
6054 break;
6055 }
6056
6057 if (otherdev != NULL) {
6058 /*
6059 * Initially assume the same versioning as
6060 * prior luns for this target.
6061 */
6062 path->device->protocol_version =
6063 otherdev->protocol_version;
6064 path->device->transport_version =
6065 otherdev->transport_version;
6066 } else {
6067			/* Until we know better, opt for safety */
6068 path->device->protocol_version = 2;
6069 if (path->device->transport == XPORT_SPI)
6070 path->device->transport_version = 2;
6071 else
6072 path->device->transport_version = 0;
6073 }
6074 }
6075
6076 /*
6077 * XXX
6078 * For a device compliant with SPC-2 we should be able
6079 * to determine the transport version supported by
6080 * scrutinizing the version descriptors in the
6081 * inquiry buffer.
6082 */
6083
6084 /* Tell the controller what we think */
6085 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
6086 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
6087 cts.type = CTS_TYPE_CURRENT_SETTINGS;
6088 cts.transport = path->device->transport;
6089 cts.transport_version = path->device->transport_version;
6090 cts.protocol = path->device->protocol;
6091 cts.protocol_version = path->device->protocol_version;
6092 cts.proto_specific.valid = 0;
6093 cts.xport_specific.valid = 0;
6094 xpt_action((union ccb *)&cts);
6095}
6096
6097static void
6098xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6099 int async_update)
6100{
6101 struct ccb_pathinq cpi;
6102 struct ccb_trans_settings cur_cts;
6103 struct ccb_trans_settings_scsi *scsi;
6104 struct ccb_trans_settings_scsi *cur_scsi;
6105 struct cam_sim *sim;
6106 struct scsi_inquiry_data *inq_data;
6107
6108 if (device == NULL) {
6109 cts->ccb_h.status = CAM_PATH_INVALID;
6110 xpt_done((union ccb *)cts);
6111 return;
6112 }
6113
6114 if (cts->protocol == PROTO_UNKNOWN
6115 || cts->protocol == PROTO_UNSPECIFIED) {
6116 cts->protocol = device->protocol;
6117 cts->protocol_version = device->protocol_version;
6118 }
6119
6120 if (cts->protocol_version == PROTO_VERSION_UNKNOWN
6121 || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
6122 cts->protocol_version = device->protocol_version;
6123
6124 if (cts->protocol != device->protocol) {
6125 xpt_print_path(cts->ccb_h.path);
6126 printf("Uninitialized Protocol %x:%x?\n",
6127 cts->protocol, device->protocol);
6128 cts->protocol = device->protocol;
6129 }
6130
6131 if (cts->protocol_version > device->protocol_version) {
6132 if (bootverbose) {
6133 xpt_print_path(cts->ccb_h.path);
6134 printf("Down reving Protocol Version from %d to %d?\n",
6135 cts->protocol_version, device->protocol_version);
6136 }
6137 cts->protocol_version = device->protocol_version;
6138 }
6139
6140 if (cts->transport == XPORT_UNKNOWN
6141 || cts->transport == XPORT_UNSPECIFIED) {
6142 cts->transport = device->transport;
6143 cts->transport_version = device->transport_version;
6144 }
6145
6146 if (cts->transport_version == XPORT_VERSION_UNKNOWN
6147 || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
6148 cts->transport_version = device->transport_version;
6149
6150 if (cts->transport != device->transport) {
6151 xpt_print_path(cts->ccb_h.path);
6152 printf("Uninitialized Transport %x:%x?\n",
6153 cts->transport, device->transport);
6154 cts->transport = device->transport;
6155 }
6156
6157 if (cts->transport_version > device->transport_version) {
6158 if (bootverbose) {
6159 xpt_print_path(cts->ccb_h.path);
6160 printf("Down reving Transport Version from %d to %d?\n",
6161 cts->transport_version,
6162 device->transport_version);
6163 }
6164 cts->transport_version = device->transport_version;
6165 }
6166
6167 sim = cts->ccb_h.path->bus->sim;
6168
6169 /*
6170 * Nothing more of interest to do unless
6171 * this is a device connected via the
6172 * SCSI protocol.
6173 */
6174 if (cts->protocol != PROTO_SCSI) {
6175 if (async_update == FALSE)
6176 (*(sim->sim_action))(sim, (union ccb *)cts);
6177 return;
6178 }
6179
6180 inq_data = &device->inq_data;
6181 scsi = &cts->proto_specific.scsi;
6182 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6183 cpi.ccb_h.func_code = XPT_PATH_INQ;
6184 xpt_action((union ccb *)&cpi);
6185
6186 /* SCSI specific sanity checking */
6187 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6188 || (inq_data->flags & SID_CmdQue) == 0
6189 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6190 || (device->quirk->mintags == 0)) {
6191 /*
6192 * Can't tag on hardware that doesn't support tags,
6193 * doesn't have it enabled, or has broken tag support.
6194 */
6195 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6196 }
6197
6198 if (async_update == FALSE) {
6199 /*
6200 * Perform sanity checking against what the
6201 * controller and device can do.
6202 */
6203 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6204 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6205 cur_cts.type = cts->type;
6206 xpt_action((union ccb *)&cur_cts);
6207
6208 cur_scsi = &cur_cts.proto_specific.scsi;
6209 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
6210 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6211 scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
6212 }
6213 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
6214 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6215 }
6216
6217 /* SPI specific sanity checking */
6218 if (cts->transport == XPORT_SPI && async_update == FALSE) {
6219 u_int spi3caps;
6220 struct ccb_trans_settings_spi *spi;
6221 struct ccb_trans_settings_spi *cur_spi;
6222
6223 spi = &cts->xport_specific.spi;
6224
6225 cur_spi = &cur_cts.xport_specific.spi;
6226
6227 /* Fill in any gaps in what the user gave us */
6228 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6229 spi->sync_period = cur_spi->sync_period;
6230 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6231 spi->sync_period = 0;
6232 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6233 spi->sync_offset = cur_spi->sync_offset;
6234 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6235 spi->sync_offset = 0;
6236 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6237 spi->ppr_options = cur_spi->ppr_options;
6238 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6239 spi->ppr_options = 0;
6240 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6241 spi->bus_width = cur_spi->bus_width;
6242 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6243 spi->bus_width = 0;
6244 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
6245 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6246 spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
6247 }
6248 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
6249 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6250 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6251 && (inq_data->flags & SID_Sync) == 0
6252 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6253 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6254 || (cur_spi->sync_offset == 0)
6255 || (cur_spi->sync_period == 0)) {
6256 /* Force async */
6257 spi->sync_period = 0;
6258 spi->sync_offset = 0;
6259 }
6260
6261 switch (spi->bus_width) {
6262 case MSG_EXT_WDTR_BUS_32_BIT:
6263 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6264 || (inq_data->flags & SID_WBus32) != 0
6265 || cts->type == CTS_TYPE_USER_SETTINGS)
6266 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6267 break;
6268 /* Fall Through to 16-bit */
6269 case MSG_EXT_WDTR_BUS_16_BIT:
6270 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6271 || (inq_data->flags & SID_WBus16) != 0
6272 || cts->type == CTS_TYPE_USER_SETTINGS)
6273 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6274 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6275 break;
6276 }
6277 /* Fall Through to 8-bit */
6278 default: /* New bus width?? */
6279 case MSG_EXT_WDTR_BUS_8_BIT:
6280 /* All targets can do this */
6281 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6282 break;
6283 }
6284
6285 spi3caps = cpi.xport_specific.spi.ppr_options;
6286 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6287 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6288 spi3caps &= inq_data->spi3data;
6289
6290 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
6291 spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
6292
6293 if ((spi3caps & SID_SPI_IUS) == 0)
6294 spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
6295
6296 if ((spi3caps & SID_SPI_QAS) == 0)
6297 spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
6298
6299		/* No SPI PPR options are allowed unless the bus is wide */
6300 if (spi->bus_width == 0)
6301 spi->ppr_options = 0;
6302
6303 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
6304 /*
6305 * Can't tag queue without disconnection.
6306 */
6307 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6308 scsi->valid |= CTS_SCSI_VALID_TQ;
6309 }
6310
6311 /*
6312 * If we are currently performing tagged transactions to
6313 * this device and want to change its negotiation parameters,
6314 * go non-tagged for a bit to give the controller a chance to
6315 * negotiate unhampered by tag messages.
6316 */
6317 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6318 && (device->inq_flags & SID_CmdQue) != 0
6319 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6320 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
6321 CTS_SPI_VALID_SYNC_OFFSET|
6322 CTS_SPI_VALID_BUS_WIDTH)) != 0)
6323 xpt_toggle_tags(cts->ccb_h.path);
6324 }
6325
6326 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6327 && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
6328 int device_tagenb;
6329
6330 /*
6331 * If we are transitioning from tags to no-tags or
6332 * vice-versa, we need to carefully freeze and restart
6333 * the queue so that we don't overlap tagged and non-tagged
6334 * commands. We also temporarily stop tags if there is
6335 * a change in transfer negotiation settings to allow
6336 * "tag-less" negotiation.
6337 */
6338 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6339 || (device->inq_flags & SID_CmdQue) != 0)
6340 device_tagenb = TRUE;
6341 else
6342 device_tagenb = FALSE;
6343
6344 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6345 && device_tagenb == FALSE)
6346 || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
6347 && device_tagenb == TRUE)) {
6348
6349 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
6350 /*
6351 * Delay change to use tags until after a
6352 * few commands have gone to this device so
6353 * the controller has time to perform transfer
6354 * negotiations without tagged messages getting
6355 * in the way.
6356 */
6357 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6358 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6359 } else {
6360 struct ccb_relsim crs;
6361
6362 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6363 device->inq_flags &= ~SID_CmdQue;
6364 xpt_dev_ccbq_resize(cts->ccb_h.path,
6365 sim->max_dev_openings);
6366 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6367 device->tag_delay_count = 0;
6368
6369 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6370 /*priority*/1);
6371 crs.ccb_h.func_code = XPT_REL_SIMQ;
6372 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6373 crs.openings
6374 = crs.release_timeout
6375 = crs.qfrozen_cnt
6376 = 0;
6377 xpt_action((union ccb *)&crs);
6378 }
6379 }
6380 }
6381 if (async_update == FALSE)
6382 (*(sim->sim_action))(sim, (union ccb *)cts);
6383}
6384
6385#else /* CAM_NEW_TRAN_CODE */
6386
6387static void
6388xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6389 int async_update)
6390{
6391 struct cam_sim *sim;
6392 int qfrozen;
6393
6394 sim = cts->ccb_h.path->bus->sim;
6395 if (async_update == FALSE) {
6396 struct scsi_inquiry_data *inq_data;
6397 struct ccb_pathinq cpi;
6398 struct ccb_trans_settings cur_cts;
6399
6400 if (device == NULL) {
6401 cts->ccb_h.status = CAM_PATH_INVALID;
6402 xpt_done((union ccb *)cts);
6403 return;
6404 }
6405
6406 /*
6407 * Perform sanity checking against what the
6408 * controller and device can do.
6409 */
6410 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6411 cpi.ccb_h.func_code = XPT_PATH_INQ;
6412 xpt_action((union ccb *)&cpi);
6413 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6414 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6415 cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS;
6416 xpt_action((union ccb *)&cur_cts);
6417 inq_data = &device->inq_data;
6418
6419 /* Fill in any gaps in what the user gave us */
6420 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
6421 cts->sync_period = cur_cts.sync_period;
6422 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
6423 cts->sync_offset = cur_cts.sync_offset;
6424 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0)
6425 cts->bus_width = cur_cts.bus_width;
6426 if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) {
6427 cts->flags &= ~CCB_TRANS_DISC_ENB;
6428 cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB;
6429 }
6430 if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) {
6431 cts->flags &= ~CCB_TRANS_TAG_ENB;
6432 cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
6433 }
6434
6435 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6436 && (inq_data->flags & SID_Sync) == 0)
6437 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6438 || (cts->sync_offset == 0)
6439 || (cts->sync_period == 0)) {
6440 /* Force async */
6441 cts->sync_period = 0;
6442 cts->sync_offset = 0;
6443 } else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6444 && (inq_data->spi3data & SID_SPI_CLOCK_DT) == 0
6445 && cts->sync_period <= 0x9) {
6446 /*
6447 * Don't allow DT transmission rates if the
6448 * device does not support it.
6449 */
6450 cts->sync_period = 0xa;
6451 }
6452
6453 switch (cts->bus_width) {
6454 case MSG_EXT_WDTR_BUS_32_BIT:
6455 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6456 || (inq_data->flags & SID_WBus32) != 0)
6457 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6458 break;
6459 /* FALLTHROUGH to 16-bit */
6460 case MSG_EXT_WDTR_BUS_16_BIT:
6461 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6462 || (inq_data->flags & SID_WBus16) != 0)
6463 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6464 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6465 break;
6466 }
6467 /* FALLTHROUGH to 8-bit */
6468 default: /* New bus width?? */
6469 case MSG_EXT_WDTR_BUS_8_BIT:
6470 /* All targets can do this */
6471 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6472 break;
6473 }
6474
6475 if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
6476 /*
6477 * Can't tag queue without disconnection.
6478 */
6479 cts->flags &= ~CCB_TRANS_TAG_ENB;
6480 cts->valid |= CCB_TRANS_TQ_VALID;
6481 }
6482
6483 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6484 || (inq_data->flags & SID_CmdQue) == 0
6485 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6486 || (device->quirk->mintags == 0)) {
6487 /*
6488			 * Can't tag on hardware that doesn't support tags,
6489 * doesn't have it enabled, or has broken tag support.
6490 */
6491 cts->flags &= ~CCB_TRANS_TAG_ENB;
6492 }
6493 }
6494
6495 qfrozen = FALSE;
6496 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
6497 int device_tagenb;
6498
6499 /*
6500 * If we are transitioning from tags to no-tags or
6501 * vice-versa, we need to carefully freeze and restart
6502 * the queue so that we don't overlap tagged and non-tagged
6503 * commands. We also temporarily stop tags if there is
6504 * a change in transfer negotiation settings to allow
6505 * "tag-less" negotiation.
6506 */
6507 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6508 || (device->inq_flags & SID_CmdQue) != 0)
6509 device_tagenb = TRUE;
6510 else
6511 device_tagenb = FALSE;
6512
6513 if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
6514 && device_tagenb == FALSE)
6515 || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
6516 && device_tagenb == TRUE)) {
6517
6518 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
6519 /*
6520 * Delay change to use tags until after a
6521 * few commands have gone to this device so
6522 * the controller has time to perform transfer
6523 * negotiations without tagged messages getting
6524 * in the way.
6525 */
6526 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6527 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6528 } else {
6529 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6530 qfrozen = TRUE;
6531 device->inq_flags &= ~SID_CmdQue;
6532 xpt_dev_ccbq_resize(cts->ccb_h.path,
6533 sim->max_dev_openings);
6534 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6535 device->tag_delay_count = 0;
6536 }
6537 }
6538 }
6539
6540 if (async_update == FALSE) {
6541 /*
6542 * If we are currently performing tagged transactions to
6543 * this device and want to change its negotiation parameters,
6544 * go non-tagged for a bit to give the controller a chance to
6545 * negotiate unhampered by tag messages.
6546 */
6547 if ((device->inq_flags & SID_CmdQue) != 0
6548 && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
6549 CCB_TRANS_SYNC_OFFSET_VALID|
6550 CCB_TRANS_BUS_WIDTH_VALID)) != 0)
6551 xpt_toggle_tags(cts->ccb_h.path);
6552
6553 (*(sim->sim_action))(sim, (union ccb *)cts);
6554 }
6555
6556 if (qfrozen) {
6557 struct ccb_relsim crs;
6558
6559 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6560 /*priority*/1);
6561 crs.ccb_h.func_code = XPT_REL_SIMQ;
6562 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6563 crs.openings
6564 = crs.release_timeout
6565 = crs.qfrozen_cnt
6566 = 0;
6567 xpt_action((union ccb *)&crs);
6568 }
6569}
6570
6571
6572#endif /* CAM_NEW_TRAN_CODE */
6573
6574static void
6575xpt_toggle_tags(struct cam_path *path)
6576{
6577 struct cam_ed *dev;
6578
6579 /*
6580 * Give controllers a chance to renegotiate
6581 * before starting tag operations. We
6582	 * "toggle" tagged queuing off then on,
6583 * which causes the tag enable command delay
6584 * counter to come into effect.
6585 */
6586 dev = path->device;
6587 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6588 || ((dev->inq_flags & SID_CmdQue) != 0
6589 && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
6590 struct ccb_trans_settings cts;
6591
6592 xpt_setup_ccb(&cts.ccb_h, path, 1);
6593#ifdef CAM_NEW_TRAN_CODE
6594 cts.protocol = PROTO_SCSI;
6595 cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
6596 cts.transport = XPORT_UNSPECIFIED;
6597 cts.transport_version = XPORT_VERSION_UNSPECIFIED;
6598 cts.proto_specific.scsi.flags = 0;
6599 cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
6600#else /* CAM_NEW_TRAN_CODE */
6601 cts.flags = 0;
6602 cts.valid = CCB_TRANS_TQ_VALID;
6603#endif /* CAM_NEW_TRAN_CODE */
6604 xpt_set_transfer_settings(&cts, path->device,
6605 /*async_update*/TRUE);
6606#ifdef CAM_NEW_TRAN_CODE
6607 cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
6608#else /* CAM_NEW_TRAN_CODE */
6609 cts.flags = CCB_TRANS_TAG_ENB;
6610#endif /* CAM_NEW_TRAN_CODE */
6611 xpt_set_transfer_settings(&cts, path->device,
6612 /*async_update*/TRUE);
6613 }
6614}
6615
6616static void
6617xpt_start_tags(struct cam_path *path)
6618{
6619 struct ccb_relsim crs;
6620 struct cam_ed *device;
6621 struct cam_sim *sim;
6622 int newopenings;
6623
6624 device = path->device;
6625 sim = path->bus->sim;
6626 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6627 xpt_freeze_devq(path, /*count*/1);
6628 device->inq_flags |= SID_CmdQue;
6629 newopenings = min(device->quirk->maxtags, sim->max_tagged_dev_openings);
6630 xpt_dev_ccbq_resize(path, newopenings);
6631 xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
6632 crs.ccb_h.func_code = XPT_REL_SIMQ;
6633 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6634 crs.openings
6635 = crs.release_timeout
6636 = crs.qfrozen_cnt
6637 = 0;
6638 xpt_action((union ccb *)&crs);
6639}
6640
6641static int busses_to_config;
6642static int busses_to_reset;
6643
6644static int
6645xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
6646{
6647 if (bus->path_id != CAM_XPT_PATH_ID) {
6648 struct cam_path path;
6649 struct ccb_pathinq cpi;
6650 int can_negotiate;
6651
6652 busses_to_config++;
6653 xpt_compile_path(&path, NULL, bus->path_id,
6654 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
6655 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
6656 cpi.ccb_h.func_code = XPT_PATH_INQ;
6657 xpt_action((union ccb *)&cpi);
6658 can_negotiate = cpi.hba_inquiry;
6659 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6660 if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
6661 && can_negotiate)
6662 busses_to_reset++;
6663 xpt_release_path(&path);
6664 }
6665
6666 return(1);
6667}
6668
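/*
 * Second configuration pass: issue the initial XPT_RESET_BUS for each
 * bus that qualifies, or mock up a completed reset for those that do
 * not, and hand the CCB to xpt_finishconfig() so the subsequent bus
 * scan runs from the completion path.
 */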
6669static int
6670xptconfigfunc(struct cam_eb *bus, void *arg)
6671{
6672 struct cam_path *path;
6673 union ccb *work_ccb;
6674
6675 if (bus->path_id != CAM_XPT_PATH_ID) {
6676 cam_status status;
6677 int can_negotiate;
6678
6679 work_ccb = xpt_alloc_ccb();
6680 if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
6681 CAM_TARGET_WILDCARD,
6682 CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
6683 printf("xptconfigfunc: xpt_create_path failed with "
6684 "status %#x for bus %d\n", status, bus->path_id);
6685 printf("xptconfigfunc: halting bus configuration\n");
6686 xpt_free_ccb(work_ccb);
6687 busses_to_config--;
6688 xpt_finishconfig(xpt_periph, NULL);
6689 return(0);
6690 }
6691 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6692 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
6693 xpt_action(work_ccb);
6694 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
6695 printf("xptconfigfunc: CPI failed on bus %d "
6696 "with status %d\n", bus->path_id,
6697 work_ccb->ccb_h.status);
6698 xpt_finishconfig(xpt_periph, work_ccb);
6699 return(1);
6700 }
6701
6702 can_negotiate = work_ccb->cpi.hba_inquiry;
6703 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6704 if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
6705 && (can_negotiate != 0)) {
6706 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6707 work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6708 work_ccb->ccb_h.cbfcnp = NULL;
6709 CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
6710 ("Resetting Bus\n"));
6711 xpt_action(work_ccb);
6712 xpt_finishconfig(xpt_periph, work_ccb);
6713 } else {
6714 /* Act as though we performed a successful BUS RESET */
6715 work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6716 xpt_finishconfig(xpt_periph, work_ccb);
6717 }
6718 }
6719
6720 return(1);
6721}
6722
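/*
 * Initial bus configuration, run from a config_intrhook once interrupts
 * are live: optionally wire up the CAM debug path, then count the
 * installed busses and kick off the reset/scan sequence on each one.
 */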
6723static void
6724xpt_config(void *arg)
6725{
6726 /*
6727 * Now that interrupts are enabled, go find our devices
6728 */
6729
6730#ifdef CAMDEBUG
6731	/* Set up debugging flags and path */
6732#ifdef CAM_DEBUG_FLAGS
6733 cam_dflags = CAM_DEBUG_FLAGS;
6734#else /* !CAM_DEBUG_FLAGS */
6735 cam_dflags = CAM_DEBUG_NONE;
6736#endif /* CAM_DEBUG_FLAGS */
6737#ifdef CAM_DEBUG_BUS
6738 if (cam_dflags != CAM_DEBUG_NONE) {
6739 if (xpt_create_path(&cam_dpath, xpt_periph,
6740 CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
6741 CAM_DEBUG_LUN) != CAM_REQ_CMP) {
6742 printf("xpt_config: xpt_create_path() failed for debug"
6743 " target %d:%d:%d, debugging disabled\n",
6744 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
6745 cam_dflags = CAM_DEBUG_NONE;
6746 }
6747 } else
6748 cam_dpath = NULL;
6749#else /* !CAM_DEBUG_BUS */
6750 cam_dpath = NULL;
6751#endif /* CAM_DEBUG_BUS */
6752#endif /* CAMDEBUG */
6753
6754 /*
6755 * Scan all installed busses.
6756 */
6757 xpt_for_all_busses(xptconfigbuscountfunc, NULL);
6758
6759 if (busses_to_config == 0) {
6760 /* Call manually because we don't have any busses */
6761 xpt_finishconfig(xpt_periph, NULL);
6762 } else {
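		/*
		 * scsi_delay is kept in milliseconds; only announce
		 * the settle delay when it amounts to two seconds or
		 * more.
		 */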
6763 if (busses_to_reset > 0 && scsi_delay >= 2000) {
6764 printf("Waiting %d seconds for SCSI "
6765 "devices to settle\n", scsi_delay/1000);
6766 }
6767 xpt_for_all_busses(xptconfigfunc, NULL);
6768 }
6769}
6770
6771/*
6772 * If the given device only has one peripheral attached to it, and if that
6773 * peripheral is the passthrough driver, announce it.  This ensures that the
6774 * user sees some sort of announcement for every peripheral in their system.
6775 */
6776static int
6777xptpassannouncefunc(struct cam_ed *device, void *arg)
6778{
6779 struct cam_periph *periph;
6780 int i;
6781
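	/* Count the peripheral instances attached to this device. */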
6782 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
6783 periph = SLIST_NEXT(periph, periph_links), i++);
6784
6785 periph = SLIST_FIRST(&device->periphs);
6786 if ((i == 1)
6787 && (strncmp(periph->periph_name, "pass", 4) == 0))
6788 xpt_announce_periph(periph, NULL);
6789
6790 return(1);
6791}
6792
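/*
 * Completion handler for initial configuration.  Each bus steps through
 * reset -> scan -> done; when the final bus finishes, the peripheral
 * drivers are initialized, pass-only devices are announced, and the
 * config_intrhook is released so the boot can proceed.
 */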
6793static void
6794xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
6795{
6796 struct periph_driver **p_drv;
6797 int i;
6798
6799 if (done_ccb != NULL) {
6800 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
6801 ("xpt_finishconfig\n"));
6802 switch(done_ccb->ccb_h.func_code) {
6803 case XPT_RESET_BUS:
6804 if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
6805 done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
6806 done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
6807 xpt_action(done_ccb);
6808 return;
6809 }
6810 /* FALLTHROUGH */
6811 case XPT_SCAN_BUS:
6812 default:
6813 xpt_free_path(done_ccb->ccb_h.path);
6814 busses_to_config--;
6815 break;
6816 }
6817 }
6818
6819 if (busses_to_config == 0) {
6820 /* Register all the peripheral drivers */
6821 /* XXX This will have to change when we have loadable modules */
6822 p_drv = periph_drivers;
6823 for (i = 0; p_drv[i] != NULL; i++) {
6824 (*p_drv[i]->init)();
6825 }
6826
6827 /*
6828 * Check for devices with no "standard" peripheral driver
6829 * attached. For any devices like that, announce the
6830 * passthrough driver so the user will see something.
6831 */
6832 xpt_for_all_devices(xptpassannouncefunc, NULL);
6833
6834 /* Release our hook so that the boot can continue. */
6835 config_intrhook_disestablish(xpt_config_hook);
6836 free(xpt_config_hook, M_TEMP);
6837 xpt_config_hook = NULL;
6838 }
6839 if (done_ccb != NULL)
6840 xpt_free_ccb(done_ccb);
6841}
6842
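/*
 * Action routine for the transport layer's own SIM.  There is no real
 * controller behind the xpt bus, so only XPT_PATH_INQ is serviced;
 * every other function code completes with CAM_REQ_INVALID.
 */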
6843static void
6844xptaction(struct cam_sim *sim, union ccb *work_ccb)
6845{
6846 CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
6847
6848 switch (work_ccb->ccb_h.func_code) {
6849 /* Common cases first */
6850 case XPT_PATH_INQ: /* Path routing inquiry */
6851 {
6852 struct ccb_pathinq *cpi;
6853
6854 cpi = &work_ccb->cpi;
6855 cpi->version_num = 1; /* XXX??? */
6856 cpi->hba_inquiry = 0;
6857 cpi->target_sprt = 0;
6858 cpi->hba_misc = 0;
6859 cpi->hba_eng_cnt = 0;
6860 cpi->max_target = 0;
6861 cpi->max_lun = 0;
6862 cpi->initiator_id = 0;
6863 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
6864 strncpy(cpi->hba_vid, "", HBA_IDLEN);
6865 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
6866 cpi->unit_number = sim->unit_number;
6867 cpi->bus_id = sim->bus_id;
6868 cpi->base_transfer_speed = 0;
6869#ifdef CAM_NEW_TRAN_CODE
6870 cpi->protocol = PROTO_UNSPECIFIED;
6871 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
6872 cpi->transport = XPORT_UNSPECIFIED;
6873 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
6874#endif /* CAM_NEW_TRAN_CODE */
6875 cpi->ccb_h.status = CAM_REQ_CMP;
6876 xpt_done(work_ccb);
6877 break;
6878 }
6879 default:
6880 work_ccb->ccb_h.status = CAM_REQ_INVALID;
6881 xpt_done(work_ccb);
6882 break;
6883 }
6884}
6885
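/*
 * A minimal sketch of how a caller would exercise the XPT_PATH_INQ
 * handler above (illustrative only; it mirrors the usage in
 * xptconfigbuscountfunc and assumes `path' is an already-constructed
 * cam_path referencing the xpt bus):
 *
 *	struct ccb_pathinq cpi;
 *
 *	xpt_setup_ccb(&cpi.ccb_h, path, 1);
 *	cpi.ccb_h.func_code = XPT_PATH_INQ;
 *	xpt_action((union ccb *)&cpi);
 *	if (cpi.ccb_h.status == CAM_REQ_CMP)
 *		printf("%s bus %d\n", cpi.dev_name, cpi.bus_id);
 */
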
6886/*
6887 * The xpt as a "controller" has no interrupt sources, so polling
6888 * is a no-op.
6889 */
6890static void
6891xptpoll(struct cam_sim *sim)
6892{
6893}
6894
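/*
 * Software interrupt handler for completed CCBs.  xpt_done() places
 * each finished CCB on a cam_isrq_t; this routine, handed that queue,
 * drains it at splcam: resources are returned to the device and SIM
 * queues, frozen queues are released, tagged queuing is restarted when
 * its delay expires, and each CCB's completion callback is invoked.
 */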
6895static void
6896camisr(void *V_queue)
6897{
6898 cam_isrq_t *queue = V_queue;
6899 int s;
6900 struct ccb_hdr *ccb_h;
6901
6902 s = splcam();
6903 while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
6904 int runq;
6905
6906 TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
6907 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
6908 splx(s);
6909
6910 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
6911 ("camisr\n"));
6912
6913 runq = FALSE;
6914
6915 if (ccb_h->flags & CAM_HIGH_POWER) {
6916 struct highpowerlist *hphead;
6917 union ccb *send_ccb;
6918
6919 hphead = &highpowerq;
6920
6921 send_ccb = (union ccb *)STAILQ_FIRST(hphead);
6922
6923 /*
6924			 * This high powered command is done; increment the free slot count.
6925 */
6926 num_highpower++;
6927
6928 /*
6929 * Any high powered commands queued up?
6930 */
6931 if (send_ccb != NULL) {
6932
6933 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
6934
6935 xpt_release_devq(send_ccb->ccb_h.path,
6936 /*count*/1, /*runqueue*/TRUE);
6937 }
6938 }
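		/*
		 * For CCBs that were dispatched to a device (not user
		 * CCBs), return queue resources and decide whether the
		 * device's send queue should be rescheduled.
		 */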
6939 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
6940 struct cam_ed *dev;
6941
6942 dev = ccb_h->path->device;
6943
6944 s = splcam();
6945 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
6946
6947 ccb_h->path->bus->sim->devq->send_active--;
6948 ccb_h->path->bus->sim->devq->send_openings++;
6949 splx(s);
6950
6951 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
6952 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
6953 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
6954 && (dev->ccbq.dev_active == 0))) {
6955
6956 xpt_release_devq(ccb_h->path, /*count*/1,
6957 /*run_queue*/TRUE);
6958 }
6959
6960 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6961 && (--dev->tag_delay_count == 0))
6962 xpt_start_tags(ccb_h->path);
6963
6964 if ((dev->ccbq.queue.entries > 0)
6965 && (dev->qfrozen_cnt == 0)
6966 && (device_is_send_queued(dev) == 0)) {
6967 runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
6968 dev);
6969 }
6970 }
6971
6972 if (ccb_h->status & CAM_RELEASE_SIMQ) {
6973 xpt_release_simq(ccb_h->path->bus->sim,
6974 /*run_queue*/TRUE);
6975 ccb_h->status &= ~CAM_RELEASE_SIMQ;
6976 runq = FALSE;
6977 }
6978
6979 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
6980 && (ccb_h->status & CAM_DEV_QFRZN)) {
6981 xpt_release_devq(ccb_h->path, /*count*/1,
6982 /*run_queue*/TRUE);
6983 ccb_h->status &= ~CAM_DEV_QFRZN;
6984 } else if (runq) {
6985 xpt_run_dev_sendq(ccb_h->path->bus);
6986 }
6987
6988 /* Call the peripheral driver's callback */
6989 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
6990
6991		/* Raise IPL again for the while loop's condition test */
6992 s = splcam();
6993 }
6994 splx(s);
6995}