cam_xpt.c (186184) -> cam_xpt.c (186185)
1/*-
2 * Implementation of the Common Access Method Transport (XPT) layer.
3 *
4 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
5 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/cam/cam_xpt.c 186184 2008-12-16 16:54:51Z trasz $");
31__FBSDID("$FreeBSD: head/sys/cam/cam_xpt.c 186185 2008-12-16 16:57:33Z trasz $");
32
33#include <sys/param.h>
34#include <sys/bus.h>
35#include <sys/systm.h>
36#include <sys/types.h>
37#include <sys/malloc.h>
38#include <sys/kernel.h>
39#include <sys/time.h>
40#include <sys/conf.h>
41#include <sys/fcntl.h>
42#include <sys/md5.h>
43#include <sys/interrupt.h>
44#include <sys/sbuf.h>
45#include <sys/taskqueue.h>
46
47#include <sys/lock.h>
48#include <sys/mutex.h>
49#include <sys/sysctl.h>
50#include <sys/kthread.h>
51
52#ifdef PC98
53#include <pc98/pc98/pc98_machdep.h> /* geometry translation */
54#endif
55
56#include <cam/cam.h>
57#include <cam/cam_ccb.h>
58#include <cam/cam_periph.h>
59#include <cam/cam_sim.h>
60#include <cam/cam_xpt.h>
61#include <cam/cam_xpt_sim.h>
62#include <cam/cam_xpt_periph.h>
63#include <cam/cam_debug.h>
64
65#include <cam/scsi/scsi_all.h>
66#include <cam/scsi/scsi_message.h>
67#include <cam/scsi/scsi_pass.h>
68#include <machine/stdarg.h> /* for xpt_print below */
69#include "opt_cam.h"
70
71/* Datastructures internal to the xpt layer */
72MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
73
      74/* Object for deferring XPT actions to a taskqueue */
75struct xpt_task {
76 struct task task;
77 void *data1;
78 uintptr_t data2;
79};
80
81/*
82 * Definition of an async handler callback block. These are used to add
83 * SIMs and peripherals to the async callback lists.
84 */
85struct async_node {
86 SLIST_ENTRY(async_node) links;
87 u_int32_t event_enable; /* Async Event enables */
88 void (*callback)(void *arg, u_int32_t code,
89 struct cam_path *path, void *args);
90 void *callback_arg;
91};
92
93SLIST_HEAD(async_list, async_node);
94SLIST_HEAD(periph_list, cam_periph);
95
96/*
97 * This is the maximum number of high powered commands (e.g. start unit)
98 * that can be outstanding at a particular time.
99 */
100#ifndef CAM_MAX_HIGHPOWER
101#define CAM_MAX_HIGHPOWER 4
102#endif
103
104/*
105 * Structure for queueing a device in a run queue.
106 * There is one run queue for allocating new ccbs,
107 * and another for sending ccbs to the controller.
108 */
109struct cam_ed_qinfo {
110 cam_pinfo pinfo;
111 struct cam_ed *device;
112};
113
114/*
115 * The CAM EDT (Existing Device Table) contains the device information for
116 * all devices for all busses in the system. The table contains a
117 * cam_ed structure for each device on the bus.
118 */
119struct cam_ed {
120 TAILQ_ENTRY(cam_ed) links;
121 struct cam_ed_qinfo alloc_ccb_entry;
122 struct cam_ed_qinfo send_ccb_entry;
123 struct cam_et *target;
124 struct cam_sim *sim;
125 lun_id_t lun_id;
126 struct camq drvq; /*
127 * Queue of type drivers wanting to do
128 * work on this device.
129 */
130 struct cam_ccbq ccbq; /* Queue of pending ccbs */
131 struct async_list asyncs; /* Async callback info for this B/T/L */
132 struct periph_list periphs; /* All attached devices */
133 u_int generation; /* Generation number */
134 struct cam_periph *owner; /* Peripheral driver's ownership tag */
135 struct xpt_quirk_entry *quirk; /* Oddities about this device */
136 /* Storage for the inquiry data */
137 cam_proto protocol;
138 u_int protocol_version;
139 cam_xport transport;
140 u_int transport_version;
141 struct scsi_inquiry_data inq_data;
142 u_int8_t inq_flags; /*
143 * Current settings for inquiry flags.
144 * This allows us to override settings
145 * like disconnection and tagged
146 * queuing for a device.
147 */
148 u_int8_t queue_flags; /* Queue flags from the control page */
149 u_int8_t serial_num_len;
150 u_int8_t *serial_num;
151 u_int32_t qfrozen_cnt;
152 u_int32_t flags;
153#define CAM_DEV_UNCONFIGURED 0x01
154#define CAM_DEV_REL_TIMEOUT_PENDING 0x02
155#define CAM_DEV_REL_ON_COMPLETE 0x04
156#define CAM_DEV_REL_ON_QUEUE_EMPTY 0x08
157#define CAM_DEV_RESIZE_QUEUE_NEEDED 0x10
158#define CAM_DEV_TAG_AFTER_COUNT 0x20
159#define CAM_DEV_INQUIRY_DATA_VALID 0x40
160#define CAM_DEV_IN_DV 0x80
161#define CAM_DEV_DV_HIT_BOTTOM 0x100
162 u_int32_t tag_delay_count;
163#define CAM_TAG_DELAY_COUNT 5
164 u_int32_t tag_saved_openings;
165 u_int32_t refcount;
166 struct callout callout;
167};
168
169/*
170 * Each target is represented by an ET (Existing Target). These
171 * entries are created when a target is successfully probed with an
172 * identify, and removed when a device fails to respond after a number
173 * of retries, or a bus rescan finds the device missing.
174 */
175struct cam_et {
176 TAILQ_HEAD(, cam_ed) ed_entries;
177 TAILQ_ENTRY(cam_et) links;
178 struct cam_eb *bus;
179 target_id_t target_id;
180 u_int32_t refcount;
181 u_int generation;
182 struct timeval last_reset;
183};
184
185/*
186 * Each bus is represented by an EB (Existing Bus). These entries
187 * are created by calls to xpt_bus_register and deleted by calls to
188 * xpt_bus_deregister.
189 */
190struct cam_eb {
191 TAILQ_HEAD(, cam_et) et_entries;
192 TAILQ_ENTRY(cam_eb) links;
193 path_id_t path_id;
194 struct cam_sim *sim;
195 struct timeval last_reset;
196 u_int32_t flags;
197#define CAM_EB_RUNQ_SCHEDULED 0x01
198 u_int32_t refcount;
199 u_int generation;
200 device_t parent_dev;
201};
202
203struct cam_path {
204 struct cam_periph *periph;
205 struct cam_eb *bus;
206 struct cam_et *target;
207 struct cam_ed *device;
208};
209
210struct xpt_quirk_entry {
211 struct scsi_inquiry_pattern inq_pat;
212 u_int8_t quirks;
213#define CAM_QUIRK_NOLUNS 0x01
214#define CAM_QUIRK_NOSERIAL 0x02
215#define CAM_QUIRK_HILUNS 0x04
216#define CAM_QUIRK_NOHILUNS 0x08
217 u_int mintags;
218 u_int maxtags;
219};
220
221static int cam_srch_hi = 0;
222TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
223static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
224SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
225 sysctl_cam_search_luns, "I",
226 "allow search above LUN 7 for SCSI3 and greater devices");
227
228#define CAM_SCSI2_MAXLUN 8
229/*
230 * If we're not quirked to search <= the first 8 luns
231 * and we are either quirked to search above lun 8,
232 * or we're > SCSI-2 and we've enabled hilun searching,
233 * or we're > SCSI-2 and the last lun was a success,
234 * we can look for luns above lun 8.
235 */
236#define CAN_SRCH_HI_SPARSE(dv) \
237 (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0) \
238 && ((dv->quirk->quirks & CAM_QUIRK_HILUNS) \
239 || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))
240
241#define CAN_SRCH_HI_DENSE(dv) \
242 (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0) \
243 && ((dv->quirk->quirks & CAM_QUIRK_HILUNS) \
244 || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
245
246typedef enum {
247 XPT_FLAG_OPEN = 0x01
248} xpt_flags;
249
250struct xpt_softc {
251 xpt_flags flags;
252 u_int32_t xpt_generation;
253
254 /* number of high powered commands that can go through right now */
255 STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
256 int num_highpower;
257
258 /* queue for handling async rescan requests. */
259 TAILQ_HEAD(, ccb_hdr) ccb_scanq;
260
261 /* Registered busses */
262 TAILQ_HEAD(,cam_eb) xpt_busses;
263 u_int bus_generation;
264
265 struct intr_config_hook *xpt_config_hook;
266
267 struct mtx xpt_topo_lock;
268 struct mtx xpt_lock;
269};
270
271static const char quantum[] = "QUANTUM";
272static const char sony[] = "SONY";
273static const char west_digital[] = "WDIGTL";
274static const char samsung[] = "SAMSUNG";
275static const char seagate[] = "SEAGATE";
276static const char microp[] = "MICROP";
277
278static struct xpt_quirk_entry xpt_quirk_table[] =
279{
280 {
281 /* Reports QUEUE FULL for temporary resource shortages */
282 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
283 /*quirks*/0, /*mintags*/24, /*maxtags*/32
284 },
285 {
286 /* Reports QUEUE FULL for temporary resource shortages */
287 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
288 /*quirks*/0, /*mintags*/24, /*maxtags*/32
289 },
290 {
291 /* Reports QUEUE FULL for temporary resource shortages */
292 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
293 /*quirks*/0, /*mintags*/24, /*maxtags*/32
294 },
295 {
296 /* Broken tagged queuing drive */
297 { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
298 /*quirks*/0, /*mintags*/0, /*maxtags*/0
299 },
300 {
301 /* Broken tagged queuing drive */
302 { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
303 /*quirks*/0, /*mintags*/0, /*maxtags*/0
304 },
305 {
306 /* Broken tagged queuing drive */
307 { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
308 /*quirks*/0, /*mintags*/0, /*maxtags*/0
309 },
310 {
311 /*
312 * Unfortunately, the Quantum Atlas III has the same
313 * problem as the Atlas II drives above.
314 * Reported by: "Johan Granlund" <johan@granlund.nu>
315 *
316 * For future reference, the drive with the problem was:
317 * QUANTUM QM39100TD-SW N1B0
318 *
319 * It's possible that Quantum will fix the problem in later
320 * firmware revisions. If that happens, the quirk entry
321 * will need to be made specific to the firmware revisions
322 * with the problem.
323 *
324 */
325 /* Reports QUEUE FULL for temporary resource shortages */
326 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
327 /*quirks*/0, /*mintags*/24, /*maxtags*/32
328 },
329 {
330 /*
331 * 18 Gig Atlas III, same problem as the 9G version.
332 * Reported by: Andre Albsmeier
333 * <andre.albsmeier@mchp.siemens.de>
334 *
335 * For future reference, the drive with the problem was:
336 * QUANTUM QM318000TD-S N491
337 */
338 /* Reports QUEUE FULL for temporary resource shortages */
339 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
340 /*quirks*/0, /*mintags*/24, /*maxtags*/32
341 },
342 {
343 /*
344 * Broken tagged queuing drive
345 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
346 * and: Martin Renters <martin@tdc.on.ca>
347 */
348 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
349 /*quirks*/0, /*mintags*/0, /*maxtags*/0
350 },
351 /*
352 * The Seagate Medalist Pro drives have very poor write
353 * performance with anything more than 2 tags.
354 *
355 * Reported by: Paul van der Zwan <paulz@trantor.xs4all.nl>
356 * Drive: <SEAGATE ST36530N 1444>
357 *
358 * Reported by: Jeremy Lea <reg@shale.csir.co.za>
359 * Drive: <SEAGATE ST34520W 1281>
360 *
361 * No one has actually reported that the 9G version
362 * (ST39140*) of the Medalist Pro has the same problem, but
363 * we're assuming that it does because the 4G and 6.5G
364 * versions of the drive are broken.
365 */
366 {
367 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
368 /*quirks*/0, /*mintags*/2, /*maxtags*/2
369 },
370 {
371 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
372 /*quirks*/0, /*mintags*/2, /*maxtags*/2
373 },
374 {
375 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
376 /*quirks*/0, /*mintags*/2, /*maxtags*/2
377 },
378 {
379 /*
380 * Slow when tagged queueing is enabled. Write performance
381 * steadily drops off with more and more concurrent
382 * transactions. Best sequential write performance with
383 * tagged queueing turned off and write caching turned on.
384 *
385 * PR: kern/10398
386 * Submitted by: Hideaki Okada <hokada@isl.melco.co.jp>
387 * Drive: DCAS-34330 w/ "S65A" firmware.
388 *
389 * The drive with the problem had the "S65A" firmware
390 * revision, and has also been reported (by Stephen J.
391 * Roznowski <sjr@home.net>) for a drive with the "S61A"
392 * firmware revision.
393 *
394 * Although no one has reported problems with the 2 gig
395 * version of the DCAS drive, the assumption is that it
396 * has the same problems as the 4 gig version. Therefore
     397	 * this quirk entry disables tagged queueing for all
398 * DCAS drives.
399 */
400 { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
401 /*quirks*/0, /*mintags*/0, /*maxtags*/0
402 },
403 {
404 /* Broken tagged queuing drive */
405 { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
406 /*quirks*/0, /*mintags*/0, /*maxtags*/0
407 },
408 {
409 /* Broken tagged queuing drive */
410 { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
411 /*quirks*/0, /*mintags*/0, /*maxtags*/0
412 },
413 {
     414		/* This device supports only LUN 0 */
415 { T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
416 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
417 },
418 {
419 /*
420 * Broken tagged queuing drive.
421 * Submitted by:
422 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
423 * in PR kern/9535
424 */
425 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
426 /*quirks*/0, /*mintags*/0, /*maxtags*/0
427 },
428 {
429 /*
430 * Slow when tagged queueing is enabled. (1.5MB/sec versus
431 * 8MB/sec.)
432 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
433 * Best performance with these drives is achieved with
434 * tagged queueing turned off, and write caching turned on.
435 */
436 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
437 /*quirks*/0, /*mintags*/0, /*maxtags*/0
438 },
439 {
440 /*
441 * Slow when tagged queueing is enabled. (1.5MB/sec versus
442 * 8MB/sec.)
443 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
444 * Best performance with these drives is achieved with
445 * tagged queueing turned off, and write caching turned on.
446 */
447 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
448 /*quirks*/0, /*mintags*/0, /*maxtags*/0
449 },
450 {
451 /*
452 * Doesn't handle queue full condition correctly,
453 * so we need to limit maxtags to what the device
454 * can handle instead of determining this automatically.
455 */
456 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
457 /*quirks*/0, /*mintags*/2, /*maxtags*/32
458 },
459 {
460 /* Really only one LUN */
461 { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
462 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
463 },
464 {
465 /* I can't believe we need a quirk for DPT volumes. */
466 { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
467 CAM_QUIRK_NOLUNS,
468 /*mintags*/0, /*maxtags*/255
469 },
470 {
471 /*
472 * Many Sony CDROM drives don't like multi-LUN probing.
473 */
474 { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
475 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
476 },
477 {
478 /*
479 * This drive doesn't like multiple LUN probing.
480 * Submitted by: Parag Patel <parag@cgt.com>
481 */
482 { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
483 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
484 },
485 {
486 { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
487 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
488 },
489 {
490 /*
491 * The 8200 doesn't like multi-lun probing, and probably
     492		 * doesn't like serial number requests either.
493 */
494 {
495 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
496 "EXB-8200*", "*"
497 },
498 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
499 },
500 {
501 /*
502 * Let's try the same as above, but for a drive that says
503 * it's an IPL-6860 but is actually an EXB 8200.
504 */
505 {
506 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
507 "IPL-6860*", "*"
508 },
509 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
510 },
511 {
512 /*
513 * These Hitachi drives don't like multi-lun probing.
514 * The PR submitter has a DK319H, but says that the Linux
515 * kernel has a similar work-around for the DK312 and DK314,
516 * so all DK31* drives are quirked here.
517 * PR: misc/18793
518 * Submitted by: Paul Haddad <paul@pth.com>
519 */
520 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
521 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
522 },
523 {
524 /*
     525		 * The Hitachi CJ series with J8A8 firmware apparently has
526 * problems with tagged commands.
527 * PR: 23536
528 * Reported by: amagai@nue.org
529 */
530 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
531 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
532 },
533 {
534 /*
535 * These are the large storage arrays.
536 * Submitted by: William Carrel <william.carrel@infospace.com>
537 */
538 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
539 CAM_QUIRK_HILUNS, 2, 1024
540 },
541 {
542 /*
543 * This old revision of the TDC3600 is also SCSI-1, and
544 * hangs upon serial number probing.
545 */
546 {
547 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
548 " TDC 3600", "U07:"
549 },
550 CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
551 },
552 {
553 /*
     554		 * Would respond to all LUNs if asked for.
555 */
556 {
557 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
558 "CP150", "*"
559 },
560 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
561 },
562 {
563 /*
     564		 * Would respond to all LUNs if asked for.
565 */
566 {
567 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
568 "96X2*", "*"
569 },
570 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
571 },
572 {
573 /* Submitted by: Matthew Dodd <winter@jurai.net> */
574 { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
575 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
576 },
577 {
578 /* Submitted by: Matthew Dodd <winter@jurai.net> */
579 { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
580 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
581 },
582 {
583 /* TeraSolutions special settings for TRC-22 RAID */
584 { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
585 /*quirks*/0, /*mintags*/55, /*maxtags*/255
586 },
587 {
588 /* Veritas Storage Appliance */
589 { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
590 CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
591 },
592 {
593 /*
594 * Would respond to all LUNs. Device type and removable
595 * flag are jumper-selectable.
596 */
597 { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
598 "Tahiti 1", "*"
599 },
600 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
601 },
602 {
603 /* EasyRAID E5A aka. areca ARC-6010 */
604 { T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
605 CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
606 },
607 {
608 { T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" },
609 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
610 },
611 {
612 /* Default tagged queuing parameters for all devices */
613 {
614 T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
615 /*vendor*/"*", /*product*/"*", /*revision*/"*"
616 },
617 /*quirks*/0, /*mintags*/2, /*maxtags*/255
618 },
619};
620
621static const int xpt_quirk_table_size =
622 sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
623
624typedef enum {
625 DM_RET_COPY = 0x01,
626 DM_RET_FLAG_MASK = 0x0f,
627 DM_RET_NONE = 0x00,
628 DM_RET_STOP = 0x10,
629 DM_RET_DESCEND = 0x20,
630 DM_RET_ERROR = 0x30,
631 DM_RET_ACTION_MASK = 0xf0
632} dev_match_ret;
633
634typedef enum {
635 XPT_DEPTH_BUS,
636 XPT_DEPTH_TARGET,
637 XPT_DEPTH_DEVICE,
638 XPT_DEPTH_PERIPH
639} xpt_traverse_depth;
640
641struct xpt_traverse_config {
642 xpt_traverse_depth depth;
643 void *tr_func;
644 void *tr_arg;
645};
646
647typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg);
648typedef int xpt_targetfunc_t (struct cam_et *target, void *arg);
649typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg);
650typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg);
651typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
652
653/* Transport layer configuration information */
654static struct xpt_softc xsoftc;
655
656/* Queues for our software interrupt handler */
657typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
658typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
659static cam_simq_t cam_simq;
660static struct mtx cam_simq_lock;
661
662/* Pointers to software interrupt handlers */
663static void *cambio_ih;
664
665struct cam_periph *xpt_periph;
666
667static periph_init_t xpt_periph_init;
668
669static periph_init_t probe_periph_init;
670
671static struct periph_driver xpt_driver =
672{
673 xpt_periph_init, "xpt",
674 TAILQ_HEAD_INITIALIZER(xpt_driver.units)
675};
676
677static struct periph_driver probe_driver =
678{
679 probe_periph_init, "probe",
680 TAILQ_HEAD_INITIALIZER(probe_driver.units)
681};
682
683PERIPHDRIVER_DECLARE(xpt, xpt_driver);
684PERIPHDRIVER_DECLARE(probe, probe_driver);
685
686
687static d_open_t xptopen;
688static d_close_t xptclose;
689static d_ioctl_t xptioctl;
690
691static struct cdevsw xpt_cdevsw = {
692 .d_version = D_VERSION,
693 .d_flags = 0,
694 .d_open = xptopen,
695 .d_close = xptclose,
696 .d_ioctl = xptioctl,
697 .d_name = "xpt",
698};
699
700
701/* Storage for debugging datastructures */
702#ifdef CAMDEBUG
703struct cam_path *cam_dpath;
704u_int32_t cam_dflags;
705u_int32_t cam_debug_delay;
706#endif
707
708#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
709#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
710#endif
711
712/*
713 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
714 * enabled. Also, the user must have either none, or all of CAM_DEBUG_BUS,
715 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
716 */
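/*
 * For example, a kernel configuration enabling these options might carry
 * a fragment like the one below (illustrative only; the bus, target and
 * lun values shown are placeholders for the device being debugged):
 *
 *	options 	CAMDEBUG
 *	options 	CAM_DEBUG_BUS=0
 *	options 	CAM_DEBUG_TARGET=1
 *	options 	CAM_DEBUG_LUN=0
 */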
717#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
718 || defined(CAM_DEBUG_LUN)
719#ifdef CAMDEBUG
720#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
721 || !defined(CAM_DEBUG_LUN)
722#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
723 and CAM_DEBUG_LUN"
724#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
725#else /* !CAMDEBUG */
726#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
727#endif /* CAMDEBUG */
728#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
729
730/* Our boot-time initialization hook */
731static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
732
733static moduledata_t cam_moduledata = {
734 "cam",
735 cam_module_event_handler,
736 NULL
737};
738
739static int xpt_init(void *);
740
741DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
742MODULE_VERSION(cam, 1);
743
744
745static cam_status xpt_compile_path(struct cam_path *new_path,
746 struct cam_periph *perph,
747 path_id_t path_id,
748 target_id_t target_id,
749 lun_id_t lun_id);
750
751static void xpt_release_path(struct cam_path *path);
752
753static void xpt_async_bcast(struct async_list *async_head,
754 u_int32_t async_code,
755 struct cam_path *path,
756 void *async_arg);
757static void xpt_dev_async(u_int32_t async_code,
758 struct cam_eb *bus,
759 struct cam_et *target,
760 struct cam_ed *device,
761 void *async_arg);
762static path_id_t xptnextfreepathid(void);
763static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
764static union ccb *xpt_get_ccb(struct cam_ed *device);
765static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
766 u_int32_t new_priority);
767static void xpt_run_dev_allocq(struct cam_eb *bus);
768static void xpt_run_dev_sendq(struct cam_eb *bus);
769static timeout_t xpt_release_devq_timeout;
770static void xpt_release_simq_timeout(void *arg) __unused;
771static void xpt_release_bus(struct cam_eb *bus);
772static void xpt_release_devq_device(struct cam_ed *dev, u_int count,
773 int run_queue);
774static struct cam_et*
775 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
776static void xpt_release_target(struct cam_eb *bus, struct cam_et *target);
777static struct cam_ed*
778 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
779 lun_id_t lun_id);
780static void xpt_release_device(struct cam_eb *bus, struct cam_et *target,
781 struct cam_ed *device);
782static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
783static struct cam_eb*
784 xpt_find_bus(path_id_t path_id);
785static struct cam_et*
786 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
787static struct cam_ed*
788 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
789static void xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
790static void xpt_scan_lun(struct cam_periph *periph,
791 struct cam_path *path, cam_flags flags,
792 union ccb *ccb);
793static void xptscandone(struct cam_periph *periph, union ccb *done_ccb);
794static xpt_busfunc_t xptconfigbuscountfunc;
795static xpt_busfunc_t xptconfigfunc;
796static void xpt_config(void *arg);
797static xpt_devicefunc_t xptpassannouncefunc;
798static void xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
799static void xptaction(struct cam_sim *sim, union ccb *work_ccb);
800static void xptpoll(struct cam_sim *sim);
801static void camisr(void *);
802static void camisr_runqueue(void *);
803static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns,
804 u_int num_patterns, struct cam_eb *bus);
805static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns,
806 u_int num_patterns,
807 struct cam_ed *device);
808static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns,
809 u_int num_patterns,
810 struct cam_periph *periph);
811static xpt_busfunc_t xptedtbusfunc;
812static xpt_targetfunc_t xptedttargetfunc;
813static xpt_devicefunc_t xptedtdevicefunc;
814static xpt_periphfunc_t xptedtperiphfunc;
815static xpt_pdrvfunc_t xptplistpdrvfunc;
816static xpt_periphfunc_t xptplistperiphfunc;
817static int xptedtmatch(struct ccb_dev_match *cdm);
818static int xptperiphlistmatch(struct ccb_dev_match *cdm);
819static int xptbustraverse(struct cam_eb *start_bus,
820 xpt_busfunc_t *tr_func, void *arg);
821static int xpttargettraverse(struct cam_eb *bus,
822 struct cam_et *start_target,
823 xpt_targetfunc_t *tr_func, void *arg);
824static int xptdevicetraverse(struct cam_et *target,
825 struct cam_ed *start_device,
826 xpt_devicefunc_t *tr_func, void *arg);
827static int xptperiphtraverse(struct cam_ed *device,
828 struct cam_periph *start_periph,
829 xpt_periphfunc_t *tr_func, void *arg);
830static int xptpdrvtraverse(struct periph_driver **start_pdrv,
831 xpt_pdrvfunc_t *tr_func, void *arg);
832static int xptpdperiphtraverse(struct periph_driver **pdrv,
833 struct cam_periph *start_periph,
834 xpt_periphfunc_t *tr_func,
835 void *arg);
836static xpt_busfunc_t xptdefbusfunc;
837static xpt_targetfunc_t xptdeftargetfunc;
838static xpt_devicefunc_t xptdefdevicefunc;
839static xpt_periphfunc_t xptdefperiphfunc;
840static int xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
841static int xpt_for_all_devices(xpt_devicefunc_t *tr_func,
842 void *arg);
843static xpt_devicefunc_t xptsetasyncfunc;
844static xpt_busfunc_t xptsetasyncbusfunc;
845static cam_status xptregister(struct cam_periph *periph,
846 void *arg);
847static cam_status proberegister(struct cam_periph *periph,
848 void *arg);
849static void probeschedule(struct cam_periph *probe_periph);
850static void probestart(struct cam_periph *periph, union ccb *start_ccb);
851static void proberequestdefaultnegotiation(struct cam_periph *periph);
852static int proberequestbackoff(struct cam_periph *periph,
853 struct cam_ed *device);
854static void probedone(struct cam_periph *periph, union ccb *done_ccb);
855static void probecleanup(struct cam_periph *periph);
856static void xpt_find_quirk(struct cam_ed *device);
857static void xpt_devise_transport(struct cam_path *path);
858static void xpt_set_transfer_settings(struct ccb_trans_settings *cts,
859 struct cam_ed *device,
860 int async_update);
861static void xpt_toggle_tags(struct cam_path *path);
862static void xpt_start_tags(struct cam_path *path);
863static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
864 struct cam_ed *dev);
865static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
866 struct cam_ed *dev);
867static __inline int periph_is_queued(struct cam_periph *periph);
868static __inline int device_is_alloc_queued(struct cam_ed *device);
869static __inline int device_is_send_queued(struct cam_ed *device);
870static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
871
872static __inline int
873xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
874{
875 int retval;
876
877 if (dev->ccbq.devq_openings > 0) {
878 if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
879 cam_ccbq_resize(&dev->ccbq,
880 dev->ccbq.dev_openings
881 + dev->ccbq.dev_active);
882 dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
883 }
884 /*
885 * The priority of a device waiting for CCB resources
     886	 * is that of the highest priority peripheral driver
887 * enqueued.
888 */
889 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
890 &dev->alloc_ccb_entry.pinfo,
891 CAMQ_GET_HEAD(&dev->drvq)->priority);
892 } else {
893 retval = 0;
894 }
895
896 return (retval);
897}
898
899static __inline int
900xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
901{
902 int retval;
903
904 if (dev->ccbq.dev_openings > 0) {
905 /*
906 * The priority of a device waiting for controller
     907	 * resources is that of the highest priority CCB
908 * enqueued.
909 */
910 retval =
911 xpt_schedule_dev(&bus->sim->devq->send_queue,
912 &dev->send_ccb_entry.pinfo,
913 CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
914 } else {
915 retval = 0;
916 }
917 return (retval);
918}
919
920static __inline int
921periph_is_queued(struct cam_periph *periph)
922{
923 return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
924}
925
926static __inline int
927device_is_alloc_queued(struct cam_ed *device)
928{
929 return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
930}
931
932static __inline int
933device_is_send_queued(struct cam_ed *device)
934{
935 return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
936}
937
938static __inline int
939dev_allocq_is_runnable(struct cam_devq *devq)
940{
941 /*
942 * Have work to do.
943 * Have space to do more work.
944 * Allowed to do work.
945 */
946 return ((devq->alloc_queue.qfrozen_cnt == 0)
947 && (devq->alloc_queue.entries > 0)
948 && (devq->alloc_openings > 0));
949}
950
951static void
952xpt_periph_init()
953{
954 make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
955}
956
957static void
958probe_periph_init()
959{
960}
961
962
963static void
964xptdone(struct cam_periph *periph, union ccb *done_ccb)
965{
966 /* Caller will release the CCB */
967 wakeup(&done_ccb->ccb_h.cbfcnp);
968}
969
970static int
971xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
972{
973
974 /*
975 * Only allow read-write access.
976 */
977 if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
978 return(EPERM);
979
980 /*
981 * We don't allow nonblocking access.
982 */
983 if ((flags & O_NONBLOCK) != 0) {
984 printf("%s: can't do nonblocking access\n", devtoname(dev));
985 return(ENODEV);
986 }
987
988 /* Mark ourselves open */
989 mtx_lock(&xsoftc.xpt_lock);
990 xsoftc.flags |= XPT_FLAG_OPEN;
991 mtx_unlock(&xsoftc.xpt_lock);
992
993 return(0);
994}
995
996static int
997xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
998{
999
1000 /* Mark ourselves closed */
1001 mtx_lock(&xsoftc.xpt_lock);
1002 xsoftc.flags &= ~XPT_FLAG_OPEN;
1003 mtx_unlock(&xsoftc.xpt_lock);
1004
1005 return(0);
1006}
1007
1008/*
1009 * Don't automatically grab the xpt softc lock here even though this is going
1010 * through the xpt device. The xpt device is really just a back door for
1011 * accessing other devices and SIMs, so the right thing to do is to grab
1012 * the appropriate SIM lock once the bus/SIM is located.
1013 */
1014static int
1015xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
1016{
1017 int error;
1018
1019 error = 0;
1020
1021 switch(cmd) {
1022 /*
1023 * For the transport layer CAMIOCOMMAND ioctl, we really only want
1024 * to accept CCB types that don't quite make sense to send through a
1025 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
1026 * in the CAM spec.
1027 */
1028 case CAMIOCOMMAND: {
1029 union ccb *ccb;
1030 union ccb *inccb;
1031 struct cam_eb *bus;
1032
1033 inccb = (union ccb *)addr;
1034
1035 bus = xpt_find_bus(inccb->ccb_h.path_id);
1036 if (bus == NULL) {
1037 error = EINVAL;
1038 break;
1039 }
1040
1041 switch(inccb->ccb_h.func_code) {
1042 case XPT_SCAN_BUS:
1043 case XPT_RESET_BUS:
1044 if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
1045 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
1046 error = EINVAL;
1047 break;
1048 }
1049 /* FALLTHROUGH */
1050 case XPT_PATH_INQ:
1051 case XPT_ENG_INQ:
1052 case XPT_SCAN_LUN:
1053
1054 ccb = xpt_alloc_ccb();
1055
1056 CAM_SIM_LOCK(bus->sim);
1057
1058 /*
1059 * Create a path using the bus, target, and lun the
1060 * user passed in.
1061 */
1062 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
1063 inccb->ccb_h.path_id,
1064 inccb->ccb_h.target_id,
1065 inccb->ccb_h.target_lun) !=
1066 CAM_REQ_CMP){
1067 error = EINVAL;
1068 CAM_SIM_UNLOCK(bus->sim);
1069 xpt_free_ccb(ccb);
1070 break;
1071 }
1072 /* Ensure all of our fields are correct */
1073 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
1074 inccb->ccb_h.pinfo.priority);
1075 xpt_merge_ccb(ccb, inccb);
1076 ccb->ccb_h.cbfcnp = xptdone;
1077 cam_periph_runccb(ccb, NULL, 0, 0, NULL);
1078 bcopy(ccb, inccb, sizeof(union ccb));
1079 xpt_free_path(ccb->ccb_h.path);
1080 xpt_free_ccb(ccb);
1081 CAM_SIM_UNLOCK(bus->sim);
1082 break;
1083
1084 case XPT_DEBUG: {
1085 union ccb ccb;
1086
1087 /*
1088 * This is an immediate CCB, so it's okay to
1089 * allocate it on the stack.
1090 */
1091
1092 CAM_SIM_LOCK(bus->sim);
1093
1094 /*
1095 * Create a path using the bus, target, and lun the
1096 * user passed in.
1097 */
1098 if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
1099 inccb->ccb_h.path_id,
1100 inccb->ccb_h.target_id,
1101 inccb->ccb_h.target_lun) !=
1102 CAM_REQ_CMP){
1103 error = EINVAL;
1104 CAM_SIM_UNLOCK(bus->sim);
1105 break;
1106 }
1107 /* Ensure all of our fields are correct */
1108 xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
1109 inccb->ccb_h.pinfo.priority);
1110 xpt_merge_ccb(&ccb, inccb);
1111 ccb.ccb_h.cbfcnp = xptdone;
1112 xpt_action(&ccb);
1113 CAM_SIM_UNLOCK(bus->sim);
1114 bcopy(&ccb, inccb, sizeof(union ccb));
1115 xpt_free_path(ccb.ccb_h.path);
1116 break;
1117
1118 }
1119 case XPT_DEV_MATCH: {
1120 struct cam_periph_map_info mapinfo;
1121 struct cam_path *old_path;
1122
1123 /*
1124 * We can't deal with physical addresses for this
1125 * type of transaction.
1126 */
1127 if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
1128 error = EINVAL;
1129 break;
1130 }
1131
1132 /*
1133 * Save this in case the caller had it set to
1134 * something in particular.
1135 */
1136 old_path = inccb->ccb_h.path;
1137
1138 /*
1139 * We really don't need a path for the matching
1140 * code. The path is needed because of the
1141 * debugging statements in xpt_action(). They
1142 * assume that the CCB has a valid path.
1143 */
1144 inccb->ccb_h.path = xpt_periph->path;
1145
1146 bzero(&mapinfo, sizeof(mapinfo));
1147
1148 /*
1149 * Map the pattern and match buffers into kernel
1150 * virtual address space.
1151 */
1152 error = cam_periph_mapmem(inccb, &mapinfo);
1153
1154 if (error) {
1155 inccb->ccb_h.path = old_path;
1156 break;
1157 }
1158
1159 /*
1160 * This is an immediate CCB, we can send it on directly.
1161 */
1162 xpt_action(inccb);
1163
1164 /*
1165 * Map the buffers back into user space.
1166 */
1167 cam_periph_unmapmem(inccb, &mapinfo);
1168
1169 inccb->ccb_h.path = old_path;
1170
1171 error = 0;
1172 break;
1173 }
1174 default:
1175 error = ENOTSUP;
1176 break;
1177 }
1178 xpt_release_bus(bus);
1179 break;
1180 }
1181 /*
    1182	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
    1183	 * with the peripheral driver name and unit name filled in. The other
1184 * fields don't really matter as input. The passthrough driver name
1185 * ("pass"), and unit number are passed back in the ccb. The current
1186 * device generation number, and the index into the device peripheral
1187 * driver list, and the status are also passed back. Note that
1188 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
1189 * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is
1190 * (or rather should be) impossible for the device peripheral driver
1191 * list to change since we look at the whole thing in one pass, and
1192 * we do it with lock protection.
1193 *
1194 */
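	/*
	 * Illustrative userland usage (not part of this file): a caller
	 * would typically open /dev/xpt0 read-write, fill in the peripheral
	 * name and unit number of an existing device, and issue the ioctl
	 * on the resulting descriptor.  The "da" name and unit 0 below are
	 * only example values:
	 *
	 *	union ccb ccb;
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	strlcpy(ccb.cgdl.periph_name, "da",
	 *	    sizeof(ccb.cgdl.periph_name));
	 *	ccb.cgdl.unit_number = 0;
	 *	if (ioctl(xpt_fd, CAMGETPASSTHRU, &ccb) == 0)
	 *		printf("%s%d\n", ccb.cgdl.periph_name,
	 *		    ccb.cgdl.unit_number);
	 */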
1195 case CAMGETPASSTHRU: {
1196 union ccb *ccb;
1197 struct cam_periph *periph;
1198 struct periph_driver **p_drv;
1199 char *name;
1200 u_int unit;
1201 u_int cur_generation;
1202 int base_periph_found;
1203 int splbreaknum;
1204
1205 ccb = (union ccb *)addr;
1206 unit = ccb->cgdl.unit_number;
1207 name = ccb->cgdl.periph_name;
1208 /*
1209 * Every 100 devices, we want to drop our lock protection to
1210 * give the software interrupt handler a chance to run.
1211 * Most systems won't run into this check, but this should
1212 * avoid starvation in the software interrupt handler in
1213 * large systems.
1214 */
1215 splbreaknum = 100;
1216
1217 ccb = (union ccb *)addr;
1218
1219 base_periph_found = 0;
1220
1221 /*
1222 * Sanity check -- make sure we don't get a null peripheral
1223 * driver name.
1224 */
1225 if (*ccb->cgdl.periph_name == '\0') {
1226 error = EINVAL;
1227 break;
1228 }
1229
1230 /* Keep the list from changing while we traverse it */
1231 mtx_lock(&xsoftc.xpt_topo_lock);
1232ptstartover:
1233 cur_generation = xsoftc.xpt_generation;
1234
1235 /* first find our driver in the list of drivers */
1236 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
1237 if (strcmp((*p_drv)->driver_name, name) == 0)
1238 break;
1239
1240 if (*p_drv == NULL) {
1241 mtx_unlock(&xsoftc.xpt_topo_lock);
1242 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1243 ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1244 *ccb->cgdl.periph_name = '\0';
1245 ccb->cgdl.unit_number = 0;
1246 error = ENOENT;
1247 break;
1248 }
1249
1250 /*
1251 * Run through every peripheral instance of this driver
1252 * and check to see whether it matches the unit passed
1253 * in by the user. If it does, get out of the loops and
1254 * find the passthrough driver associated with that
1255 * peripheral driver.
1256 */
1257 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
1258 periph = TAILQ_NEXT(periph, unit_links)) {
1259
1260 if (periph->unit_number == unit) {
1261 break;
1262 } else if (--splbreaknum == 0) {
1263 mtx_unlock(&xsoftc.xpt_topo_lock);
1264 mtx_lock(&xsoftc.xpt_topo_lock);
1265 splbreaknum = 100;
1266 if (cur_generation != xsoftc.xpt_generation)
1267 goto ptstartover;
1268 }
1269 }
1270 /*
1271 * If we found the peripheral driver that the user passed
1272 * in, go through all of the peripheral drivers for that
1273 * particular device and look for a passthrough driver.
1274 */
1275 if (periph != NULL) {
1276 struct cam_ed *device;
1277 int i;
1278
1279 base_periph_found = 1;
1280 device = periph->path->device;
1281 for (i = 0, periph = SLIST_FIRST(&device->periphs);
1282 periph != NULL;
1283 periph = SLIST_NEXT(periph, periph_links), i++) {
1284 /*
1285 * Check to see whether we have a
1286 * passthrough device or not.
1287 */
1288 if (strcmp(periph->periph_name, "pass") == 0) {
1289 /*
1290 * Fill in the getdevlist fields.
1291 */
1292 strcpy(ccb->cgdl.periph_name,
1293 periph->periph_name);
1294 ccb->cgdl.unit_number =
1295 periph->unit_number;
1296 if (SLIST_NEXT(periph, periph_links))
1297 ccb->cgdl.status =
1298 CAM_GDEVLIST_MORE_DEVS;
1299 else
1300 ccb->cgdl.status =
1301 CAM_GDEVLIST_LAST_DEVICE;
1302 ccb->cgdl.generation =
1303 device->generation;
1304 ccb->cgdl.index = i;
1305 /*
1306 * Fill in some CCB header fields
1307 * that the user may want.
1308 */
1309 ccb->ccb_h.path_id =
1310 periph->path->bus->path_id;
1311 ccb->ccb_h.target_id =
1312 periph->path->target->target_id;
1313 ccb->ccb_h.target_lun =
1314 periph->path->device->lun_id;
1315 ccb->ccb_h.status = CAM_REQ_CMP;
1316 break;
1317 }
1318 }
1319 }
1320
1321 /*
1322 * If the periph is null here, one of two things has
1323 * happened. The first possibility is that we couldn't
1324 * find the unit number of the particular peripheral driver
1325 * that the user is asking about. e.g. the user asks for
1326 * the passthrough driver for "da11". We find the list of
1327 * "da" peripherals all right, but there is no unit 11.
1328 * The other possibility is that we went through the list
1329 * of peripheral drivers attached to the device structure,
1330 * but didn't find one with the name "pass". Either way,
1331 * we return ENOENT, since we couldn't find something.
1332 */
1333 if (periph == NULL) {
1334 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1335 ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1336 *ccb->cgdl.periph_name = '\0';
1337 ccb->cgdl.unit_number = 0;
1338 error = ENOENT;
1339 /*
1340 * It is unfortunate that this is even necessary,
1341 * but there are many, many clueless users out there.
1342 * If this is true, the user is looking for the
1343 * passthrough driver, but doesn't have one in his
1344 * kernel.
1345 */
1346 if (base_periph_found == 1) {
1347 printf("xptioctl: pass driver is not in the "
1348 "kernel\n");
1349 printf("xptioctl: put \"device pass\" in "
1350 "your kernel config file\n");
1351 }
1352 }
1353 mtx_unlock(&xsoftc.xpt_topo_lock);
1354 break;
1355 }
1356 default:
1357 error = ENOTTY;
1358 break;
1359 }
1360
1361 return(error);
1362}
1363
1364static int
1365cam_module_event_handler(module_t mod, int what, void *arg)
1366{
1367 int error;
1368
1369 switch (what) {
1370 case MOD_LOAD:
1371 if ((error = xpt_init(NULL)) != 0)
1372 return (error);
1373 break;
1374 case MOD_UNLOAD:
1375 return EBUSY;
1376 default:
1377 return EOPNOTSUPP;
1378 }
1379
1380 return 0;
1381}
1382
1383/* thread to handle bus rescans */
1384static void
1385xpt_scanner_thread(void *dummy)
1386{
1387 cam_isrq_t queue;
1388 union ccb *ccb;
1389 struct cam_sim *sim;
1390
1391 for (;;) {
1392 /*
1393 * Wait for a rescan request to come in. When it does, splice
1394 * it onto a queue from local storage so that the xpt lock
1395 * doesn't need to be held while the requests are being
1396 * processed.
1397 */
1398 xpt_lock_buses();
1399 msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
1400 "ccb_scanq", 0);
1401 TAILQ_INIT(&queue);
1402 TAILQ_CONCAT(&queue, &xsoftc.ccb_scanq, sim_links.tqe);
1403 xpt_unlock_buses();
1404
1405 while ((ccb = (union ccb *)TAILQ_FIRST(&queue)) != NULL) {
1406 TAILQ_REMOVE(&queue, &ccb->ccb_h, sim_links.tqe);
1407
1408 sim = ccb->ccb_h.path->bus->sim;
1409 CAM_SIM_LOCK(sim);
1410
1411 ccb->ccb_h.func_code = XPT_SCAN_BUS;
1412 ccb->ccb_h.cbfcnp = xptdone;
1413 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5);
1414 cam_periph_runccb(ccb, NULL, 0, 0, NULL);
1415 xpt_free_path(ccb->ccb_h.path);
1416 xpt_free_ccb(ccb);
1417 CAM_SIM_UNLOCK(sim);
1418 }
1419 }
1420}
1421
1422void
1423xpt_rescan(union ccb *ccb)
1424{
1425 struct ccb_hdr *hdr;
1426
1427 /*
1428 * Don't make duplicate entries for the same paths.
1429 */
1430 xpt_lock_buses();
1431 TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
1432 if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
1433 xpt_unlock_buses();
1434 xpt_print(ccb->ccb_h.path, "rescan already queued\n");
1435 xpt_free_path(ccb->ccb_h.path);
1436 xpt_free_ccb(ccb);
1437 return;
1438 }
1439 }
1440 TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
1441 wakeup(&xsoftc.ccb_scanq);
1442 xpt_unlock_buses();
1443}
1444
1445/* Functions accessed by the peripheral drivers */
1446static int
1447xpt_init(void *dummy)
1448{
1449 struct cam_sim *xpt_sim;
1450 struct cam_path *path;
1451 struct cam_devq *devq;
1452 cam_status status;
1453
1454 TAILQ_INIT(&xsoftc.xpt_busses);
1455 TAILQ_INIT(&cam_simq);
1456 TAILQ_INIT(&xsoftc.ccb_scanq);
1457 STAILQ_INIT(&xsoftc.highpowerq);
1458 xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
1459
1460 mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF);
1461 mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
1462 mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);
1463
1464 /*
    1465	 * The xpt layer is, itself, the equivalent of a SIM.
1466 * Allow 16 ccbs in the ccb pool for it. This should
1467 * give decent parallelism when we probe busses and
1468 * perform other XPT functions.
1469 */
1470 devq = cam_simq_alloc(16);
1471 xpt_sim = cam_sim_alloc(xptaction,
1472 xptpoll,
1473 "xpt",
1474 /*softc*/NULL,
1475 /*unit*/0,
1476 /*mtx*/&xsoftc.xpt_lock,
1477 /*max_dev_transactions*/0,
1478 /*max_tagged_dev_transactions*/0,
1479 devq);
1480 if (xpt_sim == NULL)
1481 return (ENOMEM);
1482
1483 xpt_sim->max_ccbs = 16;
1484
1485 mtx_lock(&xsoftc.xpt_lock);
1486 if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
1487 printf("xpt_init: xpt_bus_register failed with status %#x,"
1488 " failing attach\n", status);
1489 return (EINVAL);
1490 }
1491
1492 /*
1493 * Looking at the XPT from the SIM layer, the XPT is
    1494	 * the equivalent of a peripheral driver. Allocate
1495 * a peripheral driver entry for us.
1496 */
1497 if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1498 CAM_TARGET_WILDCARD,
1499 CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1500 printf("xpt_init: xpt_create_path failed with status %#x,"
1501 " failing attach\n", status);
1502 return (EINVAL);
1503 }
1504
1505 cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1506 path, NULL, 0, xpt_sim);
1507 xpt_free_path(path);
1508 mtx_unlock(&xsoftc.xpt_lock);
1509
1510 /*
1511 * Register a callback for when interrupts are enabled.
1512 */
1513 xsoftc.xpt_config_hook =
1514 (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
1515 M_CAMXPT, M_NOWAIT | M_ZERO);
1516 if (xsoftc.xpt_config_hook == NULL) {
1517 printf("xpt_init: Cannot malloc config hook "
1518 "- failing attach\n");
1519 return (ENOMEM);
1520 }
1521
1522 xsoftc.xpt_config_hook->ich_func = xpt_config;
1523 if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
1524 free (xsoftc.xpt_config_hook, M_CAMXPT);
1525 printf("xpt_init: config_intrhook_establish failed "
1526 "- failing attach\n");
1527 }
1528
1529 /* fire up rescan thread */
1530 if (kproc_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) {
1531 printf("xpt_init: failed to create rescan thread\n");
1532 }
1533 /* Install our software interrupt handlers */
1534 swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE, &cambio_ih);
1535
1536 return (0);
1537}
1538
1539static cam_status
1540xptregister(struct cam_periph *periph, void *arg)
1541{
1542 struct cam_sim *xpt_sim;
1543
1544 if (periph == NULL) {
1545 printf("xptregister: periph was NULL!!\n");
1546 return(CAM_REQ_CMP_ERR);
1547 }
1548
1549 xpt_sim = (struct cam_sim *)arg;
1550 xpt_sim->softc = periph;
1551 xpt_periph = periph;
1552 periph->softc = NULL;
1553
1554 return(CAM_REQ_CMP);
1555}
1556
1557int32_t
1558xpt_add_periph(struct cam_periph *periph)
1559{
1560 struct cam_ed *device;
1561 int32_t status;
1562 struct periph_list *periph_head;
1563
1564 mtx_assert(periph->sim->mtx, MA_OWNED);
1565
1566 device = periph->path->device;
1567
1568 periph_head = &device->periphs;
1569
1570 status = CAM_REQ_CMP;
1571
1572 if (device != NULL) {
1573 /*
1574 * Make room for this peripheral
1575 * so it will fit in the queue
1576 * when it's scheduled to run
1577 */
1578 status = camq_resize(&device->drvq,
1579 device->drvq.array_size + 1);
1580
1581 device->generation++;
1582
1583 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1584 }
1585
1586 mtx_lock(&xsoftc.xpt_topo_lock);
1587 xsoftc.xpt_generation++;
1588 mtx_unlock(&xsoftc.xpt_topo_lock);
1589
1590 return (status);
1591}
1592
1593void
1594xpt_remove_periph(struct cam_periph *periph)
1595{
1596 struct cam_ed *device;
1597
1598 mtx_assert(periph->sim->mtx, MA_OWNED);
1599
1600 device = periph->path->device;
1601
1602 if (device != NULL) {
1603 struct periph_list *periph_head;
1604
1605 periph_head = &device->periphs;
1606
1607 /* Release the slot for this peripheral */
1608 camq_resize(&device->drvq, device->drvq.array_size - 1);
1609
1610 device->generation++;
1611
1612 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1613 }
1614
1615 mtx_lock(&xsoftc.xpt_topo_lock);
1616 xsoftc.xpt_generation++;
1617 mtx_unlock(&xsoftc.xpt_topo_lock);
1618}
1619
1620
1621void
1622xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1623{
1624 struct ccb_pathinq cpi;
1625 struct ccb_trans_settings cts;
1626 struct cam_path *path;
1627 u_int speed;
1628 u_int freq;
1629 u_int mb;
1630
1631 mtx_assert(periph->sim->mtx, MA_OWNED);
1632
1633 path = periph->path;
1634 /*
1635 * To ensure that this is printed in one piece,
1636 * mask out CAM interrupts.
1637 */
1638 printf("%s%d at %s%d bus %d target %d lun %d\n",
1639 periph->periph_name, periph->unit_number,
1640 path->bus->sim->sim_name,
1641 path->bus->sim->unit_number,
1642 path->bus->sim->bus_id,
1643 path->target->target_id,
1644 path->device->lun_id);
1645 printf("%s%d: ", periph->periph_name, periph->unit_number);
1646 scsi_print_inquiry(&path->device->inq_data);
1647 if (bootverbose && path->device->serial_num_len > 0) {
1648 /* Don't wrap the screen - print only the first 60 chars */
1649 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1650 periph->unit_number, path->device->serial_num);
1651 }
1652 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1653 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1654 cts.type = CTS_TYPE_CURRENT_SETTINGS;
1655 xpt_action((union ccb*)&cts);
1656 if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1657 return;
1658 }
1659
1660 /* Ask the SIM for its base transfer speed */
1661 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1662 cpi.ccb_h.func_code = XPT_PATH_INQ;
1663 xpt_action((union ccb *)&cpi);
1664
1665 speed = cpi.base_transfer_speed;
1666 freq = 0;
1667 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1668 struct ccb_trans_settings_spi *spi;
1669
1670 spi = &cts.xport_specific.spi;
1671 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
1672 && spi->sync_offset != 0) {
1673 freq = scsi_calc_syncsrate(spi->sync_period);
1674 speed = freq;
1675 }
1676
1677 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
1678 speed *= (0x01 << spi->bus_width);
1679 }
1680
1681 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1682 struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
1683 if (fc->valid & CTS_FC_VALID_SPEED) {
1684 speed = fc->bitrate;
1685 }
1686 }
1687
1688 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
1689 struct ccb_trans_settings_sas *sas = &cts.xport_specific.sas;
1690 if (sas->valid & CTS_SAS_VALID_SPEED) {
1691 speed = sas->bitrate;
1692 }
1693 }
1694
1695 mb = speed / 1000;
1696 if (mb > 0)
1697 printf("%s%d: %d.%03dMB/s transfers",
1698 periph->periph_name, periph->unit_number,
1699 mb, speed % 1000);
1700 else
1701 printf("%s%d: %dKB/s transfers", periph->periph_name,
1702 periph->unit_number, speed);
1703 /* Report additional information about SPI connections */
1704 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1705 struct ccb_trans_settings_spi *spi;
1706
1707 spi = &cts.xport_specific.spi;
1708 if (freq != 0) {
1709 printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
1710 freq % 1000,
1711 (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
1712 ? " DT" : "",
1713 spi->sync_offset);
1714 }
1715 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
1716 && spi->bus_width > 0) {
1717 if (freq != 0) {
1718 printf(", ");
1719 } else {
1720 printf(" (");
1721 }
1722 printf("%dbit)", 8 * (0x01 << spi->bus_width));
1723 } else if (freq != 0) {
1724 printf(")");
1725 }
1726 }
1727 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1728 struct ccb_trans_settings_fc *fc;
1729
1730 fc = &cts.xport_specific.fc;
1731 if (fc->valid & CTS_FC_VALID_WWNN)
1732 printf(" WWNN 0x%llx", (long long) fc->wwnn);
1733 if (fc->valid & CTS_FC_VALID_WWPN)
1734 printf(" WWPN 0x%llx", (long long) fc->wwpn);
1735 if (fc->valid & CTS_FC_VALID_PORT)
1736 printf(" PortID 0x%x", fc->port);
1737 }
1738
1739 if (path->device->inq_flags & SID_CmdQue
1740 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1741 printf("\n%s%d: Command Queueing Enabled",
1742 periph->periph_name, periph->unit_number);
1743 }
1744 printf("\n");
1745
1746 /*
1747 * We only want to print the caller's announce string if they've
    1748	 * passed one in.
1749 */
1750 if (announce_string != NULL)
1751 printf("%s%d: %s\n", periph->periph_name,
1752 periph->unit_number, announce_string);
1753}
1754
1755static dev_match_ret
1756xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1757 struct cam_eb *bus)
1758{
1759 dev_match_ret retval;
1760 int i;
1761
1762 retval = DM_RET_NONE;
1763
1764 /*
1765 * If we aren't given something to match against, that's an error.
1766 */
1767 if (bus == NULL)
1768 return(DM_RET_ERROR);
1769
1770 /*
1771 * If there are no match entries, then this bus matches no
1772 * matter what.
1773 */
1774 if ((patterns == NULL) || (num_patterns == 0))
1775 return(DM_RET_DESCEND | DM_RET_COPY);
1776
1777 for (i = 0; i < num_patterns; i++) {
1778 struct bus_match_pattern *cur_pattern;
1779
1780 /*
1781 * If the pattern in question isn't for a bus node, we
1782 * aren't interested. However, we do indicate to the
1783 * calling routine that we should continue descending the
1784 * tree, since the user wants to match against lower-level
1785 * EDT elements.
1786 */
1787 if (patterns[i].type != DEV_MATCH_BUS) {
1788 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1789 retval |= DM_RET_DESCEND;
1790 continue;
1791 }
1792
1793 cur_pattern = &patterns[i].pattern.bus_pattern;
1794
1795 /*
1796 * If they want to match any bus node, we give them any
    1797	 * bus node.
1798 */
1799 if (cur_pattern->flags == BUS_MATCH_ANY) {
1800 /* set the copy flag */
1801 retval |= DM_RET_COPY;
1802
1803 /*
1804 * If we've already decided on an action, go ahead
1805 * and return.
1806 */
1807 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1808 return(retval);
1809 }
1810
1811 /*
1812 * Not sure why someone would do this...
1813 */
1814 if (cur_pattern->flags == BUS_MATCH_NONE)
1815 continue;
1816
1817 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1818 && (cur_pattern->path_id != bus->path_id))
1819 continue;
1820
1821 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1822 && (cur_pattern->bus_id != bus->sim->bus_id))
1823 continue;
1824
1825 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1826 && (cur_pattern->unit_number != bus->sim->unit_number))
1827 continue;
1828
1829 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1830 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1831 DEV_IDLEN) != 0))
1832 continue;
1833
1834 /*
1835 * If we get to this point, the user definitely wants
1836 * information on this bus. So tell the caller to copy the
1837 * data out.
1838 */
1839 retval |= DM_RET_COPY;
1840
1841 /*
1842 * If the return action has been set to descend, then we
1843 * know that we've already seen a non-bus matching
1844 * expression, therefore we need to further descend the tree.
1845 * This won't change by continuing around the loop, so we
1846 * go ahead and return. If we haven't seen a non-bus
1847 * matching expression, we keep going around the loop until
1848 * we exhaust the matching expressions. We'll set the stop
1849 * flag once we fall out of the loop.
1850 */
1851 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1852 return(retval);
1853 }
1854
1855 /*
1856 * If the return action hasn't been set to descend yet, that means
1857 * we haven't seen anything other than bus matching patterns. So
1858 * tell the caller to stop descending the tree -- the user doesn't
1859 * want to match against lower level tree elements.
1860 */
1861 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1862 retval |= DM_RET_STOP;
1863
1864 return(retval);
1865}
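/*
 * An illustrative sketch (not code used by this file) of a pattern the
 * routine above would act on, a single entry selecting every bus whose
 * SIM is named "ahc":
 *
 *	struct dev_match_pattern p;
 *
 *	bzero(&p, sizeof(p));
 *	p.type = DEV_MATCH_BUS;
 *	p.pattern.bus_pattern.flags = BUS_MATCH_NAME;
 *	strncpy(p.pattern.bus_pattern.dev_name, "ahc", DEV_IDLEN);
 *
 * For such a pattern, matching busses come back with DM_RET_COPY set and,
 * since only bus patterns were supplied, DM_RET_STOP is OR'd in on return.
 */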
1866
1867static dev_match_ret
1868xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1869 struct cam_ed *device)
1870{
1871 dev_match_ret retval;
1872 int i;
1873
1874 retval = DM_RET_NONE;
1875
1876 /*
1877 * If we aren't given something to match against, that's an error.
1878 */
1879 if (device == NULL)
1880 return(DM_RET_ERROR);
1881
1882 /*
1883 * If there are no match entries, then this device matches no
1884 * matter what.
1885 */
1886 if ((patterns == NULL) || (num_patterns == 0))
1887 return(DM_RET_DESCEND | DM_RET_COPY);
1888
1889 for (i = 0; i < num_patterns; i++) {
1890 struct device_match_pattern *cur_pattern;
1891
1892 /*
1893 * If the pattern in question isn't for a device node, we
1894 * aren't interested.
1895 */
1896 if (patterns[i].type != DEV_MATCH_DEVICE) {
1897 if ((patterns[i].type == DEV_MATCH_PERIPH)
1898 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1899 retval |= DM_RET_DESCEND;
1900 continue;
1901 }
1902
1903 cur_pattern = &patterns[i].pattern.device_pattern;
1904
1905 /*
1906 * If they want to match any device node, we give them any
1907 * device node.
1908 */
1909 if (cur_pattern->flags == DEV_MATCH_ANY) {
1910 /* set the copy flag */
1911 retval |= DM_RET_COPY;
1912
1913
1914 /*
1915 * If we've already decided on an action, go ahead
1916 * and return.
1917 */
1918 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1919 return(retval);
1920 }
1921
1922 /*
1923 * Not sure why someone would do this...
1924 */
1925 if (cur_pattern->flags == DEV_MATCH_NONE)
1926 continue;
1927
1928 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1929 && (cur_pattern->path_id != device->target->bus->path_id))
1930 continue;
1931
1932 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1933 && (cur_pattern->target_id != device->target->target_id))
1934 continue;
1935
1936 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1937 && (cur_pattern->target_lun != device->lun_id))
1938 continue;
1939
1940 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1941 && (cam_quirkmatch((caddr_t)&device->inq_data,
1942 (caddr_t)&cur_pattern->inq_pat,
1943 1, sizeof(cur_pattern->inq_pat),
1944 scsi_static_inquiry_match) == NULL))
1945 continue;
1946
1947 /*
1948 * If we get to this point, the user definitely wants
1949 * information on this device. So tell the caller to copy
1950 * the data out.
1951 */
1952 retval |= DM_RET_COPY;
1953
1954 /*
1955 * If the return action has been set to descend, then we
1956 * know that we've already seen a peripheral matching
1957 * expression, therefore we need to further descend the tree.
1958 * This won't change by continuing around the loop, so we
1959 * go ahead and return. If we haven't seen a peripheral
1960 * matching expression, we keep going around the loop until
1961 * we exhaust the matching expressions. We'll set the stop
1962 * flag once we fall out of the loop.
1963 */
1964 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1965 return(retval);
1966 }
1967
1968 /*
1969 * If the return action hasn't been set to descend yet, that means
1970 * we haven't seen any peripheral matching patterns. So tell the
1971 * caller to stop descending the tree -- the user doesn't want to
1972 * match against lower level tree elements.
1973 */
1974 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1975 retval |= DM_RET_STOP;
1976
1977 return(retval);
1978}
1979
1980/*
1981 * Match a single peripheral against any number of match patterns.
1982 */
1983static dev_match_ret
1984xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1985 struct cam_periph *periph)
1986{
1987 dev_match_ret retval;
1988 int i;
1989
1990 /*
1991 * If we aren't given something to match against, that's an error.
1992 */
1993 if (periph == NULL)
1994 return(DM_RET_ERROR);
1995
1996 /*
1997 * If there are no match entries, then this peripheral matches no
1998 * matter what.
1999 */
2000 if ((patterns == NULL) || (num_patterns == 0))
2001 return(DM_RET_STOP | DM_RET_COPY);
2002
2003 /*
2004 * There aren't any nodes below a peripheral node, so there's no
2005 * reason to descend the tree any further.
2006 */
2007 retval = DM_RET_STOP;
2008
2009 for (i = 0; i < num_patterns; i++) {
2010 struct periph_match_pattern *cur_pattern;
2011
2012 /*
2013 * If the pattern in question isn't for a peripheral, we
2014 * aren't interested.
2015 */
2016 if (patterns[i].type != DEV_MATCH_PERIPH)
2017 continue;
2018
2019 cur_pattern = &patterns[i].pattern.periph_pattern;
2020
2021 /*
2022 * If they want to match on anything, then we will do so.
2023 */
2024 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
2025 /* set the copy flag */
2026 retval |= DM_RET_COPY;
2027
2028 /*
2029 * We've already set the return action to stop,
2030 * since there are no nodes below peripherals in
2031 * the tree.
2032 */
2033 return(retval);
2034 }
2035
2036 /*
2037 * Not sure why someone would do this...
2038 */
2039 if (cur_pattern->flags == PERIPH_MATCH_NONE)
2040 continue;
2041
2042 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
2043 && (cur_pattern->path_id != periph->path->bus->path_id))
2044 continue;
2045
2046 /*
2047 * For the target and lun id's, we have to make sure the
2048 * target and lun pointers aren't NULL. The xpt peripheral
2049 * has a wildcard target and device.
2050 */
2051 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
2052 && ((periph->path->target == NULL)
2053 ||(cur_pattern->target_id != periph->path->target->target_id)))
2054 continue;
2055
2056 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
2057 && ((periph->path->device == NULL)
2058 || (cur_pattern->target_lun != periph->path->device->lun_id)))
2059 continue;
2060
2061 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
2062 && (cur_pattern->unit_number != periph->unit_number))
2063 continue;
2064
2065 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
2066 && (strncmp(cur_pattern->periph_name, periph->periph_name,
2067 DEV_IDLEN) != 0))
2068 continue;
2069
2070 /*
2071 * If we get to this point, the user definitely wants
2072 * information on this peripheral. So tell the caller to
2073 * copy the data out.
2074 */
2075 retval |= DM_RET_COPY;
2076
2077 /*
2078 * The return action has already been set to stop, since
2079 * peripherals don't have any nodes below them in the EDT.
2080 */
2081 return(retval);
2082 }
2083
2084 /*
2085 * If we get to this point, the peripheral that was passed in
2086 * doesn't match any of the patterns.
2087 */
2088 return(retval);
2089}
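/*
 * Taken together, the three match routines above talk to the traversal
 * code purely through dev_match_ret flags: DM_RET_COPY asks the caller to
 * copy the current node into the result buffer, while the action bits
 * (DM_RET_DESCEND, DM_RET_STOP or DM_RET_ERROR under DM_RET_ACTION_MASK)
 * say whether to walk deeper into the EDT, stop at this level, or abort
 * the search.
 */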
2090
2091static int
2092xptedtbusfunc(struct cam_eb *bus, void *arg)
2093{
2094 struct ccb_dev_match *cdm;
2095 dev_match_ret retval;
2096
2097 cdm = (struct ccb_dev_match *)arg;
2098
2099 /*
2100 * If our position is for something deeper in the tree, that means
2101 * that we've already seen this node. So, we keep going down.
2102 */
2103 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2104 && (cdm->pos.cookie.bus == bus)
2105 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2106 && (cdm->pos.cookie.target != NULL))
2107 retval = DM_RET_DESCEND;
2108 else
2109 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
2110
2111 /*
2112 * If we got an error, bail out of the search.
2113 */
2114 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2115 cdm->status = CAM_DEV_MATCH_ERROR;
2116 return(0);
2117 }
2118
2119 /*
2120 * If the copy flag is set, copy this bus out.
2121 */
2122 if (retval & DM_RET_COPY) {
2123 int spaceleft, j;
2124
2125 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2126 sizeof(struct dev_match_result));
2127
2128 /*
2129 * If we don't have enough space to put in another
2130 * match result, save our position and tell the
2131 * user there are more devices to check.
2132 */
2133 if (spaceleft < sizeof(struct dev_match_result)) {
2134 bzero(&cdm->pos, sizeof(cdm->pos));
2135 cdm->pos.position_type =
2136 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
2137
2138 cdm->pos.cookie.bus = bus;
2139 cdm->pos.generations[CAM_BUS_GENERATION]=
2140 xsoftc.bus_generation;
2141 cdm->status = CAM_DEV_MATCH_MORE;
2142 return(0);
2143 }
2144 j = cdm->num_matches;
2145 cdm->num_matches++;
2146 cdm->matches[j].type = DEV_MATCH_BUS;
2147 cdm->matches[j].result.bus_result.path_id = bus->path_id;
2148 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
2149 cdm->matches[j].result.bus_result.unit_number =
2150 bus->sim->unit_number;
2151 strncpy(cdm->matches[j].result.bus_result.dev_name,
2152 bus->sim->sim_name, DEV_IDLEN);
2153 }
2154
2155 /*
2156 * If the user is only interested in busses, there's no
2157 * reason to descend to the next level in the tree.
2158 */
2159 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2160 return(1);
2161
2162 /*
2163 * If there is a target generation recorded, check it to
2164 * make sure the target list hasn't changed.
2165 */
2166 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2167 && (bus == cdm->pos.cookie.bus)
2168 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2169 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
2170 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
2171 bus->generation)) {
2172 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2173 return(0);
2174 }
2175
2176 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2177 && (cdm->pos.cookie.bus == bus)
2178 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2179 && (cdm->pos.cookie.target != NULL))
2180 return(xpttargettraverse(bus,
2181 (struct cam_et *)cdm->pos.cookie.target,
2182 xptedttargetfunc, arg));
2183 else
2184 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
2185}
2186
2187static int
2188xptedttargetfunc(struct cam_et *target, void *arg)
2189{
2190 struct ccb_dev_match *cdm;
2191
2192 cdm = (struct ccb_dev_match *)arg;
2193
2194 /*
2195 * If there is a device list generation recorded, check it to
2196 * make sure the device list hasn't changed.
2197 */
2198 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2199 && (cdm->pos.cookie.bus == target->bus)
2200 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2201 && (cdm->pos.cookie.target == target)
2202 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2203 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2204 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2205 target->generation)) {
2206 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2207 return(0);
2208 }
2209
2210 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2211 && (cdm->pos.cookie.bus == target->bus)
2212 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2213 && (cdm->pos.cookie.target == target)
2214 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2215 && (cdm->pos.cookie.device != NULL))
2216 return(xptdevicetraverse(target,
2217 (struct cam_ed *)cdm->pos.cookie.device,
2218 xptedtdevicefunc, arg));
2219 else
2220 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2221}
2222
2223static int
2224xptedtdevicefunc(struct cam_ed *device, void *arg)
2225{
2226
2227 struct ccb_dev_match *cdm;
2228 dev_match_ret retval;
2229
2230 cdm = (struct ccb_dev_match *)arg;
2231
2232 /*
2233 * If our position is for something deeper in the tree, that means
2234 * that we've already seen this node. So, we keep going down.
2235 */
2236 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2237 && (cdm->pos.cookie.device == device)
2238 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2239 && (cdm->pos.cookie.periph != NULL))
2240 retval = DM_RET_DESCEND;
2241 else
2242 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2243 device);
2244
2245 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2246 cdm->status = CAM_DEV_MATCH_ERROR;
2247 return(0);
2248 }
2249
2250 /*
2251 * If the copy flag is set, copy this device out.
2252 */
2253 if (retval & DM_RET_COPY) {
2254 int spaceleft, j;
2255
2256 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2257 sizeof(struct dev_match_result));
2258
2259 /*
2260 * If we don't have enough space to put in another
2261 * match result, save our position and tell the
2262 * user there are more devices to check.
2263 */
2264 if (spaceleft < sizeof(struct dev_match_result)) {
2265 bzero(&cdm->pos, sizeof(cdm->pos));
2266 cdm->pos.position_type =
2267 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2268 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2269
2270 cdm->pos.cookie.bus = device->target->bus;
2271 cdm->pos.generations[CAM_BUS_GENERATION]=
2272 xsoftc.bus_generation;
2273 cdm->pos.cookie.target = device->target;
2274 cdm->pos.generations[CAM_TARGET_GENERATION] =
2275 device->target->bus->generation;
2276 cdm->pos.cookie.device = device;
2277 cdm->pos.generations[CAM_DEV_GENERATION] =
2278 device->target->generation;
2279 cdm->status = CAM_DEV_MATCH_MORE;
2280 return(0);
2281 }
2282 j = cdm->num_matches;
2283 cdm->num_matches++;
2284 cdm->matches[j].type = DEV_MATCH_DEVICE;
2285 cdm->matches[j].result.device_result.path_id =
2286 device->target->bus->path_id;
2287 cdm->matches[j].result.device_result.target_id =
2288 device->target->target_id;
2289 cdm->matches[j].result.device_result.target_lun =
2290 device->lun_id;
2291 bcopy(&device->inq_data,
2292 &cdm->matches[j].result.device_result.inq_data,
2293 sizeof(struct scsi_inquiry_data));
2294
2295 /* Let the user know whether this device is unconfigured */
2296 if (device->flags & CAM_DEV_UNCONFIGURED)
2297 cdm->matches[j].result.device_result.flags =
2298 DEV_RESULT_UNCONFIGURED;
2299 else
2300 cdm->matches[j].result.device_result.flags =
2301 DEV_RESULT_NOFLAG;
2302 }
2303
2304 /*
2305 * If the user isn't interested in peripherals, don't descend
2306 * the tree any further.
2307 */
2308 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2309 return(1);
2310
2311 /*
2312 * If there is a peripheral list generation recorded, make sure
2313 * it hasn't changed.
2314 */
2315 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2316 && (device->target->bus == cdm->pos.cookie.bus)
2317 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2318 && (device->target == cdm->pos.cookie.target)
2319 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2320 && (device == cdm->pos.cookie.device)
2321 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2322 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2323 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2324 device->generation)){
2325 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2326 return(0);
2327 }
2328
2329 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2330 && (cdm->pos.cookie.bus == device->target->bus)
2331 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2332 && (cdm->pos.cookie.target == device->target)
2333 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2334 && (cdm->pos.cookie.device == device)
2335 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2336 && (cdm->pos.cookie.periph != NULL))
2337 return(xptperiphtraverse(device,
2338 (struct cam_periph *)cdm->pos.cookie.periph,
2339 xptedtperiphfunc, arg));
2340 else
2341 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2342}
2343
2344static int
2345xptedtperiphfunc(struct cam_periph *periph, void *arg)
2346{
2347 struct ccb_dev_match *cdm;
2348 dev_match_ret retval;
2349
2350 cdm = (struct ccb_dev_match *)arg;
2351
2352 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2353
2354 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2355 cdm->status = CAM_DEV_MATCH_ERROR;
2356 return(0);
2357 }
2358
2359 /*
2360 * If the copy flag is set, copy this peripheral out.
2361 */
2362 if (retval & DM_RET_COPY) {
2363 int spaceleft, j;
2364
2365 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2366 sizeof(struct dev_match_result));
2367
2368 /*
2369 * If we don't have enough space to put in another
2370 * match result, save our position and tell the
2371 * user there are more devices to check.
2372 */
2373 if (spaceleft < sizeof(struct dev_match_result)) {
2374 bzero(&cdm->pos, sizeof(cdm->pos));
2375 cdm->pos.position_type =
2376 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2377 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2378 CAM_DEV_POS_PERIPH;
2379
2380 cdm->pos.cookie.bus = periph->path->bus;
2381 cdm->pos.generations[CAM_BUS_GENERATION]=
2382 xsoftc.bus_generation;
2383 cdm->pos.cookie.target = periph->path->target;
2384 cdm->pos.generations[CAM_TARGET_GENERATION] =
2385 periph->path->bus->generation;
2386 cdm->pos.cookie.device = periph->path->device;
2387 cdm->pos.generations[CAM_DEV_GENERATION] =
2388 periph->path->target->generation;
2389 cdm->pos.cookie.periph = periph;
2390 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2391 periph->path->device->generation;
2392 cdm->status = CAM_DEV_MATCH_MORE;
2393 return(0);
2394 }
2395
2396 j = cdm->num_matches;
2397 cdm->num_matches++;
2398 cdm->matches[j].type = DEV_MATCH_PERIPH;
2399 cdm->matches[j].result.periph_result.path_id =
2400 periph->path->bus->path_id;
2401 cdm->matches[j].result.periph_result.target_id =
2402 periph->path->target->target_id;
2403 cdm->matches[j].result.periph_result.target_lun =
2404 periph->path->device->lun_id;
2405 cdm->matches[j].result.periph_result.unit_number =
2406 periph->unit_number;
2407 strncpy(cdm->matches[j].result.periph_result.periph_name,
2408 periph->periph_name, DEV_IDLEN);
2409 }
2410
2411 return(1);
2412}
2413
2414static int
2415xptedtmatch(struct ccb_dev_match *cdm)
2416{
2417 int ret;
2418
2419 cdm->num_matches = 0;
2420
2421 /*
2422 * Check the bus list generation. If it has changed, the user
2423 * needs to reset everything and start over.
2424 */
2425 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2426 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2427 && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
2428 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2429 return(0);
2430 }
2431
2432 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2433 && (cdm->pos.cookie.bus != NULL))
2434 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2435 xptedtbusfunc, cdm);
2436 else
2437 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2438
2439 /*
2440 * If we get back 0, that means that we had to stop before fully
2441 * traversing the EDT. It also means that one of the subroutines
2442 * has set the status field to the proper value. If we get back 1,
2443 * we've fully traversed the EDT and copied out any matching entries.
2444 */
2445 if (ret == 1)
2446 cdm->status = CAM_DEV_MATCH_LAST;
2447
2448 return(ret);
2449}
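/*
 * A sketch of the resume protocol a caller of XPT_DEV_MATCH is expected
 * to follow ("ccb" is a union ccb set up for XPT_DEV_MATCH and "consume"
 * is a placeholder, not a routine in this file):
 *
 *	do {
 *		xpt_action(ccb);
 *		consume(ccb->cdm.matches, ccb->cdm.num_matches);
 *	} while (ccb->cdm.status == CAM_DEV_MATCH_MORE);
 *
 * When the match buffer fills, the code above records its position in
 * cdm->pos and returns CAM_DEV_MATCH_MORE, so resubmitting the same CCB
 * continues where it left off; CAM_DEV_MATCH_LIST_CHANGED means the scan
 * must be restarted from the beginning.
 */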
2450
2451static int
2452xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2453{
2454 struct ccb_dev_match *cdm;
2455
2456 cdm = (struct ccb_dev_match *)arg;
2457
2458 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2459 && (cdm->pos.cookie.pdrv == pdrv)
2460 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2461 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2462 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2463 (*pdrv)->generation)) {
2464 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2465 return(0);
2466 }
2467
2468 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2469 && (cdm->pos.cookie.pdrv == pdrv)
2470 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2471 && (cdm->pos.cookie.periph != NULL))
2472 return(xptpdperiphtraverse(pdrv,
2473 (struct cam_periph *)cdm->pos.cookie.periph,
2474 xptplistperiphfunc, arg));
2475 else
2476 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
2477}
2478
2479static int
2480xptplistperiphfunc(struct cam_periph *periph, void *arg)
2481{
2482 struct ccb_dev_match *cdm;
2483 dev_match_ret retval;
2484
2485 cdm = (struct ccb_dev_match *)arg;
2486
2487 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2488
2489 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2490 cdm->status = CAM_DEV_MATCH_ERROR;
2491 return(0);
2492 }
2493
2494 /*
2495 * If the copy flag is set, copy this peripheral out.
2496 */
2497 if (retval & DM_RET_COPY) {
2498 int spaceleft, j;
2499
2500 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2501 sizeof(struct dev_match_result));
2502
2503 /*
2504 * If we don't have enough space to put in another
2505 * match result, save our position and tell the
2506 * user there are more devices to check.
2507 */
2508 if (spaceleft < sizeof(struct dev_match_result)) {
2509 struct periph_driver **pdrv;
2510
2511 pdrv = NULL;
2512 bzero(&cdm->pos, sizeof(cdm->pos));
2513 cdm->pos.position_type =
2514 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2515 CAM_DEV_POS_PERIPH;
2516
2517 /*
2518			 * This may look a bit nonsensical, but it is
2519 * actually quite logical. There are very few
2520 * peripheral drivers, and bloating every peripheral
2521 * structure with a pointer back to its parent
2522 * peripheral driver linker set entry would cost
2523 * more in the long run than doing this quick lookup.
2524 */
2525 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2526 if (strcmp((*pdrv)->driver_name,
2527 periph->periph_name) == 0)
2528 break;
2529 }
2530
2531 if (*pdrv == NULL) {
2532 cdm->status = CAM_DEV_MATCH_ERROR;
2533 return(0);
2534 }
2535
2536 cdm->pos.cookie.pdrv = pdrv;
2537 /*
2538 * The periph generation slot does double duty, as
2539 * does the periph pointer slot. They are used for
2540 * both edt and pdrv lookups and positioning.
2541 */
2542 cdm->pos.cookie.periph = periph;
2543 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2544 (*pdrv)->generation;
2545 cdm->status = CAM_DEV_MATCH_MORE;
2546 return(0);
2547 }
2548
2549 j = cdm->num_matches;
2550 cdm->num_matches++;
2551 cdm->matches[j].type = DEV_MATCH_PERIPH;
2552 cdm->matches[j].result.periph_result.path_id =
2553 periph->path->bus->path_id;
2554
2555 /*
2556 * The transport layer peripheral doesn't have a target or
2557 * lun.
2558 */
2559 if (periph->path->target)
2560 cdm->matches[j].result.periph_result.target_id =
2561 periph->path->target->target_id;
2562 else
2563 cdm->matches[j].result.periph_result.target_id = -1;
2564
2565 if (periph->path->device)
2566 cdm->matches[j].result.periph_result.target_lun =
2567 periph->path->device->lun_id;
2568 else
2569 cdm->matches[j].result.periph_result.target_lun = -1;
2570
2571 cdm->matches[j].result.periph_result.unit_number =
2572 periph->unit_number;
2573 strncpy(cdm->matches[j].result.periph_result.periph_name,
2574 periph->periph_name, DEV_IDLEN);
2575 }
2576
2577 return(1);
2578}
2579
2580static int
2581xptperiphlistmatch(struct ccb_dev_match *cdm)
2582{
2583 int ret;
2584
2585 cdm->num_matches = 0;
2586
2587 /*
2588 * At this point in the edt traversal function, we check the bus
2589	 * removed since the user last sent an XPT_DEV_MATCH ccb through.
2590 * removed since the user last sent a XPT_DEV_MATCH ccb through.
2591 * For the peripheral driver list traversal function, however, we
2592 * don't have to worry about new peripheral driver types coming or
2593 * going; they're in a linker set, and therefore can't change
2594 * without a recompile.
2595 */
2596
2597 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2598 && (cdm->pos.cookie.pdrv != NULL))
2599 ret = xptpdrvtraverse(
2600 (struct periph_driver **)cdm->pos.cookie.pdrv,
2601 xptplistpdrvfunc, cdm);
2602 else
2603 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2604
2605 /*
2606 * If we get back 0, that means that we had to stop before fully
2607 * traversing the peripheral driver tree. It also means that one of
2608 * the subroutines has set the status field to the proper value. If
2609 * we get back 1, we've fully traversed the EDT and copied out any
2610 * matching entries.
2611 */
2612 if (ret == 1)
2613 cdm->status = CAM_DEV_MATCH_LAST;
2614
2615 return(ret);
2616}
2617
2618static int
2619xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2620{
2621 struct cam_eb *bus, *next_bus;
2622 int retval;
2623
2624 retval = 1;
2625
2626 mtx_lock(&xsoftc.xpt_topo_lock);
2627 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
2628 bus != NULL;
2629 bus = next_bus) {
2630 next_bus = TAILQ_NEXT(bus, links);
2631
2632 mtx_unlock(&xsoftc.xpt_topo_lock);
2633 CAM_SIM_LOCK(bus->sim);
2634 retval = tr_func(bus, arg);
2635 CAM_SIM_UNLOCK(bus->sim);
2636 if (retval == 0)
2637 return(retval);
2638 mtx_lock(&xsoftc.xpt_topo_lock);
2639 }
2640 mtx_unlock(&xsoftc.xpt_topo_lock);
2641
2642 return(retval);
2643}
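/*
 * Note on the locking above: xpt_topo_lock only protects the bus list
 * itself, so the next pointer is sampled while it is held, the lock is
 * dropped, and each callback then runs under that bus's SIM lock.  A
 * tr_func return value of 0 ends the traversal early.
 */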
2644
2645static int
2646xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2647 xpt_targetfunc_t *tr_func, void *arg)
2648{
2649 struct cam_et *target, *next_target;
2650 int retval;
2651
2652 retval = 1;
2653 for (target = (start_target ? start_target :
2654 TAILQ_FIRST(&bus->et_entries));
2655 target != NULL; target = next_target) {
2656
2657 next_target = TAILQ_NEXT(target, links);
2658
2659 retval = tr_func(target, arg);
2660
2661 if (retval == 0)
2662 return(retval);
2663 }
2664
2665 return(retval);
2666}
2667
2668static int
2669xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2670 xpt_devicefunc_t *tr_func, void *arg)
2671{
2672 struct cam_ed *device, *next_device;
2673 int retval;
2674
2675 retval = 1;
2676 for (device = (start_device ? start_device :
2677 TAILQ_FIRST(&target->ed_entries));
2678 device != NULL;
2679 device = next_device) {
2680
2681 next_device = TAILQ_NEXT(device, links);
2682
2683 retval = tr_func(device, arg);
2684
2685 if (retval == 0)
2686 return(retval);
2687 }
2688
2689 return(retval);
2690}
2691
2692static int
2693xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2694 xpt_periphfunc_t *tr_func, void *arg)
2695{
2696 struct cam_periph *periph, *next_periph;
2697 int retval;
2698
2699 retval = 1;
2700
2701 for (periph = (start_periph ? start_periph :
2702 SLIST_FIRST(&device->periphs));
2703 periph != NULL;
2704 periph = next_periph) {
2705
2706 next_periph = SLIST_NEXT(periph, periph_links);
2707
2708 retval = tr_func(periph, arg);
2709 if (retval == 0)
2710 return(retval);
2711 }
2712
2713 return(retval);
2714}
2715
2716static int
2717xptpdrvtraverse(struct periph_driver **start_pdrv,
2718 xpt_pdrvfunc_t *tr_func, void *arg)
2719{
2720 struct periph_driver **pdrv;
2721 int retval;
2722
2723 retval = 1;
2724
2725 /*
2726 * We don't traverse the peripheral driver list like we do the
2727 * other lists, because it is a linker set, and therefore cannot be
2728 * changed during runtime. If the peripheral driver list is ever
2729 * re-done to be something other than a linker set (i.e. it can
2730 * change while the system is running), the list traversal should
2731 * be modified to work like the other traversal functions.
2732 */
2733 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2734 *pdrv != NULL; pdrv++) {
2735 retval = tr_func(pdrv, arg);
2736
2737 if (retval == 0)
2738 return(retval);
2739 }
2740
2741 return(retval);
2742}
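/*
 * For reference: peripheral drivers end up in the periph_drivers linker
 * set at compile time, typically through the PERIPHDRIVER_DECLARE() macro
 * from cam_periph.h (e.g. something like PERIPHDRIVER_DECLARE(da, dadriver)
 * in the "da" driver), which is why the NULL-terminated array walk above
 * needs no locking.
 */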
2743
2744static int
2745xptpdperiphtraverse(struct periph_driver **pdrv,
2746 struct cam_periph *start_periph,
2747 xpt_periphfunc_t *tr_func, void *arg)
2748{
2749 struct cam_periph *periph, *next_periph;
2750 int retval;
2751
2752 retval = 1;
2753
2754 for (periph = (start_periph ? start_periph :
2755 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2756 periph = next_periph) {
2757
2758 next_periph = TAILQ_NEXT(periph, unit_links);
2759
2760 retval = tr_func(periph, arg);
2761 if (retval == 0)
2762 return(retval);
2763 }
2764 return(retval);
2765}
2766
2767static int
2768xptdefbusfunc(struct cam_eb *bus, void *arg)
2769{
2770 struct xpt_traverse_config *tr_config;
2771
2772 tr_config = (struct xpt_traverse_config *)arg;
2773
2774 if (tr_config->depth == XPT_DEPTH_BUS) {
2775 xpt_busfunc_t *tr_func;
2776
2777 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2778
2779 return(tr_func(bus, tr_config->tr_arg));
2780 } else
2781 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2782}
2783
2784static int
2785xptdeftargetfunc(struct cam_et *target, void *arg)
2786{
2787 struct xpt_traverse_config *tr_config;
2788
2789 tr_config = (struct xpt_traverse_config *)arg;
2790
2791 if (tr_config->depth == XPT_DEPTH_TARGET) {
2792 xpt_targetfunc_t *tr_func;
2793
2794 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2795
2796 return(tr_func(target, tr_config->tr_arg));
2797 } else
2798 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2799}
2800
2801static int
2802xptdefdevicefunc(struct cam_ed *device, void *arg)
2803{
2804 struct xpt_traverse_config *tr_config;
2805
2806 tr_config = (struct xpt_traverse_config *)arg;
2807
2808 if (tr_config->depth == XPT_DEPTH_DEVICE) {
2809 xpt_devicefunc_t *tr_func;
2810
2811 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2812
2813 return(tr_func(device, tr_config->tr_arg));
2814 } else
2815 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2816}
2817
2818static int
2819xptdefperiphfunc(struct cam_periph *periph, void *arg)
2820{
2821 struct xpt_traverse_config *tr_config;
2822 xpt_periphfunc_t *tr_func;
2823
2824 tr_config = (struct xpt_traverse_config *)arg;
2825
2826 tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2827
2828 /*
2829 * Unlike the other default functions, we don't check for depth
2830 * here. The peripheral driver level is the last level in the EDT,
2831 * so if we're here, we should execute the function in question.
2832 */
2833 return(tr_func(periph, tr_config->tr_arg));
2834}
2835
2836/*
2837 * Execute the given function for every bus in the EDT.
2838 */
2839static int
2840xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2841{
2842 struct xpt_traverse_config tr_config;
2843
2844 tr_config.depth = XPT_DEPTH_BUS;
2845 tr_config.tr_func = tr_func;
2846 tr_config.tr_arg = arg;
2847
2848 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2849}
2850
2851/*
2852 * Execute the given function for every device in the EDT.
2853 */
2854static int
2855xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2856{
2857 struct xpt_traverse_config tr_config;
2858
2859 tr_config.depth = XPT_DEPTH_DEVICE;
2860 tr_config.tr_func = tr_func;
2861 tr_config.tr_arg = arg;
2862
2863 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2864}
2865
2866static int
2867xptsetasyncfunc(struct cam_ed *device, void *arg)
2868{
2869 struct cam_path path;
2870 struct ccb_getdev cgd;
2871 struct async_node *cur_entry;
2872
2873 cur_entry = (struct async_node *)arg;
2874
2875 /*
2876 * Don't report unconfigured devices (Wildcard devs,
2877 * devices only for target mode, device instances
2878 * that have been invalidated but are waiting for
2879 * their last reference count to be released).
2880 */
2881 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2882 return (1);
2883
2884 xpt_compile_path(&path,
2885 NULL,
2886 device->target->bus->path_id,
2887 device->target->target_id,
2888 device->lun_id);
2889 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2890 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2891 xpt_action((union ccb *)&cgd);
2892 cur_entry->callback(cur_entry->callback_arg,
2893 AC_FOUND_DEVICE,
2894 &path, &cgd);
2895 xpt_release_path(&path);
2896
2897 return(1);
2898}
2899
2900static int
2901xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2902{
2903 struct cam_path path;
2904 struct ccb_pathinq cpi;
2905 struct async_node *cur_entry;
2906
2907 cur_entry = (struct async_node *)arg;
2908
2909 xpt_compile_path(&path, /*periph*/NULL,
2910 bus->sim->path_id,
2911 CAM_TARGET_WILDCARD,
2912 CAM_LUN_WILDCARD);
2913 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2914 cpi.ccb_h.func_code = XPT_PATH_INQ;
2915 xpt_action((union ccb *)&cpi);
2916 cur_entry->callback(cur_entry->callback_arg,
2917 AC_PATH_REGISTERED,
2918 &path, &cpi);
2919 xpt_release_path(&path);
2920
2921 return(1);
2922}
2923
2924static void
2925xpt_action_sasync_cb(void *context, int pending)
2926{
2927 struct async_node *cur_entry;
2928 struct xpt_task *task;
2929 uint32_t added;
2930
2931 task = (struct xpt_task *)context;
2932 cur_entry = (struct async_node *)task->data1;
2933 added = task->data2;
2934
2935 if ((added & AC_FOUND_DEVICE) != 0) {
2936 /*
2937 * Get this peripheral up to date with all
2938 * the currently existing devices.
2939 */
2940 xpt_for_all_devices(xptsetasyncfunc, cur_entry);
2941 }
2942 if ((added & AC_PATH_REGISTERED) != 0) {
2943 /*
2944 * Get this peripheral up to date with all
2945 * the currently existing busses.
2946 */
2947 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
2948 }
2949
2950 free(task, M_CAMXPT);
2951}
2952
2953void
2954xpt_action(union ccb *start_ccb)
2955{
2956
2957 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2958
2959 start_ccb->ccb_h.status = CAM_REQ_INPROG;
2960
2961 switch (start_ccb->ccb_h.func_code) {
2962 case XPT_SCSI_IO:
2963 {
2964 struct cam_ed *device;
2965#ifdef CAMDEBUG
2966 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2967 struct cam_path *path;
2968
2969 path = start_ccb->ccb_h.path;
2970#endif
2971
2972 /*
2973 * For the sake of compatibility with SCSI-1
2974 * devices that may not understand the identify
2975 * message, we include lun information in the
2976 * second byte of all commands. SCSI-1 specifies
2977 * that luns are a 3 bit value and reserves only 3
2978 * bits for lun information in the CDB. Later
2979 * revisions of the SCSI spec allow for more than 8
2980 * luns, but have deprecated lun information in the
2981		 * CDB. So, if the lun won't fit, we must omit it.
2982 *
2983 * Also be aware that during initial probing for devices,
2984 * the inquiry information is unknown but initialized to 0.
2985 * This means that this code will be exercised while probing
2986 * devices with an ANSI revision greater than 2.
2987 */
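		/*
		 * For example, a lun of 3 on such a device ORs
		 * (3 << 5) == 0x60 into the second CDB byte below.
		 */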
2988 device = start_ccb->ccb_h.path->device;
2989 if (device->protocol_version <= SCSI_REV_2
2990 && start_ccb->ccb_h.target_lun < 8
2991 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2992
2993 start_ccb->csio.cdb_io.cdb_bytes[1] |=
2994 start_ccb->ccb_h.target_lun << 5;
2995 }
2996 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2997 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2998 scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2999 &path->device->inq_data),
3000 scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
3001 cdb_str, sizeof(cdb_str))));
3002 }
3003 /* FALLTHROUGH */
3004 case XPT_TARGET_IO:
3005 case XPT_CONT_TARGET_IO:
3006 start_ccb->csio.sense_resid = 0;
3007 start_ccb->csio.resid = 0;
3008 /* FALLTHROUGH */
3009 case XPT_RESET_DEV:
3010 case XPT_ENG_EXEC:
3011 {
3012 struct cam_path *path;
3013 int runq;
3014
3015 path = start_ccb->ccb_h.path;
3016
3017 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
3018 if (path->device->qfrozen_cnt == 0)
3019 runq = xpt_schedule_dev_sendq(path->bus, path->device);
3020 else
3021 runq = 0;
3022 if (runq != 0)
3023 xpt_run_dev_sendq(path->bus);
3024 break;
3025 }
3026 case XPT_SET_TRAN_SETTINGS:
3027 {
3028 xpt_set_transfer_settings(&start_ccb->cts,
3029 start_ccb->ccb_h.path->device,
3030 /*async_update*/FALSE);
3031 break;
3032 }
3033 case XPT_CALC_GEOMETRY:
3034 {
3035 struct cam_sim *sim;
3036
3037 /* Filter out garbage */
3038 if (start_ccb->ccg.block_size == 0
3039 || start_ccb->ccg.volume_size == 0) {
3040 start_ccb->ccg.cylinders = 0;
3041 start_ccb->ccg.heads = 0;
3042 start_ccb->ccg.secs_per_track = 0;
3043 start_ccb->ccb_h.status = CAM_REQ_CMP;
3044 break;
3045 }
3046#ifdef PC98
3047 /*
3048		 * In a PC-98 system, geometry translation depends on
3049 * the "real" device geometry obtained from mode page 4.
3050 * SCSI geometry translation is performed in the
3051 * initialization routine of the SCSI BIOS and the result
3052 * stored in host memory. If the translation is available
3053 * in host memory, use it. If not, rely on the default
3054 * translation the device driver performs.
3055 */
3056 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
3057 start_ccb->ccb_h.status = CAM_REQ_CMP;
3058 break;
3059 }
3060#endif
3061 sim = start_ccb->ccb_h.path->bus->sim;
3062 (*(sim->sim_action))(sim, start_ccb);
3063 break;
3064 }
3065 case XPT_ABORT:
3066 {
3067 union ccb* abort_ccb;
3068
3069 abort_ccb = start_ccb->cab.abort_ccb;
3070 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
3071
3072 if (abort_ccb->ccb_h.pinfo.index >= 0) {
3073 struct cam_ccbq *ccbq;
3074
3075 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
3076 cam_ccbq_remove_ccb(ccbq, abort_ccb);
3077 abort_ccb->ccb_h.status =
3078 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3079 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3080 xpt_done(abort_ccb);
3081 start_ccb->ccb_h.status = CAM_REQ_CMP;
3082 break;
3083 }
3084 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
3085 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
3086 /*
3087 * We've caught this ccb en route to
3088 * the SIM. Flag it for abort and the
3089 * SIM will do so just before starting
3090 * real work on the CCB.
3091 */
3092 abort_ccb->ccb_h.status =
3093 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3094 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3095 start_ccb->ccb_h.status = CAM_REQ_CMP;
3096 break;
3097 }
3098 }
3099 if (XPT_FC_IS_QUEUED(abort_ccb)
3100 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
3101 /*
3102 * It's already completed but waiting
3103 * for our SWI to get to it.
3104 */
3105 start_ccb->ccb_h.status = CAM_UA_ABORT;
3106 break;
3107 }
3108 /*
3109 * If we weren't able to take care of the abort request
3110 * in the XPT, pass the request down to the SIM for processing.
3111 */
3112 }
3113 /* FALLTHROUGH */
3114 case XPT_ACCEPT_TARGET_IO:
3115 case XPT_EN_LUN:
3116 case XPT_IMMED_NOTIFY:
3117 case XPT_NOTIFY_ACK:
3118 case XPT_GET_TRAN_SETTINGS:
3119 case XPT_RESET_BUS:
3120 {
3121 struct cam_sim *sim;
3122
3123 sim = start_ccb->ccb_h.path->bus->sim;
3124 (*(sim->sim_action))(sim, start_ccb);
3125 break;
3126 }
3127 case XPT_PATH_INQ:
3128 {
3129 struct cam_sim *sim;
3130
3131 sim = start_ccb->ccb_h.path->bus->sim;
3132 (*(sim->sim_action))(sim, start_ccb);
3133 break;
3134 }
3135 case XPT_PATH_STATS:
3136 start_ccb->cpis.last_reset =
3137 start_ccb->ccb_h.path->bus->last_reset;
3138 start_ccb->ccb_h.status = CAM_REQ_CMP;
3139 break;
3140 case XPT_GDEV_TYPE:
3141 {
3142 struct cam_ed *dev;
3143
3144 dev = start_ccb->ccb_h.path->device;
3145 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3146 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3147 } else {
3148 struct ccb_getdev *cgd;
3149 struct cam_eb *bus;
3150 struct cam_et *tar;
3151
3152 cgd = &start_ccb->cgd;
3153 bus = cgd->ccb_h.path->bus;
3154 tar = cgd->ccb_h.path->target;
3155 cgd->inq_data = dev->inq_data;
3156 cgd->ccb_h.status = CAM_REQ_CMP;
3157 cgd->serial_num_len = dev->serial_num_len;
3158 if ((dev->serial_num_len > 0)
3159 && (dev->serial_num != NULL))
3160 bcopy(dev->serial_num, cgd->serial_num,
3161 dev->serial_num_len);
3162 }
3163 break;
3164 }
3165 case XPT_GDEV_STATS:
3166 {
3167 struct cam_ed *dev;
3168
3169 dev = start_ccb->ccb_h.path->device;
3170 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3171 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3172 } else {
3173 struct ccb_getdevstats *cgds;
3174 struct cam_eb *bus;
3175 struct cam_et *tar;
3176
3177 cgds = &start_ccb->cgds;
3178 bus = cgds->ccb_h.path->bus;
3179 tar = cgds->ccb_h.path->target;
3180 cgds->dev_openings = dev->ccbq.dev_openings;
3181 cgds->dev_active = dev->ccbq.dev_active;
3182 cgds->devq_openings = dev->ccbq.devq_openings;
3183 cgds->devq_queued = dev->ccbq.queue.entries;
3184 cgds->held = dev->ccbq.held;
3185 cgds->last_reset = tar->last_reset;
3186 cgds->maxtags = dev->quirk->maxtags;
3187 cgds->mintags = dev->quirk->mintags;
3188 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3189 cgds->last_reset = bus->last_reset;
3190 cgds->ccb_h.status = CAM_REQ_CMP;
3191 }
3192 break;
3193 }
3194 case XPT_GDEVLIST:
3195 {
3196 struct cam_periph *nperiph;
3197 struct periph_list *periph_head;
3198 struct ccb_getdevlist *cgdl;
3199 u_int i;
3200 struct cam_ed *device;
3201 int found;
3202
3203
3204 found = 0;
3205
3206 /*
3207 * Don't want anyone mucking with our data.
3208 */
3209 device = start_ccb->ccb_h.path->device;
3210 periph_head = &device->periphs;
3211 cgdl = &start_ccb->cgdl;
3212
3213 /*
3214 * Check and see if the list has changed since the user
3215 * last requested a list member. If so, tell them that the
3216 * list has changed, and therefore they need to start over
3217 * from the beginning.
3218 */
3219 if ((cgdl->index != 0) &&
3220 (cgdl->generation != device->generation)) {
3221 cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3222 break;
3223 }
3224
3225 /*
3226 * Traverse the list of peripherals and attempt to find
3227 * the requested peripheral.
3228 */
3229 for (nperiph = SLIST_FIRST(periph_head), i = 0;
3230 (nperiph != NULL) && (i <= cgdl->index);
3231 nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3232 if (i == cgdl->index) {
3233 strncpy(cgdl->periph_name,
3234 nperiph->periph_name,
3235 DEV_IDLEN);
3236 cgdl->unit_number = nperiph->unit_number;
3237 found = 1;
3238 }
3239 }
3240 if (found == 0) {
3241 cgdl->status = CAM_GDEVLIST_ERROR;
3242 break;
3243 }
3244
3245 if (nperiph == NULL)
3246 cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3247 else
3248 cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3249
3250 cgdl->index++;
3251 cgdl->generation = device->generation;
3252
3253 cgdl->ccb_h.status = CAM_REQ_CMP;
3254 break;
3255 }
3256 case XPT_DEV_MATCH:
3257 {
3258 dev_pos_type position_type;
3259 struct ccb_dev_match *cdm;
3260
3261 cdm = &start_ccb->cdm;
3262
3263 /*
3264 * There are two ways of getting at information in the EDT.
3265 * The first way is via the primary EDT tree. It starts
3266 * with a list of busses, then a list of targets on a bus,
3267 * then devices/luns on a target, and then peripherals on a
3268 * device/lun. The "other" way is by the peripheral driver
3269 * lists. The peripheral driver lists are organized by
3270		 * peripheral driver (obviously), so it makes sense to
3271 * use the peripheral driver list if the user is looking
3272 * for something like "da1", or all "da" devices. If the
3273 * user is looking for something on a particular bus/target
3274 * or lun, it's generally better to go through the EDT tree.
3275 */
3276
3277 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3278 position_type = cdm->pos.position_type;
3279 else {
3280 u_int i;
3281
3282 position_type = CAM_DEV_POS_NONE;
3283
3284 for (i = 0; i < cdm->num_patterns; i++) {
3285 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3286 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3287 position_type = CAM_DEV_POS_EDT;
3288 break;
3289 }
3290 }
3291
3292 if (cdm->num_patterns == 0)
3293 position_type = CAM_DEV_POS_EDT;
3294 else if (position_type == CAM_DEV_POS_NONE)
3295 position_type = CAM_DEV_POS_PDRV;
3296 }
3297
3298 switch(position_type & CAM_DEV_POS_TYPEMASK) {
3299 case CAM_DEV_POS_EDT:
3300 xptedtmatch(cdm);
3301 break;
3302 case CAM_DEV_POS_PDRV:
3303 xptperiphlistmatch(cdm);
3304 break;
3305 default:
3306 cdm->status = CAM_DEV_MATCH_ERROR;
3307 break;
3308 }
3309
3310 if (cdm->status == CAM_DEV_MATCH_ERROR)
3311 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3312 else
3313 start_ccb->ccb_h.status = CAM_REQ_CMP;
3314
3315 break;
3316 }
3317 case XPT_SASYNC_CB:
3318 {
3319 struct ccb_setasync *csa;
3320 struct async_node *cur_entry;
3321 struct async_list *async_head;
3322 u_int32_t added;
3323
3324 csa = &start_ccb->csa;
3325 added = csa->event_enable;
3326 async_head = &csa->ccb_h.path->device->asyncs;
3327
3328 /*
3329 * If there is already an entry for us, simply
3330 * update it.
3331 */
3332 cur_entry = SLIST_FIRST(async_head);
3333 while (cur_entry != NULL) {
3334 if ((cur_entry->callback_arg == csa->callback_arg)
3335 && (cur_entry->callback == csa->callback))
3336 break;
3337 cur_entry = SLIST_NEXT(cur_entry, links);
3338 }
3339
3340 if (cur_entry != NULL) {
3341 /*
3342 * If the request has no flags set,
3343 * remove the entry.
3344 */
3345 added &= ~cur_entry->event_enable;
3346 if (csa->event_enable == 0) {
3347 SLIST_REMOVE(async_head, cur_entry,
3348 async_node, links);
3349 csa->ccb_h.path->device->refcount--;
3350 free(cur_entry, M_CAMXPT);
3351 } else {
3352 cur_entry->event_enable = csa->event_enable;
3353 }
3354 } else {
3355 cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
3356 M_NOWAIT);
3357 if (cur_entry == NULL) {
3358 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3359 break;
3360 }
3361 cur_entry->event_enable = csa->event_enable;
3362 cur_entry->callback_arg = csa->callback_arg;
3363 cur_entry->callback = csa->callback;
3364 SLIST_INSERT_HEAD(async_head, cur_entry, links);
3365 csa->ccb_h.path->device->refcount++;
3366 }
3367
3368 /*
3369		 * Need to decouple this operation via a taskqueue so that
3370 * the locking doesn't become a mess.
3371 */
3372 if ((added & (AC_FOUND_DEVICE | AC_PATH_REGISTERED)) != 0) {
3373 struct xpt_task *task;
3374
3375 task = malloc(sizeof(struct xpt_task), M_CAMXPT,
3376 M_NOWAIT);
3377 if (task == NULL) {
3378 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3379 break;
3380 }
3381
3382 TASK_INIT(&task->task, 0, xpt_action_sasync_cb, task);
3383 task->data1 = cur_entry;
3384 task->data2 = added;
3385 taskqueue_enqueue(taskqueue_thread, &task->task);
3386 }
3387
3388 start_ccb->ccb_h.status = CAM_REQ_CMP;
3389 break;
3390 }
3391 case XPT_REL_SIMQ:
3392 {
3393 struct ccb_relsim *crs;
3394 struct cam_ed *dev;
3395
3396 crs = &start_ccb->crs;
3397 dev = crs->ccb_h.path->device;
3398 if (dev == NULL) {
3399
3400 crs->ccb_h.status = CAM_DEV_NOT_THERE;
3401 break;
3402 }
3403
3404 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3405
3406 if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
3407 /* Don't ever go below one opening */
3408 if (crs->openings > 0) {
3409 xpt_dev_ccbq_resize(crs->ccb_h.path,
3410 crs->openings);
3411
3412 if (bootverbose) {
3413 xpt_print(crs->ccb_h.path,
3414 "tagged openings now %d\n",
3415 crs->openings);
3416 }
3417 }
3418 }
3419 }
3420
3421 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3422
3423 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3424
3425 /*
3426 * Just extend the old timeout and decrement
3427 * the freeze count so that a single timeout
3428 * is sufficient for releasing the queue.
3429 */
3430 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3431 callout_stop(&dev->callout);
3432 } else {
3433
3434 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3435 }
3436
3437 callout_reset(&dev->callout,
3438 (crs->release_timeout * hz) / 1000,
3439 xpt_release_devq_timeout, dev);
3440
3441 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3442
3443 }
3444
3445 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3446
3447 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3448 /*
3449 * Decrement the freeze count so that a single
3450 * completion is still sufficient to unfreeze
3451 * the queue.
3452 */
3453 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3454 } else {
3455
3456 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3457 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3458 }
3459 }
3460
3461 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3462
3463 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3464 || (dev->ccbq.dev_active == 0)) {
3465
3466 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3467 } else {
3468
3469 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3470 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3471 }
3472 }
3473
3474 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3475
3476 xpt_release_devq(crs->ccb_h.path, /*count*/1,
3477 /*run_queue*/TRUE);
3478 }
3479 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3480 start_ccb->ccb_h.status = CAM_REQ_CMP;
3481 break;
3482 }
3483 case XPT_SCAN_BUS:
3484 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3485 break;
3486 case XPT_SCAN_LUN:
3487 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3488 start_ccb->ccb_h.path, start_ccb->crcn.flags,
3489 start_ccb);
3490 break;
3491 case XPT_DEBUG: {
3492#ifdef CAMDEBUG
3493#ifdef CAM_DEBUG_DELAY
3494 cam_debug_delay = CAM_DEBUG_DELAY;
3495#endif
3496 cam_dflags = start_ccb->cdbg.flags;
3497 if (cam_dpath != NULL) {
3498 xpt_free_path(cam_dpath);
3499 cam_dpath = NULL;
3500 }
3501
3502 if (cam_dflags != CAM_DEBUG_NONE) {
3503 if (xpt_create_path(&cam_dpath, xpt_periph,
3504 start_ccb->ccb_h.path_id,
3505 start_ccb->ccb_h.target_id,
3506 start_ccb->ccb_h.target_lun) !=
3507 CAM_REQ_CMP) {
3508 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3509 cam_dflags = CAM_DEBUG_NONE;
3510 } else {
3511 start_ccb->ccb_h.status = CAM_REQ_CMP;
3512 xpt_print(cam_dpath, "debugging flags now %x\n",
3513 cam_dflags);
3514 }
3515 } else {
3516 cam_dpath = NULL;
3517 start_ccb->ccb_h.status = CAM_REQ_CMP;
3518 }
3519#else /* !CAMDEBUG */
3520 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3521#endif /* CAMDEBUG */
3522 break;
3523 }
3524 case XPT_NOOP:
3525 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3526 xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3527 start_ccb->ccb_h.status = CAM_REQ_CMP;
3528 break;
3529 default:
3530 case XPT_SDEV_TYPE:
3531 case XPT_TERM_IO:
3532 case XPT_ENG_INQ:
3533 /* XXX Implement */
3534 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3535 break;
3536 }
3537}
3538
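/*
 * xpt_polled_action() simulates interrupt-driven completion by polling:
 * it busy-waits in one-millisecond DELAY() steps, calling the SIM's poll
 * routine and draining its done queue until the CCB leaves CAM_REQ_INPROG
 * or the CCB's timeout (treated as a millisecond count here) expires.
 * That is what makes it usable from contexts such as crash dumps where
 * interrupts are not available.
 */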
3539void
3540xpt_polled_action(union ccb *start_ccb)
3541{
3542 u_int32_t timeout;
3543 struct cam_sim *sim;
3544 struct cam_devq *devq;
3545 struct cam_ed *dev;
3546
3547
3548 timeout = start_ccb->ccb_h.timeout;
3549 sim = start_ccb->ccb_h.path->bus->sim;
3550 devq = sim->devq;
3551 dev = start_ccb->ccb_h.path->device;
3552
3553 mtx_assert(sim->mtx, MA_OWNED);
3554
3555 /*
3556 * Steal an opening so that no other queued requests
3557 * can get it before us while we simulate interrupts.
3558 */
3559 dev->ccbq.devq_openings--;
3560 dev->ccbq.dev_openings--;
3561
3562 while(((devq != NULL && devq->send_openings <= 0) ||
3563 dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
3564 DELAY(1000);
3565 (*(sim->sim_poll))(sim);
3566 camisr_runqueue(&sim->sim_doneq);
3567 }
3568
3569 dev->ccbq.devq_openings++;
3570 dev->ccbq.dev_openings++;
3571
3572 if (timeout != 0) {
3573 xpt_action(start_ccb);
3574 while(--timeout > 0) {
3575 (*(sim->sim_poll))(sim);
3576 camisr_runqueue(&sim->sim_doneq);
3577 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3578 != CAM_REQ_INPROG)
3579 break;
3580 DELAY(1000);
3581 }
3582 if (timeout == 0) {
3583 /*
3584 * XXX Is it worth adding a sim_timeout entry
3585 * point so we can attempt recovery? If
3586 * this is only used for dumps, I don't think
3587 * it is.
3588 */
3589 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3590 }
3591 } else {
3592 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3593 }
3594}
3595
3596/*
3597 * Schedule a peripheral driver to receive a ccb when its
3598 * target device has space for more transactions.
3599 */
3600void
3601xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3602{
3603 struct cam_ed *device;
3604 int runq;
3605
3606 mtx_assert(perph->sim->mtx, MA_OWNED);
3607
3608 CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3609 device = perph->path->device;
3610 if (periph_is_queued(perph)) {
3611 /* Simply reorder based on new priority */
3612 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3613 (" change priority to %d\n", new_priority));
3614 if (new_priority < perph->pinfo.priority) {
3615 camq_change_priority(&device->drvq,
3616 perph->pinfo.index,
3617 new_priority);
3618 }
3619 runq = 0;
3620 } else {
3621 /* New entry on the queue */
3622 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3623 (" added periph to queue\n"));
3624 perph->pinfo.priority = new_priority;
3625 perph->pinfo.generation = ++device->drvq.generation;
3626 camq_insert(&device->drvq, &perph->pinfo);
3627 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3628 }
3629 if (runq != 0) {
3630 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3631			  ("   calling xpt_run_dev_allocq\n"));
3632 xpt_run_dev_allocq(perph->path->bus);
3633 }
3634}
3635
3636
3637/*
3638 * Schedule a device to run on a given queue.
3639 * If the device was inserted as a new entry on the queue,
3640 * return 1 meaning the device queue should be run. If we
3641 * were already queued, implying someone else has already
3642 * started the queue, return 0 so the caller doesn't attempt
3643 * to run the queue.
3644 */
3645static int
3646xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3647 u_int32_t new_priority)
3648{
3649 int retval;
3650 u_int32_t old_priority;
3651
3652 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3653
3654 old_priority = pinfo->priority;
3655
3656 /*
3657 * Are we already queued?
3658 */
3659 if (pinfo->index != CAM_UNQUEUED_INDEX) {
3660 /* Simply reorder based on new priority */
3661 if (new_priority < old_priority) {
3662 camq_change_priority(queue, pinfo->index,
3663 new_priority);
3664 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3665 ("changed priority to %d\n",
3666 new_priority));
3667 }
3668 retval = 0;
3669 } else {
3670 /* New entry on the queue */
3671 if (new_priority < old_priority)
3672 pinfo->priority = new_priority;
3673
3674 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3675 ("Inserting onto queue\n"));
3676 pinfo->generation = ++queue->generation;
3677 camq_insert(queue, pinfo);
3678 retval = 1;
3679 }
3680 return (retval);
3681}
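/*
 * The xpt_schedule_dev_allocq()/xpt_schedule_dev_sendq() calls made from
 * xpt_schedule() and xpt_action() are thin wrappers (defined earlier in
 * this file) that feed the device's entry on the corresponding devq queue
 * into this routine; the 1/0 return value is what tells those callers
 * whether to go on and run the queue.
 */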
3682
3683static void
3684xpt_run_dev_allocq(struct cam_eb *bus)
3685{
3686 struct cam_devq *devq;
3687
3688 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3689 devq = bus->sim->devq;
3690
3691 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3692 (" qfrozen_cnt == 0x%x, entries == %d, "
3693 "openings == %d, active == %d\n",
3694 devq->alloc_queue.qfrozen_cnt,
3695 devq->alloc_queue.entries,
3696 devq->alloc_openings,
3697 devq->alloc_active));
3698
3699 devq->alloc_queue.qfrozen_cnt++;
3700 while ((devq->alloc_queue.entries > 0)
3701 && (devq->alloc_openings > 0)
3702 && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3703 struct cam_ed_qinfo *qinfo;
3704 struct cam_ed *device;
3705 union ccb *work_ccb;
3706 struct cam_periph *drv;
3707 struct camq *drvq;
3708
3709 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3710 CAMQ_HEAD);
3711 device = qinfo->device;
3712
3713 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3714 ("running device %p\n", device));
3715
3716 drvq = &device->drvq;
3717
3718#ifdef CAMDEBUG
3719 if (drvq->entries <= 0) {
3720 panic("xpt_run_dev_allocq: "
3721 "Device on queue without any work to do");
3722 }
3723#endif
3724 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3725 devq->alloc_openings--;
3726 devq->alloc_active++;
3727 drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3728 xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3729 drv->pinfo.priority);
3730 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3731 ("calling periph start\n"));
3732 drv->periph_start(drv, work_ccb);
3733 } else {
3734 /*
3735 * Malloc failure in alloc_ccb
3736 */
3737 /*
3738 * XXX add us to a list to be run from free_ccb
3739 * if we don't have any ccbs active on this
3740 * device queue otherwise we may never get run
3741 * again.
3742 */
3743 break;
3744 }
3745
3746 if (drvq->entries > 0) {
3747 /* We have more work. Attempt to reschedule */
3748 xpt_schedule_dev_allocq(bus, device);
3749 }
3750 }
3751 devq->alloc_queue.qfrozen_cnt--;
3752}
3753
3754static void
3755xpt_run_dev_sendq(struct cam_eb *bus)
3756{
3757 struct cam_devq *devq;
3758
3759 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3760
3761 devq = bus->sim->devq;
3762
3763 devq->send_queue.qfrozen_cnt++;
3764 while ((devq->send_queue.entries > 0)
3765 && (devq->send_openings > 0)) {
3766 struct cam_ed_qinfo *qinfo;
3767 struct cam_ed *device;
3768 union ccb *work_ccb;
3769 struct cam_sim *sim;
3770
3771 if (devq->send_queue.qfrozen_cnt > 1) {
3772 break;
3773 }
3774
3775 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3776 CAMQ_HEAD);
3777 device = qinfo->device;
3778
3779 /*
3780 * If the device has been "frozen", don't attempt
3781 * to run it.
3782 */
3783 if (device->qfrozen_cnt > 0) {
3784 continue;
3785 }
3786
3787 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3788 ("running device %p\n", device));
3789
3790 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3791 if (work_ccb == NULL) {
3792 printf("device on run queue with no ccbs???\n");
3793 continue;
3794 }
3795
3796 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3797
3798 mtx_lock(&xsoftc.xpt_lock);
3799 if (xsoftc.num_highpower <= 0) {
3800 /*
3801 * We got a high power command, but we
3802 * don't have any available slots. Freeze
3803 * the device queue until we have a slot
3804 * available.
3805 */
3806 device->qfrozen_cnt++;
3807 STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
3808 &work_ccb->ccb_h,
3809 xpt_links.stqe);
3810
3811 mtx_unlock(&xsoftc.xpt_lock);
3812 continue;
3813 } else {
3814 /*
3815 * Consume a high power slot while
3816 * this ccb runs.
3817 */
3818 xsoftc.num_highpower--;
3819 }
3820 mtx_unlock(&xsoftc.xpt_lock);
3821 }
3822 devq->active_dev = device;
3823 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3824
3825 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3826
3827 devq->send_openings--;
3828 devq->send_active++;
3829
3830 if (device->ccbq.queue.entries > 0)
3831 xpt_schedule_dev_sendq(bus, device);
3832
3833 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3834 /*
3835 * The client wants to freeze the queue
3836 * after this CCB is sent.
3837 */
3838 device->qfrozen_cnt++;
3839 }
3840
3841 /* In Target mode, the peripheral driver knows best... */
3842 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3843 if ((device->inq_flags & SID_CmdQue) != 0
3844 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3845 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3846 else
3847 /*
3848 * Clear this in case of a retried CCB that
3849 * failed due to a rejected tag.
3850 */
3851 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3852 }
3853
3854 /*
3855 * Device queues can be shared among multiple sim instances
3856 * that reside on different busses. Use the SIM in the queue
3857 * CCB's path, rather than the one in the bus that was passed
3858 * into this function.
3859 */
3860 sim = work_ccb->ccb_h.path->bus->sim;
3861 (*(sim->sim_action))(sim, work_ccb);
3862
3863 devq->active_dev = NULL;
3864 }
3865 devq->send_queue.qfrozen_cnt--;
3866}
3867
3868/*
3869 * This function merges fields from the slave ccb into the master ccb, while
3870 * keeping important fields in the master ccb constant.
3871 */
3872void
3873xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3874{
3875
3876 /*
3877 * Pull fields that are valid for peripheral drivers to set
3878 * into the master CCB along with the CCB "payload".
3879 */
3880 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3881 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3882 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3883 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
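 /*
  * Editorial note: &(&slave_ccb->ccb_h)[1] is the address immediately past
  * the embedded ccb_hdr, i.e. the start of the function-specific payload,
  * so the copy below transfers everything except the header fields merged
  * individually above.
  */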
3884 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3885 sizeof(union ccb) - sizeof(struct ccb_hdr));
3886}
3887
3888void
3889xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3890{
3891
3892 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3893 ccb_h->pinfo.priority = priority;
3894 ccb_h->path = path;
3895 ccb_h->path_id = path->bus->path_id;
3896 if (path->target)
3897 ccb_h->target_id = path->target->target_id;
3898 else
3899 ccb_h->target_id = CAM_TARGET_WILDCARD;
3900 if (path->device) {
3901 ccb_h->target_lun = path->device->lun_id;
3902 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3903 } else {
3904 ccb_h->target_lun = CAM_TARGET_WILDCARD;
3905 }
3906 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3907 ccb_h->flags = 0;
3908}
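
/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * typical immediate-CCB caller pairs xpt_setup_ccb() with xpt_action(),
 * for example to issue a path inquiry:
 *
 *	struct ccb_pathinq cpi;
 *
 *	xpt_setup_ccb(&cpi.ccb_h, path, 1);
 *	cpi.ccb_h.func_code = XPT_PATH_INQ;
 *	xpt_action((union ccb *)&cpi);
 *
 * The SIM lock for "path" is assumed to be held by the caller.
 */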
3909
3910/* Path manipulation functions */
3911cam_status
3912xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3913 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3914{
3915 struct cam_path *path;
3916 cam_status status;
3917
3918 path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
3919
3920 if (path == NULL) {
3921 status = CAM_RESRC_UNAVAIL;
3922 return(status);
3923 }
3924 status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3925 if (status != CAM_REQ_CMP) {
3926 free(path, M_CAMXPT);
3927 path = NULL;
3928 }
3929 *new_path_ptr = path;
3930 return (status);
3931}
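
/*
 * Illustrative sketch (an assumption, not from the original file): the
 * usual lifetime of a path created here is
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, periph, path_id, target_id, lun_id) !=
 *	    CAM_REQ_CMP)
 *		return;
 *	...use the path for CCBs or async callbacks...
 *	xpt_free_path(path);
 *
 * xpt_compile_path() takes references on the bus, target, and device, and
 * xpt_free_path()/xpt_release_path() drop them again.
 */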
3932
3933cam_status
3934xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3935 struct cam_periph *periph, path_id_t path_id,
3936 target_id_t target_id, lun_id_t lun_id)
3937{
3938 struct cam_path *path;
3939 struct cam_eb *bus = NULL;
3940 cam_status status;
3941 int need_unlock = 0;
3942
3943 path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_WAITOK);
3944
3945 if (path_id != CAM_BUS_WILDCARD) {
3946 bus = xpt_find_bus(path_id);
3947 if (bus != NULL) {
3948 need_unlock = 1;
3949 CAM_SIM_LOCK(bus->sim);
3950 }
3951 }
3952 status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
3953 if (need_unlock)
3954 CAM_SIM_UNLOCK(bus->sim);
3955 if (status != CAM_REQ_CMP) {
3956 free(path, M_CAMXPT);
3957 path = NULL;
3958 }
3959 *new_path_ptr = path;
3960 return (status);
3961}
3962
3963static cam_status
3964xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3965 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3966{
3967 struct cam_eb *bus;
3968 struct cam_et *target;
3969 struct cam_ed *device;
3970 cam_status status;
3971
3972 status = CAM_REQ_CMP; /* Completed without error */
3973 target = NULL; /* Wildcarded */
3974 device = NULL; /* Wildcarded */
3975
3976 /*
3977 * We will potentially modify the EDT, so block interrupts
3978 * that may attempt to create cam paths.
3979 */
3980 bus = xpt_find_bus(path_id);
3981 if (bus == NULL) {
3982 status = CAM_PATH_INVALID;
3983 } else {
3984 target = xpt_find_target(bus, target_id);
3985 if (target == NULL) {
3986 /* Create one */
3987 struct cam_et *new_target;
3988
3989 new_target = xpt_alloc_target(bus, target_id);
3990 if (new_target == NULL) {
3991 status = CAM_RESRC_UNAVAIL;
3992 } else {
3993 target = new_target;
3994 }
3995 }
3996 if (target != NULL) {
3997 device = xpt_find_device(target, lun_id);
3998 if (device == NULL) {
3999 /* Create one */
4000 struct cam_ed *new_device;
4001
4002 new_device = xpt_alloc_device(bus,
4003 target,
4004 lun_id);
4005 if (new_device == NULL) {
4006 status = CAM_RESRC_UNAVAIL;
4007 } else {
4008 device = new_device;
4009 }
4010 }
4011 }
4012 }
4013
4014 /*
4015 * Only touch the user's data if we are successful.
4016 */
4017 if (status == CAM_REQ_CMP) {
4018 new_path->periph = perph;
4019 new_path->bus = bus;
4020 new_path->target = target;
4021 new_path->device = device;
4022 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
4023 } else {
4024 if (device != NULL)
4025 xpt_release_device(bus, target, device);
4026 if (target != NULL)
4027 xpt_release_target(bus, target);
4028 if (bus != NULL)
4029 xpt_release_bus(bus);
4030 }
4031 return (status);
4032}
4033
4034static void
4035xpt_release_path(struct cam_path *path)
4036{
4037 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
4038 if (path->device != NULL) {
4039 xpt_release_device(path->bus, path->target, path->device);
4040 path->device = NULL;
4041 }
4042 if (path->target != NULL) {
4043 xpt_release_target(path->bus, path->target);
4044 path->target = NULL;
4045 }
4046 if (path->bus != NULL) {
4047 xpt_release_bus(path->bus);
4048 path->bus = NULL;
4049 }
4050}
4051
4052void
4053xpt_free_path(struct cam_path *path)
4054{
4055
4056 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
4057 xpt_release_path(path);
4058 free(path, M_CAMXPT);
4059}
4060
4061
4062/*
4063 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
4064 * in path1, 2 for match with wildcards in path2.
4065 */
4066int
4067xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
4068{
4069 int retval = 0;
4070
4071 if (path1->bus != path2->bus) {
4072 if (path1->bus->path_id == CAM_BUS_WILDCARD)
4073 retval = 1;
4074 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
4075 retval = 2;
4076 else
4077 return (-1);
4078 }
4079 if (path1->target != path2->target) {
4080 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
4081 if (retval == 0)
4082 retval = 1;
4083 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
4084 retval = 2;
4085 else
4086 return (-1);
4087 }
4088 if (path1->device != path2->device) {
4089 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
4090 if (retval == 0)
4091 retval = 1;
4092 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
4093 retval = 2;
4094 else
4095 return (-1);
4096 }
4097 return (retval);
4098}
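
/*
 * Illustrative example (editorial): comparing a wildcard path for an entire
 * bus (CAM_TARGET_WILDCARD/CAM_LUN_WILDCARD) against a fully specified path
 * on the same bus returns 1 (wildcards in path1); swapping the arguments
 * returns 2; paths on different, non-wildcard busses return -1.  xpt_rescan()
 * later in this file relies on a result of 0 to detect an identical path
 * that is already queued.
 */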
4099
4100void
4101xpt_print_path(struct cam_path *path)
4102{
4103
4104 if (path == NULL)
4105 printf("(nopath): ");
4106 else {
4107 if (path->periph != NULL)
4108 printf("(%s%d:", path->periph->periph_name,
4109 path->periph->unit_number);
4110 else
4111 printf("(noperiph:");
4112
4113 if (path->bus != NULL)
4114 printf("%s%d:%d:", path->bus->sim->sim_name,
4115 path->bus->sim->unit_number,
4116 path->bus->sim->bus_id);
4117 else
4118 printf("nobus:");
4119
4120 if (path->target != NULL)
4121 printf("%d:", path->target->target_id);
4122 else
4123 printf("X:");
4124
4125 if (path->device != NULL)
4126 printf("%d): ", path->device->lun_id);
4127 else
4128 printf("X): ");
4129 }
4130}
4131
4132void
4133xpt_print(struct cam_path *path, const char *fmt, ...)
4134{
4135 va_list ap;
4136 xpt_print_path(path);
4137 va_start(ap, fmt);
4138 vprintf(fmt, ap);
4139 va_end(ap);
4140}
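
/*
 * Editorial note: the prefix produced by xpt_print_path() has the form
 * "(periph_name+unit:sim_name+unit:bus:target:lun): ", so a call such as
 * xpt_print(path, "timed out\n") prints something like
 * "(da0:ahc0:0:1:0): timed out".
 */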
4141
4142int
4143xpt_path_string(struct cam_path *path, char *str, size_t str_len)
4144{
4145 struct sbuf sb;
4146
4147 sbuf_new(&sb, str, str_len, 0);
4148
4149 if (path == NULL)
4150 sbuf_printf(&sb, "(nopath): ");
4151 else {
4152 mtx_assert(path->bus->sim->mtx, MA_OWNED);
4153
4154 if (path->periph != NULL)
4155 sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
4156 path->periph->unit_number);
4157 else
4158 sbuf_printf(&sb, "(noperiph:");
4159
4160 if (path->bus != NULL)
4161 sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
4162 path->bus->sim->unit_number,
4163 path->bus->sim->bus_id);
4164 else
4165 sbuf_printf(&sb, "nobus:");
4166
4167 if (path->target != NULL)
4168 sbuf_printf(&sb, "%d:", path->target->target_id);
4169 else
4170 sbuf_printf(&sb, "X:");
4171
4172 if (path->device != NULL)
4173 sbuf_printf(&sb, "%d): ", path->device->lun_id);
4174 else
4175 sbuf_printf(&sb, "X): ");
4176 }
4177 sbuf_finish(&sb);
4178
4179 return(sbuf_len(&sb));
4180}
4181
4182path_id_t
4183xpt_path_path_id(struct cam_path *path)
4184{
4185 mtx_assert(path->bus->sim->mtx, MA_OWNED);
4186
4187 return(path->bus->path_id);
4188}
4189
4190target_id_t
4191xpt_path_target_id(struct cam_path *path)
4192{
4193 mtx_assert(path->bus->sim->mtx, MA_OWNED);
4194
4195 if (path->target != NULL)
4196 return (path->target->target_id);
4197 else
4198 return (CAM_TARGET_WILDCARD);
4199}
4200
4201lun_id_t
4202xpt_path_lun_id(struct cam_path *path)
4203{
4204 mtx_assert(path->bus->sim->mtx, MA_OWNED);
4205
4206 if (path->device != NULL)
4207 return (path->device->lun_id);
4208 else
4209 return (CAM_LUN_WILDCARD);
4210}
4211
4212struct cam_sim *
4213xpt_path_sim(struct cam_path *path)
4214{
4215
4216 return (path->bus->sim);
4217}
4218
4219struct cam_periph*
4220xpt_path_periph(struct cam_path *path)
4221{
4222 mtx_assert(path->bus->sim->mtx, MA_OWNED);
4223
4224 return (path->periph);
4225}
4226
4227/*
4228 * Release a CAM control block for the caller. Remit the cost of the structure
4229 * to the device referenced by the path. If this device had no 'credits'
4230 * and peripheral drivers have registered async callbacks for this
4231 * notification, call them now.
4232 */
4233void
4234xpt_release_ccb(union ccb *free_ccb)
4235{
4236 struct cam_path *path;
4237 struct cam_ed *device;
4238 struct cam_eb *bus;
4239 struct cam_sim *sim;
4240
4241 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4242 path = free_ccb->ccb_h.path;
4243 device = path->device;
4244 bus = path->bus;
4245 sim = bus->sim;
4246
4247 mtx_assert(sim->mtx, MA_OWNED);
4248
4249 cam_ccbq_release_opening(&device->ccbq);
4250 if (sim->ccb_count > sim->max_ccbs) {
4251 xpt_free_ccb(free_ccb);
4252 sim->ccb_count--;
4253 } else {
4254 SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
4255 xpt_links.sle);
4256 }
4257 if (sim->devq == NULL) {
4258 return;
4259 }
4260 sim->devq->alloc_openings++;
4261 sim->devq->alloc_active--;
4262 /* XXX Turn this into an inline function - xpt_run_device?? */
4263 if ((device_is_alloc_queued(device) == 0)
4264 && (device->drvq.entries > 0)) {
4265 xpt_schedule_dev_allocq(bus, device);
4266 }
4267 if (dev_allocq_is_runnable(sim->devq))
4268 xpt_run_dev_allocq(bus);
4269}
4270
4271/* Functions accessed by SIM drivers */
4272
4273/*
4274 * A sim structure, listing the SIM entry points and instance
4275 * identification info is passed to xpt_bus_register to hook the SIM
4276 * into the CAM framework. xpt_bus_register creates a cam_eb entry
4277 * for this new bus, places it in the list of busses, and assigns
4278 * it a path_id. The path_id may be influenced by "hard wiring"
4279 * information specified by the user. Once interrupt services are
4280 * available, the bus will be probed.
4281 */
4282int32_t
4283xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
4284{
4285 struct cam_eb *new_bus;
4286 struct cam_eb *old_bus;
4287 struct ccb_pathinq cpi;
4288
4289 mtx_assert(sim->mtx, MA_OWNED);
4290
4291 sim->bus_id = bus;
4292 new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
4293 M_CAMXPT, M_NOWAIT);
4294 if (new_bus == NULL) {
4295 /* Couldn't satisfy request */
4296 return (CAM_RESRC_UNAVAIL);
4297 }
4298
4299 if (strcmp(sim->sim_name, "xpt") != 0) {
4300
4301 sim->path_id =
4302 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4303 }
4304
4305 TAILQ_INIT(&new_bus->et_entries);
4306 new_bus->path_id = sim->path_id;
32
33#include <sys/param.h>
34#include <sys/bus.h>
35#include <sys/systm.h>
36#include <sys/types.h>
37#include <sys/malloc.h>
38#include <sys/kernel.h>
39#include <sys/time.h>
40#include <sys/conf.h>
41#include <sys/fcntl.h>
42#include <sys/md5.h>
43#include <sys/interrupt.h>
44#include <sys/sbuf.h>
45#include <sys/taskqueue.h>
46
47#include <sys/lock.h>
48#include <sys/mutex.h>
49#include <sys/sysctl.h>
50#include <sys/kthread.h>
51
52#ifdef PC98
53#include <pc98/pc98/pc98_machdep.h> /* geometry translation */
54#endif
55
56#include <cam/cam.h>
57#include <cam/cam_ccb.h>
58#include <cam/cam_periph.h>
59#include <cam/cam_sim.h>
60#include <cam/cam_xpt.h>
61#include <cam/cam_xpt_sim.h>
62#include <cam/cam_xpt_periph.h>
63#include <cam/cam_debug.h>
64
65#include <cam/scsi/scsi_all.h>
66#include <cam/scsi/scsi_message.h>
67#include <cam/scsi/scsi_pass.h>
68#include <machine/stdarg.h> /* for xpt_print below */
69#include "opt_cam.h"
70
71/* Datastructures internal to the xpt layer */
72MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
73
74/* Object for deferring XPT actions to a taskqueue */
75struct xpt_task {
76 struct task task;
77 void *data1;
78 uintptr_t data2;
79};
80
81/*
82 * Definition of an async handler callback block. These are used to add
83 * SIMs and peripherals to the async callback lists.
84 */
85struct async_node {
86 SLIST_ENTRY(async_node) links;
87 u_int32_t event_enable; /* Async Event enables */
88 void (*callback)(void *arg, u_int32_t code,
89 struct cam_path *path, void *args);
90 void *callback_arg;
91};
92
93SLIST_HEAD(async_list, async_node);
94SLIST_HEAD(periph_list, cam_periph);
95
96/*
97 * This is the maximum number of high powered commands (e.g. start unit)
98 * that can be outstanding at a particular time.
99 */
100#ifndef CAM_MAX_HIGHPOWER
101#define CAM_MAX_HIGHPOWER 4
102#endif
103
104/*
105 * Structure for queueing a device in a run queue.
106 * There is one run queue for allocating new ccbs,
107 * and another for sending ccbs to the controller.
108 */
109struct cam_ed_qinfo {
110 cam_pinfo pinfo;
111 struct cam_ed *device;
112};
113
114/*
115 * The CAM EDT (Existing Device Table) contains the device information for
116 * all devices for all busses in the system. The table contains a
117 * cam_ed structure for each device on the bus.
118 */
119struct cam_ed {
120 TAILQ_ENTRY(cam_ed) links;
121 struct cam_ed_qinfo alloc_ccb_entry;
122 struct cam_ed_qinfo send_ccb_entry;
123 struct cam_et *target;
124 struct cam_sim *sim;
125 lun_id_t lun_id;
126 struct camq drvq; /*
127 * Queue of type drivers wanting to do
128 * work on this device.
129 */
130 struct cam_ccbq ccbq; /* Queue of pending ccbs */
131 struct async_list asyncs; /* Async callback info for this B/T/L */
132 struct periph_list periphs; /* All attached devices */
133 u_int generation; /* Generation number */
134 struct cam_periph *owner; /* Peripheral driver's ownership tag */
135 struct xpt_quirk_entry *quirk; /* Oddities about this device */
136 /* Storage for the inquiry data */
137 cam_proto protocol;
138 u_int protocol_version;
139 cam_xport transport;
140 u_int transport_version;
141 struct scsi_inquiry_data inq_data;
142 u_int8_t inq_flags; /*
143 * Current settings for inquiry flags.
144 * This allows us to override settings
145 * like disconnection and tagged
146 * queuing for a device.
147 */
148 u_int8_t queue_flags; /* Queue flags from the control page */
149 u_int8_t serial_num_len;
150 u_int8_t *serial_num;
151 u_int32_t qfrozen_cnt;
152 u_int32_t flags;
153#define CAM_DEV_UNCONFIGURED 0x01
154#define CAM_DEV_REL_TIMEOUT_PENDING 0x02
155#define CAM_DEV_REL_ON_COMPLETE 0x04
156#define CAM_DEV_REL_ON_QUEUE_EMPTY 0x08
157#define CAM_DEV_RESIZE_QUEUE_NEEDED 0x10
158#define CAM_DEV_TAG_AFTER_COUNT 0x20
159#define CAM_DEV_INQUIRY_DATA_VALID 0x40
160#define CAM_DEV_IN_DV 0x80
161#define CAM_DEV_DV_HIT_BOTTOM 0x100
162 u_int32_t tag_delay_count;
163#define CAM_TAG_DELAY_COUNT 5
164 u_int32_t tag_saved_openings;
165 u_int32_t refcount;
166 struct callout callout;
167};
168
169/*
170 * Each target is represented by an ET (Existing Target). These
171 * entries are created when a target is successfully probed with an
172 * identify, and removed when a device fails to respond after a number
173 * of retries, or a bus rescan finds the device missing.
174 */
175struct cam_et {
176 TAILQ_HEAD(, cam_ed) ed_entries;
177 TAILQ_ENTRY(cam_et) links;
178 struct cam_eb *bus;
179 target_id_t target_id;
180 u_int32_t refcount;
181 u_int generation;
182 struct timeval last_reset;
183};
184
185/*
186 * Each bus is represented by an EB (Existing Bus). These entries
187 * are created by calls to xpt_bus_register and deleted by calls to
188 * xpt_bus_deregister.
189 */
190struct cam_eb {
191 TAILQ_HEAD(, cam_et) et_entries;
192 TAILQ_ENTRY(cam_eb) links;
193 path_id_t path_id;
194 struct cam_sim *sim;
195 struct timeval last_reset;
196 u_int32_t flags;
197#define CAM_EB_RUNQ_SCHEDULED 0x01
198 u_int32_t refcount;
199 u_int generation;
200 device_t parent_dev;
201};
202
203struct cam_path {
204 struct cam_periph *periph;
205 struct cam_eb *bus;
206 struct cam_et *target;
207 struct cam_ed *device;
208};
209
210struct xpt_quirk_entry {
211 struct scsi_inquiry_pattern inq_pat;
212 u_int8_t quirks;
213#define CAM_QUIRK_NOLUNS 0x01
214#define CAM_QUIRK_NOSERIAL 0x02
215#define CAM_QUIRK_HILUNS 0x04
216#define CAM_QUIRK_NOHILUNS 0x08
217 u_int mintags;
218 u_int maxtags;
219};
220
221static int cam_srch_hi = 0;
222TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
223static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
224SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
225 sysctl_cam_search_luns, "I",
226 "allow search above LUN 7 for SCSI3 and greater devices");
227
228#define CAM_SCSI2_MAXLUN 8
229/*
230 * If we're not quirked to search only the first 8 luns
231 * and we are either quirked to search above lun 8,
232 * or we're > SCSI-2 and we've enabled hilun searching,
233 * or we're > SCSI-2 and the last lun was a success,
234 * we can look for luns above lun 8.
235 */
236#define CAN_SRCH_HI_SPARSE(dv) \
237 (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0) \
238 && ((dv->quirk->quirks & CAM_QUIRK_HILUNS) \
239 || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))
240
241#define CAN_SRCH_HI_DENSE(dv) \
242 (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0) \
243 && ((dv->quirk->quirks & CAM_QUIRK_HILUNS) \
244 || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
245
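/*
 * Worked example (editorial): for an unquirked SCSI-3 disk, neither
 * CAM_QUIRK_NOHILUNS nor CAM_QUIRK_HILUNS is set, so CAN_SRCH_HI_DENSE is
 * true (the probe code elsewhere in this file keeps going past LUN 7 as
 * long as the previous LUN answered), while CAN_SRCH_HI_SPARSE additionally
 * requires kern.cam.cam_srch_hi to be set before LUNs above 7 are probed
 * speculatively.
 */
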
246typedef enum {
247 XPT_FLAG_OPEN = 0x01
248} xpt_flags;
249
250struct xpt_softc {
251 xpt_flags flags;
252 u_int32_t xpt_generation;
253
254 /* number of high powered commands that can go through right now */
255 STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
256 int num_highpower;
257
258 /* queue for handling async rescan requests. */
259 TAILQ_HEAD(, ccb_hdr) ccb_scanq;
260
261 /* Registered busses */
262 TAILQ_HEAD(,cam_eb) xpt_busses;
263 u_int bus_generation;
264
265 struct intr_config_hook *xpt_config_hook;
266
267 struct mtx xpt_topo_lock;
268 struct mtx xpt_lock;
269};
270
271static const char quantum[] = "QUANTUM";
272static const char sony[] = "SONY";
273static const char west_digital[] = "WDIGTL";
274static const char samsung[] = "SAMSUNG";
275static const char seagate[] = "SEAGATE";
276static const char microp[] = "MICROP";
277
278static struct xpt_quirk_entry xpt_quirk_table[] =
279{
280 {
281 /* Reports QUEUE FULL for temporary resource shortages */
282 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
283 /*quirks*/0, /*mintags*/24, /*maxtags*/32
284 },
285 {
286 /* Reports QUEUE FULL for temporary resource shortages */
287 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
288 /*quirks*/0, /*mintags*/24, /*maxtags*/32
289 },
290 {
291 /* Reports QUEUE FULL for temporary resource shortages */
292 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
293 /*quirks*/0, /*mintags*/24, /*maxtags*/32
294 },
295 {
296 /* Broken tagged queuing drive */
297 { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
298 /*quirks*/0, /*mintags*/0, /*maxtags*/0
299 },
300 {
301 /* Broken tagged queuing drive */
302 { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
303 /*quirks*/0, /*mintags*/0, /*maxtags*/0
304 },
305 {
306 /* Broken tagged queuing drive */
307 { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
308 /*quirks*/0, /*mintags*/0, /*maxtags*/0
309 },
310 {
311 /*
312 * Unfortunately, the Quantum Atlas III has the same
313 * problem as the Atlas II drives above.
314 * Reported by: "Johan Granlund" <johan@granlund.nu>
315 *
316 * For future reference, the drive with the problem was:
317 * QUANTUM QM39100TD-SW N1B0
318 *
319 * It's possible that Quantum will fix the problem in later
320 * firmware revisions. If that happens, the quirk entry
321 * will need to be made specific to the firmware revisions
322 * with the problem.
323 *
324 */
325 /* Reports QUEUE FULL for temporary resource shortages */
326 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
327 /*quirks*/0, /*mintags*/24, /*maxtags*/32
328 },
329 {
330 /*
331 * 18 Gig Atlas III, same problem as the 9G version.
332 * Reported by: Andre Albsmeier
333 * <andre.albsmeier@mchp.siemens.de>
334 *
335 * For future reference, the drive with the problem was:
336 * QUANTUM QM318000TD-S N491
337 */
338 /* Reports QUEUE FULL for temporary resource shortages */
339 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
340 /*quirks*/0, /*mintags*/24, /*maxtags*/32
341 },
342 {
343 /*
344 * Broken tagged queuing drive
345 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
346 * and: Martin Renters <martin@tdc.on.ca>
347 */
348 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
349 /*quirks*/0, /*mintags*/0, /*maxtags*/0
350 },
351 /*
352 * The Seagate Medalist Pro drives have very poor write
353 * performance with anything more than 2 tags.
354 *
355 * Reported by: Paul van der Zwan <paulz@trantor.xs4all.nl>
356 * Drive: <SEAGATE ST36530N 1444>
357 *
358 * Reported by: Jeremy Lea <reg@shale.csir.co.za>
359 * Drive: <SEAGATE ST34520W 1281>
360 *
361 * No one has actually reported that the 9G version
362 * (ST39140*) of the Medalist Pro has the same problem, but
363 * we're assuming that it does because the 4G and 6.5G
364 * versions of the drive are broken.
365 */
366 {
367 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
368 /*quirks*/0, /*mintags*/2, /*maxtags*/2
369 },
370 {
371 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
372 /*quirks*/0, /*mintags*/2, /*maxtags*/2
373 },
374 {
375 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
376 /*quirks*/0, /*mintags*/2, /*maxtags*/2
377 },
378 {
379 /*
380 * Slow when tagged queueing is enabled. Write performance
381 * steadily drops off with more and more concurrent
382 * transactions. Best sequential write performance with
383 * tagged queueing turned off and write caching turned on.
384 *
385 * PR: kern/10398
386 * Submitted by: Hideaki Okada <hokada@isl.melco.co.jp>
387 * Drive: DCAS-34330 w/ "S65A" firmware.
388 *
389 * The drive with the problem had the "S65A" firmware
390 * revision, and has also been reported (by Stephen J.
391 * Roznowski <sjr@home.net>) for a drive with the "S61A"
392 * firmware revision.
393 *
394 * Although no one has reported problems with the 2 gig
395 * version of the DCAS drive, the assumption is that it
396 * has the same problems as the 4 gig version. Therefore
397 * this quirk entry disables tagged queueing for all
398 * DCAS drives.
399 */
400 { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
401 /*quirks*/0, /*mintags*/0, /*maxtags*/0
402 },
403 {
404 /* Broken tagged queuing drive */
405 { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
406 /*quirks*/0, /*mintags*/0, /*maxtags*/0
407 },
408 {
409 /* Broken tagged queuing drive */
410 { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
411 /*quirks*/0, /*mintags*/0, /*maxtags*/0
412 },
413 {
414 /* This does not support other than LUN 0 */
415 { T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
416 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
417 },
418 {
419 /*
420 * Broken tagged queuing drive.
421 * Submitted by:
422 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
423 * in PR kern/9535
424 */
425 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
426 /*quirks*/0, /*mintags*/0, /*maxtags*/0
427 },
428 {
429 /*
430 * Slow when tagged queueing is enabled. (1.5MB/sec versus
431 * 8MB/sec.)
432 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
433 * Best performance with these drives is achieved with
434 * tagged queueing turned off, and write caching turned on.
435 */
436 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
437 /*quirks*/0, /*mintags*/0, /*maxtags*/0
438 },
439 {
440 /*
441 * Slow when tagged queueing is enabled. (1.5MB/sec versus
442 * 8MB/sec.)
443 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
444 * Best performance with these drives is achieved with
445 * tagged queueing turned off, and write caching turned on.
446 */
447 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
448 /*quirks*/0, /*mintags*/0, /*maxtags*/0
449 },
450 {
451 /*
452 * Doesn't handle queue full condition correctly,
453 * so we need to limit maxtags to what the device
454 * can handle instead of determining this automatically.
455 */
456 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
457 /*quirks*/0, /*mintags*/2, /*maxtags*/32
458 },
459 {
460 /* Really only one LUN */
461 { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
462 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
463 },
464 {
465 /* I can't believe we need a quirk for DPT volumes. */
466 { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
467 CAM_QUIRK_NOLUNS,
468 /*mintags*/0, /*maxtags*/255
469 },
470 {
471 /*
472 * Many Sony CDROM drives don't like multi-LUN probing.
473 */
474 { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
475 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
476 },
477 {
478 /*
479 * This drive doesn't like multiple LUN probing.
480 * Submitted by: Parag Patel <parag@cgt.com>
481 */
482 { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
483 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
484 },
485 {
486 { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
487 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
488 },
489 {
490 /*
491 * The 8200 doesn't like multi-lun probing, and probably
492 * doesn't like serial number requests either.
493 */
494 {
495 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
496 "EXB-8200*", "*"
497 },
498 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
499 },
500 {
501 /*
502 * Let's try the same as above, but for a drive that says
503 * it's an IPL-6860 but is actually an EXB 8200.
504 */
505 {
506 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
507 "IPL-6860*", "*"
508 },
509 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
510 },
511 {
512 /*
513 * These Hitachi drives don't like multi-lun probing.
514 * The PR submitter has a DK319H, but says that the Linux
515 * kernel has a similar work-around for the DK312 and DK314,
516 * so all DK31* drives are quirked here.
517 * PR: misc/18793
518 * Submitted by: Paul Haddad <paul@pth.com>
519 */
520 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
521 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
522 },
523 {
524 /*
525 * The Hitachi CJ series with J8A8 firmware apparently has
526 * problems with tagged commands.
527 * PR: 23536
528 * Reported by: amagai@nue.org
529 */
530 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
531 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
532 },
533 {
534 /*
535 * These are the large storage arrays.
536 * Submitted by: William Carrel <william.carrel@infospace.com>
537 */
538 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
539 CAM_QUIRK_HILUNS, 2, 1024
540 },
541 {
542 /*
543 * This old revision of the TDC3600 is also SCSI-1, and
544 * hangs upon serial number probing.
545 */
546 {
547 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
548 " TDC 3600", "U07:"
549 },
550 CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
551 },
552 {
553 /*
554 * Would respond to all LUNs if asked for.
555 */
556 {
557 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
558 "CP150", "*"
559 },
560 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
561 },
562 {
563 /*
564 * Would respond to all LUNs if asked for.
565 */
566 {
567 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
568 "96X2*", "*"
569 },
570 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
571 },
572 {
573 /* Submitted by: Matthew Dodd <winter@jurai.net> */
574 { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
575 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
576 },
577 {
578 /* Submitted by: Matthew Dodd <winter@jurai.net> */
579 { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
580 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
581 },
582 {
583 /* TeraSolutions special settings for TRC-22 RAID */
584 { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
585 /*quirks*/0, /*mintags*/55, /*maxtags*/255
586 },
587 {
588 /* Veritas Storage Appliance */
589 { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
590 CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
591 },
592 {
593 /*
594 * Would respond to all LUNs. Device type and removable
595 * flag are jumper-selectable.
596 */
597 { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
598 "Tahiti 1", "*"
599 },
600 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
601 },
602 {
603 /* EasyRAID E5A aka. areca ARC-6010 */
604 { T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
605 CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
606 },
607 {
608 { T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" },
609 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
610 },
611 {
612 /* Default tagged queuing parameters for all devices */
613 {
614 T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
615 /*vendor*/"*", /*product*/"*", /*revision*/"*"
616 },
617 /*quirks*/0, /*mintags*/2, /*maxtags*/255
618 },
619};
620
621static const int xpt_quirk_table_size =
622 sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
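
/*
 * Editorial note: the table above is searched in order against a device's
 * inquiry data (see xpt_find_quirk() later in this file), and the first
 * matching entry wins.  The final catch-all entry therefore supplies the
 * defaults for unlisted devices: no quirk flags and a tag range of 2-255.
 */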
623
624typedef enum {
625 DM_RET_COPY = 0x01,
626 DM_RET_FLAG_MASK = 0x0f,
627 DM_RET_NONE = 0x00,
628 DM_RET_STOP = 0x10,
629 DM_RET_DESCEND = 0x20,
630 DM_RET_ERROR = 0x30,
631 DM_RET_ACTION_MASK = 0xf0
632} dev_match_ret;
633
634typedef enum {
635 XPT_DEPTH_BUS,
636 XPT_DEPTH_TARGET,
637 XPT_DEPTH_DEVICE,
638 XPT_DEPTH_PERIPH
639} xpt_traverse_depth;
640
641struct xpt_traverse_config {
642 xpt_traverse_depth depth;
643 void *tr_func;
644 void *tr_arg;
645};
646
647typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg);
648typedef int xpt_targetfunc_t (struct cam_et *target, void *arg);
649typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg);
650typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg);
651typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
652
653/* Transport layer configuration information */
654static struct xpt_softc xsoftc;
655
656/* Queues for our software interrupt handler */
657typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
658typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
659static cam_simq_t cam_simq;
660static struct mtx cam_simq_lock;
661
662/* Pointers to software interrupt handlers */
663static void *cambio_ih;
664
665struct cam_periph *xpt_periph;
666
667static periph_init_t xpt_periph_init;
668
669static periph_init_t probe_periph_init;
670
671static struct periph_driver xpt_driver =
672{
673 xpt_periph_init, "xpt",
674 TAILQ_HEAD_INITIALIZER(xpt_driver.units)
675};
676
677static struct periph_driver probe_driver =
678{
679 probe_periph_init, "probe",
680 TAILQ_HEAD_INITIALIZER(probe_driver.units)
681};
682
683PERIPHDRIVER_DECLARE(xpt, xpt_driver);
684PERIPHDRIVER_DECLARE(probe, probe_driver);
685
686
687static d_open_t xptopen;
688static d_close_t xptclose;
689static d_ioctl_t xptioctl;
690
691static struct cdevsw xpt_cdevsw = {
692 .d_version = D_VERSION,
693 .d_flags = 0,
694 .d_open = xptopen,
695 .d_close = xptclose,
696 .d_ioctl = xptioctl,
697 .d_name = "xpt",
698};
699
700
701/* Storage for debugging datastructures */
702#ifdef CAMDEBUG
703struct cam_path *cam_dpath;
704u_int32_t cam_dflags;
705u_int32_t cam_debug_delay;
706#endif
707
708#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
709#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
710#endif
711
712/*
713 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
714 * enabled. Also, the user must have either none, or all of CAM_DEBUG_BUS,
715 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
716 */
717#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
718 || defined(CAM_DEBUG_LUN)
719#ifdef CAMDEBUG
720#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
721 || !defined(CAM_DEBUG_LUN)
722#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
723 and CAM_DEBUG_LUN"
724#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
725#else /* !CAMDEBUG */
726#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
727#endif /* CAMDEBUG */
728#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
729
730/* Our boot-time initialization hook */
731static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
732
733static moduledata_t cam_moduledata = {
734 "cam",
735 cam_module_event_handler,
736 NULL
737};
738
739static int xpt_init(void *);
740
741DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
742MODULE_VERSION(cam, 1);
743
744
745static cam_status xpt_compile_path(struct cam_path *new_path,
746 struct cam_periph *perph,
747 path_id_t path_id,
748 target_id_t target_id,
749 lun_id_t lun_id);
750
751static void xpt_release_path(struct cam_path *path);
752
753static void xpt_async_bcast(struct async_list *async_head,
754 u_int32_t async_code,
755 struct cam_path *path,
756 void *async_arg);
757static void xpt_dev_async(u_int32_t async_code,
758 struct cam_eb *bus,
759 struct cam_et *target,
760 struct cam_ed *device,
761 void *async_arg);
762static path_id_t xptnextfreepathid(void);
763static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
764static union ccb *xpt_get_ccb(struct cam_ed *device);
765static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
766 u_int32_t new_priority);
767static void xpt_run_dev_allocq(struct cam_eb *bus);
768static void xpt_run_dev_sendq(struct cam_eb *bus);
769static timeout_t xpt_release_devq_timeout;
770static void xpt_release_simq_timeout(void *arg) __unused;
771static void xpt_release_bus(struct cam_eb *bus);
772static void xpt_release_devq_device(struct cam_ed *dev, u_int count,
773 int run_queue);
774static struct cam_et*
775 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
776static void xpt_release_target(struct cam_eb *bus, struct cam_et *target);
777static struct cam_ed*
778 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
779 lun_id_t lun_id);
780static void xpt_release_device(struct cam_eb *bus, struct cam_et *target,
781 struct cam_ed *device);
782static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
783static struct cam_eb*
784 xpt_find_bus(path_id_t path_id);
785static struct cam_et*
786 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
787static struct cam_ed*
788 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
789static void xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
790static void xpt_scan_lun(struct cam_periph *periph,
791 struct cam_path *path, cam_flags flags,
792 union ccb *ccb);
793static void xptscandone(struct cam_periph *periph, union ccb *done_ccb);
794static xpt_busfunc_t xptconfigbuscountfunc;
795static xpt_busfunc_t xptconfigfunc;
796static void xpt_config(void *arg);
797static xpt_devicefunc_t xptpassannouncefunc;
798static void xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
799static void xptaction(struct cam_sim *sim, union ccb *work_ccb);
800static void xptpoll(struct cam_sim *sim);
801static void camisr(void *);
802static void camisr_runqueue(void *);
803static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns,
804 u_int num_patterns, struct cam_eb *bus);
805static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns,
806 u_int num_patterns,
807 struct cam_ed *device);
808static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns,
809 u_int num_patterns,
810 struct cam_periph *periph);
811static xpt_busfunc_t xptedtbusfunc;
812static xpt_targetfunc_t xptedttargetfunc;
813static xpt_devicefunc_t xptedtdevicefunc;
814static xpt_periphfunc_t xptedtperiphfunc;
815static xpt_pdrvfunc_t xptplistpdrvfunc;
816static xpt_periphfunc_t xptplistperiphfunc;
817static int xptedtmatch(struct ccb_dev_match *cdm);
818static int xptperiphlistmatch(struct ccb_dev_match *cdm);
819static int xptbustraverse(struct cam_eb *start_bus,
820 xpt_busfunc_t *tr_func, void *arg);
821static int xpttargettraverse(struct cam_eb *bus,
822 struct cam_et *start_target,
823 xpt_targetfunc_t *tr_func, void *arg);
824static int xptdevicetraverse(struct cam_et *target,
825 struct cam_ed *start_device,
826 xpt_devicefunc_t *tr_func, void *arg);
827static int xptperiphtraverse(struct cam_ed *device,
828 struct cam_periph *start_periph,
829 xpt_periphfunc_t *tr_func, void *arg);
830static int xptpdrvtraverse(struct periph_driver **start_pdrv,
831 xpt_pdrvfunc_t *tr_func, void *arg);
832static int xptpdperiphtraverse(struct periph_driver **pdrv,
833 struct cam_periph *start_periph,
834 xpt_periphfunc_t *tr_func,
835 void *arg);
836static xpt_busfunc_t xptdefbusfunc;
837static xpt_targetfunc_t xptdeftargetfunc;
838static xpt_devicefunc_t xptdefdevicefunc;
839static xpt_periphfunc_t xptdefperiphfunc;
840static int xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
841static int xpt_for_all_devices(xpt_devicefunc_t *tr_func,
842 void *arg);
843static xpt_devicefunc_t xptsetasyncfunc;
844static xpt_busfunc_t xptsetasyncbusfunc;
845static cam_status xptregister(struct cam_periph *periph,
846 void *arg);
847static cam_status proberegister(struct cam_periph *periph,
848 void *arg);
849static void probeschedule(struct cam_periph *probe_periph);
850static void probestart(struct cam_periph *periph, union ccb *start_ccb);
851static void proberequestdefaultnegotiation(struct cam_periph *periph);
852static int proberequestbackoff(struct cam_periph *periph,
853 struct cam_ed *device);
854static void probedone(struct cam_periph *periph, union ccb *done_ccb);
855static void probecleanup(struct cam_periph *periph);
856static void xpt_find_quirk(struct cam_ed *device);
857static void xpt_devise_transport(struct cam_path *path);
858static void xpt_set_transfer_settings(struct ccb_trans_settings *cts,
859 struct cam_ed *device,
860 int async_update);
861static void xpt_toggle_tags(struct cam_path *path);
862static void xpt_start_tags(struct cam_path *path);
863static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
864 struct cam_ed *dev);
865static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
866 struct cam_ed *dev);
867static __inline int periph_is_queued(struct cam_periph *periph);
868static __inline int device_is_alloc_queued(struct cam_ed *device);
869static __inline int device_is_send_queued(struct cam_ed *device);
870static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
871
872static __inline int
873xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
874{
875 int retval;
876
877 if (dev->ccbq.devq_openings > 0) {
878 if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
879 cam_ccbq_resize(&dev->ccbq,
880 dev->ccbq.dev_openings
881 + dev->ccbq.dev_active);
882 dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
883 }
884 /*
885 * The priority of a device waiting for CCB resources
886 * is that of the highest priority peripheral driver
887 * enqueued.
888 */
889 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
890 &dev->alloc_ccb_entry.pinfo,
891 CAMQ_GET_HEAD(&dev->drvq)->priority);
892 } else {
893 retval = 0;
894 }
895
896 return (retval);
897}
898
899static __inline int
900xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
901{
902 int retval;
903
904 if (dev->ccbq.dev_openings > 0) {
905 /*
906 * The priority of a device waiting for controller
907 * resources is that of the highest priority CCB
908 * enqueued.
909 */
910 retval =
911 xpt_schedule_dev(&bus->sim->devq->send_queue,
912 &dev->send_ccb_entry.pinfo,
913 CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
914 } else {
915 retval = 0;
916 }
917 return (retval);
918}
919
920static __inline int
921periph_is_queued(struct cam_periph *periph)
922{
923 return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
924}
925
926static __inline int
927device_is_alloc_queued(struct cam_ed *device)
928{
929 return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
930}
931
932static __inline int
933device_is_send_queued(struct cam_ed *device)
934{
935 return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
936}
937
938static __inline int
939dev_allocq_is_runnable(struct cam_devq *devq)
940{
941 /*
942 * Have work to do.
943 * Have space to do more work.
944 * Allowed to do work.
945 */
946 return ((devq->alloc_queue.qfrozen_cnt == 0)
947 && (devq->alloc_queue.entries > 0)
948 && (devq->alloc_openings > 0));
949}
950
951static void
952xpt_periph_init()
953{
954 make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
955}
956
957static void
958probe_periph_init()
959{
960}
961
962
963static void
964xptdone(struct cam_periph *periph, union ccb *done_ccb)
965{
966 /* Caller will release the CCB */
967 wakeup(&done_ccb->ccb_h.cbfcnp);
968}
969
970static int
971xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
972{
973
974 /*
975 * Only allow read-write access.
976 */
977 if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
978 return(EPERM);
979
980 /*
981 * We don't allow nonblocking access.
982 */
983 if ((flags & O_NONBLOCK) != 0) {
984 printf("%s: can't do nonblocking access\n", devtoname(dev));
985 return(ENODEV);
986 }
987
988 /* Mark ourselves open */
989 mtx_lock(&xsoftc.xpt_lock);
990 xsoftc.flags |= XPT_FLAG_OPEN;
991 mtx_unlock(&xsoftc.xpt_lock);
992
993 return(0);
994}
995
996static int
997xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
998{
999
1000 /* Mark ourselves closed */
1001 mtx_lock(&xsoftc.xpt_lock);
1002 xsoftc.flags &= ~XPT_FLAG_OPEN;
1003 mtx_unlock(&xsoftc.xpt_lock);
1004
1005 return(0);
1006}
1007
1008/*
1009 * Don't automatically grab the xpt softc lock here even though this is going
1010 * through the xpt device. The xpt device is really just a back door for
1011 * accessing other devices and SIMs, so the right thing to do is to grab
1012 * the appropriate SIM lock once the bus/SIM is located.
1013 */
1014static int
1015xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
1016{
1017 int error;
1018
1019 error = 0;
1020
1021 switch(cmd) {
1022 /*
1023 * For the transport layer CAMIOCOMMAND ioctl, we really only want
1024 * to accept CCB types that don't quite make sense to send through a
1025 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
1026 * in the CAM spec.
1027 */
1028 case CAMIOCOMMAND: {
1029 union ccb *ccb;
1030 union ccb *inccb;
1031 struct cam_eb *bus;
1032
1033 inccb = (union ccb *)addr;
1034
1035 bus = xpt_find_bus(inccb->ccb_h.path_id);
1036 if (bus == NULL) {
1037 error = EINVAL;
1038 break;
1039 }
1040
1041 switch(inccb->ccb_h.func_code) {
1042 case XPT_SCAN_BUS:
1043 case XPT_RESET_BUS:
1044 if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
1045 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
1046 error = EINVAL;
1047 break;
1048 }
1049 /* FALLTHROUGH */
1050 case XPT_PATH_INQ:
1051 case XPT_ENG_INQ:
1052 case XPT_SCAN_LUN:
1053
1054 ccb = xpt_alloc_ccb();
1055
1056 CAM_SIM_LOCK(bus->sim);
1057
1058 /*
1059 * Create a path using the bus, target, and lun the
1060 * user passed in.
1061 */
1062 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
1063 inccb->ccb_h.path_id,
1064 inccb->ccb_h.target_id,
1065 inccb->ccb_h.target_lun) !=
1066 CAM_REQ_CMP){
1067 error = EINVAL;
1068 CAM_SIM_UNLOCK(bus->sim);
1069 xpt_free_ccb(ccb);
1070 break;
1071 }
1072 /* Ensure all of our fields are correct */
1073 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
1074 inccb->ccb_h.pinfo.priority);
1075 xpt_merge_ccb(ccb, inccb);
1076 ccb->ccb_h.cbfcnp = xptdone;
1077 cam_periph_runccb(ccb, NULL, 0, 0, NULL);
1078 bcopy(ccb, inccb, sizeof(union ccb));
1079 xpt_free_path(ccb->ccb_h.path);
1080 xpt_free_ccb(ccb);
1081 CAM_SIM_UNLOCK(bus->sim);
1082 break;
1083
1084 case XPT_DEBUG: {
1085 union ccb ccb;
1086
1087 /*
1088 * This is an immediate CCB, so it's okay to
1089 * allocate it on the stack.
1090 */
1091
1092 CAM_SIM_LOCK(bus->sim);
1093
1094 /*
1095 * Create a path using the bus, target, and lun the
1096 * user passed in.
1097 */
1098 if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
1099 inccb->ccb_h.path_id,
1100 inccb->ccb_h.target_id,
1101 inccb->ccb_h.target_lun) !=
1102 CAM_REQ_CMP){
1103 error = EINVAL;
1104 CAM_SIM_UNLOCK(bus->sim);
1105 break;
1106 }
1107 /* Ensure all of our fields are correct */
1108 xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
1109 inccb->ccb_h.pinfo.priority);
1110 xpt_merge_ccb(&ccb, inccb);
1111 ccb.ccb_h.cbfcnp = xptdone;
1112 xpt_action(&ccb);
1113 CAM_SIM_UNLOCK(bus->sim);
1114 bcopy(&ccb, inccb, sizeof(union ccb));
1115 xpt_free_path(ccb.ccb_h.path);
1116 break;
1117
1118 }
1119 case XPT_DEV_MATCH: {
1120 struct cam_periph_map_info mapinfo;
1121 struct cam_path *old_path;
1122
1123 /*
1124 * We can't deal with physical addresses for this
1125 * type of transaction.
1126 */
1127 if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
1128 error = EINVAL;
1129 break;
1130 }
1131
1132 /*
1133 * Save this in case the caller had it set to
1134 * something in particular.
1135 */
1136 old_path = inccb->ccb_h.path;
1137
1138 /*
1139 * We really don't need a path for the matching
1140 * code. The path is needed because of the
1141 * debugging statements in xpt_action(). They
1142 * assume that the CCB has a valid path.
1143 */
1144 inccb->ccb_h.path = xpt_periph->path;
1145
1146 bzero(&mapinfo, sizeof(mapinfo));
1147
1148 /*
1149 * Map the pattern and match buffers into kernel
1150 * virtual address space.
1151 */
1152 error = cam_periph_mapmem(inccb, &mapinfo);
1153
1154 if (error) {
1155 inccb->ccb_h.path = old_path;
1156 break;
1157 }
1158
1159 /*
1160 * This is an immediate CCB, we can send it on directly.
1161 */
1162 xpt_action(inccb);
1163
1164 /*
1165 * Map the buffers back into user space.
1166 */
1167 cam_periph_unmapmem(inccb, &mapinfo);
1168
1169 inccb->ccb_h.path = old_path;
1170
1171 error = 0;
1172 break;
1173 }
1174 default:
1175 error = ENOTSUP;
1176 break;
1177 }
1178 xpt_release_bus(bus);
1179 break;
1180 }
1181 /*
1182 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
1183 * with the peripheral driver name and unit number filled in. The other
1184 * fields don't really matter as input. The passthrough driver name
1185 * ("pass"), and unit number are passed back in the ccb. The current
1186 * device generation number, and the index into the device peripheral
1187 * driver list, and the status are also passed back. Note that
1188 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
1189 * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is
1190 * (or rather should be) impossible for the device peripheral driver
1191 * list to change since we look at the whole thing in one pass, and
1192 * we do it with lock protection.
1193 *
1194 */
1195 case CAMGETPASSTHRU: {
1196 union ccb *ccb;
1197 struct cam_periph *periph;
1198 struct periph_driver **p_drv;
1199 char *name;
1200 u_int unit;
1201 u_int cur_generation;
1202 int base_periph_found;
1203 int splbreaknum;
1204
1205 ccb = (union ccb *)addr;
1206 unit = ccb->cgdl.unit_number;
1207 name = ccb->cgdl.periph_name;
1208 /*
1209 * Every 100 devices, we want to drop our lock protection to
1210 * give the software interrupt handler a chance to run.
1211 * Most systems won't run into this check, but this should
1212 * avoid starvation in the software interrupt handler in
1213 * large systems.
1214 */
1215 splbreaknum = 100;
1216
1217 ccb = (union ccb *)addr;
1218
1219 base_periph_found = 0;
1220
1221 /*
1222 * Sanity check -- make sure we don't get a null peripheral
1223 * driver name.
1224 */
1225 if (*ccb->cgdl.periph_name == '\0') {
1226 error = EINVAL;
1227 break;
1228 }
1229
1230 /* Keep the list from changing while we traverse it */
1231 mtx_lock(&xsoftc.xpt_topo_lock);
1232ptstartover:
1233 cur_generation = xsoftc.xpt_generation;
1234
1235 /* first find our driver in the list of drivers */
1236 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
1237 if (strcmp((*p_drv)->driver_name, name) == 0)
1238 break;
1239
1240 if (*p_drv == NULL) {
1241 mtx_unlock(&xsoftc.xpt_topo_lock);
1242 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1243 ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1244 *ccb->cgdl.periph_name = '\0';
1245 ccb->cgdl.unit_number = 0;
1246 error = ENOENT;
1247 break;
1248 }
1249
1250 /*
1251 * Run through every peripheral instance of this driver
1252 * and check to see whether it matches the unit passed
1253 * in by the user. If it does, get out of the loops and
1254 * find the passthrough driver associated with that
1255 * peripheral driver.
1256 */
1257 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
1258 periph = TAILQ_NEXT(periph, unit_links)) {
1259
1260 if (periph->unit_number == unit) {
1261 break;
1262 } else if (--splbreaknum == 0) {
1263 mtx_unlock(&xsoftc.xpt_topo_lock);
1264 mtx_lock(&xsoftc.xpt_topo_lock);
1265 splbreaknum = 100;
1266 if (cur_generation != xsoftc.xpt_generation)
1267 goto ptstartover;
1268 }
1269 }
1270 /*
1271 * If we found the peripheral driver that the user passed
1272 * in, go through all of the peripheral drivers for that
1273 * particular device and look for a passthrough driver.
1274 */
1275 if (periph != NULL) {
1276 struct cam_ed *device;
1277 int i;
1278
1279 base_periph_found = 1;
1280 device = periph->path->device;
1281 for (i = 0, periph = SLIST_FIRST(&device->periphs);
1282 periph != NULL;
1283 periph = SLIST_NEXT(periph, periph_links), i++) {
1284 /*
1285 * Check to see whether we have a
1286 * passthrough device or not.
1287 */
1288 if (strcmp(periph->periph_name, "pass") == 0) {
1289 /*
1290 * Fill in the getdevlist fields.
1291 */
1292 strcpy(ccb->cgdl.periph_name,
1293 periph->periph_name);
1294 ccb->cgdl.unit_number =
1295 periph->unit_number;
1296 if (SLIST_NEXT(periph, periph_links))
1297 ccb->cgdl.status =
1298 CAM_GDEVLIST_MORE_DEVS;
1299 else
1300 ccb->cgdl.status =
1301 CAM_GDEVLIST_LAST_DEVICE;
1302 ccb->cgdl.generation =
1303 device->generation;
1304 ccb->cgdl.index = i;
1305 /*
1306 * Fill in some CCB header fields
1307 * that the user may want.
1308 */
1309 ccb->ccb_h.path_id =
1310 periph->path->bus->path_id;
1311 ccb->ccb_h.target_id =
1312 periph->path->target->target_id;
1313 ccb->ccb_h.target_lun =
1314 periph->path->device->lun_id;
1315 ccb->ccb_h.status = CAM_REQ_CMP;
1316 break;
1317 }
1318 }
1319 }
1320
1321 /*
1322 * If the periph is null here, one of two things has
1323 * happened. The first possibility is that we couldn't
1324 * find the unit number of the particular peripheral driver
1325 * that the user is asking about. e.g. the user asks for
1326 * the passthrough driver for "da11". We find the list of
1327 * "da" peripherals all right, but there is no unit 11.
1328 * The other possibility is that we went through the list
1329 * of peripheral drivers attached to the device structure,
1330 * but didn't find one with the name "pass". Either way,
1331 * we return ENOENT, since we couldn't find something.
1332 */
1333 if (periph == NULL) {
1334 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1335 ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1336 *ccb->cgdl.periph_name = '\0';
1337 ccb->cgdl.unit_number = 0;
1338 error = ENOENT;
1339 /*
1340 * It is unfortunate that this is even necessary,
1341 * but there are many, many clueless users out there.
1342 * If this is true, the user is looking for the
1343 * passthrough driver, but doesn't have one in his
1344 * kernel.
1345 */
1346 if (base_periph_found == 1) {
1347 printf("xptioctl: pass driver is not in the "
1348 "kernel\n");
1349 printf("xptioctl: put \"device pass\" in "
1350 "your kernel config file\n");
1351 }
1352 }
1353 mtx_unlock(&xsoftc.xpt_topo_lock);
1354 break;
1355 }
1356 default:
1357 error = ENOTTY;
1358 break;
1359 }
1360
1361 return(error);
1362}
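
/*
 * Illustrative sketch (an assumption, not part of the original file): from
 * userland, the CAMGETPASSTHRU handler above is typically reached through
 * /dev/xpt0, roughly as libcam does it:
 *
 *	union ccb ccb;
 *	int fd = open("/dev/xpt0", O_RDWR);
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_GDEVLIST;
 *	strlcpy(ccb.cgdl.periph_name, "da", sizeof(ccb.cgdl.periph_name));
 *	ccb.cgdl.unit_number = 0;
 *	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0 &&
 *	    ccb.ccb_h.status == CAM_REQ_CMP)
 *		printf("pass device: %s%d\n", ccb.cgdl.periph_name,
 *		    ccb.cgdl.unit_number);
 *
 * The handler fills in the "pass" peripheral name and unit number for the
 * device that owns the named peripheral instance.
 */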
1363
1364static int
1365cam_module_event_handler(module_t mod, int what, void *arg)
1366{
1367 int error;
1368
1369 switch (what) {
1370 case MOD_LOAD:
1371 if ((error = xpt_init(NULL)) != 0)
1372 return (error);
1373 break;
1374 case MOD_UNLOAD:
1375 return EBUSY;
1376 default:
1377 return EOPNOTSUPP;
1378 }
1379
1380 return 0;
1381}
1382
1383/* thread to handle bus rescans */
1384static void
1385xpt_scanner_thread(void *dummy)
1386{
1387 cam_isrq_t queue;
1388 union ccb *ccb;
1389 struct cam_sim *sim;
1390
1391 for (;;) {
1392 /*
1393 * Wait for a rescan request to come in. When it does, splice
1394 * it onto a queue from local storage so that the xpt lock
1395 * doesn't need to be held while the requests are being
1396 * processed.
1397 */
1398 xpt_lock_buses();
1399 msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
1400 "ccb_scanq", 0);
1401 TAILQ_INIT(&queue);
1402 TAILQ_CONCAT(&queue, &xsoftc.ccb_scanq, sim_links.tqe);
1403 xpt_unlock_buses();
1404
1405 while ((ccb = (union ccb *)TAILQ_FIRST(&queue)) != NULL) {
1406 TAILQ_REMOVE(&queue, &ccb->ccb_h, sim_links.tqe);
1407
1408 sim = ccb->ccb_h.path->bus->sim;
1409 CAM_SIM_LOCK(sim);
1410
1411 ccb->ccb_h.func_code = XPT_SCAN_BUS;
1412 ccb->ccb_h.cbfcnp = xptdone;
1413 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5);
1414 cam_periph_runccb(ccb, NULL, 0, 0, NULL);
1415 xpt_free_path(ccb->ccb_h.path);
1416 xpt_free_ccb(ccb);
1417 CAM_SIM_UNLOCK(sim);
1418 }
1419 }
1420}
1421
1422void
1423xpt_rescan(union ccb *ccb)
1424{
1425 struct ccb_hdr *hdr;
1426
1427 /*
1428 * Don't make duplicate entries for the same paths.
1429 */
1430 xpt_lock_buses();
1431 TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
1432 if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
1433 xpt_unlock_buses();
1434 xpt_print(ccb->ccb_h.path, "rescan already queued\n");
1435 xpt_free_path(ccb->ccb_h.path);
1436 xpt_free_ccb(ccb);
1437 return;
1438 }
1439 }
1440 TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
1441 wakeup(&xsoftc.ccb_scanq);
1442 xpt_unlock_buses();
1443}
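/*
 * Illustrative sketch only (not part of this file's flow): a driver that
 * wants a bus rescanned hands xpt_rescan() a ccb that owns its own path,
 * since the scanner thread above frees both once the scan completes.  The
 * "path_id" below is a placeholder for the caller's bus; the scanner thread
 * fills in the function code and priority itself.
 *
 *	union ccb *ccb;
 *
 *	ccb = xpt_alloc_ccb_nowait();
 *	if (ccb == NULL)
 *		return;
 *	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
 *	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 *		xpt_free_ccb(ccb);
 *		return;
 *	}
 *	xpt_rescan(ccb);
 */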
1444
1445/* Functions accessed by the peripheral drivers */
1446static int
1447xpt_init(void *dummy)
1448{
1449 struct cam_sim *xpt_sim;
1450 struct cam_path *path;
1451 struct cam_devq *devq;
1452 cam_status status;
1453
1454 TAILQ_INIT(&xsoftc.xpt_busses);
1455 TAILQ_INIT(&cam_simq);
1456 TAILQ_INIT(&xsoftc.ccb_scanq);
1457 STAILQ_INIT(&xsoftc.highpowerq);
1458 xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
1459
1460 mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF);
1461 mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
1462 mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);
1463
1464 /*
1465	 * The xpt layer is, itself, the equivalent of a SIM.
1466 * Allow 16 ccbs in the ccb pool for it. This should
1467 * give decent parallelism when we probe busses and
1468 * perform other XPT functions.
1469 */
1470 devq = cam_simq_alloc(16);
1471 xpt_sim = cam_sim_alloc(xptaction,
1472 xptpoll,
1473 "xpt",
1474 /*softc*/NULL,
1475 /*unit*/0,
1476 /*mtx*/&xsoftc.xpt_lock,
1477 /*max_dev_transactions*/0,
1478 /*max_tagged_dev_transactions*/0,
1479 devq);
1480 if (xpt_sim == NULL)
1481 return (ENOMEM);
1482
1483 xpt_sim->max_ccbs = 16;
1484
1485 mtx_lock(&xsoftc.xpt_lock);
1486 if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
1487 printf("xpt_init: xpt_bus_register failed with status %#x,"
1488 " failing attach\n", status);
1489 return (EINVAL);
1490 }
1491
1492 /*
1493 * Looking at the XPT from the SIM layer, the XPT is
1494	 * the equivalent of a peripheral driver.  Allocate
1495 * a peripheral driver entry for us.
1496 */
1497 if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1498 CAM_TARGET_WILDCARD,
1499 CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1500 printf("xpt_init: xpt_create_path failed with status %#x,"
1501 " failing attach\n", status);
1502 return (EINVAL);
1503 }
1504
1505 cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1506 path, NULL, 0, xpt_sim);
1507 xpt_free_path(path);
1508 mtx_unlock(&xsoftc.xpt_lock);
1509
1510 /*
1511 * Register a callback for when interrupts are enabled.
1512 */
1513 xsoftc.xpt_config_hook =
1514 (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
1515 M_CAMXPT, M_NOWAIT | M_ZERO);
1516 if (xsoftc.xpt_config_hook == NULL) {
1517 printf("xpt_init: Cannot malloc config hook "
1518 "- failing attach\n");
1519 return (ENOMEM);
1520 }
1521
1522 xsoftc.xpt_config_hook->ich_func = xpt_config;
1523 if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
1524 free (xsoftc.xpt_config_hook, M_CAMXPT);
1525 printf("xpt_init: config_intrhook_establish failed "
1526 "- failing attach\n");
1527 }
1528
1529 /* fire up rescan thread */
1530 if (kproc_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) {
1531 printf("xpt_init: failed to create rescan thread\n");
1532 }
1533 /* Install our software interrupt handlers */
1534 swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE, &cambio_ih);
1535
1536 return (0);
1537}
1538
1539static cam_status
1540xptregister(struct cam_periph *periph, void *arg)
1541{
1542 struct cam_sim *xpt_sim;
1543
1544 if (periph == NULL) {
1545 printf("xptregister: periph was NULL!!\n");
1546 return(CAM_REQ_CMP_ERR);
1547 }
1548
1549 xpt_sim = (struct cam_sim *)arg;
1550 xpt_sim->softc = periph;
1551 xpt_periph = periph;
1552 periph->softc = NULL;
1553
1554 return(CAM_REQ_CMP);
1555}
1556
1557int32_t
1558xpt_add_periph(struct cam_periph *periph)
1559{
1560 struct cam_ed *device;
1561 int32_t status;
1562 struct periph_list *periph_head;
1563
1564 mtx_assert(periph->sim->mtx, MA_OWNED);
1565
1566 device = periph->path->device;
1567
1568 periph_head = &device->periphs;
1569
1570 status = CAM_REQ_CMP;
1571
1572 if (device != NULL) {
1573 /*
1574 * Make room for this peripheral
1575 * so it will fit in the queue
1576 * when it's scheduled to run
1577 */
1578 status = camq_resize(&device->drvq,
1579 device->drvq.array_size + 1);
1580
1581 device->generation++;
1582
1583 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1584 }
1585
1586 mtx_lock(&xsoftc.xpt_topo_lock);
1587 xsoftc.xpt_generation++;
1588 mtx_unlock(&xsoftc.xpt_topo_lock);
1589
1590 return (status);
1591}
1592
1593void
1594xpt_remove_periph(struct cam_periph *periph)
1595{
1596 struct cam_ed *device;
1597
1598 mtx_assert(periph->sim->mtx, MA_OWNED);
1599
1600 device = periph->path->device;
1601
1602 if (device != NULL) {
1603 struct periph_list *periph_head;
1604
1605 periph_head = &device->periphs;
1606
1607 /* Release the slot for this peripheral */
1608 camq_resize(&device->drvq, device->drvq.array_size - 1);
1609
1610 device->generation++;
1611
1612 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1613 }
1614
1615 mtx_lock(&xsoftc.xpt_topo_lock);
1616 xsoftc.xpt_generation++;
1617 mtx_unlock(&xsoftc.xpt_topo_lock);
1618}
1619
1620
1621void
1622xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1623{
1624 struct ccb_pathinq cpi;
1625 struct ccb_trans_settings cts;
1626 struct cam_path *path;
1627 u_int speed;
1628 u_int freq;
1629 u_int mb;
1630
1631 mtx_assert(periph->sim->mtx, MA_OWNED);
1632
1633 path = periph->path;
1634 /*
1635 * To ensure that this is printed in one piece,
1636 * mask out CAM interrupts.
1637 */
1638 printf("%s%d at %s%d bus %d target %d lun %d\n",
1639 periph->periph_name, periph->unit_number,
1640 path->bus->sim->sim_name,
1641 path->bus->sim->unit_number,
1642 path->bus->sim->bus_id,
1643 path->target->target_id,
1644 path->device->lun_id);
1645 printf("%s%d: ", periph->periph_name, periph->unit_number);
1646 scsi_print_inquiry(&path->device->inq_data);
1647 if (bootverbose && path->device->serial_num_len > 0) {
1648 /* Don't wrap the screen - print only the first 60 chars */
1649 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1650 periph->unit_number, path->device->serial_num);
1651 }
1652 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1653 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1654 cts.type = CTS_TYPE_CURRENT_SETTINGS;
1655 xpt_action((union ccb*)&cts);
1656 if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1657 return;
1658 }
1659
1660 /* Ask the SIM for its base transfer speed */
1661 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1662 cpi.ccb_h.func_code = XPT_PATH_INQ;
1663 xpt_action((union ccb *)&cpi);
1664
1665 speed = cpi.base_transfer_speed;
1666 freq = 0;
1667 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1668 struct ccb_trans_settings_spi *spi;
1669
1670 spi = &cts.xport_specific.spi;
1671 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
1672 && spi->sync_offset != 0) {
1673 freq = scsi_calc_syncsrate(spi->sync_period);
1674 speed = freq;
1675 }
1676
1677 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
1678 speed *= (0x01 << spi->bus_width);
1679 }
1680
1681 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1682 struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
1683 if (fc->valid & CTS_FC_VALID_SPEED) {
1684 speed = fc->bitrate;
1685 }
1686 }
1687
1688 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
1689 struct ccb_trans_settings_sas *sas = &cts.xport_specific.sas;
1690 if (sas->valid & CTS_SAS_VALID_SPEED) {
1691 speed = sas->bitrate;
1692 }
1693 }
1694
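	/*
	 * At this point "speed" is in KB/s.  For example, an SPI device
	 * that negotiated freq == 80000 (80MHz) on a 16-bit wide bus ends
	 * up with speed == 160000, which is printed below as
	 * "160.000MB/s transfers".
	 */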
1695 mb = speed / 1000;
1696 if (mb > 0)
1697 printf("%s%d: %d.%03dMB/s transfers",
1698 periph->periph_name, periph->unit_number,
1699 mb, speed % 1000);
1700 else
1701 printf("%s%d: %dKB/s transfers", periph->periph_name,
1702 periph->unit_number, speed);
1703 /* Report additional information about SPI connections */
1704 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1705 struct ccb_trans_settings_spi *spi;
1706
1707 spi = &cts.xport_specific.spi;
1708 if (freq != 0) {
1709 printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
1710 freq % 1000,
1711 (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
1712 ? " DT" : "",
1713 spi->sync_offset);
1714 }
1715 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
1716 && spi->bus_width > 0) {
1717 if (freq != 0) {
1718 printf(", ");
1719 } else {
1720 printf(" (");
1721 }
1722 printf("%dbit)", 8 * (0x01 << spi->bus_width));
1723 } else if (freq != 0) {
1724 printf(")");
1725 }
1726 }
1727 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1728 struct ccb_trans_settings_fc *fc;
1729
1730 fc = &cts.xport_specific.fc;
1731 if (fc->valid & CTS_FC_VALID_WWNN)
1732 printf(" WWNN 0x%llx", (long long) fc->wwnn);
1733 if (fc->valid & CTS_FC_VALID_WWPN)
1734 printf(" WWPN 0x%llx", (long long) fc->wwpn);
1735 if (fc->valid & CTS_FC_VALID_PORT)
1736 printf(" PortID 0x%x", fc->port);
1737 }
1738
1739 if (path->device->inq_flags & SID_CmdQue
1740 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1741 printf("\n%s%d: Command Queueing Enabled",
1742 periph->periph_name, periph->unit_number);
1743 }
1744 printf("\n");
1745
1746 /*
1747 * We only want to print the caller's announce string if they've
1748	 * passed one in.
1749 */
1750 if (announce_string != NULL)
1751 printf("%s%d: %s\n", periph->periph_name,
1752 periph->unit_number, announce_string);
1753}
1754
1755static dev_match_ret
1756xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1757 struct cam_eb *bus)
1758{
1759 dev_match_ret retval;
1760 int i;
1761
1762 retval = DM_RET_NONE;
1763
1764 /*
1765 * If we aren't given something to match against, that's an error.
1766 */
1767 if (bus == NULL)
1768 return(DM_RET_ERROR);
1769
1770 /*
1771 * If there are no match entries, then this bus matches no
1772 * matter what.
1773 */
1774 if ((patterns == NULL) || (num_patterns == 0))
1775 return(DM_RET_DESCEND | DM_RET_COPY);
1776
1777 for (i = 0; i < num_patterns; i++) {
1778 struct bus_match_pattern *cur_pattern;
1779
1780 /*
1781 * If the pattern in question isn't for a bus node, we
1782 * aren't interested. However, we do indicate to the
1783 * calling routine that we should continue descending the
1784 * tree, since the user wants to match against lower-level
1785 * EDT elements.
1786 */
1787 if (patterns[i].type != DEV_MATCH_BUS) {
1788 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1789 retval |= DM_RET_DESCEND;
1790 continue;
1791 }
1792
1793 cur_pattern = &patterns[i].pattern.bus_pattern;
1794
1795 /*
1796 * If they want to match any bus node, we give them any
1797		 * bus node.
1798 */
1799 if (cur_pattern->flags == BUS_MATCH_ANY) {
1800 /* set the copy flag */
1801 retval |= DM_RET_COPY;
1802
1803 /*
1804 * If we've already decided on an action, go ahead
1805 * and return.
1806 */
1807 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1808 return(retval);
1809 }
1810
1811 /*
1812 * Not sure why someone would do this...
1813 */
1814 if (cur_pattern->flags == BUS_MATCH_NONE)
1815 continue;
1816
1817 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1818 && (cur_pattern->path_id != bus->path_id))
1819 continue;
1820
1821 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1822 && (cur_pattern->bus_id != bus->sim->bus_id))
1823 continue;
1824
1825 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1826 && (cur_pattern->unit_number != bus->sim->unit_number))
1827 continue;
1828
1829 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1830 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1831 DEV_IDLEN) != 0))
1832 continue;
1833
1834 /*
1835 * If we get to this point, the user definitely wants
1836 * information on this bus. So tell the caller to copy the
1837 * data out.
1838 */
1839 retval |= DM_RET_COPY;
1840
1841 /*
1842 * If the return action has been set to descend, then we
1843 * know that we've already seen a non-bus matching
1844 * expression, therefore we need to further descend the tree.
1845 * This won't change by continuing around the loop, so we
1846 * go ahead and return. If we haven't seen a non-bus
1847 * matching expression, we keep going around the loop until
1848 * we exhaust the matching expressions. We'll set the stop
1849 * flag once we fall out of the loop.
1850 */
1851 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1852 return(retval);
1853 }
1854
1855 /*
1856 * If the return action hasn't been set to descend yet, that means
1857 * we haven't seen anything other than bus matching patterns. So
1858 * tell the caller to stop descending the tree -- the user doesn't
1859 * want to match against lower level tree elements.
1860 */
1861 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1862 retval |= DM_RET_STOP;
1863
1864 return(retval);
1865}
1866
1867static dev_match_ret
1868xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1869 struct cam_ed *device)
1870{
1871 dev_match_ret retval;
1872 int i;
1873
1874 retval = DM_RET_NONE;
1875
1876 /*
1877 * If we aren't given something to match against, that's an error.
1878 */
1879 if (device == NULL)
1880 return(DM_RET_ERROR);
1881
1882 /*
1883 * If there are no match entries, then this device matches no
1884 * matter what.
1885 */
1886 if ((patterns == NULL) || (num_patterns == 0))
1887 return(DM_RET_DESCEND | DM_RET_COPY);
1888
1889 for (i = 0; i < num_patterns; i++) {
1890 struct device_match_pattern *cur_pattern;
1891
1892 /*
1893 * If the pattern in question isn't for a device node, we
1894 * aren't interested.
1895 */
1896 if (patterns[i].type != DEV_MATCH_DEVICE) {
1897 if ((patterns[i].type == DEV_MATCH_PERIPH)
1898 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1899 retval |= DM_RET_DESCEND;
1900 continue;
1901 }
1902
1903 cur_pattern = &patterns[i].pattern.device_pattern;
1904
1905 /*
1906 * If they want to match any device node, we give them any
1907 * device node.
1908 */
1909 if (cur_pattern->flags == DEV_MATCH_ANY) {
1910 /* set the copy flag */
1911 retval |= DM_RET_COPY;
1912
1913
1914 /*
1915 * If we've already decided on an action, go ahead
1916 * and return.
1917 */
1918 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1919 return(retval);
1920 }
1921
1922 /*
1923 * Not sure why someone would do this...
1924 */
1925 if (cur_pattern->flags == DEV_MATCH_NONE)
1926 continue;
1927
1928 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1929 && (cur_pattern->path_id != device->target->bus->path_id))
1930 continue;
1931
1932 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1933 && (cur_pattern->target_id != device->target->target_id))
1934 continue;
1935
1936 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1937 && (cur_pattern->target_lun != device->lun_id))
1938 continue;
1939
1940 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1941 && (cam_quirkmatch((caddr_t)&device->inq_data,
1942 (caddr_t)&cur_pattern->inq_pat,
1943 1, sizeof(cur_pattern->inq_pat),
1944 scsi_static_inquiry_match) == NULL))
1945 continue;
1946
1947 /*
1948 * If we get to this point, the user definitely wants
1949 * information on this device. So tell the caller to copy
1950 * the data out.
1951 */
1952 retval |= DM_RET_COPY;
1953
1954 /*
1955 * If the return action has been set to descend, then we
1956 * know that we've already seen a peripheral matching
1957 * expression, therefore we need to further descend the tree.
1958 * This won't change by continuing around the loop, so we
1959 * go ahead and return. If we haven't seen a peripheral
1960 * matching expression, we keep going around the loop until
1961 * we exhaust the matching expressions. We'll set the stop
1962 * flag once we fall out of the loop.
1963 */
1964 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1965 return(retval);
1966 }
1967
1968 /*
1969 * If the return action hasn't been set to descend yet, that means
1970 * we haven't seen any peripheral matching patterns. So tell the
1971 * caller to stop descending the tree -- the user doesn't want to
1972 * match against lower level tree elements.
1973 */
1974 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1975 retval |= DM_RET_STOP;
1976
1977 return(retval);
1978}
1979
1980/*
1981 * Match a single peripheral against any number of match patterns.
1982 */
1983static dev_match_ret
1984xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1985 struct cam_periph *periph)
1986{
1987 dev_match_ret retval;
1988 int i;
1989
1990 /*
1991 * If we aren't given something to match against, that's an error.
1992 */
1993 if (periph == NULL)
1994 return(DM_RET_ERROR);
1995
1996 /*
1997 * If there are no match entries, then this peripheral matches no
1998 * matter what.
1999 */
2000 if ((patterns == NULL) || (num_patterns == 0))
2001 return(DM_RET_STOP | DM_RET_COPY);
2002
2003 /*
2004 * There aren't any nodes below a peripheral node, so there's no
2005 * reason to descend the tree any further.
2006 */
2007 retval = DM_RET_STOP;
2008
2009 for (i = 0; i < num_patterns; i++) {
2010 struct periph_match_pattern *cur_pattern;
2011
2012 /*
2013 * If the pattern in question isn't for a peripheral, we
2014 * aren't interested.
2015 */
2016 if (patterns[i].type != DEV_MATCH_PERIPH)
2017 continue;
2018
2019 cur_pattern = &patterns[i].pattern.periph_pattern;
2020
2021 /*
2022 * If they want to match on anything, then we will do so.
2023 */
2024 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
2025 /* set the copy flag */
2026 retval |= DM_RET_COPY;
2027
2028 /*
2029 * We've already set the return action to stop,
2030 * since there are no nodes below peripherals in
2031 * the tree.
2032 */
2033 return(retval);
2034 }
2035
2036 /*
2037 * Not sure why someone would do this...
2038 */
2039 if (cur_pattern->flags == PERIPH_MATCH_NONE)
2040 continue;
2041
2042 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
2043 && (cur_pattern->path_id != periph->path->bus->path_id))
2044 continue;
2045
2046 /*
2047 * For the target and lun id's, we have to make sure the
2048 * target and lun pointers aren't NULL. The xpt peripheral
2049 * has a wildcard target and device.
2050 */
2051 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
2052 && ((periph->path->target == NULL)
2053 ||(cur_pattern->target_id != periph->path->target->target_id)))
2054 continue;
2055
2056 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
2057 && ((periph->path->device == NULL)
2058 || (cur_pattern->target_lun != periph->path->device->lun_id)))
2059 continue;
2060
2061 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
2062 && (cur_pattern->unit_number != periph->unit_number))
2063 continue;
2064
2065 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
2066 && (strncmp(cur_pattern->periph_name, periph->periph_name,
2067 DEV_IDLEN) != 0))
2068 continue;
2069
2070 /*
2071 * If we get to this point, the user definitely wants
2072 * information on this peripheral. So tell the caller to
2073 * copy the data out.
2074 */
2075 retval |= DM_RET_COPY;
2076
2077 /*
2078 * The return action has already been set to stop, since
2079 * peripherals don't have any nodes below them in the EDT.
2080 */
2081 return(retval);
2082 }
2083
2084 /*
2085 * If we get to this point, the peripheral that was passed in
2086 * doesn't match any of the patterns.
2087 */
2088 return(retval);
2089}
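/*
 * For illustration, a pattern that the routine above would match against
 * the "da11" peripheral used as an example in xptioctl() above; the name
 * and unit number are of course placeholders for whatever the caller is
 * looking for:
 *
 *	struct periph_match_pattern pat;
 *
 *	bzero(&pat, sizeof(pat));
 *	strlcpy(pat.periph_name, "da", sizeof(pat.periph_name));
 *	pat.unit_number = 11;
 *	pat.flags = PERIPH_MATCH_NAME | PERIPH_MATCH_UNIT;
 */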
2090
2091static int
2092xptedtbusfunc(struct cam_eb *bus, void *arg)
2093{
2094 struct ccb_dev_match *cdm;
2095 dev_match_ret retval;
2096
2097 cdm = (struct ccb_dev_match *)arg;
2098
2099 /*
2100 * If our position is for something deeper in the tree, that means
2101 * that we've already seen this node. So, we keep going down.
2102 */
2103 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2104 && (cdm->pos.cookie.bus == bus)
2105 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2106 && (cdm->pos.cookie.target != NULL))
2107 retval = DM_RET_DESCEND;
2108 else
2109 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
2110
2111 /*
2112 * If we got an error, bail out of the search.
2113 */
2114 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2115 cdm->status = CAM_DEV_MATCH_ERROR;
2116 return(0);
2117 }
2118
2119 /*
2120 * If the copy flag is set, copy this bus out.
2121 */
2122 if (retval & DM_RET_COPY) {
2123 int spaceleft, j;
2124
2125 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2126 sizeof(struct dev_match_result));
2127
2128 /*
2129 * If we don't have enough space to put in another
2130 * match result, save our position and tell the
2131 * user there are more devices to check.
2132 */
2133 if (spaceleft < sizeof(struct dev_match_result)) {
2134 bzero(&cdm->pos, sizeof(cdm->pos));
2135 cdm->pos.position_type =
2136 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
2137
2138 cdm->pos.cookie.bus = bus;
2139 cdm->pos.generations[CAM_BUS_GENERATION]=
2140 xsoftc.bus_generation;
2141 cdm->status = CAM_DEV_MATCH_MORE;
2142 return(0);
2143 }
2144 j = cdm->num_matches;
2145 cdm->num_matches++;
2146 cdm->matches[j].type = DEV_MATCH_BUS;
2147 cdm->matches[j].result.bus_result.path_id = bus->path_id;
2148 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
2149 cdm->matches[j].result.bus_result.unit_number =
2150 bus->sim->unit_number;
2151 strncpy(cdm->matches[j].result.bus_result.dev_name,
2152 bus->sim->sim_name, DEV_IDLEN);
2153 }
2154
2155 /*
2156 * If the user is only interested in busses, there's no
2157 * reason to descend to the next level in the tree.
2158 */
2159 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2160 return(1);
2161
2162 /*
2163 * If there is a target generation recorded, check it to
2164 * make sure the target list hasn't changed.
2165 */
2166 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2167 && (bus == cdm->pos.cookie.bus)
2168 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2169 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
2170 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
2171 bus->generation)) {
2172 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2173 return(0);
2174 }
2175
2176 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2177 && (cdm->pos.cookie.bus == bus)
2178 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2179 && (cdm->pos.cookie.target != NULL))
2180 return(xpttargettraverse(bus,
2181 (struct cam_et *)cdm->pos.cookie.target,
2182 xptedttargetfunc, arg));
2183 else
2184 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
2185}
2186
2187static int
2188xptedttargetfunc(struct cam_et *target, void *arg)
2189{
2190 struct ccb_dev_match *cdm;
2191
2192 cdm = (struct ccb_dev_match *)arg;
2193
2194 /*
2195 * If there is a device list generation recorded, check it to
2196 * make sure the device list hasn't changed.
2197 */
2198 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2199 && (cdm->pos.cookie.bus == target->bus)
2200 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2201 && (cdm->pos.cookie.target == target)
2202 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2203 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2204 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2205 target->generation)) {
2206 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2207 return(0);
2208 }
2209
2210 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2211 && (cdm->pos.cookie.bus == target->bus)
2212 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2213 && (cdm->pos.cookie.target == target)
2214 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2215 && (cdm->pos.cookie.device != NULL))
2216 return(xptdevicetraverse(target,
2217 (struct cam_ed *)cdm->pos.cookie.device,
2218 xptedtdevicefunc, arg));
2219 else
2220 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2221}
2222
2223static int
2224xptedtdevicefunc(struct cam_ed *device, void *arg)
2225{
2226
2227 struct ccb_dev_match *cdm;
2228 dev_match_ret retval;
2229
2230 cdm = (struct ccb_dev_match *)arg;
2231
2232 /*
2233 * If our position is for something deeper in the tree, that means
2234 * that we've already seen this node. So, we keep going down.
2235 */
2236 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2237 && (cdm->pos.cookie.device == device)
2238 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2239 && (cdm->pos.cookie.periph != NULL))
2240 retval = DM_RET_DESCEND;
2241 else
2242 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2243 device);
2244
2245 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2246 cdm->status = CAM_DEV_MATCH_ERROR;
2247 return(0);
2248 }
2249
2250 /*
2251 * If the copy flag is set, copy this device out.
2252 */
2253 if (retval & DM_RET_COPY) {
2254 int spaceleft, j;
2255
2256 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2257 sizeof(struct dev_match_result));
2258
2259 /*
2260 * If we don't have enough space to put in another
2261 * match result, save our position and tell the
2262 * user there are more devices to check.
2263 */
2264 if (spaceleft < sizeof(struct dev_match_result)) {
2265 bzero(&cdm->pos, sizeof(cdm->pos));
2266 cdm->pos.position_type =
2267 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2268 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2269
2270 cdm->pos.cookie.bus = device->target->bus;
2271 cdm->pos.generations[CAM_BUS_GENERATION]=
2272 xsoftc.bus_generation;
2273 cdm->pos.cookie.target = device->target;
2274 cdm->pos.generations[CAM_TARGET_GENERATION] =
2275 device->target->bus->generation;
2276 cdm->pos.cookie.device = device;
2277 cdm->pos.generations[CAM_DEV_GENERATION] =
2278 device->target->generation;
2279 cdm->status = CAM_DEV_MATCH_MORE;
2280 return(0);
2281 }
2282 j = cdm->num_matches;
2283 cdm->num_matches++;
2284 cdm->matches[j].type = DEV_MATCH_DEVICE;
2285 cdm->matches[j].result.device_result.path_id =
2286 device->target->bus->path_id;
2287 cdm->matches[j].result.device_result.target_id =
2288 device->target->target_id;
2289 cdm->matches[j].result.device_result.target_lun =
2290 device->lun_id;
2291 bcopy(&device->inq_data,
2292 &cdm->matches[j].result.device_result.inq_data,
2293 sizeof(struct scsi_inquiry_data));
2294
2295 /* Let the user know whether this device is unconfigured */
2296 if (device->flags & CAM_DEV_UNCONFIGURED)
2297 cdm->matches[j].result.device_result.flags =
2298 DEV_RESULT_UNCONFIGURED;
2299 else
2300 cdm->matches[j].result.device_result.flags =
2301 DEV_RESULT_NOFLAG;
2302 }
2303
2304 /*
2305 * If the user isn't interested in peripherals, don't descend
2306 * the tree any further.
2307 */
2308 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2309 return(1);
2310
2311 /*
2312 * If there is a peripheral list generation recorded, make sure
2313 * it hasn't changed.
2314 */
2315 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2316 && (device->target->bus == cdm->pos.cookie.bus)
2317 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2318 && (device->target == cdm->pos.cookie.target)
2319 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2320 && (device == cdm->pos.cookie.device)
2321 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2322 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2323 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2324 device->generation)){
2325 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2326 return(0);
2327 }
2328
2329 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2330 && (cdm->pos.cookie.bus == device->target->bus)
2331 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2332 && (cdm->pos.cookie.target == device->target)
2333 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2334 && (cdm->pos.cookie.device == device)
2335 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2336 && (cdm->pos.cookie.periph != NULL))
2337 return(xptperiphtraverse(device,
2338 (struct cam_periph *)cdm->pos.cookie.periph,
2339 xptedtperiphfunc, arg));
2340 else
2341 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2342}
2343
2344static int
2345xptedtperiphfunc(struct cam_periph *periph, void *arg)
2346{
2347 struct ccb_dev_match *cdm;
2348 dev_match_ret retval;
2349
2350 cdm = (struct ccb_dev_match *)arg;
2351
2352 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2353
2354 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2355 cdm->status = CAM_DEV_MATCH_ERROR;
2356 return(0);
2357 }
2358
2359 /*
2360 * If the copy flag is set, copy this peripheral out.
2361 */
2362 if (retval & DM_RET_COPY) {
2363 int spaceleft, j;
2364
2365 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2366 sizeof(struct dev_match_result));
2367
2368 /*
2369 * If we don't have enough space to put in another
2370 * match result, save our position and tell the
2371 * user there are more devices to check.
2372 */
2373 if (spaceleft < sizeof(struct dev_match_result)) {
2374 bzero(&cdm->pos, sizeof(cdm->pos));
2375 cdm->pos.position_type =
2376 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2377 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2378 CAM_DEV_POS_PERIPH;
2379
2380 cdm->pos.cookie.bus = periph->path->bus;
2381 cdm->pos.generations[CAM_BUS_GENERATION]=
2382 xsoftc.bus_generation;
2383 cdm->pos.cookie.target = periph->path->target;
2384 cdm->pos.generations[CAM_TARGET_GENERATION] =
2385 periph->path->bus->generation;
2386 cdm->pos.cookie.device = periph->path->device;
2387 cdm->pos.generations[CAM_DEV_GENERATION] =
2388 periph->path->target->generation;
2389 cdm->pos.cookie.periph = periph;
2390 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2391 periph->path->device->generation;
2392 cdm->status = CAM_DEV_MATCH_MORE;
2393 return(0);
2394 }
2395
2396 j = cdm->num_matches;
2397 cdm->num_matches++;
2398 cdm->matches[j].type = DEV_MATCH_PERIPH;
2399 cdm->matches[j].result.periph_result.path_id =
2400 periph->path->bus->path_id;
2401 cdm->matches[j].result.periph_result.target_id =
2402 periph->path->target->target_id;
2403 cdm->matches[j].result.periph_result.target_lun =
2404 periph->path->device->lun_id;
2405 cdm->matches[j].result.periph_result.unit_number =
2406 periph->unit_number;
2407 strncpy(cdm->matches[j].result.periph_result.periph_name,
2408 periph->periph_name, DEV_IDLEN);
2409 }
2410
2411 return(1);
2412}
2413
2414static int
2415xptedtmatch(struct ccb_dev_match *cdm)
2416{
2417 int ret;
2418
2419 cdm->num_matches = 0;
2420
2421 /*
2422 * Check the bus list generation. If it has changed, the user
2423 * needs to reset everything and start over.
2424 */
2425 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2426 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2427 && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
2428 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2429 return(0);
2430 }
2431
2432 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2433 && (cdm->pos.cookie.bus != NULL))
2434 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2435 xptedtbusfunc, cdm);
2436 else
2437 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2438
2439 /*
2440 * If we get back 0, that means that we had to stop before fully
2441 * traversing the EDT. It also means that one of the subroutines
2442 * has set the status field to the proper value. If we get back 1,
2443 * we've fully traversed the EDT and copied out any matching entries.
2444 */
2445 if (ret == 1)
2446 cdm->status = CAM_DEV_MATCH_LAST;
2447
2448 return(ret);
2449}
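/*
 * A rough sketch of the usual consumer of the matching code above: a
 * userland program (camcontrol(8), for instance) issues XPT_DEV_MATCH
 * through the xpt(4) device.  The buffer size is an arbitrary example and
 * error handling is omitted:
 *
 *	union ccb ccb;
 *	int fd = open("/dev/xpt0", O_RDWR);
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	ccb.ccb_h.func_code = XPT_DEV_MATCH;
 *	ccb.cdm.match_buf_len = 4096;
 *	ccb.cdm.matches = malloc(ccb.cdm.match_buf_len);
 *	ccb.cdm.num_patterns = 0;	(no patterns: match everything)
 *	do {
 *		ioctl(fd, CAMIOCOMMAND, &ccb);
 *		(walk ccb.cdm.matches[0 .. ccb.cdm.num_matches - 1])
 *	} while (ccb.cdm.status == CAM_DEV_MATCH_MORE);
 */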
2450
2451static int
2452xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2453{
2454 struct ccb_dev_match *cdm;
2455
2456 cdm = (struct ccb_dev_match *)arg;
2457
2458 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2459 && (cdm->pos.cookie.pdrv == pdrv)
2460 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2461 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2462 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2463 (*pdrv)->generation)) {
2464 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2465 return(0);
2466 }
2467
2468 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2469 && (cdm->pos.cookie.pdrv == pdrv)
2470 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2471 && (cdm->pos.cookie.periph != NULL))
2472 return(xptpdperiphtraverse(pdrv,
2473 (struct cam_periph *)cdm->pos.cookie.periph,
2474 xptplistperiphfunc, arg));
2475 else
2476 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
2477}
2478
2479static int
2480xptplistperiphfunc(struct cam_periph *periph, void *arg)
2481{
2482 struct ccb_dev_match *cdm;
2483 dev_match_ret retval;
2484
2485 cdm = (struct ccb_dev_match *)arg;
2486
2487 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2488
2489 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2490 cdm->status = CAM_DEV_MATCH_ERROR;
2491 return(0);
2492 }
2493
2494 /*
2495 * If the copy flag is set, copy this peripheral out.
2496 */
2497 if (retval & DM_RET_COPY) {
2498 int spaceleft, j;
2499
2500 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2501 sizeof(struct dev_match_result));
2502
2503 /*
2504 * If we don't have enough space to put in another
2505 * match result, save our position and tell the
2506 * user there are more devices to check.
2507 */
2508 if (spaceleft < sizeof(struct dev_match_result)) {
2509 struct periph_driver **pdrv;
2510
2511 pdrv = NULL;
2512 bzero(&cdm->pos, sizeof(cdm->pos));
2513 cdm->pos.position_type =
2514 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2515 CAM_DEV_POS_PERIPH;
2516
2517 /*
2518			 * This may look a bit nonsensical, but it is
2519 * actually quite logical. There are very few
2520 * peripheral drivers, and bloating every peripheral
2521 * structure with a pointer back to its parent
2522 * peripheral driver linker set entry would cost
2523 * more in the long run than doing this quick lookup.
2524 */
2525 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2526 if (strcmp((*pdrv)->driver_name,
2527 periph->periph_name) == 0)
2528 break;
2529 }
2530
2531 if (*pdrv == NULL) {
2532 cdm->status = CAM_DEV_MATCH_ERROR;
2533 return(0);
2534 }
2535
2536 cdm->pos.cookie.pdrv = pdrv;
2537 /*
2538 * The periph generation slot does double duty, as
2539 * does the periph pointer slot. They are used for
2540 * both edt and pdrv lookups and positioning.
2541 */
2542 cdm->pos.cookie.periph = periph;
2543 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2544 (*pdrv)->generation;
2545 cdm->status = CAM_DEV_MATCH_MORE;
2546 return(0);
2547 }
2548
2549 j = cdm->num_matches;
2550 cdm->num_matches++;
2551 cdm->matches[j].type = DEV_MATCH_PERIPH;
2552 cdm->matches[j].result.periph_result.path_id =
2553 periph->path->bus->path_id;
2554
2555 /*
2556 * The transport layer peripheral doesn't have a target or
2557 * lun.
2558 */
2559 if (periph->path->target)
2560 cdm->matches[j].result.periph_result.target_id =
2561 periph->path->target->target_id;
2562 else
2563 cdm->matches[j].result.periph_result.target_id = -1;
2564
2565 if (periph->path->device)
2566 cdm->matches[j].result.periph_result.target_lun =
2567 periph->path->device->lun_id;
2568 else
2569 cdm->matches[j].result.periph_result.target_lun = -1;
2570
2571 cdm->matches[j].result.periph_result.unit_number =
2572 periph->unit_number;
2573 strncpy(cdm->matches[j].result.periph_result.periph_name,
2574 periph->periph_name, DEV_IDLEN);
2575 }
2576
2577 return(1);
2578}
2579
2580static int
2581xptperiphlistmatch(struct ccb_dev_match *cdm)
2582{
2583 int ret;
2584
2585 cdm->num_matches = 0;
2586
2587 /*
2588 * At this point in the edt traversal function, we check the bus
2589 * list generation to make sure that no busses have been added or
2590	 * removed since the user last sent an XPT_DEV_MATCH ccb through.
2591 * For the peripheral driver list traversal function, however, we
2592 * don't have to worry about new peripheral driver types coming or
2593 * going; they're in a linker set, and therefore can't change
2594 * without a recompile.
2595 */
2596
2597 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2598 && (cdm->pos.cookie.pdrv != NULL))
2599 ret = xptpdrvtraverse(
2600 (struct periph_driver **)cdm->pos.cookie.pdrv,
2601 xptplistpdrvfunc, cdm);
2602 else
2603 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2604
2605 /*
2606 * If we get back 0, that means that we had to stop before fully
2607 * traversing the peripheral driver tree. It also means that one of
2608 * the subroutines has set the status field to the proper value. If
2609 * we get back 1, we've fully traversed the EDT and copied out any
2610 * matching entries.
2611 */
2612 if (ret == 1)
2613 cdm->status = CAM_DEV_MATCH_LAST;
2614
2615 return(ret);
2616}
2617
2618static int
2619xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2620{
2621 struct cam_eb *bus, *next_bus;
2622 int retval;
2623
2624 retval = 1;
2625
2626 mtx_lock(&xsoftc.xpt_topo_lock);
2627 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
2628 bus != NULL;
2629 bus = next_bus) {
2630 next_bus = TAILQ_NEXT(bus, links);
2631
2632 mtx_unlock(&xsoftc.xpt_topo_lock);
2633 CAM_SIM_LOCK(bus->sim);
2634 retval = tr_func(bus, arg);
2635 CAM_SIM_UNLOCK(bus->sim);
2636 if (retval == 0)
2637 return(retval);
2638 mtx_lock(&xsoftc.xpt_topo_lock);
2639 }
2640 mtx_unlock(&xsoftc.xpt_topo_lock);
2641
2642 return(retval);
2643}
2644
2645static int
2646xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2647 xpt_targetfunc_t *tr_func, void *arg)
2648{
2649 struct cam_et *target, *next_target;
2650 int retval;
2651
2652 retval = 1;
2653 for (target = (start_target ? start_target :
2654 TAILQ_FIRST(&bus->et_entries));
2655 target != NULL; target = next_target) {
2656
2657 next_target = TAILQ_NEXT(target, links);
2658
2659 retval = tr_func(target, arg);
2660
2661 if (retval == 0)
2662 return(retval);
2663 }
2664
2665 return(retval);
2666}
2667
2668static int
2669xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2670 xpt_devicefunc_t *tr_func, void *arg)
2671{
2672 struct cam_ed *device, *next_device;
2673 int retval;
2674
2675 retval = 1;
2676 for (device = (start_device ? start_device :
2677 TAILQ_FIRST(&target->ed_entries));
2678 device != NULL;
2679 device = next_device) {
2680
2681 next_device = TAILQ_NEXT(device, links);
2682
2683 retval = tr_func(device, arg);
2684
2685 if (retval == 0)
2686 return(retval);
2687 }
2688
2689 return(retval);
2690}
2691
2692static int
2693xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2694 xpt_periphfunc_t *tr_func, void *arg)
2695{
2696 struct cam_periph *periph, *next_periph;
2697 int retval;
2698
2699 retval = 1;
2700
2701 for (periph = (start_periph ? start_periph :
2702 SLIST_FIRST(&device->periphs));
2703 periph != NULL;
2704 periph = next_periph) {
2705
2706 next_periph = SLIST_NEXT(periph, periph_links);
2707
2708 retval = tr_func(periph, arg);
2709 if (retval == 0)
2710 return(retval);
2711 }
2712
2713 return(retval);
2714}
2715
2716static int
2717xptpdrvtraverse(struct periph_driver **start_pdrv,
2718 xpt_pdrvfunc_t *tr_func, void *arg)
2719{
2720 struct periph_driver **pdrv;
2721 int retval;
2722
2723 retval = 1;
2724
2725 /*
2726 * We don't traverse the peripheral driver list like we do the
2727 * other lists, because it is a linker set, and therefore cannot be
2728 * changed during runtime. If the peripheral driver list is ever
2729 * re-done to be something other than a linker set (i.e. it can
2730 * change while the system is running), the list traversal should
2731 * be modified to work like the other traversal functions.
2732 */
2733 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2734 *pdrv != NULL; pdrv++) {
2735 retval = tr_func(pdrv, arg);
2736
2737 if (retval == 0)
2738 return(retval);
2739 }
2740
2741 return(retval);
2742}
2743
2744static int
2745xptpdperiphtraverse(struct periph_driver **pdrv,
2746 struct cam_periph *start_periph,
2747 xpt_periphfunc_t *tr_func, void *arg)
2748{
2749 struct cam_periph *periph, *next_periph;
2750 int retval;
2751
2752 retval = 1;
2753
2754 for (periph = (start_periph ? start_periph :
2755 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2756 periph = next_periph) {
2757
2758 next_periph = TAILQ_NEXT(periph, unit_links);
2759
2760 retval = tr_func(periph, arg);
2761 if (retval == 0)
2762 return(retval);
2763 }
2764 return(retval);
2765}
2766
2767static int
2768xptdefbusfunc(struct cam_eb *bus, void *arg)
2769{
2770 struct xpt_traverse_config *tr_config;
2771
2772 tr_config = (struct xpt_traverse_config *)arg;
2773
2774 if (tr_config->depth == XPT_DEPTH_BUS) {
2775 xpt_busfunc_t *tr_func;
2776
2777 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2778
2779 return(tr_func(bus, tr_config->tr_arg));
2780 } else
2781 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2782}
2783
2784static int
2785xptdeftargetfunc(struct cam_et *target, void *arg)
2786{
2787 struct xpt_traverse_config *tr_config;
2788
2789 tr_config = (struct xpt_traverse_config *)arg;
2790
2791 if (tr_config->depth == XPT_DEPTH_TARGET) {
2792 xpt_targetfunc_t *tr_func;
2793
2794 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2795
2796 return(tr_func(target, tr_config->tr_arg));
2797 } else
2798 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2799}
2800
2801static int
2802xptdefdevicefunc(struct cam_ed *device, void *arg)
2803{
2804 struct xpt_traverse_config *tr_config;
2805
2806 tr_config = (struct xpt_traverse_config *)arg;
2807
2808 if (tr_config->depth == XPT_DEPTH_DEVICE) {
2809 xpt_devicefunc_t *tr_func;
2810
2811 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2812
2813 return(tr_func(device, tr_config->tr_arg));
2814 } else
2815 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2816}
2817
2818static int
2819xptdefperiphfunc(struct cam_periph *periph, void *arg)
2820{
2821 struct xpt_traverse_config *tr_config;
2822 xpt_periphfunc_t *tr_func;
2823
2824 tr_config = (struct xpt_traverse_config *)arg;
2825
2826 tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2827
2828 /*
2829 * Unlike the other default functions, we don't check for depth
2830 * here. The peripheral driver level is the last level in the EDT,
2831 * so if we're here, we should execute the function in question.
2832 */
2833 return(tr_func(periph, tr_config->tr_arg));
2834}
2835
2836/*
2837 * Execute the given function for every bus in the EDT.
2838 */
2839static int
2840xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2841{
2842 struct xpt_traverse_config tr_config;
2843
2844 tr_config.depth = XPT_DEPTH_BUS;
2845 tr_config.tr_func = tr_func;
2846 tr_config.tr_arg = arg;
2847
2848 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2849}
2850
2851/*
2852 * Execute the given function for every device in the EDT.
2853 */
2854static int
2855xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2856{
2857 struct xpt_traverse_config tr_config;
2858
2859 tr_config.depth = XPT_DEPTH_DEVICE;
2860 tr_config.tr_func = tr_func;
2861 tr_config.tr_arg = arg;
2862
2863 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2864}
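/*
 * The traversal helpers above take callbacks of the following shape; this
 * one is purely illustrative (it just counts devices through the opaque
 * "arg" pointer) and does not exist elsewhere in the tree:
 *
 *	static int
 *	xptcountdevfunc(struct cam_ed *device, void *arg)
 *	{
 *		int *count = (int *)arg;
 *
 *		(*count)++;
 *		return (1);	(non-zero means "keep traversing")
 *	}
 *
 * which would then be run over the whole EDT with
 * xpt_for_all_devices(xptcountdevfunc, &count).
 */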
2865
2866static int
2867xptsetasyncfunc(struct cam_ed *device, void *arg)
2868{
2869 struct cam_path path;
2870 struct ccb_getdev cgd;
2871 struct async_node *cur_entry;
2872
2873 cur_entry = (struct async_node *)arg;
2874
2875 /*
2876 * Don't report unconfigured devices (Wildcard devs,
2877 * devices only for target mode, device instances
2878 * that have been invalidated but are waiting for
2879 * their last reference count to be released).
2880 */
2881 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2882 return (1);
2883
2884 xpt_compile_path(&path,
2885 NULL,
2886 device->target->bus->path_id,
2887 device->target->target_id,
2888 device->lun_id);
2889 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2890 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2891 xpt_action((union ccb *)&cgd);
2892 cur_entry->callback(cur_entry->callback_arg,
2893 AC_FOUND_DEVICE,
2894 &path, &cgd);
2895 xpt_release_path(&path);
2896
2897 return(1);
2898}
2899
2900static int
2901xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2902{
2903 struct cam_path path;
2904 struct ccb_pathinq cpi;
2905 struct async_node *cur_entry;
2906
2907 cur_entry = (struct async_node *)arg;
2908
2909 xpt_compile_path(&path, /*periph*/NULL,
2910 bus->sim->path_id,
2911 CAM_TARGET_WILDCARD,
2912 CAM_LUN_WILDCARD);
2913 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2914 cpi.ccb_h.func_code = XPT_PATH_INQ;
2915 xpt_action((union ccb *)&cpi);
2916 cur_entry->callback(cur_entry->callback_arg,
2917 AC_PATH_REGISTERED,
2918 &path, &cpi);
2919 xpt_release_path(&path);
2920
2921 return(1);
2922}
2923
2924static void
2925xpt_action_sasync_cb(void *context, int pending)
2926{
2927 struct async_node *cur_entry;
2928 struct xpt_task *task;
2929 uint32_t added;
2930
2931 task = (struct xpt_task *)context;
2932 cur_entry = (struct async_node *)task->data1;
2933 added = task->data2;
2934
2935 if ((added & AC_FOUND_DEVICE) != 0) {
2936 /*
2937 * Get this peripheral up to date with all
2938 * the currently existing devices.
2939 */
2940 xpt_for_all_devices(xptsetasyncfunc, cur_entry);
2941 }
2942 if ((added & AC_PATH_REGISTERED) != 0) {
2943 /*
2944 * Get this peripheral up to date with all
2945 * the currently existing busses.
2946 */
2947 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
2948 }
2949
2950 free(task, M_CAMXPT);
2951}
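/*
 * For reference, a peripheral driver subscribes to these notifications by
 * sending an XPT_SASYNC_CB ccb through xpt_action().  A minimal sketch,
 * where "path", the callback and its argument are placeholders supplied
 * by the driver:
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = mydriverasync;
 *	csa.callback_arg = softc;
 *	xpt_action((union ccb *)&csa);
 */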
2952
2953void
2954xpt_action(union ccb *start_ccb)
2955{
2956
2957 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2958
2959 start_ccb->ccb_h.status = CAM_REQ_INPROG;
2960
2961 switch (start_ccb->ccb_h.func_code) {
2962 case XPT_SCSI_IO:
2963 {
2964 struct cam_ed *device;
2965#ifdef CAMDEBUG
2966 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2967 struct cam_path *path;
2968
2969 path = start_ccb->ccb_h.path;
2970#endif
2971
2972 /*
2973 * For the sake of compatibility with SCSI-1
2974 * devices that may not understand the identify
2975 * message, we include lun information in the
2976 * second byte of all commands. SCSI-1 specifies
2977 * that luns are a 3 bit value and reserves only 3
2978 * bits for lun information in the CDB. Later
2979 * revisions of the SCSI spec allow for more than 8
2980 * luns, but have deprecated lun information in the
2981		 * CDB. So, if the lun won't fit, we must omit it.
2982 *
2983 * Also be aware that during initial probing for devices,
2984 * the inquiry information is unknown but initialized to 0.
2985 * This means that this code will be exercised while probing
2986 * devices with an ANSI revision greater than 2.
2987 */
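		/*
		 * Worked example: for target_lun == 3 on such a device,
		 * (3 << 5) == 0x60 is OR'd into cdb_bytes[1] below.
		 */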
2988 device = start_ccb->ccb_h.path->device;
2989 if (device->protocol_version <= SCSI_REV_2
2990 && start_ccb->ccb_h.target_lun < 8
2991 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2992
2993 start_ccb->csio.cdb_io.cdb_bytes[1] |=
2994 start_ccb->ccb_h.target_lun << 5;
2995 }
2996 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2997 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2998 scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2999 &path->device->inq_data),
3000 scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
3001 cdb_str, sizeof(cdb_str))));
3002 }
3003 /* FALLTHROUGH */
3004 case XPT_TARGET_IO:
3005 case XPT_CONT_TARGET_IO:
3006 start_ccb->csio.sense_resid = 0;
3007 start_ccb->csio.resid = 0;
3008 /* FALLTHROUGH */
3009 case XPT_RESET_DEV:
3010 case XPT_ENG_EXEC:
3011 {
3012 struct cam_path *path;
3013 int runq;
3014
3015 path = start_ccb->ccb_h.path;
3016
3017 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
3018 if (path->device->qfrozen_cnt == 0)
3019 runq = xpt_schedule_dev_sendq(path->bus, path->device);
3020 else
3021 runq = 0;
3022 if (runq != 0)
3023 xpt_run_dev_sendq(path->bus);
3024 break;
3025 }
3026 case XPT_SET_TRAN_SETTINGS:
3027 {
3028 xpt_set_transfer_settings(&start_ccb->cts,
3029 start_ccb->ccb_h.path->device,
3030 /*async_update*/FALSE);
3031 break;
3032 }
3033 case XPT_CALC_GEOMETRY:
3034 {
3035 struct cam_sim *sim;
3036
3037 /* Filter out garbage */
3038 if (start_ccb->ccg.block_size == 0
3039 || start_ccb->ccg.volume_size == 0) {
3040 start_ccb->ccg.cylinders = 0;
3041 start_ccb->ccg.heads = 0;
3042 start_ccb->ccg.secs_per_track = 0;
3043 start_ccb->ccb_h.status = CAM_REQ_CMP;
3044 break;
3045 }
3046#ifdef PC98
3047 /*
3048		 * In a PC-98 system, geometry translation depends on
3049 * the "real" device geometry obtained from mode page 4.
3050 * SCSI geometry translation is performed in the
3051 * initialization routine of the SCSI BIOS and the result
3052 * stored in host memory. If the translation is available
3053 * in host memory, use it. If not, rely on the default
3054 * translation the device driver performs.
3055 */
3056 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
3057 start_ccb->ccb_h.status = CAM_REQ_CMP;
3058 break;
3059 }
3060#endif
3061 sim = start_ccb->ccb_h.path->bus->sim;
3062 (*(sim->sim_action))(sim, start_ccb);
3063 break;
3064 }
3065 case XPT_ABORT:
3066 {
3067 union ccb* abort_ccb;
3068
3069 abort_ccb = start_ccb->cab.abort_ccb;
3070 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
3071
3072 if (abort_ccb->ccb_h.pinfo.index >= 0) {
3073 struct cam_ccbq *ccbq;
3074
3075 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
3076 cam_ccbq_remove_ccb(ccbq, abort_ccb);
3077 abort_ccb->ccb_h.status =
3078 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3079 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3080 xpt_done(abort_ccb);
3081 start_ccb->ccb_h.status = CAM_REQ_CMP;
3082 break;
3083 }
3084 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
3085 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
3086 /*
3087 * We've caught this ccb en route to
3088 * the SIM. Flag it for abort and the
3089 * SIM will do so just before starting
3090 * real work on the CCB.
3091 */
3092 abort_ccb->ccb_h.status =
3093 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3094 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3095 start_ccb->ccb_h.status = CAM_REQ_CMP;
3096 break;
3097 }
3098 }
3099 if (XPT_FC_IS_QUEUED(abort_ccb)
3100 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
3101 /*
3102 * It's already completed but waiting
3103 * for our SWI to get to it.
3104 */
3105 start_ccb->ccb_h.status = CAM_UA_ABORT;
3106 break;
3107 }
3108 /*
3109 * If we weren't able to take care of the abort request
3110 * in the XPT, pass the request down to the SIM for processing.
3111 */
3112 }
3113 /* FALLTHROUGH */
3114 case XPT_ACCEPT_TARGET_IO:
3115 case XPT_EN_LUN:
3116 case XPT_IMMED_NOTIFY:
3117 case XPT_NOTIFY_ACK:
3118 case XPT_GET_TRAN_SETTINGS:
3119 case XPT_RESET_BUS:
3120 {
3121 struct cam_sim *sim;
3122
3123 sim = start_ccb->ccb_h.path->bus->sim;
3124 (*(sim->sim_action))(sim, start_ccb);
3125 break;
3126 }
3127 case XPT_PATH_INQ:
3128 {
3129 struct cam_sim *sim;
3130
3131 sim = start_ccb->ccb_h.path->bus->sim;
3132 (*(sim->sim_action))(sim, start_ccb);
3133 break;
3134 }
3135 case XPT_PATH_STATS:
3136 start_ccb->cpis.last_reset =
3137 start_ccb->ccb_h.path->bus->last_reset;
3138 start_ccb->ccb_h.status = CAM_REQ_CMP;
3139 break;
3140 case XPT_GDEV_TYPE:
3141 {
3142 struct cam_ed *dev;
3143
3144 dev = start_ccb->ccb_h.path->device;
3145 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3146 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3147 } else {
3148 struct ccb_getdev *cgd;
3149 struct cam_eb *bus;
3150 struct cam_et *tar;
3151
3152 cgd = &start_ccb->cgd;
3153 bus = cgd->ccb_h.path->bus;
3154 tar = cgd->ccb_h.path->target;
3155 cgd->inq_data = dev->inq_data;
3156 cgd->ccb_h.status = CAM_REQ_CMP;
3157 cgd->serial_num_len = dev->serial_num_len;
3158 if ((dev->serial_num_len > 0)
3159 && (dev->serial_num != NULL))
3160 bcopy(dev->serial_num, cgd->serial_num,
3161 dev->serial_num_len);
3162 }
3163 break;
3164 }
3165 case XPT_GDEV_STATS:
3166 {
3167 struct cam_ed *dev;
3168
3169 dev = start_ccb->ccb_h.path->device;
3170 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3171 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3172 } else {
3173 struct ccb_getdevstats *cgds;
3174 struct cam_eb *bus;
3175 struct cam_et *tar;
3176
3177 cgds = &start_ccb->cgds;
3178 bus = cgds->ccb_h.path->bus;
3179 tar = cgds->ccb_h.path->target;
3180 cgds->dev_openings = dev->ccbq.dev_openings;
3181 cgds->dev_active = dev->ccbq.dev_active;
3182 cgds->devq_openings = dev->ccbq.devq_openings;
3183 cgds->devq_queued = dev->ccbq.queue.entries;
3184 cgds->held = dev->ccbq.held;
3185 cgds->last_reset = tar->last_reset;
3186 cgds->maxtags = dev->quirk->maxtags;
3187 cgds->mintags = dev->quirk->mintags;
3188 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3189 cgds->last_reset = bus->last_reset;
3190 cgds->ccb_h.status = CAM_REQ_CMP;
3191 }
3192 break;
3193 }
3194 case XPT_GDEVLIST:
3195 {
3196 struct cam_periph *nperiph;
3197 struct periph_list *periph_head;
3198 struct ccb_getdevlist *cgdl;
3199 u_int i;
3200 struct cam_ed *device;
3201 int found;
3202
3203
3204 found = 0;
3205
3206 /*
3207 * Don't want anyone mucking with our data.
3208 */
3209 device = start_ccb->ccb_h.path->device;
3210 periph_head = &device->periphs;
3211 cgdl = &start_ccb->cgdl;
3212
3213 /*
3214 * Check and see if the list has changed since the user
3215 * last requested a list member. If so, tell them that the
3216 * list has changed, and therefore they need to start over
3217 * from the beginning.
3218 */
3219 if ((cgdl->index != 0) &&
3220 (cgdl->generation != device->generation)) {
3221 cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3222 break;
3223 }
3224
3225 /*
3226 * Traverse the list of peripherals and attempt to find
3227 * the requested peripheral.
3228 */
3229 for (nperiph = SLIST_FIRST(periph_head), i = 0;
3230 (nperiph != NULL) && (i <= cgdl->index);
3231 nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3232 if (i == cgdl->index) {
3233 strncpy(cgdl->periph_name,
3234 nperiph->periph_name,
3235 DEV_IDLEN);
3236 cgdl->unit_number = nperiph->unit_number;
3237 found = 1;
3238 }
3239 }
3240 if (found == 0) {
3241 cgdl->status = CAM_GDEVLIST_ERROR;
3242 break;
3243 }
3244
3245 if (nperiph == NULL)
3246 cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3247 else
3248 cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3249
3250 cgdl->index++;
3251 cgdl->generation = device->generation;
3252
3253 cgdl->ccb_h.status = CAM_REQ_CMP;
3254 break;
3255 }
3256 case XPT_DEV_MATCH:
3257 {
3258 dev_pos_type position_type;
3259 struct ccb_dev_match *cdm;
3260
3261 cdm = &start_ccb->cdm;
3262
3263 /*
3264 * There are two ways of getting at information in the EDT.
3265 * The first way is via the primary EDT tree. It starts
3266 * with a list of busses, then a list of targets on a bus,
3267 * then devices/luns on a target, and then peripherals on a
3268 * device/lun. The "other" way is by the peripheral driver
3269 * lists. The peripheral driver lists are organized by
3270 * peripheral driver. (obviously) So it makes sense to
3271 * use the peripheral driver list if the user is looking
3272 * for something like "da1", or all "da" devices. If the
3273 * user is looking for something on a particular bus/target
3274 * or lun, it's generally better to go through the EDT tree.
3275 */
3276
3277 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3278 position_type = cdm->pos.position_type;
3279 else {
3280 u_int i;
3281
3282 position_type = CAM_DEV_POS_NONE;
3283
3284 for (i = 0; i < cdm->num_patterns; i++) {
3285 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3286 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3287 position_type = CAM_DEV_POS_EDT;
3288 break;
3289 }
3290 }
3291
3292 if (cdm->num_patterns == 0)
3293 position_type = CAM_DEV_POS_EDT;
3294 else if (position_type == CAM_DEV_POS_NONE)
3295 position_type = CAM_DEV_POS_PDRV;
3296 }
3297
3298 switch(position_type & CAM_DEV_POS_TYPEMASK) {
3299 case CAM_DEV_POS_EDT:
3300 xptedtmatch(cdm);
3301 break;
3302 case CAM_DEV_POS_PDRV:
3303 xptperiphlistmatch(cdm);
3304 break;
3305 default:
3306 cdm->status = CAM_DEV_MATCH_ERROR;
3307 break;
3308 }
3309
3310 if (cdm->status == CAM_DEV_MATCH_ERROR)
3311 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3312 else
3313 start_ccb->ccb_h.status = CAM_REQ_CMP;
3314
3315 break;
3316 }
3317 case XPT_SASYNC_CB:
3318 {
3319 struct ccb_setasync *csa;
3320 struct async_node *cur_entry;
3321 struct async_list *async_head;
3322 u_int32_t added;
3323
3324 csa = &start_ccb->csa;
3325 added = csa->event_enable;
3326 async_head = &csa->ccb_h.path->device->asyncs;
3327
3328 /*
3329 * If there is already an entry for us, simply
3330 * update it.
3331 */
3332 cur_entry = SLIST_FIRST(async_head);
3333 while (cur_entry != NULL) {
3334 if ((cur_entry->callback_arg == csa->callback_arg)
3335 && (cur_entry->callback == csa->callback))
3336 break;
3337 cur_entry = SLIST_NEXT(cur_entry, links);
3338 }
3339
3340 if (cur_entry != NULL) {
3341 /*
3342 * If the request has no flags set,
3343 * remove the entry.
3344 */
3345 added &= ~cur_entry->event_enable;
3346 if (csa->event_enable == 0) {
3347 SLIST_REMOVE(async_head, cur_entry,
3348 async_node, links);
3349 csa->ccb_h.path->device->refcount--;
3350 free(cur_entry, M_CAMXPT);
3351 } else {
3352 cur_entry->event_enable = csa->event_enable;
3353 }
3354 } else {
3355 cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
3356 M_NOWAIT);
3357 if (cur_entry == NULL) {
3358 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3359 break;
3360 }
3361 cur_entry->event_enable = csa->event_enable;
3362 cur_entry->callback_arg = csa->callback_arg;
3363 cur_entry->callback = csa->callback;
3364 SLIST_INSERT_HEAD(async_head, cur_entry, links);
3365 csa->ccb_h.path->device->refcount++;
3366 }
3367
3368 /*
3369		 * Need to decouple this operation via a taskqueue so that
3370 * the locking doesn't become a mess.
3371 */
3372 if ((added & (AC_FOUND_DEVICE | AC_PATH_REGISTERED)) != 0) {
3373 struct xpt_task *task;
3374
3375 task = malloc(sizeof(struct xpt_task), M_CAMXPT,
3376 M_NOWAIT);
3377 if (task == NULL) {
3378 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3379 break;
3380 }
3381
3382 TASK_INIT(&task->task, 0, xpt_action_sasync_cb, task);
3383 task->data1 = cur_entry;
3384 task->data2 = added;
3385 taskqueue_enqueue(taskqueue_thread, &task->task);
3386 }
3387
3388 start_ccb->ccb_h.status = CAM_REQ_CMP;
3389 break;
3390 }
3391 case XPT_REL_SIMQ:
3392 {
3393 struct ccb_relsim *crs;
3394 struct cam_ed *dev;
3395
3396 crs = &start_ccb->crs;
3397 dev = crs->ccb_h.path->device;
3398 if (dev == NULL) {
3399
3400 crs->ccb_h.status = CAM_DEV_NOT_THERE;
3401 break;
3402 }
3403
3404 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3405
3406 if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
3407 /* Don't ever go below one opening */
3408 if (crs->openings > 0) {
3409 xpt_dev_ccbq_resize(crs->ccb_h.path,
3410 crs->openings);
3411
3412 if (bootverbose) {
3413 xpt_print(crs->ccb_h.path,
3414 "tagged openings now %d\n",
3415 crs->openings);
3416 }
3417 }
3418 }
3419 }
3420
3421 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3422
3423 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3424
3425 /*
3426 * Just extend the old timeout and decrement
3427 * the freeze count so that a single timeout
3428 * is sufficient for releasing the queue.
3429 */
3430 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3431 callout_stop(&dev->callout);
3432 } else {
3433
3434 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3435 }
3436
3437 callout_reset(&dev->callout,
3438 (crs->release_timeout * hz) / 1000,
3439 xpt_release_devq_timeout, dev);
3440
3441 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3442
3443 }
3444
3445 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3446
3447 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3448 /*
3449 * Decrement the freeze count so that a single
3450 * completion is still sufficient to unfreeze
3451 * the queue.
3452 */
3453 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3454 } else {
3455
3456 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3457 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3458 }
3459 }
3460
3461 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3462
3463 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3464 || (dev->ccbq.dev_active == 0)) {
3465
3466 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3467 } else {
3468
3469 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3470 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3471 }
3472 }
3473
3474 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3475
3476 xpt_release_devq(crs->ccb_h.path, /*count*/1,
3477 /*run_queue*/TRUE);
3478 }
3479 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3480 start_ccb->ccb_h.status = CAM_REQ_CMP;
3481 break;
3482 }
3483 case XPT_SCAN_BUS:
3484 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3485 break;
3486 case XPT_SCAN_LUN:
3487 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3488 start_ccb->ccb_h.path, start_ccb->crcn.flags,
3489 start_ccb);
3490 break;
3491 case XPT_DEBUG: {
3492#ifdef CAMDEBUG
3493#ifdef CAM_DEBUG_DELAY
3494 cam_debug_delay = CAM_DEBUG_DELAY;
3495#endif
3496 cam_dflags = start_ccb->cdbg.flags;
3497 if (cam_dpath != NULL) {
3498 xpt_free_path(cam_dpath);
3499 cam_dpath = NULL;
3500 }
3501
3502 if (cam_dflags != CAM_DEBUG_NONE) {
3503 if (xpt_create_path(&cam_dpath, xpt_periph,
3504 start_ccb->ccb_h.path_id,
3505 start_ccb->ccb_h.target_id,
3506 start_ccb->ccb_h.target_lun) !=
3507 CAM_REQ_CMP) {
3508 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3509 cam_dflags = CAM_DEBUG_NONE;
3510 } else {
3511 start_ccb->ccb_h.status = CAM_REQ_CMP;
3512 xpt_print(cam_dpath, "debugging flags now %x\n",
3513 cam_dflags);
3514 }
3515 } else {
3516 cam_dpath = NULL;
3517 start_ccb->ccb_h.status = CAM_REQ_CMP;
3518 }
3519#else /* !CAMDEBUG */
3520 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3521#endif /* CAMDEBUG */
3522 break;
3523 }
3524 case XPT_NOOP:
3525 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3526 xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3527 start_ccb->ccb_h.status = CAM_REQ_CMP;
3528 break;
3529 default:
3530 case XPT_SDEV_TYPE:
3531 case XPT_TERM_IO:
3532 case XPT_ENG_INQ:
3533 /* XXX Implement */
3534 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3535 break;
3536 }
3537}
3538
3539void
3540xpt_polled_action(union ccb *start_ccb)
3541{
3542 u_int32_t timeout;
3543 struct cam_sim *sim;
3544 struct cam_devq *devq;
3545 struct cam_ed *dev;
3546
3547
3548 timeout = start_ccb->ccb_h.timeout;
3549 sim = start_ccb->ccb_h.path->bus->sim;
3550 devq = sim->devq;
3551 dev = start_ccb->ccb_h.path->device;
3552
3553 mtx_assert(sim->mtx, MA_OWNED);
3554
3555 /*
3556 * Steal an opening so that no other queued requests
3557 * can get it before us while we simulate interrupts.
3558 */
3559 dev->ccbq.devq_openings--;
3560 dev->ccbq.dev_openings--;
3561
3562 while(((devq != NULL && devq->send_openings <= 0) ||
3563 dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
3564 DELAY(1000);
3565 (*(sim->sim_poll))(sim);
3566 camisr_runqueue(&sim->sim_doneq);
3567 }
3568
3569 dev->ccbq.devq_openings++;
3570 dev->ccbq.dev_openings++;
3571
3572 if (timeout != 0) {
3573 xpt_action(start_ccb);
3574 while(--timeout > 0) {
3575 (*(sim->sim_poll))(sim);
3576 camisr_runqueue(&sim->sim_doneq);
3577 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3578 != CAM_REQ_INPROG)
3579 break;
3580 DELAY(1000);
3581 }
3582 if (timeout == 0) {
3583 /*
3584 * XXX Is it worth adding a sim_timeout entry
3585 * point so we can attempt recovery? If
3586 * this is only used for dumps, I don't think
3587 * it is.
3588 */
3589 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3590 }
3591 } else {
3592 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3593 }
3594}
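/*
 * Note on xpt_polled_action(): it is meant for contexts where interrupts are
 * not available (e.g. crash dumps, as the XXX above suggests).  It simulates
 * interrupt handling by invoking the SIM's poll routine and draining the
 * done queue once per millisecond, until the CCB leaves CAM_REQ_INPROG or
 * the CCB's timeout (in milliseconds) is exhausted.
 */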
3595
3596/*
3597 * Schedule a peripheral driver to receive a ccb when its
3598 * target device has space for more transactions.
3599 */
3600void
3601xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3602{
3603 struct cam_ed *device;
3604 int runq;
3605
3606 mtx_assert(perph->sim->mtx, MA_OWNED);
3607
3608 CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3609 device = perph->path->device;
3610 if (periph_is_queued(perph)) {
3611 /* Simply reorder based on new priority */
3612 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3613 (" change priority to %d\n", new_priority));
3614 if (new_priority < perph->pinfo.priority) {
3615 camq_change_priority(&device->drvq,
3616 perph->pinfo.index,
3617 new_priority);
3618 }
3619 runq = 0;
3620 } else {
3621 /* New entry on the queue */
3622 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3623 (" added periph to queue\n"));
3624 perph->pinfo.priority = new_priority;
3625 perph->pinfo.generation = ++device->drvq.generation;
3626 camq_insert(&device->drvq, &perph->pinfo);
3627 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3628 }
3629 if (runq != 0) {
3630 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3631 (" calling xpt_run_devq\n"));
3632 xpt_run_dev_allocq(perph->path->bus);
3633 }
3634}
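/*
 * Rough usage sketch (hypothetical "foo" peripheral): the driver calls
 * xpt_schedule(periph, new_priority) whenever it has work queued for its
 * device; the XPT later allocates a CCB and calls the driver back through
 * its periph_start method, as done in xpt_run_dev_allocq() below.
 */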
3635
3636
3637/*
3638 * Schedule a device to run on a given queue.
3639 * If the device was inserted as a new entry on the queue,
3640 * return 1 meaning the device queue should be run. If we
3641 * were already queued, implying someone else has already
3642 * started the queue, return 0 so the caller doesn't attempt
3643 * to run the queue.
3644 */
3645static int
3646xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3647 u_int32_t new_priority)
3648{
3649 int retval;
3650 u_int32_t old_priority;
3651
3652 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3653
3654 old_priority = pinfo->priority;
3655
3656 /*
3657 * Are we already queued?
3658 */
3659 if (pinfo->index != CAM_UNQUEUED_INDEX) {
3660 /* Simply reorder based on new priority */
3661 if (new_priority < old_priority) {
3662 camq_change_priority(queue, pinfo->index,
3663 new_priority);
3664 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3665 ("changed priority to %d\n",
3666 new_priority));
3667 }
3668 retval = 0;
3669 } else {
3670 /* New entry on the queue */
3671 if (new_priority < old_priority)
3672 pinfo->priority = new_priority;
3673
3674 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3675 ("Inserting onto queue\n"));
3676 pinfo->generation = ++queue->generation;
3677 camq_insert(queue, pinfo);
3678 retval = 1;
3679 }
3680 return (retval);
3681}
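/*
 * Callers use the return value to decide whether to kick the queue; for
 * instance xpt_schedule() above does, in effect:
 *
 *	if (xpt_schedule_dev_allocq(bus, device) != 0)
 *		xpt_run_dev_allocq(bus);
 */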
3682
3683static void
3684xpt_run_dev_allocq(struct cam_eb *bus)
3685{
3686 struct cam_devq *devq;
3687
3688 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3689 devq = bus->sim->devq;
3690
3691 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3692 (" qfrozen_cnt == 0x%x, entries == %d, "
3693 "openings == %d, active == %d\n",
3694 devq->alloc_queue.qfrozen_cnt,
3695 devq->alloc_queue.entries,
3696 devq->alloc_openings,
3697 devq->alloc_active));
3698
3699 devq->alloc_queue.qfrozen_cnt++;
3700 while ((devq->alloc_queue.entries > 0)
3701 && (devq->alloc_openings > 0)
3702 && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3703 struct cam_ed_qinfo *qinfo;
3704 struct cam_ed *device;
3705 union ccb *work_ccb;
3706 struct cam_periph *drv;
3707 struct camq *drvq;
3708
3709 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3710 CAMQ_HEAD);
3711 device = qinfo->device;
3712
3713 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3714 ("running device %p\n", device));
3715
3716 drvq = &device->drvq;
3717
3718#ifdef CAMDEBUG
3719 if (drvq->entries <= 0) {
3720 panic("xpt_run_dev_allocq: "
3721 "Device on queue without any work to do");
3722 }
3723#endif
3724 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3725 devq->alloc_openings--;
3726 devq->alloc_active++;
3727 drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3728 xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3729 drv->pinfo.priority);
3730 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3731 ("calling periph start\n"));
3732 drv->periph_start(drv, work_ccb);
3733 } else {
3734 /*
3735 * Malloc failure in alloc_ccb
3736 */
3737 /*
3738 * XXX add us to a list to be run from free_ccb
3739 * if we don't have any ccbs active on this
3740			 * device queue; otherwise we may never get run
3741 * again.
3742 */
3743 break;
3744 }
3745
3746 if (drvq->entries > 0) {
3747 /* We have more work. Attempt to reschedule */
3748 xpt_schedule_dev_allocq(bus, device);
3749 }
3750 }
3751 devq->alloc_queue.qfrozen_cnt--;
3752}
3753
3754static void
3755xpt_run_dev_sendq(struct cam_eb *bus)
3756{
3757 struct cam_devq *devq;
3758
3759 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3760
3761 devq = bus->sim->devq;
3762
3763 devq->send_queue.qfrozen_cnt++;
3764 while ((devq->send_queue.entries > 0)
3765 && (devq->send_openings > 0)) {
3766 struct cam_ed_qinfo *qinfo;
3767 struct cam_ed *device;
3768 union ccb *work_ccb;
3769 struct cam_sim *sim;
3770
3771 if (devq->send_queue.qfrozen_cnt > 1) {
3772 break;
3773 }
3774
3775 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3776 CAMQ_HEAD);
3777 device = qinfo->device;
3778
3779 /*
3780 * If the device has been "frozen", don't attempt
3781 * to run it.
3782 */
3783 if (device->qfrozen_cnt > 0) {
3784 continue;
3785 }
3786
3787 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3788 ("running device %p\n", device));
3789
3790 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3791 if (work_ccb == NULL) {
3792 printf("device on run queue with no ccbs???\n");
3793 continue;
3794 }
3795
3796 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3797
3798 mtx_lock(&xsoftc.xpt_lock);
3799 if (xsoftc.num_highpower <= 0) {
3800 /*
3801 * We got a high power command, but we
3802 * don't have any available slots. Freeze
3803 * the device queue until we have a slot
3804 * available.
3805 */
3806 device->qfrozen_cnt++;
3807 STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
3808 &work_ccb->ccb_h,
3809 xpt_links.stqe);
3810
3811 mtx_unlock(&xsoftc.xpt_lock);
3812 continue;
3813 } else {
3814 /*
3815 * Consume a high power slot while
3816 * this ccb runs.
3817 */
3818 xsoftc.num_highpower--;
3819 }
3820 mtx_unlock(&xsoftc.xpt_lock);
3821 }
3822 devq->active_dev = device;
3823 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3824
3825 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3826
3827 devq->send_openings--;
3828 devq->send_active++;
3829
3830 if (device->ccbq.queue.entries > 0)
3831 xpt_schedule_dev_sendq(bus, device);
3832
3833 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3834 /*
3835 * The client wants to freeze the queue
3836 * after this CCB is sent.
3837 */
3838 device->qfrozen_cnt++;
3839 }
3840
3841 /* In Target mode, the peripheral driver knows best... */
3842 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3843 if ((device->inq_flags & SID_CmdQue) != 0
3844 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3845 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3846 else
3847 /*
3848 * Clear this in case of a retried CCB that
3849 * failed due to a rejected tag.
3850 */
3851 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3852 }
3853
3854 /*
3855 * Device queues can be shared among multiple sim instances
3856 * that reside on different busses. Use the SIM in the queue
3857 * CCB's path, rather than the one in the bus that was passed
3858 * into this function.
3859 */
3860 sim = work_ccb->ccb_h.path->bus->sim;
3861 (*(sim->sim_action))(sim, work_ccb);
3862
3863 devq->active_dev = NULL;
3864 }
3865 devq->send_queue.qfrozen_cnt--;
3866}
3867
3868/*
3869 * This function merges fields from the slave ccb into the master ccb, while
3870 * keeping important fields in the master ccb constant.
3871 */
3872void
3873xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3874{
3875
3876 /*
3877 * Pull fields that are valid for peripheral drivers to set
3878 * into the master CCB along with the CCB "payload".
3879 */
3880 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3881 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3882 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3883 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
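	/*
	 * Copy the CCB payload: everything that follows the header in the
	 * union, i.e. sizeof(union ccb) - sizeof(struct ccb_hdr) bytes
	 * starting right after ccb_h.
	 */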
3884 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3885 sizeof(union ccb) - sizeof(struct ccb_hdr));
3886}
3887
3888void
3889xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3890{
3891
3892 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3893 ccb_h->pinfo.priority = priority;
3894 ccb_h->path = path;
3895 ccb_h->path_id = path->bus->path_id;
3896 if (path->target)
3897 ccb_h->target_id = path->target->target_id;
3898 else
3899 ccb_h->target_id = CAM_TARGET_WILDCARD;
3900 if (path->device) {
3901 ccb_h->target_lun = path->device->lun_id;
3902 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3903 } else {
3904 ccb_h->target_lun = CAM_TARGET_WILDCARD;
3905 }
3906 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3907 ccb_h->flags = 0;
3908}
3909
3910/* Path manipulation functions */
3911cam_status
3912xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3913 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3914{
3915 struct cam_path *path;
3916 cam_status status;
3917
3918 path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
3919
3920 if (path == NULL) {
3921 status = CAM_RESRC_UNAVAIL;
3922 return(status);
3923 }
3924 status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3925 if (status != CAM_REQ_CMP) {
3926 free(path, M_CAMXPT);
3927 path = NULL;
3928 }
3929 *new_path_ptr = path;
3930 return (status);
3931}
3932
3933cam_status
3934xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3935 struct cam_periph *periph, path_id_t path_id,
3936 target_id_t target_id, lun_id_t lun_id)
3937{
3938 struct cam_path *path;
3939 struct cam_eb *bus = NULL;
3940 cam_status status;
3941 int need_unlock = 0;
3942
3943 path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_WAITOK);
3944
3945 if (path_id != CAM_BUS_WILDCARD) {
3946 bus = xpt_find_bus(path_id);
3947 if (bus != NULL) {
3948 need_unlock = 1;
3949 CAM_SIM_LOCK(bus->sim);
3950 }
3951 }
3952 status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
3953 if (need_unlock)
3954 CAM_SIM_UNLOCK(bus->sim);
3955 if (status != CAM_REQ_CMP) {
3956 free(path, M_CAMXPT);
3957 path = NULL;
3958 }
3959 *new_path_ptr = path;
3960 return (status);
3961}
3962
3963static cam_status
3964xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3965 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3966{
3967 struct cam_eb *bus;
3968 struct cam_et *target;
3969 struct cam_ed *device;
3970 cam_status status;
3971
3972 status = CAM_REQ_CMP; /* Completed without error */
3973 target = NULL; /* Wildcarded */
3974 device = NULL; /* Wildcarded */
3975
3976 /*
3977 * We will potentially modify the EDT, so block interrupts
3978 * that may attempt to create cam paths.
3979 */
3980 bus = xpt_find_bus(path_id);
3981 if (bus == NULL) {
3982 status = CAM_PATH_INVALID;
3983 } else {
3984 target = xpt_find_target(bus, target_id);
3985 if (target == NULL) {
3986 /* Create one */
3987 struct cam_et *new_target;
3988
3989 new_target = xpt_alloc_target(bus, target_id);
3990 if (new_target == NULL) {
3991 status = CAM_RESRC_UNAVAIL;
3992 } else {
3993 target = new_target;
3994 }
3995 }
3996 if (target != NULL) {
3997 device = xpt_find_device(target, lun_id);
3998 if (device == NULL) {
3999 /* Create one */
4000 struct cam_ed *new_device;
4001
4002 new_device = xpt_alloc_device(bus,
4003 target,
4004 lun_id);
4005 if (new_device == NULL) {
4006 status = CAM_RESRC_UNAVAIL;
4007 } else {
4008 device = new_device;
4009 }
4010 }
4011 }
4012 }
4013
4014 /*
4015 * Only touch the user's data if we are successful.
4016 */
4017 if (status == CAM_REQ_CMP) {
4018 new_path->periph = perph;
4019 new_path->bus = bus;
4020 new_path->target = target;
4021 new_path->device = device;
4022 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
4023 } else {
4024 if (device != NULL)
4025 xpt_release_device(bus, target, device);
4026 if (target != NULL)
4027 xpt_release_target(bus, target);
4028 if (bus != NULL)
4029 xpt_release_bus(bus);
4030 }
4031 return (status);
4032}
4033
4034static void
4035xpt_release_path(struct cam_path *path)
4036{
4037 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
4038 if (path->device != NULL) {
4039 xpt_release_device(path->bus, path->target, path->device);
4040 path->device = NULL;
4041 }
4042 if (path->target != NULL) {
4043 xpt_release_target(path->bus, path->target);
4044 path->target = NULL;
4045 }
4046 if (path->bus != NULL) {
4047 xpt_release_bus(path->bus);
4048 path->bus = NULL;
4049 }
4050}
4051
4052void
4053xpt_free_path(struct cam_path *path)
4054{
4055
4056 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
4057 xpt_release_path(path);
4058 free(path, M_CAMXPT);
4059}
4060
4061
4062/*
4063 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
4064 * in path1, 2 for match with wildcards in path2.
4065 */
4066int
4067xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
4068{
4069 int retval = 0;
4070
4071 if (path1->bus != path2->bus) {
4072 if (path1->bus->path_id == CAM_BUS_WILDCARD)
4073 retval = 1;
4074 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
4075 retval = 2;
4076 else
4077 return (-1);
4078 }
4079 if (path1->target != path2->target) {
4080 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
4081 if (retval == 0)
4082 retval = 1;
4083 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
4084 retval = 2;
4085 else
4086 return (-1);
4087 }
4088 if (path1->device != path2->device) {
4089 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
4090 if (retval == 0)
4091 retval = 1;
4092 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
4093 retval = 2;
4094 else
4095 return (-1);
4096 }
4097 return (retval);
4098}
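/*
 * For example: comparing a fully wildcarded path against a fully specified
 * one returns 1; the reverse comparison returns 2; two fully specified paths
 * on different busses return -1.
 */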
4099
4100void
4101xpt_print_path(struct cam_path *path)
4102{
4103
4104 if (path == NULL)
4105 printf("(nopath): ");
4106 else {
4107 if (path->periph != NULL)
4108 printf("(%s%d:", path->periph->periph_name,
4109 path->periph->unit_number);
4110 else
4111 printf("(noperiph:");
4112
4113 if (path->bus != NULL)
4114 printf("%s%d:%d:", path->bus->sim->sim_name,
4115 path->bus->sim->unit_number,
4116 path->bus->sim->bus_id);
4117 else
4118 printf("nobus:");
4119
4120 if (path->target != NULL)
4121 printf("%d:", path->target->target_id);
4122 else
4123 printf("X:");
4124
4125 if (path->device != NULL)
4126 printf("%d): ", path->device->lun_id);
4127 else
4128 printf("X): ");
4129 }
4130}
4131
4132void
4133xpt_print(struct cam_path *path, const char *fmt, ...)
4134{
4135 va_list ap;
4136 xpt_print_path(path);
4137 va_start(ap, fmt);
4138 vprintf(fmt, ap);
4139 va_end(ap);
4140}
4141
4142int
4143xpt_path_string(struct cam_path *path, char *str, size_t str_len)
4144{
4145 struct sbuf sb;
4146
4147 mtx_assert(path->bus->sim->mtx, MA_OWNED);
4148
4149 sbuf_new(&sb, str, str_len, 0);
4150
4151 if (path == NULL)
4152 sbuf_printf(&sb, "(nopath): ");
4153 else {
4154 if (path->periph != NULL)
4155 sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
4156 path->periph->unit_number);
4157 else
4158 sbuf_printf(&sb, "(noperiph:");
4159
4160 if (path->bus != NULL)
4161 sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
4162 path->bus->sim->unit_number,
4163 path->bus->sim->bus_id);
4164 else
4165 sbuf_printf(&sb, "nobus:");
4166
4167 if (path->target != NULL)
4168 sbuf_printf(&sb, "%d:", path->target->target_id);
4169 else
4170 sbuf_printf(&sb, "X:");
4171
4172 if (path->device != NULL)
4173 sbuf_printf(&sb, "%d): ", path->device->lun_id);
4174 else
4175 sbuf_printf(&sb, "X): ");
4176 }
4177 sbuf_finish(&sb);
4178
4179 return(sbuf_len(&sb));
4180}
4181
4182path_id_t
4183xpt_path_path_id(struct cam_path *path)
4184{
4185 mtx_assert(path->bus->sim->mtx, MA_OWNED);
4186
4187 return(path->bus->path_id);
4188}
4189
4190target_id_t
4191xpt_path_target_id(struct cam_path *path)
4192{
4193 mtx_assert(path->bus->sim->mtx, MA_OWNED);
4194
4195 if (path->target != NULL)
4196 return (path->target->target_id);
4197 else
4198 return (CAM_TARGET_WILDCARD);
4199}
4200
4201lun_id_t
4202xpt_path_lun_id(struct cam_path *path)
4203{
4204 mtx_assert(path->bus->sim->mtx, MA_OWNED);
4205
4206 if (path->device != NULL)
4207 return (path->device->lun_id);
4208 else
4209 return (CAM_LUN_WILDCARD);
4210}
4211
4212struct cam_sim *
4213xpt_path_sim(struct cam_path *path)
4214{
4215
4216 return (path->bus->sim);
4217}
4218
4219struct cam_periph*
4220xpt_path_periph(struct cam_path *path)
4221{
4222 mtx_assert(path->bus->sim->mtx, MA_OWNED);
4223
4224 return (path->periph);
4225}
4226
4227/*
4228 * Release a CAM control block for the caller. Remit the cost of the structure
4229 * to the device referenced by the path.  If this device had no 'credits'
4230 * and peripheral drivers have registered async callbacks for this notification,
4231 * call them now.
4232 */
4233void
4234xpt_release_ccb(union ccb *free_ccb)
4235{
4236 struct cam_path *path;
4237 struct cam_ed *device;
4238 struct cam_eb *bus;
4239 struct cam_sim *sim;
4240
4241 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4242 path = free_ccb->ccb_h.path;
4243 device = path->device;
4244 bus = path->bus;
4245 sim = bus->sim;
4246
4247 mtx_assert(sim->mtx, MA_OWNED);
4248
4249 cam_ccbq_release_opening(&device->ccbq);
4250 if (sim->ccb_count > sim->max_ccbs) {
4251 xpt_free_ccb(free_ccb);
4252 sim->ccb_count--;
4253 } else {
4254 SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
4255 xpt_links.sle);
4256 }
4257 if (sim->devq == NULL) {
4258 return;
4259 }
4260 sim->devq->alloc_openings++;
4261 sim->devq->alloc_active--;
4262 /* XXX Turn this into an inline function - xpt_run_device?? */
4263 if ((device_is_alloc_queued(device) == 0)
4264 && (device->drvq.entries > 0)) {
4265 xpt_schedule_dev_allocq(bus, device);
4266 }
4267 if (dev_allocq_is_runnable(sim->devq))
4268 xpt_run_dev_allocq(bus);
4269}
4270
4271/* Functions accessed by SIM drivers */
4272
4273/*
4274 * A sim structure, listing the SIM entry points and instance
4275 * identification info, is passed to xpt_bus_register to hook the SIM
4276 * into the CAM framework. xpt_bus_register creates a cam_eb entry
4277 * for this new bus and places it in the array of busses and assigns
4278 * it a path_id. The path_id may be influenced by "hard wiring"
4279 * information specified by the user. Once interrupt services are
4280 * available, the bus will be probed.
4281 */
4282int32_t
4283xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
4284{
4285 struct cam_eb *new_bus;
4286 struct cam_eb *old_bus;
4287 struct ccb_pathinq cpi;
4288
4289 mtx_assert(sim->mtx, MA_OWNED);
4290
4291 sim->bus_id = bus;
4292 new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
4293 M_CAMXPT, M_NOWAIT);
4294 if (new_bus == NULL) {
4295 /* Couldn't satisfy request */
4296 return (CAM_RESRC_UNAVAIL);
4297 }
4298
4299 if (strcmp(sim->sim_name, "xpt") != 0) {
4300
4301 sim->path_id =
4302 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4303 }
4304
4305 TAILQ_INIT(&new_bus->et_entries);
4306 new_bus->path_id = sim->path_id;
4307 cam_sim_hold(sim);
4307 new_bus->sim = sim;
4308 timevalclear(&new_bus->last_reset);
4309 new_bus->flags = 0;
4310 new_bus->refcount = 1; /* Held until a bus_deregister event */
4311 new_bus->generation = 0;
4312 mtx_lock(&xsoftc.xpt_topo_lock);
4313 old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4314 while (old_bus != NULL
4315 && old_bus->path_id < new_bus->path_id)
4316 old_bus = TAILQ_NEXT(old_bus, links);
4317 if (old_bus != NULL)
4318 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4319 else
4320 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
4321 xsoftc.bus_generation++;
4322 mtx_unlock(&xsoftc.xpt_topo_lock);
4323
4324 /* Notify interested parties */
4325 if (sim->path_id != CAM_XPT_PATH_ID) {
4326 struct cam_path path;
4327
4328 xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4329 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4330 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4331 cpi.ccb_h.func_code = XPT_PATH_INQ;
4332 xpt_action((union ccb *)&cpi);
4333 xpt_async(AC_PATH_REGISTERED, &path, &cpi);
4334 xpt_release_path(&path);
4335 }
4336 return (CAM_SUCCESS);
4337}
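/*
 * Rough sketch of the typical SIM attach sequence (names such as foo_action,
 * foo_poll and softc are placeholders; the exact prototype of cam_sim_alloc()
 * is in <cam/cam_sim.h>):
 *
 *	devq = cam_simq_alloc(max_transactions);
 *	sim = cam_sim_alloc(foo_action, foo_poll, "foo", softc, unit,
 *			    &softc->mtx, max_dev, max_tagged, devq);
 *	if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS)
 *		handle the failure;
 */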
4338
4339int32_t
4340xpt_bus_deregister(path_id_t pathid)
4341{
4342 struct cam_path bus_path;
4343 cam_status status;
4344
4345 status = xpt_compile_path(&bus_path, NULL, pathid,
4346 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4347 if (status != CAM_REQ_CMP)
4348 return (status);
4349
4350 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4351 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4352
4353 /* Release the reference count held while registered. */
4354 xpt_release_bus(bus_path.bus);
4355 xpt_release_path(&bus_path);
4356
4357 return (CAM_REQ_CMP);
4358}
4359
4360static path_id_t
4361xptnextfreepathid(void)
4362{
4363 struct cam_eb *bus;
4364 path_id_t pathid;
4365 const char *strval;
4366
4367 pathid = 0;
4368 mtx_lock(&xsoftc.xpt_topo_lock);
4369 bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4370retry:
4371 /* Find an unoccupied pathid */
4372 while (bus != NULL && bus->path_id <= pathid) {
4373 if (bus->path_id == pathid)
4374 pathid++;
4375 bus = TAILQ_NEXT(bus, links);
4376 }
4377 mtx_unlock(&xsoftc.xpt_topo_lock);
4378
4379 /*
4380 * Ensure that this pathid is not reserved for
4381 * a bus that may be registered in the future.
4382 */
4383 if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4384 ++pathid;
4385 /* Start the search over */
4386 mtx_lock(&xsoftc.xpt_topo_lock);
4387 goto retry;
4388 }
4389 return (pathid);
4390}
4391
4392static path_id_t
4393xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4394{
4395 path_id_t pathid;
4396 int i, dunit, val;
4397 char buf[32];
4398 const char *dname;
4399
4400 pathid = CAM_XPT_PATH_ID;
4401 snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4402 i = 0;
4403 while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4404 if (strcmp(dname, "scbus")) {
4405 /* Avoid a bit of foot shooting. */
4406 continue;
4407 }
4408 if (dunit < 0) /* unwired?! */
4409 continue;
4410 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4411 if (sim_bus == val) {
4412 pathid = dunit;
4413 break;
4414 }
4415 } else if (sim_bus == 0) {
4416 /* Unspecified matches bus 0 */
4417 pathid = dunit;
4418 break;
4419 } else {
4420 printf("Ambiguous scbus configuration for %s%d "
4421 "bus %d, cannot wire down. The kernel "
4422 "config entry for scbus%d should "
4423 "specify a controller bus.\n"
4424 "Scbus will be assigned dynamically.\n",
4425 sim_name, sim_unit, sim_bus, dunit);
4426 break;
4427 }
4428 }
4429
4430 if (pathid == CAM_XPT_PATH_ID)
4431 pathid = xptnextfreepathid();
4432 return (pathid);
4433}
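/*
 * Wiring example (hypothetical hints): the following loader hints would make
 * xptpathid() return 0 for bus 0 of ahc0 instead of handing out the next
 * free path id dynamically:
 *
 *	hint.scbus.0.at="ahc0"
 *	hint.scbus.0.bus="0"
 */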
4434
4435void
4436xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4437{
4438 struct cam_eb *bus;
4439 struct cam_et *target, *next_target;
4440 struct cam_ed *device, *next_device;
4441
4442 mtx_assert(path->bus->sim->mtx, MA_OWNED);
4443
4444 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4445
4446 /*
4447 * Most async events come from a CAM interrupt context. In
4448 * a few cases, the error recovery code at the peripheral layer,
4449 * which may run from our SWI or a process context, may signal
4450 * deferred events with a call to xpt_async.
4451 */
4452
4453 bus = path->bus;
4454
4455 if (async_code == AC_BUS_RESET) {
4456 /* Update our notion of when the last reset occurred */
4457 microtime(&bus->last_reset);
4458 }
4459
4460 for (target = TAILQ_FIRST(&bus->et_entries);
4461 target != NULL;
4462 target = next_target) {
4463
4464 next_target = TAILQ_NEXT(target, links);
4465
4466 if (path->target != target
4467 && path->target->target_id != CAM_TARGET_WILDCARD
4468 && target->target_id != CAM_TARGET_WILDCARD)
4469 continue;
4470
4471 if (async_code == AC_SENT_BDR) {
4472 /* Update our notion of when the last reset occurred */
4473 microtime(&path->target->last_reset);
4474 }
4475
4476 for (device = TAILQ_FIRST(&target->ed_entries);
4477 device != NULL;
4478 device = next_device) {
4479
4480 next_device = TAILQ_NEXT(device, links);
4481
4482 if (path->device != device
4483 && path->device->lun_id != CAM_LUN_WILDCARD
4484 && device->lun_id != CAM_LUN_WILDCARD)
4485 continue;
4486
4487 xpt_dev_async(async_code, bus, target,
4488 device, async_arg);
4489
4490 xpt_async_bcast(&device->asyncs, async_code,
4491 path, async_arg);
4492 }
4493 }
4494
4495 /*
4496 * If this wasn't a fully wildcarded async, tell all
4497 * clients that want all async events.
4498 */
4499 if (bus != xpt_periph->path->bus)
4500 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4501 path, async_arg);
4502}
4503
4504static void
4505xpt_async_bcast(struct async_list *async_head,
4506 u_int32_t async_code,
4507 struct cam_path *path, void *async_arg)
4508{
4509 struct async_node *cur_entry;
4510
4511 cur_entry = SLIST_FIRST(async_head);
4512 while (cur_entry != NULL) {
4513 struct async_node *next_entry;
4514 /*
4515 * Grab the next list entry before we call the current
4516 * entry's callback. This is because the callback function
4517 * can delete its async callback entry.
4518 */
4519 next_entry = SLIST_NEXT(cur_entry, links);
4520 if ((cur_entry->event_enable & async_code) != 0)
4521 cur_entry->callback(cur_entry->callback_arg,
4522 async_code, path,
4523 async_arg);
4524 cur_entry = next_entry;
4525 }
4526}
4527
4528/*
4529 * Handle any per-device event notifications that require action by the XPT.
4530 */
4531static void
4532xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
4533 struct cam_ed *device, void *async_arg)
4534{
4535 cam_status status;
4536 struct cam_path newpath;
4537
4538 /*
4539 * We only need to handle events for real devices.
4540 */
4541 if (target->target_id == CAM_TARGET_WILDCARD
4542 || device->lun_id == CAM_LUN_WILDCARD)
4543 return;
4544
4545 /*
4546 * We need our own path with wildcards expanded to
4547 * handle certain types of events.
4548 */
4549 if ((async_code == AC_SENT_BDR)
4550 || (async_code == AC_BUS_RESET)
4551 || (async_code == AC_INQ_CHANGED))
4552 status = xpt_compile_path(&newpath, NULL,
4553 bus->path_id,
4554 target->target_id,
4555 device->lun_id);
4556 else
4557 status = CAM_REQ_CMP_ERR;
4558
4559 if (status == CAM_REQ_CMP) {
4560
4561 /*
4562 * Allow transfer negotiation to occur in a
4563 * tag free environment.
4564 */
4565 if (async_code == AC_SENT_BDR
4566 || async_code == AC_BUS_RESET)
4567 xpt_toggle_tags(&newpath);
4568
4569 if (async_code == AC_INQ_CHANGED) {
4570 /*
4571			 * We've sent a start unit command, or
4572			 * something similar, to a device that
4573			 * may have caused its inquiry data to
4574			 * change, so we re-scan the device to
4575 * refresh the inquiry data for it.
4576 */
4577 xpt_scan_lun(newpath.periph, &newpath,
4578 CAM_EXPECT_INQ_CHANGE, NULL);
4579 }
4580 xpt_release_path(&newpath);
4581 } else if (async_code == AC_LOST_DEVICE) {
4582 device->flags |= CAM_DEV_UNCONFIGURED;
4583 } else if (async_code == AC_TRANSFER_NEG) {
4584 struct ccb_trans_settings *settings;
4585
4586 settings = (struct ccb_trans_settings *)async_arg;
4587 xpt_set_transfer_settings(settings, device,
4588 /*async_update*/TRUE);
4589 }
4590}
4591
4592u_int32_t
4593xpt_freeze_devq(struct cam_path *path, u_int count)
4594{
4595 struct ccb_hdr *ccbh;
4596
4597 mtx_assert(path->bus->sim->mtx, MA_OWNED);
4598
4599 path->device->qfrozen_cnt += count;
4600
4601 /*
4602 * Mark the last CCB in the queue as needing
4603 * to be requeued if the driver hasn't
4604	 * changed its state yet. This fixes a race
4605	 * where a ccb is just about to be queued to
4606	 * a controller driver when its interrupt routine
4607	 * freezes the queue. To completely close the
4608	 * hole, controller drivers must check to see
4609 * if a ccb's status is still CAM_REQ_INPROG
4610 * just before they queue
4611 * the CCB. See ahc_action/ahc_freeze_devq for
4612 * an example.
4613 */
4614 ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4615 if (ccbh && ccbh->status == CAM_REQ_INPROG)
4616 ccbh->status = CAM_REQUEUE_REQ;
4617 return (path->device->qfrozen_cnt);
4618}
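/*
 * Note: queue freezes are counted; each xpt_freeze_devq() must eventually be
 * balanced by xpt_release_devq() releasing the same count (or by one of the
 * RELSIM_* mechanisms handled in XPT_REL_SIMQ above) before the device queue
 * runs again.
 */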
4619
4620u_int32_t
4621xpt_freeze_simq(struct cam_sim *sim, u_int count)
4622{
4623 mtx_assert(sim->mtx, MA_OWNED);
4624
4625 sim->devq->send_queue.qfrozen_cnt += count;
4626 if (sim->devq->active_dev != NULL) {
4627 struct ccb_hdr *ccbh;
4628
4629 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4630 ccb_hdr_tailq);
4631 if (ccbh && ccbh->status == CAM_REQ_INPROG)
4632 ccbh->status = CAM_REQUEUE_REQ;
4633 }
4634 return (sim->devq->send_queue.qfrozen_cnt);
4635}
4636
4637static void
4638xpt_release_devq_timeout(void *arg)
4639{
4640 struct cam_ed *device;
4641
4642 device = (struct cam_ed *)arg;
4643
4644 xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4645}
4646
4647void
4648xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4649{
4650 mtx_assert(path->bus->sim->mtx, MA_OWNED);
4651
4652 xpt_release_devq_device(path->device, count, run_queue);
4653}
4654
4655static void
4656xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4657{
4658 int rundevq;
4659
4660 rundevq = 0;
4661 if (dev->qfrozen_cnt > 0) {
4662
4663 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4664 dev->qfrozen_cnt -= count;
4665 if (dev->qfrozen_cnt == 0) {
4666
4667 /*
4668 * No longer need to wait for a successful
4669 * command completion.
4670 */
4671 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4672
4673 /*
4674 * Remove any timeouts that might be scheduled
4675 * to release this queue.
4676 */
4677 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4678 callout_stop(&dev->callout);
4679 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4680 }
4681
4682 /*
4683 * Now that we are unfrozen schedule the
4684 * device so any pending transactions are
4685 * run.
4686 */
4687 if ((dev->ccbq.queue.entries > 0)
4688 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4689 && (run_queue != 0)) {
4690 rundevq = 1;
4691 }
4692 }
4693 }
4694 if (rundevq != 0)
4695 xpt_run_dev_sendq(dev->target->bus);
4696}
4697
4698void
4699xpt_release_simq(struct cam_sim *sim, int run_queue)
4700{
4701 struct camq *sendq;
4702
4703 mtx_assert(sim->mtx, MA_OWNED);
4704
4705 sendq = &(sim->devq->send_queue);
4706 if (sendq->qfrozen_cnt > 0) {
4707
4708 sendq->qfrozen_cnt--;
4709 if (sendq->qfrozen_cnt == 0) {
4710 struct cam_eb *bus;
4711
4712 /*
4713 * If there is a timeout scheduled to release this
4714 * sim queue, remove it. The queue frozen count is
4715 * already at 0.
4716 */
4717 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4718 callout_stop(&sim->callout);
4719 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4720 }
4721 bus = xpt_find_bus(sim->path_id);
4722
4723 if (run_queue) {
4724 /*
4725 * Now that we are unfrozen run the send queue.
4726 */
4727 xpt_run_dev_sendq(bus);
4728 }
4729 xpt_release_bus(bus);
4730 }
4731 }
4732}
4733
4734/*
4735 * XXX Appears to be unused.
4736 */
4737static void
4738xpt_release_simq_timeout(void *arg)
4739{
4740 struct cam_sim *sim;
4741
4742 sim = (struct cam_sim *)arg;
4743 xpt_release_simq(sim, /* run_queue */ TRUE);
4744}
4745
4746void
4747xpt_done(union ccb *done_ccb)
4748{
4749 struct cam_sim *sim;
4750
4751 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4752 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4753 /*
4754		 * Queue up the request for handling by our SWI handler for
4755		 * any of the "non-immediate" types of ccbs.
4756 */
4757 sim = done_ccb->ccb_h.path->bus->sim;
4758 switch (done_ccb->ccb_h.path->periph->type) {
4759 case CAM_PERIPH_BIO:
4760 TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
4761 sim_links.tqe);
4762 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4763 if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
4764 mtx_lock(&cam_simq_lock);
4765 TAILQ_INSERT_TAIL(&cam_simq, sim,
4766 links);
4767 sim->flags |= CAM_SIM_ON_DONEQ;
4768 mtx_unlock(&cam_simq_lock);
4769 }
4770 if ((done_ccb->ccb_h.path->periph->flags &
4771 CAM_PERIPH_POLLED) == 0)
4772 swi_sched(cambio_ih, 0);
4773 break;
4774 default:
4775 panic("unknown periph type %d",
4776 done_ccb->ccb_h.path->periph->type);
4777 }
4778 }
4779}
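/*
 * Note: for queued CCB types the actual completion work happens later in the
 * cambio software interrupt (see camisr_runqueue()); xpt_done() itself only
 * enqueues the CCB and schedules the SWI, so SIM drivers may call it from
 * interrupt context.
 */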
4780
4781union ccb *
4782xpt_alloc_ccb()
4783{
4784 union ccb *new_ccb;
4785
4786 new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_WAITOK);
4787 return (new_ccb);
4788}
4789
4790union ccb *
4791xpt_alloc_ccb_nowait()
4792{
4793 union ccb *new_ccb;
4794
4795 new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_NOWAIT);
4796 return (new_ccb);
4797}
4798
4799void
4800xpt_free_ccb(union ccb *free_ccb)
4801{
4802 free(free_ccb, M_CAMXPT);
4803}
4804
4805
4806
4807/* Private XPT functions */
4808
4809/*
4810 * Get a CAM control block for the caller. Charge the structure to the device
4811 * referenced by the path.  If this device has no 'credits' then the
4812 * device already has the maximum number of outstanding operations under way
4813 * and we return NULL. If we don't have sufficient resources to allocate more
4814 * ccbs, we also return NULL.
4815 */
4816static union ccb *
4817xpt_get_ccb(struct cam_ed *device)
4818{
4819 union ccb *new_ccb;
4820 struct cam_sim *sim;
4821
4822 sim = device->sim;
4823 if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
4824 new_ccb = xpt_alloc_ccb_nowait();
4825 if (new_ccb == NULL) {
4826 return (NULL);
4827 }
4828 if ((sim->flags & CAM_SIM_MPSAFE) == 0)
4829 callout_handle_init(&new_ccb->ccb_h.timeout_ch);
4830 SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
4831 xpt_links.sle);
4832 sim->ccb_count++;
4833 }
4834 cam_ccbq_take_opening(&device->ccbq);
4835 SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
4836 return (new_ccb);
4837}
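/*
 * Note: the allocation above uses M_NOWAIT, so xpt_get_ccb() may return NULL
 * under memory pressure; xpt_run_dev_allocq() handles this by breaking out of
 * its dispatch loop (see the XXX note there).
 */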
4838
4839static void
4840xpt_release_bus(struct cam_eb *bus)
4841{
4842
4843 if ((--bus->refcount == 0)
4844 && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
4845 mtx_lock(&xsoftc.xpt_topo_lock);
4846 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
4847 xsoftc.bus_generation++;
4848 mtx_unlock(&xsoftc.xpt_topo_lock);
4850 cam_sim_release(bus->sim);
4849 free(bus, M_CAMXPT);
4850 }
4851}
4852
4853static struct cam_et *
4854xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4855{
4856 struct cam_et *target;
4857
4858 target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT);
4859 if (target != NULL) {
4860 struct cam_et *cur_target;
4861
4862 TAILQ_INIT(&target->ed_entries);
4863 target->bus = bus;
4864 target->target_id = target_id;
4865 target->refcount = 1;
4866 target->generation = 0;
4867 timevalclear(&target->last_reset);
4868 /*
4869 * Hold a reference to our parent bus so it
4870 * will not go away before we do.
4871 */
4872 bus->refcount++;
4873
4874 /* Insertion sort into our bus's target list */
4875 cur_target = TAILQ_FIRST(&bus->et_entries);
4876 while (cur_target != NULL && cur_target->target_id < target_id)
4877 cur_target = TAILQ_NEXT(cur_target, links);
4878
4879 if (cur_target != NULL) {
4880 TAILQ_INSERT_BEFORE(cur_target, target, links);
4881 } else {
4882 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4883 }
4884 bus->generation++;
4885 }
4886 return (target);
4887}
4888
4889static void
4890xpt_release_target(struct cam_eb *bus, struct cam_et *target)
4891{
4892
4893 if ((--target->refcount == 0)
4894 && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
4895 TAILQ_REMOVE(&bus->et_entries, target, links);
4896 bus->generation++;
4897 free(target, M_CAMXPT);
4898 xpt_release_bus(bus);
4899 }
4900}
4901
4902static struct cam_ed *
4903xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4904{
4905 struct cam_path path;
4906 struct cam_ed *device;
4907 struct cam_devq *devq;
4908 cam_status status;
4909
4910 /* Make space for us in the device queue on our bus */
4911 devq = bus->sim->devq;
4912 status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
4913
4914 if (status != CAM_REQ_CMP) {
4915 device = NULL;
4916 } else {
4917 device = (struct cam_ed *)malloc(sizeof(*device),
4918 M_CAMXPT, M_NOWAIT);
4919 }
4920
4921 if (device != NULL) {
4922 struct cam_ed *cur_device;
4923
4924 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
4925 device->alloc_ccb_entry.device = device;
4926 cam_init_pinfo(&device->send_ccb_entry.pinfo);
4927 device->send_ccb_entry.device = device;
4928 device->target = target;
4929 device->lun_id = lun_id;
4930 device->sim = bus->sim;
4931 /* Initialize our queues */
4932 if (camq_init(&device->drvq, 0) != 0) {
4933 free(device, M_CAMXPT);
4934 return (NULL);
4935 }
4936 if (cam_ccbq_init(&device->ccbq,
4937 bus->sim->max_dev_openings) != 0) {
4938 camq_fini(&device->drvq);
4939 free(device, M_CAMXPT);
4940 return (NULL);
4941 }
4942 SLIST_INIT(&device->asyncs);
4943 SLIST_INIT(&device->periphs);
4944 device->generation = 0;
4945 device->owner = NULL;
4946 /*
4947 * Take the default quirk entry until we have inquiry
4948 * data and can determine a better quirk to use.
4949 */
4950 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
4951 bzero(&device->inq_data, sizeof(device->inq_data));
4952 device->inq_flags = 0;
4953 device->queue_flags = 0;
4954 device->serial_num = NULL;
4955 device->serial_num_len = 0;
4956 device->qfrozen_cnt = 0;
4957 device->flags = CAM_DEV_UNCONFIGURED;
4958 device->tag_delay_count = 0;
4959 device->tag_saved_openings = 0;
4960 device->refcount = 1;
4961 if (bus->sim->flags & CAM_SIM_MPSAFE)
4962 callout_init_mtx(&device->callout, bus->sim->mtx, 0);
4963 else
4964 callout_init_mtx(&device->callout, &Giant, 0);
4965
4966 /*
4967 * Hold a reference to our parent target so it
4968 * will not go away before we do.
4969 */
4970 target->refcount++;
4971
4972 /*
4973 * XXX should be limited by number of CCBs this bus can
4974 * do.
4975 */
4976 bus->sim->max_ccbs += device->ccbq.devq_openings;
4977 /* Insertion sort into our target's device list */
4978 cur_device = TAILQ_FIRST(&target->ed_entries);
4979 while (cur_device != NULL && cur_device->lun_id < lun_id)
4980 cur_device = TAILQ_NEXT(cur_device, links);
4981 if (cur_device != NULL) {
4982 TAILQ_INSERT_BEFORE(cur_device, device, links);
4983 } else {
4984 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4985 }
4986 target->generation++;
4987 if (lun_id != CAM_LUN_WILDCARD) {
4988 xpt_compile_path(&path,
4989 NULL,
4990 bus->path_id,
4991 target->target_id,
4992 lun_id);
4993 xpt_devise_transport(&path);
4994 xpt_release_path(&path);
4995 }
4996 }
4997 return (device);
4998}
4999
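/*
 * Drop a reference on a device.  The structure is only torn down once the
 * last reference goes away and the device has been marked unconfigured;
 * teardown returns its slot in the devq, frees its queues, and drops the
 * reference held on the parent target.
 */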
5000static void
5001xpt_release_device(struct cam_eb *bus, struct cam_et *target,
5002 struct cam_ed *device)
5003{
5004
5005 if ((--device->refcount == 0)
5006 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
5007 struct cam_devq *devq;
5008
5009 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
5010 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
5011 panic("Removing device while still queued for ccbs");
5012
5013 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
5014 callout_stop(&device->callout);
5015
5016 TAILQ_REMOVE(&target->ed_entries, device,links);
5017 target->generation++;
5018 bus->sim->max_ccbs -= device->ccbq.devq_openings;
5019 /* Release our slot in the devq */
5020 devq = bus->sim->devq;
5021 cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
5022 camq_fini(&device->drvq);
5023 camq_fini(&device->ccbq.queue);
5024 free(device, M_CAMXPT);
5025 xpt_release_target(bus, target);
5026 }
5027}
5028
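/*
 * Resize the ccb queue of the device behind 'path' to 'newopenings' entries
 * and adjust the SIM-wide ccb limit by the difference.  If the queue shrank,
 * flag the device so the resize completes once outstanding ccbs drain.
 */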
5029static u_int32_t
5030xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
5031{
5032 int diff;
5033 int result;
5034 struct cam_ed *dev;
5035
5036 dev = path->device;
5037
5038 diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
5039 result = cam_ccbq_resize(&dev->ccbq, newopenings);
5040 if (result == CAM_REQ_CMP && (diff < 0)) {
5041 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
5042 }
5043 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5044 || (dev->inq_flags & SID_CmdQue) != 0)
5045 dev->tag_saved_openings = newopenings;
5046 /* Adjust the global limit */
5047 dev->sim->max_ccbs += diff;
5048 return (result);
5049}
5050
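/*
 * Look up a bus by path id.  A successful lookup takes an additional
 * reference on the bus, which the caller must eventually release.  The
 * target and device lookups below behave the same way.
 */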
5051static struct cam_eb *
5052xpt_find_bus(path_id_t path_id)
5053{
5054 struct cam_eb *bus;
5055
5056 mtx_lock(&xsoftc.xpt_topo_lock);
5057 for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
5058 bus != NULL;
5059 bus = TAILQ_NEXT(bus, links)) {
5060 if (bus->path_id == path_id) {
5061 bus->refcount++;
5062 break;
5063 }
5064 }
5065 mtx_unlock(&xsoftc.xpt_topo_lock);
5066 return (bus);
5067}
5068
5069static struct cam_et *
5070xpt_find_target(struct cam_eb *bus, target_id_t target_id)
5071{
5072 struct cam_et *target;
5073
5074 for (target = TAILQ_FIRST(&bus->et_entries);
5075 target != NULL;
5076 target = TAILQ_NEXT(target, links)) {
5077 if (target->target_id == target_id) {
5078 target->refcount++;
5079 break;
5080 }
5081 }
5082 return (target);
5083}
5084
5085static struct cam_ed *
5086xpt_find_device(struct cam_et *target, lun_id_t lun_id)
5087{
5088 struct cam_ed *device;
5089
5090 for (device = TAILQ_FIRST(&target->ed_entries);
5091 device != NULL;
5092 device = TAILQ_NEXT(device, links)) {
5093 if (device->lun_id == lun_id) {
5094 device->refcount++;
5095 break;
5096 }
5097 }
5098 return (device);
5099}
5100
5101typedef struct {
5102 union ccb *request_ccb;
5103 struct ccb_pathinq *cpi;
5104 int counter;
5105} xpt_scan_bus_info;
5106
5107/*
5108 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
5109 * As the scan progresses, xpt_scan_bus is used as the
5110 * completion callback function.
5111 */
5112static void
5113xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
5114{
5115 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5116 ("xpt_scan_bus\n"));
5117 switch (request_ccb->ccb_h.func_code) {
5118 case XPT_SCAN_BUS:
5119 {
5120 xpt_scan_bus_info *scan_info;
5121 union ccb *work_ccb;
5122 struct cam_path *path;
5123 u_int i;
5124 u_int max_target;
5125 u_int initiator_id;
5126
5127 /* Find out the characteristics of the bus */
5128 work_ccb = xpt_alloc_ccb_nowait();
5129 if (work_ccb == NULL) {
5130 request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
5131 xpt_done(request_ccb);
5132 return;
5133 }
5134 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
5135 request_ccb->ccb_h.pinfo.priority);
5136 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
5137 xpt_action(work_ccb);
5138 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
5139 request_ccb->ccb_h.status = work_ccb->ccb_h.status;
5140 xpt_free_ccb(work_ccb);
5141 xpt_done(request_ccb);
5142 return;
5143 }
5144
5145 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5146 /*
5147 * Can't scan the bus on an adapter that
5148 * cannot perform the initiator role.
5149 */
5150 request_ccb->ccb_h.status = CAM_REQ_CMP;
5151 xpt_free_ccb(work_ccb);
5152 xpt_done(request_ccb);
5153 return;
5154 }
5155
5156 /* Save some state for use while we probe for devices */
5157 scan_info = (xpt_scan_bus_info *)
5158 malloc(sizeof(xpt_scan_bus_info), M_CAMXPT, M_NOWAIT);
5159 scan_info->request_ccb = request_ccb;
5160 scan_info->cpi = &work_ccb->cpi;
5161
5162 /* Cache on our stack so we can work asynchronously */
5163 max_target = scan_info->cpi->max_target;
5164 initiator_id = scan_info->cpi->initiator_id;
5165
5166
5167 /*
5168 * We can scan all targets in parallel, or do it sequentially.
5169 */
5170 if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5171 max_target = 0;
5172 scan_info->counter = 0;
5173 } else {
5174 scan_info->counter = scan_info->cpi->max_target + 1;
5175 if (scan_info->cpi->initiator_id < scan_info->counter) {
5176 scan_info->counter--;
5177 }
5178 }
5179
5180 for (i = 0; i <= max_target; i++) {
5181 cam_status status;
5182 if (i == initiator_id)
5183 continue;
5184
5185 status = xpt_create_path(&path, xpt_periph,
5186 request_ccb->ccb_h.path_id,
5187 i, 0);
5188 if (status != CAM_REQ_CMP) {
5189 printf("xpt_scan_bus: xpt_create_path failed"
5190 " with status %#x, bus scan halted\n",
5191 status);
5192 free(scan_info, M_CAMXPT);
5193 request_ccb->ccb_h.status = status;
5194 xpt_free_ccb(work_ccb);
5195 xpt_done(request_ccb);
5196 break;
5197 }
5198 work_ccb = xpt_alloc_ccb_nowait();
5199 if (work_ccb == NULL) {
5200 free(scan_info, M_CAMXPT);
5201 xpt_free_path(path);
5202 request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
5203 xpt_done(request_ccb);
5204 break;
5205 }
5206 xpt_setup_ccb(&work_ccb->ccb_h, path,
5207 request_ccb->ccb_h.pinfo.priority);
5208 work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5209 work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5210 work_ccb->ccb_h.ppriv_ptr0 = scan_info;
5211 work_ccb->crcn.flags = request_ccb->crcn.flags;
5212 xpt_action(work_ccb);
5213 }
5214 break;
5215 }
5216 case XPT_SCAN_LUN:
5217 {
5218 cam_status status;
5219 struct cam_path *path;
5220 xpt_scan_bus_info *scan_info;
5221 path_id_t path_id;
5222 target_id_t target_id;
5223 lun_id_t lun_id;
5224
5225 /* Reuse the same CCB to query if a device was really found */
5226 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
5227 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
5228 request_ccb->ccb_h.pinfo.priority);
5229 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5230
5231 path_id = request_ccb->ccb_h.path_id;
5232 target_id = request_ccb->ccb_h.target_id;
5233 lun_id = request_ccb->ccb_h.target_lun;
5234 xpt_action(request_ccb);
5235
5236 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
5237 struct cam_ed *device;
5238 struct cam_et *target;
5239 int phl;
5240
5241 /*
5242 * If we already probed lun 0 successfully, or
5243 * we have additional configured luns on this
5244 * target that might have "gone away", go onto
5245 * the next lun.
5246 */
5247 target = request_ccb->ccb_h.path->target;
5248 /*
5249 * We may touch devices that we don't
5250 * hold references to, so ensure they
5251 * don't disappear out from under us.
5252 * The target above is referenced by the
5253 * path in the request ccb.
5254 */
5255 phl = 0;
5256 device = TAILQ_FIRST(&target->ed_entries);
5257 if (device != NULL) {
5258 phl = CAN_SRCH_HI_SPARSE(device);
5259 if (device->lun_id == 0)
5260 device = TAILQ_NEXT(device, links);
5261 }
5262 if ((lun_id != 0) || (device != NULL)) {
5263 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
5264 lun_id++;
5265 }
5266 } else {
5267 struct cam_ed *device;
5268
5269 device = request_ccb->ccb_h.path->device;
5270
5271 if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
5272 /* Try the next lun */
5273 if (lun_id < (CAM_SCSI2_MAXLUN-1)
5274 || CAN_SRCH_HI_DENSE(device))
5275 lun_id++;
5276 }
5277 }
5278
5279 /*
5280 * Free the current request path- we're done with it.
5281 */
5282 xpt_free_path(request_ccb->ccb_h.path);
5283
5284 /*
5285 * Check to see if we scan any further luns.
5286 */
5287 if (lun_id == request_ccb->ccb_h.target_lun
5288 || lun_id > scan_info->cpi->max_lun) {
5289 int done;
5290
5291 hop_again:
5292 done = 0;
5293 if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5294 scan_info->counter++;
5295 if (scan_info->counter ==
5296 scan_info->cpi->initiator_id) {
5297 scan_info->counter++;
5298 }
5299 if (scan_info->counter >=
5300 scan_info->cpi->max_target+1) {
5301 done = 1;
5302 }
5303 } else {
5304 scan_info->counter--;
5305 if (scan_info->counter == 0) {
5306 done = 1;
5307 }
5308 }
5309 if (done) {
5310 xpt_free_ccb(request_ccb);
5311 xpt_free_ccb((union ccb *)scan_info->cpi);
5312 request_ccb = scan_info->request_ccb;
5313 free(scan_info, M_CAMXPT);
5314 request_ccb->ccb_h.status = CAM_REQ_CMP;
5315 xpt_done(request_ccb);
5316 break;
5317 }
5318
5319 if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
5320 break;
5321 }
5322 status = xpt_create_path(&path, xpt_periph,
5323 scan_info->request_ccb->ccb_h.path_id,
5324 scan_info->counter, 0);
5325 if (status != CAM_REQ_CMP) {
5326 printf("xpt_scan_bus: xpt_create_path failed"
5327 " with status %#x, bus scan halted\n",
5328 status);
5329 xpt_free_ccb(request_ccb);
5330 xpt_free_ccb((union ccb *)scan_info->cpi);
5331 request_ccb = scan_info->request_ccb;
5332 free(scan_info, M_CAMXPT);
5333 request_ccb->ccb_h.status = status;
5334 xpt_done(request_ccb);
5335 break;
5336 }
5337 xpt_setup_ccb(&request_ccb->ccb_h, path,
5338 request_ccb->ccb_h.pinfo.priority);
5339 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5340 request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5341 request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5342 request_ccb->crcn.flags =
5343 scan_info->request_ccb->crcn.flags;
5344 } else {
5345 status = xpt_create_path(&path, xpt_periph,
5346 path_id, target_id, lun_id);
5347 if (status != CAM_REQ_CMP) {
5348 printf("xpt_scan_bus: xpt_create_path failed "
5349 "with status %#x, halting LUN scan\n",
5350 status);
5351 goto hop_again;
5352 }
5353 xpt_setup_ccb(&request_ccb->ccb_h, path,
5354 request_ccb->ccb_h.pinfo.priority);
5355 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5356 request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5357 request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5358 request_ccb->crcn.flags =
5359 scan_info->request_ccb->crcn.flags;
5360 }
5361 xpt_action(request_ccb);
5362 break;
5363 }
5364 default:
5365 break;
5366 }
5367}
5368
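/*
 * State machine for the "probe" peripheral that interrogates a newly found
 * lun: TUR, standard and full INQUIRY, control mode page, VPD serial number
 * pages, and finally the TUR/INQUIRY steps used for basic domain validation.
 */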
5369typedef enum {
5370 PROBE_TUR,
5371 PROBE_INQUIRY, /* this counts as DV0 for Basic Domain Validation */
5372 PROBE_FULL_INQUIRY,
5373 PROBE_MODE_SENSE,
5374 PROBE_SERIAL_NUM_0,
5375 PROBE_SERIAL_NUM_1,
5376 PROBE_TUR_FOR_NEGOTIATION,
5377 PROBE_INQUIRY_BASIC_DV1,
5378 PROBE_INQUIRY_BASIC_DV2,
5379 PROBE_DV_EXIT
5380} probe_action;
5381
5382typedef enum {
5383 PROBE_INQUIRY_CKSUM = 0x01,
5384 PROBE_SERIAL_CKSUM = 0x02,
5385 PROBE_NO_ANNOUNCE = 0x04
5386} probe_flags;
5387
5388typedef struct {
5389 TAILQ_HEAD(, ccb_hdr) request_ccbs;
5390 probe_action action;
5391 union ccb saved_ccb;
5392 probe_flags flags;
5393 MD5_CTX context;
5394 u_int8_t digest[16];
5395} probe_softc;
5396
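/*
 * Kick off a probe of a single lun.  If no request ccb was supplied, one is
 * allocated along with a private path.  The request is then queued to an
 * existing "probe" periph instance for this path, or a new one is created
 * via cam_periph_alloc().
 */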
5397static void
5398xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5399 cam_flags flags, union ccb *request_ccb)
5400{
5401 struct ccb_pathinq cpi;
5402 cam_status status;
5403 struct cam_path *new_path;
5404 struct cam_periph *old_periph;
5405
5406 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5407 ("xpt_scan_lun\n"));
5408
5409 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5410 cpi.ccb_h.func_code = XPT_PATH_INQ;
5411 xpt_action((union ccb *)&cpi);
5412
5413 if (cpi.ccb_h.status != CAM_REQ_CMP) {
5414 if (request_ccb != NULL) {
5415 request_ccb->ccb_h.status = cpi.ccb_h.status;
5416 xpt_done(request_ccb);
5417 }
5418 return;
5419 }
5420
5421 if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5422 /*
5423 * Can't scan the bus on an adapter that
5424 * cannot perform the initiator role.
5425 */
5426 if (request_ccb != NULL) {
5427 request_ccb->ccb_h.status = CAM_REQ_CMP;
5428 xpt_done(request_ccb);
5429 }
5430 return;
5431 }
5432
5433 if (request_ccb == NULL) {
5434 request_ccb = malloc(sizeof(union ccb), M_CAMXPT, M_NOWAIT);
5435 if (request_ccb == NULL) {
5436 xpt_print(path, "xpt_scan_lun: can't allocate CCB, "
5437 "can't continue\n");
5438 return;
5439 }
5440 new_path = malloc(sizeof(*new_path), M_CAMXPT, M_NOWAIT);
5441 if (new_path == NULL) {
5442 xpt_print(path, "xpt_scan_lun: can't allocate path, "
5443 "can't continue\n");
5444 free(request_ccb, M_CAMXPT);
5445 return;
5446 }
5447 status = xpt_compile_path(new_path, xpt_periph,
5448 path->bus->path_id,
5449 path->target->target_id,
5450 path->device->lun_id);
5451
5452 if (status != CAM_REQ_CMP) {
5453 xpt_print(path, "xpt_scan_lun: can't compile path, "
5454 "can't continue\n");
5455 free(request_ccb, M_CAMXPT);
5456 free(new_path, M_CAMXPT);
5457 return;
5458 }
5459 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5460 request_ccb->ccb_h.cbfcnp = xptscandone;
5461 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5462 request_ccb->crcn.flags = flags;
5463 }
5464
5465 if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5466 probe_softc *softc;
5467
5468 softc = (probe_softc *)old_periph->softc;
5469 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5470 periph_links.tqe);
5471 } else {
5472 status = cam_periph_alloc(proberegister, NULL, probecleanup,
5473 probestart, "probe",
5474 CAM_PERIPH_BIO,
5475 request_ccb->ccb_h.path, NULL, 0,
5476 request_ccb);
5477
5478 if (status != CAM_REQ_CMP) {
5479 xpt_print(path, "xpt_scan_lun: cam_periph_alloc "
5480 "returned an error, can't continue probe\n");
5481 request_ccb->ccb_h.status = status;
5482 xpt_done(request_ccb);
5483 }
5484 }
5485}
5486
5487static void
5488xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5489{
5490 xpt_release_path(done_ccb->ccb_h.path);
5491 free(done_ccb->ccb_h.path, M_CAMXPT);
5492 free(done_ccb, M_CAMXPT);
5493}
5494
5495static cam_status
5496proberegister(struct cam_periph *periph, void *arg)
5497{
5498 union ccb *request_ccb; /* CCB representing the probe request */
5499 cam_status status;
5500 probe_softc *softc;
5501
5502 request_ccb = (union ccb *)arg;
5503 if (periph == NULL) {
5504 printf("proberegister: periph was NULL!!\n");
5505 return(CAM_REQ_CMP_ERR);
5506 }
5507
5508 if (request_ccb == NULL) {
5509 printf("proberegister: no probe CCB, "
5510 "can't register device\n");
5511 return(CAM_REQ_CMP_ERR);
5512 }
5513
5514 softc = (probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_NOWAIT);
5515
5516 if (softc == NULL) {
5517 printf("proberegister: Unable to probe new device. "
5518 "Unable to allocate softc\n");
5519 return(CAM_REQ_CMP_ERR);
5520 }
5521 TAILQ_INIT(&softc->request_ccbs);
5522 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5523 periph_links.tqe);
5524 softc->flags = 0;
5525 periph->softc = softc;
5526 status = cam_periph_acquire(periph);
5527 if (status != CAM_REQ_CMP) {
5528 return (status);
5529 }
5530
5531
5532 /*
5533 * Ensure we've waited at least a bus settle
5534 * delay before attempting to probe the device.
5535 * For HBAs that don't do bus resets, this won't make a difference.
5536 */
5537 cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5538 scsi_delay);
5539 probeschedule(periph);
5540 return(CAM_REQ_CMP);
5541}
5542
5543static void
5544probeschedule(struct cam_periph *periph)
5545{
5546 struct ccb_pathinq cpi;
5547 union ccb *ccb;
5548 probe_softc *softc;
5549
5550 softc = (probe_softc *)periph->softc;
5551 ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5552
5553 xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5554 cpi.ccb_h.func_code = XPT_PATH_INQ;
5555 xpt_action((union ccb *)&cpi);
5556
5557 /*
5558 * If a device has gone away and another device, or the same one,
5559 * is back in the same place, it should have a unit attention
5560 * condition pending. It will not report the unit attention in
5561 * response to an inquiry, which may leave invalid transfer
5562 * negotiations in effect. The TUR will reveal the unit attention
5563 * condition. Only send the TUR for lun 0, since some devices
5564 * will get confused by commands other than inquiry to non-existent
5565 * luns. If you think a device has gone away start your scan from
5566 * lun 0. This will ensure that any bogus transfer settings are
5567 * invalidated.
5568 *
5569 * If we haven't seen the device before and the controller supports
5570 * some kind of transfer negotiation, negotiate with the first
5571 * sent command if no bus reset was performed at startup. This
5572 * ensures that the device is not confused by transfer negotiation
5573 * settings left over by loader or BIOS action.
5574 */
5575 if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5576 && (ccb->ccb_h.target_lun == 0)) {
5577 softc->action = PROBE_TUR;
5578 } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5579 && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5580 proberequestdefaultnegotiation(periph);
5581 softc->action = PROBE_INQUIRY;
5582 } else {
5583 softc->action = PROBE_INQUIRY;
5584 }
5585
5586 if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5587 softc->flags |= PROBE_NO_ANNOUNCE;
5588 else
5589 softc->flags &= ~PROBE_NO_ANNOUNCE;
5590
5591 xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5592}
5593
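/*
 * Issue the SCSI command for the current probe state: TEST UNIT READY,
 * standard/full INQUIRY, MODE SENSE of the control page, or the VPD
 * supported-pages and unit-serial-number INQUIRY commands.
 */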
5594static void
5595probestart(struct cam_periph *periph, union ccb *start_ccb)
5596{
5597 /* Probe the device that our peripheral driver points to */
5598 struct ccb_scsiio *csio;
5599 probe_softc *softc;
5600
5601 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5602
5603 softc = (probe_softc *)periph->softc;
5604 csio = &start_ccb->csio;
5605
5606 switch (softc->action) {
5607 case PROBE_TUR:
5608 case PROBE_TUR_FOR_NEGOTIATION:
5609 case PROBE_DV_EXIT:
5610 {
5611 scsi_test_unit_ready(csio,
5612 /*retries*/4,
5613 probedone,
5614 MSG_SIMPLE_Q_TAG,
5615 SSD_FULL_SIZE,
5616 /*timeout*/60000);
5617 break;
5618 }
5619 case PROBE_INQUIRY:
5620 case PROBE_FULL_INQUIRY:
5621 case PROBE_INQUIRY_BASIC_DV1:
5622 case PROBE_INQUIRY_BASIC_DV2:
5623 {
5624 u_int inquiry_len;
5625 struct scsi_inquiry_data *inq_buf;
5626
5627 inq_buf = &periph->path->device->inq_data;
5628
5629 /*
5630 * If the device is currently configured, we calculate an
5631 * MD5 checksum of the inquiry data, and if the serial number
5632 * length is greater than 0, add the serial number data
5633 * into the checksum as well. Once the inquiry and the
5634 * serial number check finish, we attempt to figure out
5635 * whether we still have the same device.
5636 */
5637 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5638
5639 MD5Init(&softc->context);
5640 MD5Update(&softc->context, (unsigned char *)inq_buf,
5641 sizeof(struct scsi_inquiry_data));
5642 softc->flags |= PROBE_INQUIRY_CKSUM;
5643 if (periph->path->device->serial_num_len > 0) {
5644 MD5Update(&softc->context,
5645 periph->path->device->serial_num,
5646 periph->path->device->serial_num_len);
5647 softc->flags |= PROBE_SERIAL_CKSUM;
5648 }
5649 MD5Final(softc->digest, &softc->context);
5650 }
5651
5652 if (softc->action == PROBE_INQUIRY)
5653 inquiry_len = SHORT_INQUIRY_LENGTH;
5654 else
5655 inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf);
5656
5657 /*
5658 * Some parallel SCSI devices fail to send an
5659 * ignore wide residue message when dealing with
5660 * odd length inquiry requests. Round up to be
5661 * safe.
5662 */
5663 inquiry_len = roundup2(inquiry_len, 2);
5664
5665 if (softc->action == PROBE_INQUIRY_BASIC_DV1
5666 || softc->action == PROBE_INQUIRY_BASIC_DV2) {
5667 inq_buf = malloc(inquiry_len, M_CAMXPT, M_NOWAIT);
5668 }
5669 if (inq_buf == NULL) {
5670 xpt_print(periph->path, "malloc failure- skipping Basic "
5671 "Domain Validation\n");
5672 softc->action = PROBE_DV_EXIT;
5673 scsi_test_unit_ready(csio,
5674 /*retries*/4,
5675 probedone,
5676 MSG_SIMPLE_Q_TAG,
5677 SSD_FULL_SIZE,
5678 /*timeout*/60000);
5679 break;
5680 }
5681 scsi_inquiry(csio,
5682 /*retries*/4,
5683 probedone,
5684 MSG_SIMPLE_Q_TAG,
5685 (u_int8_t *)inq_buf,
5686 inquiry_len,
5687 /*evpd*/FALSE,
5688 /*page_code*/0,
5689 SSD_MIN_SIZE,
5690 /*timeout*/60 * 1000);
5691 break;
5692 }
5693 case PROBE_MODE_SENSE:
5694 {
5695 void *mode_buf;
5696 int mode_buf_len;
5697
5698 mode_buf_len = sizeof(struct scsi_mode_header_6)
5699 + sizeof(struct scsi_mode_blk_desc)
5700 + sizeof(struct scsi_control_page);
5701 mode_buf = malloc(mode_buf_len, M_CAMXPT, M_NOWAIT);
5702 if (mode_buf != NULL) {
5703 scsi_mode_sense(csio,
5704 /*retries*/4,
5705 probedone,
5706 MSG_SIMPLE_Q_TAG,
5707 /*dbd*/FALSE,
5708 SMS_PAGE_CTRL_CURRENT,
5709 SMS_CONTROL_MODE_PAGE,
5710 mode_buf,
5711 mode_buf_len,
5712 SSD_FULL_SIZE,
5713 /*timeout*/60000);
5714 break;
5715 }
5716 xpt_print(periph->path, "Unable to mode sense control page - "
5717 "malloc failure\n");
5718 softc->action = PROBE_SERIAL_NUM_0;
5719 }
5720 /* FALLTHROUGH */
5721 case PROBE_SERIAL_NUM_0:
5722 {
5723 struct scsi_vpd_supported_page_list *vpd_list = NULL;
5724 struct cam_ed *device;
5725
5726 device = periph->path->device;
5727 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) {
5728 vpd_list = malloc(sizeof(*vpd_list), M_CAMXPT,
5729 M_NOWAIT | M_ZERO);
5730 }
5731
5732 if (vpd_list != NULL) {
5733 scsi_inquiry(csio,
5734 /*retries*/4,
5735 probedone,
5736 MSG_SIMPLE_Q_TAG,
5737 (u_int8_t *)vpd_list,
5738 sizeof(*vpd_list),
5739 /*evpd*/TRUE,
5740 SVPD_SUPPORTED_PAGE_LIST,
5741 SSD_MIN_SIZE,
5742 /*timeout*/60 * 1000);
5743 break;
5744 }
5745 /*
5746 * We'll have to do without, let our probedone
5747 * routine finish up for us.
5748 */
5749 start_ccb->csio.data_ptr = NULL;
5750 probedone(periph, start_ccb);
5751 return;
5752 }
5753 case PROBE_SERIAL_NUM_1:
5754 {
5755 struct scsi_vpd_unit_serial_number *serial_buf;
5756 struct cam_ed* device;
5757
5758 serial_buf = NULL;
5759 device = periph->path->device;
5760 device->serial_num = NULL;
5761 device->serial_num_len = 0;
5762
5763 serial_buf = (struct scsi_vpd_unit_serial_number *)
5764 malloc(sizeof(*serial_buf), M_CAMXPT, M_NOWAIT|M_ZERO);
5765
5766 if (serial_buf != NULL) {
5767 scsi_inquiry(csio,
5768 /*retries*/4,
5769 probedone,
5770 MSG_SIMPLE_Q_TAG,
5771 (u_int8_t *)serial_buf,
5772 sizeof(*serial_buf),
5773 /*evpd*/TRUE,
5774 SVPD_UNIT_SERIAL_NUMBER,
5775 SSD_MIN_SIZE,
5776 /*timeout*/60 * 1000);
5777 break;
5778 }
5779 /*
5780 * We'll have to do without, let our probedone
5781 * routine finish up for us.
5782 */
5783 start_ccb->csio.data_ptr = NULL;
5784 probedone(periph, start_ccb);
5785 return;
5786 }
5787 }
5788 xpt_action(start_ccb);
5789}
5790
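/*
 * Fetch the user (default) transfer settings for this path and install them
 * as the current settings, giving the SIM a starting point for negotiation.
 */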
5791static void
5792proberequestdefaultnegotiation(struct cam_periph *periph)
5793{
5794 struct ccb_trans_settings cts;
5795
5796 xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5797 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5798 cts.type = CTS_TYPE_USER_SETTINGS;
5799 xpt_action((union ccb *)&cts);
5800 if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5801 return;
5802 }
5803 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5804 cts.type = CTS_TYPE_CURRENT_SETTINGS;
5805 xpt_action((union ccb *)&cts);
5806}
5807
5808/*
5809 * Backoff Negotiation Code- only pertinent for SPI devices.
5810 */
5811static int
5812proberequestbackoff(struct cam_periph *periph, struct cam_ed *device)
5813{
5814 struct ccb_trans_settings cts;
5815 struct ccb_trans_settings_spi *spi;
5816
5817 memset(&cts, 0, sizeof (cts));
5818 xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5819 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5820 cts.type = CTS_TYPE_CURRENT_SETTINGS;
5821 xpt_action((union ccb *)&cts);
5822 if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5823 if (bootverbose) {
5824 xpt_print(periph->path,
5825 "failed to get current device settings\n");
5826 }
5827 return (0);
5828 }
5829 if (cts.transport != XPORT_SPI) {
5830 if (bootverbose) {
5831 xpt_print(periph->path, "not SPI transport\n");
5832 }
5833 return (0);
5834 }
5835 spi = &cts.xport_specific.spi;
5836
5837 /*
5838 * We cannot renegotiate sync rate if we don't have one.
5839 */
5840 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
5841 if (bootverbose) {
5842 xpt_print(periph->path, "no sync rate known\n");
5843 }
5844 return (0);
5845 }
5846
5847 /*
5848 * We'll assert that we don't have to touch PPR options- the
5849 * SIM will see what we do with period and offset and adjust
5850 * the PPR options as appropriate.
5851 */
5852
5853 /*
5854 * A sync rate with unknown or zero offset is nonsensical.
5855 * A sync period of zero means Async.
5856 */
5857 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0
5858 || spi->sync_offset == 0 || spi->sync_period == 0) {
5859 if (bootverbose) {
5860 xpt_print(periph->path, "no sync rate available\n");
5861 }
5862 return (0);
5863 }
5864
5865 if (device->flags & CAM_DEV_DV_HIT_BOTTOM) {
5866 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5867 ("hit async: giving up on DV\n"));
5868 return (0);
5869 }
5870
5871
5872 /*
5873 * Jump sync_period up by one, but stop at 5MHz and fall back to Async.
5874 * We don't try to remember 'last' settings to see if the SIM actually
5875 * gets into the speed we want to set. We check on the SIM telling
5876 * us that a requested speed is bad, but otherwise don't try and
5877 * check the speed due to the asynchronous and handshake nature
5878 * of speed setting.
5879 */
5880 spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET;
5881 for (;;) {
5882 spi->sync_period++;
5883 if (spi->sync_period >= 0xf) {
5884 spi->sync_period = 0;
5885 spi->sync_offset = 0;
5886 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5887 ("setting to async for DV\n"));
5888 /*
5889 * Once we hit async, we don't want to try
5890 * any more settings.
5891 */
5892 device->flags |= CAM_DEV_DV_HIT_BOTTOM;
5893 } else if (bootverbose) {
5894 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5895 ("DV: period 0x%x\n", spi->sync_period));
5896 printf("setting period to 0x%x\n", spi->sync_period);
5897 }
5898 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5899 cts.type = CTS_TYPE_CURRENT_SETTINGS;
5900 xpt_action((union ccb *)&cts);
5901 if ((cts.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5902 break;
5903 }
5904 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5905 ("DV: failed to set period 0x%x\n", spi->sync_period));
5906 if (spi->sync_period == 0) {
5907 return (0);
5908 }
5909 }
5910 return (1);
5911}
5912
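/*
 * Completion handler for the probe state machine.  Each case digests the
 * results of the command issued by probestart(), decides the next state,
 * and reschedules the periph until the device is fully characterized (or
 * declared lost), at which point the original request ccb is completed.
 */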
5913static void
5914probedone(struct cam_periph *periph, union ccb *done_ccb)
5915{
5916 probe_softc *softc;
5917 struct cam_path *path;
5918 u_int32_t priority;
5919
5920 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
5921
5922 softc = (probe_softc *)periph->softc;
5923 path = done_ccb->ccb_h.path;
5924 priority = done_ccb->ccb_h.pinfo.priority;
5925
5926 switch (softc->action) {
5927 case PROBE_TUR:
5928 {
5929 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5930
5931 if (cam_periph_error(done_ccb, 0,
5932 SF_NO_PRINT, NULL) == ERESTART)
5933 return;
5934 else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5935 /* Don't wedge the queue */
5936 xpt_release_devq(done_ccb->ccb_h.path,
5937 /*count*/1,
5938 /*run_queue*/TRUE);
5939 }
5940 softc->action = PROBE_INQUIRY;
5941 xpt_release_ccb(done_ccb);
5942 xpt_schedule(periph, priority);
5943 return;
5944 }
5945 case PROBE_INQUIRY:
5946 case PROBE_FULL_INQUIRY:
5947 {
5948 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5949 struct scsi_inquiry_data *inq_buf;
5950 u_int8_t periph_qual;
5951
5952 path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
5953 inq_buf = &path->device->inq_data;
5954
5955 periph_qual = SID_QUAL(inq_buf);
5956
5957 switch(periph_qual) {
5958 case SID_QUAL_LU_CONNECTED:
5959 {
5960 u_int8_t len;
5961
5962 /*
5963 * We conservatively request only
5964 * SHORT_INQUIRY_LENGTH bytes of inquiry
5965 * information during our first try
5966 * at sending an INQUIRY. If the device
5967 * has more information to give,
5968 * perform a second request specifying
5969 * the amount of information the device
5970 * is willing to give.
5971 */
5972 len = inq_buf->additional_length
5973 + offsetof(struct scsi_inquiry_data,
5974 additional_length) + 1;
5975 if (softc->action == PROBE_INQUIRY
5976 && len > SHORT_INQUIRY_LENGTH) {
5977 softc->action = PROBE_FULL_INQUIRY;
5978 xpt_release_ccb(done_ccb);
5979 xpt_schedule(periph, priority);
5980 return;
5981 }
5982
5983 xpt_find_quirk(path->device);
5984
5985 xpt_devise_transport(path);
5986 if (INQ_DATA_TQ_ENABLED(inq_buf))
5987 softc->action = PROBE_MODE_SENSE;
5988 else
5989 softc->action = PROBE_SERIAL_NUM_0;
5990
5991 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5992
5993 xpt_release_ccb(done_ccb);
5994 xpt_schedule(periph, priority);
5995 return;
5996 }
5997 default:
5998 break;
5999 }
6000 } else if (cam_periph_error(done_ccb, 0,
6001 done_ccb->ccb_h.target_lun > 0
6002 ? SF_RETRY_UA|SF_QUIET_IR
6003 : SF_RETRY_UA,
6004 &softc->saved_ccb) == ERESTART) {
6005 return;
6006 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6007 /* Don't wedge the queue */
6008 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6009 /*run_queue*/TRUE);
6010 }
6011 /*
6012 * If we get to this point, we got an error status back
6013 * from the inquiry and the error status doesn't require
6014 * automatically retrying the command. Therefore, the
6015 * inquiry failed. If we had inquiry information before
6016 * for this device, but this latest inquiry command failed,
6017 * the device has probably gone away. If this device isn't
6018 * already marked unconfigured, notify the peripheral
6019 * drivers that this device is no more.
6020 */
6021 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
6022 /* Send the async notification. */
6023 xpt_async(AC_LOST_DEVICE, path, NULL);
6024
6025 xpt_release_ccb(done_ccb);
6026 break;
6027 }
6028 case PROBE_MODE_SENSE:
6029 {
6030 struct ccb_scsiio *csio;
6031 struct scsi_mode_header_6 *mode_hdr;
6032
6033 csio = &done_ccb->csio;
6034 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
6035 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
6036 struct scsi_control_page *page;
6037 u_int8_t *offset;
6038
6039 offset = ((u_int8_t *)&mode_hdr[1])
6040 + mode_hdr->blk_desc_len;
6041 page = (struct scsi_control_page *)offset;
6042 path->device->queue_flags = page->queue_flags;
6043 } else if (cam_periph_error(done_ccb, 0,
6044 SF_RETRY_UA|SF_NO_PRINT,
6045 &softc->saved_ccb) == ERESTART) {
6046 return;
6047 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6048 /* Don't wedge the queue */
6049 xpt_release_devq(done_ccb->ccb_h.path,
6050 /*count*/1, /*run_queue*/TRUE);
6051 }
6052 xpt_release_ccb(done_ccb);
6053 free(mode_hdr, M_CAMXPT);
6054 softc->action = PROBE_SERIAL_NUM_0;
6055 xpt_schedule(periph, priority);
6056 return;
6057 }
6058 case PROBE_SERIAL_NUM_0:
6059 {
6060 struct ccb_scsiio *csio;
6061 struct scsi_vpd_supported_page_list *page_list;
6062 int length, serialnum_supported, i;
6063
6064 serialnum_supported = 0;
6065 csio = &done_ccb->csio;
6066 page_list =
6067 (struct scsi_vpd_supported_page_list *)csio->data_ptr;
6068
6069 if (page_list == NULL) {
6070 /*
6071 * Don't process the command as it was never sent
6072 */
6073 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
6074 && (page_list->length > 0)) {
6075 length = min(page_list->length,
6076 SVPD_SUPPORTED_PAGES_SIZE);
6077 for (i = 0; i < length; i++) {
6078 if (page_list->list[i] ==
6079 SVPD_UNIT_SERIAL_NUMBER) {
6080 serialnum_supported = 1;
6081 break;
6082 }
6083 }
6084 } else if (cam_periph_error(done_ccb, 0,
6085 SF_RETRY_UA|SF_NO_PRINT,
6086 &softc->saved_ccb) == ERESTART) {
6087 return;
6088 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6089 /* Don't wedge the queue */
6090 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6091 /*run_queue*/TRUE);
6092 }
6093
6094 if (page_list != NULL)
6095 free(page_list, M_CAMXPT);
6096
6097 if (serialnum_supported) {
6098 xpt_release_ccb(done_ccb);
6099 softc->action = PROBE_SERIAL_NUM_1;
6100 xpt_schedule(periph, priority);
6101 return;
6102 }
6103 xpt_release_ccb(done_ccb);
6104 softc->action = PROBE_TUR_FOR_NEGOTIATION;
6105 xpt_schedule(periph, priority);
6106 return;
6107 }
6108
6109 case PROBE_SERIAL_NUM_1:
6110 {
6111 struct ccb_scsiio *csio;
6112 struct scsi_vpd_unit_serial_number *serial_buf;
6113 u_int32_t priority;
6114 int changed;
6115 int have_serialnum;
6116
6117 changed = 1;
6118 have_serialnum = 0;
6119 csio = &done_ccb->csio;
6120 priority = done_ccb->ccb_h.pinfo.priority;
6121 serial_buf =
6122 (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
6123
6124 /* Clean up from previous instance of this device */
6125 if (path->device->serial_num != NULL) {
6126 free(path->device->serial_num, M_CAMXPT);
6127 path->device->serial_num = NULL;
6128 path->device->serial_num_len = 0;
6129 }
6130
6131 if (serial_buf == NULL) {
6132 /*
6133 * Don't process the command as it was never sent
6134 */
6135 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
6136 && (serial_buf->length > 0)) {
6137
6138 have_serialnum = 1;
6139 path->device->serial_num =
6140 (u_int8_t *)malloc((serial_buf->length + 1),
6141 M_CAMXPT, M_NOWAIT);
6142 if (path->device->serial_num != NULL) {
6143 bcopy(serial_buf->serial_num,
6144 path->device->serial_num,
6145 serial_buf->length);
6146 path->device->serial_num_len =
6147 serial_buf->length;
6148 path->device->serial_num[serial_buf->length]
6149 = '\0';
6150 }
6151 } else if (cam_periph_error(done_ccb, 0,
6152 SF_RETRY_UA|SF_NO_PRINT,
6153 &softc->saved_ccb) == ERESTART) {
6154 return;
6155 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6156 /* Don't wedge the queue */
6157 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6158 /*run_queue*/TRUE);
6159 }
6160
6161 /*
6162 * Let's see if we have seen this device before.
6163 */
6164 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
6165 MD5_CTX context;
6166 u_int8_t digest[16];
6167
6168 MD5Init(&context);
6169
6170 MD5Update(&context,
6171 (unsigned char *)&path->device->inq_data,
6172 sizeof(struct scsi_inquiry_data));
6173
6174 if (have_serialnum)
6175 MD5Update(&context, serial_buf->serial_num,
6176 serial_buf->length);
6177
6178 MD5Final(digest, &context);
6179 if (bcmp(softc->digest, digest, 16) == 0)
6180 changed = 0;
6181
6182 /*
6183 * XXX Do we need to do a TUR in order to ensure
6184 * that the device really hasn't changed???
6185 */
6186 if ((changed != 0)
6187 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
6188 xpt_async(AC_LOST_DEVICE, path, NULL);
6189 }
6190 if (serial_buf != NULL)
6191 free(serial_buf, M_CAMXPT);
6192
6193 if (changed != 0) {
6194 /*
6195 * Now that we have all the necessary
6196 * information to safely perform transfer
6197 * negotiations... Controllers don't perform
6198 * any negotiation or tagged queuing until
6199 * after the first XPT_SET_TRAN_SETTINGS ccb is
6200 * received. So, on a new device, just retrieve
6201 * the user settings, and set them as the current
6202 * settings to set the device up.
6203 */
6204 proberequestdefaultnegotiation(periph);
6205 xpt_release_ccb(done_ccb);
6206
6207 /*
6208 * Perform a TUR to allow the controller to
6209 * perform any necessary transfer negotiation.
6210 */
6211 softc->action = PROBE_TUR_FOR_NEGOTIATION;
6212 xpt_schedule(periph, priority);
6213 return;
6214 }
6215 xpt_release_ccb(done_ccb);
6216 break;
6217 }
6218 case PROBE_TUR_FOR_NEGOTIATION:
6219 case PROBE_DV_EXIT:
6220 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6221 /* Don't wedge the queue */
6222 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6223 /*run_queue*/TRUE);
6224 }
6225 /*
6226 * Do Domain Validation for lun 0 on devices that claim
6227 * to support Synchronous Transfer modes.
6228 */
6229 if (softc->action == PROBE_TUR_FOR_NEGOTIATION
6230 && done_ccb->ccb_h.target_lun == 0
6231 && (path->device->inq_data.flags & SID_Sync) != 0
6232 && (path->device->flags & CAM_DEV_IN_DV) == 0) {
6233 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6234 ("Begin Domain Validation\n"));
6235 path->device->flags |= CAM_DEV_IN_DV;
6236 xpt_release_ccb(done_ccb);
6237 softc->action = PROBE_INQUIRY_BASIC_DV1;
6238 xpt_schedule(periph, priority);
6239 return;
6240 }
6241 if (softc->action == PROBE_DV_EXIT) {
6242 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6243 ("Leave Domain Validation\n"));
6244 }
6245 path->device->flags &=
6246 ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
6247 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
6248 /* Inform the XPT that a new device has been found */
6249 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
6250 xpt_action(done_ccb);
6251 xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
6252 done_ccb);
6253 }
6254 xpt_release_ccb(done_ccb);
6255 break;
6256 case PROBE_INQUIRY_BASIC_DV1:
6257 case PROBE_INQUIRY_BASIC_DV2:
6258 {
6259 struct scsi_inquiry_data *nbuf;
6260 struct ccb_scsiio *csio;
6261
6262 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6263 /* Don't wedge the queue */
6264 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6265 /*run_queue*/TRUE);
6266 }
6267 csio = &done_ccb->csio;
6268 nbuf = (struct scsi_inquiry_data *)csio->data_ptr;
6269 if (bcmp(nbuf, &path->device->inq_data, SHORT_INQUIRY_LENGTH)) {
6270 xpt_print(path,
6271 "inquiry data fails comparison at DV%d step\n",
6272 softc->action == PROBE_INQUIRY_BASIC_DV1 ? 1 : 2);
6273 if (proberequestbackoff(periph, path->device)) {
6274 path->device->flags &= ~CAM_DEV_IN_DV;
6275 softc->action = PROBE_TUR_FOR_NEGOTIATION;
6276 } else {
6277 /* give up */
6278 softc->action = PROBE_DV_EXIT;
6279 }
6280 free(nbuf, M_CAMXPT);
6281 xpt_release_ccb(done_ccb);
6282 xpt_schedule(periph, priority);
6283 return;
6284 }
6285 free(nbuf, M_CAMXPT);
6286 if (softc->action == PROBE_INQUIRY_BASIC_DV1) {
6287 softc->action = PROBE_INQUIRY_BASIC_DV2;
6288 xpt_release_ccb(done_ccb);
6289 xpt_schedule(periph, priority);
6290 return;
6291 }
6292 if (softc->action == PROBE_DV_EXIT) {
6293 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6294 ("Leave Domain Validation Successfully\n"));
6295 }
6296 path->device->flags &=
6297 ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
6298 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
6299 /* Inform the XPT that a new device has been found */
6300 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
6301 xpt_action(done_ccb);
6302 xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
6303 done_ccb);
6304 }
6305 xpt_release_ccb(done_ccb);
6306 break;
6307 }
6308 }
6309 done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
6310 TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
6311 done_ccb->ccb_h.status = CAM_REQ_CMP;
6312 xpt_done(done_ccb);
6313 if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
6314 cam_periph_invalidate(periph);
6315 cam_periph_release(periph);
6316 } else {
6317 probeschedule(periph);
6318 }
6319}
6320
6321static void
6322probecleanup(struct cam_periph *periph)
6323{
6324 free(periph->softc, M_CAMXPT);
6325}
6326
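/*
 * Match the device's inquiry data against the static quirk table; the last
 * entry is a wildcard, so a match is always found.
 */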
6327static void
6328xpt_find_quirk(struct cam_ed *device)
6329{
6330 caddr_t match;
6331
6332 match = cam_quirkmatch((caddr_t)&device->inq_data,
6333 (caddr_t)xpt_quirk_table,
6334 sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
6335 sizeof(*xpt_quirk_table), scsi_inquiry_match);
6336
6337 if (match == NULL)
6338 panic("xpt_find_quirk: device didn't match wildcard entry!!");
6339
6340 device->quirk = (struct xpt_quirk_entry *)match;
6341}
6342
6343static int
6344sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
6345{
6346 int error, bool;
6347
6348 bool = cam_srch_hi;
6349 error = sysctl_handle_int(oidp, &bool, 0, req);
6350 if (error != 0 || req->newptr == NULL)
6351 return (error);
6352 if (bool == 0 || bool == 1) {
6353 cam_srch_hi = bool;
6354 return (0);
6355 } else {
6356 return (EINVAL);
6357 }
6358}
6359
6360
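/*
 * Derive the protocol and transport (and their versions) for a device from
 * the path inquiry data and, when available, its SCSI inquiry data, then
 * push the result to the SIM with XPT_SET_TRAN_SETTINGS.
 */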
6361static void
6362xpt_devise_transport(struct cam_path *path)
6363{
6364 struct ccb_pathinq cpi;
6365 struct ccb_trans_settings cts;
6366 struct scsi_inquiry_data *inq_buf;
6367
6368 /* Get transport information from the SIM */
6369 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
6370 cpi.ccb_h.func_code = XPT_PATH_INQ;
6371 xpt_action((union ccb *)&cpi);
6372
6373 inq_buf = NULL;
6374 if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
6375 inq_buf = &path->device->inq_data;
6376 path->device->protocol = PROTO_SCSI;
6377 path->device->protocol_version =
6378 inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
6379 path->device->transport = cpi.transport;
6380 path->device->transport_version = cpi.transport_version;
6381
6382 /*
6383 * Any device not using SPI3 features should
6384 * be considered SPI2 or lower.
6385 */
6386 if (inq_buf != NULL) {
6387 if (path->device->transport == XPORT_SPI
6388 && (inq_buf->spi3data & SID_SPI_MASK) == 0
6389 && path->device->transport_version > 2)
6390 path->device->transport_version = 2;
6391 } else {
6392 struct cam_ed* otherdev;
6393
6394 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
6395 otherdev != NULL;
6396 otherdev = TAILQ_NEXT(otherdev, links)) {
6397 if (otherdev != path->device)
6398 break;
6399 }
6400
6401 if (otherdev != NULL) {
6402 /*
6403 * Initially assume the same versioning as
6404 * prior luns for this target.
6405 */
6406 path->device->protocol_version =
6407 otherdev->protocol_version;
6408 path->device->transport_version =
6409 otherdev->transport_version;
6410 } else {
6411 /* Until we know better, opt for safety */
6412 path->device->protocol_version = 2;
6413 if (path->device->transport == XPORT_SPI)
6414 path->device->transport_version = 2;
6415 else
6416 path->device->transport_version = 0;
6417 }
6418 }
6419
6420 /*
6421 * XXX
6422 * For a device compliant with SPC-2 we should be able
6423 * to determine the transport version supported by
6424 * scrutinizing the version descriptors in the
6425 * inquiry buffer.
6426 */
6427
6428 /* Tell the controller what we think */
6429 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
6430 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
6431 cts.type = CTS_TYPE_CURRENT_SETTINGS;
6432 cts.transport = path->device->transport;
6433 cts.transport_version = path->device->transport_version;
6434 cts.protocol = path->device->protocol;
6435 cts.protocol_version = path->device->protocol_version;
6436 cts.proto_specific.valid = 0;
6437 cts.xport_specific.valid = 0;
6438 xpt_action((union ccb *)&cts);
6439}
6440
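/*
 * Sanity check and apply a transfer settings request.  Missing protocol and
 * transport fields are filled in from the device, SCSI and SPI specific
 * values are clamped to what the controller and device can do, and tagged
 * queuing transitions are handled by freezing and restarting the device
 * queue before the request is passed to the SIM.
 */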
6441static void
6442xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6443 int async_update)
6444{
6445 struct ccb_pathinq cpi;
6446 struct ccb_trans_settings cur_cts;
6447 struct ccb_trans_settings_scsi *scsi;
6448 struct ccb_trans_settings_scsi *cur_scsi;
6449 struct cam_sim *sim;
6450 struct scsi_inquiry_data *inq_data;
6451
6452 if (device == NULL) {
6453 cts->ccb_h.status = CAM_PATH_INVALID;
6454 xpt_done((union ccb *)cts);
6455 return;
6456 }
6457
6458 if (cts->protocol == PROTO_UNKNOWN
6459 || cts->protocol == PROTO_UNSPECIFIED) {
6460 cts->protocol = device->protocol;
6461 cts->protocol_version = device->protocol_version;
6462 }
6463
6464 if (cts->protocol_version == PROTO_VERSION_UNKNOWN
6465 || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
6466 cts->protocol_version = device->protocol_version;
6467
6468 if (cts->protocol != device->protocol) {
6469 xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n",
6470 cts->protocol, device->protocol);
6471 cts->protocol = device->protocol;
6472 }
6473
6474 if (cts->protocol_version > device->protocol_version) {
6475 if (bootverbose) {
6476 xpt_print(cts->ccb_h.path, "Down reving Protocol "
6477 "Version from %d to %d?\n", cts->protocol_version,
6478 device->protocol_version);
6479 }
6480 cts->protocol_version = device->protocol_version;
6481 }
6482
6483 if (cts->transport == XPORT_UNKNOWN
6484 || cts->transport == XPORT_UNSPECIFIED) {
6485 cts->transport = device->transport;
6486 cts->transport_version = device->transport_version;
6487 }
6488
6489 if (cts->transport_version == XPORT_VERSION_UNKNOWN
6490 || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
6491 cts->transport_version = device->transport_version;
6492
6493 if (cts->transport != device->transport) {
6494 xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n",
6495 cts->transport, device->transport);
6496 cts->transport = device->transport;
6497 }
6498
6499 if (cts->transport_version > device->transport_version) {
6500 if (bootverbose) {
6501 xpt_print(cts->ccb_h.path, "Down reving Transport "
6502 "Version from %d to %d?\n", cts->transport_version,
6503 device->transport_version);
6504 }
6505 cts->transport_version = device->transport_version;
6506 }
6507
6508 sim = cts->ccb_h.path->bus->sim;
6509
6510 /*
6511 * Nothing more of interest to do unless
6512 * this is a device connected via the
6513 * SCSI protocol.
6514 */
6515 if (cts->protocol != PROTO_SCSI) {
6516 if (async_update == FALSE)
6517 (*(sim->sim_action))(sim, (union ccb *)cts);
6518 return;
6519 }
6520
6521 inq_data = &device->inq_data;
6522 scsi = &cts->proto_specific.scsi;
6523 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6524 cpi.ccb_h.func_code = XPT_PATH_INQ;
6525 xpt_action((union ccb *)&cpi);
6526
6527 /* SCSI specific sanity checking */
6528 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6529 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
6530 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6531 || (device->quirk->mintags == 0)) {
6532 /*
6533 * Can't tag on hardware that doesn't support tags,
6534 * doesn't have it enabled, or has broken tag support.
6535 */
6536 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6537 }
6538
6539 if (async_update == FALSE) {
6540 /*
6541 * Perform sanity checking against what the
6542 * controller and device can do.
6543 */
6544 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6545 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6546 cur_cts.type = cts->type;
6547 xpt_action((union ccb *)&cur_cts);
6548 if ((cur_cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
6549 return;
6550 }
6551 cur_scsi = &cur_cts.proto_specific.scsi;
6552 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
6553 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6554 scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
6555 }
6556 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
6557 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6558 }
6559
6560 /* SPI specific sanity checking */
6561 if (cts->transport == XPORT_SPI && async_update == FALSE) {
6562 u_int spi3caps;
6563 struct ccb_trans_settings_spi *spi;
6564 struct ccb_trans_settings_spi *cur_spi;
6565
6566 spi = &cts->xport_specific.spi;
6567
6568 cur_spi = &cur_cts.xport_specific.spi;
6569
6570 /* Fill in any gaps in what the user gave us */
6571 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6572 spi->sync_period = cur_spi->sync_period;
6573 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6574 spi->sync_period = 0;
6575 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6576 spi->sync_offset = cur_spi->sync_offset;
6577 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6578 spi->sync_offset = 0;
6579 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6580 spi->ppr_options = cur_spi->ppr_options;
6581 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6582 spi->ppr_options = 0;
6583 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6584 spi->bus_width = cur_spi->bus_width;
6585 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6586 spi->bus_width = 0;
6587 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
6588 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6589 spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
6590 }
6591 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
6592 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6593 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6594 && (inq_data->flags & SID_Sync) == 0
6595 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6596 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6597 || (spi->sync_offset == 0)
6598 || (spi->sync_period == 0)) {
6599 /* Force async */
6600 spi->sync_period = 0;
6601 spi->sync_offset = 0;
6602 }
6603
6604 switch (spi->bus_width) {
6605 case MSG_EXT_WDTR_BUS_32_BIT:
6606 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6607 || (inq_data->flags & SID_WBus32) != 0
6608 || cts->type == CTS_TYPE_USER_SETTINGS)
6609 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6610 break;
6611 /* Fall Through to 16-bit */
6612 case MSG_EXT_WDTR_BUS_16_BIT:
6613 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6614 || (inq_data->flags & SID_WBus16) != 0
6615 || cts->type == CTS_TYPE_USER_SETTINGS)
6616 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6617 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6618 break;
6619 }
6620 /* Fall Through to 8-bit */
6621 default: /* New bus width?? */
6622 case MSG_EXT_WDTR_BUS_8_BIT:
6623 /* All targets can do this */
6624 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6625 break;
6626 }
6627
6628 spi3caps = cpi.xport_specific.spi.ppr_options;
6629 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6630 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6631 spi3caps &= inq_data->spi3data;
6632
6633 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
6634 spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
6635
6636 if ((spi3caps & SID_SPI_IUS) == 0)
6637 spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
6638
6639 if ((spi3caps & SID_SPI_QAS) == 0)
6640 spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
6641
6642 /* No SPI Transfer settings are allowed unless we are wide */
6643 if (spi->bus_width == 0)
6644 spi->ppr_options = 0;
6645
6646 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
6647 /*
6648 * Can't tag queue without disconnection.
6649 */
6650 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6651 scsi->valid |= CTS_SCSI_VALID_TQ;
6652 }
6653
6654 /*
6655 * If we are currently performing tagged transactions to
6656 * this device and want to change its negotiation parameters,
6657 * go non-tagged for a bit to give the controller a chance to
6658 * negotiate unhampered by tag messages.
6659 */
6660 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6661 && (device->inq_flags & SID_CmdQue) != 0
6662 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6663 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
6664 CTS_SPI_VALID_SYNC_OFFSET|
6665 CTS_SPI_VALID_BUS_WIDTH)) != 0)
6666 xpt_toggle_tags(cts->ccb_h.path);
6667 }
6668
6669 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6670 && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
6671 int device_tagenb;
6672
6673 /*
6674 * If we are transitioning from tags to no-tags or
6675 * vice-versa, we need to carefully freeze and restart
6676 * the queue so that we don't overlap tagged and non-tagged
6677 * commands. We also temporarily stop tags if there is
6678 * a change in transfer negotiation settings to allow
6679 * "tag-less" negotiation.
6680 */
6681 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6682 || (device->inq_flags & SID_CmdQue) != 0)
6683 device_tagenb = TRUE;
6684 else
6685 device_tagenb = FALSE;
6686
6687 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6688 && device_tagenb == FALSE)
6689 || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
6690 && device_tagenb == TRUE)) {
6691
6692 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
6693 /*
6694 * Delay change to use tags until after a
6695 * few commands have gone to this device so
6696 * the controller has time to perform transfer
6697 * negotiations without tagged messages getting
6698 * in the way.
6699 */
6700 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6701 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6702 } else {
6703 struct ccb_relsim crs;
6704
6705 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6706 device->inq_flags &= ~SID_CmdQue;
6707 xpt_dev_ccbq_resize(cts->ccb_h.path,
6708 sim->max_dev_openings);
6709 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6710 device->tag_delay_count = 0;
6711
6712 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6713 /*priority*/1);
6714 crs.ccb_h.func_code = XPT_REL_SIMQ;
6715 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6716 crs.openings
6717 = crs.release_timeout
6718 = crs.qfrozen_cnt
6719 = 0;
6720 xpt_action((union ccb *)&crs);
6721 }
6722 }
6723 }
6724 if (async_update == FALSE)
6725 (*(sim->sim_action))(sim, (union ccb *)cts);
6726}
6727
6728
6729static void
6730xpt_toggle_tags(struct cam_path *path)
6731{
6732 struct cam_ed *dev;
6733
6734 /*
6735 * Give controllers a chance to renegotiate
6736 * before starting tag operations. We
6737 * "toggle" tagged queuing off then on
6738 * which causes the tag enable command delay
6739 * counter to come into effect.
6740 */
6741 dev = path->device;
6742 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6743 || ((dev->inq_flags & SID_CmdQue) != 0
6744 && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
6745 struct ccb_trans_settings cts;
6746
6747 xpt_setup_ccb(&cts.ccb_h, path, 1);
6748 cts.protocol = PROTO_SCSI;
6749 cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
6750 cts.transport = XPORT_UNSPECIFIED;
6751 cts.transport_version = XPORT_VERSION_UNSPECIFIED;
6752 cts.proto_specific.scsi.flags = 0;
6753 cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
6754 xpt_set_transfer_settings(&cts, path->device,
6755 /*async_update*/TRUE);
6756 cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
6757 xpt_set_transfer_settings(&cts, path->device,
6758 /*async_update*/TRUE);
6759 }
6760}
6761
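/*
 * Enable tagged queuing on a device: freeze its queue, set SID_CmdQue,
 * resize the ccb queue to the saved or quirk-limited opening count, and ask
 * the SIM to release the queue once it empties.
 */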
6762static void
6763xpt_start_tags(struct cam_path *path)
6764{
6765 struct ccb_relsim crs;
6766 struct cam_ed *device;
6767 struct cam_sim *sim;
6768 int newopenings;
6769
6770 device = path->device;
6771 sim = path->bus->sim;
6772 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6773 xpt_freeze_devq(path, /*count*/1);
6774 device->inq_flags |= SID_CmdQue;
6775 if (device->tag_saved_openings != 0)
6776 newopenings = device->tag_saved_openings;
6777 else
6778 newopenings = min(device->quirk->maxtags,
6779 sim->max_tagged_dev_openings);
6780 xpt_dev_ccbq_resize(path, newopenings);
6781 xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
6782 crs.ccb_h.func_code = XPT_REL_SIMQ;
6783 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6784 crs.openings
6785 = crs.release_timeout
6786 = crs.qfrozen_cnt
6787 = 0;
6788 xpt_action((union ccb *)&crs);
6789}
6790
6791static int busses_to_config;
6792static int busses_to_reset;
6793
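/*
 * Bus traversal callbacks used during initial configuration: the first
 * counts the busses to configure (and those that will need a reset), the
 * second issues the path inquiry and, where the HBA supports bus resets and
 * can negotiate, a bus reset for each bus.
 */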
6794static int
6795xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
6796{
6797
6798 mtx_assert(bus->sim->mtx, MA_OWNED);
6799
6800 if (bus->path_id != CAM_XPT_PATH_ID) {
6801 struct cam_path path;
6802 struct ccb_pathinq cpi;
6803 int can_negotiate;
6804
6805 busses_to_config++;
6806 xpt_compile_path(&path, NULL, bus->path_id,
6807 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
6808 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
6809 cpi.ccb_h.func_code = XPT_PATH_INQ;
6810 xpt_action((union ccb *)&cpi);
6811 can_negotiate = cpi.hba_inquiry;
6812 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6813 if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
6814 && can_negotiate)
6815 busses_to_reset++;
6816 xpt_release_path(&path);
6817 }
6818
6819 return(1);
6820}
6821
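/*
 * Per-bus configuration callback.  Issue a path inquiry and, when the
 * controller can negotiate transfers and permits bus resets, reset the
 * bus so negotiation starts from a known state; either way the completed
 * ccb is handed to xpt_finishconfig() to continue with the bus scan.
 */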
6822static int
6823xptconfigfunc(struct cam_eb *bus, void *arg)
6824{
6825 struct cam_path *path;
6826 union ccb *work_ccb;
6827
6828 mtx_assert(bus->sim->mtx, MA_OWNED);
6829
6830 if (bus->path_id != CAM_XPT_PATH_ID) {
6831 cam_status status;
6832 int can_negotiate;
6833
6834 work_ccb = xpt_alloc_ccb_nowait();
6835 if (work_ccb == NULL) {
6836 busses_to_config--;
6837 xpt_finishconfig(xpt_periph, NULL);
6838 return(0);
6839 }
6840 if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
6841 CAM_TARGET_WILDCARD,
6842 CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
6843 printf("xptconfigfunc: xpt_create_path failed with "
6844 "status %#x for bus %d\n", status, bus->path_id);
6845 printf("xptconfigfunc: halting bus configuration\n");
6846 xpt_free_ccb(work_ccb);
6847 busses_to_config--;
6848 xpt_finishconfig(xpt_periph, NULL);
6849 return(0);
6850 }
6851 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6852 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
6853 xpt_action(work_ccb);
6854 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
6855 printf("xptconfigfunc: CPI failed on bus %d "
6856 "with status %d\n", bus->path_id,
6857 work_ccb->ccb_h.status);
6858 xpt_finishconfig(xpt_periph, work_ccb);
6859 return(1);
6860 }
6861
6862 can_negotiate = work_ccb->cpi.hba_inquiry;
6863 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6864 if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
6865 && (can_negotiate != 0)) {
6866 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6867 work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6868 work_ccb->ccb_h.cbfcnp = NULL;
6869 CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
6870 ("Resetting Bus\n"));
6871 xpt_action(work_ccb);
6872 xpt_finishconfig(xpt_periph, work_ccb);
6873 } else {
6874 /* Act as though we performed a successful BUS RESET */
6875 work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6876 xpt_finishconfig(xpt_periph, work_ccb);
6877 }
6878 }
6879
6880 return(1);
6881}
6882
6883static void
6884xpt_config(void *arg)
6885{
6886 /*
6887 * Now that interrupts are enabled, go find our devices
6888 */
6889
6890#ifdef CAMDEBUG
6891 /* Setup debugging flags and path */
6892#ifdef CAM_DEBUG_FLAGS
6893 cam_dflags = CAM_DEBUG_FLAGS;
6894#else /* !CAM_DEBUG_FLAGS */
6895 cam_dflags = CAM_DEBUG_NONE;
6896#endif /* CAM_DEBUG_FLAGS */
6897#ifdef CAM_DEBUG_BUS
6898 if (cam_dflags != CAM_DEBUG_NONE) {
6899 /*
6900 * Locking is specifically omitted here. No SIMs have
6901 * registered yet, so xpt_create_path will only be searching
6902 * empty lists of targets and devices.
6903 */
6904 if (xpt_create_path(&cam_dpath, xpt_periph,
6905 CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
6906 CAM_DEBUG_LUN) != CAM_REQ_CMP) {
6907 printf("xpt_config: xpt_create_path() failed for debug"
6908 " target %d:%d:%d, debugging disabled\n",
6909 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
6910 cam_dflags = CAM_DEBUG_NONE;
6911 }
6912 } else
6913 cam_dpath = NULL;
6914#else /* !CAM_DEBUG_BUS */
6915 cam_dpath = NULL;
6916#endif /* CAM_DEBUG_BUS */
6917#endif /* CAMDEBUG */
6918
6919 /*
6920 * Scan all installed busses.
6921 */
6922 xpt_for_all_busses(xptconfigbuscountfunc, NULL);
6923
6924 if (busses_to_config == 0) {
6925 /* Call manually because we don't have any busses */
6926 xpt_finishconfig(xpt_periph, NULL);
6927 } else {
6928 if (busses_to_reset > 0 && scsi_delay >= 2000) {
6929 printf("Waiting %d seconds for SCSI "
6930 "devices to settle\n", scsi_delay/1000);
6931 }
6932 xpt_for_all_busses(xptconfigfunc, NULL);
6933 }
6934}
6935
6936/*
6937 * If the given device only has one peripheral attached to it, and if that
6938 * peripheral is the passthrough driver, announce it. This ensures that the
6939 * user sees some sort of announcement for every peripheral in their system.
6940 */
6941static int
6942xptpassannouncefunc(struct cam_ed *device, void *arg)
6943{
6944 struct cam_periph *periph;
6945 int i;
6946
6947 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
6948 periph = SLIST_NEXT(periph, periph_links), i++);
6949
6950 periph = SLIST_FIRST(&device->periphs);
6951 if ((i == 1)
6952 && (strncmp(periph->periph_name, "pass", 4) == 0))
6953 xpt_announce_periph(periph, NULL);
6954
6955 return(1);
6956}
6957
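/*
 * Deferred completion of initial configuration, run from the thread
 * taskqueue once the last bus has been handled: register the peripheral
 * drivers, announce devices claimed only by the passthrough driver, and
 * release the config_intrhook so the boot can proceed.
 */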
6958static void
6959xpt_finishconfig_task(void *context, int pending)
6960{
6961 struct periph_driver **p_drv;
6962 int i;
6963
6964 if (busses_to_config == 0) {
6965 /* Register all the peripheral drivers */
6966 /* XXX This will have to change when we have loadable modules */
6967 p_drv = periph_drivers;
6968 for (i = 0; p_drv[i] != NULL; i++) {
6969 (*p_drv[i]->init)();
6970 }
6971
6972 /*
6973 * Check for devices with no "standard" peripheral driver
6974 * attached. For any devices like that, announce the
6975 * passthrough driver so the user will see something.
6976 */
6977 xpt_for_all_devices(xptpassannouncefunc, NULL);
6978
6979 /* Release our hook so that the boot can continue. */
6980 config_intrhook_disestablish(xsoftc.xpt_config_hook);
6981 free(xsoftc.xpt_config_hook, M_CAMXPT);
6982 xsoftc.xpt_config_hook = NULL;
6983 }
6984
6985 free(context, M_CAMXPT);
6986}
6987
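/*
 * Completion handler for configuration ccbs.  A successful bus reset is
 * converted into a bus scan; any other completion frees the path and
 * retires the bus.  When the last bus finishes, xpt_finishconfig_task()
 * is queued to wrap up.
 */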
6988static void
6989xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
6990{
6991 struct xpt_task *task;
6992
6993 if (done_ccb != NULL) {
6994 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
6995 ("xpt_finishconfig\n"));
6996 switch(done_ccb->ccb_h.func_code) {
6997 case XPT_RESET_BUS:
6998 if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
6999 done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
7000 done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
7001 done_ccb->crcn.flags = 0;
7002 xpt_action(done_ccb);
7003 return;
7004 }
7005 /* FALLTHROUGH */
7006 case XPT_SCAN_BUS:
7007 default:
7008 xpt_free_path(done_ccb->ccb_h.path);
7009 busses_to_config--;
7010 break;
7011 }
7012 }
7013
7014 if (busses_to_config == 0) {
7015 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
7016 if (task != NULL) {
7017 TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
7018 taskqueue_enqueue(taskqueue_thread, &task->task);
7019 }
7020 }
7021
7022 if (done_ccb != NULL)
7023 xpt_free_ccb(done_ccb);
7024}
7025
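/*
 * Convenience wrapper around XPT_SASYNC_CB: register an async callback
 * for the given events, building a temporary wildcard path on the XPT
 * bus when 'path' is NULL.
 */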
7026cam_status
7027xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
7028 struct cam_path *path)
7029{
7030 struct ccb_setasync csa;
7031 cam_status status;
7032 int xptpath = 0;
7033
7034 if (path == NULL) {
7035 mtx_lock(&xsoftc.xpt_lock);
7036 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
7037 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
7038 if (status != CAM_REQ_CMP) {
7039 mtx_unlock(&xsoftc.xpt_lock);
7040 return (status);
7041 }
7042 xptpath = 1;
7043 }
7044
7045 xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
7046 csa.ccb_h.func_code = XPT_SASYNC_CB;
7047 csa.event_enable = event;
7048 csa.callback = cbfunc;
7049 csa.callback_arg = cbarg;
7050 xpt_action((union ccb *)&csa);
7051 status = csa.ccb_h.status;
7052 if (xptpath) {
7053 xpt_free_path(path);
7054 mtx_unlock(&xsoftc.xpt_lock);
7055 }
7056 return (status);
7057}
7058
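/*
 * Action routine for the transport layer's own SIM.  Only XPT_PATH_INQ
 * is implemented; all other function codes are rejected with
 * CAM_REQ_INVALID.
 */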
7059static void
7060xptaction(struct cam_sim *sim, union ccb *work_ccb)
7061{
7062 CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
7063
7064 switch (work_ccb->ccb_h.func_code) {
7065 /* Common cases first */
7066 case XPT_PATH_INQ: /* Path routing inquiry */
7067 {
7068 struct ccb_pathinq *cpi;
7069
7070 cpi = &work_ccb->cpi;
7071 cpi->version_num = 1; /* XXX??? */
7072 cpi->hba_inquiry = 0;
7073 cpi->target_sprt = 0;
7074 cpi->hba_misc = 0;
7075 cpi->hba_eng_cnt = 0;
7076 cpi->max_target = 0;
7077 cpi->max_lun = 0;
7078 cpi->initiator_id = 0;
7079 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
7080 strncpy(cpi->hba_vid, "", HBA_IDLEN);
7081 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
7082 cpi->unit_number = sim->unit_number;
7083 cpi->bus_id = sim->bus_id;
7084 cpi->base_transfer_speed = 0;
7085 cpi->protocol = PROTO_UNSPECIFIED;
7086 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
7087 cpi->transport = XPORT_UNSPECIFIED;
7088 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
7089 cpi->ccb_h.status = CAM_REQ_CMP;
7090 xpt_done(work_ccb);
7091 break;
7092 }
7093 default:
7094 work_ccb->ccb_h.status = CAM_REQ_INVALID;
7095 xpt_done(work_ccb);
7096 break;
7097 }
7098}
7099
7100/*
7101 * The xpt as a "controller" has no interrupt sources, so polling
7102 * is a no-op.
7103 */
7104static void
7105xptpoll(struct cam_sim *sim)
7106{
7107}
7108
7109void
7110xpt_lock_buses(void)
7111{
7112 mtx_lock(&xsoftc.xpt_topo_lock);
7113}
7114
7115void
7116xpt_unlock_buses(void)
7117{
7118 mtx_unlock(&xsoftc.xpt_topo_lock);
7119}
7120
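/*
 * Software interrupt handler: detach the list of SIMs with completed
 * ccbs from the global queue, then run each SIM's done queue while
 * holding that SIM's lock.
 */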
7121static void
7122camisr(void *dummy)
7123{
7124 cam_simq_t queue;
7125 struct cam_sim *sim;
7126
7127 mtx_lock(&cam_simq_lock);
7128 TAILQ_INIT(&queue);
7129 TAILQ_CONCAT(&queue, &cam_simq, links);
7130 mtx_unlock(&cam_simq_lock);
7131
7132 while ((sim = TAILQ_FIRST(&queue)) != NULL) {
7133 TAILQ_REMOVE(&queue, sim, links);
7134 CAM_SIM_LOCK(sim);
7135 sim->flags &= ~CAM_SIM_ON_DONEQ;
7136 camisr_runqueue(&sim->sim_doneq);
7137 CAM_SIM_UNLOCK(sim);
7138 }
7139}
7140
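/*
 * Process a SIM's completion queue: account for high-power commands,
 * return queue slots, release frozen device/SIM queues as requested by
 * the ccb status and flags, restart the send queue if work remains, and
 * finally invoke the peripheral driver's completion callback.
 */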
7141static void
7142camisr_runqueue(void *V_queue)
7143{
7144 cam_isrq_t *queue = V_queue;
7145 struct ccb_hdr *ccb_h;
7146
7147 while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
7148 int runq;
7149
7150 TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
7151 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
7152
7153 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
7154 ("camisr\n"));
7155
7156 runq = FALSE;
7157
7158 if (ccb_h->flags & CAM_HIGH_POWER) {
7159 struct highpowerlist *hphead;
7160 union ccb *send_ccb;
7161
7162 mtx_lock(&xsoftc.xpt_lock);
7163 hphead = &xsoftc.highpowerq;
7164
7165 send_ccb = (union ccb *)STAILQ_FIRST(hphead);
7166
7167 /*
7168 * Increment the count since this command is done.
7169 */
7170 xsoftc.num_highpower++;
7171
7172 /*
7173 * Any high powered commands queued up?
7174 */
7175 if (send_ccb != NULL) {
7176
7177 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
7178 mtx_unlock(&xsoftc.xpt_lock);
7179
7180 xpt_release_devq(send_ccb->ccb_h.path,
7181 /*count*/1, /*runqueue*/TRUE);
7182 } else
7183 mtx_unlock(&xsoftc.xpt_lock);
7184 }
7185
7186 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
7187 struct cam_ed *dev;
7188
7189 dev = ccb_h->path->device;
7190
7191 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
7192 ccb_h->path->bus->sim->devq->send_active--;
7193 ccb_h->path->bus->sim->devq->send_openings++;
7194
7195 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
7196 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
7197 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
7198 && (dev->ccbq.dev_active == 0))) {
7199
7200 xpt_release_devq(ccb_h->path, /*count*/1,
7201 /*run_queue*/TRUE);
7202 }
7203
7204 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
7205 && (--dev->tag_delay_count == 0))
7206 xpt_start_tags(ccb_h->path);
7207
7208 if ((dev->ccbq.queue.entries > 0)
7209 && (dev->qfrozen_cnt == 0)
7210 && (device_is_send_queued(dev) == 0)) {
7211 runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
7212 dev);
7213 }
7214 }
7215
7216 if (ccb_h->status & CAM_RELEASE_SIMQ) {
7217 xpt_release_simq(ccb_h->path->bus->sim,
7218 /*run_queue*/TRUE);
7219 ccb_h->status &= ~CAM_RELEASE_SIMQ;
7220 runq = FALSE;
7221 }
7222
7223 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
7224 && (ccb_h->status & CAM_DEV_QFRZN)) {
7225 xpt_release_devq(ccb_h->path, /*count*/1,
7226 /*run_queue*/TRUE);
7227 ccb_h->status &= ~CAM_DEV_QFRZN;
7228 } else if (runq) {
7229 xpt_run_dev_sendq(ccb_h->path->bus);
7230 }
7231
7232 /* Call the peripheral driver's callback */
7233 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
7234 }
7235}
7236
4851 free(bus, M_CAMXPT);
4852 }
4853}
4854
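/*
 * Allocate a target, take a reference on its parent bus, and insertion
 * sort the target into the bus's list by target id.
 */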
4855static struct cam_et *
4856xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4857{
4858 struct cam_et *target;
4859
4860 target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT);
4861 if (target != NULL) {
4862 struct cam_et *cur_target;
4863
4864 TAILQ_INIT(&target->ed_entries);
4865 target->bus = bus;
4866 target->target_id = target_id;
4867 target->refcount = 1;
4868 target->generation = 0;
4869 timevalclear(&target->last_reset);
4870 /*
4871 * Hold a reference to our parent bus so it
4872 * will not go away before we do.
4873 */
4874 bus->refcount++;
4875
4876 /* Insertion sort into our bus's target list */
4877 cur_target = TAILQ_FIRST(&bus->et_entries);
4878 while (cur_target != NULL && cur_target->target_id < target_id)
4879 cur_target = TAILQ_NEXT(cur_target, links);
4880
4881 if (cur_target != NULL) {
4882 TAILQ_INSERT_BEFORE(cur_target, target, links);
4883 } else {
4884 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4885 }
4886 bus->generation++;
4887 }
4888 return (target);
4889}
4890
4891static void
4892xpt_release_target(struct cam_eb *bus, struct cam_et *target)
4893{
4894
4895 if ((--target->refcount == 0)
4896 && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
4897 TAILQ_REMOVE(&bus->et_entries, target, links);
4898 bus->generation++;
4899 free(target, M_CAMXPT);
4900 xpt_release_bus(bus);
4901 }
4902}
4903
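/*
 * Allocate a device (lun): grow the bus's device queue, initialize the
 * per-device queues and default quirk entry, take a reference on the
 * parent target, and insertion sort the device into the target's list.
 * Real (non-wildcard) luns also have their transport devised here.
 */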
4904static struct cam_ed *
4905xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4906{
4907 struct cam_path path;
4908 struct cam_ed *device;
4909 struct cam_devq *devq;
4910 cam_status status;
4911
4912 /* Make space for us in the device queue on our bus */
4913 devq = bus->sim->devq;
4914 status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
4915
4916 if (status != CAM_REQ_CMP) {
4917 device = NULL;
4918 } else {
4919 device = (struct cam_ed *)malloc(sizeof(*device),
4920 M_CAMXPT, M_NOWAIT);
4921 }
4922
4923 if (device != NULL) {
4924 struct cam_ed *cur_device;
4925
4926 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
4927 device->alloc_ccb_entry.device = device;
4928 cam_init_pinfo(&device->send_ccb_entry.pinfo);
4929 device->send_ccb_entry.device = device;
4930 device->target = target;
4931 device->lun_id = lun_id;
4932 device->sim = bus->sim;
4933 /* Initialize our queues */
4934 if (camq_init(&device->drvq, 0) != 0) {
4935 free(device, M_CAMXPT);
4936 return (NULL);
4937 }
4938 if (cam_ccbq_init(&device->ccbq,
4939 bus->sim->max_dev_openings) != 0) {
4940 camq_fini(&device->drvq);
4941 free(device, M_CAMXPT);
4942 return (NULL);
4943 }
4944 SLIST_INIT(&device->asyncs);
4945 SLIST_INIT(&device->periphs);
4946 device->generation = 0;
4947 device->owner = NULL;
4948 /*
4949 * Take the default quirk entry until we have inquiry
4950 * data and can determine a better quirk to use.
4951 */
4952 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
4953 bzero(&device->inq_data, sizeof(device->inq_data));
4954 device->inq_flags = 0;
4955 device->queue_flags = 0;
4956 device->serial_num = NULL;
4957 device->serial_num_len = 0;
4958 device->qfrozen_cnt = 0;
4959 device->flags = CAM_DEV_UNCONFIGURED;
4960 device->tag_delay_count = 0;
4961 device->tag_saved_openings = 0;
4962 device->refcount = 1;
4963 if (bus->sim->flags & CAM_SIM_MPSAFE)
4964 callout_init_mtx(&device->callout, bus->sim->mtx, 0);
4965 else
4966 callout_init_mtx(&device->callout, &Giant, 0);
4967
4968 /*
4969 * Hold a reference to our parent target so it
4970 * will not go away before we do.
4971 */
4972 target->refcount++;
4973
4974 /*
4975 * XXX should be limited by number of CCBs this bus can
4976 * do.
4977 */
4978 bus->sim->max_ccbs += device->ccbq.devq_openings;
4979 /* Insertion sort into our target's device list */
4980 cur_device = TAILQ_FIRST(&target->ed_entries);
4981 while (cur_device != NULL && cur_device->lun_id < lun_id)
4982 cur_device = TAILQ_NEXT(cur_device, links);
4983 if (cur_device != NULL) {
4984 TAILQ_INSERT_BEFORE(cur_device, device, links);
4985 } else {
4986 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4987 }
4988 target->generation++;
4989 if (lun_id != CAM_LUN_WILDCARD) {
4990 xpt_compile_path(&path,
4991 NULL,
4992 bus->path_id,
4993 target->target_id,
4994 lun_id);
4995 xpt_devise_transport(&path);
4996 xpt_release_path(&path);
4997 }
4998 }
4999 return (device);
5000}
5001
5002static void
5003xpt_release_device(struct cam_eb *bus, struct cam_et *target,
5004 struct cam_ed *device)
5005{
5006
5007 if ((--device->refcount == 0)
5008 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
5009 struct cam_devq *devq;
5010
5011 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
5012 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
5013 panic("Removing device while still queued for ccbs");
5014
5015 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
5016 callout_stop(&device->callout);
5017
5018 TAILQ_REMOVE(&target->ed_entries, device,links);
5019 target->generation++;
5020 bus->sim->max_ccbs -= device->ccbq.devq_openings;
5021 /* Release our slot in the devq */
5022 devq = bus->sim->devq;
5023 cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
5024 camq_fini(&device->drvq);
5025 camq_fini(&device->ccbq.queue);
5026 free(device, M_CAMXPT);
5027 xpt_release_target(bus, target);
5028 }
5029}
5030
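/*
 * Resize a device's ccb queue to 'newopenings', remembering the value
 * for tagged devices and adjusting the SIM-wide ccb limit accordingly.
 */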
5031static u_int32_t
5032xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
5033{
5034 int diff;
5035 int result;
5036 struct cam_ed *dev;
5037
5038 dev = path->device;
5039
5040 diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
5041 result = cam_ccbq_resize(&dev->ccbq, newopenings);
5042 if (result == CAM_REQ_CMP && (diff < 0)) {
5043 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
5044 }
5045 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5046 || (dev->inq_flags & SID_CmdQue) != 0)
5047 dev->tag_saved_openings = newopenings;
5048 /* Adjust the global limit */
5049 dev->sim->max_ccbs += diff;
5050 return (result);
5051}
5052
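/*
 * The xpt_find_* lookup routines below return the matching bus, target,
 * or device with its reference count bumped, or NULL if no match exists.
 */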
5053static struct cam_eb *
5054xpt_find_bus(path_id_t path_id)
5055{
5056 struct cam_eb *bus;
5057
5058 mtx_lock(&xsoftc.xpt_topo_lock);
5059 for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
5060 bus != NULL;
5061 bus = TAILQ_NEXT(bus, links)) {
5062 if (bus->path_id == path_id) {
5063 bus->refcount++;
5064 break;
5065 }
5066 }
5067 mtx_unlock(&xsoftc.xpt_topo_lock);
5068 return (bus);
5069}
5070
5071static struct cam_et *
5072xpt_find_target(struct cam_eb *bus, target_id_t target_id)
5073{
5074 struct cam_et *target;
5075
5076 for (target = TAILQ_FIRST(&bus->et_entries);
5077 target != NULL;
5078 target = TAILQ_NEXT(target, links)) {
5079 if (target->target_id == target_id) {
5080 target->refcount++;
5081 break;
5082 }
5083 }
5084 return (target);
5085}
5086
5087static struct cam_ed *
5088xpt_find_device(struct cam_et *target, lun_id_t lun_id)
5089{
5090 struct cam_ed *device;
5091
5092 for (device = TAILQ_FIRST(&target->ed_entries);
5093 device != NULL;
5094 device = TAILQ_NEXT(device, links)) {
5095 if (device->lun_id == lun_id) {
5096 device->refcount++;
5097 break;
5098 }
5099 }
5100 return (device);
5101}
5102
5103typedef struct {
5104 union ccb *request_ccb;
5105 struct ccb_pathinq *cpi;
5106 int counter;
5107} xpt_scan_bus_info;
5108
5109/*
5110 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
5111 * As the scan progresses, xpt_scan_bus is used as the
5112 * completion callback.
5113 */
5114static void
5115xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
5116{
5117 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5118 ("xpt_scan_bus\n"));
5119 switch (request_ccb->ccb_h.func_code) {
5120 case XPT_SCAN_BUS:
5121 {
5122 xpt_scan_bus_info *scan_info;
5123 union ccb *work_ccb;
5124 struct cam_path *path;
5125 u_int i;
5126 u_int max_target;
5127 u_int initiator_id;
5128
5129 /* Find out the characteristics of the bus */
5130 work_ccb = xpt_alloc_ccb_nowait();
5131 if (work_ccb == NULL) {
5132 request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
5133 xpt_done(request_ccb);
5134 return;
5135 }
5136 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
5137 request_ccb->ccb_h.pinfo.priority);
5138 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
5139 xpt_action(work_ccb);
5140 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
5141 request_ccb->ccb_h.status = work_ccb->ccb_h.status;
5142 xpt_free_ccb(work_ccb);
5143 xpt_done(request_ccb);
5144 return;
5145 }
5146
5147 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5148 /*
5149 * Can't scan the bus on an adapter that
5150 * cannot perform the initiator role.
5151 */
5152 request_ccb->ccb_h.status = CAM_REQ_CMP;
5153 xpt_free_ccb(work_ccb);
5154 xpt_done(request_ccb);
5155 return;
5156 }
5157
5158 /* Save some state for use while we probe for devices */
5159		scan_info = (xpt_scan_bus_info *)
5160		    malloc(sizeof(xpt_scan_bus_info), M_CAMXPT, M_NOWAIT);
		if (scan_info == NULL) {
			request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			xpt_free_ccb(work_ccb);
			xpt_done(request_ccb);
			return;
		}
5161		scan_info->request_ccb = request_ccb;
5162 scan_info->cpi = &work_ccb->cpi;
5163
5164 /* Cache on our stack so we can work asynchronously */
5165 max_target = scan_info->cpi->max_target;
5166 initiator_id = scan_info->cpi->initiator_id;
5167
5168
5169 /*
5170 * We can scan all targets in parallel, or do it sequentially.
5171 */
5172 if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5173 max_target = 0;
5174 scan_info->counter = 0;
5175 } else {
5176 scan_info->counter = scan_info->cpi->max_target + 1;
5177 if (scan_info->cpi->initiator_id < scan_info->counter) {
5178 scan_info->counter--;
5179 }
5180 }
5181
5182 for (i = 0; i <= max_target; i++) {
5183 cam_status status;
5184 if (i == initiator_id)
5185 continue;
5186
5187 status = xpt_create_path(&path, xpt_periph,
5188 request_ccb->ccb_h.path_id,
5189 i, 0);
5190 if (status != CAM_REQ_CMP) {
5191 printf("xpt_scan_bus: xpt_create_path failed"
5192 " with status %#x, bus scan halted\n",
5193 status);
5194 free(scan_info, M_CAMXPT);
5195 request_ccb->ccb_h.status = status;
5196 xpt_free_ccb(work_ccb);
5197 xpt_done(request_ccb);
5198 break;
5199 }
5200 work_ccb = xpt_alloc_ccb_nowait();
5201 if (work_ccb == NULL) {
5202 free(scan_info, M_CAMXPT);
5203 xpt_free_path(path);
5204 request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
5205 xpt_done(request_ccb);
5206 break;
5207 }
5208 xpt_setup_ccb(&work_ccb->ccb_h, path,
5209 request_ccb->ccb_h.pinfo.priority);
5210 work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5211 work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5212 work_ccb->ccb_h.ppriv_ptr0 = scan_info;
5213 work_ccb->crcn.flags = request_ccb->crcn.flags;
5214 xpt_action(work_ccb);
5215 }
5216 break;
5217 }
5218 case XPT_SCAN_LUN:
5219 {
5220 cam_status status;
5221 struct cam_path *path;
5222 xpt_scan_bus_info *scan_info;
5223 path_id_t path_id;
5224 target_id_t target_id;
5225 lun_id_t lun_id;
5226
5227 /* Reuse the same CCB to query if a device was really found */
5228 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
5229 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
5230 request_ccb->ccb_h.pinfo.priority);
5231 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5232
5233 path_id = request_ccb->ccb_h.path_id;
5234 target_id = request_ccb->ccb_h.target_id;
5235 lun_id = request_ccb->ccb_h.target_lun;
5236 xpt_action(request_ccb);
5237
5238 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
5239 struct cam_ed *device;
5240 struct cam_et *target;
5241 int phl;
5242
5243 /*
5244 * If we already probed lun 0 successfully, or
5245 * we have additional configured luns on this
5246 * target that might have "gone away", go onto
5247 * the next lun.
5248 */
5249 target = request_ccb->ccb_h.path->target;
5250 /*
5251 * We may touch devices that we don't
5252 * hold references too, so ensure they
5253			 * hold references to, so ensure they
5254 * The target above is referenced by the
5255 * path in the request ccb.
5256 */
5257 phl = 0;
5258 device = TAILQ_FIRST(&target->ed_entries);
5259 if (device != NULL) {
5260 phl = CAN_SRCH_HI_SPARSE(device);
5261 if (device->lun_id == 0)
5262 device = TAILQ_NEXT(device, links);
5263 }
5264 if ((lun_id != 0) || (device != NULL)) {
5265 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
5266 lun_id++;
5267 }
5268 } else {
5269 struct cam_ed *device;
5270
5271 device = request_ccb->ccb_h.path->device;
5272
5273 if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
5274 /* Try the next lun */
5275 if (lun_id < (CAM_SCSI2_MAXLUN-1)
5276 || CAN_SRCH_HI_DENSE(device))
5277 lun_id++;
5278 }
5279 }
5280
5281 /*
5282 * Free the current request path- we're done with it.
5283 */
5284 xpt_free_path(request_ccb->ccb_h.path);
5285
5286 /*
5287		 * Check to see if we should scan any further luns.
5288 */
5289 if (lun_id == request_ccb->ccb_h.target_lun
5290 || lun_id > scan_info->cpi->max_lun) {
5291 int done;
5292
5293 hop_again:
5294 done = 0;
5295 if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5296 scan_info->counter++;
5297 if (scan_info->counter ==
5298 scan_info->cpi->initiator_id) {
5299 scan_info->counter++;
5300 }
5301 if (scan_info->counter >=
5302 scan_info->cpi->max_target+1) {
5303 done = 1;
5304 }
5305 } else {
5306 scan_info->counter--;
5307 if (scan_info->counter == 0) {
5308 done = 1;
5309 }
5310 }
5311 if (done) {
5312 xpt_free_ccb(request_ccb);
5313 xpt_free_ccb((union ccb *)scan_info->cpi);
5314 request_ccb = scan_info->request_ccb;
5315 free(scan_info, M_CAMXPT);
5316 request_ccb->ccb_h.status = CAM_REQ_CMP;
5317 xpt_done(request_ccb);
5318 break;
5319 }
5320
5321 if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
5322 break;
5323 }
5324 status = xpt_create_path(&path, xpt_periph,
5325 scan_info->request_ccb->ccb_h.path_id,
5326 scan_info->counter, 0);
5327 if (status != CAM_REQ_CMP) {
5328 printf("xpt_scan_bus: xpt_create_path failed"
5329 " with status %#x, bus scan halted\n",
5330 status);
5331 xpt_free_ccb(request_ccb);
5332 xpt_free_ccb((union ccb *)scan_info->cpi);
5333 request_ccb = scan_info->request_ccb;
5334 free(scan_info, M_CAMXPT);
5335 request_ccb->ccb_h.status = status;
5336 xpt_done(request_ccb);
5337 break;
5338 }
5339 xpt_setup_ccb(&request_ccb->ccb_h, path,
5340 request_ccb->ccb_h.pinfo.priority);
5341 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5342 request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5343 request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5344 request_ccb->crcn.flags =
5345 scan_info->request_ccb->crcn.flags;
5346 } else {
5347 status = xpt_create_path(&path, xpt_periph,
5348 path_id, target_id, lun_id);
5349 if (status != CAM_REQ_CMP) {
5350 printf("xpt_scan_bus: xpt_create_path failed "
5351 "with status %#x, halting LUN scan\n",
5352 status);
5353 goto hop_again;
5354 }
5355 xpt_setup_ccb(&request_ccb->ccb_h, path,
5356 request_ccb->ccb_h.pinfo.priority);
5357 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5358 request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5359 request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5360 request_ccb->crcn.flags =
5361 scan_info->request_ccb->crcn.flags;
5362 }
5363 xpt_action(request_ccb);
5364 break;
5365 }
5366 default:
5367 break;
5368 }
5369}
5370
5371typedef enum {
5372 PROBE_TUR,
5373 PROBE_INQUIRY, /* this counts as DV0 for Basic Domain Validation */
5374 PROBE_FULL_INQUIRY,
5375 PROBE_MODE_SENSE,
5376 PROBE_SERIAL_NUM_0,
5377 PROBE_SERIAL_NUM_1,
5378 PROBE_TUR_FOR_NEGOTIATION,
5379 PROBE_INQUIRY_BASIC_DV1,
5380 PROBE_INQUIRY_BASIC_DV2,
5381 PROBE_DV_EXIT
5382} probe_action;
5383
5384typedef enum {
5385 PROBE_INQUIRY_CKSUM = 0x01,
5386 PROBE_SERIAL_CKSUM = 0x02,
5387 PROBE_NO_ANNOUNCE = 0x04
5388} probe_flags;
5389
5390typedef struct {
5391 TAILQ_HEAD(, ccb_hdr) request_ccbs;
5392 probe_action action;
5393 union ccb saved_ccb;
5394 probe_flags flags;
5395 MD5_CTX context;
5396 u_int8_t digest[16];
5397} probe_softc;
5398
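/*
 * Scan a single lun.  If a probe periph already exists on the path, the
 * request is queued to it; otherwise a new probe periph is allocated to
 * carry out the probe.
 */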
5399static void
5400xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5401 cam_flags flags, union ccb *request_ccb)
5402{
5403 struct ccb_pathinq cpi;
5404 cam_status status;
5405 struct cam_path *new_path;
5406 struct cam_periph *old_periph;
5407
5408 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5409 ("xpt_scan_lun\n"));
5410
5411 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5412 cpi.ccb_h.func_code = XPT_PATH_INQ;
5413 xpt_action((union ccb *)&cpi);
5414
5415 if (cpi.ccb_h.status != CAM_REQ_CMP) {
5416 if (request_ccb != NULL) {
5417 request_ccb->ccb_h.status = cpi.ccb_h.status;
5418 xpt_done(request_ccb);
5419 }
5420 return;
5421 }
5422
5423 if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5424 /*
5425 * Can't scan the bus on an adapter that
5426 * cannot perform the initiator role.
5427 */
5428 if (request_ccb != NULL) {
5429 request_ccb->ccb_h.status = CAM_REQ_CMP;
5430 xpt_done(request_ccb);
5431 }
5432 return;
5433 }
5434
5435 if (request_ccb == NULL) {
5436 request_ccb = malloc(sizeof(union ccb), M_CAMXPT, M_NOWAIT);
5437 if (request_ccb == NULL) {
5438 xpt_print(path, "xpt_scan_lun: can't allocate CCB, "
5439 "can't continue\n");
5440 return;
5441 }
5442 new_path = malloc(sizeof(*new_path), M_CAMXPT, M_NOWAIT);
5443 if (new_path == NULL) {
5444 xpt_print(path, "xpt_scan_lun: can't allocate path, "
5445 "can't continue\n");
5446 free(request_ccb, M_CAMXPT);
5447 return;
5448 }
5449 status = xpt_compile_path(new_path, xpt_periph,
5450 path->bus->path_id,
5451 path->target->target_id,
5452 path->device->lun_id);
5453
5454 if (status != CAM_REQ_CMP) {
5455 xpt_print(path, "xpt_scan_lun: can't compile path, "
5456 "can't continue\n");
5457 free(request_ccb, M_CAMXPT);
5458 free(new_path, M_CAMXPT);
5459 return;
5460 }
5461 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5462 request_ccb->ccb_h.cbfcnp = xptscandone;
5463 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5464 request_ccb->crcn.flags = flags;
5465 }
5466
5467 if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5468 probe_softc *softc;
5469
5470 softc = (probe_softc *)old_periph->softc;
5471 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5472 periph_links.tqe);
5473 } else {
5474 status = cam_periph_alloc(proberegister, NULL, probecleanup,
5475 probestart, "probe",
5476 CAM_PERIPH_BIO,
5477 request_ccb->ccb_h.path, NULL, 0,
5478 request_ccb);
5479
5480 if (status != CAM_REQ_CMP) {
5481 xpt_print(path, "xpt_scan_lun: cam_alloc_periph "
5482 "returned an error, can't continue probe\n");
5483 request_ccb->ccb_h.status = status;
5484 xpt_done(request_ccb);
5485 }
5486 }
5487}
5488
5489static void
5490xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5491{
5492 xpt_release_path(done_ccb->ccb_h.path);
5493 free(done_ccb->ccb_h.path, M_CAMXPT);
5494 free(done_ccb, M_CAMXPT);
5495}
5496
5497static cam_status
5498proberegister(struct cam_periph *periph, void *arg)
5499{
5500 union ccb *request_ccb; /* CCB representing the probe request */
5501 cam_status status;
5502 probe_softc *softc;
5503
5504 request_ccb = (union ccb *)arg;
5505 if (periph == NULL) {
5506 printf("proberegister: periph was NULL!!\n");
5507 return(CAM_REQ_CMP_ERR);
5508 }
5509
5510 if (request_ccb == NULL) {
5511 printf("proberegister: no probe CCB, "
5512 "can't register device\n");
5513 return(CAM_REQ_CMP_ERR);
5514 }
5515
5516 softc = (probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_NOWAIT);
5517
5518 if (softc == NULL) {
5519 printf("proberegister: Unable to probe new device. "
5520 "Unable to allocate softc\n");
5521 return(CAM_REQ_CMP_ERR);
5522 }
5523 TAILQ_INIT(&softc->request_ccbs);
5524 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5525 periph_links.tqe);
5526 softc->flags = 0;
5527 periph->softc = softc;
5528 status = cam_periph_acquire(periph);
5529 if (status != CAM_REQ_CMP) {
5530 return (status);
5531 }
5532
5533
5534 /*
5535 * Ensure we've waited at least a bus settle
5536 * delay before attempting to probe the device.
5537 * For HBAs that don't do bus resets, this won't make a difference.
5538 */
5539 cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5540 scsi_delay);
5541 probeschedule(periph);
5542 return(CAM_REQ_CMP);
5543}
5544
5545static void
5546probeschedule(struct cam_periph *periph)
5547{
5548 struct ccb_pathinq cpi;
5549 union ccb *ccb;
5550 probe_softc *softc;
5551
5552 softc = (probe_softc *)periph->softc;
5553 ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5554
5555 xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5556 cpi.ccb_h.func_code = XPT_PATH_INQ;
5557 xpt_action((union ccb *)&cpi);
5558
5559 /*
5560 * If a device has gone away and another device, or the same one,
5561 * is back in the same place, it should have a unit attention
5562 * condition pending. It will not report the unit attention in
5563 * response to an inquiry, which may leave invalid transfer
5564 * negotiations in effect. The TUR will reveal the unit attention
5565 * condition. Only send the TUR for lun 0, since some devices
5566 * will get confused by commands other than inquiry to non-existent
5567 * luns. If you think a device has gone away start your scan from
5568	 * luns. If you think a device has gone away, start your scan from
5569	 * lun 0. This will ensure that any bogus transfer settings are
5570 *
5571 * If we haven't seen the device before and the controller supports
5572 * some kind of transfer negotiation, negotiate with the first
5573 * sent command if no bus reset was performed at startup. This
5574 * ensures that the device is not confused by transfer negotiation
5575 * settings left over by loader or BIOS action.
5576 */
5577 if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5578 && (ccb->ccb_h.target_lun == 0)) {
5579 softc->action = PROBE_TUR;
5580 } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5581 && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5582 proberequestdefaultnegotiation(periph);
5583 softc->action = PROBE_INQUIRY;
5584 } else {
5585 softc->action = PROBE_INQUIRY;
5586 }
5587
5588 if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5589 softc->flags |= PROBE_NO_ANNOUNCE;
5590 else
5591 softc->flags &= ~PROBE_NO_ANNOUNCE;
5592
5593 xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5594}
5595
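/*
 * Start routine for the probe periph: build and dispatch the SCSI
 * command appropriate to the current probe state (TUR, INQUIRY, MODE
 * SENSE, or VPD INQUIRY for the supported page list and serial number).
 */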
5596static void
5597probestart(struct cam_periph *periph, union ccb *start_ccb)
5598{
5599 /* Probe the device that our peripheral driver points to */
5600 struct ccb_scsiio *csio;
5601 probe_softc *softc;
5602
5603 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5604
5605 softc = (probe_softc *)periph->softc;
5606 csio = &start_ccb->csio;
5607
5608 switch (softc->action) {
5609 case PROBE_TUR:
5610 case PROBE_TUR_FOR_NEGOTIATION:
5611 case PROBE_DV_EXIT:
5612 {
5613 scsi_test_unit_ready(csio,
5614 /*retries*/4,
5615 probedone,
5616 MSG_SIMPLE_Q_TAG,
5617 SSD_FULL_SIZE,
5618 /*timeout*/60000);
5619 break;
5620 }
5621 case PROBE_INQUIRY:
5622 case PROBE_FULL_INQUIRY:
5623 case PROBE_INQUIRY_BASIC_DV1:
5624 case PROBE_INQUIRY_BASIC_DV2:
5625 {
5626 u_int inquiry_len;
5627 struct scsi_inquiry_data *inq_buf;
5628
5629 inq_buf = &periph->path->device->inq_data;
5630
5631 /*
5632 * If the device is currently configured, we calculate an
5633 * MD5 checksum of the inquiry data, and if the serial number
5634 * length is greater than 0, add the serial number data
5635 * into the checksum as well. Once the inquiry and the
5636 * serial number check finish, we attempt to figure out
5637 * whether we still have the same device.
5638 */
5639 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5640
5641 MD5Init(&softc->context);
5642 MD5Update(&softc->context, (unsigned char *)inq_buf,
5643 sizeof(struct scsi_inquiry_data));
5644 softc->flags |= PROBE_INQUIRY_CKSUM;
5645 if (periph->path->device->serial_num_len > 0) {
5646 MD5Update(&softc->context,
5647 periph->path->device->serial_num,
5648 periph->path->device->serial_num_len);
5649 softc->flags |= PROBE_SERIAL_CKSUM;
5650 }
5651 MD5Final(softc->digest, &softc->context);
5652 }
5653
5654 if (softc->action == PROBE_INQUIRY)
5655 inquiry_len = SHORT_INQUIRY_LENGTH;
5656 else
5657 inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf);
5658
5659 /*
5660 * Some parallel SCSI devices fail to send an
5661 * ignore wide residue message when dealing with
5662 * odd length inquiry requests. Round up to be
5663 * safe.
5664 */
5665 inquiry_len = roundup2(inquiry_len, 2);
5666
5667 if (softc->action == PROBE_INQUIRY_BASIC_DV1
5668 || softc->action == PROBE_INQUIRY_BASIC_DV2) {
5669 inq_buf = malloc(inquiry_len, M_CAMXPT, M_NOWAIT);
5670 }
5671 if (inq_buf == NULL) {
5672			xpt_print(periph->path, "malloc failure - skipping Basic "
5673 "Domain Validation\n");
5674 softc->action = PROBE_DV_EXIT;
5675 scsi_test_unit_ready(csio,
5676 /*retries*/4,
5677 probedone,
5678 MSG_SIMPLE_Q_TAG,
5679 SSD_FULL_SIZE,
5680 /*timeout*/60000);
5681 break;
5682 }
5683 scsi_inquiry(csio,
5684 /*retries*/4,
5685 probedone,
5686 MSG_SIMPLE_Q_TAG,
5687 (u_int8_t *)inq_buf,
5688 inquiry_len,
5689 /*evpd*/FALSE,
5690 /*page_code*/0,
5691 SSD_MIN_SIZE,
5692 /*timeout*/60 * 1000);
5693 break;
5694 }
5695 case PROBE_MODE_SENSE:
5696 {
5697 void *mode_buf;
5698 int mode_buf_len;
5699
5700 mode_buf_len = sizeof(struct scsi_mode_header_6)
5701 + sizeof(struct scsi_mode_blk_desc)
5702 + sizeof(struct scsi_control_page);
5703 mode_buf = malloc(mode_buf_len, M_CAMXPT, M_NOWAIT);
5704 if (mode_buf != NULL) {
5705 scsi_mode_sense(csio,
5706 /*retries*/4,
5707 probedone,
5708 MSG_SIMPLE_Q_TAG,
5709 /*dbd*/FALSE,
5710 SMS_PAGE_CTRL_CURRENT,
5711 SMS_CONTROL_MODE_PAGE,
5712 mode_buf,
5713 mode_buf_len,
5714 SSD_FULL_SIZE,
5715 /*timeout*/60000);
5716 break;
5717 }
5718 xpt_print(periph->path, "Unable to mode sense control page - "
5719 "malloc failure\n");
5720 softc->action = PROBE_SERIAL_NUM_0;
5721 }
5722 /* FALLTHROUGH */
5723 case PROBE_SERIAL_NUM_0:
5724 {
5725 struct scsi_vpd_supported_page_list *vpd_list = NULL;
5726 struct cam_ed *device;
5727
5728 device = periph->path->device;
5729 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) {
5730 vpd_list = malloc(sizeof(*vpd_list), M_CAMXPT,
5731 M_NOWAIT | M_ZERO);
5732 }
5733
5734 if (vpd_list != NULL) {
5735 scsi_inquiry(csio,
5736 /*retries*/4,
5737 probedone,
5738 MSG_SIMPLE_Q_TAG,
5739 (u_int8_t *)vpd_list,
5740 sizeof(*vpd_list),
5741 /*evpd*/TRUE,
5742 SVPD_SUPPORTED_PAGE_LIST,
5743 SSD_MIN_SIZE,
5744 /*timeout*/60 * 1000);
5745 break;
5746 }
5747 /*
5748		 * We'll have to do without; let our probedone
5749 * routine finish up for us.
5750 */
5751 start_ccb->csio.data_ptr = NULL;
5752 probedone(periph, start_ccb);
5753 return;
5754 }
5755 case PROBE_SERIAL_NUM_1:
5756 {
5757 struct scsi_vpd_unit_serial_number *serial_buf;
5758 struct cam_ed* device;
5759
5760 serial_buf = NULL;
5761 device = periph->path->device;
5762 device->serial_num = NULL;
5763 device->serial_num_len = 0;
5764
5765 serial_buf = (struct scsi_vpd_unit_serial_number *)
5766 malloc(sizeof(*serial_buf), M_CAMXPT, M_NOWAIT|M_ZERO);
5767
5768 if (serial_buf != NULL) {
5769 scsi_inquiry(csio,
5770 /*retries*/4,
5771 probedone,
5772 MSG_SIMPLE_Q_TAG,
5773 (u_int8_t *)serial_buf,
5774 sizeof(*serial_buf),
5775 /*evpd*/TRUE,
5776 SVPD_UNIT_SERIAL_NUMBER,
5777 SSD_MIN_SIZE,
5778 /*timeout*/60 * 1000);
5779 break;
5780 }
5781 /*
5782		 * We'll have to do without; let our probedone
5783 * routine finish up for us.
5784 */
5785 start_ccb->csio.data_ptr = NULL;
5786 probedone(periph, start_ccb);
5787 return;
5788 }
5789 }
5790 xpt_action(start_ccb);
5791}
5792
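/*
 * Fetch the user (default) transfer settings for the path and re-issue
 * them as the current settings, giving the SIM a known baseline to
 * negotiate from.
 */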
5793static void
5794proberequestdefaultnegotiation(struct cam_periph *periph)
5795{
5796 struct ccb_trans_settings cts;
5797
5798 xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5799 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5800 cts.type = CTS_TYPE_USER_SETTINGS;
5801 xpt_action((union ccb *)&cts);
5802 if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5803 return;
5804 }
5805 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5806 cts.type = CTS_TYPE_CURRENT_SETTINGS;
5807 xpt_action((union ccb *)&cts);
5808}
5809
5810/*
5811 * Backoff Negotiation Code, only pertinent for SPI devices.
5812 */
5813static int
5814proberequestbackoff(struct cam_periph *periph, struct cam_ed *device)
5815{
5816 struct ccb_trans_settings cts;
5817 struct ccb_trans_settings_spi *spi;
5818
5819 memset(&cts, 0, sizeof (cts));
5820 xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5821 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5822 cts.type = CTS_TYPE_CURRENT_SETTINGS;
5823 xpt_action((union ccb *)&cts);
5824 if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5825 if (bootverbose) {
5826 xpt_print(periph->path,
5827 "failed to get current device settings\n");
5828 }
5829 return (0);
5830 }
5831 if (cts.transport != XPORT_SPI) {
5832 if (bootverbose) {
5833 xpt_print(periph->path, "not SPI transport\n");
5834 }
5835 return (0);
5836 }
5837 spi = &cts.xport_specific.spi;
5838
5839 /*
5840 * We cannot renegotiate sync rate if we don't have one.
5841 */
5842 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
5843 if (bootverbose) {
5844 xpt_print(periph->path, "no sync rate known\n");
5845 }
5846 return (0);
5847 }
5848
5849 /*
5850 * We'll assert that we don't have to touch PPR options- the
5851 * SIM will see what we do with period and offset and adjust
5852 * the PPR options as appropriate.
5853 */
5854
5855 /*
5856 * A sync rate with unknown or zero offset is nonsensical.
5857 * A sync period of zero means Async.
5858 */
5859 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0
5860 || spi->sync_offset == 0 || spi->sync_period == 0) {
5861 if (bootverbose) {
5862 xpt_print(periph->path, "no sync rate available\n");
5863 }
5864 return (0);
5865 }
5866
5867 if (device->flags & CAM_DEV_DV_HIT_BOTTOM) {
5868 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5869 ("hit async: giving up on DV\n"));
5870 return (0);
5871 }
5872
5873
5874 /*
5875 * Jump sync_period up by one, but stop at 5MHz and fall back to Async.
5876 * We don't try to remember 'last' settings to see if the SIM actually
5877	 * gets into the speed we want to set. We rely on the SIM telling
5878	 * us that a requested speed is bad, but otherwise don't try to
5879 * check the speed due to the asynchronous and handshake nature
5880 * of speed setting.
5881 */
5882 spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET;
5883 for (;;) {
5884 spi->sync_period++;
5885 if (spi->sync_period >= 0xf) {
5886 spi->sync_period = 0;
5887 spi->sync_offset = 0;
5888 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5889 ("setting to async for DV\n"));
5890 /*
5891 * Once we hit async, we don't want to try
5892 * any more settings.
5893 */
5894 device->flags |= CAM_DEV_DV_HIT_BOTTOM;
5895 } else if (bootverbose) {
5896 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5897 ("DV: period 0x%x\n", spi->sync_period));
5898 printf("setting period to 0x%x\n", spi->sync_period);
5899 }
5900 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5901 cts.type = CTS_TYPE_CURRENT_SETTINGS;
5902 xpt_action((union ccb *)&cts);
5903 if ((cts.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5904 break;
5905 }
5906 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5907 ("DV: failed to set period 0x%x\n", spi->sync_period));
5908 if (spi->sync_period == 0) {
5909 return (0);
5910 }
5911 }
5912 return (1);
5913}
5914
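/*
 * Completion handler for the probe state machine.  Each case examines
 * the result of the previous step, advances softc->action, and
 * reschedules the periph; when a probe completes, the pending request
 * ccb is finished and the periph is released unless more scan requests
 * are queued.
 */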
5915static void
5916probedone(struct cam_periph *periph, union ccb *done_ccb)
5917{
5918 probe_softc *softc;
5919 struct cam_path *path;
5920 u_int32_t priority;
5921
5922 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
5923
5924 softc = (probe_softc *)periph->softc;
5925 path = done_ccb->ccb_h.path;
5926 priority = done_ccb->ccb_h.pinfo.priority;
5927
5928 switch (softc->action) {
5929 case PROBE_TUR:
5930 {
5931 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5932
5933 if (cam_periph_error(done_ccb, 0,
5934 SF_NO_PRINT, NULL) == ERESTART)
5935 return;
5936 else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5937 /* Don't wedge the queue */
5938 xpt_release_devq(done_ccb->ccb_h.path,
5939 /*count*/1,
5940 /*run_queue*/TRUE);
5941 }
5942 softc->action = PROBE_INQUIRY;
5943 xpt_release_ccb(done_ccb);
5944 xpt_schedule(periph, priority);
5945 return;
5946 }
5947 case PROBE_INQUIRY:
5948 case PROBE_FULL_INQUIRY:
5949 {
5950 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5951 struct scsi_inquiry_data *inq_buf;
5952 u_int8_t periph_qual;
5953
5954 path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
5955 inq_buf = &path->device->inq_data;
5956
5957 periph_qual = SID_QUAL(inq_buf);
5958
5959 switch(periph_qual) {
5960 case SID_QUAL_LU_CONNECTED:
5961 {
5962 u_int8_t len;
5963
5964 /*
5965 * We conservatively request only
5966				 * SHORT_INQUIRY_LENGTH bytes of inquiry
5967 * information during our first try
5968 * at sending an INQUIRY. If the device
5969 * has more information to give,
5970 * perform a second request specifying
5971 * the amount of information the device
5972 * is willing to give.
5973 */
5974 len = inq_buf->additional_length
5975 + offsetof(struct scsi_inquiry_data,
5976 additional_length) + 1;
5977 if (softc->action == PROBE_INQUIRY
5978 && len > SHORT_INQUIRY_LENGTH) {
5979 softc->action = PROBE_FULL_INQUIRY;
5980 xpt_release_ccb(done_ccb);
5981 xpt_schedule(periph, priority);
5982 return;
5983 }
5984
5985 xpt_find_quirk(path->device);
5986
5987 xpt_devise_transport(path);
5988 if (INQ_DATA_TQ_ENABLED(inq_buf))
5989 softc->action = PROBE_MODE_SENSE;
5990 else
5991 softc->action = PROBE_SERIAL_NUM_0;
5992
5993 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5994
5995 xpt_release_ccb(done_ccb);
5996 xpt_schedule(periph, priority);
5997 return;
5998 }
5999 default:
6000 break;
6001 }
6002 } else if (cam_periph_error(done_ccb, 0,
6003 done_ccb->ccb_h.target_lun > 0
6004 ? SF_RETRY_UA|SF_QUIET_IR
6005 : SF_RETRY_UA,
6006 &softc->saved_ccb) == ERESTART) {
6007 return;
6008 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6009 /* Don't wedge the queue */
6010 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6011 /*run_queue*/TRUE);
6012 }
6013 /*
6014 * If we get to this point, we got an error status back
6015 * from the inquiry and the error status doesn't require
6016 * automatically retrying the command. Therefore, the
6017 * inquiry failed. If we had inquiry information before
6018 * for this device, but this latest inquiry command failed,
6019 * the device has probably gone away. If this device isn't
6020 * already marked unconfigured, notify the peripheral
6021 * drivers that this device is no more.
6022 */
6023 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
6024 /* Send the async notification. */
6025 xpt_async(AC_LOST_DEVICE, path, NULL);
6026
6027 xpt_release_ccb(done_ccb);
6028 break;
6029 }
6030 case PROBE_MODE_SENSE:
6031 {
6032 struct ccb_scsiio *csio;
6033 struct scsi_mode_header_6 *mode_hdr;
6034
6035 csio = &done_ccb->csio;
6036 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
6037 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
6038 struct scsi_control_page *page;
6039 u_int8_t *offset;
6040
6041 offset = ((u_int8_t *)&mode_hdr[1])
6042 + mode_hdr->blk_desc_len;
6043 page = (struct scsi_control_page *)offset;
6044 path->device->queue_flags = page->queue_flags;
6045 } else if (cam_periph_error(done_ccb, 0,
6046 SF_RETRY_UA|SF_NO_PRINT,
6047 &softc->saved_ccb) == ERESTART) {
6048 return;
6049 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6050 /* Don't wedge the queue */
6051 xpt_release_devq(done_ccb->ccb_h.path,
6052 /*count*/1, /*run_queue*/TRUE);
6053 }
6054 xpt_release_ccb(done_ccb);
6055 free(mode_hdr, M_CAMXPT);
6056 softc->action = PROBE_SERIAL_NUM_0;
6057 xpt_schedule(periph, priority);
6058 return;
6059 }
6060 case PROBE_SERIAL_NUM_0:
6061 {
6062 struct ccb_scsiio *csio;
6063 struct scsi_vpd_supported_page_list *page_list;
6064 int length, serialnum_supported, i;
6065
6066 serialnum_supported = 0;
6067 csio = &done_ccb->csio;
6068 page_list =
6069 (struct scsi_vpd_supported_page_list *)csio->data_ptr;
6070
6071 if (page_list == NULL) {
6072 /*
6073 * Don't process the command as it was never sent
6074 */
6075 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
6076 && (page_list->length > 0)) {
6077 length = min(page_list->length,
6078 SVPD_SUPPORTED_PAGES_SIZE);
6079 for (i = 0; i < length; i++) {
6080 if (page_list->list[i] ==
6081 SVPD_UNIT_SERIAL_NUMBER) {
6082 serialnum_supported = 1;
6083 break;
6084 }
6085 }
6086 } else if (cam_periph_error(done_ccb, 0,
6087 SF_RETRY_UA|SF_NO_PRINT,
6088 &softc->saved_ccb) == ERESTART) {
6089 return;
6090 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6091 /* Don't wedge the queue */
6092 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6093 /*run_queue*/TRUE);
6094 }
6095
6096 if (page_list != NULL)
6097 free(page_list, M_DEVBUF);
6098
6099 if (serialnum_supported) {
6100 xpt_release_ccb(done_ccb);
6101 softc->action = PROBE_SERIAL_NUM_1;
6102 xpt_schedule(periph, priority);
6103 return;
6104 }
6105 xpt_release_ccb(done_ccb);
6106 softc->action = PROBE_TUR_FOR_NEGOTIATION;
6107 xpt_schedule(periph, done_ccb->ccb_h.pinfo.priority);
6108 return;
6109 }
6110
6111 case PROBE_SERIAL_NUM_1:
6112 {
6113 struct ccb_scsiio *csio;
6114 struct scsi_vpd_unit_serial_number *serial_buf;
6115 u_int32_t priority;
6116 int changed;
6117 int have_serialnum;
6118
6119 changed = 1;
6120 have_serialnum = 0;
6121 csio = &done_ccb->csio;
6122 priority = done_ccb->ccb_h.pinfo.priority;
6123 serial_buf =
6124 (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
6125
6126 /* Clean up from previous instance of this device */
6127 if (path->device->serial_num != NULL) {
6128 free(path->device->serial_num, M_CAMXPT);
6129 path->device->serial_num = NULL;
6130 path->device->serial_num_len = 0;
6131 }
6132
6133 if (serial_buf == NULL) {
6134 /*
6135 * Don't process the command as it was never sent
6136 */
6137 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
6138 && (serial_buf->length > 0)) {
6139
6140 have_serialnum = 1;
6141 path->device->serial_num =
6142 (u_int8_t *)malloc((serial_buf->length + 1),
6143 M_CAMXPT, M_NOWAIT);
6144 if (path->device->serial_num != NULL) {
6145 bcopy(serial_buf->serial_num,
6146 path->device->serial_num,
6147 serial_buf->length);
6148 path->device->serial_num_len =
6149 serial_buf->length;
6150 path->device->serial_num[serial_buf->length]
6151 = '\0';
6152 }
6153 } else if (cam_periph_error(done_ccb, 0,
6154 SF_RETRY_UA|SF_NO_PRINT,
6155 &softc->saved_ccb) == ERESTART) {
6156 return;
6157 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6158 /* Don't wedge the queue */
6159 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6160 /*run_queue*/TRUE);
6161 }
6162
6163 /*
6164 * Let's see if we have seen this device before.
6165 */
6166 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
6167 MD5_CTX context;
6168 u_int8_t digest[16];
6169
6170 MD5Init(&context);
6171
6172 MD5Update(&context,
6173 (unsigned char *)&path->device->inq_data,
6174 sizeof(struct scsi_inquiry_data));
6175
6176 if (have_serialnum)
6177 MD5Update(&context, serial_buf->serial_num,
6178 serial_buf->length);
6179
6180 MD5Final(digest, &context);
6181 if (bcmp(softc->digest, digest, 16) == 0)
6182 changed = 0;
6183
6184 /*
6185 * XXX Do we need to do a TUR in order to ensure
6186 * that the device really hasn't changed???
6187 */
6188 if ((changed != 0)
6189 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
6190 xpt_async(AC_LOST_DEVICE, path, NULL);
6191 }
6192 if (serial_buf != NULL)
6193 free(serial_buf, M_CAMXPT);
6194
6195 if (changed != 0) {
6196 /*
6197 * Now that we have all the necessary
6198 * information to safely perform transfer
6199 * negotiations... Controllers don't perform
6200 * any negotiation or tagged queuing until
6201 * after the first XPT_SET_TRAN_SETTINGS ccb is
6202 * received. So, on a new device, just retrieve
6203 * the user settings, and set them as the current
6204 * settings to set the device up.
6205 */
6206 proberequestdefaultnegotiation(periph);
6207 xpt_release_ccb(done_ccb);
6208
6209 /*
6210 * Perform a TUR to allow the controller to
6211 * perform any necessary transfer negotiation.
6212 */
6213 softc->action = PROBE_TUR_FOR_NEGOTIATION;
6214 xpt_schedule(periph, priority);
6215 return;
6216 }
6217 xpt_release_ccb(done_ccb);
6218 break;
6219 }
6220 case PROBE_TUR_FOR_NEGOTIATION:
6221 case PROBE_DV_EXIT:
6222 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6223 /* Don't wedge the queue */
6224 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6225 /*run_queue*/TRUE);
6226 }
6227 /*
6228 * Do Domain Validation for lun 0 on devices that claim
6229 * to support Synchronous Transfer modes.
6230 */
6231 if (softc->action == PROBE_TUR_FOR_NEGOTIATION
6232 && done_ccb->ccb_h.target_lun == 0
6233 && (path->device->inq_data.flags & SID_Sync) != 0
6234 && (path->device->flags & CAM_DEV_IN_DV) == 0) {
6235 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6236 ("Begin Domain Validation\n"));
6237 path->device->flags |= CAM_DEV_IN_DV;
6238 xpt_release_ccb(done_ccb);
6239 softc->action = PROBE_INQUIRY_BASIC_DV1;
6240 xpt_schedule(periph, priority);
6241 return;
6242 }
6243 if (softc->action == PROBE_DV_EXIT) {
6244 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6245 ("Leave Domain Validation\n"));
6246 }
6247 path->device->flags &=
6248 ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
6249 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
6250 /* Inform the XPT that a new device has been found */
6251 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
6252 xpt_action(done_ccb);
6253 xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
6254 done_ccb);
6255 }
6256 xpt_release_ccb(done_ccb);
6257 break;
6258 case PROBE_INQUIRY_BASIC_DV1:
6259 case PROBE_INQUIRY_BASIC_DV2:
6260 {
6261 struct scsi_inquiry_data *nbuf;
6262 struct ccb_scsiio *csio;
6263
6264 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6265 /* Don't wedge the queue */
6266 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6267 /*run_queue*/TRUE);
6268 }
6269 csio = &done_ccb->csio;
6270 nbuf = (struct scsi_inquiry_data *)csio->data_ptr;
6271 if (bcmp(nbuf, &path->device->inq_data, SHORT_INQUIRY_LENGTH)) {
6272 xpt_print(path,
6273 "inquiry data fails comparison at DV%d step\n",
6274 softc->action == PROBE_INQUIRY_BASIC_DV1 ? 1 : 2);
6275 if (proberequestbackoff(periph, path->device)) {
6276 path->device->flags &= ~CAM_DEV_IN_DV;
6277 softc->action = PROBE_TUR_FOR_NEGOTIATION;
6278 } else {
6279 /* give up */
6280 softc->action = PROBE_DV_EXIT;
6281 }
6282 free(nbuf, M_CAMXPT);
6283 xpt_release_ccb(done_ccb);
6284 xpt_schedule(periph, priority);
6285 return;
6286 }
6287 free(nbuf, M_CAMXPT);
6288 if (softc->action == PROBE_INQUIRY_BASIC_DV1) {
6289 softc->action = PROBE_INQUIRY_BASIC_DV2;
6290 xpt_release_ccb(done_ccb);
6291 xpt_schedule(periph, priority);
6292 return;
6293 }
6294 if (softc->action == PROBE_DV_EXIT) {
6295 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6296 ("Leave Domain Validation Successfully\n"));
6297 }
6298 path->device->flags &=
6299 ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
6300 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
6301 /* Inform the XPT that a new device has been found */
6302 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
6303 xpt_action(done_ccb);
6304 xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
6305 done_ccb);
6306 }
6307 xpt_release_ccb(done_ccb);
6308 break;
6309 }
6310 }
6311 done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
6312 TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
6313 done_ccb->ccb_h.status = CAM_REQ_CMP;
6314 xpt_done(done_ccb);
6315 if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
6316 cam_periph_invalidate(periph);
6317 cam_periph_release(periph);
6318 } else {
6319 probeschedule(periph);
6320 }
6321}
6322
6323static void
6324probecleanup(struct cam_periph *periph)
6325{
6326 free(periph->softc, M_CAMXPT);
6327}
6328
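/*
 * Match the device's inquiry data against the static quirk table.  The
 * table ends in a wildcard entry, so failing to match is a panic.
 */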
6329static void
6330xpt_find_quirk(struct cam_ed *device)
6331{
6332 caddr_t match;
6333
6334 match = cam_quirkmatch((caddr_t)&device->inq_data,
6335 (caddr_t)xpt_quirk_table,
6336 sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
6337 sizeof(*xpt_quirk_table), scsi_inquiry_match);
6338
6339 if (match == NULL)
6340 panic("xpt_find_quirk: device didn't match wildcard entry!!");
6341
6342 device->quirk = (struct xpt_quirk_entry *)match;
6343}
6344
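/*
 * Sysctl handler for the cam_srch_hi tunable; only the values 0 and 1
 * are accepted.
 */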
6345static int
6346sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
6347{
6348 int error, bool;
6349
6350 bool = cam_srch_hi;
6351 error = sysctl_handle_int(oidp, &bool, 0, req);
6352 if (error != 0 || req->newptr == NULL)
6353 return (error);
6354 if (bool == 0 || bool == 1) {
6355 cam_srch_hi = bool;
6356 return (0);
6357 } else {
6358 return (EINVAL);
6359 }
6360}
6361
6362
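/*
 * Determine the protocol and transport (and their versions) for a newly
 * found device from the path inquiry and, when available, the device's
 * inquiry data, then inform the SIM via XPT_SET_TRAN_SETTINGS.
 */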
6363static void
6364xpt_devise_transport(struct cam_path *path)
6365{
6366 struct ccb_pathinq cpi;
6367 struct ccb_trans_settings cts;
6368 struct scsi_inquiry_data *inq_buf;
6369
6370 /* Get transport information from the SIM */
6371 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
6372 cpi.ccb_h.func_code = XPT_PATH_INQ;
6373 xpt_action((union ccb *)&cpi);
6374
6375 inq_buf = NULL;
6376 if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
6377 inq_buf = &path->device->inq_data;
6378 path->device->protocol = PROTO_SCSI;
6379 path->device->protocol_version =
6380 inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
6381 path->device->transport = cpi.transport;
6382 path->device->transport_version = cpi.transport_version;
6383
6384 /*
6385 * Any device not using SPI3 features should
6386 * be considered SPI2 or lower.
6387 */
6388 if (inq_buf != NULL) {
6389 if (path->device->transport == XPORT_SPI
6390 && (inq_buf->spi3data & SID_SPI_MASK) == 0
6391 && path->device->transport_version > 2)
6392 path->device->transport_version = 2;
6393 } else {
6394 struct cam_ed* otherdev;
6395
6396 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
6397 otherdev != NULL;
6398 otherdev = TAILQ_NEXT(otherdev, links)) {
6399 if (otherdev != path->device)
6400 break;
6401 }
6402
6403 if (otherdev != NULL) {
6404 /*
6405 * Initially assume the same versioning as
6406 * prior luns for this target.
6407 */
6408 path->device->protocol_version =
6409 otherdev->protocol_version;
6410 path->device->transport_version =
6411 otherdev->transport_version;
6412 } else {
6413			/* Until we know better, opt for safety */
6414 path->device->protocol_version = 2;
6415 if (path->device->transport == XPORT_SPI)
6416 path->device->transport_version = 2;
6417 else
6418 path->device->transport_version = 0;
6419 }
6420 }
6421
6422 /*
6423 * XXX
6424 * For a device compliant with SPC-2 we should be able
6425 * to determine the transport version supported by
6426 * scrutinizing the version descriptors in the
6427 * inquiry buffer.
6428 */
6429
6430 /* Tell the controller what we think */
6431 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
6432 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
6433 cts.type = CTS_TYPE_CURRENT_SETTINGS;
6434 cts.transport = path->device->transport;
6435 cts.transport_version = path->device->transport_version;
6436 cts.protocol = path->device->protocol;
6437 cts.protocol_version = path->device->protocol_version;
6438 cts.proto_specific.valid = 0;
6439 cts.xport_specific.valid = 0;
6440 xpt_action((union ccb *)&cts);
6441}
6442
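/*
 * Sanity check a transfer settings request against what the controller,
 * the device's inquiry data, and its quirk entry will allow, filling in
 * any unspecified fields from the device's current settings, and then
 * hand the request to the SIM.  This routine also handles the careful
 * queue manipulation needed when switching between tagged and untagged
 * command delivery.
 */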
6443static void
6444xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6445 int async_update)
6446{
6447 struct ccb_pathinq cpi;
6448 struct ccb_trans_settings cur_cts;
6449 struct ccb_trans_settings_scsi *scsi;
6450 struct ccb_trans_settings_scsi *cur_scsi;
6451 struct cam_sim *sim;
6452 struct scsi_inquiry_data *inq_data;
6453
6454 if (device == NULL) {
6455 cts->ccb_h.status = CAM_PATH_INVALID;
6456 xpt_done((union ccb *)cts);
6457 return;
6458 }
6459
6460 if (cts->protocol == PROTO_UNKNOWN
6461 || cts->protocol == PROTO_UNSPECIFIED) {
6462 cts->protocol = device->protocol;
6463 cts->protocol_version = device->protocol_version;
6464 }
6465
6466 if (cts->protocol_version == PROTO_VERSION_UNKNOWN
6467 || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
6468 cts->protocol_version = device->protocol_version;
6469
6470 if (cts->protocol != device->protocol) {
6471 xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n",
6472 cts->protocol, device->protocol);
6473 cts->protocol = device->protocol;
6474 }
6475
6476 if (cts->protocol_version > device->protocol_version) {
6477 if (bootverbose) {
6478 xpt_print(cts->ccb_h.path, "Down reving Protocol "
6479 "Version from %d to %d?\n", cts->protocol_version,
6480 device->protocol_version);
6481 }
6482 cts->protocol_version = device->protocol_version;
6483 }
6484
6485 if (cts->transport == XPORT_UNKNOWN
6486 || cts->transport == XPORT_UNSPECIFIED) {
6487 cts->transport = device->transport;
6488 cts->transport_version = device->transport_version;
6489 }
6490
6491 if (cts->transport_version == XPORT_VERSION_UNKNOWN
6492 || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
6493 cts->transport_version = device->transport_version;
6494
6495 if (cts->transport != device->transport) {
6496 xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n",
6497 cts->transport, device->transport);
6498 cts->transport = device->transport;
6499 }
6500
6501 if (cts->transport_version > device->transport_version) {
6502 if (bootverbose) {
6503 xpt_print(cts->ccb_h.path, "Down reving Transport "
6504 "Version from %d to %d?\n", cts->transport_version,
6505 device->transport_version);
6506 }
6507 cts->transport_version = device->transport_version;
6508 }
6509
6510 sim = cts->ccb_h.path->bus->sim;
6511
6512 /*
6513 * Nothing more of interest to do unless
6514 * this is a device connected via the
6515 * SCSI protocol.
6516 */
6517 if (cts->protocol != PROTO_SCSI) {
6518 if (async_update == FALSE)
6519 (*(sim->sim_action))(sim, (union ccb *)cts);
6520 return;
6521 }
6522
6523 inq_data = &device->inq_data;
6524 scsi = &cts->proto_specific.scsi;
6525 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6526 cpi.ccb_h.func_code = XPT_PATH_INQ;
6527 xpt_action((union ccb *)&cpi);
6528
6529 /* SCSI specific sanity checking */
6530 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6531 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
6532 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6533 || (device->quirk->mintags == 0)) {
6534 /*
6535 * Can't tag on hardware that doesn't support tags,
6536 * doesn't have it enabled, or has broken tag support.
6537 */
6538 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6539 }
6540
6541 if (async_update == FALSE) {
6542 /*
6543 * Perform sanity checking against what the
6544 * controller and device can do.
6545 */
6546 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6547 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6548 cur_cts.type = cts->type;
6549 xpt_action((union ccb *)&cur_cts);
6550 if ((cur_cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
6551 return;
6552 }
6553 cur_scsi = &cur_cts.proto_specific.scsi;
6554 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
6555 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6556 scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
6557 }
6558 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
6559 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6560 }
6561
6562 /* SPI specific sanity checking */
6563 if (cts->transport == XPORT_SPI && async_update == FALSE) {
6564 u_int spi3caps;
6565 struct ccb_trans_settings_spi *spi;
6566 struct ccb_trans_settings_spi *cur_spi;
6567
6568 spi = &cts->xport_specific.spi;
6569
6570 cur_spi = &cur_cts.xport_specific.spi;
6571
6572 /* Fill in any gaps in what the user gave us */
6573 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6574 spi->sync_period = cur_spi->sync_period;
6575 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6576 spi->sync_period = 0;
6577 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6578 spi->sync_offset = cur_spi->sync_offset;
6579 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6580 spi->sync_offset = 0;
6581 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6582 spi->ppr_options = cur_spi->ppr_options;
6583 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6584 spi->ppr_options = 0;
6585 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6586 spi->bus_width = cur_spi->bus_width;
6587 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6588 spi->bus_width = 0;
6589 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
6590 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6591 spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
6592 }
6593 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
6594 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6595 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6596 && (inq_data->flags & SID_Sync) == 0
6597 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6598 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6599 || (spi->sync_offset == 0)
6600 || (spi->sync_period == 0)) {
6601 /* Force async */
6602 spi->sync_period = 0;
6603 spi->sync_offset = 0;
6604 }
6605
6606 switch (spi->bus_width) {
6607 case MSG_EXT_WDTR_BUS_32_BIT:
6608 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6609 || (inq_data->flags & SID_WBus32) != 0
6610 || cts->type == CTS_TYPE_USER_SETTINGS)
6611 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6612 break;
6613 /* Fall Through to 16-bit */
6614 case MSG_EXT_WDTR_BUS_16_BIT:
6615 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6616 || (inq_data->flags & SID_WBus16) != 0
6617 || cts->type == CTS_TYPE_USER_SETTINGS)
6618 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6619 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6620 break;
6621 }
6622 /* Fall Through to 8-bit */
6623 default: /* New bus width?? */
6624 case MSG_EXT_WDTR_BUS_8_BIT:
6625 /* All targets can do this */
6626 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6627 break;
6628 }
6629
6630 spi3caps = cpi.xport_specific.spi.ppr_options;
6631 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6632 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6633 spi3caps &= inq_data->spi3data;
6634
6635 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
6636 spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
6637
6638 if ((spi3caps & SID_SPI_IUS) == 0)
6639 spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
6640
6641 if ((spi3caps & SID_SPI_QAS) == 0)
6642 spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
6643
6644		/* PPR options are only allowed when the bus is wide */
6645 if (spi->bus_width == 0)
6646 spi->ppr_options = 0;
6647
6648 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
6649 /*
6650 * Can't tag queue without disconnection.
6651 */
6652 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6653 scsi->valid |= CTS_SCSI_VALID_TQ;
6654 }
6655
6656 /*
6657 * If we are currently performing tagged transactions to
6658 * this device and want to change its negotiation parameters,
6659 * go non-tagged for a bit to give the controller a chance to
6660 * negotiate unhampered by tag messages.
6661 */
6662 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6663 && (device->inq_flags & SID_CmdQue) != 0
6664 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6665 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
6666 CTS_SPI_VALID_SYNC_OFFSET|
6667 CTS_SPI_VALID_BUS_WIDTH)) != 0)
6668 xpt_toggle_tags(cts->ccb_h.path);
6669 }
6670
6671 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6672 && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
6673 int device_tagenb;
6674
6675 /*
6676 * If we are transitioning from tags to no-tags or
6677 * vice-versa, we need to carefully freeze and restart
6678 * the queue so that we don't overlap tagged and non-tagged
6679 * commands. We also temporarily stop tags if there is
6680 * a change in transfer negotiation settings to allow
6681 * "tag-less" negotiation.
6682 */
6683 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6684 || (device->inq_flags & SID_CmdQue) != 0)
6685 device_tagenb = TRUE;
6686 else
6687 device_tagenb = FALSE;
6688
6689 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6690 && device_tagenb == FALSE)
6691 || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
6692 && device_tagenb == TRUE)) {
6693
6694 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
6695 /*
6696 * Delay change to use tags until after a
6697 * few commands have gone to this device so
6698 * the controller has time to perform transfer
6699 * negotiations without tagged messages getting
6700 * in the way.
6701 */
6702 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6703 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6704 } else {
6705 struct ccb_relsim crs;
6706
6707 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6708 device->inq_flags &= ~SID_CmdQue;
6709 xpt_dev_ccbq_resize(cts->ccb_h.path,
6710 sim->max_dev_openings);
6711 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6712 device->tag_delay_count = 0;
6713
6714 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6715 /*priority*/1);
6716 crs.ccb_h.func_code = XPT_REL_SIMQ;
6717 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6718 crs.openings
6719 = crs.release_timeout
6720 = crs.qfrozen_cnt
6721 = 0;
6722 xpt_action((union ccb *)&crs);
6723 }
6724 }
6725 }
6726 if (async_update == FALSE)
6727 (*(sim->sim_action))(sim, (union ccb *)cts);
6728}
6729
6730
6731static void
6732xpt_toggle_tags(struct cam_path *path)
6733{
6734 struct cam_ed *dev;
6735
6736 /*
6737 * Give controllers a chance to renegotiate
6738 * before starting tag operations. We
6739	 * "toggle" tagged queuing off then on,
6740 * which causes the tag enable command delay
6741 * counter to come into effect.
6742 */
6743 dev = path->device;
6744 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6745 || ((dev->inq_flags & SID_CmdQue) != 0
6746 && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
6747 struct ccb_trans_settings cts;
6748
6749 xpt_setup_ccb(&cts.ccb_h, path, 1);
6750 cts.protocol = PROTO_SCSI;
6751 cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
6752 cts.transport = XPORT_UNSPECIFIED;
6753 cts.transport_version = XPORT_VERSION_UNSPECIFIED;
6754 cts.proto_specific.scsi.flags = 0;
6755 cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
6756 xpt_set_transfer_settings(&cts, path->device,
6757 /*async_update*/TRUE);
6758 cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
6759 xpt_set_transfer_settings(&cts, path->device,
6760 /*async_update*/TRUE);
6761 }
6762}
6763
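/*
 * Re-enable tagged queueing on a device once its tag delay count has
 * expired: freeze the device queue, mark the device command-queue
 * capable, resize its CCB queue to the saved or quirk-limited opening
 * count, and request a queue release once outstanding commands have
 * drained (RELSIM_RELEASE_AFTER_QEMPTY).
 */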
6764static void
6765xpt_start_tags(struct cam_path *path)
6766{
6767 struct ccb_relsim crs;
6768 struct cam_ed *device;
6769 struct cam_sim *sim;
6770 int newopenings;
6771
6772 device = path->device;
6773 sim = path->bus->sim;
6774 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6775 xpt_freeze_devq(path, /*count*/1);
6776 device->inq_flags |= SID_CmdQue;
6777 if (device->tag_saved_openings != 0)
6778 newopenings = device->tag_saved_openings;
6779 else
6780 newopenings = min(device->quirk->maxtags,
6781 sim->max_tagged_dev_openings);
6782 xpt_dev_ccbq_resize(path, newopenings);
6783 xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
6784 crs.ccb_h.func_code = XPT_REL_SIMQ;
6785 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6786 crs.openings
6787 = crs.release_timeout
6788 = crs.qfrozen_cnt
6789 = 0;
6790 xpt_action((union ccb *)&crs);
6791}
6792
6793static int busses_to_config;
6794static int busses_to_reset;
6795
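/*
 * Per-bus callback used at configuration time to count the busses that
 * need to be configured and, of those, the busses that support resets
 * and transfer negotiation and will therefore be reset before scanning.
 */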
6796static int
6797xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
6798{
6799
6800 mtx_assert(bus->sim->mtx, MA_OWNED);
6801
6802 if (bus->path_id != CAM_XPT_PATH_ID) {
6803 struct cam_path path;
6804 struct ccb_pathinq cpi;
6805 int can_negotiate;
6806
6807 busses_to_config++;
6808 xpt_compile_path(&path, NULL, bus->path_id,
6809 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
6810 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
6811 cpi.ccb_h.func_code = XPT_PATH_INQ;
6812 xpt_action((union ccb *)&cpi);
6813 can_negotiate = cpi.hba_inquiry;
6814 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6815 if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
6816 && can_negotiate)
6817 busses_to_reset++;
6818 xpt_release_path(&path);
6819 }
6820
6821 return(1);
6822}
6823
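/*
 * Per-bus callback that starts configuration of a single bus: reset the
 * bus if the controller supports it and can negotiate transfers,
 * otherwise behave as if a reset had already completed, and let
 * xpt_finishconfig() carry on with the bus scan.
 */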
6824static int
6825xptconfigfunc(struct cam_eb *bus, void *arg)
6826{
6827 struct cam_path *path;
6828 union ccb *work_ccb;
6829
6830 mtx_assert(bus->sim->mtx, MA_OWNED);
6831
6832 if (bus->path_id != CAM_XPT_PATH_ID) {
6833 cam_status status;
6834 int can_negotiate;
6835
6836 work_ccb = xpt_alloc_ccb_nowait();
6837 if (work_ccb == NULL) {
6838 busses_to_config--;
6839 xpt_finishconfig(xpt_periph, NULL);
6840 return(0);
6841 }
6842 if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
6843 CAM_TARGET_WILDCARD,
6844 CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
6845 printf("xptconfigfunc: xpt_create_path failed with "
6846 "status %#x for bus %d\n", status, bus->path_id);
6847 printf("xptconfigfunc: halting bus configuration\n");
6848 xpt_free_ccb(work_ccb);
6849 busses_to_config--;
6850 xpt_finishconfig(xpt_periph, NULL);
6851 return(0);
6852 }
6853 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6854 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
6855 xpt_action(work_ccb);
6856 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
6857 printf("xptconfigfunc: CPI failed on bus %d "
6858 "with status %d\n", bus->path_id,
6859 work_ccb->ccb_h.status);
6860 xpt_finishconfig(xpt_periph, work_ccb);
6861 return(1);
6862 }
6863
6864 can_negotiate = work_ccb->cpi.hba_inquiry;
6865 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6866 if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
6867 && (can_negotiate != 0)) {
6868 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6869 work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6870 work_ccb->ccb_h.cbfcnp = NULL;
6871 CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
6872 ("Resetting Bus\n"));
6873 xpt_action(work_ccb);
6874 xpt_finishconfig(xpt_periph, work_ccb);
6875 } else {
6876 /* Act as though we performed a successful BUS RESET */
6877 work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6878 xpt_finishconfig(xpt_periph, work_ccb);
6879 }
6880 }
6881
6882 return(1);
6883}
6884
6885static void
6886xpt_config(void *arg)
6887{
6888 /*
6889 * Now that interrupts are enabled, go find our devices
6890 */
6891
6892#ifdef CAMDEBUG
6893 /* Setup debugging flags and path */
6894#ifdef CAM_DEBUG_FLAGS
6895 cam_dflags = CAM_DEBUG_FLAGS;
6896#else /* !CAM_DEBUG_FLAGS */
6897 cam_dflags = CAM_DEBUG_NONE;
6898#endif /* CAM_DEBUG_FLAGS */
6899#ifdef CAM_DEBUG_BUS
6900 if (cam_dflags != CAM_DEBUG_NONE) {
6901 /*
6902 * Locking is specifically omitted here. No SIMs have
6903 * registered yet, so xpt_create_path will only be searching
6904 * empty lists of targets and devices.
6905 */
6906 if (xpt_create_path(&cam_dpath, xpt_periph,
6907 CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
6908 CAM_DEBUG_LUN) != CAM_REQ_CMP) {
6909 printf("xpt_config: xpt_create_path() failed for debug"
6910 " target %d:%d:%d, debugging disabled\n",
6911 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
6912 cam_dflags = CAM_DEBUG_NONE;
6913 }
6914 } else
6915 cam_dpath = NULL;
6916#else /* !CAM_DEBUG_BUS */
6917 cam_dpath = NULL;
6918#endif /* CAM_DEBUG_BUS */
6919#endif /* CAMDEBUG */
6920
6921 /*
6922 * Scan all installed busses.
6923 */
6924 xpt_for_all_busses(xptconfigbuscountfunc, NULL);
6925
6926 if (busses_to_config == 0) {
6927 /* Call manually because we don't have any busses */
6928 xpt_finishconfig(xpt_periph, NULL);
6929 } else {
6930 if (busses_to_reset > 0 && scsi_delay >= 2000) {
6931 printf("Waiting %d seconds for SCSI "
6932 "devices to settle\n", scsi_delay/1000);
6933 }
6934 xpt_for_all_busses(xptconfigfunc, NULL);
6935 }
6936}
6937
6938/*
6939 * If the given device only has one peripheral attached to it, and if that
6940 * peripheral is the passthrough driver, announce it. This insures that the
6941 * peripheral is the passthrough driver, announce it. This ensures that the
6942 */
6943static int
6944xptpassannouncefunc(struct cam_ed *device, void *arg)
6945{
6946 struct cam_periph *periph;
6947 int i;
6948
6949 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
6950 periph = SLIST_NEXT(periph, periph_links), i++);
6951
6952 periph = SLIST_FIRST(&device->periphs);
6953 if ((i == 1)
6954 && (strncmp(periph->periph_name, "pass", 4) == 0))
6955 xpt_announce_periph(periph, NULL);
6956
6957 return(1);
6958}
6959
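/*
 * Taskqueue handler run once every bus has been configured: register the
 * peripheral drivers, announce any devices claimed only by the
 * passthrough driver, and release the boot-time config_intrhook.
 */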
6960static void
6961xpt_finishconfig_task(void *context, int pending)
6962{
6963 struct periph_driver **p_drv;
6964 int i;
6965
6966 if (busses_to_config == 0) {
6967 /* Register all the peripheral drivers */
6968 /* XXX This will have to change when we have loadable modules */
6969 p_drv = periph_drivers;
6970 for (i = 0; p_drv[i] != NULL; i++) {
6971 (*p_drv[i]->init)();
6972 }
6973
6974 /*
6975 * Check for devices with no "standard" peripheral driver
6976 * attached. For any devices like that, announce the
6977 * passthrough driver so the user will see something.
6978 */
6979 xpt_for_all_devices(xptpassannouncefunc, NULL);
6980
6981 /* Release our hook so that the boot can continue. */
6982 config_intrhook_disestablish(xsoftc.xpt_config_hook);
6983 free(xsoftc.xpt_config_hook, M_CAMXPT);
6984 xsoftc.xpt_config_hook = NULL;
6985 }
6986
6987 free(context, M_CAMXPT);
6988}
6989
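/*
 * Completion handler for the configuration CCBs.  A successful bus reset
 * is converted into a bus scan; once the last bus has finished, the final
 * bookkeeping is deferred to xpt_finishconfig_task() on the taskqueue.
 */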
6990static void
6991xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
6992{
6993 struct xpt_task *task;
6994
6995 if (done_ccb != NULL) {
6996 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
6997 ("xpt_finishconfig\n"));
6998 switch(done_ccb->ccb_h.func_code) {
6999 case XPT_RESET_BUS:
7000 if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
7001 done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
7002 done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
7003 done_ccb->crcn.flags = 0;
7004 xpt_action(done_ccb);
7005 return;
7006 }
7007 /* FALLTHROUGH */
7008 case XPT_SCAN_BUS:
7009 default:
7010 xpt_free_path(done_ccb->ccb_h.path);
7011 busses_to_config--;
7012 break;
7013 }
7014 }
7015
7016 if (busses_to_config == 0) {
7017 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
7018 if (task != NULL) {
7019 TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
7020 taskqueue_enqueue(taskqueue_thread, &task->task);
7021 }
7022 }
7023
7024 if (done_ccb != NULL)
7025 xpt_free_ccb(done_ccb);
7026}
7027
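/*
 * Convenience wrapper around XPT_SASYNC_CB: register an async callback
 * for the given path, or for the XPT wildcard path when none is supplied.
 */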
7028cam_status
7029xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
7030 struct cam_path *path)
7031{
7032 struct ccb_setasync csa;
7033 cam_status status;
7034 int xptpath = 0;
7035
7036 if (path == NULL) {
7037 mtx_lock(&xsoftc.xpt_lock);
7038 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
7039 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
7040 if (status != CAM_REQ_CMP) {
7041 mtx_unlock(&xsoftc.xpt_lock);
7042 return (status);
7043 }
7044 xptpath = 1;
7045 }
7046
7047 xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
7048 csa.ccb_h.func_code = XPT_SASYNC_CB;
7049 csa.event_enable = event;
7050 csa.callback = cbfunc;
7051 csa.callback_arg = cbarg;
7052 xpt_action((union ccb *)&csa);
7053 status = csa.ccb_h.status;
7054 if (xptpath) {
7055 xpt_free_path(path);
7056 mtx_unlock(&xsoftc.xpt_lock);
7057 }
7058 return (status);
7059}
7060
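/*
 * Action routine for the xpt's own "bus".  Only XPT_PATH_INQ is handled
 * here; all other function codes are rejected with CAM_REQ_INVALID.
 */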
7061static void
7062xptaction(struct cam_sim *sim, union ccb *work_ccb)
7063{
7064 CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
7065
7066 switch (work_ccb->ccb_h.func_code) {
7067 /* Common cases first */
7068 case XPT_PATH_INQ: /* Path routing inquiry */
7069 {
7070 struct ccb_pathinq *cpi;
7071
7072 cpi = &work_ccb->cpi;
7073 cpi->version_num = 1; /* XXX??? */
7074 cpi->hba_inquiry = 0;
7075 cpi->target_sprt = 0;
7076 cpi->hba_misc = 0;
7077 cpi->hba_eng_cnt = 0;
7078 cpi->max_target = 0;
7079 cpi->max_lun = 0;
7080 cpi->initiator_id = 0;
7081 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
7082 strncpy(cpi->hba_vid, "", HBA_IDLEN);
7083 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
7084 cpi->unit_number = sim->unit_number;
7085 cpi->bus_id = sim->bus_id;
7086 cpi->base_transfer_speed = 0;
7087 cpi->protocol = PROTO_UNSPECIFIED;
7088 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
7089 cpi->transport = XPORT_UNSPECIFIED;
7090 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
7091 cpi->ccb_h.status = CAM_REQ_CMP;
7092 xpt_done(work_ccb);
7093 break;
7094 }
7095 default:
7096 work_ccb->ccb_h.status = CAM_REQ_INVALID;
7097 xpt_done(work_ccb);
7098 break;
7099 }
7100}
7101
7102/*
7103 * The xpt as a "controller" has no interrupt sources, so polling
7104 * is a no-op.
7105 */
7106static void
7107xptpoll(struct cam_sim *sim)
7108{
7109}
7110
7111void
7112xpt_lock_buses(void)
7113{
7114 mtx_lock(&xsoftc.xpt_topo_lock);
7115}
7116
7117void
7118xpt_unlock_buses(void)
7119{
7120 mtx_unlock(&xsoftc.xpt_topo_lock);
7121}
7122
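/*
 * Pull the list of SIMs with pending completions off the global queue
 * and run each SIM's done queue while holding that SIM's lock.
 */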
7123static void
7124camisr(void *dummy)
7125{
7126 cam_simq_t queue;
7127 struct cam_sim *sim;
7128
7129 mtx_lock(&cam_simq_lock);
7130 TAILQ_INIT(&queue);
7131 TAILQ_CONCAT(&queue, &cam_simq, links);
7132 mtx_unlock(&cam_simq_lock);
7133
7134 while ((sim = TAILQ_FIRST(&queue)) != NULL) {
7135 TAILQ_REMOVE(&queue, sim, links);
7136 CAM_SIM_LOCK(sim);
7137 sim->flags &= ~CAM_SIM_ON_DONEQ;
7138 camisr_runqueue(&sim->sim_doneq);
7139 CAM_SIM_UNLOCK(sim);
7140 }
7141}
7142
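/*
 * Drain a completion queue: account for high powered commands, release
 * device and SIM queues as requested, handle the tag delay countdown,
 * reschedule the device if it still has work, and finally invoke each
 * CCB's completion callback.
 */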
7143static void
7144camisr_runqueue(void *V_queue)
7145{
7146 cam_isrq_t *queue = V_queue;
7147 struct ccb_hdr *ccb_h;
7148
7149 while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
7150 int runq;
7151
7152 TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
7153 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
7154
7155 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
7156 ("camisr\n"));
7157
7158 runq = FALSE;
7159
7160 if (ccb_h->flags & CAM_HIGH_POWER) {
7161 struct highpowerlist *hphead;
7162 union ccb *send_ccb;
7163
7164 mtx_lock(&xsoftc.xpt_lock);
7165 hphead = &xsoftc.highpowerq;
7166
7167 send_ccb = (union ccb *)STAILQ_FIRST(hphead);
7168
7169 /*
7170 * Increment the count since this command is done.
7171 */
7172 xsoftc.num_highpower++;
7173
7174 /*
7175 * Any high powered commands queued up?
7176 */
7177 if (send_ccb != NULL) {
7178
7179 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
7180 mtx_unlock(&xsoftc.xpt_lock);
7181
7182 xpt_release_devq(send_ccb->ccb_h.path,
7183 /*count*/1, /*runqueue*/TRUE);
7184 } else
7185 mtx_unlock(&xsoftc.xpt_lock);
7186 }
7187
7188 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
7189 struct cam_ed *dev;
7190
7191 dev = ccb_h->path->device;
7192
7193 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
7194 ccb_h->path->bus->sim->devq->send_active--;
7195 ccb_h->path->bus->sim->devq->send_openings++;
7196
7197 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
7198 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
7199 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
7200 && (dev->ccbq.dev_active == 0))) {
7201
7202 xpt_release_devq(ccb_h->path, /*count*/1,
7203 /*run_queue*/TRUE);
7204 }
7205
7206 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
7207 && (--dev->tag_delay_count == 0))
7208 xpt_start_tags(ccb_h->path);
7209
7210 if ((dev->ccbq.queue.entries > 0)
7211 && (dev->qfrozen_cnt == 0)
7212 && (device_is_send_queued(dev) == 0)) {
7213 runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
7214 dev);
7215 }
7216 }
7217
7218 if (ccb_h->status & CAM_RELEASE_SIMQ) {
7219 xpt_release_simq(ccb_h->path->bus->sim,
7220 /*run_queue*/TRUE);
7221 ccb_h->status &= ~CAM_RELEASE_SIMQ;
7222 runq = FALSE;
7223 }
7224
7225 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
7226 && (ccb_h->status & CAM_DEV_QFRZN)) {
7227 xpt_release_devq(ccb_h->path, /*count*/1,
7228 /*run_queue*/TRUE);
7229 ccb_h->status &= ~CAM_DEV_QFRZN;
7230 } else if (runq) {
7231 xpt_run_dev_sendq(ccb_h->path->bus);
7232 }
7233
7234 /* Call the peripheral driver's callback */
7235 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
7236 }
7237}
7238