scsi_target.c revision 109345
1/*
2 * SCSI Disk Emulator
3 *
4 * Copyright (c) 2002 Nate Lawson.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions, and the following disclaimer,
12 *    without modification, immediately at the beginning of the file.
13 * 2. The name of the author may not be used to endorse or promote products
14 *    derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $FreeBSD: head/share/examples/scsi_target/scsi_target.c 109345 2003-01-16 00:24:29Z njl $
29 */
30
31#include <sys/types.h>
32#include <errno.h>
33#include <err.h>
34#include <fcntl.h>
35#include <signal.h>
36#include <stddef.h>
37#include <stdio.h>
38#include <stdlib.h>
39#include <string.h>
40#include <sysexits.h>
41#include <unistd.h>
42#include <aio.h>
43#include <assert.h>
44#include <sys/stat.h>
45#include <sys/queue.h>
46#include <sys/event.h>
47#include <sys/param.h>
48#include <cam/cam_queue.h>
49#include <cam/scsi/scsi_all.h>
50#include <cam/scsi/scsi_targetio.h>
51#include <cam/scsi/scsi_message.h>
52#include "scsi_target.h"
53
54/* Maximum amount to transfer per CTIO */
55#define MAX_XFER	MAXPHYS
56/* Maximum number of allocated CTIOs */
57#define MAX_CTIOS	32
58/* Maximum sector size for emulated volume */
59#define MAX_SECTOR	32768
60
61/* Global variables */
62int		debug;
63u_int32_t	volume_size;
64size_t		sector_size;
65size_t		buf_size;
66
67/* Local variables */
68static int    targ_fd;
69static int    kq_fd;
70static int    file_fd;
71static int    num_ctios;
72static struct ccb_queue		pending_queue;
73static struct ccb_queue		work_queue;
74static struct ioc_enable_lun	ioc_enlun = {
75	CAM_BUS_WILDCARD,
76	CAM_TARGET_WILDCARD,
77	CAM_LUN_WILDCARD
78};
79
80/* Local functions */
81static void		cleanup(void);
82static int		init_ccbs(void);
83static void		request_loop(void);
84static void		handle_read(void);
85/* static int		work_atio(struct ccb_accept_tio *); */
86static void		queue_io(struct ccb_scsiio *);
87static void		run_queue(struct ccb_accept_tio *);
88static int		work_inot(struct ccb_immed_notify *);
89static struct ccb_scsiio *
90			get_ctio(void);
91/* static void		free_ccb(union ccb *); */
92static cam_status	get_sim_flags(u_int16_t *);
93static void		rel_simq(void);
94static void		abort_all_pending(void);
95static void		usage(void);
96
97int
98main(int argc, char *argv[])
99{
100	int ch, unit;
101	char *file_name, targname[16];
102	u_int16_t req_flags, sim_flags;
103	off_t user_size;
104
105	/* Initialize */
106	debug = 0;
107	req_flags = sim_flags = 0;
108	user_size = 0;
109	targ_fd = file_fd = kq_fd = -1;
110	num_ctios = 0;
111	sector_size = SECTOR_SIZE;
112	buf_size = DFLTPHYS;
113
114	/* Prepare resource pools */
115	TAILQ_INIT(&pending_queue);
116	TAILQ_INIT(&work_queue);
117
118	while ((ch = getopt(argc, argv, "AdSTb:c:s:W:")) != -1) {
119		switch(ch) {
120		case 'A':
121			req_flags |= SID_Addr16;
122			break;
123		case 'd':
124			debug = 1;
125			break;
126		case 'S':
127			req_flags |= SID_Sync;
128			break;
129		case 'T':
130			req_flags |= SID_CmdQue;
131			break;
132		case 'b':
133			buf_size = atoi(optarg);
134			if (buf_size < 256 || buf_size > MAX_XFER)
135				errx(1, "Unreasonable buf size: %s", optarg);
136			break;
137		case 'c':
138			sector_size = atoi(optarg);
139			if (sector_size < 512 || sector_size > MAX_SECTOR)
140				errx(1, "Unreasonable sector size: %s", optarg);
141			break;
142		case 's':
143			user_size = strtoll(optarg, (char **)NULL, /*base*/10);
144			if (user_size < 0)
145				errx(1, "Unreasonable volume size: %s", optarg);
146			break;
147		case 'W':
148			req_flags &= ~(SID_WBus16 | SID_WBus32);
149			switch (atoi(optarg)) {
150			case 8:
151				/* Leave req_flags zeroed */
152				break;
153			case 16:
154				req_flags |= SID_WBus16;
155				break;
156			case 32:
157				req_flags |= SID_WBus32;
158				break;
159			default:
160				warnx("Width %s not supported", optarg);
161				usage();
162				/* NOTREACHED */
163			}
164			break;
165		default:
166			usage();
167			/* NOTREACHED */
168		}
169	}
170	argc -= optind;
171	argv += optind;
172
173	if (argc != 2)
174		usage();
175
176	sscanf(argv[0], "%u:%u:%u", &ioc_enlun.path_id, &ioc_enlun.target_id,
177	       &ioc_enlun.lun_id);
178	file_name = argv[1];
179
180	if (ioc_enlun.path_id == CAM_BUS_WILDCARD ||
181	    ioc_enlun.target_id == CAM_TARGET_WILDCARD ||
182	    ioc_enlun.lun_id == CAM_LUN_WILDCARD) {
183		warnx("Incomplete target path specified");
184		usage();
185		/* NOTREACHED */
186	}
187	/* We don't support any vendor-specific commands */
188	ioc_enlun.grp6_len = 0;
189	ioc_enlun.grp7_len = 0;
190
191	/* Open backing store for IO */
192	file_fd = open(file_name, O_RDWR);
193	if (file_fd < 0)
194		err(1, "open backing store file");
195
196	/* Check backing store size or use the size user gave us */
197	if (user_size == 0) {
198		struct stat st;
199
200		if (fstat(file_fd, &st) < 0)
201			err(1, "fstat file");
202		volume_size = st.st_size / sector_size;
203	} else {
204		volume_size = user_size / sector_size;
205	}
206	if (volume_size <= 0)
207		errx(1, "volume must be larger than %d", sector_size);
208
209	{
210		struct aiocb aio, *aiop;
211
212		/* Make sure we have working AIO support */
213		memset(&aio, 0, sizeof(aio));
214		aio.aio_buf = malloc(sector_size);
215		if (aio.aio_buf == NULL)
216			err(1, "malloc");
217		aio.aio_fildes = file_fd;
218		aio.aio_offset = 0;
219		aio.aio_nbytes = sector_size;
220		signal(SIGSYS, SIG_IGN);
221		if (aio_read(&aio) != 0) {
222			printf("You must enable VFS_AIO in your kernel "
223			       "or load the aio(4) module.\n");
224			err(1, "aio_read");
225		}
226		if (aio_waitcomplete(&aiop, NULL) != sector_size)
227			err(1, "aio_waitcomplete");
228		assert(aiop == &aio);
229		signal(SIGSYS, SIG_DFL);
230		free((void *)aio.aio_buf);
231		if (debug)
232			warnx("aio support tested ok");
233	}
234
235	/* Go through all the control devices and find one that isn't busy. */
236	unit = 0;
237	do {
238		snprintf(targname, sizeof(targname), "/dev/targ%d", unit++);
239    		targ_fd = open(targname, O_RDWR);
240	} while (targ_fd < 0 && errno == EBUSY);
241
242	if (targ_fd < 0)
243    	    err(1, "Tried to open %d devices, none available", unit);
244
245	/* The first three are handled by kevent() later */
246	signal(SIGHUP, SIG_IGN);
247	signal(SIGINT, SIG_IGN);
248	signal(SIGTERM, SIG_IGN);
249	signal(SIGPROF, SIG_IGN);
250	signal(SIGALRM, SIG_IGN);
251	signal(SIGSTOP, SIG_IGN);
252	signal(SIGTSTP, SIG_IGN);
253
254	/* Register a cleanup handler to run when exiting */
255	atexit(cleanup);
256
257	/* Enable listening on the specified LUN */
258	if (ioctl(targ_fd, TARGIOCENABLE, &ioc_enlun) != 0)
259		err(1, "TARGIOCENABLE");
260
261	/* Enable debugging if requested */
262	if (debug) {
263		if (ioctl(targ_fd, TARGIOCDEBUG, &debug) != 0)
264			err(1, "TARGIOCDEBUG");
265	}
266
267	/* Set up inquiry data according to what SIM supports */
268	if (get_sim_flags(&sim_flags) != CAM_REQ_CMP)
269		errx(1, "get_sim_flags");
270	if (tcmd_init(req_flags, sim_flags) != 0)
271		errx(1, "Initializing tcmd subsystem failed");
272
273	/* Queue ATIOs and INOTs on descriptor */
274	if (init_ccbs() != 0)
275		errx(1, "init_ccbs failed");
276
277	if (debug)
278		warnx("main loop beginning");
279	request_loop();
280
281	exit(0);
282}
283
/*
 * atexit() handler: turn off kernel-side debugging, stop target mode on
 * the control device, and return every outstanding CCB's memory.
 */
static void
cleanup()
{
	struct ccb_hdr *ccb_h;

	if (debug) {
		warnx("cleanup called");
		/* Switch kernel debugging back off before we go away */
		debug = 0;
		ioctl(targ_fd, TARGIOCDEBUG, &debug);
	}
	/* Errors are ignored here; we are exiting regardless */
	ioctl(targ_fd, TARGIOCDISABLE, NULL);
	close(targ_fd);

	/* Free CCBs we queued to the kernel that never came back */
	while ((ccb_h = TAILQ_FIRST(&pending_queue)) != NULL) {
		TAILQ_REMOVE(&pending_queue, ccb_h, periph_links.tqe);
		free_ccb((union ccb *)ccb_h);
	}
	/* Free CCBs that completed but were never fully processed */
	while ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
		TAILQ_REMOVE(&work_queue, ccb_h, periph_links.tqe);
		free_ccb((union ccb *)ccb_h);
	}

	if (kq_fd != -1)
		close(kq_fd);
}
309
310/* Allocate ATIOs/INOTs and queue on HBA */
311static int
312init_ccbs()
313{
314	int i;
315
316	for (i = 0; i < MAX_INITIATORS; i++) {
317		struct ccb_accept_tio *atio;
318		struct atio_descr *a_descr;
319		struct ccb_immed_notify *inot;
320
321		atio = (struct ccb_accept_tio *)malloc(sizeof(*atio));
322		if (atio == NULL) {
323			warn("malloc ATIO");
324			return (-1);
325		}
326		a_descr = (struct atio_descr *)malloc(sizeof(*a_descr));
327		if (a_descr == NULL) {
328			free(atio);
329			warn("malloc atio_descr");
330			return (-1);
331		}
332		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
333		atio->ccb_h.targ_descr = a_descr;
334		send_ccb((union ccb *)atio, /*priority*/1);
335
336		inot = (struct ccb_immed_notify *)malloc(sizeof(*inot));
337		if (inot == NULL) {
338			warn("malloc INOT");
339			return (-1);
340		}
341		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
342		send_ccb((union ccb *)inot, /*priority*/1);
343	}
344
345	return (0);
346}
347
/*
 * Main event loop.  Waits on a kqueue for three event sources (fatal
 * signals, CCBs readable on targ_fd, and AIO completions), dispatches
 * each event, then performs at most one unit of work from work_queue
 * per iteration.  Runs until SIGHUP/SIGINT/SIGTERM sets quit.
 */
static void
request_loop()
{
	struct kevent events[MAX_EVENTS];
	struct timespec ts, *tptr;
	int quit;

	/* Register kqueue for event notification */
	if ((kq_fd = kqueue()) < 0)
		err(1, "init kqueue");

	/* Set up some default events */
	EV_SET(&events[0], SIGHUP, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[1], SIGINT, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[2], SIGTERM, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[3], targ_fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, 0);
	if (kevent(kq_fd, events, 4, NULL, 0, NULL) < 0)
		err(1, "kevent signal registration");

	/* Zero timeout = poll; only used while work_queue is non-empty */
	ts.tv_sec = 0;
	ts.tv_nsec = 0;
	tptr = NULL;
	quit = 0;

	/* Loop until user signal */
	while (quit == 0) {
		int retval, i;
		struct ccb_hdr *ccb_h;

		/* Check for the next signal, read ready, or AIO completion */
		retval = kevent(kq_fd, NULL, 0, events, MAX_EVENTS, tptr);
		if (retval < 0) {
			if (errno == EINTR) {
				if (debug)
					warnx("EINTR, looping");
				continue;
            		}
			else {
				err(1, "kevent failed");
			}
		} else if (retval > MAX_EVENTS) {
			errx(1, "kevent returned more events than allocated?");
		}

		/* Process all received events. */
		for (i = 0; i < retval; i++) {
			if ((events[i].flags & EV_ERROR) != 0)
				errx(1, "kevent registration failed");

			switch (events[i].filter) {
			case EVFILT_READ:
				if (debug)
					warnx("read ready");
				handle_read();
				break;
			case EVFILT_AIO:
			{
				struct ccb_scsiio *ctio;
				struct ctio_descr *c_descr;
				if (debug)
					warnx("aio ready");

				/*
				 * udata carries the CTIO pointer set up in
				 * get_ctio() via aio_sigevent.
				 */
				ctio = (struct ccb_scsiio *)events[i].udata;
				c_descr = (struct ctio_descr *)
					  ctio->ccb_h.targ_descr;
				c_descr->event = AIO_DONE;
				/* Queue on the appropriate ATIO */
				queue_io(ctio);
				/* Process any queued completions. */
				run_queue(c_descr->atio);
				break;
			}
			case EVFILT_SIGNAL:
				if (debug)
					warnx("signal ready, setting quit");
				quit = 1;
				break;
			default:
				warnx("unknown event %#x", events[i].filter);
				break;
			}

			if (debug)
				warnx("event done");
		}

		/* Grab the first CCB and perform one work unit. */
		if ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
			union ccb *ccb;

			ccb = (union ccb *)ccb_h;
			switch (ccb_h->func_code) {
			case XPT_ACCEPT_TARGET_IO:
				/* Start one more transfer. */
				retval = work_atio(&ccb->atio);
				break;
			case XPT_IMMED_NOTIFY:
				retval = work_inot(&ccb->cin);
				break;
			default:
				warnx("Unhandled ccb type %#x on workq",
				      ccb_h->func_code);
				abort();
				/* NOTREACHED */
			}

			/* Assume work function handled the exception */
			if ((ccb_h->status & CAM_DEV_QFRZN) != 0) {
				if (debug) {
					warnx("Queue frozen receiving CCB, "
					      "releasing");
				}
				rel_simq();
			}

			/*
			 * No more work needed for this command.  (retval
			 * here is the work function's result, nonzero
			 * meaning "keep it queued and retry later".)
			 */
			if (retval == 0) {
				TAILQ_REMOVE(&work_queue, ccb_h,
					     periph_links.tqe);
			}
		}

		/*
		 * Poll for new events (i.e. completions) while we
		 * are processing CCBs on the work_queue. Once it's
		 * empty, use an infinite wait.
		 */
		if (!TAILQ_EMPTY(&work_queue))
			tptr = &ts;
		else
			tptr = NULL;
	}
}
481
/*
 * CCBs are ready from the kernel.  A read(2) on targ_fd returns an
 * array of completed CCB *pointers*; each is removed from the pending
 * queue and dispatched by type.
 */
static void
handle_read()
{
	union ccb *ccb_array[MAX_INITIATORS], *ccb;
	int ccb_count, i;

	ccb_count = read(targ_fd, ccb_array, sizeof(ccb_array));
	if (ccb_count <= 0) {
		warn("read ccb ptrs");
		return;
	}
	/* Convert byte count into a pointer count */
	ccb_count /= sizeof(union ccb *);
	if (ccb_count < 1) {
		warnx("truncated read ccb ptr?");
		return;
	}

	for (i = 0; i < ccb_count; i++) {
		ccb = ccb_array[i];
		/* The kernel gave it back, so it is no longer pending */
		TAILQ_REMOVE(&pending_queue, &ccb->ccb_h, periph_links.tqe);

		switch (ccb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
		{
			struct ccb_accept_tio *atio;
			struct atio_descr *a_descr;

			/* Initialize ATIO descr for this transaction */
			atio = &ccb->atio;
			a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
			bzero(a_descr, sizeof(*a_descr));
			TAILQ_INIT(&a_descr->cmplt_io);
			a_descr->flags = atio->ccb_h.flags &
				(CAM_DIS_DISCONNECT | CAM_TAG_ACTION_VALID);
			/* XXX add a_descr->priority */
			if ((atio->ccb_h.flags & CAM_CDB_POINTER) == 0)
				a_descr->cdb = atio->cdb_io.cdb_bytes;
			else
				a_descr->cdb = atio->cdb_io.cdb_ptr;

			/* ATIOs are processed in FIFO order */
			TAILQ_INSERT_TAIL(&work_queue, &ccb->ccb_h,
					  periph_links.tqe);
			break;
		}
		case XPT_CONT_TARGET_IO:
		{
			struct ccb_scsiio *ctio;
			struct ctio_descr *c_descr;

			ctio = &ccb->ctio;
			c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
			c_descr->event = CTIO_DONE;
			/* Queue on the appropriate ATIO */
			queue_io(ctio);
			/* Process any queued completions. */
			run_queue(c_descr->atio);
			break;
		}
		case XPT_IMMED_NOTIFY:
			/* INOTs are handled with priority */
			TAILQ_INSERT_HEAD(&work_queue, &ccb->ccb_h,
					  periph_links.tqe);
			break;
		default:
			warnx("Unhandled ccb type %#x in handle_read",
			      ccb->ccb_h.func_code);
			break;
		}
	}
}
554
/*
 * Process an ATIO CCB from the kernel.  Returns 0 when the ATIO can be
 * removed from the work queue, nonzero to leave it queued for a retry
 * (e.g. when no CTIO is currently available; see request_loop()).
 */
int
work_atio(struct ccb_accept_tio *atio)
{
	struct ccb_scsiio *ctio;
	struct atio_descr *a_descr;
	struct ctio_descr *c_descr;
	cam_status status;
	int ret;

	if (debug)
		warnx("Working on ATIO %p", atio);

	a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;

	/* Get a CTIO and initialize it according to our known parameters */
	ctio = get_ctio();
	if (ctio == NULL)
		return (1);
	ret = 0;
	ctio->ccb_h.flags = a_descr->flags;
	ctio->tag_id = atio->tag_id;
	ctio->init_id = atio->init_id;
	/* XXX priority needs to be added to a_descr */
	c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
	c_descr->atio = atio;
	/* Pick the file offset based on the transfer direction */
	if ((a_descr->flags & CAM_DIR_IN) != 0)
		c_descr->offset = a_descr->base_off + a_descr->targ_req;
	else if ((a_descr->flags & CAM_DIR_MASK) == CAM_DIR_OUT)
		c_descr->offset = a_descr->base_off + a_descr->init_req;

	/*
	 * Return a check condition if there was an error while
	 * receiving this ATIO.
	 */
	if (atio->sense_len != 0) {
		struct scsi_sense_data *sense;

		if (debug) {
			warnx("ATIO with %u bytes sense received",
			      atio->sense_len);
		}
		sense = &atio->sense_data;
		tcmd_sense(ctio->init_id, ctio, sense->flags,
			   sense->add_sense_code, sense->add_sense_code_qual);
		send_ccb((union ccb *)ctio, /*priority*/1);
		return (0);
	}

	status = atio->ccb_h.status & CAM_STATUS_MASK;
	switch (status) {
	case CAM_CDB_RECVD:
		ret = tcmd_handle(atio, ctio, ATIO_WORK);
		break;
	case CAM_REQ_ABORTED:
		/* Requeue on HBA */
		TAILQ_REMOVE(&work_queue, &atio->ccb_h, periph_links.tqe);
		send_ccb((union ccb *)atio, /*priority*/1);
		ret = 1;
		break;
	default:
		warnx("ATIO completed with unhandled status %#x", status);
		abort();
		/* NOTREACHED */
		break;
	}

	return (ret);
}
624
/*
 * Insert a completed CTIO on its parent ATIO's completion queue,
 * keeping the queue sorted by file offset so run_queue() can retire
 * completions strictly in order.
 */
static void
queue_io(struct ccb_scsiio *ctio)
{
	struct ccb_hdr *ccb_h;
	struct io_queue *ioq;
	struct ctio_descr *c_descr, *curr_descr;

	c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
	/* If the completion is for a specific ATIO, queue in order */
	if (c_descr->atio != NULL) {
		struct atio_descr *a_descr;

		a_descr = (struct atio_descr *)c_descr->atio->ccb_h.targ_descr;
		ioq = &a_descr->cmplt_io;
	} else {
		/* Every CTIO must belong to an ATIO; this is fatal */
		errx(1, "CTIO %p has NULL ATIO", ctio);
	}

	/* Insert in order, sorted by offset */
	if (!TAILQ_EMPTY(ioq)) {
		/*
		 * Walk backwards: new completions usually have the
		 * largest offset, so the insertion point is near the tail.
		 */
		TAILQ_FOREACH_REVERSE(ccb_h, ioq, io_queue, periph_links.tqe) {
			curr_descr = (struct ctio_descr *)ccb_h->targ_descr;
			if (curr_descr->offset <= c_descr->offset) {
				TAILQ_INSERT_AFTER(ioq, ccb_h, &ctio->ccb_h,
						   periph_links.tqe);
				break;
			}
			/* Reached the head without a smaller offset */
			if (TAILQ_PREV(ccb_h, io_queue, periph_links.tqe)
			    == NULL) {
				TAILQ_INSERT_BEFORE(ccb_h, &ctio->ccb_h,
						    periph_links.tqe);
				break;
			}
		}
	} else {
		TAILQ_INSERT_HEAD(ioq, &ctio->ccb_h, periph_links.tqe);
	}
}
663
/*
 * Go through all completed AIO/CTIOs for a given ATIO and advance data
 * counts, start continuation IO, etc.
 *
 * Completions are retired only while they are contiguous: an entry is
 * handled when its offset matches the next acknowledged position for
 * its event type; the first gap stops processing until a later
 * callback fills it.
 */
static void
run_queue(struct ccb_accept_tio *atio)
{
	struct atio_descr *a_descr;
	struct ccb_hdr *ccb_h;
	int sent_status, event;

	if (atio == NULL)
		return;

	a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;

	while ((ccb_h = TAILQ_FIRST(&a_descr->cmplt_io)) != NULL) {
		struct ccb_scsiio *ctio;
		struct ctio_descr *c_descr;

		ctio = (struct ccb_scsiio *)ccb_h;
		c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;

		/* If completed item is in range, call handler */
		if ((c_descr->event == AIO_DONE &&
		    c_descr->offset == a_descr->base_off + a_descr->targ_ack)
		 || (c_descr->event == CTIO_DONE &&
		    c_descr->offset == a_descr->base_off + a_descr->init_ack)) {
			/*
			 * Copy out what we need before tcmd_handle(),
			 * which may recycle the CTIO/descr.
			 */
			sent_status = (ccb_h->flags & CAM_SEND_STATUS) != 0;
			event = c_descr->event;

			TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
				     periph_links.tqe);
			tcmd_handle(atio, ctio, c_descr->event);

			/* If entire transfer complete, send back ATIO */
			if (sent_status != 0 && event == CTIO_DONE)
				send_ccb((union ccb *)atio, /*priority*/1);
		} else {
			/* Gap in offsets so wait until later callback */
			if (debug)
				warnx("IO %p out of order", ccb_h);
			break;
		}
	}
}
710
711static int
712work_inot(struct ccb_immed_notify *inot)
713{
714	cam_status status;
715	int sense;
716
717	if (debug)
718		warnx("Working on INOT %p", inot);
719
720	status = inot->ccb_h.status;
721	sense = (status & CAM_AUTOSNS_VALID) != 0;
722	status &= CAM_STATUS_MASK;
723
724	switch (status) {
725	case CAM_SCSI_BUS_RESET:
726		tcmd_ua(CAM_TARGET_WILDCARD, UA_BUS_RESET);
727		abort_all_pending();
728		break;
729	case CAM_BDR_SENT:
730		tcmd_ua(CAM_TARGET_WILDCARD, UA_BDR);
731		abort_all_pending();
732		break;
733	case CAM_MESSAGE_RECV:
734		switch (inot->message_args[0]) {
735		case MSG_TASK_COMPLETE:
736		case MSG_INITIATOR_DET_ERR:
737		case MSG_ABORT_TASK_SET:
738		case MSG_MESSAGE_REJECT:
739		case MSG_NOOP:
740		case MSG_PARITY_ERROR:
741		case MSG_TARGET_RESET:
742		case MSG_ABORT_TASK:
743		case MSG_CLEAR_TASK_SET:
744		default:
745			warnx("INOT message %#x", inot->message_args[0]);
746			break;
747		}
748		break;
749	case CAM_REQ_ABORTED:
750		warnx("INOT %p aborted", inot);
751		break;
752	default:
753		warnx("Unhandled INOT status %#x", status);
754		break;
755	}
756
757	/* If there is sense data, use it */
758	if (sense != 0) {
759		struct scsi_sense_data *sense;
760
761		sense = &inot->sense_data;
762		tcmd_sense(inot->initiator_id, NULL, sense->flags,
763			   sense->add_sense_code, sense->add_sense_code_qual);
764		if (debug)
765			warnx("INOT has sense: %#x", sense->flags);
766	}
767
768	/* Requeue on SIM */
769	TAILQ_REMOVE(&work_queue, &inot->ccb_h, periph_links.tqe);
770	send_ccb((union ccb *)inot, /*priority*/1);
771
772	return (1);
773}
774
/*
 * Submit a CCB to the target driver.  Queued (async) CCB types are
 * tracked on pending_queue until the kernel returns them through
 * read(2) in handle_read().
 */
void
send_ccb(union ccb *ccb, int priority)
{
	if (debug)
		warnx("sending ccb (%#x)", ccb->ccb_h.func_code);
	ccb->ccb_h.pinfo.priority = priority;
	if (XPT_FC_IS_QUEUED(ccb)) {
		TAILQ_INSERT_TAIL(&pending_queue, &ccb->ccb_h,
				  periph_links.tqe);
	}
	/* Note: the *pointer* to the CCB is written, not the CCB itself */
	if (write(targ_fd, &ccb, sizeof(ccb)) != sizeof(ccb)) {
		warn("write ccb");
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
	}
}
790
791/* Return a CTIO/descr/buf combo from the freelist or malloc one */
792static struct ccb_scsiio *
793get_ctio()
794{
795	struct ccb_scsiio *ctio;
796	struct ctio_descr *c_descr;
797	struct sigevent *se;
798
799	if (num_ctios == MAX_CTIOS)
800		return (NULL);
801
802	ctio = (struct ccb_scsiio *)malloc(sizeof(*ctio));
803	if (ctio == NULL) {
804		warn("malloc CTIO");
805		return (NULL);
806	}
807	c_descr = (struct ctio_descr *)malloc(sizeof(*c_descr));
808	if (c_descr == NULL) {
809		free(ctio);
810		warn("malloc ctio_descr");
811		return (NULL);
812	}
813	c_descr->buf = malloc(buf_size);
814	if (c_descr->buf == NULL) {
815		free(c_descr);
816		free(ctio);
817		warn("malloc backing store");
818		return (NULL);
819	}
820	num_ctios++;
821
822	/* Initialize CTIO, CTIO descr, and AIO */
823	ctio->ccb_h.func_code = XPT_CONT_TARGET_IO;
824	ctio->ccb_h.retry_count = 2;
825	ctio->ccb_h.timeout = CAM_TIME_INFINITY;
826	ctio->data_ptr = c_descr->buf;
827	ctio->ccb_h.targ_descr = c_descr;
828	c_descr->aiocb.aio_buf = c_descr->buf;
829	c_descr->aiocb.aio_fildes = file_fd;
830	se = &c_descr->aiocb.aio_sigevent;
831	se->sigev_notify = SIGEV_KEVENT;
832	se->sigev_notify_kqueue = kq_fd;
833	se->sigev_value.sigval_ptr = ctio;
834
835	return (ctio);
836}
837
838void
839free_ccb(union ccb *ccb)
840{
841	switch (ccb->ccb_h.func_code) {
842	case XPT_CONT_TARGET_IO:
843	{
844		struct ctio_descr *c_descr;
845
846		c_descr = (struct ctio_descr *)ccb->ccb_h.targ_descr;
847		free(c_descr->buf);
848		num_ctios--;
849		/* FALLTHROUGH */
850	}
851	case XPT_ACCEPT_TARGET_IO:
852		free(ccb->ccb_h.targ_descr);
853		/* FALLTHROUGH */
854	case XPT_IMMED_NOTIFY:
855	default:
856		free(ccb);
857		break;
858	}
859}
860
861static cam_status
862get_sim_flags(u_int16_t *flags)
863{
864	struct ccb_pathinq cpi;
865	cam_status status;
866
867	/* Find SIM capabilities */
868	bzero(&cpi, sizeof(cpi));
869	cpi.ccb_h.func_code = XPT_PATH_INQ;
870	send_ccb((union ccb *)&cpi, /*priority*/1);
871	status = cpi.ccb_h.status & CAM_STATUS_MASK;
872	if (status != CAM_REQ_CMP) {
873		fprintf(stderr, "CPI failed, status %#x\n", status);
874		return (status);
875	}
876
877	/* Can only enable on controllers that support target mode */
878	if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
879		fprintf(stderr, "HBA does not support target mode\n");
880		status = CAM_PATH_INVALID;
881		return (status);
882	}
883
884	*flags = cpi.hba_inquiry;
885	return (status);
886}
887
888static void
889rel_simq()
890{
891	struct ccb_relsim crs;
892
893	bzero(&crs, sizeof(crs));
894	crs.ccb_h.func_code = XPT_REL_SIMQ;
895	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
896	crs.openings = 0;
897	crs.release_timeout = 0;
898	crs.qfrozen_cnt = 0;
899	send_ccb((union ccb *)&crs, /*priority*/0);
900}
901
902/* Cancel all pending CCBs. */
903static void
904abort_all_pending()
905{
906	struct ccb_abort	 cab;
907	struct ccb_hdr		*ccb_h;
908
909	if (debug)
910		  warnx("abort_all_pending");
911
912	bzero(&cab, sizeof(cab));
913	cab.ccb_h.func_code = XPT_ABORT;
914	TAILQ_FOREACH(ccb_h, &pending_queue, periph_links.tqe) {
915		if (debug)
916			  warnx("Aborting pending CCB %p\n", ccb_h);
917		cab.abort_ccb = (union ccb *)ccb_h;
918		send_ccb((union ccb *)&cab, /*priority*/1);
919		if (cab.ccb_h.status != CAM_REQ_CMP) {
920			warnx("Unable to abort CCB, status %#x\n",
921			       cab.ccb_h.status);
922		}
923	}
924}
925
/*
 * Print a usage summary and exit with failure.
 *
 * The previously advertised "-r numbufs" option was removed from the
 * message: main()'s getopt string ("AdSTb:c:s:W:") has never accepted
 * it.
 */
static void
usage()
{
	fprintf(stderr,
		"Usage: scsi_target [-AdST] [-b bufsize] [-c sectorsize]\n"
		"\t\t[-s volsize] [-W 8,16,32]\n"
		"\t\tbus:target:lun filename\n");
	exit(1);
}
935