/*
 * SCSI Disk Emulator
 *
 * Copyright (c) 2002 Nate Lawson.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/share/examples/scsi_target/scsi_target.c 196955 2009-09-07 23:16:27Z sbruno $
 */

#include <sys/types.h>
#include <ctype.h>
#include <errno.h>
#include <err.h>
#include <fcntl.h>
#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>
#include <aio.h>
#include <assert.h>
#include <sys/stat.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/param.h>
#include <sys/disk.h>
#include <cam/cam_queue.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_targetio.h>
#include <cam/scsi/scsi_message.h>
#include "scsi_target.h"

/* Maximum amount to transfer per CTIO */
#define MAX_XFER	MAXPHYS
/* Maximum number of allocated CTIOs */
#define MAX_CTIOS	64
/* Maximum sector size for emulated volume */
#define MAX_SECTOR	32768

/* Global variables */
int		debug;
int		notaio = 0;
off_t		volume_size;
u_int		sector_size;
size_t		buf_size;

/* Local variables */
static int    targ_fd;
static int    kq_fd;
static int    file_fd;
static int    num_ctios;
static struct ccb_queue		pending_queue;
static struct ccb_queue		work_queue;
static struct ioc_enable_lun	ioc_enlun = {
	CAM_BUS_WILDCARD,
	CAM_TARGET_WILDCARD,
	CAM_LUN_WILDCARD
};

/* Local functions */
static void		cleanup(void);
static int		init_ccbs(void);
static void		request_loop(void);
static void		handle_read(void);
/* static int		work_atio(struct ccb_accept_tio *); */
static void		queue_io(struct ccb_scsiio *);
static int		run_queue(struct ccb_accept_tio *);
static int		work_inot(struct ccb_immed_notify *);
static struct ccb_scsiio *
			get_ctio(void);
/* static void		free_ccb(union ccb *); */
static cam_status	get_sim_flags(u_int16_t *);
static void		rel_simq(void);
static void		abort_all_pending(void);
static void		usage(void);

int
main(int argc, char *argv[])
{
	int ch, unit;
	char *file_name, targname[16];
	u_int16_t req_flags, sim_flags;
	off_t user_size;

	/* Initialize */
	debug = 0;
	req_flags = sim_flags = 0;
	user_size = 0;
	targ_fd = file_fd = kq_fd = -1;
	num_ctios = 0;
	sector_size = SECTOR_SIZE;
	buf_size = DFLTPHYS;

	/* Prepare resource pools */
	TAILQ_INIT(&pending_queue);
	TAILQ_INIT(&work_queue);

	while ((ch = getopt(argc, argv, "AdSTYb:c:s:W:")) != -1) {
		switch(ch) {
		case 'A':
			req_flags |= SID_Addr16;
			break;
		case 'd':
			debug = 1;
			break;
		case 'S':
			req_flags |= SID_Sync;
			break;
		case 'T':
			req_flags |= SID_CmdQue;
			break;
		case 'b':
			buf_size = atoi(optarg);
			if (buf_size < 256 || buf_size > MAX_XFER)
				errx(1, "Unreasonable buf size: %s", optarg);
			break;
		case 'c':
			sector_size = atoi(optarg);
			if (sector_size < 512 || sector_size > MAX_SECTOR)
				errx(1, "Unreasonable sector size: %s", optarg);
			break;
		case 's':
		{
			int last, shift = 0;

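			/*
			 * Scale the size by an optional suffix.  Each case
			 * below falls through to the next, so 'k' adds 10
			 * to the shift, 'm' adds 20, and so on up to 'e'
			 * (2^60).
			 */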
			last = strlen(optarg) - 1;
			if (last > 0) {
				switch (tolower(optarg[last])) {
				case 'e':
					shift += 10;
					/* FALLTHROUGH */
				case 'p':
					shift += 10;
					/* FALLTHROUGH */
				case 't':
					shift += 10;
					/* FALLTHROUGH */
				case 'g':
					shift += 10;
					/* FALLTHROUGH */
				case 'm':
					shift += 10;
					/* FALLTHROUGH */
				case 'k':
					shift += 10;
					optarg[last] = 0;
					break;
				}
			}
			user_size = strtoll(optarg, (char **)NULL, /*base*/10);
			user_size <<= shift;
			if (user_size < 0)
				errx(1, "Unreasonable volume size: %s", optarg);
			break;
		}
		case 'W':
			req_flags &= ~(SID_WBus16 | SID_WBus32);
			switch (atoi(optarg)) {
			case 8:
				/* Leave req_flags zeroed */
				break;
			case 16:
				req_flags |= SID_WBus16;
				break;
			case 32:
				req_flags |= SID_WBus32;
				break;
			default:
				warnx("Width %s not supported", optarg);
				usage();
				/* NOTREACHED */
			}
			break;
		case 'Y':
			notaio = 1;
			break;
		default:
			usage();
			/* NOTREACHED */
		}
	}
	argc -= optind;
	argv += optind;

	if (argc != 2)
		usage();

	sscanf(argv[0], "%u:%u:%u", &ioc_enlun.path_id, &ioc_enlun.target_id,
	       &ioc_enlun.lun_id);
	file_name = argv[1];

	if (ioc_enlun.path_id == CAM_BUS_WILDCARD ||
	    ioc_enlun.target_id == CAM_TARGET_WILDCARD ||
	    ioc_enlun.lun_id == CAM_LUN_WILDCARD) {
		warnx("Incomplete target path specified");
		usage();
		/* NOTREACHED */
	}
	/* We don't support any vendor-specific commands */
	ioc_enlun.grp6_len = 0;
	ioc_enlun.grp7_len = 0;

	/* Open backing store for IO */
	file_fd = open(file_name, O_RDWR);
	if (file_fd < 0)
		errx(EX_NOINPUT, "open backing store file");

	/* Check backing store size or use the size user gave us */
	if (user_size == 0) {
		struct stat st;

		if (fstat(file_fd, &st) < 0)
			err(1, "fstat file");
#if __FreeBSD_version >= 500000
		if ((st.st_mode & S_IFCHR) != 0) {
			/* raw device */
			off_t mediasize;
			if (ioctl(file_fd, DIOCGMEDIASIZE, &mediasize) < 0)
				err(1, "DIOCGMEDIASIZE");

			/* XXX get sector size by ioctl()?? */
			volume_size = mediasize / sector_size;
		} else
#endif
			volume_size = st.st_size / sector_size;
	} else {
		volume_size = user_size / sector_size;
	}
	if (debug)
		warnx("volume_size: %d bytes x " OFF_FMT " sectors",
		    sector_size, volume_size);

	if (volume_size <= 0)
		errx(1, "volume must be larger than %d", sector_size);

	if (notaio == 0) {
		struct aiocb aio, *aiop;

		/* See if we have working AIO support */
		memset(&aio, 0, sizeof(aio));
		aio.aio_buf = malloc(sector_size);
		if (aio.aio_buf == NULL)
			err(1, "malloc");
		aio.aio_fildes = file_fd;
		aio.aio_offset = 0;
		aio.aio_nbytes = sector_size;
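		/*
		 * If the kernel lacks AIO support, the aio syscalls can
		 * raise SIGSYS; ignore it for the duration of the probe so
		 * we fall back to single-threaded mode instead of being
		 * killed.
		 */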
		signal(SIGSYS, SIG_IGN);
		if (aio_read(&aio) != 0) {
			printf("AIO support is not available - switching to"
			       " single-threaded mode.\n");
			notaio = 1;
		} else {
			if (aio_waitcomplete(&aiop, NULL) != sector_size)
				err(1, "aio_waitcomplete");
			assert(aiop == &aio);
			signal(SIGSYS, SIG_DFL);
		}
		free((void *)aio.aio_buf);
		if (debug && notaio == 0)
			warnx("aio support tested ok");
	}

	/* Go through all the control devices and find one that isn't busy. */
	unit = 0;
	do {
		snprintf(targname, sizeof(targname), "/dev/targ%d", unit++);
		targ_fd = open(targname, O_RDWR);
	} while (targ_fd < 0 && errno == EBUSY);

	if (targ_fd < 0)
		errx(1, "Tried to open %d devices, none available", unit);
	else
		warnx("opened %s", targname);

	/* The first three are handled by kevent() later */
	signal(SIGHUP, SIG_IGN);
	signal(SIGINT, SIG_IGN);
	signal(SIGTERM, SIG_IGN);
	signal(SIGPROF, SIG_IGN);
	signal(SIGALRM, SIG_IGN);
	signal(SIGSTOP, SIG_IGN);
	signal(SIGTSTP, SIG_IGN);

	/* Register a cleanup handler to run when exiting */
	atexit(cleanup);

	/* Enable listening on the specified LUN */
	if (ioctl(targ_fd, TARGIOCENABLE, &ioc_enlun) != 0)
		err(1, "TARGIOCENABLE");

	/* Enable debugging if requested */
	if (debug) {
		if (ioctl(targ_fd, TARGIOCDEBUG, &debug) != 0)
			warnx("TARGIOCDEBUG");
	}

	/* Set up inquiry data according to what SIM supports */
	if (get_sim_flags(&sim_flags) != CAM_REQ_CMP)
		errx(1, "get_sim_flags");

	if (tcmd_init(req_flags, sim_flags) != 0)
		errx(1, "Initializing tcmd subsystem failed");

	/* Queue ATIOs and INOTs on descriptor */
	if (init_ccbs() != 0)
		errx(1, "init_ccbs failed");

	if (debug)
		warnx("main loop beginning");

	request_loop();

	exit(0);
}

static void
cleanup()
{
	struct ccb_hdr *ccb_h;

	if (debug) {
		warnx("cleanup called");
		debug = 0;
		ioctl(targ_fd, TARGIOCDEBUG, &debug);
	}
	ioctl(targ_fd, TARGIOCDISABLE, NULL);
	close(targ_fd);

	while ((ccb_h = TAILQ_FIRST(&pending_queue)) != NULL) {
		TAILQ_REMOVE(&pending_queue, ccb_h, periph_links.tqe);
		free_ccb((union ccb *)ccb_h);
	}
	while ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
		TAILQ_REMOVE(&work_queue, ccb_h, periph_links.tqe);
		free_ccb((union ccb *)ccb_h);
	}

	if (kq_fd != -1)
		close(kq_fd);
}

/* Allocate ATIOs/INOTs and queue on HBA */
static int
init_ccbs()
{
	int i;

	for (i = 0; i < MAX_INITIATORS; i++) {
		struct ccb_accept_tio *atio;
		struct atio_descr *a_descr;
		struct ccb_immed_notify *inot;

		atio = (struct ccb_accept_tio *)malloc(sizeof(*atio));
		if (atio == NULL) {
			warn("malloc ATIO");
			return (-1);
		}
		a_descr = (struct atio_descr *)malloc(sizeof(*a_descr));
		if (a_descr == NULL) {
			free(atio);
			warn("malloc atio_descr");
			return (-1);
		}
		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		atio->ccb_h.targ_descr = a_descr;
		send_ccb((union ccb *)atio, /*priority*/1);

		inot = (struct ccb_immed_notify *)malloc(sizeof(*inot));
		if (inot == NULL) {
			warn("malloc INOT");
			return (-1);
		}
		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
		send_ccb((union ccb *)inot, /*priority*/1);
	}

	return (0);
}

static void
request_loop()
{
	struct kevent events[MAX_EVENTS];
	struct timespec ts, *tptr;
	int quit;

	/* Register kqueue for event notification */
	if ((kq_fd = kqueue()) < 0)
		err(1, "init kqueue");

	/* Set up some default events */
	EV_SET(&events[0], SIGHUP, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[1], SIGINT, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[2], SIGTERM, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[3], targ_fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, 0);
	if (kevent(kq_fd, events, 4, NULL, 0, NULL) < 0)
		err(1, "kevent signal registration");

	ts.tv_sec = 0;
	ts.tv_nsec = 0;
	tptr = NULL;
	quit = 0;

	/* Loop until user signal */
	while (quit == 0) {
		int retval, i, oo;
		struct ccb_hdr *ccb_h;

		/* Check for the next signal, read ready, or AIO completion */
		retval = kevent(kq_fd, NULL, 0, events, MAX_EVENTS, tptr);
		if (retval < 0) {
			if (errno == EINTR) {
				if (debug)
					warnx("EINTR, looping");
				continue;
			} else {
				err(1, "kevent failed");
			}
		} else if (retval > MAX_EVENTS) {
			errx(1, "kevent returned more events than allocated?");
		}

		/* Process all received events. */
		for (oo = i = 0; i < retval; i++) {
			if ((events[i].flags & EV_ERROR) != 0)
				errx(1, "kevent registration failed");

			switch (events[i].filter) {
			case EVFILT_READ:
				if (debug)
					warnx("read ready");
				handle_read();
				break;
			case EVFILT_AIO:
			{
				struct ccb_scsiio *ctio;
				struct ctio_descr *c_descr;
				if (debug)
					warnx("aio ready");

				ctio = (struct ccb_scsiio *)events[i].udata;
				c_descr = (struct ctio_descr *)
					  ctio->ccb_h.targ_descr;
				c_descr->event = AIO_DONE;
				/* Queue on the appropriate ATIO */
				queue_io(ctio);
				/* Process any queued completions. */
				oo += run_queue(c_descr->atio);
				break;
			}
			case EVFILT_SIGNAL:
				if (debug)
					warnx("signal ready, setting quit");
				quit = 1;
				break;
			default:
				warnx("unknown event %d", events[i].filter);
				break;
			}

			if (debug)
				warnx("event %d done", events[i].filter);
		}

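		/*
		 * run_queue() returned nonzero for at least one ATIO,
		 * meaning a completion arrived out of order.  Re-poll
		 * kevent with a zero timeout to pick up anything already
		 * pending before doing more work.
		 */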
		if (oo) {
			tptr = &ts;
			continue;
		}

		/* Grab the first CCB and perform one work unit. */
		if ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
			union ccb *ccb;

			ccb = (union ccb *)ccb_h;
			switch (ccb_h->func_code) {
			case XPT_ACCEPT_TARGET_IO:
				/* Start one more transfer. */
				retval = work_atio(&ccb->atio);
				break;
			case XPT_IMMED_NOTIFY:
				retval = work_inot(&ccb->cin);
				break;
			default:
				warnx("Unhandled ccb type %#x on workq",
				      ccb_h->func_code);
				abort();
				/* NOTREACHED */
			}

			/* Assume work function handled the exception */
			if ((ccb_h->status & CAM_DEV_QFRZN) != 0) {
				if (debug) {
					warnx("Queue frozen receiving CCB, "
					      "releasing");
				}
				rel_simq();
			}

			/* No more work needed for this command. */
			if (retval == 0) {
				TAILQ_REMOVE(&work_queue, ccb_h,
					     periph_links.tqe);
			}
		}

		/*
		 * Poll for new events (i.e. completions) while we
		 * are processing CCBs on the work_queue. Once it's
		 * empty, use an infinite wait.
		 */
		if (!TAILQ_EMPTY(&work_queue))
			tptr = &ts;
		else
			tptr = NULL;
	}
}

/* CCBs are ready from the kernel */
static void
handle_read()
{
	union ccb *ccb_array[MAX_INITIATORS], *ccb;
	int ccb_count, i;

	ccb_count = read(targ_fd, ccb_array, sizeof(ccb_array));
	if (ccb_count <= 0) {
		warn("read ccb ptrs");
		return;
	}
	ccb_count /= sizeof(union ccb *);
	if (ccb_count < 1) {
		warnx("truncated read ccb ptr?");
		return;
	}

	for (i = 0; i < ccb_count; i++) {
		ccb = ccb_array[i];
		TAILQ_REMOVE(&pending_queue, &ccb->ccb_h, periph_links.tqe);

		switch (ccb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
		{
			struct ccb_accept_tio *atio;
			struct atio_descr *a_descr;

			/* Initialize ATIO descr for this transaction */
			atio = &ccb->atio;
			a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
			bzero(a_descr, sizeof(*a_descr));
			TAILQ_INIT(&a_descr->cmplt_io);
			a_descr->flags = atio->ccb_h.flags &
				(CAM_DIS_DISCONNECT | CAM_TAG_ACTION_VALID);
			/* XXX add a_descr->priority */
			if ((atio->ccb_h.flags & CAM_CDB_POINTER) == 0)
				a_descr->cdb = atio->cdb_io.cdb_bytes;
			else
				a_descr->cdb = atio->cdb_io.cdb_ptr;

			/* ATIOs are processed in FIFO order */
			TAILQ_INSERT_TAIL(&work_queue, &ccb->ccb_h,
					  periph_links.tqe);
			break;
		}
		case XPT_CONT_TARGET_IO:
		{
			struct ccb_scsiio *ctio;
			struct ctio_descr *c_descr;

			ctio = &ccb->ctio;
			c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
			c_descr->event = CTIO_DONE;
			/* Queue on the appropriate ATIO */
			queue_io(ctio);
			/* Process any queued completions. */
			run_queue(c_descr->atio);
			break;
		}
		case XPT_IMMED_NOTIFY:
			/* INOTs are handled with priority */
			TAILQ_INSERT_HEAD(&work_queue, &ccb->ccb_h,
					  periph_links.tqe);
			break;
		default:
			warnx("Unhandled ccb type %#x in handle_read",
			      ccb->ccb_h.func_code);
			break;
		}
	}
}

/* Process an ATIO CCB from the kernel */
int
work_atio(struct ccb_accept_tio *atio)
{
	struct ccb_scsiio *ctio;
	struct atio_descr *a_descr;
	struct ctio_descr *c_descr;
	cam_status status;
	int ret;

	if (debug)
		warnx("Working on ATIO %p", atio);

	a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;

	/* Get a CTIO and initialize it according to our known parameters */
	ctio = get_ctio();
	if (ctio == NULL) {
		return (1);
	}
	ret = 0;
	ctio->ccb_h.flags = a_descr->flags;
	ctio->tag_id = atio->tag_id;
	ctio->init_id = atio->init_id;
	/* XXX priority needs to be added to a_descr */
	c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
	c_descr->atio = atio;
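	/*
	 * Pick the backing store offset for this CTIO: for data-in
	 * commands, continue from the amount already requested from the
	 * backing store (targ_req); for data-out commands, from the
	 * amount already requested from the initiator (init_req).
	 */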
	if ((a_descr->flags & CAM_DIR_IN) != 0)
		c_descr->offset = a_descr->base_off + a_descr->targ_req;
	else if ((a_descr->flags & CAM_DIR_MASK) == CAM_DIR_OUT)
		c_descr->offset = a_descr->base_off + a_descr->init_req;
	else
		c_descr->offset = a_descr->base_off;

	/*
	 * Return a check condition if there was an error while
	 * receiving this ATIO.
	 */
	if (atio->sense_len != 0) {
		struct scsi_sense_data *sense;

		if (debug) {
			warnx("ATIO with %u bytes sense received",
			      atio->sense_len);
		}
		sense = &atio->sense_data;
		tcmd_sense(ctio->init_id, ctio, sense->flags,
			   sense->add_sense_code, sense->add_sense_code_qual);
		send_ccb((union ccb *)ctio, /*priority*/1);
		return (0);
	}

	status = atio->ccb_h.status & CAM_STATUS_MASK;
	switch (status) {
	case CAM_CDB_RECVD:
		ret = tcmd_handle(atio, ctio, ATIO_WORK);
		break;
	case CAM_REQ_ABORTED:
		warn("ATIO %p aborted", a_descr);
		/* Requeue on HBA */
		TAILQ_REMOVE(&work_queue, &atio->ccb_h, periph_links.tqe);
		send_ccb((union ccb *)atio, /*priority*/1);
		ret = 1;
		break;
	default:
		warnx("ATIO completed with unhandled status %#x", status);
		abort();
		/* NOTREACHED */
		break;
	}

	return (ret);
}

static void
queue_io(struct ccb_scsiio *ctio)
{
	struct ccb_hdr *ccb_h;
	struct io_queue *ioq;
	struct ctio_descr *c_descr;

	c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
	if (c_descr->atio == NULL) {
		errx(1, "CTIO %p has NULL ATIO", ctio);
	}
	ioq = &((struct atio_descr *)c_descr->atio->ccb_h.targ_descr)->cmplt_io;

	if (TAILQ_EMPTY(ioq)) {
		TAILQ_INSERT_HEAD(ioq, &ctio->ccb_h, periph_links.tqe);
		return;
	}

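	/*
	 * Keep the completion list sorted by ascending offset: walk the
	 * list backwards, stop at the last entry whose offset is not
	 * greater than ours, and insert after it (or at the head if no
	 * such entry exists).
	 */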
	TAILQ_FOREACH_REVERSE(ccb_h, ioq, io_queue, periph_links.tqe) {
		struct ctio_descr *curr_descr =
		    (struct ctio_descr *)ccb_h->targ_descr;
		if (curr_descr->offset <= c_descr->offset) {
			break;
		}
	}

	if (ccb_h) {
		TAILQ_INSERT_AFTER(ioq, ccb_h, &ctio->ccb_h, periph_links.tqe);
	} else {
		TAILQ_INSERT_HEAD(ioq, &ctio->ccb_h, periph_links.tqe);
	}
}

/*
 * Go through all completed AIO/CTIOs for a given ATIO and advance data
 * counts, start continuation IO, etc.
 */
static int
run_queue(struct ccb_accept_tio *atio)
{
	struct atio_descr *a_descr;
	struct ccb_hdr *ccb_h;
	int sent_status, event;

	if (atio == NULL)
		return (0);

	a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;

	while ((ccb_h = TAILQ_FIRST(&a_descr->cmplt_io)) != NULL) {
		struct ccb_scsiio *ctio;
		struct ctio_descr *c_descr;

		ctio = (struct ccb_scsiio *)ccb_h;
		c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;

		if (ctio->ccb_h.status == CAM_REQ_ABORTED) {
			TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
				     periph_links.tqe);
			free_ccb((union ccb *)ctio);
			send_ccb((union ccb *)atio, /*priority*/1);
			continue;
		}

		/* If completed item is in range, call handler */
		if ((c_descr->event == AIO_DONE &&
		    c_descr->offset == a_descr->base_off + a_descr->targ_ack)
		 || (c_descr->event == CTIO_DONE &&
		    c_descr->offset == a_descr->base_off + a_descr->init_ack)) {
			sent_status = (ccb_h->flags & CAM_SEND_STATUS) != 0;
			event = c_descr->event;

			TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
				     periph_links.tqe);
			tcmd_handle(atio, ctio, c_descr->event);

			/* If entire transfer complete, send back ATIO */
			if (sent_status != 0 && event == CTIO_DONE)
				send_ccb((union ccb *)atio, /*priority*/1);
		} else {
			/* Gap in offsets so wait until later callback */
			if (/* debug */ 1)
				warnx("IO %p:%p out of order %s", ccb_h,
				    a_descr, c_descr->event == AIO_DONE ?
				    "aio" : "ctio");
			return (1);
		}
	}
	return (0);
}

static int
work_inot(struct ccb_immed_notify *inot)
{
	cam_status status;
	int sense;

	if (debug)
		warnx("Working on INOT %p", inot);

	status = inot->ccb_h.status;
	sense = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	switch (status) {
	case CAM_SCSI_BUS_RESET:
		tcmd_ua(CAM_TARGET_WILDCARD, UA_BUS_RESET);
		abort_all_pending();
		break;
	case CAM_BDR_SENT:
		tcmd_ua(CAM_TARGET_WILDCARD, UA_BDR);
		abort_all_pending();
		break;
	case CAM_MESSAGE_RECV:
		switch (inot->message_args[0]) {
		case MSG_TASK_COMPLETE:
		case MSG_INITIATOR_DET_ERR:
		case MSG_ABORT_TASK_SET:
		case MSG_MESSAGE_REJECT:
		case MSG_NOOP:
		case MSG_PARITY_ERROR:
		case MSG_TARGET_RESET:
		case MSG_ABORT_TASK:
		case MSG_CLEAR_TASK_SET:
		default:
			warnx("INOT message %#x", inot->message_args[0]);
			break;
		}
		break;
	case CAM_REQ_ABORTED:
		warnx("INOT %p aborted", inot);
		break;
	default:
		warnx("Unhandled INOT status %#x", status);
		break;
	}

	/* If there is sense data, use it */
	if (sense != 0) {
		struct scsi_sense_data *sense;

		sense = &inot->sense_data;
		tcmd_sense(inot->initiator_id, NULL, sense->flags,
			   sense->add_sense_code, sense->add_sense_code_qual);
		if (debug)
			warnx("INOT has sense: %#x", sense->flags);
	}

	/* Requeue on SIM */
	TAILQ_REMOVE(&work_queue, &inot->ccb_h, periph_links.tqe);
	send_ccb((union ccb *)inot, /*priority*/1);

	return (1);
}

void
send_ccb(union ccb *ccb, int priority)
{
	if (debug)
		warnx("sending ccb (%#x)", ccb->ccb_h.func_code);
	ccb->ccb_h.pinfo.priority = priority;
	if (XPT_FC_IS_QUEUED(ccb)) {
		TAILQ_INSERT_TAIL(&pending_queue, &ccb->ccb_h,
				  periph_links.tqe);
	}
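	/*
	 * The targ(4) control device consumes the user-space pointer to
	 * the CCB, not the CCB contents, so write the pointer itself.
	 */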
	if (write(targ_fd, &ccb, sizeof(ccb)) != sizeof(ccb)) {
		warn("write ccb");
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
	}
}

/* Return a CTIO/descr/buf combo from the freelist or malloc one */
static struct ccb_scsiio *
get_ctio()
{
	struct ccb_scsiio *ctio;
	struct ctio_descr *c_descr;
	struct sigevent *se;

	if (num_ctios == MAX_CTIOS) {
		warnx("at CTIO max");
		return (NULL);
	}

	ctio = (struct ccb_scsiio *)malloc(sizeof(*ctio));
	if (ctio == NULL) {
		warn("malloc CTIO");
		return (NULL);
	}
	c_descr = (struct ctio_descr *)malloc(sizeof(*c_descr));
	if (c_descr == NULL) {
		free(ctio);
		warn("malloc ctio_descr");
		return (NULL);
	}
	c_descr->buf = malloc(buf_size);
	if (c_descr->buf == NULL) {
		free(c_descr);
		free(ctio);
		warn("malloc backing store");
		return (NULL);
	}
	num_ctios++;

	/* Initialize CTIO, CTIO descr, and AIO */
	ctio->ccb_h.func_code = XPT_CONT_TARGET_IO;
	ctio->ccb_h.retry_count = 2;
	ctio->ccb_h.timeout = CAM_TIME_INFINITY;
	ctio->data_ptr = c_descr->buf;
	ctio->ccb_h.targ_descr = c_descr;
	c_descr->aiocb.aio_buf = c_descr->buf;
	c_descr->aiocb.aio_fildes = file_fd;
	se = &c_descr->aiocb.aio_sigevent;
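	/*
	 * Have AIO completion delivered as an EVFILT_AIO kevent on our
	 * kqueue, with this CTIO as the udata so request_loop() can find
	 * the request that finished.
	 */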
	se->sigev_notify = SIGEV_KEVENT;
	se->sigev_notify_kqueue = kq_fd;
	se->sigev_value.sival_ptr = ctio;

	return (ctio);
}

void
free_ccb(union ccb *ccb)
{
	switch (ccb->ccb_h.func_code) {
	case XPT_CONT_TARGET_IO:
	{
		struct ctio_descr *c_descr;

		c_descr = (struct ctio_descr *)ccb->ccb_h.targ_descr;
		free(c_descr->buf);
		num_ctios--;
		/* FALLTHROUGH */
	}
	case XPT_ACCEPT_TARGET_IO:
		free(ccb->ccb_h.targ_descr);
		/* FALLTHROUGH */
	case XPT_IMMED_NOTIFY:
	default:
		free(ccb);
		break;
	}
}

static cam_status
get_sim_flags(u_int16_t *flags)
{
	struct ccb_pathinq cpi;
	cam_status status;

	/* Find SIM capabilities */
	bzero(&cpi, sizeof(cpi));
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	send_ccb((union ccb *)&cpi, /*priority*/1);
	status = cpi.ccb_h.status & CAM_STATUS_MASK;
	if (status != CAM_REQ_CMP) {
		fprintf(stderr, "CPI failed, status %#x\n", status);
		return (status);
	}

	/* Can only enable on controllers that support target mode */
	if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
		fprintf(stderr, "HBA does not support target mode\n");
		status = CAM_PATH_INVALID;
		return (status);
	}

	*flags = cpi.hba_inquiry;
	return (status);
}

static void
rel_simq()
{
	struct ccb_relsim crs;

	bzero(&crs, sizeof(crs));
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
	crs.openings = 0;
	crs.release_timeout = 0;
	crs.qfrozen_cnt = 0;
	send_ccb((union ccb *)&crs, /*priority*/0);
}

/* Cancel all pending CCBs. */
static void
abort_all_pending()
{
	struct ccb_abort	 cab;
	struct ccb_hdr		*ccb_h;

	if (debug)
		warnx("abort_all_pending");

	bzero(&cab, sizeof(cab));
	cab.ccb_h.func_code = XPT_ABORT;
	TAILQ_FOREACH(ccb_h, &pending_queue, periph_links.tqe) {
		if (debug)
			warnx("Aborting pending CCB %p", ccb_h);
		cab.abort_ccb = (union ccb *)ccb_h;
		send_ccb((union ccb *)&cab, /*priority*/1);
		if (cab.ccb_h.status != CAM_REQ_CMP) {
			warnx("Unable to abort CCB, status %#x",
			      cab.ccb_h.status);
		}
	}
}

static void
usage()
{
	fprintf(stderr,
		"Usage: scsi_target [-AdSTY] [-b bufsize] [-c sectorsize]\n"
		"\t\t[-s volsize] [-W 8,16,32]\n"
		"\t\tbus:target:lun filename\n");
	exit(1);
}

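/*
 * Example invocation (hypothetical paths and nexus; the bus:target:lun
 * triple must name a LUN on a target-mode capable controller on your
 * system).  Create a 100 MB backing file and export it as bus 0,
 * target 1, LUN 0 with debugging enabled:
 *
 *	truncate -s 100m /tmp/scsi_disk.img
 *	scsi_target -d 0:1:0 /tmp/scsi_disk.img
 */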