/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * SCSI Disk Emulator
 *
 * Copyright (c) 2002 Nate Lawson.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <ctype.h>
#include <errno.h>
#include <err.h>
#include <fcntl.h>
#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>
#include <aio.h>
#include <assert.h>
#include <sys/stat.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/param.h>
#include <sys/disk.h>
#include <cam/cam_queue.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_targetio.h>
#include <cam/scsi/scsi_message.h>
#include "scsi_target.h"

/* Maximum amount to transfer per CTIO */
#define MAX_XFER	MAXPHYS
/* Maximum number of allocated CTIOs */
#define MAX_CTIOS	64
/* Maximum sector size for emulated volume */
#define MAX_SECTOR	32768

/* Global variables */
int		debug;
int		notaio = 0;
off_t		volume_size;
u_int		sector_size;
size_t		buf_size;

/* Local variables */
static int    targ_fd;
static int    kq_fd;
static int    file_fd;
static int    num_ctios;
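/*
 * pending_queue holds CCBs that have been handed to the kernel target
 * driver and are awaiting completion; work_queue holds CCBs returned by
 * the driver that still need processing in userland.
 */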
static struct ccb_queue		pending_queue;
static struct ccb_queue		work_queue;
static struct ioc_enable_lun	ioc_enlun = {
	CAM_BUS_WILDCARD,
	CAM_TARGET_WILDCARD,
	CAM_LUN_WILDCARD,
	0,
	0
};

/* Local functions */
static void		cleanup(void);
static int		init_ccbs(void);
static void		request_loop(void);
static void		handle_read(void);
/* static int		work_atio(struct ccb_accept_tio *); */
static void		queue_io(struct ccb_scsiio *);
static int		run_queue(struct ccb_accept_tio *);
static int		work_inot(struct ccb_immediate_notify *);
static struct ccb_scsiio *
			get_ctio(void);
/* static void		free_ccb(union ccb *); */
static cam_status	get_sim_flags(u_int16_t *);
static void		rel_simq(void);
static void		abort_all_pending(void);
static void		usage(void);

int
main(int argc, char *argv[])
{
	int ch;
	char *file_name;
	u_int16_t req_flags, sim_flags;
	off_t user_size;

	/* Initialize */
	debug = 0;
	req_flags = sim_flags = 0;
	user_size = 0;
	targ_fd = file_fd = kq_fd = -1;
	num_ctios = 0;
	sector_size = SECTOR_SIZE;
	buf_size = DFLTPHYS;

	/* Prepare resource pools */
	TAILQ_INIT(&pending_queue);
	TAILQ_INIT(&work_queue);

	while ((ch = getopt(argc, argv, "AdSTYb:c:s:W:")) != -1) {
		switch(ch) {
		case 'A':
			req_flags |= SID_Addr16;
			break;
		case 'd':
			debug = 1;
			break;
		case 'S':
			req_flags |= SID_Sync;
			break;
		case 'T':
			req_flags |= SID_CmdQue;
			break;
		case 'b':
			buf_size = atoi(optarg);
			if (buf_size < 256 || buf_size > MAX_XFER)
				errx(1, "Unreasonable buf size: %s", optarg);
			break;
		case 'c':
			sector_size = atoi(optarg);
			if (sector_size < 512 || sector_size > MAX_SECTOR)
				errx(1, "Unreasonable sector size: %s", optarg);
			break;
		case 's':
		{
			int last, shift = 0;

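			/*
			 * Accept an optional size suffix (k/m/g/t/p/e);
			 * each case below falls through, adding 10 to the
			 * shift so the value is scaled by 1024 per level.
			 */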
			last = strlen(optarg) - 1;
			if (last > 0) {
				switch (tolower(optarg[last])) {
				case 'e':
					shift += 10;
					/* FALLTHROUGH */
				case 'p':
					shift += 10;
					/* FALLTHROUGH */
				case 't':
					shift += 10;
					/* FALLTHROUGH */
				case 'g':
					shift += 10;
					/* FALLTHROUGH */
				case 'm':
					shift += 10;
					/* FALLTHROUGH */
				case 'k':
					shift += 10;
					optarg[last] = 0;
					break;
				}
			}
			user_size = strtoll(optarg, (char **)NULL, /*base*/10);
			user_size <<= shift;
			if (user_size < 0)
				errx(1, "Unreasonable volume size: %s", optarg);
			break;
		}
		case 'W':
			req_flags &= ~(SID_WBus16 | SID_WBus32);
			switch (atoi(optarg)) {
			case 8:
				/* Leave the width bits cleared */
				break;
			case 16:
				req_flags |= SID_WBus16;
				break;
			case 32:
				req_flags |= SID_WBus32;
				break;
			default:
				warnx("Width %s not supported", optarg);
				usage();
				/* NOTREACHED */
			}
			break;
		case 'Y':
			notaio = 1;
			break;
		default:
			usage();
			/* NOTREACHED */
		}
	}
	argc -= optind;
	argv += optind;

	if (argc != 2)
		usage();

	sscanf(argv[0], "%u:%u:%ju", &ioc_enlun.path_id, &ioc_enlun.target_id,
	       &ioc_enlun.lun_id);
	file_name = argv[1];

	if (ioc_enlun.path_id == CAM_BUS_WILDCARD ||
	    ioc_enlun.target_id == CAM_TARGET_WILDCARD ||
	    ioc_enlun.lun_id == CAM_LUN_WILDCARD) {
		warnx("Incomplete target path specified");
		usage();
		/* NOTREACHED */
	}
	/* We don't support any vendor-specific commands */
	ioc_enlun.grp6_len = 0;
	ioc_enlun.grp7_len = 0;

	/* Open backing store for IO */
	file_fd = open(file_name, O_RDWR);
	if (file_fd < 0)
		err(EX_NOINPUT, "open backing store file");

	/* Check backing store size or use the size user gave us */
	if (user_size == 0) {
		struct stat st;

		if (fstat(file_fd, &st) < 0)
			err(1, "fstat file");
#if __FreeBSD_version >= 500000
		if (S_ISCHR(st.st_mode)) {
			/* raw device */
			off_t mediasize;
			if (ioctl(file_fd, DIOCGMEDIASIZE, &mediasize) < 0)
				err(1, "DIOCGMEDIASIZE");

			/* XXX get sector size by ioctl()?? */
			volume_size = mediasize / sector_size;
		} else
#endif
			volume_size = st.st_size / sector_size;
	} else {
		volume_size = user_size / sector_size;
	}
	if (debug)
		warnx("volume_size: %d bytes x " OFF_FMT " sectors",
		    sector_size, volume_size);

	if (volume_size <= 0)
		errx(1, "volume must be larger than %d", sector_size);

	if (notaio == 0) {
		struct aiocb aio, *aiop;
		void *aio_buf;

		/* See if we have working AIO support */
		memset(&aio, 0, sizeof(aio));
		aio_buf = malloc(sector_size);
		aio.aio_buf = aio_buf;
		if (aio.aio_buf == NULL)
			err(1, "malloc");
		aio.aio_fildes = file_fd;
		aio.aio_offset = 0;
		aio.aio_nbytes = sector_size;
		signal(SIGSYS, SIG_IGN);
		if (aio_read(&aio) != 0) {
			printf("AIO support is not available; switching to"
			       " single-threaded mode.\n");
			notaio = 1;
		} else {
			if (aio_waitcomplete(&aiop, NULL) != sector_size)
				err(1, "aio_waitcomplete");
			assert(aiop == &aio);
			signal(SIGSYS, SIG_DFL);
		}
		free(aio_buf);
		if (debug && notaio == 0)
			warnx("aio support tested ok");
	}

	targ_fd = open("/dev/targ", O_RDWR);
	if (targ_fd < 0)
		err(1, "/dev/targ");
	else
		warnx("opened /dev/targ");

	/* The first three are handled by kevent() later */
	signal(SIGHUP, SIG_IGN);
	signal(SIGINT, SIG_IGN);
	signal(SIGTERM, SIG_IGN);
	signal(SIGPROF, SIG_IGN);
	signal(SIGALRM, SIG_IGN);
	signal(SIGSTOP, SIG_IGN);
	signal(SIGTSTP, SIG_IGN);

	/* Register a cleanup handler to run when exiting */
	atexit(cleanup);

	/* Enable listening on the specified LUN */
	if (ioctl(targ_fd, TARGIOCENABLE, &ioc_enlun) != 0)
		err(1, "TARGIOCENABLE");

	/* Enable debugging if requested */
	if (debug) {
		if (ioctl(targ_fd, TARGIOCDEBUG, &debug) != 0)
			warnx("TARGIOCDEBUG");
	}

	/* Set up inquiry data according to what SIM supports */
	if (get_sim_flags(&sim_flags) != CAM_REQ_CMP)
		errx(1, "get_sim_flags");

	if (tcmd_init(req_flags, sim_flags) != 0)
		errx(1, "Initializing tcmd subsystem failed");

	/* Queue ATIOs and INOTs on descriptor */
	if (init_ccbs() != 0)
		errx(1, "init_ccbs failed");

	if (debug)
		warnx("main loop beginning");

	request_loop();

	exit(0);
}

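/*
 * atexit() handler: turn off target-mode debugging, disable the enabled
 * LUN, and free any CCBs still sitting on the pending and work queues.
 */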
static void
cleanup(void)
{
	struct ccb_hdr *ccb_h;

	if (debug) {
		warnx("cleanup called");
		debug = 0;
		ioctl(targ_fd, TARGIOCDEBUG, &debug);
	}
	ioctl(targ_fd, TARGIOCDISABLE, NULL);
	close(targ_fd);

	while ((ccb_h = TAILQ_FIRST(&pending_queue)) != NULL) {
		TAILQ_REMOVE(&pending_queue, ccb_h, periph_links.tqe);
		free_ccb((union ccb *)ccb_h);
	}
	while ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
		TAILQ_REMOVE(&work_queue, ccb_h, periph_links.tqe);
		free_ccb((union ccb *)ccb_h);
	}

	if (kq_fd != -1)
		close(kq_fd);
}

/* Allocate ATIOs/INOTs and queue on HBA */
static int
init_ccbs(void)
{
	int i;

	for (i = 0; i < MAX_INITIATORS; i++) {
		struct ccb_accept_tio *atio;
		struct atio_descr *a_descr;
		struct ccb_immediate_notify *inot;

		atio = (struct ccb_accept_tio *)malloc(sizeof(*atio));
		if (atio == NULL) {
			warn("malloc ATIO");
			return (-1);
		}
		a_descr = (struct atio_descr *)malloc(sizeof(*a_descr));
		if (a_descr == NULL) {
			free(atio);
			warn("malloc atio_descr");
			return (-1);
		}
		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		atio->ccb_h.targ_descr = a_descr;
		send_ccb((union ccb *)atio, /*priority*/1);

		inot = (struct ccb_immediate_notify *)malloc(sizeof(*inot));
		if (inot == NULL) {
			warn("malloc INOT");
			return (-1);
		}
		inot->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
		send_ccb((union ccb *)inot, /*priority*/1);
	}

	return (0);
}

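/*
 * Main event loop: block in kevent() waiting for fatal signals, readable
 * CCBs on /dev/targ, and AIO completions, then perform one unit of work
 * from the work queue per iteration.
 */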
static void
request_loop(void)
{
	struct kevent events[MAX_EVENTS];
	struct timespec ts, *tptr;
	int quit;

	/* Register kqueue for event notification */
	if ((kq_fd = kqueue()) < 0)
		err(1, "init kqueue");

	/* Set up some default events */
	EV_SET(&events[0], SIGHUP, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[1], SIGINT, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[2], SIGTERM, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[3], targ_fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, 0);
	if (kevent(kq_fd, events, 4, NULL, 0, NULL) < 0)
		err(1, "kevent signal registration");

	ts.tv_sec = 0;
	ts.tv_nsec = 0;
	tptr = NULL;
	quit = 0;

	/* Loop until user signal */
	while (quit == 0) {
		int retval, i, oo;
		struct ccb_hdr *ccb_h;

		/* Check for the next signal, read ready, or AIO completion */
		retval = kevent(kq_fd, NULL, 0, events, MAX_EVENTS, tptr);
		if (retval < 0) {
			if (errno == EINTR) {
				if (debug)
					warnx("EINTR, looping");
				continue;
			} else {
				err(1, "kevent failed");
			}
		} else if (retval > MAX_EVENTS) {
			errx(1, "kevent returned more events than allocated?");
		}

		/* Process all received events. */
		for (oo = i = 0; i < retval; i++) {
			if ((events[i].flags & EV_ERROR) != 0)
				errx(1, "kevent registration failed");

			switch (events[i].filter) {
			case EVFILT_READ:
				if (debug)
					warnx("read ready");
				handle_read();
				break;
			case EVFILT_AIO:
			{
				struct ccb_scsiio *ctio;
				struct ctio_descr *c_descr;
				if (debug)
					warnx("aio ready");

				ctio = (struct ccb_scsiio *)events[i].udata;
				c_descr = (struct ctio_descr *)
					  ctio->ccb_h.targ_descr;
				c_descr->event = AIO_DONE;
				/* Queue on the appropriate ATIO */
				queue_io(ctio);
				/* Process any queued completions. */
				oo += run_queue(c_descr->atio);
				break;
			}
			case EVFILT_SIGNAL:
				if (debug)
					warnx("signal ready, setting quit");
				quit = 1;
				break;
			default:
				warnx("unknown event %d", events[i].filter);
				break;
			}

			if (debug)
				warnx("event %d done", events[i].filter);
		}

		/* Completions are pending out of order; poll again first. */
		if (oo) {
			tptr = &ts;
			continue;
		}

		/* Grab the first CCB and perform one work unit. */
		if ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
			union ccb *ccb;

			ccb = (union ccb *)ccb_h;
			switch (ccb_h->func_code) {
			case XPT_ACCEPT_TARGET_IO:
				/* Start one more transfer. */
				retval = work_atio(&ccb->atio);
				break;
			case XPT_IMMEDIATE_NOTIFY:
				retval = work_inot(&ccb->cin1);
				break;
			default:
				warnx("Unhandled ccb type %#x on workq",
				      ccb_h->func_code);
				abort();
				/* NOTREACHED */
			}

			/* Assume work function handled the exception */
			if ((ccb_h->status & CAM_DEV_QFRZN) != 0) {
				if (debug) {
					warnx("Queue frozen receiving CCB, "
					      "releasing");
				}
				rel_simq();
			}

			/* No more work needed for this command. */
			if (retval == 0) {
				TAILQ_REMOVE(&work_queue, ccb_h,
					     periph_links.tqe);
			}
		}

		/*
		 * Poll for new events (i.e. completions) while we
		 * are processing CCBs on the work_queue. Once it's
		 * empty, use an infinite wait.
		 */
		if (!TAILQ_EMPTY(&work_queue))
			tptr = &ts;
		else
			tptr = NULL;
	}
}

/* CCBs are ready from the kernel */
static void
handle_read(void)
{
	union ccb *ccb_array[MAX_INITIATORS], *ccb;
	int ccb_count, i;

	ccb_count = read(targ_fd, ccb_array, sizeof(ccb_array));
	if (ccb_count <= 0) {
		warn("read ccb ptrs");
		return;
	}
	ccb_count /= sizeof(union ccb *);
	if (ccb_count < 1) {
		warnx("truncated read ccb ptr?");
		return;
	}

	for (i = 0; i < ccb_count; i++) {
		ccb = ccb_array[i];
		TAILQ_REMOVE(&pending_queue, &ccb->ccb_h, periph_links.tqe);

		switch (ccb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
		{
			struct ccb_accept_tio *atio;
			struct atio_descr *a_descr;

			/* Initialize ATIO descr for this transaction */
			atio = &ccb->atio;
			a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
			bzero(a_descr, sizeof(*a_descr));
			TAILQ_INIT(&a_descr->cmplt_io);
			a_descr->flags = atio->ccb_h.flags &
				(CAM_DIS_DISCONNECT | CAM_TAG_ACTION_VALID);
			/* XXX add a_descr->priority */
			if ((atio->ccb_h.flags & CAM_CDB_POINTER) == 0)
				a_descr->cdb = atio->cdb_io.cdb_bytes;
			else
				a_descr->cdb = atio->cdb_io.cdb_ptr;

			/* ATIOs are processed in FIFO order */
			TAILQ_INSERT_TAIL(&work_queue, &ccb->ccb_h,
					  periph_links.tqe);
			break;
		}
		case XPT_CONT_TARGET_IO:
		{
			struct ccb_scsiio *ctio;
			struct ctio_descr *c_descr;

			ctio = &ccb->ctio;
			c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
			c_descr->event = CTIO_DONE;
			/* Queue on the appropriate ATIO */
			queue_io(ctio);
			/* Process any queued completions. */
			run_queue(c_descr->atio);
			break;
		}
		case XPT_IMMEDIATE_NOTIFY:
			/* INOTs are handled with priority */
			TAILQ_INSERT_HEAD(&work_queue, &ccb->ccb_h,
					  periph_links.tqe);
			break;
		default:
			warnx("Unhandled ccb type %#x in handle_read",
			      ccb->ccb_h.func_code);
			break;
		}
	}
}

/* Process an ATIO CCB from the kernel */
int
work_atio(struct ccb_accept_tio *atio)
{
	struct ccb_scsiio *ctio;
	struct atio_descr *a_descr;
	struct ctio_descr *c_descr;
	cam_status status;
	int ret;

	if (debug)
		warnx("Working on ATIO %p", atio);

	a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;

	/* Get a CTIO and initialize it according to our known parameters */
	ctio = get_ctio();
	if (ctio == NULL) {
		return (1);
	}
	ret = 0;
	ctio->ccb_h.flags = a_descr->flags;
	ctio->tag_id = atio->tag_id;
	ctio->init_id = atio->init_id;
	/* XXX priority needs to be added to a_descr */
	c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
	c_descr->atio = atio;
	if ((a_descr->flags & CAM_DIR_IN) != 0)
		c_descr->offset = a_descr->base_off + a_descr->targ_req;
	else if ((a_descr->flags & CAM_DIR_MASK) == CAM_DIR_OUT)
		c_descr->offset = a_descr->base_off + a_descr->init_req;
	else
		c_descr->offset = a_descr->base_off;

	/*
	 * Return a check condition if there was an error while
	 * receiving this ATIO.
	 */
	if (atio->sense_len != 0) {
		struct scsi_sense_data_fixed *sense;

		if (debug) {
			warnx("ATIO with %u bytes sense received",
			      atio->sense_len);
		}
		sense = (struct scsi_sense_data_fixed *)&atio->sense_data;
		tcmd_sense(ctio->init_id, ctio, sense->flags,
			   sense->add_sense_code, sense->add_sense_code_qual);
		send_ccb((union ccb *)ctio, /*priority*/1);
		return (0);
	}

	status = atio->ccb_h.status & CAM_STATUS_MASK;
	switch (status) {
	case CAM_CDB_RECVD:
		ret = tcmd_handle(atio, ctio, ATIO_WORK);
		break;
	case CAM_REQ_ABORTED:
		warn("ATIO %p aborted", a_descr);
		/* Requeue on HBA */
		TAILQ_REMOVE(&work_queue, &atio->ccb_h, periph_links.tqe);
		send_ccb((union ccb *)atio, /*priority*/1);
		ret = 1;
		break;
	default:
		warnx("ATIO completed with unhandled status %#x", status);
		abort();
		/* NOTREACHED */
		break;
	}

	return (ret);
}

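/*
 * Insert a completed CTIO into its parent ATIO's completion list, keeping
 * the list sorted by backing-store offset so run_queue can hand completions
 * to tcmd_handle() in order.
 */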
static void
queue_io(struct ccb_scsiio *ctio)
{
	struct ccb_hdr *ccb_h;
	struct io_queue *ioq;
	struct ctio_descr *c_descr;

	c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
	if (c_descr->atio == NULL) {
		errx(1, "CTIO %p has NULL ATIO", ctio);
	}
	ioq = &((struct atio_descr *)c_descr->atio->ccb_h.targ_descr)->cmplt_io;

	if (TAILQ_EMPTY(ioq)) {
		TAILQ_INSERT_HEAD(ioq, &ctio->ccb_h, periph_links.tqe);
		return;
	}

	TAILQ_FOREACH_REVERSE(ccb_h, ioq, io_queue, periph_links.tqe) {
		struct ctio_descr *curr_descr =
		    (struct ctio_descr *)ccb_h->targ_descr;
		if (curr_descr->offset <= c_descr->offset) {
			break;
		}
	}

	if (ccb_h) {
		TAILQ_INSERT_AFTER(ioq, ccb_h, &ctio->ccb_h, periph_links.tqe);
	} else {
		TAILQ_INSERT_HEAD(ioq, &ctio->ccb_h, periph_links.tqe);
	}
}

/*
 * Go through all completed AIO/CTIOs for a given ATIO and advance data
 * counts, start continuation IO, etc.
 */
static int
run_queue(struct ccb_accept_tio *atio)
{
	struct atio_descr *a_descr;
	struct ccb_hdr *ccb_h;
	int sent_status, event;

	if (atio == NULL)
		return (0);

	a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;

	while ((ccb_h = TAILQ_FIRST(&a_descr->cmplt_io)) != NULL) {
		struct ccb_scsiio *ctio;
		struct ctio_descr *c_descr;

		ctio = (struct ccb_scsiio *)ccb_h;
		c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;

		if (ctio->ccb_h.status == CAM_REQ_ABORTED) {
			TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
				     periph_links.tqe);
			free_ccb((union ccb *)ctio);
			send_ccb((union ccb *)atio, /*priority*/1);
			continue;
		}

		/* If completed item is in range, call handler */
		if ((c_descr->event == AIO_DONE &&
		    c_descr->offset == a_descr->base_off + a_descr->targ_ack)
		 || (c_descr->event == CTIO_DONE &&
		    c_descr->offset == a_descr->base_off + a_descr->init_ack)) {
			sent_status = (ccb_h->flags & CAM_SEND_STATUS) != 0;
			event = c_descr->event;

			TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
				     periph_links.tqe);
			tcmd_handle(atio, ctio, c_descr->event);

			/* If entire transfer complete, send back ATIO */
			if (sent_status != 0 && event == CTIO_DONE)
				send_ccb((union ccb *)atio, /*priority*/1);
		} else {
			/* Gap in offsets so wait until later callback */
			if (/* debug */ 1)
				warnx("IO %p:%p out of order %s", ccb_h,
				    a_descr, c_descr->event == AIO_DONE ?
				    "aio" : "ctio");
			return (1);
		}
	}
	return (0);
}

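/*
 * Handle an immediate notify CCB: post unit attentions for bus and device
 * resets, log other messages, then requeue the INOT with the SIM.
 */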
static int
work_inot(struct ccb_immediate_notify *inot)
{
	cam_status status;

	if (debug)
		warnx("Working on INOT %p", inot);

	status = inot->ccb_h.status;
	status &= CAM_STATUS_MASK;

	switch (status) {
	case CAM_SCSI_BUS_RESET:
		tcmd_ua(CAM_TARGET_WILDCARD, UA_BUS_RESET);
		abort_all_pending();
		break;
	case CAM_BDR_SENT:
		tcmd_ua(CAM_TARGET_WILDCARD, UA_BDR);
		abort_all_pending();
		break;
	case CAM_MESSAGE_RECV:
		switch (inot->arg) {
		case MSG_TASK_COMPLETE:
		case MSG_INITIATOR_DET_ERR:
		case MSG_ABORT_TASK_SET:
		case MSG_MESSAGE_REJECT:
		case MSG_NOOP:
		case MSG_PARITY_ERROR:
		case MSG_TARGET_RESET:
		case MSG_ABORT_TASK:
		case MSG_CLEAR_TASK_SET:
		default:
			warnx("INOT message %#x", inot->arg);
			break;
		}
		break;
	case CAM_REQ_ABORTED:
		warnx("INOT %p aborted", inot);
		break;
	default:
		warnx("Unhandled INOT status %#x", status);
		break;
	}

	/* Requeue on SIM */
	TAILQ_REMOVE(&work_queue, &inot->ccb_h, periph_links.tqe);
	send_ccb((union ccb *)inot, /*priority*/1);

	return (1);
}

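/*
 * Queue a CCB with the kernel target driver.  Only the pointer to the CCB
 * is written to /dev/targ; queued CCB types are also tracked on
 * pending_queue until they are read back in handle_read().
 */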
void
send_ccb(union ccb *ccb, int priority)
{
	if (debug)
		warnx("sending ccb (%#x)", ccb->ccb_h.func_code);
	ccb->ccb_h.pinfo.priority = priority;
	if (XPT_FC_IS_QUEUED(ccb)) {
		TAILQ_INSERT_TAIL(&pending_queue, &ccb->ccb_h,
				  periph_links.tqe);
	}
	if (write(targ_fd, &ccb, sizeof(ccb)) != sizeof(ccb)) {
		warn("write ccb");
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
	}
}

/* Allocate a CTIO, ctio_descr, and data buffer; at most MAX_CTIOS at once */
static struct ccb_scsiio *
get_ctio(void)
{
	struct ccb_scsiio *ctio;
	struct ctio_descr *c_descr;
	struct sigevent *se;

	if (num_ctios == MAX_CTIOS) {
		warnx("at CTIO max");
		return (NULL);
	}

	ctio = (struct ccb_scsiio *)malloc(sizeof(*ctio));
	if (ctio == NULL) {
		warn("malloc CTIO");
		return (NULL);
	}
	c_descr = (struct ctio_descr *)malloc(sizeof(*c_descr));
	if (c_descr == NULL) {
		free(ctio);
		warn("malloc ctio_descr");
		return (NULL);
	}
	c_descr->buf = malloc(buf_size);
	if (c_descr->buf == NULL) {
		free(c_descr);
		free(ctio);
		warn("malloc backing store");
		return (NULL);
	}
	num_ctios++;

	/* Initialize CTIO, CTIO descr, and AIO */
	ctio->ccb_h.func_code = XPT_CONT_TARGET_IO;
	ctio->ccb_h.retry_count = 2;
	ctio->ccb_h.timeout = CAM_TIME_INFINITY;
	ctio->data_ptr = c_descr->buf;
	ctio->ccb_h.targ_descr = c_descr;
	c_descr->aiocb.aio_buf = c_descr->buf;
	c_descr->aiocb.aio_fildes = file_fd;
	se = &c_descr->aiocb.aio_sigevent;
	se->sigev_notify = SIGEV_KEVENT;
	se->sigev_notify_kqueue = kq_fd;
	se->sigev_value.sival_ptr = ctio;

	return (ctio);
}

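/*
 * Release a CCB along with any data buffer and target descriptor attached
 * to it; the switch below intentionally falls through from CTIO to ATIO.
 */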
void
free_ccb(union ccb *ccb)
{
	switch (ccb->ccb_h.func_code) {
	case XPT_CONT_TARGET_IO:
	{
		struct ctio_descr *c_descr;

		c_descr = (struct ctio_descr *)ccb->ccb_h.targ_descr;
		free(c_descr->buf);
		num_ctios--;
		/* FALLTHROUGH */
	}
	case XPT_ACCEPT_TARGET_IO:
		free(ccb->ccb_h.targ_descr);
		/* FALLTHROUGH */
	case XPT_IMMEDIATE_NOTIFY:
	default:
		free(ccb);
		break;
	}
}

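/*
 * Ask the SIM for its inquiry capability flags via XPT_PATH_INQ so the
 * emulated inquiry data only advertises features the controller supports.
 */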
static cam_status
get_sim_flags(u_int16_t *flags)
{
	struct ccb_pathinq cpi;
	cam_status status;

	/* Find SIM capabilities */
	bzero(&cpi, sizeof(cpi));
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	send_ccb((union ccb *)&cpi, /*priority*/1);
	status = cpi.ccb_h.status & CAM_STATUS_MASK;
	if (status != CAM_REQ_CMP) {
		fprintf(stderr, "CPI failed, status %#x\n", status);
		return (status);
	}

	/* Can only enable on controllers that support target mode */
	if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
		fprintf(stderr, "HBA does not support target mode\n");
		status = CAM_PATH_INVALID;
		return (status);
	}

	*flags = cpi.hba_inquiry;
	return (status);
}

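/* Release the SIM queue after a CCB came back with CAM_DEV_QFRZN set. */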
static void
rel_simq(void)
{
	struct ccb_relsim crs;

	bzero(&crs, sizeof(crs));
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
	crs.openings = 0;
	crs.release_timeout = 0;
	crs.qfrozen_cnt = 0;
	send_ccb((union ccb *)&crs, /*priority*/0);
}

/* Cancel all pending CCBs. */
static void
abort_all_pending(void)
{
	struct ccb_abort	 cab;
	struct ccb_hdr		*ccb_h;

	if (debug)
		warnx("abort_all_pending");

	bzero(&cab, sizeof(cab));
	cab.ccb_h.func_code = XPT_ABORT;
	TAILQ_FOREACH(ccb_h, &pending_queue, periph_links.tqe) {
		if (debug)
			warnx("Aborting pending CCB %p", ccb_h);
		cab.abort_ccb = (union ccb *)ccb_h;
		send_ccb((union ccb *)&cab, /*priority*/1);
		if (cab.ccb_h.status != CAM_REQ_CMP) {
			warnx("Unable to abort CCB, status %#x",
			       cab.ccb_h.status);
		}
	}
}

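/*
 * Example invocation (illustrative only; adjust the bus:target:lun triple
 * to match an HBA running in target mode on your system):
 *
 *	scsi_target -d -s 100m 0:1:0 /tmp/scsi_backing_file
 */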
static void
usage(void)
{
	fprintf(stderr,
		"Usage: scsi_target [-AdSTY] [-b bufsize] [-c sectorsize]\n"
		"\t\t[-s volsize] [-W 8,16,32]\n"
		"\t\tbus:target:lun filename\n");
	exit(1);
}