/*
 * SCSI Disk Emulator
 *
 * Copyright (c) 2002 Nate Lawson.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/share/examples/scsi_target/scsi_target.c 120428 2003-09-25 05:43:26Z simokawa $
 */

#include <sys/types.h>
#include <errno.h>
#include <err.h>
#include <fcntl.h>
#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>
#include <aio.h>
#include <assert.h>
#include <sys/stat.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/param.h>
#include <sys/disk.h>
#include <cam/cam_queue.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_targetio.h>
#include <cam/scsi/scsi_message.h>
#include "scsi_target.h"

/* Maximum amount to transfer per CTIO */
#define MAX_XFER	MAXPHYS
/* Maximum number of allocated CTIOs */
#define MAX_CTIOS	32
/* Maximum sector size for emulated volume */
#define MAX_SECTOR	32768
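
/*
 * Terminology: in CAM target mode, an ATIO (Accept Target I/O) delivers a
 * SCSI CDB from an initiator; we answer it with one or more CTIOs
 * (Continue Target I/O) that move data and, finally, status.  An INOT
 * (Immediate Notify) reports asynchronous events such as bus resets.
 */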

/* Global variables */
int		debug;
u_int32_t	volume_size;
size_t		sector_size;
size_t		buf_size;

/* Local variables */
static int    targ_fd;
static int    kq_fd;
static int    file_fd;
static int    num_ctios;
static struct ccb_queue		pending_queue;
static struct ccb_queue		work_queue;
static struct ioc_enable_lun	ioc_enlun = {
	CAM_BUS_WILDCARD,
	CAM_TARGET_WILDCARD,
	CAM_LUN_WILDCARD
};

/* Local functions */
static void		cleanup(void);
static int		init_ccbs(void);
static void		request_loop(void);
static void		handle_read(void);
/* static int		work_atio(struct ccb_accept_tio *); */
static void		queue_io(struct ccb_scsiio *);
static void		run_queue(struct ccb_accept_tio *);
static int		work_inot(struct ccb_immed_notify *);
static struct ccb_scsiio *
			get_ctio(void);
/* static void		free_ccb(union ccb *); */
static cam_status	get_sim_flags(u_int16_t *);
static void		rel_simq(void);
static void		abort_all_pending(void);
static void		usage(void);

int
main(int argc, char *argv[])
{
	int ch, unit;
	char *file_name, targname[16];
	u_int16_t req_flags, sim_flags;
	off_t user_size;

	/* Initialize */
	debug = 0;
	req_flags = sim_flags = 0;
	user_size = 0;
	targ_fd = file_fd = kq_fd = -1;
	num_ctios = 0;
	sector_size = SECTOR_SIZE;
	buf_size = DFLTPHYS;

	/* Prepare resource pools */
	TAILQ_INIT(&pending_queue);
	TAILQ_INIT(&work_queue);

	while ((ch = getopt(argc, argv, "AdSTb:c:s:W:")) != -1) {
		switch(ch) {
		case 'A':
			req_flags |= SID_Addr16;
			break;
		case 'd':
			debug = 1;
			break;
		case 'S':
			req_flags |= SID_Sync;
			break;
		case 'T':
			req_flags |= SID_CmdQue;
			break;
		case 'b':
			buf_size = atoi(optarg);
			if (buf_size < 256 || buf_size > MAX_XFER)
				errx(1, "Unreasonable buf size: %s", optarg);
			break;
		case 'c':
			sector_size = atoi(optarg);
			if (sector_size < 512 || sector_size > MAX_SECTOR)
				errx(1, "Unreasonable sector size: %s", optarg);
			break;
		case 's':
			user_size = strtoll(optarg, (char **)NULL, /*base*/10);
			if (user_size < 0)
				errx(1, "Unreasonable volume size: %s", optarg);
			break;
		case 'W':
			req_flags &= ~(SID_WBus16 | SID_WBus32);
			switch (atoi(optarg)) {
			case 8:
				/* Leave req_flags zeroed */
				break;
			case 16:
				req_flags |= SID_WBus16;
				break;
			case 32:
				req_flags |= SID_WBus32;
				break;
			default:
				warnx("Width %s not supported", optarg);
				usage();
				/* NOTREACHED */
			}
			break;
		default:
			usage();
			/* NOTREACHED */
		}
	}
	argc -= optind;
	argv += optind;

	if (argc != 2)
		usage();

	sscanf(argv[0], "%u:%u:%u", &ioc_enlun.path_id, &ioc_enlun.target_id,
	       &ioc_enlun.lun_id);
	file_name = argv[1];

	if (ioc_enlun.path_id == CAM_BUS_WILDCARD ||
	    ioc_enlun.target_id == CAM_TARGET_WILDCARD ||
	    ioc_enlun.lun_id == CAM_LUN_WILDCARD) {
		warnx("Incomplete target path specified");
		usage();
		/* NOTREACHED */
	}
	/* We don't support any vendor-specific commands */
	ioc_enlun.grp6_len = 0;
	ioc_enlun.grp7_len = 0;

	/* Open backing store for IO */
	file_fd = open(file_name, O_RDWR);
	if (file_fd < 0)
		err(1, "open backing store file");

	/* Check backing store size or use the size user gave us */
	if (user_size == 0) {
		struct stat st;

		if (fstat(file_fd, &st) < 0)
			err(1, "fstat file");
#if __FreeBSD_version >= 500000
		if (S_ISCHR(st.st_mode)) {
			/* raw device */
			off_t mediasize;
			if (ioctl(file_fd, DIOCGMEDIASIZE, &mediasize) < 0)
				err(1, "DIOCGMEDIASIZE");

			/* XXX get sector size by ioctl()?? */
			volume_size = mediasize / sector_size;
		} else
#endif
			volume_size = st.st_size / sector_size;
	} else {
		volume_size = user_size / sector_size;
	}
	if (volume_size == 0)
		errx(1, "volume must be larger than %lu",
		     (unsigned long)sector_size);

	{
		struct aiocb aio, *aiop;

		/* Make sure we have working AIO support */
		memset(&aio, 0, sizeof(aio));
		aio.aio_buf = malloc(sector_size);
		if (aio.aio_buf == NULL)
			err(1, "malloc");
		aio.aio_fildes = file_fd;
		aio.aio_offset = 0;
		aio.aio_nbytes = sector_size;
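		/*
		 * An unimplemented syscall normally raises SIGSYS; ignore
		 * it around the probe so that a kernel without VFS_AIO
		 * makes aio_read() fail with an error return instead of
		 * killing us.
		 */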
		signal(SIGSYS, SIG_IGN);
		if (aio_read(&aio) != 0) {
			fprintf(stderr,
				"You must enable VFS_AIO in your kernel "
				"or load the aio(4) module.\n");
			err(1, "aio_read");
		}
		if (aio_waitcomplete(&aiop, NULL) != sector_size)
			err(1, "aio_waitcomplete");
		assert(aiop == &aio);
		signal(SIGSYS, SIG_DFL);
		free((void *)aio.aio_buf);
		if (debug)
			warnx("aio support tested ok");
	}

	/* Go through all the control devices and find one that isn't busy. */
	unit = 0;
	do {
		snprintf(targname, sizeof(targname), "/dev/targ%d", unit++);
		targ_fd = open(targname, O_RDWR);
	} while (targ_fd < 0 && errno == EBUSY);

	if (targ_fd < 0)
		err(1, "Tried to open %d devices, none available", unit);

	/* The first three are handled by kevent() later */
	signal(SIGHUP, SIG_IGN);
	signal(SIGINT, SIG_IGN);
	signal(SIGTERM, SIG_IGN);
	signal(SIGPROF, SIG_IGN);
	signal(SIGALRM, SIG_IGN);
	signal(SIGSTOP, SIG_IGN);
	signal(SIGTSTP, SIG_IGN);

	/* Register a cleanup handler to run when exiting */
	atexit(cleanup);

	/* Enable listening on the specified LUN */
	if (ioctl(targ_fd, TARGIOCENABLE, &ioc_enlun) != 0)
		err(1, "TARGIOCENABLE");

	/* Enable debugging if requested */
	if (debug) {
		if (ioctl(targ_fd, TARGIOCDEBUG, &debug) != 0)
			err(1, "TARGIOCDEBUG");
	}

	/* Set up inquiry data according to what SIM supports */
	if (get_sim_flags(&sim_flags) != CAM_REQ_CMP)
		errx(1, "get_sim_flags");
	if (tcmd_init(req_flags, sim_flags) != 0)
		errx(1, "Initializing tcmd subsystem failed");

	/* Queue ATIOs and INOTs on descriptor */
	if (init_ccbs() != 0)
		errx(1, "init_ccbs failed");

	if (debug)
		warnx("main loop beginning");
	request_loop();

	exit(0);
}

static void
cleanup()
{
	struct ccb_hdr *ccb_h;

	if (debug) {
		warnx("cleanup called");
		debug = 0;
		ioctl(targ_fd, TARGIOCDEBUG, &debug);
	}
	ioctl(targ_fd, TARGIOCDISABLE, NULL);
	close(targ_fd);

	while ((ccb_h = TAILQ_FIRST(&pending_queue)) != NULL) {
		TAILQ_REMOVE(&pending_queue, ccb_h, periph_links.tqe);
		free_ccb((union ccb *)ccb_h);
	}
	while ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
		TAILQ_REMOVE(&work_queue, ccb_h, periph_links.tqe);
		free_ccb((union ccb *)ccb_h);
	}

	if (kq_fd != -1)
		close(kq_fd);
}

/* Allocate ATIOs/INOTs and queue on HBA */
static int
init_ccbs()
{
	int i;

	for (i = 0; i < MAX_INITIATORS; i++) {
		struct ccb_accept_tio *atio;
		struct atio_descr *a_descr;
		struct ccb_immed_notify *inot;

		atio = (struct ccb_accept_tio *)malloc(sizeof(*atio));
		if (atio == NULL) {
			warn("malloc ATIO");
			return (-1);
		}
		a_descr = (struct atio_descr *)malloc(sizeof(*a_descr));
		if (a_descr == NULL) {
			free(atio);
			warn("malloc atio_descr");
			return (-1);
		}
		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		atio->ccb_h.targ_descr = a_descr;
		send_ccb((union ccb *)atio, /*priority*/1);

		inot = (struct ccb_immed_notify *)malloc(sizeof(*inot));
		if (inot == NULL) {
			warn("malloc INOT");
			return (-1);
		}
		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
		send_ccb((union ccb *)inot, /*priority*/1);
	}

	return (0);
}

static void
request_loop()
{
	struct kevent events[MAX_EVENTS];
	struct timespec ts, *tptr;
	int quit;

	/* Register kqueue for event notification */
	if ((kq_fd = kqueue()) < 0)
		err(1, "init kqueue");

	/* Set up some default events */
	EV_SET(&events[0], SIGHUP, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[1], SIGINT, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[2], SIGTERM, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[3], targ_fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, 0);
	if (kevent(kq_fd, events, 4, NULL, 0, NULL) < 0)
		err(1, "kevent signal registration");

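	/*
	 * Note: EVFILT_AIO completions are not registered here; each
	 * aiocb carries a SIGEV_KEVENT sigevent aimed at this kqueue
	 * (set up in get_ctio()), so they arrive on kq_fd as well.
	 */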
	ts.tv_sec = 0;
	ts.tv_nsec = 0;
	tptr = NULL;
	quit = 0;

	/* Loop until user signal */
	while (quit == 0) {
		int retval, i;
		struct ccb_hdr *ccb_h;

		/* Check for the next signal, read ready, or AIO completion */
		retval = kevent(kq_fd, NULL, 0, events, MAX_EVENTS, tptr);
		if (retval < 0) {
			if (errno == EINTR) {
				if (debug)
					warnx("EINTR, looping");
				continue;
			} else {
				err(1, "kevent failed");
			}
		} else if (retval > MAX_EVENTS) {
			errx(1, "kevent returned more events than allocated?");
		}

		/* Process all received events. */
		for (i = 0; i < retval; i++) {
			if ((events[i].flags & EV_ERROR) != 0)
				errx(1, "kevent registration failed");

			switch (events[i].filter) {
			case EVFILT_READ:
				if (debug)
					warnx("read ready");
				handle_read();
				break;
			case EVFILT_AIO:
			{
				struct ccb_scsiio *ctio;
				struct ctio_descr *c_descr;
				if (debug)
					warnx("aio ready");

				ctio = (struct ccb_scsiio *)events[i].udata;
				c_descr = (struct ctio_descr *)
					  ctio->ccb_h.targ_descr;
				c_descr->event = AIO_DONE;
				/* Queue on the appropriate ATIO */
				queue_io(ctio);
				/* Process any queued completions. */
				run_queue(c_descr->atio);
				break;
			}
			case EVFILT_SIGNAL:
				if (debug)
					warnx("signal ready, setting quit");
				quit = 1;
				break;
			default:
				warnx("unknown event %#x", events[i].filter);
				break;
			}

			if (debug)
				warnx("event done");
		}

		/* Grab the first CCB and perform one work unit. */
		if ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
			union ccb *ccb;

			ccb = (union ccb *)ccb_h;
			switch (ccb_h->func_code) {
			case XPT_ACCEPT_TARGET_IO:
				/* Start one more transfer. */
				retval = work_atio(&ccb->atio);
				break;
			case XPT_IMMED_NOTIFY:
				retval = work_inot(&ccb->cin);
				break;
			default:
				warnx("Unhandled ccb type %#x on workq",
				      ccb_h->func_code);
				abort();
				/* NOTREACHED */
			}

			/* Assume work function handled the exception */
			if ((ccb_h->status & CAM_DEV_QFRZN) != 0) {
				if (debug) {
					warnx("Queue frozen receiving CCB, "
					      "releasing");
				}
				rel_simq();
			}

			/* No more work needed for this command. */
			if (retval == 0) {
				TAILQ_REMOVE(&work_queue, ccb_h,
					     periph_links.tqe);
			}
		}

		/*
		 * Poll for new events (i.e. completions) while we
		 * are processing CCBs on the work_queue. Once it's
		 * empty, use an infinite wait.
		 */
		if (!TAILQ_EMPTY(&work_queue))
			tptr = &ts;
		else
			tptr = NULL;
	}
}

/* CCBs are ready from the kernel */
static void
handle_read()
{
	union ccb *ccb_array[MAX_INITIATORS], *ccb;
	int ccb_count, i;

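	/*
	 * read(2) on the targ(4) device returns the user-space pointers
	 * of CCBs we previously queued via write(2) in send_ccb(), as
	 * many as are currently ready.
	 */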
	ccb_count = read(targ_fd, ccb_array, sizeof(ccb_array));
	if (ccb_count <= 0) {
		warn("read ccb ptrs");
		return;
	}
	ccb_count /= sizeof(union ccb *);
	if (ccb_count < 1) {
		warnx("truncated read ccb ptr?");
		return;
	}

	for (i = 0; i < ccb_count; i++) {
		ccb = ccb_array[i];
		TAILQ_REMOVE(&pending_queue, &ccb->ccb_h, periph_links.tqe);

		switch (ccb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
		{
			struct ccb_accept_tio *atio;
			struct atio_descr *a_descr;

			/* Initialize ATIO descr for this transaction */
			atio = &ccb->atio;
			a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
			bzero(a_descr, sizeof(*a_descr));
			TAILQ_INIT(&a_descr->cmplt_io);
			a_descr->flags = atio->ccb_h.flags &
				(CAM_DIS_DISCONNECT | CAM_TAG_ACTION_VALID);
			/* XXX add a_descr->priority */
			if ((atio->ccb_h.flags & CAM_CDB_POINTER) == 0)
				a_descr->cdb = atio->cdb_io.cdb_bytes;
			else
				a_descr->cdb = atio->cdb_io.cdb_ptr;

			/* ATIOs are processed in FIFO order */
			TAILQ_INSERT_TAIL(&work_queue, &ccb->ccb_h,
					  periph_links.tqe);
			break;
		}
		case XPT_CONT_TARGET_IO:
		{
			struct ccb_scsiio *ctio;
			struct ctio_descr *c_descr;

			ctio = &ccb->ctio;
			c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
			c_descr->event = CTIO_DONE;
			/* Queue on the appropriate ATIO */
			queue_io(ctio);
			/* Process any queued completions. */
			run_queue(c_descr->atio);
			break;
		}
		case XPT_IMMED_NOTIFY:
			/* INOTs are handled with priority */
			TAILQ_INSERT_HEAD(&work_queue, &ccb->ccb_h,
					  periph_links.tqe);
			break;
		default:
			warnx("Unhandled ccb type %#x in handle_read",
			      ccb->ccb_h.func_code);
			break;
		}
	}
}

/* Process an ATIO CCB from the kernel */
int
work_atio(struct ccb_accept_tio *atio)
{
	struct ccb_scsiio *ctio;
	struct atio_descr *a_descr;
	struct ctio_descr *c_descr;
	cam_status status;
	int ret;

	if (debug)
		warnx("Working on ATIO %p", atio);

	a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;

	/* Get a CTIO and initialize it according to our known parameters */
	ctio = get_ctio();
	if (ctio == NULL)
		return (1);
	ret = 0;
	ctio->ccb_h.flags = a_descr->flags;
	ctio->tag_id = atio->tag_id;
	ctio->init_id = atio->init_id;
	/* XXX priority needs to be added to a_descr */
	c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
	c_descr->atio = atio;
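	/*
	 * Pick the next backing-store offset for this CTIO: for a read
	 * (CAM_DIR_IN), continue from how much file I/O has been issued
	 * so far (targ_req); for a write, from how much data the
	 * initiator has supplied (init_req).
	 */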
	if ((a_descr->flags & CAM_DIR_IN) != 0)
		c_descr->offset = a_descr->base_off + a_descr->targ_req;
	else if ((a_descr->flags & CAM_DIR_MASK) == CAM_DIR_OUT)
		c_descr->offset = a_descr->base_off + a_descr->init_req;
	else
		c_descr->offset = a_descr->base_off;

	/*
	 * Return a check condition if there was an error while
	 * receiving this ATIO.
	 */
	if (atio->sense_len != 0) {
		struct scsi_sense_data *sense;

		if (debug) {
			warnx("ATIO with %u bytes sense received",
			      atio->sense_len);
		}
		sense = &atio->sense_data;
		tcmd_sense(ctio->init_id, ctio, sense->flags,
			   sense->add_sense_code, sense->add_sense_code_qual);
		send_ccb((union ccb *)ctio, /*priority*/1);
		return (0);
	}

	status = atio->ccb_h.status & CAM_STATUS_MASK;
	switch (status) {
	case CAM_CDB_RECVD:
		ret = tcmd_handle(atio, ctio, ATIO_WORK);
		break;
	case CAM_REQ_ABORTED:
		/* Requeue on HBA */
		TAILQ_REMOVE(&work_queue, &atio->ccb_h, periph_links.tqe);
		send_ccb((union ccb *)atio, /*priority*/1);
		ret = 1;
		break;
	default:
		warnx("ATIO completed with unhandled status %#x", status);
		abort();
		/* NOTREACHED */
		break;
	}

	return (ret);
}

static void
queue_io(struct ccb_scsiio *ctio)
{
	struct ccb_hdr *ccb_h;
	struct io_queue *ioq;
	struct ctio_descr *c_descr, *curr_descr;

	c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
	/* If the completion is for a specific ATIO, queue in order */
	if (c_descr->atio != NULL) {
		struct atio_descr *a_descr;

		a_descr = (struct atio_descr *)c_descr->atio->ccb_h.targ_descr;
		ioq = &a_descr->cmplt_io;
	} else {
		errx(1, "CTIO %p has NULL ATIO", ctio);
	}

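	/*
	 * Completions tend to arrive in ascending offset order, so
	 * searching backwards from the tail usually finds the insertion
	 * point after a single comparison.
	 */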
	/* Insert in order, sorted by offset */
	if (!TAILQ_EMPTY(ioq)) {
		TAILQ_FOREACH_REVERSE(ccb_h, ioq, io_queue, periph_links.tqe) {
			curr_descr = (struct ctio_descr *)ccb_h->targ_descr;
			if (curr_descr->offset <= c_descr->offset) {
				TAILQ_INSERT_AFTER(ioq, ccb_h, &ctio->ccb_h,
						   periph_links.tqe);
				break;
			}
			if (TAILQ_PREV(ccb_h, io_queue, periph_links.tqe)
			    == NULL) {
				TAILQ_INSERT_BEFORE(ccb_h, &ctio->ccb_h,
						    periph_links.tqe);
				break;
			}
		}
	} else {
		TAILQ_INSERT_HEAD(ioq, &ctio->ccb_h, periph_links.tqe);
	}
}

/*
 * Go through all completed AIO/CTIOs for a given ATIO and advance data
 * counts, start continuation IO, etc.
 */
static void
run_queue(struct ccb_accept_tio *atio)
{
	struct atio_descr *a_descr;
	struct ccb_hdr *ccb_h;
	int sent_status, event;

	if (atio == NULL)
		return;

	a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;

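	/*
	 * Process completions strictly in offset order.  An I/O that
	 * finished ahead of its predecessors stays on cmplt_io until
	 * the gap closes; see the "out of order" case below.
	 */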
	while ((ccb_h = TAILQ_FIRST(&a_descr->cmplt_io)) != NULL) {
		struct ccb_scsiio *ctio;
		struct ctio_descr *c_descr;

		ctio = (struct ccb_scsiio *)ccb_h;
		c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;

		if (ctio->ccb_h.status == CAM_REQ_ABORTED) {
			TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
				     periph_links.tqe);
			free_ccb((union ccb *)ctio);
			send_ccb((union ccb *)atio, /*priority*/1);
			continue;
		}

		/* If completed item is in range, call handler */
		if ((c_descr->event == AIO_DONE &&
		    c_descr->offset == a_descr->base_off + a_descr->targ_ack)
		 || (c_descr->event == CTIO_DONE &&
		    c_descr->offset == a_descr->base_off + a_descr->init_ack)) {
			sent_status = (ccb_h->flags & CAM_SEND_STATUS) != 0;
			event = c_descr->event;

			TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
				     periph_links.tqe);
			tcmd_handle(atio, ctio, c_descr->event);

			/* If entire transfer complete, send back ATIO */
			if (sent_status != 0 && event == CTIO_DONE)
				send_ccb((union ccb *)atio, /*priority*/1);
		} else {
			/* Gap in offsets so wait until later callback */
			if (debug)
				warnx("IO %p out of order", ccb_h);
			break;
		}
	}
}

static int
work_inot(struct ccb_immed_notify *inot)
{
	cam_status status;
	int sense;

	if (debug)
		warnx("Working on INOT %p", inot);

	status = inot->ccb_h.status;
	sense = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	switch (status) {
	case CAM_SCSI_BUS_RESET:
		tcmd_ua(CAM_TARGET_WILDCARD, UA_BUS_RESET);
		abort_all_pending();
		break;
	case CAM_BDR_SENT:
		tcmd_ua(CAM_TARGET_WILDCARD, UA_BDR);
		abort_all_pending();
		break;
	case CAM_MESSAGE_RECV:
		switch (inot->message_args[0]) {
		case MSG_TASK_COMPLETE:
		case MSG_INITIATOR_DET_ERR:
		case MSG_ABORT_TASK_SET:
		case MSG_MESSAGE_REJECT:
		case MSG_NOOP:
		case MSG_PARITY_ERROR:
		case MSG_TARGET_RESET:
		case MSG_ABORT_TASK:
		case MSG_CLEAR_TASK_SET:
		default:
			warnx("INOT message %#x", inot->message_args[0]);
			break;
		}
		break;
	case CAM_REQ_ABORTED:
		warnx("INOT %p aborted", inot);
		break;
	default:
		warnx("Unhandled INOT status %#x", status);
		break;
	}

	/* If there is sense data, use it */
	if (sense != 0) {
		struct scsi_sense_data *sense_data;

		sense_data = &inot->sense_data;
		tcmd_sense(inot->initiator_id, NULL, sense_data->flags,
			   sense_data->add_sense_code,
			   sense_data->add_sense_code_qual);
		if (debug)
			warnx("INOT has sense: %#x", sense_data->flags);
	}

	/* Requeue on SIM */
	TAILQ_REMOVE(&work_queue, &inot->ccb_h, periph_links.tqe);
	send_ccb((union ccb *)inot, /*priority*/1);

	return (1);
}

void
send_ccb(union ccb *ccb, int priority)
{
	if (debug)
		warnx("sending ccb (%#x)", ccb->ccb_h.func_code);
	ccb->ccb_h.pinfo.priority = priority;
	if (XPT_FC_IS_QUEUED(ccb)) {
		TAILQ_INSERT_TAIL(&pending_queue, &ccb->ccb_h,
				  periph_links.tqe);
	}
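	/*
	 * Note that we write the *pointer* to the CCB, not the CCB
	 * itself; targ(4) operates on the user CCB and hands the same
	 * pointer back through read(2) when it completes.
	 */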
	if (write(targ_fd, &ccb, sizeof(ccb)) != sizeof(ccb)) {
		warn("write ccb");
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
	}
}

/* Return a CTIO/descr/buf combo from the freelist or malloc one */
static struct ccb_scsiio *
get_ctio()
{
	struct ccb_scsiio *ctio;
	struct ctio_descr *c_descr;
	struct sigevent *se;

	if (num_ctios == MAX_CTIOS)
		return (NULL);

	ctio = (struct ccb_scsiio *)malloc(sizeof(*ctio));
	if (ctio == NULL) {
		warn("malloc CTIO");
		return (NULL);
	}
	c_descr = (struct ctio_descr *)malloc(sizeof(*c_descr));
	if (c_descr == NULL) {
		free(ctio);
		warn("malloc ctio_descr");
		return (NULL);
	}
	c_descr->buf = malloc(buf_size);
	if (c_descr->buf == NULL) {
		free(c_descr);
		free(ctio);
		warn("malloc backing store");
		return (NULL);
	}
	num_ctios++;

	/* Initialize CTIO, CTIO descr, and AIO */
	ctio->ccb_h.func_code = XPT_CONT_TARGET_IO;
	ctio->ccb_h.retry_count = 2;
	ctio->ccb_h.timeout = CAM_TIME_INFINITY;
	ctio->data_ptr = c_descr->buf;
	ctio->ccb_h.targ_descr = c_descr;
	c_descr->aiocb.aio_buf = c_descr->buf;
	c_descr->aiocb.aio_fildes = file_fd;
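	/*
	 * Deliver AIO completion as an EVFILT_AIO kevent on our kqueue,
	 * with the owning CTIO as the event's udata (consumed by the
	 * EVFILT_AIO case in request_loop()).
	 */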
	se = &c_descr->aiocb.aio_sigevent;
	se->sigev_notify = SIGEV_KEVENT;
	se->sigev_notify_kqueue = kq_fd;
	se->sigev_value.sigval_ptr = ctio;

	return (ctio);
}

void
free_ccb(union ccb *ccb)
{
	switch (ccb->ccb_h.func_code) {
	case XPT_CONT_TARGET_IO:
	{
		struct ctio_descr *c_descr;

		c_descr = (struct ctio_descr *)ccb->ccb_h.targ_descr;
		free(c_descr->buf);
		num_ctios--;
		/* FALLTHROUGH */
	}
	case XPT_ACCEPT_TARGET_IO:
		free(ccb->ccb_h.targ_descr);
		/* FALLTHROUGH */
	case XPT_IMMED_NOTIFY:
	default:
		free(ccb);
		break;
	}
}

static cam_status
get_sim_flags(u_int16_t *flags)
{
	struct ccb_pathinq cpi;
	cam_status status;

	/* Find SIM capabilities */
	bzero(&cpi, sizeof(cpi));
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	send_ccb((union ccb *)&cpi, /*priority*/1);
	status = cpi.ccb_h.status & CAM_STATUS_MASK;
	if (status != CAM_REQ_CMP) {
		fprintf(stderr, "CPI failed, status %#x\n", status);
		return (status);
	}

	/* Can only enable on controllers that support target mode */
	if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
		fprintf(stderr, "HBA does not support target mode\n");
		status = CAM_PATH_INVALID;
		return (status);
	}

	*flags = cpi.hba_inquiry;
	return (status);
}

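/*
 * A CCB that completed with CAM_DEV_QFRZN has frozen the SIM queue;
 * ask the SIM to release it once its pending CCBs drain.
 */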
static void
rel_simq()
{
	struct ccb_relsim crs;

	bzero(&crs, sizeof(crs));
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
	crs.openings = 0;
	crs.release_timeout = 0;
	crs.qfrozen_cnt = 0;
	send_ccb((union ccb *)&crs, /*priority*/0);
}

/* Cancel all pending CCBs. */
static void
abort_all_pending()
{
	struct ccb_abort	 cab;
	struct ccb_hdr		*ccb_h;

	if (debug)
		warnx("abort_all_pending");

	bzero(&cab, sizeof(cab));
	cab.ccb_h.func_code = XPT_ABORT;
	TAILQ_FOREACH(ccb_h, &pending_queue, periph_links.tqe) {
		if (debug)
			warnx("Aborting pending CCB %p", ccb_h);
		cab.abort_ccb = (union ccb *)ccb_h;
		send_ccb((union ccb *)&cab, /*priority*/1);
		if (cab.ccb_h.status != CAM_REQ_CMP) {
			warnx("Unable to abort CCB, status %#x",
			      cab.ccb_h.status);
		}
	}
}

static void
usage()
{
	fprintf(stderr,
		"Usage: scsi_target [-AdST] [-b bufsize] [-c sectorsize]\n"
		"\t\t[-s volsize] [-W 8,16,32]\n"
		"\t\tbus:target:lun filename\n");
	exit(1);
}

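/*
 * Illustrative invocation (device path and sizes are examples only):
 * export a 64MB backing file as a disk on bus 0, target 1, LUN 0,
 * with debugging enabled:
 *
 *	scsi_target -d -s 67108864 0:1:0 /tmp/disk.img
 */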