/*
 * SCSI Disk Emulator
 *
 * Copyright (c) 2002 Nate Lawson.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/share/examples/scsi_target/scsi_target.c 228481 2011-12-13 21:26:33Z ed $
 */

#include <sys/types.h>
#include <ctype.h>
#include <errno.h>
#include <err.h>
#include <fcntl.h>
#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>
#include <aio.h>
#include <assert.h>
#include <sys/stat.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/param.h>
#include <sys/disk.h>
#include <cam/cam_queue.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_targetio.h>
#include <cam/scsi/scsi_message.h>
#include "scsi_target.h"

/* Maximum amount to transfer per CTIO */
#define MAX_XFER	MAXPHYS
/* Maximum number of allocated CTIOs */
#define MAX_CTIOS	64
/* Maximum sector size for emulated volume */
#define MAX_SECTOR	32768

/* Global variables */
int		debug;
int		notaio = 0;
off_t		volume_size;
u_int		sector_size;
size_t		buf_size;

/* Local variables */
static int    targ_fd;
static int    kq_fd;
static int    file_fd;
static int    num_ctios;
static struct ccb_queue		pending_queue;
static struct ccb_queue		work_queue;
static struct ioc_enable_lun	ioc_enlun = {
	CAM_BUS_WILDCARD,
	CAM_TARGET_WILDCARD,
	CAM_LUN_WILDCARD
};

/* Local functions */
static void		cleanup(void);
static int		init_ccbs(void);
static void		request_loop(void);
static void		handle_read(void);
/* static int		work_atio(struct ccb_accept_tio *); */
static void		queue_io(struct ccb_scsiio *);
static int		run_queue(struct ccb_accept_tio *);
static int		work_inot(struct ccb_immed_notify *);
static struct ccb_scsiio *
			get_ctio(void);
/* static void		free_ccb(union ccb *); */
static cam_status	get_sim_flags(u_int16_t *);
static void		rel_simq(void);
static void		abort_all_pending(void);
static void		usage(void);

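/*
 * Parse options, size the backing store, probe for AIO support, open
 * /dev/targ, enable the requested bus:target:lun, and run the event loop.
 */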
int
main(int argc, char *argv[])
{
	int ch;
	char *file_name;
	u_int16_t req_flags, sim_flags;
	off_t user_size;

	/* Initialize */
	debug = 0;
	req_flags = sim_flags = 0;
	user_size = 0;
	targ_fd = file_fd = kq_fd = -1;
	num_ctios = 0;
	sector_size = SECTOR_SIZE;
	buf_size = DFLTPHYS;

	/* Prepare resource pools */
	TAILQ_INIT(&pending_queue);
	TAILQ_INIT(&work_queue);

	while ((ch = getopt(argc, argv, "AdSTYb:c:s:W:")) != -1) {
		switch(ch) {
		case 'A':
			req_flags |= SID_Addr16;
			break;
		case 'd':
			debug = 1;
			break;
		case 'S':
			req_flags |= SID_Sync;
			break;
		case 'T':
			req_flags |= SID_CmdQue;
			break;
		case 'b':
			buf_size = atoi(optarg);
			if (buf_size < 256 || buf_size > MAX_XFER)
				errx(1, "Unreasonable buf size: %s", optarg);
			break;
		case 'c':
			sector_size = atoi(optarg);
			if (sector_size < 512 || sector_size > MAX_SECTOR)
				errx(1, "Unreasonable sector size: %s", optarg);
			break;
		case 's':
		{
			int last, shift = 0;

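			/*
			 * Accept an optional k/m/g/t/p/e suffix on the size:
			 * each case below adds 10 to the shift and falls
			 * through, so e.g. 'g' accumulates a 30-bit shift.
			 */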
			last = strlen(optarg) - 1;
			if (last > 0) {
				switch (tolower(optarg[last])) {
				case 'e':
					shift += 10;
					/* FALLTHROUGH */
				case 'p':
					shift += 10;
					/* FALLTHROUGH */
				case 't':
					shift += 10;
					/* FALLTHROUGH */
				case 'g':
					shift += 10;
					/* FALLTHROUGH */
				case 'm':
					shift += 10;
					/* FALLTHROUGH */
				case 'k':
					shift += 10;
					optarg[last] = 0;
					break;
				}
			}
			user_size = strtoll(optarg, (char **)NULL, /*base*/10);
			user_size <<= shift;
			if (user_size < 0)
				errx(1, "Unreasonable volume size: %s", optarg);
			break;
		}
		case 'W':
			req_flags &= ~(SID_WBus16 | SID_WBus32);
			switch (atoi(optarg)) {
			case 8:
				/* Leave req_flags zeroed */
				break;
			case 16:
				req_flags |= SID_WBus16;
				break;
			case 32:
				req_flags |= SID_WBus32;
				break;
			default:
				warnx("Width %s not supported", optarg);
				usage();
				/* NOTREACHED */
			}
			break;
		case 'Y':
			notaio = 1;
			break;
		default:
			usage();
			/* NOTREACHED */
		}
	}
	argc -= optind;
	argv += optind;

	if (argc != 2)
		usage();

	sscanf(argv[0], "%u:%u:%u", &ioc_enlun.path_id, &ioc_enlun.target_id,
	       &ioc_enlun.lun_id);
	file_name = argv[1];

	if (ioc_enlun.path_id == CAM_BUS_WILDCARD ||
	    ioc_enlun.target_id == CAM_TARGET_WILDCARD ||
	    ioc_enlun.lun_id == CAM_LUN_WILDCARD) {
		warnx("Incomplete target path specified");
		usage();
		/* NOTREACHED */
	}
	/* We don't support any vendor-specific commands */
	ioc_enlun.grp6_len = 0;
	ioc_enlun.grp7_len = 0;

	/* Open backing store for IO */
	file_fd = open(file_name, O_RDWR);
	if (file_fd < 0)
		err(EX_NOINPUT, "open backing store file");

	/* Check backing store size or use the size user gave us */
	if (user_size == 0) {
		struct stat st;

		if (fstat(file_fd, &st) < 0)
			err(1, "fstat file");
#if __FreeBSD_version >= 500000
		if ((st.st_mode & S_IFCHR) != 0) {
			/* raw device */
			off_t mediasize;
			if (ioctl(file_fd, DIOCGMEDIASIZE, &mediasize) < 0)
				err(1, "DIOCGMEDIASIZE");

			/* XXX get sector size by ioctl()?? */
			volume_size = mediasize / sector_size;
		} else
#endif
			volume_size = st.st_size / sector_size;
	} else {
		volume_size = user_size / sector_size;
	}
	if (debug)
		warnx("volume_size: %d bytes x " OFF_FMT " sectors",
		    sector_size, volume_size);

	if (volume_size <= 0)
		errx(1, "volume must be larger than %d", sector_size);

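	/*
	 * Sanity-check kernel AIO support by reading the first sector
	 * asynchronously.  SIGSYS is ignored during the probe so a missing
	 * aio syscall just makes aio_read() fail, in which case we fall
	 * back to the single-threaded synchronous path.
	 */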
	if (notaio == 0) {
		struct aiocb aio, *aiop;

		/* See if we have working AIO support */
		memset(&aio, 0, sizeof(aio));
		aio.aio_buf = malloc(sector_size);
		if (aio.aio_buf == NULL)
			err(1, "malloc");
		aio.aio_fildes = file_fd;
		aio.aio_offset = 0;
		aio.aio_nbytes = sector_size;
		signal(SIGSYS, SIG_IGN);
		if (aio_read(&aio) != 0) {
			printf("AIO support is not available - switching to"
			       " single-threaded mode.\n");
			notaio = 1;
		} else {
			if (aio_waitcomplete(&aiop, NULL) != sector_size)
				err(1, "aio_waitcomplete");
			assert(aiop == &aio);
			signal(SIGSYS, SIG_DFL);
		}
		free((void *)aio.aio_buf);
		if (debug && notaio == 0)
			warnx("aio support tested ok");
	}

	targ_fd = open("/dev/targ", O_RDWR);
	if (targ_fd < 0)
		err(1, "/dev/targ");
	else
		warnx("opened /dev/targ");

	/* The first three are handled by kevent() later */
	signal(SIGHUP, SIG_IGN);
	signal(SIGINT, SIG_IGN);
	signal(SIGTERM, SIG_IGN);
	signal(SIGPROF, SIG_IGN);
	signal(SIGALRM, SIG_IGN);
	signal(SIGSTOP, SIG_IGN);
	signal(SIGTSTP, SIG_IGN);

	/* Register a cleanup handler to run when exiting */
	atexit(cleanup);

	/* Enable listening on the specified LUN */
	if (ioctl(targ_fd, TARGIOCENABLE, &ioc_enlun) != 0)
		err(1, "TARGIOCENABLE");

	/* Enable debugging if requested */
	if (debug) {
		if (ioctl(targ_fd, TARGIOCDEBUG, &debug) != 0)
			warnx("TARGIOCDEBUG");
	}

	/* Set up inquiry data according to what SIM supports */
	if (get_sim_flags(&sim_flags) != CAM_REQ_CMP)
		errx(1, "get_sim_flags");

	if (tcmd_init(req_flags, sim_flags) != 0)
		errx(1, "Initializing tcmd subsystem failed");

	/* Queue ATIOs and INOTs on descriptor */
	if (init_ccbs() != 0)
		errx(1, "init_ccbs failed");

	if (debug)
		warnx("main loop beginning");

	request_loop();

	exit(0);
}

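/*
 * atexit() handler: turn off target-mode debugging, disable the LUN,
 * and release any CCBs still sitting on the pending or work queues.
 */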
static void
cleanup()
{
	struct ccb_hdr *ccb_h;

	if (debug) {
		warnx("cleanup called");
		debug = 0;
		ioctl(targ_fd, TARGIOCDEBUG, &debug);
	}
	ioctl(targ_fd, TARGIOCDISABLE, NULL);
	close(targ_fd);

	while ((ccb_h = TAILQ_FIRST(&pending_queue)) != NULL) {
		TAILQ_REMOVE(&pending_queue, ccb_h, periph_links.tqe);
		free_ccb((union ccb *)ccb_h);
	}
	while ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
		TAILQ_REMOVE(&work_queue, ccb_h, periph_links.tqe);
		free_ccb((union ccb *)ccb_h);
	}

	if (kq_fd != -1)
		close(kq_fd);
}

/* Allocate ATIOs/INOTs and queue on HBA */
static int
init_ccbs()
{
	int i;

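	/*
	 * Hand the kernel one ATIO and one INOT per possible initiator;
	 * it holds them until a command or event arrives and then returns
	 * the pointers through read() on the target device.
	 */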
	for (i = 0; i < MAX_INITIATORS; i++) {
		struct ccb_accept_tio *atio;
		struct atio_descr *a_descr;
		struct ccb_immed_notify *inot;

		atio = (struct ccb_accept_tio *)malloc(sizeof(*atio));
		if (atio == NULL) {
			warn("malloc ATIO");
			return (-1);
		}
		a_descr = (struct atio_descr *)malloc(sizeof(*a_descr));
		if (a_descr == NULL) {
			free(atio);
			warn("malloc atio_descr");
			return (-1);
		}
		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		atio->ccb_h.targ_descr = a_descr;
		send_ccb((union ccb *)atio, /*priority*/1);

		inot = (struct ccb_immed_notify *)malloc(sizeof(*inot));
		if (inot == NULL) {
			warn("malloc INOT");
			return (-1);
		}
		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
		send_ccb((union ccb *)inot, /*priority*/1);
	}

	return (0);
}

static void
request_loop()
{
	struct kevent events[MAX_EVENTS];
	struct timespec ts, *tptr;
	int quit;

	/* Register kqueue for event notification */
	if ((kq_fd = kqueue()) < 0)
		err(1, "init kqueue");

	/* Set up some default events */
	EV_SET(&events[0], SIGHUP, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[1], SIGINT, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[2], SIGTERM, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[3], targ_fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, 0);
	if (kevent(kq_fd, events, 4, NULL, 0, NULL) < 0)
		err(1, "kevent signal registration");

	ts.tv_sec = 0;
	ts.tv_nsec = 0;
	tptr = NULL;
	quit = 0;

	/* Loop until user signal */
	while (quit == 0) {
		int retval, i, oo;
		struct ccb_hdr *ccb_h;

		/* Check for the next signal, read ready, or AIO completion */
		retval = kevent(kq_fd, NULL, 0, events, MAX_EVENTS, tptr);
		if (retval < 0) {
			if (errno == EINTR) {
				if (debug)
					warnx("EINTR, looping");
				continue;
			} else {
				err(1, "kevent failed");
			}
		} else if (retval > MAX_EVENTS) {
			errx(1, "kevent returned more events than allocated?");
		}

		/* Process all received events. */
		for (oo = i = 0; i < retval; i++) {
			if ((events[i].flags & EV_ERROR) != 0)
				errx(1, "kevent registration failed");

			switch (events[i].filter) {
			case EVFILT_READ:
				if (debug)
					warnx("read ready");
				handle_read();
				break;
			case EVFILT_AIO:
			{
				struct ccb_scsiio *ctio;
				struct ctio_descr *c_descr;
				if (debug)
					warnx("aio ready");

				ctio = (struct ccb_scsiio *)events[i].udata;
				c_descr = (struct ctio_descr *)
					  ctio->ccb_h.targ_descr;
				c_descr->event = AIO_DONE;
				/* Queue on the appropriate ATIO */
				queue_io(ctio);
				/* Process any queued completions. */
				oo += run_queue(c_descr->atio);
				break;
			}
			case EVFILT_SIGNAL:
				if (debug)
					warnx("signal ready, setting quit");
				quit = 1;
				break;
			default:
				warnx("unknown event %d", events[i].filter);
				break;
			}

			if (debug)
				warnx("event %d done", events[i].filter);
		}

		if (oo) {
			tptr = &ts;
			continue;
		}

		/* Grab the first CCB and perform one work unit. */
		if ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
			union ccb *ccb;

			ccb = (union ccb *)ccb_h;
			switch (ccb_h->func_code) {
			case XPT_ACCEPT_TARGET_IO:
				/* Start one more transfer. */
				retval = work_atio(&ccb->atio);
				break;
			case XPT_IMMED_NOTIFY:
				retval = work_inot(&ccb->cin);
				break;
			default:
				warnx("Unhandled ccb type %#x on workq",
				      ccb_h->func_code);
				abort();
				/* NOTREACHED */
			}

			/* Assume work function handled the exception */
			if ((ccb_h->status & CAM_DEV_QFRZN) != 0) {
				if (debug) {
					warnx("Queue frozen receiving CCB, "
					      "releasing");
				}
				rel_simq();
			}

			/* No more work needed for this command. */
			if (retval == 0) {
				TAILQ_REMOVE(&work_queue, ccb_h,
					     periph_links.tqe);
			}
		}

		/*
		 * Poll for new events (i.e. completions) while we
		 * are processing CCBs on the work_queue. Once it's
		 * empty, use an infinite wait.
		 */
		if (!TAILQ_EMPTY(&work_queue))
			tptr = &ts;
		else
			tptr = NULL;
	}
}

/* CCBs are ready from the kernel */
static void
handle_read()
{
	union ccb *ccb_array[MAX_INITIATORS], *ccb;
	int ccb_count, i;

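	/*
	 * The target device returns an array of pointers to CCBs that the
	 * kernel has filled in; each pointer was handed down earlier via
	 * write() in send_ccb().
	 */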
	ccb_count = read(targ_fd, ccb_array, sizeof(ccb_array));
	if (ccb_count <= 0) {
		warn("read ccb ptrs");
		return;
	}
	ccb_count /= sizeof(union ccb *);
	if (ccb_count < 1) {
		warnx("truncated read ccb ptr?");
		return;
	}

	for (i = 0; i < ccb_count; i++) {
		ccb = ccb_array[i];
		TAILQ_REMOVE(&pending_queue, &ccb->ccb_h, periph_links.tqe);

		switch (ccb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
		{
			struct ccb_accept_tio *atio;
			struct atio_descr *a_descr;

			/* Initialize ATIO descr for this transaction */
			atio = &ccb->atio;
			a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
			bzero(a_descr, sizeof(*a_descr));
			TAILQ_INIT(&a_descr->cmplt_io);
			a_descr->flags = atio->ccb_h.flags &
				(CAM_DIS_DISCONNECT | CAM_TAG_ACTION_VALID);
			/* XXX add a_descr->priority */
			if ((atio->ccb_h.flags & CAM_CDB_POINTER) == 0)
				a_descr->cdb = atio->cdb_io.cdb_bytes;
			else
				a_descr->cdb = atio->cdb_io.cdb_ptr;

			/* ATIOs are processed in FIFO order */
			TAILQ_INSERT_TAIL(&work_queue, &ccb->ccb_h,
					  periph_links.tqe);
			break;
		}
		case XPT_CONT_TARGET_IO:
		{
			struct ccb_scsiio *ctio;
			struct ctio_descr *c_descr;

			ctio = &ccb->ctio;
			c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
			c_descr->event = CTIO_DONE;
			/* Queue on the appropriate ATIO */
			queue_io(ctio);
			/* Process any queued completions. */
			run_queue(c_descr->atio);
			break;
		}
		case XPT_IMMED_NOTIFY:
			/* INOTs are handled with priority */
			TAILQ_INSERT_HEAD(&work_queue, &ccb->ccb_h,
					  periph_links.tqe);
			break;
		default:
			warnx("Unhandled ccb type %#x in handle_read",
			      ccb->ccb_h.func_code);
			break;
		}
	}
}

/* Process an ATIO CCB from the kernel */
int
work_atio(struct ccb_accept_tio *atio)
{
	struct ccb_scsiio *ctio;
	struct atio_descr *a_descr;
	struct ctio_descr *c_descr;
	cam_status status;
	int ret;

	if (debug)
		warnx("Working on ATIO %p", atio);

	a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;

	/* Get a CTIO and initialize it according to our known parameters */
	ctio = get_ctio();
	if (ctio == NULL) {
		return (1);
	}
	ret = 0;
	ctio->ccb_h.flags = a_descr->flags;
	ctio->tag_id = atio->tag_id;
	ctio->init_id = atio->init_id;
	/* XXX priority needs to be added to a_descr */
	c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
	c_descr->atio = atio;
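	/*
	 * Pick up the transfer at the per-direction offset tracked in the
	 * ATIO descriptor (targ_req for data-in, init_req for data-out).
	 */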
	if ((a_descr->flags & CAM_DIR_IN) != 0)
		c_descr->offset = a_descr->base_off + a_descr->targ_req;
	else if ((a_descr->flags & CAM_DIR_MASK) == CAM_DIR_OUT)
		c_descr->offset = a_descr->base_off + a_descr->init_req;
	else
		c_descr->offset = a_descr->base_off;

	/*
	 * Return a check condition if there was an error while
	 * receiving this ATIO.
	 */
	if (atio->sense_len != 0) {
		struct scsi_sense_data_fixed *sense;

		if (debug) {
			warnx("ATIO with %u bytes sense received",
			      atio->sense_len);
		}
		sense = (struct scsi_sense_data_fixed *)&atio->sense_data;
		tcmd_sense(ctio->init_id, ctio, sense->flags,
			   sense->add_sense_code, sense->add_sense_code_qual);
		send_ccb((union ccb *)ctio, /*priority*/1);
		return (0);
	}

	status = atio->ccb_h.status & CAM_STATUS_MASK;
	switch (status) {
	case CAM_CDB_RECVD:
		ret = tcmd_handle(atio, ctio, ATIO_WORK);
		break;
	case CAM_REQ_ABORTED:
		warn("ATIO %p aborted", a_descr);
		/* Requeue on HBA */
		TAILQ_REMOVE(&work_queue, &atio->ccb_h, periph_links.tqe);
		send_ccb((union ccb *)atio, /*priority*/1);
		ret = 1;
		break;
	default:
		warnx("ATIO completed with unhandled status %#x", status);
		abort();
		/* NOTREACHED */
		break;
	}

	return (ret);
}

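/*
 * Insert a completed AIO/CTIO on its parent ATIO's completion list,
 * kept sorted by byte offset so run_queue() can retire work in order.
 */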
static void
queue_io(struct ccb_scsiio *ctio)
{
	struct ccb_hdr *ccb_h;
	struct io_queue *ioq;
	struct ctio_descr *c_descr;

	c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
	if (c_descr->atio == NULL) {
		errx(1, "CTIO %p has NULL ATIO", ctio);
	}
	ioq = &((struct atio_descr *)c_descr->atio->ccb_h.targ_descr)->cmplt_io;

	if (TAILQ_EMPTY(ioq)) {
		TAILQ_INSERT_HEAD(ioq, &ctio->ccb_h, periph_links.tqe);
		return;
	}

	TAILQ_FOREACH_REVERSE(ccb_h, ioq, io_queue, periph_links.tqe) {
		struct ctio_descr *curr_descr =
		    (struct ctio_descr *)ccb_h->targ_descr;
		if (curr_descr->offset <= c_descr->offset) {
			break;
		}
	}

	if (ccb_h) {
		TAILQ_INSERT_AFTER(ioq, ccb_h, &ctio->ccb_h, periph_links.tqe);
	} else {
		TAILQ_INSERT_HEAD(ioq, &ctio->ccb_h, periph_links.tqe);
	}
}

/*
 * Go through all completed AIO/CTIOs for a given ATIO and advance data
 * counts, start continuation IO, etc.
 */
static int
run_queue(struct ccb_accept_tio *atio)
{
	struct atio_descr *a_descr;
	struct ccb_hdr *ccb_h;
	int sent_status, event;

	if (atio == NULL)
		return (0);

	a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;

	while ((ccb_h = TAILQ_FIRST(&a_descr->cmplt_io)) != NULL) {
		struct ccb_scsiio *ctio;
		struct ctio_descr *c_descr;

		ctio = (struct ccb_scsiio *)ccb_h;
		c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;

		if (ctio->ccb_h.status == CAM_REQ_ABORTED) {
			TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
				     periph_links.tqe);
			free_ccb((union ccb *)ctio);
			send_ccb((union ccb *)atio, /*priority*/1);
			continue;
		}

		/* If completed item is in range, call handler */
		if ((c_descr->event == AIO_DONE &&
		    c_descr->offset == a_descr->base_off + a_descr->targ_ack)
		 || (c_descr->event == CTIO_DONE &&
		    c_descr->offset == a_descr->base_off + a_descr->init_ack)) {
			sent_status = (ccb_h->flags & CAM_SEND_STATUS) != 0;
			event = c_descr->event;

			TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
				     periph_links.tqe);
			tcmd_handle(atio, ctio, c_descr->event);

			/* If entire transfer complete, send back ATIO */
			if (sent_status != 0 && event == CTIO_DONE)
				send_ccb((union ccb *)atio, /*priority*/1);
		} else {
			/* Gap in offsets so wait until later callback */
			if (/* debug */ 1)
				warnx("IO %p:%p out of order %s", ccb_h,
				    a_descr, c_descr->event == AIO_DONE?
				    "aio" : "ctio");
			return (1);
		}
	}
	return (0);
}

static int
work_inot(struct ccb_immed_notify *inot)
{
	cam_status status;
	int sense;

	if (debug)
		warnx("Working on INOT %p", inot);

	status = inot->ccb_h.status;
	sense = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	switch (status) {
	case CAM_SCSI_BUS_RESET:
		tcmd_ua(CAM_TARGET_WILDCARD, UA_BUS_RESET);
		abort_all_pending();
		break;
	case CAM_BDR_SENT:
		tcmd_ua(CAM_TARGET_WILDCARD, UA_BDR);
		abort_all_pending();
		break;
	case CAM_MESSAGE_RECV:
		switch (inot->message_args[0]) {
		case MSG_TASK_COMPLETE:
		case MSG_INITIATOR_DET_ERR:
		case MSG_ABORT_TASK_SET:
		case MSG_MESSAGE_REJECT:
		case MSG_NOOP:
		case MSG_PARITY_ERROR:
		case MSG_TARGET_RESET:
		case MSG_ABORT_TASK:
		case MSG_CLEAR_TASK_SET:
		default:
			warnx("INOT message %#x", inot->message_args[0]);
			break;
		}
		break;
	case CAM_REQ_ABORTED:
		warnx("INOT %p aborted", inot);
		break;
	default:
		warnx("Unhandled INOT status %#x", status);
		break;
	}

	/* If there is sense data, use it */
	if (sense != 0) {
		struct scsi_sense_data_fixed *sense;

		sense = (struct scsi_sense_data_fixed *)&inot->sense_data;
		tcmd_sense(inot->initiator_id, NULL, sense->flags,
			   sense->add_sense_code, sense->add_sense_code_qual);
		if (debug)
			warnx("INOT has sense: %#x", sense->flags);
	}

	/* Requeue on SIM */
	TAILQ_REMOVE(&work_queue, &inot->ccb_h, periph_links.tqe);
	send_ccb((union ccb *)inot, /*priority*/1);

	return (1);
}

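/*
 * Pass a CCB to the kernel by writing its pointer to the targ device.
 * CCB types the kernel keeps queued (ATIO/INOT/CTIO) are tracked on
 * pending_queue so they can be aborted and freed at shutdown.
 */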
void
send_ccb(union ccb *ccb, int priority)
{
	if (debug)
		warnx("sending ccb (%#x)", ccb->ccb_h.func_code);
	ccb->ccb_h.pinfo.priority = priority;
	if (XPT_FC_IS_QUEUED(ccb)) {
		TAILQ_INSERT_TAIL(&pending_queue, &ccb->ccb_h,
				  periph_links.tqe);
	}
	if (write(targ_fd, &ccb, sizeof(ccb)) != sizeof(ccb)) {
		warn("write ccb");
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
	}
}

/* Allocate a CTIO, its descriptor, and a data buffer (up to MAX_CTIOS) */
static struct ccb_scsiio *
get_ctio()
{
	struct ccb_scsiio *ctio;
	struct ctio_descr *c_descr;
	struct sigevent *se;

	if (num_ctios == MAX_CTIOS) {
		warnx("at CTIO max");
		return (NULL);
	}

	ctio = (struct ccb_scsiio *)malloc(sizeof(*ctio));
	if (ctio == NULL) {
		warn("malloc CTIO");
		return (NULL);
	}
	c_descr = (struct ctio_descr *)malloc(sizeof(*c_descr));
	if (c_descr == NULL) {
		free(ctio);
		warn("malloc ctio_descr");
		return (NULL);
	}
	c_descr->buf = malloc(buf_size);
	if (c_descr->buf == NULL) {
		free(c_descr);
		free(ctio);
		warn("malloc backing store");
		return (NULL);
	}
	num_ctios++;

	/* Initialize CTIO, CTIO descr, and AIO */
	ctio->ccb_h.func_code = XPT_CONT_TARGET_IO;
	ctio->ccb_h.retry_count = 2;
	ctio->ccb_h.timeout = CAM_TIME_INFINITY;
	ctio->data_ptr = c_descr->buf;
	ctio->ccb_h.targ_descr = c_descr;
	c_descr->aiocb.aio_buf = c_descr->buf;
	c_descr->aiocb.aio_fildes = file_fd;
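	/*
	 * Deliver AIO completion through the kqueue; the CTIO pointer is
	 * carried in sigev_value so request_loop() gets it back in the
	 * EVFILT_AIO event's udata.
	 */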
	se = &c_descr->aiocb.aio_sigevent;
	se->sigev_notify = SIGEV_KEVENT;
	se->sigev_notify_kqueue = kq_fd;
	se->sigev_value.sival_ptr = ctio;

	return (ctio);
}

void
free_ccb(union ccb *ccb)
{
	switch (ccb->ccb_h.func_code) {
	case XPT_CONT_TARGET_IO:
	{
		struct ctio_descr *c_descr;

		c_descr = (struct ctio_descr *)ccb->ccb_h.targ_descr;
		free(c_descr->buf);
		num_ctios--;
		/* FALLTHROUGH */
	}
	case XPT_ACCEPT_TARGET_IO:
		free(ccb->ccb_h.targ_descr);
		/* FALLTHROUGH */
	case XPT_IMMED_NOTIFY:
	default:
		free(ccb);
		break;
	}
}

static cam_status
get_sim_flags(u_int16_t *flags)
{
	struct ccb_pathinq cpi;
	cam_status status;

	/* Find SIM capabilities */
	bzero(&cpi, sizeof(cpi));
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	send_ccb((union ccb *)&cpi, /*priority*/1);
	status = cpi.ccb_h.status & CAM_STATUS_MASK;
	if (status != CAM_REQ_CMP) {
		fprintf(stderr, "CPI failed, status %#x\n", status);
		return (status);
	}

	/* Can only enable on controllers that support target mode */
	if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
		fprintf(stderr, "HBA does not support target mode\n");
		status = CAM_PATH_INVALID;
		return (status);
	}

	*flags = cpi.hba_inquiry;
	return (status);
}

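/*
 * Release a frozen SIM queue (seen as CAM_DEV_QFRZN on a returned CCB);
 * RELSIM_RELEASE_AFTER_QEMPTY defers the release until the queue drains.
 */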
static void
rel_simq()
{
	struct ccb_relsim crs;

	bzero(&crs, sizeof(crs));
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
	crs.openings = 0;
	crs.release_timeout = 0;
	crs.qfrozen_cnt = 0;
	send_ccb((union ccb *)&crs, /*priority*/0);
}

/* Cancel all pending CCBs. */
static void
abort_all_pending()
{
	struct ccb_abort	 cab;
	struct ccb_hdr		*ccb_h;

	if (debug)
		warnx("abort_all_pending");

	bzero(&cab, sizeof(cab));
	cab.ccb_h.func_code = XPT_ABORT;
	TAILQ_FOREACH(ccb_h, &pending_queue, periph_links.tqe) {
		if (debug)
			warnx("Aborting pending CCB %p", ccb_h);
		cab.abort_ccb = (union ccb *)ccb_h;
		send_ccb((union ccb *)&cab, /*priority*/1);
		if (cab.ccb_h.status != CAM_REQ_CMP) {
			warnx("Unable to abort CCB, status %#x",
			      cab.ccb_h.status);
		}
	}
}

static void
usage()
{
	fprintf(stderr,
		"Usage: scsi_target [-AdSTY] [-b bufsize] [-c sectorsize]\n"
		"\t\t[-s volsize] [-W 8,16,32]\n"
		"\t\tbus:target:lun filename\n");
	exit(1);
}