scsi_target.c revision 46437
1/*
2 * Implementation of a simple Target Mode SCSI Processor Target driver for CAM.
3 *
4 * Copyright (c) 1998, 1999 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions, and the following disclaimer,
12 *    without modification, immediately at the beginning of the file.
13 * 2. The name of the author may not be used to endorse or promote products
14 *    derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 *      $Id: scsi_target.c,v 1.9 1999/03/05 23:25:10 gibbs Exp $
29 */
30#include <stddef.h>	/* For offsetof */
31
32#include <sys/param.h>
33#include <sys/queue.h>
34#include <sys/systm.h>
35#include <sys/kernel.h>
36#include <sys/types.h>
37#include <sys/buf.h>
38#include <sys/conf.h>
39#include <sys/devicestat.h>
40#include <sys/malloc.h>
41#include <sys/poll.h>
42#include <sys/select.h>	/* For struct selinfo. */
43#include <sys/uio.h>
44
45#include <cam/cam.h>
46#include <cam/cam_ccb.h>
47#include <cam/cam_extend.h>
48#include <cam/cam_periph.h>
49#include <cam/cam_queue.h>
50#include <cam/cam_xpt_periph.h>
51#include <cam/cam_debug.h>
52
53#include <cam/scsi/scsi_all.h>
54#include <cam/scsi/scsi_pt.h>
55#include <cam/scsi/scsi_targetio.h>
56#include <cam/scsi/scsi_message.h>
57
58typedef enum {
59	TARG_STATE_NORMAL,
60	TARG_STATE_EXCEPTION,
61	TARG_STATE_TEARDOWN
62} targ_state;
63
64typedef enum {
65	TARG_FLAG_NONE		 = 0x00,
66	TARG_FLAG_SEND_EOF	 = 0x01,
67	TARG_FLAG_RECEIVE_EOF	 = 0x02,
68	TARG_FLAG_LUN_ENABLED	 = 0x04
69} targ_flags;
70
71typedef enum {
72	TARG_CCB_WORKQ,
73	TARG_CCB_WAITING
74} targ_ccb_types;
75
76#define MAX_ACCEPT	16
77#define MAX_IMMEDIATE	16
78#define MAX_BUF_SIZE	256	/* Max inquiry/sense/mode page transfer */
79#define MAX_INITIATORS	16	/* XXX More for Fibre-Channel */
80
81#define MIN(a, b) (((a) > (b)) ? (b) : (a))
82
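/*
 * Minor number TARG_CONTROL_UNIT selects the control device.  It is never
 * attached to a LUN; it exists solely to accept the TARGCTLIOALLOCUNIT and
 * TARGCTLIOFREEUNIT ioctls that create and destroy "targ" instances.
 */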
83#define TARG_CONTROL_UNIT 0xffff00ff
84#define TARG_IS_CONTROL_DEV(unit) ((unit) == TARG_CONTROL_UNIT)
85
86/* Offsets into our private CCB area for storing accept information */
87#define ccb_type	ppriv_field0
88#define ccb_descr	ppriv_ptr1
89
90/* We stick a pointer to the originating accept TIO in each continue I/O CCB */
91#define ccb_atio	ppriv_ptr1
92
93TAILQ_HEAD(ccb_queue, ccb_hdr);
94
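/*
 * Per-instance state.  The queues cooperate as follows: ATIOs for SEND and
 * RECEIVE commands that cannot be completed yet sit on snd_ccb_queue and
 * rcv_ccb_queue, user buffers sit on snd_buf_queue and rcv_buf_queue,
 * targrunqueue() pairs an ATIO with a buffer (or an EOF indication) and
 * moves it to work_queue, and targstart() moves it to pending_queue while
 * its continue target I/O is outstanding.  ATIOs for commands the driver
 * does not understand collect on unknown_atio_queue for the user process.
 */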
95struct targ_softc {
96	struct		ccb_queue pending_queue;
97	struct		ccb_queue work_queue;
98	struct		ccb_queue snd_ccb_queue;
99	struct		ccb_queue rcv_ccb_queue;
100	struct		ccb_queue unknown_atio_queue;
101	struct		buf_queue_head snd_buf_queue;
102	struct		buf_queue_head rcv_buf_queue;
103	struct		devstat device_stats;
104	struct		selinfo snd_select;
105	struct		selinfo rcv_select;
106	targ_state	state;
107	targ_flags	flags;
108	targ_exception	exceptions;
109	u_int		init_level;
110	u_int		inq_data_len;
111	struct		scsi_inquiry_data *inq_data;
112	struct		ccb_accept_tio *accept_tio_list;
113	struct		ccb_hdr_slist immed_notify_slist;
114	struct		initiator_state istate[MAX_INITIATORS];
115};
116
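/*
 * One descriptor is attached to every accept TIO.  data points at whatever
 * is returned for the current command (the canned inquiry data, the
 * per-initiator sense data, or a slice of a user buf), while backing_store
 * is a MAX_BUF_SIZE scratch buffer reserved by allocdescr() for responses
 * that need private storage.
 */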
117struct targ_cmd_desc {
118	struct	  ccb_accept_tio* atio_link;
119	u_int	  data_resid;	/* How much left to transfer */
120	u_int	  data_increment;/* Amount to send before next disconnect */
121	void*	  data;		/* The data. Can be from backing_store or not */
122	void*	  backing_store;/* Backing store allocated for this descriptor*/
123	struct	  buf *bp;	/* Buffer for this transfer */
124	u_int	  max_size;	/* Size of backing_store */
125	u_int32_t timeout;
126	u_int8_t  status;	/* Status to return to initiator */
127};
128
129static	d_open_t	targopen;
130static	d_close_t	targclose;
131static	d_read_t	targread;
132static	d_write_t	targwrite;
133static	d_ioctl_t	targioctl;
134static	d_poll_t	targpoll;
135static	d_strategy_t	targstrategy;
136
137#define TARG_CDEV_MAJOR	65
138static struct cdevsw targ_cdevsw = {
139	/*d_open*/	targopen,
140	/*d_close*/	targclose,
141	/*d_read*/	targread,
142	/*d_write*/	targwrite,
143	/*d_ioctl*/	targioctl,
144	/*d_stop*/	nostop,
145	/*d_reset*/	noreset,
146	/*d_devtotty*/	nodevtotty,
147	/*d_poll*/	targpoll,
148	/*d_mmap*/	nommap,
149	/*d_strategy*/	targstrategy,
150	/*d_name*/	"targ",
151	/*d_spare*/	NULL,
152	/*d_maj*/	-1,
153	/*d_dump*/	nodump,
154	/*d_psize*/	nopsize,
155	/*d_flags*/	0,
156	/*d_maxio*/	0,
157	/*b_maj*/	-1
158};
159
160static int		targsendccb(struct cam_periph *periph, union ccb *ccb,
161				    union ccb *inccb);
162static periph_init_t	targinit;
163static void		targasync(void *callback_arg, u_int32_t code,
164				struct cam_path *path, void *arg);
165static int		targallocinstance(struct ioc_alloc_unit *alloc_unit);
166static int		targfreeinstance(struct ioc_alloc_unit *alloc_unit);
167static cam_status	targenlun(struct cam_periph *periph);
168static cam_status	targdislun(struct cam_periph *periph);
169static periph_ctor_t	targctor;
170static periph_dtor_t	targdtor;
171static void		targrunqueue(struct cam_periph *periph,
172				     struct targ_softc *softc);
173static periph_start_t	targstart;
174static void		targdone(struct cam_periph *periph,
175				 union ccb *done_ccb);
176static void		targfireexception(struct cam_periph *periph,
177					  struct targ_softc *softc);
178static  int		targerror(union ccb *ccb, u_int32_t cam_flags,
179				  u_int32_t sense_flags);
180static struct targ_cmd_desc*	allocdescr(void);
181static void		freedescr(struct targ_cmd_desc *buf);
182static void		fill_sense(struct scsi_sense_data *sense,
183				   u_int error_code, u_int sense_key,
184				   u_int asc, u_int ascq);
185
186static struct periph_driver targdriver =
187{
188	targinit, "targ",
189	TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0
190};
191
192DATA_SET(periphdriver_set, targdriver);
193
194static struct extend_array *targperiphs;
195
196static void
197targinit(void)
198{
199	dev_t dev;
200
201	/*
202	 * Create our extend array for storing the devices we attach to.
203	 */
204	targperiphs = cam_extend_new();
205	if (targperiphs == NULL) {
206		printf("targ: Failed to alloc extend array!\n");
207		return;
208	}
209
210	/* If we were successful, register our devsw */
211	dev = makedev(TARG_CDEV_MAJOR, 0);
212	cdevsw_add(&dev, &targ_cdevsw, NULL);
213}
214
215static void
216targasync(void *callback_arg, u_int32_t code,
217	  struct cam_path *path, void *arg)
218{
219	struct cam_periph *periph;
220
221	periph = (struct cam_periph *)callback_arg;
222	switch (code) {
223	case AC_PATH_DEREGISTERED:
224	{
225		/* XXX Implement */
226		break;
227	}
228	case AC_BUS_RESET:
229	{
230		/* Flush transaction queue */
231	}
232	default:
233		break;
234	}
235}
236
237/* Attempt to enable our lun */
238static cam_status
239targenlun(struct cam_periph *periph)
240{
241	union ccb immed_ccb;
242	struct targ_softc *softc;
243	cam_status status;
244	int i;
245
246	softc = (struct targ_softc *)periph->softc;
247
248	if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0)
249		return (CAM_REQ_CMP);
250
251	xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, /*priority*/1);
252	immed_ccb.ccb_h.func_code = XPT_EN_LUN;
253
254	/* Don't need support for any vendor specific commands */
255	immed_ccb.cel.grp6_len = 0;
256	immed_ccb.cel.grp7_len = 0;
257	immed_ccb.cel.enable = 1;
258	xpt_action(&immed_ccb);
259	status = immed_ccb.ccb_h.status;
260	if (status != CAM_REQ_CMP) {
261		xpt_print_path(periph->path);
262		printf("targenlun - Enable Lun Rejected for status 0x%x\n",
263		       status);
264		return (status);
265	}
266
267	softc->flags |= TARG_FLAG_LUN_ENABLED;
268
269	/*
270	 * Build up a buffer of accept target I/O
271	 * operations for incoming selections.
272	 */
273	for (i = 0; i < MAX_ACCEPT; i++) {
274		struct ccb_accept_tio *atio;
275
276		atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_DEVBUF,
277						      M_NOWAIT);
278		if (atio == NULL) {
279			status = CAM_RESRC_UNAVAIL;
280			break;
281		}
282
283		atio->ccb_h.ccb_descr = allocdescr();
284
285		if (atio->ccb_h.ccb_descr == NULL) {
286			free(atio, M_DEVBUF);
287			status = CAM_RESRC_UNAVAIL;
288			break;
289		}
290
291		xpt_setup_ccb(&atio->ccb_h, periph->path, /*priority*/1);
292		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
293		atio->ccb_h.cbfcnp = targdone;
294		xpt_action((union ccb *)atio);
295		status = atio->ccb_h.status;
296		if (status != CAM_REQ_INPROG) {
297			xpt_print_path(periph->path);
298			printf("Queue of atio failed\n");
299			freedescr(atio->ccb_h.ccb_descr);
300			free(atio, M_DEVBUF);
301			break;
302		}
303		((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link =
304		    softc->accept_tio_list;
305		softc->accept_tio_list = atio;
306	}
307
308	if (i == 0) {
309		xpt_print_path(periph->path);
310		printf("targenlun - Could not allocate accept tio CCBs: "
311		       "status = 0x%x\n", status);
312		targdislun(periph);
313		return (CAM_REQ_CMP_ERR);
314	}
315
316	/*
317	 * Build up a buffer of immediate notify CCBs
318	 * so the SIM can tell us of asynchronous target mode events.
319	 */
320	for (i = 0; i < MAX_IMMEDIATE; i++) {
321		struct ccb_immed_notify *inot;
322
323		inot = (struct ccb_immed_notify*)malloc(sizeof(*inot), M_DEVBUF,
324						        M_NOWAIT);
325
326		if (inot == NULL) {
327			status = CAM_RESRC_UNAVAIL;
328			break;
329		}
330
331		xpt_setup_ccb(&inot->ccb_h, periph->path, /*priority*/1);
332		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
333		inot->ccb_h.cbfcnp = targdone;
334		xpt_action((union ccb *)inot);
335		status = inot->ccb_h.status;
336		if (status != CAM_REQ_INPROG) {
337			printf("Queue of inot failed\n");
338			free(inot, M_DEVBUF);
339			break;
340		}
341		SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h,
342				  periph_links.sle);
343	}
344
345	if (i == 0) {
346		xpt_print_path(periph->path);
347		printf("targenlun - Could not allocate immediate notify CCBs: "
348		       "status = 0x%x\n", status);
349		targdislun(periph);
350		return (CAM_REQ_CMP_ERR);
351	}
352
353	return (CAM_REQ_CMP);
354}
355
356static cam_status
357targdislun(struct cam_periph *periph)
358{
359	union ccb ccb;
360	struct targ_softc *softc;
361	struct ccb_accept_tio* atio;
362	struct ccb_hdr *ccb_h;
363
364	softc = (struct targ_softc *)periph->softc;
365	if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0)
366		return CAM_REQ_CMP;
367
368	/* XXX Block for Continue I/O completion */
369
370	/* Kill off all ACCEPT and IMMEDIATE CCBs */
371	while ((atio = softc->accept_tio_list) != NULL) {
372
373		softc->accept_tio_list =
374		    ((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link;
375		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
376		ccb.cab.ccb_h.func_code = XPT_ABORT;
377		ccb.cab.abort_ccb = (union ccb *)atio;
378		xpt_action(&ccb);
379	}
380
381	while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) {
382		SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle);
383		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
384		ccb.cab.ccb_h.func_code = XPT_ABORT;
385		ccb.cab.abort_ccb = (union ccb *)ccb_h;
386		xpt_action(&ccb);
387	}
388
389	/*
390	 * Disable this lun.
391	 */
392	xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, /*priority*/1);
393	ccb.cel.ccb_h.func_code = XPT_EN_LUN;
394	ccb.cel.enable = 0;
395	xpt_action(&ccb);
396
397	if (ccb.cel.ccb_h.status != CAM_REQ_CMP)
398		printf("targdislun - Disabling lun on controller failed "
399		       "with status 0x%x\n", ccb.cel.ccb_h.status);
400	else
401		softc->flags &= ~TARG_FLAG_LUN_ENABLED;
402	return (ccb.cel.ccb_h.status);
403}
404
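/*
 * Periph constructor: initialize the softc, post a power-on unit attention
 * for every initiator, and build a default SCSI-2 inquiry response that
 * describes a processor-type device.
 */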
405static cam_status
406targctor(struct cam_periph *periph, void *arg)
407{
408	struct ccb_pathinq *cpi;
409	struct targ_softc *softc;
410	int i;
411
412	cpi = (struct ccb_pathinq *)arg;
413
414	/* Allocate our per-instance private storage */
415	softc = (struct targ_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);
416	if (softc == NULL) {
417		printf("targctor: unable to malloc softc\n");
418		return (CAM_REQ_CMP_ERR);
419	}
420
421	bzero(softc, sizeof(*softc));
422	TAILQ_INIT(&softc->pending_queue);
423	TAILQ_INIT(&softc->work_queue);
424	TAILQ_INIT(&softc->snd_ccb_queue);
425	TAILQ_INIT(&softc->rcv_ccb_queue);
426	TAILQ_INIT(&softc->unknown_atio_queue);
427	bufq_init(&softc->snd_buf_queue);
428	bufq_init(&softc->rcv_buf_queue);
429	softc->accept_tio_list = NULL;
430	SLIST_INIT(&softc->immed_notify_slist);
431	softc->state = TARG_STATE_NORMAL;
432	periph->softc = softc;
433	softc->init_level++;
434
435	cam_extend_set(targperiphs, periph->unit_number, periph);
436
437	/*
438	 * We start out life with a UA to indicate power-on/reset.
439	 */
440	for (i = 0; i < MAX_INITIATORS; i++)
441		softc->istate[i].pending_ua = UA_POWER_ON;
442
443	/*
444	 * Allocate an initial inquiry data buffer.  We might allow the
445	 * user to override this later via an ioctl.
446	 */
447	softc->inq_data_len = sizeof(*softc->inq_data);
448	softc->inq_data = malloc(softc->inq_data_len, M_DEVBUF, M_NOWAIT);
449	if (softc->inq_data == NULL) {
450		printf("targctor - Unable to malloc inquiry data\n");
451		targdtor(periph);
452		return (CAM_RESRC_UNAVAIL);
453	}
454	bzero(softc->inq_data, softc->inq_data_len);
455	softc->inq_data->device = T_PROCESSOR | (SID_QUAL_LU_CONNECTED << 5);
456	softc->inq_data->version = 2;
457	softc->inq_data->response_format = 2; /* SCSI2 Inquiry Format */
458	softc->inq_data->flags =
459	    cpi->hba_inquiry & (PI_SDTR_ABLE|PI_WIDE_16|PI_WIDE_32);
460	softc->inq_data->additional_length = softc->inq_data_len - 4;
461	strncpy(softc->inq_data->vendor, "FreeBSD ", SID_VENDOR_SIZE);
462	strncpy(softc->inq_data->product, "TM-PT           ", SID_PRODUCT_SIZE);
463	strncpy(softc->inq_data->revision, "0.0 ", SID_REVISION_SIZE);
464	softc->init_level++;
465
466	return (CAM_REQ_CMP);
467}
468
469static void
470targdtor(struct cam_periph *periph)
471{
472	struct targ_softc *softc;
473
474	softc = (struct targ_softc *)periph->softc;
475
476	softc->state = TARG_STATE_TEARDOWN;
477
478	targdislun(periph);
479
480	cam_extend_release(targperiphs, periph->unit_number);
481
482	switch (softc->init_level) {
483	default:
484		/* FALLTHROUGH */
485	case 2:
486		free(softc->inq_data, M_DEVBUF);
487		/* FALLTHROUGH */
488	case 1:
489		free(softc, M_DEVBUF);
490		break;
491	case 0:
492		panic("targdtor - impossible init level");
493	}
494}
495
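/*
 * Opening any unit other than the control device enables target mode on
 * the LUN via targenlun(), which also primes the SIM with accept TIO and
 * immediate notify CCBs.
 */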
496static int
497targopen(dev_t dev, int flags, int fmt, struct proc *p)
498{
499	struct cam_periph *periph;
500	struct	targ_softc *softc;
501	u_int unit;
502	cam_status status;
503	int error;
504	int s;
505
506	unit = minor(dev);
507
508	/* An open of the control device always succeeds */
509	if (TARG_IS_CONTROL_DEV(unit))
510		return 0;
511
512	s = splsoftcam();
513	periph = cam_extend_get(targperiphs, unit);
514	if (periph == NULL) {
515		splx(s);
516		return (ENXIO);
517	}
518	if ((error = cam_periph_lock(periph, PRIBIO | PCATCH)) != 0) {
519		splx(s);
520		return (error);
521	}
522
523	softc = (struct targ_softc *)periph->softc;
524	if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0) {
525		if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
526			splx(s);
527			cam_periph_unlock(periph);
528			return(ENXIO);
529		}
530	}
531        splx(s);
532
533	status = targenlun(periph);
534	switch (status) {
535	case CAM_REQ_CMP:
536		error = 0;
537		break;
538	case CAM_RESRC_UNAVAIL:
539		error = ENOMEM;
540		break;
541	case CAM_LUN_ALRDY_ENA:
542		error = EADDRINUSE;
543		break;
544	default:
545		error = ENXIO;
546		break;
547	}
548        cam_periph_unlock(periph);
549	return (error);
550}
551
552static int
553targclose(dev_t dev, int flag, int fmt, struct proc *p)
554{
555	struct	cam_periph *periph;
556	struct	targ_softc *softc;
557	u_int	unit;
558	int	s;
559	int	error;
560
561	unit = minor(dev);
562
563	/* A close of the control device always succeeds */
564	if (TARG_IS_CONTROL_DEV(unit))
565		return 0;
566
567	s = splsoftcam();
568	periph = cam_extend_get(targperiphs, unit);
569	if (periph == NULL) {
570		splx(s);
571		return (ENXIO);
572	}
	if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
		splx(s);
		return (error);
	}
575	softc = (struct targ_softc *)periph->softc;
576	splx(s);
577
578	targdislun(periph);
579
580	cam_periph_unlock(periph);
581	cam_periph_release(periph);
582
583	return (0);
584}
585
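/*
 * TARGCTLIOALLOCUNIT handler: build a path to the requested bus/target/lun,
 * verify that the controller advertises processor target mode support
 * (PIT_PROCESSOR), and allocate a new "targ" periph instance for it.
 */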
586static int
587targallocinstance(struct ioc_alloc_unit *alloc_unit)
588{
589	struct ccb_pathinq cpi;
590	struct cam_path *path;
591	struct cam_periph *periph;
592	cam_status status;
593	int free_path_on_return;
594	int error;
595
596	free_path_on_return = 0;
597	status = xpt_create_path(&path, /*periph*/NULL,
598				 alloc_unit->path_id,
599				 alloc_unit->target_id,
600				 alloc_unit->lun_id);
601	if (status != CAM_REQ_CMP) {
602		printf("Couldn't Allocate Path %x\n", status);
603		goto fail;
604	}
605
606	free_path_on_return++;
607
608	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
609	cpi.ccb_h.func_code = XPT_PATH_INQ;
610	xpt_action((union ccb *)&cpi);
611	status = cpi.ccb_h.status;
612
613	if (status != CAM_REQ_CMP) {
614		printf("Couldn't CPI %x\n", status);
615		goto fail;
616	}
617
618	/* Can only alloc units on controllers that support target mode */
619	if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
620		printf("Controller does not support target mode\n");
621		status = CAM_PATH_INVALID;
622		goto fail;
623	}
624
625	/* Ensure that we don't already have an instance for this unit. */
626	if ((periph = cam_periph_find(path, "targ")) != NULL) {
627		printf("Lun already enabled\n");
628		status = CAM_LUN_ALRDY_ENA;
629		goto fail;
630	}
631
632	/*
633	 * Allocate a peripheral instance for
634	 * this target instance.
635	 */
636	status = cam_periph_alloc(targctor, NULL, targdtor, targstart,
637				  "targ", CAM_PERIPH_BIO, path, targasync,
638				  0, &cpi);
639
640fail:
641	switch (status) {
642	case CAM_REQ_CMP:
643	{
644		struct cam_periph *periph;
645
646		if ((periph = cam_periph_find(path, "targ")) == NULL)
647			panic("targallocinstance: Succeeded but no periph?");
648		error = 0;
649		alloc_unit->unit = periph->unit_number;
650		break;
651	}
652	case CAM_RESRC_UNAVAIL:
653		error = ENOMEM;
654		break;
655	case CAM_LUN_ALRDY_ENA:
656		error = EADDRINUSE;
657		break;
658	default:
659		printf("targallocinstance: Unexpected CAM status %x\n", status);
660		/* FALLTHROUGH */
661	case CAM_PATH_INVALID:
662		error = ENXIO;
663		break;
664	case CAM_PROVIDE_FAIL:
665		error = ENODEV;
666		break;
667	}
668
669	if (free_path_on_return != 0)
670		xpt_free_path(path);
671
672	return (error);
673}
674
675static int
676targfreeinstance(struct ioc_alloc_unit *alloc_unit)
677{
678	struct cam_path *path;
679	struct cam_periph *periph;
680	struct targ_softc *softc;
681	cam_status status;
682	int free_path_on_return;
683	int error;
684
685	periph = NULL;
686	free_path_on_return = 0;
687	status = xpt_create_path(&path, /*periph*/NULL,
688				 alloc_unit->path_id,
689				 alloc_unit->target_id,
690				 alloc_unit->lun_id);
691	if (status != CAM_REQ_CMP)
692		goto fail;
693
694	free_path_on_return++;
695
696	/* Find our instance. */
697	if ((periph = cam_periph_find(path, "targ")) == NULL) {
698		xpt_print_path(path);
699		status = CAM_PATH_INVALID;
700		goto fail;
701	}
702
703        softc = (struct targ_softc *)periph->softc;
704
705        if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0) {
706		status = CAM_BUSY;
707		goto fail;
708	}
709
710fail:
711	if (free_path_on_return != 0)
712		xpt_free_path(path);
713
714	switch (status) {
715	case CAM_REQ_CMP:
716		if (periph != NULL)
717			cam_periph_invalidate(periph);
718		error = 0;
719		break;
720	case CAM_RESRC_UNAVAIL:
721		error = ENOMEM;
722		break;
723	case CAM_LUN_ALRDY_ENA:
724		error = EADDRINUSE;
725		break;
726	default:
727		printf("targfreeinstance: Unexpected CAM status %x\n", status);
728		/* FALLTHROUGH */
729	case CAM_PATH_INVALID:
730		error = ENODEV;
731		break;
732	}
733	return (error);
734}
735
736static int
737targioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
738{
739	struct cam_periph *periph;
740	struct targ_softc *softc;
741	u_int  unit;
742	int    error;
743
744	unit = minor(dev);
745	error = 0;
746	if (TARG_IS_CONTROL_DEV(unit)) {
747		switch (cmd) {
748		case TARGCTLIOALLOCUNIT:
749			error = targallocinstance((struct ioc_alloc_unit*)addr);
750			break;
751		case TARGCTLIOFREEUNIT:
752			error = targfreeinstance((struct ioc_alloc_unit*)addr);
753			break;
754		default:
755			error = EINVAL;
756			break;
757		}
758		return (error);
759	}
760
761	periph = cam_extend_get(targperiphs, unit);
762	if (periph == NULL)
763		return (ENXIO);
764	softc = (struct targ_softc *)periph->softc;
765	switch (cmd) {
766	case TARGIOCFETCHEXCEPTION:
767		*((targ_exception *)addr) = softc->exceptions;
768		break;
769	case TARGIOCCLEAREXCEPTION:
770	{
771		targ_exception clear_mask;
772
773		clear_mask = *((targ_exception *)addr);
774		if ((clear_mask & TARG_EXCEPT_UNKNOWN_ATIO) != 0) {
775			struct ccb_hdr *ccbh;
776
777			ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
778			if (ccbh != NULL) {
779				TAILQ_REMOVE(&softc->unknown_atio_queue,
780					     ccbh, periph_links.tqe);
781				ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
782			}
783			if (ccbh != NULL)
784				clear_mask &= ~TARG_EXCEPT_UNKNOWN_ATIO;
785		}
786		softc->exceptions &= ~clear_mask;
787		if (softc->exceptions == TARG_EXCEPT_NONE
788		 && softc->state == TARG_STATE_EXCEPTION) {
789			softc->state = TARG_STATE_NORMAL;
790			targrunqueue(periph, softc);
791		}
792		break;
793	}
794	case TARGIOCFETCHATIO:
795	{
796		struct ccb_hdr *ccbh;
797
798		ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
799		if (ccbh != NULL) {
800			bcopy(ccbh, addr, sizeof(struct ccb_accept_tio));
801		} else {
802			error = ENOENT;
803		}
804		break;
805	}
806	case TARGIOCCOMMAND:
807	{
808		union ccb *inccb;
809		union ccb *ccb;
810
811		/*
812		 * XXX JGibbs
813		 * This code is lifted directly from the pass-thru driver.
814		 * Perhaps this should be moved to a library????
815		 */
816		inccb = (union ccb *)addr;
817		ccb = cam_periph_getccb(periph, inccb->ccb_h.pinfo.priority);
818
819		error = targsendccb(periph, ccb, inccb);
820
821		xpt_release_ccb(ccb);
822
823		break;
824	}
825	case TARGIOCGETISTATE:
826	case TARGIOCSETISTATE:
827	{
828		struct ioc_initiator_state *ioc_istate;
829
830		ioc_istate = (struct ioc_initiator_state *)addr;
831		if (ioc_istate->initiator_id >= MAX_INITIATORS) {
832			error = EINVAL;
833			break;
834		}
835		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
836			  ("GET/SETISTATE for %d\n", ioc_istate->initiator_id));
837		if (cmd == TARGIOCGETISTATE) {
838			bcopy(&softc->istate[ioc_istate->initiator_id],
839			      &ioc_istate->istate, sizeof(ioc_istate->istate));
840		} else {
841			bcopy(&ioc_istate->istate,
842			      &softc->istate[ioc_istate->initiator_id],
843			      sizeof(ioc_istate->istate));
844			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
845				  ("pending_ca now %x\n",
846				   softc->istate[ioc_istate->initiator_id].pending_ca));
847		}
848		break;
849	}
850	default:
851		error = ENOTTY;
852		break;
853	}
854	return (error);
855}
856
857/*
858 * XXX JGibbs lifted from pass-thru driver.
859 * Generally, "ccb" should be the CCB supplied by the kernel.  "inccb"
860 * should be the CCB that is copied in from the user.
861 */
862static int
863targsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
864{
865	struct targ_softc *softc;
866	struct cam_periph_map_info mapinfo;
867	int error, need_unmap;
868
869	softc = (struct targ_softc *)periph->softc;
870
871	need_unmap = 0;
872
873	/*
874	 * There are some fields in the CCB header that need to be
875	 * preserved, the rest we get from the user.
876	 */
877	xpt_merge_ccb(ccb, inccb);
878
879	/*
880	 * There's no way for the user to have a completion
881	 * function, so we put our own completion function in here.
882	 */
883	ccb->ccb_h.cbfcnp = targdone;
884
885	/*
886	 * We only attempt to map the user memory into kernel space
887	 * if they haven't passed in a physical memory pointer,
888	 * and if there is actually an I/O operation to perform.
889	 * Right now cam_periph_mapmem() only supports SCSI and device
890	 * match CCBs.  For the SCSI CCBs, we only pass the CCB in if
891	 * there's actually data to map.  cam_periph_mapmem() will do the
892	 * right thing, even if there isn't data to map, but since CCBs
893 * without data are a reasonably common occurrence (e.g. test unit
894	 * ready), it will save a few cycles if we check for it here.
895	 */
896	if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0)
897	 && (((ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
898	    && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE))
899	  || (ccb->ccb_h.func_code == XPT_DEV_MATCH))) {
900
901		bzero(&mapinfo, sizeof(mapinfo));
902
903		error = cam_periph_mapmem(ccb, &mapinfo);
904
905		/*
906		 * cam_periph_mapmem returned an error, we can't continue.
907		 * Return the error to the user.
908		 */
909		if (error)
910			return(error);
911
912		/*
913		 * We successfully mapped the memory in, so we need to
914		 * unmap it when the transaction is done.
915		 */
916		need_unmap = 1;
917	}
918
919	/*
920	 * If the user wants us to perform any error recovery, then honor
921	 * that request.  Otherwise, it's up to the user to perform any
922	 * error recovery.
923	 */
924	error = cam_periph_runccb(ccb,
925				  (ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) ?
926				  targerror : NULL,
927				  /* cam_flags */ 0,
928				  /* sense_flags */SF_RETRY_UA,
929				  &softc->device_stats);
930
931	if (need_unmap != 0)
932		cam_periph_unmapmem(ccb, &mapinfo);
933
934	ccb->ccb_h.cbfcnp = NULL;
935	ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv;
936	bcopy(ccb, inccb, sizeof(union ccb));
937
938	return(error);
939}
940
941
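/*
 * Poll support: the device polls writable when a RECEIVE command is queued
 * with no user buffer yet available for it, and readable when a SEND
 * command is queued under the same condition.  An exception state is
 * reported as POLLERR.
 */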
942static int
943targpoll(dev_t dev, int poll_events, struct proc *p)
944{
945	struct cam_periph *periph;
946	struct targ_softc *softc;
947	u_int  unit;
948	int    revents;
949	int    s;
950
951	unit = minor(dev);
952
953	/* ioctl is the only supported operation of the control device */
954	if (TARG_IS_CONTROL_DEV(unit))
955		return EINVAL;
956
957	periph = cam_extend_get(targperiphs, unit);
958	if (periph == NULL)
959		return (ENXIO);
960	softc = (struct targ_softc *)periph->softc;
961
962	revents = 0;
963	s = splcam();
964	if ((poll_events & (POLLOUT | POLLWRNORM)) != 0) {
965		if (TAILQ_FIRST(&softc->rcv_ccb_queue) != NULL
966		 && bufq_first(&softc->rcv_buf_queue) == NULL)
967			revents |= poll_events & (POLLOUT | POLLWRNORM);
968	}
969	if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
970		if (TAILQ_FIRST(&softc->snd_ccb_queue) != NULL
971		 && bufq_first(&softc->snd_buf_queue) == NULL)
972			revents |= poll_events & (POLLIN | POLLRDNORM);
973	}
974
975	if (softc->state != TARG_STATE_NORMAL)
976		revents |= POLLERR;
977
978	if (revents == 0) {
979		if (poll_events & (POLLOUT | POLLWRNORM))
980			selrecord(p, &softc->rcv_select);
981		if (poll_events & (POLLIN | POLLRDNORM))
982			selrecord(p, &softc->snd_select);
983	}
984	splx(s);
985	return (revents);
986}
987
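/*
 * Reads return the data supplied by the initiator's SEND commands; writes
 * (below) provide the data returned for RECEIVE commands.  A zero-length
 * read or write queues an EOF indication instead of transferring data.
 */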
988static int
989targread(dev_t dev, struct uio *uio, int ioflag)
990{
991	u_int  unit;
992
993	unit = minor(dev);
994	/* ioctl is the only supported operation of the control device */
995	if (TARG_IS_CONTROL_DEV(unit))
996		return EINVAL;
997
998	if (uio->uio_iovcnt == 0
999	 || uio->uio_iov->iov_len == 0) {
1000		/* EOF */
1001		struct cam_periph *periph;
1002		struct targ_softc *softc;
1003		int    s;
1004
1005		s = splcam();
1006		periph = cam_extend_get(targperiphs, unit);
		if (periph == NULL) {
			splx(s);
			return (ENXIO);
		}
1009		softc = (struct targ_softc *)periph->softc;
1010		softc->flags |= TARG_FLAG_SEND_EOF;
1011		splx(s);
1012		targrunqueue(periph, softc);
1013		return (0);
1014	}
1015	return(physio(targstrategy, NULL, dev, 1, minphys, uio));
1016}
1017
1018static int
1019targwrite(dev_t dev, struct uio *uio, int ioflag)
1020{
1021	u_int  unit;
1022
1023	unit = minor(dev);
1024	/* ioctl is the only supported operation of the control device */
1025	if (TARG_IS_CONTROL_DEV(unit))
1026		return EINVAL;
1027
1028	if (uio->uio_iovcnt == 0
1029	 || uio->uio_iov->iov_len == 0) {
1030		/* EOF */
1031		struct cam_periph *periph;
1032		struct targ_softc *softc;
1033		int    s;
1034
1035		s = splcam();
1036		periph = cam_extend_get(targperiphs, unit);
		if (periph == NULL) {
			splx(s);
			return (ENXIO);
		}
1039		softc = (struct targ_softc *)periph->softc;
1040		softc->flags |= TARG_FLAG_RECEIVE_EOF;
1041		splx(s);
1042		targrunqueue(periph, softc);
1043		return (0);
1044	}
1045	return(physio(targstrategy, NULL, dev, 0, minphys, uio));
1046}
1047
1048/*
1049 * Actually translate the requested transfer into one the physical driver
1050 * can understand.  The transfer is described by a buf and will include
1051 * only one physical transfer.
1052 */
1053static void
1054targstrategy(struct buf *bp)
1055{
1056	struct cam_periph *periph;
1057	struct targ_softc *softc;
1058	u_int  unit;
1059	int    s;
1060
1061	unit = minor(bp->b_dev);
1062
1063	/* ioctl is the only supported operation of the control device */
1064	if (TARG_IS_CONTROL_DEV(unit)) {
1065		bp->b_error = EINVAL;
1066		goto bad;
1067	}
1068
1069	periph = cam_extend_get(targperiphs, unit);
1070	if (periph == NULL) {
1071		bp->b_error = ENXIO;
1072		goto bad;
1073	}
1074	softc = (struct targ_softc *)periph->softc;
1075
1076	/*
1077	 * Mask interrupts so that the device cannot be invalidated until
1078	 * after we are in the queue.  Otherwise, we might not properly
1079	 * clean up one of the buffers.
1080	 */
1081	s = splbio();
1082
1083	/*
1084	 * If there is an exception pending, error out
1085	 */
1086	if (softc->state != TARG_STATE_NORMAL) {
1087		splx(s);
1088		if (softc->state == TARG_STATE_EXCEPTION
1089		 && (softc->exceptions & TARG_EXCEPT_DEVICE_INVALID) == 0)
1090			bp->b_error = EBUSY;
1091		else
1092			bp->b_error = ENXIO;
1093		goto bad;
1094	}
1095
1096	/*
1097	 * Place it in the queue of buffers available for either
1098	 * SEND or RECEIVE commands.
1099	 *
1100	 */
1101	bp->b_resid = bp->b_bcount;
1102	if ((bp->b_flags & B_READ) != 0) {
1103		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
1104			  ("Queued a SEND buffer\n"));
1105		bufq_insert_tail(&softc->snd_buf_queue, bp);
1106	} else {
1107		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
1108			  ("Queued a RECEIVE buffer\n"));
1109		bufq_insert_tail(&softc->rcv_buf_queue, bp);
1110	}
1111
1112	splx(s);
1113
1114	/*
1115	 * Attempt to use the new buffer to service any pending
1116	 * target commands.
1117	 */
1118	targrunqueue(periph, softc);
1119
1120	return;
1121bad:
1122	bp->b_flags |= B_ERROR;
1123
1124	/*
1125	 * Correctly set the buf to indicate a completed xfer
1126	 */
1127	bp->b_resid = bp->b_bcount;
1128	biodone(bp);
1129}
1130
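/*
 * Pair the oldest queued SEND or RECEIVE ATIO with the oldest matching
 * user buffer (or a pending EOF flag), fill in its command descriptor, and
 * move it to the work queue.  Only one request is in flight at a time so
 * that data ordering is preserved.
 */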
1131static void
1132targrunqueue(struct cam_periph *periph, struct targ_softc *softc)
1133{
1134	struct  ccb_queue *pending_queue;
1135	struct	ccb_accept_tio *atio;
1136	struct	buf_queue_head *bufq;
1137	struct	buf *bp;
1138	struct	targ_cmd_desc *desc;
1139	struct	ccb_hdr *ccbh;
1140	int	s;
1141
1142	s = splbio();
1143	pending_queue = NULL;
1144	bufq = NULL;
1145	ccbh = NULL;
1146	/* Only run one request at a time to maintain data ordering. */
1147	if (softc->state != TARG_STATE_NORMAL
1148	 || TAILQ_FIRST(&softc->work_queue) != NULL
1149	 || TAILQ_FIRST(&softc->pending_queue) != NULL) {
1150		splx(s);
1151		return;
1152	}
1153
1154	if (((bp = bufq_first(&softc->snd_buf_queue)) != NULL
1155	  || (softc->flags & TARG_FLAG_SEND_EOF) != 0)
1156	 && (ccbh = TAILQ_FIRST(&softc->snd_ccb_queue)) != NULL) {
1157
1158		if (bp == NULL)
1159			softc->flags &= ~TARG_FLAG_SEND_EOF;
1160		else {
1161			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
1162				  ("De-Queued a SEND buffer %ld\n",
1163				   bp->b_bcount));
1164		}
1165		bufq = &softc->snd_buf_queue;
1166		pending_queue = &softc->snd_ccb_queue;
1167	} else if (((bp = bufq_first(&softc->rcv_buf_queue)) != NULL
1168	  	 || (softc->flags & TARG_FLAG_RECEIVE_EOF) != 0)
1169		&& (ccbh = TAILQ_FIRST(&softc->rcv_ccb_queue)) != NULL) {
1170
1171		if (bp == NULL)
1172			softc->flags &= ~TARG_FLAG_RECEIVE_EOF;
1173		else {
1174			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
1175				  ("De-Queued a RECEIVE buffer %ld\n",
1176				   bp->b_bcount));
1177		}
1178		bufq = &softc->rcv_buf_queue;
1179		pending_queue = &softc->rcv_ccb_queue;
1180	}
1181
1182	if (pending_queue != NULL) {
1183		/* Process a request */
1184		atio = (struct ccb_accept_tio *)ccbh;
1185		TAILQ_REMOVE(pending_queue, ccbh, periph_links.tqe);
1186		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1187		desc->bp = bp;
1188		if (bp == NULL) {
1189			/* EOF */
1190			desc->data = NULL;
1191			desc->data_increment = 0;
1192			desc->data_resid = 0;
1193			atio->ccb_h.flags &= ~CAM_DIR_MASK;
1194			atio->ccb_h.flags |= CAM_DIR_NONE;
1195		} else {
1196			bufq_remove(bufq, bp);
1197			desc->data = &bp->b_data[bp->b_bcount - bp->b_resid];
1198			desc->data_increment =
1199			    MIN(desc->data_resid, bp->b_resid);
1200		}
1201		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
1202			  ("Buffer command: data %x: datacnt %d\n",
1203			   (intptr_t)desc->data, desc->data_increment));
1204		TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
1205				  periph_links.tqe);
1206	}
1207	if (TAILQ_FIRST(&softc->work_queue) != NULL) {
1208		splx(s);
1209		xpt_schedule(periph, /*XXX priority*/1);
1210	} else
1211		splx(s);
1212}
1213
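/*
 * Periph start routine.  Besides servicing the "waiting CCB" protocol used
 * by cam_periph_getccb(), this pulls the next ATIO off the work queue,
 * builds a continue target I/O with cam_fill_ctio(), and sets
 * CAM_SEND_STATUS once the final data segment is being sent.
 */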
1214static void
1215targstart(struct cam_periph *periph, union ccb *start_ccb)
1216{
1217	struct targ_softc *softc;
1218	struct ccb_hdr *ccbh;
1219	struct ccb_accept_tio *atio;
1220	struct targ_cmd_desc *desc;
1221	struct ccb_scsiio *csio;
1222	ccb_flags flags;
1223	int    s;
1224
1225	softc = (struct targ_softc *)periph->softc;
1226
1227	s = splbio();
1228	ccbh = TAILQ_FIRST(&softc->work_queue);
1229	if (periph->immediate_priority <= periph->pinfo.priority) {
1230		start_ccb->ccb_h.ccb_type = TARG_CCB_WAITING;
1231		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1232				  periph_links.sle);
1233		periph->immediate_priority = CAM_PRIORITY_NONE;
1234		splx(s);
1235		wakeup(&periph->ccb_list);
1236	} else if (ccbh == NULL) {
1237		splx(s);
1238		xpt_release_ccb(start_ccb);
1239	} else {
1240		TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
1241		TAILQ_INSERT_HEAD(&softc->pending_queue, ccbh,
1242				  periph_links.tqe);
1243		splx(s);
1244		atio = (struct ccb_accept_tio*)ccbh;
1245		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1246
1247		/* Is this a tagged request? */
1248		flags = atio->ccb_h.flags & (CAM_TAG_ACTION_VALID|CAM_DIR_MASK);
1249
1250		/*
1251		 * If we are done with the transaction, tell the
1252		 * controller to send status and perform a CMD_CMPLT.
1253		 */
1254		if (desc->data_resid == desc->data_increment)
1255			flags |= CAM_SEND_STATUS;
1256
1257		csio = &start_ccb->csio;
1258		cam_fill_ctio(csio,
1259			      /*retries*/2,
1260			      targdone,
1261			      flags,
1262			      /*tag_action*/MSG_SIMPLE_Q_TAG,
1263			      atio->tag_id,
1264			      atio->init_id,
1265			      desc->status,
1266			      /*data_ptr*/desc->data_increment == 0
1267					  ? NULL : desc->data,
1268			      /*dxfer_len*/desc->data_increment,
1269			      /*timeout*/desc->timeout);
1270
1271		start_ccb->ccb_h.ccb_type = TARG_CCB_WORKQ;
1272		start_ccb->ccb_h.ccb_atio = atio;
1273		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
1274			  ("Sending a CTIO\n"));
1275		xpt_action(start_ccb);
1276		s = splbio();
1277		ccbh = TAILQ_FIRST(&softc->work_queue);
1278		splx(s);
1279	}
1280	if (ccbh != NULL)
1281		targrunqueue(periph, softc);
1282}
1283
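/*
 * Completion handler for every CCB this driver issues: new commands
 * arriving as accept TIOs, finished continue target I/Os, and immediate
 * notifies.  Simple commands (INQUIRY, TEST UNIT READY, REQUEST SENSE) are
 * answered directly; SEND and RECEIVE wait for user buffers, and anything
 * unrecognized is queued for the userland counterpart as an exception.
 */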
1284static void
1285targdone(struct cam_periph *periph, union ccb *done_ccb)
1286{
1287	struct targ_softc *softc;
1288
1289	softc = (struct targ_softc *)periph->softc;
1290
1291	if (done_ccb->ccb_h.ccb_type == TARG_CCB_WAITING) {
1292		/* Caller will release the CCB */
1293		wakeup(&done_ccb->ccb_h.cbfcnp);
1294		return;
1295	}
1296
1297	switch (done_ccb->ccb_h.func_code) {
1298	case XPT_ACCEPT_TARGET_IO:
1299	{
1300		struct ccb_accept_tio *atio;
1301		struct targ_cmd_desc *descr;
1302		struct initiator_state *istate;
1303		u_int8_t *cdb;
1304
1305		atio = &done_ccb->atio;
1306		descr = (struct targ_cmd_desc*)atio->ccb_h.ccb_descr;
1307		istate = &softc->istate[atio->init_id];
1308		cdb = atio->cdb_io.cdb_bytes;
1309		if (softc->state == TARG_STATE_TEARDOWN
1310		 || atio->ccb_h.status == CAM_REQ_ABORTED) {
1311			freedescr(descr);
1312			free(done_ccb, M_DEVBUF);
1313			return;
1314		}
1315
1316		if (istate->pending_ca == 0
1317		 && istate->pending_ua != 0
1318		 && cdb[0] != INQUIRY) {
1319			/* Pending UA, tell initiator */
1320			/* Direction is always relative to the initiator */
1321			istate->pending_ca = CA_UNIT_ATTN;
1322			atio->ccb_h.flags &= ~CAM_DIR_MASK;
1323			atio->ccb_h.flags |= CAM_DIR_NONE;
1324			descr->data_resid = 0;
1325			descr->data_increment = 0;
1326			descr->timeout = 5 * 1000;
1327			descr->status = SCSI_STATUS_CHECK_COND;
1328		} else {
1329			/*
1330			 * Save the current CA and UA status so
1331			 * they can be used by this command.
1332			 */
1333			ua_types pending_ua;
1334			ca_types pending_ca;
1335
1336			pending_ua = istate->pending_ua;
1337			pending_ca = istate->pending_ca;
1338
1339			/*
1340			 * As per the SCSI2 spec, any command that occurs
1341			 * after a CA is reported, clears the CA.  If the
1342			 * command is not an inquiry, we are also supposed
1343			 * to clear the UA condition, if any, that caused
1344			 * the CA to occur assuming the UA is not a
1345			 * persistent state.
1346			 */
1347			istate->pending_ca = CA_NONE;
1348			if ((pending_ca
1349			   & (CA_CMD_SENSE|CA_UNIT_ATTN)) == CA_UNIT_ATTN
1350			 && cdb[0] != INQUIRY)
1351				istate->pending_ua = UA_NONE;
1352
1353			/*
1354			 * Determine the type of incoming command and
1355			 * setup our buffer for a response.
1356			 */
1357			switch (cdb[0]) {
1358			case INQUIRY:
1359			{
1360				struct scsi_inquiry *inq;
1361				struct scsi_sense_data *sense;
1362
1363				inq = (struct scsi_inquiry *)cdb;
1364				sense = &istate->sense_data;
1365				CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
1366					  ("Saw an inquiry!\n"));
1367				/*
1368				 * Validate the command.  We don't
1369				 * support any VPD pages, so complain
1370				 * if EVPD is set.
1371				 */
1372				if ((inq->byte2 & SI_EVPD) != 0
1373				 || inq->page_code != 0) {
1374					istate->pending_ca = CA_CMD_SENSE;
1375					atio->ccb_h.flags &= ~CAM_DIR_MASK;
1376					atio->ccb_h.flags |= CAM_DIR_NONE;
1377					descr->data_resid = 0;
1378					descr->data_increment = 0;
1379					descr->status = SCSI_STATUS_CHECK_COND;
1380					fill_sense(sense,
1381						   SSD_CURRENT_ERROR,
1382						   SSD_KEY_ILLEGAL_REQUEST,
1383						   /*asc*/0x24, /*ascq*/0x00);
1384					sense->extra_len =
1385						offsetof(struct scsi_sense_data,
1386							 extra_bytes)
1387					      - offsetof(struct scsi_sense_data,
1388							 extra_len);
1389				}
1390
1391				if ((inq->byte2 & SI_EVPD) != 0) {
1392					sense->sense_key_spec[0] =
1393					    SSD_SCS_VALID|SSD_FIELDPTR_CMD
1394					   |SSD_BITPTR_VALID| /*bit value*/1;
1395					sense->sense_key_spec[1] = 0;
1396					sense->sense_key_spec[2] =
1397					    offsetof(struct scsi_inquiry,
1398						     byte2);
1399					break;
1400				} else if (inq->page_code != 0) {
1401					sense->sense_key_spec[0] =
1402					    SSD_SCS_VALID|SSD_FIELDPTR_CMD;
1403					sense->sense_key_spec[1] = 0;
1404					sense->sense_key_spec[2] =
1405					    offsetof(struct scsi_inquiry,
1406						     page_code);
1407					break;
1408				}
1409				/*
1410				 * Direction is always relative
1411				 * to the initiator.
1412				 */
1413				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1414				atio->ccb_h.flags |= CAM_DIR_IN;
1415				descr->data = softc->inq_data;
1416				descr->data_resid = MIN(softc->inq_data_len,
1417						       inq->length);
1418				descr->data_increment = descr->data_resid;
1419				descr->timeout = 5 * 1000;
1420				descr->status = SCSI_STATUS_OK;
1421				break;
1422			}
1423			case TEST_UNIT_READY:
1424				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1425				atio->ccb_h.flags |= CAM_DIR_NONE;
1426				descr->data_resid = 0;
1427				descr->data_increment = 0;
1428				descr->timeout = 5 * 1000;
1429				descr->status = SCSI_STATUS_OK;
1430				break;
1431			case REQUEST_SENSE:
1432			{
1433				struct scsi_request_sense *rsense;
1434				struct scsi_sense_data *sense;
1435
1436				rsense = (struct scsi_request_sense *)cdb;
1437				sense = &istate->sense_data;
1438				if (pending_ca == 0) {
1439					fill_sense(sense, SSD_CURRENT_ERROR,
1440						   SSD_KEY_NO_SENSE, 0x00,
1441						   0x00);
1442					CAM_DEBUG(periph->path,
1443						  CAM_DEBUG_SUBTRACE,
1444						  ("No pending CA!\n"));
1445				} else if (pending_ca == CA_UNIT_ATTN) {
1446					u_int ascq;
1447
1448					if (pending_ua == UA_POWER_ON)
1449						ascq = 0x1;
1450					else
1451						ascq = 0x2;
1452					fill_sense(sense, SSD_CURRENT_ERROR,
1453						   SSD_KEY_UNIT_ATTENTION,
1454						   0x29, ascq);
1455					CAM_DEBUG(periph->path,
1456						  CAM_DEBUG_SUBTRACE,
1457						  ("Pending UA!\n"));
1458				}
1459				/*
1460				 * Direction is always relative
1461				 * to the initiator.
1462				 */
1463				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1464				atio->ccb_h.flags |= CAM_DIR_IN;
1465				descr->data = sense;
1466				descr->data_resid =
1467			 		offsetof(struct scsi_sense_data,
1468						 extra_len)
1469				      + sense->extra_len;
1470				descr->data_resid = MIN(descr->data_resid,
1471						       rsense->length);
1472				descr->data_increment = descr->data_resid;
1473				descr->timeout = 5 * 1000;
1474				descr->status = SCSI_STATUS_OK;
1475				break;
1476			}
1477			case RECEIVE:
1478			case SEND:
1479			{
1480				struct scsi_send_receive *sr;
1481
1482				sr = (struct scsi_send_receive *)cdb;
1483
1484				/*
1485				 * Direction is always relative
1486				 * to the initiator.
1487				 */
1488				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1489				descr->data_resid = scsi_3btoul(sr->xfer_len);
1490				descr->timeout = 5 * 1000;
1491				descr->status = SCSI_STATUS_OK;
1492				if (cdb[0] == SEND) {
1493					atio->ccb_h.flags |= CAM_DIR_OUT;
1494					CAM_DEBUG(periph->path,
1495						  CAM_DEBUG_SUBTRACE,
1496						  ("Saw a SEND!\n"));
1498					TAILQ_INSERT_TAIL(&softc->snd_ccb_queue,
1499							  &atio->ccb_h,
1500							  periph_links.tqe);
1501					selwakeup(&softc->snd_select);
1502				} else {
1503					atio->ccb_h.flags |= CAM_DIR_IN;
1504					CAM_DEBUG(periph->path,
1505						  CAM_DEBUG_SUBTRACE,
1506						  ("Saw a RECEIVE!\n"));
1507					TAILQ_INSERT_TAIL(&softc->rcv_ccb_queue,
1508							  &atio->ccb_h,
1509							  periph_links.tqe);
1510					selwakeup(&softc->rcv_select);
1511				}
1512				/*
1513				 * Attempt to satisfy this request with
1514				 * a user buffer.
1515				 */
1516				targrunqueue(periph, softc);
1517				return;
1518			}
1519			default:
1520				/*
1521				 * Queue for consumption by our userland
1522				 * counterpart and  transition to the exception
1523				 * state.
1524				 */
1525				TAILQ_INSERT_TAIL(&softc->unknown_atio_queue,
1526						  &atio->ccb_h,
1527						  periph_links.tqe);
1528				softc->exceptions |= TARG_EXCEPT_UNKNOWN_ATIO;
1529				targfireexception(periph, softc);
1530				return;
1531			}
1532		}
1533
1534		/* Queue us up to receive a Continue Target I/O ccb. */
1535		TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
1536				  periph_links.tqe);
1537		xpt_schedule(periph, /*priority*/1);
1538		break;
1539	}
1540	case XPT_CONT_TARGET_IO:
1541	{
1542		struct ccb_accept_tio *atio;
1543		struct targ_cmd_desc *desc;
1544		struct buf *bp;
1545
1546		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
1547			  ("Received completed CTIO\n"));
1548		atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio;
1549		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1550
1551		TAILQ_REMOVE(&softc->pending_queue, &atio->ccb_h,
1552			     periph_links.tqe);
1553
1554		/* XXX Check for errors */
1555		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1556
1557		}
1558		desc->data_resid -= desc->data_increment;
1559		if ((bp = desc->bp) != NULL) {
1560
1561			bp->b_resid -= desc->data_increment;
1562			bp->b_error = 0;
1563
1564			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
1565				  ("Buffer I/O Completed - Resid %ld:%d\n",
1566				   bp->b_resid, desc->data_resid));
1567			/*
1568			 * Send the buffer back to the client if
1569			 * either the command has completed or all
1570			 * buffer space has been consumed.
1571			 */
1572			if (desc->data_resid == 0
1573			 || bp->b_resid == 0) {
1574				if (bp->b_resid != 0)
1575					/* Short transfer */
1576					bp->b_flags |= B_ERROR;
1577
1578				CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
1579					  ("Completing a buffer\n"));
1580				biodone(bp);
1581				desc->bp = NULL;
1582			}
1583		}
1584
1585		xpt_release_ccb(done_ccb);
1586		if (softc->state != TARG_STATE_TEARDOWN) {
1587
1588			if (desc->data_resid == 0) {
1589				/*
1590				 * Send the original accept TIO back to the
1591				 * controller to handle more work.
1592				 */
1593				CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
1594					  ("Returning ATIO to target\n"));
1595				xpt_action((union ccb *)atio);
1596				break;
1597			}
1598
1599			/* Queue us up for another buffer */
1600			if (atio->cdb_io.cdb_bytes[0] == SEND) {
1601				if (desc->bp != NULL)
1602					TAILQ_INSERT_HEAD(
1603						&softc->snd_buf_queue.queue,
1604						bp, b_act);
1605				TAILQ_INSERT_HEAD(&softc->snd_ccb_queue,
1606						  &atio->ccb_h,
1607						  periph_links.tqe);
1608			} else {
1609				if (desc->bp != NULL)
1610					TAILQ_INSERT_HEAD(
1611						&softc->rcv_buf_queue.queue,
1612						bp, b_act);
1613				TAILQ_INSERT_HEAD(&softc->rcv_ccb_queue,
1614						  &atio->ccb_h,
1615						  periph_links.tqe);
1616			}
1617			desc->bp = NULL;
1618			targrunqueue(periph, softc);
1619		} else {
1620			if (desc->bp != NULL) {
1621				bp->b_flags |= B_ERROR;
1622				bp->b_error = ENXIO;
1623				biodone(bp);
1624			}
1625			freedescr(desc);
1626			free(atio, M_DEVBUF);
1627		}
1628		break;
1629	}
1630	case XPT_IMMED_NOTIFY:
1631	{
1632		if (softc->state == TARG_STATE_TEARDOWN
1633		 || done_ccb->ccb_h.status == CAM_REQ_ABORTED)
1634			free(done_ccb, M_DEVBUF);
1635		break;
1636	}
1637	default:
1638		panic("targdone: Impossible xpt opcode %x encountered.",
1639		      done_ccb->ccb_h.func_code);
1640		/* NOTREACHED */
1641		break;
1642	}
1643}
1644
1645/*
1646 * Transition to the exception state and notify our symbiotic
1647 * userland process of the change.
1648 */
1649static void
1650targfireexception(struct cam_periph *periph, struct targ_softc *softc)
1651{
1652	/*
1653	 * return all pending buffers with short read/write status so our
1654	 * process unblocks, and do a selwakeup on any process queued
1655	 * waiting for reads or writes.  When the selwakeup is performed,
1656	 * the waking process will wakeup, call our poll routine again,
1657	 * and pick up the exception.
1658	 */
1659	struct buf *bp;
1660
1661	if (softc->state != TARG_STATE_NORMAL)
1662		/* Already either tearing down or in exception state */
1663		return;
1664
1665	softc->state = TARG_STATE_EXCEPTION;
1666
1667	while ((bp = bufq_first(&softc->snd_buf_queue)) != NULL) {
1668		bufq_remove(&softc->snd_buf_queue, bp);
1669		bp->b_flags |= B_ERROR;
1670		biodone(bp);
1671	}
1672
1673	while ((bp = bufq_first(&softc->rcv_buf_queue)) != NULL) {
1674		bufq_remove(&softc->rcv_buf_queue, bp);
1675		bp->b_flags |= B_ERROR;
1676		biodone(bp);
1677	}
1678
1679	selwakeup(&softc->snd_select);
1680	selwakeup(&softc->rcv_select);
1681}
1682
1683static int
1684targerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1685{
1686	return 0;
1687}
1688
1689static struct targ_cmd_desc*
1690allocdescr(void)
1691{
1692	struct targ_cmd_desc* descr;
1693
1694	/* Allocate the targ_descr structure */
1695	descr = (struct targ_cmd_desc *)malloc(sizeof(*descr),
1696					       M_DEVBUF, M_NOWAIT);
1697	if (descr == NULL)
1698		return (NULL);
1699
1700	bzero(descr, sizeof(*descr));
1701
1702	/* Allocate buffer backing store */
1703	descr->backing_store = malloc(MAX_BUF_SIZE, M_DEVBUF, M_NOWAIT);
1704	if (descr->backing_store == NULL) {
1705		free(descr, M_DEVBUF);
1706		return (NULL);
1707	}
1708	descr->max_size = MAX_BUF_SIZE;
1709	return (descr);
1710}
1711
1712static void
1713freedescr(struct targ_cmd_desc *descr)
1714{
1715	free(descr->backing_store, M_DEVBUF);
1716	free(descr, M_DEVBUF);
1717}
1718
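/*
 * Build fixed-format, current-error sense data.  extra_len is the
 * additional sense length; callers override it when they append
 * field-pointer information in sense_key_spec.
 */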
1719static void
1720fill_sense(struct scsi_sense_data *sense, u_int error_code, u_int sense_key,
1721	   u_int asc, u_int ascq)
1722{
1723	bzero(sense, sizeof(*sense));
1724	sense->error_code = error_code;
1725	sense->flags = sense_key;
1726	sense->add_sense_code = asc;
1727	sense->add_sense_code_qual = ascq;
1728
1729	sense->extra_len = offsetof(struct scsi_sense_data, fru)
1730			 - offsetof(struct scsi_sense_data, extra_len);
1731}
1732