scsi_target.c revision 63190
1/*
2 * Implementation of a simple Target Mode SCSI Proccessor Target driver for CAM.
3 *
4 * Copyright (c) 1998, 1999 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions, and the following disclaimer,
12 *    without modification, immediately at the beginning of the file.
13 * 2. The name of the author may not be used to endorse or promote products
14 *    derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $FreeBSD: head/sys/cam/scsi/scsi_target.c 63190 2000-07-14 21:09:25Z mjacob $
29 */
30#include <stddef.h>	/* For offsetof */
31
32#include <sys/param.h>
33#include <sys/queue.h>
34#include <sys/systm.h>
35#include <sys/kernel.h>
36#include <sys/types.h>
37#include <sys/bio.h>
38#include <sys/conf.h>
39#include <sys/devicestat.h>
40#include <sys/malloc.h>
41#include <sys/poll.h>
42#include <sys/select.h>	/* For struct selinfo. */
43#include <sys/uio.h>
44
45#include <cam/cam.h>
46#include <cam/cam_ccb.h>
47#include <cam/cam_extend.h>
48#include <cam/cam_periph.h>
49#include <cam/cam_queue.h>
50#include <cam/cam_xpt_periph.h>
51#include <cam/cam_debug.h>
52
53#include <cam/scsi/scsi_all.h>
54#include <cam/scsi/scsi_pt.h>
55#include <cam/scsi/scsi_targetio.h>
56#include <cam/scsi/scsi_message.h>
57
/* Overall state of a target instance */
typedef enum {
	TARG_STATE_NORMAL,	/* Commands are processed normally */
	TARG_STATE_EXCEPTION,	/* Exception pending; cleared via TARGIOCCLEAREXCEPTION */
	TARG_STATE_TEARDOWN	/* Instance is being destroyed (set in targdtor) */
} targ_state;

/* Per-instance flag bits kept in softc->flags */
typedef enum {
	TARG_FLAG_NONE		 = 0x00,
	TARG_FLAG_SEND_EOF	 = 0x01,	/* Zero-length read: EOF for SEND ops */
	TARG_FLAG_RECEIVE_EOF	 = 0x02,	/* Zero-length write: EOF for RCV ops */
	TARG_FLAG_LUN_ENABLED	 = 0x04	/* XPT_EN_LUN has succeeded */
} targ_ccb_flags_doc_placeholder_targ_flags;

typedef enum {
	TARG_CCB_NONE		= 0x00,
	TARG_CCB_WAITING	= 0x01,
	TARG_CCB_HELDQ		= 0x02,
	TARG_CCB_ABORT_TO_HELDQ = 0x04
} targ_ccb_flags;
77
#define MAX_ACCEPT	16	/* ATIO and INOT CCBs preallocated per enabled lun */
#define MAX_IMMEDIATE	16
#define MAX_BUF_SIZE	256	/* Max inquiry/sense/mode page transfer */
#define MAX_INITIATORS	256	/* includes widest fibre channel for now */

/*
 * Parenthesize the macro arguments so expressions of lower precedence
 * (e.g. MIN(x, a ? b : c)) group correctly.  Note that the chosen
 * argument is still evaluated twice, so avoid side effects.
 */
#define MIN(a, b) (((a) > (b)) ? (b) : (a))

#define TARG_CONTROL_UNIT 0xffff00ff
#define TARG_IS_CONTROL_DEV(unit) ((unit) == TARG_CONTROL_UNIT)

#define TARG_TAG_WILDCARD ((u_int)~0)

/* Offsets into our private CCB area for storing accept information */
#define ccb_flags	ppriv_field0
#define ccb_descr	ppriv_ptr1

/* We stick a pointer to the originating accept TIO in each continue I/O CCB */
#define ccb_atio	ppriv_ptr1
96
/* Per-instance driver state, hung off of periph->softc */
struct targ_softc {
	/* CTIOs pending on the controller */
	struct		ccb_queue pending_queue;

	/* ATIOs awaiting CTIO resources from the XPT */
	struct		ccb_queue work_queue;

	/*
	 * ATIOs for SEND operations waiting for 'write'
	 * buffer resources from our userland daemon.
	 */
	struct		ccb_queue snd_ccb_queue;

	/*
	 * ATIOs for RCV operations waiting for 'read'
	 * buffer resources from our userland daemon.
	 */
	struct		ccb_queue rcv_ccb_queue;

	/*
	 * ATIOs for commands unknown to the kernel driver.
	 * These are queued for the userland daemon to
	 * consume.
	 */
	struct		ccb_queue unknown_atio_queue;

	/*
	 * Userland buffers for SEND commands waiting for
	 * SEND ATIOs to be queued by an initiator.
	 */
	struct		bio_queue_head snd_bio_queue;

	/*
	 * Userland buffers for RCV commands waiting for
	 * RCV ATIOs to be queued by an initiator.
	 */
	struct		bio_queue_head rcv_bio_queue;
	struct		devstat device_stats;	/* stats fed to cam_periph_runccb */
	dev_t		targ_dev;	/* per-instance character device node */
	struct		selinfo snd_select;	/* pollers waiting for POLLIN (SEND side) */
	struct		selinfo rcv_select;	/* pollers waiting for POLLOUT (RCV side) */
	targ_state	state;
	targ_flags	flags;	
	targ_exception	exceptions;	/* pending exception bits for userland */
	u_int		init_level;	/* construction progress; unwound by targdtor */
	u_int		inq_data_len;	/* size of the inq_data buffer */
	struct		scsi_inquiry_data *inq_data;
	struct		ccb_accept_tio *accept_tio_list; /* linked via descr->atio_link */
	struct		ccb_hdr_slist immed_notify_slist;
	struct		initiator_state istate[MAX_INITIATORS]; /* per-initiator UA/CA state */
};
148
/*
 * Per-ATIO command descriptor, stored in the CCB's ccb_descr private
 * field.  Tracks data-transfer progress and backing storage for a
 * single target-mode command.
 */
struct targ_cmd_desc {
	struct	  ccb_accept_tio* atio_link; /* next ATIO on accept_tio_list */
	u_int	  data_resid;	/* How much left to transfer */
	u_int	  data_increment;/* Amount to send before next disconnect */
	void*	  data;		/* The data. Can be from backing_store or not */
	void*	  backing_store;/* Backing store allocated for this descriptor*/
	struct	  bio *bp;	/* Buffer for this transfer */
	u_int	  max_size;	/* Size of backing_store */
	u_int32_t timeout;
	u_int8_t  status;	/* Status to return to initiator */
};
160
161static	d_open_t	targopen;
162static	d_close_t	targclose;
163static	d_read_t	targread;
164static	d_write_t	targwrite;
165static	d_ioctl_t	targioctl;
166static	d_poll_t	targpoll;
167static	d_strategy_t	targstrategy;
168
#define TARG_CDEV_MAJOR	65	/* statically assigned character device major */

/* Entry points shared by the control device and per-instance devices */
static struct cdevsw targ_cdevsw = {
	/* open */	targopen,
	/* close */	targclose,
	/* read */	targread,
	/* write */	targwrite,
	/* ioctl */	targioctl,
	/* poll */	targpoll,
	/* mmap */	nommap,
	/* strategy */	targstrategy,
	/* name */	"targ",
	/* maj */	TARG_CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
	/* bmaj */	-1
};
186
187static int		targsendccb(struct cam_periph *periph, union ccb *ccb,
188				    union ccb *inccb);
189static periph_init_t	targinit;
190static void		targasync(void *callback_arg, u_int32_t code,
191				struct cam_path *path, void *arg);
192static int		targallocinstance(struct ioc_alloc_unit *alloc_unit);
193static int		targfreeinstance(struct ioc_alloc_unit *alloc_unit);
194static cam_status	targenlun(struct cam_periph *periph);
195static cam_status	targdislun(struct cam_periph *periph);
196static periph_ctor_t	targctor;
197static periph_dtor_t	targdtor;
198static void		targrunqueue(struct cam_periph *periph,
199				     struct targ_softc *softc);
200static periph_start_t	targstart;
201static void		targdone(struct cam_periph *periph,
202				 union ccb *done_ccb);
203static void		targfireexception(struct cam_periph *periph,
204					  struct targ_softc *softc);
205static void		targinoterror(struct cam_periph *periph,
206				      struct targ_softc *softc,
207				      struct ccb_immed_notify *inot);
208static  int		targerror(union ccb *ccb, u_int32_t cam_flags,
209				  u_int32_t sense_flags);
210static struct targ_cmd_desc*	allocdescr(void);
211static void		freedescr(struct targ_cmd_desc *buf);
212static void		fill_sense(struct targ_softc *softc,
213				   u_int initiator_id, u_int error_code,
214				   u_int sense_key, u_int asc, u_int ascq);
215static void		copy_sense(struct targ_softc *softc,
216				   struct initiator_state *istate,
217				   u_int8_t *sense_buffer, size_t sense_len);
218static void	set_unit_attention_cond(struct cam_periph *periph,
219					u_int initiator_id, ua_types ua);
220static void	set_ca_condition(struct cam_periph *periph,
221				 u_int initiator_id, ca_types ca);
222static void	abort_pending_transactions(struct cam_periph *periph,
223					   u_int initiator_id, u_int tag_id,
224					   int errno, int to_held_queue);
225
/*
 * Periph driver registration; the DATA_SET linker-set entry makes the
 * CAM midlayer call targinit() at boot.
 */
static struct periph_driver targdriver =
{
	targinit, "targ",
	TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0
};

DATA_SET(periphdriver_set, targdriver);
233
234static struct extend_array *targperiphs;
235static dev_t targ_ctl_dev;
236
/*
 * One-time driver initialization: set up the unit-number -> periph
 * lookup table and create the control device used for the unit
 * alloc/free ioctls.  Failures are logged but not fatal.
 */
static void
targinit(void)
{
	/*
	 * Create our extend array for storing the devices we attach to.
	 */
	targperiphs = cam_extend_new();
	if (targperiphs == NULL) {
		printf("targ: Failed to alloc extend array!\n");
		return;
	}
	targ_ctl_dev = make_dev(&targ_cdevsw, TARG_CONTROL_UNIT, UID_ROOT,
	    GID_OPERATOR, 0600, "%s.ctl", "targ");
	if (targ_ctl_dev == (dev_t) 0) {
		printf("targ: failed to create control dev\n");
	}
}
254
255static void
256targasync(void *callback_arg, u_int32_t code,
257	  struct cam_path *path, void *arg)
258{
259	struct cam_periph *periph;
260	struct targ_softc *softc;
261
262	periph = (struct cam_periph *)callback_arg;
263	softc = (struct targ_softc *)periph->softc;
264	switch (code) {
265	case AC_PATH_DEREGISTERED:
266	{
267		/* XXX Implement */
268		break;
269	}
270	default:
271		break;
272	}
273}
274
/*
 * Attempt to enable our lun.
 *
 * Issues an XPT_EN_LUN CCB to the controller and then preallocates
 * MAX_ACCEPT accept-target-I/O CCBs plus MAX_ACCEPT immediate-notify
 * CCBs so the SIM can post incoming commands and asynchronous events
 * to us.  Partial allocation is tolerated, but if no CCB of either
 * kind could be queued the lun is disabled again and CAM_REQ_CMP_ERR
 * is returned.
 */
static cam_status
targenlun(struct cam_periph *periph)
{
	union ccb immed_ccb;
	struct targ_softc *softc;
	cam_status status;
	int i;

	softc = (struct targ_softc *)periph->softc;

	/* Nothing to do if the lun is already enabled */
	if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0)
		return (CAM_REQ_CMP);

	xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, /*priority*/1);
	immed_ccb.ccb_h.func_code = XPT_EN_LUN;

	/* Don't need support for any vendor specific commands */
	immed_ccb.cel.grp6_len = 0;
	immed_ccb.cel.grp7_len = 0;
	immed_ccb.cel.enable = 1;
	xpt_action(&immed_ccb);
	status = immed_ccb.ccb_h.status;
	if (status != CAM_REQ_CMP) {
		xpt_print_path(periph->path);
		printf("targenlun - Enable Lun Rejected with status 0x%x\n",
		       status);
		return (status);
	}

	softc->flags |= TARG_FLAG_LUN_ENABLED;

	/*
	 * Build up a buffer of accept target I/O
	 * operations for incoming selections.
	 */
	for (i = 0; i < MAX_ACCEPT; i++) {
		struct ccb_accept_tio *atio;

		atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_DEVBUF,
						      M_NOWAIT);
		if (atio == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		/* Per-command descriptor for data-transfer bookkeeping */
		atio->ccb_h.ccb_descr = allocdescr();

		if (atio->ccb_h.ccb_descr == NULL) {
			free(atio, M_DEVBUF);
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		xpt_setup_ccb(&atio->ccb_h, periph->path, /*priority*/1);
		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		atio->ccb_h.cbfcnp = targdone;
		atio->ccb_h.ccb_flags = TARG_CCB_NONE;
		xpt_action((union ccb *)atio);
		status = atio->ccb_h.status;
		if (status != CAM_REQ_INPROG) {
			xpt_print_path(periph->path);
			printf("Queue of atio failed\n");
			freedescr(atio->ccb_h.ccb_descr);
			free(atio, M_DEVBUF);
			break;
		}
		/* Chain onto the list targdislun uses to abort them later */
		((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link =
		    softc->accept_tio_list;
		softc->accept_tio_list = atio;
	}

	/* With no queued ATIO at all we cannot accept any commands */
	if (i == 0) {
		xpt_print_path(periph->path);
		printf("targenlun - Could not allocate accept tio CCBs: "
		       "status = 0x%x\n", status);
		targdislun(periph);
		return (CAM_REQ_CMP_ERR);
	}

	/*
	 * Build up a buffer of immediate notify CCBs
	 * so the SIM can tell us of asynchronous target mode events.
	 */
	for (i = 0; i < MAX_ACCEPT; i++) {
		struct ccb_immed_notify *inot;

		inot = (struct ccb_immed_notify*)malloc(sizeof(*inot), M_DEVBUF,
						        M_NOWAIT);

		if (inot == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		xpt_setup_ccb(&inot->ccb_h, periph->path, /*priority*/1);
		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
		inot->ccb_h.cbfcnp = targdone;
		SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h,
				  periph_links.sle);
		xpt_action((union ccb *)inot);
	}

	if (i == 0) {
		xpt_print_path(periph->path);
		printf("targenlun - Could not allocate immediate notify CCBs: "
		       "status = 0x%x\n", status);
		targdislun(periph);
		return (CAM_REQ_CMP_ERR);
	}

	return (CAM_REQ_CMP);
}
388
/*
 * Disable our lun.
 *
 * Aborts every outstanding accept TIO and immediate notify CCB via
 * XPT_ABORT, then issues an XPT_EN_LUN CCB with enable cleared.
 * TARG_FLAG_LUN_ENABLED is only cleared if the disable succeeds, and
 * the status of the disable CCB is returned.
 */
static cam_status
targdislun(struct cam_periph *periph)
{
	union ccb ccb;
	struct targ_softc *softc;
	struct ccb_accept_tio* atio;
	struct ccb_hdr *ccb_h;

	softc = (struct targ_softc *)periph->softc;
	if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0)
		return CAM_REQ_CMP;

	/* XXX Block for Continue I/O completion */

	/* Kill off all ACCEPT and IMMEDIATE CCBs */
	while ((atio = softc->accept_tio_list) != NULL) {

		softc->accept_tio_list =
		    ((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link;
		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
		ccb.cab.ccb_h.func_code = XPT_ABORT;
		ccb.cab.abort_ccb = (union ccb *)atio;
		xpt_action(&ccb);
	}

	while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) {
		SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle);
		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
		ccb.cab.ccb_h.func_code = XPT_ABORT;
		ccb.cab.abort_ccb = (union ccb *)ccb_h;
		xpt_action(&ccb);
	}

	/*
	 * Disable this lun.
	 */
	xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, /*priority*/1);
	ccb.cel.ccb_h.func_code = XPT_EN_LUN;
	ccb.cel.enable = 0;
	xpt_action(&ccb);

	if (ccb.cel.ccb_h.status != CAM_REQ_CMP)
		printf("targdislun - Disabling lun on controller failed "
		       "with status 0x%x\n", ccb.cel.ccb_h.status);
	else
		softc->flags &= ~TARG_FLAG_LUN_ENABLED;
	return (ccb.cel.ccb_h.status);
}
437
/*
 * Periph constructor, invoked by cam_periph_alloc().
 *
 * Allocates and initializes the per-instance softc, seeds every
 * initiator slot with a power-on unit attention, builds default
 * SCSI-2 processor-device inquiry data, and creates the per-unit
 * character device.  'arg' is the XPT_PATH_INQ result passed in from
 * targallocinstance().  softc->init_level records how far we got so
 * that targdtor() can unwind a partial construction.
 */
static cam_status
targctor(struct cam_periph *periph, void *arg)
{
	struct ccb_pathinq *cpi;
	struct targ_softc *softc;
	int i;

	cpi = (struct ccb_pathinq *)arg;

	/* Allocate our per-instance private storage */
	softc = (struct targ_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);
	if (softc == NULL) {
		printf("targctor: unable to malloc softc\n");
		return (CAM_REQ_CMP_ERR);
	}

	bzero(softc, sizeof(*softc));
	TAILQ_INIT(&softc->pending_queue);
	TAILQ_INIT(&softc->work_queue);
	TAILQ_INIT(&softc->snd_ccb_queue);
	TAILQ_INIT(&softc->rcv_ccb_queue);
	TAILQ_INIT(&softc->unknown_atio_queue);
	bioq_init(&softc->snd_bio_queue);
	bioq_init(&softc->rcv_bio_queue);
	softc->accept_tio_list = NULL;
	SLIST_INIT(&softc->immed_notify_slist);
	softc->state = TARG_STATE_NORMAL;
	periph->softc = softc;
	softc->init_level++;

	cam_extend_set(targperiphs, periph->unit_number, periph);

	/*
	 * We start out life with a UA to indicate power-on/reset.
	 */
	for (i = 0; i < MAX_INITIATORS; i++)
		softc->istate[i].pending_ua = UA_POWER_ON;

	/*
	 * Allocate an initial inquiry data buffer.  We might allow the
	 * user to override this later via an ioctl.
	 */
	softc->inq_data_len = sizeof(*softc->inq_data);
	softc->inq_data = malloc(softc->inq_data_len, M_DEVBUF, M_NOWAIT);
	if (softc->inq_data == NULL) {
		printf("targctor - Unable to malloc inquiry data\n");
		targdtor(periph);
		return (CAM_RESRC_UNAVAIL);
	}
	bzero(softc->inq_data, softc->inq_data_len);
	softc->inq_data->device = T_PROCESSOR | (SID_QUAL_LU_CONNECTED << 5);
	softc->inq_data->version = 2;
	softc->inq_data->response_format = 2; /* SCSI2 Inquiry Format */
	/* Advertise only the transfer capabilities the HBA reported */
	softc->inq_data->flags =
	    cpi->hba_inquiry & (PI_SDTR_ABLE|PI_WIDE_16|PI_WIDE_32|PI_TAG_ABLE);
	softc->inq_data->additional_length = softc->inq_data_len - 4;
	strncpy(softc->inq_data->vendor, "FreeBSD ", SID_VENDOR_SIZE);
	strncpy(softc->inq_data->product, "TM-PT           ", SID_PRODUCT_SIZE);
	strncpy(softc->inq_data->revision, "0.0 ", SID_REVISION_SIZE);
	softc->targ_dev = make_dev(&targ_cdevsw, periph->unit_number, UID_ROOT,
				   GID_OPERATOR, 0600, "%s%d",
				   periph->periph_name, periph->unit_number);
	softc->init_level++;
	return (CAM_REQ_CMP);
}
503
504static void
505targdtor(struct cam_periph *periph)
506{
507	struct targ_softc *softc;
508
509	softc = (struct targ_softc *)periph->softc;
510
511	softc->state = TARG_STATE_TEARDOWN;
512
513	targdislun(periph);
514
515	cam_extend_release(targperiphs, periph->unit_number);
516
517	switch (softc->init_level) {
518	default:
519		/* FALLTHROUGH */
520	case 2:
521		free(softc->inq_data, M_DEVBUF);
522		destroy_dev(softc->targ_dev);
523		/* FALLTHROUGH */
524	case 1:
525		free(softc, M_DEVBUF);
526		break;
527	case 0:
528		panic("targdtor - impossible init level");;
529	}
530}
531
/*
 * Open entry point.  An open of the control device always succeeds.
 * For a target instance, take a periph reference if the lun is not
 * yet enabled, attempt to enable it, and map the resulting CAM
 * status to an errno, dropping the reference on failure.
 */
static int
targopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	struct cam_periph *periph;
	struct	targ_softc *softc;
	u_int unit;
	cam_status status;
	int error;
	int s;

	unit = minor(dev);

	/* An open of the control device always succeeds */
	if (TARG_IS_CONTROL_DEV(unit))
		return 0;

	s = splsoftcam();
	periph = cam_extend_get(targperiphs, unit);
	if (periph == NULL) {
        	splx(s);
		return (ENXIO);
	}
	if ((error = cam_periph_lock(periph, PRIBIO | PCATCH)) != 0) {
		splx(s);
		return (error);
	}

	softc = (struct targ_softc *)periph->softc;
	/* Hold a reference for as long as the lun stays enabled */
	if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0) {
		if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
			splx(s);
			cam_periph_unlock(periph);
			return(ENXIO);
		}
	}
        splx(s);

	status = targenlun(periph);
	/* Translate the CAM status from the enable attempt into an errno */
	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		break;
	case CAM_RESRC_UNAVAIL:
		error = ENOMEM;
		break;
	case CAM_LUN_ALRDY_ENA:
		error = EADDRINUSE;
		break;
	default:
		error = ENXIO;
		break;
	}
        cam_periph_unlock(periph);
	if (error) {
		/* Give back the reference taken above */
		cam_periph_release(periph);
	}
	return (error);
}
590
591static int
592targclose(dev_t dev, int flag, int fmt, struct proc *p)
593{
594	struct	cam_periph *periph;
595	struct	targ_softc *softc;
596	u_int	unit;
597	int	s;
598	int	error;
599
600	unit = minor(dev);
601
602	/* A close of the control device always succeeds */
603	if (TARG_IS_CONTROL_DEV(unit))
604		return 0;
605
606	s = splsoftcam();
607	periph = cam_extend_get(targperiphs, unit);
608	if (periph == NULL) {
609		splx(s);
610		return (ENXIO);
611	}
612	if ((error = cam_periph_lock(periph, PRIBIO)) != 0)
613		return (error);
614	softc = (struct targ_softc *)periph->softc;
615	splx(s);
616
617	targdislun(periph);
618
619	cam_periph_unlock(periph);
620	cam_periph_release(periph);
621
622	return (0);
623}
624
/*
 * Allocate a new target instance (TARGCTLIOALLOCUNIT).
 *
 * Creates a path to the requested bus/target/lun, verifies via
 * XPT_PATH_INQ that the controller supports processor target mode,
 * refuses duplicate instances, and finally allocates the periph
 * (which runs targctor()).  On success the assigned unit number is
 * handed back in alloc_unit->unit.  Returns an errno.
 */
static int
targallocinstance(struct ioc_alloc_unit *alloc_unit)
{
	struct ccb_pathinq cpi;
	struct cam_path *path;
	struct cam_periph *periph;
	cam_status status;
	int free_path_on_return;
	int error;

	free_path_on_return = 0;
	status = xpt_create_path(&path, /*periph*/NULL,
				 alloc_unit->path_id,
				 alloc_unit->target_id,
				 alloc_unit->lun_id);
	if (status != CAM_REQ_CMP) {
		printf("Couldn't Allocate Path %x\n", status);
		goto fail;
	}

	free_path_on_return++;


	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	status = cpi.ccb_h.status;

	if (status != CAM_REQ_CMP) {
		printf("Couldn't CPI %x\n", status);
		goto fail;
	}

	/* Can only alloc units on controllers that support target mode */
	if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
		printf("Controller does not support target mode%x\n", status);
		status = CAM_PATH_INVALID;
		goto fail;
	}

	/* Ensure that we don't already have an instance for this unit. */
	if ((periph = cam_periph_find(path, "targ")) != NULL) {
		status = CAM_LUN_ALRDY_ENA;
		goto fail;
	}

	/*
	 * Allocate a peripheral instance for
	 * this target instance.
	 */
	status = cam_periph_alloc(targctor, NULL, targdtor, targstart,
				  "targ", CAM_PERIPH_BIO, path, targasync,
				  0, &cpi);

fail:
	/* Translate the final CAM status into an errno */
	switch (status) {
	case CAM_REQ_CMP:
	{
		struct cam_periph *periph;

		if ((periph = cam_periph_find(path, "targ")) == NULL)
			panic("targallocinstance: Succeeded but no periph?");
		error = 0;
		alloc_unit->unit = periph->unit_number;
		break;
	}
	case CAM_RESRC_UNAVAIL:
		error = ENOMEM;
		break;
	case CAM_LUN_ALRDY_ENA:
		error = EADDRINUSE;
		break;
	default:
		printf("targallocinstance: Unexpected CAM status %x\n", status);
		/* FALLTHROUGH */
	case CAM_PATH_INVALID:
		error = ENXIO;
		break;
	case CAM_PROVIDE_FAIL:
		error = ENODEV;
		break;
	}

	if (free_path_on_return != 0)
		xpt_free_path(path);

	return (error);
}
713
/*
 * Free a target instance (TARGCTLIOFREEUNIT).
 *
 * Looks up the periph for the supplied bus/target/lun and invalidates
 * it, which eventually runs targdtor().  The request is refused while
 * the lun is still enabled (CAM_BUSY).  Returns an errno.
 */
static int
targfreeinstance(struct ioc_alloc_unit *alloc_unit)
{
	struct cam_path *path;
	struct cam_periph *periph;
	struct targ_softc *softc;
	cam_status status;
	int free_path_on_return;
	int error;

	periph = NULL;
	free_path_on_return = 0;
	status = xpt_create_path(&path, /*periph*/NULL,
				 alloc_unit->path_id,
				 alloc_unit->target_id,
				 alloc_unit->lun_id);
	free_path_on_return++;

	if (status != CAM_REQ_CMP)
		goto fail;

	/* Find our instance. */
	if ((periph = cam_periph_find(path, "targ")) == NULL) {
		xpt_print_path(path);
		printf("Invalid path specified for freeing target instance\n");
		status = CAM_PATH_INVALID;
		goto fail;
	}

        softc = (struct targ_softc *)periph->softc;

        if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0) {
		status = CAM_BUSY;
		goto fail;
	}

fail:
	if (free_path_on_return != 0)
		xpt_free_path(path);

	/* Translate the final CAM status into an errno */
	switch (status) {
	case CAM_REQ_CMP:
		if (periph != NULL)
			cam_periph_invalidate(periph);
		error = 0;
		break;
	case CAM_RESRC_UNAVAIL:
		error = ENOMEM;
		break;
	case CAM_LUN_ALRDY_ENA:
		error = EADDRINUSE;
		break;
	default:
		printf("targfreeinstance: Unexpected CAM status %x\n", status);
		/* FALLTHROUGH */
	case CAM_PATH_INVALID:
		error = ENODEV;
		break;
	}
	return (error);
}
775
776static int
777targioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
778{
779	struct cam_periph *periph;
780	struct targ_softc *softc;
781	u_int  unit;
782	int    error;
783
784	unit = minor(dev);
785	error = 0;
786	if (TARG_IS_CONTROL_DEV(unit)) {
787		switch (cmd) {
788		case TARGCTLIOALLOCUNIT:
789			error = targallocinstance((struct ioc_alloc_unit*)addr);
790			break;
791		case TARGCTLIOFREEUNIT:
792			error = targfreeinstance((struct ioc_alloc_unit*)addr);
793			break;
794		default:
795			error = EINVAL;
796			break;
797		}
798		return (error);
799	}
800
801	periph = cam_extend_get(targperiphs, unit);
802	if (periph == NULL)
803		return (ENXIO);
804	softc = (struct targ_softc *)periph->softc;
805	switch (cmd) {
806	case TARGIOCFETCHEXCEPTION:
807		*((targ_exception *)addr) = softc->exceptions;
808		break;
809	case TARGIOCCLEAREXCEPTION:
810	{
811		targ_exception clear_mask;
812
813		clear_mask = *((targ_exception *)addr);
814		if ((clear_mask & TARG_EXCEPT_UNKNOWN_ATIO) != 0) {
815			struct ccb_hdr *ccbh;
816
817			ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
818			if (ccbh != NULL) {
819				TAILQ_REMOVE(&softc->unknown_atio_queue,
820					     ccbh, periph_links.tqe);
821				/* Requeue the ATIO back to the controller */
822				ccbh->ccb_flags = TARG_CCB_NONE;
823				xpt_action((union ccb *)ccbh);
824				ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
825			}
826			if (ccbh != NULL)
827				clear_mask &= ~TARG_EXCEPT_UNKNOWN_ATIO;
828		}
829		softc->exceptions &= ~clear_mask;
830		if (softc->exceptions == TARG_EXCEPT_NONE
831		 && softc->state == TARG_STATE_EXCEPTION) {
832			softc->state = TARG_STATE_NORMAL;
833			targrunqueue(periph, softc);
834		}
835		break;
836	}
837	case TARGIOCFETCHATIO:
838	{
839		struct ccb_hdr *ccbh;
840
841		ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
842		if (ccbh != NULL) {
843			bcopy(ccbh, addr, sizeof(struct ccb_accept_tio));
844		} else {
845			error = ENOENT;
846		}
847		break;
848	}
849	case TARGIOCCOMMAND:
850	{
851		union ccb *inccb;
852		union ccb *ccb;
853
854		/*
855		 * XXX JGibbs
856		 * This code is lifted directly from the pass-thru driver.
857		 * Perhaps this should be moved to a library????
858		 */
859		inccb = (union ccb *)addr;
860		ccb = cam_periph_getccb(periph, inccb->ccb_h.pinfo.priority);
861
862		error = targsendccb(periph, ccb, inccb);
863
864		xpt_release_ccb(ccb);
865
866		break;
867	}
868	case TARGIOCGETISTATE:
869	case TARGIOCSETISTATE:
870	{
871		struct ioc_initiator_state *ioc_istate;
872
873		ioc_istate = (struct ioc_initiator_state *)addr;
874		if (ioc_istate->initiator_id > MAX_INITIATORS) {
875			error = EINVAL;
876			break;
877		}
878		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
879			  ("GET/SETISTATE for %d\n", ioc_istate->initiator_id));
880		if (cmd == TARGIOCGETISTATE) {
881			bcopy(&softc->istate[ioc_istate->initiator_id],
882			      &ioc_istate->istate, sizeof(ioc_istate->istate));
883		} else {
884			bcopy(&ioc_istate->istate,
885			      &softc->istate[ioc_istate->initiator_id],
886			      sizeof(ioc_istate->istate));
887		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
888			  ("pending_ca now %x\n",
889			   softc->istate[ioc_istate->initiator_id].pending_ca));
890		}
891		break;
892	}
893#ifdef	CAMDEBUG
894	case TARGIODEBUG:
895	{
896		union ccb ccb;
897		bzero (&ccb, sizeof ccb);
898		if (xpt_create_path(&ccb.ccb_h.path, periph,
899		    xpt_path_path_id(periph->path),
900		    xpt_path_target_id(periph->path),
901		    xpt_path_lun_id(periph->path)) != CAM_REQ_CMP) {
902			error = EINVAL;
903			break;
904		}
905		if (*((int *)addr)) {
906			ccb.cdbg.flags = CAM_DEBUG_PERIPH;
907		} else {
908			ccb.cdbg.flags = CAM_DEBUG_NONE;
909		}
910		xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path, 0);
911		ccb.ccb_h.func_code = XPT_DEBUG;
912		ccb.ccb_h.path_id = xpt_path_path_id(ccb.ccb_h.path);
913		ccb.ccb_h.target_id = xpt_path_target_id(ccb.ccb_h.path);
914		ccb.ccb_h.target_lun = xpt_path_lun_id(ccb.ccb_h.path);
915		ccb.ccb_h.cbfcnp = targdone;
916		xpt_action(&ccb);
917		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
918			error = EIO;
919		} else {
920			error = 0;
921		}
922		xpt_free_path(ccb.ccb_h.path);
923		break;
924	}
925#endif
926	default:
927		error = ENOTTY;
928		break;
929	}
930	return (error);
931}
932
/*
 * XXX JGibbs lifted from pass-thru driver.
 * Generally, "ccb" should be the CCB supplied by the kernel.  "inccb"
 * should be the CCB that is copied in from the user.
 *
 * Merge the user's CCB into the kernel-owned one, map any user data
 * buffers into kernel space, run the CCB (unless an exception is
 * pending, in which case it fails with CAM_UNACKED_EVENT), and copy
 * the completed CCB back out to the user.  Returns an errno; CAM
 * status is reported via the copied-out CCB.
 */
static int
targsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
{
	struct targ_softc *softc;
	struct cam_periph_map_info mapinfo;
	int error, need_unmap;
	int s;

	softc = (struct targ_softc *)periph->softc;

	need_unmap = 0;

	/*
	 * There are some fields in the CCB header that need to be
	 * preserved, the rest we get from the user.
	 */
	xpt_merge_ccb(ccb, inccb);

	/*
	 * There's no way for the user to have a completion
	 * function, so we put our own completion function in here.
	 */
	ccb->ccb_h.cbfcnp = targdone;

	/*
	 * We only attempt to map the user memory into kernel space
	 * if they haven't passed in a physical memory pointer,
	 * and if there is actually an I/O operation to perform.
	 * Right now cam_periph_mapmem() only supports SCSI and device
	 * match CCBs.  For the SCSI CCBs, we only pass the CCB in if
	 * there's actually data to map.  cam_periph_mapmem() will do the
	 * right thing, even if there isn't data to map, but since CCBs
	 * without data are a reasonably common occurance (e.g. test unit
	 * ready), it will save a few cycles if we check for it here.
	 */
	if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0)
	 && (((ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
	    && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE))
	  || (ccb->ccb_h.func_code == XPT_DEV_MATCH))) {

		bzero(&mapinfo, sizeof(mapinfo));

		error = cam_periph_mapmem(ccb, &mapinfo);

		/*
		 * cam_periph_mapmem returned an error, we can't continue.
		 * Return the error to the user.
		 */
		if (error)
			return(error);

		/*
		 * We successfully mapped the memory in, so we need to
		 * unmap it when the transaction is done.
		 */
		need_unmap = 1;
	}

	/*
	 * Once queued on the pending CCB list, this CCB will be protected
	 * by the error recovery handling used for 'buffer I/O' ccbs.  Since
	 * we are in a process context here, however, the software interrupt
	 * for this driver may deliver an event invalidating this CCB just
	 * before we queue it.  Close this race condition by blocking
	 * software interrupt delivery, checking for any pertinent queued
	 * events, and only then queuing this CCB.
	 */
	s = splsoftcam();
	if (softc->exceptions == 0) {
		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
			TAILQ_INSERT_TAIL(&softc->pending_queue, &ccb->ccb_h,
					  periph_links.tqe);

		/*
		 * If the user wants us to perform any error recovery,
		 * then honor that request.  Otherwise, it's up to the
		 * user to perform any error recovery.
		 */
		error = cam_periph_runccb(ccb,
					  /* error handler */NULL,
					  /* cam_flags */ 0,
					  /* sense_flags */SF_RETRY_UA,
					  &softc->device_stats);

		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
			TAILQ_REMOVE(&softc->pending_queue, &ccb->ccb_h,
				     periph_links.tqe);
	} else {
		/* An exception is pending; fail the request back to the user */
		ccb->ccb_h.status = CAM_UNACKED_EVENT;
		error = 0;
	}
	splx(s);

	if (need_unmap != 0)
		cam_periph_unmapmem(ccb, &mapinfo);

	/* Strip our private completion pointer before copying back out */
	ccb->ccb_h.cbfcnp = NULL;
	ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv;
	bcopy(ccb, inccb, sizeof(union ccb));

	return(error);
}
1040
1041
1042static int
1043targpoll(dev_t dev, int poll_events, struct proc *p)
1044{
1045	struct cam_periph *periph;
1046	struct targ_softc *softc;
1047	u_int  unit;
1048	int    revents;
1049	int    s;
1050
1051	unit = minor(dev);
1052
1053	/* ioctl is the only supported operation of the control device */
1054	if (TARG_IS_CONTROL_DEV(unit))
1055		return EINVAL;
1056
1057	periph = cam_extend_get(targperiphs, unit);
1058	if (periph == NULL)
1059		return (ENXIO);
1060	softc = (struct targ_softc *)periph->softc;
1061
1062	revents = 0;
1063	s = splcam();
1064	if ((poll_events & (POLLOUT | POLLWRNORM)) != 0) {
1065		if (TAILQ_FIRST(&softc->rcv_ccb_queue) != NULL
1066		 && bioq_first(&softc->rcv_bio_queue) == NULL)
1067			revents |= poll_events & (POLLOUT | POLLWRNORM);
1068	}
1069	if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
1070		if (TAILQ_FIRST(&softc->snd_ccb_queue) != NULL
1071		 && bioq_first(&softc->snd_bio_queue) == NULL)
1072			revents |= poll_events & (POLLIN | POLLRDNORM);
1073	}
1074
1075	if (softc->state != TARG_STATE_NORMAL)
1076		revents |= POLLERR;
1077
1078	if (revents == 0) {
1079		if (poll_events & (POLLOUT | POLLWRNORM))
1080			selrecord(p, &softc->rcv_select);
1081		if (poll_events & (POLLIN | POLLRDNORM))
1082			selrecord(p, &softc->snd_select);
1083	}
1084	splx(s);
1085	return (revents);
1086}
1087
1088static int
1089targread(dev_t dev, struct uio *uio, int ioflag)
1090{
1091	u_int  unit;
1092
1093	unit = minor(dev);
1094	/* ioctl is the only supported operation of the control device */
1095	if (TARG_IS_CONTROL_DEV(unit))
1096		return EINVAL;
1097
1098	if (uio->uio_iovcnt == 0
1099	 || uio->uio_iov->iov_len == 0) {
1100		/* EOF */
1101		struct cam_periph *periph;
1102		struct targ_softc *softc;
1103		int    s;
1104
1105		s = splcam();
1106		periph = cam_extend_get(targperiphs, unit);
1107		if (periph == NULL)
1108			return (ENXIO);
1109		softc = (struct targ_softc *)periph->softc;
1110		softc->flags |= TARG_FLAG_SEND_EOF;
1111		splx(s);
1112		targrunqueue(periph, softc);
1113		return (0);
1114	}
1115	return(physread(dev, uio, ioflag));
1116}
1117
1118static int
1119targwrite(dev_t dev, struct uio *uio, int ioflag)
1120{
1121	u_int  unit;
1122
1123	unit = minor(dev);
1124	/* ioctl is the only supported operation of the control device */
1125	if (TARG_IS_CONTROL_DEV(unit))
1126		return EINVAL;
1127
1128	if (uio->uio_iovcnt == 0
1129	 || uio->uio_iov->iov_len == 0) {
1130		/* EOF */
1131		struct cam_periph *periph;
1132		struct targ_softc *softc;
1133		int    s;
1134
1135		s = splcam();
1136		periph = cam_extend_get(targperiphs, unit);
1137		if (periph == NULL)
1138			return (ENXIO);
1139		softc = (struct targ_softc *)periph->softc;
1140		softc->flags |= TARG_FLAG_RECEIVE_EOF;
1141		splx(s);
1142		targrunqueue(periph, softc);
1143		return (0);
1144	}
1145	return(physwrite(dev, uio, ioflag));
1146}
1147
1148/*
1149 * Actually translate the requested transfer into one the physical driver
1150 * can understand.  The transfer is described by a buf and will include
1151 * only one physical transfer.
1152 */
1153static void
1154targstrategy(struct bio *bp)
1155{
1156	struct cam_periph *periph;
1157	struct targ_softc *softc;
1158	u_int  unit;
1159	int    s;
1160
1161	unit = minor(bp->bio_dev);
1162
1163	/* ioctl is the only supported operation of the control device */
1164	if (TARG_IS_CONTROL_DEV(unit)) {
1165		bp->bio_error = EINVAL;
1166		goto bad;
1167	}
1168
1169	periph = cam_extend_get(targperiphs, unit);
1170	if (periph == NULL) {
1171		bp->bio_error = ENXIO;
1172		goto bad;
1173	}
1174	softc = (struct targ_softc *)periph->softc;
1175
1176	/*
1177	 * Mask interrupts so that the device cannot be invalidated until
1178	 * after we are in the queue.  Otherwise, we might not properly
1179	 * clean up one of the buffers.
1180	 */
1181	s = splbio();
1182
1183	/*
1184	 * If there is an exception pending, error out
1185	 */
1186	if (softc->state != TARG_STATE_NORMAL) {
1187		splx(s);
1188		if (softc->state == TARG_STATE_EXCEPTION
1189		 && (softc->exceptions & TARG_EXCEPT_DEVICE_INVALID) == 0)
1190			bp->bio_error = EBUSY;
1191		else
1192			bp->bio_error = ENXIO;
1193		goto bad;
1194	}
1195
1196	/*
1197	 * Place it in the queue of buffers available for either
1198	 * SEND or RECEIVE commands.
1199	 *
1200	 */
1201	bp->bio_resid = bp->bio_bcount;
1202	if (bp->bio_cmd == BIO_READ) {
1203		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1204			  ("Queued a SEND buffer\n"));
1205		bioq_insert_tail(&softc->snd_bio_queue, bp);
1206	} else {
1207		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1208			  ("Queued a RECEIVE buffer\n"));
1209		bioq_insert_tail(&softc->rcv_bio_queue, bp);
1210	}
1211
1212	splx(s);
1213
1214	/*
1215	 * Attempt to use the new buffer to service any pending
1216	 * target commands.
1217	 */
1218	targrunqueue(periph, softc);
1219
1220	return;
1221bad:
1222	bp->bio_flags |= BIO_ERROR;
1223
1224	/*
1225	 * Correctly set the buf to indicate a completed xfer
1226	 */
1227	bp->bio_resid = bp->bio_bcount;
1228	biodone(bp);
1229}
1230
/*
 * Pair the oldest queued user buffer (or a pending EOF request) with
 * the oldest matching initiator command (SEND or RECEIVE ATIO), move
 * the pair onto the work queue, and schedule the periph so targstart()
 * can build a CTIO for it.  Called whenever a new buffer or ATIO
 * becomes available.  Runs at splbio() throughout.
 */
static void
targrunqueue(struct cam_periph *periph, struct targ_softc *softc)
{
	struct  ccb_queue *pending_queue;
	struct	ccb_accept_tio *atio;
	struct	bio_queue_head *bioq;
	struct	bio *bp;
	struct	targ_cmd_desc *desc;
	struct	ccb_hdr *ccbh;
	int	s;

	s = splbio();
	pending_queue = NULL;
	bioq = NULL;
	ccbh = NULL;
	/* Only run one request at a time to maintain data ordering. */
	if (softc->state != TARG_STATE_NORMAL
	 || TAILQ_FIRST(&softc->work_queue) != NULL
	 || TAILQ_FIRST(&softc->pending_queue) != NULL) {
		splx(s);
		return;
	}

	/*
	 * A SEND ATIO can be serviced when either a read buffer is
	 * queued or a SEND EOF was requested; RECEIVE is symmetric
	 * below with write buffers.
	 */
	if (((bp = bioq_first(&softc->snd_bio_queue)) != NULL
	  || (softc->flags & TARG_FLAG_SEND_EOF) != 0)
	 && (ccbh = TAILQ_FIRST(&softc->snd_ccb_queue)) != NULL) {

		if (bp == NULL)
			softc->flags &= ~TARG_FLAG_SEND_EOF;
		else {
			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
				  ("De-Queued a SEND buffer %ld\n",
				   bp->bio_bcount));
		}
		bioq = &softc->snd_bio_queue;
		pending_queue = &softc->snd_ccb_queue;
	} else if (((bp = bioq_first(&softc->rcv_bio_queue)) != NULL
	  	 || (softc->flags & TARG_FLAG_RECEIVE_EOF) != 0)
		&& (ccbh = TAILQ_FIRST(&softc->rcv_ccb_queue)) != NULL) {

		if (bp == NULL)
			softc->flags &= ~TARG_FLAG_RECEIVE_EOF;
		else {
			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
				  ("De-Queued a RECEIVE buffer %ld\n",
				   bp->bio_bcount));
		}
		bioq = &softc->rcv_bio_queue;
		pending_queue = &softc->rcv_ccb_queue;
	}

	if (pending_queue != NULL) {
		/* Process a request */
		atio = (struct ccb_accept_tio *)ccbh;
		TAILQ_REMOVE(pending_queue, ccbh, periph_links.tqe);
		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
		desc->bp = bp;
		if (bp == NULL) {
			/* EOF: complete the command with no data phase. */
			desc->data = NULL;
			desc->data_increment = 0;
			desc->data_resid = 0;
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_NONE;
		} else {
			/*
			 * Point at the unconsumed portion of the user
			 * buffer and cap the transfer at whichever of
			 * the command or the buffer has less remaining.
			 */
			bioq_remove(bioq, bp);
			desc->data = &bp->bio_data[bp->bio_bcount - bp->bio_resid];
			desc->data_increment =
			    MIN(desc->data_resid, bp->bio_resid);
		}
		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
			  ("Buffer command: data %p: datacnt %d\n",
			   desc->data, desc->data_increment));
		TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
				  periph_links.tqe);
	}
	/* Schedule at higher priority when disconnection is disallowed. */
	atio = (struct ccb_accept_tio *)TAILQ_FIRST(&softc->work_queue);
	if (atio != NULL) {
		int priority;

		priority = (atio->ccb_h.flags & CAM_DIS_DISCONNECT) ? 0 : 1;
		splx(s);
		xpt_schedule(periph, priority);
	} else
		splx(s);
}
1317
/*
 * CCB-available callback invoked by the XPT after xpt_schedule().
 * Either hands the fresh CCB to a waiter that requested one via
 * periph->immediate_priority, or fills it in as a CTIO for the ATIO
 * at the head of the work queue and dispatches it to the SIM.
 */
static void
targstart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct targ_softc *softc;
	struct ccb_hdr *ccbh;
	struct ccb_accept_tio *atio;
	struct targ_cmd_desc *desc;
	struct ccb_scsiio *csio;
	targ_ccb_flags flags;
	int    s;

	softc = (struct targ_softc *)periph->softc;

	s = splbio();
	ccbh = TAILQ_FIRST(&softc->work_queue);
	if (periph->immediate_priority <= periph->pinfo.priority) {
		/* Someone is waiting for an immediate CCB; hand it over. */
		start_ccb->ccb_h.ccb_flags = TARG_CCB_WAITING;
		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
				  periph_links.sle);
		periph->immediate_priority = CAM_PRIORITY_NONE;
		splx(s);
		wakeup(&periph->ccb_list);
	} else if (ccbh == NULL) {
		/* Nothing to do; give the CCB back. */
		splx(s);
		xpt_release_ccb(start_ccb);
	} else {
		TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
		splx(s);
		atio = (struct ccb_accept_tio*)ccbh;
		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;

		/* Is this a tagged request? */
		flags = atio->ccb_h.flags & (CAM_TAG_ACTION_VALID|CAM_DIR_MASK);

		/*
		 * If we are done with the transaction, tell the
		 * controller to send status and perform a CMD_CMPLT.
		 */
		if (desc->data_resid == desc->data_increment)
			flags |= CAM_SEND_STATUS;

		csio = &start_ccb->csio;
		cam_fill_ctio(csio,
			      /*retries*/2,
			      targdone,
			      flags,
			      (flags & CAM_TAG_ACTION_VALID)?
				MSG_SIMPLE_Q_TAG : 0,
			      atio->tag_id,
			      atio->init_id,
			      desc->status,
			      /*data_ptr*/desc->data_increment == 0
					  ? NULL : desc->data,
			      /*dxfer_len*/desc->data_increment,
			      /*timeout*/desc->timeout);

		/*
		 * If we are reporting a CHECK CONDITION, attach the
		 * stored sense data for this initiator to the CTIO.
		 */
		if ((flags & CAM_SEND_STATUS) != 0
		 && (desc->status == SCSI_STATUS_CHECK_COND
		  || desc->status == SCSI_STATUS_CMD_TERMINATED)) {
			struct initiator_state *istate;

			istate = &softc->istate[atio->init_id];
			csio->sense_len = istate->sense_data.extra_len
					+ offsetof(struct scsi_sense_data,
						   extra_len);
			bcopy(&istate->sense_data, &csio->sense_data,
			      csio->sense_len);
			csio->ccb_h.flags |= CAM_SEND_SENSE;
		} else {
			csio->sense_len = 0;
		}

		start_ccb->ccb_h.ccb_flags = TARG_CCB_NONE;
		start_ccb->ccb_h.ccb_atio = atio;
		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
			  ("Sending a CTIO (flags 0x%x)\n", csio->ccb_h.flags));
		TAILQ_INSERT_TAIL(&softc->pending_queue, &csio->ccb_h,
				  periph_links.tqe);
		xpt_action(start_ccb);
		/*
		 * If the queue was frozen waiting for the response
		 * to this ATIO (for instance disconnection was disallowed),
		 * then release it now that our response has been queued.
		 */
		if ((atio->ccb_h.flags & CAM_DEV_QFRZN) != 0) {
			cam_release_devq(periph->path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
			atio->ccb_h.flags &= ~CAM_DEV_QFRZN;
		}
		s = splbio();
		ccbh = TAILQ_FIRST(&softc->work_queue);
		splx(s);
	}
	/* More work arrived while we ran; keep the pipeline moving. */
	if (ccbh != NULL)
		targrunqueue(periph, softc);
}
1417
1418static void
1419targdone(struct cam_periph *periph, union ccb *done_ccb)
1420{
1421	struct targ_softc *softc;
1422
1423	softc = (struct targ_softc *)periph->softc;
1424
1425	if (done_ccb->ccb_h.ccb_flags == TARG_CCB_WAITING) {
1426		/* Caller will release the CCB */
1427		wakeup(&done_ccb->ccb_h.cbfcnp);
1428		return;
1429	}
1430
1431	CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1432		("targdone %x\n", done_ccb->ccb_h.func_code));
1433
1434	CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1435		("targdone %x\n", done_ccb->ccb_h.func_code));
1436
1437	switch (done_ccb->ccb_h.func_code) {
1438	case XPT_ACCEPT_TARGET_IO:
1439	{
1440		struct ccb_accept_tio *atio;
1441		struct targ_cmd_desc *descr;
1442		struct initiator_state *istate;
1443		u_int8_t *cdb;
1444		int priority;
1445
1446		atio = &done_ccb->atio;
1447		descr = (struct targ_cmd_desc*)atio->ccb_h.ccb_descr;
1448		istate = &softc->istate[atio->init_id];
1449		cdb = atio->cdb_io.cdb_bytes;
1450		if (softc->state == TARG_STATE_TEARDOWN
1451		 || atio->ccb_h.status == CAM_REQ_ABORTED) {
1452			freedescr(descr);
1453			free(done_ccb, M_DEVBUF);
1454			return;
1455		}
1456
1457		if (atio->sense_len != 0) {
1458			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1459				  ("ATIO with sense_len\n"));
1460
1461			/*
1462			 * We had an error in the reception of
1463			 * this command.  Immediately issue a CA.
1464			 */
1465			atio->ccb_h.flags &= ~CAM_DIR_MASK;
1466			atio->ccb_h.flags |= CAM_DIR_NONE;
1467			descr->data_resid = 0;
1468			descr->data_increment = 0;
1469			descr->timeout = 5 * 1000;
1470			descr->status = SCSI_STATUS_CHECK_COND;
1471			copy_sense(softc, istate, (u_int8_t *)&atio->sense_data,
1472				   atio->sense_len);
1473			set_ca_condition(periph, atio->init_id, CA_CMD_SENSE);
1474		} else if (istate->pending_ca == 0
1475			&& istate->pending_ua != 0
1476			&& cdb[0] != INQUIRY) {
1477
1478			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1479			    ("pending_ca %d pending_ua %dn", istate->pending_ca,			    istate->pending_ua));
1480
1481			/* Pending UA, tell initiator */
1482			/* Direction is always relative to the initator */
1483			atio->ccb_h.flags &= ~CAM_DIR_MASK;
1484			atio->ccb_h.flags |= CAM_DIR_NONE;
1485			descr->data_resid = 0;
1486			descr->data_increment = 0;
1487			descr->timeout = 5 * 1000;
1488			descr->status = SCSI_STATUS_CHECK_COND;
1489			fill_sense(softc, atio->init_id,
1490				   SSD_CURRENT_ERROR, SSD_KEY_UNIT_ATTENTION,
1491				   0x29,
1492				   istate->pending_ua == UA_POWER_ON ? 1 : 2);
1493			set_ca_condition(periph, atio->init_id, CA_UNIT_ATTN);
1494		} else {
1495			/*
1496			 * Save the current CA and UA status so
1497			 * they can be used by this command.
1498			 */
1499			ua_types pending_ua;
1500			ca_types pending_ca;
1501
1502			pending_ua = istate->pending_ua;
1503			pending_ca = istate->pending_ca;
1504
1505			/*
1506			 * As per the SCSI2 spec, any command that occurs
1507			 * after a CA is reported, clears the CA.  We must
1508			 * also clear the UA condition, if any, that caused
1509			 * the CA to occur assuming the UA is not for a
1510			 * persistant condition.
1511			 */
1512			istate->pending_ca = CA_NONE;
1513			if (pending_ca == CA_UNIT_ATTN)
1514				istate->pending_ua = UA_NONE;
1515
1516			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1517				("cdb[..] = %x %x %x %x %x %x\n",
1518				  cdb[0], cdb[1], cdb[2], cdb[3],
1519				  cdb[4], cdb[5]));
1520			/*
1521			 * Determine the type of incoming command and
1522			 * setup our buffer for a response.
1523			 */
1524			switch (cdb[0]) {
1525			case INQUIRY:
1526			{
1527				struct scsi_inquiry *inq;
1528				struct scsi_sense_data *sense;
1529
1530				inq = (struct scsi_inquiry *)cdb;
1531				sense = &istate->sense_data;
1532				descr->status = SCSI_STATUS_OK;
1533				CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1534					  ("Saw an inquiry!\n"));
1535				/*
1536				 * Validate the command.  We don't
1537				 * support any VPD pages, so complain
1538				 * if EVPD is set.
1539				 */
1540				if ((inq->byte2 & SI_EVPD) != 0
1541				 || inq->page_code != 0) {
1542					atio->ccb_h.flags &= ~CAM_DIR_MASK;
1543					atio->ccb_h.flags |= CAM_DIR_NONE;
1544					descr->data_resid = 0;
1545					descr->data_increment = 0;
1546					descr->timeout = 5 * 1000;
1547					descr->status = SCSI_STATUS_CHECK_COND;
1548					fill_sense(softc, atio->init_id,
1549						   SSD_CURRENT_ERROR,
1550						   SSD_KEY_ILLEGAL_REQUEST,
1551						   /*asc*/0x24, /*ascq*/0x00);
1552					sense->extra_len =
1553						offsetof(struct scsi_sense_data,
1554							 extra_bytes)
1555					      - offsetof(struct scsi_sense_data,
1556							 extra_len);
1557					set_ca_condition(periph, atio->init_id,
1558							 CA_CMD_SENSE);
1559				}
1560
1561				if ((inq->byte2 & SI_EVPD) != 0) {
1562					sense->sense_key_spec[0] =
1563					    SSD_SCS_VALID|SSD_FIELDPTR_CMD
1564					   |SSD_BITPTR_VALID| /*bit value*/1;
1565					sense->sense_key_spec[1] = 0;
1566					sense->sense_key_spec[2] =
1567					    offsetof(struct scsi_inquiry,
1568						     byte2);
1569				} else if (inq->page_code != 0) {
1570					sense->sense_key_spec[0] =
1571					    SSD_SCS_VALID|SSD_FIELDPTR_CMD;
1572					sense->sense_key_spec[1] = 0;
1573					sense->sense_key_spec[2] =
1574					    offsetof(struct scsi_inquiry,
1575						     page_code);
1576				}
1577				if (descr->status == SCSI_STATUS_CHECK_COND)
1578					break;
1579
1580				/*
1581				 * Direction is always relative
1582				 * to the initator.
1583				 */
1584				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1585				atio->ccb_h.flags |= CAM_DIR_IN;
1586				descr->data = softc->inq_data;
1587				descr->data_resid =
1588				    MIN(softc->inq_data_len,
1589					SCSI_CDB6_LEN(inq->length));
1590				descr->data_increment = descr->data_resid;
1591				descr->timeout = 5 * 1000;
1592				break;
1593			}
1594			case TEST_UNIT_READY:
1595				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1596				atio->ccb_h.flags |= CAM_DIR_NONE;
1597				descr->data_resid = 0;
1598				descr->data_increment = 0;
1599				descr->timeout = 5 * 1000;
1600				descr->status = SCSI_STATUS_OK;
1601				break;
1602			case REQUEST_SENSE:
1603			{
1604				struct scsi_request_sense *rsense;
1605				struct scsi_sense_data *sense;
1606
1607				rsense = (struct scsi_request_sense *)cdb;
1608				sense = &istate->sense_data;
1609				if (pending_ca == 0) {
1610					fill_sense(softc, atio->init_id,
1611						   SSD_CURRENT_ERROR,
1612						   SSD_KEY_NO_SENSE, 0x00,
1613						   0x00);
1614					CAM_DEBUG(periph->path,
1615						  CAM_DEBUG_PERIPH,
1616						  ("No pending CA!\n"));
1617				}
1618				/*
1619				 * Direction is always relative
1620				 * to the initator.
1621				 */
1622				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1623				atio->ccb_h.flags |= CAM_DIR_IN;
1624				descr->data = sense;
1625				descr->data_resid =
1626			 		offsetof(struct scsi_sense_data,
1627						 extra_len)
1628				      + sense->extra_len;
1629				descr->data_resid =
1630				    MIN(descr->data_resid,
1631					SCSI_CDB6_LEN(rsense->length));
1632				descr->data_increment = descr->data_resid;
1633				descr->timeout = 5 * 1000;
1634				descr->status = SCSI_STATUS_OK;
1635				break;
1636			}
1637			case RECEIVE:
1638			case SEND:
1639			{
1640				struct scsi_send_receive *sr;
1641
1642				sr = (struct scsi_send_receive *)cdb;
1643
1644				/*
1645				 * Direction is always relative
1646				 * to the initator.
1647				 */
1648				atio->ccb_h.flags &= ~CAM_DIR_MASK;
1649				descr->data_resid = scsi_3btoul(sr->xfer_len);
1650				descr->timeout = 5 * 1000;
1651				descr->status = SCSI_STATUS_OK;
1652				if (cdb[0] == SEND) {
1653					atio->ccb_h.flags |= CAM_DIR_OUT;
1654					CAM_DEBUG(periph->path,
1655						  CAM_DEBUG_PERIPH,
1656						  ("Saw a SEND!\n"));
1657					atio->ccb_h.flags |= CAM_DIR_OUT;
1658					TAILQ_INSERT_TAIL(&softc->snd_ccb_queue,
1659							  &atio->ccb_h,
1660							  periph_links.tqe);
1661					selwakeup(&softc->snd_select);
1662				} else {
1663					atio->ccb_h.flags |= CAM_DIR_IN;
1664					CAM_DEBUG(periph->path,
1665						  CAM_DEBUG_PERIPH,
1666						  ("Saw a RECEIVE!\n"));
1667					TAILQ_INSERT_TAIL(&softc->rcv_ccb_queue,
1668							  &atio->ccb_h,
1669							  periph_links.tqe);
1670					selwakeup(&softc->rcv_select);
1671				}
1672				/*
1673				 * Attempt to satisfy this request with
1674				 * a user buffer.
1675				 */
1676				targrunqueue(periph, softc);
1677				return;
1678			}
1679			default:
1680				/*
1681				 * Queue for consumption by our userland
1682				 * counterpart and  transition to the exception
1683				 * state.
1684				 */
1685				TAILQ_INSERT_TAIL(&softc->unknown_atio_queue,
1686						  &atio->ccb_h,
1687						  periph_links.tqe);
1688				softc->exceptions |= TARG_EXCEPT_UNKNOWN_ATIO;
1689				targfireexception(periph, softc);
1690				return;
1691			}
1692		}
1693
1694		/* Queue us up to receive a Continue Target I/O ccb. */
1695		if ((atio->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) {
1696			TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
1697					  periph_links.tqe);
1698			priority = 0;
1699		} else {
1700			TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
1701					  periph_links.tqe);
1702			priority = 1;
1703		}
1704		xpt_schedule(periph, priority);
1705		break;
1706	}
1707	case XPT_CONT_TARGET_IO:
1708	{
1709		struct ccb_scsiio *csio;
1710		struct ccb_accept_tio *atio;
1711		struct targ_cmd_desc *desc;
1712		struct bio *bp;
1713		int    error;
1714
1715		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1716			  ("Received completed CTIO\n"));
1717		csio = &done_ccb->csio;
1718		atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio;
1719		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
1720
1721		TAILQ_REMOVE(&softc->pending_queue, &done_ccb->ccb_h,
1722			     periph_links.tqe);
1723
1724		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1725			printf("CCB with error %x\n", done_ccb->ccb_h.status);
1726			error = targerror(done_ccb, 0, 0);
1727			if (error == ERESTART)
1728				break;
1729			/*
1730			 * Right now we don't need to do anything
1731			 * prior to unfreezing the queue...
1732			 */
1733			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1734				printf("Releasing Queue\n");
1735				cam_release_devq(done_ccb->ccb_h.path,
1736						 /*relsim_flags*/0,
1737						 /*reduction*/0,
1738						 /*timeout*/0,
1739						 /*getcount_only*/0);
1740			}
1741		} else
1742			error = 0;
1743
1744		/*
1745		 * If we shipped back sense data when completing
1746		 * this command, clear the pending CA for it.
1747		 */
1748		if (done_ccb->ccb_h.status & CAM_SENT_SENSE) {
1749			struct initiator_state *istate;
1750
1751			istate = &softc->istate[csio->init_id];
1752			if (istate->pending_ca == CA_UNIT_ATTN)
1753				istate->pending_ua = UA_NONE;
1754			istate->pending_ca = CA_NONE;
1755			softc->istate[csio->init_id].pending_ca = CA_NONE;
1756			done_ccb->ccb_h.status &= ~CAM_SENT_SENSE;
1757			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1758				  ("Sent Sense\n"));
1759		}
1760		done_ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1761
1762		desc->data_increment -= csio->resid;
1763		desc->data_resid -= desc->data_increment;
1764		if ((bp = desc->bp) != NULL) {
1765
1766			bp->bio_resid -= desc->data_increment;
1767			bp->bio_error = error;
1768
1769			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1770				  ("Buffer I/O Completed - Resid %ld:%d\n",
1771				   bp->bio_resid, desc->data_resid));
1772			/*
1773			 * Send the buffer back to the client if
1774			 * either the command has completed or all
1775			 * buffer space has been consumed.
1776			 */
1777			if (desc->data_resid == 0
1778			 || bp->bio_resid == 0
1779			 || error != 0) {
1780				if (bp->bio_resid != 0)
1781					/* Short transfer */
1782					bp->bio_flags |= BIO_ERROR;
1783
1784				CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1785					  ("Completing a buffer\n"));
1786				biodone(bp);
1787				desc->bp = NULL;
1788			}
1789		}
1790
1791		xpt_release_ccb(done_ccb);
1792		if (softc->state != TARG_STATE_TEARDOWN) {
1793
1794			if (desc->data_resid == 0) {
1795				/*
1796				 * Send the original accept TIO back to the
1797				 * controller to handle more work.
1798				 */
1799				CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
1800					  ("Returning ATIO to target SIM\n"));
1801				atio->ccb_h.ccb_flags = TARG_CCB_NONE;
1802				xpt_action((union ccb *)atio);
1803				break;
1804			}
1805
1806			/* Queue us up for another buffer */
1807			if (atio->cdb_io.cdb_bytes[0] == SEND) {
1808				if (desc->bp != NULL)
1809					TAILQ_INSERT_HEAD(
1810						&softc->snd_bio_queue.queue,
1811						bp, bio_queue);
1812				TAILQ_INSERT_HEAD(&softc->snd_ccb_queue,
1813						  &atio->ccb_h,
1814						  periph_links.tqe);
1815			} else {
1816				if (desc->bp != NULL)
1817					TAILQ_INSERT_HEAD(
1818						&softc->rcv_bio_queue.queue,
1819						bp, bio_queue);
1820				TAILQ_INSERT_HEAD(&softc->rcv_ccb_queue,
1821						  &atio->ccb_h,
1822						  periph_links.tqe);
1823			}
1824			desc->bp = NULL;
1825			targrunqueue(periph, softc);
1826		} else {
1827			if (desc->bp != NULL) {
1828				bp->bio_flags |= BIO_ERROR;
1829				bp->bio_error = ENXIO;
1830				biodone(bp);
1831			}
1832			freedescr(desc);
1833			free(atio, M_DEVBUF);
1834		}
1835		break;
1836	}
1837	case XPT_IMMED_NOTIFY:
1838	{
1839		int frozen;
1840
1841		frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
1842		if (softc->state == TARG_STATE_TEARDOWN) {
1843			SLIST_REMOVE(&softc->immed_notify_slist,
1844				     &done_ccb->ccb_h, ccb_hdr,
1845				     periph_links.sle);
1846			free(done_ccb, M_DEVBUF);
1847		} else if (done_ccb->ccb_h.status == CAM_REQ_ABORTED) {
1848			free(done_ccb, M_DEVBUF);
1849		} else {
1850			printf("Saw event %x:%x\n", done_ccb->ccb_h.status,
1851			       done_ccb->cin.message_args[0]);
1852			/* Process error condition. */
1853			targinoterror(periph, softc, &done_ccb->cin);
1854
1855			/* Requeue for another immediate event */
1856			xpt_action(done_ccb);
1857		}
1858		if (frozen != 0)
1859			cam_release_devq(periph->path,
1860					 /*relsim_flags*/0,
1861					 /*opening reduction*/0,
1862					 /*timeout*/0,
1863					 /*getcount_only*/0);
1864		break;
1865	}
1866	case XPT_DEBUG:
1867		wakeup(&done_ccb->ccb_h.cbfcnp);
1868		break;
1869	default:
1870		panic("targdone: Impossible xpt opcode %x encountered.",
1871		      done_ccb->ccb_h.func_code);
1872		/* NOTREACHED */
1873		break;
1874	}
1875}
1876
1877/*
1878 * Transition to the exception state and notify our symbiotic
1879 * userland process of the change.
1880 */
1881static void
1882targfireexception(struct cam_periph *periph, struct targ_softc *softc)
1883{
1884	/*
1885	 * return all pending buffers with short read/write status so our
1886	 * process unblocks, and do a selwakeup on any process queued
1887	 * waiting for reads or writes.  When the selwakeup is performed,
1888	 * the waking process will wakeup, call our poll routine again,
1889	 * and pick up the exception.
1890	 */
1891	struct bio *bp;
1892
1893	if (softc->state != TARG_STATE_NORMAL)
1894		/* Already either tearing down or in exception state */
1895		return;
1896
1897	softc->state = TARG_STATE_EXCEPTION;
1898
1899	while ((bp = bioq_first(&softc->snd_bio_queue)) != NULL) {
1900		bioq_remove(&softc->snd_bio_queue, bp);
1901		bp->bio_flags |= BIO_ERROR;
1902		biodone(bp);
1903	}
1904
1905	while ((bp = bioq_first(&softc->rcv_bio_queue)) != NULL) {
1906		bioq_remove(&softc->snd_bio_queue, bp);
1907		bp->bio_flags |= BIO_ERROR;
1908		biodone(bp);
1909	}
1910
1911	selwakeup(&softc->snd_select);
1912	selwakeup(&softc->rcv_select);
1913}
1914
1915static void
1916targinoterror(struct cam_periph *periph, struct targ_softc *softc,
1917	      struct ccb_immed_notify *inot)
1918{
1919	cam_status status;
1920	int sense;
1921
1922	status = inot->ccb_h.status;
1923	sense = (status & CAM_AUTOSNS_VALID) != 0;
1924	status &= CAM_STATUS_MASK;
1925	switch (status) {
1926	case CAM_SCSI_BUS_RESET:
1927		set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD,
1928					UA_BUS_RESET);
1929		abort_pending_transactions(periph,
1930					   /*init_id*/CAM_TARGET_WILDCARD,
1931					   TARG_TAG_WILDCARD, EINTR,
1932					   /*to_held_queue*/FALSE);
1933		softc->exceptions |= TARG_EXCEPT_BUS_RESET_SEEN;
1934		targfireexception(periph, softc);
1935		break;
1936	case CAM_BDR_SENT:
1937		set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD,
1938					UA_BDR);
1939		abort_pending_transactions(periph, CAM_TARGET_WILDCARD,
1940					   TARG_TAG_WILDCARD, EINTR,
1941					   /*to_held_queue*/FALSE);
1942		softc->exceptions |= TARG_EXCEPT_BDR_RECEIVED;
1943		targfireexception(periph, softc);
1944		break;
1945	case CAM_MESSAGE_RECV:
1946		switch (inot->message_args[0]) {
1947		case MSG_INITIATOR_DET_ERR:
1948			break;
1949		case MSG_ABORT:
1950			break;
1951		case MSG_BUS_DEV_RESET:
1952			break;
1953		case MSG_ABORT_TAG:
1954			break;
1955		case MSG_CLEAR_QUEUE:
1956			break;
1957		case MSG_TERM_IO_PROC:
1958			break;
1959		default:
1960			break;
1961		}
1962		break;
1963	default:
1964		break;
1965	}
1966}
1967
1968static int
1969targerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1970{
1971	struct cam_periph *periph;
1972	struct targ_softc *softc;
1973	struct ccb_scsiio *csio;
1974	struct initiator_state *istate;
1975	cam_status status;
1976	int frozen;
1977	int sense;
1978	int error;
1979	int on_held_queue;
1980
1981	periph = xpt_path_periph(ccb->ccb_h.path);
1982	softc = (struct targ_softc *)periph->softc;
1983	status = ccb->ccb_h.status;
1984	sense = (status & CAM_AUTOSNS_VALID) != 0;
1985	frozen = (status & CAM_DEV_QFRZN) != 0;
1986	status &= CAM_STATUS_MASK;
1987	on_held_queue = FALSE;
1988	csio = &ccb->csio;
1989	istate = &softc->istate[csio->init_id];
1990	switch (status) {
1991	case CAM_REQ_ABORTED:
1992		if ((ccb->ccb_h.ccb_flags & TARG_CCB_ABORT_TO_HELDQ) != 0) {
1993
1994			/*
1995			 * Place this CCB into the initiators
1996			 * 'held' queue until the pending CA is cleared.
1997			 * If there is no CA pending, reissue immediately.
1998			 */
1999			if (istate->pending_ca == 0) {
2000				ccb->ccb_h.ccb_flags = TARG_CCB_NONE;
2001				xpt_action(ccb);
2002			} else {
2003				ccb->ccb_h.ccb_flags = TARG_CCB_HELDQ;
2004				TAILQ_INSERT_TAIL(&softc->pending_queue,
2005						  &ccb->ccb_h,
2006						  periph_links.tqe);
2007			}
2008			/* The command will be retried at a later time. */
2009			on_held_queue = TRUE;
2010			error = ERESTART;
2011			break;
2012		}
2013		/* FALLTHROUGH */
2014	case CAM_SCSI_BUS_RESET:
2015	case CAM_BDR_SENT:
2016	case CAM_REQ_TERMIO:
2017	case CAM_CMD_TIMEOUT:
2018		/* Assume we did not send any data */
2019		csio->resid = csio->dxfer_len;
2020		error = EIO;
2021		break;
2022	case CAM_SEL_TIMEOUT:
2023		if (ccb->ccb_h.retry_count > 0) {
2024			ccb->ccb_h.retry_count--;
2025			error = ERESTART;
2026		} else {
2027			/* "Select or reselect failure" */
2028			csio->resid = csio->dxfer_len;
2029			fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
2030				   SSD_KEY_HARDWARE_ERROR, 0x45, 0x00);
2031			set_ca_condition(periph, csio->init_id, CA_CMD_SENSE);
2032			error = EIO;
2033		}
2034		break;
2035	case CAM_UNCOR_PARITY:
2036		/* "SCSI parity error" */
2037		fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
2038			   SSD_KEY_HARDWARE_ERROR, 0x47, 0x00);
2039		set_ca_condition(periph, csio->init_id,
2040					       CA_CMD_SENSE);
2041		csio->resid = csio->dxfer_len;
2042		error = EIO;
2043		break;
2044	case CAM_NO_HBA:
2045		csio->resid = csio->dxfer_len;
2046		error = ENXIO;
2047		break;
2048	case CAM_SEQUENCE_FAIL:
2049		if (sense != 0) {
2050			copy_sense(softc, istate, (u_int8_t *)&csio->sense_data,
2051				   csio->sense_len);
2052			set_ca_condition(periph,
2053						       csio->init_id,
2054						       CA_CMD_SENSE);
2055		}
2056		csio->resid = csio->dxfer_len;
2057		error = EIO;
2058		break;
2059	case CAM_IDE:
2060		/* "Initiator detected error message received" */
2061		fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR,
2062			   SSD_KEY_HARDWARE_ERROR, 0x48, 0x00);
2063		set_ca_condition(periph, csio->init_id,
2064					       CA_CMD_SENSE);
2065		csio->resid = csio->dxfer_len;
2066		error = EIO;
2067		break;
2068	case CAM_REQUEUE_REQ:
2069		printf("Requeue Request!\n");
2070		error = ERESTART;
2071		break;
2072	default:
2073		csio->resid = csio->dxfer_len;
2074		error = EIO;
2075		panic("targerror: Unexpected status %x encounterd", status);
2076		/* NOTREACHED */
2077	}
2078
2079	if (error == ERESTART || error == 0) {
2080		/* Clear the QFRZN flag as we will release the queue */
2081		if (frozen != 0)
2082			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
2083
2084		if (error == ERESTART && !on_held_queue)
2085			xpt_action(ccb);
2086
2087		if (frozen != 0)
2088			cam_release_devq(ccb->ccb_h.path,
2089					 /*relsim_flags*/0,
2090					 /*opening reduction*/0,
2091					 /*timeout*/0,
2092					 /*getcount_only*/0);
2093	}
2094	return (error);
2095}
2096
2097static struct targ_cmd_desc*
2098allocdescr()
2099{
2100	struct targ_cmd_desc* descr;
2101
2102	/* Allocate the targ_descr structure */
2103	descr = (struct targ_cmd_desc *)malloc(sizeof(*descr),
2104					       M_DEVBUF, M_NOWAIT);
2105	if (descr == NULL)
2106		return (NULL);
2107
2108	bzero(descr, sizeof(*descr));
2109
2110	/* Allocate buffer backing store */
2111	descr->backing_store = malloc(MAX_BUF_SIZE, M_DEVBUF, M_NOWAIT);
2112	if (descr->backing_store == NULL) {
2113		free(descr, M_DEVBUF);
2114		return (NULL);
2115	}
2116	descr->max_size = MAX_BUF_SIZE;
2117	return (descr);
2118}
2119
2120static void
2121freedescr(struct targ_cmd_desc *descr)
2122{
2123	free(descr->backing_store, M_DEVBUF);
2124	free(descr, M_DEVBUF);
2125}
2126
2127static void
2128fill_sense(struct targ_softc *softc, u_int initiator_id, u_int error_code,
2129	   u_int sense_key, u_int asc, u_int ascq)
2130{
2131	struct initiator_state *istate;
2132	struct scsi_sense_data *sense;
2133
2134	istate = &softc->istate[initiator_id];
2135	sense = &istate->sense_data;
2136	bzero(sense, sizeof(*sense));
2137	sense->error_code = error_code;
2138	sense->flags = sense_key;
2139	sense->add_sense_code = asc;
2140	sense->add_sense_code_qual = ascq;
2141
2142	sense->extra_len = offsetof(struct scsi_sense_data, fru)
2143			 - offsetof(struct scsi_sense_data, extra_len);
2144}
2145
2146static void
2147copy_sense(struct targ_softc *softc, struct initiator_state *istate,
2148	   u_int8_t *sense_buffer, size_t sense_len)
2149{
2150	struct scsi_sense_data *sense;
2151	size_t copylen;
2152
2153	sense = &istate->sense_data;
2154	copylen = sizeof(*sense);
2155	if (copylen > sense_len)
2156		copylen = sense_len;
2157	bcopy(sense_buffer, sense, copylen);
2158}
2159
2160static void
2161set_unit_attention_cond(struct cam_periph *periph,
2162			u_int initiator_id, ua_types ua)
2163{
2164	int start;
2165	int end;
2166	struct targ_softc *softc;
2167
2168	softc = (struct targ_softc *)periph->softc;
2169	if (initiator_id == CAM_TARGET_WILDCARD) {
2170		start = 0;
2171		end = MAX_INITIATORS - 1;
2172	} else
2173		start = end = initiator_id;
2174
2175	while (start <= end) {
2176		softc->istate[start].pending_ua = ua;
2177		start++;
2178	}
2179}
2180
2181static void
2182set_ca_condition(struct cam_periph *periph, u_int initiator_id, ca_types ca)
2183{
2184	struct targ_softc *softc;
2185
2186	softc = (struct targ_softc *)periph->softc;
2187	softc->istate[initiator_id].pending_ca = ca;
2188	abort_pending_transactions(periph, initiator_id, TARG_TAG_WILDCARD,
2189				   /*errno*/0, /*to_held_queue*/TRUE);
2190}
2191
/*
 * Abort queued and in-flight transactions that match the given
 * initiator id (or CAM_TARGET_WILDCARD) and tag id (or
 * TARG_TAG_WILDCARD).
 *
 * Two phases: first, matching ATIOs sitting on our internal queues
 * (work, send, receive) are removed and either recycled back to the
 * controller or, during TARG_STATE_TEARDOWN, fully freed; any user
 * bio attached to them is failed with 'errno' (ENXIO on teardown).
 * Second, matching CTIOs on the pending queue are aborted through the
 * XPT with XPT_ABORT.
 *
 * When 'to_held_queue' is TRUE the ATIO queues are left alone (the
 * device queue is frozen, so nothing will be dispatched from them)
 * and matching CTIOs are instead flagged TARG_CCB_ABORT_TO_HELDQ so
 * their completion handler parks them on the held queue rather than
 * completing them.
 */
static void
abort_pending_transactions(struct cam_periph *periph, u_int initiator_id,
			   u_int tag_id, int errno, int to_held_queue)
{
	struct ccb_abort cab;
	struct ccb_queue *atio_queues[3];
	struct targ_softc *softc;
	struct ccb_hdr *ccbh;
	u_int i;

	softc = (struct targ_softc *)periph->softc;

	atio_queues[0] = &softc->work_queue;
	atio_queues[1] = &softc->snd_ccb_queue;
	atio_queues[2] = &softc->rcv_ccb_queue;

	/* First address the ATIOs awaiting resources */
	for (i = 0; i < (sizeof(atio_queues) / sizeof(*atio_queues)); i++) {
		struct ccb_queue *atio_queue;

		if (to_held_queue) {
			/*
			 * The device queue is frozen anyway, so there
			 * is nothing for us to do.
			 */
			continue;
		}
		atio_queue = atio_queues[i];
		ccbh = TAILQ_FIRST(atio_queue);
		while (ccbh != NULL) {
			struct ccb_accept_tio *atio;
			struct targ_cmd_desc *desc;

			atio = (struct ccb_accept_tio *)ccbh;
			desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
			/* Advance before a possible TAILQ_REMOVE below. */
			ccbh = TAILQ_NEXT(ccbh, periph_links.tqe);

			/* Only abort the CCBs that match */
			if ((atio->init_id != initiator_id
			  && initiator_id != CAM_TARGET_WILDCARD)
			 || (tag_id != TARG_TAG_WILDCARD
			  && ((atio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
			   || atio->tag_id != tag_id)))
				continue;

			TAILQ_REMOVE(atio_queue, &atio->ccb_h,
				     periph_links.tqe);

			CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
				  ("Aborting ATIO\n"));
			if (desc->bp != NULL) {
				/*
				 * Fail the user I/O attached to this ATIO.
				 * On teardown report ENXIO regardless of
				 * the caller-supplied errno.
				 */
				desc->bp->bio_flags |= BIO_ERROR;
				if (softc->state != TARG_STATE_TEARDOWN)
					desc->bp->bio_error = errno;
				else
					desc->bp->bio_error = ENXIO;
				biodone(desc->bp);
				desc->bp = NULL;
			}
			if (softc->state == TARG_STATE_TEARDOWN) {
				freedescr(desc);
				free(atio, M_DEVBUF);
			} else {
				/* Return the ATIO back to the controller */
				atio->ccb_h.ccb_flags = TARG_CCB_NONE;
				xpt_action((union ccb *)atio);
			}
		}
	}

	/* Now abort the CTIOs that are already in flight. */
	ccbh = TAILQ_FIRST(&softc->pending_queue);
	while (ccbh != NULL) {
		struct ccb_scsiio *csio;

		csio = (struct ccb_scsiio *)ccbh;
		/* Advance before a possible TAILQ_REMOVE below. */
		ccbh = TAILQ_NEXT(ccbh, periph_links.tqe);

		/* Only abort the CCBs that match */
		if ((csio->init_id != initiator_id
		  && initiator_id != CAM_TARGET_WILDCARD)
		 || (tag_id != TARG_TAG_WILDCARD
		  && ((csio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
		   || csio->tag_id != tag_id)))
			continue;

		CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH,
			  ("Aborting CTIO\n"));

		TAILQ_REMOVE(&softc->pending_queue, &csio->ccb_h,
			     periph_links.tqe);

		/* Ask the completion path to park this CCB, not finish it. */
		if (to_held_queue != 0)
			csio->ccb_h.ccb_flags |= TARG_CCB_ABORT_TO_HELDQ;
		xpt_setup_ccb(&cab.ccb_h, csio->ccb_h.path, /*priority*/1);
		cab.abort_ccb = (union ccb *)csio;
		xpt_action((union ccb *)&cab);
		if (cab.ccb_h.status != CAM_REQ_CMP) {
			xpt_print_path(cab.ccb_h.path);
			printf("Unable to abort CCB.  Status %x\n",
			       cab.ccb_h.status);
		}
	}
}
2295