/* scsi_ctl.c revision 315889 */
1/*-
2 * Copyright (c) 2008, 2009 Silicon Graphics International Corp.
3 * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions, and the following disclaimer,
11 *    without modification.
12 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13 *    substantially similar to the "NO WARRANTY" disclaimer below
14 *    ("Disclaimer") and any redistribution must be conditioned upon
15 *    including a substantially similar Disclaimer requirement for further
16 *    binary redistribution.
17 *
18 * NO WARRANTY
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
22 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
27 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
28 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGES.
30 *
31 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $
32 */
33/*
34 * Peripheral driver interface between CAM and CTL (CAM Target Layer).
35 *
36 * Author: Ken Merry <ken@FreeBSD.org>
37 */
38
39#include <sys/cdefs.h>
40__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/scsi_ctl.c 315889 2017-03-24 07:02:03Z mav $");
41
42#include <sys/param.h>
43#include <sys/queue.h>
44#include <sys/systm.h>
45#include <sys/kernel.h>
46#include <sys/lock.h>
47#include <sys/mutex.h>
48#include <sys/condvar.h>
49#include <sys/malloc.h>
50#include <sys/bus.h>
51#include <sys/endian.h>
52#include <sys/sbuf.h>
53#include <sys/sysctl.h>
54#include <sys/types.h>
55#include <sys/systm.h>
56#include <sys/taskqueue.h>
57#include <machine/bus.h>
58
59#include <cam/cam.h>
60#include <cam/cam_ccb.h>
61#include <cam/cam_periph.h>
62#include <cam/cam_queue.h>
63#include <cam/cam_xpt_periph.h>
64#include <cam/cam_debug.h>
65#include <cam/cam_sim.h>
66#include <cam/cam_xpt.h>
67
68#include <cam/scsi/scsi_all.h>
69#include <cam/scsi/scsi_message.h>
70
71#include <cam/ctl/ctl_io.h>
72#include <cam/ctl/ctl.h>
73#include <cam/ctl/ctl_frontend.h>
74#include <cam/ctl/ctl_util.h>
75#include <cam/ctl/ctl_error.h>
76
/*
 * Per-bus (per-SIM) softc.  One is created for each target-capable CAM
 * path that registers; it is also registered with CTL as a port.
 */
struct ctlfe_softc {
	struct ctl_port	port;		/* CTL port registered for this bus. */
	path_id_t	path_id;	/* CAM path ID of the underlying SIM. */
	target_id_t	target_id;	/* Our (initiator) ID on the bus. */
	uint32_t	hba_misc;	/* hba_misc flags from the path inquiry. */
	u_int		maxio;		/* Max bytes per CCB data transfer. */
	struct cam_sim *sim;
	char		port_name[DEV_IDLEN];	/* Storage for port->port_name. */
	struct mtx	lun_softc_mtx;	/* Protects lun_softc_list. */
	STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list;
	STAILQ_ENTRY(ctlfe_softc) links;	/* On ctlfe_softc_list. */
};
89
/* Global list of per-bus softcs, protected by ctlfe_list_mtx. */
STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list;
struct mtx ctlfe_list_mtx;
static char ctlfe_mtx_desc[] = "ctlfelist";
93
/* Per-LUN flags. */
typedef enum {
	CTLFE_LUN_NONE		= 0x00,	/* No flags set. */
	CTLFE_LUN_WILDCARD	= 0x01	/* Wildcard (all-LUN) attachment. */
} ctlfe_lun_flags;
98
/*
 * Per-LUN state, hung off the cam_periph instantiated for each enabled
 * LUN (or wildcard) on a target-mode bus.
 */
struct ctlfe_lun_softc {
	struct ctlfe_softc *parent_softc;	/* Owning per-bus softc. */
	struct cam_periph *periph;		/* Backing CAM periph. */
	ctlfe_lun_flags flags;
	int	 ctios_sent;		/* Number of active CTIOs */
	int	 refcount;		/* Number of active xpt_action() */
	int	 atios_alloced;		/* Number of ATIOs not freed */
	int	 inots_alloced;		/* Number of INOTs not freed */
	struct task	refdrain_task;	/* Waits for refcount to drain. */
	STAILQ_HEAD(, ccb_hdr) work_queue;	/* ATIOs awaiting a CTIO. */
	LIST_HEAD(, ccb_hdr) atio_list;	/* List of ATIOs queued to SIM. */
	LIST_HEAD(, ccb_hdr) inot_list;	/* List of INOTs queued to SIM. */
	STAILQ_ENTRY(ctlfe_lun_softc) links;	/* On parent's lun_softc_list. */
};
113
/* Per-command flags (ctlfe_cmd_info.flags). */
typedef enum {
	CTLFE_CMD_NONE		= 0x00,	/* No flags set. */
	CTLFE_CMD_PIECEWISE	= 0x01	/* Transfer split across CTIOs. */
} ctlfe_cmd_flags;
118
/*
 * Per-command frontend state, stashed in the ctl_io private area
 * (PRIV_INFO).  Tracks progress through a piecewise data transfer.
 */
struct ctlfe_cmd_info {
	int cur_transfer_index;		/* Next CTL S/G entry to transfer. */
	size_t cur_transfer_off;	/* Byte offset within that entry. */
	ctlfe_cmd_flags flags;
	/*
	 * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16
	 * bytes on amd64.  So with 32 elements, this is 256 bytes on
	 * i386 and 512 bytes on amd64.
	 */
#define CTLFE_MAX_SEGS	32
	bus_dma_segment_t cam_sglist[CTLFE_MAX_SEGS];
};
131
/*
 * When we register the adapter/bus, request that this many ctl_ios be
 * allocated.  This should be the maximum supported by the adapter, but we
 * currently don't have a way to get that back from the path inquiry.
 * XXX KDM add that to the path inquiry.
 */
#define	CTLFE_REQ_CTL_IO	4096
/*
 * Number of Accept Target I/O CCBs to allocate and queue down to the
 * adapter per LUN.
 * XXX KDM should this be controlled by CTL?
 */
#define	CTLFE_ATIO_PER_LUN	1024
/*
 * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to
 * allocate and queue down to the adapter per LUN.
 * XXX KDM should this be controlled by CTL?
 */
#define	CTLFE_IN_PER_LUN	1024

/*
 * Timeout (in seconds) on CTIO CCB doing DMA or sending status
 */
#define	CTLFE_TIMEOUT	5

/*
 * Turn this on to enable extra debugging prints.
 */
#if 0
#define	CTLFE_DEBUG
#endif

MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface");

/* Alias for the ccb_hdr private pointer that holds our union ctl_io. */
#define	io_ptr		ppriv_ptr0

/* This is only used in the CTIO */
#define	ccb_atio	ppriv_ptr1

/* Frontend-private ctl_io fields: backing CCB and per-command state. */
#define PRIV_CCB(io)	((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[0])
#define PRIV_INFO(io)	((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[1])
173
/* Periph driver entry points and CTL frontend callbacks, defined below. */
static int		ctlfeinitialize(void);
static int		ctlfeshutdown(void);
static periph_init_t	ctlfeperiphinit;
static void		ctlfeasync(void *callback_arg, uint32_t code,
				   struct cam_path *path, void *arg);
static periph_ctor_t	ctlferegister;
static periph_oninv_t	ctlfeoninvalidate;
static periph_dtor_t	ctlfecleanup;
static periph_start_t	ctlfestart;
static void		ctlfedone(struct cam_periph *periph,
				  union ccb *done_ccb);

static void 		ctlfe_onoffline(void *arg, int online);
static void 		ctlfe_online(void *arg);
static void 		ctlfe_offline(void *arg);
static int 		ctlfe_lun_enable(void *arg, int lun_id);
static int 		ctlfe_lun_disable(void *arg, int lun_id);
static void		ctlfe_dump_sim(struct cam_sim *sim);
static void		ctlfe_dump_queue(struct ctlfe_lun_softc *softc);
static void 		ctlfe_datamove(union ctl_io *io);
static void 		ctlfe_done(union ctl_io *io);
static void 		ctlfe_dump(void);
static void		ctlfe_free_ccb(struct cam_periph *periph,
			    union ccb *ccb);
static void		ctlfe_requeue_ccb(struct cam_periph *periph,
			    union ccb *ccb, int unlock);
200
/* Periph driver registered with CAM via periphdriver_register(). */
static struct periph_driver ctlfe_driver =
{
	ctlfeperiphinit, "ctl",
	TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0,
	CAM_PERIPH_DRV_EARLY
};
207
/* CTL frontend descriptor for the CAM target ("camtgt") port type. */
static struct ctl_frontend ctlfe_frontend =
{
	.name = "camtgt",
	.init = ctlfeinitialize,
	.fe_dump = ctlfe_dump,
	.shutdown = ctlfeshutdown,
};
CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend);
216
/*
 * Frontend shutdown hook.  CAM currently provides no way to unregister a
 * periph driver, so a shutdown request is always refused with EBUSY.
 */
static int
ctlfeshutdown(void)
{
	/* CAM does not support periph driver unregister now. */
	return (EBUSY);
}
224
/*
 * CTL frontend init hook: set up the global softc list and its mutex,
 * then register the "ctl" periph driver with CAM.  Always succeeds.
 */
static int
ctlfeinitialize(void)
{

	STAILQ_INIT(&ctlfe_softc_list);
	mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF);
	periphdriver_register(&ctlfe_driver);
	return (0);
}
234
235static void
236ctlfeperiphinit(void)
237{
238	cam_status status;
239
240	status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED |
241				    AC_CONTRACT, ctlfeasync, NULL, NULL);
242	if (status != CAM_REQ_CMP) {
243		printf("ctl: Failed to attach async callback due to CAM "
244		       "status 0x%x!\n", status);
245	}
246}
247
/*
 * Async event callback registered in ctlfeperiphinit().  Creates a CTL
 * port when a target-capable CAM path appears (AC_PATH_REGISTERED),
 * tears the port down on AC_PATH_DEREGISTERED, and tracks initiator
 * arrival/departure via AC_CONTRACT/AC_CONTRACT_DEV_CHG events.
 *
 * May run in interrupt context, hence the M_NOWAIT allocations.
 */
static void
ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
{
	struct ctlfe_softc *softc;

#ifdef CTLFEDEBUG
	printf("%s: entered\n", __func__);
#endif

	/* Look up an existing port softc for this path, if any. */
	mtx_lock(&ctlfe_list_mtx);
	STAILQ_FOREACH(softc, &ctlfe_softc_list, links) {
		if (softc->path_id == xpt_path_path_id(path))
			break;
	}
	mtx_unlock(&ctlfe_list_mtx);

	/*
	 * When a new path gets registered, and it is capable of target
	 * mode, go ahead and attach.  Later on, we may need to be more
	 * selective, but for now this will be sufficient.
 	 */
	switch (code) {
	case AC_PATH_REGISTERED: {
		struct ctl_port *port;
		struct ccb_pathinq *cpi;
		int retval;

		cpi = (struct ccb_pathinq *)arg;

		/* Don't attach if it doesn't support target mode */
		if ((cpi->target_sprt & PIT_PROCESSOR) == 0) {
#ifdef CTLFEDEBUG
			printf("%s: SIM %s%d doesn't support target mode\n",
			       __func__, cpi->dev_name, cpi->unit_number);
#endif
			break;
		}

		/* Already have a port for this path; nothing to do. */
		if (softc != NULL) {
#ifdef CTLFEDEBUG
			printf("%s: CTL port for CAM path %u already exists\n",
			       __func__, xpt_path_path_id(path));
#endif
			break;
		}

		/*
		 * We're in an interrupt context here, so we have to
		 * use M_NOWAIT.  Of course this means trouble if we
		 * can't allocate memory.
		 */
		softc = malloc(sizeof(*softc), M_CTLFE, M_NOWAIT | M_ZERO);
		if (softc == NULL) {
			printf("%s: unable to malloc %zd bytes for softc\n",
			       __func__, sizeof(*softc));
			return;
		}

		softc->path_id = cpi->ccb_h.path_id;
		softc->target_id = cpi->initiator_id;
		softc->sim = xpt_path_sim(path);
		softc->hba_misc = cpi->hba_misc;
		if (cpi->maxio != 0)
			softc->maxio = cpi->maxio;
		else
			softc->maxio = DFLTPHYS;
		mtx_init(&softc->lun_softc_mtx, "LUN softc mtx", NULL, MTX_DEF);
		STAILQ_INIT(&softc->lun_softc_list);

		port = &softc->port;
		port->frontend = &ctlfe_frontend;

		/*
		 * XXX KDM should we be more accurate here ?
		 */
		if (cpi->transport == XPORT_FC)
			port->port_type = CTL_PORT_FC;
		else if (cpi->transport == XPORT_SAS)
			port->port_type = CTL_PORT_SAS;
		else
			port->port_type = CTL_PORT_SCSI;

		/* XXX KDM what should the real number be here? */
		port->num_requested_ctl_io = CTLFE_REQ_CTL_IO;
		snprintf(softc->port_name, sizeof(softc->port_name),
			 "%s%d", cpi->dev_name, cpi->unit_number);
		/*
		 * XXX KDM it would be nice to allocate storage in the
		 * frontend structure itself.
	 	 */
		port->port_name = softc->port_name;
		port->physical_port = cpi->bus_id;
		port->virtual_port = 0;
		port->port_online = ctlfe_online;
		port->port_offline = ctlfe_offline;
		port->onoff_arg = softc;
		port->lun_enable = ctlfe_lun_enable;
		port->lun_disable = ctlfe_lun_disable;
		port->targ_lun_arg = softc;
		port->fe_datamove = ctlfe_datamove;
		port->fe_done = ctlfe_done;
		/*
		 * XXX KDM the path inquiry doesn't give us the maximum
		 * number of targets supported.
		 */
		port->max_targets = cpi->max_target;
		port->max_target_id = cpi->max_target;
		port->targ_port = -1;

		retval = ctl_port_register(port);
		if (retval != 0) {
			printf("%s: ctl_port_register() failed with "
			       "error %d!\n", __func__, retval);
			mtx_destroy(&softc->lun_softc_mtx);
			free(softc, M_CTLFE);
			break;
		} else {
			mtx_lock(&ctlfe_list_mtx);
			STAILQ_INSERT_TAIL(&ctlfe_softc_list, softc, links);
			mtx_unlock(&ctlfe_list_mtx);
		}

		break;
	}
	case AC_PATH_DEREGISTERED: {

		if (softc != NULL) {
			/*
			 * XXX KDM are we certain at this point that there
			 * are no outstanding commands for this frontend?
			 */
			mtx_lock(&ctlfe_list_mtx);
			STAILQ_REMOVE(&ctlfe_softc_list, softc, ctlfe_softc,
			    links);
			mtx_unlock(&ctlfe_list_mtx);
			ctl_port_deregister(&softc->port);
			mtx_destroy(&softc->lun_softc_mtx);
			free(softc, M_CTLFE);
		}
		break;
	}
	case AC_CONTRACT: {
		struct ac_contract *ac;

		ac = (struct ac_contract *)arg;

		switch (ac->contract_number) {
		case AC_CONTRACT_DEV_CHG: {
			/* An initiator arrived at or left the fabric. */
			struct ac_device_changed *dev_chg;
			int retval;

			dev_chg = (struct ac_device_changed *)ac->contract_data;

			printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n",
			       __func__, dev_chg->wwpn, dev_chg->port,
			       xpt_path_path_id(path), dev_chg->target,
			       (dev_chg->arrived == 0) ?  "left" : "arrived");

			if (softc == NULL) {
				printf("%s: CTL port for CAM path %u not "
				       "found!\n", __func__,
				       xpt_path_path_id(path));
				break;
			}
			if (dev_chg->arrived != 0) {
				retval = ctl_add_initiator(&softc->port,
				    dev_chg->target, dev_chg->wwpn, NULL);
			} else {
				retval = ctl_remove_initiator(&softc->port,
				    dev_chg->target);
			}

			if (retval < 0) {
				printf("%s: could not %s port %d iid %u "
				       "WWPN %#jx!\n", __func__,
				       (dev_chg->arrived != 0) ? "add" :
				       "remove", softc->port.targ_port,
				       dev_chg->target,
				       (uintmax_t)dev_chg->wwpn);
			}
			break;
		}
		default:
			printf("%s: unsupported contract number %ju\n",
			       __func__, (uintmax_t)ac->contract_number);
			break;
		}
		break;
	}
	default:
		break;
	}
}
441
/*
 * Periph constructor, called with the periph lock held.  Enables the LUN
 * in the SIM, then allocates the per-LUN pools of ATIO and INOT CCBs
 * (each backed by a ctl_io) and queues them down to the SIM.  On success
 * the LUN softc is linked onto the parent bus softc's list, and a periph
 * reference is held until every CCB has been returned (see
 * ctlfe_free_ccb()).
 */
static cam_status
ctlferegister(struct cam_periph *periph, void *arg)
{
	struct ctlfe_softc *bus_softc;
	struct ctlfe_lun_softc *softc;
	union ccb ccb;
	cam_status status;
	int i;

	softc = (struct ctlfe_lun_softc *)arg;
	bus_softc = softc->parent_softc;

	STAILQ_INIT(&softc->work_queue);
	LIST_INIT(&softc->atio_list);
	LIST_INIT(&softc->inot_list);
	softc->periph = periph;
	periph->softc = softc;

	/* Ask the SIM to enable target mode on this LUN. */
	xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
	ccb.ccb_h.func_code = XPT_EN_LUN;
	ccb.cel.grp6_len = 0;
	ccb.cel.grp7_len = 0;
	ccb.cel.enable = 1;
	xpt_action(&ccb);
	status = (ccb.ccb_h.status & CAM_STATUS_MASK);
	if (status != CAM_REQ_CMP) {
		xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n",
			  __func__, ccb.ccb_h.status);
		return (status);
	}

	status = CAM_REQ_CMP;

	/* Allocate and queue the pool of ATIOs for incoming commands. */
	for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) {
		union ccb *new_ccb;
		union ctl_io *new_io;
		struct ctlfe_cmd_info *cmd_info;

		new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
					      M_ZERO|M_NOWAIT);
		if (new_ccb == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}
		new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref);
		if (new_io == NULL) {
			free(new_ccb, M_CTLFE);
			status = CAM_RESRC_UNAVAIL;
			break;
		}
		cmd_info = malloc(sizeof(*cmd_info), M_CTLFE,
		    M_ZERO | M_NOWAIT);
		if (cmd_info == NULL) {
			ctl_free_io(new_io);
			free(new_ccb, M_CTLFE);
			status = CAM_RESRC_UNAVAIL;
			break;
		}
		PRIV_INFO(new_io) = cmd_info;
		softc->atios_alloced++;
		new_ccb->ccb_h.io_ptr = new_io;
		LIST_INSERT_HEAD(&softc->atio_list, &new_ccb->ccb_h, periph_links.le);

		xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
		new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		new_ccb->ccb_h.cbfcnp = ctlfedone;
		new_ccb->ccb_h.flags |= CAM_UNLOCKED;
		xpt_action(new_ccb);
		status = new_ccb->ccb_h.status;
		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			free(cmd_info, M_CTLFE);
			ctl_free_io(new_io);
			free(new_ccb, M_CTLFE);
			break;
		}
	}

	/* Reference is dropped in ctlfe_free_ccb() when all CCBs return. */
	status = cam_periph_acquire(periph);
	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		xpt_print(periph->path, "%s: could not acquire reference "
			  "count, status = %#x\n", __func__, status);
		return (status);
	}

	if (i == 0) {
		xpt_print(periph->path, "%s: could not allocate ATIO CCBs, "
			  "status 0x%x\n", __func__, status);
		return (CAM_REQ_CMP_ERR);
	}

	/* Allocate and queue the pool of INOTs for aborts, resets, etc. */
	for (i = 0; i < CTLFE_IN_PER_LUN; i++) {
		union ccb *new_ccb;
		union ctl_io *new_io;

		new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
					      M_ZERO|M_NOWAIT);
		if (new_ccb == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}
		new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref);
		if (new_io == NULL) {
			free(new_ccb, M_CTLFE);
			status = CAM_RESRC_UNAVAIL;
			break;
		}
		softc->inots_alloced++;
		new_ccb->ccb_h.io_ptr = new_io;
		LIST_INSERT_HEAD(&softc->inot_list, &new_ccb->ccb_h, periph_links.le);

		xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
		new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
		new_ccb->ccb_h.cbfcnp = ctlfedone;
		new_ccb->ccb_h.flags |= CAM_UNLOCKED;
		xpt_action(new_ccb);
		status = new_ccb->ccb_h.status;
		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			/*
			 * Note that we don't free the CCB here.  If the
			 * status is not CAM_REQ_INPROG, then we're
			 * probably talking to a SIM that says it is
			 * target-capable but doesn't support the
			 * XPT_IMMEDIATE_NOTIFY CCB.  i.e. it supports the
			 * older API.  In that case, it'll call xpt_done()
			 * on the CCB, and we need to free it in our done
			 * routine as a result.
			 */
			break;
		}
	}
	if ((i == 0)
	 || (status != CAM_REQ_INPROG)) {
		xpt_print(periph->path, "%s: could not allocate immediate "
			  "notify CCBs, status 0x%x\n", __func__, status);
		return (CAM_REQ_CMP_ERR);
	}
	mtx_lock(&bus_softc->lun_softc_mtx);
	STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links);
	mtx_unlock(&bus_softc->lun_softc_mtx);
	return (CAM_REQ_CMP);
}
583
/*
 * Periph invalidation callback, called with the periph lock held.
 * Aborts every ATIO/INOT still queued to the SIM, disables the LUN, and
 * unlinks this LUN softc from the parent bus list.  The aborted CCBs
 * come back through ctlfedone() and are freed there.
 */
static void
ctlfeoninvalidate(struct cam_periph *periph)
{
	struct ctlfe_lun_softc *softc = (struct ctlfe_lun_softc *)periph->softc;
	struct ctlfe_softc *bus_softc;
	union ccb ccb;
	struct ccb_hdr *hdr;
	cam_status status;

	/* Abort all ATIOs and INOTs queued to SIM. */
	xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
	ccb.ccb_h.func_code = XPT_ABORT;
	LIST_FOREACH(hdr, &softc->atio_list, periph_links.le) {
		ccb.cab.abort_ccb = (union ccb *)hdr;
		xpt_action(&ccb);
	}
	LIST_FOREACH(hdr, &softc->inot_list, periph_links.le) {
		ccb.cab.abort_ccb = (union ccb *)hdr;
		xpt_action(&ccb);
	}

	/* Disable the LUN in SIM. */
	ccb.ccb_h.func_code = XPT_EN_LUN;
	ccb.cel.grp6_len = 0;
	ccb.cel.grp7_len = 0;
	ccb.cel.enable = 0;
	xpt_action(&ccb);
	status = (ccb.ccb_h.status & CAM_STATUS_MASK);
	if (status != CAM_REQ_CMP) {
		xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n",
			  __func__, ccb.ccb_h.status);
		/*
		 * XXX KDM what do we do now?
		 */
	}

	bus_softc = softc->parent_softc;
	mtx_lock(&bus_softc->lun_softc_mtx);
	STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links);
	mtx_unlock(&bus_softc->lun_softc_mtx);
}
625
626static void
627ctlfecleanup(struct cam_periph *periph)
628{
629	struct ctlfe_lun_softc *softc;
630
631	softc = (struct ctlfe_lun_softc *)periph->softc;
632
633	KASSERT(softc->ctios_sent == 0, ("%s: ctios_sent %d != 0",
634	    __func__, softc->ctios_sent));
635	KASSERT(softc->refcount == 0, ("%s: refcount %d != 0",
636	    __func__, softc->refcount));
637	KASSERT(softc->atios_alloced == 0, ("%s: atios_alloced %d != 0",
638	    __func__, softc->atios_alloced));
639	KASSERT(softc->inots_alloced == 0, ("%s: inots_alloced %d != 0",
640	    __func__, softc->inots_alloced));
641
642	free(softc, M_CTLFE);
643}
644
/*
 * Fill in the data-transfer fields of a CTIO (direction, data pointer or
 * S/G list, transfer length and S/G count) for the current chunk of a
 * CTL datamove request.
 *
 * Transfers larger than the adapter's maxio, or spanning more than
 * CTLFE_MAX_SEGS segments, are split up: progress is saved in the
 * per-command info and CTLFE_CMD_PIECEWISE is set so the remainder goes
 * out in subsequent CTIOs.  Any leftover ext_data_filled (from an SRR
 * retransmission) is consumed once, up front.
 */
static void
ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io,
    ccb_flags *flags, uint8_t **data_ptr, uint32_t *dxfer_len,
    u_int16_t *sglist_cnt)
{
	struct ctlfe_softc *bus_softc;
	struct ctlfe_cmd_info *cmd_info;
	struct ctl_sg_entry *ctl_sglist;
	bus_dma_segment_t *cam_sglist;
	size_t off;
	int i, idx;

	cmd_info = PRIV_INFO(io);
	bus_softc = softc->parent_softc;

	/*
	 * Set the direction, relative to the initiator.
	 */
	*flags &= ~CAM_DIR_MASK;
	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
		*flags |= CAM_DIR_IN;
	else
		*flags |= CAM_DIR_OUT;

	*flags &= ~CAM_DATA_MASK;
	idx = cmd_info->cur_transfer_index;
	off = cmd_info->cur_transfer_off;
	cmd_info->flags &= ~CTLFE_CMD_PIECEWISE;
	if (io->scsiio.kern_sg_entries == 0) {	/* No S/G list. */

		/* One time shift for SRR offset. */
		off += io->scsiio.ext_data_filled;
		io->scsiio.ext_data_filled = 0;

		*data_ptr = io->scsiio.kern_data_ptr + off;
		if (io->scsiio.kern_data_len - off <= bus_softc->maxio) {
			*dxfer_len = io->scsiio.kern_data_len - off;
		} else {
			/* Too big for one CTIO; send maxio now, rest later. */
			*dxfer_len = bus_softc->maxio;
			cmd_info->cur_transfer_off += bus_softc->maxio;
			cmd_info->flags |= CTLFE_CMD_PIECEWISE;
		}
		*sglist_cnt = 0;

		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
			*flags |= CAM_DATA_PADDR;
		else
			*flags |= CAM_DATA_VADDR;
	} else {	/* S/G list with physical or virtual pointers. */
		ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;

		/* One time shift for SRR offset. */
		while (io->scsiio.ext_data_filled >= ctl_sglist[idx].len - off) {
			io->scsiio.ext_data_filled -= ctl_sglist[idx].len - off;
			idx++;
			off = 0;
		}
		off += io->scsiio.ext_data_filled;
		io->scsiio.ext_data_filled = 0;

		/* Convert CTL S/G entries to CAM, clamping at maxio bytes. */
		cam_sglist = cmd_info->cam_sglist;
		*dxfer_len = 0;
		for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) {
			cam_sglist[i].ds_addr = (bus_addr_t)ctl_sglist[i + idx].addr + off;
			if (ctl_sglist[i + idx].len - off <= bus_softc->maxio - *dxfer_len) {
				cam_sglist[i].ds_len = ctl_sglist[idx + i].len - off;
				*dxfer_len += cam_sglist[i].ds_len;
			} else {
				/* maxio reached: truncate this segment here. */
				cam_sglist[i].ds_len = bus_softc->maxio - *dxfer_len;
				cmd_info->cur_transfer_index = idx + i;
				cmd_info->cur_transfer_off = cam_sglist[i].ds_len + off;
				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
				*dxfer_len += cam_sglist[i].ds_len;
				/*
				 * NOTE(review): this tests ctl_sglist[i].len,
				 * not [idx + i] — confirm that is intentional.
				 */
				if (ctl_sglist[i].len != 0)
					i++;
				break;
			}
			if (i == (CTLFE_MAX_SEGS - 1) &&
			    idx + i < (io->scsiio.kern_sg_entries - 1)) {
				/* Out of CAM S/G slots; continue next CTIO. */
				cmd_info->cur_transfer_index = idx + i + 1;
				cmd_info->cur_transfer_off = 0;
				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
				i++;
				break;
			}
			off = 0;
		}
		*sglist_cnt = i;
		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
			*flags |= CAM_DATA_SG_PADDR;
		else
			*flags |= CAM_DATA_SG;
		*data_ptr = (uint8_t *)cam_sglist;
	}
}
740
/*
 * Periph start routine, called with the periph lock held once a CTIO CCB
 * is available.  Takes the next ATIO off the work queue and issues a
 * CTIO for it: either the next chunk of a datamove, or the final SCSI
 * status (possibly with sense data).  Aborted commands are instead
 * cancelled with XPT_ABORT and their ATIO requeued to the SIM.
 */
static void
ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ctlfe_lun_softc *softc;
	struct ctlfe_cmd_info *cmd_info;
	struct ccb_hdr *ccb_h;
	struct ccb_accept_tio *atio;
	struct ccb_scsiio *csio;
	uint8_t *data_ptr;
	uint32_t dxfer_len;
	ccb_flags flags;
	union ctl_io *io;
	uint8_t scsi_status;

	softc = (struct ctlfe_lun_softc *)periph->softc;

next:
	/* Take the ATIO off the work queue */
	ccb_h = STAILQ_FIRST(&softc->work_queue);
	if (ccb_h == NULL) {
		xpt_release_ccb(start_ccb);
		return;
	}
	STAILQ_REMOVE_HEAD(&softc->work_queue, periph_links.stqe);
	atio = (struct ccb_accept_tio *)ccb_h;
	io = (union ctl_io *)ccb_h->io_ptr;
	csio = &start_ccb->csio;

	flags = atio->ccb_h.flags &
		(CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);
	cmd_info = PRIV_INFO(io);
	cmd_info->cur_transfer_index = 0;
	cmd_info->cur_transfer_off = 0;
	cmd_info->flags = 0;

	if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) {
		/*
		 * Datamove call, we need to setup the S/G list.
		 */
		ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len,
		    &csio->sglist_cnt);
	} else {
		/*
		 * We're done, send status back.
		 */
		if ((io->io_hdr.flags & CTL_FLAG_ABORT) &&
		    (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) {
			io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;

			/* Tell the SIM that we've aborted this ATIO */
#ifdef CTLFEDEBUG
			printf("%s: tag %04x abort\n", __func__, atio->tag_id);
#endif
			KASSERT(atio->ccb_h.func_code == XPT_ACCEPT_TARGET_IO,
			    ("func_code %#x is not ATIO", atio->ccb_h.func_code));
			start_ccb->ccb_h.func_code = XPT_ABORT;
			start_ccb->cab.abort_ccb = (union ccb *)atio;
			xpt_action(start_ccb);

			ctlfe_requeue_ccb(periph, (union ccb *)atio,
			    /* unlock */0);

			/* XPT_ABORT is not queued, so we can take next I/O. */
			goto next;
		}
		data_ptr = NULL;
		dxfer_len = 0;
		csio->sglist_cnt = 0;
	}
	scsi_status = 0;
	/* Attach final status only if no further data/chunks are pending. */
	if ((io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) &&
	    (cmd_info->flags & CTLFE_CMD_PIECEWISE) == 0 &&
	    ((io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) == 0 ||
	     io->io_hdr.status == CTL_SUCCESS)) {
		flags |= CAM_SEND_STATUS;
		scsi_status = io->scsiio.scsi_status;
		csio->sense_len = io->scsiio.sense_len;
#ifdef CTLFEDEBUG
		printf("%s: tag %04x status %x\n", __func__,
		       atio->tag_id, io->io_hdr.status);
#endif
		if (csio->sense_len != 0) {
			csio->sense_data = io->scsiio.sense_data;
			flags |= CAM_SEND_SENSE;
		}
	}

#ifdef CTLFEDEBUG
	printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__,
	       (flags & CAM_SEND_STATUS) ? "done" : "datamove",
	       atio->tag_id, flags, data_ptr, dxfer_len);
#endif

	/*
	 * Valid combinations:
	 *  - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0,
	 *    sglist_cnt = 0
	 *  - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0,
	 *    sglist_cnt = 0
	 *  - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0,
	 *    sglist_cnt != 0
	 */
#ifdef CTLFEDEBUG
	if (((flags & CAM_SEND_STATUS)
	  && (((flags & CAM_DATA_SG) != 0)
	   || (dxfer_len != 0)
	   || (csio->sglist_cnt != 0)))
	 || (((flags & CAM_SEND_STATUS) == 0)
	  && (dxfer_len == 0))
	 || ((flags & CAM_DATA_SG)
	  && (csio->sglist_cnt == 0))
	 || (((flags & CAM_DATA_SG) == 0)
	  && (csio->sglist_cnt != 0))) {
		printf("%s: tag %04x cdb %02x flags %#x dxfer_len "
		       "%d sg %u\n", __func__, atio->tag_id,
		       atio_cdb_ptr(atio)[0], flags, dxfer_len,
		       csio->sglist_cnt);
		printf("%s: tag %04x io status %#x\n", __func__,
		       atio->tag_id, io->io_hdr.status);
	}
#endif
	cam_fill_ctio(csio,
		      /*retries*/ 2,
		      ctlfedone,
		      flags,
		      (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0,
		      atio->tag_id,
		      atio->init_id,
		      scsi_status,
		      /*data_ptr*/ data_ptr,
		      /*dxfer_len*/ dxfer_len,
		      /*timeout*/ CTLFE_TIMEOUT * 1000);
	start_ccb->ccb_h.flags |= CAM_UNLOCKED;
	start_ccb->ccb_h.ccb_atio = atio;
	if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
		io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
	io->io_hdr.flags &= ~(CTL_FLAG_DMA_QUEUED | CTL_FLAG_STATUS_QUEUED);

	softc->ctios_sent++;
	softc->refcount++;
	/* Drop the periph lock around the SIM call; refcount pins us. */
	cam_periph_unlock(periph);
	xpt_action(start_ccb);
	cam_periph_lock(periph);
	softc->refcount--;

	/*
	 * If we still have work to do, ask for another CCB.
	 */
	if (!STAILQ_EMPTY(&softc->work_queue))
		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
}
892
893static void
894ctlfe_drain(void *context, int pending)
895{
896	struct cam_periph *periph = context;
897	struct ctlfe_lun_softc *softc = periph->softc;
898
899	cam_periph_lock(periph);
900	while (softc->refcount != 0) {
901		cam_periph_sleep(periph, &softc->refcount, PRIBIO,
902		    "ctlfe_drain", 1);
903	}
904	cam_periph_unlock(periph);
905	cam_periph_release(periph);
906}
907
/*
 * Free an ATIO or INOT CCB that has come back from the SIM for the last
 * time, along with its ctl_io and (for ATIOs) the per-command info.
 * Once both allocation counts hit zero, the periph reference taken in
 * ctlferegister() is released — directly, or via the drain task if
 * xpt_action() calls are still in flight.  Called with periph lock held.
 */
static void
ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb)
{
	struct ctlfe_lun_softc *softc;
	union ctl_io *io;
	struct ctlfe_cmd_info *cmd_info;

	softc = (struct ctlfe_lun_softc *)periph->softc;
	io = ccb->ccb_h.io_ptr;

	switch (ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
		softc->atios_alloced--;
		cmd_info = PRIV_INFO(io);
		free(cmd_info, M_CTLFE);
		break;
	case XPT_IMMEDIATE_NOTIFY:
	case XPT_NOTIFY_ACKNOWLEDGE:
		softc->inots_alloced--;
		break;
	default:
		break;
	}

	ctl_free_io(io);
	free(ccb, M_CTLFE);

	KASSERT(softc->atios_alloced >= 0, ("%s: atios_alloced %d < 0",
	    __func__, softc->atios_alloced));
	KASSERT(softc->inots_alloced >= 0, ("%s: inots_alloced %d < 0",
	    __func__, softc->inots_alloced));

	/*
	 * If we have received all of our CCBs, we can release our
	 * reference on the peripheral driver.  It will probably go away
	 * now.
	 */
	if (softc->atios_alloced == 0 && softc->inots_alloced == 0) {
		if (softc->refcount == 0) {
			cam_periph_release_locked(periph);
		} else {
			TASK_INIT(&softc->refdrain_task, 0, ctlfe_drain, periph);
			taskqueue_enqueue(taskqueue_thread,
			    &softc->refdrain_task);
		}
	}
}
955
/*
 * Send the ATIO/INOT back to the SIM, or free it if periph was invalidated.
 * Called with the periph lock held; if "unlock" is non-zero the lock is
 * dropped before the CCB is pushed back down and stays dropped on return.
 */
static void
ctlfe_requeue_ccb(struct cam_periph *periph, union ccb *ccb, int unlock)
{
	struct ctlfe_lun_softc *softc;
	struct mtx *mtx;

	if (periph->flags & CAM_PERIPH_INVALID) {
		mtx = cam_periph_mtx(periph);
		ctlfe_free_ccb(periph, ccb);
		if (unlock)
			mtx_unlock(mtx);
		return;
	}
	softc = (struct ctlfe_lun_softc *)periph->softc;
	/* Re-track the CCB so ctlfeoninvalidate() can abort it later. */
	if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
		LIST_INSERT_HEAD(&softc->atio_list, &ccb->ccb_h, periph_links.le);
	else
		LIST_INSERT_HEAD(&softc->inot_list, &ccb->ccb_h, periph_links.le);
	if (unlock)
		cam_periph_unlock(periph);

	/*
	 * For a wildcard attachment, commands can come in with a specific
	 * target/lun.  Reset the target and LUN fields back to the wildcard
	 * values before we send them back down to the SIM.
	 */
	if (softc->flags & CTLFE_LUN_WILDCARD) {
		ccb->ccb_h.target_id = CAM_TARGET_WILDCARD;
		ccb->ccb_h.target_lun = CAM_LUN_WILDCARD;
	}

	xpt_action(ccb);
}
992
/*
 * Advance a READ/WRITE CDB past "offset" bytes that were already
 * transferred (used when handling an SRR): the LBA is bumped and the
 * transfer length reduced by the corresponding block count.  Assumes
 * 512-byte blocks.  Returns 0 on success, -1 for unsupported opcodes.
 */
static int
ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset)
{
	uint64_t lba;
	uint32_t num_blocks, nbc;
	uint8_t *cmdbyt = atio_cdb_ptr(atio);

	nbc = offset >> 9;	/* ASSUMING 512 BYTE BLOCKS */

	switch (cmdbyt[0]) {
	case READ_6:
	case WRITE_6:
	{
		struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt;
		lba = scsi_3btoul(cdb->addr);
		lba &= 0x1fffff;
		/* Length 0 means 256 blocks for 6-byte CDBs. */
		num_blocks = cdb->length;
		if (num_blocks == 0)
			num_blocks = 256;
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto3b(lba, cdb->addr);
		cdb->length = num_blocks;
		break;
	}
	case READ_10:
	case WRITE_10:
	{
		struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto4b(lba, cdb->addr);
		scsi_ulto2b(num_blocks, cdb->length);
		break;
	}
	case READ_12:
	case WRITE_12:
	{
		struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto4b(lba, cdb->addr);
		scsi_ulto4b(num_blocks, cdb->length);
		break;
	}
	case READ_16:
	case WRITE_16:
	{
		struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_u64to8b(lba, cdb->addr);
		scsi_ulto4b(num_blocks, cdb->length);
		break;
	}
	default:
		return -1;
	}
	return (0);
}
1059
1060static void
1061ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
1062{
1063	struct ctlfe_lun_softc *softc;
1064	struct ctlfe_softc *bus_softc;
1065	struct ctlfe_cmd_info *cmd_info;
1066	struct ccb_accept_tio *atio = NULL;
1067	union ctl_io *io = NULL;
1068	struct mtx *mtx;
1069	cam_status status;
1070
1071	KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0,
1072	    ("CCB in ctlfedone() without CAM_UNLOCKED flag"));
1073#ifdef CTLFE_DEBUG
1074	printf("%s: entered, func_code = %#x\n", __func__,
1075	       done_ccb->ccb_h.func_code);
1076#endif
1077
1078	/*
1079	 * At this point CTL has no known use case for device queue freezes.
1080	 * In case some SIM think different -- drop its freeze right here.
1081	 */
1082	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1083		cam_release_devq(periph->path,
1084				 /*relsim_flags*/0,
1085				 /*reduction*/0,
1086				 /*timeout*/0,
1087				 /*getcount_only*/0);
1088		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1089	}
1090
1091	softc = (struct ctlfe_lun_softc *)periph->softc;
1092	bus_softc = softc->parent_softc;
1093	mtx = cam_periph_mtx(periph);
1094	mtx_lock(mtx);
1095
1096	switch (done_ccb->ccb_h.func_code) {
1097	case XPT_ACCEPT_TARGET_IO: {
1098
1099		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
1100		atio = &done_ccb->atio;
1101		status = atio->ccb_h.status & CAM_STATUS_MASK;
1102		if (status != CAM_CDB_RECVD) {
1103			ctlfe_free_ccb(periph, done_ccb);
1104			goto out;
1105		}
1106
1107 resubmit:
1108		/*
1109		 * Allocate a ctl_io, pass it to CTL, and wait for the
1110		 * datamove or done.
1111		 */
1112		mtx_unlock(mtx);
1113		io = done_ccb->ccb_h.io_ptr;
1114		cmd_info = PRIV_INFO(io);
1115		ctl_zero_io(io);
1116
1117		/* Save pointers on both sides */
1118		PRIV_CCB(io) = done_ccb;
1119		PRIV_INFO(io) = cmd_info;
1120		done_ccb->ccb_h.io_ptr = io;
1121
1122		/*
1123		 * Only SCSI I/O comes down this path, resets, etc. come
1124		 * down the immediate notify path below.
1125		 */
1126		io->io_hdr.io_type = CTL_IO_SCSI;
1127		io->io_hdr.nexus.initid = atio->init_id;
1128		io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
1129		if (bus_softc->hba_misc & PIM_EXTLUNS) {
1130			io->io_hdr.nexus.targ_lun = ctl_decode_lun(
1131			    CAM_EXTLUN_BYTE_SWIZZLE(atio->ccb_h.target_lun));
1132		} else {
1133			io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun;
1134		}
1135		io->scsiio.tag_num = atio->tag_id;
1136		switch (atio->tag_action) {
1137		case CAM_TAG_ACTION_NONE:
1138			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
1139			break;
1140		case MSG_SIMPLE_TASK:
1141			io->scsiio.tag_type = CTL_TAG_SIMPLE;
1142			break;
1143		case MSG_HEAD_OF_QUEUE_TASK:
1144        		io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
1145			break;
1146		case MSG_ORDERED_TASK:
1147        		io->scsiio.tag_type = CTL_TAG_ORDERED;
1148			break;
1149		case MSG_ACA_TASK:
1150			io->scsiio.tag_type = CTL_TAG_ACA;
1151			break;
1152		default:
1153			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
1154			printf("%s: unhandled tag type %#x!!\n", __func__,
1155			       atio->tag_action);
1156			break;
1157		}
1158		if (atio->cdb_len > sizeof(io->scsiio.cdb)) {
1159			printf("%s: WARNING: CDB len %d > ctl_io space %zd\n",
1160			       __func__, atio->cdb_len, sizeof(io->scsiio.cdb));
1161		}
1162		io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb));
1163		bcopy(atio_cdb_ptr(atio), io->scsiio.cdb, io->scsiio.cdb_len);
1164
1165#ifdef CTLFEDEBUG
1166		printf("%s: %u:%u:%u: tag %04x CDB %02x\n", __func__,
1167		        io->io_hdr.nexus.initid,
1168		        io->io_hdr.nexus.targ_port,
1169		        io->io_hdr.nexus.targ_lun,
1170			io->scsiio.tag_num, io->scsiio.cdb[0]);
1171#endif
1172
1173		ctl_queue(io);
1174		return;
1175	}
1176	case XPT_CONT_TARGET_IO: {
1177		int srr = 0;
1178		uint32_t srr_off = 0;
1179
1180		atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio;
1181		io = (union ctl_io *)atio->ccb_h.io_ptr;
1182
1183		softc->ctios_sent--;
1184#ifdef CTLFEDEBUG
1185		printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n",
1186		       __func__, atio->tag_id, done_ccb->ccb_h.flags);
1187#endif
1188		/*
1189		 * Handle SRR case were the data pointer is pushed back hack
1190		 */
1191		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV
1192		    && done_ccb->csio.msg_ptr != NULL
1193		    && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED
1194		    && done_ccb->csio.msg_ptr[1] == 5
1195       		    && done_ccb->csio.msg_ptr[2] == 0) {
1196			srr = 1;
1197			srr_off =
1198			    (done_ccb->csio.msg_ptr[3] << 24)
1199			    | (done_ccb->csio.msg_ptr[4] << 16)
1200			    | (done_ccb->csio.msg_ptr[5] << 8)
1201			    | (done_ccb->csio.msg_ptr[6]);
1202		}
1203
1204		/*
1205		 * If we have an SRR and we're still sending data, we
1206		 * should be able to adjust offsets and cycle again.
1207		 * It is possible only if offset is from this datamove.
1208		 */
1209		if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) &&
1210		    srr_off >= io->scsiio.kern_rel_offset &&
1211		    srr_off < io->scsiio.kern_rel_offset +
1212		     io->scsiio.kern_data_len) {
1213			io->scsiio.kern_data_resid =
1214			    io->scsiio.kern_rel_offset +
1215			    io->scsiio.kern_data_len - srr_off;
1216			io->scsiio.ext_data_filled = srr_off;
1217			io->scsiio.io_hdr.status = CTL_STATUS_NONE;
1218			io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;
1219			xpt_release_ccb(done_ccb);
1220			STAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
1221					  periph_links.stqe);
1222			xpt_schedule(periph, CAM_PRIORITY_NORMAL);
1223			break;
1224		}
1225
1226		/*
1227		 * If status was being sent, the back end data is now history.
1228		 * Hack it up and resubmit a new command with the CDB adjusted.
1229		 * If the SIM does the right thing, all of the resid math
1230		 * should work.
1231		 */
1232		if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) {
1233			xpt_release_ccb(done_ccb);
1234			if (ctlfe_adjust_cdb(atio, srr_off) == 0) {
1235				done_ccb = (union ccb *)atio;
1236				goto resubmit;
1237			}
1238			/*
1239			 * Fall through to doom....
1240			 */
1241		}
1242
1243		if ((done_ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1244		    (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
1245			io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
1246
1247		/*
1248		 * If we were sending status back to the initiator, free up
1249		 * resources.  If we were doing a datamove, call the
1250		 * datamove done routine.
1251		 */
1252		if ((io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) {
1253			/*
1254			 * If we asked to send sense data but it wasn't sent,
1255			 * queue the I/O back to CTL for later REQUEST SENSE.
1256			 */
1257			if ((done_ccb->ccb_h.flags & CAM_SEND_SENSE) != 0 &&
1258			    (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
1259			    (done_ccb->ccb_h.status & CAM_SENT_SENSE) == 0 &&
1260			    (io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref)) != NULL) {
1261				PRIV_INFO(io) = PRIV_INFO(
1262				    (union ctl_io *)atio->ccb_h.io_ptr);
1263				ctl_queue_sense(atio->ccb_h.io_ptr);
1264				atio->ccb_h.io_ptr = io;
1265			}
1266
1267			/* Abort ATIO if CTIO sending status has failed. */
1268			if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) !=
1269			    CAM_REQ_CMP) {
1270				done_ccb->ccb_h.func_code = XPT_ABORT;
1271				done_ccb->cab.abort_ccb = (union ccb *)atio;
1272				xpt_action(done_ccb);
1273			}
1274
1275			xpt_release_ccb(done_ccb);
1276			ctlfe_requeue_ccb(periph, (union ccb *)atio,
1277			    /* unlock */1);
1278			return;
1279		} else {
1280			struct ctlfe_cmd_info *cmd_info;
1281			struct ccb_scsiio *csio;
1282
1283			csio = &done_ccb->csio;
1284			cmd_info = PRIV_INFO(io);
1285
1286			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
1287
1288			/*
1289			 * Translate CAM status to CTL status.  Success
1290			 * does not change the overall, ctl_io status.  In
1291			 * that case we just set port_status to 0.  If we
1292			 * have a failure, though, set a data phase error
1293			 * for the overall ctl_io.
1294			 */
1295			switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) {
1296			case CAM_REQ_CMP:
1297				io->scsiio.kern_data_resid -=
1298				    csio->dxfer_len - csio->resid;
1299				io->io_hdr.port_status = 0;
1300				break;
1301			default:
1302				/*
1303				 * XXX KDM we probably need to figure out a
1304				 * standard set of errors that the SIM
1305				 * drivers should return in the event of a
1306				 * data transfer failure.  A data phase
1307				 * error will at least point the user to a
1308				 * data transfer error of some sort.
1309				 * Hopefully the SIM printed out some
1310				 * additional information to give the user
1311				 * a clue what happened.
1312				 */
1313				io->io_hdr.port_status = 0xbad1;
1314				ctl_set_data_phase_error(&io->scsiio);
1315				/*
1316				 * XXX KDM figure out residual.
1317				 */
1318				break;
1319			}
1320			/*
1321			 * If we had to break this S/G list into multiple
1322			 * pieces, figure out where we are in the list, and
1323			 * continue sending pieces if necessary.
1324			 */
1325			if ((cmd_info->flags & CTLFE_CMD_PIECEWISE) &&
1326			    io->io_hdr.port_status == 0 && csio->resid == 0) {
1327				ccb_flags flags;
1328				uint8_t *data_ptr;
1329				uint32_t dxfer_len;
1330
1331				flags = atio->ccb_h.flags &
1332					(CAM_DIS_DISCONNECT|
1333					 CAM_TAG_ACTION_VALID);
1334
1335				ctlfedata(softc, io, &flags, &data_ptr,
1336				    &dxfer_len, &csio->sglist_cnt);
1337
1338				if (((flags & CAM_SEND_STATUS) == 0)
1339				 && (dxfer_len == 0)) {
1340					printf("%s: tag %04x no status or "
1341					       "len cdb = %02x\n", __func__,
1342					       atio->tag_id,
1343					       atio_cdb_ptr(atio)[0]);
1344					printf("%s: tag %04x io status %#x\n",
1345					       __func__, atio->tag_id,
1346					       io->io_hdr.status);
1347				}
1348
1349				cam_fill_ctio(csio,
1350					      /*retries*/ 2,
1351					      ctlfedone,
1352					      flags,
1353					      (flags & CAM_TAG_ACTION_VALID) ?
1354					       MSG_SIMPLE_Q_TAG : 0,
1355					      atio->tag_id,
1356					      atio->init_id,
1357					      0,
1358					      /*data_ptr*/ data_ptr,
1359					      /*dxfer_len*/ dxfer_len,
1360					      CTLFE_TIMEOUT * 1000);
1361
1362				csio->ccb_h.flags |= CAM_UNLOCKED;
1363				csio->resid = 0;
1364				csio->ccb_h.ccb_atio = atio;
1365				io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
1366				softc->ctios_sent++;
1367				mtx_unlock(mtx);
1368				xpt_action((union ccb *)csio);
1369			} else {
1370				/*
1371				 * Release the CTIO.  The ATIO will be sent back
1372				 * down to the SIM once we send status.
1373				 */
1374				xpt_release_ccb(done_ccb);
1375				mtx_unlock(mtx);
1376
1377				/* Call the backend move done callback */
1378				io->scsiio.be_move_done(io);
1379			}
1380			return;
1381		}
1382		break;
1383	}
1384	case XPT_IMMEDIATE_NOTIFY: {
1385		union ctl_io *io;
1386		struct ccb_immediate_notify *inot;
1387		int send_ctl_io;
1388
1389		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
1390		inot = &done_ccb->cin1;
1391		io = done_ccb->ccb_h.io_ptr;
1392		ctl_zero_io(io);
1393
1394		send_ctl_io = 1;
1395
1396		io->io_hdr.io_type = CTL_IO_TASK;
1397		PRIV_CCB(io) = done_ccb;
1398		inot->ccb_h.io_ptr = io;
1399		io->io_hdr.nexus.initid = inot->initiator_id;
1400		io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
1401		if (bus_softc->hba_misc & PIM_EXTLUNS) {
1402			io->io_hdr.nexus.targ_lun = ctl_decode_lun(
1403			    CAM_EXTLUN_BYTE_SWIZZLE(inot->ccb_h.target_lun));
1404		} else {
1405			io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun;
1406		}
1407		/* XXX KDM should this be the tag_id? */
1408		io->taskio.tag_num = inot->seq_id;
1409
1410		status = inot->ccb_h.status & CAM_STATUS_MASK;
1411		switch (status) {
1412		case CAM_SCSI_BUS_RESET:
1413			io->taskio.task_action = CTL_TASK_BUS_RESET;
1414			break;
1415		case CAM_BDR_SENT:
1416			io->taskio.task_action = CTL_TASK_TARGET_RESET;
1417			break;
1418		case CAM_MESSAGE_RECV:
1419			switch (inot->arg) {
1420			case MSG_ABORT_TASK_SET:
1421				io->taskio.task_action =
1422				    CTL_TASK_ABORT_TASK_SET;
1423				break;
1424			case MSG_TARGET_RESET:
1425				io->taskio.task_action = CTL_TASK_TARGET_RESET;
1426				break;
1427			case MSG_ABORT_TASK:
1428				io->taskio.task_action = CTL_TASK_ABORT_TASK;
1429				break;
1430			case MSG_LOGICAL_UNIT_RESET:
1431				io->taskio.task_action = CTL_TASK_LUN_RESET;
1432				break;
1433			case MSG_CLEAR_TASK_SET:
1434				io->taskio.task_action =
1435				    CTL_TASK_CLEAR_TASK_SET;
1436				break;
1437			case MSG_CLEAR_ACA:
1438				io->taskio.task_action = CTL_TASK_CLEAR_ACA;
1439				break;
1440			case MSG_QUERY_TASK:
1441				io->taskio.task_action = CTL_TASK_QUERY_TASK;
1442				break;
1443			case MSG_QUERY_TASK_SET:
1444				io->taskio.task_action =
1445				    CTL_TASK_QUERY_TASK_SET;
1446				break;
1447			case MSG_QUERY_ASYNC_EVENT:
1448				io->taskio.task_action =
1449				    CTL_TASK_QUERY_ASYNC_EVENT;
1450				break;
1451			case MSG_NOOP:
1452				send_ctl_io = 0;
1453				break;
1454			default:
1455				xpt_print(periph->path,
1456				    "%s: unsupported INOT message 0x%x\n",
1457				    __func__, inot->arg);
1458				send_ctl_io = 0;
1459				break;
1460			}
1461			break;
1462		default:
1463			xpt_print(periph->path,
1464			    "%s: unsupported INOT status 0x%x\n",
1465			    __func__, status);
1466			/* FALLTHROUGH */
1467		case CAM_REQ_ABORTED:
1468		case CAM_REQ_INVALID:
1469		case CAM_DEV_NOT_THERE:
1470		case CAM_PROVIDE_FAIL:
1471			ctlfe_free_ccb(periph, done_ccb);
1472			goto out;
1473		}
1474		if (send_ctl_io != 0) {
1475			ctl_queue(io);
1476		} else {
1477			done_ccb->ccb_h.status = CAM_REQ_INPROG;
1478			done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
1479			xpt_action(done_ccb);
1480		}
1481		break;
1482	}
1483	case XPT_NOTIFY_ACKNOWLEDGE:
1484		/* Queue this back down to the SIM as an immediate notify. */
1485		done_ccb->ccb_h.status = CAM_REQ_INPROG;
1486		done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
1487		ctlfe_requeue_ccb(periph, done_ccb, /* unlock */1);
1488		return;
1489	case XPT_SET_SIM_KNOB:
1490	case XPT_GET_SIM_KNOB:
1491		break;
1492	default:
1493		panic("%s: unexpected CCB type %#x", __func__,
1494		      done_ccb->ccb_h.func_code);
1495		break;
1496	}
1497
1498out:
1499	mtx_unlock(mtx);
1500}
1501
1502static void
1503ctlfe_onoffline(void *arg, int online)
1504{
1505	struct ctlfe_softc *bus_softc;
1506	union ccb *ccb;
1507	cam_status status;
1508	struct cam_path *path;
1509	int set_wwnn;
1510
1511	bus_softc = (struct ctlfe_softc *)arg;
1512
1513	set_wwnn = 0;
1514
1515	status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id,
1516		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
1517	if (status != CAM_REQ_CMP) {
1518		printf("%s: unable to create path!\n", __func__);
1519		return;
1520	}
1521	ccb = xpt_alloc_ccb();
1522	xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE);
1523	ccb->ccb_h.func_code = XPT_GET_SIM_KNOB;
1524	xpt_action(ccb);
1525
1526	/*
1527	 * Copan WWN format:
1528	 *
1529	 * Bits 63-60:	0x5		NAA, IEEE registered name
1530	 * Bits 59-36:	0x000ED5	IEEE Company name assigned to Copan
1531	 * Bits 35-12:			Copan SSN (Sequential Serial Number)
1532	 * Bits 11-8:			Type of port:
1533	 *					1 == N-Port
1534	 *					2 == F-Port
1535	 *					3 == NL-Port
1536	 * Bits 7-0:			0 == Node Name, >0 == Port Number
1537	 */
1538	if (online != 0) {
1539		if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0){
1540
1541			printf("%s: %s current WWNN %#jx\n", __func__,
1542			       bus_softc->port_name,
1543			       ccb->knob.xport_specific.fc.wwnn);
1544			printf("%s: %s current WWPN %#jx\n", __func__,
1545			       bus_softc->port_name,
1546			       ccb->knob.xport_specific.fc.wwpn);
1547
1548			/*
1549			 * If the user has specified a WWNN/WWPN, send them
1550			 * down to the SIM.  Otherwise, record what the SIM
1551			 * has reported.
1552			 */
1553			if (bus_softc->port.wwnn != 0 && bus_softc->port.wwnn
1554			    != ccb->knob.xport_specific.fc.wwnn) {
1555				ccb->knob.xport_specific.fc.wwnn =
1556				    bus_softc->port.wwnn;
1557				set_wwnn = 1;
1558			} else {
1559				ctl_port_set_wwns(&bus_softc->port,
1560				    true, ccb->knob.xport_specific.fc.wwnn,
1561				    false, 0);
1562			}
1563			if (bus_softc->port.wwpn != 0 && bus_softc->port.wwpn
1564			     != ccb->knob.xport_specific.fc.wwpn) {
1565				ccb->knob.xport_specific.fc.wwpn =
1566				    bus_softc->port.wwpn;
1567				set_wwnn = 1;
1568			} else {
1569				ctl_port_set_wwns(&bus_softc->port,
1570				    false, 0,
1571				    true, ccb->knob.xport_specific.fc.wwpn);
1572			}
1573
1574
1575			if (set_wwnn != 0) {
1576				printf("%s: %s new WWNN %#jx\n", __func__,
1577				       bus_softc->port_name,
1578				ccb->knob.xport_specific.fc.wwnn);
1579				printf("%s: %s new WWPN %#jx\n", __func__,
1580				       bus_softc->port_name,
1581				       ccb->knob.xport_specific.fc.wwpn);
1582			}
1583		} else {
1584			printf("%s: %s has no valid WWNN/WWPN\n", __func__,
1585			       bus_softc->port_name);
1586		}
1587	}
1588	ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
1589	ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
1590	if (set_wwnn != 0)
1591		ccb->knob.xport_specific.valid |= KNOB_VALID_ADDRESS;
1592
1593	if (online != 0)
1594		ccb->knob.xport_specific.fc.role |= KNOB_ROLE_TARGET;
1595	else
1596		ccb->knob.xport_specific.fc.role &= ~KNOB_ROLE_TARGET;
1597
1598	xpt_action(ccb);
1599
1600	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1601		printf("%s: SIM %s (path id %d) target %s failed with "
1602		       "status %#x\n",
1603		       __func__, bus_softc->port_name, bus_softc->path_id,
1604		       (online != 0) ? "enable" : "disable",
1605		       ccb->ccb_h.status);
1606	} else {
1607		printf("%s: SIM %s (path id %d) target %s succeeded\n",
1608		       __func__, bus_softc->port_name, bus_softc->path_id,
1609		       (online != 0) ? "enable" : "disable");
1610	}
1611
1612	xpt_free_path(path);
1613	xpt_free_ccb(ccb);
1614}
1615
1616static void
1617ctlfe_online(void *arg)
1618{
1619	struct ctlfe_softc *bus_softc;
1620	struct cam_path *path;
1621	cam_status status;
1622	struct ctlfe_lun_softc *lun_softc;
1623	struct cam_periph *periph;
1624
1625	bus_softc = (struct ctlfe_softc *)arg;
1626
1627	/*
1628	 * Create the wildcard LUN before bringing the port online.
1629	 */
1630	status = xpt_create_path(&path, /*periph*/ NULL,
1631				 bus_softc->path_id, CAM_TARGET_WILDCARD,
1632				 CAM_LUN_WILDCARD);
1633	if (status != CAM_REQ_CMP) {
1634		printf("%s: unable to create path for wildcard periph\n",
1635				__func__);
1636		return;
1637	}
1638
1639	lun_softc = malloc(sizeof(*lun_softc), M_CTLFE, M_WAITOK | M_ZERO);
1640
1641	xpt_path_lock(path);
1642	periph = cam_periph_find(path, "ctl");
1643	if (periph != NULL) {
1644		/* We've already got a periph, no need to alloc a new one. */
1645		xpt_path_unlock(path);
1646		xpt_free_path(path);
1647		free(lun_softc, M_CTLFE);
1648		return;
1649	}
1650	lun_softc->parent_softc = bus_softc;
1651	lun_softc->flags |= CTLFE_LUN_WILDCARD;
1652
1653	status = cam_periph_alloc(ctlferegister,
1654				  ctlfeoninvalidate,
1655				  ctlfecleanup,
1656				  ctlfestart,
1657				  "ctl",
1658				  CAM_PERIPH_BIO,
1659				  path,
1660				  ctlfeasync,
1661				  0,
1662				  lun_softc);
1663
1664	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1665		const struct cam_status_entry *entry;
1666
1667		entry = cam_fetch_status_entry(status);
1668		printf("%s: CAM error %s (%#x) returned from "
1669		       "cam_periph_alloc()\n", __func__, (entry != NULL) ?
1670		       entry->status_text : "Unknown", status);
1671		free(lun_softc, M_CTLFE);
1672	}
1673
1674	xpt_path_unlock(path);
1675	ctlfe_onoffline(arg, /*online*/ 1);
1676	xpt_free_path(path);
1677}
1678
1679static void
1680ctlfe_offline(void *arg)
1681{
1682	struct ctlfe_softc *bus_softc;
1683	struct cam_path *path;
1684	cam_status status;
1685	struct cam_periph *periph;
1686
1687	bus_softc = (struct ctlfe_softc *)arg;
1688
1689	ctlfe_onoffline(arg, /*online*/ 0);
1690
1691	/*
1692	 * Disable the wildcard LUN for this port now that we have taken
1693	 * the port offline.
1694	 */
1695	status = xpt_create_path(&path, /*periph*/ NULL,
1696				 bus_softc->path_id, CAM_TARGET_WILDCARD,
1697				 CAM_LUN_WILDCARD);
1698	if (status != CAM_REQ_CMP) {
1699		printf("%s: unable to create path for wildcard periph\n",
1700		       __func__);
1701		return;
1702	}
1703	xpt_path_lock(path);
1704	if ((periph = cam_periph_find(path, "ctl")) != NULL)
1705		cam_periph_invalidate(periph);
1706	xpt_path_unlock(path);
1707	xpt_free_path(path);
1708}
1709
1710/*
1711 * This will get called to enable a LUN on every bus that is attached to
1712 * CTL.  So we only need to create a path/periph for this particular bus.
1713 */
1714static int
1715ctlfe_lun_enable(void *arg, int lun_id)
1716{
1717	struct ctlfe_softc *bus_softc;
1718	struct ctlfe_lun_softc *softc;
1719	struct cam_path *path;
1720	struct cam_periph *periph;
1721	cam_status status;
1722
1723	bus_softc = (struct ctlfe_softc *)arg;
1724	if (bus_softc->hba_misc & PIM_EXTLUNS)
1725		lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id));
1726
1727	status = xpt_create_path(&path, /*periph*/ NULL,
1728	    bus_softc->path_id, bus_softc->target_id, lun_id);
1729	/* XXX KDM need some way to return status to CTL here? */
1730	if (status != CAM_REQ_CMP) {
1731		printf("%s: could not create path, status %#x\n", __func__,
1732		       status);
1733		return (1);
1734	}
1735
1736	softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO);
1737	xpt_path_lock(path);
1738	periph = cam_periph_find(path, "ctl");
1739	if (periph != NULL) {
1740		/* We've already got a periph, no need to alloc a new one. */
1741		xpt_path_unlock(path);
1742		xpt_free_path(path);
1743		free(softc, M_CTLFE);
1744		return (0);
1745	}
1746	softc->parent_softc = bus_softc;
1747
1748	status = cam_periph_alloc(ctlferegister,
1749				  ctlfeoninvalidate,
1750				  ctlfecleanup,
1751				  ctlfestart,
1752				  "ctl",
1753				  CAM_PERIPH_BIO,
1754				  path,
1755				  ctlfeasync,
1756				  0,
1757				  softc);
1758
1759	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1760		const struct cam_status_entry *entry;
1761
1762		entry = cam_fetch_status_entry(status);
1763		printf("%s: CAM error %s (%#x) returned from "
1764		       "cam_periph_alloc()\n", __func__, (entry != NULL) ?
1765		       entry->status_text : "Unknown", status);
1766		free(softc, M_CTLFE);
1767	}
1768
1769	xpt_path_unlock(path);
1770	xpt_free_path(path);
1771	return (0);
1772}
1773
1774/*
1775 * This will get called when the user removes a LUN to disable that LUN
1776 * on every bus that is attached to CTL.
1777 */
1778static int
1779ctlfe_lun_disable(void *arg, int lun_id)
1780{
1781	struct ctlfe_softc *softc;
1782	struct ctlfe_lun_softc *lun_softc;
1783
1784	softc = (struct ctlfe_softc *)arg;
1785	if (softc->hba_misc & PIM_EXTLUNS)
1786		lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id));
1787
1788	mtx_lock(&softc->lun_softc_mtx);
1789	STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) {
1790		struct cam_path *path;
1791
1792		path = lun_softc->periph->path;
1793
1794		if ((xpt_path_target_id(path) == softc->target_id)
1795		 && (xpt_path_lun_id(path) == lun_id)) {
1796			break;
1797		}
1798	}
1799	if (lun_softc == NULL) {
1800		mtx_unlock(&softc->lun_softc_mtx);
1801		printf("%s: can't find lun %d\n", __func__, lun_id);
1802		return (1);
1803	}
1804	cam_periph_acquire(lun_softc->periph);
1805	mtx_unlock(&softc->lun_softc_mtx);
1806
1807	cam_periph_lock(lun_softc->periph);
1808	cam_periph_invalidate(lun_softc->periph);
1809	cam_periph_unlock(lun_softc->periph);
1810	cam_periph_release(lun_softc->periph);
1811	return (0);
1812}
1813
1814static void
1815ctlfe_dump_sim(struct cam_sim *sim)
1816{
1817
1818	printf("%s%d: max tagged openings: %d, max dev openings: %d\n",
1819	       sim->sim_name, sim->unit_number,
1820	       sim->max_tagged_dev_openings, sim->max_dev_openings);
1821}
1822
1823/*
1824 * Assumes that the SIM lock is held.
1825 */
1826static void
1827ctlfe_dump_queue(struct ctlfe_lun_softc *softc)
1828{
1829	struct ccb_hdr *hdr;
1830	struct cam_periph *periph;
1831	int num_items;
1832
1833	periph = softc->periph;
1834	num_items = 0;
1835
1836	STAILQ_FOREACH(hdr, &softc->work_queue, periph_links.stqe) {
1837		union ctl_io *io = hdr->io_ptr;
1838
1839		num_items++;
1840
1841		/*
1842		 * Only regular SCSI I/O is put on the work
1843		 * queue, so we can print sense here.  There may be no
1844		 * sense if it's no the queue for a DMA, but this serves to
1845		 * print out the CCB as well.
1846		 *
1847		 * XXX KDM switch this over to scsi_sense_print() when
1848		 * CTL is merged in with CAM.
1849		 */
1850		ctl_io_error_print(io, NULL);
1851
1852		/*
1853		 * Print DMA status if we are DMA_QUEUED.
1854		 */
1855		if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) {
1856			xpt_print(periph->path,
1857			    "Total %u, Current %u, Resid %u\n",
1858			    io->scsiio.kern_total_len,
1859			    io->scsiio.kern_data_len,
1860			    io->scsiio.kern_data_resid);
1861		}
1862	}
1863
1864	xpt_print(periph->path, "%d requests waiting for CCBs\n", num_items);
1865	xpt_print(periph->path, "%d CTIOs outstanding\n", softc->ctios_sent);
1866}
1867
1868/*
1869 * Datamove/done routine called by CTL.  Put ourselves on the queue to
1870 * receive a CCB from CAM so we can queue the continue I/O request down
1871 * to the adapter.
1872 */
1873static void
1874ctlfe_datamove(union ctl_io *io)
1875{
1876	union ccb *ccb;
1877	struct cam_periph *periph;
1878	struct ctlfe_lun_softc *softc;
1879
1880	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
1881	    ("Unexpected io_type (%d) in ctlfe_datamove", io->io_hdr.io_type));
1882
1883	io->scsiio.ext_data_filled = 0;
1884	ccb = PRIV_CCB(io);
1885	periph = xpt_path_periph(ccb->ccb_h.path);
1886	cam_periph_lock(periph);
1887	softc = (struct ctlfe_lun_softc *)periph->softc;
1888	io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;
1889	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
1890		io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
1891	STAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
1892			  periph_links.stqe);
1893	xpt_schedule(periph, CAM_PRIORITY_NORMAL);
1894	cam_periph_unlock(periph);
1895}
1896
1897static void
1898ctlfe_done(union ctl_io *io)
1899{
1900	union ccb *ccb;
1901	struct cam_periph *periph;
1902	struct ctlfe_lun_softc *softc;
1903
1904	ccb = PRIV_CCB(io);
1905	periph = xpt_path_periph(ccb->ccb_h.path);
1906	cam_periph_lock(periph);
1907	softc = (struct ctlfe_lun_softc *)periph->softc;
1908
1909	if (io->io_hdr.io_type == CTL_IO_TASK) {
1910		/*
1911		 * Send the notify acknowledge down to the SIM, to let it
1912		 * know we processed the task management command.
1913		 */
1914		ccb->ccb_h.status = CAM_REQ_INPROG;
1915		ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
1916		switch (io->taskio.task_status) {
1917		case CTL_TASK_FUNCTION_COMPLETE:
1918			ccb->cna2.arg = CAM_RSP_TMF_COMPLETE;
1919			break;
1920		case CTL_TASK_FUNCTION_SUCCEEDED:
1921			ccb->cna2.arg = CAM_RSP_TMF_SUCCEEDED;
1922			ccb->ccb_h.flags |= CAM_SEND_STATUS;
1923			break;
1924		case CTL_TASK_FUNCTION_REJECTED:
1925			ccb->cna2.arg = CAM_RSP_TMF_REJECTED;
1926			ccb->ccb_h.flags |= CAM_SEND_STATUS;
1927			break;
1928		case CTL_TASK_LUN_DOES_NOT_EXIST:
1929			ccb->cna2.arg = CAM_RSP_TMF_INCORRECT_LUN;
1930			ccb->ccb_h.flags |= CAM_SEND_STATUS;
1931			break;
1932		case CTL_TASK_FUNCTION_NOT_SUPPORTED:
1933			ccb->cna2.arg = CAM_RSP_TMF_FAILED;
1934			ccb->ccb_h.flags |= CAM_SEND_STATUS;
1935			break;
1936		}
1937		ccb->cna2.arg |= scsi_3btoul(io->taskio.task_resp) << 8;
1938		xpt_action(ccb);
1939	} else if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) {
1940		ctlfe_requeue_ccb(periph, ccb, /* unlock */1);
1941		return;
1942	} else {
1943		io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
1944		STAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
1945				  periph_links.stqe);
1946		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
1947	}
1948
1949	cam_periph_unlock(periph);
1950}
1951
1952static void
1953ctlfe_dump(void)
1954{
1955	struct ctlfe_softc *bus_softc;
1956	struct ctlfe_lun_softc *lun_softc;
1957
1958	STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) {
1959		ctlfe_dump_sim(bus_softc->sim);
1960		STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links)
1961			ctlfe_dump_queue(lun_softc);
1962	}
1963}
1964