scsi_ctl.c revision 313365
1/*-
2 * Copyright (c) 2008, 2009 Silicon Graphics International Corp.
3 * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions, and the following disclaimer,
11 *    without modification.
12 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13 *    substantially similar to the "NO WARRANTY" disclaimer below
14 *    ("Disclaimer") and any redistribution must be conditioned upon
15 *    including a substantially similar Disclaimer requirement for further
16 *    binary redistribution.
17 *
18 * NO WARRANTY
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
22 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
27 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
28 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGES.
30 *
31 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $
32 */
33/*
34 * Peripheral driver interface between CAM and CTL (CAM Target Layer).
35 *
36 * Author: Ken Merry <ken@FreeBSD.org>
37 */
38
39#include <sys/cdefs.h>
40__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/scsi_ctl.c 313365 2017-02-07 01:42:53Z mav $");
41
42#include <sys/param.h>
43#include <sys/queue.h>
44#include <sys/systm.h>
45#include <sys/kernel.h>
46#include <sys/lock.h>
47#include <sys/mutex.h>
48#include <sys/condvar.h>
49#include <sys/malloc.h>
50#include <sys/bus.h>
51#include <sys/endian.h>
52#include <sys/sbuf.h>
53#include <sys/sysctl.h>
54#include <sys/types.h>
55#include <sys/systm.h>
56#include <machine/bus.h>
57
58#include <cam/cam.h>
59#include <cam/cam_ccb.h>
60#include <cam/cam_periph.h>
61#include <cam/cam_queue.h>
62#include <cam/cam_xpt_periph.h>
63#include <cam/cam_debug.h>
64#include <cam/cam_sim.h>
65#include <cam/cam_xpt.h>
66
67#include <cam/scsi/scsi_all.h>
68#include <cam/scsi/scsi_message.h>
69
70#include <cam/ctl/ctl_io.h>
71#include <cam/ctl/ctl.h>
72#include <cam/ctl/ctl_frontend.h>
73#include <cam/ctl/ctl_util.h>
74#include <cam/ctl/ctl_error.h>
75
76struct ctlfe_softc {
77	struct ctl_port	port;
78	path_id_t	path_id;
79	target_id_t	target_id;
80	uint32_t	hba_misc;
81	u_int		maxio;
82	struct cam_sim *sim;
83	char		port_name[DEV_IDLEN];
84	struct mtx	lun_softc_mtx;
85	STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list;
86	STAILQ_ENTRY(ctlfe_softc) links;
87};
88
89STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list;
90struct mtx ctlfe_list_mtx;
91static char ctlfe_mtx_desc[] = "ctlfelist";
92#ifdef CTLFE_INIT_ENABLE
93static int ctlfe_max_targets = 1;
94static int ctlfe_num_targets = 0;
95#endif
96
97typedef enum {
98	CTLFE_LUN_NONE		= 0x00,
99	CTLFE_LUN_WILDCARD	= 0x01
100} ctlfe_lun_flags;
101
102struct ctlfe_lun_softc {
103	struct ctlfe_softc *parent_softc;
104	struct cam_periph *periph;
105	ctlfe_lun_flags flags;
106	uint64_t ccbs_alloced;
107	uint64_t ccbs_freed;
108	uint64_t ctios_sent;
109	uint64_t ctios_returned;
110	uint64_t atios_alloced;
111	uint64_t atios_freed;
112	uint64_t inots_alloced;
113	uint64_t inots_freed;
114	/* bus_dma_tag_t dma_tag; */
115	TAILQ_HEAD(, ccb_hdr) work_queue;
116	STAILQ_ENTRY(ctlfe_lun_softc) links;
117};
118
119typedef enum {
120	CTLFE_CMD_NONE		= 0x00,
121	CTLFE_CMD_PIECEWISE	= 0x01
122} ctlfe_cmd_flags;
123
124struct ctlfe_cmd_info {
125	int cur_transfer_index;
126	size_t cur_transfer_off;
127	ctlfe_cmd_flags flags;
128	/*
129	 * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16
130	 * bytes on amd64.  So with 32 elements, this is 256 bytes on
131	 * i386 and 512 bytes on amd64.
132	 */
133#define CTLFE_MAX_SEGS	32
134	bus_dma_segment_t cam_sglist[CTLFE_MAX_SEGS];
135};
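/*
 * Illustrative sketch only: the size math in the comment inside
 * struct ctlfe_cmd_info could be pinned down with a compile-time check,
 * assuming the stock CTASSERT() macro from <sys/systm.h> and the 16-byte
 * amd64 bus_dma_segment_t.  Kept under #if 0 so it has no effect:
 */
#if 0
#ifdef __amd64__
CTASSERT(sizeof(bus_dma_segment_t) == 16);
CTASSERT(sizeof(((struct ctlfe_cmd_info *)0)->cam_sglist) == 512);
#endif
#endif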
136
137/*
138 * When we register the adapter/bus, request that this many ctl_ios be
139 * allocated.  This should be the maximum supported by the adapter, but we
140 * currently don't have a way to get that back from the path inquiry.
141 * XXX KDM add that to the path inquiry.
142 */
143#define	CTLFE_REQ_CTL_IO	4096
144/*
145 * Number of Accept Target I/O CCBs to allocate and queue down to the
146 * adapter per LUN.
147 * XXX KDM should this be controlled by CTL?
148 */
149#define	CTLFE_ATIO_PER_LUN	1024
150/*
151 * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to
152 * allocate and queue down to the adapter per LUN.
153 * XXX KDM should this be controlled by CTL?
154 */
155#define	CTLFE_IN_PER_LUN	1024
156
157/*
158 * Timeout (in seconds) on CTIO CCB allocation for doing a DMA or sending
159 * status to the initiator.  The SIM is expected to have its own timeouts,
160 * so we're not putting this timeout around the CCB execution time.  The
161 * SIM should timeout and let us know if it has an issue.
162 */
163#define	CTLFE_DMA_TIMEOUT	60
164
165/*
166 * Turn this on to enable extra debugging prints.
167 */
168#if 0
169#define	CTLFE_DEBUG
170#endif
171
172/*
173 * Use randomly assigned WWNN/WWPN values.  This is to work around an issue
174 * in the FreeBSD initiator that makes it unable to rescan the target if
175 * the target gets rebooted and the WWNN/WWPN stay the same.
176 */
177#if 0
178#define	RANDOM_WWNN
179#endif
180
181MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface");
182
183#define	io_ptr		ppriv_ptr0
184
185/* This is only used in the CTIO */
186#define	ccb_atio	ppriv_ptr1
187
188#define PRIV_CCB(io)	((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[0])
189#define PRIV_INFO(io)	((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[1])
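/*
 * Illustrative note: a CCB and its ctl_io point at each other so either
 * side of a transaction can find its peer.  The pattern, as used in
 * ctlfedone() when an ATIO comes in, looks like:
 *
 *	io = done_ccb->ccb_h.io_ptr;	(CCB -> ctl_io)
 *	PRIV_CCB(io) = done_ccb;	(ctl_io -> CCB)
 *	PRIV_INFO(io) = cmd_info;	(per-command S/G state)
 *	done_ccb->ccb_h.io_ptr = io;	(restored after ctl_zero_io())
 */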
190
191int			ctlfeinitialize(void);
192void			ctlfeshutdown(void);
193static periph_init_t	ctlfeperiphinit;
194static void		ctlfeasync(void *callback_arg, uint32_t code,
195				   struct cam_path *path, void *arg);
196static periph_ctor_t	ctlferegister;
197static periph_oninv_t	ctlfeoninvalidate;
198static periph_dtor_t	ctlfecleanup;
199static periph_start_t	ctlfestart;
200static void		ctlfedone(struct cam_periph *periph,
201				  union ccb *done_ccb);
202
203static void 		ctlfe_onoffline(void *arg, int online);
204static void 		ctlfe_online(void *arg);
205static void 		ctlfe_offline(void *arg);
206static int 		ctlfe_lun_enable(void *arg, int lun_id);
207static int 		ctlfe_lun_disable(void *arg, int lun_id);
208static void		ctlfe_dump_sim(struct cam_sim *sim);
209static void		ctlfe_dump_queue(struct ctlfe_lun_softc *softc);
210static void 		ctlfe_datamove(union ctl_io *io);
211static void 		ctlfe_done(union ctl_io *io);
212static void 		ctlfe_dump(void);
213
214static struct periph_driver ctlfe_driver =
215{
216	ctlfeperiphinit, "ctl",
217	TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0,
218	CAM_PERIPH_DRV_EARLY
219};
220
221static struct ctl_frontend ctlfe_frontend =
222{
223	.name = "camtgt",
224	.init = ctlfeinitialize,
225	.fe_dump = ctlfe_dump,
226	.shutdown = ctlfeshutdown,
227};
228CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend);
229
230void
231ctlfeshutdown(void)
232{
233	return;
234}
235
236int
237ctlfeinitialize(void)
238{
239
240	STAILQ_INIT(&ctlfe_softc_list);
241	mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF);
242	periphdriver_register(&ctlfe_driver);
243	return (0);
244}
245
246void
247ctlfeperiphinit(void)
248{
249	cam_status status;
250
251	status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED |
252				    AC_CONTRACT, ctlfeasync, NULL, NULL);
253	if (status != CAM_REQ_CMP) {
254		printf("ctl: Failed to attach async callback due to CAM "
255		       "status 0x%x!\n", status);
256	}
257}
258
259static void
260ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
261{
262	struct ctlfe_softc *softc;
263
264#ifdef CTLFEDEBUG
265	printf("%s: entered\n", __func__);
266#endif
267
268	mtx_lock(&ctlfe_list_mtx);
269	STAILQ_FOREACH(softc, &ctlfe_softc_list, links) {
270		if (softc->path_id == xpt_path_path_id(path))
271			break;
272	}
273	mtx_unlock(&ctlfe_list_mtx);
274
275	/*
276	 * When a new path gets registered, and it is capable of target
277	 * mode, go ahead and attach.  Later on, we may need to be more
278	 * selective, but for now this will be sufficient.
279 	 */
280	switch (code) {
281	case AC_PATH_REGISTERED: {
282		struct ctl_port *port;
283		struct ccb_pathinq *cpi;
284		int retval;
285
286		cpi = (struct ccb_pathinq *)arg;
287
288		/* Don't attach if it doesn't support target mode */
289		if ((cpi->target_sprt & PIT_PROCESSOR) == 0) {
290#ifdef CTLFEDEBUG
291			printf("%s: SIM %s%d doesn't support target mode\n",
292			       __func__, cpi->dev_name, cpi->unit_number);
293#endif
294			break;
295		}
296
297		if (softc != NULL) {
298#ifdef CTLFEDEBUG
299			printf("%s: CTL port for CAM path %u already exists\n",
300			       __func__, xpt_path_path_id(path));
301#endif
302			break;
303		}
304
305#ifdef CTLFE_INIT_ENABLE
306		if (ctlfe_num_targets >= ctlfe_max_targets) {
307			union ccb *ccb;
308
309			ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP,
310						  M_NOWAIT | M_ZERO);
311			if (ccb == NULL) {
312				printf("%s: unable to malloc CCB!\n", __func__);
313				return;
314			}
315			xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE);
316
317			ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
318			ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
319			ccb->knob.xport_specific.fc.role = KNOB_ROLE_INITIATOR;
320
321			xpt_action(ccb);
322
323			if ((ccb->ccb_h.status & CAM_STATUS_MASK) !=
324			     CAM_REQ_CMP) {
325				printf("%s: SIM %s%d (path id %d) initiator "
326				       "enable failed with status %#x\n",
327				       __func__, cpi->dev_name,
328				       cpi->unit_number, cpi->ccb_h.path_id,
329				       ccb->ccb_h.status);
330			} else {
331				printf("%s: SIM %s%d (path id %d) initiator "
332				       "enable succeeded\n",
333				       __func__, cpi->dev_name,
334				       cpi->unit_number, cpi->ccb_h.path_id);
335			}
336
337			free(ccb, M_TEMP);
338
339			break;
340		} else {
341			ctlfe_num_targets++;
342		}
343
344		printf("%s: ctlfe_num_targets = %d\n", __func__,
345		       ctlfe_num_targets);
346#endif /* CTLFE_INIT_ENABLE */
347
348		/*
349		 * We're in an interrupt context here, so we have to
350		 * use M_NOWAIT.  Of course this means trouble if we
351		 * can't allocate memory.
352		 */
353		softc = malloc(sizeof(*softc), M_CTLFE, M_NOWAIT | M_ZERO);
354		if (softc == NULL) {
355			printf("%s: unable to malloc %zd bytes for softc\n",
356			       __func__, sizeof(*softc));
357			return;
358		}
359
360		softc->path_id = cpi->ccb_h.path_id;
361		softc->target_id = cpi->initiator_id;
362		softc->sim = xpt_path_sim(path);
363		softc->hba_misc = cpi->hba_misc;
364		if (cpi->maxio != 0)
365			softc->maxio = cpi->maxio;
366		else
367			softc->maxio = DFLTPHYS;
368		mtx_init(&softc->lun_softc_mtx, "LUN softc mtx", NULL, MTX_DEF);
369		STAILQ_INIT(&softc->lun_softc_list);
370
371		port = &softc->port;
372		port->frontend = &ctlfe_frontend;
373
374		/*
375		 * XXX KDM should we be more accurate here ?
376		 */
377		if (cpi->transport == XPORT_FC)
378			port->port_type = CTL_PORT_FC;
379		else if (cpi->transport == XPORT_SAS)
380			port->port_type = CTL_PORT_SAS;
381		else
382			port->port_type = CTL_PORT_SCSI;
383
384		/* XXX KDM what should the real number be here? */
385		port->num_requested_ctl_io = 4096;
386		snprintf(softc->port_name, sizeof(softc->port_name),
387			 "%s%d", cpi->dev_name, cpi->unit_number);
388		/*
389		 * XXX KDM it would be nice to allocate storage in the
390		 * frontend structure itself.
391	 	 */
392		port->port_name = softc->port_name;
393		port->physical_port = cpi->bus_id;
394		port->virtual_port = 0;
395		port->port_online = ctlfe_online;
396		port->port_offline = ctlfe_offline;
397		port->onoff_arg = softc;
398		port->lun_enable = ctlfe_lun_enable;
399		port->lun_disable = ctlfe_lun_disable;
400		port->targ_lun_arg = softc;
401		port->fe_datamove = ctlfe_datamove;
402		port->fe_done = ctlfe_done;
403		/*
404		 * XXX KDM the path inquiry doesn't give us the maximum
405		 * number of targets supported.
406		 */
407		port->max_targets = cpi->max_target;
408		port->max_target_id = cpi->max_target;
409		port->targ_port = -1;
410
411		/*
412		 * XXX KDM need to figure out whether we're the master or
413		 * slave.
414		 */
415#ifdef CTLFEDEBUG
416		printf("%s: calling ctl_port_register() for %s%d\n",
417		       __func__, cpi->dev_name, cpi->unit_number);
418#endif
419		retval = ctl_port_register(port);
420		if (retval != 0) {
421			printf("%s: ctl_port_register() failed with "
422			       "error %d!\n", __func__, retval);
423			mtx_destroy(&softc->lun_softc_mtx);
424			free(softc, M_CTLFE);
425			break;
426		} else {
427			mtx_lock(&ctlfe_list_mtx);
428			STAILQ_INSERT_TAIL(&ctlfe_softc_list, softc, links);
429			mtx_unlock(&ctlfe_list_mtx);
430		}
431
432		break;
433	}
434	case AC_PATH_DEREGISTERED: {
435
436		if (softc != NULL) {
437			/*
438			 * XXX KDM are we certain at this point that there
439			 * are no outstanding commands for this frontend?
440			 */
441			mtx_lock(&ctlfe_list_mtx);
442			STAILQ_REMOVE(&ctlfe_softc_list, softc, ctlfe_softc,
443			    links);
444			mtx_unlock(&ctlfe_list_mtx);
445			ctl_port_deregister(&softc->port);
446			mtx_destroy(&softc->lun_softc_mtx);
447			free(softc, M_CTLFE);
448		}
449		break;
450	}
451	case AC_CONTRACT: {
452		struct ac_contract *ac;
453
454		ac = (struct ac_contract *)arg;
455
456		switch (ac->contract_number) {
457		case AC_CONTRACT_DEV_CHG: {
458			struct ac_device_changed *dev_chg;
459			int retval;
460
461			dev_chg = (struct ac_device_changed *)ac->contract_data;
462
463			printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n",
464			       __func__, dev_chg->wwpn, dev_chg->port,
465			       xpt_path_path_id(path), dev_chg->target,
466			       (dev_chg->arrived == 0) ?  "left" : "arrived");
467
468			if (softc == NULL) {
469				printf("%s: CTL port for CAM path %u not "
470				       "found!\n", __func__,
471				       xpt_path_path_id(path));
472				break;
473			}
474			if (dev_chg->arrived != 0) {
475				retval = ctl_add_initiator(&softc->port,
476				    dev_chg->target, dev_chg->wwpn, NULL);
477			} else {
478				retval = ctl_remove_initiator(&softc->port,
479				    dev_chg->target);
480			}
481
482			if (retval < 0) {
483				printf("%s: could not %s port %d iid %u "
484				       "WWPN %#jx!\n", __func__,
485				       (dev_chg->arrived != 0) ? "add" :
486				       "remove", softc->port.targ_port,
487				       dev_chg->target,
488				       (uintmax_t)dev_chg->wwpn);
489			}
490			break;
491		}
492		default:
493			printf("%s: unsupported contract number %ju\n",
494			       __func__, (uintmax_t)ac->contract_number);
495			break;
496		}
497		break;
498	}
499	default:
500		break;
501	}
502}
503
504static cam_status
505ctlferegister(struct cam_periph *periph, void *arg)
506{
507	struct ctlfe_softc *bus_softc;
508	struct ctlfe_lun_softc *softc;
509	union ccb en_lun_ccb;
510	cam_status status;
511	int i;
512
513	softc = (struct ctlfe_lun_softc *)arg;
514	bus_softc = softc->parent_softc;
515
516	TAILQ_INIT(&softc->work_queue);
517	softc->periph = periph;
518	periph->softc = softc;
519
520	xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
521	en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
522	en_lun_ccb.cel.grp6_len = 0;
523	en_lun_ccb.cel.grp7_len = 0;
524	en_lun_ccb.cel.enable = 1;
525	xpt_action(&en_lun_ccb);
526	status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
527	if (status != CAM_REQ_CMP) {
528		xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n",
529			  __func__, en_lun_ccb.ccb_h.status);
530		return (status);
531	}
532
533	status = CAM_REQ_CMP;
534
535	for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) {
536		union ccb *new_ccb;
537		union ctl_io *new_io;
538		struct ctlfe_cmd_info *cmd_info;
539
540		new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
541					      M_ZERO|M_NOWAIT);
542		if (new_ccb == NULL) {
543			status = CAM_RESRC_UNAVAIL;
544			break;
545		}
546		new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref);
547		if (new_io == NULL) {
548			free(new_ccb, M_CTLFE);
549			status = CAM_RESRC_UNAVAIL;
550			break;
551		}
552		cmd_info = malloc(sizeof(*cmd_info), M_CTLFE,
553		    M_ZERO | M_NOWAIT);
554		if (cmd_info == NULL) {
555			ctl_free_io(new_io);
556			free(new_ccb, M_CTLFE);
557			status = CAM_RESRC_UNAVAIL;
558			break;
559		}
560		PRIV_INFO(new_io) = cmd_info;
561		softc->atios_alloced++;
562		new_ccb->ccb_h.io_ptr = new_io;
563
564		xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
565		new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
566		new_ccb->ccb_h.cbfcnp = ctlfedone;
567		new_ccb->ccb_h.flags |= CAM_UNLOCKED;
568		xpt_action(new_ccb);
569		status = new_ccb->ccb_h.status;
570		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
571			free(cmd_info, M_CTLFE);
572			ctl_free_io(new_io);
573			free(new_ccb, M_CTLFE);
574			break;
575		}
576	}
577
578	status = cam_periph_acquire(periph);
579	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
580		xpt_print(periph->path, "%s: could not acquire reference "
581			  "count, status = %#x\n", __func__, status);
582		return (status);
583	}
584
585	if (i == 0) {
586		xpt_print(periph->path, "%s: could not allocate ATIO CCBs, "
587			  "status 0x%x\n", __func__, status);
588		return (CAM_REQ_CMP_ERR);
589	}
590
591	for (i = 0; i < CTLFE_IN_PER_LUN; i++) {
592		union ccb *new_ccb;
593		union ctl_io *new_io;
594
595		new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
596					      M_ZERO|M_NOWAIT);
597		if (new_ccb == NULL) {
598			status = CAM_RESRC_UNAVAIL;
599			break;
600		}
601		new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref);
602		if (new_io == NULL) {
603			free(new_ccb, M_CTLFE);
604			status = CAM_RESRC_UNAVAIL;
605			break;
606		}
607		softc->inots_alloced++;
608		new_ccb->ccb_h.io_ptr = new_io;
609
610		xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
611		new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
612		new_ccb->ccb_h.cbfcnp = ctlfedone;
613		new_ccb->ccb_h.flags |= CAM_UNLOCKED;
614		xpt_action(new_ccb);
615		status = new_ccb->ccb_h.status;
616		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
617			/*
618			 * Note that we don't free the CCB here.  If the
619			 * status is not CAM_REQ_INPROG, then we're
620			 * probably talking to a SIM that says it is
621			 * target-capable but doesn't support the
622			 * XPT_IMMEDIATE_NOTIFY CCB.  i.e. it supports the
623			 * older API.  In that case, it'll call xpt_done()
624			 * on the CCB, and we need to free it in our done
625			 * routine as a result.
626			 */
627			break;
628		}
629	}
630	if ((i == 0)
631	 || (status != CAM_REQ_INPROG)) {
632		xpt_print(periph->path, "%s: could not allocate immediate "
633			  "notify CCBs, status 0x%x\n", __func__, status);
634		return (CAM_REQ_CMP_ERR);
635	}
636	mtx_lock(&bus_softc->lun_softc_mtx);
637	STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links);
638	mtx_unlock(&bus_softc->lun_softc_mtx);
639	return (CAM_REQ_CMP);
640}
641
642static void
643ctlfeoninvalidate(struct cam_periph *periph)
644{
645	union ccb en_lun_ccb;
646	cam_status status;
647	struct ctlfe_softc *bus_softc;
648	struct ctlfe_lun_softc *softc;
649
650	softc = (struct ctlfe_lun_softc *)periph->softc;
651
652	xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
653	en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
654	en_lun_ccb.cel.grp6_len = 0;
655	en_lun_ccb.cel.grp7_len = 0;
656	en_lun_ccb.cel.enable = 0;
657	xpt_action(&en_lun_ccb);
658	status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
659	if (status != CAM_REQ_CMP) {
660		xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n",
661			  __func__, en_lun_ccb.ccb_h.status);
662		/*
663		 * XXX KDM what do we do now?
664		 */
665	}
666
667	bus_softc = softc->parent_softc;
668	mtx_lock(&bus_softc->lun_softc_mtx);
669	STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links);
670	mtx_unlock(&bus_softc->lun_softc_mtx);
671}
672
673static void
674ctlfecleanup(struct cam_periph *periph)
675{
676	struct ctlfe_lun_softc *softc;
677
678	softc = (struct ctlfe_lun_softc *)periph->softc;
679
680	KASSERT(softc->ccbs_freed == softc->ccbs_alloced, ("%s: "
681		"ccbs_freed %ju != ccbs_alloced %ju", __func__,
682		softc->ccbs_freed, softc->ccbs_alloced));
683	KASSERT(softc->ctios_returned == softc->ctios_sent, ("%s: "
684		"ctios_returned %ju != ctios_sent %ju", __func__,
685		softc->ctios_returned, softc->ctios_sent));
686	KASSERT(softc->atios_freed == softc->atios_alloced, ("%s: "
687		"atios_freed %ju != atios_alloced %ju", __func__,
688		softc->atios_freed, softc->atios_alloced));
689	KASSERT(softc->inots_freed == softc->inots_alloced, ("%s: "
690		"inots_freed %ju != inots_alloced %ju", __func__,
691		softc->inots_freed, softc->inots_alloced));
692
693	free(softc, M_CTLFE);
694}
695
696static void
697ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io,
698    ccb_flags *flags, uint8_t **data_ptr, uint32_t *dxfer_len,
699    u_int16_t *sglist_cnt)
700{
701	struct ctlfe_softc *bus_softc;
702	struct ctlfe_cmd_info *cmd_info;
703	struct ctl_sg_entry *ctl_sglist;
704	bus_dma_segment_t *cam_sglist;
705	size_t off;
706	int i, idx;
707
708	cmd_info = PRIV_INFO(io);
709	bus_softc = softc->parent_softc;
710
711	/*
712	 * Set the direction, relative to the initiator.
713	 */
714	*flags &= ~CAM_DIR_MASK;
715	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
716		*flags |= CAM_DIR_IN;
717	else
718		*flags |= CAM_DIR_OUT;
719
720	*flags &= ~CAM_DATA_MASK;
721	idx = cmd_info->cur_transfer_index;
722	off = cmd_info->cur_transfer_off;
723	cmd_info->flags &= ~CTLFE_CMD_PIECEWISE;
724	if (io->scsiio.kern_sg_entries == 0) {	/* No S/G list. */
725
726		/* One time shift for SRR offset. */
727		off += io->scsiio.ext_data_filled;
728		io->scsiio.ext_data_filled = 0;
729
730		*data_ptr = io->scsiio.kern_data_ptr + off;
731		if (io->scsiio.kern_data_len - off <= bus_softc->maxio) {
732			*dxfer_len = io->scsiio.kern_data_len - off;
733		} else {
734			*dxfer_len = bus_softc->maxio;
735			cmd_info->cur_transfer_off += bus_softc->maxio;
736			cmd_info->flags |= CTLFE_CMD_PIECEWISE;
737		}
738		*sglist_cnt = 0;
739
740		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
741			*flags |= CAM_DATA_PADDR;
742		else
743			*flags |= CAM_DATA_VADDR;
744	} else {	/* S/G list with physical or virtual pointers. */
745		ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
746
747		/* One time shift for SRR offset. */
748		while (io->scsiio.ext_data_filled >= ctl_sglist[idx].len - off) {
749			io->scsiio.ext_data_filled -= ctl_sglist[idx].len - off;
750			idx++;
751			off = 0;
752		}
753		off += io->scsiio.ext_data_filled;
754		io->scsiio.ext_data_filled = 0;
755
756		cam_sglist = cmd_info->cam_sglist;
757		*dxfer_len = 0;
758		for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) {
759			cam_sglist[i].ds_addr = (bus_addr_t)ctl_sglist[i + idx].addr + off;
760			if (ctl_sglist[i + idx].len - off <= bus_softc->maxio - *dxfer_len) {
761				cam_sglist[i].ds_len = ctl_sglist[idx + i].len - off;
762				*dxfer_len += cam_sglist[i].ds_len;
763			} else {
764				cam_sglist[i].ds_len = bus_softc->maxio - *dxfer_len;
765				cmd_info->cur_transfer_index = idx + i;
766				cmd_info->cur_transfer_off = cam_sglist[i].ds_len + off;
767				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
768				*dxfer_len += cam_sglist[i].ds_len;
769				if (ctl_sglist[i].len != 0)
770					i++;
771				break;
772			}
773			if (i == (CTLFE_MAX_SEGS - 1) &&
774			    idx + i < (io->scsiio.kern_sg_entries - 1)) {
775				cmd_info->cur_transfer_index = idx + i + 1;
776				cmd_info->cur_transfer_off = 0;
777				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
778				i++;
779				break;
780			}
781			off = 0;
782		}
783		*sglist_cnt = i;
784		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
785			*flags |= CAM_DATA_SG_PADDR;
786		else
787			*flags |= CAM_DATA_SG;
788		*data_ptr = (uint8_t *)cam_sglist;
789	}
790}
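/*
 * Worked example (illustrative values): assume bus_softc->maxio is 128k
 * and CTL hands down a single 1MB virtual buffer (kern_sg_entries == 0).
 * The first call to ctlfedata() returns *dxfer_len = 128k, advances
 * cur_transfer_off by 128k and sets CTLFE_CMD_PIECEWISE.  Each time the
 * resulting CTIO completes, ctlfedone() sees the flag and calls
 * ctlfedata() again for the next 128k; the final piece fits within maxio,
 * so it is returned without the flag and the normal datamove-done/status
 * path runs.
 */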
791
792static void
793ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
794{
795	struct ctlfe_lun_softc *softc;
796	struct ctlfe_cmd_info *cmd_info;
797	struct ccb_hdr *ccb_h;
798	struct ccb_accept_tio *atio;
799	struct ccb_scsiio *csio;
800	uint8_t *data_ptr;
801	uint32_t dxfer_len;
802	ccb_flags flags;
803	union ctl_io *io;
804	uint8_t scsi_status;
805
806	softc = (struct ctlfe_lun_softc *)periph->softc;
807	softc->ccbs_alloced++;
808
809	ccb_h = TAILQ_FIRST(&softc->work_queue);
810	if (ccb_h == NULL) {
811		softc->ccbs_freed++;
812		xpt_release_ccb(start_ccb);
813		return;
814	}
815
816	/* Take the ATIO off the work queue */
817	TAILQ_REMOVE(&softc->work_queue, ccb_h, periph_links.tqe);
818	atio = (struct ccb_accept_tio *)ccb_h;
819	io = (union ctl_io *)ccb_h->io_ptr;
820	csio = &start_ccb->csio;
821
822	flags = atio->ccb_h.flags &
823		(CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);
824	cmd_info = PRIV_INFO(io);
825	cmd_info->cur_transfer_index = 0;
826	cmd_info->cur_transfer_off = 0;
827	cmd_info->flags = 0;
828
829	if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) {
830		/*
831		 * Datamove call, we need to setup the S/G list.
832		 */
833		ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len,
834		    &csio->sglist_cnt);
835	} else {
836		/*
837		 * We're done, send status back.
838		 */
839		if ((io->io_hdr.flags & CTL_FLAG_ABORT) &&
840		    (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) {
841			io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;
842
843			/*
844			 * If this command was aborted, we don't
845			 * need to send status back to the SIM.
846			 * Just free the CTIO and ctl_io, and
847			 * recycle the ATIO back to the SIM.
848			 */
849			xpt_print(periph->path, "%s: aborted "
850				  "command 0x%04x discarded\n",
851				  __func__, io->scsiio.tag_num);
852			/*
853			 * For a wildcard attachment, commands can
854			 * come in with a specific target/lun.  Reset
855			 * the target and LUN fields back to the
856			 * wildcard values before we send them back
857			 * down to the SIM.  The SIM has a wildcard
858			 * LUN enabled, not whatever target/lun
859			 * these happened to be.
860			 */
861			if (softc->flags & CTLFE_LUN_WILDCARD) {
862				atio->ccb_h.target_id = CAM_TARGET_WILDCARD;
863				atio->ccb_h.target_lun = CAM_LUN_WILDCARD;
864			}
865
866			if (atio->ccb_h.func_code != XPT_ACCEPT_TARGET_IO) {
867				xpt_print(periph->path, "%s: func_code "
868					  "is %#x\n", __func__,
869					  atio->ccb_h.func_code);
870			}
871			start_ccb->ccb_h.func_code = XPT_ABORT;
872			start_ccb->cab.abort_ccb = (union ccb *)atio;
873
874			/* Tell the SIM that we've aborted this ATIO */
875			xpt_action(start_ccb);
876			softc->ccbs_freed++;
877			xpt_release_ccb(start_ccb);
878
879			/*
880			 * Send the ATIO back down to the SIM.
881			 */
882			xpt_action((union ccb *)atio);
883
884			/*
885			 * If we still have work to do, ask for
886			 * another CCB.  Otherwise, deactivate our
887			 * callout.
888			 */
889			if (!TAILQ_EMPTY(&softc->work_queue))
890				xpt_schedule(periph, /*priority*/ 1);
891			return;
892		}
893		data_ptr = NULL;
894		dxfer_len = 0;
895		csio->sglist_cnt = 0;
896	}
897	scsi_status = 0;
898	if ((io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) &&
899	    (cmd_info->flags & CTLFE_CMD_PIECEWISE) == 0 &&
900	    ((io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) == 0 ||
901	     io->io_hdr.status == CTL_SUCCESS)) {
902		flags |= CAM_SEND_STATUS;
903		scsi_status = io->scsiio.scsi_status;
904		csio->sense_len = io->scsiio.sense_len;
905#ifdef CTLFEDEBUG
906		printf("%s: tag %04x status %x\n", __func__,
907		       atio->tag_id, io->io_hdr.status);
908#endif
909		if (csio->sense_len != 0) {
910			csio->sense_data = io->scsiio.sense_data;
911			flags |= CAM_SEND_SENSE;
912		} else if (scsi_status == SCSI_STATUS_CHECK_COND) {
913			xpt_print(periph->path, "%s: check condition "
914				  "with no sense\n", __func__);
915		}
916	}
917
918#ifdef CTLFEDEBUG
919	printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__,
920	       (flags & CAM_SEND_STATUS) ? "done" : "datamove",
921	       atio->tag_id, flags, data_ptr, dxfer_len);
922#endif
923
924	/*
925	 * Valid combinations:
926	 *  - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0,
927	 *    sglist_cnt = 0
928	 *  - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0,
929	 *    sglist_cnt = 0
930	 *  - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0,
931	 *    sglist_cnt != 0
932	 */
933#ifdef CTLFEDEBUG
934	if (((flags & CAM_SEND_STATUS)
935	  && (((flags & CAM_DATA_SG) != 0)
936	   || (dxfer_len != 0)
937	   || (csio->sglist_cnt != 0)))
938	 || (((flags & CAM_SEND_STATUS) == 0)
939	  && (dxfer_len == 0))
940	 || ((flags & CAM_DATA_SG)
941	  && (csio->sglist_cnt == 0))
942	 || (((flags & CAM_DATA_SG) == 0)
943	  && (csio->sglist_cnt != 0))) {
944		printf("%s: tag %04x cdb %02x flags %#x dxfer_len "
945		       "%d sg %u\n", __func__, atio->tag_id,
946		       atio_cdb_ptr(atio)[0], flags, dxfer_len,
947		       csio->sglist_cnt);
948		printf("%s: tag %04x io status %#x\n", __func__,
949		       atio->tag_id, io->io_hdr.status);
950	}
951#endif
952	cam_fill_ctio(csio,
953		      /*retries*/ 2,
954		      ctlfedone,
955		      flags,
956		      (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0,
957		      atio->tag_id,
958		      atio->init_id,
959		      scsi_status,
960		      /*data_ptr*/ data_ptr,
961		      /*dxfer_len*/ dxfer_len,
962		      /*timeout*/ 5 * 1000);
963	start_ccb->ccb_h.flags |= CAM_UNLOCKED;
964	start_ccb->ccb_h.ccb_atio = atio;
965	if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
966		io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
967	io->io_hdr.flags &= ~(CTL_FLAG_DMA_QUEUED | CTL_FLAG_STATUS_QUEUED);
968
969	softc->ctios_sent++;
970
971	cam_periph_unlock(periph);
972	xpt_action(start_ccb);
973	cam_periph_lock(periph);
974
975	/*
976	 * If we still have work to do, ask for another CCB.
977	 */
978	if (!TAILQ_EMPTY(&softc->work_queue))
979		xpt_schedule(periph, /*priority*/ 1);
980}
981
982static void
983ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb)
984{
985	struct ctlfe_lun_softc *softc;
986	union ctl_io *io;
987	struct ctlfe_cmd_info *cmd_info;
988
989	softc = (struct ctlfe_lun_softc *)periph->softc;
990	io = ccb->ccb_h.io_ptr;
991
992	switch (ccb->ccb_h.func_code) {
993	case XPT_ACCEPT_TARGET_IO:
994		softc->atios_freed++;
995		cmd_info = PRIV_INFO(io);
996		free(cmd_info, M_CTLFE);
997		break;
998	case XPT_IMMEDIATE_NOTIFY:
999	case XPT_NOTIFY_ACKNOWLEDGE:
1000		softc->inots_freed++;
1001		break;
1002	default:
1003		break;
1004	}
1005
1006	ctl_free_io(io);
1007	free(ccb, M_CTLFE);
1008
1009	KASSERT(softc->atios_freed <= softc->atios_alloced, ("%s: "
1010		"atios_freed %ju > atios_alloced %ju", __func__,
1011		softc->atios_freed, softc->atios_alloced));
1012	KASSERT(softc->inots_freed <= softc->inots_alloced, ("%s: "
1013		"inots_freed %ju > inots_alloced %ju", __func__,
1014		softc->inots_freed, softc->inots_alloced));
1015
1016	/*
1017	 * If we have received all of our CCBs, we can release our
1018	 * reference on the peripheral driver.  It will probably go away
1019	 * now.
1020	 */
1021	if ((softc->atios_freed == softc->atios_alloced)
1022	 && (softc->inots_freed == softc->inots_alloced)) {
1023		cam_periph_release_locked(periph);
1024	}
1025}
1026
1027static int
1028ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset)
1029{
1030	uint64_t lba;
1031	uint32_t num_blocks, nbc;
1032	uint8_t *cmdbyt = atio_cdb_ptr(atio);
1033
1034	nbc = offset >> 9;	/* ASSUMING 512 BYTE BLOCKS */
1035
1036	switch (cmdbyt[0]) {
1037	case READ_6:
1038	case WRITE_6:
1039	{
1040		struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt;
1041		lba = scsi_3btoul(cdb->addr);
1042		lba &= 0x1fffff;
1043		num_blocks = cdb->length;
1044		if (num_blocks == 0)
1045			num_blocks = 256;
1046		lba += nbc;
1047		num_blocks -= nbc;
1048		scsi_ulto3b(lba, cdb->addr);
1049		cdb->length = num_blocks;
1050		break;
1051	}
1052	case READ_10:
1053	case WRITE_10:
1054	{
1055		struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt;
1056		lba = scsi_4btoul(cdb->addr);
1057		num_blocks = scsi_2btoul(cdb->length);
1058		lba += nbc;
1059		num_blocks -= nbc;
1060		scsi_ulto4b(lba, cdb->addr);
1061		scsi_ulto2b(num_blocks, cdb->length);
1062		break;
1063	}
1064	case READ_12:
1065	case WRITE_12:
1066	{
1067		struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt;
1068		lba = scsi_4btoul(cdb->addr);
1069		num_blocks = scsi_4btoul(cdb->length);
1070		lba += nbc;
1071		num_blocks -= nbc;
1072		scsi_ulto4b(lba, cdb->addr);
1073		scsi_ulto4b(num_blocks, cdb->length);
1074		break;
1075	}
1076	case READ_16:
1077	case WRITE_16:
1078	{
1079		struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt;
1080		lba = scsi_8btou64(cdb->addr);
1081		num_blocks = scsi_4btoul(cdb->length);
1082		lba += nbc;
1083		num_blocks -= nbc;
1084		scsi_u64to8b(lba, cdb->addr);
1085		scsi_ulto4b(num_blocks, cdb->length);
1086		break;
1087	}
1088	default:
1089		return -1;
1090	}
1091	return (0);
1092}
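/*
 * Worked example (illustrative values): with the 512-byte block
 * assumption above, an SRR byte offset of 0x1000 gives nbc = 0x1000 >> 9
 * = 8.  A READ(10) that originally asked for LBA 1000, 64 blocks is
 * rewritten to LBA 1008, 56 blocks, so the reissued command resumes where
 * the initiator's data pointer stopped.  Opcodes other than the
 * READ/WRITE variants return -1 and the caller falls through to the
 * error path.
 */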
1093
1094static void
1095ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
1096{
1097	struct ctlfe_lun_softc *softc;
1098	struct ctlfe_softc *bus_softc;
1099	struct ctlfe_cmd_info *cmd_info;
1100	struct ccb_accept_tio *atio = NULL;
1101	union ctl_io *io = NULL;
1102	struct mtx *mtx;
1103
1104	KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0,
1105	    ("CCB in ctlfedone() without CAM_UNLOCKED flag"));
1106#ifdef CTLFE_DEBUG
1107	printf("%s: entered, func_code = %#x\n", __func__,
1108	       done_ccb->ccb_h.func_code);
1109#endif
1110
1111	/*
1112	 * At this point CTL has no known use case for device queue freezes.
1113	 * In case some SIM thinks differently -- drop its freeze right here.
1114	 */
1115	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1116		cam_release_devq(periph->path,
1117				 /*relsim_flags*/0,
1118				 /*reduction*/0,
1119				 /*timeout*/0,
1120				 /*getcount_only*/0);
1121		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1122	}
1123
1124	softc = (struct ctlfe_lun_softc *)periph->softc;
1125	bus_softc = softc->parent_softc;
1126	mtx = cam_periph_mtx(periph);
1127	mtx_lock(mtx);
1128
1129	/*
1130	 * If the peripheral is invalid, ATIOs and immediate notify CCBs
1131	 * need to be freed.  Most of the ATIOs and INOTs that come back
1132	 * will be CCBs that are being returned from the SIM as a result of
1133	 * our disabling the LUN.
1134	 *
1135	 * Other CCB types are handled in their respective cases below.
1136	 */
1137	if (periph->flags & CAM_PERIPH_INVALID) {
1138		switch (done_ccb->ccb_h.func_code) {
1139		case XPT_ACCEPT_TARGET_IO:
1140		case XPT_IMMEDIATE_NOTIFY:
1141		case XPT_NOTIFY_ACKNOWLEDGE:
1142			ctlfe_free_ccb(periph, done_ccb);
1143			goto out;
1144		default:
1145			break;
1146		}
1147
1148	}
1149	switch (done_ccb->ccb_h.func_code) {
1150	case XPT_ACCEPT_TARGET_IO: {
1151
1152		atio = &done_ccb->atio;
1153
1154 resubmit:
1155		/*
1156		 * Set up the preallocated ctl_io, pass it to CTL, and wait
1157		 * for the datamove or done callback.
1158		 */
1159		mtx_unlock(mtx);
1160		io = done_ccb->ccb_h.io_ptr;
1161		cmd_info = PRIV_INFO(io);
1162		ctl_zero_io(io);
1163
1164		/* Save pointers on both sides */
1165		PRIV_CCB(io) = done_ccb;
1166		PRIV_INFO(io) = cmd_info;
1167		done_ccb->ccb_h.io_ptr = io;
1168
1169		/*
1170		 * Only SCSI I/O comes down this path, resets, etc. come
1171		 * down the immediate notify path below.
1172		 */
1173		io->io_hdr.io_type = CTL_IO_SCSI;
1174		io->io_hdr.nexus.initid = atio->init_id;
1175		io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
1176		if (bus_softc->hba_misc & PIM_EXTLUNS) {
1177			io->io_hdr.nexus.targ_lun = ctl_decode_lun(
1178			    CAM_EXTLUN_BYTE_SWIZZLE(atio->ccb_h.target_lun));
1179		} else {
1180			io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun;
1181		}
1182		io->scsiio.tag_num = atio->tag_id;
1183		switch (atio->tag_action) {
1184		case CAM_TAG_ACTION_NONE:
1185			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
1186			break;
1187		case MSG_SIMPLE_TASK:
1188			io->scsiio.tag_type = CTL_TAG_SIMPLE;
1189			break;
1190		case MSG_HEAD_OF_QUEUE_TASK:
1191        		io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
1192			break;
1193		case MSG_ORDERED_TASK:
1194        		io->scsiio.tag_type = CTL_TAG_ORDERED;
1195			break;
1196		case MSG_ACA_TASK:
1197			io->scsiio.tag_type = CTL_TAG_ACA;
1198			break;
1199		default:
1200			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
1201			printf("%s: unhandled tag type %#x!!\n", __func__,
1202			       atio->tag_action);
1203			break;
1204		}
1205		if (atio->cdb_len > sizeof(io->scsiio.cdb)) {
1206			printf("%s: WARNING: CDB len %d > ctl_io space %zd\n",
1207			       __func__, atio->cdb_len, sizeof(io->scsiio.cdb));
1208		}
1209		io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb));
1210		bcopy(atio_cdb_ptr(atio), io->scsiio.cdb, io->scsiio.cdb_len);
1211
1212#ifdef CTLFEDEBUG
1213		printf("%s: %u:%u:%u: tag %04x CDB %02x\n", __func__,
1214		        io->io_hdr.nexus.initid,
1215		        io->io_hdr.nexus.targ_port,
1216		        io->io_hdr.nexus.targ_lun,
1217			io->scsiio.tag_num, io->scsiio.cdb[0]);
1218#endif
1219
1220		ctl_queue(io);
1221		return;
1222	}
1223	case XPT_CONT_TARGET_IO: {
1224		int srr = 0;
1225		uint32_t srr_off = 0;
1226
1227		atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio;
1228		io = (union ctl_io *)atio->ccb_h.io_ptr;
1229
1230		softc->ctios_returned++;
1231#ifdef CTLFEDEBUG
1232		printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n",
1233		       __func__, atio->tag_id, done_ccb->ccb_h.flags);
1234#endif
1235		/*
1236		 * Hack to handle the SRR case where the data pointer is pushed back.
1237		 */
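		/*
		 * Illustrative example: the SRR arrives as an extended
		 * MODIFY DATA POINTER message, so a msg_ptr[] of
		 * { 0x01, 5, 0, 0x00, 0x00, 0x10, 0x00 } decodes below to
		 * an SRR with a byte offset of 0x1000.
		 */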
1238		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV
1239		    && done_ccb->csio.msg_ptr != NULL
1240		    && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED
1241		    && done_ccb->csio.msg_ptr[1] == 5
1242       		    && done_ccb->csio.msg_ptr[2] == 0) {
1243			srr = 1;
1244			srr_off =
1245			    (done_ccb->csio.msg_ptr[3] << 24)
1246			    | (done_ccb->csio.msg_ptr[4] << 16)
1247			    | (done_ccb->csio.msg_ptr[5] << 8)
1248			    | (done_ccb->csio.msg_ptr[6]);
1249		}
1250
1251		/*
1252		 * If we have an SRR and we're still sending data, we
1253		 * should be able to adjust offsets and cycle again.
1254		 * It is possible only if offset is from this datamove.
1255		 */
1256		if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) &&
1257		    srr_off >= io->scsiio.kern_rel_offset &&
1258		    srr_off < io->scsiio.kern_rel_offset +
1259		     io->scsiio.kern_data_len) {
1260			io->scsiio.kern_data_resid =
1261			    io->scsiio.kern_rel_offset +
1262			    io->scsiio.kern_data_len - srr_off;
1263			io->scsiio.ext_data_filled = srr_off;
1264			io->scsiio.io_hdr.status = CTL_STATUS_NONE;
1265			io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;
1266			softc->ccbs_freed++;
1267			xpt_release_ccb(done_ccb);
1268			TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
1269					  periph_links.tqe);
1270			xpt_schedule(periph, /*priority*/ 1);
1271			break;
1272		}
1273
1274		/*
1275		 * If status was being sent, the back end data is now history.
1276		 * Hack it up and resubmit a new command with the CDB adjusted.
1277		 * If the SIM does the right thing, all of the resid math
1278		 * should work.
1279		 */
1280		if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) {
1281			softc->ccbs_freed++;
1282			xpt_release_ccb(done_ccb);
1283			if (ctlfe_adjust_cdb(atio, srr_off) == 0) {
1284				done_ccb = (union ccb *)atio;
1285				goto resubmit;
1286			}
1287			/*
1288			 * Fall through to doom....
1289			 */
1290		}
1291
1292		if ((done_ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1293		    (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
1294			io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
1295
1296		/*
1297		 * If we were sending status back to the initiator, free up
1298		 * resources.  If we were doing a datamove, call the
1299		 * datamove done routine.
1300		 */
1301		if ((io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) {
1302			softc->ccbs_freed++;
1303			xpt_release_ccb(done_ccb);
1304			/*
1305			 * For a wildcard attachment, commands can come in
1306			 * with a specific target/lun.  Reset the target
1307			 * and LUN fields back to the wildcard values before
1308			 * we send them back down to the SIM.  The SIM has
1309			 * a wildcard LUN enabled, not whatever target/lun
1310			 * these happened to be.
1311			 */
1312			if (softc->flags & CTLFE_LUN_WILDCARD) {
1313				atio->ccb_h.target_id = CAM_TARGET_WILDCARD;
1314				atio->ccb_h.target_lun = CAM_LUN_WILDCARD;
1315			}
1316			if (periph->flags & CAM_PERIPH_INVALID) {
1317				ctlfe_free_ccb(periph, (union ccb *)atio);
1318			} else {
1319				mtx_unlock(mtx);
1320				xpt_action((union ccb *)atio);
1321				return;
1322			}
1323		} else {
1324			struct ctlfe_cmd_info *cmd_info;
1325			struct ccb_scsiio *csio;
1326
1327			csio = &done_ccb->csio;
1328			cmd_info = PRIV_INFO(io);
1329
1330			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
1331
1332			/*
1333			 * Translate CAM status to CTL status.  Success
1334			 * does not change the overall ctl_io status.  In
1335			 * that case we just set port_status to 0.  If we
1336			 * have a failure, though, set a data phase error
1337			 * for the overall ctl_io.
1338			 */
1339			switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) {
1340			case CAM_REQ_CMP:
1341				io->scsiio.kern_data_resid -= csio->dxfer_len;
1342				io->io_hdr.port_status = 0;
1343				break;
1344			default:
1345				/*
1346				 * XXX KDM we probably need to figure out a
1347				 * standard set of errors that the SIM
1348				 * drivers should return in the event of a
1349				 * data transfer failure.  A data phase
1350				 * error will at least point the user to a
1351				 * data transfer error of some sort.
1352				 * Hopefully the SIM printed out some
1353				 * additional information to give the user
1354				 * a clue what happened.
1355				 */
1356				io->io_hdr.port_status = 0xbad1;
1357				ctl_set_data_phase_error(&io->scsiio);
1358				/*
1359				 * XXX KDM figure out residual.
1360				 */
1361				break;
1362			}
1363			/*
1364			 * If we had to break this S/G list into multiple
1365			 * pieces, figure out where we are in the list, and
1366			 * continue sending pieces if necessary.
1367			 */
1368			if ((cmd_info->flags & CTLFE_CMD_PIECEWISE)
1369			 && (io->io_hdr.port_status == 0)) {
1370				ccb_flags flags;
1371				uint8_t *data_ptr;
1372				uint32_t dxfer_len;
1373
1374				flags = atio->ccb_h.flags &
1375					(CAM_DIS_DISCONNECT|
1376					 CAM_TAG_ACTION_VALID);
1377
1378				ctlfedata(softc, io, &flags, &data_ptr,
1379				    &dxfer_len, &csio->sglist_cnt);
1380
1381				if (((flags & CAM_SEND_STATUS) == 0)
1382				 && (dxfer_len == 0)) {
1383					printf("%s: tag %04x no status or "
1384					       "len cdb = %02x\n", __func__,
1385					       atio->tag_id,
1386					       atio_cdb_ptr(atio)[0]);
1387					printf("%s: tag %04x io status %#x\n",
1388					       __func__, atio->tag_id,
1389					       io->io_hdr.status);
1390				}
1391
1392				cam_fill_ctio(csio,
1393					      /*retries*/ 2,
1394					      ctlfedone,
1395					      flags,
1396					      (flags & CAM_TAG_ACTION_VALID) ?
1397					       MSG_SIMPLE_Q_TAG : 0,
1398					      atio->tag_id,
1399					      atio->init_id,
1400					      0,
1401					      /*data_ptr*/ data_ptr,
1402					      /*dxfer_len*/ dxfer_len,
1403					      /*timeout*/ 5 * 1000);
1404
1405				csio->ccb_h.flags |= CAM_UNLOCKED;
1406				csio->resid = 0;
1407				csio->ccb_h.ccb_atio = atio;
1408				io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
1409				softc->ctios_sent++;
1410				mtx_unlock(mtx);
1411				xpt_action((union ccb *)csio);
1412			} else {
1413				/*
1414				 * Release the CTIO.  The ATIO will be sent back
1415				 * down to the SIM once we send status.
1416				 */
1417				softc->ccbs_freed++;
1418				xpt_release_ccb(done_ccb);
1419				mtx_unlock(mtx);
1420
1421				/* Call the backend move done callback */
1422				io->scsiio.be_move_done(io);
1423			}
1424			return;
1425		}
1426		break;
1427	}
1428	case XPT_IMMEDIATE_NOTIFY: {
1429		union ctl_io *io;
1430		struct ccb_immediate_notify *inot;
1431		cam_status status;
1432		int send_ctl_io;
1433
1434		inot = &done_ccb->cin1;
1435		printf("%s: got XPT_IMMEDIATE_NOTIFY status %#x tag %#x "
1436		       "seq %#x\n", __func__, inot->ccb_h.status,
1437		       inot->tag_id, inot->seq_id);
1438
1439		io = done_ccb->ccb_h.io_ptr;
1440		ctl_zero_io(io);
1441
1442		send_ctl_io = 1;
1443
1444		io->io_hdr.io_type = CTL_IO_TASK;
1445		PRIV_CCB(io) = done_ccb;
1446		inot->ccb_h.io_ptr = io;
1447		io->io_hdr.nexus.initid = inot->initiator_id;
1448		io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
1449		if (bus_softc->hba_misc & PIM_EXTLUNS) {
1450			io->io_hdr.nexus.targ_lun = ctl_decode_lun(
1451			    CAM_EXTLUN_BYTE_SWIZZLE(inot->ccb_h.target_lun));
1452		} else {
1453			io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun;
1454		}
1455		/* XXX KDM should this be the tag_id? */
1456		io->taskio.tag_num = inot->seq_id;
1457
1458		status = inot->ccb_h.status & CAM_STATUS_MASK;
1459		switch (status) {
1460		case CAM_SCSI_BUS_RESET:
1461			io->taskio.task_action = CTL_TASK_BUS_RESET;
1462			break;
1463		case CAM_BDR_SENT:
1464			io->taskio.task_action = CTL_TASK_TARGET_RESET;
1465			break;
1466		case CAM_MESSAGE_RECV:
1467			switch (inot->arg) {
1468			case MSG_ABORT_TASK_SET:
1469				io->taskio.task_action =
1470				    CTL_TASK_ABORT_TASK_SET;
1471				break;
1472			case MSG_TARGET_RESET:
1473				io->taskio.task_action = CTL_TASK_TARGET_RESET;
1474				break;
1475			case MSG_ABORT_TASK:
1476				io->taskio.task_action = CTL_TASK_ABORT_TASK;
1477				break;
1478			case MSG_LOGICAL_UNIT_RESET:
1479				io->taskio.task_action = CTL_TASK_LUN_RESET;
1480				break;
1481			case MSG_CLEAR_TASK_SET:
1482				io->taskio.task_action =
1483				    CTL_TASK_CLEAR_TASK_SET;
1484				break;
1485			case MSG_CLEAR_ACA:
1486				io->taskio.task_action = CTL_TASK_CLEAR_ACA;
1487				break;
1488			case MSG_QUERY_TASK:
1489				io->taskio.task_action = CTL_TASK_QUERY_TASK;
1490				break;
1491			case MSG_QUERY_TASK_SET:
1492				io->taskio.task_action =
1493				    CTL_TASK_QUERY_TASK_SET;
1494				break;
1495			case MSG_QUERY_ASYNC_EVENT:
1496				io->taskio.task_action =
1497				    CTL_TASK_QUERY_ASYNC_EVENT;
1498				break;
1499			case MSG_NOOP:
1500				send_ctl_io = 0;
1501				break;
1502			default:
1503				xpt_print(periph->path,
1504					  "%s: unsupported message 0x%x\n",
1505					  __func__, inot->arg);
1506				send_ctl_io = 0;
1507				break;
1508			}
1509			break;
1510		case CAM_REQ_ABORTED:
1511			/*
1512			 * This request was sent back by the driver.
1513			 * XXX KDM what do we do here?
1514			 */
1515			send_ctl_io = 0;
1516			break;
1517		case CAM_REQ_INVALID:
1518		case CAM_PROVIDE_FAIL:
1519		default:
1520			/*
1521			 * We should only get here if we're talking
1522			 * to a SIM that is target
1523			 * capable but supports the old API.  In
1524			 * that case, we need to just free the CCB.
1525			 * If we actually send a notify acknowledge,
1526			 * it will send that back with an error as
1527			 * well.
1528			 */
1529
1530			if ((status != CAM_REQ_INVALID)
1531			 && (status != CAM_PROVIDE_FAIL))
1532				xpt_print(periph->path,
1533					  "%s: unsupported CAM status 0x%x\n",
1534					  __func__, status);
1535
1536			ctlfe_free_ccb(periph, done_ccb);
1537
1538			goto out;
1539		}
1540		if (send_ctl_io != 0) {
1541			ctl_queue(io);
1542		} else {
1543			done_ccb->ccb_h.status = CAM_REQ_INPROG;
1544			done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
1545			xpt_action(done_ccb);
1546		}
1547		break;
1548	}
1549	case XPT_NOTIFY_ACKNOWLEDGE:
1550		/*
1551		 * Queue this back down to the SIM as an immediate notify.
1552		 */
1553		done_ccb->ccb_h.status = CAM_REQ_INPROG;
1554		done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
1555		xpt_action(done_ccb);
1556		break;
1557	case XPT_SET_SIM_KNOB:
1558	case XPT_GET_SIM_KNOB:
1559		break;
1560	default:
1561		panic("%s: unexpected CCB type %#x", __func__,
1562		      done_ccb->ccb_h.func_code);
1563		break;
1564	}
1565
1566out:
1567	mtx_unlock(mtx);
1568}
1569
1570static void
1571ctlfe_onoffline(void *arg, int online)
1572{
1573	struct ctlfe_softc *bus_softc;
1574	union ccb *ccb;
1575	cam_status status;
1576	struct cam_path *path;
1577	int set_wwnn;
1578
1579	bus_softc = (struct ctlfe_softc *)arg;
1580
1581	set_wwnn = 0;
1582
1583	status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id,
1584		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
1585	if (status != CAM_REQ_CMP) {
1586		printf("%s: unable to create path!\n", __func__);
1587		return;
1588	}
1589	ccb = xpt_alloc_ccb();
1590	xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE);
1591	ccb->ccb_h.func_code = XPT_GET_SIM_KNOB;
1592	xpt_action(ccb);
1593
1594	/*
1595	 * Copan WWN format:
1596	 *
1597	 * Bits 63-60:	0x5		NAA, IEEE registered name
1598	 * Bits 59-36:	0x000ED5	IEEE Company name assigned to Copan
1599	 * Bits 35-12:			Copan SSN (Sequential Serial Number)
1600	 * Bits 11-8:			Type of port:
1601	 *					1 == N-Port
1602	 *					2 == F-Port
1603	 *					3 == NL-Port
1604	 * Bits 7-0:			0 == Node Name, >0 == Port Number
1605	 */
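	/*
	 * Illustrative example: a node name built to the layout above with
	 * a made-up SSN of 0x123456 on an NL-Port would read
	 * 0x5000ED5123456300 -- 0x5 NAA, 0x000ED5 company ID, 0x123456 SSN,
	 * port type 3, and 0x00 for "node name".
	 */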
1606	if (online != 0) {
1607		if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0){
1608#ifdef RANDOM_WWNN
1609			uint64_t random_bits;
1610#endif
1611
1612			printf("%s: %s current WWNN %#jx\n", __func__,
1613			       bus_softc->port_name,
1614			       ccb->knob.xport_specific.fc.wwnn);
1615			printf("%s: %s current WWPN %#jx\n", __func__,
1616			       bus_softc->port_name,
1617			       ccb->knob.xport_specific.fc.wwpn);
1618
1619#ifdef RANDOM_WWNN
1620			arc4rand(&random_bits, sizeof(random_bits), 0);
1621#endif
1622
1623			/*
1624			 * XXX KDM this is a bit of a kludge for now.  We
1625			 * take the current WWNN/WWPN from the card, and
1626			 * replace the company identifier and the NL-Port
1627			 * indicator and the port number (for the WWPN).
1628			 * This should be replaced later with ddb_GetWWNN,
1629			 * or possibly a more centralized scheme.  (It
1630			 * would be nice to have the WWNN/WWPN for each
1631			 * port stored in the ctl_port structure.)
1632			 */
1633#ifdef RANDOM_WWNN
1634			ccb->knob.xport_specific.fc.wwnn =
1635				(random_bits &
1636				0x0000000fffffff00ULL) |
1637				/* Company ID */ 0x5000ED5000000000ULL |
1638				/* NL-Port */    0x0300;
1639			ccb->knob.xport_specific.fc.wwpn =
1640				(random_bits &
1641				0x0000000fffffff00ULL) |
1642				/* Company ID */ 0x5000ED5000000000ULL |
1643				/* NL-Port */    0x3000 |
1644				/* Port Num */ (bus_softc->port.targ_port & 0xff);
1645
1646			/*
1647			 * This is a bit of an API break/reversal, but if
1648			 * we're doing the random WWNN that's a little
1649			 * different anyway.  So record what we're actually
1650			 * using with the frontend code so it's reported
1651			 * accurately.
1652			 */
1653			ctl_port_set_wwns(&bus_softc->port,
1654			    true, ccb->knob.xport_specific.fc.wwnn,
1655			    true, ccb->knob.xport_specific.fc.wwpn);
1656			set_wwnn = 1;
1657#else /* RANDOM_WWNN */
1658			/*
1659			 * If the user has specified a WWNN/WWPN, send them
1660			 * down to the SIM.  Otherwise, record what the SIM
1661			 * has reported.
1662			 */
1663			if (bus_softc->port.wwnn != 0 && bus_softc->port.wwnn
1664			    != ccb->knob.xport_specific.fc.wwnn) {
1665				ccb->knob.xport_specific.fc.wwnn =
1666				    bus_softc->port.wwnn;
1667				set_wwnn = 1;
1668			} else {
1669				ctl_port_set_wwns(&bus_softc->port,
1670				    true, ccb->knob.xport_specific.fc.wwnn,
1671				    false, 0);
1672			}
1673			if (bus_softc->port.wwpn != 0 && bus_softc->port.wwpn
1674			     != ccb->knob.xport_specific.fc.wwpn) {
1675				ccb->knob.xport_specific.fc.wwpn =
1676				    bus_softc->port.wwpn;
1677				set_wwnn = 1;
1678			} else {
1679				ctl_port_set_wwns(&bus_softc->port,
1680				    false, 0,
1681				    true, ccb->knob.xport_specific.fc.wwpn);
1682			}
1683#endif /* RANDOM_WWNN */
1684
1685
1686			if (set_wwnn != 0) {
1687				printf("%s: %s new WWNN %#jx\n", __func__,
1688				       bus_softc->port_name,
1689				ccb->knob.xport_specific.fc.wwnn);
1690				printf("%s: %s new WWPN %#jx\n", __func__,
1691				       bus_softc->port_name,
1692				       ccb->knob.xport_specific.fc.wwpn);
1693			}
1694		} else {
1695			printf("%s: %s has no valid WWNN/WWPN\n", __func__,
1696			       bus_softc->port_name);
1697		}
1698	}
1699	ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
1700	ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
1701	if (set_wwnn != 0)
1702		ccb->knob.xport_specific.valid |= KNOB_VALID_ADDRESS;
1703
1704	if (online != 0)
1705		ccb->knob.xport_specific.fc.role |= KNOB_ROLE_TARGET;
1706	else
1707		ccb->knob.xport_specific.fc.role &= ~KNOB_ROLE_TARGET;
1708
1709	xpt_action(ccb);
1710
1711	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1712		printf("%s: SIM %s (path id %d) target %s failed with "
1713		       "status %#x\n",
1714		       __func__, bus_softc->port_name, bus_softc->path_id,
1715		       (online != 0) ? "enable" : "disable",
1716		       ccb->ccb_h.status);
1717	} else {
1718		printf("%s: SIM %s (path id %d) target %s succeeded\n",
1719		       __func__, bus_softc->port_name, bus_softc->path_id,
1720		       (online != 0) ? "enable" : "disable");
1721	}
1722
1723	xpt_free_path(path);
1724	xpt_free_ccb(ccb);
1725}
1726
1727static void
1728ctlfe_online(void *arg)
1729{
1730	struct ctlfe_softc *bus_softc;
1731	struct cam_path *path;
1732	cam_status status;
1733	struct ctlfe_lun_softc *lun_softc;
1734	struct cam_periph *periph;
1735
1736	bus_softc = (struct ctlfe_softc *)arg;
1737
1738	/*
1739	 * Create the wildcard LUN before bringing the port online.
1740	 */
1741	status = xpt_create_path(&path, /*periph*/ NULL,
1742				 bus_softc->path_id, CAM_TARGET_WILDCARD,
1743				 CAM_LUN_WILDCARD);
1744	if (status != CAM_REQ_CMP) {
1745		printf("%s: unable to create path for wildcard periph\n",
1746				__func__);
1747		return;
1748	}
1749
1750	lun_softc = malloc(sizeof(*lun_softc), M_CTLFE, M_WAITOK | M_ZERO);
1751
1752	xpt_path_lock(path);
1753	periph = cam_periph_find(path, "ctl");
1754	if (periph != NULL) {
1755		/* We've already got a periph, no need to alloc a new one. */
1756		xpt_path_unlock(path);
1757		xpt_free_path(path);
1758		free(lun_softc, M_CTLFE);
1759		return;
1760	}
1761	lun_softc->parent_softc = bus_softc;
1762	lun_softc->flags |= CTLFE_LUN_WILDCARD;
1763
1764	status = cam_periph_alloc(ctlferegister,
1765				  ctlfeoninvalidate,
1766				  ctlfecleanup,
1767				  ctlfestart,
1768				  "ctl",
1769				  CAM_PERIPH_BIO,
1770				  path,
1771				  ctlfeasync,
1772				  0,
1773				  lun_softc);
1774
1775	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1776		const struct cam_status_entry *entry;
1777
1778		entry = cam_fetch_status_entry(status);
1779		printf("%s: CAM error %s (%#x) returned from "
1780		       "cam_periph_alloc()\n", __func__, (entry != NULL) ?
1781		       entry->status_text : "Unknown", status);
1782		free(lun_softc, M_CTLFE);
1783	}
1784
1785	xpt_path_unlock(path);
1786	ctlfe_onoffline(arg, /*online*/ 1);
1787	xpt_free_path(path);
1788}
1789
1790static void
1791ctlfe_offline(void *arg)
1792{
1793	struct ctlfe_softc *bus_softc;
1794	struct cam_path *path;
1795	cam_status status;
1796	struct cam_periph *periph;
1797
1798	bus_softc = (struct ctlfe_softc *)arg;
1799
1800	ctlfe_onoffline(arg, /*online*/ 0);
1801
1802	/*
1803	 * Disable the wildcard LUN for this port now that we have taken
1804	 * the port offline.
1805	 */
1806	status = xpt_create_path(&path, /*periph*/ NULL,
1807				 bus_softc->path_id, CAM_TARGET_WILDCARD,
1808				 CAM_LUN_WILDCARD);
1809	if (status != CAM_REQ_CMP) {
1810		printf("%s: unable to create path for wildcard periph\n",
1811		       __func__);
1812		return;
1813	}
1814	xpt_path_lock(path);
1815	if ((periph = cam_periph_find(path, "ctl")) != NULL)
1816		cam_periph_invalidate(periph);
1817	xpt_path_unlock(path);
1818	xpt_free_path(path);
1819}
1820
1821/*
1822 * This will get called to enable a LUN on every bus that is attached to
1823 * CTL.  So we only need to create a path/periph for this particular bus.
1824 */
1825static int
1826ctlfe_lun_enable(void *arg, int lun_id)
1827{
1828	struct ctlfe_softc *bus_softc;
1829	struct ctlfe_lun_softc *softc;
1830	struct cam_path *path;
1831	struct cam_periph *periph;
1832	cam_status status;
1833
1834	bus_softc = (struct ctlfe_softc *)arg;
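	/*
	 * When the SIM reports extended LUN support (PIM_EXTLUNS), CTL's
	 * flat LUN index is re-encoded into the 64-bit SAM LUN format
	 * (ctl_encode_lun()) and byte-swizzled into the representation
	 * CAM keeps in lun_id_t before the path is built below.
	 */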
1835	if (bus_softc->hba_misc & PIM_EXTLUNS)
1836		lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id));
1837
1838	status = xpt_create_path(&path, /*periph*/ NULL,
1839	    bus_softc->path_id, bus_softc->target_id, lun_id);
1840	/* XXX KDM need some way to return status to CTL here? */
1841	if (status != CAM_REQ_CMP) {
1842		printf("%s: could not create path, status %#x\n", __func__,
1843		       status);
1844		return (1);
1845	}
1846
1847	softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO);
1848	xpt_path_lock(path);
1849	periph = cam_periph_find(path, "ctl");
1850	if (periph != NULL) {
1851		/* We've already got a periph, no need to alloc a new one. */
1852		xpt_path_unlock(path);
1853		xpt_free_path(path);
1854		free(softc, M_CTLFE);
1855		return (0);
1856	}
1857	softc->parent_softc = bus_softc;
1858
1859	status = cam_periph_alloc(ctlferegister,
1860				  ctlfeoninvalidate,
1861				  ctlfecleanup,
1862				  ctlfestart,
1863				  "ctl",
1864				  CAM_PERIPH_BIO,
1865				  path,
1866				  ctlfeasync,
1867				  0,
1868				  softc);
1869
1870	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1871		const struct cam_status_entry *entry;
1872
1873		entry = cam_fetch_status_entry(status);
1874		printf("%s: CAM error %s (%#x) returned from "
1875		       "cam_periph_alloc()\n", __func__, (entry != NULL) ?
1876		       entry->status_text : "Unknown", status);
1877		free(softc, M_CTLFE);
1878	}
1879
1880	xpt_path_unlock(path);
1881	xpt_free_path(path);
1882	return (0);
1883}
1884
1885/*
1886 * This will get called when the user removes a LUN, to disable that LUN
1887 * on every bus that is attached to CTL.
1888 */
1889static int
1890ctlfe_lun_disable(void *arg, int lun_id)
1891{
1892	struct ctlfe_softc *softc;
1893	struct ctlfe_lun_softc *lun_softc;
1894
1895	softc = (struct ctlfe_softc *)arg;
1896	if (softc->hba_misc & PIM_EXTLUNS)
1897		lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id));
1898
1899	mtx_lock(&softc->lun_softc_mtx);
1900	STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) {
1901		struct cam_path *path;
1902
1903		path = lun_softc->periph->path;
1904
1905		if ((xpt_path_target_id(path) == softc->target_id)
1906		 && (xpt_path_lun_id(path) == lun_id)) {
1907			break;
1908		}
1909	}
1910	if (lun_softc == NULL) {
1911		mtx_unlock(&softc->lun_softc_mtx);
1912		printf("%s: can't find lun %d\n", __func__, lun_id);
1913		return (1);
1914	}
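	/*
	 * Take an extra reference so the periph cannot go away once we
	 * drop lun_softc_mtx, then invalidate it under the periph lock
	 * and drop our reference.  Teardown continues through
	 * ctlfeoninvalidate()/ctlfecleanup() once the remaining
	 * references and outstanding CCBs are returned.
	 */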
1915	cam_periph_acquire(lun_softc->periph);
1916	mtx_unlock(&softc->lun_softc_mtx);
1917
1918	cam_periph_lock(lun_softc->periph);
1919	cam_periph_invalidate(lun_softc->periph);
1920	cam_periph_unlock(lun_softc->periph);
1921	cam_periph_release(lun_softc->periph);
1922	return (0);
1923}
1924
1925static void
1926ctlfe_dump_sim(struct cam_sim *sim)
1927{
1928
1929	printf("%s%d: max tagged openings: %d, max dev openings: %d\n",
1930	       sim->sim_name, sim->unit_number,
1931	       sim->max_tagged_dev_openings, sim->max_dev_openings);
1932}
1933
1934/*
1935 * Assumes that the SIM lock is held.
1936 */
1937static void
1938ctlfe_dump_queue(struct ctlfe_lun_softc *softc)
1939{
1940	struct ccb_hdr *hdr;
1941	struct cam_periph *periph;
1942	int num_items;
1943
1944	periph = softc->periph;
1945	num_items = 0;
1946
1947	TAILQ_FOREACH(hdr, &softc->work_queue, periph_links.tqe) {
1948		union ctl_io *io = hdr->io_ptr;
1949
1950		num_items++;
1951
1952		/*
1953		 * Only regular SCSI I/O is put on the work
1954		 * queue, so we can print sense here.  There may be no
1955		 * sense if it's on the queue for a DMA, but this serves to
1956		 * print out the CCB as well.
1957		 *
1958		 * XXX KDM switch this over to scsi_sense_print() when
1959		 * CTL is merged in with CAM.
1960		 */
1961		ctl_io_error_print(io, NULL);
1962
1963		/*
1964		 * Print DMA status if we are DMA_QUEUED.
1965		 */
1966		if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) {
1967			xpt_print(periph->path,
1968			    "Total %u, Current %u, Resid %u\n",
1969			    io->scsiio.kern_total_len,
1970			    io->scsiio.kern_data_len,
1971			    io->scsiio.kern_data_resid);
1972		}
1973	}
1974
1975	xpt_print(periph->path, "%d requests total waiting for CCBs\n",
1976		  num_items);
1977	xpt_print(periph->path, "%ju CCBs outstanding (%ju allocated, %ju "
1978		  "freed)\n", (uintmax_t)(softc->ccbs_alloced -
1979		  softc->ccbs_freed), (uintmax_t)softc->ccbs_alloced,
1980		  (uintmax_t)softc->ccbs_freed);
1981	xpt_print(periph->path, "%ju CTIOs outstanding (%ju sent, %ju "
1982		  "returned)\n", (uintmax_t)(softc->ctios_sent -
1983		  softc->ctios_returned), (uintmax_t)softc->ctios_sent,
1984		  (uintmax_t)softc->ctios_returned);
1985}
1986
1987/*
1988 * Datamove/done routine called by CTL.  Put ourselves on the queue to
1989 * receive a CCB from CAM so we can queue the continue I/O request down
1990 * to the adapter.
1991 */
1992static void
1993ctlfe_datamove(union ctl_io *io)
1994{
1995	union ccb *ccb;
1996	struct cam_periph *periph;
1997	struct ctlfe_lun_softc *softc;
1998
1999	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
2000	    ("Unexpected io_type (%d) in ctlfe_datamove", io->io_hdr.io_type));
2001
2002	io->scsiio.ext_data_filled = 0;
2003	ccb = PRIV_CCB(io);
2004	periph = xpt_path_periph(ccb->ccb_h.path);
2005	cam_periph_lock(periph);
2006	softc = (struct ctlfe_lun_softc *)periph->softc;
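	/*
	 * Mark the I/O as waiting for a data-transfer CCB.  If CTL has
	 * already filled in final status, STATUS_QUEUED is presumably set
	 * as well so the start routine knows the status can go out along
	 * with, or immediately after, this transfer.
	 */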
2007	io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;
2008	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
2009		io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
2010	TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
2011			  periph_links.tqe);
2012	xpt_schedule(periph, /*priority*/ 1);
2013	cam_periph_unlock(periph);
2014}
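/*
 * These two routines are the frontend's data-move and completion hooks;
 * the registration (not shown here) is presumably along the lines of:
 *
 *	port->fe_datamove = ctlfe_datamove;
 *	port->fe_done = ctlfe_done;
 *
 * so every transfer request and completed command for this port comes back
 * to CAM either directly or via the work queue serviced by ctlfestart().
 */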
2015
2016static void
2017ctlfe_done(union ctl_io *io)
2018{
2019	union ccb *ccb;
2020	struct cam_periph *periph;
2021	struct ctlfe_lun_softc *softc;
2022
2023	ccb = PRIV_CCB(io);
2024	periph = xpt_path_periph(ccb->ccb_h.path);
2025	cam_periph_lock(periph);
2026	softc = (struct ctlfe_lun_softc *)periph->softc;
2027
2028	if (io->io_hdr.io_type == CTL_IO_TASK) {
2029		/*
2030		 * Task management commands don't require any further
2031		 * communication back to the adapter.  Requeue the CCB
2032		 * to the adapter, and free the CTL I/O.
2033		 */
2034		xpt_print(ccb->ccb_h.path, "%s: returning task I/O "
2035			  "tag %#x seq %#x\n", __func__,
2036			  ccb->cin1.tag_id, ccb->cin1.seq_id);
2037		/*
2038		 * Send the notify acknowledge down to the SIM, to let it
2039		 * know we processed the task management command.
2040		 */
2041		ccb->ccb_h.status = CAM_REQ_INPROG;
2042		ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
2043		switch (io->taskio.task_status) {
2044		case CTL_TASK_FUNCTION_COMPLETE:
2045			ccb->cna2.arg = CAM_RSP_TMF_COMPLETE;
2046			break;
2047		case CTL_TASK_FUNCTION_SUCCEEDED:
2048			ccb->cna2.arg = CAM_RSP_TMF_SUCCEEDED;
2049			ccb->ccb_h.flags |= CAM_SEND_STATUS;
2050			break;
2051		case CTL_TASK_FUNCTION_REJECTED:
2052			ccb->cna2.arg = CAM_RSP_TMF_REJECTED;
2053			ccb->ccb_h.flags |= CAM_SEND_STATUS;
2054			break;
2055		case CTL_TASK_LUN_DOES_NOT_EXIST:
2056			ccb->cna2.arg = CAM_RSP_TMF_INCORRECT_LUN;
2057			ccb->ccb_h.flags |= CAM_SEND_STATUS;
2058			break;
2059		case CTL_TASK_FUNCTION_NOT_SUPPORTED:
2060			ccb->cna2.arg = CAM_RSP_TMF_FAILED;
2061			ccb->ccb_h.flags |= CAM_SEND_STATUS;
2062			break;
2063		}
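		/*
		 * The low byte of cna2.arg carries the CAM_RSP_TMF_* code
		 * chosen above; CTL's three bytes of additional response
		 * information are packed into the bytes above it.
		 */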
2064		ccb->cna2.arg |= scsi_3btoul(io->taskio.task_resp) << 8;
2065		xpt_action(ccb);
2066	} else if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) {
2067		if (softc->flags & CTLFE_LUN_WILDCARD) {
2068			ccb->ccb_h.target_id = CAM_TARGET_WILDCARD;
2069			ccb->ccb_h.target_lun = CAM_LUN_WILDCARD;
2070		}
2071		if (periph->flags & CAM_PERIPH_INVALID) {
2072			ctlfe_free_ccb(periph, ccb);
2073		} else {
2074			cam_periph_unlock(periph);
2075			xpt_action(ccb);
2076			return;
2077		}
2078	} else {
2079		io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
2080		TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
2081				  periph_links.tqe);
2082		xpt_schedule(periph, /*priority*/ 1);
2083	}
2084
2085	cam_periph_unlock(periph);
2086}
2087
2088static void
2089ctlfe_dump(void)
2090{
2091	struct ctlfe_softc *bus_softc;
2092	struct ctlfe_lun_softc *lun_softc;
2093
2094	STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) {
2095		ctlfe_dump_sim(bus_softc->sim);
2096		STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links)
2097			ctlfe_dump_queue(lun_softc);
2098	}
2099}
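/*
 * Presumably installed as this frontend's fe_dump debugging hook (struct
 * ctl_frontend).  It walks every registered bus and each of its LUN
 * peripherals as a best-effort diagnostic and does not take ctlfe_list_mtx.
 */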
2100