1/*-
2 * Copyright (c) 2008, 2009 Silicon Graphics International Corp.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions, and the following disclaimer,
10 *    without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 *    substantially similar to the "NO WARRANTY" disclaimer below
13 *    ("Disclaimer") and any redistribution must be conditioned upon
14 *    including a substantially similar Disclaimer requirement for further
15 *    binary redistribution.
16 *
17 * NO WARRANTY
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGES.
29 *
30 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $
31 */
32/*
33 * Peripheral driver interface between CAM and CTL (CAM Target Layer).
34 *
35 * Author: Ken Merry <ken@FreeBSD.org>
36 */
37
38#include <sys/cdefs.h>
39__FBSDID("$FreeBSD: head/sys/cam/ctl/scsi_ctl.c 250460 2013-05-10 16:41:26Z eadler $");
40
41#include <sys/param.h>
42#include <sys/queue.h>
43#include <sys/systm.h>
44#include <sys/kernel.h>
45#include <sys/lock.h>
46#include <sys/mutex.h>
47#include <sys/condvar.h>
48#include <sys/malloc.h>
49#include <sys/bus.h>
50#include <sys/endian.h>
51#include <sys/sbuf.h>
52#include <sys/sysctl.h>
53#include <sys/types.h>
55#include <machine/bus.h>
56
57#include <cam/cam.h>
58#include <cam/cam_ccb.h>
59#include <cam/cam_periph.h>
60#include <cam/cam_queue.h>
61#include <cam/cam_xpt_periph.h>
62#include <cam/cam_debug.h>
63#include <cam/cam_sim.h>
64#include <cam/cam_xpt.h>
65
66#include <cam/scsi/scsi_all.h>
67#include <cam/scsi/scsi_message.h>
68
69#include <cam/ctl/ctl_io.h>
70#include <cam/ctl/ctl.h>
71#include <cam/ctl/ctl_frontend.h>
72#include <cam/ctl/ctl_util.h>
73#include <cam/ctl/ctl_error.h>
74
75typedef enum {
76	CTLFE_CCB_DEFAULT	= 0x00,
77	CTLFE_CCB_WAITING 	= 0x01
78} ctlfe_ccb_types;
79
80struct ctlfe_softc {
81	struct ctl_frontend fe;
82	path_id_t path_id;
83	struct cam_sim *sim;
84	char port_name[DEV_IDLEN];
85	STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list;
86	STAILQ_ENTRY(ctlfe_softc) links;
87};
88
89STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list;
90struct mtx ctlfe_list_mtx;
91static char ctlfe_mtx_desc[] = "ctlfelist";
92static int ctlfe_dma_enabled = 1;
93#ifdef CTLFE_INIT_ENABLE
94static int ctlfe_max_targets = 1;
95static int ctlfe_num_targets = 0;
96#endif
97
98typedef enum {
99	CTLFE_LUN_NONE		= 0x00,
100	CTLFE_LUN_WILDCARD	= 0x01
101} ctlfe_lun_flags;
102
103struct ctlfe_lun_softc {
104	struct ctlfe_softc *parent_softc;
105	struct cam_periph *periph;
106	ctlfe_lun_flags flags;
107	struct callout dma_callout;
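	/*
	 * CCB accounting.  ctlfe_free_ccb() compares the sent and returned
	 * counts for ATIOs and INOTs to decide when every CCB is back and
	 * the peripheral reference can be released.
	 */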
108	uint64_t ccbs_alloced;
109	uint64_t ccbs_freed;
110	uint64_t ctios_sent;
111	uint64_t ctios_returned;
112	uint64_t atios_sent;
113	uint64_t atios_returned;
114	uint64_t inots_sent;
115	uint64_t inots_returned;
116	/* bus_dma_tag_t dma_tag; */
117	TAILQ_HEAD(, ccb_hdr) work_queue;
118	STAILQ_ENTRY(ctlfe_lun_softc) links;
119};
120
121typedef enum {
122	CTLFE_CMD_NONE		= 0x00,
123	CTLFE_CMD_PIECEWISE	= 0x01
124} ctlfe_cmd_flags;
125
126/*
127 * The size limit of this structure is CTL_PORT_PRIV_SIZE, from ctl_io.h.
128 * Currently that is 600 bytes.
129 */
130struct ctlfe_lun_cmd_info {
131	int cur_transfer_index;
132	ctlfe_cmd_flags flags;
133	/*
134	 * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16
135	 * bytes on amd64.  So with 32 elements, this is 256 bytes on
136	 * i386 and 512 bytes on amd64.
137	 */
138	bus_dma_segment_t cam_sglist[32];
139};
140
141/*
142 * When we register the adapter/bus, request that this many ctl_ios be
143 * allocated.  This should be the maximum supported by the adapter, but we
144 * currently don't have a way to get that back from the path inquiry.
145 * XXX KDM add that to the path inquiry.
146 */
147#define	CTLFE_REQ_CTL_IO	4096
148/*
149 * Number of Accept Target I/O CCBs to allocate and queue down to the
150 * adapter per LUN.
151 * XXX KDM should this be controlled by CTL?
152 */
153#define	CTLFE_ATIO_PER_LUN	1024
154/*
155 * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to
156 * allocate and queue down to the adapter per LUN.
157 * XXX KDM should this be controlled by CTL?
158 */
159#define	CTLFE_IN_PER_LUN	1024
160
161/*
162 * Timeout (in seconds) on CTIO CCB allocation for doing a DMA or sending
163 * status to the initiator.  The SIM is expected to have its own timeouts,
164 * so we're not putting this timeout around the CCB execution time.  The
165 * SIM should timeout and let us know if it has an issue.
166 */
167#define	CTLFE_DMA_TIMEOUT	60
168
169/*
170 * Turn this on to enable extra debugging prints.
171 */
172#if 0
173#define	CTLFEDEBUG
174#endif
175
176/*
177 * Use randomly assigned WWNN/WWPN values.  This is to work around an issue
178 * in the FreeBSD initiator that makes it unable to rescan the target if
179 * the target gets rebooted and the WWNN/WWPN stay the same.
180 */
181#if 0
182#define	RANDOM_WWNN
183#endif
184
185SYSCTL_INT(_kern_cam_ctl, OID_AUTO, dma_enabled, CTLFLAG_RW,
186	   &ctlfe_dma_enabled, 0, "DMA enabled");
187MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface");
188
189#define	ccb_type	ppriv_field0
190/* This is only used in the ATIO */
191#define	io_ptr		ppriv_ptr1
192
193/* This is only used in the CTIO */
194#define	ccb_atio	ppriv_ptr1
195
196int			ctlfeinitialize(void);
197void			ctlfeshutdown(void);
198static periph_init_t	ctlfeinit;
199static void		ctlfeasync(void *callback_arg, uint32_t code,
200				   struct cam_path *path, void *arg);
201static periph_ctor_t	ctlferegister;
202static periph_oninv_t	ctlfeoninvalidate;
203static periph_dtor_t	ctlfecleanup;
204static periph_start_t	ctlfestart;
205static void		ctlfedone(struct cam_periph *periph,
206				  union ccb *done_ccb);
207
208static void 		ctlfe_onoffline(void *arg, int online);
209static void 		ctlfe_online(void *arg);
210static void 		ctlfe_offline(void *arg);
211static int 		ctlfe_targ_enable(void *arg, struct ctl_id targ_id);
212static int 		ctlfe_targ_disable(void *arg, struct ctl_id targ_id);
213static int 		ctlfe_lun_enable(void *arg, struct ctl_id targ_id,
214					 int lun_id);
215static int 		ctlfe_lun_disable(void *arg, struct ctl_id targ_id,
216					  int lun_id);
217static void		ctlfe_dump_sim(struct cam_sim *sim);
218static void		ctlfe_dump_queue(struct ctlfe_lun_softc *softc);
219static void		ctlfe_dma_timeout(void *arg);
220static void 		ctlfe_datamove_done(union ctl_io *io);
221static void 		ctlfe_dump(void);
222
223static struct periph_driver ctlfe_driver =
224{
225	ctlfeinit, "ctl",
226	TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0
227};
228
229static int ctlfe_module_event_handler(module_t, int /*modeventtype_t*/, void *);
230
231/*
232 * We're not using PERIPHDRIVER_DECLARE(), because it runs at SI_SUB_DRIVERS,
233 * and that happens before CTL gets initialised.
234 */
235static moduledata_t ctlfe_moduledata = {
236	"ctlfe",
237	ctlfe_module_event_handler,
238	NULL
239};
240
241DECLARE_MODULE(ctlfe, ctlfe_moduledata, SI_SUB_CONFIGURE, SI_ORDER_FOURTH);
242MODULE_VERSION(ctlfe, 1);
243MODULE_DEPEND(ctlfe, ctl, 1, 1, 1);
244MODULE_DEPEND(ctlfe, cam, 1, 1, 1);
245
246extern struct ctl_softc *control_softc;
247
248void
249ctlfeshutdown(void)
250{
251	return;
252}
253
254void
255ctlfeinit(void)
256{
257	cam_status status;
258
259	STAILQ_INIT(&ctlfe_softc_list);
260
261	mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF);
262
263	KASSERT(control_softc != NULL, ("CTL is not initialized!"));
264
265	status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED |
266				    AC_CONTRACT, ctlfeasync, NULL, NULL);
267
268	if (status != CAM_REQ_CMP) {
269		printf("ctl: Failed to attach async callback due to CAM "
270		       "status 0x%x!\n", status);
271	}
272}
273
274static int
275ctlfe_module_event_handler(module_t mod, int what, void *arg)
276{
277
278	switch (what) {
279	case MOD_LOAD:
280		periphdriver_register(&ctlfe_driver);
281		return (0);
282	case MOD_UNLOAD:
283		return (EBUSY);
284	default:
285		return (EOPNOTSUPP);
286	}
287}
288
289static void
290ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
291{
292
293#ifdef CTLFEDEBUG
294	printf("%s: entered\n", __func__);
295#endif
296
297	/*
298	 * When a new path gets registered, and it is capable of target
299	 * mode, go ahead and attach.  Later on, we may need to be more
300	 * selective, but for now this will be sufficient.
301 	 */
302	switch (code) {
303	case AC_PATH_REGISTERED: {
304		struct ctl_frontend *fe;
305		struct ctlfe_softc *bus_softc;
306		struct ccb_pathinq *cpi;
307		int retval;
308
309		cpi = (struct ccb_pathinq *)arg;
310
311		/* Don't attach if it doesn't support target mode */
312		if ((cpi->target_sprt & PIT_PROCESSOR) == 0) {
313#ifdef CTLFEDEBUG
314			printf("%s: SIM %s%d doesn't support target mode\n",
315			       __func__, cpi->dev_name, cpi->unit_number);
316#endif
317			break;
318		}
319
320#ifdef CTLFE_INIT_ENABLE
321		if (ctlfe_num_targets >= ctlfe_max_targets) {
322			union ccb *ccb;
323			struct cam_sim *sim;
324
325			ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP,
326						  M_NOWAIT | M_ZERO);
327			if (ccb == NULL) {
328				printf("%s: unable to malloc CCB!\n", __func__);
329				return;
330			}
331			xpt_setup_ccb(&ccb->ccb_h, cpi->ccb_h.path,
332				      CAM_PRIORITY_NONE);
333
334			sim = xpt_path_sim(cpi->ccb_h.path);
335
336			ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
337			ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
338			ccb->knob.xport_specific.fc.role = KNOB_ROLE_INITIATOR;
339
340			/* We should hold the SIM lock here */
341			mtx_assert(sim->mtx, MA_OWNED);
342
343			xpt_action(ccb);
344
345			if ((ccb->ccb_h.status & CAM_STATUS_MASK) !=
346			     CAM_REQ_CMP) {
347				printf("%s: SIM %s%d (path id %d) initiator "
348				       "enable failed with status %#x\n",
349				       __func__, cpi->dev_name,
350				       cpi->unit_number, cpi->ccb_h.path_id,
351				       ccb->ccb_h.status);
352			} else {
353				printf("%s: SIM %s%d (path id %d) initiator "
354				       "enable succeeded\n",
355				       __func__, cpi->dev_name,
356				       cpi->unit_number, cpi->ccb_h.path_id);
357			}
358
359			free(ccb, M_TEMP);
360
361			break;
362		} else {
363			ctlfe_num_targets++;
364		}
365
366		printf("%s: ctlfe_num_targets = %d\n", __func__,
367		       ctlfe_num_targets);
368#endif /* CTLFE_INIT_ENABLE */
369
370		/*
371		 * We're in an interrupt context here, so we have to
372		 * use M_NOWAIT.  Of course this means trouble if we
373		 * can't allocate memory.
374		 */
375		bus_softc = malloc(sizeof(*bus_softc), M_CTLFE,
376				   M_NOWAIT | M_ZERO);
377		if (bus_softc == NULL) {
378			printf("%s: unable to malloc %zd bytes for softc\n",
379			       __func__, sizeof(*bus_softc));
380			return;
381		}
382
383		bus_softc->path_id = cpi->ccb_h.path_id;
384		bus_softc->sim = xpt_path_sim(cpi->ccb_h.path);
385		STAILQ_INIT(&bus_softc->lun_softc_list);
386
387		fe = &bus_softc->fe;
388
389		/*
390		 * XXX KDM should we be more accurate here ?
391		 */
392		if (cpi->transport == XPORT_FC)
393			fe->port_type = CTL_PORT_FC;
394		else
395			fe->port_type = CTL_PORT_SCSI;
396
397		/* XXX KDM what should the real number be here? */
398		fe->num_requested_ctl_io = CTLFE_REQ_CTL_IO;
399		snprintf(bus_softc->port_name, sizeof(bus_softc->port_name),
400			 "%s%d", cpi->dev_name, cpi->unit_number);
401		/*
402		 * XXX KDM it would be nice to allocate storage in the
403		 * frontend structure itself.
404	 	 */
405		fe->port_name = bus_softc->port_name;
406		fe->physical_port = cpi->unit_number;
407		fe->virtual_port = cpi->bus_id;
408		fe->port_online = ctlfe_online;
409		fe->port_offline = ctlfe_offline;
410		fe->onoff_arg = bus_softc;
411		fe->targ_enable = ctlfe_targ_enable;
412		fe->targ_disable = ctlfe_targ_disable;
413		fe->lun_enable = ctlfe_lun_enable;
414		fe->lun_disable = ctlfe_lun_disable;
415		fe->targ_lun_arg = bus_softc;
416		fe->fe_datamove = ctlfe_datamove_done;
417		fe->fe_done = ctlfe_datamove_done;
418		fe->fe_dump = ctlfe_dump;
419		/*
420		 * XXX KDM the path inquiry doesn't give us the maximum
421		 * number of targets supported.
422		 */
423		fe->max_targets = cpi->max_target;
424		fe->max_target_id = cpi->max_target;
425
426		/*
427		 * XXX KDM need to figure out whether we're the master or
428		 * slave.
429		 */
430#ifdef CTLFEDEBUG
431		printf("%s: calling ctl_frontend_register() for %s%d\n",
432		       __func__, cpi->dev_name, cpi->unit_number);
433#endif
434		retval = ctl_frontend_register(fe, /*master_SC*/ 1);
435		if (retval != 0) {
436			printf("%s: ctl_frontend_register() failed with "
437			       "error %d!\n", __func__, retval);
438			free(bus_softc, M_CTLFE);
439			break;
440		} else {
441			mtx_lock(&ctlfe_list_mtx);
442			STAILQ_INSERT_TAIL(&ctlfe_softc_list, bus_softc, links);
443			mtx_unlock(&ctlfe_list_mtx);
444		}
445
446		break;
447	}
448	case AC_PATH_DEREGISTERED: {
449		struct ctlfe_softc *softc = NULL;
450
451		mtx_lock(&ctlfe_list_mtx);
452		STAILQ_FOREACH(softc, &ctlfe_softc_list, links) {
453			if (softc->path_id == xpt_path_path_id(path)) {
454				STAILQ_REMOVE(&ctlfe_softc_list, softc,
455						ctlfe_softc, links);
456				break;
457			}
458		}
459		mtx_unlock(&ctlfe_list_mtx);
460
461		if (softc != NULL) {
462			/*
463			 * XXX KDM are we certain at this point that there
464			 * are no outstanding commands for this frontend?
465			 */
466			ctl_frontend_deregister(&softc->fe);
467			free(softc, M_CTLFE);
468		}
469		break;
470	}
471	case AC_CONTRACT: {
472		struct ac_contract *ac;
473
474		ac = (struct ac_contract *)arg;
475
476		switch (ac->contract_number) {
477		case AC_CONTRACT_DEV_CHG: {
478			struct ac_device_changed *dev_chg;
479			struct ctlfe_softc *softc;
480			int retval, found;
481
482			dev_chg = (struct ac_device_changed *)ac->contract_data;
483
484			printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n",
485			       __func__, (uintmax_t)dev_chg->wwpn, dev_chg->port,
486			       xpt_path_path_id(path), dev_chg->target,
487			       (dev_chg->arrived == 0) ?  "left" : "arrived");
488
489			found = 0;
490
491			mtx_lock(&ctlfe_list_mtx);
492			STAILQ_FOREACH(softc, &ctlfe_softc_list, links) {
493				if (softc->path_id == xpt_path_path_id(path)) {
494					found = 1;
495					break;
496				}
497			}
498			mtx_unlock(&ctlfe_list_mtx);
499
500			if (found == 0) {
501				printf("%s: CTL port for CAM path %u not "
502				       "found!\n", __func__,
503				       xpt_path_path_id(path));
504				break;
505			}
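			/*
			 * Keep CTL's initiator map in sync with the fabric:
			 * register the WWPN when the initiator arrives and
			 * remove it when it departs.
			 */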
506			if (dev_chg->arrived != 0) {
507				retval = ctl_add_initiator(dev_chg->wwpn,
508					softc->fe.targ_port, dev_chg->target);
509			} else {
510				retval = ctl_remove_initiator(
511					softc->fe.targ_port, dev_chg->target);
512			}
513
514			if (retval != 0) {
515				printf("%s: could not %s port %d iid %u "
516				       "WWPN %#jx!\n", __func__,
517				       (dev_chg->arrived != 0) ? "add" :
518				       "remove", softc->fe.targ_port,
519				       dev_chg->target,
520				       (uintmax_t)dev_chg->wwpn);
521			}
522			break;
523		}
524		default:
525			printf("%s: unsupported contract number %ju\n",
526			       __func__, (uintmax_t)ac->contract_number);
527			break;
528		}
529		break;
530	}
531	default:
532		break;
533	}
534}
535
536static cam_status
537ctlferegister(struct cam_periph *periph, void *arg)
538{
539	struct ctlfe_softc *bus_softc;
540	struct ctlfe_lun_softc *softc;
541	struct cam_sim *sim;
542	union ccb en_lun_ccb;
543	cam_status status;
544	int i;
545
546	softc = (struct ctlfe_lun_softc *)arg;
547	bus_softc = softc->parent_softc;
548	sim = xpt_path_sim(periph->path);
549
550	TAILQ_INIT(&softc->work_queue);
551	softc->periph = periph;
552
553	callout_init_mtx(&softc->dma_callout, sim->mtx, /*flags*/ 0);
554	periph->softc = softc;
555
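	/*
	 * Ask the SIM to enable this LUN for target mode before we start
	 * queueing CCBs for it.
	 */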
556	xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
557	en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
558	en_lun_ccb.cel.grp6_len = 0;
559	en_lun_ccb.cel.grp7_len = 0;
560	en_lun_ccb.cel.enable = 1;
561	xpt_action(&en_lun_ccb);
562	status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
563	if (status != CAM_REQ_CMP) {
564		xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n",
565			  __func__, en_lun_ccb.ccb_h.status);
566		return (status);
567	}
568
569	status = CAM_REQ_CMP;
570
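	/*
	 * Preallocate ATIO CCBs and queue them to the SIM; the SIM uses them
	 * to hand incoming SCSI commands back to us via ctlfedone().
	 */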
571	for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) {
572		union ccb *new_ccb;
573
574		new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
575					      M_ZERO|M_NOWAIT);
576		if (new_ccb == NULL) {
577			status = CAM_RESRC_UNAVAIL;
578			break;
579		}
580		xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
581		new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
582		new_ccb->ccb_h.cbfcnp = ctlfedone;
583		xpt_action(new_ccb);
584		softc->atios_sent++;
585		status = new_ccb->ccb_h.status;
586		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
587			free(new_ccb, M_CTLFE);
588			break;
589		}
590	}
591
592	status = cam_periph_acquire(periph);
593	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
594		xpt_print(periph->path, "%s: could not acquire reference "
595			  "count, status = %#x\n", __func__, status);
596		return (status);
597	}
598
599	if (i == 0) {
600		xpt_print(periph->path, "%s: could not allocate ATIO CCBs, "
601			  "status 0x%x\n", __func__, status);
602		return (CAM_REQ_CMP_ERR);
603	}
604
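	/*
	 * Likewise preallocate immediate notify CCBs, which the SIM completes
	 * to report task management events such as aborts and resets.
	 */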
605	for (i = 0; i < CTLFE_IN_PER_LUN; i++) {
606		union ccb *new_ccb;
607
608		new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
609					      M_ZERO|M_NOWAIT);
610		if (new_ccb == NULL) {
611			status = CAM_RESRC_UNAVAIL;
612			break;
613		}
614
615		xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
616		new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
617		new_ccb->ccb_h.cbfcnp = ctlfedone;
618		xpt_action(new_ccb);
619		softc->inots_sent++;
620		status = new_ccb->ccb_h.status;
621		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
622			/*
623			 * Note that we don't free the CCB here.  If the
624			 * status is not CAM_REQ_INPROG, then we're
625			 * probably talking to a SIM that says it is
626			 * target-capable but doesn't support the
627			 * XPT_IMMEDIATE_NOTIFY CCB.  i.e. it supports the
628			 * older API.  In that case, it'll call xpt_done()
629			 * on the CCB, and we need to free it in our done
630			 * routine as a result.
631			 */
632			break;
633		}
634	}
635	if ((i == 0)
636	 || (status != CAM_REQ_INPROG)) {
637		xpt_print(periph->path, "%s: could not allocate immediate "
638			  "notify CCBs, status 0x%x\n", __func__, status);
639		return (CAM_REQ_CMP_ERR);
640	}
641	return (CAM_REQ_CMP);
642}
643
644static void
645ctlfeoninvalidate(struct cam_periph *periph)
646{
647	union ccb en_lun_ccb;
648	cam_status status;
649	struct ctlfe_lun_softc *softc;
650
651	softc = (struct ctlfe_lun_softc *)periph->softc;
652
653	xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
654	en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
655	en_lun_ccb.cel.grp6_len = 0;
656	en_lun_ccb.cel.grp7_len = 0;
657	en_lun_ccb.cel.enable = 0;
658	xpt_action(&en_lun_ccb);
659	status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
660	if (status != CAM_REQ_CMP) {
661		xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n",
662			  __func__, en_lun_ccb.ccb_h.status);
663		/*
664		 * XXX KDM what do we do now?
665		 */
666	}
667	xpt_print(periph->path, "LUN removed, %ju ATIOs outstanding, %ju "
668		  "INOTs outstanding, %d refs\n", softc->atios_sent -
669		  softc->atios_returned, softc->inots_sent -
670		  softc->inots_returned, periph->refcount);
671}
672
673static void
674ctlfecleanup(struct cam_periph *periph)
675{
676	struct ctlfe_lun_softc *softc;
677	struct ctlfe_softc *bus_softc;
678
679	xpt_print(periph->path, "%s: Called\n", __func__);
680
681	softc = (struct ctlfe_lun_softc *)periph->softc;
682	bus_softc = softc->parent_softc;
683
684	STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links);
685
686	/*
687	 * XXX KDM is there anything else that needs to be done here?
688	 */
689
690	callout_stop(&softc->dma_callout);
691
692	free(softc, M_CTLFE);
693}
694
695static void
696ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
697{
698	struct ctlfe_lun_softc *softc;
699	struct ccb_hdr *ccb_h;
700
701	softc = (struct ctlfe_lun_softc *)periph->softc;
702
703	softc->ccbs_alloced++;
704
705	start_ccb->ccb_h.ccb_type = CTLFE_CCB_DEFAULT;
706
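	/*
	 * The work queue holds ATIOs for which CTL wants either a data
	 * transfer or final status sent; start_ccb is the CCB we turn into
	 * the corresponding CTIO below.
	 */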
707	ccb_h = TAILQ_FIRST(&softc->work_queue);
708	if (periph->immediate_priority <= periph->pinfo.priority) {
709		panic("shouldn't get to the CCB waiting case!");
710		start_ccb->ccb_h.ccb_type = CTLFE_CCB_WAITING;
711		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
712				  periph_links.sle);
713		periph->immediate_priority = CAM_PRIORITY_NONE;
714		wakeup(&periph->ccb_list);
715	} else if (ccb_h == NULL) {
716		softc->ccbs_freed++;
717		xpt_release_ccb(start_ccb);
718	} else {
719		struct ccb_accept_tio *atio;
720		struct ccb_scsiio *csio;
721		uint8_t *data_ptr;
722		uint32_t dxfer_len;
723		ccb_flags flags;
724		union ctl_io *io;
725		uint8_t scsi_status;
726
727		/* Take the ATIO off the work queue */
728		TAILQ_REMOVE(&softc->work_queue, ccb_h, periph_links.tqe);
729		atio = (struct ccb_accept_tio *)ccb_h;
730		io = (union ctl_io *)ccb_h->io_ptr;
731		csio = &start_ccb->csio;
732
733		flags = atio->ccb_h.flags &
734			(CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);
735
736		if ((io == NULL)
737		 || (io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) {
738			/*
739			 * We're done, send status back.
740			 */
741			flags |= CAM_SEND_STATUS;
742			if (io == NULL) {
743				scsi_status = SCSI_STATUS_BUSY;
744				csio->sense_len = 0;
745			} else if ((io->io_hdr.status & CTL_STATUS_MASK) ==
746				   CTL_CMD_ABORTED) {
747				io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;
748
749				/*
750				 * If this command was aborted, we don't
751				 * need to send status back to the SIM.
752				 * Just free the CTIO and ctl_io, and
753				 * recycle the ATIO back to the SIM.
754				 */
755				xpt_print(periph->path, "%s: aborted "
756					  "command 0x%04x discarded\n",
757					  __func__, io->scsiio.tag_num);
758				ctl_free_io(io);
759				/*
760				 * For a wildcard attachment, commands can
761				 * come in with a specific target/lun.  Reset
762				 * the target and LUN fields back to the
763				 * wildcard values before we send them back
764				 * down to the SIM.  The SIM has a wildcard
765				 * LUN enabled, not whatever target/lun
766				 * these happened to be.
767				 */
768				if (softc->flags & CTLFE_LUN_WILDCARD) {
769					atio->ccb_h.target_id =
770						CAM_TARGET_WILDCARD;
771					atio->ccb_h.target_lun =
772						CAM_LUN_WILDCARD;
773				}
774
775				if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
776					cam_release_devq(periph->path,
777							 /*relsim_flags*/0,
778							 /*reduction*/0,
779 							 /*timeout*/0,
780							 /*getcount_only*/0);
781					atio->ccb_h.status &= ~CAM_DEV_QFRZN;
782				}
783
784				ccb_h = TAILQ_FIRST(&softc->work_queue);
785
786				if (atio->ccb_h.func_code !=
787				    XPT_ACCEPT_TARGET_IO) {
788					xpt_print(periph->path, "%s: func_code "
789						  "is %#x\n", __func__,
790						  atio->ccb_h.func_code);
791				}
792				start_ccb->ccb_h.func_code = XPT_ABORT;
793				start_ccb->cab.abort_ccb = (union ccb *)atio;
794				start_ccb->ccb_h.cbfcnp = ctlfedone;
795
796				/* Tell the SIM that we've aborted this ATIO */
797				xpt_action(start_ccb);
798				softc->ccbs_freed++;
799				xpt_release_ccb(start_ccb);
800
801				/*
802				 * Send the ATIO back down to the SIM.
803				 */
804				xpt_action((union ccb *)atio);
805				softc->atios_sent++;
806
807				/*
808				 * If we still have work to do, ask for
809				 * another CCB.  Otherwise, deactivate our
810				 * callout.
811				 */
812				if (ccb_h != NULL)
813					xpt_schedule(periph, /*priority*/ 1);
814				else
815					callout_stop(&softc->dma_callout);
816
817				return;
818			} else {
819				io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;
820				scsi_status = io->scsiio.scsi_status;
821				csio->sense_len = io->scsiio.sense_len;
822			}
823			data_ptr = NULL;
824			dxfer_len = 0;
825			if (io == NULL) {
826				printf("%s: tag %04x io is NULL\n", __func__,
827				       atio->tag_id);
828			} else {
829#ifdef CTLFEDEBUG
830				printf("%s: tag %04x status %x\n", __func__,
831				       atio->tag_id, io->io_hdr.status);
832#endif
833			}
834			csio->sglist_cnt = 0;
835			if (csio->sense_len != 0) {
836				csio->sense_data = io->scsiio.sense_data;
837				flags |= CAM_SEND_SENSE;
838			} else if (scsi_status == SCSI_STATUS_CHECK_COND) {
839				xpt_print(periph->path, "%s: check condition "
840					  "with no sense\n", __func__);
841			}
842		} else {
843			struct ctlfe_lun_cmd_info *cmd_info;
844
845			/*
846			 * Datamove call, we need to set up the S/G list.
847			 */
848
849			cmd_info = (struct ctlfe_lun_cmd_info *)
850				io->io_hdr.port_priv;
851
852			KASSERT(sizeof(*cmd_info) < CTL_PORT_PRIV_SIZE,
853				("%s: sizeof(struct ctlfe_lun_cmd_info) %zd < "
854				"CTL_PORT_PRIV_SIZE %d", __func__,
855				sizeof(*cmd_info), CTL_PORT_PRIV_SIZE));
856			io->io_hdr.flags &= ~CTL_FLAG_DMA_QUEUED;
857
858			/*
859			 * Need to zero this, in case it has been used for
860			 * a previous datamove for this particular I/O.
861			 */
862			bzero(cmd_info, sizeof(*cmd_info));
863			scsi_status = 0;
864
865			/*
866			 * Set the direction, relative to the initiator.
867			 */
868			flags &= ~CAM_DIR_MASK;
869			if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
870			     CTL_FLAG_DATA_IN)
871				flags |= CAM_DIR_IN;
872			else
873				flags |= CAM_DIR_OUT;
874
875			csio->cdb_len = atio->cdb_len;
876
877			flags &= ~CAM_DATA_MASK;
878			if (io->scsiio.kern_sg_entries == 0) {
879				/* No S/G list */
880				data_ptr = io->scsiio.kern_data_ptr;
881				dxfer_len = io->scsiio.kern_data_len;
882				csio->sglist_cnt = 0;
883
884				if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
885					flags |= CAM_DATA_PADDR;
886				else
887					flags |= CAM_DATA_VADDR;
888			} else if (io->scsiio.kern_sg_entries <=
889				   (sizeof(cmd_info->cam_sglist)/
890				   sizeof(cmd_info->cam_sglist[0]))) {
891				/*
892				 * S/G list with physical or virtual pointers.
893				 * Just populate the CAM S/G list with the
894				 * pointers.
895				 */
896				int i;
897				struct ctl_sg_entry *ctl_sglist;
898				bus_dma_segment_t *cam_sglist;
899
900				ctl_sglist = (struct ctl_sg_entry *)
901					io->scsiio.kern_data_ptr;
902				cam_sglist = cmd_info->cam_sglist;
903
904				for (i = 0; i < io->scsiio.kern_sg_entries;i++){
905					cam_sglist[i].ds_addr =
906						(bus_addr_t)ctl_sglist[i].addr;
907					cam_sglist[i].ds_len =
908						ctl_sglist[i].len;
909				}
910				csio->sglist_cnt = io->scsiio.kern_sg_entries;
911				if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
912					flags |= CAM_DATA_SG_PADDR;
913				else
914					flags |= CAM_DATA_SG;
915				data_ptr = (uint8_t *)cam_sglist;
916				dxfer_len = io->scsiio.kern_data_len;
917			} else {
918				/* S/G list with virtual pointers */
919				struct ctl_sg_entry *sglist;
920				int *ti;
921
922				/*
923				 * If we have more S/G list pointers than
924				 * will fit in the available storage in the
925				 * cmd_info structure inside the ctl_io header,
926				 * then we need to send down the pointers
927				 * one element at a time.
928				 */
929
930				sglist = (struct ctl_sg_entry *)
931					io->scsiio.kern_data_ptr;
932				ti = &cmd_info->cur_transfer_index;
933				data_ptr = sglist[*ti].addr;
934				dxfer_len = sglist[*ti].len;
935				csio->sglist_cnt = 0;
936				if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
937					flags |= CAM_DATA_PADDR;
938				else
939					flags |= CAM_DATA_VADDR;
940				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
941				(*ti)++;
942			}
943
944			io->scsiio.ext_data_filled += dxfer_len;
945
946			if (io->scsiio.ext_data_filled >
947			    io->scsiio.kern_total_len) {
948				xpt_print(periph->path, "%s: tag 0x%04x "
949					  "fill len %u > total %u\n",
950					  __func__, io->scsiio.tag_num,
951					  io->scsiio.ext_data_filled,
952					  io->scsiio.kern_total_len);
953			}
954		}
955
956#ifdef CTLFEDEBUG
957		printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__,
958		       (flags & CAM_SEND_STATUS) ? "done" : "datamove",
959		       atio->tag_id, flags, data_ptr, dxfer_len);
960#endif
961
962		/*
963		 * Valid combinations:
964		 *  - CAM_SEND_STATUS, SCATTER_VALID = 0, dxfer_len = 0,
965		 *    sglist_cnt = 0
966		 *  - CAM_SEND_STATUS = 0, SCATTER_VALID = 0, dxfer_len != 0,
967		 *    sglist_cnt = 0
968		 *  - CAM_SEND_STATUS = 0, SCATTER_VALID, dxfer_len != 0,
969		 *    sglist_cnt != 0
970		 */
971#ifdef CTLFEDEBUG
972		if (((flags & CAM_SEND_STATUS)
973		  && (((flags & CAM_SCATTER_VALID) != 0)
974		   || (dxfer_len != 0)
975		   || (csio->sglist_cnt != 0)))
976		 || (((flags & CAM_SEND_STATUS) == 0)
977		  && (dxfer_len == 0))
978		 || ((flags & CAM_SCATTER_VALID)
979		  && (csio->sglist_cnt == 0))
980		 || (((flags & CAM_SCATTER_VALID) == 0)
981		  && (csio->sglist_cnt != 0))) {
982			printf("%s: tag %04x cdb %02x flags %#x dxfer_len "
983			       "%d sg %u\n", __func__, atio->tag_id,
984			       atio->cdb_io.cdb_bytes[0], flags, dxfer_len,
985			       csio->sglist_cnt);
986			if (io != NULL) {
987				printf("%s: tag %04x io status %#x\n", __func__,
988				       atio->tag_id, io->io_hdr.status);
989			} else {
990				printf("%s: tag %04x no associated io\n",
991				       __func__, atio->tag_id);
992			}
993		}
994#endif
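		/*
		 * Build the CTIO for this ATIO: either a data transfer
		 * (datamove) or the final SCSI status, as decided above.
		 */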
995		cam_fill_ctio(csio,
996			      /*retries*/ 2,
997			      ctlfedone,
998			      flags,
999			      (flags & CAM_TAG_ACTION_VALID) ?
1000			       MSG_SIMPLE_Q_TAG : 0,
1001			      atio->tag_id,
1002			      atio->init_id,
1003			      scsi_status,
1004			      /*data_ptr*/ data_ptr,
1005			      /*dxfer_len*/ dxfer_len,
1006			      /*timeout*/ 5 * 1000);
1007		start_ccb->ccb_h.ccb_atio = atio;
1008		if (((flags & CAM_SEND_STATUS) == 0)
1009		 && (io != NULL))
1010			io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
1011
1012		softc->ctios_sent++;
1013
1014		xpt_action(start_ccb);
1015
1016		if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1017			cam_release_devq(periph->path,
1018					 /*relsim_flags*/0,
1019					 /*reduction*/0,
1020 					 /*timeout*/0,
1021					 /*getcount_only*/0);
1022			atio->ccb_h.status &= ~CAM_DEV_QFRZN;
1023		}
1024
1025		ccb_h = TAILQ_FIRST(&softc->work_queue);
1026	}
1027	/*
1028	 * If we still have work to do, ask for another CCB.  Otherwise,
1029	 * deactivate our callout.
1030	 */
1031	if (ccb_h != NULL)
1032		xpt_schedule(periph, /*priority*/ 1);
1033	else
1034		callout_stop(&softc->dma_callout);
1035}
1036
1037static void
1038ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb)
1039{
1040	struct ctlfe_lun_softc *softc;
1041
1042	softc = (struct ctlfe_lun_softc *)periph->softc;
1043
1044	switch (ccb->ccb_h.func_code) {
1045	case XPT_ACCEPT_TARGET_IO:
1046		softc->atios_returned++;
1047		break;
1048	case XPT_IMMEDIATE_NOTIFY:
1049	case XPT_NOTIFY_ACKNOWLEDGE:
1050		softc->inots_returned++;
1051		break;
1052	default:
1053		break;
1054	}
1055
1056	free(ccb, M_CTLFE);
1057
1058	KASSERT(softc->atios_returned <= softc->atios_sent, ("%s: "
1059		"atios_returned %ju > atios_sent %ju", __func__,
1060		softc->atios_returned, softc->atios_sent));
1061	KASSERT(softc->inots_returned <= softc->inots_sent, ("%s: "
1062		"inots_returned %ju > inots_sent %ju", __func__,
1063		softc->inots_returned, softc->inots_sent));
1064
1065	/*
1066	 * If we have received all of our CCBs, we can release our
1067	 * reference on the peripheral driver.  It will probably go away
1068	 * now.
1069	 */
1070	if ((softc->atios_returned == softc->atios_sent)
1071	 && (softc->inots_returned == softc->inots_sent)) {
1072		cam_periph_release_locked(periph);
1073	}
1074}
1075
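/*
 * Rewrite the LBA and transfer length in a READ/WRITE CDB so that the
 * command restarts at the given byte offset.  Used when an SRR pushes the
 * data pointer back after status has already been generated.  Assumes a
 * 512-byte block size, as noted below.
 */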
1076static int
1077ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset)
1078{
1079	uint64_t lba;
1080	uint32_t num_blocks, nbc;
1081	uint8_t *cmdbyt = (atio->ccb_h.flags & CAM_CDB_POINTER)?
1082	    atio->cdb_io.cdb_ptr : atio->cdb_io.cdb_bytes;
1083
1084	nbc = offset >> 9;	/* ASSUMING 512 BYTE BLOCKS */
1085
1086	switch (cmdbyt[0]) {
1087	case READ_6:
1088	case WRITE_6:
1089	{
1090		struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt;
1091		lba = scsi_3btoul(cdb->addr);
1092		lba &= 0x1fffff;
1093		num_blocks = cdb->length;
1094		if (num_blocks == 0)
1095			num_blocks = 256;
1096		lba += nbc;
1097		num_blocks -= nbc;
1098		scsi_ulto3b(lba, cdb->addr);
1099		cdb->length = num_blocks;
1100		break;
1101	}
1102	case READ_10:
1103	case WRITE_10:
1104	{
1105		struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt;
1106		lba = scsi_4btoul(cdb->addr);
1107		num_blocks = scsi_2btoul(cdb->length);
1108		lba += nbc;
1109		num_blocks -= nbc;
1110		scsi_ulto4b(lba, cdb->addr);
1111		scsi_ulto2b(num_blocks, cdb->length);
1112		break;
1113	}
1114	case READ_12:
1115	case WRITE_12:
1116	{
1117		struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt;
1118		lba = scsi_4btoul(cdb->addr);
1119		num_blocks = scsi_4btoul(cdb->length);
1120		lba += nbc;
1121		num_blocks -= nbc;
1122		scsi_ulto4b(lba, cdb->addr);
1123		scsi_ulto4b(num_blocks, cdb->length);
1124		break;
1125	}
1126	case READ_16:
1127	case WRITE_16:
1128	{
1129		struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt;
1130		lba = scsi_8btou64(cdb->addr);
1131		num_blocks = scsi_4btoul(cdb->length);
1132		lba += nbc;
1133		num_blocks -= nbc;
1134		scsi_u64to8b(lba, cdb->addr);
1135		scsi_ulto4b(num_blocks, cdb->length);
1136		break;
1137	}
1138	default:
1139		return (-1);
1140	}
1141	return (0);
1142}
1143
1144static void
1145ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
1146{
1147	struct ctlfe_lun_softc *softc;
1148	struct ctlfe_softc *bus_softc;
1149	struct ccb_accept_tio *atio = NULL;
1150	union ctl_io *io = NULL;
1151
1152#ifdef CTLFEDEBUG
1153	printf("%s: entered, func_code = %#x, type = %#lx\n", __func__,
1154	       done_ccb->ccb_h.func_code, done_ccb->ccb_h.ccb_type);
1155#endif
1156
1157	softc = (struct ctlfe_lun_softc *)periph->softc;
1158	bus_softc = softc->parent_softc;
1159
1160	if (done_ccb->ccb_h.ccb_type == CTLFE_CCB_WAITING) {
1161		panic("shouldn't get to the CCB waiting case!");
1162		wakeup(&done_ccb->ccb_h.cbfcnp);
1163		return;
1164	}
1165
1166	/*
1167	 * If the peripheral is invalid, ATIOs and immediate notify CCBs
1168	 * need to be freed.  Most of the ATIOs and INOTs that come back
1169	 * will be CCBs that are being returned from the SIM as a result of
1170	 * our disabling the LUN.
1171	 *
1172	 * Other CCB types are handled in their respective cases below.
1173	 */
1174	if (periph->flags & CAM_PERIPH_INVALID) {
1175		switch (done_ccb->ccb_h.func_code) {
1176		case XPT_ACCEPT_TARGET_IO:
1177		case XPT_IMMEDIATE_NOTIFY:
1178		case XPT_NOTIFY_ACKNOWLEDGE:
1179			ctlfe_free_ccb(periph, done_ccb);
1180			return;
1181		default:
1182			break;
1183		}
1184
1185	}
1186	switch (done_ccb->ccb_h.func_code) {
1187	case XPT_ACCEPT_TARGET_IO: {
1188
1189		atio = &done_ccb->atio;
1190
1191		softc->atios_returned++;
1192
1193 resubmit:
1194		/*
1195		 * Allocate a ctl_io, pass it to CTL, and wait for the
1196		 * datamove or done.
1197		 */
1198		io = ctl_alloc_io(bus_softc->fe.ctl_pool_ref);
1199		if (io == NULL) {
1200			atio->ccb_h.flags &= ~CAM_DIR_MASK;
1201			atio->ccb_h.flags |= CAM_DIR_NONE;
1202
1203			printf("%s: ctl_alloc_io failed!\n", __func__);
1204
1205			/*
1206			 * XXX KDM need to set SCSI_STATUS_BUSY, but there
1207			 * is no field in the ATIO structure to do that,
1208			 * and we aren't able to allocate a ctl_io here.
1209			 * What to do?
1210			 */
1211			atio->sense_len = 0;
1212			done_ccb->ccb_h.io_ptr = NULL;
1213			TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
1214					  periph_links.tqe);
1215			xpt_schedule(periph, /*priority*/ 1);
1216			break;
1217		}
1218		ctl_zero_io(io);
1219
1220		/* Save pointers on both sides */
1221		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = done_ccb;
1222		done_ccb->ccb_h.io_ptr = io;
1223
1224		/*
1225		 * Only SCSI I/O comes down this path, resets, etc. come
1226		 * down the immediate notify path below.
1227		 */
1228		io->io_hdr.io_type = CTL_IO_SCSI;
1229		io->io_hdr.nexus.initid.id = atio->init_id;
1230		io->io_hdr.nexus.targ_port = bus_softc->fe.targ_port;
1231		io->io_hdr.nexus.targ_target.id = atio->ccb_h.target_id;
1232		io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun;
1233		io->scsiio.tag_num = atio->tag_id;
1234		switch (atio->tag_action) {
1235		case CAM_TAG_ACTION_NONE:
1236			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
1237			break;
1238		case MSG_SIMPLE_TASK:
1239			io->scsiio.tag_type = CTL_TAG_SIMPLE;
1240			break;
1241		case MSG_HEAD_OF_QUEUE_TASK:
1242        		io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
1243			break;
1244		case MSG_ORDERED_TASK:
1245        		io->scsiio.tag_type = CTL_TAG_ORDERED;
1246			break;
1247		case MSG_ACA_TASK:
1248			io->scsiio.tag_type = CTL_TAG_ACA;
1249			break;
1250		default:
1251			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
1252			printf("%s: unhandled tag type %#x!!\n", __func__,
1253			       atio->tag_action);
1254			break;
1255		}
1256		if (atio->cdb_len > sizeof(io->scsiio.cdb)) {
1257			printf("%s: WARNING: CDB len %d > ctl_io space %zd\n",
1258			       __func__, atio->cdb_len, sizeof(io->scsiio.cdb));
1259		}
1260		io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb));
1261		bcopy(atio->cdb_io.cdb_bytes, io->scsiio.cdb,
1262		      io->scsiio.cdb_len);
1263
1264#ifdef CTLFEDEBUG
1265		printf("%s: %ju:%d:%ju:%d: tag %04x CDB %02x\n", __func__,
1266		        (uintmax_t)io->io_hdr.nexus.initid.id,
1267		        io->io_hdr.nexus.targ_port,
1268		        (uintmax_t)io->io_hdr.nexus.targ_target.id,
1269		        io->io_hdr.nexus.targ_lun,
1270			io->scsiio.tag_num, io->scsiio.cdb[0]);
1271#endif
1272
1273		ctl_queue(io);
1274		break;
1275	}
1276	case XPT_CONT_TARGET_IO: {
1277		int srr = 0;
1278		uint32_t srr_off = 0;
1279
1280		atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio;
1281		io = (union ctl_io *)atio->ccb_h.io_ptr;
1282
1283		softc->ctios_returned++;
1284#ifdef CTLFEDEBUG
1285		printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n",
1286		       __func__, atio->tag_id, done_ccb->ccb_h.flags);
1287#endif
1288		/*
1289		 * Handle the SRR case where the data pointer is pushed back (hack).
1290		 */
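		/*
		 * The SRR arrives as an extended message: byte 1 is the
		 * length (5), byte 2 is the function code (0), and bytes
		 * 3-6 hold the byte offset at which the initiator wants
		 * the transfer to resume.
		 */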
1291		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV
1292		    && done_ccb->csio.msg_ptr != NULL
1293		    && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED
1294		    && done_ccb->csio.msg_ptr[1] == 5
1295       		    && done_ccb->csio.msg_ptr[2] == 0) {
1296			srr = 1;
1297			srr_off =
1298			    (done_ccb->csio.msg_ptr[3] << 24)
1299			    | (done_ccb->csio.msg_ptr[4] << 16)
1300			    | (done_ccb->csio.msg_ptr[5] << 8)
1301			    | (done_ccb->csio.msg_ptr[6]);
1302		}
1303
1304		if (srr && (done_ccb->ccb_h.flags & CAM_SEND_STATUS)) {
1305			/*
1306			 * If status was being sent, the back end data is now
1307			 * history. Hack it up and resubmit a new command with
1308			 * the CDB adjusted. If the SIM does the right thing,
1309			 * all of the resid math should work.
1310			 */
1311			softc->ccbs_freed++;
1312			xpt_release_ccb(done_ccb);
1313			ctl_free_io(io);
1314			if (ctlfe_adjust_cdb(atio, srr_off) == 0) {
1315				done_ccb = (union ccb *)atio;
1316				goto resubmit;
1317			}
1318			/*
1319			 * Fall through to doom....
1320			 */
1321		} else if (srr) {
1322			/*
1323			 * If we have an srr and we're still sending data, we
1324			 * should be able to adjust offsets and cycle again.
1325			 */
1326			io->scsiio.kern_rel_offset =
1327			    io->scsiio.ext_data_filled = srr_off;
1328			io->scsiio.ext_data_len = io->scsiio.kern_total_len -
1329			    io->scsiio.kern_rel_offset;
1330			softc->ccbs_freed++;
1331			io->scsiio.io_hdr.status = CTL_STATUS_NONE;
1332			xpt_release_ccb(done_ccb);
1333			TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
1334					  periph_links.tqe);
1335			xpt_schedule(periph, /*priority*/ 1);
1336			return;
1337		}
1338
1339		/*
1340		 * If we were sending status back to the initiator, free up
1341		 * resources.  If we were doing a datamove, call the
1342		 * datamove done routine.
1343		 */
1344		if (done_ccb->ccb_h.flags & CAM_SEND_STATUS) {
1345			softc->ccbs_freed++;
1346			xpt_release_ccb(done_ccb);
1347			ctl_free_io(io);
1348			/*
1349			 * For a wildcard attachment, commands can come in
1350			 * with a specific target/lun.  Reset the target
1351			 * and LUN fields back to the wildcard values before
1352			 * we send them back down to the SIM.  The SIM has
1353			 * a wildcard LUN enabled, not whatever target/lun
1354			 * these happened to be.
1355			 */
1356			if (softc->flags & CTLFE_LUN_WILDCARD) {
1357				atio->ccb_h.target_id = CAM_TARGET_WILDCARD;
1358				atio->ccb_h.target_lun = CAM_LUN_WILDCARD;
1359			}
1360			if (periph->flags & CAM_PERIPH_INVALID) {
1361				ctlfe_free_ccb(periph, (union ccb *)atio);
1362				return;
1363			} else {
1364				xpt_action((union ccb *)atio);
1365				softc->atios_sent++;
1366			}
1367		} else {
1368			struct ctlfe_lun_cmd_info *cmd_info;
1369			struct ccb_scsiio *csio;
1370
1371			csio = &done_ccb->csio;
1372			cmd_info = (struct ctlfe_lun_cmd_info *)
1373				io->io_hdr.port_priv;
1374
1375			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
1376
1377			io->scsiio.ext_data_len += csio->dxfer_len;
1378			if (io->scsiio.ext_data_len >
1379			    io->scsiio.kern_total_len) {
1380				xpt_print(periph->path, "%s: tag 0x%04x "
1381					  "done len %u > total %u sent %u\n",
1382					  __func__, io->scsiio.tag_num,
1383					  io->scsiio.ext_data_len,
1384					  io->scsiio.kern_total_len,
1385					  io->scsiio.ext_data_filled);
1386			}
1387			/*
1388			 * Translate CAM status to CTL status.  Success
1389			 * does not change the overall ctl_io status.  In
1390			 * that case we just set port_status to 0.  If we
1391			 * have a failure, though, set a data phase error
1392			 * for the overall ctl_io.
1393			 */
1394			switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) {
1395			case CAM_REQ_CMP:
1396				io->io_hdr.port_status = 0;
1397				break;
1398			default:
1399				/*
1400				 * XXX KDM we probably need to figure out a
1401				 * standard set of errors that the SIM
1402				 * drivers should return in the event of a
1403				 * data transfer failure.  A data phase
1404				 * error will at least point the user to a
1405				 * data transfer error of some sort.
1406				 * Hopefully the SIM printed out some
1407				 * additional information to give the user
1408				 * a clue what happened.
1409				 */
1410				io->io_hdr.port_status = 0xbad1;
1411				ctl_set_data_phase_error(&io->scsiio);
1412				/*
1413				 * XXX KDM figure out residual.
1414				 */
1415				break;
1416			}
1417			/*
1418			 * If we had to break this S/G list into multiple
1419			 * pieces, figure out where we are in the list, and
1420			 * continue sending pieces if necessary.
1421			 */
1422			if ((cmd_info->flags & CTLFE_CMD_PIECEWISE)
1423			 && (io->io_hdr.port_status == 0)
1424			 && (cmd_info->cur_transfer_index <
1425			     io->scsiio.kern_sg_entries)) {
1426				struct ctl_sg_entry *sglist;
1427				ccb_flags flags;
1428				uint8_t scsi_status;
1429				uint8_t *data_ptr;
1430				uint32_t dxfer_len;
1431				int *ti;
1432
1433				sglist = (struct ctl_sg_entry *)
1434					io->scsiio.kern_data_ptr;
1435				ti = &cmd_info->cur_transfer_index;
1436				flags = atio->ccb_h.flags &
1437					(CAM_DIS_DISCONNECT|
1438					 CAM_TAG_ACTION_VALID|
1439					 CAM_DIR_MASK);
1440
1441				/*
1442				 * Set the direction, relative to the initiator.
1443				 */
1444				flags &= ~CAM_DIR_MASK;
1445				if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
1446				     CTL_FLAG_DATA_IN)
1447					flags |= CAM_DIR_IN;
1448				else
1449					flags |= CAM_DIR_OUT;
1450
1451				data_ptr = sglist[*ti].addr;
1452				dxfer_len = sglist[*ti].len;
1453				(*ti)++;
1454
1455				scsi_status = 0;
1456
1457				if (((flags & CAM_SEND_STATUS) == 0)
1458				 && (dxfer_len == 0)) {
1459					printf("%s: tag %04x no status or "
1460					       "len cdb = %02x\n", __func__,
1461					       atio->tag_id,
1462					atio->cdb_io.cdb_bytes[0]);
1463					printf("%s: tag %04x io status %#x\n",
1464					       __func__, atio->tag_id,
1465					       io->io_hdr.status);
1466				}
1467
1468				cam_fill_ctio(csio,
1469					      /*retries*/ 2,
1470					      ctlfedone,
1471					      flags,
1472					      (flags & CAM_TAG_ACTION_VALID) ?
1473					       MSG_SIMPLE_Q_TAG : 0,
1474					      atio->tag_id,
1475					      atio->init_id,
1476					      scsi_status,
1477					      /*data_ptr*/ data_ptr,
1478					      /*dxfer_len*/ dxfer_len,
1479					      /*timeout*/ 5 * 1000);
1480
1481				csio->resid = 0;
1482				csio->ccb_h.ccb_atio = atio;
1483				io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
1484				softc->ctios_sent++;
1485				xpt_action((union ccb *)csio);
1486			} else {
1487				/*
1488				 * Release the CTIO.  The ATIO will be sent back
1489				 * down to the SIM once we send status.
1490				 */
1491				softc->ccbs_freed++;
1492				xpt_release_ccb(done_ccb);
1493
1494				/* Call the backend move done callback */
1495				io->scsiio.be_move_done(io);
1496			}
1497		}
1498		break;
1499	}
1500	case XPT_IMMEDIATE_NOTIFY: {
1501		union ctl_io *io;
1502		struct ccb_immediate_notify *inot;
1503		cam_status status;
1504		int frozen;
1505
1506		inot = &done_ccb->cin1;
1507
1508		softc->inots_returned++;
1509
1510		frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
1511
1512		printf("%s: got XPT_IMMEDIATE_NOTIFY status %#x tag %#x "
1513		       "seq %#x\n", __func__, inot->ccb_h.status,
1514		       inot->tag_id, inot->seq_id);
1515
1516		io = ctl_alloc_io(bus_softc->fe.ctl_pool_ref);
1517		if (io != NULL) {
1518			int send_ctl_io;
1519
1520			send_ctl_io = 1;
1521
1522			ctl_zero_io(io);
1523			io->io_hdr.io_type = CTL_IO_TASK;
1524			io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr =done_ccb;
1525			inot->ccb_h.io_ptr = io;
1526			io->io_hdr.nexus.initid.id = inot->initiator_id;
1527			io->io_hdr.nexus.targ_port = bus_softc->fe.targ_port;
1528			io->io_hdr.nexus.targ_target.id = inot->ccb_h.target_id;
1529			io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun;
1530			/* XXX KDM should this be the tag_id? */
1531			io->taskio.tag_num = inot->seq_id;
1532
1533			status = inot->ccb_h.status & CAM_STATUS_MASK;
1534			switch (status) {
1535			case CAM_SCSI_BUS_RESET:
1536				io->taskio.task_action = CTL_TASK_BUS_RESET;
1537				break;
1538			case CAM_BDR_SENT:
1539				io->taskio.task_action = CTL_TASK_TARGET_RESET;
1540				break;
1541			case CAM_MESSAGE_RECV:
1542				switch (inot->arg) {
1543				case MSG_ABORT_TASK_SET:
1544					/*
1545					 * XXX KDM this isn't currently
1546					 * supported by CTL.  It ends up
1547					 * being a no-op.
1548					 */
1549					io->taskio.task_action =
1550						CTL_TASK_ABORT_TASK_SET;
1551					break;
1552				case MSG_TARGET_RESET:
1553					io->taskio.task_action =
1554						CTL_TASK_TARGET_RESET;
1555					break;
1556				case MSG_ABORT_TASK:
1557					io->taskio.task_action =
1558						CTL_TASK_ABORT_TASK;
1559					break;
1560				case MSG_LOGICAL_UNIT_RESET:
1561					io->taskio.task_action =
1562						CTL_TASK_LUN_RESET;
1563					break;
1564				case MSG_CLEAR_TASK_SET:
1565					/*
1566					 * XXX KDM this isn't currently
1567					 * supported by CTL.  It ends up
1568					 * being a no-op.
1569					 */
1570					io->taskio.task_action =
1571						CTL_TASK_CLEAR_TASK_SET;
1572					break;
1573				case MSG_CLEAR_ACA:
1574					io->taskio.task_action =
1575						CTL_TASK_CLEAR_ACA;
1576					break;
1577				case MSG_NOOP:
1578					send_ctl_io = 0;
1579					break;
1580				default:
1581					xpt_print(periph->path, "%s: "
1582						  "unsupported message 0x%x\n",
1583						  __func__, inot->arg);
1584					send_ctl_io = 0;
1585					break;
1586				}
1587				break;
1588			case CAM_REQ_ABORTED:
1589				/*
1590				 * This request was sent back by the driver.
1591				 * XXX KDM what do we do here?
1592				 */
1593				send_ctl_io = 0;
1594				break;
1595			case CAM_REQ_INVALID:
1596			case CAM_PROVIDE_FAIL:
1597			default:
1598				/*
1599				 * We should only get here if we're talking to
1600				 * a SIM that is target capable but supports
1601				 * the old API.  In
1602				 * that case, we need to just free the CCB.
1603				 * If we actually send a notify acknowledge,
1604				 * it will send that back with an error as
1605				 * well.
1606				 */
1607
1608				if ((status != CAM_REQ_INVALID)
1609				 && (status != CAM_PROVIDE_FAIL))
1610					xpt_print(periph->path, "%s: "
1611						  "unsupported CAM status "
1612						  "0x%x\n", __func__, status);
1613
1614				ctl_free_io(io);
1615				ctlfe_free_ccb(periph, done_ccb);
1616
1617				return;
1618			}
1619			if (send_ctl_io != 0) {
1620				ctl_queue(io);
1621			} else {
1622				ctl_free_io(io);
1623				done_ccb->ccb_h.status = CAM_REQ_INPROG;
1624				done_ccb->ccb_h.func_code =
1625					XPT_NOTIFY_ACKNOWLEDGE;
1626				xpt_action(done_ccb);
1627			}
1628		} else {
1629			xpt_print(periph->path, "%s: could not allocate "
1630				  "ctl_io for immediate notify!\n", __func__);
1631			/* requeue this to the adapter */
1632			done_ccb->ccb_h.status = CAM_REQ_INPROG;
1633			done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
1634			xpt_action(done_ccb);
1635		}
1636
1637		if (frozen != 0) {
1638			cam_release_devq(periph->path,
1639					 /*relsim_flags*/ 0,
1640					 /*opening reduction*/ 0,
1641					 /*timeout*/ 0,
1642					 /*getcount_only*/ 0);
1643		}
1644		break;
1645	}
1646	case XPT_NOTIFY_ACKNOWLEDGE:
1647		/*
1648		 * Queue this back down to the SIM as an immediate notify.
1649		 */
1650		done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
1651		xpt_action(done_ccb);
1652		softc->inots_sent++;
1653		break;
1654	case XPT_ABORT:
1655		/*
1656		 * XPT_ABORT is an immediate CCB, we shouldn't get here.
1657		 */
1658		panic("%s: XPT_ABORT CCB returned!", __func__);
1659		break;
1660	case XPT_SET_SIM_KNOB:
1661	case XPT_GET_SIM_KNOB:
1662		break;
1663	default:
1664		panic("%s: unexpected CCB type %#x", __func__,
1665		      done_ccb->ccb_h.func_code);
1666		break;
1667	}
1668}
1669
1670static void
1671ctlfe_onoffline(void *arg, int online)
1672{
1673	struct ctlfe_softc *bus_softc;
1674	union ccb *ccb;
1675	cam_status status;
1676	struct cam_path *path;
1677	struct cam_sim *sim;
1678	int set_wwnn;
1679
1680	bus_softc = (struct ctlfe_softc *)arg;
1681
1682	set_wwnn = 0;
1683
1684	sim = bus_softc->sim;
1685
1686	mtx_assert(sim->mtx, MA_OWNED);
1687
1688	status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id,
1689		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
1690	if (status != CAM_REQ_CMP) {
1691		printf("%s: unable to create path!\n", __func__);
1692		return;
1693	}
1694	ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP, M_NOWAIT | M_ZERO);
1695	if (ccb == NULL) {
1696		printf("%s: unable to malloc CCB!\n", __func__);
1697		xpt_free_path(path);
1698		return;
1699	}
1700	xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE);
1701
1702	/*
1703	 * Copan WWN format:
1704	 *
1705	 * Bits 63-60:	0x5		NAA, IEEE registered name
1706	 * Bits 59-36:	0x000ED5	IEEE Company name assigned to Copan
1707	 * Bits 35-12:			Copan SSN (Sequential Serial Number)
1708	 * Bits 11-8:			Type of port:
1709	 *					1 == N-Port
1710	 *					2 == F-Port
1711	 *					3 == NL-Port
1712	 * Bits 7-0:			0 == Node Name, >0 == Port Number
1713	 */
1714
1715	if (online != 0) {
1716
1717		ccb->ccb_h.func_code = XPT_GET_SIM_KNOB;
1718
1719
1720		xpt_action(ccb);
1721
1722
1723		if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0){
1724#ifdef RANDOM_WWNN
1725			uint64_t random_bits;
1726#endif
1727
1728			printf("%s: %s current WWNN %#jx\n", __func__,
1729			       bus_softc->port_name,
1730			       ccb->knob.xport_specific.fc.wwnn);
1731			printf("%s: %s current WWPN %#jx\n", __func__,
1732			       bus_softc->port_name,
1733			       ccb->knob.xport_specific.fc.wwpn);
1734
1735#ifdef RANDOM_WWNN
1736			arc4rand(&random_bits, sizeof(random_bits), 0);
1737#endif
1738
1739			/*
1740			 * XXX KDM this is a bit of a kludge for now.  We
1741			 * take the current WWNN/WWPN from the card, and
1742			 * replace the company identifier and the NL-Port
1743			 * indicator and the port number (for the WWPN).
1744			 * This should be replaced later with ddb_GetWWNN,
1745			 * or possibly a more centralized scheme.  (It
1746			 * would be nice to have the WWNN/WWPN for each
1747			 * port stored in the ctl_frontend structure.)
1748			 */
1749#ifdef RANDOM_WWNN
1750			ccb->knob.xport_specific.fc.wwnn =
1751				(random_bits &
1752				0x0000000fffffff00ULL) |
1753				/* Company ID */ 0x5000ED5000000000ULL |
1754				/* NL-Port */    0x0300;
1755			ccb->knob.xport_specific.fc.wwpn =
1756				(random_bits &
1757				0x0000000fffffff00ULL) |
1758				/* Company ID */ 0x5000ED5000000000ULL |
1759				/* NL-Port */    0x3000 |
1760				/* Port Num */ (bus_softc->fe.targ_port & 0xff);
1761
1762			/*
1763			 * This is a bit of an API break/reversal, but if
1764			 * we're doing the random WWNN that's a little
1765			 * different anyway.  So record what we're actually
1766			 * using with the frontend code so it's reported
1767			 * accurately.
1768			 */
1769			bus_softc->fe.wwnn =
1770				ccb->knob.xport_specific.fc.wwnn;
1771			bus_softc->fe.wwpn =
1772				ccb->knob.xport_specific.fc.wwpn;
1773			set_wwnn = 1;
1774#else /* RANDOM_WWNN */
1775			/*
1776			 * If the user has specified a WWNN/WWPN, send them
1777			 * down to the SIM.  Otherwise, record what the SIM
1778			 * has reported.
1779			 */
1780			if ((bus_softc->fe.wwnn != 0)
1781			 && (bus_softc->fe.wwpn != 0)) {
1782				ccb->knob.xport_specific.fc.wwnn =
1783					bus_softc->fe.wwnn;
1784				ccb->knob.xport_specific.fc.wwpn =
1785					bus_softc->fe.wwpn;
1786				set_wwnn = 1;
1787			} else {
1788				bus_softc->fe.wwnn =
1789					ccb->knob.xport_specific.fc.wwnn;
1790				bus_softc->fe.wwpn =
1791					ccb->knob.xport_specific.fc.wwpn;
1792			}
1793#endif /* RANDOM_WWNN */
1794
1795
1796			if (set_wwnn != 0) {
1797				printf("%s: %s new WWNN %#jx\n", __func__,
1798				       bus_softc->port_name,
1799				ccb->knob.xport_specific.fc.wwnn);
1800				printf("%s: %s new WWPN %#jx\n", __func__,
1801				       bus_softc->port_name,
1802				       ccb->knob.xport_specific.fc.wwpn);
1803			}
1804		} else {
1805			printf("%s: %s has no valid WWNN/WWPN\n", __func__,
1806			       bus_softc->port_name);
1807		}
1808	}
1809	ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
1810	ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
1811	if (set_wwnn != 0)
1812		ccb->knob.xport_specific.valid |= KNOB_VALID_ADDRESS;
1813
1814	if (online != 0)
1815		ccb->knob.xport_specific.fc.role = KNOB_ROLE_TARGET;
1816	else
1817		ccb->knob.xport_specific.fc.role = KNOB_ROLE_NONE;
1818
1819	xpt_action(ccb);
1820
1821	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1822		printf("%s: SIM %s (path id %d) target %s failed with "
1823		       "status %#x\n",
1824		       __func__, bus_softc->port_name, bus_softc->path_id,
1825		       (online != 0) ? "enable" : "disable",
1826		       ccb->ccb_h.status);
1827	} else {
1828		printf("%s: SIM %s (path id %d) target %s succeeded\n",
1829		       __func__, bus_softc->port_name, bus_softc->path_id,
1830		       (online != 0) ? "enable" : "disable");
1831	}
1832
1833	xpt_free_path(path);
1834
1835	free(ccb, M_TEMP);
1836
1837	return;
1838}
1839
1840static void
1841ctlfe_online(void *arg)
1842{
1843	struct ctlfe_softc *bus_softc;
1844	struct cam_path *path;
1845	cam_status status;
1846	struct ctlfe_lun_softc *lun_softc;
1847	struct cam_sim *sim;
1848
1849	bus_softc = (struct ctlfe_softc *)arg;
1850	sim = bus_softc->sim;
1851
1852	CAM_SIM_LOCK(sim);
1853
1854	/*
1855	 * Create the wildcard LUN before bringing the port online.
1856	 */
1857	status = xpt_create_path(&path, /*periph*/ NULL,
1858				 bus_softc->path_id, CAM_TARGET_WILDCARD,
1859				 CAM_LUN_WILDCARD);
1860	if (status != CAM_REQ_CMP) {
1861		printf("%s: unable to create path for wildcard periph\n",
1862				__func__);
1863		CAM_SIM_UNLOCK(sim);
1864		return;
1865	}
1866
1867	lun_softc = malloc(sizeof(*lun_softc), M_CTLFE,
1868			M_NOWAIT | M_ZERO);
1869	if (lun_softc == NULL) {
1870		xpt_print(path, "%s: unable to allocate softc for "
1871				"wildcard periph\n", __func__);
1872		xpt_free_path(path);
1873		CAM_SIM_UNLOCK(sim);
1874		return;
1875	}
1876
1877	lun_softc->parent_softc = bus_softc;
1878	lun_softc->flags |= CTLFE_LUN_WILDCARD;
1879
1880	STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, lun_softc, links);
1881
1882
1883	status = cam_periph_alloc(ctlferegister,
1884				  ctlfeoninvalidate,
1885				  ctlfecleanup,
1886				  ctlfestart,
1887				  "ctl",
1888				  CAM_PERIPH_BIO,
1889				  path,
1890				  ctlfeasync,
1891				  0,
1892				  lun_softc);
1893
1894	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1895		const struct cam_status_entry *entry;
1896
1897		entry = cam_fetch_status_entry(status);
1898
1899		printf("%s: CAM error %s (%#x) returned from "
1900		       "cam_periph_alloc()\n", __func__, (entry != NULL) ?
1901		       entry->status_text : "Unknown", status);
1902	}
1903
1904	xpt_free_path(path);
1905
1906	ctlfe_onoffline(arg, /*online*/ 1);
1907
1908	CAM_SIM_UNLOCK(sim);
1909}
1910
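/*
 * Take this port offline: give up the target role via ctlfe_onoffline(),
 * then invalidate the wildcard periph created in ctlfe_online().
 */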
1911static void
1912ctlfe_offline(void *arg)
1913{
1914	struct ctlfe_softc *bus_softc;
1915	struct cam_path *path;
1916	cam_status status;
1917	struct cam_periph *periph;
1918	struct cam_sim *sim;
1919
1920	bus_softc = (struct ctlfe_softc *)arg;
1921	sim = bus_softc->sim;
1922
1923	CAM_SIM_LOCK(sim);
1924
1925	ctlfe_onoffline(arg, /*online*/ 0);
1926
1927	/*
1928	 * Disable the wildcard LUN for this port now that we have taken
1929	 * the port offline.
1930	 */
1931	status = xpt_create_path(&path, /*periph*/ NULL,
1932				 bus_softc->path_id, CAM_TARGET_WILDCARD,
1933				 CAM_LUN_WILDCARD);
1934	if (status != CAM_REQ_CMP) {
1935		CAM_SIM_UNLOCK(sim);
1936		printf("%s: unable to create path for wildcard periph\n",
1937		       __func__);
1938		return;
1939	}
1940
1941
1942	if ((periph = cam_periph_find(path, "ctl")) != NULL)
1943		cam_periph_invalidate(periph);
1944
1945	xpt_free_path(path);
1946
1947	CAM_SIM_UNLOCK(sim);
1948}
1949
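/*
 * Target enable and disable are no-ops for this frontend.
 */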
1950static int
1951ctlfe_targ_enable(void *arg, struct ctl_id targ_id)
1952{
1953	return (0);
1954}
1955
1956static int
1957ctlfe_targ_disable(void *arg, struct ctl_id targ_id)
1958{
1959	return (0);
1960}
1961
1962/*
1963 * This will get called to enable a LUN on every bus that is attached to
1964 * CTL.  So we only need to create a path/periph for this particular bus.
1965 */
1966static int
1967ctlfe_lun_enable(void *arg, struct ctl_id targ_id, int lun_id)
1968{
1969	struct ctlfe_softc *bus_softc;
1970	struct ctlfe_lun_softc *softc;
1971	struct cam_path *path;
1972	struct cam_periph *periph;
1973	struct cam_sim *sim;
1974	cam_status status;
1975
1976	bus_softc = (struct ctlfe_softc *)arg;
1977	sim = bus_softc->sim;
1978
1979	status = xpt_create_path_unlocked(&path, /*periph*/ NULL,
1980					  bus_softc->path_id,
1981					  targ_id.id, lun_id);
1982	/* XXX KDM need some way to return status to CTL here? */
1983	if (status != CAM_REQ_CMP) {
1984		printf("%s: could not create path, status %#x\n", __func__,
1985		       status);
1986		return (1);
1987	}
1988
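	/*
	 * Allocate the LUN softc before taking the SIM lock, since an
	 * M_WAITOK allocation may sleep.
	 */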
1989	softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO);
1990	CAM_SIM_LOCK(sim);
1991	periph = cam_periph_find(path, "ctl");
1992	if (periph != NULL) {
1993		/* We've already got a periph, no need to alloc a new one. */
1994		xpt_free_path(path);
1995		free(softc, M_CTLFE);
1996		CAM_SIM_UNLOCK(sim);
1997		return (0);
1998	}
1999
2000	softc->parent_softc = bus_softc;
2001	STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links);
2002
2003	status = cam_periph_alloc(ctlferegister,
2004				  ctlfeoninvalidate,
2005				  ctlfecleanup,
2006				  ctlfestart,
2007				  "ctl",
2008				  CAM_PERIPH_BIO,
2009				  path,
2010				  ctlfeasync,
2011				  0,
2012				  softc);
2013
2014	xpt_free_path(path);
2015
2016	CAM_SIM_UNLOCK(sim);
2017
2018	return (0);
2019}
2020
2021/*
2022 * This will get called when the user removes a LUN to disable that LUN
2023 * on every bus that is attached to CTL.
2024 */
2025static int
2026ctlfe_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
2027{
2028	struct ctlfe_softc *softc;
2029	struct ctlfe_lun_softc *lun_softc;
2030	struct cam_sim *sim;
2031
2032	softc = (struct ctlfe_softc *)arg;
2033	sim = softc->sim;
2034
2035	CAM_SIM_LOCK(sim);
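	/*
	 * Find the periph for this target/LUN on our bus so that we can
	 * invalidate it.
	 */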
2036	STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) {
2037		struct cam_path *path;
2038
2039		path = lun_softc->periph->path;
2040
2041		if ((xpt_path_target_id(path) == targ_id.id)
2042		 && (xpt_path_lun_id(path) == lun_id)) {
2043			break;
2044		}
2045	}
2046	if (lun_softc == NULL) {
2047		CAM_SIM_UNLOCK(sim);
2048		printf("%s: can't find target %d lun %d\n", __func__,
2049		       targ_id.id, lun_id);
2050		return (1);
2051	}
2052
2053	cam_periph_invalidate(lun_softc->periph);
2054
2055	CAM_SIM_UNLOCK(sim);
2056
2057	return (0);
2058}
2059
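/*
 * Debugging aid: print the SIM's opening limits and CCB accounting.
 */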
2060static void
2061ctlfe_dump_sim(struct cam_sim *sim)
2062{
2063
2064	printf("%s%d: max tagged openings: %d, max dev openings: %d\n",
2065	       sim->sim_name, sim->unit_number,
2066	       sim->max_tagged_dev_openings, sim->max_dev_openings);
2067	printf("%s%d: max_ccbs: %u, ccb_count: %u\n",
2068	       sim->sim_name, sim->unit_number,
2069	       sim->max_ccbs, sim->ccb_count);
2070	printf("%s%d: ccb_freeq is %sempty\n",
2071	       sim->sim_name, sim->unit_number,
2072	       (SLIST_FIRST(&sim->ccb_freeq) == NULL) ? "" : "NOT ");
2073	printf("\n");
2074}
2075
2076/*
2077 * Assumes that the SIM lock is held.
2078 */
2079static void
2080ctlfe_dump_queue(struct ctlfe_lun_softc *softc)
2081{
2082	struct ccb_hdr *hdr;
2083	struct cam_periph *periph;
2084	int num_items;
2085
2086	periph = softc->periph;
2087	num_items = 0;
2088
2089	TAILQ_FOREACH(hdr, &softc->work_queue, periph_links.tqe) {
2090		union ctl_io *io;
2091
2092		io = hdr->io_ptr;
2093
2094		num_items++;
2095
2096		/*
2097		 * This can happen when we get an ATIO but can't allocate
2098		 * a ctl_io.  See the XPT_ACCEPT_TARGET_IO case in ctlfedone().
2099		 */
2100		if (io == NULL) {
2101			struct ccb_scsiio *csio;
2102
2103			csio = (struct ccb_scsiio *)hdr;
2104
2105			xpt_print(periph->path, "CCB %#x ctl_io allocation "
2106				  "failed\n", csio->tag_id);
2107			continue;
2108		}
2109
2110		/*
2111		 * Only regular SCSI I/O is put on the work
2112		 * queue, so we can print sense here.  There may be no
2113		 * sense if it's on the queue for a DMA, but this serves to
2114		 * print out the CCB as well.
2115		 *
2116		 * XXX KDM switch this over to scsi_sense_print() when
2117		 * CTL is merged in with CAM.
2118		 */
2119		ctl_io_error_print(io, NULL);
2120
2121		/*
2122		 * We're sending status back to the
2123		 * initiator, so we're on the queue waiting
2124		 * for a CTIO to do that.
2125		 */
2126		if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
2127			continue;
2128
2129		/*
2130		 * Otherwise, we're on the queue waiting to
2131		 * do a data transfer.
2132		 */
2133		xpt_print(periph->path, "Total %u, Current %u, Resid %u\n",
2134			  io->scsiio.kern_total_len, io->scsiio.kern_data_len,
2135			  io->scsiio.kern_data_resid);
2136	}
2137
2138	xpt_print(periph->path, "%d requests total waiting for CCBs\n",
2139		  num_items);
2140	xpt_print(periph->path, "%ju CCBs outstanding (%ju allocated, %ju "
2141		  "freed)\n", (uintmax_t)(softc->ccbs_alloced -
2142		  softc->ccbs_freed), (uintmax_t)softc->ccbs_alloced,
2143		  (uintmax_t)softc->ccbs_freed);
2144	xpt_print(periph->path, "%ju CTIOs outstanding (%ju sent, %ju "
2145		  "returned)\n", (uintmax_t)(softc->ctios_sent -
2146		  softc->ctios_returned), (uintmax_t)softc->ctios_sent,
2147		  (uintmax_t)softc->ctios_returned);
2148}
2149
2150/*
2151 * This function is called when we fail to get a CCB for a DMA or status return
2152 * to the initiator within the specified time period.
2153 *
2154 * The callout code should ensure that we hold the sim mutex here.
2155 */
2156static void
2157ctlfe_dma_timeout(void *arg)
2158{
2159	struct ctlfe_lun_softc *softc;
2160	struct cam_periph *periph;
2161	struct cam_sim *sim;
2162	int num_queued;
2163
2164	softc = (struct ctlfe_lun_softc *)arg;
2165	periph = softc->periph;
2166	sim = xpt_path_sim(periph->path);
2167	num_queued = 0;
2168
2169	/*
2170	 * Nothing to do...
2171	 */
2172	if (TAILQ_FIRST(&softc->work_queue) == NULL) {
2173		xpt_print(periph->path, "TIMEOUT triggered after %d "
2174			  "seconds, but nothing on work queue??\n",
2175			  CTLFE_DMA_TIMEOUT);
2176		return;
2177	}
2178
2179	xpt_print(periph->path, "TIMEOUT (%d seconds) waiting for DMA to "
2180		  "start\n", CTLFE_DMA_TIMEOUT);
2181
2182	ctlfe_dump_queue(softc);
2183
2184	ctlfe_dump_sim(sim);
2185
2186	xpt_print(periph->path, "calling xpt_schedule() to attempt to "
2187		  "unstick our queue\n");
2188
2189	xpt_schedule(periph, /*priority*/ 1);
2190
2191	xpt_print(periph->path, "xpt_schedule() call complete\n");
2192}
2193
2194/*
2195 * Datamove/done routine called by CTL.  Put ourselves on the queue to
2196 * receive a CCB from CAM so we can queue the continue I/O request down
2197 * to the adapter.
2198 */
2199static void
2200ctlfe_datamove_done(union ctl_io *io)
2201{
2202	union ccb *ccb;
2203	struct cam_sim *sim;
2204	struct cam_periph *periph;
2205	struct ctlfe_lun_softc *softc;
2206
2207	ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
2208
2209	sim = xpt_path_sim(ccb->ccb_h.path);
2210
2211	CAM_SIM_LOCK(sim);
2212
2213	periph = xpt_path_periph(ccb->ccb_h.path);
2214
2215	softc = (struct ctlfe_lun_softc *)periph->softc;
2216
2217	if (io->io_hdr.io_type == CTL_IO_TASK) {
2218		/*
2219		 * Task management commands don't require any further data
2220		 * movement or status phase.  Return the CCB to the adapter
2221		 * and free the CTL I/O.
2222		 */
2223		xpt_print(ccb->ccb_h.path, "%s: returning task I/O "
2224			  "tag %#x seq %#x\n", __func__,
2225			  ccb->cin1.tag_id, ccb->cin1.seq_id);
2226		/*
2227		 * Send the notify acknowledge down to the SIM, to let it
2228		 * know we processed the task management command.
2229		 */
2230		ccb->ccb_h.status = CAM_REQ_INPROG;
2231		ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
2232		xpt_action(ccb);
2233		ctl_free_io(io);
2234	} else {
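		/*
		 * Mark whether this I/O is waiting to send status or to
		 * do a data transfer, and queue it up for ctlfestart().
		 */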
2235		if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
2236			io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
2237		else
2238			io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;
2239
2240		TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
2241				  periph_links.tqe);
2242
2243		/*
2244		 * Reset the timeout for our latest active DMA.
2245		 */
2246		callout_reset(&softc->dma_callout,
2247			      CTLFE_DMA_TIMEOUT * hz,
2248			      ctlfe_dma_timeout, softc);
2249		/*
2250		 * Ask the CAM transport layer to send us a CCB to do
2251		 * the DMA or send status, unless ctlfe_dma_enabled is set
2252		 * to 0.
2253		 */
2254		if (ctlfe_dma_enabled != 0)
2255			xpt_schedule(periph, /*priority*/ 1);
2256	}
2257
2258	CAM_SIM_UNLOCK(sim);
2259}
2260
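/*
 * Debugging aid: dump SIM state and every LUN work queue for each bus
 * attached to CTL.
 */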
2261static void
2262ctlfe_dump(void)
2263{
2264	struct ctlfe_softc *bus_softc;
2265
2266	STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) {
2267		struct ctlfe_lun_softc *lun_softc;
2268
2269		ctlfe_dump_sim(bus_softc->sim);
2270
2271		STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links) {
2272			ctlfe_dump_queue(lun_softc);
2273		}
2274	}
2275}
2276