1/*-
2 * Copyright (c) 2004, 2005 Silicon Graphics International Corp.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions, and the following disclaimer,
10 *    without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 *    substantially similar to the "NO WARRANTY" disclaimer below
13 *    ("Disclaimer") and any redistribution must be conditioned upon
14 *    including a substantially similar Disclaimer requirement for further
15 *    binary redistribution.
16 *
17 * NO WARRANTY
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGES.
29 *
30 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend_internal.c#5 $
31 */
32/*
33 * CTL kernel internal frontend target driver.  This allows kernel-level
34 * clients to send commands into CTL.
35 *
36 * This has elements of a FETD (e.g. it has to set tag numbers, initiator,
37 * port, target, and LUN) and elements of an initiator (LUN discovery and
38 * probing, error recovery, command initiation).  Even though this has some
39 * initiator type elements, this is not intended to be a full fledged
40 * initiator layer.  It is only intended to send a limited number of
41 * commands to a well known target layer.
42 *
43 * To be able to fulfill the role of a full initiator layer, it would need
44 * a whole lot more functionality.
45 *
46 * Author: Ken Merry <ken@FreeBSD.org>
47 *
48 */
49
50#include <sys/cdefs.h>
51__FBSDID("$FreeBSD: releng/10.2/sys/cam/ctl/ctl_frontend_internal.c 284798 2015-06-25 07:11:48Z mav $");
52
53#include <sys/param.h>
54#include <sys/systm.h>
55#include <sys/kernel.h>
56#include <sys/types.h>
57#include <sys/malloc.h>
58#include <sys/module.h>
59#include <sys/lock.h>
60#include <sys/mutex.h>
61#include <sys/condvar.h>
62#include <sys/queue.h>
63#include <sys/sbuf.h>
64#include <sys/sysctl.h>
65#include <vm/uma.h>
66#include <cam/scsi/scsi_all.h>
67#include <cam/scsi/scsi_da.h>
68#include <cam/ctl/ctl_io.h>
69#include <cam/ctl/ctl.h>
70#include <cam/ctl/ctl_frontend.h>
71#include <cam/ctl/ctl_frontend_internal.h>
72#include <cam/ctl/ctl_backend.h>
73#include <cam/ctl/ctl_ioctl.h>
74#include <cam/ctl/ctl_util.h>
75#include <cam/ctl/ctl_ha.h>
76#include <cam/ctl/ctl_private.h>
77#include <cam/ctl/ctl_debug.h>
78#include <cam/ctl/ctl_scsi_all.h>
79#include <cam/ctl/ctl_error.h>
80
81/*
82 * Task structure:
83 *  - overall metatask, different potential metatask types (e.g. forced
84 *    shutdown, gentle shutdown)
85 *  - forced shutdown metatask:
86 *     - states:  report luns, pending, done?
87 *     - list of luns pending, with the relevant I/O for that lun attached.
88 *       This would allow moving ahead on LUNs with no errors, and going
89 *       into error recovery on LUNs with problems.  Per-LUN states might
90 *       include inquiry, stop/offline, done.
91 *
92 * Use LUN enable for LUN list instead of getting it manually?  We'd still
93 * need inquiry data for each LUN.
94 *
95 * How to handle processor LUN w.r.t. found/stopped counts?
96 */
#ifdef oldapi
/*
 * Older metatask API definitions, kept for reference only.  This block is
 * compiled out unless "oldapi" is defined (it does not appear to be defined
 * anywhere in this file).
 */
typedef enum {
	CFI_TASK_NONE,
	CFI_TASK_SHUTDOWN,
	CFI_TASK_STARTUP
} cfi_tasktype;

struct cfi_task_startstop {
	int total_luns;
	int luns_complete;
	int luns_failed;
	cfi_cb_t callback;
	void *callback_arg;
	/* XXX KDM add more fields here */
};

union cfi_taskinfo {
	struct cfi_task_startstop startstop;
};

struct cfi_metatask {
	cfi_tasktype		tasktype;
	cfi_mt_status		status;
	union cfi_taskinfo	taskinfo;
	void			*cfi_context;
	STAILQ_ENTRY(cfi_metatask) links;
};
#endif
125
/*
 * Disposition for a failed I/O.  The low bits (CFI_ERR_MASK) select the
 * recovery action; CFI_ERR_NO_DECREMENT may be OR'ed in to retry without
 * consuming one of the I/O's remaining retries.
 */
typedef enum {
	CFI_ERR_RETRY		= 0x000,
	CFI_ERR_FAIL		= 0x001,
	CFI_ERR_LUN_RESET	= 0x002,
	CFI_ERR_MASK		= 0x0ff,
	CFI_ERR_NO_DECREMENT	= 0x100
} cfi_error_action;

/*
 * Error recovery policy for an I/O: soft (retry only) or hard (allows more
 * drastic recovery, e.g. a LUN reset on reservation conflict).
 */
typedef enum {
	CFI_ERR_SOFT,
	CFI_ERR_HARD
} cfi_error_policy;

/*
 * LUN probe state machine: INQUIRY first, then READ CAPACITY (and
 * READ CAPACITY(16) if the 32-bit capacity overflows), then READY.
 */
typedef enum {
	CFI_LUN_INQUIRY,
	CFI_LUN_READCAPACITY,
	CFI_LUN_READCAPACITY_16,
	CFI_LUN_READY
} cfi_lun_state;

/*
 * Per-LUN state tracked by this frontend.
 */
struct cfi_lun {
	int lun_id;				/* CTL LUN number */
	struct scsi_inquiry_data inq_data;	/* filled in by INQUIRY probe */
	uint64_t num_blocks;			/* capacity (max LBA + 1) */
	uint32_t blocksize;			/* block size in bytes */
	/* log2(blocksize), set only when blocksize is a power of 2 */
	int blocksize_powerof2;
	uint32_t cur_tag_num;			/* next SCSI tag to assign */
	cfi_lun_state state;			/* probe state */
	struct cfi_softc *softc;		/* backpointer to our softc */
	STAILQ_HEAD(, cfi_lun_io) io_list;	/* I/Os outstanding on LUN */
	STAILQ_ENTRY(cfi_lun) links;		/* softc lun_list linkage */
};

/*
 * Per-I/O tracking state, stored in the ctl_io's port-private area
 * (see cfi_init_io()).
 */
struct cfi_lun_io {
	struct cfi_lun *lun;		/* LUN this I/O targets */
	struct cfi_metatask *metatask;	/* owning metatask, or NULL */
	cfi_error_policy policy;	/* error recovery policy */
	void (*done_function)(union ctl_io *io); /* completion callback */
	union ctl_io *ctl_io;		/* backpointer to the ctl_io */
	/* original I/O, when this one is error recovery on its behalf */
	struct cfi_lun_io *orig_lun_io;
	STAILQ_ENTRY(cfi_lun_io) links;	/* lun io_list linkage */
};

typedef enum {
	CFI_NONE	= 0x00,
	CFI_ONLINE	= 0x01,		/* port is online; probes allowed */
} cfi_flags;

/*
 * Softc for the internal frontend.  There is a single static instance,
 * fetd_internal_softc.
 */
struct cfi_softc {
	struct ctl_port port;		/* our CTL port */
	char fe_name[40];		/* port name ("kernel") */
	struct mtx lock;		/* protects the lists below */
	cfi_flags flags;		/* CFI_ONLINE, etc. */
	STAILQ_HEAD(, cfi_lun) lun_list;	/* known LUNs */
	STAILQ_HEAD(, cfi_metatask) metatask_list; /* outstanding metatasks */
};
182
/* Malloc type for S/G list copies and probe data buffers. */
MALLOC_DEFINE(M_CTL_CFI, "ctlcfi", "CTL CFI");

/* UMA zones for per-LUN and per-metatask allocations. */
static uma_zone_t cfi_lun_zone;
static uma_zone_t cfi_metatask_zone;

/* The single softc instance for this frontend. */
static struct cfi_softc fetd_internal_softc;

int cfi_init(void);
void cfi_shutdown(void) __unused;
static void cfi_online(void *arg);
static void cfi_offline(void *arg);
static int cfi_lun_enable(void *arg, int lun_id);
static int cfi_lun_disable(void *arg, int lun_id);
static void cfi_datamove(union ctl_io *io);
static cfi_error_action cfi_checkcond_parse(union ctl_io *io,
					    struct cfi_lun_io *lun_io);
static cfi_error_action cfi_error_parse(union ctl_io *io,
					struct cfi_lun_io *lun_io);
static void cfi_init_io(union ctl_io *io, struct cfi_lun *lun,
			struct cfi_metatask *metatask, cfi_error_policy policy,
			int retries, struct cfi_lun_io *orig_lun_io,
			void (*done_function)(union ctl_io *io));
static void cfi_done(union ctl_io *io);
static void cfi_lun_probe_done(union ctl_io *io);
static void cfi_lun_probe(struct cfi_lun *lun, int have_lock);
static void cfi_metatask_done(struct cfi_softc *softc,
			      struct cfi_metatask *metatask);
static void cfi_metatask_bbr_errorparse(struct cfi_metatask *metatask,
					union ctl_io *io);
static void cfi_metatask_io_done(union ctl_io *io);
static void cfi_err_recovery_done(union ctl_io *io);
static void cfi_lun_io_done(union ctl_io *io);

/* Frontend registration structure; hooked up via CTL_FRONTEND_DECLARE. */
static struct ctl_frontend cfi_frontend =
{
	.name = "kernel",
	.init = cfi_init,
	.shutdown = cfi_shutdown,
};
CTL_FRONTEND_DECLARE(ctlcfi, cfi_frontend);
223
224int
225cfi_init(void)
226{
227	struct cfi_softc *softc;
228	struct ctl_port *port;
229	int retval;
230
231	softc = &fetd_internal_softc;
232
233	port = &softc->port;
234
235	retval = 0;
236
237	if (sizeof(struct cfi_lun_io) > CTL_PORT_PRIV_SIZE) {
238		printf("%s: size of struct cfi_lun_io %zd > "
239		       "CTL_PORT_PRIV_SIZE %d\n", __func__,
240		       sizeof(struct cfi_lun_io),
241		       CTL_PORT_PRIV_SIZE);
242	}
243	memset(softc, 0, sizeof(*softc));
244
245	mtx_init(&softc->lock, "CTL frontend mutex", NULL, MTX_DEF);
246	STAILQ_INIT(&softc->lun_list);
247	STAILQ_INIT(&softc->metatask_list);
248	sprintf(softc->fe_name, "kernel");
249	port->frontend = &cfi_frontend;
250	port->port_type = CTL_PORT_INTERNAL;
251	port->num_requested_ctl_io = 100;
252	port->port_name = softc->fe_name;
253	port->port_online = cfi_online;
254	port->port_offline = cfi_offline;
255	port->onoff_arg = softc;
256	port->lun_enable = cfi_lun_enable;
257	port->lun_disable = cfi_lun_disable;
258	port->targ_lun_arg = softc;
259	port->fe_datamove = cfi_datamove;
260	port->fe_done = cfi_done;
261	port->max_targets = 15;
262	port->max_target_id = 15;
263
264	if (ctl_port_register(port) != 0)
265	{
266		printf("%s: internal frontend registration failed\n", __func__);
267		return (0);
268	}
269
270	cfi_lun_zone = uma_zcreate("cfi_lun", sizeof(struct cfi_lun),
271	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
272	cfi_metatask_zone = uma_zcreate("cfi_metatask", sizeof(struct cfi_metatask),
273	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
274
275	return (0);
276}
277
278void
279cfi_shutdown(void)
280{
281	struct cfi_softc *softc;
282
283	softc = &fetd_internal_softc;
284
285	/*
286	 * XXX KDM need to clear out any I/O pending on each LUN.
287	 */
288	if (ctl_port_deregister(&softc->port) != 0)
289		printf("%s: ctl_frontend_deregister() failed\n", __func__);
290
291	uma_zdestroy(cfi_lun_zone);
292	uma_zdestroy(cfi_metatask_zone);
293}
294
295static void
296cfi_online(void *arg)
297{
298	struct cfi_softc *softc;
299	struct cfi_lun *lun;
300
301	softc = (struct cfi_softc *)arg;
302
303	softc->flags |= CFI_ONLINE;
304
305	/*
306	 * Go through and kick off the probe for each lun.  Should we check
307	 * the LUN flags here to determine whether or not to probe it?
308	 */
309	mtx_lock(&softc->lock);
310	STAILQ_FOREACH(lun, &softc->lun_list, links)
311		cfi_lun_probe(lun, /*have_lock*/ 1);
312	mtx_unlock(&softc->lock);
313}
314
315static void
316cfi_offline(void *arg)
317{
318	struct cfi_softc *softc;
319
320	softc = (struct cfi_softc *)arg;
321
322	softc->flags &= ~CFI_ONLINE;
323}
324
325static int
326cfi_lun_enable(void *arg, int lun_id)
327{
328	struct cfi_softc *softc;
329	struct cfi_lun *lun;
330	int found;
331
332	softc = (struct cfi_softc *)arg;
333
334	found = 0;
335	mtx_lock(&softc->lock);
336	STAILQ_FOREACH(lun, &softc->lun_list, links) {
337		if (lun->lun_id == lun_id) {
338			found = 1;
339			break;
340		}
341	}
342	mtx_unlock(&softc->lock);
343
344	/*
345	 * If we already have this target/LUN, there is no reason to add
346	 * it to our lists again.
347	 */
348	if (found != 0)
349		return (0);
350
351	lun = uma_zalloc(cfi_lun_zone, M_NOWAIT | M_ZERO);
352	if (lun == NULL) {
353		printf("%s: unable to allocate LUN structure\n", __func__);
354		return (1);
355	}
356
357	lun->lun_id = lun_id;
358	lun->cur_tag_num = 0;
359	lun->state = CFI_LUN_INQUIRY;
360	lun->softc = softc;
361	STAILQ_INIT(&lun->io_list);
362
363	mtx_lock(&softc->lock);
364	STAILQ_INSERT_TAIL(&softc->lun_list, lun, links);
365	mtx_unlock(&softc->lock);
366
367	cfi_lun_probe(lun, /*have_lock*/ 0);
368
369	return (0);
370}
371
372static int
373cfi_lun_disable(void *arg, int lun_id)
374{
375	struct cfi_softc *softc;
376	struct cfi_lun *lun;
377	int found;
378
379	softc = (struct cfi_softc *)arg;
380
381	found = 0;
382
383	/*
384	 * XXX KDM need to do an invalidate and then a free when any
385	 * pending I/O has completed.  Or do we?  CTL won't free a LUN
386	 * while any I/O is pending.  So we won't get this notification
387	 * unless any I/O we have pending on a LUN has completed.
388	 */
389	mtx_lock(&softc->lock);
390	STAILQ_FOREACH(lun, &softc->lun_list, links) {
391		if (lun->lun_id == lun_id) {
392			found = 1;
393			break;
394		}
395	}
396	if (found != 0)
397		STAILQ_REMOVE(&softc->lun_list, lun, cfi_lun, links);
398
399	mtx_unlock(&softc->lock);
400
401	if (found == 0) {
402		printf("%s: can't find lun %d\n", __func__, lun_id);
403		return (1);
404	}
405
406	uma_zfree(cfi_lun_zone, lun);
407
408	return (0);
409}
410
411static void
412cfi_datamove(union ctl_io *io)
413{
414	struct ctl_sg_entry *ext_sglist, *kern_sglist;
415	struct ctl_sg_entry ext_entry, kern_entry;
416	int ext_sglen, ext_sg_entries, kern_sg_entries;
417	int ext_sg_start, ext_offset;
418	int len_to_copy, len_copied;
419	int kern_watermark, ext_watermark;
420	int ext_sglist_malloced;
421	struct ctl_scsiio *ctsio;
422	int i, j;
423
424	ext_sglist_malloced = 0;
425	ext_sg_start = 0;
426	ext_offset = 0;
427	ext_sglist = NULL;
428
429	CTL_DEBUG_PRINT(("%s\n", __func__));
430
431	ctsio = &io->scsiio;
432
433	/*
434	 * If this is the case, we're probably doing a BBR read and don't
435	 * actually need to transfer the data.  This will effectively
436	 * bit-bucket the data.
437	 */
438	if (ctsio->ext_data_ptr == NULL)
439		goto bailout;
440
441	/*
442	 * To simplify things here, if we have a single buffer, stick it in
443	 * a S/G entry and just make it a single entry S/G list.
444	 */
445	if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
446		int len_seen;
447
448		ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
449
450		ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL_CFI,
451							   M_WAITOK);
452		ext_sglist_malloced = 1;
453		if (memcpy(ext_sglist, ctsio->ext_data_ptr, ext_sglen) != 0) {
454			ctl_set_internal_failure(ctsio,
455						 /*sks_valid*/ 0,
456						 /*retry_count*/ 0);
457			goto bailout;
458		}
459		ext_sg_entries = ctsio->ext_sg_entries;
460		len_seen = 0;
461		for (i = 0; i < ext_sg_entries; i++) {
462			if ((len_seen + ext_sglist[i].len) >=
463			     ctsio->ext_data_filled) {
464				ext_sg_start = i;
465				ext_offset = ctsio->ext_data_filled - len_seen;
466				break;
467			}
468			len_seen += ext_sglist[i].len;
469		}
470	} else {
471		ext_sglist = &ext_entry;
472		ext_sglist->addr = ctsio->ext_data_ptr;
473		ext_sglist->len = ctsio->ext_data_len;
474		ext_sg_entries = 1;
475		ext_sg_start = 0;
476		ext_offset = ctsio->ext_data_filled;
477	}
478
479	if (ctsio->kern_sg_entries > 0) {
480		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
481		kern_sg_entries = ctsio->kern_sg_entries;
482	} else {
483		kern_sglist = &kern_entry;
484		kern_sglist->addr = ctsio->kern_data_ptr;
485		kern_sglist->len = ctsio->kern_data_len;
486		kern_sg_entries = 1;
487	}
488
489
490	kern_watermark = 0;
491	ext_watermark = ext_offset;
492	len_copied = 0;
493	for (i = ext_sg_start, j = 0;
494	     i < ext_sg_entries && j < kern_sg_entries;) {
495		uint8_t *ext_ptr, *kern_ptr;
496
497		len_to_copy = MIN(ext_sglist[i].len - ext_watermark,
498				  kern_sglist[j].len - kern_watermark);
499
500		ext_ptr = (uint8_t *)ext_sglist[i].addr;
501		ext_ptr = ext_ptr + ext_watermark;
502		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
503			/*
504			 * XXX KDM fix this!
505			 */
506			panic("need to implement bus address support");
507#if 0
508			kern_ptr = bus_to_virt(kern_sglist[j].addr);
509#endif
510		} else
511			kern_ptr = (uint8_t *)kern_sglist[j].addr;
512		kern_ptr = kern_ptr + kern_watermark;
513
514		kern_watermark += len_to_copy;
515		ext_watermark += len_to_copy;
516
517		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
518		     CTL_FLAG_DATA_IN) {
519			CTL_DEBUG_PRINT(("%s: copying %d bytes to user\n",
520					 __func__, len_to_copy));
521			CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
522					 kern_ptr, ext_ptr));
523			memcpy(ext_ptr, kern_ptr, len_to_copy);
524		} else {
525			CTL_DEBUG_PRINT(("%s: copying %d bytes from user\n",
526					 __func__, len_to_copy));
527			CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
528					 ext_ptr, kern_ptr));
529			memcpy(kern_ptr, ext_ptr, len_to_copy);
530		}
531
532		len_copied += len_to_copy;
533
534		if (ext_sglist[i].len == ext_watermark) {
535			i++;
536			ext_watermark = 0;
537		}
538
539		if (kern_sglist[j].len == kern_watermark) {
540			j++;
541			kern_watermark = 0;
542		}
543	}
544
545	ctsio->ext_data_filled += len_copied;
546
547	CTL_DEBUG_PRINT(("%s: ext_sg_entries: %d, kern_sg_entries: %d\n",
548			 __func__, ext_sg_entries, kern_sg_entries));
549	CTL_DEBUG_PRINT(("%s: ext_data_len = %d, kern_data_len = %d\n",
550			 __func__, ctsio->ext_data_len, ctsio->kern_data_len));
551
552
553	/* XXX KDM set residual?? */
554bailout:
555
556	if (ext_sglist_malloced != 0)
557		free(ext_sglist, M_CTL_CFI);
558
559	io->scsiio.be_move_done(io);
560
561	return;
562}
563
564/*
565 * For any sort of check condition, busy, etc., we just retry.  We do not
566 * decrement the retry count for unit attention type errors.  These are
567 * normal, and we want to save the retry count for "real" errors.  Otherwise,
568 * we could end up with situations where a command will succeed in some
569 * situations and fail in others, depending on whether a unit attention is
570 * pending.  Also, some of our error recovery actions, most notably the
571 * LUN reset action, will cause a unit attention.
572 *
573 * We can add more detail here later if necessary.
574 */
575static cfi_error_action
576cfi_checkcond_parse(union ctl_io *io, struct cfi_lun_io *lun_io)
577{
578	cfi_error_action error_action;
579	int error_code, sense_key, asc, ascq;
580
581	/*
582	 * Default to retrying the command.
583	 */
584	error_action = CFI_ERR_RETRY;
585
586	scsi_extract_sense_len(&io->scsiio.sense_data,
587			       io->scsiio.sense_len,
588			       &error_code,
589			       &sense_key,
590			       &asc,
591			       &ascq,
592			       /*show_errors*/ 1);
593
594	switch (error_code) {
595	case SSD_DEFERRED_ERROR:
596	case SSD_DESC_DEFERRED_ERROR:
597		error_action |= CFI_ERR_NO_DECREMENT;
598		break;
599	case SSD_CURRENT_ERROR:
600	case SSD_DESC_CURRENT_ERROR:
601	default: {
602		switch (sense_key) {
603		case SSD_KEY_UNIT_ATTENTION:
604			error_action |= CFI_ERR_NO_DECREMENT;
605			break;
606		case SSD_KEY_HARDWARE_ERROR:
607			/*
608			 * This is our generic "something bad happened"
609			 * error code.  It often isn't recoverable.
610			 */
611			if ((asc == 0x44) && (ascq == 0x00))
612				error_action = CFI_ERR_FAIL;
613			break;
614		case SSD_KEY_NOT_READY:
615			/*
616			 * If the LUN is powered down, there likely isn't
617			 * much point in retrying right now.
618			 */
619			if ((asc == 0x04) && (ascq == 0x02))
620				error_action = CFI_ERR_FAIL;
621			/*
622			 * If the LUN is offline, there probably isn't much
623			 * point in retrying, either.
624			 */
625			if ((asc == 0x04) && (ascq == 0x03))
626				error_action = CFI_ERR_FAIL;
627			break;
628		}
629	}
630	}
631
632	return (error_action);
633}
634
635static cfi_error_action
636cfi_error_parse(union ctl_io *io, struct cfi_lun_io *lun_io)
637{
638	cfi_error_action error_action;
639
640	error_action = CFI_ERR_RETRY;
641
642	switch (io->io_hdr.io_type) {
643	case CTL_IO_SCSI:
644		switch (io->io_hdr.status & CTL_STATUS_MASK) {
645		case CTL_SCSI_ERROR:
646			switch (io->scsiio.scsi_status) {
647			case SCSI_STATUS_RESERV_CONFLICT:
648				/*
649				 * For a reservation conflict, we'll usually
650				 * want the hard error recovery policy, so
651				 * we'll reset the LUN.
652				 */
653				if (lun_io->policy == CFI_ERR_HARD)
654					error_action =
655						CFI_ERR_LUN_RESET;
656				else
657					error_action =
658						CFI_ERR_RETRY;
659				break;
660			case SCSI_STATUS_CHECK_COND:
661			default:
662				error_action = cfi_checkcond_parse(io, lun_io);
663				break;
664			}
665			break;
666		default:
667			error_action = CFI_ERR_RETRY;
668			break;
669		}
670		break;
671	case CTL_IO_TASK:
672		/*
673		 * In theory task management commands shouldn't fail...
674		 */
675		error_action = CFI_ERR_RETRY;
676		break;
677	default:
678		printf("%s: invalid ctl_io type %d\n", __func__,
679		       io->io_hdr.io_type);
680		panic("%s: invalid ctl_io type %d\n", __func__,
681		      io->io_hdr.io_type);
682		break;
683	}
684
685	return (error_action);
686}
687
688static void
689cfi_init_io(union ctl_io *io, struct cfi_lun *lun,
690	    struct cfi_metatask *metatask, cfi_error_policy policy, int retries,
691	    struct cfi_lun_io *orig_lun_io,
692	    void (*done_function)(union ctl_io *io))
693{
694	struct cfi_lun_io *lun_io;
695
696	io->io_hdr.nexus.initid.id = 7;
697	io->io_hdr.nexus.targ_port = lun->softc->port.targ_port;
698	io->io_hdr.nexus.targ_target.id = 0;
699	io->io_hdr.nexus.targ_lun = lun->lun_id;
700	io->io_hdr.retries = retries;
701	lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
702	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = lun_io;
703	lun_io->lun = lun;
704	lun_io->metatask = metatask;
705	lun_io->ctl_io = io;
706	lun_io->policy = policy;
707	lun_io->orig_lun_io = orig_lun_io;
708	lun_io->done_function = done_function;
709	/*
710	 * We only set the tag number for SCSI I/Os.  For task management
711	 * commands, the tag number is only really needed for aborts, so
712	 * the caller can set it if necessary.
713	 */
714	switch (io->io_hdr.io_type) {
715	case CTL_IO_SCSI:
716		io->scsiio.tag_num = lun->cur_tag_num++;
717		break;
718	case CTL_IO_TASK:
719	default:
720		break;
721	}
722}
723
/*
 * Completion handler for every I/O submitted through this port.  Applies
 * the retry/error-recovery policy and then invokes the per-I/O
 * done_function stored by cfi_init_io().
 */
static void
cfi_done(union ctl_io *io)
{
	struct cfi_lun_io *lun_io;
	struct cfi_softc *softc;
	struct cfi_lun *lun;

	/* Recover our tracking state stashed by cfi_init_io(). */
	lun_io = (struct cfi_lun_io *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	lun = lun_io->lun;
	softc = lun->softc;

	/*
	 * Very minimal retry logic.  We basically retry if we got an error
	 * back, and the retry count is greater than 0.  If we ever want
	 * more sophisticated initiator type behavior, the CAM error
	 * recovery code in ../common might be helpful.
	 */
	if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
	 && (io->io_hdr.retries > 0)) {
		ctl_io_status old_status;
		cfi_error_action error_action;

		error_action = cfi_error_parse(io, lun_io);

		switch (error_action & CFI_ERR_MASK) {
		case CFI_ERR_FAIL:
			/* Unrecoverable: deliver the error to done_function. */
			goto done;
			break; /* NOTREACHED */
		case CFI_ERR_LUN_RESET: {
			/*
			 * Build a LUN reset task I/O and queue it in place
			 * of the failed I/O; the original is reachable from
			 * the new I/O via orig_lun_io, and its recovery is
			 * finished by cfi_err_recovery_done().
			 */
			union ctl_io *new_io;
			struct cfi_lun_io *new_lun_io;

			new_io = ctl_alloc_io(softc->port.ctl_pool_ref);
			ctl_zero_io(new_io);

			new_io->io_hdr.io_type = CTL_IO_TASK;
			new_io->taskio.task_action = CTL_TASK_LUN_RESET;

			cfi_init_io(new_io,
				    /*lun*/ lun_io->lun,
				    /*metatask*/ NULL,
				    /*policy*/ CFI_ERR_SOFT,
				    /*retries*/ 0,
				    /*orig_lun_io*/lun_io,
				    /*done_function*/ cfi_err_recovery_done);


			new_lun_io = (struct cfi_lun_io *)
				new_io->io_hdr.port_priv;

			mtx_lock(&lun->softc->lock);
			STAILQ_INSERT_TAIL(&lun->io_list, new_lun_io, links);
			mtx_unlock(&lun->softc->lock);

			/* From here on we resubmit the reset, not the I/O. */
			io = new_io;
			break;
		}
		case CFI_ERR_RETRY:
		default:
			/* Retry, optionally charging a retry count. */
			if ((error_action & CFI_ERR_NO_DECREMENT) == 0)
				io->io_hdr.retries--;
			break;
		}

		/*
		 * Clear the completion status and flags so the I/O can be
		 * requeued; restore the old status if requeueing fails.
		 */
		old_status = io->io_hdr.status;
		io->io_hdr.status = CTL_STATUS_NONE;
#if 0
		io->io_hdr.flags &= ~CTL_FLAG_ALREADY_DONE;
#endif
		io->io_hdr.flags &= ~CTL_FLAG_ABORT;
		io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;

		if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
			printf("%s: error returned from ctl_queue()!\n",
			       __func__);
			io->io_hdr.status = old_status;
		} else
			return;
	}
done:
	lun_io->done_function(io);
}
808
/*
 * Completion handler for LUN probe I/Os (INQUIRY and READ CAPACITY).
 * Advances the LUN's probe state machine, possibly kicking off the next
 * probe step, and frees the completed I/O.
 */
static void
cfi_lun_probe_done(union ctl_io *io)
{
	struct cfi_lun *lun;
	struct cfi_lun_io *lun_io;

	lun_io = (struct cfi_lun_io *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
	lun = lun_io->lun;

	switch (lun->state) {
	case CFI_LUN_INQUIRY: {
		if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
			/* print out something here?? */
			printf("%s: LUN %d probe failed because inquiry "
			       "failed\n", __func__, lun->lun_id);
			ctl_io_error_print(io, NULL);
		} else {

			/*
			 * Non-direct-access devices skip the capacity
			 * probe and go straight to READY; direct-access
			 * devices move on to READ CAPACITY.
			 */
			if (SID_TYPE(&lun->inq_data) != T_DIRECT) {
				char path_str[40];

				lun->state = CFI_LUN_READY;
				ctl_scsi_path_string(io, path_str,
						     sizeof(path_str));
				printf("%s", path_str);
				scsi_print_inquiry(&lun->inq_data);
			} else {
				lun->state = CFI_LUN_READCAPACITY;
				cfi_lun_probe(lun, /*have_lock*/ 0);
			}
		}
		/* Whatever happened, this probe I/O is finished. */
		mtx_lock(&lun->softc->lock);
		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
		mtx_unlock(&lun->softc->lock);
		ctl_free_io(io);
		break;
	}
	case CFI_LUN_READCAPACITY:
	case CFI_LUN_READCAPACITY_16: {
		uint64_t maxlba;
		uint32_t blocksize;

		maxlba = 0;
		blocksize = 0;

		if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
			printf("%s: LUN %d probe failed because READ CAPACITY "
			       "failed\n", __func__, lun->lun_id);
			ctl_io_error_print(io, NULL);
		} else {

			if (lun->state == CFI_LUN_READCAPACITY) {
				struct scsi_read_capacity_data *rdcap;

				rdcap = (struct scsi_read_capacity_data *)
					io->scsiio.ext_data_ptr;

				maxlba = scsi_4btoul(rdcap->addr);
				blocksize = scsi_4btoul(rdcap->length);
				if (blocksize == 0) {
					printf("%s: LUN %d has invalid "
					       "blocksize 0, probe aborted\n",
					       __func__, lun->lun_id);
				} else if (maxlba == 0xffffffff) {
					/*
					 * Capacity overflows 32 bits; retry
					 * with READ CAPACITY(16).
					 */
					lun->state = CFI_LUN_READCAPACITY_16;
					cfi_lun_probe(lun, /*have_lock*/ 0);
				} else
					lun->state = CFI_LUN_READY;
			} else {
				struct scsi_read_capacity_data_long *rdcap_long;

				rdcap_long = (struct
					scsi_read_capacity_data_long *)
					io->scsiio.ext_data_ptr;
				maxlba = scsi_8btou64(rdcap_long->addr);
				blocksize = scsi_4btoul(rdcap_long->length);

				if (blocksize == 0) {
					printf("%s: LUN %d has invalid "
					       "blocksize 0, probe aborted\n",
					       __func__, lun->lun_id);
				} else
					lun->state = CFI_LUN_READY;
			}
		}

		if (lun->state == CFI_LUN_READY) {
			char path_str[40];

			lun->num_blocks = maxlba + 1;
			lun->blocksize = blocksize;

			/*
			 * If this is true, the blocksize is a power of 2.
			 * We already checked for 0 above.
			 */
			if (((blocksize - 1) & blocksize) == 0) {
				int i;

				/* Find the set bit, i.e. log2(blocksize). */
				for (i = 0; i < 32; i++) {
					if ((blocksize & (1 << i)) != 0) {
						lun->blocksize_powerof2 = i;
						break;
					}
				}
			}
			ctl_scsi_path_string(io, path_str,sizeof(path_str));
			printf("%s", path_str);
			scsi_print_inquiry(&lun->inq_data);
			printf("%s %ju blocks, blocksize %d\n", path_str,
			       (uintmax_t)maxlba + 1, blocksize);
		}
		mtx_lock(&lun->softc->lock);
		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
		mtx_unlock(&lun->softc->lock);
		/* Free the capacity buffer allocated in cfi_lun_probe(). */
		free(io->scsiio.ext_data_ptr, M_CTL_CFI);
		ctl_free_io(io);
		break;
	}
	case CFI_LUN_READY:
	default:
		mtx_lock(&lun->softc->lock);
		/* How did we get here?? */
		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
		mtx_unlock(&lun->softc->lock);
		ctl_free_io(io);
		break;
	}
}
939
940static void
941cfi_lun_probe(struct cfi_lun *lun, int have_lock)
942{
943
944	if (have_lock == 0)
945		mtx_lock(&lun->softc->lock);
946	if ((lun->softc->flags & CFI_ONLINE) == 0) {
947		if (have_lock == 0)
948			mtx_unlock(&lun->softc->lock);
949		return;
950	}
951	if (have_lock == 0)
952		mtx_unlock(&lun->softc->lock);
953
954	switch (lun->state) {
955	case CFI_LUN_INQUIRY: {
956		struct cfi_lun_io *lun_io;
957		union ctl_io *io;
958
959		io = ctl_alloc_io(lun->softc->port.ctl_pool_ref);
960		ctl_scsi_inquiry(io,
961				 /*data_ptr*/(uint8_t *)&lun->inq_data,
962				 /*data_len*/ sizeof(lun->inq_data),
963				 /*byte2*/ 0,
964				 /*page_code*/ 0,
965				 /*tag_type*/ CTL_TAG_SIMPLE,
966				 /*control*/ 0);
967
968		cfi_init_io(io,
969			    /*lun*/ lun,
970			    /*metatask*/ NULL,
971			    /*policy*/ CFI_ERR_SOFT,
972			    /*retries*/ 5,
973			    /*orig_lun_io*/ NULL,
974			    /*done_function*/
975			    cfi_lun_probe_done);
976
977		lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
978
979		if (have_lock == 0)
980			mtx_lock(&lun->softc->lock);
981		STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
982		if (have_lock == 0)
983			mtx_unlock(&lun->softc->lock);
984
985		if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
986			printf("%s: error returned from ctl_queue()!\n",
987			       __func__);
988			STAILQ_REMOVE(&lun->io_list, lun_io,
989				      cfi_lun_io, links);
990			ctl_free_io(io);
991		}
992		break;
993	}
994	case CFI_LUN_READCAPACITY:
995	case CFI_LUN_READCAPACITY_16: {
996		struct cfi_lun_io *lun_io;
997		uint8_t *dataptr;
998		union ctl_io *io;
999
1000		io = ctl_alloc_io(lun->softc->port.ctl_pool_ref);
1001
1002		dataptr = malloc(sizeof(struct scsi_read_capacity_data_long),
1003				 M_CTL_CFI, M_NOWAIT);
1004		if (dataptr == NULL) {
1005			printf("%s: unable to allocate SCSI read capacity "
1006			       "buffer for lun %d\n", __func__, lun->lun_id);
1007			return;
1008		}
1009		if (lun->state == CFI_LUN_READCAPACITY) {
1010			ctl_scsi_read_capacity(io,
1011				/*data_ptr*/ dataptr,
1012				/*data_len*/
1013				sizeof(struct scsi_read_capacity_data_long),
1014				/*addr*/ 0,
1015				/*reladr*/ 0,
1016				/*pmi*/ 0,
1017				/*tag_type*/ CTL_TAG_SIMPLE,
1018				/*control*/ 0);
1019		} else {
1020			ctl_scsi_read_capacity_16(io,
1021				/*data_ptr*/ dataptr,
1022				/*data_len*/
1023				sizeof(struct scsi_read_capacity_data_long),
1024				/*addr*/ 0,
1025				/*reladr*/ 0,
1026				/*pmi*/ 0,
1027				/*tag_type*/ CTL_TAG_SIMPLE,
1028				/*control*/ 0);
1029		}
1030		cfi_init_io(io,
1031			    /*lun*/ lun,
1032			    /*metatask*/ NULL,
1033			    /*policy*/ CFI_ERR_SOFT,
1034			    /*retries*/ 7,
1035			    /*orig_lun_io*/ NULL,
1036			    /*done_function*/ cfi_lun_probe_done);
1037
1038		lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1039
1040		if (have_lock == 0)
1041			mtx_lock(&lun->softc->lock);
1042		STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1043		if (have_lock == 0)
1044			mtx_unlock(&lun->softc->lock);
1045
1046		if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1047			printf("%s: error returned from ctl_queue()!\n",
1048			       __func__);
1049			STAILQ_REMOVE(&lun->io_list, lun_io,
1050				      cfi_lun_io, links);
1051			free(dataptr, M_CTL_CFI);
1052			ctl_free_io(io);
1053		}
1054		break;
1055	}
1056	case CFI_LUN_READY:
1057	default:
1058		/* Why were we called? */
1059		break;
1060	}
1061}
1062
/*
 * Finish a metatask: remove it from the outstanding list and notify the
 * caller via its callback.
 */
static void
cfi_metatask_done(struct cfi_softc *softc, struct cfi_metatask *metatask)
{
	mtx_lock(&softc->lock);
	STAILQ_REMOVE(&softc->metatask_list, metatask, cfi_metatask, links);
	mtx_unlock(&softc->lock);

	/*
	 * Return status to the caller.  Caller allocated storage, and is
	 * responsible for calling cfi_free_metatask to release it once
	 * they've seen the status.
	 */
	metatask->callback(metatask->callback_arg, metatask);
}
1077
/*
 * Translate the completion status of a BBR (bad block replacement) read
 * I/O into the metatask's status/taskinfo.bbrread.status enumeration.
 *
 * Only CFI_TASK_BBRREAD metatasks are parsed; any other task type returns
 * immediately with the metatask untouched.  Parsing proceeds from coarse
 * to fine: CTL-level status, then SCSI status byte, then (for CHECK
 * CONDITION) the decoded sense data.
 */
static void
cfi_metatask_bbr_errorparse(struct cfi_metatask *metatask, union ctl_io *io)
{
	int error_code, sense_key, asc, ascq;

	if (metatask->tasktype != CFI_TASK_BBRREAD)
		return;

	/* Clean CTL completion: the read succeeded outright. */
	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
		metatask->status = CFI_MT_SUCCESS;
		metatask->taskinfo.bbrread.status = CFI_BBR_SUCCESS;
		return;
	}

	/* Any non-SCSI CTL error is reported as a generic BBR error. */
	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SCSI_ERROR) {
		metatask->status = CFI_MT_ERROR;
		metatask->taskinfo.bbrread.status = CFI_BBR_ERROR;
		return;
	}

	/*
	 * SCSI error: preserve the raw status byte and (bounded) sense
	 * data for the caller before classifying further.
	 */
	metatask->taskinfo.bbrread.scsi_status = io->scsiio.scsi_status;
	memcpy(&metatask->taskinfo.bbrread.sense_data, &io->scsiio.sense_data,
	       MIN(sizeof(metatask->taskinfo.bbrread.sense_data),
		   sizeof(io->scsiio.sense_data)));

	if (io->scsiio.scsi_status == SCSI_STATUS_RESERV_CONFLICT) {
		metatask->status = CFI_MT_ERROR;
		metatask->taskinfo.bbrread.status = CFI_BBR_RESERV_CONFLICT;
		return;
	}

	/* Non-CHECK-CONDITION SCSI errors carry no sense to decode. */
	if (io->scsiio.scsi_status != SCSI_STATUS_CHECK_COND) {
		metatask->status = CFI_MT_ERROR;
		metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
		return;
	}

	scsi_extract_sense_len(&io->scsiio.sense_data,
			       io->scsiio.sense_len,
			       &error_code,
			       &sense_key,
			       &asc,
			       &ascq,
			       /*show_errors*/ 1);

	switch (error_code) {
	case SSD_DEFERRED_ERROR:
	case SSD_DESC_DEFERRED_ERROR:
		metatask->status = CFI_MT_ERROR;
		metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
		break;
	case SSD_CURRENT_ERROR:
	case SSD_DESC_CURRENT_ERROR:
	default: {
		struct scsi_sense_data *sense;

		/*
		 * NOTE: sense is only consulted inside the NEEDTOPORT
		 * region below; when NEEDTOPORT is undefined it is set
		 * but unused.
		 */
		sense = &io->scsiio.sense_data;

		/* ASC 0x04 / ASCQ 0x02: "initializing command required". */
		if ((asc == 0x04) && (ascq == 0x02)) {
			metatask->status = CFI_MT_ERROR;
			metatask->taskinfo.bbrread.status = CFI_BBR_LUN_STOPPED;
		} else if ((asc == 0x04) && (ascq == 0x03)) {
			/* ASC 0x04 / ASCQ 0x03: "manual intervention required". */
			metatask->status = CFI_MT_ERROR;
			metatask->taskinfo.bbrread.status =
				CFI_BBR_LUN_OFFLINE_CTL;
		} else if ((asc == 0x44) && (ascq == 0x00)) {
			/* ASC 0x44: internal target failure. */
#ifdef NEEDTOPORT
			/*
			 * Legacy SGI path: mine the sense-key-specific
			 * bytes for a RAIDcore shelf-software status to
			 * distinguish "device offline" from a generic
			 * internal failure.
			 */
			if (sense->sense_key_spec[0] & SSD_SCS_VALID) {
				uint16_t retry_count;

				retry_count = sense->sense_key_spec[1] << 8 |
					      sense->sense_key_spec[2];
				if (((retry_count & 0xf000) == CSC_RAIDCORE)
				 && ((retry_count & 0x0f00) == CSC_SHELF_SW)
				 && ((retry_count & 0xff) ==
				      RC_STS_DEVICE_OFFLINE)) {
					metatask->status = CFI_MT_ERROR;
					metatask->taskinfo.bbrread.status =
						CFI_BBR_LUN_OFFLINE_RC;
				} else {
					metatask->status = CFI_MT_ERROR;
					metatask->taskinfo.bbrread.status =
						CFI_BBR_SCSI_ERROR;
				}
			} else {
#endif /* NEEDTOPORT */
				metatask->status = CFI_MT_ERROR;
				metatask->taskinfo.bbrread.status =
					CFI_BBR_SCSI_ERROR;
#ifdef NEEDTOPORT
			}
#endif
		} else {
			/* Any other sense: generic SCSI error. */
			metatask->status = CFI_MT_ERROR;
			metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
		}
		break;
	}
	}
}
1178
/*
 * Completion handler for an I/O issued on behalf of a metatask.  Recovers
 * the cfi_lun_io bookkeeping structure from the I/O's frontend-private
 * pointer, updates the owning metatask's progress counters, and — when the
 * metatask has no more outstanding I/O — completes it via
 * cfi_metatask_done().
 *
 * Called without softc->lock held; the lock is taken here only around
 * counter updates and io_list manipulation.
 */
static void
cfi_metatask_io_done(union ctl_io *io)
{
	struct cfi_lun_io *lun_io;
	struct cfi_metatask *metatask;
	struct cfi_softc *softc;
	struct cfi_lun *lun;

	lun_io = (struct cfi_lun_io *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	lun = lun_io->lun;
	softc = lun->softc;

	metatask = lun_io->metatask;

	switch (metatask->tasktype) {
	case CFI_TASK_STARTUP:
	case CFI_TASK_SHUTDOWN: {
		int failed, done, is_start;

		failed = 0;
		done = 0;
		if (metatask->tasktype == CFI_TASK_STARTUP)
			is_start = 1;
		else
			is_start = 0;

		/*
		 * Tally this LUN's result and decide, under the lock,
		 * whether this was the last outstanding start/stop I/O.
		 * Only one completion can observe the counters reaching
		 * total_luns, so exactly one caller runs the done path.
		 */
		mtx_lock(&softc->lock);
		if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
			metatask->taskinfo.startstop.luns_complete++;
		else {
			metatask->taskinfo.startstop.luns_failed++;
			failed = 1;
		}
		if ((metatask->taskinfo.startstop.luns_complete +
		     metatask->taskinfo.startstop.luns_failed) >=
		     metatask->taskinfo.startstop.total_luns)
			done = 1;

		mtx_unlock(&softc->lock);

		if (failed != 0) {
			printf("%s: LUN %d %s request failed\n", __func__,
			       lun_io->lun->lun_id, (is_start == 1) ? "start" :
			       "stop");
			ctl_io_error_print(io, &lun_io->lun->inq_data);
		}
		/*
		 * Complete the metatask outside the lock;
		 * cfi_metatask_done() acquires softc->lock itself.
		 */
		if (done != 0) {
			if (metatask->taskinfo.startstop.luns_failed > 0)
				metatask->status = CFI_MT_ERROR;
			else
				metatask->status = CFI_MT_SUCCESS;
			cfi_metatask_done(softc, metatask);
		}
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
		mtx_unlock(&softc->lock);

		ctl_free_io(io);
		break;
	}
	case CFI_TASK_BBRREAD: {
		/*
		 * Translate the SCSI error into an enumeration.
		 */
		cfi_metatask_bbr_errorparse(metatask, io);

		/* A BBR read is a single I/O, so completion is immediate. */
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
		mtx_unlock(&softc->lock);

		ctl_free_io(io);

		cfi_metatask_done(softc, metatask);
		break;
	}
	default:
		/*
		 * This shouldn't happen.
		 */
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
		mtx_unlock(&softc->lock);

		ctl_free_io(io);
		break;
	}
}
1268
1269static void
1270cfi_err_recovery_done(union ctl_io *io)
1271{
1272	struct cfi_lun_io *lun_io, *orig_lun_io;
1273	struct cfi_lun *lun;
1274	union ctl_io *orig_io;
1275
1276	lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1277	orig_lun_io = lun_io->orig_lun_io;
1278	orig_io = orig_lun_io->ctl_io;
1279	lun = lun_io->lun;
1280
1281	if (io->io_hdr.status != CTL_SUCCESS) {
1282		printf("%s: error recovery action failed.  Original "
1283		       "error:\n", __func__);
1284
1285		ctl_io_error_print(orig_lun_io->ctl_io, &lun->inq_data);
1286
1287		printf("%s: error from error recovery action:\n", __func__);
1288
1289		ctl_io_error_print(io, &lun->inq_data);
1290
1291		printf("%s: trying original command again...\n", __func__);
1292	}
1293
1294	mtx_lock(&lun->softc->lock);
1295	STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1296	mtx_unlock(&lun->softc->lock);
1297	ctl_free_io(io);
1298
1299	orig_io->io_hdr.retries--;
1300	orig_io->io_hdr.status = CTL_STATUS_NONE;
1301
1302	if (ctl_queue(orig_io) != CTL_RETVAL_COMPLETE) {
1303		printf("%s: error returned from ctl_queue()!\n", __func__);
1304		STAILQ_REMOVE(&lun->io_list, orig_lun_io,
1305			      cfi_lun_io, links);
1306		ctl_free_io(orig_io);
1307	}
1308}
1309
1310static void
1311cfi_lun_io_done(union ctl_io *io)
1312{
1313	struct cfi_lun *lun;
1314	struct cfi_lun_io *lun_io;
1315
1316	lun_io = (struct cfi_lun_io *)
1317		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1318	lun = lun_io->lun;
1319
1320	if (lun_io->metatask == NULL) {
1321		printf("%s: I/O has no metatask pointer, discarding\n",
1322		       __func__);
1323		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1324		ctl_free_io(io);
1325		return;
1326	}
1327	cfi_metatask_io_done(io);
1328}
1329
1330void
1331cfi_action(struct cfi_metatask *metatask)
1332{
1333	struct cfi_softc *softc;
1334
1335	softc = &fetd_internal_softc;
1336
1337	mtx_lock(&softc->lock);
1338
1339	STAILQ_INSERT_TAIL(&softc->metatask_list, metatask, links);
1340
1341	if ((softc->flags & CFI_ONLINE) == 0) {
1342		mtx_unlock(&softc->lock);
1343		metatask->status = CFI_MT_PORT_OFFLINE;
1344		cfi_metatask_done(softc, metatask);
1345		return;
1346	} else
1347		mtx_unlock(&softc->lock);
1348
1349	switch (metatask->tasktype) {
1350	case CFI_TASK_STARTUP:
1351	case CFI_TASK_SHUTDOWN: {
1352		union ctl_io *io;
1353		int da_luns, ios_allocated, do_start;
1354		struct cfi_lun *lun;
1355		STAILQ_HEAD(, ctl_io_hdr) tmp_io_list;
1356
1357		da_luns = 0;
1358		ios_allocated = 0;
1359		STAILQ_INIT(&tmp_io_list);
1360
1361		if (metatask->tasktype == CFI_TASK_STARTUP)
1362			do_start = 1;
1363		else
1364			do_start = 0;
1365
1366		mtx_lock(&softc->lock);
1367		STAILQ_FOREACH(lun, &softc->lun_list, links) {
1368			if (lun->state != CFI_LUN_READY)
1369				continue;
1370
1371			if (SID_TYPE(&lun->inq_data) != T_DIRECT)
1372				continue;
1373			da_luns++;
1374			io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref);
1375			if (io != NULL) {
1376				ios_allocated++;
1377				STAILQ_INSERT_TAIL(&tmp_io_list, &io->io_hdr,
1378						   links);
1379			}
1380		}
1381
1382		if (ios_allocated < da_luns) {
1383			printf("%s: error allocating ctl_io for %s\n",
1384			       __func__, (do_start == 1) ? "startup" :
1385			       "shutdown");
1386			da_luns = ios_allocated;
1387		}
1388
1389		metatask->taskinfo.startstop.total_luns = da_luns;
1390
1391		STAILQ_FOREACH(lun, &softc->lun_list, links) {
1392			struct cfi_lun_io *lun_io;
1393
1394			if (lun->state != CFI_LUN_READY)
1395				continue;
1396
1397			if (SID_TYPE(&lun->inq_data) != T_DIRECT)
1398				continue;
1399
1400			io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list);
1401			if (io == NULL)
1402				break;
1403
1404			STAILQ_REMOVE(&tmp_io_list, &io->io_hdr, ctl_io_hdr,
1405				      links);
1406
1407			ctl_scsi_start_stop(io,
1408					    /*start*/ do_start,
1409					    /*load_eject*/ 0,
1410					    /*immediate*/ 0,
1411					    /*power_conditions*/
1412					    SSS_PC_START_VALID,
1413					    /*onoffline*/ 1,
1414					    /*ctl_tag_type*/ CTL_TAG_ORDERED,
1415					    /*control*/ 0);
1416
1417			cfi_init_io(io,
1418				    /*lun*/ lun,
1419				    /*metatask*/ metatask,
1420				    /*policy*/ CFI_ERR_HARD,
1421				    /*retries*/ 3,
1422				    /*orig_lun_io*/ NULL,
1423				    /*done_function*/ cfi_lun_io_done);
1424
1425			lun_io = (struct cfi_lun_io *) io->io_hdr.port_priv;
1426
1427			STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1428
1429			if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1430				printf("%s: error returned from ctl_queue()!\n",
1431				       __func__);
1432				STAILQ_REMOVE(&lun->io_list, lun_io,
1433					      cfi_lun_io, links);
1434				ctl_free_io(io);
1435				metatask->taskinfo.startstop.total_luns--;
1436			}
1437		}
1438
1439		if (STAILQ_FIRST(&tmp_io_list) != NULL) {
1440			printf("%s: error: tmp_io_list != NULL\n", __func__);
1441			for (io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list);
1442			     io != NULL;
1443			     io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list)) {
1444				STAILQ_REMOVE(&tmp_io_list, &io->io_hdr,
1445					      ctl_io_hdr, links);
1446				ctl_free_io(io);
1447			}
1448		}
1449		mtx_unlock(&softc->lock);
1450
1451		break;
1452	}
1453	case CFI_TASK_BBRREAD: {
1454		union ctl_io *io;
1455		struct cfi_lun *lun;
1456		struct cfi_lun_io *lun_io;
1457		cfi_bbrread_status status;
1458		int req_lun_num;
1459		uint32_t num_blocks;
1460
1461		status = CFI_BBR_SUCCESS;
1462
1463		req_lun_num = metatask->taskinfo.bbrread.lun_num;
1464
1465		mtx_lock(&softc->lock);
1466		STAILQ_FOREACH(lun, &softc->lun_list, links) {
1467			if (lun->lun_id != req_lun_num)
1468				continue;
1469			if (lun->state != CFI_LUN_READY) {
1470				status = CFI_BBR_LUN_UNCONFIG;
1471				break;
1472			} else
1473				break;
1474		}
1475
1476		if (lun == NULL)
1477			status = CFI_BBR_NO_LUN;
1478
1479		if (status != CFI_BBR_SUCCESS) {
1480			metatask->status = CFI_MT_ERROR;
1481			metatask->taskinfo.bbrread.status = status;
1482			mtx_unlock(&softc->lock);
1483			cfi_metatask_done(softc, metatask);
1484			break;
1485		}
1486
1487		/*
1488		 * Convert the number of bytes given into blocks and check
1489		 * that the number of bytes is a multiple of the blocksize.
1490		 * CTL will verify that the LBA is okay.
1491		 */
1492		if (lun->blocksize_powerof2 != 0) {
1493			if ((metatask->taskinfo.bbrread.len &
1494			    (lun->blocksize - 1)) != 0) {
1495				metatask->status = CFI_MT_ERROR;
1496				metatask->taskinfo.bbrread.status =
1497					CFI_BBR_BAD_LEN;
1498				cfi_metatask_done(softc, metatask);
1499				break;
1500			}
1501
1502			num_blocks = metatask->taskinfo.bbrread.len >>
1503				lun->blocksize_powerof2;
1504		} else {
1505			/*
1506			 * XXX KDM this could result in floating point
1507			 * division, which isn't supported in the kernel on
1508			 * x86 at least.
1509			 */
1510			if ((metatask->taskinfo.bbrread.len %
1511			     lun->blocksize) != 0) {
1512				metatask->status = CFI_MT_ERROR;
1513				metatask->taskinfo.bbrread.status =
1514					CFI_BBR_BAD_LEN;
1515				cfi_metatask_done(softc, metatask);
1516				break;
1517			}
1518
1519			/*
1520			 * XXX KDM this could result in floating point
1521			 * division in some cases.
1522			 */
1523			num_blocks = metatask->taskinfo.bbrread.len /
1524				lun->blocksize;
1525
1526		}
1527
1528		io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref);
1529		if (io == NULL) {
1530			metatask->status = CFI_MT_ERROR;
1531			metatask->taskinfo.bbrread.status = CFI_BBR_NO_MEM;
1532			mtx_unlock(&softc->lock);
1533			cfi_metatask_done(softc, metatask);
1534			break;
1535		}
1536
1537		/*
1538		 * XXX KDM need to do a read capacity to get the blocksize
1539		 * for this device.
1540		 */
1541		ctl_scsi_read_write(io,
1542				    /*data_ptr*/ NULL,
1543				    /*data_len*/ metatask->taskinfo.bbrread.len,
1544				    /*read_op*/ 1,
1545				    /*byte2*/ 0,
1546				    /*minimum_cdb_size*/ 0,
1547				    /*lba*/ metatask->taskinfo.bbrread.lba,
1548				    /*num_blocks*/ num_blocks,
1549				    /*tag_type*/ CTL_TAG_SIMPLE,
1550				    /*control*/ 0);
1551
1552		cfi_init_io(io,
1553			    /*lun*/ lun,
1554			    /*metatask*/ metatask,
1555			    /*policy*/ CFI_ERR_SOFT,
1556			    /*retries*/ 3,
1557			    /*orig_lun_io*/ NULL,
1558			    /*done_function*/ cfi_lun_io_done);
1559
1560		lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1561
1562		STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1563
1564		if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1565			printf("%s: error returned from ctl_queue()!\n",
1566			       __func__);
1567			STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1568			ctl_free_io(io);
1569			metatask->status = CFI_MT_ERROR;
1570			metatask->taskinfo.bbrread.status = CFI_BBR_ERROR;
1571			mtx_unlock(&softc->lock);
1572			cfi_metatask_done(softc, metatask);
1573			break;
1574		}
1575
1576		mtx_unlock(&softc->lock);
1577		break;
1578	}
1579	default:
1580		panic("invalid metatask type %d", metatask->tasktype);
1581		break; /* NOTREACHED */
1582	}
1583}
1584
1585struct cfi_metatask *
1586cfi_alloc_metatask(int can_wait)
1587{
1588	struct cfi_metatask *metatask;
1589	struct cfi_softc *softc;
1590
1591	softc = &fetd_internal_softc;
1592
1593	metatask = uma_zalloc(cfi_metatask_zone,
1594	    (can_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
1595	if (metatask == NULL)
1596		return (NULL);
1597
1598	metatask->status = CFI_MT_NONE;
1599
1600	return (metatask);
1601}
1602
/*
 * Release a metatask previously obtained from cfi_alloc_metatask().
 * Callers must not touch the metatask after this returns.
 */
void
cfi_free_metatask(struct cfi_metatask *metatask)
{

	uma_zfree(cfi_metatask_zone, metatask);
}
1609
1610/*
1611 * vim: ts=8
1612 */
1613