ctl_frontend_internal.c revision 256222
1/*-
2 * Copyright (c) 2004, 2005 Silicon Graphics International Corp.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions, and the following disclaimer,
10 *    without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 *    substantially similar to the "NO WARRANTY" disclaimer below
13 *    ("Disclaimer") and any redistribution must be conditioned upon
14 *    including a substantially similar Disclaimer requirement for further
15 *    binary redistribution.
16 *
17 * NO WARRANTY
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGES.
29 *
30 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend_internal.c#5 $
31 */
32/*
33 * CTL kernel internal frontend target driver.  This allows kernel-level
34 * clients to send commands into CTL.
35 *
36 * This has elements of a FETD (e.g. it has to set tag numbers, initiator,
37 * port, target, and LUN) and elements of an initiator (LUN discovery and
38 * probing, error recovery, command initiation).  Even though this has some
39 * initiator type elements, this is not intended to be a full fledged
40 * initiator layer.  It is only intended to send a limited number of
41 * commands to a well known target layer.
42 *
43 * To be able to fulfill the role of a full initiator layer, it would need
44 * a whole lot more functionality.
45 *
46 * Author: Ken Merry <ken@FreeBSD.org>
47 *
48 */
49
50#include <sys/cdefs.h>
51__FBSDID("$FreeBSD: stable/9/sys/cam/ctl/ctl_frontend_internal.c 256222 2013-10-09 19:19:53Z mav $");
52
53#include <sys/param.h>
54#include <sys/systm.h>
55#include <sys/kernel.h>
56#include <sys/types.h>
57#include <sys/malloc.h>
58#include <sys/module.h>
59#include <sys/lock.h>
60#include <sys/mutex.h>
61#include <sys/condvar.h>
62#include <sys/queue.h>
63#include <sys/sbuf.h>
64#include <sys/sysctl.h>
65#include <cam/scsi/scsi_all.h>
66#include <cam/scsi/scsi_da.h>
67#include <cam/ctl/ctl_io.h>
68#include <cam/ctl/ctl.h>
69#include <cam/ctl/ctl_frontend.h>
70#include <cam/ctl/ctl_frontend_internal.h>
71#include <cam/ctl/ctl_backend.h>
72#include <cam/ctl/ctl_ioctl.h>
73#include <cam/ctl/ctl_util.h>
74#include <cam/ctl/ctl_ha.h>
75#include <cam/ctl/ctl_private.h>
76#include <cam/ctl/ctl_mem_pool.h>
77#include <cam/ctl/ctl_debug.h>
78#include <cam/ctl/ctl_scsi_all.h>
79#include <cam/ctl/ctl_error.h>
80
81/*
82 * Task structure:
83 *  - overall metatask, different potential metatask types (e.g. forced
84 *    shutdown, gentle shutdown)
85 *  - forced shutdown metatask:
86 *     - states:  report luns, pending, done?
87 *     - list of luns pending, with the relevant I/O for that lun attached.
88 *       This would allow moving ahead on LUNs with no errors, and going
89 *       into error recovery on LUNs with problems.  Per-LUN states might
90 *       include inquiry, stop/offline, done.
91 *
92 * Use LUN enable for LUN list instead of getting it manually?  We'd still
93 * need inquiry data for each LUN.
94 *
95 * How to handle processor LUN w.r.t. found/stopped counts?
96 */
97#ifdef oldapi
98typedef enum {
99	CFI_TASK_NONE,
100	CFI_TASK_SHUTDOWN,
101	CFI_TASK_STARTUP
102} cfi_tasktype;
103
104struct cfi_task_startstop {
105	int total_luns;
106	int luns_complete;
107	int luns_failed;
108	cfi_cb_t callback;
109	void *callback_arg;
110	/* XXX KDM add more fields here */
111};
112
113union cfi_taskinfo {
114	struct cfi_task_startstop startstop;
115};
116
117struct cfi_metatask {
118	cfi_tasktype		tasktype;
119	cfi_mt_status		status;
120	union cfi_taskinfo	taskinfo;
121	struct ctl_mem_element	*element;
122	void			*cfi_context;
123	STAILQ_ENTRY(cfi_metatask) links;
124};
125#endif
126
/*
 * Disposition for a failed I/O, as decided by cfi_error_parse() and
 * cfi_checkcond_parse().  The low byte (CFI_ERR_MASK) is the action
 * itself; CFI_ERR_NO_DECREMENT may be OR'ed in to indicate that this
 * failure should not consume one of the I/O's retries (used for unit
 * attentions and deferred errors, which are expected conditions).
 */
typedef enum {
	CFI_ERR_RETRY		= 0x000,
	CFI_ERR_FAIL		= 0x001,
	CFI_ERR_LUN_RESET	= 0x002,
	CFI_ERR_MASK		= 0x0ff,
	CFI_ERR_NO_DECREMENT	= 0x100
} cfi_error_action;

/*
 * Error recovery policy for an I/O: CFI_ERR_HARD escalates a reservation
 * conflict to a LUN reset (see cfi_error_parse()), CFI_ERR_SOFT does not.
 */
typedef enum {
	CFI_ERR_SOFT,
	CFI_ERR_HARD
} cfi_error_policy;

/*
 * Probe state machine for a LUN: INQUIRY first, then READ CAPACITY
 * (falling back to the 16-byte variant for LUNs larger than 0xffffffff
 * blocks), ending in READY.  Driven by cfi_lun_probe()/_probe_done().
 */
typedef enum {
	CFI_LUN_INQUIRY,
	CFI_LUN_READCAPACITY,
	CFI_LUN_READCAPACITY_16,
	CFI_LUN_READY
} cfi_lun_state;
146
/*
 * Per-LUN state tracked by the internal frontend.  Allocated from the
 * softc's lun_pool in cfi_lun_enable() and linked on the softc's
 * lun_list.  All list manipulation is under the softc lock.
 */
struct cfi_lun {
	struct ctl_id target_id;
	int lun_id;
	/* Filled in by the INQUIRY probe step. */
	struct scsi_inquiry_data inq_data;
	/* From READ CAPACITY: max LBA + 1, and bytes per block. */
	uint64_t num_blocks;
	uint32_t blocksize;
	/* log2(blocksize) when blocksize is a power of two. */
	int blocksize_powerof2;
	/* Next SCSI tag number to assign (see cfi_init_io()). */
	uint32_t cur_tag_num;
	cfi_lun_state state;
	/* Backing pool allocation; freed via ctl_free_mem_element(). */
	struct ctl_mem_element *element;
	struct cfi_softc *softc;
	/* Outstanding I/O on this LUN, protected by the softc lock. */
	STAILQ_HEAD(, cfi_lun_io) io_list;
	STAILQ_ENTRY(cfi_lun) links;
};

/*
 * Per-I/O tracking structure; stored in the ctl_io's port-private area
 * (cfi_init() sanity-checks that it fits in CTL_PORT_PRIV_SIZE).
 */
struct cfi_lun_io {
	struct cfi_lun *lun;
	struct cfi_metatask *metatask;
	cfi_error_policy policy;
	/* Completion callback invoked from cfi_done(). */
	void (*done_function)(union ctl_io *io);
	union ctl_io *ctl_io;
	/* For error-recovery I/O, the original I/O being recovered. */
	struct cfi_lun_io *orig_lun_io;
	STAILQ_ENTRY(cfi_lun_io) links;
};

typedef enum {
	CFI_NONE	= 0x00,
	CFI_ONLINE	= 0x01,	/* port is online; probes may be issued */
} cfi_flags;

/*
 * Singleton softc for the internal frontend (fetd_internal_softc).
 * The lock protects lun_list, metatask_list, and each LUN's io_list.
 */
struct cfi_softc {
	struct ctl_frontend fe;
	char fe_name[40];
	struct mtx lock;
	cfi_flags flags;
	STAILQ_HEAD(, cfi_lun) lun_list;
	STAILQ_HEAD(, cfi_metatask) metatask_list;
	struct ctl_mem_pool lun_pool;
	struct ctl_mem_pool metatask_pool;
};
187
188MALLOC_DEFINE(M_CTL_CFI, "ctlcfi", "CTL CFI");
189
190static struct cfi_softc fetd_internal_softc;
191extern int ctl_disable;
192
193int cfi_init(void);
194void cfi_shutdown(void) __unused;
195static void cfi_online(void *arg);
196static void cfi_offline(void *arg);
197static int cfi_targ_enable(void *arg, struct ctl_id targ_id);
198static int cfi_targ_disable(void *arg, struct ctl_id targ_id);
199static int cfi_lun_enable(void *arg, struct ctl_id target_id, int lun_id);
200static int cfi_lun_disable(void *arg, struct ctl_id target_id, int lun_id);
201static void cfi_datamove(union ctl_io *io);
202static cfi_error_action cfi_checkcond_parse(union ctl_io *io,
203					    struct cfi_lun_io *lun_io);
204static cfi_error_action cfi_error_parse(union ctl_io *io,
205					struct cfi_lun_io *lun_io);
206static void cfi_init_io(union ctl_io *io, struct cfi_lun *lun,
207			struct cfi_metatask *metatask, cfi_error_policy policy,
208			int retries, struct cfi_lun_io *orig_lun_io,
209			void (*done_function)(union ctl_io *io));
210static void cfi_done(union ctl_io *io);
211static void cfi_lun_probe_done(union ctl_io *io);
212static void cfi_lun_probe(struct cfi_lun *lun, int have_lock);
213static void cfi_metatask_done(struct cfi_softc *softc,
214			      struct cfi_metatask *metatask);
215static void cfi_metatask_bbr_errorparse(struct cfi_metatask *metatask,
216					union ctl_io *io);
217static void cfi_metatask_io_done(union ctl_io *io);
218static void cfi_err_recovery_done(union ctl_io *io);
219static void cfi_lun_io_done(union ctl_io *io);
220
221static int cfi_module_event_handler(module_t, int /*modeventtype_t*/, void *);
222
/*
 * Module glue: load the frontend at configure time, after CTL itself
 * (the MODULE_DEPEND on ctl).  Unload is refused by the event handler.
 */
static moduledata_t cfi_moduledata = {
	"ctlcfi",
	cfi_module_event_handler,
	NULL
};

DECLARE_MODULE(ctlcfi, cfi_moduledata, SI_SUB_CONFIGURE, SI_ORDER_FOURTH);
MODULE_VERSION(ctlcfi, 1);
MODULE_DEPEND(ctlcfi, ctl, 1, 1, 1);
232
233int
234cfi_init(void)
235{
236	struct cfi_softc *softc;
237	struct ctl_frontend *fe;
238	int retval;
239
240	softc = &fetd_internal_softc;
241
242	fe = &softc->fe;
243
244	retval = 0;
245
246	/* If we're disabled, don't initialize */
247	if (ctl_disable != 0)
248		return (0);
249
250	if (sizeof(struct cfi_lun_io) > CTL_PORT_PRIV_SIZE) {
251		printf("%s: size of struct cfi_lun_io %zd > "
252		       "CTL_PORT_PRIV_SIZE %d\n", __func__,
253		       sizeof(struct cfi_lun_io),
254		       CTL_PORT_PRIV_SIZE);
255	}
256	memset(softc, 0, sizeof(*softc));
257
258	mtx_init(&softc->lock, "CTL frontend mutex", NULL, MTX_DEF);
259	softc->flags |= CTL_FLAG_MASTER_SHELF;
260
261	STAILQ_INIT(&softc->lun_list);
262	STAILQ_INIT(&softc->metatask_list);
263	sprintf(softc->fe_name, "CTL internal");
264	fe->port_type = CTL_PORT_INTERNAL;
265	fe->num_requested_ctl_io = 100;
266	fe->port_name = softc->fe_name;
267	fe->port_online = cfi_online;
268	fe->port_offline = cfi_offline;
269	fe->onoff_arg = softc;
270	fe->targ_enable = cfi_targ_enable;
271	fe->targ_disable = cfi_targ_disable;
272	fe->lun_enable = cfi_lun_enable;
273	fe->lun_disable = cfi_lun_disable;
274	fe->targ_lun_arg = softc;
275	fe->fe_datamove = cfi_datamove;
276	fe->fe_done = cfi_done;
277	fe->max_targets = 15;
278	fe->max_target_id = 15;
279
280	if (ctl_frontend_register(fe, (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0)
281	{
282		printf("%s: internal frontend registration failed\n", __func__);
283		retval = 1;
284		goto bailout;
285	}
286
287	if (ctl_init_mem_pool(&softc->lun_pool,
288			      sizeof(struct cfi_lun),
289			      CTL_MEM_POOL_PERM_GROW, /*grow_inc*/ 3,
290			      /* initial_pool_size */ CTL_MAX_LUNS) != 0) {
291		printf("%s: can't initialize LUN memory pool\n", __func__);
292		retval = 1;
293		goto bailout_error;
294	}
295
296	if (ctl_init_mem_pool(&softc->metatask_pool,
297			      sizeof(struct cfi_metatask),
298			      CTL_MEM_POOL_PERM_GROW, /*grow_inc*/ 3,
299			      /*initial_pool_size*/ 10) != 0) {
300		printf("%s: can't initialize metatask memory pool\n", __func__);
301		retval = 2;
302		goto bailout_error;
303	}
304bailout:
305
306	return (0);
307
308bailout_error:
309
310	switch (retval) {
311	case 3:
312		ctl_shrink_mem_pool(&softc->metatask_pool);
313		/* FALLTHROUGH */
314	case 2:
315		ctl_shrink_mem_pool(&softc->lun_pool);
316		/* FALLTHROUGH */
317	case 1:
318		ctl_frontend_deregister(fe);
319		break;
320	default:
321		break;
322	}
323
324	return (ENOMEM);
325}
326
327void
328cfi_shutdown(void)
329{
330	struct cfi_softc *softc;
331
332	softc = &fetd_internal_softc;
333
334	/*
335	 * XXX KDM need to clear out any I/O pending on each LUN.
336	 */
337	if (ctl_frontend_deregister(&softc->fe) != 0)
338		printf("%s: ctl_frontend_deregister() failed\n", __func__);
339
340	if (ctl_shrink_mem_pool(&softc->lun_pool) != 0)
341		printf("%s: error shrinking LUN pool\n", __func__);
342
343	if (ctl_shrink_mem_pool(&softc->metatask_pool) != 0)
344		printf("%s: error shrinking LUN pool\n", __func__);
345}
346
347static int
348cfi_module_event_handler(module_t mod, int what, void *arg)
349{
350
351	switch (what) {
352	case MOD_LOAD:
353		return (cfi_init());
354	case MOD_UNLOAD:
355		return (EBUSY);
356	default:
357		return (EOPNOTSUPP);
358	}
359}
360
361static void
362cfi_online(void *arg)
363{
364	struct cfi_softc *softc;
365	struct cfi_lun *lun;
366
367	softc = (struct cfi_softc *)arg;
368
369	softc->flags |= CFI_ONLINE;
370
371	/*
372	 * Go through and kick off the probe for each lun.  Should we check
373	 * the LUN flags here to determine whether or not to probe it?
374	 */
375	mtx_lock(&softc->lock);
376	STAILQ_FOREACH(lun, &softc->lun_list, links)
377		cfi_lun_probe(lun, /*have_lock*/ 1);
378	mtx_unlock(&softc->lock);
379}
380
381static void
382cfi_offline(void *arg)
383{
384	struct cfi_softc *softc;
385
386	softc = (struct cfi_softc *)arg;
387
388	softc->flags &= ~CFI_ONLINE;
389}
390
/*
 * Target enable callback.  The internal frontend doesn't track targets
 * separately from LUNs, so this is a no-op that always succeeds.
 */
static int
cfi_targ_enable(void *arg, struct ctl_id targ_id)
{
	return (0);
}
396
/*
 * Target disable callback.  No per-target state to tear down; always
 * succeeds.
 */
static int
cfi_targ_disable(void *arg, struct ctl_id targ_id)
{
	return (0);
}
402
403static int
404cfi_lun_enable(void *arg, struct ctl_id target_id, int lun_id)
405{
406	struct ctl_mem_element *element;
407	struct cfi_softc *softc;
408	struct cfi_lun *lun;
409	int found;
410
411	softc = (struct cfi_softc *)arg;
412
413	found = 0;
414	mtx_lock(&softc->lock);
415	STAILQ_FOREACH(lun, &softc->lun_list, links) {
416		if ((lun->target_id.id == target_id.id)
417		 && (lun->lun_id == lun_id)) {
418			found = 1;
419			break;
420		}
421	}
422	mtx_unlock(&softc->lock);
423
424	/*
425	 * If we already have this target/LUN, there is no reason to add
426	 * it to our lists again.
427	 */
428	if (found != 0)
429		return (0);
430
431	element = ctl_alloc_mem_element(&softc->lun_pool, /*can_wait*/ 0);
432
433	if (element == NULL) {
434		printf("%s: unable to allocate LUN structure\n", __func__);
435		return (1);
436	}
437
438	lun = (struct cfi_lun *)element->bytes;
439
440	lun->element = element;
441	lun->target_id = target_id;
442	lun->lun_id = lun_id;
443	lun->cur_tag_num = 0;
444	lun->state = CFI_LUN_INQUIRY;
445	lun->softc = softc;
446	STAILQ_INIT(&lun->io_list);
447
448	mtx_lock(&softc->lock);
449	STAILQ_INSERT_TAIL(&softc->lun_list, lun, links);
450	mtx_unlock(&softc->lock);
451
452	cfi_lun_probe(lun, /*have_lock*/ 0);
453
454	return (0);
455}
456
457static int
458cfi_lun_disable(void *arg, struct ctl_id target_id, int lun_id)
459{
460	struct cfi_softc *softc;
461	struct cfi_lun *lun;
462	int found;
463
464	softc = (struct cfi_softc *)arg;
465
466	found = 0;
467
468	/*
469	 * XXX KDM need to do an invalidate and then a free when any
470	 * pending I/O has completed.  Or do we?  CTL won't free a LUN
471	 * while any I/O is pending.  So we won't get this notification
472	 * unless any I/O we have pending on a LUN has completed.
473	 */
474	mtx_lock(&softc->lock);
475	STAILQ_FOREACH(lun, &softc->lun_list, links) {
476		if ((lun->target_id.id == target_id.id)
477		 && (lun->lun_id == lun_id)) {
478			found = 1;
479			break;
480		}
481	}
482	if (found != 0)
483		STAILQ_REMOVE(&softc->lun_list, lun, cfi_lun, links);
484
485	mtx_unlock(&softc->lock);
486
487	if (found == 0) {
488		printf("%s: can't find target %ju lun %d\n", __func__,
489		       (uintmax_t)target_id.id, lun_id);
490		return (1);
491	}
492
493	ctl_free_mem_element(lun->element);
494
495	return (0);
496}
497
498static void
499cfi_datamove(union ctl_io *io)
500{
501	struct ctl_sg_entry *ext_sglist, *kern_sglist;
502	struct ctl_sg_entry ext_entry, kern_entry;
503	int ext_sglen, ext_sg_entries, kern_sg_entries;
504	int ext_sg_start, ext_offset;
505	int len_to_copy, len_copied;
506	int kern_watermark, ext_watermark;
507	int ext_sglist_malloced;
508	struct ctl_scsiio *ctsio;
509	int i, j;
510
511	ext_sglist_malloced = 0;
512	ext_sg_start = 0;
513	ext_offset = 0;
514	ext_sglist = NULL;
515
516	CTL_DEBUG_PRINT(("%s\n", __func__));
517
518	ctsio = &io->scsiio;
519
520	/*
521	 * If this is the case, we're probably doing a BBR read and don't
522	 * actually need to transfer the data.  This will effectively
523	 * bit-bucket the data.
524	 */
525	if (ctsio->ext_data_ptr == NULL)
526		goto bailout;
527
528	/*
529	 * To simplify things here, if we have a single buffer, stick it in
530	 * a S/G entry and just make it a single entry S/G list.
531	 */
532	if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
533		int len_seen;
534
535		ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
536
537		ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL_CFI,
538							   M_WAITOK);
539		ext_sglist_malloced = 1;
540		if (memcpy(ext_sglist, ctsio->ext_data_ptr, ext_sglen) != 0) {
541			ctl_set_internal_failure(ctsio,
542						 /*sks_valid*/ 0,
543						 /*retry_count*/ 0);
544			goto bailout;
545		}
546		ext_sg_entries = ctsio->ext_sg_entries;
547		len_seen = 0;
548		for (i = 0; i < ext_sg_entries; i++) {
549			if ((len_seen + ext_sglist[i].len) >=
550			     ctsio->ext_data_filled) {
551				ext_sg_start = i;
552				ext_offset = ctsio->ext_data_filled - len_seen;
553				break;
554			}
555			len_seen += ext_sglist[i].len;
556		}
557	} else {
558		ext_sglist = &ext_entry;
559		ext_sglist->addr = ctsio->ext_data_ptr;
560		ext_sglist->len = ctsio->ext_data_len;
561		ext_sg_entries = 1;
562		ext_sg_start = 0;
563		ext_offset = ctsio->ext_data_filled;
564	}
565
566	if (ctsio->kern_sg_entries > 0) {
567		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
568		kern_sg_entries = ctsio->kern_sg_entries;
569	} else {
570		kern_sglist = &kern_entry;
571		kern_sglist->addr = ctsio->kern_data_ptr;
572		kern_sglist->len = ctsio->kern_data_len;
573		kern_sg_entries = 1;
574	}
575
576
577	kern_watermark = 0;
578	ext_watermark = ext_offset;
579	len_copied = 0;
580	for (i = ext_sg_start, j = 0;
581	     i < ext_sg_entries && j < kern_sg_entries;) {
582		uint8_t *ext_ptr, *kern_ptr;
583
584		len_to_copy = ctl_min(ext_sglist[i].len - ext_watermark,
585				      kern_sglist[j].len - kern_watermark);
586
587		ext_ptr = (uint8_t *)ext_sglist[i].addr;
588		ext_ptr = ext_ptr + ext_watermark;
589		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
590			/*
591			 * XXX KDM fix this!
592			 */
593			panic("need to implement bus address support");
594#if 0
595			kern_ptr = bus_to_virt(kern_sglist[j].addr);
596#endif
597		} else
598			kern_ptr = (uint8_t *)kern_sglist[j].addr;
599		kern_ptr = kern_ptr + kern_watermark;
600
601		kern_watermark += len_to_copy;
602		ext_watermark += len_to_copy;
603
604		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
605		     CTL_FLAG_DATA_IN) {
606			CTL_DEBUG_PRINT(("%s: copying %d bytes to user\n",
607					 __func__, len_to_copy));
608			CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
609					 kern_ptr, ext_ptr));
610			memcpy(ext_ptr, kern_ptr, len_to_copy);
611		} else {
612			CTL_DEBUG_PRINT(("%s: copying %d bytes from user\n",
613					 __func__, len_to_copy));
614			CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
615					 ext_ptr, kern_ptr));
616			memcpy(kern_ptr, ext_ptr, len_to_copy);
617		}
618
619		len_copied += len_to_copy;
620
621		if (ext_sglist[i].len == ext_watermark) {
622			i++;
623			ext_watermark = 0;
624		}
625
626		if (kern_sglist[j].len == kern_watermark) {
627			j++;
628			kern_watermark = 0;
629		}
630	}
631
632	ctsio->ext_data_filled += len_copied;
633
634	CTL_DEBUG_PRINT(("%s: ext_sg_entries: %d, kern_sg_entries: %d\n",
635			 __func__, ext_sg_entries, kern_sg_entries));
636	CTL_DEBUG_PRINT(("%s: ext_data_len = %d, kern_data_len = %d\n",
637			 __func__, ctsio->ext_data_len, ctsio->kern_data_len));
638
639
640	/* XXX KDM set residual?? */
641bailout:
642
643	if (ext_sglist_malloced != 0)
644		free(ext_sglist, M_CTL_CFI);
645
646	io->scsiio.be_move_done(io);
647
648	return;
649}
650
651/*
652 * For any sort of check condition, busy, etc., we just retry.  We do not
653 * decrement the retry count for unit attention type errors.  These are
654 * normal, and we want to save the retry count for "real" errors.  Otherwise,
655 * we could end up with situations where a command will succeed in some
656 * situations and fail in others, depending on whether a unit attention is
657 * pending.  Also, some of our error recovery actions, most notably the
658 * LUN reset action, will cause a unit attention.
659 *
660 * We can add more detail here later if necessary.
661 */
662static cfi_error_action
663cfi_checkcond_parse(union ctl_io *io, struct cfi_lun_io *lun_io)
664{
665	cfi_error_action error_action;
666	int error_code, sense_key, asc, ascq;
667
668	/*
669	 * Default to retrying the command.
670	 */
671	error_action = CFI_ERR_RETRY;
672
673	scsi_extract_sense_len(&io->scsiio.sense_data,
674			       io->scsiio.sense_len,
675			       &error_code,
676			       &sense_key,
677			       &asc,
678			       &ascq,
679			       /*show_errors*/ 1);
680
681	switch (error_code) {
682	case SSD_DEFERRED_ERROR:
683	case SSD_DESC_DEFERRED_ERROR:
684		error_action |= CFI_ERR_NO_DECREMENT;
685		break;
686	case SSD_CURRENT_ERROR:
687	case SSD_DESC_CURRENT_ERROR:
688	default: {
689		switch (sense_key) {
690		case SSD_KEY_UNIT_ATTENTION:
691			error_action |= CFI_ERR_NO_DECREMENT;
692			break;
693		case SSD_KEY_HARDWARE_ERROR:
694			/*
695			 * This is our generic "something bad happened"
696			 * error code.  It often isn't recoverable.
697			 */
698			if ((asc == 0x44) && (ascq == 0x00))
699				error_action = CFI_ERR_FAIL;
700			break;
701		case SSD_KEY_NOT_READY:
702			/*
703			 * If the LUN is powered down, there likely isn't
704			 * much point in retrying right now.
705			 */
706			if ((asc == 0x04) && (ascq == 0x02))
707				error_action = CFI_ERR_FAIL;
708			/*
709			 * If the LUN is offline, there probably isn't much
710			 * point in retrying, either.
711			 */
712			if ((asc == 0x04) && (ascq == 0x03))
713				error_action = CFI_ERR_FAIL;
714			break;
715		}
716	}
717	}
718
719	return (error_action);
720}
721
/*
 * Top-level error classifier for a completed I/O.  SCSI errors with a
 * check condition are handed to cfi_checkcond_parse(); a reservation
 * conflict under the hard error policy escalates to a LUN reset; task
 * I/O and anything else defaults to retry.  Panics on an io_type that
 * isn't CTL_IO_SCSI or CTL_IO_TASK.
 */
static cfi_error_action
cfi_error_parse(union ctl_io *io, struct cfi_lun_io *lun_io)
{
	cfi_error_action error_action;

	/* Default disposition: retry. */
	error_action = CFI_ERR_RETRY;

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		switch (io->io_hdr.status & CTL_STATUS_MASK) {
		case CTL_SCSI_ERROR:
			switch (io->scsiio.scsi_status) {
			case SCSI_STATUS_RESERV_CONFLICT:
				/*
				 * For a reservation conflict, we'll usually
				 * want the hard error recovery policy, so
				 * we'll reset the LUN.
				 */
				if (lun_io->policy == CFI_ERR_HARD)
					error_action =
						CFI_ERR_LUN_RESET;
				else
					error_action =
						CFI_ERR_RETRY;
				break;
			case SCSI_STATUS_CHECK_COND:
			default:
				/* Dig into the sense data. */
				error_action = cfi_checkcond_parse(io, lun_io);
				break;
			}
			break;
		default:
			error_action = CFI_ERR_RETRY;
			break;
		}
		break;
	case CTL_IO_TASK:
		/*
		 * In theory task management commands shouldn't fail...
		 */
		error_action = CFI_ERR_RETRY;
		break;
	default:
		printf("%s: invalid ctl_io type %d\n", __func__,
		       io->io_hdr.io_type);
		panic("%s: invalid ctl_io type %d\n", __func__,
		      io->io_hdr.io_type);
		break;
	}

	return (error_action);
}
774
/*
 * Fill in the nexus and the port-private cfi_lun_io tracking structure
 * for an I/O about to be queued to CTL on the given LUN.  The caller
 * supplies the error policy, retry count, completion callback, and (for
 * error-recovery I/O) the original lun_io being recovered.
 */
static void
cfi_init_io(union ctl_io *io, struct cfi_lun *lun,
	    struct cfi_metatask *metatask, cfi_error_policy policy, int retries,
	    struct cfi_lun_io *orig_lun_io,
	    void (*done_function)(union ctl_io *io))
{
	struct cfi_lun_io *lun_io;

	/*
	 * NOTE(review): initiator ID 7 is hard-coded here -- presumably a
	 * conventional initiator address for this internal port; confirm.
	 */
	io->io_hdr.nexus.initid.id = 7;
	io->io_hdr.nexus.targ_port = lun->softc->fe.targ_port;
	io->io_hdr.nexus.targ_target.id = lun->target_id.id;
	io->io_hdr.nexus.targ_lun = lun->lun_id;
	io->io_hdr.retries = retries;
	/* The tracking structure lives in the I/O's port-private area. */
	lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = lun_io;
	lun_io->lun = lun;
	lun_io->metatask = metatask;
	lun_io->ctl_io = io;
	lun_io->policy = policy;
	lun_io->orig_lun_io = orig_lun_io;
	lun_io->done_function = done_function;
	/*
	 * We only set the tag number for SCSI I/Os.  For task management
	 * commands, the tag number is only really needed for aborts, so
	 * the caller can set it if necessary.
	 */
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		io->scsiio.tag_num = lun->cur_tag_num++;
		break;
	case CTL_IO_TASK:
	default:
		break;
	}
}
810
/*
 * CTL completion callback for all I/O issued by this frontend.  On
 * error, consults cfi_error_parse() and either gives up (invoking the
 * I/O's done_function), requeues the I/O for a retry, or issues a LUN
 * reset as recovery (requeueing the reset in place of the original I/O;
 * the original is completed later by cfi_err_recovery_done()).
 */
static void
cfi_done(union ctl_io *io)
{
	struct cfi_lun_io *lun_io;
	struct cfi_softc *softc;
	struct cfi_lun *lun;

	lun_io = (struct cfi_lun_io *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	lun = lun_io->lun;
	softc = lun->softc;

	/*
	 * Very minimal retry logic.  We basically retry if we got an error
	 * back, and the retry count is greater than 0.  If we ever want
	 * more sophisticated initiator type behavior, the CAM error
	 * recovery code in ../common might be helpful.
	 */
	if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
	 && (io->io_hdr.retries > 0)) {
		ctl_io_status old_status;
		cfi_error_action error_action;

		error_action = cfi_error_parse(io, lun_io);

		switch (error_action & CFI_ERR_MASK) {
		case CFI_ERR_FAIL:
			/* Unrecoverable: complete the I/O as-is. */
			goto done;
			break; /* NOTREACHED */
		case CFI_ERR_LUN_RESET: {
			union ctl_io *new_io;
			struct cfi_lun_io *new_lun_io;

			new_io = ctl_alloc_io(softc->fe.ctl_pool_ref);
			if (new_io == NULL) {
				printf("%s: unable to allocate ctl_io for "
				       "error recovery\n", __func__);
				goto done;
			}
			ctl_zero_io(new_io);

			new_io->io_hdr.io_type = CTL_IO_TASK;
			new_io->taskio.task_action = CTL_TASK_LUN_RESET;

			cfi_init_io(new_io,
				    /*lun*/ lun_io->lun,
				    /*metatask*/ NULL,
				    /*policy*/ CFI_ERR_SOFT,
				    /*retries*/ 0,
				    /*orig_lun_io*/lun_io,
				    /*done_function*/ cfi_err_recovery_done);


			new_lun_io = (struct cfi_lun_io *)
				new_io->io_hdr.port_priv;

			mtx_lock(&lun->softc->lock);
			STAILQ_INSERT_TAIL(&lun->io_list, new_lun_io, links);
			mtx_unlock(&lun->softc->lock);

			/* Queue the reset instead of the original I/O. */
			io = new_io;
			break;
		}
		case CFI_ERR_RETRY:
		default:
			/*
			 * Unit attentions and deferred errors don't count
			 * against the retry budget.
			 */
			if ((error_action & CFI_ERR_NO_DECREMENT) == 0)
				io->io_hdr.retries--;
			break;
		}

		/* Clear completion state before requeueing. */
		old_status = io->io_hdr.status;
		io->io_hdr.status = CTL_STATUS_NONE;
#if 0
		io->io_hdr.flags &= ~CTL_FLAG_ALREADY_DONE;
#endif
		io->io_hdr.flags &= ~CTL_FLAG_ABORT;
		io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;

		if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
			printf("%s: error returned from ctl_queue()!\n",
			       __func__);
			/* Requeue failed: restore status and fall through. */
			io->io_hdr.status = old_status;
		} else
			return;
	}
done:
	lun_io->done_function(io);
}
900
/*
 * Completion handler for LUN probe I/O (INQUIRY / READ CAPACITY).
 * Advances the LUN's probe state machine, kicking off the next probe
 * step when more information is needed.  Always removes the completed
 * lun_io from the LUN's I/O list and frees the ctl_io.
 */
static void
cfi_lun_probe_done(union ctl_io *io)
{
	struct cfi_lun *lun;
	struct cfi_lun_io *lun_io;

	lun_io = (struct cfi_lun_io *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
	lun = lun_io->lun;

	switch (lun->state) {
	case CFI_LUN_INQUIRY: {
		if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
			/* print out something here?? */
			printf("%s: LUN %d probe failed because inquiry "
			       "failed\n", __func__, lun->lun_id);
			ctl_io_error_print(io, NULL);
		} else {

			/*
			 * Only direct-access devices proceed to the READ
			 * CAPACITY step; everything else is ready now.
			 */
			if (SID_TYPE(&lun->inq_data) != T_DIRECT) {
				char path_str[40];

				lun->state = CFI_LUN_READY;
				ctl_scsi_path_string(io, path_str,
						     sizeof(path_str));
				printf("%s", path_str);
				scsi_print_inquiry(&lun->inq_data);
			} else {
				lun->state = CFI_LUN_READCAPACITY;
				cfi_lun_probe(lun, /*have_lock*/ 0);
			}
		}
		mtx_lock(&lun->softc->lock);
		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
		mtx_unlock(&lun->softc->lock);
		ctl_free_io(io);
		break;
	}
	case CFI_LUN_READCAPACITY:
	case CFI_LUN_READCAPACITY_16: {
		uint64_t maxlba;
		uint32_t blocksize;

		maxlba = 0;
		blocksize = 0;

		if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
			printf("%s: LUN %d probe failed because READ CAPACITY "
			       "failed\n", __func__, lun->lun_id);
			ctl_io_error_print(io, NULL);
		} else {

			if (lun->state == CFI_LUN_READCAPACITY) {
				struct scsi_read_capacity_data *rdcap;

				rdcap = (struct scsi_read_capacity_data *)
					io->scsiio.ext_data_ptr;

				maxlba = scsi_4btoul(rdcap->addr);
				blocksize = scsi_4btoul(rdcap->length);
				if (blocksize == 0) {
					printf("%s: LUN %d has invalid "
					       "blocksize 0, probe aborted\n",
					       __func__, lun->lun_id);
				} else if (maxlba == 0xffffffff) {
					/*
					 * 0xffffffff means the LUN is too
					 * big for the 10-byte command;
					 * retry with READ CAPACITY (16).
					 */
					lun->state = CFI_LUN_READCAPACITY_16;
					cfi_lun_probe(lun, /*have_lock*/ 0);
				} else
					lun->state = CFI_LUN_READY;
			} else {
				struct scsi_read_capacity_data_long *rdcap_long;

				rdcap_long = (struct
					scsi_read_capacity_data_long *)
					io->scsiio.ext_data_ptr;
				maxlba = scsi_8btou64(rdcap_long->addr);
				blocksize = scsi_4btoul(rdcap_long->length);

				if (blocksize == 0) {
					printf("%s: LUN %d has invalid "
					       "blocksize 0, probe aborted\n",
					       __func__, lun->lun_id);
				} else
					lun->state = CFI_LUN_READY;
			}
		}

		if (lun->state == CFI_LUN_READY) {
			char path_str[40];

			lun->num_blocks = maxlba + 1;
			lun->blocksize = blocksize;

			/*
			 * If this is true, the blocksize is a power of 2.
			 * We already checked for 0 above.
			 */
			if (((blocksize - 1) & blocksize) == 0) {
				int i;

				/* Find the set bit: log2(blocksize). */
				for (i = 0; i < 32; i++) {
					if ((blocksize & (1 << i)) != 0) {
						lun->blocksize_powerof2 = i;
						break;
					}
				}
			}
			ctl_scsi_path_string(io, path_str,sizeof(path_str));
			printf("%s", path_str);
			scsi_print_inquiry(&lun->inq_data);
			printf("%s %ju blocks, blocksize %d\n", path_str,
			       (uintmax_t)maxlba + 1, blocksize);
		}
		mtx_lock(&lun->softc->lock);
		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
		mtx_unlock(&lun->softc->lock);
		/* The data buffer was allocated in cfi_lun_probe(). */
		free(io->scsiio.ext_data_ptr, M_CTL_CFI);
		ctl_free_io(io);
		break;
	}
	case CFI_LUN_READY:
	default:
		mtx_lock(&lun->softc->lock);
		/* How did we get here?? */
		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
		mtx_unlock(&lun->softc->lock);
		ctl_free_io(io);
		break;
	}
}
1031
1032static void
1033cfi_lun_probe(struct cfi_lun *lun, int have_lock)
1034{
1035
1036	if (have_lock == 0)
1037		mtx_lock(&lun->softc->lock);
1038	if ((lun->softc->flags & CFI_ONLINE) == 0) {
1039		if (have_lock == 0)
1040			mtx_unlock(&lun->softc->lock);
1041		return;
1042	}
1043	if (have_lock == 0)
1044		mtx_unlock(&lun->softc->lock);
1045
1046	switch (lun->state) {
1047	case CFI_LUN_INQUIRY: {
1048		struct cfi_lun_io *lun_io;
1049		union ctl_io *io;
1050
1051		io = ctl_alloc_io(lun->softc->fe.ctl_pool_ref);
1052		if (io == NULL) {
1053			printf("%s: unable to alloc ctl_io for target %ju "
1054			       "lun %d probe\n", __func__,
1055			       (uintmax_t)lun->target_id.id, lun->lun_id);
1056			return;
1057		}
1058		ctl_scsi_inquiry(io,
1059				 /*data_ptr*/(uint8_t *)&lun->inq_data,
1060				 /*data_len*/ sizeof(lun->inq_data),
1061				 /*byte2*/ 0,
1062				 /*page_code*/ 0,
1063				 /*tag_type*/ CTL_TAG_SIMPLE,
1064				 /*control*/ 0);
1065
1066		cfi_init_io(io,
1067			    /*lun*/ lun,
1068			    /*metatask*/ NULL,
1069			    /*policy*/ CFI_ERR_SOFT,
1070			    /*retries*/ 5,
1071			    /*orig_lun_io*/ NULL,
1072			    /*done_function*/
1073			    cfi_lun_probe_done);
1074
1075		lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1076
1077		if (have_lock == 0)
1078			mtx_lock(&lun->softc->lock);
1079		STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1080		if (have_lock == 0)
1081			mtx_unlock(&lun->softc->lock);
1082
1083		if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1084			printf("%s: error returned from ctl_queue()!\n",
1085			       __func__);
1086			STAILQ_REMOVE(&lun->io_list, lun_io,
1087				      cfi_lun_io, links);
1088			ctl_free_io(io);
1089		}
1090		break;
1091	}
1092	case CFI_LUN_READCAPACITY:
1093	case CFI_LUN_READCAPACITY_16: {
1094		struct cfi_lun_io *lun_io;
1095		uint8_t *dataptr;
1096		union ctl_io *io;
1097
1098		io = ctl_alloc_io(lun->softc->fe.ctl_pool_ref);
1099		if (io == NULL) {
1100			printf("%s: unable to alloc ctl_io for target %ju "
1101			       "lun %d probe\n", __func__,
1102			       (uintmax_t)lun->target_id.id, lun->lun_id);
1103			return;
1104		}
1105
1106		dataptr = malloc(sizeof(struct scsi_read_capacity_data_long),
1107				 M_CTL_CFI, M_NOWAIT);
1108		if (dataptr == NULL) {
1109			printf("%s: unable to allocate SCSI read capacity "
1110			       "buffer for target %ju lun %d\n", __func__,
1111			       (uintmax_t)lun->target_id.id, lun->lun_id);
1112			return;
1113		}
1114		if (lun->state == CFI_LUN_READCAPACITY) {
1115			ctl_scsi_read_capacity(io,
1116				/*data_ptr*/ dataptr,
1117				/*data_len*/
1118				sizeof(struct scsi_read_capacity_data_long),
1119				/*addr*/ 0,
1120				/*reladr*/ 0,
1121				/*pmi*/ 0,
1122				/*tag_type*/ CTL_TAG_SIMPLE,
1123				/*control*/ 0);
1124		} else {
1125			ctl_scsi_read_capacity_16(io,
1126				/*data_ptr*/ dataptr,
1127				/*data_len*/
1128				sizeof(struct scsi_read_capacity_data_long),
1129				/*addr*/ 0,
1130				/*reladr*/ 0,
1131				/*pmi*/ 0,
1132				/*tag_type*/ CTL_TAG_SIMPLE,
1133				/*control*/ 0);
1134		}
1135		cfi_init_io(io,
1136			    /*lun*/ lun,
1137			    /*metatask*/ NULL,
1138			    /*policy*/ CFI_ERR_SOFT,
1139			    /*retries*/ 7,
1140			    /*orig_lun_io*/ NULL,
1141			    /*done_function*/ cfi_lun_probe_done);
1142
1143		lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1144
1145		if (have_lock == 0)
1146			mtx_lock(&lun->softc->lock);
1147		STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1148		if (have_lock == 0)
1149			mtx_unlock(&lun->softc->lock);
1150
1151		if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1152			printf("%s: error returned from ctl_queue()!\n",
1153			       __func__);
1154			STAILQ_REMOVE(&lun->io_list, lun_io,
1155				      cfi_lun_io, links);
1156			free(dataptr, M_CTL_CFI);
1157			ctl_free_io(io);
1158		}
1159		break;
1160	}
1161	case CFI_LUN_READY:
1162	default:
1163		/* Why were we called? */
1164		break;
1165	}
1166}
1167
static void
cfi_metatask_done(struct cfi_softc *softc, struct cfi_metatask *metatask)
{
	/* Pull the metatask off the softc's outstanding list under the lock. */
	mtx_lock(&softc->lock);
	STAILQ_REMOVE(&softc->metatask_list, metatask, cfi_metatask, links);
	mtx_unlock(&softc->lock);

	/*
	 * Return status to the caller.  Caller allocated storage, and is
	 * responsible for calling cfi_free_metatask to release it once
	 * they've seen the status.
	 */
	metatask->callback(metatask->callback_arg, metatask);
}
1182
/*
 * Translate the completion status of a BBR (bad block recovery) read I/O
 * into the metatask's status/bbrread.status enumeration.  Only meaningful
 * for CFI_TASK_BBRREAD metatasks; a no-op for any other task type.
 */
static void
cfi_metatask_bbr_errorparse(struct cfi_metatask *metatask, union ctl_io *io)
{
	int error_code, sense_key, asc, ascq;

	if (metatask->tasktype != CFI_TASK_BBRREAD)
		return;

	/* Clean completion: the read found no bad blocks. */
	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
		metatask->status = CFI_MT_SUCCESS;
		metatask->taskinfo.bbrread.status = CFI_BBR_SUCCESS;
		return;
	}

	/* Non-SCSI failures (transport/driver errors) are generic errors. */
	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SCSI_ERROR) {
		metatask->status = CFI_MT_ERROR;
		metatask->taskinfo.bbrread.status = CFI_BBR_ERROR;
		return;
	}

	/*
	 * SCSI error: preserve the raw SCSI status and as much sense data
	 * as fits in the metatask for the caller to inspect.
	 */
	metatask->taskinfo.bbrread.scsi_status = io->scsiio.scsi_status;
	memcpy(&metatask->taskinfo.bbrread.sense_data, &io->scsiio.sense_data,
	       ctl_min(sizeof(metatask->taskinfo.bbrread.sense_data),
		       sizeof(io->scsiio.sense_data)));

	if (io->scsiio.scsi_status == SCSI_STATUS_RESERV_CONFLICT) {
		metatask->status = CFI_MT_ERROR;
		metatask->taskinfo.bbrread.status = CFI_BBR_RESERV_CONFLICT;
		return;
	}

	/* Any SCSI status other than CHECK CONDITION carries no sense info. */
	if (io->scsiio.scsi_status != SCSI_STATUS_CHECK_COND) {
		metatask->status = CFI_MT_ERROR;
		metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
		return;
	}

	scsi_extract_sense_len(&io->scsiio.sense_data,
			       io->scsiio.sense_len,
			       &error_code,
			       &sense_key,
			       &asc,
			       &ascq,
			       /*show_errors*/ 1);

	switch (error_code) {
	case SSD_DEFERRED_ERROR:
	case SSD_DESC_DEFERRED_ERROR:
		metatask->status = CFI_MT_ERROR;
		metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
		break;
	case SSD_CURRENT_ERROR:
	case SSD_DESC_CURRENT_ERROR:
	default: {
		/*
		 * NOTE: 'sense' is only referenced inside the NEEDTOPORT
		 * block below; without NEEDTOPORT it is set but unused.
		 */
		struct scsi_sense_data *sense;

		sense = &io->scsiio.sense_data;

		/* ASC/ASCQ 04/02: unit not ready, START UNIT required. */
		if ((asc == 0x04) && (ascq == 0x02)) {
			metatask->status = CFI_MT_ERROR;
			metatask->taskinfo.bbrread.status = CFI_BBR_LUN_STOPPED;
		} else if ((asc == 0x04) && (ascq == 0x03)) {
			/* ASC/ASCQ 04/03: unit not ready, manual intervention. */
			metatask->status = CFI_MT_ERROR;
			metatask->taskinfo.bbrread.status =
				CFI_BBR_LUN_OFFLINE_CTL;
		} else if ((asc == 0x44) && (ascq == 0x00)) {
			/* ASC/ASCQ 44/00: internal target failure. */
#ifdef NEEDTOPORT
			if (sense->sense_key_spec[0] & SSD_SCS_VALID) {
				uint16_t retry_count;

				/*
				 * Sense-key-specific bytes encode a vendor
				 * retry count; decode it to distinguish a
				 * RAIDcore "device offline" condition.
				 */
				retry_count = sense->sense_key_spec[1] << 8 |
					      sense->sense_key_spec[2];
				if (((retry_count & 0xf000) == CSC_RAIDCORE)
				 && ((retry_count & 0x0f00) == CSC_SHELF_SW)
				 && ((retry_count & 0xff) ==
				      RC_STS_DEVICE_OFFLINE)) {
					metatask->status = CFI_MT_ERROR;
					metatask->taskinfo.bbrread.status =
						CFI_BBR_LUN_OFFLINE_RC;
				} else {
					metatask->status = CFI_MT_ERROR;
					metatask->taskinfo.bbrread.status =
						CFI_BBR_SCSI_ERROR;
				}
			} else {
#endif /* NEEDTOPORT */
				metatask->status = CFI_MT_ERROR;
				metatask->taskinfo.bbrread.status =
					CFI_BBR_SCSI_ERROR;
#ifdef NEEDTOPORT
			}
#endif
		} else {
			metatask->status = CFI_MT_ERROR;
			metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
		}
		break;
	}
	}
}
1283
/*
 * Completion handler for an I/O that belongs to a metatask.  Updates the
 * metatask's per-LUN accounting, and when the last constituent I/O has
 * completed, finalizes the metatask via cfi_metatask_done().  Always
 * removes the lun_io from its LUN's in-flight list and frees the ctl_io.
 */
static void
cfi_metatask_io_done(union ctl_io *io)
{
	struct cfi_lun_io *lun_io;
	struct cfi_metatask *metatask;
	struct cfi_softc *softc;
	struct cfi_lun *lun;

	lun_io = (struct cfi_lun_io *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	lun = lun_io->lun;
	softc = lun->softc;

	metatask = lun_io->metatask;

	switch (metatask->tasktype) {
	case CFI_TASK_STARTUP:
	case CFI_TASK_SHUTDOWN: {
		int failed, done, is_start;

		failed = 0;
		done = 0;
		if (metatask->tasktype == CFI_TASK_STARTUP)
			is_start = 1;
		else
			is_start = 0;

		/*
		 * Tally this completion under the lock; the "done" decision
		 * is made here so exactly one completing I/O observes the
		 * transition to all-LUNs-accounted-for.
		 */
		mtx_lock(&softc->lock);
		if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
			metatask->taskinfo.startstop.luns_complete++;
		else {
			metatask->taskinfo.startstop.luns_failed++;
			failed = 1;
		}
		if ((metatask->taskinfo.startstop.luns_complete +
		     metatask->taskinfo.startstop.luns_failed) >=
		     metatask->taskinfo.startstop.total_luns)
			done = 1;

		mtx_unlock(&softc->lock);

		if (failed != 0) {
			printf("%s: LUN %d %s request failed\n", __func__,
			       lun_io->lun->lun_id, (is_start == 1) ? "start" :
			       "stop");
			ctl_io_error_print(io, &lun_io->lun->inq_data);
		}
		if (done != 0) {
			if (metatask->taskinfo.startstop.luns_failed > 0)
				metatask->status = CFI_MT_ERROR;
			else
				metatask->status = CFI_MT_SUCCESS;
			cfi_metatask_done(softc, metatask);
		}
		/*
		 * NOTE(review): the metatask callback above fires while this
		 * lun_io is still on the LUN's io_list; removal happens here,
		 * afterwards.  Confirm callers do not tear down the LUN from
		 * the callback.
		 */
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
		mtx_unlock(&softc->lock);

		ctl_free_io(io);
		break;
	}
	case CFI_TASK_BBRREAD: {
		/*
		 * Translate the SCSI error into an enumeration.
		 */
		cfi_metatask_bbr_errorparse(metatask, io);

		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
		mtx_unlock(&softc->lock);

		ctl_free_io(io);

		cfi_metatask_done(softc, metatask);
		break;
	}
	default:
		/*
		 * This shouldn't happen.
		 */
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
		mtx_unlock(&softc->lock);

		ctl_free_io(io);
		break;
	}
}
1373
1374static void
1375cfi_err_recovery_done(union ctl_io *io)
1376{
1377	struct cfi_lun_io *lun_io, *orig_lun_io;
1378	struct cfi_lun *lun;
1379	union ctl_io *orig_io;
1380
1381	lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1382	orig_lun_io = lun_io->orig_lun_io;
1383	orig_io = orig_lun_io->ctl_io;
1384	lun = lun_io->lun;
1385
1386	if (io->io_hdr.status != CTL_SUCCESS) {
1387		printf("%s: error recovery action failed.  Original "
1388		       "error:\n", __func__);
1389
1390		ctl_io_error_print(orig_lun_io->ctl_io, &lun->inq_data);
1391
1392		printf("%s: error from error recovery action:\n", __func__);
1393
1394		ctl_io_error_print(io, &lun->inq_data);
1395
1396		printf("%s: trying original command again...\n", __func__);
1397	}
1398
1399	mtx_lock(&lun->softc->lock);
1400	STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1401	mtx_unlock(&lun->softc->lock);
1402	ctl_free_io(io);
1403
1404	orig_io->io_hdr.retries--;
1405	orig_io->io_hdr.status = CTL_STATUS_NONE;
1406
1407	if (ctl_queue(orig_io) != CTL_RETVAL_COMPLETE) {
1408		printf("%s: error returned from ctl_queue()!\n", __func__);
1409		STAILQ_REMOVE(&lun->io_list, orig_lun_io,
1410			      cfi_lun_io, links);
1411		ctl_free_io(orig_io);
1412	}
1413}
1414
1415static void
1416cfi_lun_io_done(union ctl_io *io)
1417{
1418	struct cfi_lun *lun;
1419	struct cfi_lun_io *lun_io;
1420
1421	lun_io = (struct cfi_lun_io *)
1422		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1423	lun = lun_io->lun;
1424
1425	if (lun_io->metatask == NULL) {
1426		printf("%s: I/O has no metatask pointer, discarding\n",
1427		       __func__);
1428		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1429		ctl_free_io(io);
1430		return;
1431	}
1432	cfi_metatask_io_done(io);
1433}
1434
1435void
1436cfi_action(struct cfi_metatask *metatask)
1437{
1438	struct cfi_softc *softc;
1439
1440	softc = &fetd_internal_softc;
1441
1442	mtx_lock(&softc->lock);
1443
1444	STAILQ_INSERT_TAIL(&softc->metatask_list, metatask, links);
1445
1446	if ((softc->flags & CFI_ONLINE) == 0) {
1447		mtx_unlock(&softc->lock);
1448		metatask->status = CFI_MT_PORT_OFFLINE;
1449		cfi_metatask_done(softc, metatask);
1450		return;
1451	} else
1452		mtx_unlock(&softc->lock);
1453
1454	switch (metatask->tasktype) {
1455	case CFI_TASK_STARTUP:
1456	case CFI_TASK_SHUTDOWN: {
1457		union ctl_io *io;
1458		int da_luns, ios_allocated, do_start;
1459		struct cfi_lun *lun;
1460		STAILQ_HEAD(, ctl_io_hdr) tmp_io_list;
1461
1462		da_luns = 0;
1463		ios_allocated = 0;
1464		STAILQ_INIT(&tmp_io_list);
1465
1466		if (metatask->tasktype == CFI_TASK_STARTUP)
1467			do_start = 1;
1468		else
1469			do_start = 0;
1470
1471		mtx_lock(&softc->lock);
1472		STAILQ_FOREACH(lun, &softc->lun_list, links) {
1473			if (lun->state != CFI_LUN_READY)
1474				continue;
1475
1476			if (SID_TYPE(&lun->inq_data) != T_DIRECT)
1477				continue;
1478			da_luns++;
1479			io = ctl_alloc_io(softc->fe.ctl_pool_ref);
1480			if (io != NULL) {
1481				ios_allocated++;
1482				STAILQ_INSERT_TAIL(&tmp_io_list, &io->io_hdr,
1483						   links);
1484			}
1485		}
1486
1487		if (ios_allocated < da_luns) {
1488			printf("%s: error allocating ctl_io for %s\n",
1489			       __func__, (do_start == 1) ? "startup" :
1490			       "shutdown");
1491			da_luns = ios_allocated;
1492		}
1493
1494		metatask->taskinfo.startstop.total_luns = da_luns;
1495
1496		STAILQ_FOREACH(lun, &softc->lun_list, links) {
1497			struct cfi_lun_io *lun_io;
1498
1499			if (lun->state != CFI_LUN_READY)
1500				continue;
1501
1502			if (SID_TYPE(&lun->inq_data) != T_DIRECT)
1503				continue;
1504
1505			io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list);
1506			if (io == NULL)
1507				break;
1508
1509			STAILQ_REMOVE(&tmp_io_list, &io->io_hdr, ctl_io_hdr,
1510				      links);
1511
1512			ctl_scsi_start_stop(io,
1513					    /*start*/ do_start,
1514					    /*load_eject*/ 0,
1515					    /*immediate*/ 0,
1516					    /*power_conditions*/
1517					    SSS_PC_START_VALID,
1518					    /*onoffline*/ 1,
1519					    /*ctl_tag_type*/ CTL_TAG_ORDERED,
1520					    /*control*/ 0);
1521
1522			cfi_init_io(io,
1523				    /*lun*/ lun,
1524				    /*metatask*/ metatask,
1525				    /*policy*/ CFI_ERR_HARD,
1526				    /*retries*/ 3,
1527				    /*orig_lun_io*/ NULL,
1528				    /*done_function*/ cfi_lun_io_done);
1529
1530			lun_io = (struct cfi_lun_io *) io->io_hdr.port_priv;
1531
1532			STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1533
1534			if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1535				printf("%s: error returned from ctl_queue()!\n",
1536				       __func__);
1537				STAILQ_REMOVE(&lun->io_list, lun_io,
1538					      cfi_lun_io, links);
1539				ctl_free_io(io);
1540				metatask->taskinfo.startstop.total_luns--;
1541			}
1542		}
1543
1544		if (STAILQ_FIRST(&tmp_io_list) != NULL) {
1545			printf("%s: error: tmp_io_list != NULL\n", __func__);
1546			for (io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list);
1547			     io != NULL;
1548			     io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list)) {
1549				STAILQ_REMOVE(&tmp_io_list, &io->io_hdr,
1550					      ctl_io_hdr, links);
1551				ctl_free_io(io);
1552			}
1553		}
1554		mtx_unlock(&softc->lock);
1555
1556		break;
1557	}
1558	case CFI_TASK_BBRREAD: {
1559		union ctl_io *io;
1560		struct cfi_lun *lun;
1561		struct cfi_lun_io *lun_io;
1562		cfi_bbrread_status status;
1563		int req_lun_num;
1564		uint32_t num_blocks;
1565
1566		status = CFI_BBR_SUCCESS;
1567
1568		req_lun_num = metatask->taskinfo.bbrread.lun_num;
1569
1570		mtx_lock(&softc->lock);
1571		STAILQ_FOREACH(lun, &softc->lun_list, links) {
1572			if (lun->lun_id != req_lun_num)
1573				continue;
1574			if (lun->state != CFI_LUN_READY) {
1575				status = CFI_BBR_LUN_UNCONFIG;
1576				break;
1577			} else
1578				break;
1579		}
1580
1581		if (lun == NULL)
1582			status = CFI_BBR_NO_LUN;
1583
1584		if (status != CFI_BBR_SUCCESS) {
1585			metatask->status = CFI_MT_ERROR;
1586			metatask->taskinfo.bbrread.status = status;
1587			mtx_unlock(&softc->lock);
1588			cfi_metatask_done(softc, metatask);
1589			break;
1590		}
1591
1592		/*
1593		 * Convert the number of bytes given into blocks and check
1594		 * that the number of bytes is a multiple of the blocksize.
1595		 * CTL will verify that the LBA is okay.
1596		 */
1597		if (lun->blocksize_powerof2 != 0) {
1598			if ((metatask->taskinfo.bbrread.len &
1599			    (lun->blocksize - 1)) != 0) {
1600				metatask->status = CFI_MT_ERROR;
1601				metatask->taskinfo.bbrread.status =
1602					CFI_BBR_BAD_LEN;
1603				cfi_metatask_done(softc, metatask);
1604				break;
1605			}
1606
1607			num_blocks = metatask->taskinfo.bbrread.len >>
1608				lun->blocksize_powerof2;
1609		} else {
1610			/*
1611			 * XXX KDM this could result in floating point
1612			 * division, which isn't supported in the kernel on
1613			 * x86 at least.
1614			 */
1615			if ((metatask->taskinfo.bbrread.len %
1616			     lun->blocksize) != 0) {
1617				metatask->status = CFI_MT_ERROR;
1618				metatask->taskinfo.bbrread.status =
1619					CFI_BBR_BAD_LEN;
1620				cfi_metatask_done(softc, metatask);
1621				break;
1622			}
1623
1624			/*
1625			 * XXX KDM this could result in floating point
1626			 * division in some cases.
1627			 */
1628			num_blocks = metatask->taskinfo.bbrread.len /
1629				lun->blocksize;
1630
1631		}
1632
1633		io = ctl_alloc_io(softc->fe.ctl_pool_ref);
1634		if (io == NULL) {
1635			metatask->status = CFI_MT_ERROR;
1636			metatask->taskinfo.bbrread.status = CFI_BBR_NO_MEM;
1637			mtx_unlock(&softc->lock);
1638			cfi_metatask_done(softc, metatask);
1639			break;
1640		}
1641
1642		/*
1643		 * XXX KDM need to do a read capacity to get the blocksize
1644		 * for this device.
1645		 */
1646		ctl_scsi_read_write(io,
1647				    /*data_ptr*/ NULL,
1648				    /*data_len*/ metatask->taskinfo.bbrread.len,
1649				    /*read_op*/ 1,
1650				    /*byte2*/ 0,
1651				    /*minimum_cdb_size*/ 0,
1652				    /*lba*/ metatask->taskinfo.bbrread.lba,
1653				    /*num_blocks*/ num_blocks,
1654				    /*tag_type*/ CTL_TAG_SIMPLE,
1655				    /*control*/ 0);
1656
1657		cfi_init_io(io,
1658			    /*lun*/ lun,
1659			    /*metatask*/ metatask,
1660			    /*policy*/ CFI_ERR_SOFT,
1661			    /*retries*/ 3,
1662			    /*orig_lun_io*/ NULL,
1663			    /*done_function*/ cfi_lun_io_done);
1664
1665		lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1666
1667		STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1668
1669		if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1670			printf("%s: error returned from ctl_queue()!\n",
1671			       __func__);
1672			STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1673			ctl_free_io(io);
1674			metatask->status = CFI_MT_ERROR;
1675			metatask->taskinfo.bbrread.status = CFI_BBR_ERROR;
1676			mtx_unlock(&softc->lock);
1677			cfi_metatask_done(softc, metatask);
1678			break;
1679		}
1680
1681		mtx_unlock(&softc->lock);
1682		break;
1683	}
1684	default:
1685		panic("invalid metatask type %d", metatask->tasktype);
1686		break; /* NOTREACHED */
1687	}
1688}
1689
1690#ifdef oldapi
/*
 * Old-API shelf shutdown entry point: build a CFI_TASK_SHUTDOWN metatask
 * and hand it to cfi_action().  Dead code: only compiled under
 * "#ifdef oldapi", which is never defined.
 *
 * NOTE(review): this calls cfi_action(softc, metatask) with two
 * arguments, but cfi_action() in this file takes only the metatask.
 * This block would not compile if oldapi were ever enabled.
 */
void
cfi_shutdown_shelf(cfi_cb_t callback, void *callback_arg)
{
	struct ctl_mem_element *element;
	struct cfi_softc *softc;
	struct cfi_metatask *metatask;

	softc = &fetd_internal_softc;

	/* Non-blocking pool allocation; report failure through the callback. */
	element = ctl_alloc_mem_element(&softc->metatask_pool, /*can_wait*/ 0);
	if (element == NULL) {
		callback(callback_arg,
			 /*status*/ CFI_MT_ERROR,
			 /*sluns_found*/ 0,
			 /*sluns_complete*/ 0,
			 /*sluns_failed*/ 0);
		return;
	}

	metatask = (struct cfi_metatask *)element->bytes;

	memset(metatask, 0, sizeof(*metatask));
	metatask->tasktype = CFI_TASK_SHUTDOWN;
	metatask->status = CFI_MT_NONE;
	metatask->taskinfo.startstop.callback = callback;
	metatask->taskinfo.startstop.callback_arg = callback_arg;
	metatask->element = element;

	cfi_action(softc, metatask);

	/*
	 * - send a report luns to lun 0, get LUN list.
	 * - send an inquiry to each lun
	 * - send a stop/offline to each direct access LUN
	 *    - if we get a reservation conflict, reset the LUN and then
	 *      retry sending the stop/offline
	 * - return status back to the caller
	 */
}
1730
/*
 * Old-API shelf startup entry point: build a CFI_TASK_STARTUP metatask
 * and hand it to cfi_action().  Dead code: only compiled under
 * "#ifdef oldapi", which is never defined.
 *
 * NOTE(review): this calls cfi_action(softc, metatask) with two
 * arguments, but cfi_action() in this file takes only the metatask.
 * This block would not compile if oldapi were ever enabled.
 */
void
cfi_start_shelf(cfi_cb_t callback, void *callback_arg)
{
	struct ctl_mem_element *element;
	struct cfi_softc *softc;
	struct cfi_metatask *metatask;

	softc = &fetd_internal_softc;

	/* Non-blocking pool allocation; report failure through the callback. */
	element = ctl_alloc_mem_element(&softc->metatask_pool, /*can_wait*/ 0);
	if (element == NULL) {
		callback(callback_arg,
			 /*status*/ CFI_MT_ERROR,
			 /*sluns_found*/ 0,
			 /*sluns_complete*/ 0,
			 /*sluns_failed*/ 0);
		return;
	}

	metatask = (struct cfi_metatask *)element->bytes;

	memset(metatask, 0, sizeof(*metatask));
	metatask->tasktype = CFI_TASK_STARTUP;
	metatask->status = CFI_MT_NONE;
	metatask->taskinfo.startstop.callback = callback;
	metatask->taskinfo.startstop.callback_arg = callback_arg;
	metatask->element = element;

	cfi_action(softc, metatask);

	/*
	 * - send a report luns to lun 0, get LUN list.
	 * - send an inquiry to each lun
	 * - send a stop/offline to each direct access LUN
	 *    - if we get a reservation conflict, reset the LUN and then
	 *      retry sending the stop/offline
	 * - return status back to the caller
	 */
}
1770
1771#endif
1772
1773struct cfi_metatask *
1774cfi_alloc_metatask(int can_wait)
1775{
1776	struct ctl_mem_element *element;
1777	struct cfi_metatask *metatask;
1778	struct cfi_softc *softc;
1779
1780	softc = &fetd_internal_softc;
1781
1782	element = ctl_alloc_mem_element(&softc->metatask_pool, can_wait);
1783	if (element == NULL)
1784		return (NULL);
1785
1786	metatask = (struct cfi_metatask *)element->bytes;
1787	memset(metatask, 0, sizeof(*metatask));
1788	metatask->status = CFI_MT_NONE;
1789	metatask->element = element;
1790
1791	return (metatask);
1792}
1793
/*
 * Release a metatask allocated by cfi_alloc_metatask(); returns the
 * backing pool element to the CTL memory pool.
 */
void
cfi_free_metatask(struct cfi_metatask *metatask)
{
	ctl_free_mem_element(metatask->element);
}
1799
1800/*
1801 * vim: ts=8
1802 */
1803