1/*-
2 * Copyright (c) 2004, 2005 Silicon Graphics International Corp.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions, and the following disclaimer,
10 *    without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 *    substantially similar to the "NO WARRANTY" disclaimer below
13 *    ("Disclaimer") and any redistribution must be conditioned upon
14 *    including a substantially similar Disclaimer requirement for further
15 *    binary redistribution.
16 *
17 * NO WARRANTY
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGES.
29 *
30 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend_internal.c#5 $
31 */
32/*
33 * CTL kernel internal frontend target driver.  This allows kernel-level
34 * clients to send commands into CTL.
35 *
36 * This has elements of a FETD (e.g. it has to set tag numbers, initiator,
37 * port, target, and LUN) and elements of an initiator (LUN discovery and
38 * probing, error recovery, command initiation).  Even though this has some
39 * initiator type elements, this is not intended to be a full fledged
40 * initiator layer.  It is only intended to send a limited number of
41 * commands to a well known target layer.
42 *
43 * To be able to fulfill the role of a full initiator layer, it would need
44 * a whole lot more functionality.
45 *
46 * Author: Ken Merry <ken@FreeBSD.org>
47 *
48 */
49
50#include <sys/cdefs.h>
51__FBSDID("$FreeBSD$");
52
53#include <sys/param.h>
54#include <sys/systm.h>
55#include <sys/kernel.h>
56#include <sys/types.h>
57#include <sys/malloc.h>
58#include <sys/module.h>
59#include <sys/lock.h>
60#include <sys/mutex.h>
61#include <sys/condvar.h>
62#include <sys/queue.h>
63#include <sys/sbuf.h>
64#include <sys/sysctl.h>
65#include <vm/uma.h>
66#include <cam/scsi/scsi_all.h>
67#include <cam/scsi/scsi_da.h>
68#include <cam/ctl/ctl_io.h>
69#include <cam/ctl/ctl.h>
70#include <cam/ctl/ctl_frontend.h>
71#include <cam/ctl/ctl_frontend_internal.h>
72#include <cam/ctl/ctl_backend.h>
73#include <cam/ctl/ctl_ioctl.h>
74#include <cam/ctl/ctl_util.h>
75#include <cam/ctl/ctl_ha.h>
76#include <cam/ctl/ctl_private.h>
77#include <cam/ctl/ctl_debug.h>
78#include <cam/ctl/ctl_scsi_all.h>
79#include <cam/ctl/ctl_error.h>
80
81/*
82 * Task structure:
83 *  - overall metatask, different potential metatask types (e.g. forced
84 *    shutdown, gentle shutdown)
85 *  - forced shutdown metatask:
86 *     - states:  report luns, pending, done?
87 *     - list of luns pending, with the relevant I/O for that lun attached.
88 *       This would allow moving ahead on LUNs with no errors, and going
89 *       into error recovery on LUNs with problems.  Per-LUN states might
90 *       include inquiry, stop/offline, done.
91 *
92 * Use LUN enable for LUN list instead of getting it manually?  We'd still
93 * need inquiry data for each LUN.
94 *
95 * How to handle processor LUN w.r.t. found/stopped counts?
96 */
/*
 * Old (disabled) metatask API definitions.  Nothing defines the "oldapi"
 * symbol, so none of this is compiled in; it is kept for reference only.
 * The live cfi_metatask definition presumably comes from
 * ctl_frontend_internal.h (included above) -- confirm before reviving this.
 */
#ifdef oldapi
typedef enum {
	CFI_TASK_NONE,
	CFI_TASK_SHUTDOWN,
	CFI_TASK_STARTUP
} cfi_tasktype;

struct cfi_task_startstop {
	int total_luns;
	int luns_complete;
	int luns_failed;
	cfi_cb_t callback;
	void *callback_arg;
	/* XXX KDM add more fields here */
};

union cfi_taskinfo {
	struct cfi_task_startstop startstop;
};

struct cfi_metatask {
	cfi_tasktype		tasktype;
	cfi_mt_status		status;
	union cfi_taskinfo	taskinfo;
	void			*cfi_context;
	STAILQ_ENTRY(cfi_metatask) links;
};
#endif
125
/*
 * Disposition for a failed I/O.  The low byte (CFI_ERR_MASK) selects the
 * action: retry, fail outright, or reset the LUN.  CFI_ERR_NO_DECREMENT
 * may be OR'd in to indicate the retry count should not be charged for
 * this error (e.g. unit attentions; see cfi_checkcond_parse()).
 */
typedef enum {
	CFI_ERR_RETRY		= 0x000,
	CFI_ERR_FAIL		= 0x001,
	CFI_ERR_LUN_RESET	= 0x002,
	CFI_ERR_MASK		= 0x0ff,
	CFI_ERR_NO_DECREMENT	= 0x100
} cfi_error_action;

/*
 * Error recovery policy attached to each I/O.  Hard-policy I/O may
 * escalate to a LUN reset on reservation conflict (see cfi_error_parse());
 * soft-policy I/O is only retried.
 */
typedef enum {
	CFI_ERR_SOFT,
	CFI_ERR_HARD
} cfi_error_policy;

/*
 * Probe state machine for a LUN: INQUIRY first, then READ CAPACITY
 * (and READ CAPACITY(16) for LUNs larger than 0xffffffff blocks) for
 * direct access devices, then ready.
 */
typedef enum {
	CFI_LUN_INQUIRY,
	CFI_LUN_READCAPACITY,
	CFI_LUN_READCAPACITY_16,
	CFI_LUN_READY
} cfi_lun_state;

/*
 * Per-LUN state.  The list linkage and io_list are protected by the
 * owning softc's lock; the inquiry/capacity fields are filled in by the
 * probe state machine.
 */
struct cfi_lun {
	struct ctl_id target_id;		/* target this LUN belongs to */
	int lun_id;
	struct scsi_inquiry_data inq_data;	/* filled in by INQUIRY probe */
	uint64_t num_blocks;			/* from READ CAPACITY (maxlba + 1) */
	uint32_t blocksize;			/* bytes per block */
	int blocksize_powerof2;			/* log2(blocksize) when a power of 2 */
	uint32_t cur_tag_num;			/* next SCSI tag number to assign */
	cfi_lun_state state;			/* probe progress */
	struct cfi_softc *softc;		/* back pointer to owning softc */
	STAILQ_HEAD(, cfi_lun_io) io_list;	/* outstanding I/O on this LUN */
	STAILQ_ENTRY(cfi_lun) links;		/* softc lun_list linkage */
};

/*
 * Per-I/O state, stored in the ctl_io's port-private area (see
 * cfi_init_io()).  For error recovery I/O, orig_lun_io points back at
 * the I/O being recovered.
 */
struct cfi_lun_io {
	struct cfi_lun *lun;
	struct cfi_metatask *metatask;		/* owning metatask, or NULL */
	cfi_error_policy policy;
	void (*done_function)(union ctl_io *io);
	union ctl_io *ctl_io;
	struct cfi_lun_io *orig_lun_io;		/* I/O being recovered, or NULL */
	STAILQ_ENTRY(cfi_lun_io) links;		/* lun io_list linkage */
};

/*
 * Softc flags.  CFI_ONLINE is set while the port is online; LUN probing
 * is refused while it is clear (see cfi_lun_probe()).
 */
typedef enum {
	CFI_NONE	= 0x00,
	CFI_ONLINE	= 0x01,
} cfi_flags;

/*
 * Singleton softc for the internal frontend.  The lock protects flags,
 * the LUN and metatask lists, and the per-LUN io_lists.
 */
struct cfi_softc {
	struct ctl_frontend fe;		/* registered with CTL in cfi_init() */
	char fe_name[40];		/* backing storage for fe.port_name */
	struct mtx lock;
	cfi_flags flags;
	STAILQ_HEAD(, cfi_lun) lun_list;
	STAILQ_HEAD(, cfi_metatask) metatask_list;
};
183
/* Malloc type for transient buffers (S/G list copies, READ CAPACITY data). */
MALLOC_DEFINE(M_CTL_CFI, "ctlcfi", "CTL CFI");

/* UMA zones for the fixed-size per-LUN and per-metatask structures. */
static uma_zone_t cfi_lun_zone;
static uma_zone_t cfi_metatask_zone;

/* The one and only instance of this frontend. */
static struct cfi_softc fetd_internal_softc;
/* Global CTL disable tunable; when set, cfi_init() does nothing. */
extern int ctl_disable;

/* Module init/teardown. */
int cfi_init(void);
void cfi_shutdown(void) __unused;
/* ctl_frontend callbacks (see field assignments in cfi_init()). */
static void cfi_online(void *arg);
static void cfi_offline(void *arg);
static int cfi_targ_enable(void *arg, struct ctl_id targ_id);
static int cfi_targ_disable(void *arg, struct ctl_id targ_id);
static int cfi_lun_enable(void *arg, struct ctl_id target_id, int lun_id);
static int cfi_lun_disable(void *arg, struct ctl_id target_id, int lun_id);
static void cfi_datamove(union ctl_io *io);
/* Error parsing and retry/recovery helpers. */
static cfi_error_action cfi_checkcond_parse(union ctl_io *io,
					    struct cfi_lun_io *lun_io);
static cfi_error_action cfi_error_parse(union ctl_io *io,
					struct cfi_lun_io *lun_io);
static void cfi_init_io(union ctl_io *io, struct cfi_lun *lun,
			struct cfi_metatask *metatask, cfi_error_policy policy,
			int retries, struct cfi_lun_io *orig_lun_io,
			void (*done_function)(union ctl_io *io));
static void cfi_done(union ctl_io *io);
/* LUN probe state machine. */
static void cfi_lun_probe_done(union ctl_io *io);
static void cfi_lun_probe(struct cfi_lun *lun, int have_lock);
/* Metatask completion paths. */
static void cfi_metatask_done(struct cfi_softc *softc,
			      struct cfi_metatask *metatask);
static void cfi_metatask_bbr_errorparse(struct cfi_metatask *metatask,
					union ctl_io *io);
static void cfi_metatask_io_done(union ctl_io *io);
static void cfi_err_recovery_done(union ctl_io *io);
static void cfi_lun_io_done(union ctl_io *io);

static int cfi_module_event_handler(module_t, int /*modeventtype_t*/, void *);

/* Kernel module glue; loaded at SI_SUB_CONFIGURE, depends on ctl. */
static moduledata_t cfi_moduledata = {
	"ctlcfi",
	cfi_module_event_handler,
	NULL
};

DECLARE_MODULE(ctlcfi, cfi_moduledata, SI_SUB_CONFIGURE, SI_ORDER_FOURTH);
MODULE_VERSION(ctlcfi, 1);
MODULE_DEPEND(ctlcfi, ctl, 1, 1, 1);
231
232int
233cfi_init(void)
234{
235	struct cfi_softc *softc;
236	struct ctl_frontend *fe;
237	int retval;
238
239	softc = &fetd_internal_softc;
240
241	fe = &softc->fe;
242
243	retval = 0;
244
245	/* If we're disabled, don't initialize */
246	if (ctl_disable != 0)
247		return (0);
248
249	if (sizeof(struct cfi_lun_io) > CTL_PORT_PRIV_SIZE) {
250		printf("%s: size of struct cfi_lun_io %zd > "
251		       "CTL_PORT_PRIV_SIZE %d\n", __func__,
252		       sizeof(struct cfi_lun_io),
253		       CTL_PORT_PRIV_SIZE);
254	}
255	memset(softc, 0, sizeof(*softc));
256
257	mtx_init(&softc->lock, "CTL frontend mutex", NULL, MTX_DEF);
258	softc->flags |= CTL_FLAG_MASTER_SHELF;
259
260	STAILQ_INIT(&softc->lun_list);
261	STAILQ_INIT(&softc->metatask_list);
262	sprintf(softc->fe_name, "CTL internal");
263	fe->port_type = CTL_PORT_INTERNAL;
264	fe->num_requested_ctl_io = 100;
265	fe->port_name = softc->fe_name;
266	fe->port_online = cfi_online;
267	fe->port_offline = cfi_offline;
268	fe->onoff_arg = softc;
269	fe->targ_enable = cfi_targ_enable;
270	fe->targ_disable = cfi_targ_disable;
271	fe->lun_enable = cfi_lun_enable;
272	fe->lun_disable = cfi_lun_disable;
273	fe->targ_lun_arg = softc;
274	fe->fe_datamove = cfi_datamove;
275	fe->fe_done = cfi_done;
276	fe->max_targets = 15;
277	fe->max_target_id = 15;
278
279	if (ctl_frontend_register(fe, (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0)
280	{
281		printf("%s: internal frontend registration failed\n", __func__);
282		return (0);
283	}
284
285	cfi_lun_zone = uma_zcreate("cfi_lun", sizeof(struct cfi_lun),
286	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
287	cfi_metatask_zone = uma_zcreate("cfi_metatask", sizeof(struct cfi_metatask),
288	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
289
290	return (0);
291}
292
293void
294cfi_shutdown(void)
295{
296	struct cfi_softc *softc;
297
298	softc = &fetd_internal_softc;
299
300	/*
301	 * XXX KDM need to clear out any I/O pending on each LUN.
302	 */
303	if (ctl_frontend_deregister(&softc->fe) != 0)
304		printf("%s: ctl_frontend_deregister() failed\n", __func__);
305
306	uma_zdestroy(cfi_lun_zone);
307	uma_zdestroy(cfi_metatask_zone);
308}
309
310static int
311cfi_module_event_handler(module_t mod, int what, void *arg)
312{
313
314	switch (what) {
315	case MOD_LOAD:
316		return (cfi_init());
317	case MOD_UNLOAD:
318		return (EBUSY);
319	default:
320		return (EOPNOTSUPP);
321	}
322}
323
324static void
325cfi_online(void *arg)
326{
327	struct cfi_softc *softc;
328	struct cfi_lun *lun;
329
330	softc = (struct cfi_softc *)arg;
331
332	softc->flags |= CFI_ONLINE;
333
334	/*
335	 * Go through and kick off the probe for each lun.  Should we check
336	 * the LUN flags here to determine whether or not to probe it?
337	 */
338	mtx_lock(&softc->lock);
339	STAILQ_FOREACH(lun, &softc->lun_list, links)
340		cfi_lun_probe(lun, /*have_lock*/ 1);
341	mtx_unlock(&softc->lock);
342}
343
344static void
345cfi_offline(void *arg)
346{
347	struct cfi_softc *softc;
348
349	softc = (struct cfi_softc *)arg;
350
351	softc->flags &= ~CFI_ONLINE;
352}
353
/*
 * Target-enable callback.  Nothing to do for the internal frontend;
 * always succeeds.
 */
static int
cfi_targ_enable(void *arg, struct ctl_id targ_id)
{
	return (0);
}
359
/*
 * Target-disable callback.  Nothing to do for the internal frontend;
 * always succeeds.
 */
static int
cfi_targ_disable(void *arg, struct ctl_id targ_id)
{
	return (0);
}
365
366static int
367cfi_lun_enable(void *arg, struct ctl_id target_id, int lun_id)
368{
369	struct cfi_softc *softc;
370	struct cfi_lun *lun;
371	int found;
372
373	softc = (struct cfi_softc *)arg;
374
375	found = 0;
376	mtx_lock(&softc->lock);
377	STAILQ_FOREACH(lun, &softc->lun_list, links) {
378		if ((lun->target_id.id == target_id.id)
379		 && (lun->lun_id == lun_id)) {
380			found = 1;
381			break;
382		}
383	}
384	mtx_unlock(&softc->lock);
385
386	/*
387	 * If we already have this target/LUN, there is no reason to add
388	 * it to our lists again.
389	 */
390	if (found != 0)
391		return (0);
392
393	lun = uma_zalloc(cfi_lun_zone, M_NOWAIT | M_ZERO);
394	if (lun == NULL) {
395		printf("%s: unable to allocate LUN structure\n", __func__);
396		return (1);
397	}
398
399	lun->target_id = target_id;
400	lun->lun_id = lun_id;
401	lun->cur_tag_num = 0;
402	lun->state = CFI_LUN_INQUIRY;
403	lun->softc = softc;
404	STAILQ_INIT(&lun->io_list);
405
406	mtx_lock(&softc->lock);
407	STAILQ_INSERT_TAIL(&softc->lun_list, lun, links);
408	mtx_unlock(&softc->lock);
409
410	cfi_lun_probe(lun, /*have_lock*/ 0);
411
412	return (0);
413}
414
415static int
416cfi_lun_disable(void *arg, struct ctl_id target_id, int lun_id)
417{
418	struct cfi_softc *softc;
419	struct cfi_lun *lun;
420	int found;
421
422	softc = (struct cfi_softc *)arg;
423
424	found = 0;
425
426	/*
427	 * XXX KDM need to do an invalidate and then a free when any
428	 * pending I/O has completed.  Or do we?  CTL won't free a LUN
429	 * while any I/O is pending.  So we won't get this notification
430	 * unless any I/O we have pending on a LUN has completed.
431	 */
432	mtx_lock(&softc->lock);
433	STAILQ_FOREACH(lun, &softc->lun_list, links) {
434		if ((lun->target_id.id == target_id.id)
435		 && (lun->lun_id == lun_id)) {
436			found = 1;
437			break;
438		}
439	}
440	if (found != 0)
441		STAILQ_REMOVE(&softc->lun_list, lun, cfi_lun, links);
442
443	mtx_unlock(&softc->lock);
444
445	if (found == 0) {
446		printf("%s: can't find target %ju lun %d\n", __func__,
447		       (uintmax_t)target_id.id, lun_id);
448		return (1);
449	}
450
451	uma_zfree(cfi_lun_zone, lun);
452
453	return (0);
454}
455
456static void
457cfi_datamove(union ctl_io *io)
458{
459	struct ctl_sg_entry *ext_sglist, *kern_sglist;
460	struct ctl_sg_entry ext_entry, kern_entry;
461	int ext_sglen, ext_sg_entries, kern_sg_entries;
462	int ext_sg_start, ext_offset;
463	int len_to_copy, len_copied;
464	int kern_watermark, ext_watermark;
465	int ext_sglist_malloced;
466	struct ctl_scsiio *ctsio;
467	int i, j;
468
469	ext_sglist_malloced = 0;
470	ext_sg_start = 0;
471	ext_offset = 0;
472	ext_sglist = NULL;
473
474	CTL_DEBUG_PRINT(("%s\n", __func__));
475
476	ctsio = &io->scsiio;
477
478	/*
479	 * If this is the case, we're probably doing a BBR read and don't
480	 * actually need to transfer the data.  This will effectively
481	 * bit-bucket the data.
482	 */
483	if (ctsio->ext_data_ptr == NULL)
484		goto bailout;
485
486	/*
487	 * To simplify things here, if we have a single buffer, stick it in
488	 * a S/G entry and just make it a single entry S/G list.
489	 */
490	if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
491		int len_seen;
492
493		ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
494
495		ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL_CFI,
496							   M_WAITOK);
497		ext_sglist_malloced = 1;
498		if (memcpy(ext_sglist, ctsio->ext_data_ptr, ext_sglen) != 0) {
499			ctl_set_internal_failure(ctsio,
500						 /*sks_valid*/ 0,
501						 /*retry_count*/ 0);
502			goto bailout;
503		}
504		ext_sg_entries = ctsio->ext_sg_entries;
505		len_seen = 0;
506		for (i = 0; i < ext_sg_entries; i++) {
507			if ((len_seen + ext_sglist[i].len) >=
508			     ctsio->ext_data_filled) {
509				ext_sg_start = i;
510				ext_offset = ctsio->ext_data_filled - len_seen;
511				break;
512			}
513			len_seen += ext_sglist[i].len;
514		}
515	} else {
516		ext_sglist = &ext_entry;
517		ext_sglist->addr = ctsio->ext_data_ptr;
518		ext_sglist->len = ctsio->ext_data_len;
519		ext_sg_entries = 1;
520		ext_sg_start = 0;
521		ext_offset = ctsio->ext_data_filled;
522	}
523
524	if (ctsio->kern_sg_entries > 0) {
525		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
526		kern_sg_entries = ctsio->kern_sg_entries;
527	} else {
528		kern_sglist = &kern_entry;
529		kern_sglist->addr = ctsio->kern_data_ptr;
530		kern_sglist->len = ctsio->kern_data_len;
531		kern_sg_entries = 1;
532	}
533
534
535	kern_watermark = 0;
536	ext_watermark = ext_offset;
537	len_copied = 0;
538	for (i = ext_sg_start, j = 0;
539	     i < ext_sg_entries && j < kern_sg_entries;) {
540		uint8_t *ext_ptr, *kern_ptr;
541
542		len_to_copy = ctl_min(ext_sglist[i].len - ext_watermark,
543				      kern_sglist[j].len - kern_watermark);
544
545		ext_ptr = (uint8_t *)ext_sglist[i].addr;
546		ext_ptr = ext_ptr + ext_watermark;
547		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
548			/*
549			 * XXX KDM fix this!
550			 */
551			panic("need to implement bus address support");
552#if 0
553			kern_ptr = bus_to_virt(kern_sglist[j].addr);
554#endif
555		} else
556			kern_ptr = (uint8_t *)kern_sglist[j].addr;
557		kern_ptr = kern_ptr + kern_watermark;
558
559		kern_watermark += len_to_copy;
560		ext_watermark += len_to_copy;
561
562		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
563		     CTL_FLAG_DATA_IN) {
564			CTL_DEBUG_PRINT(("%s: copying %d bytes to user\n",
565					 __func__, len_to_copy));
566			CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
567					 kern_ptr, ext_ptr));
568			memcpy(ext_ptr, kern_ptr, len_to_copy);
569		} else {
570			CTL_DEBUG_PRINT(("%s: copying %d bytes from user\n",
571					 __func__, len_to_copy));
572			CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
573					 ext_ptr, kern_ptr));
574			memcpy(kern_ptr, ext_ptr, len_to_copy);
575		}
576
577		len_copied += len_to_copy;
578
579		if (ext_sglist[i].len == ext_watermark) {
580			i++;
581			ext_watermark = 0;
582		}
583
584		if (kern_sglist[j].len == kern_watermark) {
585			j++;
586			kern_watermark = 0;
587		}
588	}
589
590	ctsio->ext_data_filled += len_copied;
591
592	CTL_DEBUG_PRINT(("%s: ext_sg_entries: %d, kern_sg_entries: %d\n",
593			 __func__, ext_sg_entries, kern_sg_entries));
594	CTL_DEBUG_PRINT(("%s: ext_data_len = %d, kern_data_len = %d\n",
595			 __func__, ctsio->ext_data_len, ctsio->kern_data_len));
596
597
598	/* XXX KDM set residual?? */
599bailout:
600
601	if (ext_sglist_malloced != 0)
602		free(ext_sglist, M_CTL_CFI);
603
604	io->scsiio.be_move_done(io);
605
606	return;
607}
608
609/*
610 * For any sort of check condition, busy, etc., we just retry.  We do not
611 * decrement the retry count for unit attention type errors.  These are
612 * normal, and we want to save the retry count for "real" errors.  Otherwise,
613 * we could end up with situations where a command will succeed in some
614 * situations and fail in others, depending on whether a unit attention is
615 * pending.  Also, some of our error recovery actions, most notably the
616 * LUN reset action, will cause a unit attention.
617 *
618 * We can add more detail here later if necessary.
619 */
620static cfi_error_action
621cfi_checkcond_parse(union ctl_io *io, struct cfi_lun_io *lun_io)
622{
623	cfi_error_action error_action;
624	int error_code, sense_key, asc, ascq;
625
626	/*
627	 * Default to retrying the command.
628	 */
629	error_action = CFI_ERR_RETRY;
630
631	scsi_extract_sense_len(&io->scsiio.sense_data,
632			       io->scsiio.sense_len,
633			       &error_code,
634			       &sense_key,
635			       &asc,
636			       &ascq,
637			       /*show_errors*/ 1);
638
639	switch (error_code) {
640	case SSD_DEFERRED_ERROR:
641	case SSD_DESC_DEFERRED_ERROR:
642		error_action |= CFI_ERR_NO_DECREMENT;
643		break;
644	case SSD_CURRENT_ERROR:
645	case SSD_DESC_CURRENT_ERROR:
646	default: {
647		switch (sense_key) {
648		case SSD_KEY_UNIT_ATTENTION:
649			error_action |= CFI_ERR_NO_DECREMENT;
650			break;
651		case SSD_KEY_HARDWARE_ERROR:
652			/*
653			 * This is our generic "something bad happened"
654			 * error code.  It often isn't recoverable.
655			 */
656			if ((asc == 0x44) && (ascq == 0x00))
657				error_action = CFI_ERR_FAIL;
658			break;
659		case SSD_KEY_NOT_READY:
660			/*
661			 * If the LUN is powered down, there likely isn't
662			 * much point in retrying right now.
663			 */
664			if ((asc == 0x04) && (ascq == 0x02))
665				error_action = CFI_ERR_FAIL;
666			/*
667			 * If the LUN is offline, there probably isn't much
668			 * point in retrying, either.
669			 */
670			if ((asc == 0x04) && (ascq == 0x03))
671				error_action = CFI_ERR_FAIL;
672			break;
673		}
674	}
675	}
676
677	return (error_action);
678}
679
680static cfi_error_action
681cfi_error_parse(union ctl_io *io, struct cfi_lun_io *lun_io)
682{
683	cfi_error_action error_action;
684
685	error_action = CFI_ERR_RETRY;
686
687	switch (io->io_hdr.io_type) {
688	case CTL_IO_SCSI:
689		switch (io->io_hdr.status & CTL_STATUS_MASK) {
690		case CTL_SCSI_ERROR:
691			switch (io->scsiio.scsi_status) {
692			case SCSI_STATUS_RESERV_CONFLICT:
693				/*
694				 * For a reservation conflict, we'll usually
695				 * want the hard error recovery policy, so
696				 * we'll reset the LUN.
697				 */
698				if (lun_io->policy == CFI_ERR_HARD)
699					error_action =
700						CFI_ERR_LUN_RESET;
701				else
702					error_action =
703						CFI_ERR_RETRY;
704				break;
705			case SCSI_STATUS_CHECK_COND:
706			default:
707				error_action = cfi_checkcond_parse(io, lun_io);
708				break;
709			}
710			break;
711		default:
712			error_action = CFI_ERR_RETRY;
713			break;
714		}
715		break;
716	case CTL_IO_TASK:
717		/*
718		 * In theory task management commands shouldn't fail...
719		 */
720		error_action = CFI_ERR_RETRY;
721		break;
722	default:
723		printf("%s: invalid ctl_io type %d\n", __func__,
724		       io->io_hdr.io_type);
725		panic("%s: invalid ctl_io type %d\n", __func__,
726		      io->io_hdr.io_type);
727		break;
728	}
729
730	return (error_action);
731}
732
/*
 * Fill in the common parts of a ctl_io before it is queued: the nexus
 * (initiator/port/target/LUN), the retry count, and our per-I/O
 * cfi_lun_io state stashed in the ctl_io's port private area.
 */
static void
cfi_init_io(union ctl_io *io, struct cfi_lun *lun,
	    struct cfi_metatask *metatask, cfi_error_policy policy, int retries,
	    struct cfi_lun_io *orig_lun_io,
	    void (*done_function)(union ctl_io *io))
{
	struct cfi_lun_io *lun_io;

	/* Fixed initiator ID used for all internal-frontend I/O. */
	io->io_hdr.nexus.initid.id = 7;
	io->io_hdr.nexus.targ_port = lun->softc->fe.targ_port;
	io->io_hdr.nexus.targ_target.id = lun->target_id.id;
	io->io_hdr.nexus.targ_lun = lun->lun_id;
	io->io_hdr.retries = retries;
	/*
	 * Per-I/O state lives in port_priv; cfi_init() checks that
	 * struct cfi_lun_io fits there.
	 */
	lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = lun_io;
	lun_io->lun = lun;
	lun_io->metatask = metatask;
	lun_io->ctl_io = io;
	lun_io->policy = policy;
	lun_io->orig_lun_io = orig_lun_io;
	lun_io->done_function = done_function;
	/*
	 * We only set the tag number for SCSI I/Os.  For task management
	 * commands, the tag number is only really needed for aborts, so
	 * the caller can set it if necessary.
	 */
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		io->scsiio.tag_num = lun->cur_tag_num++;
		break;
	case CTL_IO_TASK:
	default:
		break;
	}
}
768
/*
 * Completion handler for all I/O queued through this frontend.  Applies
 * minimal retry/recovery (retry on error while retries remain, LUN
 * reset for hard-policy reservation conflicts) and otherwise hands the
 * I/O to the per-I/O done_function set in cfi_init_io().
 */
static void
cfi_done(union ctl_io *io)
{
	struct cfi_lun_io *lun_io;
	struct cfi_softc *softc;
	struct cfi_lun *lun;

	lun_io = (struct cfi_lun_io *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	lun = lun_io->lun;
	softc = lun->softc;

	/*
	 * Very minimal retry logic.  We basically retry if we got an error
	 * back, and the retry count is greater than 0.  If we ever want
	 * more sophisticated initiator type behavior, the CAM error
	 * recovery code in ../common might be helpful.
	 */
	if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
	 && (io->io_hdr.retries > 0)) {
		ctl_io_status old_status;
		cfi_error_action error_action;

		error_action = cfi_error_parse(io, lun_io);

		switch (error_action & CFI_ERR_MASK) {
		case CFI_ERR_FAIL:
			/* Unrecoverable: complete with the error status. */
			goto done;
			break; /* NOTREACHED */
		case CFI_ERR_LUN_RESET: {
			/*
			 * Build a LUN reset task I/O; its orig_lun_io
			 * links back to the failed I/O for the recovery
			 * completion handler (cfi_err_recovery_done).
			 */
			union ctl_io *new_io;
			struct cfi_lun_io *new_lun_io;

			new_io = ctl_alloc_io(softc->fe.ctl_pool_ref);
			if (new_io == NULL) {
				printf("%s: unable to allocate ctl_io for "
				       "error recovery\n", __func__);
				goto done;
			}
			ctl_zero_io(new_io);

			new_io->io_hdr.io_type = CTL_IO_TASK;
			new_io->taskio.task_action = CTL_TASK_LUN_RESET;

			cfi_init_io(new_io,
				    /*lun*/ lun_io->lun,
				    /*metatask*/ NULL,
				    /*policy*/ CFI_ERR_SOFT,
				    /*retries*/ 0,
				    /*orig_lun_io*/lun_io,
				    /*done_function*/ cfi_err_recovery_done);


			new_lun_io = (struct cfi_lun_io *)
				new_io->io_hdr.port_priv;

			mtx_lock(&lun->softc->lock);
			STAILQ_INSERT_TAIL(&lun->io_list, new_lun_io, links);
			mtx_unlock(&lun->softc->lock);

			/* Queue the reset below instead of the failed I/O. */
			io = new_io;
			break;
		}
		case CFI_ERR_RETRY:
		default:
			/* Unit attentions etc. don't consume a retry. */
			if ((error_action & CFI_ERR_NO_DECREMENT) == 0)
				io->io_hdr.retries--;
			break;
		}

		/* Reset status/flags from the previous pass through CTL. */
		old_status = io->io_hdr.status;
		io->io_hdr.status = CTL_STATUS_NONE;
#if 0
		io->io_hdr.flags &= ~CTL_FLAG_ALREADY_DONE;
#endif
		io->io_hdr.flags &= ~CTL_FLAG_ABORT;
		io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;

		if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
			printf("%s: error returned from ctl_queue()!\n",
			       __func__);
			/* Requeue failed; restore and complete below. */
			io->io_hdr.status = old_status;
		} else
			return;
	}
done:
	lun_io->done_function(io);
}
858
/*
 * Completion handler for LUN probe I/O (INQUIRY, READ CAPACITY and
 * READ CAPACITY(16)).  Advances the LUN's probe state machine, kicking
 * off the next probe step via cfi_lun_probe() where needed, and frees
 * the completed probe I/O.
 */
static void
cfi_lun_probe_done(union ctl_io *io)
{
	struct cfi_lun *lun;
	struct cfi_lun_io *lun_io;

	lun_io = (struct cfi_lun_io *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
	lun = lun_io->lun;

	switch (lun->state) {
	case CFI_LUN_INQUIRY: {
		if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
			/* print out something here?? */
			printf("%s: LUN %d probe failed because inquiry "
			       "failed\n", __func__, lun->lun_id);
			ctl_io_error_print(io, NULL);
		} else {

			/*
			 * Only direct access devices get a READ CAPACITY
			 * probe; anything else is ready as soon as the
			 * inquiry data is in hand.
			 */
			if (SID_TYPE(&lun->inq_data) != T_DIRECT) {
				char path_str[40];

				lun->state = CFI_LUN_READY;
				ctl_scsi_path_string(io, path_str,
						     sizeof(path_str));
				printf("%s", path_str);
				scsi_print_inquiry(&lun->inq_data);
			} else {
				lun->state = CFI_LUN_READCAPACITY;
				cfi_lun_probe(lun, /*have_lock*/ 0);
			}
		}
		/* The probe I/O is finished; unlink and free it. */
		mtx_lock(&lun->softc->lock);
		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
		mtx_unlock(&lun->softc->lock);
		ctl_free_io(io);
		break;
	}
	case CFI_LUN_READCAPACITY:
	case CFI_LUN_READCAPACITY_16: {
		uint64_t maxlba;
		uint32_t blocksize;

		maxlba = 0;
		blocksize = 0;

		if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
			printf("%s: LUN %d probe failed because READ CAPACITY "
			       "failed\n", __func__, lun->lun_id);
			ctl_io_error_print(io, NULL);
		} else {

			if (lun->state == CFI_LUN_READCAPACITY) {
				struct scsi_read_capacity_data *rdcap;

				rdcap = (struct scsi_read_capacity_data *)
					io->scsiio.ext_data_ptr;

				maxlba = scsi_4btoul(rdcap->addr);
				blocksize = scsi_4btoul(rdcap->length);
				if (blocksize == 0) {
					printf("%s: LUN %d has invalid "
					       "blocksize 0, probe aborted\n",
					       __func__, lun->lun_id);
				} else if (maxlba == 0xffffffff) {
					/*
					 * 0xffffffff means the LUN is too
					 * big for READ CAPACITY(10); try
					 * the 16-byte variant instead.
					 */
					lun->state = CFI_LUN_READCAPACITY_16;
					cfi_lun_probe(lun, /*have_lock*/ 0);
				} else
					lun->state = CFI_LUN_READY;
			} else {
				struct scsi_read_capacity_data_long *rdcap_long;

				rdcap_long = (struct
					scsi_read_capacity_data_long *)
					io->scsiio.ext_data_ptr;
				maxlba = scsi_8btou64(rdcap_long->addr);
				blocksize = scsi_4btoul(rdcap_long->length);

				if (blocksize == 0) {
					printf("%s: LUN %d has invalid "
					       "blocksize 0, probe aborted\n",
					       __func__, lun->lun_id);
				} else
					lun->state = CFI_LUN_READY;
			}
		}

		if (lun->state == CFI_LUN_READY) {
			char path_str[40];

			lun->num_blocks = maxlba + 1;
			lun->blocksize = blocksize;

			/*
			 * If this is true, the blocksize is a power of 2.
			 * We already checked for 0 above.
			 */
			if (((blocksize - 1) & blocksize) == 0) {
				int i;

				/* Find and record log2(blocksize). */
				for (i = 0; i < 32; i++) {
					if ((blocksize & (1 << i)) != 0) {
						lun->blocksize_powerof2 = i;
						break;
					}
				}
			}
			ctl_scsi_path_string(io, path_str,sizeof(path_str));
			printf("%s", path_str);
			scsi_print_inquiry(&lun->inq_data);
			printf("%s %ju blocks, blocksize %d\n", path_str,
			       (uintmax_t)maxlba + 1, blocksize);
		}
		/* Release the probe I/O along with its data buffer. */
		mtx_lock(&lun->softc->lock);
		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
		mtx_unlock(&lun->softc->lock);
		free(io->scsiio.ext_data_ptr, M_CTL_CFI);
		ctl_free_io(io);
		break;
	}
	case CFI_LUN_READY:
	default:
		mtx_lock(&lun->softc->lock);
		/* How did we get here?? */
		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
		mtx_unlock(&lun->softc->lock);
		ctl_free_io(io);
		break;
	}
}
989
990static void
991cfi_lun_probe(struct cfi_lun *lun, int have_lock)
992{
993
994	if (have_lock == 0)
995		mtx_lock(&lun->softc->lock);
996	if ((lun->softc->flags & CFI_ONLINE) == 0) {
997		if (have_lock == 0)
998			mtx_unlock(&lun->softc->lock);
999		return;
1000	}
1001	if (have_lock == 0)
1002		mtx_unlock(&lun->softc->lock);
1003
1004	switch (lun->state) {
1005	case CFI_LUN_INQUIRY: {
1006		struct cfi_lun_io *lun_io;
1007		union ctl_io *io;
1008
1009		io = ctl_alloc_io(lun->softc->fe.ctl_pool_ref);
1010		if (io == NULL) {
1011			printf("%s: unable to alloc ctl_io for target %ju "
1012			       "lun %d probe\n", __func__,
1013			       (uintmax_t)lun->target_id.id, lun->lun_id);
1014			return;
1015		}
1016		ctl_scsi_inquiry(io,
1017				 /*data_ptr*/(uint8_t *)&lun->inq_data,
1018				 /*data_len*/ sizeof(lun->inq_data),
1019				 /*byte2*/ 0,
1020				 /*page_code*/ 0,
1021				 /*tag_type*/ CTL_TAG_SIMPLE,
1022				 /*control*/ 0);
1023
1024		cfi_init_io(io,
1025			    /*lun*/ lun,
1026			    /*metatask*/ NULL,
1027			    /*policy*/ CFI_ERR_SOFT,
1028			    /*retries*/ 5,
1029			    /*orig_lun_io*/ NULL,
1030			    /*done_function*/
1031			    cfi_lun_probe_done);
1032
1033		lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1034
1035		if (have_lock == 0)
1036			mtx_lock(&lun->softc->lock);
1037		STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1038		if (have_lock == 0)
1039			mtx_unlock(&lun->softc->lock);
1040
1041		if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1042			printf("%s: error returned from ctl_queue()!\n",
1043			       __func__);
1044			STAILQ_REMOVE(&lun->io_list, lun_io,
1045				      cfi_lun_io, links);
1046			ctl_free_io(io);
1047		}
1048		break;
1049	}
1050	case CFI_LUN_READCAPACITY:
1051	case CFI_LUN_READCAPACITY_16: {
1052		struct cfi_lun_io *lun_io;
1053		uint8_t *dataptr;
1054		union ctl_io *io;
1055
1056		io = ctl_alloc_io(lun->softc->fe.ctl_pool_ref);
1057		if (io == NULL) {
1058			printf("%s: unable to alloc ctl_io for target %ju "
1059			       "lun %d probe\n", __func__,
1060			       (uintmax_t)lun->target_id.id, lun->lun_id);
1061			return;
1062		}
1063
1064		dataptr = malloc(sizeof(struct scsi_read_capacity_data_long),
1065				 M_CTL_CFI, M_NOWAIT);
1066		if (dataptr == NULL) {
1067			printf("%s: unable to allocate SCSI read capacity "
1068			       "buffer for target %ju lun %d\n", __func__,
1069			       (uintmax_t)lun->target_id.id, lun->lun_id);
1070			return;
1071		}
1072		if (lun->state == CFI_LUN_READCAPACITY) {
1073			ctl_scsi_read_capacity(io,
1074				/*data_ptr*/ dataptr,
1075				/*data_len*/
1076				sizeof(struct scsi_read_capacity_data_long),
1077				/*addr*/ 0,
1078				/*reladr*/ 0,
1079				/*pmi*/ 0,
1080				/*tag_type*/ CTL_TAG_SIMPLE,
1081				/*control*/ 0);
1082		} else {
1083			ctl_scsi_read_capacity_16(io,
1084				/*data_ptr*/ dataptr,
1085				/*data_len*/
1086				sizeof(struct scsi_read_capacity_data_long),
1087				/*addr*/ 0,
1088				/*reladr*/ 0,
1089				/*pmi*/ 0,
1090				/*tag_type*/ CTL_TAG_SIMPLE,
1091				/*control*/ 0);
1092		}
1093		cfi_init_io(io,
1094			    /*lun*/ lun,
1095			    /*metatask*/ NULL,
1096			    /*policy*/ CFI_ERR_SOFT,
1097			    /*retries*/ 7,
1098			    /*orig_lun_io*/ NULL,
1099			    /*done_function*/ cfi_lun_probe_done);
1100
1101		lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1102
1103		if (have_lock == 0)
1104			mtx_lock(&lun->softc->lock);
1105		STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1106		if (have_lock == 0)
1107			mtx_unlock(&lun->softc->lock);
1108
1109		if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1110			printf("%s: error returned from ctl_queue()!\n",
1111			       __func__);
1112			STAILQ_REMOVE(&lun->io_list, lun_io,
1113				      cfi_lun_io, links);
1114			free(dataptr, M_CTL_CFI);
1115			ctl_free_io(io);
1116		}
1117		break;
1118	}
1119	case CFI_LUN_READY:
1120	default:
1121		/* Why were we called? */
1122		break;
1123	}
1124}
1125
static void
cfi_metatask_done(struct cfi_softc *softc, struct cfi_metatask *metatask)
{
	/*
	 * Complete a metatask: unlink it from the softc's active list and
	 * notify the submitter.  This acquires softc->lock itself, so the
	 * caller must NOT hold it (see the unlock-before-done pattern in
	 * cfi_action()).
	 */
	mtx_lock(&softc->lock);
	STAILQ_REMOVE(&softc->metatask_list, metatask, cfi_metatask, links);
	mtx_unlock(&softc->lock);

	/*
	 * Return status to the caller.  Caller allocated storage, and is
	 * responsible for calling cfi_free_metatask to release it once
	 * they've seen the status.
	 */
	metatask->callback(metatask->callback_arg, metatask);
}
1140
1141static void
1142cfi_metatask_bbr_errorparse(struct cfi_metatask *metatask, union ctl_io *io)
1143{
1144	int error_code, sense_key, asc, ascq;
1145
1146	if (metatask->tasktype != CFI_TASK_BBRREAD)
1147		return;
1148
1149	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
1150		metatask->status = CFI_MT_SUCCESS;
1151		metatask->taskinfo.bbrread.status = CFI_BBR_SUCCESS;
1152		return;
1153	}
1154
1155	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SCSI_ERROR) {
1156		metatask->status = CFI_MT_ERROR;
1157		metatask->taskinfo.bbrread.status = CFI_BBR_ERROR;
1158		return;
1159	}
1160
1161	metatask->taskinfo.bbrread.scsi_status = io->scsiio.scsi_status;
1162	memcpy(&metatask->taskinfo.bbrread.sense_data, &io->scsiio.sense_data,
1163	       ctl_min(sizeof(metatask->taskinfo.bbrread.sense_data),
1164		       sizeof(io->scsiio.sense_data)));
1165
1166	if (io->scsiio.scsi_status == SCSI_STATUS_RESERV_CONFLICT) {
1167		metatask->status = CFI_MT_ERROR;
1168		metatask->taskinfo.bbrread.status = CFI_BBR_RESERV_CONFLICT;
1169		return;
1170	}
1171
1172	if (io->scsiio.scsi_status != SCSI_STATUS_CHECK_COND) {
1173		metatask->status = CFI_MT_ERROR;
1174		metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
1175		return;
1176	}
1177
1178	scsi_extract_sense_len(&io->scsiio.sense_data,
1179			       io->scsiio.sense_len,
1180			       &error_code,
1181			       &sense_key,
1182			       &asc,
1183			       &ascq,
1184			       /*show_errors*/ 1);
1185
1186	switch (error_code) {
1187	case SSD_DEFERRED_ERROR:
1188	case SSD_DESC_DEFERRED_ERROR:
1189		metatask->status = CFI_MT_ERROR;
1190		metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
1191		break;
1192	case SSD_CURRENT_ERROR:
1193	case SSD_DESC_CURRENT_ERROR:
1194	default: {
1195		struct scsi_sense_data *sense;
1196
1197		sense = &io->scsiio.sense_data;
1198
1199		if ((asc == 0x04) && (ascq == 0x02)) {
1200			metatask->status = CFI_MT_ERROR;
1201			metatask->taskinfo.bbrread.status = CFI_BBR_LUN_STOPPED;
1202		} else if ((asc == 0x04) && (ascq == 0x03)) {
1203			metatask->status = CFI_MT_ERROR;
1204			metatask->taskinfo.bbrread.status =
1205				CFI_BBR_LUN_OFFLINE_CTL;
1206		} else if ((asc == 0x44) && (ascq == 0x00)) {
1207#ifdef NEEDTOPORT
1208			if (sense->sense_key_spec[0] & SSD_SCS_VALID) {
1209				uint16_t retry_count;
1210
1211				retry_count = sense->sense_key_spec[1] << 8 |
1212					      sense->sense_key_spec[2];
1213				if (((retry_count & 0xf000) == CSC_RAIDCORE)
1214				 && ((retry_count & 0x0f00) == CSC_SHELF_SW)
1215				 && ((retry_count & 0xff) ==
1216				      RC_STS_DEVICE_OFFLINE)) {
1217					metatask->status = CFI_MT_ERROR;
1218					metatask->taskinfo.bbrread.status =
1219						CFI_BBR_LUN_OFFLINE_RC;
1220				} else {
1221					metatask->status = CFI_MT_ERROR;
1222					metatask->taskinfo.bbrread.status =
1223						CFI_BBR_SCSI_ERROR;
1224				}
1225			} else {
1226#endif /* NEEDTOPORT */
1227				metatask->status = CFI_MT_ERROR;
1228				metatask->taskinfo.bbrread.status =
1229					CFI_BBR_SCSI_ERROR;
1230#ifdef NEEDTOPORT
1231			}
1232#endif
1233		} else {
1234			metatask->status = CFI_MT_ERROR;
1235			metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
1236		}
1237		break;
1238	}
1239	}
1240}
1241
/*
 * Per-io completion handler for ios issued on behalf of a metatask.
 * Recovers the cfi_lun_io / metatask context from the io's frontend
 * private pointer, updates the metatask's progress counters, and fires
 * cfi_metatask_done() once the whole metatask has finished.
 */
static void
cfi_metatask_io_done(union ctl_io *io)
{
	struct cfi_lun_io *lun_io;
	struct cfi_metatask *metatask;
	struct cfi_softc *softc;
	struct cfi_lun *lun;

	lun_io = (struct cfi_lun_io *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	lun = lun_io->lun;
	softc = lun->softc;

	metatask = lun_io->metatask;

	switch (metatask->tasktype) {
	case CFI_TASK_STARTUP:
	case CFI_TASK_SHUTDOWN: {
		int failed, done, is_start;

		failed = 0;
		done = 0;
		if (metatask->tasktype == CFI_TASK_STARTUP)
			is_start = 1;
		else
			is_start = 0;

		/* Progress counters are shared across LUN ios; update them
		 * and evaluate completion under the softc lock. */
		mtx_lock(&softc->lock);
		if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
			metatask->taskinfo.startstop.luns_complete++;
		else {
			metatask->taskinfo.startstop.luns_failed++;
			failed = 1;
		}
		if ((metatask->taskinfo.startstop.luns_complete +
		     metatask->taskinfo.startstop.luns_failed) >=
		     metatask->taskinfo.startstop.total_luns)
			done = 1;

		mtx_unlock(&softc->lock);

		if (failed != 0) {
			printf("%s: LUN %d %s request failed\n", __func__,
			       lun_io->lun->lun_id, (is_start == 1) ? "start" :
			       "stop");
			ctl_io_error_print(io, &lun_io->lun->inq_data);
		}
		/*
		 * NOTE(review): the completion callback is invoked before
		 * this io is unlinked and freed below (the BBRREAD case
		 * does it the other way around) -- presumably safe because
		 * the callback only consumes the metatask, not the io, but
		 * worth confirming.
		 */
		if (done != 0) {
			if (metatask->taskinfo.startstop.luns_failed > 0)
				metatask->status = CFI_MT_ERROR;
			else
				metatask->status = CFI_MT_SUCCESS;
			cfi_metatask_done(softc, metatask);
		}
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
		mtx_unlock(&softc->lock);

		ctl_free_io(io);
		break;
	}
	case CFI_TASK_BBRREAD: {
		/*
		 * Translate the SCSI error into an enumeration.
		 */
		cfi_metatask_bbr_errorparse(metatask, io);

		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
		mtx_unlock(&softc->lock);

		ctl_free_io(io);

		/* A BBR read is a single io, so its completion always
		 * completes the metatask. */
		cfi_metatask_done(softc, metatask);
		break;
	}
	default:
		/*
		 * This shouldn't happen.
		 */
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
		mtx_unlock(&softc->lock);

		ctl_free_io(io);
		break;
	}
}
1331
1332static void
1333cfi_err_recovery_done(union ctl_io *io)
1334{
1335	struct cfi_lun_io *lun_io, *orig_lun_io;
1336	struct cfi_lun *lun;
1337	union ctl_io *orig_io;
1338
1339	lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1340	orig_lun_io = lun_io->orig_lun_io;
1341	orig_io = orig_lun_io->ctl_io;
1342	lun = lun_io->lun;
1343
1344	if (io->io_hdr.status != CTL_SUCCESS) {
1345		printf("%s: error recovery action failed.  Original "
1346		       "error:\n", __func__);
1347
1348		ctl_io_error_print(orig_lun_io->ctl_io, &lun->inq_data);
1349
1350		printf("%s: error from error recovery action:\n", __func__);
1351
1352		ctl_io_error_print(io, &lun->inq_data);
1353
1354		printf("%s: trying original command again...\n", __func__);
1355	}
1356
1357	mtx_lock(&lun->softc->lock);
1358	STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1359	mtx_unlock(&lun->softc->lock);
1360	ctl_free_io(io);
1361
1362	orig_io->io_hdr.retries--;
1363	orig_io->io_hdr.status = CTL_STATUS_NONE;
1364
1365	if (ctl_queue(orig_io) != CTL_RETVAL_COMPLETE) {
1366		printf("%s: error returned from ctl_queue()!\n", __func__);
1367		STAILQ_REMOVE(&lun->io_list, orig_lun_io,
1368			      cfi_lun_io, links);
1369		ctl_free_io(orig_io);
1370	}
1371}
1372
1373static void
1374cfi_lun_io_done(union ctl_io *io)
1375{
1376	struct cfi_lun *lun;
1377	struct cfi_lun_io *lun_io;
1378
1379	lun_io = (struct cfi_lun_io *)
1380		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1381	lun = lun_io->lun;
1382
1383	if (lun_io->metatask == NULL) {
1384		printf("%s: I/O has no metatask pointer, discarding\n",
1385		       __func__);
1386		STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1387		ctl_free_io(io);
1388		return;
1389	}
1390	cfi_metatask_io_done(io);
1391}
1392
1393void
1394cfi_action(struct cfi_metatask *metatask)
1395{
1396	struct cfi_softc *softc;
1397
1398	softc = &fetd_internal_softc;
1399
1400	mtx_lock(&softc->lock);
1401
1402	STAILQ_INSERT_TAIL(&softc->metatask_list, metatask, links);
1403
1404	if ((softc->flags & CFI_ONLINE) == 0) {
1405		mtx_unlock(&softc->lock);
1406		metatask->status = CFI_MT_PORT_OFFLINE;
1407		cfi_metatask_done(softc, metatask);
1408		return;
1409	} else
1410		mtx_unlock(&softc->lock);
1411
1412	switch (metatask->tasktype) {
1413	case CFI_TASK_STARTUP:
1414	case CFI_TASK_SHUTDOWN: {
1415		union ctl_io *io;
1416		int da_luns, ios_allocated, do_start;
1417		struct cfi_lun *lun;
1418		STAILQ_HEAD(, ctl_io_hdr) tmp_io_list;
1419
1420		da_luns = 0;
1421		ios_allocated = 0;
1422		STAILQ_INIT(&tmp_io_list);
1423
1424		if (metatask->tasktype == CFI_TASK_STARTUP)
1425			do_start = 1;
1426		else
1427			do_start = 0;
1428
1429		mtx_lock(&softc->lock);
1430		STAILQ_FOREACH(lun, &softc->lun_list, links) {
1431			if (lun->state != CFI_LUN_READY)
1432				continue;
1433
1434			if (SID_TYPE(&lun->inq_data) != T_DIRECT)
1435				continue;
1436			da_luns++;
1437			io = ctl_alloc_io(softc->fe.ctl_pool_ref);
1438			if (io != NULL) {
1439				ios_allocated++;
1440				STAILQ_INSERT_TAIL(&tmp_io_list, &io->io_hdr,
1441						   links);
1442			}
1443		}
1444
1445		if (ios_allocated < da_luns) {
1446			printf("%s: error allocating ctl_io for %s\n",
1447			       __func__, (do_start == 1) ? "startup" :
1448			       "shutdown");
1449			da_luns = ios_allocated;
1450		}
1451
1452		metatask->taskinfo.startstop.total_luns = da_luns;
1453
1454		STAILQ_FOREACH(lun, &softc->lun_list, links) {
1455			struct cfi_lun_io *lun_io;
1456
1457			if (lun->state != CFI_LUN_READY)
1458				continue;
1459
1460			if (SID_TYPE(&lun->inq_data) != T_DIRECT)
1461				continue;
1462
1463			io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list);
1464			if (io == NULL)
1465				break;
1466
1467			STAILQ_REMOVE(&tmp_io_list, &io->io_hdr, ctl_io_hdr,
1468				      links);
1469
1470			ctl_scsi_start_stop(io,
1471					    /*start*/ do_start,
1472					    /*load_eject*/ 0,
1473					    /*immediate*/ 0,
1474					    /*power_conditions*/
1475					    SSS_PC_START_VALID,
1476					    /*onoffline*/ 1,
1477					    /*ctl_tag_type*/ CTL_TAG_ORDERED,
1478					    /*control*/ 0);
1479
1480			cfi_init_io(io,
1481				    /*lun*/ lun,
1482				    /*metatask*/ metatask,
1483				    /*policy*/ CFI_ERR_HARD,
1484				    /*retries*/ 3,
1485				    /*orig_lun_io*/ NULL,
1486				    /*done_function*/ cfi_lun_io_done);
1487
1488			lun_io = (struct cfi_lun_io *) io->io_hdr.port_priv;
1489
1490			STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1491
1492			if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1493				printf("%s: error returned from ctl_queue()!\n",
1494				       __func__);
1495				STAILQ_REMOVE(&lun->io_list, lun_io,
1496					      cfi_lun_io, links);
1497				ctl_free_io(io);
1498				metatask->taskinfo.startstop.total_luns--;
1499			}
1500		}
1501
1502		if (STAILQ_FIRST(&tmp_io_list) != NULL) {
1503			printf("%s: error: tmp_io_list != NULL\n", __func__);
1504			for (io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list);
1505			     io != NULL;
1506			     io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list)) {
1507				STAILQ_REMOVE(&tmp_io_list, &io->io_hdr,
1508					      ctl_io_hdr, links);
1509				ctl_free_io(io);
1510			}
1511		}
1512		mtx_unlock(&softc->lock);
1513
1514		break;
1515	}
1516	case CFI_TASK_BBRREAD: {
1517		union ctl_io *io;
1518		struct cfi_lun *lun;
1519		struct cfi_lun_io *lun_io;
1520		cfi_bbrread_status status;
1521		int req_lun_num;
1522		uint32_t num_blocks;
1523
1524		status = CFI_BBR_SUCCESS;
1525
1526		req_lun_num = metatask->taskinfo.bbrread.lun_num;
1527
1528		mtx_lock(&softc->lock);
1529		STAILQ_FOREACH(lun, &softc->lun_list, links) {
1530			if (lun->lun_id != req_lun_num)
1531				continue;
1532			if (lun->state != CFI_LUN_READY) {
1533				status = CFI_BBR_LUN_UNCONFIG;
1534				break;
1535			} else
1536				break;
1537		}
1538
1539		if (lun == NULL)
1540			status = CFI_BBR_NO_LUN;
1541
1542		if (status != CFI_BBR_SUCCESS) {
1543			metatask->status = CFI_MT_ERROR;
1544			metatask->taskinfo.bbrread.status = status;
1545			mtx_unlock(&softc->lock);
1546			cfi_metatask_done(softc, metatask);
1547			break;
1548		}
1549
1550		/*
1551		 * Convert the number of bytes given into blocks and check
1552		 * that the number of bytes is a multiple of the blocksize.
1553		 * CTL will verify that the LBA is okay.
1554		 */
1555		if (lun->blocksize_powerof2 != 0) {
1556			if ((metatask->taskinfo.bbrread.len &
1557			    (lun->blocksize - 1)) != 0) {
1558				metatask->status = CFI_MT_ERROR;
1559				metatask->taskinfo.bbrread.status =
1560					CFI_BBR_BAD_LEN;
1561				cfi_metatask_done(softc, metatask);
1562				break;
1563			}
1564
1565			num_blocks = metatask->taskinfo.bbrread.len >>
1566				lun->blocksize_powerof2;
1567		} else {
1568			/*
1569			 * XXX KDM this could result in floating point
1570			 * division, which isn't supported in the kernel on
1571			 * x86 at least.
1572			 */
1573			if ((metatask->taskinfo.bbrread.len %
1574			     lun->blocksize) != 0) {
1575				metatask->status = CFI_MT_ERROR;
1576				metatask->taskinfo.bbrread.status =
1577					CFI_BBR_BAD_LEN;
1578				cfi_metatask_done(softc, metatask);
1579				break;
1580			}
1581
1582			/*
1583			 * XXX KDM this could result in floating point
1584			 * division in some cases.
1585			 */
1586			num_blocks = metatask->taskinfo.bbrread.len /
1587				lun->blocksize;
1588
1589		}
1590
1591		io = ctl_alloc_io(softc->fe.ctl_pool_ref);
1592		if (io == NULL) {
1593			metatask->status = CFI_MT_ERROR;
1594			metatask->taskinfo.bbrread.status = CFI_BBR_NO_MEM;
1595			mtx_unlock(&softc->lock);
1596			cfi_metatask_done(softc, metatask);
1597			break;
1598		}
1599
1600		/*
1601		 * XXX KDM need to do a read capacity to get the blocksize
1602		 * for this device.
1603		 */
1604		ctl_scsi_read_write(io,
1605				    /*data_ptr*/ NULL,
1606				    /*data_len*/ metatask->taskinfo.bbrread.len,
1607				    /*read_op*/ 1,
1608				    /*byte2*/ 0,
1609				    /*minimum_cdb_size*/ 0,
1610				    /*lba*/ metatask->taskinfo.bbrread.lba,
1611				    /*num_blocks*/ num_blocks,
1612				    /*tag_type*/ CTL_TAG_SIMPLE,
1613				    /*control*/ 0);
1614
1615		cfi_init_io(io,
1616			    /*lun*/ lun,
1617			    /*metatask*/ metatask,
1618			    /*policy*/ CFI_ERR_SOFT,
1619			    /*retries*/ 3,
1620			    /*orig_lun_io*/ NULL,
1621			    /*done_function*/ cfi_lun_io_done);
1622
1623		lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1624
1625		STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1626
1627		if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1628			printf("%s: error returned from ctl_queue()!\n",
1629			       __func__);
1630			STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1631			ctl_free_io(io);
1632			metatask->status = CFI_MT_ERROR;
1633			metatask->taskinfo.bbrread.status = CFI_BBR_ERROR;
1634			mtx_unlock(&softc->lock);
1635			cfi_metatask_done(softc, metatask);
1636			break;
1637		}
1638
1639		mtx_unlock(&softc->lock);
1640		break;
1641	}
1642	default:
1643		panic("invalid metatask type %d", metatask->tasktype);
1644		break; /* NOTREACHED */
1645	}
1646}
1647
1648struct cfi_metatask *
1649cfi_alloc_metatask(int can_wait)
1650{
1651	struct cfi_metatask *metatask;
1652	struct cfi_softc *softc;
1653
1654	softc = &fetd_internal_softc;
1655
1656	metatask = uma_zalloc(cfi_metatask_zone,
1657	    (can_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
1658	if (metatask == NULL)
1659		return (NULL);
1660
1661	metatask->status = CFI_MT_NONE;
1662
1663	return (metatask);
1664}
1665
/*
 * Release a metatask previously obtained from cfi_alloc_metatask() back
 * to the UMA zone.  Callers invoke this once they have consumed the
 * status delivered through the metatask callback.
 */
void
cfi_free_metatask(struct cfi_metatask *metatask)
{

	uma_zfree(cfi_metatask_zone, metatask);
}
1672
1673/*
1674 * vim: ts=8
1675 */
1676