1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice unmodified, this list of conditions, and the following
12 *    disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/* Driver for VirtIO SCSI devices. */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD$");
33
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/kernel.h>
37#include <sys/kthread.h>
38#include <sys/malloc.h>
39#include <sys/module.h>
40#include <sys/sglist.h>
41#include <sys/sysctl.h>
42#include <sys/lock.h>
43#include <sys/mutex.h>
44#include <sys/callout.h>
45#include <sys/queue.h>
46#include <sys/sbuf.h>
47
48#include <machine/stdarg.h>
49
50#include <machine/bus.h>
51#include <machine/resource.h>
52#include <sys/bus.h>
53#include <sys/rman.h>
54
55#include <cam/cam.h>
56#include <cam/cam_ccb.h>
57#include <cam/cam_sim.h>
58#include <cam/cam_periph.h>
59#include <cam/cam_xpt_sim.h>
60#include <cam/cam_debug.h>
61#include <cam/scsi/scsi_all.h>
62#include <cam/scsi/scsi_message.h>
63
64#include <dev/virtio/virtio.h>
65#include <dev/virtio/virtqueue.h>
66#include <dev/virtio/scsi/virtio_scsi.h>
67#include <dev/virtio/scsi/virtio_scsivar.h>
68
69#include "virtio_if.h"
70
71static int	vtscsi_modevent(module_t, int, void *);
72
73static int	vtscsi_probe(device_t);
74static int	vtscsi_attach(device_t);
75static int	vtscsi_detach(device_t);
76static int	vtscsi_suspend(device_t);
77static int	vtscsi_resume(device_t);
78
79static int	vtscsi_negotiate_features(struct vtscsi_softc *);
80static int	vtscsi_setup_features(struct vtscsi_softc *);
81static void	vtscsi_read_config(struct vtscsi_softc *,
82		    struct virtio_scsi_config *);
83static int	vtscsi_maximum_segments(struct vtscsi_softc *, int);
84static int	vtscsi_alloc_virtqueues(struct vtscsi_softc *);
85static void	vtscsi_check_sizes(struct vtscsi_softc *);
86static void	vtscsi_write_device_config(struct vtscsi_softc *);
87static int	vtscsi_reinit(struct vtscsi_softc *);
88
89static int	vtscsi_alloc_cam(struct vtscsi_softc *);
90static int	vtscsi_register_cam(struct vtscsi_softc *);
91static void	vtscsi_free_cam(struct vtscsi_softc *);
92static void	vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *);
93static int	vtscsi_register_async(struct vtscsi_softc *);
94static void	vtscsi_deregister_async(struct vtscsi_softc *);
95static void	vtscsi_cam_action(struct cam_sim *, union ccb *);
96static void	vtscsi_cam_poll(struct cam_sim *);
97
98static void	vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *,
99		    union ccb *);
100static void	vtscsi_cam_get_tran_settings(struct vtscsi_softc *,
101		    union ccb *);
102static void	vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *);
103static void	vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *);
104static void	vtscsi_cam_abort(struct vtscsi_softc *, union ccb *);
105static void	vtscsi_cam_path_inquiry(struct vtscsi_softc *,
106		    struct cam_sim *, union ccb *);
107
108static int	vtscsi_sg_append_scsi_buf(struct vtscsi_softc *,
109		    struct sglist *, struct ccb_scsiio *);
110static int	vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *,
111		    struct vtscsi_request *, int *, int *);
112static int	vtscsi_execute_scsi_cmd(struct vtscsi_softc *,
113		    struct vtscsi_request *);
114static int	vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *);
115static void	vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *,
116		    struct vtscsi_request *);
117static int	vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *,
118		    struct vtscsi_request *);
119static void	vtscsi_timedout_scsi_cmd(void *);
120static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *);
121static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *,
122		    struct ccb_scsiio *, struct virtio_scsi_cmd_resp *);
123static void	vtscsi_complete_scsi_cmd(struct vtscsi_softc *,
124		    struct vtscsi_request *);
125
126static void	vtscsi_poll_ctrl_req(struct vtscsi_softc *,
127		    struct vtscsi_request *);
128static int	vtscsi_execute_ctrl_req(struct vtscsi_softc *,
129		    struct vtscsi_request *, struct sglist *, int, int, int);
130static void	vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c,
131		    struct vtscsi_request *);
132static int	vtscsi_execute_abort_task_cmd(struct vtscsi_softc *,
133		    struct vtscsi_request *);
134static int	vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *,
135		    struct vtscsi_request *);
136
137static void	vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
138static void	vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
139static void	vtscsi_init_scsi_cmd_req(struct vtscsi_softc *,
140		    struct ccb_scsiio *, struct virtio_scsi_cmd_req *);
141static void	vtscsi_init_ctrl_tmf_req(struct vtscsi_softc *, struct ccb_hdr *,
142		    uint32_t, uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
143
144static void	vtscsi_freeze_simq(struct vtscsi_softc *, int);
145static int	vtscsi_thaw_simq(struct vtscsi_softc *, int);
146
147static void	vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t,
148		    lun_id_t);
149static void	vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t,
150		    lun_id_t);
151static void	vtscsi_execute_rescan_bus(struct vtscsi_softc *);
152
153static void	vtscsi_handle_event(struct vtscsi_softc *,
154		    struct virtio_scsi_event *);
155static int	vtscsi_enqueue_event_buf(struct vtscsi_softc *,
156		    struct virtio_scsi_event *);
157static int	vtscsi_init_event_vq(struct vtscsi_softc *);
158static void	vtscsi_reinit_event_vq(struct vtscsi_softc *);
159static void	vtscsi_drain_event_vq(struct vtscsi_softc *);
160
161static void	vtscsi_complete_vqs_locked(struct vtscsi_softc *);
162static void	vtscsi_complete_vqs(struct vtscsi_softc *);
163static void	vtscsi_drain_vqs(struct vtscsi_softc *);
164static void	vtscsi_cancel_request(struct vtscsi_softc *,
165		    struct vtscsi_request *);
166static void	vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *);
167static void	vtscsi_stop(struct vtscsi_softc *);
168static int	vtscsi_reset_bus(struct vtscsi_softc *);
169
170static void	vtscsi_init_request(struct vtscsi_softc *,
171		    struct vtscsi_request *);
172static int	vtscsi_alloc_requests(struct vtscsi_softc *);
173static void	vtscsi_free_requests(struct vtscsi_softc *);
174static void	vtscsi_enqueue_request(struct vtscsi_softc *,
175		    struct vtscsi_request *);
176static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *);
177
178static void	vtscsi_complete_request(struct vtscsi_request *);
179static void	vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *);
180
181static void	vtscsi_control_vq_intr(void *);
182static void	vtscsi_event_vq_intr(void *);
183static void	vtscsi_request_vq_intr(void *);
184static void	vtscsi_disable_vqs_intr(struct vtscsi_softc *);
185static void	vtscsi_enable_vqs_intr(struct vtscsi_softc *);
186
187static void	vtscsi_get_tunables(struct vtscsi_softc *);
188static void	vtscsi_setup_sysctl(struct vtscsi_softc *);
189
190static void	vtscsi_printf_req(struct vtscsi_request *, const char *,
191		    const char *, ...);
192
/*
 * Endianness helpers.  Modern (VIRTIO_F_VERSION_1) devices use
 * little-endian for all ring and config data; legacy devices use
 * guest-native byte order.  These wrappers select the right conversion
 * based on the features negotiated for this softc.
 */
#define vtscsi_modern(_sc) (((_sc)->vtscsi_features & VIRTIO_F_VERSION_1) != 0)
#define vtscsi_htog16(_sc, _val)	virtio_htog16(vtscsi_modern(_sc), _val)
#define vtscsi_htog32(_sc, _val)	virtio_htog32(vtscsi_modern(_sc), _val)
#define vtscsi_htog64(_sc, _val)	virtio_htog64(vtscsi_modern(_sc), _val)
#define vtscsi_gtoh16(_sc, _val)	virtio_gtoh16(vtscsi_modern(_sc), _val)
#define vtscsi_gtoh32(_sc, _val)	virtio_gtoh32(vtscsi_modern(_sc), _val)
#define vtscsi_gtoh64(_sc, _val)	virtio_gtoh64(vtscsi_modern(_sc), _val)
200
201/* Global tunables. */
202/*
203 * The current QEMU VirtIO SCSI implementation does not cancel in-flight
204 * IO during virtio_stop(). So in-flight requests still complete after the
205 * device reset. We would have to wait for all the in-flight IO to complete,
206 * which defeats the typical purpose of a bus reset. We could simulate the
207 * bus reset with either I_T_NEXUS_RESET of all the targets, or with
208 * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the
209 * control virtqueue). But this isn't very useful if things really go off
210 * the rails, so default to disabled for now.
211 */
static int vtscsi_bus_reset_disable = 1;
TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable);

/*
 * Human-readable names for the VirtIO SCSI feature bits, registered with
 * the virtio bus via virtio_set_feature_desc() in vtscsi_attach().
 */
static struct virtio_feature_desc vtscsi_feature_desc[] = {
	{ VIRTIO_SCSI_F_INOUT,		"InOut"		},
	{ VIRTIO_SCSI_F_HOTPLUG,	"Hotplug"	},
	{ VIRTIO_SCSI_F_CHANGE,		"ChangeEvent"	},
	{ VIRTIO_SCSI_F_T10_PI, 	"T10PI"		},

	{ 0, NULL }
};
223
/* Newbus method table; probe/attach/detach carry all the real work. */
static device_method_t vtscsi_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtscsi_probe),
	DEVMETHOD(device_attach,	vtscsi_attach),
	DEVMETHOD(device_detach,	vtscsi_detach),
	DEVMETHOD(device_suspend,	vtscsi_suspend),
	DEVMETHOD(device_resume,	vtscsi_resume),

	DEVMETHOD_END
};
234
/* Driver/module glue: register on the virtio bus and declare dependencies. */
static driver_t vtscsi_driver = {
	"vtscsi",
	vtscsi_methods,
	sizeof(struct vtscsi_softc)
};
static devclass_t vtscsi_devclass;

VIRTIO_DRIVER_MODULE(virtio_scsi, vtscsi_driver, vtscsi_devclass,
    vtscsi_modevent, 0);
MODULE_VERSION(virtio_scsi, 1);
MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);

VIRTIO_SIMPLE_PNPINFO(virtio_scsi, VIRTIO_ID_SCSI, "VirtIO SCSI Adapter");
249
250static int
251vtscsi_modevent(module_t mod, int type, void *unused)
252{
253	int error;
254
255	switch (type) {
256	case MOD_LOAD:
257	case MOD_QUIESCE:
258	case MOD_UNLOAD:
259	case MOD_SHUTDOWN:
260		error = 0;
261		break;
262	default:
263		error = EOPNOTSUPP;
264		break;
265	}
266
267	return (error);
268}
269
/* Match any virtio device advertising the SCSI device ID (see PNPINFO above). */
static int
vtscsi_probe(device_t dev)
{
	return (VIRTIO_SIMPLE_PROBE(dev, virtio_scsi));
}
275
/*
 * Attach: negotiate features, read the device configuration, allocate
 * the sglist, virtqueues, requests and CAM state, hook up interrupts,
 * and finally register with CAM.  Any failure funnels through "fail",
 * which unwinds the partial attach via vtscsi_detach().
 */
static int
vtscsi_attach(device_t dev)
{
	struct vtscsi_softc *sc;
	struct virtio_scsi_config scsicfg;
	int error;

	sc = device_get_softc(dev);
	sc->vtscsi_dev = dev;
	virtio_set_feature_desc(dev, vtscsi_feature_desc);

	VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
	TAILQ_INIT(&sc->vtscsi_req_free);

	vtscsi_get_tunables(sc);
	vtscsi_setup_sysctl(sc);

	error = vtscsi_setup_features(sc);
	if (error) {
		device_printf(dev, "cannot setup features\n");
		goto fail;
	}

	vtscsi_read_config(sc, &scsicfg);

	/* Cache the addressing limits reported by the device config space. */
	sc->vtscsi_max_channel = scsicfg.max_channel;
	sc->vtscsi_max_target = scsicfg.max_target;
	sc->vtscsi_max_lun = scsicfg.max_lun;
	sc->vtscsi_event_buf_size = scsicfg.event_info_size;

	/* Tell the device our sense/CDB sizes before issuing any requests. */
	vtscsi_write_device_config(sc);

	sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max);
	sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT);
	if (sc->vtscsi_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtscsi_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	vtscsi_check_sizes(sc);

	error = vtscsi_init_event_vq(sc);
	if (error) {
		device_printf(dev, "cannot populate the eventvq\n");
		goto fail;
	}

	error = vtscsi_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot allocate requests\n");
		goto fail;
	}

	error = vtscsi_alloc_cam(sc);
	if (error) {
		device_printf(dev, "cannot allocate CAM structures\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_CAM);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		goto fail;
	}

	vtscsi_enable_vqs_intr(sc);

	/*
	 * Register with CAM after interrupts are enabled so we will get
	 * notified of the probe responses.
	 */
	error = vtscsi_register_cam(sc);
	if (error) {
		device_printf(dev, "cannot register with CAM\n");
		goto fail;
	}

fail:
	/* On full success error is 0 and this falls through to return 0. */
	if (error)
		vtscsi_detach(dev);

	return (error);
}
366
/*
 * Detach: mark the softc detaching so vtscsi_cam_action() rejects new
 * CCBs, stop the device, then complete and drain outstanding virtqueue
 * entries before tearing down CAM, the request pool, and the sglist.
 * Also invoked from a failed vtscsi_attach(), so every teardown step
 * must tolerate partially-initialized state.
 */
static int
vtscsi_detach(device_t dev)
{
	struct vtscsi_softc *sc;

	sc = device_get_softc(dev);

	VTSCSI_LOCK(sc);
	sc->vtscsi_flags |= VTSCSI_FLAG_DETACH;
	if (device_is_attached(dev))
		vtscsi_stop(sc);
	VTSCSI_UNLOCK(sc);

	vtscsi_complete_vqs(sc);
	vtscsi_drain_vqs(sc);

	vtscsi_free_cam(sc);
	vtscsi_free_requests(sc);

	if (sc->vtscsi_sglist != NULL) {
		sglist_free(sc->vtscsi_sglist);
		sc->vtscsi_sglist = NULL;
	}

	VTSCSI_LOCK_DESTROY(sc);

	return (0);
}
395
static int
vtscsi_suspend(device_t dev)
{

	/* NOTE(review): no driver state is saved; suspend is a no-op here. */
	return (0);
}
402
static int
vtscsi_resume(device_t dev)
{

	/* NOTE(review): nothing is restored; resume is a no-op here. */
	return (0);
}
409
/*
 * Offer VTSCSI_FEATURES to the host, record the subset it accepted (the
 * saved set is reused by vtscsi_reinit()), and finalize the negotiation.
 */
static int
vtscsi_negotiate_features(struct vtscsi_softc *sc)
{
	device_t dev;
	uint64_t features;

	dev = sc->vtscsi_dev;
	features = VTSCSI_FEATURES;

	sc->vtscsi_features = virtio_negotiate_features(dev, features);
	return (virtio_finalize_features(dev));
}
422
423static int
424vtscsi_setup_features(struct vtscsi_softc *sc)
425{
426	device_t dev;
427	int error;
428
429	dev = sc->vtscsi_dev;
430
431	error = vtscsi_negotiate_features(sc);
432	if (error)
433		return (error);
434
435	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
436		sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
437	if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
438		sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
439	if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
440		sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;
441
442	return (0);
443}
444
/* Read one field of the config space at its structure offset into *(_cfg). */
#define VTSCSI_GET_CONFIG(_dev, _field, _cfg)			\
	virtio_read_device_config(_dev,				\
	    offsetof(struct virtio_scsi_config, _field),	\
	    &(_cfg)->_field, sizeof((_cfg)->_field))		\

/* Fetch the virtio-scsi configuration space, one field at a time. */
static void
vtscsi_read_config(struct vtscsi_softc *sc,
    struct virtio_scsi_config *scsicfg)
{
	device_t dev;

	dev = sc->vtscsi_dev;

	bzero(scsicfg, sizeof(struct virtio_scsi_config));

	VTSCSI_GET_CONFIG(dev, num_queues, scsicfg);
	VTSCSI_GET_CONFIG(dev, seg_max, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_sectors, scsicfg);
	VTSCSI_GET_CONFIG(dev, cmd_per_lun, scsicfg);
	VTSCSI_GET_CONFIG(dev, event_info_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, sense_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, cdb_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_channel, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_target, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_lun, scsicfg);
}

#undef VTSCSI_GET_CONFIG
473
474static int
475vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
476{
477	int nsegs;
478
479	nsegs = VTSCSI_MIN_SEGMENTS;
480
481	if (seg_max > 0) {
482		nsegs += MIN(seg_max, maxphys / PAGE_SIZE + 1);
483		if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
484			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
485	} else
486		nsegs += 1;
487
488	return (nsegs);
489}
490
/*
 * Allocate the three virtio-scsi virtqueues: control, event, and request.
 * Only the request queue carries data transfers, so it alone is given
 * vtscsi_max_nsegs (the control and event queues pass 0).
 */
static int
vtscsi_alloc_virtqueues(struct vtscsi_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[3];
	int nvqs;

	dev = sc->vtscsi_dev;
	nvqs = 3;

	VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc,
	    &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc,
	    &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs,
	    vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}
513
/*
 * Without indirect descriptors every segment of a request occupies a
 * ring slot, so clamp vtscsi_max_nsegs to the request queue size.
 */
static void
vtscsi_check_sizes(struct vtscsi_softc *sc)
{
	int rqsize;

	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0) {
		/*
		 * Ensure the assertions in virtqueue_enqueue(),
		 * even if the hypervisor reports a bad seg_max.
		 */
		rqsize = virtqueue_size(sc->vtscsi_request_vq);
		if (sc->vtscsi_max_nsegs > rqsize) {
			device_printf(sc->vtscsi_dev,
			    "clamping seg_max (%d %d)\n", sc->vtscsi_max_nsegs,
			    rqsize);
			sc->vtscsi_max_nsegs = rqsize;
		}
	}
}
533
/*
 * Write the sense buffer and CDB sizes we use into the device config
 * space.  Done at attach and again from vtscsi_reinit() after a device
 * reset.
 */
static void
vtscsi_write_device_config(struct vtscsi_softc *sc)
{

	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, sense_size),
	    VIRTIO_SCSI_SENSE_SIZE);

	/*
	 * This is the size in the virtio_scsi_cmd_req structure. Note
	 * this value (32) is larger than the maximum CAM CDB size (16).
	 */
	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, cdb_size),
	    VIRTIO_SCSI_CDB_SIZE);
}
550
/*
 * Re-initialize the device after a reset: renegotiate with the saved
 * feature set, rewrite our config values, repopulate the event queue,
 * and re-enable virtqueue interrupts.
 */
static int
vtscsi_reinit(struct vtscsi_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->vtscsi_dev;

	error = virtio_reinit(dev, sc->vtscsi_features);
	if (error == 0) {
		vtscsi_write_device_config(sc);
		virtio_reinit_complete(dev);
		vtscsi_reinit_event_vq(sc);

		vtscsi_enable_vqs_intr(sc);
	}

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error);

	return (error);
}
572
/*
 * Allocate the CAM SIM and its device queue.  The openings advertised to
 * CAM are reduced by VTSCSI_RESERVED_REQUESTS so some requests stay
 * available for internal use (presumably TMF/abort commands — see
 * vtscsi_cam_abort()/vtscsi_cam_reset_dev(); TODO confirm).
 */
static int
vtscsi_alloc_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	struct cam_devq *devq;
	int openings;

	dev = sc->vtscsi_dev;
	openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS;

	devq = cam_simq_alloc(openings);
	if (devq == NULL) {
		device_printf(dev, "cannot allocate SIM queue\n");
		return (ENOMEM);
	}

	/* The SIM is driven under the softc mutex (VTSCSI_MTX). */
	sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll,
	    "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1,
	    openings, devq);
	if (sc->vtscsi_sim == NULL) {
		cam_simq_free(devq);
		device_printf(dev, "cannot allocate SIM\n");
		return (ENOMEM);
	}

	return (0);
}
600
/*
 * Register the SIM with the XPT, create a wildcard path for the bus, and
 * subscribe to async events.  Runs under the softc lock; on failure any
 * partially-registered state (path, bus) is unwound before returning.
 */
static int
vtscsi_register_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	int registered, error;

	dev = sc->vtscsi_dev;
	registered = 0;

	VTSCSI_LOCK(sc);

	if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) {
		error = ENOMEM;
		device_printf(dev, "cannot register XPT bus\n");
		goto fail;
	}

	/* Remember the bus is registered so the fail path can undo it. */
	registered = 1;

	if (xpt_create_path(&sc->vtscsi_path, NULL,
	    cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		error = ENOMEM;
		device_printf(dev, "cannot create bus path\n");
		goto fail;
	}

	if (vtscsi_register_async(sc) != CAM_REQ_CMP) {
		error = EIO;
		device_printf(dev, "cannot register async callback\n");
		goto fail;
	}

	VTSCSI_UNLOCK(sc);

	return (0);

fail:
	if (sc->vtscsi_path != NULL) {
		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;
	}

	if (registered != 0)
		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));

	VTSCSI_UNLOCK(sc);

	return (error);
}
651
/*
 * Undo vtscsi_register_cam() and vtscsi_alloc_cam().  Tolerates partial
 * CAM state (e.g. a failed attach): each piece is NULL-checked before
 * being torn down.
 */
static void
vtscsi_free_cam(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);

	if (sc->vtscsi_path != NULL) {
		vtscsi_deregister_async(sc);

		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;

		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
	}

	if (sc->vtscsi_sim != NULL) {
		cam_sim_free(sc->vtscsi_sim, 1);
		sc->vtscsi_sim = NULL;
	}

	VTSCSI_UNLOCK(sc);
}
674
675static void
676vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg)
677{
678	struct cam_sim *sim;
679	struct vtscsi_softc *sc;
680
681	sim = cb_arg;
682	sc = cam_sim_softc(sim);
683
684	vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code);
685
686	/*
687	 * TODO Once QEMU supports event reporting, we should
688	 *      (un)subscribe to events here.
689	 */
690	switch (code) {
691	case AC_FOUND_DEVICE:
692		break;
693	case AC_LOST_DEVICE:
694		break;
695	}
696}
697
/*
 * Subscribe to device found/lost async events via an XPT_SASYNC_CB CCB.
 * Note the return value is a CAM status (the caller compares it against
 * CAM_REQ_CMP), not an errno.
 */
static int
vtscsi_register_async(struct vtscsi_softc *sc)
{
	struct ccb_setasync csa;

	/* The third argument (5) is the CCB priority. */
	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE;
	csa.callback = vtscsi_cam_async;
	csa.callback_arg = sc->vtscsi_sim;

	xpt_action((union ccb *) &csa);

	return (csa.ccb_h.status);
}
713
714static void
715vtscsi_deregister_async(struct vtscsi_softc *sc)
716{
717	struct ccb_setasync csa;
718
719	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
720	csa.ccb_h.func_code = XPT_SASYNC_CB;
721	csa.event_enable = 0;
722	csa.callback = vtscsi_cam_async;
723	csa.callback_arg = sc->vtscsi_sim;
724
725	xpt_action((union ccb *) &csa);
726}
727
/*
 * Main CAM entry point: dispatch an incoming CCB by function code.
 * Called with the SIM lock (VTSCSI_MTX) held.
 */
static void
vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct vtscsi_softc *sc;
	struct ccb_hdr *ccbh;

	sc = cam_sim_softc(sim);
	ccbh = &ccb->ccb_h;

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) {
		/*
		 * The VTSCSI_MTX is briefly dropped between setting
		 * VTSCSI_FLAG_DETACH and deregistering with CAM, so
		 * drop any CCBs that come in during that window.
		 */
		ccbh->status = CAM_NO_HBA;
		xpt_done(ccb);
		return;
	}

	switch (ccbh->func_code) {
	case XPT_SCSI_IO:
		vtscsi_cam_scsi_io(sc, sim, ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* Transport settings cannot be changed on this HBA. */
		ccbh->status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
		vtscsi_cam_get_tran_settings(sc, ccb);
		break;

	case XPT_RESET_BUS:
		vtscsi_cam_reset_bus(sc, ccb);
		break;

	case XPT_RESET_DEV:
		vtscsi_cam_reset_dev(sc, ccb);
		break;

	case XPT_ABORT:
		vtscsi_cam_abort(sc, ccb);
		break;

	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:
		vtscsi_cam_path_inquiry(sc, sim, ccb);
		break;

	default:
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code);

		ccbh->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
794
/*
 * CAM polling entry point: process all virtqueue completions without
 * relying on interrupts (used e.g. while dumping or polling).
 */
static void
vtscsi_cam_poll(struct cam_sim *sim)
{

	vtscsi_complete_vqs_locked(cam_sim_softc(sim));
}
804
/*
 * Validate and start an XPT_SCSI_IO CCB.  CDBs that do not fit the fixed
 * virtio request CDB field, and bidirectional transfers when the host
 * did not offer VIRTIO_SCSI_F_INOUT, are rejected as CAM_REQ_INVALID.
 * On error the CCB is completed here; otherwise it completes later when
 * the command finishes.
 */
static void
vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	int error;

	ccbh = &ccb->ccb_h;
	csio = &ccb->csio;

	if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) {
		error = EINVAL;
		ccbh->status = CAM_REQ_INVALID;
		goto done;
	}

	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH &&
	    (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) {
		error = EINVAL;
		ccbh->status = CAM_REQ_INVALID;
		goto done;
	}

	error = vtscsi_start_scsi_cmd(sc, ccb);

done:
	if (error) {
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status);
		xpt_done(ccb);
	}
}
838
839static void
840vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb)
841{
842	struct ccb_trans_settings *cts;
843	struct ccb_trans_settings_scsi *scsi;
844
845	cts = &ccb->cts;
846	scsi = &cts->proto_specific.scsi;
847
848	cts->protocol = PROTO_SCSI;
849	cts->protocol_version = SCSI_REV_SPC3;
850	cts->transport = XPORT_SAS;
851	cts->transport_version = 0;
852
853	scsi->valid = CTS_SCSI_VALID_TQ;
854	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
855
856	ccb->ccb_h.status = CAM_REQ_CMP;
857	xpt_done(ccb);
858}
859
860static void
861vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb)
862{
863	int error;
864
865	error = vtscsi_reset_bus(sc);
866	if (error == 0)
867		ccb->ccb_h.status = CAM_REQ_CMP;
868	else
869		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
870
871	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n",
872	    error, ccb, ccb->ccb_h.status);
873
874	xpt_done(ccb);
875}
876
/*
 * XPT_RESET_DEV: issue a device reset command for the CCB's target.  If
 * no free request is available the simq is frozen and the CCB fails with
 * CAM_RESRC_UNAVAIL; other submission failures give CAM_REQ_CMP_ERR.
 * On successful submission the CCB is completed asynchronously.
 */
static void
vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct vtscsi_request *req;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_reset_dev_cmd(sc, req);
	if (error == 0)
		return;

	/* Submission failed; return the unused request to the free list. */
	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
912
/*
 * XPT_ABORT: issue an abort-task command for the CCB named in the abort
 * request.  Error handling mirrors vtscsi_cam_reset_dev(): EAGAIN (no
 * free request) freezes the simq and yields CAM_RESRC_UNAVAIL, anything
 * else yields CAM_REQ_CMP_ERR; success completes asynchronously.
 */
static void
vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct vtscsi_request *req;
	struct ccb_hdr *ccbh;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_abort_task_cmd(sc, req);
	if (error == 0)
		return;

	/* Submission failed; return the unused request to the free list. */
	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
948
/*
 * XPT_PATH_INQ: report the HBA's capabilities and limits to CAM.
 */
static void
vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	device_t dev;
	struct ccb_pathinq *cpi;

	dev = sc->vtscsi_dev;
	cpi = &ccb->cpi;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb);

	cpi->version_num = 1;
	cpi->hba_inquiry = PI_TAG_ABLE;
	cpi->target_sprt = 0;
	cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
	if (vtscsi_bus_reset_disable != 0)
		cpi->hba_misc |= PIM_NOBUSRESET;
	cpi->hba_eng_cnt = 0;

	/* Limits were cached from the device config space at attach. */
	cpi->max_target = sc->vtscsi_max_target;
	cpi->max_lun = sc->vtscsi_max_lun;
	/* Our initiator ID is one past the highest target ID. */
	cpi->initiator_id = cpi->max_target + 1;

	strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
	strlcpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
	strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);

	cpi->unit_number = cam_sim_unit(sim);
	cpi->bus_id = cam_sim_bus(sim);

	cpi->base_transfer_speed = 300000;

	cpi->protocol = PROTO_SCSI;
	cpi->protocol_version = SCSI_REV_SPC3;
	cpi->transport = XPORT_SAS;
	cpi->transport_version = 0;

	/*
	 * Max I/O excludes the header segments; the extra -1 presumably
	 * accounts for an unaligned buffer spanning one more page — TODO
	 * confirm.
	 */
	cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) *
	    PAGE_SIZE;

	cpi->hba_vendor = virtio_get_vendor(dev);
	cpi->hba_device = virtio_get_device(dev);
	cpi->hba_subvendor = virtio_get_subvendor(dev);
	cpi->hba_subdevice = virtio_get_subdevice(dev);

	ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(ccb);
}
998
/*
 * Append the CCB's data buffer(s) to the scatter/gather list, handling
 * each CAM data layout: a single virtual or physical buffer, an S/G list
 * of either kind, or a bio.  Returns 0 or an errno from sglist_append*().
 */
static int
vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
    struct ccb_scsiio *csio)
{
	struct ccb_hdr *ccbh;
	struct bus_dma_segment *dseg;
	int i, error;

	ccbh = &csio->ccb_h;
	error = 0;

	switch ((ccbh->flags & CAM_DATA_MASK)) {
	case CAM_DATA_VADDR:
		error = sglist_append(sg, csio->data_ptr, csio->dxfer_len);
		break;
	case CAM_DATA_PADDR:
		error = sglist_append_phys(sg,
		    (vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len);
		break;
	case CAM_DATA_SG:
		/* Stop appending at the first sglist_append() failure. */
		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
			error = sglist_append(sg,
			    (void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len);
		}
		break;
	case CAM_DATA_SG_PADDR:
		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
			error = sglist_append_phys(sg,
			    (vm_paddr_t) dseg->ds_addr, dseg->ds_len);
		}
		break;
	case CAM_DATA_BIO:
		error = sglist_append_bio(sg, (struct bio *) csio->data_ptr);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
1042
/*
 * Build the scatter/gather list for a SCSI command.  The readable
 * (driver-to-device) part is the command header plus any data written
 * out; the writable (device-to-driver) part is the response structure
 * plus any data read in.  *readable/*writable receive the segment counts
 * for virtqueue_enqueue().  Returns 0, or EFBIG if the data did not fit.
 */
static int
vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req,
    int *readable, int *writable)
{
	struct sglist *sg;
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int error;

	sg = sc->vtscsi_sglist;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	sglist_reset(sg);

	sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		/* At least one segment must be left for the response. */
		if (error || sg->sg_nseg == sg->sg_maxseg)
			goto fail;
	}

	*readable = sg->sg_nseg;

	sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		if (error)
			goto fail;
	}

	*writable = sg->sg_nseg - *readable;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d "
	    "writable=%d\n", req, ccbh, *readable, *writable);

	return (0);

fail:
	/*
	 * This should never happen unless maxio was incorrectly set.
	 */
	vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0);

	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p "
	    "nseg=%d maxseg=%d\n",
	    error, req, ccbh, sg->sg_nseg, sg->sg_maxseg);

	return (EFBIG);
}
1098
/*
 * Enqueue a SCSI command on the request virtqueue.  On success the CCB
 * is marked CAM_SIM_QUEUED and completes later via
 * vtscsi_complete_scsi_cmd(); a timeout callout is armed unless the CCB
 * requested an infinite wait.  If the ring is full, the CCB is marked
 * for requeue and the simq is frozen until space frees up.
 */
static int
vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct sglist *sg;
	struct virtqueue *vq;
	struct ccb_scsiio *csio;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int readable, writable, error;

	sg = sc->vtscsi_sglist;
	vq = sc->vtscsi_request_vq;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	vtscsi_init_scsi_cmd_req(sc, csio, cmd_req);

	error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
	if (error)
		return (error);

	req->vsr_complete = vtscsi_complete_scsi_cmd;
	/* Pre-set an invalid value, presumably so an unwritten response is
	 * detectable at completion time — TODO confirm. */
	cmd_resp->response = -1;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error) {
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh);

		ccbh->status = CAM_REQUEUE_REQ;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ);
		return (error);
	}

	ccbh->status |= CAM_SIM_QUEUED;
	ccbh->ccbh_vtscsi_req = req;

	virtqueue_notify(vq);

	if (ccbh->timeout != CAM_TIME_INFINITY) {
		req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET;
		callout_reset_sbt(&req->vsr_callout, SBT_1MS * ccbh->timeout,
		    0, vtscsi_timedout_scsi_cmd, req, 0);
	}

	vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n",
	    req, ccbh);

	return (0);
}
1152
1153static int
1154vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb)
1155{
1156	struct vtscsi_request *req;
1157	int error;
1158
1159	req = vtscsi_dequeue_request(sc);
1160	if (req == NULL) {
1161		ccb->ccb_h.status = CAM_REQUEUE_REQ;
1162		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
1163		return (ENOBUFS);
1164	}
1165
1166	req->vsr_ccb = ccb;
1167
1168	error = vtscsi_execute_scsi_cmd(sc, req);
1169	if (error)
1170		vtscsi_enqueue_request(sc, req);
1171
1172	return (error);
1173}
1174
/*
 * Completion handler for the ABORT_TASK TMF sent on behalf of a
 * timed out SCSI command. If the abort did not take effect (and the
 * timed out request is still outstanding), escalate to a bus reset.
 */
static void
vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	struct vtscsi_request *to_req;
	uint8_t response;

	tmf_resp = &req->vsr_tmf_resp;
	response = tmf_resp->response;
	to_req = req->vsr_timedout_req;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n",
	    req, to_req, response);

	/* The TMF request itself is done with; recycle it now. */
	vtscsi_enqueue_request(sc, req);

	/*
	 * The timedout request could have completed between when the
	 * abort task was sent and when the host processed it.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT)
		return;

	/* The timedout request was successfully aborted. */
	if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE)
		return;

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/* The timedout request will be aborted by the reset. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_RESET)
		return;

	vtscsi_reset_bus(sc);
}
1213
/*
 * Issue an asynchronous ABORT_TASK TMF for a SCSI command that has
 * timed out. The TMF request records the timed out request so the
 * completion handler can decide whether a bus reset is still needed.
 * Returns 0 on successful submission, otherwise an errno.
 */
static int
vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *to_req)
{
	struct sglist *sg;
	struct ccb_hdr *to_ccbh;
	struct vtscsi_request *req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	to_ccbh = &to_req->vsr_ccb->ccb_h;

	/* A separate request is needed to carry the TMF itself. */
	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = ENOBUFS;
		goto fail;
	}

	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* The tag identifies the command to abort: the CCB pointer,
	 * matching the tag used in vtscsi_init_scsi_cmd_req(). */
	vtscsi_init_ctrl_tmf_req(sc, to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) to_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_timedout_req = to_req;
	req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd;
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);
	if (error == 0)
		return (0);

	/* Submission failed; give the TMF request back. */
	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p "
	    "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh);

	return (error);
}
1261
/*
 * Callout handler fired when a SCSI command's timeout expires.
 * Runs with the softc mutex held (callout_init_mtx). Attempts to
 * abort the command via a TMF; if that cannot be submitted, falls
 * back to resetting the bus.
 */
static void
vtscsi_timedout_scsi_cmd(void *xreq)
{
	struct vtscsi_softc *sc;
	struct vtscsi_request *to_req;

	to_req = xreq;
	sc = to_req->vsr_softc;

	vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n",
	    to_req, to_req->vsr_ccb, to_req->vsr_state);

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/*
	 * Bail if the request is not in use. We likely raced when
	 * stopping the callout handler or it has already been aborted.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE ||
	    (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0)
		return;

	/*
	 * Complete the request queue in case the timedout request is
	 * actually just pending.
	 */
	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE)
		return;

	sc->vtscsi_stats.scsi_cmd_timeouts++;
	to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT;

	if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0)
		return;

	vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n");
	vtscsi_reset_bus(sc);
}
1303
1304static cam_status
1305vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp)
1306{
1307	cam_status status;
1308
1309	switch (cmd_resp->response) {
1310	case VIRTIO_SCSI_S_OK:
1311		status = CAM_REQ_CMP;
1312		break;
1313	case VIRTIO_SCSI_S_OVERRUN:
1314		status = CAM_DATA_RUN_ERR;
1315		break;
1316	case VIRTIO_SCSI_S_ABORTED:
1317		status = CAM_REQ_ABORTED;
1318		break;
1319	case VIRTIO_SCSI_S_BAD_TARGET:
1320		status = CAM_SEL_TIMEOUT;
1321		break;
1322	case VIRTIO_SCSI_S_RESET:
1323		status = CAM_SCSI_BUS_RESET;
1324		break;
1325	case VIRTIO_SCSI_S_BUSY:
1326		status = CAM_SCSI_BUSY;
1327		break;
1328	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
1329	case VIRTIO_SCSI_S_TARGET_FAILURE:
1330	case VIRTIO_SCSI_S_NEXUS_FAILURE:
1331		status = CAM_SCSI_IT_NEXUS_LOST;
1332		break;
1333	default: /* VIRTIO_SCSI_S_FAILURE */
1334		status = CAM_REQ_CMP_ERR;
1335		break;
1336	}
1337
1338	return (status);
1339}
1340
/*
 * Process the payload of a successfully executed command: copy the
 * SCSI status, residual, and any autosense data from the virtio
 * response into the CCB. Returns the CAM status, possibly with
 * CAM_AUTOSNS_VALID OR'd in.
 */
static cam_status
vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
    struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
{
	uint32_t resp_sense_length;
	cam_status status;

	csio->scsi_status = cmd_resp->status;
	csio->resid = vtscsi_htog32(sc, cmd_resp->resid);

	if (csio->scsi_status == SCSI_STATUS_OK)
		status = CAM_REQ_CMP;
	else
		status = CAM_SCSI_STATUS_ERROR;

	resp_sense_length = vtscsi_htog32(sc, cmd_resp->sense_len);

	if (resp_sense_length > 0) {
		status |= CAM_AUTOSNS_VALID;

		/* Report how much of the CCB's sense buffer is unused;
		 * the copy below is clamped to csio->sense_len. */
		if (resp_sense_length < csio->sense_len)
			csio->sense_resid = csio->sense_len - resp_sense_length;
		else
			csio->sense_resid = 0;

		memcpy(&csio->sense_data, cmd_resp->sense,
		    csio->sense_len - csio->sense_resid);
	}

	vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR,
	    "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n",
	    csio, csio->scsi_status, csio->resid, csio->sense_resid);

	return (status);
}
1376
/*
 * Completion handler for SCSI command requests dequeued from the
 * request virtqueue: stop the timeout callout, translate the virtio
 * response into a CAM status, complete the CCB, and recycle the
 * request structure.
 */
static void
vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_resp *cmd_resp;
	cam_status status;

	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_resp = &req->vsr_cmd_resp;

	KASSERT(ccbh->ccbh_vtscsi_req == req,
	    ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req));

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&req->vsr_callout);

	status = vtscsi_scsi_cmd_cam_status(cmd_resp);
	if (status == CAM_REQ_ABORTED) {
		/* An abort issued for a timed out command is reported
		 * to CAM as a timeout rather than an abort. */
		if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT)
			status = CAM_CMD_TIMEOUT;
	} else if (status == CAM_REQ_CMP)
		status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp);

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccbh->path, 1);
	}

	/* A request and a virtqueue slot just became available. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		status |= CAM_RELEASE_SIMQ;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n",
	    req, ccbh, status);

	ccbh->status = status;
	xpt_done(req->vsr_ccb);
	vtscsi_enqueue_request(sc, req);
}
1417
1418static void
1419vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
1420{
1421
1422	/* XXX We probably shouldn't poll forever. */
1423	req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
1424	do
1425		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
1426	while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);
1427
1428	req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
1429}
1430
1431static int
1432vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
1433    struct sglist *sg, int readable, int writable, int flag)
1434{
1435	struct virtqueue *vq;
1436	int error;
1437
1438	vq = sc->vtscsi_control_vq;
1439
1440	MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);
1441
1442	error = virtqueue_enqueue(vq, req, sg, readable, writable);
1443	if (error) {
1444		/*
1445		 * Return EAGAIN when the virtqueue does not have enough
1446		 * descriptors available.
1447		 */
1448		if (error == ENOSPC || error == EMSGSIZE)
1449			error = EAGAIN;
1450
1451		return (error);
1452	}
1453
1454	virtqueue_notify(vq);
1455	if (flag == VTSCSI_EXECUTE_POLL)
1456		vtscsi_poll_ctrl_req(sc, req);
1457
1458	return (0);
1459}
1460
1461static void
1462vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
1463    struct vtscsi_request *req)
1464{
1465	union ccb *ccb;
1466	struct ccb_hdr *ccbh;
1467	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1468
1469	ccb = req->vsr_ccb;
1470	ccbh = &ccb->ccb_h;
1471	tmf_resp = &req->vsr_tmf_resp;
1472
1473	switch (tmf_resp->response) {
1474	case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
1475		ccbh->status = CAM_REQ_CMP;
1476		break;
1477	case VIRTIO_SCSI_S_FUNCTION_REJECTED:
1478		ccbh->status = CAM_UA_ABORT;
1479		break;
1480	default:
1481		ccbh->status = CAM_REQ_CMP_ERR;
1482		break;
1483	}
1484
1485	xpt_done(ccb);
1486	vtscsi_enqueue_request(sc, req);
1487}
1488
/*
 * Handle an XPT_ABORT CCB by sending an ABORT_TASK TMF for the CCB
 * it targets. The target CCB must be an in-flight XPT_SCSI_IO whose
 * request is still in use. Returns 0 if the TMF was submitted; the
 * abort CCB is completed later by vtscsi_complete_abort_task_cmd().
 */
static int
vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_abort *cab;
	struct ccb_hdr *ccbh;
	struct ccb_hdr *abort_ccbh;
	struct vtscsi_request *abort_req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	cab = &req->vsr_ccb->cab;
	ccbh = &cab->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* CCB header and request that's to be aborted. */
	abort_ccbh = &cab->abort_ccb->ccb_h;
	abort_req = abort_ccbh->ccbh_vtscsi_req;

	if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
		error = EINVAL;
		goto fail;
	}

	/* Only attempt to abort requests that could be in-flight. */
	if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
		error = EALREADY;
		goto fail;
	}

	/* Mark the target aborted and disarm its timeout before the
	 * TMF races with a timeout-driven abort. */
	abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
	if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&abort_req->vsr_callout);

	/* Tag the TMF with the CCB pointer used as the command tag. */
	vtscsi_init_ctrl_tmf_req(sc, ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) abort_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_abort_task_cmd;
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

fail:
	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
	    "abort_req=%p\n", error, req, abort_ccbh, abort_req);

	return (error);
}
1546
1547static void
1548vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
1549    struct vtscsi_request *req)
1550{
1551	union ccb *ccb;
1552	struct ccb_hdr *ccbh;
1553	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1554
1555	ccb = req->vsr_ccb;
1556	ccbh = &ccb->ccb_h;
1557	tmf_resp = &req->vsr_tmf_resp;
1558
1559	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
1560	    req, ccb, tmf_resp->response);
1561
1562	if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
1563		ccbh->status = CAM_REQ_CMP;
1564		vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
1565		    ccbh->target_lun);
1566	} else
1567		ccbh->status = CAM_REQ_CMP_ERR;
1568
1569	xpt_done(ccb);
1570	vtscsi_enqueue_request(sc, req);
1571}
1572
/*
 * Handle an XPT_RESET_DEV CCB by sending the appropriate reset TMF:
 * an I_T nexus reset when the LUN is wildcarded, otherwise a logical
 * unit reset. The CCB is completed asynchronously by
 * vtscsi_complete_reset_dev_cmd().
 */
static int
vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_resetdev *crd;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	uint32_t subtype;
	int error;

	sg = sc->vtscsi_sglist;
	crd = &req->vsr_ccb->crd;
	ccbh = &crd->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	if (ccbh->target_lun == CAM_LUN_WILDCARD)
		subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
	else
		subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;

	vtscsi_init_ctrl_tmf_req(sc, ccbh, subtype, 0, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_reset_dev_cmd;
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
	    error, req, ccbh);

	return (error);
}
1613
1614static void
1615vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
1616{
1617
1618	*target_id = lun[1];
1619	*lun_id = (lun[2] << 8) | lun[3];
1620}
1621
/*
 * Encode a CAM target/LUN into the first four bytes of the virtio-scsi
 * 8-byte LUN address using single-level flat addressing: byte 0 is
 * always one, byte 1 is the target, and bytes 2-3 carry the 14-bit LUN
 * with the flat-addressing marker (0x40) in the upper byte.
 */
static void
vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
{

	lun[0] = 1;
	lun[1] = ccbh->target_id;
	lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
	lun[3] = ccbh->target_lun & 0xFF;
}
1631
/*
 * Populate a virtio-scsi command request header from a SCSI I/O CCB:
 * LUN address, tag (the CCB pointer itself), task attribute, and CDB.
 */
static void
vtscsi_init_scsi_cmd_req(struct vtscsi_softc *sc, struct ccb_scsiio *csio,
    struct virtio_scsi_cmd_req *cmd_req)
{
	uint8_t attr;

	/* Map the CAM tag action onto the virtio task attribute. */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		attr = VIRTIO_SCSI_S_HEAD;
		break;
	case MSG_ORDERED_Q_TAG:
		attr = VIRTIO_SCSI_S_ORDERED;
		break;
	case MSG_ACA_TASK:
		attr = VIRTIO_SCSI_S_ACA;
		break;
	default: /* MSG_SIMPLE_Q_TAG */
		attr = VIRTIO_SCSI_S_SIMPLE;
		break;
	}

	vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
	/* The CCB pointer doubles as the unique command tag. */
	cmd_req->tag = vtscsi_gtoh64(sc, (uintptr_t) csio);
	cmd_req->task_attr = attr;

	/* The CDB may be stored inline in the CCB or behind a pointer. */
	memcpy(cmd_req->cdb,
	    csio->ccb_h.flags & CAM_CDB_POINTER ?
	        csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
	    csio->cdb_len);
}
1662
/*
 * Populate a task management request: LUN address from the CCB,
 * TMF type/subtype, and the tag of the command it refers to
 * (guest-endian converted).
 */
static void
vtscsi_init_ctrl_tmf_req(struct vtscsi_softc *sc, struct ccb_hdr *ccbh,
    uint32_t subtype, uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
{

	vtscsi_set_request_lun(ccbh, tmf_req->lun);

	tmf_req->type = vtscsi_gtoh32(sc, VIRTIO_SCSI_T_TMF);
	tmf_req->subtype = vtscsi_gtoh32(sc, subtype);
	tmf_req->tag = vtscsi_gtoh64(sc, tag);
}
1674
/*
 * Record a resource shortage (out of requests and/or virtqueue space)
 * and freeze the CAM SIMQ on the transition from unfrozen to frozen.
 * The matching thaw happens in vtscsi_thaw_simq().
 */
static void
vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
{
	int frozen;

	/* Remember the prior state to detect the 0 -> nonzero edge. */
	frozen = sc->vtscsi_frozen;

	if (reason & VTSCSI_REQUEST &&
	    (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
		sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;

	if (reason & VTSCSI_REQUEST_VQ &&
	    (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
		sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;

	/* Freeze the SIMQ if transitioned to frozen. */
	if (frozen == 0 && sc->vtscsi_frozen != 0) {
		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
		xpt_freeze_simq(sc->vtscsi_sim, 1);
	}
}
1696
/*
 * Clear the given freeze reason(s). Returns nonzero when all reasons
 * have been cleared, meaning the caller should release the SIMQ
 * (via xpt_release_simq() or CAM_RELEASE_SIMQ on a completing CCB).
 */
static int
vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
{
	int thawed;

	if (sc->vtscsi_frozen == 0 || reason == 0)
		return (0);

	if (reason & VTSCSI_REQUEST &&
	    sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;

	if (reason & VTSCSI_REQUEST_VQ &&
	    sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;

	thawed = sc->vtscsi_frozen == 0;
	if (thawed != 0)
		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");

	return (thawed);
}
1719
/*
 * Send an asynchronous event notification (e.g. AC_SENT_BDR,
 * AC_BUS_RESET) on the path for the given target/LUN, creating a
 * temporary path when the event is not bus-wide.
 */
static void
vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code,
    target_id_t target_id, lun_id_t lun_id)
{
	struct cam_path *path;

	/* Use the wildcard path from our softc for bus announcements. */
	if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) {
		xpt_async(ac_code, sc->vtscsi_path, NULL);
		return;
	}

	if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim),
	    target_id, lun_id) != CAM_REQ_CMP) {
		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n");
		return;
	}

	xpt_async(ac_code, path, NULL);
	xpt_free_path(path);
}
1741
/*
 * Kick off an asynchronous CAM rescan of the given target/LUN.
 * Failures are silently dropped (beyond a debug message) since a
 * rescan is best-effort. xpt_rescan() takes ownership of the CCB.
 */
static void
vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id,
    lun_id_t lun_id)
{
	union ccb *ccb;
	cam_status status;

	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
		return;
	}

	status = xpt_create_path(&ccb->ccb_h.path, NULL,
	    cam_sim_path(sc->vtscsi_sim), target_id, lun_id);
	if (status != CAM_REQ_CMP) {
		xpt_free_ccb(ccb);
		return;
	}

	xpt_rescan(ccb);
}
1764
/* Rescan the entire bus using wildcard target and LUN. */
static void
vtscsi_execute_rescan_bus(struct vtscsi_softc *sc)
{

	vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
}
1771
/*
 * Handle a VIRTIO_SCSI_T_TRANSPORT_RESET event from the host.
 * Both rescan and removal reasons are handled the same way: rescan
 * the affected target/LUN so CAM discovers the change.
 */
static void
vtscsi_transport_reset_event(struct vtscsi_softc *sc,
    struct virtio_scsi_event *event)
{
	target_id_t target_id;
	lun_id_t lun_id;

	vtscsi_get_request_lun(event->lun, &target_id, &lun_id);

	switch (event->reason) {
	case VIRTIO_SCSI_EVT_RESET_RESCAN:
	case VIRTIO_SCSI_EVT_RESET_REMOVED:
		vtscsi_execute_rescan(sc, target_id, lun_id);
		break;
	default:
		device_printf(sc->vtscsi_dev,
		    "unhandled transport event reason: %d\n", event->reason);
		break;
	}
}
1792
/*
 * Dispatch one event buffer dequeued from the event virtqueue. If the
 * host reported missed events, fall back to a full bus rescan. The
 * buffer is requeued afterwards so the host always has buffers to
 * post events into.
 */
static void
vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event)
{
	int error;

	if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) {
		switch (event->event) {
		case VIRTIO_SCSI_T_TRANSPORT_RESET:
			vtscsi_transport_reset_event(sc, event);
			break;
		default:
			device_printf(sc->vtscsi_dev,
			    "unhandled event: %d\n", event->event);
			break;
		}
	} else
		vtscsi_execute_rescan_bus(sc);

	/*
	 * This should always be successful since the buffer
	 * was just dequeued.
	 */
	error = vtscsi_enqueue_event_buf(sc, event);
	KASSERT(error == 0,
	    ("cannot requeue event buffer: %d", error));
}
1819
1820static int
1821vtscsi_enqueue_event_buf(struct vtscsi_softc *sc,
1822    struct virtio_scsi_event *event)
1823{
1824	struct sglist *sg;
1825	struct virtqueue *vq;
1826	int size, error;
1827
1828	sg = sc->vtscsi_sglist;
1829	vq = sc->vtscsi_event_vq;
1830	size = sc->vtscsi_event_buf_size;
1831
1832	bzero(event, size);
1833
1834	sglist_reset(sg);
1835	error = sglist_append(sg, event, size);
1836	if (error)
1837		return (error);
1838
1839	error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg);
1840	if (error)
1841		return (error);
1842
1843	virtqueue_notify(vq);
1844
1845	return (0);
1846}
1847
/*
 * Populate the event virtqueue with buffers at attach time. Skipped
 * entirely (returning success) when the host lacks hotplug support.
 * Succeeds as long as at least one buffer was posted.
 */
static int
vtscsi_init_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, size, error;

	/*
	 * The first release of QEMU with VirtIO SCSI support would crash
	 * when attempting to notify the event virtqueue. This was fixed
	 * when hotplug support was added.
	 */
	if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG)
		size = sc->vtscsi_event_buf_size;
	else
		size = 0;

	if (size < sizeof(struct virtio_scsi_event))
		return (0);

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	/*
	 * Even just one buffer is enough. Missed events are
	 * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag.
	 */
	if (i > 0)
		error = 0;

	return (error);
}
1884
/*
 * Repost event buffers after a device reinitialization (bus reset).
 * Mirrors vtscsi_init_event_vq() but asserts instead of failing,
 * since the freshly reinitialized virtqueue must have room.
 */
static void
vtscsi_reinit_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, error;

	if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 ||
	    sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event))
		return;

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	KASSERT(i > 0, ("cannot reinit event vq: %d", error));
}
1905
1906static void
1907vtscsi_drain_event_vq(struct vtscsi_softc *sc)
1908{
1909	struct virtqueue *vq;
1910	int last;
1911
1912	vq = sc->vtscsi_event_vq;
1913	last = 0;
1914
1915	while (virtqueue_drain(vq, &last) != NULL)
1916		;
1917
1918	KASSERT(virtqueue_empty(vq), ("eventvq not empty"));
1919}
1920
/*
 * Service completions on the request and control virtqueues.
 * Caller must hold the softc mutex.
 */
static void
vtscsi_complete_vqs_locked(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_request_vq != NULL)
		vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (sc->vtscsi_control_vq != NULL)
		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
}
1932
/* Locked wrapper around vtscsi_complete_vqs_locked(). */
static void
vtscsi_complete_vqs(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);
	vtscsi_complete_vqs_locked(sc);
	VTSCSI_UNLOCK(sc);
}
1941
/*
 * Terminate a request pulled off a virtqueue during drain: stop or
 * drain its timeout callout, complete its CCB with CAM_NO_HBA (on
 * detach) or CAM_REQUEUE_REQ (on bus reset), and return the request
 * to the free list. Locking differs between the two cases; see below.
 */
static void
vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	union ccb *ccb;
	int detach;

	ccb = req->vsr_ccb;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb);

	/*
	 * The callout must be drained when detaching since the request is
	 * about to be freed. The VTSCSI_MTX must not be held for this in
	 * case the callout is pending because there is a deadlock potential.
	 * Otherwise, the virtqueue is being drained because of a bus reset
	 * so we only need to attempt to stop the callouts.
	 */
	detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0;
	if (detach != 0)
		VTSCSI_LOCK_NOTOWNED(sc);
	else
		VTSCSI_LOCK_OWNED(sc);

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) {
		if (detach != 0)
			callout_drain(&req->vsr_callout);
		else
			callout_stop(&req->vsr_callout);
	}

	if (ccb != NULL) {
		if (detach != 0) {
			/* Take the lock just for completing the CCB. */
			VTSCSI_LOCK(sc);
			ccb->ccb_h.status = CAM_NO_HBA;
		} else
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		if (detach != 0)
			VTSCSI_UNLOCK(sc);
	}

	vtscsi_enqueue_request(sc, req);
}
1985
1986static void
1987vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
1988{
1989	struct vtscsi_request *req;
1990	int last;
1991
1992	last = 0;
1993
1994	vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq);
1995
1996	while ((req = virtqueue_drain(vq, &last)) != NULL)
1997		vtscsi_cancel_request(sc, req);
1998
1999	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
2000}
2001
/*
 * Drain all three virtqueues. The control queue is drained first so
 * any TMF referencing a request on the request queue is gone before
 * the referenced request is canceled.
 */
static void
vtscsi_drain_vqs(struct vtscsi_softc *sc)
{

	if (sc->vtscsi_control_vq != NULL)
		vtscsi_drain_vq(sc, sc->vtscsi_control_vq);
	if (sc->vtscsi_request_vq != NULL)
		vtscsi_drain_vq(sc, sc->vtscsi_request_vq);
	if (sc->vtscsi_event_vq != NULL)
		vtscsi_drain_event_vq(sc);
}
2013
/* Quiesce the device: mask virtqueue interrupts, then stop it. */
static void
vtscsi_stop(struct vtscsi_softc *sc)
{

	vtscsi_disable_vqs_intr(sc);
	virtio_stop(sc->vtscsi_dev);
}
2021
/*
 * Perform a full bus reset: stop the device, complete and drain all
 * outstanding requests (so CAM requeues them), thaw the SIMQ, and
 * reinitialize the device. Caller must hold the softc mutex.
 * Can be disabled via the vtscsi_bus_reset_disable tunable.
 */
static int
vtscsi_reset_bus(struct vtscsi_softc *sc)
{
	int error;

	VTSCSI_LOCK_OWNED(sc);

	if (vtscsi_bus_reset_disable != 0) {
		device_printf(sc->vtscsi_dev, "bus reset disabled\n");
		return (0);
	}

	/* Flag checked elsewhere to avoid redundant resets. */
	sc->vtscsi_flags |= VTSCSI_FLAG_RESET;

	/*
	 * vtscsi_stop() will cause the in-flight requests to be canceled.
	 * Those requests are then completed here so CAM will retry them
	 * after the reset is complete.
	 */
	vtscsi_stop(sc);
	vtscsi_complete_vqs_locked(sc);

	/* Rid the virtqueues of any remaining requests. */
	vtscsi_drain_vqs(sc);

	/*
	 * Any resource shortage that froze the SIMQ cannot persist across
	 * a bus reset so ensure it gets thawed here.
	 */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		xpt_release_simq(sc->vtscsi_sim, 0);

	error = vtscsi_reinit(sc);
	if (error) {
		device_printf(sc->vtscsi_dev,
		    "reinitialization failed, stopping device...\n");
		vtscsi_stop(sc);
	} else
		vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
		    CAM_LUN_WILDCARD);

	sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET;

	return (error);
}
2067
/*
 * One-time initialization of a request structure: back-pointer to the
 * softc and a mutex-protected callout for command timeouts. The
 * INVARIANTS check verifies that the request and response unions each
 * fit in a single physical page, since they are mapped as a single
 * scatter/gather segment.
 */
static void
vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

#ifdef INVARIANTS
	int req_nsegs, resp_nsegs;

	req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
	resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));

	KASSERT(req_nsegs == 1, ("request crossed page boundary"));
	KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
#endif

	req->vsr_softc = sc;
	callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0);
}
2085
/*
 * Allocate the pool of request structures shared by the request and
 * control virtqueues. Returns ENOMEM on failure; already-allocated
 * requests remain on the free list for the caller's cleanup path.
 */
static int
vtscsi_alloc_requests(struct vtscsi_softc *sc)
{
	struct vtscsi_request *req;
	int i, nreqs;

	/*
	 * Commands destined for either the request or control queues come
	 * from the same SIM queue. Use the size of the request virtqueue
	 * as it (should) be much more frequently used. Some additional
	 * requests are allocated for internal (TMF) use.
	 */
	nreqs = virtqueue_size(sc->vtscsi_request_vq);
	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0)
		nreqs /= VTSCSI_MIN_SEGMENTS;
	nreqs += VTSCSI_RESERVED_REQUESTS;

	for (i = 0; i < nreqs; i++) {
		req = malloc(sizeof(struct vtscsi_request), M_DEVBUF,
		    M_NOWAIT);
		if (req == NULL)
			return (ENOMEM);

		vtscsi_init_request(sc, req);

		sc->vtscsi_nrequests++;
		/* Also zeroes the request and places it on the free list. */
		vtscsi_enqueue_request(sc, req);
	}

	return (0);
}
2117
2118static void
2119vtscsi_free_requests(struct vtscsi_softc *sc)
2120{
2121	struct vtscsi_request *req;
2122
2123	while ((req = vtscsi_dequeue_request(sc)) != NULL) {
2124		KASSERT(callout_active(&req->vsr_callout) == 0,
2125		    ("request callout still active"));
2126
2127		sc->vtscsi_nrequests--;
2128		free(req, M_DEVBUF);
2129	}
2130
2131	KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d",
2132	    sc->vtscsi_nrequests));
2133}
2134
/*
 * Return a request to the free list, scrubbing its per-command state.
 * Since a request just became available, the SIMQ may be released.
 */
static void
vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

	KASSERT(req->vsr_softc == sc,
	    ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc));

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);

	/* A request is available so the SIMQ could be released. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0)
		xpt_release_simq(sc->vtscsi_sim, 1);

	req->vsr_ccb = NULL;
	req->vsr_complete = NULL;
	req->vsr_ptr0 = NULL;
	req->vsr_state = VTSCSI_REQ_STATE_FREE;
	req->vsr_flags = 0;

	/* Clear the virtio request/response unions for the next user. */
	bzero(&req->vsr_ureq, sizeof(req->vsr_ureq));
	bzero(&req->vsr_uresp, sizeof(req->vsr_uresp));

	/*
	 * We insert at the tail of the queue in order to make it
	 * very unlikely a request will be reused if we race with
	 * stopping its callout handler.
	 */
	TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link);
}
2164
2165static struct vtscsi_request *
2166vtscsi_dequeue_request(struct vtscsi_softc *sc)
2167{
2168	struct vtscsi_request *req;
2169
2170	req = TAILQ_FIRST(&sc->vtscsi_req_free);
2171	if (req != NULL) {
2172		req->vsr_state = VTSCSI_REQ_STATE_INUSE;
2173		TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link);
2174	} else
2175		sc->vtscsi_stats.dequeue_no_requests++;
2176
2177	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
2178
2179	return (req);
2180}
2181
2182static void
2183vtscsi_complete_request(struct vtscsi_request *req)
2184{
2185
2186	if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED)
2187		req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE;
2188
2189	if (req->vsr_complete != NULL)
2190		req->vsr_complete(req->vsr_softc, req);
2191}
2192
/*
 * Complete every finished request on a virtqueue.
 * Caller must hold the softc mutex.
 */
static void
vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
{
	struct vtscsi_request *req;

	VTSCSI_LOCK_OWNED(sc);

	while ((req = virtqueue_dequeue(vq, NULL)) != NULL)
		vtscsi_complete_request(req);
}
2203
/*
 * Interrupt handler for the control virtqueue. The enable/disable
 * dance at the bottom closes the race where the host posts another
 * completion between the final dequeue and re-enabling interrupts.
 */
static void
vtscsi_control_vq_intr(void *xsc)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;

	sc = xsc;
	vq = sc->vtscsi_control_vq;

again:
	VTSCSI_LOCK(sc);

	vtscsi_complete_vq(sc, sc->vtscsi_control_vq);

	if (virtqueue_enable_intr(vq) != 0) {
		/* More work appeared; process it with interrupts masked. */
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		goto again;
	}

	VTSCSI_UNLOCK(sc);
}
2226
/*
 * Interrupt handler for the event virtqueue: process each posted
 * event and requeue its buffer. Uses the same re-enable retry loop
 * as the other queue handlers to avoid losing a completion race.
 */
static void
vtscsi_event_vq_intr(void *xsc)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;
	struct virtio_scsi_event *event;

	sc = xsc;
	vq = sc->vtscsi_event_vq;

again:
	VTSCSI_LOCK(sc);

	while ((event = virtqueue_dequeue(vq, NULL)) != NULL)
		vtscsi_handle_event(sc, event);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		goto again;
	}

	VTSCSI_UNLOCK(sc);
}
2251
/*
 * Interrupt handler for the request virtqueue; completes finished
 * SCSI commands. Same completion-race retry loop as the control and
 * event queue handlers.
 */
static void
vtscsi_request_vq_intr(void *xsc)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;

	sc = xsc;
	vq = sc->vtscsi_request_vq;

again:
	VTSCSI_LOCK(sc);

	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		goto again;
	}

	VTSCSI_UNLOCK(sc);
}
2274
/* Mask interrupts on all three virtqueues. */
static void
vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_disable_intr(sc->vtscsi_control_vq);
	virtqueue_disable_intr(sc->vtscsi_event_vq);
	virtqueue_disable_intr(sc->vtscsi_request_vq);
}
2283
/* Unmask interrupts on all three virtqueues. */
static void
vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_enable_intr(sc->vtscsi_control_vq);
	virtqueue_enable_intr(sc->vtscsi_event_vq);
	virtqueue_enable_intr(sc->vtscsi_request_vq);
}
2292
2293static void
2294vtscsi_get_tunables(struct vtscsi_softc *sc)
2295{
2296	char tmpstr[64];
2297
2298	TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);
2299
2300	snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
2301	    device_get_unit(sc->vtscsi_dev));
2302	TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
2303}
2304
2305static void
2306vtscsi_setup_sysctl(struct vtscsi_softc *sc)
2307{
2308	device_t dev;
2309	struct vtscsi_statistics *stats;
2310        struct sysctl_ctx_list *ctx;
2311	struct sysctl_oid *tree;
2312	struct sysctl_oid_list *child;
2313
2314	dev = sc->vtscsi_dev;
2315	stats = &sc->vtscsi_stats;
2316	ctx = device_get_sysctl_ctx(dev);
2317	tree = device_get_sysctl_tree(dev);
2318	child = SYSCTL_CHILDREN(tree);
2319
2320	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
2321	    CTLFLAG_RW, &sc->vtscsi_debug, 0,
2322	    "Debug level");
2323
2324	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
2325	    CTLFLAG_RD, &stats->scsi_cmd_timeouts,
2326	    "SCSI command timeouts");
2327	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
2328	    CTLFLAG_RD, &stats->dequeue_no_requests,
2329	    "No available requests to dequeue");
2330}
2331
/*
 * Debug printf for a request: prefixes the message with the device
 * name, calling function, and either the CAM path of the request's
 * CCB (plus the SCSI command for XPT_SCSI_IO) or a "noperiph" tag
 * when no CCB is attached. Formats into a stack sbuf.
 */
static void
vtscsi_printf_req(struct vtscsi_request *req, const char *func,
    const char *fmt, ...)
{
	struct vtscsi_softc *sc;
	union ccb *ccb;
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (req == NULL)
		return;

	sc = req->vsr_softc;
	ccb = req->vsr_ccb;

	va_start(ap, fmt);
	sbuf_new(&sb, str, sizeof(str), 0);

	if (ccb == NULL) {
		sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
		    cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
		    cam_sim_bus(sc->vtscsi_sim));
	} else {
		xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
		}
	}

	sbuf_vprintf(&sb, fmt, ap);
	va_end(ap);

	sbuf_finish(&sb);
	printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,
	    sbuf_data(&sb));
}
2372