/* virtio_scsi.c — FreeBSD stable/11, revision 349693 */
1/*-
2 * Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice unmodified, this list of conditions, and the following
10 *    disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27/* Driver for VirtIO SCSI devices. */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: stable/11/sys/dev/virtio/scsi/virtio_scsi.c 349693 2019-07-03 19:54:56Z vangyzen $");
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/kernel.h>
35#include <sys/kthread.h>
36#include <sys/malloc.h>
37#include <sys/module.h>
38#include <sys/sglist.h>
39#include <sys/sysctl.h>
40#include <sys/lock.h>
41#include <sys/mutex.h>
42#include <sys/callout.h>
43#include <sys/queue.h>
44#include <sys/sbuf.h>
45
46#include <machine/stdarg.h>
47
48#include <machine/bus.h>
49#include <machine/resource.h>
50#include <sys/bus.h>
51#include <sys/rman.h>
52
53#include <cam/cam.h>
54#include <cam/cam_ccb.h>
55#include <cam/cam_sim.h>
56#include <cam/cam_periph.h>
57#include <cam/cam_xpt_sim.h>
58#include <cam/cam_debug.h>
59#include <cam/scsi/scsi_all.h>
60#include <cam/scsi/scsi_message.h>
61
62#include <dev/virtio/virtio.h>
63#include <dev/virtio/virtqueue.h>
64#include <dev/virtio/scsi/virtio_scsi.h>
65#include <dev/virtio/scsi/virtio_scsivar.h>
66
67#include "virtio_if.h"
68
69static int	vtscsi_modevent(module_t, int, void *);
70
71static int	vtscsi_probe(device_t);
72static int	vtscsi_attach(device_t);
73static int	vtscsi_detach(device_t);
74static int	vtscsi_suspend(device_t);
75static int	vtscsi_resume(device_t);
76
77static void	vtscsi_negotiate_features(struct vtscsi_softc *);
78static void	vtscsi_read_config(struct vtscsi_softc *,
79		    struct virtio_scsi_config *);
80static int	vtscsi_maximum_segments(struct vtscsi_softc *, int);
81static int	vtscsi_alloc_virtqueues(struct vtscsi_softc *);
82static void	vtscsi_check_sizes(struct vtscsi_softc *);
83static void	vtscsi_write_device_config(struct vtscsi_softc *);
84static int	vtscsi_reinit(struct vtscsi_softc *);
85
86static int	vtscsi_alloc_cam(struct vtscsi_softc *);
87static int	vtscsi_register_cam(struct vtscsi_softc *);
88static void	vtscsi_free_cam(struct vtscsi_softc *);
89static void	vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *);
90static int	vtscsi_register_async(struct vtscsi_softc *);
91static void	vtscsi_deregister_async(struct vtscsi_softc *);
92static void	vtscsi_cam_action(struct cam_sim *, union ccb *);
93static void	vtscsi_cam_poll(struct cam_sim *);
94
95static void	vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *,
96		    union ccb *);
97static void	vtscsi_cam_get_tran_settings(struct vtscsi_softc *,
98		    union ccb *);
99static void	vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *);
100static void	vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *);
101static void	vtscsi_cam_abort(struct vtscsi_softc *, union ccb *);
102static void	vtscsi_cam_path_inquiry(struct vtscsi_softc *,
103		    struct cam_sim *, union ccb *);
104
105static int	vtscsi_sg_append_scsi_buf(struct vtscsi_softc *,
106		    struct sglist *, struct ccb_scsiio *);
107static int	vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *,
108		    struct vtscsi_request *, int *, int *);
109static int	vtscsi_execute_scsi_cmd(struct vtscsi_softc *,
110		    struct vtscsi_request *);
111static int	vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *);
112static void	vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *,
113		    struct vtscsi_request *);
114static int	vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *,
115		    struct vtscsi_request *);
116static void	vtscsi_timedout_scsi_cmd(void *);
117static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *);
118static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *,
119		    struct ccb_scsiio *, struct virtio_scsi_cmd_resp *);
120static void	vtscsi_complete_scsi_cmd(struct vtscsi_softc *,
121		    struct vtscsi_request *);
122
123static void	vtscsi_poll_ctrl_req(struct vtscsi_softc *,
124		    struct vtscsi_request *);
125static int	vtscsi_execute_ctrl_req(struct vtscsi_softc *,
126		    struct vtscsi_request *, struct sglist *, int, int, int);
127static void	vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c,
128		    struct vtscsi_request *);
129static int	vtscsi_execute_abort_task_cmd(struct vtscsi_softc *,
130		    struct vtscsi_request *);
131static int	vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *,
132		    struct vtscsi_request *);
133
134static void	vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
135static void	vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
136static void	vtscsi_init_scsi_cmd_req(struct ccb_scsiio *,
137		    struct virtio_scsi_cmd_req *);
138static void	vtscsi_init_ctrl_tmf_req(struct ccb_hdr *, uint32_t,
139		    uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
140
141static void	vtscsi_freeze_simq(struct vtscsi_softc *, int);
142static int	vtscsi_thaw_simq(struct vtscsi_softc *, int);
143
144static void	vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t,
145		    lun_id_t);
146static void	vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t,
147		    lun_id_t);
148static void	vtscsi_execute_rescan_bus(struct vtscsi_softc *);
149
150static void	vtscsi_handle_event(struct vtscsi_softc *,
151		    struct virtio_scsi_event *);
152static int	vtscsi_enqueue_event_buf(struct vtscsi_softc *,
153		    struct virtio_scsi_event *);
154static int	vtscsi_init_event_vq(struct vtscsi_softc *);
155static void	vtscsi_reinit_event_vq(struct vtscsi_softc *);
156static void	vtscsi_drain_event_vq(struct vtscsi_softc *);
157
158static void	vtscsi_complete_vqs_locked(struct vtscsi_softc *);
159static void	vtscsi_complete_vqs(struct vtscsi_softc *);
160static void	vtscsi_drain_vqs(struct vtscsi_softc *);
161static void	vtscsi_cancel_request(struct vtscsi_softc *,
162		    struct vtscsi_request *);
163static void	vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *);
164static void	vtscsi_stop(struct vtscsi_softc *);
165static int	vtscsi_reset_bus(struct vtscsi_softc *);
166
167static void	vtscsi_init_request(struct vtscsi_softc *,
168		    struct vtscsi_request *);
169static int	vtscsi_alloc_requests(struct vtscsi_softc *);
170static void	vtscsi_free_requests(struct vtscsi_softc *);
171static void	vtscsi_enqueue_request(struct vtscsi_softc *,
172		    struct vtscsi_request *);
173static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *);
174
175static void	vtscsi_complete_request(struct vtscsi_request *);
176static void	vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *);
177
178static void	vtscsi_control_vq_intr(void *);
179static void	vtscsi_event_vq_intr(void *);
180static void	vtscsi_request_vq_intr(void *);
181static void	vtscsi_disable_vqs_intr(struct vtscsi_softc *);
182static void	vtscsi_enable_vqs_intr(struct vtscsi_softc *);
183
184static void	vtscsi_get_tunables(struct vtscsi_softc *);
185static void	vtscsi_add_sysctl(struct vtscsi_softc *);
186
187static void	vtscsi_printf_req(struct vtscsi_request *, const char *,
188		    const char *, ...);
189
/* Global tunables. */
/*
 * The current QEMU VirtIO SCSI implementation does not cancel in-flight
 * IO during virtio_stop(). So in-flight requests still complete after the
 * device reset. We would have to wait for all the in-flight IO to complete,
 * which defeats the typical purpose of a bus reset. We could simulate the
 * bus reset with either I_T_NEXUS_RESET of all the targets, or with
 * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the
 * control virtqueue). But this isn't very useful if things really go off
 * the rails, so default to disabled for now.
 */
static int vtscsi_bus_reset_disable = 1;
/* Loader tunable: set hw.vtscsi.bus_reset_disable=0 to enable bus resets. */
TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable);
203
/* Human-readable names for the VirtIO SCSI feature bits (for boot messages). */
static struct virtio_feature_desc vtscsi_feature_desc[] = {
	{ VIRTIO_SCSI_F_INOUT,		"InOut"		},
	{ VIRTIO_SCSI_F_HOTPLUG,	"Hotplug"	},

	{ 0, NULL }
};

/* Newbus device method table. */
static device_method_t vtscsi_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtscsi_probe),
	DEVMETHOD(device_attach,	vtscsi_attach),
	DEVMETHOD(device_detach,	vtscsi_detach),
	DEVMETHOD(device_suspend,	vtscsi_suspend),
	DEVMETHOD(device_resume,	vtscsi_resume),

	DEVMETHOD_END
};

static driver_t vtscsi_driver = {
	"vtscsi",
	vtscsi_methods,
	sizeof(struct vtscsi_softc)
};
static devclass_t vtscsi_devclass;

/* Attach to the virtio_pci bus; depends on the virtio core and CAM. */
DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass,
    vtscsi_modevent, 0);
MODULE_VERSION(virtio_scsi, 1);
MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);
234
235static int
236vtscsi_modevent(module_t mod, int type, void *unused)
237{
238	int error;
239
240	switch (type) {
241	case MOD_LOAD:
242	case MOD_QUIESCE:
243	case MOD_UNLOAD:
244	case MOD_SHUTDOWN:
245		error = 0;
246		break;
247	default:
248		error = EOPNOTSUPP;
249		break;
250	}
251
252	return (error);
253}
254
255static int
256vtscsi_probe(device_t dev)
257{
258
259	if (virtio_get_device_type(dev) != VIRTIO_ID_SCSI)
260		return (ENXIO);
261
262	device_set_desc(dev, "VirtIO SCSI Adapter");
263
264	return (BUS_PROBE_DEFAULT);
265}
266
/*
 * Newbus attach: negotiate features, read the device configuration,
 * allocate the sglist, virtqueues, requests and CAM structures, then
 * register with CAM.  On any failure vtscsi_detach() tears down
 * whatever was set up so far.
 */
static int
vtscsi_attach(device_t dev)
{
	struct vtscsi_softc *sc;
	struct virtio_scsi_config scsicfg;
	int error;

	sc = device_get_softc(dev);
	sc->vtscsi_dev = dev;

	VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
	TAILQ_INIT(&sc->vtscsi_req_free);

	vtscsi_get_tunables(sc);
	vtscsi_add_sysctl(sc);

	virtio_set_feature_desc(dev, vtscsi_feature_desc);
	vtscsi_negotiate_features(sc);

	/* Cache the negotiated feature bits as softc flags. */
	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
		sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
		sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;

	vtscsi_read_config(sc, &scsicfg);

	sc->vtscsi_max_channel = scsicfg.max_channel;
	sc->vtscsi_max_target = scsicfg.max_target;
	sc->vtscsi_max_lun = scsicfg.max_lun;
	sc->vtscsi_event_buf_size = scsicfg.event_info_size;

	vtscsi_write_device_config(sc);

	sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max);
	sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT);
	if (sc->vtscsi_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtscsi_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	/* Clamp max_nsegs if the request virtqueue turned out too small. */
	vtscsi_check_sizes(sc);

	error = vtscsi_init_event_vq(sc);
	if (error) {
		device_printf(dev, "cannot populate the eventvq\n");
		goto fail;
	}

	error = vtscsi_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot allocate requests\n");
		goto fail;
	}

	error = vtscsi_alloc_cam(sc);
	if (error) {
		device_printf(dev, "cannot allocate CAM structures\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_CAM);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		goto fail;
	}

	vtscsi_enable_vqs_intr(sc);

	/*
	 * Register with CAM after interrupts are enabled so we will get
	 * notified of the probe responses.
	 */
	error = vtscsi_register_cam(sc);
	if (error) {
		device_printf(dev, "cannot register with CAM\n");
		goto fail;
	}

	/* NB: reached on success too; error is 0 then, so detach is skipped. */
fail:
	if (error)
		vtscsi_detach(dev);

	return (error);
}
360
/*
 * Newbus detach; also used by vtscsi_attach() for error unwinding, so
 * every teardown step must tolerate partially-initialized state.
 */
static int
vtscsi_detach(device_t dev)
{
	struct vtscsi_softc *sc;

	sc = device_get_softc(dev);

	VTSCSI_LOCK(sc);
	/* Flag detach first so vtscsi_cam_action() drops new CCBs. */
	sc->vtscsi_flags |= VTSCSI_FLAG_DETACH;
	if (device_is_attached(dev))
		vtscsi_stop(sc);
	VTSCSI_UNLOCK(sc);

	/* Complete and then drain any requests still in the virtqueues. */
	vtscsi_complete_vqs(sc);
	vtscsi_drain_vqs(sc);

	vtscsi_free_cam(sc);
	vtscsi_free_requests(sc);

	if (sc->vtscsi_sglist != NULL) {
		sglist_free(sc->vtscsi_sglist);
		sc->vtscsi_sglist = NULL;
	}

	VTSCSI_LOCK_DESTROY(sc);

	return (0);
}
389
/* Suspend hook: no driver-side state is saved here. */
static int
vtscsi_suspend(device_t dev)
{

	return (0);
}
396
/* Resume hook: nothing is restored here. */
static int
vtscsi_resume(device_t dev)
{

	return (0);
}
403
404static void
405vtscsi_negotiate_features(struct vtscsi_softc *sc)
406{
407	device_t dev;
408	uint64_t features;
409
410	dev = sc->vtscsi_dev;
411	features = virtio_negotiate_features(dev, VTSCSI_FEATURES);
412	sc->vtscsi_features = features;
413}
414
/* Read one field of struct virtio_scsi_config from device config space. */
#define VTSCSI_GET_CONFIG(_dev, _field, _cfg)			\
	virtio_read_device_config(_dev,				\
	    offsetof(struct virtio_scsi_config, _field),	\
	    &(_cfg)->_field, sizeof((_cfg)->_field))		\

/* Populate *scsicfg field-by-field from the device's config space. */
static void
vtscsi_read_config(struct vtscsi_softc *sc,
    struct virtio_scsi_config *scsicfg)
{
	device_t dev;

	dev = sc->vtscsi_dev;

	bzero(scsicfg, sizeof(struct virtio_scsi_config));

	VTSCSI_GET_CONFIG(dev, num_queues, scsicfg);
	VTSCSI_GET_CONFIG(dev, seg_max, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_sectors, scsicfg);
	VTSCSI_GET_CONFIG(dev, cmd_per_lun, scsicfg);
	VTSCSI_GET_CONFIG(dev, event_info_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, sense_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, cdb_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_channel, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_target, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_lun, scsicfg);
}

#undef VTSCSI_GET_CONFIG
443
444static int
445vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
446{
447	int nsegs;
448
449	nsegs = VTSCSI_MIN_SEGMENTS;
450
451	if (seg_max > 0) {
452		nsegs += MIN(seg_max, MAXPHYS / PAGE_SIZE + 1);
453		if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
454			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
455	} else
456		nsegs += 1;
457
458	return (nsegs);
459}
460
/*
 * Allocate the three VirtIO SCSI virtqueues: control, event and
 * request.  Only the request queue carries data segments, so only it
 * needs vtscsi_max_nsegs indirect descriptors.
 */
static int
vtscsi_alloc_virtqueues(struct vtscsi_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[3];
	int nvqs;

	dev = sc->vtscsi_dev;
	nvqs = 3;

	VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc,
	    &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc,
	    &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs,
	    vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}
483
484static void
485vtscsi_check_sizes(struct vtscsi_softc *sc)
486{
487	int rqsize;
488
489	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0) {
490		/*
491		 * Ensure the assertions in virtqueue_enqueue(),
492		 * even if the hypervisor reports a bad seg_max.
493		 */
494		rqsize = virtqueue_size(sc->vtscsi_request_vq);
495		if (sc->vtscsi_max_nsegs > rqsize) {
496			device_printf(sc->vtscsi_dev,
497			    "clamping seg_max (%d %d)\n", sc->vtscsi_max_nsegs,
498			    rqsize);
499			sc->vtscsi_max_nsegs = rqsize;
500		}
501	}
502}
503
504static void
505vtscsi_write_device_config(struct vtscsi_softc *sc)
506{
507
508	virtio_write_dev_config_4(sc->vtscsi_dev,
509	    offsetof(struct virtio_scsi_config, sense_size),
510	    VIRTIO_SCSI_SENSE_SIZE);
511
512	/*
513	 * This is the size in the virtio_scsi_cmd_req structure. Note
514	 * this value (32) is larger than the maximum CAM CDB size (16).
515	 */
516	virtio_write_dev_config_4(sc->vtscsi_dev,
517	    offsetof(struct virtio_scsi_config, cdb_size),
518	    VIRTIO_SCSI_CDB_SIZE);
519}
520
/*
 * Reinitialize the device after a reset, replaying the previously
 * negotiated features, rewriting the config fields, repopulating the
 * event virtqueue and re-enabling interrupts.
 */
static int
vtscsi_reinit(struct vtscsi_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->vtscsi_dev;

	error = virtio_reinit(dev, sc->vtscsi_features);
	if (error == 0) {
		vtscsi_write_device_config(sc);
		vtscsi_reinit_event_vq(sc);
		virtio_reinit_complete(dev);

		vtscsi_enable_vqs_intr(sc);
	}

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error);

	return (error);
}
542
/*
 * Allocate the CAM SIM and its device queue.  The number of openings
 * leaves VTSCSI_RESERVED_REQUESTS requests free for task-management
 * commands (aborts, resets).
 */
static int
vtscsi_alloc_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	struct cam_devq *devq;
	int openings;

	dev = sc->vtscsi_dev;
	openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS;

	devq = cam_simq_alloc(openings);
	if (devq == NULL) {
		device_printf(dev, "cannot allocate SIM queue\n");
		return (ENOMEM);
	}

	/* The SIM shares the softc mutex; cam_sim_free() will free devq. */
	sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll,
	    "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1,
	    openings, devq);
	if (sc->vtscsi_sim == NULL) {
		cam_simq_free(devq);
		device_printf(dev, "cannot allocate SIM\n");
		return (ENOMEM);
	}

	return (0);
}
570
/*
 * Register the SIM with the XPT layer, create the wildcard bus path
 * and subscribe to async events.  Runs with the softc lock held; on
 * failure, undoes the steps that completed.
 */
static int
vtscsi_register_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	int registered, error;

	dev = sc->vtscsi_dev;
	registered = 0;

	VTSCSI_LOCK(sc);

	if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) {
		error = ENOMEM;
		device_printf(dev, "cannot register XPT bus\n");
		goto fail;
	}

	registered = 1;

	if (xpt_create_path(&sc->vtscsi_path, NULL,
	    cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		error = ENOMEM;
		device_printf(dev, "cannot create bus path\n");
		goto fail;
	}

	if (vtscsi_register_async(sc) != CAM_REQ_CMP) {
		error = EIO;
		device_printf(dev, "cannot register async callback\n");
		goto fail;
	}

	VTSCSI_UNLOCK(sc);

	return (0);

fail:
	/* Unwind in reverse order of the steps that succeeded. */
	if (sc->vtscsi_path != NULL) {
		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;
	}

	if (registered != 0)
		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));

	VTSCSI_UNLOCK(sc);

	return (error);
}
621
/*
 * Tear down the CAM registration: async callback, bus path, XPT bus
 * and finally the SIM itself.  Safe on partially-registered state.
 */
static void
vtscsi_free_cam(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);

	if (sc->vtscsi_path != NULL) {
		vtscsi_deregister_async(sc);

		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;

		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
	}

	if (sc->vtscsi_sim != NULL) {
		/* The '1' also frees the devq allocated in vtscsi_alloc_cam(). */
		cam_sim_free(sc->vtscsi_sim, 1);
		sc->vtscsi_sim = NULL;
	}

	VTSCSI_UNLOCK(sc);
}
644
/*
 * CAM async event callback.  Currently only traces the event code;
 * the switch is a placeholder for future event subscriptions.
 */
static void
vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg)
{
	struct cam_sim *sim;
	struct vtscsi_softc *sc;

	sim = cb_arg;
	sc = cam_sim_softc(sim);

	vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code);

	/*
	 * TODO Once QEMU supports event reporting, we should
	 *      (un)subscribe to events here.
	 */
	switch (code) {
	case AC_FOUND_DEVICE:
		break;
	case AC_LOST_DEVICE:
		break;
	}
}
667
/*
 * Subscribe vtscsi_cam_async() to device found/lost events on the
 * wildcard path.  Returns the CCB's cam_status (CAM_REQ_CMP on
 * success).
 */
static int
vtscsi_register_async(struct vtscsi_softc *sc)
{
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE;
	csa.callback = vtscsi_cam_async;
	csa.callback_arg = sc->vtscsi_sim;

	xpt_action((union ccb *) &csa);

	return (csa.ccb_h.status);
}
683
/* Cancel the async subscription by re-registering with no events enabled. */
static void
vtscsi_deregister_async(struct vtscsi_softc *sc)
{
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = vtscsi_cam_async;
	csa.callback_arg = sc->vtscsi_sim;

	xpt_action((union ccb *) &csa);
}
697
/*
 * Main CAM action entry point: dispatch incoming CCBs by function
 * code.  Called with the softc lock held (the SIM shares our mutex).
 */
static void
vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct vtscsi_softc *sc;
	struct ccb_hdr *ccbh;

	sc = cam_sim_softc(sim);
	ccbh = &ccb->ccb_h;

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) {
		/*
		 * The VTSCSI_MTX is briefly dropped between setting
		 * VTSCSI_FLAG_DETACH and deregistering with CAM, so
		 * drop any CCBs that come in during that window.
		 */
		ccbh->status = CAM_NO_HBA;
		xpt_done(ccb);
		return;
	}

	switch (ccbh->func_code) {
	case XPT_SCSI_IO:
		vtscsi_cam_scsi_io(sc, sim, ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* Transfer settings are fixed; nothing to set. */
		ccbh->status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
		vtscsi_cam_get_tran_settings(sc, ccb);
		break;

	case XPT_RESET_BUS:
		vtscsi_cam_reset_bus(sc, ccb);
		break;

	case XPT_RESET_DEV:
		vtscsi_cam_reset_dev(sc, ccb);
		break;

	case XPT_ABORT:
		vtscsi_cam_abort(sc, ccb);
		break;

	case XPT_CALC_GEOMETRY:
		/* Use CAM's default geometry with extended translation. */
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:
		vtscsi_cam_path_inquiry(sc, sim, ccb);
		break;

	default:
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code);

		ccbh->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
764
/*
 * CAM poll callback: process virtqueue completions without relying on
 * interrupts (used e.g. while dumping).
 */
static void
vtscsi_cam_poll(struct cam_sim *sim)
{

	vtscsi_complete_vqs_locked(cam_sim_softc(sim));
}
774
/*
 * Validate and start an XPT_SCSI_IO CCB.  On any error, the CCB is
 * completed here with the status set by the failing step.
 */
static void
vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	int error;

	ccbh = &ccb->ccb_h;
	csio = &ccb->csio;

	/* The CDB must fit in the fixed field of virtio_scsi_cmd_req. */
	if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) {
		error = EINVAL;
		ccbh->status = CAM_REQ_INVALID;
		goto done;
	}

	/* Bidirectional transfers require the negotiated INOUT feature. */
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH &&
	    (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) {
		error = EINVAL;
		ccbh->status = CAM_REQ_INVALID;
		goto done;
	}

	error = vtscsi_start_scsi_cmd(sc, ccb);

done:
	if (error) {
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status);
		xpt_done(ccb);
	}
}
808
809static void
810vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb)
811{
812	struct ccb_trans_settings *cts;
813	struct ccb_trans_settings_scsi *scsi;
814
815	cts = &ccb->cts;
816	scsi = &cts->proto_specific.scsi;
817
818	cts->protocol = PROTO_SCSI;
819	cts->protocol_version = SCSI_REV_SPC3;
820	cts->transport = XPORT_SAS;
821	cts->transport_version = 0;
822
823	scsi->valid = CTS_SCSI_VALID_TQ;
824	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
825
826	ccb->ccb_h.status = CAM_REQ_CMP;
827	xpt_done(ccb);
828}
829
830static void
831vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb)
832{
833	int error;
834
835	error = vtscsi_reset_bus(sc);
836	if (error == 0)
837		ccb->ccb_h.status = CAM_REQ_CMP;
838	else
839		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
840
841	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n",
842	    error, ccb, ccb->ccb_h.status);
843
844	xpt_done(ccb);
845}
846
/*
 * Handle XPT_RESET_DEV by issuing a LUN reset through the control
 * virtqueue.  On success the CCB is completed asynchronously by the
 * TMF completion handler.
 */
static void
vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct vtscsi_request *req;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_reset_dev_cmd(sc, req);
	if (error == 0)
		return;

	/* Submission failed; return the request to the free list. */
	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
882
/*
 * Handle XPT_ABORT by issuing an ABORT TASK TMF through the control
 * virtqueue.  On success the CCB is completed asynchronously by the
 * TMF completion handler.
 */
static void
vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct vtscsi_request *req;
	struct ccb_hdr *ccbh;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_abort_task_cmd(sc, req);
	if (error == 0)
		return;

	/* Submission failed; return the request to the free list. */
	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
918
/*
 * Handle XPT_PATH_INQ: describe the HBA's capabilities and limits
 * to CAM.
 */
static void
vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	device_t dev;
	struct ccb_pathinq *cpi;

	dev = sc->vtscsi_dev;
	cpi = &ccb->cpi;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb);

	cpi->version_num = 1;
	cpi->hba_inquiry = PI_TAG_ABLE;
	cpi->target_sprt = 0;
	cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
	if (vtscsi_bus_reset_disable != 0)
		cpi->hba_misc |= PIM_NOBUSRESET;
	cpi->hba_eng_cnt = 0;

	cpi->max_target = sc->vtscsi_max_target;
	cpi->max_lun = sc->vtscsi_max_lun;
	cpi->initiator_id = VTSCSI_INITIATOR_ID;

	strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
	strlcpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
	strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);

	cpi->unit_number = cam_sim_unit(sim);
	cpi->bus_id = cam_sim_bus(sim);

	cpi->base_transfer_speed = 300000;

	cpi->protocol = PROTO_SCSI;
	cpi->protocol_version = SCSI_REV_SPC3;
	cpi->transport = XPORT_SAS;
	cpi->transport_version = 0;

	/*
	 * Largest data transfer: segments left over after the request
	 * and response headers, minus one for an unaligned buffer.
	 */
	cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) *
	    PAGE_SIZE;

	cpi->hba_vendor = virtio_get_vendor(dev);
	cpi->hba_device = virtio_get_device(dev);
	cpi->hba_subvendor = virtio_get_subvendor(dev);
	cpi->hba_subdevice = virtio_get_subdevice(dev);

	ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(ccb);
}
968
/*
 * Append the CCB's data buffer(s) to the sglist, handling each of the
 * CAM data-representation formats.  Returns 0 or an errno from the
 * sglist routines (e.g. EFBIG when the list is full).
 */
static int
vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
    struct ccb_scsiio *csio)
{
	struct ccb_hdr *ccbh;
	struct bus_dma_segment *dseg;
	int i, error;

	ccbh = &csio->ccb_h;
	error = 0;

	switch ((ccbh->flags & CAM_DATA_MASK)) {
	case CAM_DATA_VADDR:
		/* Single virtually-contiguous buffer. */
		error = sglist_append(sg, csio->data_ptr, csio->dxfer_len);
		break;
	case CAM_DATA_PADDR:
		/* Single physically-contiguous buffer. */
		error = sglist_append_phys(sg,
		    (vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len);
		break;
	case CAM_DATA_SG:
		/* Scatter/gather list of virtual addresses. */
		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
			error = sglist_append(sg,
			    (void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len);
		}
		break;
	case CAM_DATA_SG_PADDR:
		/* Scatter/gather list of physical addresses. */
		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
			error = sglist_append_phys(sg,
			    (vm_paddr_t) dseg->ds_addr, dseg->ds_len);
		}
		break;
	case CAM_DATA_BIO:
		/* Unmapped bio (PIM_UNMAPPED is advertised in path inquiry). */
		error = sglist_append_bio(sg, (struct bio *) csio->data_ptr);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
1012
/*
 * Build the shared sglist for a SCSI command: the request header and
 * any host-readable data first, then the response header and any
 * host-writable data.  On return, *readable and *writable hold the
 * segment counts for virtqueue_enqueue().
 */
static int
vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req,
    int *readable, int *writable)
{
	struct sglist *sg;
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int error;

	sg = sc->vtscsi_sglist;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	sglist_reset(sg);

	sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		/* At least one segment must be left for the response. */
		if (error || sg->sg_nseg == sg->sg_maxseg)
			goto fail;
	}

	*readable = sg->sg_nseg;

	sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		if (error)
			goto fail;
	}

	*writable = sg->sg_nseg - *readable;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d "
	    "writable=%d\n", req, ccbh, *readable, *writable);

	return (0);

fail:
	/*
	 * This should never happen unless maxio was incorrectly set.
	 */
	vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0);

	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p "
	    "nseg=%d maxseg=%d\n",
	    error, req, ccbh, sg->sg_nseg, sg->sg_maxseg);

	return (EFBIG);
}
1068
/*
 * Enqueue a SCSI command on the request virtqueue and arm the
 * per-request timeout callout if the CCB asked for one.  On enqueue
 * failure the SIM queue is frozen and the CCB marked for requeue.
 */
static int
vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct sglist *sg;
	struct virtqueue *vq;
	struct ccb_scsiio *csio;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int readable, writable, error;

	sg = sc->vtscsi_sglist;
	vq = sc->vtscsi_request_vq;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	vtscsi_init_scsi_cmd_req(csio, cmd_req);

	error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
	if (error)
		return (error);

	req->vsr_complete = vtscsi_complete_scsi_cmd;
	/* Sentinel so the completion path can detect a missing response. */
	cmd_resp->response = -1;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error) {
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh);

		ccbh->status = CAM_REQUEUE_REQ;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ);
		return (error);
	}

	ccbh->status |= CAM_SIM_QUEUED;
	ccbh->ccbh_vtscsi_req = req;

	virtqueue_notify(vq);

	if (ccbh->timeout != CAM_TIME_INFINITY) {
		req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET;
		/* ccbh->timeout is in milliseconds. */
		callout_reset_sbt(&req->vsr_callout, SBT_1MS * ccbh->timeout,
		    0, vtscsi_timedout_scsi_cmd, req, 0);
	}

	vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n",
	    req, ccbh);

	return (0);
}
1122
1123static int
1124vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb)
1125{
1126	struct vtscsi_request *req;
1127	int error;
1128
1129	req = vtscsi_dequeue_request(sc);
1130	if (req == NULL) {
1131		ccb->ccb_h.status = CAM_REQUEUE_REQ;
1132		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
1133		return (ENOBUFS);
1134	}
1135
1136	req->vsr_ccb = ccb;
1137
1138	error = vtscsi_execute_scsi_cmd(sc, req);
1139	if (error)
1140		vtscsi_enqueue_request(sc, req);
1141
1142	return (error);
1143}
1144
/*
 * Completion handler for the ABORT_TASK TMF issued for a timed out
 * command.  If the abort did not take effect, escalate to a bus reset
 * unless a detach or another reset is already in progress.
 */
static void
vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	struct vtscsi_request *to_req;
	uint8_t response;

	tmf_resp = &req->vsr_tmf_resp;
	response = tmf_resp->response;
	to_req = req->vsr_timedout_req;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n",
	    req, to_req, response);

	/* The TMF request itself is done; recycle it before deciding. */
	vtscsi_enqueue_request(sc, req);

	/*
	 * The timedout request could have completed between when the
	 * abort task was sent and when the host processed it.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT)
		return;

	/* The timedout request was successfully aborted. */
	if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE)
		return;

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/* The timedout request will be aborted by the reset. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_RESET)
		return;

	vtscsi_reset_bus(sc);
}
1183
/*
 * Issue an asynchronous ABORT_TASK TMF for a command that has timed out.
 * On success, vtscsi_complete_abort_timedout_scsi_cmd() finishes the job;
 * on failure the caller falls back to resetting the bus.
 */
static int
vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *to_req)
{
	struct sglist *sg;
	struct ccb_hdr *to_ccbh;
	struct vtscsi_request *req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	to_ccbh = &to_req->vsr_ccb->ccb_h;

	/* The abort itself needs its own request for the control queue. */
	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = ENOBUFS;
		goto fail;
	}

	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* Tag the TMF with the CCB pointer, matching the command's tag. */
	vtscsi_init_ctrl_tmf_req(to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) to_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_timedout_req = to_req;
	req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd;
	/* Poison the response so a missing device write is detectable. */
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);
	if (error == 0)
		return (0);

	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p "
	    "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh);

	return (error);
}
1231
/*
 * Callout handler fired when a SCSI command exceeds its CCB timeout.
 * Runs with the softc mutex held (callout_init_mtx).  Tries, in order:
 * do nothing if the request already finished, abort the task, and as a
 * last resort reset the bus.
 */
static void
vtscsi_timedout_scsi_cmd(void *xreq)
{
	struct vtscsi_softc *sc;
	struct vtscsi_request *to_req;

	to_req = xreq;
	sc = to_req->vsr_softc;

	vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n",
	    to_req, to_req->vsr_ccb, to_req->vsr_state);

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/*
	 * Bail if the request is not in use. We likely raced when
	 * stopping the callout handler or it has already been aborted.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE ||
	    (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0)
		return;

	/*
	 * Complete the request queue in case the timedout request is
	 * actually just pending.
	 */
	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE)
		return;

	sc->vtscsi_stats.scsi_cmd_timeouts++;
	to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT;

	/* Try a targeted abort first; escalate only if it cannot be sent. */
	if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0)
		return;

	vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n");
	vtscsi_reset_bus(sc);
}
1273
1274static cam_status
1275vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp)
1276{
1277	cam_status status;
1278
1279	switch (cmd_resp->response) {
1280	case VIRTIO_SCSI_S_OK:
1281		status = CAM_REQ_CMP;
1282		break;
1283	case VIRTIO_SCSI_S_OVERRUN:
1284		status = CAM_DATA_RUN_ERR;
1285		break;
1286	case VIRTIO_SCSI_S_ABORTED:
1287		status = CAM_REQ_ABORTED;
1288		break;
1289	case VIRTIO_SCSI_S_BAD_TARGET:
1290		status = CAM_SEL_TIMEOUT;
1291		break;
1292	case VIRTIO_SCSI_S_RESET:
1293		status = CAM_SCSI_BUS_RESET;
1294		break;
1295	case VIRTIO_SCSI_S_BUSY:
1296		status = CAM_SCSI_BUSY;
1297		break;
1298	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
1299	case VIRTIO_SCSI_S_TARGET_FAILURE:
1300	case VIRTIO_SCSI_S_NEXUS_FAILURE:
1301		status = CAM_SCSI_IT_NEXUS_LOST;
1302		break;
1303	default: /* VIRTIO_SCSI_S_FAILURE */
1304		status = CAM_REQ_CMP_ERR;
1305		break;
1306	}
1307
1308	return (status);
1309}
1310
1311static cam_status
1312vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
1313    struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
1314{
1315	cam_status status;
1316
1317	csio->scsi_status = cmd_resp->status;
1318	csio->resid = cmd_resp->resid;
1319
1320	if (csio->scsi_status == SCSI_STATUS_OK)
1321		status = CAM_REQ_CMP;
1322	else
1323		status = CAM_SCSI_STATUS_ERROR;
1324
1325	if (cmd_resp->sense_len > 0) {
1326		status |= CAM_AUTOSNS_VALID;
1327
1328		if (cmd_resp->sense_len < csio->sense_len)
1329			csio->sense_resid = csio->sense_len -
1330			    cmd_resp->sense_len;
1331		else
1332			csio->sense_resid = 0;
1333
1334		memcpy(&csio->sense_data, cmd_resp->sense,
1335		    csio->sense_len - csio->sense_resid);
1336	}
1337
1338	vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR,
1339	    "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n",
1340	    csio, csio->scsi_status, csio->resid, csio->sense_resid);
1341
1342	return (status);
1343}
1344
/*
 * Completion handler for SCSI commands dequeued from the request
 * virtqueue: stop the timeout, translate the response into a CAM
 * status, freeze the device queue on error, and hand the CCB back
 * to CAM before recycling the request.
 */
static void
vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_resp *cmd_resp;
	cam_status status;

	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_resp = &req->vsr_cmd_resp;

	KASSERT(ccbh->ccbh_vtscsi_req == req,
	    ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req));

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&req->vsr_callout);

	status = vtscsi_scsi_cmd_cam_status(cmd_resp);
	if (status == CAM_REQ_ABORTED) {
		/* An abort we triggered for a timeout reports as a timeout. */
		if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT)
			status = CAM_CMD_TIMEOUT;
	} else if (status == CAM_REQ_CMP)
		status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp);

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccbh->path, 1);
	}

	/* A request and a vq slot just freed up; maybe release the SIMQ. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		status |= CAM_RELEASE_SIMQ;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n",
	    req, ccbh, status);

	ccbh->status = status;
	xpt_done(req->vsr_ccb);
	vtscsi_enqueue_request(sc, req);
}
1385
1386static void
1387vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
1388{
1389
1390	/* XXX We probably shouldn't poll forever. */
1391	req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
1392	do
1393		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
1394	while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);
1395
1396	req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
1397}
1398
1399static int
1400vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
1401    struct sglist *sg, int readable, int writable, int flag)
1402{
1403	struct virtqueue *vq;
1404	int error;
1405
1406	vq = sc->vtscsi_control_vq;
1407
1408	MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);
1409
1410	error = virtqueue_enqueue(vq, req, sg, readable, writable);
1411	if (error) {
1412		/*
1413		 * Return EAGAIN when the virtqueue does not have enough
1414		 * descriptors available.
1415		 */
1416		if (error == ENOSPC || error == EMSGSIZE)
1417			error = EAGAIN;
1418
1419		return (error);
1420	}
1421
1422	virtqueue_notify(vq);
1423	if (flag == VTSCSI_EXECUTE_POLL)
1424		vtscsi_poll_ctrl_req(sc, req);
1425
1426	return (0);
1427}
1428
1429static void
1430vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
1431    struct vtscsi_request *req)
1432{
1433	union ccb *ccb;
1434	struct ccb_hdr *ccbh;
1435	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1436
1437	ccb = req->vsr_ccb;
1438	ccbh = &ccb->ccb_h;
1439	tmf_resp = &req->vsr_tmf_resp;
1440
1441	switch (tmf_resp->response) {
1442	case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
1443		ccbh->status = CAM_REQ_CMP;
1444		break;
1445	case VIRTIO_SCSI_S_FUNCTION_REJECTED:
1446		ccbh->status = CAM_UA_ABORT;
1447		break;
1448	default:
1449		ccbh->status = CAM_REQ_CMP_ERR;
1450		break;
1451	}
1452
1453	xpt_done(ccb);
1454	vtscsi_enqueue_request(sc, req);
1455}
1456
/*
 * Handle an XPT_ABORT CCB: send an ABORT_TASK TMF for the referenced
 * in-flight SCSI command.  Completion is asynchronous via
 * vtscsi_complete_abort_task_cmd().
 */
static int
vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_abort *cab;
	struct ccb_hdr *ccbh;
	struct ccb_hdr *abort_ccbh;
	struct vtscsi_request *abort_req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	cab = &req->vsr_ccb->cab;
	ccbh = &cab->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* CCB header and request that's to be aborted. */
	abort_ccbh = &cab->abort_ccb->ccb_h;
	abort_req = abort_ccbh->ccbh_vtscsi_req;

	/* Only SCSI I/O CCBs that are tracked by a request can be aborted. */
	if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
		error = EINVAL;
		goto fail;
	}

	/* Only attempt to abort requests that could be in-flight. */
	if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
		error = EALREADY;
		goto fail;
	}

	/* Mark it aborted now so completion/timeout paths see the state. */
	abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
	if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&abort_req->vsr_callout);

	/* The tag must match the one used when the command was issued. */
	vtscsi_init_ctrl_tmf_req(ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) abort_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_abort_task_cmd;
	/* Poison the response so a missing device write is detectable. */
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

fail:
	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
	    "abort_req=%p\n", error, req, abort_ccbh, abort_req);

	return (error);
}
1514
1515static void
1516vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
1517    struct vtscsi_request *req)
1518{
1519	union ccb *ccb;
1520	struct ccb_hdr *ccbh;
1521	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1522
1523	ccb = req->vsr_ccb;
1524	ccbh = &ccb->ccb_h;
1525	tmf_resp = &req->vsr_tmf_resp;
1526
1527	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
1528	    req, ccb, tmf_resp->response);
1529
1530	if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
1531		ccbh->status = CAM_REQ_CMP;
1532		vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
1533		    ccbh->target_lun);
1534	} else
1535		ccbh->status = CAM_REQ_CMP_ERR;
1536
1537	xpt_done(ccb);
1538	vtscsi_enqueue_request(sc, req);
1539}
1540
/*
 * Handle an XPT_RESET_DEV CCB by sending the appropriate TMF: an
 * I_T nexus reset for the wildcard LUN, otherwise a logical unit
 * reset.  Completion is asynchronous via
 * vtscsi_complete_reset_dev_cmd().
 */
static int
vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_resetdev *crd;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	uint32_t subtype;
	int error;

	sg = sc->vtscsi_sglist;
	crd = &req->vsr_ccb->crd;
	ccbh = &crd->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	if (ccbh->target_lun == CAM_LUN_WILDCARD)
		subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
	else
		subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;

	/* Resets are not tied to a specific command, so the tag is 0. */
	vtscsi_init_ctrl_tmf_req(ccbh, subtype, 0, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_reset_dev_cmd;
	/* Poison the response so a missing device write is detectable. */
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
	    error, req, ccbh);

	return (error);
}
1581
1582static void
1583vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
1584{
1585
1586	*target_id = lun[1];
1587	*lun_id = (lun[2] << 8) | lun[3];
1588}
1589
/*
 * Encode a CAM target/LUN into the 4-byte VirtIO "single level" LUN
 * format: byte 0 is always 1, byte 1 the target id, and bytes 2-3 the
 * LUN with 0x40 (flat addressing) OR'd into the high byte.
 */
static void
vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
{

	lun[0] = 1;
	lun[1] = ccbh->target_id;
	lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
	lun[3] = ccbh->target_lun & 0xFF;
}
1599
1600static void
1601vtscsi_init_scsi_cmd_req(struct ccb_scsiio *csio,
1602    struct virtio_scsi_cmd_req *cmd_req)
1603{
1604	uint8_t attr;
1605
1606	switch (csio->tag_action) {
1607	case MSG_HEAD_OF_Q_TAG:
1608		attr = VIRTIO_SCSI_S_HEAD;
1609		break;
1610	case MSG_ORDERED_Q_TAG:
1611		attr = VIRTIO_SCSI_S_ORDERED;
1612		break;
1613	case MSG_ACA_TASK:
1614		attr = VIRTIO_SCSI_S_ACA;
1615		break;
1616	default: /* MSG_SIMPLE_Q_TAG */
1617		attr = VIRTIO_SCSI_S_SIMPLE;
1618		break;
1619	}
1620
1621	vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
1622	cmd_req->tag = (uintptr_t) csio;
1623	cmd_req->task_attr = attr;
1624
1625	memcpy(cmd_req->cdb,
1626	    csio->ccb_h.flags & CAM_CDB_POINTER ?
1627	        csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
1628	    csio->cdb_len);
1629}
1630
/*
 * Fill in a VirtIO task management request: LUN address from the CCB,
 * the TMF subtype, and the tag identifying the targeted command
 * (0 when the TMF is not command-specific).
 */
static void
vtscsi_init_ctrl_tmf_req(struct ccb_hdr *ccbh, uint32_t subtype,
    uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
{

	vtscsi_set_request_lun(ccbh, tmf_req->lun);

	tmf_req->type = VIRTIO_SCSI_T_TMF;
	tmf_req->subtype = subtype;
	tmf_req->tag = tag;
}
1642
1643static void
1644vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
1645{
1646	int frozen;
1647
1648	frozen = sc->vtscsi_frozen;
1649
1650	if (reason & VTSCSI_REQUEST &&
1651	    (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
1652		sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;
1653
1654	if (reason & VTSCSI_REQUEST_VQ &&
1655	    (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
1656		sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;
1657
1658	/* Freeze the SIMQ if transitioned to frozen. */
1659	if (frozen == 0 && sc->vtscsi_frozen != 0) {
1660		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
1661		xpt_freeze_simq(sc->vtscsi_sim, 1);
1662	}
1663}
1664
1665static int
1666vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
1667{
1668	int thawed;
1669
1670	if (sc->vtscsi_frozen == 0 || reason == 0)
1671		return (0);
1672
1673	if (reason & VTSCSI_REQUEST &&
1674	    sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
1675		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;
1676
1677	if (reason & VTSCSI_REQUEST_VQ &&
1678	    sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
1679		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;
1680
1681	thawed = sc->vtscsi_frozen == 0;
1682	if (thawed != 0)
1683		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");
1684
1685	return (thawed);
1686}
1687
1688static void
1689vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code,
1690    target_id_t target_id, lun_id_t lun_id)
1691{
1692	struct cam_path *path;
1693
1694	/* Use the wildcard path from our softc for bus announcements. */
1695	if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) {
1696		xpt_async(ac_code, sc->vtscsi_path, NULL);
1697		return;
1698	}
1699
1700	if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim),
1701	    target_id, lun_id) != CAM_REQ_CMP) {
1702		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n");
1703		return;
1704	}
1705
1706	xpt_async(ac_code, path, NULL);
1707	xpt_free_path(path);
1708}
1709
1710static void
1711vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id,
1712    lun_id_t lun_id)
1713{
1714	union ccb *ccb;
1715	cam_status status;
1716
1717	ccb = xpt_alloc_ccb_nowait();
1718	if (ccb == NULL) {
1719		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
1720		return;
1721	}
1722
1723	status = xpt_create_path(&ccb->ccb_h.path, NULL,
1724	    cam_sim_path(sc->vtscsi_sim), target_id, lun_id);
1725	if (status != CAM_REQ_CMP) {
1726		xpt_free_ccb(ccb);
1727		return;
1728	}
1729
1730	xpt_rescan(ccb);
1731}
1732
/* Rescan every target and LUN on the bus. */
static void
vtscsi_execute_rescan_bus(struct vtscsi_softc *sc)
{

	vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
}
1739
1740static void
1741vtscsi_transport_reset_event(struct vtscsi_softc *sc,
1742    struct virtio_scsi_event *event)
1743{
1744	target_id_t target_id;
1745	lun_id_t lun_id;
1746
1747	vtscsi_get_request_lun(event->lun, &target_id, &lun_id);
1748
1749	switch (event->reason) {
1750	case VIRTIO_SCSI_EVT_RESET_RESCAN:
1751	case VIRTIO_SCSI_EVT_RESET_REMOVED:
1752		vtscsi_execute_rescan(sc, target_id, lun_id);
1753		break;
1754	default:
1755		device_printf(sc->vtscsi_dev,
1756		    "unhandled transport event reason: %d\n", event->reason);
1757		break;
1758	}
1759}
1760
1761static void
1762vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event)
1763{
1764	int error;
1765
1766	if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) {
1767		switch (event->event) {
1768		case VIRTIO_SCSI_T_TRANSPORT_RESET:
1769			vtscsi_transport_reset_event(sc, event);
1770			break;
1771		default:
1772			device_printf(sc->vtscsi_dev,
1773			    "unhandled event: %d\n", event->event);
1774			break;
1775		}
1776	} else
1777		vtscsi_execute_rescan_bus(sc);
1778
1779	/*
1780	 * This should always be successful since the buffer
1781	 * was just dequeued.
1782	 */
1783	error = vtscsi_enqueue_event_buf(sc, event);
1784	KASSERT(error == 0,
1785	    ("cannot requeue event buffer: %d", error));
1786}
1787
1788static int
1789vtscsi_enqueue_event_buf(struct vtscsi_softc *sc,
1790    struct virtio_scsi_event *event)
1791{
1792	struct sglist *sg;
1793	struct virtqueue *vq;
1794	int size, error;
1795
1796	sg = sc->vtscsi_sglist;
1797	vq = sc->vtscsi_event_vq;
1798	size = sc->vtscsi_event_buf_size;
1799
1800	bzero(event, size);
1801
1802	sglist_reset(sg);
1803	error = sglist_append(sg, event, size);
1804	if (error)
1805		return (error);
1806
1807	error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg);
1808	if (error)
1809		return (error);
1810
1811	virtqueue_notify(vq);
1812
1813	return (0);
1814}
1815
/*
 * Populate the event virtqueue with receive buffers at attach time.
 * Returns 0 if at least one buffer was posted (or event support is
 * disabled), otherwise the enqueue errno.
 */
static int
vtscsi_init_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, size, error;

	/*
	 * The first release of QEMU with VirtIO SCSI support would crash
	 * when attempting to notify the event virtqueue. This was fixed
	 * when hotplug support was added.
	 */
	if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG)
		size = sc->vtscsi_event_buf_size;
	else
		size = 0;

	/* Too small to hold an event (or hotplug absent): skip events. */
	if (size < sizeof(struct virtio_scsi_event))
		return (0);

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	/*
	 * Even just one buffer is enough. Missed events are
	 * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag.
	 */
	if (i > 0)
		error = 0;

	return (error);
}
1852
/*
 * Repost all event buffers after a device reinit (e.g. bus reset).
 * No-op when event support is disabled or the buffers are too small.
 */
static void
vtscsi_reinit_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, error;

	if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 ||
	    sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event))
		return;

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	/* One posted buffer suffices; see vtscsi_init_event_vq(). */
	KASSERT(i > 0, ("cannot reinit event vq: %d", error));
}
1873
1874static void
1875vtscsi_drain_event_vq(struct vtscsi_softc *sc)
1876{
1877	struct virtqueue *vq;
1878	int last;
1879
1880	vq = sc->vtscsi_event_vq;
1881	last = 0;
1882
1883	while (virtqueue_drain(vq, &last) != NULL)
1884		;
1885
1886	KASSERT(virtqueue_empty(vq), ("eventvq not empty"));
1887}
1888
/*
 * Run completions for the request and control virtqueues.  The softc
 * mutex must already be held by the caller.
 */
static void
vtscsi_complete_vqs_locked(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_request_vq != NULL)
		vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (sc->vtscsi_control_vq != NULL)
		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
}
1900
/* Locked wrapper around vtscsi_complete_vqs_locked(). */
static void
vtscsi_complete_vqs(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);
	vtscsi_complete_vqs_locked(sc);
	VTSCSI_UNLOCK(sc);
}
1909
/*
 * Cancel a request pulled off a drained virtqueue.  During detach the
 * CCB is failed with CAM_NO_HBA; during a bus reset it is completed
 * with CAM_REQUEUE_REQ so CAM retries it afterwards.  The request is
 * returned to the free list in both cases.
 */
static void
vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	union ccb *ccb;
	int detach;

	ccb = req->vsr_ccb;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb);

	/*
	 * The callout must be drained when detaching since the request is
	 * about to be freed. The VTSCSI_MTX must not be held for this in
	 * case the callout is pending because there is a deadlock potential.
	 * Otherwise, the virtqueue is being drained because of a bus reset
	 * so we only need to attempt to stop the callouts.
	 */
	detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0;
	if (detach != 0)
		VTSCSI_LOCK_NOTOWNED(sc);
	else
		VTSCSI_LOCK_OWNED(sc);

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) {
		if (detach != 0)
			callout_drain(&req->vsr_callout);
		else
			callout_stop(&req->vsr_callout);
	}

	if (ccb != NULL) {
		/* On detach, xpt_done() must be called with the lock held. */
		if (detach != 0) {
			VTSCSI_LOCK(sc);
			ccb->ccb_h.status = CAM_NO_HBA;
		} else
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		if (detach != 0)
			VTSCSI_UNLOCK(sc);
	}

	vtscsi_enqueue_request(sc, req);
}
1953
1954static void
1955vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
1956{
1957	struct vtscsi_request *req;
1958	int last;
1959
1960	last = 0;
1961
1962	vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq);
1963
1964	while ((req = virtqueue_drain(vq, &last)) != NULL)
1965		vtscsi_cancel_request(sc, req);
1966
1967	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
1968}
1969
/* Drain every virtqueue: control and request (cancelling requests),
 * then the event queue (discarding buffers). */
static void
vtscsi_drain_vqs(struct vtscsi_softc *sc)
{

	if (sc->vtscsi_control_vq != NULL)
		vtscsi_drain_vq(sc, sc->vtscsi_control_vq);
	if (sc->vtscsi_request_vq != NULL)
		vtscsi_drain_vq(sc, sc->vtscsi_request_vq);
	if (sc->vtscsi_event_vq != NULL)
		vtscsi_drain_event_vq(sc);
}
1981
/* Quiesce the device: mask virtqueue interrupts, then reset it. */
static void
vtscsi_stop(struct vtscsi_softc *sc)
{

	vtscsi_disable_vqs_intr(sc);
	virtio_stop(sc->vtscsi_dev);
}
1989
/*
 * Perform a full bus reset: stop the device, complete and drain all
 * outstanding requests (so CAM requeues them), thaw the SIMQ, and
 * reinitialize the device.  Called with the softc mutex held.
 * Returns 0 on success or the reinit errno.
 */
static int
vtscsi_reset_bus(struct vtscsi_softc *sc)
{
	int error;

	VTSCSI_LOCK_OWNED(sc);

	/* Administrative escape hatch via the loader tunable. */
	if (vtscsi_bus_reset_disable != 0) {
		device_printf(sc->vtscsi_dev, "bus reset disabled\n");
		return (0);
	}

	sc->vtscsi_flags |= VTSCSI_FLAG_RESET;

	/*
	 * vtscsi_stop() will cause the in-flight requests to be canceled.
	 * Those requests are then completed here so CAM will retry them
	 * after the reset is complete.
	 */
	vtscsi_stop(sc);
	vtscsi_complete_vqs_locked(sc);

	/* Rid the virtqueues of any remaining requests. */
	vtscsi_drain_vqs(sc);

	/*
	 * Any resource shortage that froze the SIMQ cannot persist across
	 * a bus reset so ensure it gets thawed here.
	 */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		xpt_release_simq(sc->vtscsi_sim, 0);

	error = vtscsi_reinit(sc);
	if (error) {
		device_printf(sc->vtscsi_dev,
		    "reinitialization failed, stopping device...\n");
		vtscsi_stop(sc);
	} else
		vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
		    CAM_LUN_WILDCARD);

	sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET;

	return (error);
}
2035
/*
 * One-time initialization of a freshly allocated request: back-pointer
 * to the softc and a mutex-protected callout for command timeouts.
 */
static void
vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

#ifdef INVARIANTS
	int req_nsegs, resp_nsegs;

	/*
	 * The request and response unions are each assumed elsewhere to
	 * occupy a single sglist segment, i.e. not straddle a page.
	 */
	req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
	resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));

	KASSERT(req_nsegs == 1, ("request crossed page boundary"));
	KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
#endif

	req->vsr_softc = sc;
	callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0);
}
2053
2054static int
2055vtscsi_alloc_requests(struct vtscsi_softc *sc)
2056{
2057	struct vtscsi_request *req;
2058	int i, nreqs;
2059
2060	/*
2061	 * Commands destined for either the request or control queues come
2062	 * from the same SIM queue. Use the size of the request virtqueue
2063	 * as it (should) be much more frequently used. Some additional
2064	 * requests are allocated for internal (TMF) use.
2065	 */
2066	nreqs = virtqueue_size(sc->vtscsi_request_vq);
2067	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0)
2068		nreqs /= VTSCSI_MIN_SEGMENTS;
2069	nreqs += VTSCSI_RESERVED_REQUESTS;
2070
2071	for (i = 0; i < nreqs; i++) {
2072		req = malloc(sizeof(struct vtscsi_request), M_DEVBUF,
2073		    M_NOWAIT);
2074		if (req == NULL)
2075			return (ENOMEM);
2076
2077		vtscsi_init_request(sc, req);
2078
2079		sc->vtscsi_nrequests++;
2080		vtscsi_enqueue_request(sc, req);
2081	}
2082
2083	return (0);
2084}
2085
2086static void
2087vtscsi_free_requests(struct vtscsi_softc *sc)
2088{
2089	struct vtscsi_request *req;
2090
2091	while ((req = vtscsi_dequeue_request(sc)) != NULL) {
2092		KASSERT(callout_active(&req->vsr_callout) == 0,
2093		    ("request callout still active"));
2094
2095		sc->vtscsi_nrequests--;
2096		free(req, M_DEVBUF);
2097	}
2098
2099	KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d",
2100	    sc->vtscsi_nrequests));
2101}
2102
/*
 * Return a request to the free list, resetting it to a pristine state.
 * Thaws the SIMQ if it was frozen solely for lack of requests.
 */
static void
vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

	KASSERT(req->vsr_softc == sc,
	    ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc));

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);

	/* A request is available so the SIMQ could be released. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0)
		xpt_release_simq(sc->vtscsi_sim, 1);

	req->vsr_ccb = NULL;
	req->vsr_complete = NULL;
	req->vsr_ptr0 = NULL;
	req->vsr_state = VTSCSI_REQ_STATE_FREE;
	req->vsr_flags = 0;

	bzero(&req->vsr_ureq, sizeof(req->vsr_ureq));
	bzero(&req->vsr_uresp, sizeof(req->vsr_uresp));

	/*
	 * We insert at the tail of the queue in order to make it
	 * very unlikely a request will be reused if we race with
	 * stopping its callout handler.
	 */
	TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link);
}
2132
2133static struct vtscsi_request *
2134vtscsi_dequeue_request(struct vtscsi_softc *sc)
2135{
2136	struct vtscsi_request *req;
2137
2138	req = TAILQ_FIRST(&sc->vtscsi_req_free);
2139	if (req != NULL) {
2140		req->vsr_state = VTSCSI_REQ_STATE_INUSE;
2141		TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link);
2142	} else
2143		sc->vtscsi_stats.dequeue_no_requests++;
2144
2145	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
2146
2147	return (req);
2148}
2149
/*
 * Finish a request dequeued from a virtqueue.  The COMPLETE flag is
 * set before invoking the handler so vtscsi_poll_ctrl_req() observes
 * completion even if the handler recycles the request.
 */
static void
vtscsi_complete_request(struct vtscsi_request *req)
{

	if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED)
		req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE;

	if (req->vsr_complete != NULL)
		req->vsr_complete(req->vsr_softc, req);
}
2160
2161static void
2162vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
2163{
2164	struct vtscsi_request *req;
2165
2166	VTSCSI_LOCK_OWNED(sc);
2167
2168	while ((req = virtqueue_dequeue(vq, NULL)) != NULL)
2169		vtscsi_complete_request(req);
2170}
2171
2172static void
2173vtscsi_control_vq_intr(void *xsc)
2174{
2175	struct vtscsi_softc *sc;
2176	struct virtqueue *vq;
2177
2178	sc = xsc;
2179	vq = sc->vtscsi_control_vq;
2180
2181again:
2182	VTSCSI_LOCK(sc);
2183
2184	vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
2185
2186	if (virtqueue_enable_intr(vq) != 0) {
2187		virtqueue_disable_intr(vq);
2188		VTSCSI_UNLOCK(sc);
2189		goto again;
2190	}
2191
2192	VTSCSI_UNLOCK(sc);
2193}
2194
2195static void
2196vtscsi_event_vq_intr(void *xsc)
2197{
2198	struct vtscsi_softc *sc;
2199	struct virtqueue *vq;
2200	struct virtio_scsi_event *event;
2201
2202	sc = xsc;
2203	vq = sc->vtscsi_event_vq;
2204
2205again:
2206	VTSCSI_LOCK(sc);
2207
2208	while ((event = virtqueue_dequeue(vq, NULL)) != NULL)
2209		vtscsi_handle_event(sc, event);
2210
2211	if (virtqueue_enable_intr(vq) != 0) {
2212		virtqueue_disable_intr(vq);
2213		VTSCSI_UNLOCK(sc);
2214		goto again;
2215	}
2216
2217	VTSCSI_UNLOCK(sc);
2218}
2219
2220static void
2221vtscsi_request_vq_intr(void *xsc)
2222{
2223	struct vtscsi_softc *sc;
2224	struct virtqueue *vq;
2225
2226	sc = xsc;
2227	vq = sc->vtscsi_request_vq;
2228
2229again:
2230	VTSCSI_LOCK(sc);
2231
2232	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
2233
2234	if (virtqueue_enable_intr(vq) != 0) {
2235		virtqueue_disable_intr(vq);
2236		VTSCSI_UNLOCK(sc);
2237		goto again;
2238	}
2239
2240	VTSCSI_UNLOCK(sc);
2241}
2242
/* Mask interrupts on all three virtqueues. */
static void
vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_disable_intr(sc->vtscsi_control_vq);
	virtqueue_disable_intr(sc->vtscsi_event_vq);
	virtqueue_disable_intr(sc->vtscsi_request_vq);
}
2251
/* Unmask interrupts on all three virtqueues. */
static void
vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_enable_intr(sc->vtscsi_control_vq);
	virtqueue_enable_intr(sc->vtscsi_event_vq);
	virtqueue_enable_intr(sc->vtscsi_request_vq);
}
2260
/*
 * Fetch the debug level tunable: the global hw.vtscsi.debug_level
 * first, then the per-device dev.vtscsi.N.debug_level override.
 */
static void
vtscsi_get_tunables(struct vtscsi_softc *sc)
{
	char tmpstr[64];

	TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);

	snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
	    device_get_unit(sc->vtscsi_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
}
2272
2273static void
2274vtscsi_add_sysctl(struct vtscsi_softc *sc)
2275{
2276	device_t dev;
2277	struct vtscsi_statistics *stats;
2278        struct sysctl_ctx_list *ctx;
2279	struct sysctl_oid *tree;
2280	struct sysctl_oid_list *child;
2281
2282	dev = sc->vtscsi_dev;
2283	stats = &sc->vtscsi_stats;
2284	ctx = device_get_sysctl_ctx(dev);
2285	tree = device_get_sysctl_tree(dev);
2286	child = SYSCTL_CHILDREN(tree);
2287
2288	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
2289	    CTLFLAG_RW, &sc->vtscsi_debug, 0,
2290	    "Debug level");
2291
2292	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
2293	    CTLFLAG_RD, &stats->scsi_cmd_timeouts,
2294	    "SCSI command timeouts");
2295	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
2296	    CTLFLAG_RD, &stats->dequeue_no_requests,
2297	    "No available requests to dequeue");
2298}
2299
/*
 * Debug printf helper that prefixes the message with the request's CAM
 * path (and, for SCSI I/O CCBs, the decoded command) so log lines can
 * be correlated with a device.  Used via the vtscsi_dprintf_req()
 * macro.
 */
static void
vtscsi_printf_req(struct vtscsi_request *req, const char *func,
    const char *fmt, ...)
{
	struct vtscsi_softc *sc;
	union ccb *ccb;
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (req == NULL)
		return;

	sc = req->vsr_softc;
	ccb = req->vsr_ccb;

	va_start(ap, fmt);
	/* Fixed-size sbuf over str[]; output is silently truncated if full. */
	sbuf_new(&sb, str, sizeof(str), 0);

	if (ccb == NULL) {
		sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
		    cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
		    cam_sim_bus(sc->vtscsi_sim));
	} else {
		xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
		}
	}

	sbuf_vprintf(&sb, fmt, ap);
	va_end(ap);

	sbuf_finish(&sb);
	printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,
	    sbuf_data(&sb));
}
2340