/* virtio_scsi.c, revision 260837 */
1/*-
2 * Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice unmodified, this list of conditions, and the following
10 *    disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27/* Driver for VirtIO SCSI devices. */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: stable/10/sys/dev/virtio/scsi/virtio_scsi.c 260837 2014-01-18 03:33:01Z bryanv $");
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/kernel.h>
35#include <sys/kthread.h>
36#include <sys/malloc.h>
37#include <sys/module.h>
38#include <sys/sglist.h>
39#include <sys/sysctl.h>
40#include <sys/lock.h>
41#include <sys/mutex.h>
42#include <sys/callout.h>
43#include <sys/queue.h>
44#include <sys/sbuf.h>
45
46#include <machine/stdarg.h>
47
48#include <machine/bus.h>
49#include <machine/resource.h>
50#include <sys/bus.h>
51#include <sys/rman.h>
52
53#include <cam/cam.h>
54#include <cam/cam_ccb.h>
55#include <cam/cam_sim.h>
56#include <cam/cam_periph.h>
57#include <cam/cam_xpt_sim.h>
58#include <cam/cam_debug.h>
59#include <cam/scsi/scsi_all.h>
60#include <cam/scsi/scsi_message.h>
61
62#include <dev/virtio/virtio.h>
63#include <dev/virtio/virtqueue.h>
64#include <dev/virtio/scsi/virtio_scsi.h>
65#include <dev/virtio/scsi/virtio_scsivar.h>
66
67#include "virtio_if.h"
68
69static int	vtscsi_modevent(module_t, int, void *);
70
71static int	vtscsi_probe(device_t);
72static int	vtscsi_attach(device_t);
73static int	vtscsi_detach(device_t);
74static int	vtscsi_suspend(device_t);
75static int	vtscsi_resume(device_t);
76
77static void	vtscsi_negotiate_features(struct vtscsi_softc *);
78static int	vtscsi_maximum_segments(struct vtscsi_softc *, int);
79static int	vtscsi_alloc_virtqueues(struct vtscsi_softc *);
80static void	vtscsi_write_device_config(struct vtscsi_softc *);
81static int	vtscsi_reinit(struct vtscsi_softc *);
82
83static int	vtscsi_alloc_cam(struct vtscsi_softc *);
84static int 	vtscsi_register_cam(struct vtscsi_softc *);
85static void	vtscsi_free_cam(struct vtscsi_softc *);
86static void	vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *);
87static int	vtscsi_register_async(struct vtscsi_softc *);
88static void	vtscsi_deregister_async(struct vtscsi_softc *);
89static void	vtscsi_cam_action(struct cam_sim *, union ccb *);
90static void	vtscsi_cam_poll(struct cam_sim *);
91
92static void	vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *,
93		    union ccb *);
94static void 	vtscsi_cam_get_tran_settings(struct vtscsi_softc *,
95		    union ccb *);
96static void	vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *);
97static void	vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *);
98static void	vtscsi_cam_abort(struct vtscsi_softc *, union ccb *);
99static void	vtscsi_cam_path_inquiry(struct vtscsi_softc *,
100		    struct cam_sim *, union ccb *);
101
102static int 	vtscsi_sg_append_scsi_buf(struct vtscsi_softc *,
103		    struct sglist *, struct ccb_scsiio *);
104static int 	vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *,
105		    struct vtscsi_request *, int *, int *);
106static int 	vtscsi_execute_scsi_cmd(struct vtscsi_softc *,
107		    struct vtscsi_request *);
108static int 	vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *);
109static void	vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *,
110		    struct vtscsi_request *);
111static int 	vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *,
112		    struct vtscsi_request *);
113static void	vtscsi_timedout_scsi_cmd(void *);
114static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *);
115static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *,
116		    struct ccb_scsiio *, struct virtio_scsi_cmd_resp *);
117static void 	vtscsi_complete_scsi_cmd(struct vtscsi_softc *,
118		    struct vtscsi_request *);
119
120static void	vtscsi_poll_ctrl_req(struct vtscsi_softc *,
121		    struct vtscsi_request *);
122static int 	vtscsi_execute_ctrl_req(struct vtscsi_softc *,
123		    struct vtscsi_request *, struct sglist *, int, int, int);
124static void 	vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c,
125		    struct vtscsi_request *);
126static int 	vtscsi_execute_abort_task_cmd(struct vtscsi_softc *,
127		    struct vtscsi_request *);
128static int 	vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *,
129		    struct vtscsi_request *);
130
131static void 	vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
132static void	vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
133static void	vtscsi_init_scsi_cmd_req(struct ccb_scsiio *,
134		    struct virtio_scsi_cmd_req *);
135static void	vtscsi_init_ctrl_tmf_req(struct ccb_hdr *, uint32_t,
136		    uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
137
138static void 	vtscsi_freeze_simq(struct vtscsi_softc *, int);
139static int	vtscsi_thaw_simq(struct vtscsi_softc *, int);
140
141static void 	vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t,
142		    lun_id_t);
143static void 	vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t,
144		    lun_id_t);
145static void 	vtscsi_execute_rescan_bus(struct vtscsi_softc *);
146
147static void 	vtscsi_handle_event(struct vtscsi_softc *,
148		    struct virtio_scsi_event *);
149static int 	vtscsi_enqueue_event_buf(struct vtscsi_softc *,
150		    struct virtio_scsi_event *);
151static int	vtscsi_init_event_vq(struct vtscsi_softc *);
152static void 	vtscsi_reinit_event_vq(struct vtscsi_softc *);
153static void 	vtscsi_drain_event_vq(struct vtscsi_softc *);
154
155static void 	vtscsi_complete_vqs_locked(struct vtscsi_softc *);
156static void 	vtscsi_complete_vqs(struct vtscsi_softc *);
157static void 	vtscsi_drain_vqs(struct vtscsi_softc *);
158static void 	vtscsi_cancel_request(struct vtscsi_softc *,
159		    struct vtscsi_request *);
160static void	vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *);
161static void	vtscsi_stop(struct vtscsi_softc *);
162static int	vtscsi_reset_bus(struct vtscsi_softc *);
163
164static void 	vtscsi_init_request(struct vtscsi_softc *,
165		    struct vtscsi_request *);
166static int	vtscsi_alloc_requests(struct vtscsi_softc *);
167static void	vtscsi_free_requests(struct vtscsi_softc *);
168static void	vtscsi_enqueue_request(struct vtscsi_softc *,
169		    struct vtscsi_request *);
170static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *);
171
172static void	vtscsi_complete_request(struct vtscsi_request *);
173static void 	vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *);
174
175static void	vtscsi_control_vq_intr(void *);
176static void	vtscsi_event_vq_intr(void *);
177static void	vtscsi_request_vq_intr(void *);
178static void 	vtscsi_disable_vqs_intr(struct vtscsi_softc *);
179static void 	vtscsi_enable_vqs_intr(struct vtscsi_softc *);
180
181static void 	vtscsi_get_tunables(struct vtscsi_softc *);
182static void 	vtscsi_add_sysctl(struct vtscsi_softc *);
183
184static void 	vtscsi_printf_req(struct vtscsi_request *, const char *,
185		    const char *, ...);
186
187/* Global tunables. */
188/*
189 * The current QEMU VirtIO SCSI implementation does not cancel in-flight
190 * IO during virtio_stop(). So in-flight requests still complete after the
191 * device reset. We would have to wait for all the in-flight IO to complete,
192 * which defeats the typical purpose of a bus reset. We could simulate the
193 * bus reset with either I_T_NEXUS_RESET of all the targets, or with
194 * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the
195 * control virtqueue). But this isn't very useful if things really go off
196 * the rails, so default to disabled for now.
197 */
198static int vtscsi_bus_reset_disable = 1;
199TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable);
200
201static struct virtio_feature_desc vtscsi_feature_desc[] = {
202	{ VIRTIO_SCSI_F_INOUT,		"InOut"		},
203	{ VIRTIO_SCSI_F_HOTPLUG,	"Hotplug"	},
204
205	{ 0, NULL }
206};
207
208static device_method_t vtscsi_methods[] = {
209	/* Device methods. */
210	DEVMETHOD(device_probe,		vtscsi_probe),
211	DEVMETHOD(device_attach,	vtscsi_attach),
212	DEVMETHOD(device_detach,	vtscsi_detach),
213	DEVMETHOD(device_suspend,	vtscsi_suspend),
214	DEVMETHOD(device_resume,	vtscsi_resume),
215
216	DEVMETHOD_END
217};
218
219static driver_t vtscsi_driver = {
220	"vtscsi",
221	vtscsi_methods,
222	sizeof(struct vtscsi_softc)
223};
224static devclass_t vtscsi_devclass;
225
226DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass,
227    vtscsi_modevent, 0);
228MODULE_VERSION(virtio_scsi, 1);
229MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
230MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);
231
232static int
233vtscsi_modevent(module_t mod, int type, void *unused)
234{
235	int error;
236
237	switch (type) {
238	case MOD_LOAD:
239	case MOD_QUIESCE:
240	case MOD_UNLOAD:
241	case MOD_SHUTDOWN:
242		error = 0;
243		break;
244	default:
245		error = EOPNOTSUPP;
246		break;
247	}
248
249	return (error);
250}
251
252static int
253vtscsi_probe(device_t dev)
254{
255
256	if (virtio_get_device_type(dev) != VIRTIO_ID_SCSI)
257		return (ENXIO);
258
259	device_set_desc(dev, "VirtIO SCSI Adapter");
260
261	return (BUS_PROBE_DEFAULT);
262}
263
264static int
265vtscsi_attach(device_t dev)
266{
267	struct vtscsi_softc *sc;
268	struct virtio_scsi_config scsicfg;
269	int error;
270
271	sc = device_get_softc(dev);
272	sc->vtscsi_dev = dev;
273
274	VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
275	TAILQ_INIT(&sc->vtscsi_req_free);
276
277	vtscsi_get_tunables(sc);
278	vtscsi_add_sysctl(sc);
279
280	virtio_set_feature_desc(dev, vtscsi_feature_desc);
281	vtscsi_negotiate_features(sc);
282
283	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
284		sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
285	if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
286		sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
287	if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
288		sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;
289
290	virtio_read_device_config(dev, 0, &scsicfg,
291	    sizeof(struct virtio_scsi_config));
292
293	sc->vtscsi_max_channel = scsicfg.max_channel;
294	sc->vtscsi_max_target = scsicfg.max_target;
295	sc->vtscsi_max_lun = scsicfg.max_lun;
296	sc->vtscsi_event_buf_size = scsicfg.event_info_size;
297
298	vtscsi_write_device_config(sc);
299
300	sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max);
301	sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT);
302	if (sc->vtscsi_sglist == NULL) {
303		error = ENOMEM;
304		device_printf(dev, "cannot allocate sglist\n");
305		goto fail;
306	}
307
308	error = vtscsi_alloc_virtqueues(sc);
309	if (error) {
310		device_printf(dev, "cannot allocate virtqueues\n");
311		goto fail;
312	}
313
314	error = vtscsi_init_event_vq(sc);
315	if (error) {
316		device_printf(dev, "cannot populate the eventvq\n");
317		goto fail;
318	}
319
320	error = vtscsi_alloc_requests(sc);
321	if (error) {
322		device_printf(dev, "cannot allocate requests\n");
323		goto fail;
324	}
325
326	error = vtscsi_alloc_cam(sc);
327	if (error) {
328		device_printf(dev, "cannot allocate CAM structures\n");
329		goto fail;
330	}
331
332	error = virtio_setup_intr(dev, INTR_TYPE_CAM);
333	if (error) {
334		device_printf(dev, "cannot setup virtqueue interrupts\n");
335		goto fail;
336	}
337
338	vtscsi_enable_vqs_intr(sc);
339
340	/*
341	 * Register with CAM after interrupts are enabled so we will get
342	 * notified of the probe responses.
343	 */
344	error = vtscsi_register_cam(sc);
345	if (error) {
346		device_printf(dev, "cannot register with CAM\n");
347		goto fail;
348	}
349
350fail:
351	if (error)
352		vtscsi_detach(dev);
353
354	return (error);
355}
356
357static int
358vtscsi_detach(device_t dev)
359{
360	struct vtscsi_softc *sc;
361
362	sc = device_get_softc(dev);
363
364	VTSCSI_LOCK(sc);
365	sc->vtscsi_flags |= VTSCSI_FLAG_DETACH;
366	if (device_is_attached(dev))
367		vtscsi_stop(sc);
368	VTSCSI_UNLOCK(sc);
369
370	vtscsi_complete_vqs(sc);
371	vtscsi_drain_vqs(sc);
372
373	vtscsi_free_cam(sc);
374	vtscsi_free_requests(sc);
375
376	if (sc->vtscsi_sglist != NULL) {
377		sglist_free(sc->vtscsi_sglist);
378		sc->vtscsi_sglist = NULL;
379	}
380
381	VTSCSI_LOCK_DESTROY(sc);
382
383	return (0);
384}
385
386static int
387vtscsi_suspend(device_t dev)
388{
389
390	return (0);
391}
392
393static int
394vtscsi_resume(device_t dev)
395{
396
397	return (0);
398}
399
400static void
401vtscsi_negotiate_features(struct vtscsi_softc *sc)
402{
403	device_t dev;
404	uint64_t features;
405
406	dev = sc->vtscsi_dev;
407	features = virtio_negotiate_features(dev, VTSCSI_FEATURES);
408	sc->vtscsi_features = features;
409}
410
411static int
412vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
413{
414	int nsegs;
415
416	nsegs = VTSCSI_MIN_SEGMENTS;
417
418	if (seg_max > 0) {
419		nsegs += MIN(seg_max, MAXPHYS / PAGE_SIZE + 1);
420		if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
421			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
422	} else
423		nsegs += 1;
424
425	return (nsegs);
426}
427
428static int
429vtscsi_alloc_virtqueues(struct vtscsi_softc *sc)
430{
431	device_t dev;
432	struct vq_alloc_info vq_info[3];
433	int nvqs;
434
435	dev = sc->vtscsi_dev;
436	nvqs = 3;
437
438	VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc,
439	    &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev));
440
441	VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc,
442	    &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev));
443
444	VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs,
445	    vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq,
446	    "%s request", device_get_nameunit(dev));
447
448	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
449}
450
451static void
452vtscsi_write_device_config(struct vtscsi_softc *sc)
453{
454
455	virtio_write_dev_config_4(sc->vtscsi_dev,
456	    offsetof(struct virtio_scsi_config, sense_size),
457	    VIRTIO_SCSI_SENSE_SIZE);
458
459	/*
460	 * This is the size in the virtio_scsi_cmd_req structure. Note
461	 * this value (32) is larger than the maximum CAM CDB size (16).
462	 */
463	virtio_write_dev_config_4(sc->vtscsi_dev,
464	    offsetof(struct virtio_scsi_config, cdb_size),
465	    VIRTIO_SCSI_CDB_SIZE);
466}
467
468static int
469vtscsi_reinit(struct vtscsi_softc *sc)
470{
471	device_t dev;
472	int error;
473
474	dev = sc->vtscsi_dev;
475
476	error = virtio_reinit(dev, sc->vtscsi_features);
477	if (error == 0) {
478		vtscsi_write_device_config(sc);
479		vtscsi_reinit_event_vq(sc);
480		virtio_reinit_complete(dev);
481
482		vtscsi_enable_vqs_intr(sc);
483	}
484
485	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error);
486
487	return (error);
488}
489
490static int
491vtscsi_alloc_cam(struct vtscsi_softc *sc)
492{
493	device_t dev;
494	struct cam_devq *devq;
495	int openings;
496
497	dev = sc->vtscsi_dev;
498	openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS;
499
500	devq = cam_simq_alloc(openings);
501	if (devq == NULL) {
502		device_printf(dev, "cannot allocate SIM queue\n");
503		return (ENOMEM);
504	}
505
506	sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll,
507	    "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1,
508	    openings, devq);
509	if (sc->vtscsi_sim == NULL) {
510		cam_simq_free(devq);
511		device_printf(dev, "cannot allocate SIM\n");
512		return (ENOMEM);
513	}
514
515	return (0);
516}
517
518static int
519vtscsi_register_cam(struct vtscsi_softc *sc)
520{
521	device_t dev;
522	int registered, error;
523
524	dev = sc->vtscsi_dev;
525	registered = 0;
526
527	VTSCSI_LOCK(sc);
528
529	if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) {
530		error = ENOMEM;
531		device_printf(dev, "cannot register XPT bus\n");
532		goto fail;
533	}
534
535	registered = 1;
536
537	if (xpt_create_path(&sc->vtscsi_path, NULL,
538	    cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD,
539	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
540		error = ENOMEM;
541		device_printf(dev, "cannot create bus path\n");
542		goto fail;
543	}
544
545	if (vtscsi_register_async(sc) != CAM_REQ_CMP) {
546		error = EIO;
547		device_printf(dev, "cannot register async callback\n");
548		goto fail;
549	}
550
551	VTSCSI_UNLOCK(sc);
552
553	return (0);
554
555fail:
556	if (sc->vtscsi_path != NULL) {
557		xpt_free_path(sc->vtscsi_path);
558		sc->vtscsi_path = NULL;
559	}
560
561	if (registered != 0)
562		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
563
564	VTSCSI_UNLOCK(sc);
565
566	return (error);
567}
568
569static void
570vtscsi_free_cam(struct vtscsi_softc *sc)
571{
572
573	VTSCSI_LOCK(sc);
574
575	if (sc->vtscsi_path != NULL) {
576		vtscsi_deregister_async(sc);
577
578		xpt_free_path(sc->vtscsi_path);
579		sc->vtscsi_path = NULL;
580
581		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
582	}
583
584	if (sc->vtscsi_sim != NULL) {
585		cam_sim_free(sc->vtscsi_sim, 1);
586		sc->vtscsi_sim = NULL;
587	}
588
589	VTSCSI_UNLOCK(sc);
590}
591
592static void
593vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg)
594{
595	struct cam_sim *sim;
596	struct vtscsi_softc *sc;
597
598	sim = cb_arg;
599	sc = cam_sim_softc(sim);
600
601	vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code);
602
603	/*
604	 * TODO Once QEMU supports event reporting, we should
605	 *      (un)subscribe to events here.
606	 */
607	switch (code) {
608	case AC_FOUND_DEVICE:
609		break;
610	case AC_LOST_DEVICE:
611		break;
612	}
613}
614
615static int
616vtscsi_register_async(struct vtscsi_softc *sc)
617{
618	struct ccb_setasync csa;
619
620	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
621	csa.ccb_h.func_code = XPT_SASYNC_CB;
622	csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE;
623	csa.callback = vtscsi_cam_async;
624	csa.callback_arg = sc->vtscsi_sim;
625
626	xpt_action((union ccb *) &csa);
627
628	return (csa.ccb_h.status);
629}
630
631static void
632vtscsi_deregister_async(struct vtscsi_softc *sc)
633{
634	struct ccb_setasync csa;
635
636	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
637	csa.ccb_h.func_code = XPT_SASYNC_CB;
638	csa.event_enable = 0;
639	csa.callback = vtscsi_cam_async;
640	csa.callback_arg = sc->vtscsi_sim;
641
642	xpt_action((union ccb *) &csa);
643}
644
645static void
646vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
647{
648	struct vtscsi_softc *sc;
649	struct ccb_hdr *ccbh;
650
651	sc = cam_sim_softc(sim);
652	ccbh = &ccb->ccb_h;
653
654	VTSCSI_LOCK_OWNED(sc);
655
656	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) {
657		/*
658		 * The VTSCSI_MTX is briefly dropped between setting
659		 * VTSCSI_FLAG_DETACH and deregistering with CAM, so
660		 * drop any CCBs that come in during that window.
661		 */
662		ccbh->status = CAM_NO_HBA;
663		xpt_done(ccb);
664		return;
665	}
666
667	switch (ccbh->func_code) {
668	case XPT_SCSI_IO:
669		vtscsi_cam_scsi_io(sc, sim, ccb);
670		break;
671
672	case XPT_SET_TRAN_SETTINGS:
673		ccbh->status = CAM_FUNC_NOTAVAIL;
674		xpt_done(ccb);
675		break;
676
677	case XPT_GET_TRAN_SETTINGS:
678		vtscsi_cam_get_tran_settings(sc, ccb);
679		break;
680
681	case XPT_RESET_BUS:
682		vtscsi_cam_reset_bus(sc, ccb);
683		break;
684
685	case XPT_RESET_DEV:
686		vtscsi_cam_reset_dev(sc, ccb);
687		break;
688
689	case XPT_ABORT:
690		vtscsi_cam_abort(sc, ccb);
691		break;
692
693	case XPT_CALC_GEOMETRY:
694		cam_calc_geometry(&ccb->ccg, 1);
695		xpt_done(ccb);
696		break;
697
698	case XPT_PATH_INQ:
699		vtscsi_cam_path_inquiry(sc, sim, ccb);
700		break;
701
702	default:
703		vtscsi_dprintf(sc, VTSCSI_ERROR,
704		    "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code);
705
706		ccbh->status = CAM_REQ_INVALID;
707		xpt_done(ccb);
708		break;
709	}
710}
711
712static void
713vtscsi_cam_poll(struct cam_sim *sim)
714{
715	struct vtscsi_softc *sc;
716
717	sc = cam_sim_softc(sim);
718
719	vtscsi_complete_vqs_locked(sc);
720}
721
722static void
723vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim,
724    union ccb *ccb)
725{
726	struct ccb_hdr *ccbh;
727	struct ccb_scsiio *csio;
728	int error;
729
730	ccbh = &ccb->ccb_h;
731	csio = &ccb->csio;
732
733	if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) {
734		error = EINVAL;
735		ccbh->status = CAM_REQ_INVALID;
736		goto done;
737	}
738
739	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH &&
740	    (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) {
741		error = EINVAL;
742		ccbh->status = CAM_REQ_INVALID;
743		goto done;
744	}
745
746	error = vtscsi_start_scsi_cmd(sc, ccb);
747
748done:
749	if (error) {
750		vtscsi_dprintf(sc, VTSCSI_ERROR,
751		    "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status);
752		xpt_done(ccb);
753	}
754}
755
756static void
757vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb)
758{
759	struct ccb_trans_settings *cts;
760	struct ccb_trans_settings_scsi *scsi;
761
762	cts = &ccb->cts;
763	scsi = &cts->proto_specific.scsi;
764
765	cts->protocol = PROTO_SCSI;
766	cts->protocol_version = SCSI_REV_SPC3;
767	cts->transport = XPORT_SAS;
768	cts->transport_version = 0;
769
770	scsi->valid = CTS_SCSI_VALID_TQ;
771	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
772
773	ccb->ccb_h.status = CAM_REQ_CMP;
774	xpt_done(ccb);
775}
776
777static void
778vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb)
779{
780	int error;
781
782	error = vtscsi_reset_bus(sc);
783	if (error == 0)
784		ccb->ccb_h.status = CAM_REQ_CMP;
785	else
786		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
787
788	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n",
789	    error, ccb, ccb->ccb_h.status);
790
791	xpt_done(ccb);
792}
793
794static void
795vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb)
796{
797	struct ccb_hdr *ccbh;
798	struct vtscsi_request *req;
799	int error;
800
801	ccbh = &ccb->ccb_h;
802
803	req = vtscsi_dequeue_request(sc);
804	if (req == NULL) {
805		error = EAGAIN;
806		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
807		goto fail;
808	}
809
810	req->vsr_ccb = ccb;
811
812	error = vtscsi_execute_reset_dev_cmd(sc, req);
813	if (error == 0)
814		return;
815
816	vtscsi_enqueue_request(sc, req);
817
818fail:
819	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
820	    error, req, ccb);
821
822	if (error == EAGAIN)
823		ccbh->status = CAM_RESRC_UNAVAIL;
824	else
825		ccbh->status = CAM_REQ_CMP_ERR;
826
827	xpt_done(ccb);
828}
829
830static void
831vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb)
832{
833	struct vtscsi_request *req;
834	struct ccb_hdr *ccbh;
835	int error;
836
837	ccbh = &ccb->ccb_h;
838
839	req = vtscsi_dequeue_request(sc);
840	if (req == NULL) {
841		error = EAGAIN;
842		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
843		goto fail;
844	}
845
846	req->vsr_ccb = ccb;
847
848	error = vtscsi_execute_abort_task_cmd(sc, req);
849	if (error == 0)
850		return;
851
852	vtscsi_enqueue_request(sc, req);
853
854fail:
855	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
856	    error, req, ccb);
857
858	if (error == EAGAIN)
859		ccbh->status = CAM_RESRC_UNAVAIL;
860	else
861		ccbh->status = CAM_REQ_CMP_ERR;
862
863	xpt_done(ccb);
864}
865
866static void
867vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim,
868    union ccb *ccb)
869{
870	device_t dev;
871	struct ccb_pathinq *cpi;
872
873	dev = sc->vtscsi_dev;
874	cpi = &ccb->cpi;
875
876	vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb);
877
878	cpi->version_num = 1;
879	cpi->hba_inquiry = PI_TAG_ABLE;
880	cpi->target_sprt = 0;
881	cpi->hba_misc = PIM_SEQSCAN;
882	if (vtscsi_bus_reset_disable != 0)
883		cpi->hba_misc |= PIM_NOBUSRESET;
884	cpi->hba_eng_cnt = 0;
885
886	cpi->max_target = sc->vtscsi_max_target;
887	cpi->max_lun = sc->vtscsi_max_lun;
888	cpi->initiator_id = VTSCSI_INITIATOR_ID;
889
890	strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
891	strncpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
892	strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
893
894	cpi->unit_number = cam_sim_unit(sim);
895	cpi->bus_id = cam_sim_bus(sim);
896
897	cpi->base_transfer_speed = 300000;
898
899	cpi->protocol = PROTO_SCSI;
900	cpi->protocol_version = SCSI_REV_SPC3;
901	cpi->transport = XPORT_SAS;
902	cpi->transport_version = 0;
903
904	cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) *
905	    PAGE_SIZE;
906
907	cpi->hba_vendor = virtio_get_vendor(dev);
908	cpi->hba_device = virtio_get_device(dev);
909	cpi->hba_subvendor = virtio_get_subvendor(dev);
910	cpi->hba_subdevice = virtio_get_subdevice(dev);
911
912	ccb->ccb_h.status = CAM_REQ_CMP;
913	xpt_done(ccb);
914}
915
916static int
917vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
918    struct ccb_scsiio *csio)
919{
920	struct ccb_hdr *ccbh;
921	struct bus_dma_segment *dseg;
922	int i, error;
923
924	ccbh = &csio->ccb_h;
925	error = 0;
926
927	switch ((ccbh->flags & CAM_DATA_MASK)) {
928	case CAM_DATA_VADDR:
929		error = sglist_append(sg, csio->data_ptr, csio->dxfer_len);
930		break;
931	case CAM_DATA_PADDR:
932		error = sglist_append_phys(sg,
933		    (vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len);
934		break;
935	case CAM_DATA_SG:
936		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
937			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
938			error = sglist_append(sg,
939			    (void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len);
940		}
941		break;
942	case CAM_DATA_SG_PADDR:
943		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
944			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
945			error = sglist_append_phys(sg,
946			    (vm_paddr_t) dseg->ds_addr, dseg->ds_len);
947		}
948		break;
949	default:
950		error = EINVAL;
951		break;
952	}
953
954	return (error);
955}
956
957static int
958vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req,
959    int *readable, int *writable)
960{
961	struct sglist *sg;
962	struct ccb_hdr *ccbh;
963	struct ccb_scsiio *csio;
964	struct virtio_scsi_cmd_req *cmd_req;
965	struct virtio_scsi_cmd_resp *cmd_resp;
966	int error;
967
968	sg = sc->vtscsi_sglist;
969	csio = &req->vsr_ccb->csio;
970	ccbh = &csio->ccb_h;
971	cmd_req = &req->vsr_cmd_req;
972	cmd_resp = &req->vsr_cmd_resp;
973
974	sglist_reset(sg);
975
976	sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req));
977	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
978		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
979		/* At least one segment must be left for the response. */
980		if (error || sg->sg_nseg == sg->sg_maxseg)
981			goto fail;
982	}
983
984	*readable = sg->sg_nseg;
985
986	sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp));
987	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
988		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
989		if (error)
990			goto fail;
991	}
992
993	*writable = sg->sg_nseg - *readable;
994
995	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d "
996	    "writable=%d\n", req, ccbh, *readable, *writable);
997
998	return (0);
999
1000fail:
1001	/*
1002	 * This should never happen unless maxio was incorrectly set.
1003	 */
1004	vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0);
1005
1006	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p "
1007	    "nseg=%d maxseg=%d\n",
1008	    error, req, ccbh, sg->sg_nseg, sg->sg_maxseg);
1009
1010	return (EFBIG);
1011}
1012
1013static int
1014vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
1015{
1016	struct sglist *sg;
1017	struct virtqueue *vq;
1018	struct ccb_scsiio *csio;
1019	struct ccb_hdr *ccbh;
1020	struct virtio_scsi_cmd_req *cmd_req;
1021	struct virtio_scsi_cmd_resp *cmd_resp;
1022	int readable, writable, error;
1023
1024	sg = sc->vtscsi_sglist;
1025	vq = sc->vtscsi_request_vq;
1026	csio = &req->vsr_ccb->csio;
1027	ccbh = &csio->ccb_h;
1028	cmd_req = &req->vsr_cmd_req;
1029	cmd_resp = &req->vsr_cmd_resp;
1030
1031	vtscsi_init_scsi_cmd_req(csio, cmd_req);
1032
1033	error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
1034	if (error)
1035		return (error);
1036
1037	req->vsr_complete = vtscsi_complete_scsi_cmd;
1038	cmd_resp->response = -1;
1039
1040	error = virtqueue_enqueue(vq, req, sg, readable, writable);
1041	if (error) {
1042		vtscsi_dprintf(sc, VTSCSI_ERROR,
1043		    "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh);
1044
1045		ccbh->status = CAM_REQUEUE_REQ;
1046		vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ);
1047		return (error);
1048	}
1049
1050	ccbh->status |= CAM_SIM_QUEUED;
1051	ccbh->ccbh_vtscsi_req = req;
1052
1053	virtqueue_notify(vq);
1054
1055	if (ccbh->timeout != CAM_TIME_INFINITY) {
1056		req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET;
1057		callout_reset(&req->vsr_callout, ccbh->timeout * hz / 1000,
1058		    vtscsi_timedout_scsi_cmd, req);
1059	}
1060
1061	vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n",
1062	    req, ccbh);
1063
1064	return (0);
1065}
1066
1067static int
1068vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb)
1069{
1070	struct vtscsi_request *req;
1071	int error;
1072
1073	req = vtscsi_dequeue_request(sc);
1074	if (req == NULL) {
1075		ccb->ccb_h.status = CAM_REQUEUE_REQ;
1076		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
1077		return (ENOBUFS);
1078	}
1079
1080	req->vsr_ccb = ccb;
1081
1082	error = vtscsi_execute_scsi_cmd(sc, req);
1083	if (error)
1084		vtscsi_enqueue_request(sc, req);
1085
1086	return (error);
1087}
1088
1089static void
1090vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
1091    struct vtscsi_request *req)
1092{
1093	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1094	struct vtscsi_request *to_req;
1095	uint8_t response;
1096
1097	tmf_resp = &req->vsr_tmf_resp;
1098	response = tmf_resp->response;
1099	to_req = req->vsr_timedout_req;
1100
1101	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n",
1102	    req, to_req, response);
1103
1104	vtscsi_enqueue_request(sc, req);
1105
1106	/*
1107	 * The timedout request could have completed between when the
1108	 * abort task was sent and when the host processed it.
1109	 */
1110	if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT)
1111		return;
1112
1113	/* The timedout request was successfully aborted. */
1114	if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE)
1115		return;
1116
1117	/* Don't bother if the device is going away. */
1118	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
1119		return;
1120
1121	/* The timedout request will be aborted by the reset. */
1122	if (sc->vtscsi_flags & VTSCSI_FLAG_RESET)
1123		return;
1124
1125	vtscsi_reset_bus(sc);
1126}
1127
/*
 * Issue an asynchronous ABORT_TASK TMF for a request whose timeout
 * callout fired. Returns 0 if the TMF was queued to the host;
 * otherwise an errno and the caller falls back to a bus reset.
 */
static int
vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *to_req)
{
	struct sglist *sg;
	struct ccb_hdr *to_ccbh;
	struct vtscsi_request *req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	to_ccbh = &to_req->vsr_ccb->ccb_h;

	/* Need a spare request to carry the TMF itself. */
	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = ENOBUFS;
		goto fail;
	}

	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* The tag identifies the command to abort to the host. */
	vtscsi_init_ctrl_tmf_req(to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) to_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_timedout_req = to_req;
	req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd;
	/* Sentinel so a never-written response is distinguishable. */
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);
	if (error == 0)
		return (0);

	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p "
	    "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh);

	return (error);
}
1175
/*
 * Callout handler fired when a SCSI command exceeds its timeout.
 * Runs with the softc mutex held (callout_init_mtx in
 * vtscsi_init_request()).
 */
static void
vtscsi_timedout_scsi_cmd(void *xreq)
{
	struct vtscsi_softc *sc;
	struct vtscsi_request *to_req;

	to_req = xreq;
	sc = to_req->vsr_softc;

	vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n",
	    to_req, to_req->vsr_ccb, to_req->vsr_state);

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/*
	 * Bail if the request is not in use. We likely raced when
	 * stopping the callout handler or it has already been aborted.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE ||
	    (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0)
		return;

	/*
	 * Complete the request queue in case the timedout request is
	 * actually just pending.
	 */
	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE)
		return;

	sc->vtscsi_stats.scsi_cmd_timeouts++;
	to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT;

	/* Try a targeted abort first; reset the bus only if that fails. */
	if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0)
		return;

	vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n");
	vtscsi_reset_bus(sc);
}
1217
1218static cam_status
1219vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp)
1220{
1221	cam_status status;
1222
1223	switch (cmd_resp->response) {
1224	case VIRTIO_SCSI_S_OK:
1225		status = CAM_REQ_CMP;
1226		break;
1227	case VIRTIO_SCSI_S_OVERRUN:
1228		status = CAM_DATA_RUN_ERR;
1229		break;
1230	case VIRTIO_SCSI_S_ABORTED:
1231		status = CAM_REQ_ABORTED;
1232		break;
1233	case VIRTIO_SCSI_S_BAD_TARGET:
1234		status = CAM_SEL_TIMEOUT;
1235		break;
1236	case VIRTIO_SCSI_S_RESET:
1237		status = CAM_SCSI_BUS_RESET;
1238		break;
1239	case VIRTIO_SCSI_S_BUSY:
1240		status = CAM_SCSI_BUSY;
1241		break;
1242	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
1243	case VIRTIO_SCSI_S_TARGET_FAILURE:
1244	case VIRTIO_SCSI_S_NEXUS_FAILURE:
1245		status = CAM_SCSI_IT_NEXUS_LOST;
1246		break;
1247	default: /* VIRTIO_SCSI_S_FAILURE */
1248		status = CAM_REQ_CMP_ERR;
1249		break;
1250	}
1251
1252	return (status);
1253}
1254
1255static cam_status
1256vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
1257    struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
1258{
1259	cam_status status;
1260
1261	csio->scsi_status = cmd_resp->status;
1262	csio->resid = cmd_resp->resid;
1263
1264	if (csio->scsi_status == SCSI_STATUS_OK)
1265		status = CAM_REQ_CMP;
1266	else
1267		status = CAM_SCSI_STATUS_ERROR;
1268
1269	if (cmd_resp->sense_len > 0) {
1270		status |= CAM_AUTOSNS_VALID;
1271
1272		if (cmd_resp->sense_len < csio->sense_len)
1273			csio->sense_resid = csio->sense_len -
1274			    cmd_resp->sense_len;
1275		else
1276			csio->sense_resid = 0;
1277
1278		bzero(&csio->sense_data, sizeof(csio->sense_data));
1279		memcpy(cmd_resp->sense, &csio->sense_data,
1280		    csio->sense_len - csio->sense_resid);
1281	}
1282
1283	vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR,
1284	    "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n",
1285	    csio, csio->scsi_status, csio->resid, csio->sense_resid);
1286
1287	return (status);
1288}
1289
/*
 * Completion handler for a SCSI command dequeued from the request
 * virtqueue: stop its timeout, compute the final CCB status, manage
 * devq/simq freezes, and hand the CCB back to CAM.
 */
static void
vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_resp *cmd_resp;
	cam_status status;

	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_resp = &req->vsr_cmd_resp;

	KASSERT(ccbh->ccbh_vtscsi_req == req,
	    ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req));

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&req->vsr_callout);

	status = vtscsi_scsi_cmd_cam_status(cmd_resp);
	if (status == CAM_REQ_ABORTED) {
		/* An abort we initiated for a timeout reports as a timeout. */
		if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT)
			status = CAM_CMD_TIMEOUT;
	} else if (status == CAM_REQ_CMP)
		status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp);

	/* CAM expects the devq frozen on any non-successful completion. */
	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccbh->path, 1);
	}

	/* A request and a vq slot just freed up; maybe unfreeze the SIMQ. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		status |= CAM_RELEASE_SIMQ;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n",
	    req, ccbh, status);

	ccbh->status = status;
	xpt_done(req->vsr_ccb);
	vtscsi_enqueue_request(sc, req);
}
1330
1331static void
1332vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
1333{
1334
1335	/* XXX We probably shouldn't poll forever. */
1336	req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
1337	do
1338		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
1339	while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);
1340
1341	req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
1342}
1343
1344static int
1345vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
1346    struct sglist *sg, int readable, int writable, int flag)
1347{
1348	struct virtqueue *vq;
1349	int error;
1350
1351	vq = sc->vtscsi_control_vq;
1352
1353	MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);
1354
1355	error = virtqueue_enqueue(vq, req, sg, readable, writable);
1356	if (error) {
1357		/*
1358		 * Return EAGAIN when the virtqueue does not have enough
1359		 * descriptors available.
1360		 */
1361		if (error == ENOSPC || error == EMSGSIZE)
1362			error = EAGAIN;
1363
1364		return (error);
1365	}
1366
1367	virtqueue_notify(vq);
1368	if (flag == VTSCSI_EXECUTE_POLL)
1369		vtscsi_poll_ctrl_req(sc, req);
1370
1371	return (0);
1372}
1373
1374static void
1375vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
1376    struct vtscsi_request *req)
1377{
1378	union ccb *ccb;
1379	struct ccb_hdr *ccbh;
1380	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1381
1382	ccb = req->vsr_ccb;
1383	ccbh = &ccb->ccb_h;
1384	tmf_resp = &req->vsr_tmf_resp;
1385
1386	switch (tmf_resp->response) {
1387	case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
1388		ccbh->status = CAM_REQ_CMP;
1389		break;
1390	case VIRTIO_SCSI_S_FUNCTION_REJECTED:
1391		ccbh->status = CAM_UA_ABORT;
1392		break;
1393	default:
1394		ccbh->status = CAM_REQ_CMP_ERR;
1395		break;
1396	}
1397
1398	xpt_done(ccb);
1399	vtscsi_enqueue_request(sc, req);
1400}
1401
/*
 * Handle a CAM XPT_ABORT CCB by sending an ABORT_TASK TMF for the
 * referenced SCSI I/O. Returns 0 when the TMF was queued; the CCB is
 * completed later by vtscsi_complete_abort_task_cmd().
 */
static int
vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_abort *cab;
	struct ccb_hdr *ccbh;
	struct ccb_hdr *abort_ccbh;
	struct vtscsi_request *abort_req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	cab = &req->vsr_ccb->cab;
	ccbh = &cab->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* CCB header and request that's to be aborted. */
	abort_ccbh = &cab->abort_ccb->ccb_h;
	abort_req = abort_ccbh->ccbh_vtscsi_req;

	if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
		error = EINVAL;
		goto fail;
	}

	/* Only attempt to abort requests that could be in-flight. */
	if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
		error = EALREADY;
		goto fail;
	}

	/* Mark aborted and silence its timeout before the TMF goes out. */
	abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
	if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&abort_req->vsr_callout);

	vtscsi_init_ctrl_tmf_req(ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) abort_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_abort_task_cmd;
	/* Sentinel so a never-written response is distinguishable. */
	tmf_resp->response = -1;

	/*
	 * NOTE(review): if this submit fails, 'req' is not re-enqueued
	 * here; presumably the caller recycles it — confirm against the
	 * XPT_ABORT dispatch path.
	 */
	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

fail:
	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
	    "abort_req=%p\n", error, req, abort_ccbh, abort_req);

	return (error);
}
1459
1460static void
1461vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
1462    struct vtscsi_request *req)
1463{
1464	union ccb *ccb;
1465	struct ccb_hdr *ccbh;
1466	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1467
1468	ccb = req->vsr_ccb;
1469	ccbh = &ccb->ccb_h;
1470	tmf_resp = &req->vsr_tmf_resp;
1471
1472	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
1473	    req, ccb, tmf_resp->response);
1474
1475	if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
1476		ccbh->status = CAM_REQ_CMP;
1477		vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
1478		    ccbh->target_lun);
1479	} else
1480		ccbh->status = CAM_REQ_CMP_ERR;
1481
1482	xpt_done(ccb);
1483	vtscsi_enqueue_request(sc, req);
1484}
1485
1486static int
1487vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
1488    struct vtscsi_request *req)
1489{
1490	struct sglist *sg;
1491	struct ccb_resetdev *crd;
1492	struct ccb_hdr *ccbh;
1493	struct virtio_scsi_ctrl_tmf_req *tmf_req;
1494	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1495	uint32_t subtype;
1496	int error;
1497
1498	sg = sc->vtscsi_sglist;
1499	crd = &req->vsr_ccb->crd;
1500	ccbh = &crd->ccb_h;
1501	tmf_req = &req->vsr_tmf_req;
1502	tmf_resp = &req->vsr_tmf_resp;
1503
1504	if (ccbh->target_lun == CAM_LUN_WILDCARD)
1505		subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
1506	else
1507		subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;
1508
1509	vtscsi_init_ctrl_tmf_req(ccbh, subtype, 0, tmf_req);
1510
1511	sglist_reset(sg);
1512	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
1513	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));
1514
1515	req->vsr_complete = vtscsi_complete_reset_dev_cmd;
1516	tmf_resp->response = -1;
1517
1518	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
1519	    VTSCSI_EXECUTE_ASYNC);
1520
1521	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
1522	    error, req, ccbh);
1523
1524	return (error);
1525}
1526
/*
 * Decode a virtio 8-byte LUN field (as produced by
 * vtscsi_set_request_lun()) back into a CAM target/lun pair.
 * NOTE(review): the 0x40 flag bit written into lun[2] by the encoder
 * is not masked off here — confirm that event LUNs never set it or
 * that callers tolerate it.
 */
static void
vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
{

	*target_id = lun[1];
	*lun_id = (lun[2] << 8) | lun[3];
}
1534
/*
 * Encode a CAM target/lun into the 8-byte virtio LUN format:
 * byte 0 is the fixed prefix 1, byte 1 the target, and bytes 2-3 the
 * LUN with the 0x40 marker OR'd into the high byte.
 */
static void
vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
{

	lun[0] = 1;
	lun[1] = ccbh->target_id;
	lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
	lun[3] = ccbh->target_lun & 0xFF;
}
1544
1545static void
1546vtscsi_init_scsi_cmd_req(struct ccb_scsiio *csio,
1547    struct virtio_scsi_cmd_req *cmd_req)
1548{
1549	uint8_t attr;
1550
1551	switch (csio->tag_action) {
1552	case MSG_HEAD_OF_Q_TAG:
1553		attr = VIRTIO_SCSI_S_HEAD;
1554		break;
1555	case MSG_ORDERED_Q_TAG:
1556		attr = VIRTIO_SCSI_S_ORDERED;
1557		break;
1558	case MSG_ACA_TASK:
1559		attr = VIRTIO_SCSI_S_ACA;
1560		break;
1561	default: /* MSG_SIMPLE_Q_TAG */
1562		attr = VIRTIO_SCSI_S_SIMPLE;
1563		break;
1564	}
1565
1566	vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
1567	cmd_req->tag = (uintptr_t) csio;
1568	cmd_req->task_attr = attr;
1569
1570	memcpy(cmd_req->cdb,
1571	    csio->ccb_h.flags & CAM_CDB_POINTER ?
1572	        csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
1573	    csio->cdb_len);
1574}
1575
/*
 * Fill in a task-management request: the LUN it targets, the TMF
 * subtype, and the tag identifying the command it refers to (0 when
 * the TMF is not command-specific).
 */
static void
vtscsi_init_ctrl_tmf_req(struct ccb_hdr *ccbh, uint32_t subtype,
    uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
{

	vtscsi_set_request_lun(ccbh, tmf_req->lun);

	tmf_req->type = VIRTIO_SCSI_T_TMF;
	tmf_req->subtype = subtype;
	tmf_req->tag = tag;
}
1587
1588static void
1589vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
1590{
1591	int frozen;
1592
1593	frozen = sc->vtscsi_frozen;
1594
1595	if (reason & VTSCSI_REQUEST &&
1596	    (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
1597		sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;
1598
1599	if (reason & VTSCSI_REQUEST_VQ &&
1600	    (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
1601		sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;
1602
1603	/* Freeze the SIMQ if transitioned to frozen. */
1604	if (frozen == 0 && sc->vtscsi_frozen != 0) {
1605		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
1606		xpt_freeze_simq(sc->vtscsi_sim, 1);
1607	}
1608}
1609
1610static int
1611vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
1612{
1613	int thawed;
1614
1615	if (sc->vtscsi_frozen == 0 || reason == 0)
1616		return (0);
1617
1618	if (reason & VTSCSI_REQUEST &&
1619	    sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
1620		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;
1621
1622	if (reason & VTSCSI_REQUEST_VQ &&
1623	    sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
1624		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;
1625
1626	thawed = sc->vtscsi_frozen == 0;
1627	if (thawed != 0)
1628		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");
1629
1630	return (thawed);
1631}
1632
1633static void
1634vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code,
1635    target_id_t target_id, lun_id_t lun_id)
1636{
1637	struct cam_path *path;
1638
1639	/* Use the wildcard path from our softc for bus announcements. */
1640	if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) {
1641		xpt_async(ac_code, sc->vtscsi_path, NULL);
1642		return;
1643	}
1644
1645	if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim),
1646	    target_id, lun_id) != CAM_REQ_CMP) {
1647		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n");
1648		return;
1649	}
1650
1651	xpt_async(ac_code, path, NULL);
1652	xpt_free_path(path);
1653}
1654
1655static void
1656vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id,
1657    lun_id_t lun_id)
1658{
1659	union ccb *ccb;
1660	cam_status status;
1661
1662	ccb = xpt_alloc_ccb_nowait();
1663	if (ccb == NULL) {
1664		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
1665		return;
1666	}
1667
1668	status = xpt_create_path(&ccb->ccb_h.path, NULL,
1669	    cam_sim_path(sc->vtscsi_sim), target_id, lun_id);
1670	if (status != CAM_REQ_CMP) {
1671		xpt_free_ccb(ccb);
1672		return;
1673	}
1674
1675	xpt_rescan(ccb);
1676}
1677
/* Convenience wrapper: rescan the entire bus via wildcard IDs. */
static void
vtscsi_execute_rescan_bus(struct vtscsi_softc *sc)
{

	vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
}
1684
1685static void
1686vtscsi_transport_reset_event(struct vtscsi_softc *sc,
1687    struct virtio_scsi_event *event)
1688{
1689	target_id_t target_id;
1690	lun_id_t lun_id;
1691
1692	vtscsi_get_request_lun(event->lun, &target_id, &lun_id);
1693
1694	switch (event->reason) {
1695	case VIRTIO_SCSI_EVT_RESET_RESCAN:
1696	case VIRTIO_SCSI_EVT_RESET_REMOVED:
1697		vtscsi_execute_rescan(sc, target_id, lun_id);
1698		break;
1699	default:
1700		device_printf(sc->vtscsi_dev,
1701		    "unhandled transport event reason: %d\n", event->reason);
1702		break;
1703	}
1704}
1705
1706static void
1707vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event)
1708{
1709	int error;
1710
1711	if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) {
1712		switch (event->event) {
1713		case VIRTIO_SCSI_T_TRANSPORT_RESET:
1714			vtscsi_transport_reset_event(sc, event);
1715			break;
1716		default:
1717			device_printf(sc->vtscsi_dev,
1718			    "unhandled event: %d\n", event->event);
1719			break;
1720		}
1721	} else
1722		vtscsi_execute_rescan_bus(sc);
1723
1724	/*
1725	 * This should always be successful since the buffer
1726	 * was just dequeued.
1727	 */
1728	error = vtscsi_enqueue_event_buf(sc, event);
1729	KASSERT(error == 0,
1730	    ("cannot requeue event buffer: %d", error));
1731}
1732
1733static int
1734vtscsi_enqueue_event_buf(struct vtscsi_softc *sc,
1735    struct virtio_scsi_event *event)
1736{
1737	struct sglist *sg;
1738	struct virtqueue *vq;
1739	int size, error;
1740
1741	sg = sc->vtscsi_sglist;
1742	vq = sc->vtscsi_event_vq;
1743	size = sc->vtscsi_event_buf_size;
1744
1745	bzero(event, size);
1746
1747	sglist_reset(sg);
1748	error = sglist_append(sg, event, size);
1749	if (error)
1750		return (error);
1751
1752	error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg);
1753	if (error)
1754		return (error);
1755
1756	virtqueue_notify(vq);
1757
1758	return (0);
1759}
1760
1761static int
1762vtscsi_init_event_vq(struct vtscsi_softc *sc)
1763{
1764	struct virtio_scsi_event *event;
1765	int i, size, error;
1766
1767	/*
1768	 * The first release of QEMU with VirtIO SCSI support would crash
1769	 * when attempting to notify the event virtqueue. This was fixed
1770	 * when hotplug support was added.
1771	 */
1772	if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG)
1773		size = sc->vtscsi_event_buf_size;
1774	else
1775		size = 0;
1776
1777	if (size < sizeof(struct virtio_scsi_event))
1778		return (0);
1779
1780	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
1781		event = &sc->vtscsi_event_bufs[i];
1782
1783		error = vtscsi_enqueue_event_buf(sc, event);
1784		if (error)
1785			break;
1786	}
1787
1788	/*
1789	 * Even just one buffer is enough. Missed events are
1790	 * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag.
1791	 */
1792	if (i > 0)
1793		error = 0;
1794
1795	return (error);
1796}
1797
/*
 * Repost the event buffers after a device reinit (e.g. bus reset);
 * a no-op when event buffers were never enabled.
 */
static void
vtscsi_reinit_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, error;

	if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 ||
	    sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event))
		return;

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	/* At least one buffer must be posted for events to work. */
	KASSERT(i > 0, ("cannot reinit event vq: %d", error));
}
1818
1819static void
1820vtscsi_drain_event_vq(struct vtscsi_softc *sc)
1821{
1822	struct virtqueue *vq;
1823	int last;
1824
1825	vq = sc->vtscsi_event_vq;
1826	last = 0;
1827
1828	while (virtqueue_drain(vq, &last) != NULL)
1829		;
1830
1831	KASSERT(virtqueue_empty(vq), ("eventvq not empty"));
1832}
1833
/*
 * Process any pending completions on the request and control
 * virtqueues. Caller must hold the softc mutex; the NULL checks allow
 * use during partial attach/detach.
 */
static void
vtscsi_complete_vqs_locked(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_request_vq != NULL)
		vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (sc->vtscsi_control_vq != NULL)
		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
}
1845
/* Locked wrapper around vtscsi_complete_vqs_locked(). */
static void
vtscsi_complete_vqs(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);
	vtscsi_complete_vqs_locked(sc);
	VTSCSI_UNLOCK(sc);
}
1854
/*
 * Cancel a request pulled off a virtqueue during detach or bus reset:
 * stop/drain its timeout, complete its CCB with CAM_NO_HBA (detach) or
 * CAM_REQUEUE_REQ (reset), and return the request to the free list.
 */
static void
vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	union ccb *ccb;
	int detach;

	ccb = req->vsr_ccb;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb);

	/*
	 * The callout must be drained when detaching since the request is
	 * about to be freed. The VTSCSI_MTX must not be held for this in
	 * case the callout is pending because there is a deadlock potential.
	 * Otherwise, the virtqueue is being drained because of a bus reset
	 * so we only need to attempt to stop the callouts.
	 */
	detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0;
	if (detach != 0)
		VTSCSI_LOCK_NOTOWNED(sc);
	else
		VTSCSI_LOCK_OWNED(sc);

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) {
		if (detach != 0)
			callout_drain(&req->vsr_callout);
		else
			callout_stop(&req->vsr_callout);
	}

	if (ccb != NULL) {
		if (detach != 0) {
			/* Reacquire the lock for xpt_done() in this case. */
			VTSCSI_LOCK(sc);
			ccb->ccb_h.status = CAM_NO_HBA;
		} else
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		if (detach != 0)
			VTSCSI_UNLOCK(sc);
	}

	vtscsi_enqueue_request(sc, req);
}
1898
1899static void
1900vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
1901{
1902	struct vtscsi_request *req;
1903	int last;
1904
1905	last = 0;
1906
1907	vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq);
1908
1909	while ((req = virtqueue_drain(vq, &last)) != NULL)
1910		vtscsi_cancel_request(sc, req);
1911
1912	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
1913}
1914
/*
 * Drain all three virtqueues; NULL checks allow use during a partially
 * completed attach.
 */
static void
vtscsi_drain_vqs(struct vtscsi_softc *sc)
{

	if (sc->vtscsi_control_vq != NULL)
		vtscsi_drain_vq(sc, sc->vtscsi_control_vq);
	if (sc->vtscsi_request_vq != NULL)
		vtscsi_drain_vq(sc, sc->vtscsi_request_vq);
	if (sc->vtscsi_event_vq != NULL)
		vtscsi_drain_event_vq(sc);
}
1926
/* Quiesce the device: mask virtqueue interrupts, then stop it. */
static void
vtscsi_stop(struct vtscsi_softc *sc)
{

	vtscsi_disable_vqs_intr(sc);
	virtio_stop(sc->vtscsi_dev);
}
1934
/*
 * Perform a full bus reset: stop the device, complete and drain all
 * outstanding work, thaw the SIMQ, and reinitialize. Caller must hold
 * the softc mutex. Returns the vtscsi_reinit() result.
 */
static int
vtscsi_reset_bus(struct vtscsi_softc *sc)
{
	int error;

	VTSCSI_LOCK_OWNED(sc);

	if (vtscsi_bus_reset_disable != 0) {
		device_printf(sc->vtscsi_dev, "bus reset disabled\n");
		return (0);
	}

	/* Flag so concurrent paths know a reset is in progress. */
	sc->vtscsi_flags |= VTSCSI_FLAG_RESET;

	/*
	 * vtscsi_stop() will cause the in-flight requests to be canceled.
	 * Those requests are then completed here so CAM will retry them
	 * after the reset is complete.
	 */
	vtscsi_stop(sc);
	vtscsi_complete_vqs_locked(sc);

	/* Rid the virtqueues of any remaining requests. */
	vtscsi_drain_vqs(sc);

	/*
	 * Any resource shortage that froze the SIMQ cannot persist across
	 * a bus reset so ensure it gets thawed here.
	 */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		xpt_release_simq(sc->vtscsi_sim, 0);

	error = vtscsi_reinit(sc);
	if (error) {
		device_printf(sc->vtscsi_dev,
		    "reinitialization failed, stopping device...\n");
		vtscsi_stop(sc);
	} else
		vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
		    CAM_LUN_WILDCARD);

	sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET;

	return (error);
}
1980
1981static void
1982vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
1983{
1984
1985#ifdef INVARIANTS
1986	int req_nsegs, resp_nsegs;
1987
1988	req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
1989	resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));
1990
1991	KASSERT(req_nsegs == 1, ("request crossed page boundary"));
1992	KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
1993#endif
1994
1995	req->vsr_softc = sc;
1996	callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0);
1997}
1998
1999static int
2000vtscsi_alloc_requests(struct vtscsi_softc *sc)
2001{
2002	struct vtscsi_request *req;
2003	int i, nreqs;
2004
2005	/*
2006	 * Commands destined for either the request or control queues come
2007	 * from the same SIM queue. Use the size of the request virtqueue
2008	 * as it (should) be much more frequently used. Some additional
2009	 * requests are allocated for internal (TMF) use.
2010	 */
2011	nreqs = virtqueue_size(sc->vtscsi_request_vq);
2012	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0)
2013		nreqs /= VTSCSI_MIN_SEGMENTS;
2014	nreqs += VTSCSI_RESERVED_REQUESTS;
2015
2016	for (i = 0; i < nreqs; i++) {
2017		req = malloc(sizeof(struct vtscsi_request), M_DEVBUF,
2018		    M_NOWAIT);
2019		if (req == NULL)
2020			return (ENOMEM);
2021
2022		vtscsi_init_request(sc, req);
2023
2024		sc->vtscsi_nrequests++;
2025		vtscsi_enqueue_request(sc, req);
2026	}
2027
2028	return (0);
2029}
2030
2031static void
2032vtscsi_free_requests(struct vtscsi_softc *sc)
2033{
2034	struct vtscsi_request *req;
2035
2036	while ((req = vtscsi_dequeue_request(sc)) != NULL) {
2037		KASSERT(callout_active(&req->vsr_callout) == 0,
2038		    ("request callout still active"));
2039
2040		sc->vtscsi_nrequests--;
2041		free(req, M_DEVBUF);
2042	}
2043
2044	KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d",
2045	    sc->vtscsi_nrequests));
2046}
2047
/*
 * Return a request to the free list, resetting its per-use state and
 * thawing the SIMQ if it was frozen for want of requests.
 */
static void
vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

	KASSERT(req->vsr_softc == sc,
	    ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc));

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);

	/* A request is available so the SIMQ could be released. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0)
		xpt_release_simq(sc->vtscsi_sim, 1);

	req->vsr_ccb = NULL;
	req->vsr_complete = NULL;
	req->vsr_ptr0 = NULL;
	req->vsr_state = VTSCSI_REQ_STATE_FREE;
	req->vsr_flags = 0;

	bzero(&req->vsr_ureq, sizeof(req->vsr_ureq));
	bzero(&req->vsr_uresp, sizeof(req->vsr_uresp));

	/*
	 * We insert at the tail of the queue in order to make it
	 * very unlikely a request will be reused if we race with
	 * stopping its callout handler.
	 */
	TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link);
}
2077
2078static struct vtscsi_request *
2079vtscsi_dequeue_request(struct vtscsi_softc *sc)
2080{
2081	struct vtscsi_request *req;
2082
2083	req = TAILQ_FIRST(&sc->vtscsi_req_free);
2084	if (req != NULL) {
2085		req->vsr_state = VTSCSI_REQ_STATE_INUSE;
2086		TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link);
2087	} else
2088		sc->vtscsi_stats.dequeue_no_requests++;
2089
2090	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
2091
2092	return (req);
2093}
2094
/*
 * Complete one request: mark it done for pollers and invoke its
 * completion callback, if any.
 */
static void
vtscsi_complete_request(struct vtscsi_request *req)
{

	/* Pollers (vtscsi_poll_ctrl_req) spin on the COMPLETE flag. */
	if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED)
		req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE;

	if (req->vsr_complete != NULL)
		req->vsr_complete(req->vsr_softc, req);
}
2105
/*
 * Complete every finished request on the given virtqueue. Caller must
 * hold the softc mutex.
 */
static void
vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
{
	struct vtscsi_request *req;

	VTSCSI_LOCK_OWNED(sc);

	while ((req = virtqueue_dequeue(vq, NULL)) != NULL)
		vtscsi_complete_request(req);
}
2116
2117static void
2118vtscsi_control_vq_intr(void *xsc)
2119{
2120	struct vtscsi_softc *sc;
2121	struct virtqueue *vq;
2122
2123	sc = xsc;
2124	vq = sc->vtscsi_control_vq;
2125
2126again:
2127	VTSCSI_LOCK(sc);
2128
2129	vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
2130
2131	if (virtqueue_enable_intr(vq) != 0) {
2132		virtqueue_disable_intr(vq);
2133		VTSCSI_UNLOCK(sc);
2134		goto again;
2135	}
2136
2137	VTSCSI_UNLOCK(sc);
2138}
2139
2140static void
2141vtscsi_event_vq_intr(void *xsc)
2142{
2143	struct vtscsi_softc *sc;
2144	struct virtqueue *vq;
2145	struct virtio_scsi_event *event;
2146
2147	sc = xsc;
2148	vq = sc->vtscsi_event_vq;
2149
2150again:
2151	VTSCSI_LOCK(sc);
2152
2153	while ((event = virtqueue_dequeue(vq, NULL)) != NULL)
2154		vtscsi_handle_event(sc, event);
2155
2156	if (virtqueue_enable_intr(vq) != 0) {
2157		virtqueue_disable_intr(vq);
2158		VTSCSI_UNLOCK(sc);
2159		goto again;
2160	}
2161
2162	VTSCSI_UNLOCK(sc);
2163}
2164
2165static void
2166vtscsi_request_vq_intr(void *xsc)
2167{
2168	struct vtscsi_softc *sc;
2169	struct virtqueue *vq;
2170
2171	sc = xsc;
2172	vq = sc->vtscsi_request_vq;
2173
2174again:
2175	VTSCSI_LOCK(sc);
2176
2177	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
2178
2179	if (virtqueue_enable_intr(vq) != 0) {
2180		virtqueue_disable_intr(vq);
2181		VTSCSI_UNLOCK(sc);
2182		goto again;
2183	}
2184
2185	VTSCSI_UNLOCK(sc);
2186}
2187
/* Mask interrupts on all three virtqueues. */
static void
vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_disable_intr(sc->vtscsi_control_vq);
	virtqueue_disable_intr(sc->vtscsi_event_vq);
	virtqueue_disable_intr(sc->vtscsi_request_vq);
}
2196
/* Unmask interrupts on all three virtqueues. */
static void
vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_enable_intr(sc->vtscsi_control_vq);
	virtqueue_enable_intr(sc->vtscsi_event_vq);
	virtqueue_enable_intr(sc->vtscsi_request_vq);
}
2205
2206static void
2207vtscsi_get_tunables(struct vtscsi_softc *sc)
2208{
2209	char tmpstr[64];
2210
2211	TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);
2212
2213	snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
2214	    device_get_unit(sc->vtscsi_dev));
2215	TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
2216}
2217
2218static void
2219vtscsi_add_sysctl(struct vtscsi_softc *sc)
2220{
2221	device_t dev;
2222	struct vtscsi_statistics *stats;
2223        struct sysctl_ctx_list *ctx;
2224	struct sysctl_oid *tree;
2225	struct sysctl_oid_list *child;
2226
2227	dev = sc->vtscsi_dev;
2228	stats = &sc->vtscsi_stats;
2229	ctx = device_get_sysctl_ctx(dev);
2230	tree = device_get_sysctl_tree(dev);
2231	child = SYSCTL_CHILDREN(tree);
2232
2233	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
2234	    CTLFLAG_RW, &sc->vtscsi_debug, 0,
2235	    "Debug level");
2236
2237	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
2238	    CTLFLAG_RD, &stats->scsi_cmd_timeouts,
2239	    "SCSI command timeouts");
2240	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
2241	    CTLFLAG_RD, &stats->dequeue_no_requests,
2242	    "No available requests to dequeue");
2243}
2244
/*
 * Debug printf helper: prefix the message with the CAM path (and CDB,
 * for SCSI I/O CCBs) of the request, formatted into a fixed-size sbuf.
 */
static void
vtscsi_printf_req(struct vtscsi_request *req, const char *func,
    const char *fmt, ...)
{
	struct vtscsi_softc *sc;
	union ccb *ccb;
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (req == NULL)
		return;

	sc = req->vsr_softc;
	ccb = req->vsr_ccb;

	va_start(ap, fmt);
	/* Fixed-length sbuf over 'str'; no allocation, no sbuf_delete(). */
	sbuf_new(&sb, str, sizeof(str), 0);

	if (ccb == NULL) {
		sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
		    cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
		    cam_sim_bus(sc->vtscsi_sim));
	} else {
		xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
		}
	}

	sbuf_vprintf(&sb, fmt, ap);
	va_end(ap);

	sbuf_finish(&sb);
	printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,
	    sbuf_data(&sb));
}
2285