/*	$OpenBSD: vioblk.c,v 1.39 2024/06/26 01:40:49 jsg Exp $	*/

/*
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * Copyright (c) 1998, 2001 Manuel Bouyer.
 * All rights reserved.
 *
 * This code is based in part on the NetBSD ld_virtio driver and the
 * OpenBSD vdsk driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *	notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *	notice, this list of conditions and the following disclaimer in the
 *	documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2009, 2011 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <machine/bus.h>

#include <sys/device.h>
#include <sys/mutex.h>
#include <dev/pv/virtioreg.h>
#include <dev/pv/virtiovar.h>
#include <dev/pv/vioblkreg.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

#define VIOBLK_DONE	-1

/* Number of DMA segments for buffers that the device must support */
#define SEG_MAX		(MAXPHYS/PAGE_SIZE + 1)
/* In the virtqueue, we need space for header and footer, too */
#define ALLOC_SEGS	(SEG_MAX + 2)

struct virtio_feature_name vioblk_feature_names[] = {
#if VIRTIO_DEBUG
	{ VIRTIO_BLK_F_BARRIER,		"Barrier" },
	{ VIRTIO_BLK_F_SIZE_MAX,	"SizeMax" },
	{ VIRTIO_BLK_F_SEG_MAX,		"SegMax" },
	{ VIRTIO_BLK_F_GEOMETRY,	"Geometry" },
	{ VIRTIO_BLK_F_RO,		"RO" },
	{ VIRTIO_BLK_F_BLK_SIZE,	"BlkSize" },
	{ VIRTIO_BLK_F_SCSI,		"SCSI" },
	{ VIRTIO_BLK_F_FLUSH,		"Flush" },
	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology" },
	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE" },
	{ VIRTIO_BLK_F_DISCARD,		"Discard" },
	{ VIRTIO_BLK_F_WRITE_ZEROES,	"Write0s" },
#endif
	{ 0,				NULL }
};

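/*
 * Per-request state. The leading vr_hdr and vr_status fields are the only
 * parts visible to the device via DMA (see VR_DMA_END); the fields from
 * vr_qe_index onward are driver-private bookkeeping.
 */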
struct virtio_blk_req {
	struct virtio_blk_req_hdr	 vr_hdr;
	uint8_t				 vr_status;
#define VR_DMA_END	offsetof(struct virtio_blk_req, vr_qe_index)
	int16_t				 vr_qe_index;
	int				 vr_len;
	struct scsi_xfer		*vr_xs;
	bus_dmamap_t			 vr_cmdsts;
	bus_dmamap_t			 vr_payload;
	SLIST_ENTRY(virtio_blk_req)	 vr_list;
};

struct vioblk_softc {
	struct device		 sc_dev;
	struct virtio_softc	*sc_virtio;

	struct virtqueue	 sc_vq[1];
	struct virtio_blk_req	*sc_reqs;
	bus_dma_segment_t	 sc_reqs_segs[1];
	int			 sc_nreqs;

	struct scsi_iopool	 sc_iopool;
	struct mutex		 sc_vr_mtx;
	SLIST_HEAD(, virtio_blk_req) sc_freelist;

	int			 sc_notify_on_empty;

	uint32_t		 sc_queued;

	uint64_t		 sc_capacity;
};

int	vioblk_match(struct device *, void *, void *);
void	vioblk_attach(struct device *, struct device *, void *);
int	vioblk_alloc_reqs(struct vioblk_softc *, int);
int	vioblk_vq_done(struct virtqueue *);
void	vioblk_vq_done1(struct vioblk_softc *, struct virtio_softc *,
			struct virtqueue *, int);
void	vioblk_reset(struct vioblk_softc *);

void	vioblk_scsi_cmd(struct scsi_xfer *);

void	*vioblk_req_get(void *);
void	vioblk_req_put(void *, void *);

void	vioblk_scsi_inq(struct scsi_xfer *);
void	vioblk_scsi_capacity(struct scsi_xfer *);
void	vioblk_scsi_capacity16(struct scsi_xfer *);
void	vioblk_scsi_done(struct scsi_xfer *, int);

const struct cfattach vioblk_ca = {
	sizeof(struct vioblk_softc),
	vioblk_match,
	vioblk_attach,
	NULL
};

struct cfdriver vioblk_cd = {
	NULL, "vioblk", DV_DULL
};

const struct scsi_adapter vioblk_switch = {
	vioblk_scsi_cmd, NULL, NULL, NULL, NULL
};

int
vioblk_match(struct device *parent, void *match, void *aux)
{
	struct virtio_softc *va = aux;
	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_BLOCK)
		return 1;
	return 0;
}

#define DNPRINTF(n,x...)				\
    do { if (VIRTIO_DEBUG >= n) printf(x); } while (0)

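/*
 * Attach: negotiate features, check the device's segment limits against
 * what this driver needs, allocate the I/O virtqueue and the request pool,
 * then attach the SCSI bus on top of it.
 */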
void
vioblk_attach(struct device *parent, struct device *self, void *aux)
{
	struct vioblk_softc *sc = (struct vioblk_softc *)self;
	struct virtio_softc *vsc = (struct virtio_softc *)parent;
	struct scsibus_attach_args saa;
	int qsize;

	vsc->sc_vqs = &sc->sc_vq[0];
	vsc->sc_nvqs = 1;
	vsc->sc_config_change = NULL;
	if (vsc->sc_child)
		panic("already attached to something else");
	vsc->sc_child = self;
	vsc->sc_ipl = IPL_BIO;
	sc->sc_virtio = vsc;
	vsc->sc_driver_features = VIRTIO_BLK_F_RO | VIRTIO_F_NOTIFY_ON_EMPTY |
	     VIRTIO_BLK_F_SIZE_MAX | VIRTIO_BLK_F_SEG_MAX | VIRTIO_BLK_F_FLUSH;

	virtio_negotiate_features(vsc, vioblk_feature_names);

	if (virtio_has_feature(vsc, VIRTIO_BLK_F_SIZE_MAX)) {
		uint32_t size_max = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_SIZE_MAX);
		if (size_max < PAGE_SIZE) {
			printf("\nMax segment size %u too low\n", size_max);
			goto err;
		}
	}

	if (virtio_has_feature(vsc, VIRTIO_BLK_F_SEG_MAX)) {
		uint32_t seg_max = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_SEG_MAX);
		if (seg_max < SEG_MAX) {
			printf("\nMax number of segments %d too small\n",
			    seg_max);
			goto err;
		}
	}

	sc->sc_capacity = virtio_read_device_config_8(vsc,
	    VIRTIO_BLK_CONFIG_CAPACITY);

	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, MAXPHYS, ALLOC_SEGS,
	    "I/O request") != 0) {
		printf("\nCan't alloc virtqueue\n");
		goto err;
	}
	qsize = sc->sc_vq[0].vq_num;
	sc->sc_vq[0].vq_done = vioblk_vq_done;

	if (virtio_has_feature(vsc, VIRTIO_F_NOTIFY_ON_EMPTY)) {
		virtio_stop_vq_intr(vsc, &sc->sc_vq[0]);
		sc->sc_notify_on_empty = 1;
	} else {
		sc->sc_notify_on_empty = 0;
	}

	sc->sc_queued = 0;

	SLIST_INIT(&sc->sc_freelist);
	mtx_init(&sc->sc_vr_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, vioblk_req_get, vioblk_req_put);

	sc->sc_nreqs = vioblk_alloc_reqs(sc, qsize);
	if (sc->sc_nreqs == 0) {
		printf("\nCan't alloc reqs\n");
		goto err;
	}
	DNPRINTF(1, "%s: qsize: %d\n", __func__, qsize);
	printf("\n");

	saa.saa_adapter = &vioblk_switch;
	saa.saa_adapter_softc = self;
	saa.saa_adapter_buswidth = 1;
	saa.saa_luns = 1;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_openings = sc->sc_nreqs;
	saa.saa_pool = &sc->sc_iopool;
	if (virtio_has_feature(vsc, VIRTIO_BLK_F_RO))
		saa.saa_flags = SDEV_READONLY;
	else
		saa.saa_flags = 0;
	saa.saa_quirks = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	config_found(self, &saa, scsiprint);

	return;
err:
	vsc->sc_child = VIRTIO_CHILD_ERROR;
	return;
}

/*
 * vioblk_req_get() provides the SCSI layer with all the
 * resources necessary to start an I/O on the device.
 *
 * Since the size of the I/O is unknown at this time the
 * resources allocated (a.k.a. reserved) must be sufficient
 * to allow the maximum possible I/O size.
 *
 * When the I/O is actually attempted via vioblk_scsi_cmd()
 * excess resources will be returned via virtio_enqueue_trim().
 */
void *
vioblk_req_get(void *cookie)
{
	struct vioblk_softc *sc = cookie;
	struct virtio_blk_req *vr = NULL;

	mtx_enter(&sc->sc_vr_mtx);
	vr = SLIST_FIRST(&sc->sc_freelist);
	if (vr != NULL)
		SLIST_REMOVE_HEAD(&sc->sc_freelist, vr_list);
	mtx_leave(&sc->sc_vr_mtx);

	DNPRINTF(2, "%s: %p\n", __func__, vr);

	return vr;
}

void
vioblk_req_put(void *cookie, void *io)
{
	struct vioblk_softc *sc = cookie;
	struct virtio_blk_req *vr = io;

	DNPRINTF(2, "%s: %p\n", __func__, vr);

	mtx_enter(&sc->sc_vr_mtx);
	/*
	 * Do *NOT* call virtio_dequeue_commit()!
	 *
	 * Descriptors are permanently associated with the virtio_blk_req
	 * and should not be placed on the free list!
	 */
	SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	mtx_leave(&sc->sc_vr_mtx);
}

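/*
 * Virtqueue completion handler: drain all finished requests from the used
 * ring and complete each one via vioblk_vq_done1(). Returns 1 if at least
 * one request was completed.
 */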
int
vioblk_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioblk_softc *sc = (struct vioblk_softc *)vsc->sc_child;
	struct vq_entry *qe;
	int slot;
	int ret = 0;

	if (!sc->sc_notify_on_empty)
		virtio_stop_vq_intr(vsc, vq);
	for (;;) {
		if (virtio_dequeue(vsc, vq, &slot, NULL) != 0) {
			if (sc->sc_notify_on_empty)
				break;
			virtio_start_vq_intr(vsc, vq);
			if (virtio_dequeue(vsc, vq, &slot, NULL) != 0)
				break;
		}
		qe = &vq->vq_entries[slot];
		vioblk_vq_done1(sc, vsc, vq, qe->qe_vr_index);
		ret = 1;
	}
	return ret;
}

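/*
 * Complete a single request: sync and unload its DMA maps, translate the
 * virtio status byte into a SCSI result and hand the transfer back to the
 * midlayer with scsi_done().
 */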
void
vioblk_vq_done1(struct vioblk_softc *sc, struct virtio_softc *vsc,
    struct virtqueue *vq, int slot)
{
	struct virtio_blk_req *vr = &sc->sc_reqs[slot];
	struct scsi_xfer *xs = vr->vr_xs;
	KASSERT(vr->vr_len != VIOBLK_DONE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts, 0,
	    sizeof(struct virtio_blk_req_hdr), BUS_DMASYNC_POSTWRITE);
	if (vr->vr_hdr.type != VIRTIO_BLK_T_FLUSH) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload, 0, vr->vr_len,
		    (vr->vr_hdr.type == VIRTIO_BLK_T_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_payload);
	}
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    sizeof(struct virtio_blk_req_hdr), sizeof(uint8_t),
	    BUS_DMASYNC_POSTREAD);

	if (vr->vr_status != VIRTIO_BLK_S_OK) {
		DNPRINTF(1, "%s: EIO\n", __func__);
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
	} else {
		xs->error = XS_NOERROR;
		xs->resid = xs->datalen - vr->vr_len;
	}
	vr->vr_len = VIOBLK_DONE;
	scsi_done(xs);
}

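/*
 * Reset the device to stop all DMA, then complete whatever already
 * finished and abort every request still outstanding.
 */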
void
vioblk_reset(struct vioblk_softc *sc)
{
	int i;

	/* reset device to stop DMA */
	virtio_reset(sc->sc_virtio);

	/* finish requests that have been completed */
	vioblk_vq_done(&sc->sc_vq[0]);

	/* abort all remaining requests */
	for (i = 0; i < sc->sc_nreqs; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		struct scsi_xfer *xs = vr->vr_xs;

		if (vr->vr_len == VIOBLK_DONE)
			continue;

		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		scsi_done(xs);
	}
}

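/*
 * Entry point from the SCSI midlayer. READ/WRITE and SYNCHRONIZE CACHE
 * are translated into virtio block requests; INQUIRY, READ CAPACITY and a
 * few housekeeping commands are emulated directly by the driver.
 */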
void
vioblk_scsi_cmd(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct virtqueue *vq = &sc->sc_vq[0];
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtio_blk_req *vr;
	int len, s, timeout, isread, slot, ret, nsegs;
	int error = XS_DRIVER_STUFFUP;
	struct scsi_rw *rw;
	struct scsi_rw_10 *rw10;
	struct scsi_rw_12 *rw12;
	struct scsi_rw_16 *rw16;
	u_int64_t lba = 0;
	u_int32_t sector_count = 0;
	uint8_t operation;

	switch (xs->cmd.opcode) {
	case READ_COMMAND:
	case READ_10:
	case READ_12:
	case READ_16:
		operation = VIRTIO_BLK_T_IN;
		isread = 1;
		break;
	case WRITE_COMMAND:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		operation = VIRTIO_BLK_T_OUT;
		isread = 0;
		break;

	case SYNCHRONIZE_CACHE:
		if (!virtio_has_feature(vsc, VIRTIO_BLK_F_FLUSH)) {
			vioblk_scsi_done(xs, XS_NOERROR);
			return;
		}
		operation = VIRTIO_BLK_T_FLUSH;
		break;

	case INQUIRY:
		vioblk_scsi_inq(xs);
		return;
	case READ_CAPACITY:
		vioblk_scsi_capacity(xs);
		return;
	case READ_CAPACITY_16:
		vioblk_scsi_capacity16(xs);
		return;

	case TEST_UNIT_READY:
	case START_STOP:
	case PREVENT_ALLOW:
		vioblk_scsi_done(xs, XS_NOERROR);
		return;

	default:
		printf("%s cmd 0x%02x\n", __func__, xs->cmd.opcode);
	case MODE_SENSE:
	case MODE_SENSE_BIG:
	case REPORT_LUNS:
		vioblk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	/*
	 * READ/WRITE/SYNCHRONIZE commands. SYNCHRONIZE CACHE has same
	 * layout as 10-byte READ/WRITE commands.
	 */
	if (xs->cmdlen == 6) {
		rw = (struct scsi_rw *)&xs->cmd;
		lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		sector_count = rw->length ? rw->length : 0x100;
	} else if (xs->cmdlen == 10) {
		rw10 = (struct scsi_rw_10 *)&xs->cmd;
		lba = _4btol(rw10->addr);
		sector_count = _2btol(rw10->length);
	} else if (xs->cmdlen == 12) {
		rw12 = (struct scsi_rw_12 *)&xs->cmd;
		lba = _4btol(rw12->addr);
		sector_count = _4btol(rw12->length);
	} else if (xs->cmdlen == 16) {
		rw16 = (struct scsi_rw_16 *)&xs->cmd;
		lba = _8btol(rw16->addr);
		sector_count = _4btol(rw16->length);
	}

	s = splbio();
	vr = xs->io;
	slot = vr->vr_qe_index;
	if (operation != VIRTIO_BLK_T_FLUSH) {
		len = MIN(xs->datalen, sector_count * VIRTIO_BLK_SECTOR_SIZE);
		ret = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload,
		    xs->data, len, NULL,
		    ((isread ? BUS_DMA_READ : BUS_DMA_WRITE) |
		     BUS_DMA_NOWAIT));
		if (ret) {
			printf("%s: bus_dmamap_load: %d\n", __func__, ret);
			error = XS_DRIVER_STUFFUP;
			goto out_done;
		}
		nsegs = vr->vr_payload->dm_nsegs + 2;
	} else {
		len = 0;
		nsegs = 2;
	}

	/*
	 * Adjust reservation to the number needed, or virtio gets upset. Note
	 * that it may trim UP if 'xs' is being recycled w/o getting a new
	 * reservation!
	 */
	virtio_enqueue_trim(vq, slot, nsegs);

	vr->vr_xs = xs;
	vr->vr_hdr.type = operation;
	vr->vr_hdr.ioprio = 0;
	vr->vr_hdr.sector = lba;
	vr->vr_len = len;

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			0, sizeof(struct virtio_blk_req_hdr),
			BUS_DMASYNC_PREWRITE);
	if (operation != VIRTIO_BLK_T_FLUSH) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload, 0, len,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t),
	    BUS_DMASYNC_PREREAD);

	virtio_enqueue_p(vq, slot, vr->vr_cmdsts, 0,
	    sizeof(struct virtio_blk_req_hdr), 1);
	if (operation != VIRTIO_BLK_T_FLUSH)
		virtio_enqueue(vq, slot, vr->vr_payload, !isread);
	virtio_enqueue_p(vq, slot, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t), 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);
	sc->sc_queued++;

	if (!ISSET(xs->flags, SCSI_POLL)) {
		/* check if some xfers are done: */
		if (sc->sc_queued > 1)
			vioblk_vq_done(vq);
		splx(s);
		return;
	}

	timeout = 15 * 1000;
	do {
		if (virtio_poll_intr(vsc) && vr->vr_len == VIOBLK_DONE)
			break;

		delay(1000);
	} while (--timeout > 0);
	if (timeout <= 0) {
		printf("%s: SCSI_POLL timed out\n", __func__);
		vioblk_reset(sc);
		virtio_reinit_start(vsc);
		virtio_reinit_end(vsc);
	}
	splx(s);
	return;

out_done:
	splx(s);
	vioblk_scsi_done(xs, error);
}

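/*
 * Emulated INQUIRY: report a fixed direct-access identity for the virtio
 * block device; vital product data pages are not supported.
 */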
void
vioblk_scsi_inq(struct scsi_xfer *xs)
{
	struct scsi_inquiry *inq = (struct scsi_inquiry *)&xs->cmd;
	struct scsi_inquiry_data inqd;

	if (ISSET(inq->flags, SI_EVPD)) {
		vioblk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	bzero(&inqd, sizeof(inqd));

	inqd.device = T_DIRECT;
	inqd.version = SCSI_REV_SPC3;
	inqd.response_format = SID_SCSI2_RESPONSE;
	inqd.additional_length = SID_SCSI2_ALEN;
	inqd.flags |= SID_CmdQue;
	bcopy("VirtIO  ", inqd.vendor, sizeof(inqd.vendor));
	bcopy("Block Device    ", inqd.product, sizeof(inqd.product));

	scsi_copy_internal_data(xs, &inqd, sizeof(inqd));

	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_capacity(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct scsi_read_cap_data rcd;
	uint64_t capacity;

	bzero(&rcd, sizeof(rcd));

	capacity = sc->sc_capacity - 1;
	if (capacity > 0xffffffff)
		capacity = 0xffffffff;

	_lto4b(capacity, rcd.addr);
	_lto4b(VIRTIO_BLK_SECTOR_SIZE, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_capacity16(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct scsi_read_cap_data_16 rcd;

	bzero(&rcd, sizeof(rcd));

	_lto8b(sc->sc_capacity - 1, rcd.addr);
	_lto4b(VIRTIO_BLK_SECTOR_SIZE, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_done(struct scsi_xfer *xs, int error)
{
	xs->error = error;
	scsi_done(xs);
}

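/*
 * Allocate DMA-safe memory for the request headers and status bytes,
 * reserve virtqueue descriptors for each request up front and create the
 * command/status and payload DMA maps. Returns the number of requests
 * that could be set up (0 on failure).
 */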
int
vioblk_alloc_reqs(struct vioblk_softc *sc, int qsize)
{
	struct virtqueue *vq = &sc->sc_vq[0];
	struct vring_desc *vd;
	int allocsize, nreqs, r, rsegs, slot, i;
	void *vaddr;

	if (vq->vq_indirect != NULL)
		nreqs = qsize;
	else
		nreqs = qsize / ALLOC_SEGS;

	allocsize = sizeof(struct virtio_blk_req) * nreqs;
	r = bus_dmamem_alloc(sc->sc_virtio->sc_dmat, allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("DMA memory allocation failed, size %d, error %d\n",
		    allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(sc->sc_virtio->sc_dmat, &sc->sc_reqs_segs[0], 1,
	    allocsize, (caddr_t *)&vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("DMA memory map failed, error %d\n", r);
		goto err_dmamem_alloc;
	}
	sc->sc_reqs = vaddr;
	memset(vaddr, 0, allocsize);
	for (i = 0; i < nreqs; i++) {
		/*
		 * Assign descriptors and create the DMA maps for each
		 * allocated request.
		 */
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		r = virtio_enqueue_prep(vq, &slot);
		if (r == 0)
			r = virtio_enqueue_reserve(vq, slot, ALLOC_SEGS);
		if (r != 0)
			return i;

		if (vq->vq_indirect == NULL) {
			/*
			 * The reserved slots must be a contiguous block
			 * starting at vq_desc[slot].
			 */
			vd = &vq->vq_desc[slot];
			for (r = 0; r < ALLOC_SEGS - 1; r++) {
				DNPRINTF(2, "%s: vd[%d].next = %d should be "
				    "%d\n", __func__, r, vd[r].next,
				    (slot + r + 1));
				if (vd[r].next != (slot + r + 1))
					return i;
			}
			if (r == (ALLOC_SEGS - 1) && vd[r].next != 0)
				return i;
			DNPRINTF(2, "%s: reserved slots are contiguous "
			    "(good!)\n", __func__);
		}

		vr->vr_qe_index = slot;
		vq->vq_entries[slot].qe_vr_index = i;
		vr->vr_len = VIOBLK_DONE;

		r = bus_dmamap_create(sc->sc_virtio->sc_dmat,
		    VR_DMA_END, 1, VR_DMA_END, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_cmdsts);
		if (r != 0) {
			printf("cmd dmamap creation failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		r = bus_dmamap_load(sc->sc_virtio->sc_dmat, vr->vr_cmdsts,
		    &vr->vr_hdr, VR_DMA_END, NULL, BUS_DMA_NOWAIT);
		if (r != 0) {
			printf("command dmamap load failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		r = bus_dmamap_create(sc->sc_virtio->sc_dmat, MAXPHYS,
		    SEG_MAX, MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &vr->vr_payload);
		if (r != 0) {
			printf("payload dmamap creation failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	}
	return nreqs;

err_reqs:
	for (i = 0; i < nreqs; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		if (vr->vr_cmdsts) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
			    vr->vr_cmdsts);
			vr->vr_cmdsts = 0;
		}
		if (vr->vr_payload) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
			    vr->vr_payload);
			vr->vr_payload = 0;
		}
	}
	bus_dmamem_unmap(sc->sc_virtio->sc_dmat, (caddr_t)sc->sc_reqs,
	    allocsize);
err_dmamem_alloc:
	bus_dmamem_free(sc->sc_virtio->sc_dmat, &sc->sc_reqs_segs[0], 1);
err_none:
	return 0;
}