/*	$NetBSD: virtio.c,v 1.30 2018/02/14 14:04:48 uwe Exp $	*/

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.30 2018/02/14 14:04:48 uwe Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/module.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#define VIRTIO_PRIVATE

#include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
#include <dev/pci/virtiovar.h> /* XXX: move to non-pci */

#define MINSEG_INDIRECT		2 /* use indirect if nsegs >= this value */

static int	virtio_intr(void *arg);
static int	virtio_msix_queue_intr(void *);
static int	virtio_msix_config_intr(void *);
static int	virtio_setup_msix_vectors(struct virtio_softc *);
static int	virtio_setup_msix_interrupts(struct virtio_softc *,
		    struct pci_attach_args *);
static int	virtio_setup_intx_interrupt(struct virtio_softc *,
		    struct pci_attach_args *);
static int	virtio_setup_interrupts(struct virtio_softc *);
static void	virtio_free_interrupts(struct virtio_softc *);
static void	virtio_soft_intr(void *arg);
static void	virtio_init_vq(struct virtio_softc *,
		    struct virtqueue *, const bool);


/* we use the legacy virtio spec, so the pci registers are host native
 * byte order, not pci (i.e. LE) byte order */
static inline uint16_t
nbo_bus_space_read_2(bus_space_tag_t space, bus_space_handle_t handle,
	bus_size_t offset)
{
	return le16toh(bus_space_read_2(space, handle, offset));
}

static inline uint32_t
nbo_bus_space_read_4(bus_space_tag_t space, bus_space_handle_t handle,
	bus_size_t offset)
{
	return le32toh(bus_space_read_4(space, handle, offset));
}

static void
nbo_bus_space_write_2(bus_space_tag_t space, bus_space_handle_t handle,
	bus_size_t offset, uint16_t value)
{
	bus_space_write_2(space, handle, offset, htole16(value));
}

static void
nbo_bus_space_write_4(bus_space_tag_t space, bus_space_handle_t handle,
	bus_size_t offset, uint32_t value)
{
	bus_space_write_4(space, handle, offset, htole32(value));
}

/* some functions access registers at 4 byte offset for low/high halves */
#if BYTE_ORDER == BIG_ENDIAN
#define REG_HI_OFF	0
#define REG_LO_OFF	4
#else
#define REG_HI_OFF	4
#define REG_LO_OFF	0
#endif

void
virtio_set_status(struct virtio_softc *sc, int status)
{
	int old = 0;

	if (status != 0)
		old = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
				       VIRTIO_CONFIG_DEVICE_STATUS);
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_DEVICE_STATUS,
			  status|old);
}

#define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
#define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1

static int
virtio_setup_msix_vectors(struct virtio_softc *sc)
{
	int offset, vector, ret, qid;

	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;

	nbo_bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, vector);
	ret = nbo_bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
	aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
	    vector, ret);
	if (ret != vector)
		return -1;

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		offset = VIRTIO_CONFIG_QUEUE_SELECT;
		nbo_bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, qid);

		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		nbo_bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, vector);
		ret = nbo_bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
		aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
		    vector, ret);
		if (ret != vector)
			return -1;
	}

	return 0;
}

static int
virtio_setup_msix_interrupts(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;
	int idx;

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
		pci_intr_setattr(pc, &sc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	sc->sc_ihs[idx] = pci_intr_establish_xname(pc, sc->sc_ihp[idx],
	    sc->sc_ipl, virtio_msix_config_intr, sc, device_xname(sc->sc_dev));
	if (sc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self, "couldn't establish MSI-X for config\n");
		goto error;
	}

	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
		pci_intr_setattr(pc, &sc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	sc->sc_ihs[idx] = pci_intr_establish_xname(pc, sc->sc_ihp[idx],
	    sc->sc_ipl, virtio_msix_queue_intr, sc, device_xname(sc->sc_dev));
	if (sc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self, "couldn't establish MSI-X for queues\n");
		goto error;
	}

	if (virtio_setup_msix_vectors(sc) != 0) {
		aprint_error_dev(self, "couldn't setup MSI-X vectors\n");
		goto error;
	}

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, sc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, sc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "queues interrupting at %s\n", intrstr);

	return 0;

error:
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[idx]);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[idx]);

	return -1;
}

static int
virtio_setup_intx_interrupt(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;

	if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
		pci_intr_setattr(pc, &sc->sc_ihp[0], PCI_INTR_MPSAFE, true);

	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_ihp[0],
	    sc->sc_ipl, virtio_intr, sc, device_xname(sc->sc_dev));
	if (sc->sc_ihs[0] == NULL) {
		aprint_error_dev(self, "couldn't establish INTx\n");
		return -1;
	}

	intrstr = pci_intr_string(pc, sc->sc_ihp[0], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	return 0;
}

static int
virtio_setup_interrupts(struct virtio_softc *sc)
{
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
	int error;
	int nmsix;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;

	nmsix = pci_msix_count(sc->sc_pa.pa_pc, sc->sc_pa.pa_tag);
	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);

	/* We need at least two: one for config and the other for queues */
	if ((sc->sc_flags & VIRTIO_F_PCI_INTR_MSIX) == 0 || nmsix < 2) {
		/* Try INTx only */
		max_type = PCI_INTR_TYPE_INTX;
		counts[PCI_INTR_TYPE_INTX] = 1;
	} else {
		/* Try MSI-X first and INTx second */
		max_type = PCI_INTR_TYPE_MSIX;
		counts[PCI_INTR_TYPE_MSIX] = 2;
		counts[PCI_INTR_TYPE_MSI] = 0;
		counts[PCI_INTR_TYPE_INTX] = 1;
	}

 retry:
	error = pci_intr_alloc(&sc->sc_pa, &sc->sc_ihp, counts, max_type);
	if (error != 0) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return -1;
	}

	if (pci_intr_type(pc, sc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
		/* zeroed: the error path tests handles against NULL */
		sc->sc_ihs = kmem_zalloc(sizeof(*sc->sc_ihs) * 2,
		    KM_SLEEP);

		error = virtio_setup_msix_interrupts(sc, &sc->sc_pa);
		if (error != 0) {
			kmem_free(sc->sc_ihs, sizeof(*sc->sc_ihs) * 2);
			pci_intr_release(pc, sc->sc_ihp, 2);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		sc->sc_ihs_num = 2;
		sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
	} else if (pci_intr_type(pc, sc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
		sc->sc_ihs = kmem_alloc(sizeof(*sc->sc_ihs) * 1,
		    KM_SLEEP);

		error = virtio_setup_intx_interrupt(sc, &sc->sc_pa);
		if (error != 0) {
			kmem_free(sc->sc_ihs, sizeof(*sc->sc_ihs) * 1);
			pci_intr_release(pc, sc->sc_ihp, 1);
			return -1;
		}

		sc->sc_ihs_num = 1;
		sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	}

	KASSERT(sc->sc_soft_ih == NULL);
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_SOFTINT) {
		u_int flags = SOFTINT_NET;
		if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
			flags |= SOFTINT_MPSAFE;

		sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc);
		if (sc->sc_soft_ih == NULL) {
			virtio_free_interrupts(sc);
			aprint_error_dev(sc->sc_dev,
			    "failed to establish soft interrupt\n");
			return -1;
		}
	}

	return 0;
}

static void
virtio_free_interrupts(struct virtio_softc *sc)
{
	for (int i = 0; i < sc->sc_ihs_num; i++) {
		if (sc->sc_ihs[i] == NULL)
			continue;
		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
		sc->sc_ihs[i] = NULL;
	}

	if (sc->sc_ihs_num > 0)
		pci_intr_release(sc->sc_pc, sc->sc_ihp, sc->sc_ihs_num);

	if (sc->sc_soft_ih) {
		softint_disestablish(sc->sc_soft_ih);
		sc->sc_soft_ih = NULL;
	}

	if (sc->sc_ihs != NULL) {
		kmem_free(sc->sc_ihs, sizeof(*sc->sc_ihs) * sc->sc_ihs_num);
		sc->sc_ihs = NULL;
	}
	sc->sc_ihs_num = 0;
}


/*
 * Reset the device.
 */
/*
 * To reset the device to a known state, do the following:
 *	virtio_reset(sc);	     // this will stop the device activity
 *	<dequeue finished requests>; // virtio_dequeue() still can be called
 *	<revoke pending requests in the vqs if any>;
 *	virtio_reinit_start(sc);     // dequeue prohibited
 *	newfeatures = virtio_negotiate_features(sc, requestedfeatures);
 *	<some other initialization>;
 *	virtio_reinit_end(sc);	     // device activated; enqueue allowed
 * Once attached, features can only be renegotiated after a virtio_reset.
 */
void
virtio_reset(struct virtio_softc *sc)
{
	virtio_device_reset(sc);
}

void
virtio_reinit_start(struct virtio_softc *sc)
{
	int i;

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
	for (i = 0; i < sc->sc_nvqs; i++) {
		int n;
		struct virtqueue *vq = &sc->sc_vqs[i];
		nbo_bus_space_write_2(sc->sc_iot, sc->sc_ioh,
				  VIRTIO_CONFIG_QUEUE_SELECT,
				  vq->vq_index);
		n = nbo_bus_space_read_2(sc->sc_iot, sc->sc_ioh,
				     VIRTIO_CONFIG_QUEUE_SIZE);
		if (n == 0)	/* vq disappeared */
			continue;
		if (n != vq->vq_num) {
			panic("%s: virtqueue size changed, vq index %d\n",
			      device_xname(sc->sc_dev),
			      vq->vq_index);
		}
		virtio_init_vq(sc, vq, true);
		nbo_bus_space_write_4(sc->sc_iot, sc->sc_ioh,
				  VIRTIO_CONFIG_QUEUE_ADDRESS,
				  (vq->vq_dmamap->dm_segs[0].ds_addr
				   / VIRTIO_PAGE_SIZE));
	}

	/* MSI-X has more than one handle, whereas INTx has just one */
	if (sc->sc_ihs_num > 1) {
		if (virtio_setup_msix_vectors(sc) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't setup MSI-X vectors\n");
			return;
		}
	}
}

void
virtio_reinit_end(struct virtio_softc *sc)
{
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
}
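
/*
 * A minimal sketch of the reset sequence above, as a hypothetical child
 * driver's resume path might use it (everything other than the virtio_*
 * calls is illustrative only, not part of this API):
 *
 *	static int
 *	foo_resume(struct foo_softc *fsc)
 *	{
 *		struct virtio_softc *vsc = fsc->sc_virtio;
 *
 *		virtio_reset(vsc);		// stop device activity
 *		foo_drain_requests(fsc);	// dequeue/revoke outstanding
 *		virtio_reinit_start(vsc);	// re-ACK, re-init the vqs
 *		(void)virtio_negotiate_features(vsc, fsc->sc_features);
 *		<rewrite device config if needed>;
 *		virtio_reinit_end(vsc);		// DRIVER_OK; enqueue allowed
 *		return 0;
 *	}
 */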

/*
 * Feature negotiation.
 */
uint32_t
virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
{
	uint32_t r;

	if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
	    !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
	r = nbo_bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			     VIRTIO_CONFIG_DEVICE_FEATURES);
	r &= guest_features;
	nbo_bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			  VIRTIO_CONFIG_GUEST_FEATURES, r);
	sc->sc_features = r;
	if (r & VIRTIO_F_RING_INDIRECT_DESC)
		sc->sc_indirect = true;
	else
		sc->sc_indirect = false;

	return r;
}
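
/*
 * The return value is the subset of guest_features the host accepted;
 * callers must check that every feature they cannot work without was
 * actually granted.  A hedged sketch (the "required"/"optional" masks
 * are illustrative only):
 *
 *	uint32_t accepted;
 *
 *	accepted = virtio_negotiate_features(vsc,
 *	    required_features | optional_features);
 *	if ((accepted & required_features) != required_features)
 *		return ENODEV;	// host lacks something we depend on
 */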

/*
 * Device configuration registers.
 */
uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, int index)
{
	return bus_space_read_1(sc->sc_iot, sc->sc_ioh,
				sc->sc_config_offset + index);
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, int index)
{
	return nbo_bus_space_read_2(sc->sc_iot, sc->sc_ioh,
				sc->sc_config_offset + index);
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, int index)
{
	return nbo_bus_space_read_4(sc->sc_iot, sc->sc_ioh,
				sc->sc_config_offset + index);
}

uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, int index)
{
	uint64_t r;

	r = nbo_bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			     sc->sc_config_offset + index + REG_HI_OFF);
	r <<= 32;
	r |= nbo_bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			      sc->sc_config_offset + index + REG_LO_OFF);

	return r;
}

void
virtio_write_device_config_1(struct virtio_softc *sc,
			     int index, uint8_t value)
{
	bus_space_write_1(sc->sc_iot, sc->sc_ioh,
			  sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_2(struct virtio_softc *sc,
			     int index, uint16_t value)
{
	nbo_bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			  sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_4(struct virtio_softc *sc,
			     int index, uint32_t value)
{
	nbo_bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			  sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_8(struct virtio_softc *sc,
			     int index, uint64_t value)
{
	nbo_bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			  sc->sc_config_offset + index + REG_LO_OFF,
			  value & 0xffffffff);
	nbo_bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			  sc->sc_config_offset + index + REG_HI_OFF,
			  value >> 32);
}
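
/*
 * Example: virtio-net's config space begins with a 6-byte MAC address,
 * which a child driver would read one byte at a time (the offset 0 and
 * the snippet as a whole are an illustrative sketch, not part of this
 * API):
 *
 *	uint8_t mac[6];
 *	int i;
 *
 *	for (i = 0; i < 6; i++)
 *		mac[i] = virtio_read_device_config_1(vsc, i);
 */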

/*
 * Interrupt handler.
 */
static int
virtio_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
			       VIRTIO_CONFIG_ISR_STATUS);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (sc->sc_config_change != NULL))
		r = (sc->sc_config_change)(sc);
	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_msix_queue_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_msix_config_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_config_change != NULL)
		r = (sc->sc_config_change)(sc);
	return r;
}

static void
virtio_soft_intr(void *arg)
{
	struct virtio_softc *sc = arg;

	KASSERT(sc->sc_intrhand != NULL);

	(sc->sc_intrhand)(sc);
}

/*
 * dmamap sync operations for a virtqueue.
 */
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	/* availoffset == sizeof(vring_desc)*vq_num */
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
			ops);
}

static inline void
vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
			vq->vq_availoffset,
			offsetof(struct vring_avail, ring)
			 + vq->vq_num * sizeof(uint16_t),
			ops);
}

static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
			vq->vq_usedoffset,
			offsetof(struct vring_used, ring)
			 + vq->vq_num * sizeof(struct vring_used_elem),
			ops);
}

static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
		     int ops)
{
	int offset = vq->vq_indirectoffset
		      + sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
			offset, sizeof(struct vring_desc) * vq->vq_maxnsegs,
			ops);
}

/*
 * Can be used as sc_intrhand.
 */
/*
 * Scans the vqs, does bus_dmamap_sync for the rings (not for the
 * payload), and calls (*vq_done)() for each vq with newly consumed
 * entries.
 */
int
virtio_vq_intr(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int i, r = 0;

	for (i = 0; i < sc->sc_nvqs; i++) {
		vq = &sc->sc_vqs[i];
		if (vq->vq_queued) {
			vq->vq_queued = 0;
			vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
		}
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		membar_consumer();
		if (vq->vq_used_idx != vq->vq_used->idx) {
			if (vq->vq_done)
				r |= (vq->vq_done)(vq);
		}
	}

	return r;
}

/*
 * Start/stop vq interrupt.  This is only a hint: the host may still
 * deliver or suppress interrupts regardless of the flag.
 */
void
virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}

void
virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}
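
/*
 * Because the flag is only advisory, a handler that wants to run the
 * queue with interrupts masked typically re-checks for late arrivals
 * after re-enabling them; a hedged sketch of that pattern:
 *
 *	virtio_stop_vq_intr(vsc, vq);
 *	while (virtio_dequeue(vsc, vq, &slot, &len) == 0)
 *		<process slot>;
 *	virtio_start_vq_intr(vsc, vq);
 *	while (virtio_dequeue(vsc, vq, &slot, &len) == 0)
 *		<process slot>;	// catch entries that raced the enable
 */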

/*
 * Initialize vq structure.
 */
static void
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq,
    const bool reinit)
{
	int i, j;
	int vq_size = vq->vq_num;

	memset(vq->vq_vaddr, 0, vq->vq_bytesize);

	/* build the indirect descriptor chain */
	if (vq->vq_indirect != NULL) {
		struct vring_desc *vd;

		for (i = 0; i < vq_size; i++) {
			vd = vq->vq_indirect;
			vd += vq->vq_maxnsegs * i;
			for (j = 0; j < vq->vq_maxnsegs-1; j++) {
				vd[j].next = j + 1;
			}
		}
	}

	/* free slot management */
	SIMPLEQ_INIT(&vq->vq_freelist);
	for (i = 0; i < vq_size; i++) {
		SIMPLEQ_INSERT_TAIL(&vq->vq_freelist,
				    &vq->vq_entries[i], qe_list);
		vq->vq_entries[i].qe_index = i;
	}
	if (!reinit)
		mutex_init(&vq->vq_freelist_lock, MUTEX_SPIN, sc->sc_ipl);

	/* enqueue/dequeue status */
	vq->vq_avail_idx = 0;
	vq->vq_used_idx = 0;
	vq->vq_queued = 0;
	if (!reinit) {
		mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl);
		mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
	vq->vq_queued++;
}

/*
 * Allocate/free a vq.
 */
int
virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int index,
    int maxsegsize, int maxnsegs, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
	int rsegs, r;
#define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1))&	\
				 ~(VIRTIO_PAGE_SIZE-1))

	/* Make sure callers allocate vqs in order */
	KASSERT(sc->sc_nvqs == index);

	memset(vq, 0, sizeof(*vq));

	nbo_bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			  VIRTIO_CONFIG_QUEUE_SELECT, index);
	vq_size = nbo_bus_space_read_2(sc->sc_iot, sc->sc_ioh,
				   VIRTIO_CONFIG_QUEUE_SIZE);
	if (vq_size == 0) {
		aprint_error_dev(sc->sc_dev,
				 "virtqueue does not exist, index %d for %s\n",
				 index, name);
		goto err;
	}
	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc)*vq_size
				     + sizeof(uint16_t)*(2+vq_size));
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t)*2
				     + sizeof(struct vring_used_elem)*vq_size);
	/* allocsize3: indirect table */
	if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
		allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
	else
		allocsize3 = 0;
	allocsize = allocsize1 + allocsize2 + allocsize3;

	/* alloc and map the memory */
	r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
			     &vq->vq_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "virtqueue %d for %s allocation failed, "
				 "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], 1, allocsize,
			   &vq->vq_vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "virtqueue %d for %s map failed, "
				 "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
			      BUS_DMA_NOWAIT, &vq->vq_dmamap);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "virtqueue %d for %s dmamap creation failed, "
				 "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap,
			    vq->vq_vaddr, allocsize, NULL, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "virtqueue %d for %s dmamap load failed, "
				 "error code %d\n", index, name, r);
		goto err;
	}

	/* set the vq address */
	nbo_bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			  VIRTIO_CONFIG_QUEUE_ADDRESS,
			  (vq->vq_dmamap->dm_segs[0].ds_addr
			   / VIRTIO_PAGE_SIZE));

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_desc = vq->vq_vaddr;
	vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
	vq->vq_avail = (void*)(((char*)vq->vq_desc) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void*)(((char*)vq->vq_desc) + vq->vq_usedoffset);
	if (allocsize3 > 0) {
		vq->vq_indirectoffset = allocsize1 + allocsize2;
		vq->vq_indirect = (void*)(((char*)vq->vq_desc)
					  + vq->vq_indirectoffset);
	}
	vq->vq_bytesize = allocsize;
	vq->vq_maxsegsize = maxsegsize;
	vq->vq_maxnsegs = maxnsegs;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof(struct vq_entry)*vq_size,
				     KM_NOSLEEP);
	if (vq->vq_entries == NULL) {
		r = ENOMEM;
		goto err;
	}

	virtio_init_vq(sc, vq, false);

	aprint_verbose_dev(sc->sc_dev,
			   "allocated %d bytes for virtqueue %d for %s, "
			   "size %d\n", allocsize, index, name, vq_size);
	if (allocsize3 > 0)
		aprint_verbose_dev(sc->sc_dev,
				   "using %d bytes (%d entries) "
				   "indirect descriptors\n",
				   allocsize3, maxnsegs * vq_size);

	sc->sc_nvqs++;

	return 0;

err:
	nbo_bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			  VIRTIO_CONFIG_QUEUE_ADDRESS, 0);
	if (vq->vq_dmamap)
		bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	if (vq->vq_vaddr)
		bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
	if (vq->vq_segs[0].ds_addr)
		bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	memset(vq, 0, sizeof(*vq));

	return -1;
}
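
/*
 * A hedged sketch of attach-time allocation, as a hypothetical child
 * driver might do it after virtio_child_attach_start() (the softc
 * layout, segment limit, and queue name are illustrative only):
 *
 *	if (virtio_alloc_vq(vsc, &fsc->sc_vq[0], 0,
 *	    MAXPHYS, FOO_MAX_SEGS, "foo requests") != 0)
 *		goto failed;
 *	fsc->sc_vq[0].vq_done = foo_vq_done;
 */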

int
virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	struct vq_entry *qe;
	int i = 0;

	/* device must already be deactivated */
	/* confirm the vq is empty */
	SIMPLEQ_FOREACH(qe, &vq->vq_freelist, qe_list) {
		i++;
	}
	if (i != vq->vq_num) {
		printf("%s: freeing non-empty vq, index %d\n",
		       device_xname(sc->sc_dev), vq->vq_index);
		return EBUSY;
	}

	/* tell device that there's no virtqueue any longer */
	nbo_bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			  VIRTIO_CONFIG_QUEUE_SELECT, vq->vq_index);
	nbo_bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			  VIRTIO_CONFIG_QUEUE_ADDRESS, 0);

	kmem_free(vq->vq_entries, sizeof(*vq->vq_entries) * vq->vq_num);
	bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
	bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	mutex_destroy(&vq->vq_freelist_lock);
	mutex_destroy(&vq->vq_uring_lock);
	mutex_destroy(&vq->vq_aring_lock);
	memset(vq, 0, sizeof(*vq));

	sc->sc_nvqs--;

	return 0;
}

/*
 * Free descriptor management.
 */
static struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
	struct vq_entry *qe;

	mutex_enter(&vq->vq_freelist_lock);
	if (SIMPLEQ_EMPTY(&vq->vq_freelist)) {
		mutex_exit(&vq->vq_freelist_lock);
		return NULL;
	}
	qe = SIMPLEQ_FIRST(&vq->vq_freelist);
	SIMPLEQ_REMOVE_HEAD(&vq->vq_freelist, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return qe;
}

static void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	mutex_enter(&vq->vq_freelist_lock);
	SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list);
	mutex_exit(&vq->vq_freelist_lock);
}

/*
 * Enqueue several dmamaps as a single request.
 */
/*
 * Typical usage:
 *  <queue size> copies of the following are stored in arrays, one per slot:
 *  - command blocks (in dmamem) should be pre-allocated and mapped
 *  - dmamaps for command blocks should be pre-allocated and loaded
 *  - dmamaps for payload should be pre-allocated
 *      r = virtio_enqueue_prep(sc, vq, &slot);		// allocate a slot
 *	if (r)		// currently 0 or EAGAIN
 *	  return r;
 *	r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
 *	if (r) {
 *	  virtio_enqueue_abort(sc, vq, slot);
 *	  return r;
 *	}
 *	r = virtio_enqueue_reserve(sc, vq, slot,
 *				   dmamap_payload[slot]->dm_nsegs+1);
 *							// ^ +1 for command
 *	if (r) {	// currently 0 or EAGAIN
 *	  bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *	  return r;					// do not call abort()
 *	}
 *	<setup and prepare commands>
 *	bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_sync(dmat, dmamap_payload[slot],...);
 *	virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false);
 *	virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
 *	virtio_enqueue_commit(sc, vq, slot, true);
 */

/*
 * enqueue_prep: allocate a slot number
 */
int
virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp)
{
	struct vq_entry *qe1;

	KASSERT(slotp != NULL);

	qe1 = vq_alloc_entry(vq);
	if (qe1 == NULL)
		return EAGAIN;
	/* next slot is not allocated yet */
	qe1->qe_next = -1;
	*slotp = qe1->qe_index;

	return 0;
}

/*
 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
 */
int
virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq,
		       int slot, int nsegs)
{
	int indirect;
	struct vq_entry *qe1 = &vq->vq_entries[slot];

	KASSERT(qe1->qe_next == -1);
	KASSERT(1 <= nsegs && nsegs <= vq->vq_num);

	if ((vq->vq_indirect != NULL) &&
	    (nsegs >= MINSEG_INDIRECT) &&
	    (nsegs <= vq->vq_maxnsegs))
		indirect = 1;
	else
		indirect = 0;
	qe1->qe_indirect = indirect;

	if (indirect) {
		struct vring_desc *vd;
		int i;

		vd = &vq->vq_desc[qe1->qe_index];
		vd->addr = vq->vq_dmamap->dm_segs[0].ds_addr
			+ vq->vq_indirectoffset;
		vd->addr += sizeof(struct vring_desc)
			* vq->vq_maxnsegs * qe1->qe_index;
		vd->len = sizeof(struct vring_desc) * nsegs;
		vd->flags = VRING_DESC_F_INDIRECT;

		vd = vq->vq_indirect;
		vd += vq->vq_maxnsegs * qe1->qe_index;
		qe1->qe_desc_base = vd;

		for (i = 0; i < nsegs-1; i++) {
			vd[i].flags = VRING_DESC_F_NEXT;
		}
		vd[i].flags = 0;
		qe1->qe_next = 0;

		return 0;
	} else {
		struct vring_desc *vd;
		struct vq_entry *qe;
		int i, s;

		vd = &vq->vq_desc[0];
		qe1->qe_desc_base = vd;
		qe1->qe_next = qe1->qe_index;
		s = slot;
		for (i = 0; i < nsegs - 1; i++) {
			qe = vq_alloc_entry(vq);
			if (qe == NULL) {
				vd[s].flags = 0;
				virtio_enqueue_abort(sc, vq, slot);
				return EAGAIN;
			}
			vd[s].flags = VRING_DESC_F_NEXT;
			vd[s].next = qe->qe_index;
			s = qe->qe_index;
		}
		vd[s].flags = 0;

		return 0;
	}
}

/*
 * enqueue: enqueue a single dmamap.
 */
int
virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
	       bus_dmamap_t dmamap, bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int i;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs > 0);

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		vd[s].addr = dmamap->dm_segs[i].ds_addr;
		vd[s].len = dmamap->dm_segs[i].ds_len;
		if (!write)
			vd[s].flags |= VRING_DESC_F_WRITE;
		s = vd[s].next;
	}
	qe1->qe_next = s;

	return 0;
}

int
virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
		 bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len,
		 bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs == 1); /* XXX */
	KASSERT((dmamap->dm_segs[0].ds_len > start) &&
		(dmamap->dm_segs[0].ds_len >= start + len));

	vd[s].addr = dmamap->dm_segs[0].ds_addr + start;
	vd[s].len = len;
	if (!write)
		vd[s].flags |= VRING_DESC_F_WRITE;
	qe1->qe_next = vd[s].next;

	return 0;
}

/*
 * enqueue_commit: add it to the aring.
 */
int
virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
		      bool notifynow)
{
	struct vq_entry *qe1;

	if (slot < 0) {
		mutex_enter(&vq->vq_aring_lock);
		goto notify;
	}
	vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
	qe1 = &vq->vq_entries[slot];
	if (qe1->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
	mutex_enter(&vq->vq_aring_lock);
	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] = slot;

notify:
	if (notifynow) {
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
		membar_producer();
		vq->vq_avail->idx = vq->vq_avail_idx;
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		membar_producer();
		vq->vq_queued++;
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		membar_consumer();
		if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
			nbo_bus_space_write_2(sc->sc_iot, sc->sc_ioh,
					  VIRTIO_CONFIG_QUEUE_NOTIFY,
					  vq->vq_index);
	}
	mutex_exit(&vq->vq_aring_lock);

	return 0;
}

/*
 * enqueue_abort: rollback.
 */
int
virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd;
	int s;

	if (qe->qe_next < 0) {
		vq_free_entry(vq, qe);
		return 0;
	}

	s = slot;
	vd = &vq->vq_desc[0];
	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);
	return 0;
}

/*
 * Dequeue a request.
 */
/*
 * dequeue: dequeue a request from uring; dmamap_sync for uring is
 *	    already done in the interrupt handler.
 */
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
	       int *slotp, int *lenp)
{
	uint16_t slot, usedidx;
	struct vq_entry *qe;

	if (vq->vq_used_idx == vq->vq_used->idx)
		return ENOENT;
	mutex_enter(&vq->vq_uring_lock);
	usedidx = vq->vq_used_idx++;
	mutex_exit(&vq->vq_uring_lock);
	usedidx %= vq->vq_num;
	slot = vq->vq_used->ring[usedidx].id;
	qe = &vq->vq_entries[slot];

	if (qe->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);

	if (slotp)
		*slotp = slot;
	if (lenp)
		*lenp = vq->vq_used->ring[usedidx].len;

	return 0;
}
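
/*
 * A hedged sketch of a (*vq_done)() callback built on dequeue and
 * dequeue_commit (the dmamap arrays mirror the enqueue usage comment
 * above and are illustrative only):
 *
 *	static int
 *	foo_vq_done(struct virtqueue *vq)
 *	{
 *		struct virtio_softc *vsc = vq->vq_owner;
 *		int slot, len, more = 0;
 *
 *		while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
 *			bus_dmamap_sync(dmat, dmamap_cmd[slot],
 *			    ... BUS_DMASYNC_POSTWRITE);
 *			bus_dmamap_sync(dmat, dmamap_payload[slot], ...);
 *			<complete the request in slot>;
 *			virtio_dequeue_commit(vsc, vq, slot);
 *			more = 1;
 *		}
 *		return more;
 *	}
 */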

/*
 * dequeue_commit: complete dequeue; the slot is recycled for future use.
 *                 If you forget to call this, the slot will be leaked.
 */
int
virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd = &vq->vq_desc[0];
	int s = slot;

	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);

	return 0;
}

/*
 * Attach a child, fill all the members.
 */
void
virtio_child_attach_start(struct virtio_softc *sc, device_t child, int ipl,
		    struct virtqueue *vqs,
		    virtio_callback config_change,
		    virtio_callback intr_hand,
		    int req_flags, int req_features, const char *feat_bits)
{
	char buf[256];
	int features;

	sc->sc_child = child;
	sc->sc_ipl = ipl;
	sc->sc_vqs = vqs;
	sc->sc_config_change = config_change;
	sc->sc_intrhand = intr_hand;
	sc->sc_flags = req_flags;

	features = virtio_negotiate_features(sc, req_features);
	snprintb(buf, sizeof(buf), feat_bits, features);
	aprint_normal(": Features: %s\n", buf);
	aprint_naive("\n");
}

int
virtio_child_attach_finish(struct virtio_softc *sc)
{
	int r;

	r = virtio_setup_interrupts(sc);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev, "failed to setup interrupts\n");
		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return 1;
	}

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);

	return 0;
}
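
/*
 * A hedged sketch of a child driver's attach using the protocol above
 * (foo_* names and feature/flag macros are illustrative; virtio_vq_intr
 * is this file's generic sc_intrhand):
 *
 *	virtio_child_attach_start(vsc, self, IPL_NET, fsc->sc_vq,
 *	    foo_config_change, virtio_vq_intr, VIRTIO_F_PCI_INTR_MPSAFE,
 *	    FOO_FEATURES, FOO_FLAG_BITS);
 *	if (virtio_alloc_vq(vsc, &fsc->sc_vq[0], 0, MAXPHYS,
 *	    FOO_MAX_SEGS, "foo requests") != 0 ||
 *	    virtio_child_attach_finish(vsc) != 0) {
 *		virtio_child_attach_failed(vsc);
 *		return;
 *	}
 */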

void
virtio_child_detach(struct virtio_softc *sc)
{
	sc->sc_child = NULL;
	sc->sc_vqs = NULL;

	virtio_device_reset(sc);

	virtio_free_interrupts(sc);
}

void
virtio_child_attach_failed(struct virtio_softc *sc)
{
	virtio_child_detach(sc);

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);

	sc->sc_child = VIRTIO_CHILD_FAILED;
}

bus_dma_tag_t
virtio_dmat(struct virtio_softc *sc)
{
	return sc->sc_dmat;
}

device_t
virtio_child(struct virtio_softc *sc)
{
	return sc->sc_child;
}

int
virtio_intrhand(struct virtio_softc *sc)
{
	return (sc->sc_intrhand)(sc);
}

uint32_t
virtio_features(struct virtio_softc *sc)
{
	return sc->sc_features;
}

MODULE(MODULE_CLASS_DRIVER, virtio, "pci");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}