/*	$OpenBSD: virtio_pci.c,v 1.24 2019/05/26 15:14:16 sf Exp $	*/
/*	$NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $	*/

/*
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mutex.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pv/virtioreg.h>
#include <dev/pv/virtiovar.h>
#include <dev/pci/virtio_pcireg.h>

/*
 * XXX: Before being used on big endian arches, the access to config registers
 * XXX: needs to be reviewed/fixed. The non-device specific registers are
 * XXX: PCI-endian while the device specific registers are native endian.
 */

#define MAX_MSIX_VECS	8

struct virtio_pci_softc;

int		virtio_pci_match(struct device *, void *, void *);
void		virtio_pci_attach(struct device *, struct device *, void *);
int		virtio_pci_detach(struct device *, int);

void		virtio_pci_kick(struct virtio_softc *, uint16_t);
int		virtio_pci_adjust_config_region(struct virtio_pci_softc *);
uint8_t		virtio_pci_read_device_config_1(struct virtio_softc *, int);
uint16_t	virtio_pci_read_device_config_2(struct virtio_softc *, int);
uint32_t	virtio_pci_read_device_config_4(struct virtio_softc *, int);
uint64_t	virtio_pci_read_device_config_8(struct virtio_softc *, int);
void		virtio_pci_write_device_config_1(struct virtio_softc *, int, uint8_t);
void		virtio_pci_write_device_config_2(struct virtio_softc *, int, uint16_t);
void		virtio_pci_write_device_config_4(struct virtio_softc *, int, uint32_t);
void		virtio_pci_write_device_config_8(struct virtio_softc *, int, uint64_t);
uint16_t	virtio_pci_read_queue_size(struct virtio_softc *, uint16_t);
void		virtio_pci_setup_queue(struct virtio_softc *, struct virtqueue *, uint64_t);
void		virtio_pci_set_status(struct virtio_softc *, int);
uint64_t	virtio_pci_negotiate_features(struct virtio_softc *, uint64_t,
					      const struct virtio_feature_name *);
int		virtio_pci_msix_establish(struct virtio_pci_softc *, struct pci_attach_args *, int, int (*)(void *), void *);
int		virtio_pci_setup_msix(struct virtio_pci_softc *, struct pci_attach_args *, int);
void		virtio_pci_free_irqs(struct virtio_pci_softc *);
int		virtio_pci_poll_intr(void *);
int		virtio_pci_legacy_intr(void *);
int		virtio_pci_legacy_intr_mpsafe(void *);
int		virtio_pci_config_intr(void *);
int		virtio_pci_queue_intr(void *);
int		virtio_pci_shared_queue_intr(void *);

enum irq_type {
	IRQ_NO_MSIX,
	IRQ_MSIX_SHARED, /* vec 0: config irq, vec 1 shared by all vqs */
	IRQ_MSIX_PER_VQ, /* vec 0: config irq, vec n: irq of vq[n-1] */
};

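/*
 * Per-device transport state: the virtio_softc shared with the
 * bus-independent layer, the PCI tags, the bus_space handles for the
 * notify, ISR and device-config subregions of the I/O BAR, and the
 * established interrupt handlers (one per MSI-X vector, or only [0]
 * for INTx/MSI).
 */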
struct virtio_pci_softc {
	struct virtio_softc	sc_sc;
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_ptag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;

	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;

	bus_space_tag_t		sc_devcfg_iot;
	bus_space_handle_t	sc_devcfg_ioh;
	bus_size_t		sc_devcfg_iosize;
	/*
	 * With 0.9, the offset of the devcfg region in the io bar changes
	 * depending on MSI-X being enabled or not.
	 * With 1.0, this field is still used to remember if MSI-X is enabled
	 * or not.
	 */
	unsigned int		sc_devcfg_offset;

	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	void			*sc_ih[MAX_MSIX_VECS];

	enum irq_type		sc_irq_type;
};

struct cfattach virtio_pci_ca = {
	sizeof(struct virtio_pci_softc),
	virtio_pci_match,
	virtio_pci_attach,
	virtio_pci_detach,
	NULL
};

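/*
 * Transport operations used by the bus-independent virtio layer.  This is
 * a positional initializer, so the entries must stay in the order defined
 * by struct virtio_ops.
 */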
struct virtio_ops virtio_pci_ops = {
	virtio_pci_kick,
	virtio_pci_read_device_config_1,
	virtio_pci_read_device_config_2,
	virtio_pci_read_device_config_4,
	virtio_pci_read_device_config_8,
	virtio_pci_write_device_config_1,
	virtio_pci_write_device_config_2,
	virtio_pci_write_device_config_4,
	virtio_pci_write_device_config_8,
	virtio_pci_read_queue_size,
	virtio_pci_setup_queue,
	virtio_pci_set_status,
	virtio_pci_negotiate_features,
	virtio_pci_poll_intr,
};

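/*
 * Read the size (number of descriptors) of virtqueue idx: select the queue
 * via QUEUE_SELECT, then read QUEUE_SIZE.
 */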
uint16_t
virtio_pci_read_queue_size(struct virtio_softc *vsc, uint16_t idx)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_SELECT,
	    idx);
	return bus_space_read_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SIZE);
}

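/*
 * Program the guest-physical address of the ring for vq into the device,
 * in units of VIRTIO_PAGE_SIZE as the legacy interface expects, and route
 * the queue to its MSI-X vector if MSI-X is in use.
 */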
void
virtio_pci_setup_queue(struct virtio_softc *vsc, struct virtqueue *vq,
    uint64_t addr)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_SELECT,
	    vq->vq_index);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_ADDRESS,
	    addr / VIRTIO_PAGE_SIZE);

	/*
	 * This path is only executed if this function is called after
	 * the child's attach function has finished. In other cases,
	 * it's done in virtio_pci_setup_msix().
	 */
	if (sc->sc_irq_type != IRQ_NO_MSIX) {
		int vec = 1;
		if (sc->sc_irq_type == IRQ_MSIX_PER_VQ)
			vec += vq->vq_index;
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_MSI_QUEUE_VECTOR, vec);
	}
}

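/*
 * Set device status bits.  A non-zero status is OR'ed into the current
 * value; writing 0 resets the device.
 */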
void
virtio_pci_set_status(struct virtio_softc *vsc, int status)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	int old = 0;

	if (status != 0)
		old = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_DEVICE_STATUS,
	    status|old);
}

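/*
 * Match the legacy/transitional virtio PCI ID range (vendor Qumranet,
 * products 0x1000-0x103f, revision 0) and the OpenBSD control device.
 */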
int
virtio_pci_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa;

	pa = (struct pci_attach_args *)aux;
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_QUMRANET &&
	    PCI_PRODUCT(pa->pa_id) >= 0x1000 &&
	    PCI_PRODUCT(pa->pa_id) <= 0x103f &&
	    PCI_REVISION(pa->pa_class) == 0)
		return 1;
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OPENBSD &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_OPENBSD_CONTROL)
		return 1;
	return 0;
}

void
virtio_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
	struct virtio_softc *vsc = &sc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	pcireg_t id;
	char const *intrstr;
	pci_intr_handle_t ih;

	revision = PCI_REVISION(pa->pa_class);
	if (revision != 0) {
		printf("unknown revision 0x%02x; giving up\n", revision);
		return;
	}

	/* subsystem ID shows what I am */
	id = PCI_PRODUCT(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));

	printf("\n");

	vsc->sc_ops = &virtio_pci_ops;
	sc->sc_pc = pc;
	sc->sc_ptag = pa->pa_tag;
	vsc->sc_dmat = pa->pa_dmat;

	/*
	 * For virtio, ignore normal MSI black/white-listing depending on the
	 * PCI bridge but enable it unconditionally.
	 */
	pa->pa_flags |= PCI_FLAGS_MSI_ENABLED;

	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_iosize, 0)) {
		printf("%s: can't map i/o space\n", vsc->sc_dev.dv_xname);
		return;
	}

	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &sc->sc_notify_ioh) != 0) {
		printf("%s: can't map notify i/o space\n",
		    vsc->sc_dev.dv_xname);
		return;
	}
	sc->sc_notify_iosize = 2;
	sc->sc_notify_iot = sc->sc_iot;

	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS, 1, &sc->sc_isr_ioh) != 0) {
		printf("%s: can't map isr i/o space\n",
		    vsc->sc_dev.dv_xname);
		return;
	}
	sc->sc_isr_iosize = 1;
	sc->sc_isr_iot = sc->sc_iot;

	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	sc->sc_irq_type = IRQ_NO_MSIX;
	if (virtio_pci_adjust_config_region(sc) != 0)
		return;

	virtio_device_reset(vsc);
	virtio_pci_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_pci_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	vsc->sc_childdevid = id;
	vsc->sc_child = NULL;
	config_found(self, sc, NULL);
	if (vsc->sc_child == NULL) {
		printf("%s: no matching child driver; not configured\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}
	if (vsc->sc_child == VIRTIO_CHILD_ERROR) {
		printf("%s: virtio configuration failed\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}

	if (virtio_pci_setup_msix(sc, pa, 0) == 0) {
		sc->sc_irq_type = IRQ_MSIX_PER_VQ;
		intrstr = "msix per-VQ";
	} else if (virtio_pci_setup_msix(sc, pa, 1) == 0) {
		sc->sc_irq_type = IRQ_MSIX_SHARED;
		intrstr = "msix shared";
	} else {
		int (*ih_func)(void *) = virtio_pci_legacy_intr;
		if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
			printf("%s: couldn't map interrupt\n", vsc->sc_dev.dv_xname);
			goto fail_2;
		}
		intrstr = pci_intr_string(pc, ih);
		/*
		 * We always set the IPL_MPSAFE flag in order to do the relatively
		 * expensive ISR read without lock, and then grab the kernel lock in
		 * the interrupt handler.
		 */
		if (vsc->sc_ipl & IPL_MPSAFE)
			ih_func = virtio_pci_legacy_intr_mpsafe;
		sc->sc_ih[0] = pci_intr_establish(pc, ih, vsc->sc_ipl | IPL_MPSAFE,
		    ih_func, sc, vsc->sc_dev.dv_xname);
		if (sc->sc_ih[0] == NULL) {
			printf("%s: couldn't establish interrupt", vsc->sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			goto fail_2;
		}
	}
	printf("%s: %s\n", vsc->sc_dev.dv_xname, intrstr);

	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	return;

fail_2:
	config_detach(vsc->sc_child, 0);
fail_1:
	/* no pci_mapreg_unmap() or pci_intr_unmap() */
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
}

int
virtio_pci_detach(struct device *self, int flags)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
	struct virtio_softc *vsc = &sc->sc_sc;
	int r;

	if (vsc->sc_child != 0 && vsc->sc_child != VIRTIO_CHILD_ERROR) {
		r = config_detach(vsc->sc_child, flags);
		if (r)
			return r;
	}
	KASSERT(vsc->sc_child == 0 || vsc->sc_child == VIRTIO_CHILD_ERROR);
	KASSERT(vsc->sc_vqs == 0);
	virtio_pci_free_irqs(sc);
	if (sc->sc_iosize)
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_iosize);
	sc->sc_iosize = 0;

	return 0;
}

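/*
 * (Re)create the subregion for the device-specific config registers.  Its
 * offset within the I/O BAR depends on whether MSI-X is enabled, so this
 * is redone whenever sc_devcfg_offset changes.
 */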
int
virtio_pci_adjust_config_region(struct virtio_pci_softc *sc)
{
	sc->sc_devcfg_iosize = sc->sc_iosize - sc->sc_devcfg_offset;
	sc->sc_devcfg_iot = sc->sc_iot;
	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, sc->sc_devcfg_offset,
	    sc->sc_devcfg_iosize, &sc->sc_devcfg_ioh) != 0) {
		printf("%s: can't map config i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		return 1;
	}
	return 0;
}

/*
 * Feature negotiation.
 * Prints available / negotiated features if guest_feature_names != NULL and
 * VIRTIO_DEBUG is 1
 */
uint64_t
virtio_pci_negotiate_features(struct virtio_softc *vsc, uint64_t guest_features,
    const struct virtio_feature_name *guest_feature_names)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t host, neg;

	/*
	 * indirect descriptors can be switched off by setting bit 1 in the
	 * driver flags, see config(8)
	 */
	if (!(vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT) &&
	    !(vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT)) {
		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
	} else {
		printf("RingIndirectDesc disabled by UKC\n");
	}
	host = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_FEATURES);
	neg = host & guest_features;
#if VIRTIO_DEBUG
	if (guest_feature_names)
		virtio_log_features(host, neg, guest_feature_names);
#endif
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_GUEST_FEATURES, neg);
	vsc->sc_features = neg;
	if (neg & VIRTIO_F_RING_INDIRECT_DESC)
		vsc->sc_indirect = 1;
	else
		vsc->sc_indirect = 0;

	return neg;
}

/*
 * Device configuration registers.
 */
uint8_t
virtio_pci_read_device_config_1(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	return bus_space_read_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
}

uint16_t
virtio_pci_read_device_config_2(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	return bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
}

uint32_t
virtio_pci_read_device_config_4(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	return bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
}

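/*
 * 64-bit config fields are accessed as two 32-bit reads (high word first
 * here); the combined read is not atomic with respect to the device.
 */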
uint64_t
virtio_pci_read_device_config_8(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t r;

	r = bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
	    index + sizeof(uint32_t));
	r <<= 32;
	r += bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
	return r;
}

void
virtio_pci_write_device_config_1(struct virtio_softc *vsc, int index,
    uint8_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
}

void
virtio_pci_write_device_config_2(struct virtio_softc *vsc, int index,
    uint16_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
}

void
virtio_pci_write_device_config_4(struct virtio_softc *vsc,
    int index, uint32_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
}

void
virtio_pci_write_device_config_8(struct virtio_softc *vsc,
    int index, uint64_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
	    index, value & 0xffffffff);
	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
	    index + sizeof(uint32_t), value >> 32);
}

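/*
 * Map and establish a single MSI-X vector (idx) with the given handler.
 * Returns 0 on success, 1 on failure.
 */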
int
virtio_pci_msix_establish(struct virtio_pci_softc *sc,
    struct pci_attach_args *pa, int idx, int (*handler)(void *), void *ih_arg)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	pci_intr_handle_t ih;

	if (pci_intr_map_msix(pa, idx, &ih) != 0) {
#if VIRTIO_DEBUG
		printf("%s[%d]: pci_intr_map_msix failed\n",
		    vsc->sc_dev.dv_xname, idx);
#endif
		return 1;
	}
	sc->sc_ih[idx] = pci_intr_establish(sc->sc_pc, ih, vsc->sc_ipl,
	    handler, ih_arg, vsc->sc_dev.dv_xname);
	if (sc->sc_ih[idx] == NULL) {
		printf("%s[%d]: couldn't establish msix interrupt\n",
		    vsc->sc_dev.dv_xname, idx);
		return 1;
	}
	return 0;
}

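/*
 * Tear down all interrupts: de-route the queue vectors if MSI-X was in
 * use, disestablish every handler, and switch the config region back to
 * the non-MSI layout.
 */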
void
virtio_pci_free_irqs(struct virtio_pci_softc *sc)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	int i;

	if (sc->sc_devcfg_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI) {
		for (i = 0; i < vsc->sc_nvqs; i++) {
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_QUEUE_SELECT, i);
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_MSI_QUEUE_VECTOR, VIRTIO_MSI_NO_VECTOR);
		}
	}

	for (i = 0; i < MAX_MSIX_VECS; i++) {
		if (sc->sc_ih[i]) {
			pci_intr_disestablish(sc->sc_pc, sc->sc_ih[i]);
			sc->sc_ih[i] = NULL;
		}
	}

	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	virtio_pci_adjust_config_region(sc);
}

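/*
 * Set up MSI-X: vector 0 always carries config-change interrupts; with
 * shared != 0 all virtqueues share vector 1, otherwise vq[i] gets
 * vector i + 1.
 */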
int
virtio_pci_setup_msix(struct virtio_pci_softc *sc, struct pci_attach_args *pa,
    int shared)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	int i;

	if (virtio_pci_msix_establish(sc, pa, 0, virtio_pci_config_intr, vsc))
		return 1;
	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
	virtio_pci_adjust_config_region(sc);

	if (shared) {
		if (virtio_pci_msix_establish(sc, pa, 1,
		    virtio_pci_shared_queue_intr, vsc)) {
			goto fail;
		}

		for (i = 0; i < vsc->sc_nvqs; i++) {
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_QUEUE_SELECT, i);
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_MSI_QUEUE_VECTOR, 1);
		}
	} else {
		for (i = 0; i < vsc->sc_nvqs; i++) {
			if (virtio_pci_msix_establish(sc, pa, i + 1,
			    virtio_pci_queue_intr, &vsc->sc_vqs[i])) {
				goto fail;
			}
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_QUEUE_SELECT, i);
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_MSI_QUEUE_VECTOR, i + 1);
		}
	}

	return 0;
fail:
	virtio_pci_free_irqs(sc);
	return 1;
}

/*
 * Interrupt handler.
 */

/*
 * Only used without MSI-X
 */
int
virtio_pci_legacy_intr(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_isr_iot, sc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	KERNEL_LOCK();
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (vsc->sc_config_change != NULL)) {
		r = (vsc->sc_config_change)(vsc);
	}
	r |= virtio_check_vqs(vsc);
	KERNEL_UNLOCK();

	return r;
}

int
virtio_pci_legacy_intr_mpsafe(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_isr_iot, sc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (vsc->sc_config_change != NULL)) {
		r = (vsc->sc_config_change)(vsc);
	}
	r |= virtio_check_vqs(vsc);
	return r;
}

/*
 * Only used with MSI-X
 */
int
virtio_pci_config_intr(void *arg)
{
	struct virtio_softc *vsc = arg;

	if (vsc->sc_config_change != NULL)
		return vsc->sc_config_change(vsc);
	return 0;
}

/*
 * Only used with MSI-X
 */
int
virtio_pci_queue_intr(void *arg)
{
	struct virtqueue *vq = arg;

	if (vq->vq_done)
		return (vq->vq_done)(vq);
	return 0;
}

int
virtio_pci_shared_queue_intr(void *arg)
{
	struct virtio_softc *vsc = arg;

	return virtio_check_vqs(vsc);
}

/*
 * Interrupt handler to be used when polling.
 * We cannot use isr here because it is not defined in MSI-X mode.
 */
int
virtio_pci_poll_intr(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int r = 0;

	if (vsc->sc_config_change != NULL)
		r = (vsc->sc_config_change)(vsc);

	r |= virtio_check_vqs(vsc);

	return r;
}

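/*
 * Notify the device that queue idx has new buffers by writing the queue
 * index to the notify register.
 */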
void
virtio_pci_kick(struct virtio_softc *vsc, uint16_t idx)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_2(sc->sc_notify_iot, sc->sc_notify_ioh, 0, idx);
}