virtio_pci.c revision 1.26
1/*	$OpenBSD: virtio_pci.c,v 1.26 2019/05/26 15:20:04 sf Exp $	*/
2/*	$NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $	*/
3
4/*
5 * Copyright (c) 2012 Stefan Fritsch.
6 * Copyright (c) 2010 Minoura Makoto.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/device.h>
33#include <sys/mutex.h>
34
35#include <dev/pci/pcidevs.h>
36#include <dev/pci/pcireg.h>
37#include <dev/pci/pcivar.h>
38
39#include <dev/pv/virtioreg.h>
40#include <dev/pv/virtiovar.h>
41#include <dev/pci/virtio_pcireg.h>
42
43/*
44 * XXX: Before being used on big endian arches, the access to config registers
45 * XXX: needs to be reviewed/fixed. The non-device specific registers are
46 * XXX: PCI-endian while the device specific registers are native endian.
47 */
48
49/* Maximum number of MSI-X vectors: 1 config vector + up to 7 queue vectors. */
#define MAX_MSIX_VECS	8

struct virtio_pci_softc;

/* autoconf glue */
int		virtio_pci_match(struct device *, void *, void *);
void		virtio_pci_attach(struct device *, struct device *, void *);
int		virtio_pci_detach(struct device *, int);

/* virtio_ops backends and internal helpers */
void		virtio_pci_kick(struct virtio_softc *, uint16_t);
int		virtio_pci_adjust_config_region(struct virtio_pci_softc *);
uint8_t		virtio_pci_read_device_config_1(struct virtio_softc *, int);
uint16_t	virtio_pci_read_device_config_2(struct virtio_softc *, int);
uint32_t	virtio_pci_read_device_config_4(struct virtio_softc *, int);
uint64_t	virtio_pci_read_device_config_8(struct virtio_softc *, int);
void		virtio_pci_write_device_config_1(struct virtio_softc *, int, uint8_t);
void		virtio_pci_write_device_config_2(struct virtio_softc *, int, uint16_t);
void		virtio_pci_write_device_config_4(struct virtio_softc *, int, uint32_t);
void		virtio_pci_write_device_config_8(struct virtio_softc *, int, uint64_t);
uint16_t	virtio_pci_read_queue_size(struct virtio_softc *, uint16_t);
void		virtio_pci_setup_queue(struct virtio_softc *, struct virtqueue *, uint64_t);
void		virtio_pci_set_status(struct virtio_softc *, int);
int		virtio_pci_negotiate_features(struct virtio_softc *,
    const struct virtio_feature_name *);
void		virtio_pci_set_msix_queue_vector(struct virtio_pci_softc *, uint32_t, uint16_t);
void		virtio_pci_set_msix_config_vector(struct virtio_pci_softc *, uint16_t);
int		virtio_pci_msix_establish(struct virtio_pci_softc *, struct pci_attach_args *, int, int (*)(void *), void *);
int		virtio_pci_setup_msix(struct virtio_pci_softc *, struct pci_attach_args *, int);
void		virtio_pci_free_irqs(struct virtio_pci_softc *);
/* interrupt handlers */
int		virtio_pci_poll_intr(void *);
int		virtio_pci_legacy_intr(void *);
int		virtio_pci_legacy_intr_mpsafe(void *);
int		virtio_pci_config_intr(void *);
int		virtio_pci_queue_intr(void *);
int		virtio_pci_shared_queue_intr(void *);

/* How interrupts are delivered to this device instance. */
enum irq_type {
	IRQ_NO_MSIX,	 /* legacy INTx or plain MSI; one irq for everything */
	IRQ_MSIX_SHARED, /* vec 0: config irq, vec 1 shared by all vqs */
	IRQ_MSIX_PER_VQ, /* vec 0: config irq, vec n: irq of vq[n-1] */
};
89
90/* Per-instance state; sc_sc must stay first so casts between
 * struct virtio_softc * and struct virtio_pci_softc * work. */
struct virtio_pci_softc {
	struct virtio_softc	sc_sc;		/* generic virtio state, must be first */
	pci_chipset_tag_t	sc_pc;		/* PCI chipset tag, for intr setup */
	pcitag_t		sc_ptag;	/* our PCI tag */

	/* whole I/O bar (BAR0) */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;

	/* subregion covering the queue-notify register */
	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;

	/* subregion covering the device-specific config registers */
	bus_space_tag_t		sc_devcfg_iot;
	bus_space_handle_t	sc_devcfg_ioh;
	bus_size_t		sc_devcfg_iosize;
	/*
	 * With 0.9, the offset of the devcfg region in the io bar changes
	 * depending on MSI-X being enabled or not.
	 * With 1.0, this field is still used to remember if MSI-X is enabled
	 * or not.
	 */
	unsigned int		sc_devcfg_offset;

	/* subregion covering the ISR status register */
	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	/* established interrupt handles; [0] only for non-MSI-X */
	void			*sc_ih[MAX_MSIX_VECS];

	enum irq_type		sc_irq_type;
};
122
123/* autoconf attachment glue */
struct cfattach virtio_pci_ca = {
	sizeof(struct virtio_pci_softc),	/* ca_devsize */
	virtio_pci_match,			/* ca_match */
	virtio_pci_attach,			/* ca_attach */
	virtio_pci_detach,			/* ca_detach */
	NULL					/* ca_activate */
};
130
131/* Transport operations handed to the generic virtio layer. */
struct virtio_ops virtio_pci_ops = {
	virtio_pci_kick,
	virtio_pci_read_device_config_1,
	virtio_pci_read_device_config_2,
	virtio_pci_read_device_config_4,
	virtio_pci_read_device_config_8,
	virtio_pci_write_device_config_1,
	virtio_pci_write_device_config_2,
	virtio_pci_write_device_config_4,
	virtio_pci_write_device_config_8,
	virtio_pci_read_queue_size,
	virtio_pci_setup_queue,
	virtio_pci_set_status,
	virtio_pci_negotiate_features,
	virtio_pci_poll_intr,
};
147
148/*
 * Return the ring size of virtqueue `idx'.
 */
uint16_t
virtio_pci_read_queue_size(struct virtio_softc *vsc, uint16_t idx)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	/* Select the queue first; the size register then refers to it. */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_SELECT,
	    idx);
	return bus_space_read_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SIZE);
}
157
158/*
 * Program the ring address of virtqueue `vq' into the device and,
 * if MSI-X is in use, (re)assign the queue's interrupt vector.
 * The address register takes the ring address in units of
 * VIRTIO_PAGE_SIZE.
 */
void
virtio_pci_setup_queue(struct virtio_softc *vsc, struct virtqueue *vq,
    uint64_t addr)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_SELECT,
	    vq->vq_index);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_ADDRESS,
	    addr / VIRTIO_PAGE_SIZE);

	/*
	 * This path is only executed if this function is called after
	 * the child's attach function has finished. In other cases,
	 * it's done in virtio_pci_setup_msix().
	 */
	if (sc->sc_irq_type != IRQ_NO_MSIX) {
		/* Vector 0 is the config vector; queues start at 1. */
		int vec = 1;
		if (sc->sc_irq_type == IRQ_MSIX_PER_VQ)
		       vec += vq->vq_index;
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_MSI_QUEUE_VECTOR, vec);
	}
}
181
182/*
 * Update the device status register. Writing 0 performs a device
 * reset; for any other value the previously set status bits are
 * preserved by OR-ing them in.
 */
void
virtio_pci_set_status(struct virtio_softc *vsc, int status)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	int prev = 0;

	/* A write of 0 resets the device; don't merge old bits then. */
	if (status != 0) {
		prev = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
	}
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_DEVICE_STATUS,
	    status | prev);
}
194
195/*
 * Match legacy virtio devices (Qumranet vendor ID, transitional
 * device IDs 0x1000-0x103f, revision 0) and the OpenBSD control
 * device.
 */
int
virtio_pci_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	switch (PCI_VENDOR(pa->pa_id)) {
	case PCI_VENDOR_QUMRANET:
		if (PCI_PRODUCT(pa->pa_id) >= 0x1000 &&
		    PCI_PRODUCT(pa->pa_id) <= 0x103f &&
		    PCI_REVISION(pa->pa_class) == 0)
			return 1;
		break;
	case PCI_VENDOR_OPENBSD:
		if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_OPENBSD_CONTROL)
			return 1;
		break;
	}
	return 0;
}
211
212/*
 * Attach: map the legacy I/O bar, carve out the notify/ISR/devcfg
 * subregions, reset the device, attach the child driver (selected by
 * the PCI subsystem ID), and finally establish interrupts -- MSI-X
 * per-VQ if possible, then shared MSI-X, then plain MSI/INTx.
 */
void
virtio_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
	struct virtio_softc *vsc = &sc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	pcireg_t id;
	char const *intrstr;
	pci_intr_handle_t ih;

	/* Only legacy (revision 0) devices are handled here. */
	revision = PCI_REVISION(pa->pa_class);
	if (revision != 0) {
		printf("unknown revision 0x%02x; giving up\n", revision);
		return;
	}

	/* subsystem ID shows what I am */
	id = PCI_PRODUCT(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));

	printf("\n");

	vsc->sc_ops = &virtio_pci_ops;
	sc->sc_pc = pc;
	sc->sc_ptag = pa->pa_tag;
	vsc->sc_dmat = pa->pa_dmat;

	/*
	 * For virtio, ignore normal MSI black/white-listing depending on the
	 * PCI bridge but enable it unconditionally.
	 */
	pa->pa_flags |= PCI_FLAGS_MSI_ENABLED;

	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_iosize, 0)) {
		printf("%s: can't map i/o space\n", vsc->sc_dev.dv_xname);
		return;
	}

	/* Notify and ISR regions are fixed-offset subregions of the bar. */
	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &sc->sc_notify_ioh) != 0) {
		printf("%s: can't map notify i/o space\n",
		    vsc->sc_dev.dv_xname);
		return;
	}
	sc->sc_notify_iosize = 2;
	sc->sc_notify_iot = sc->sc_iot;

	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS, 1, &sc->sc_isr_ioh) != 0) {
		printf("%s: can't map isr i/o space\n",
		    vsc->sc_dev.dv_xname);
		return;
	}
	sc->sc_isr_iosize = 1;
	sc->sc_isr_iot = sc->sc_iot;

	/* Start with the non-MSI devcfg layout; may change in setup_msix. */
	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	sc->sc_irq_type = IRQ_NO_MSIX;
	if (virtio_pci_adjust_config_region(sc) != 0)
		return;

	/* Reset, then announce ourselves to the device. */
	virtio_device_reset(vsc);
	virtio_pci_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_pci_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	/* The child's attach function negotiates features and sets up vqs. */
	vsc->sc_childdevid = id;
	vsc->sc_child = NULL;
	config_found(self, sc, NULL);
	if (vsc->sc_child == NULL) {
		printf("%s: no matching child driver; not configured\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}
	if (vsc->sc_child == VIRTIO_CHILD_ERROR) {
		printf("%s: virtio configuration failed\n",
		    vsc->sc_dev.dv_xname);
		goto fail_1;
	}

	/* Prefer per-VQ MSI-X, fall back to shared MSI-X, then MSI/INTx. */
	if (virtio_pci_setup_msix(sc, pa, 0) == 0) {
		sc->sc_irq_type = IRQ_MSIX_PER_VQ;
		intrstr = "msix per-VQ";
	} else if (virtio_pci_setup_msix(sc, pa, 1) == 0) {
		sc->sc_irq_type = IRQ_MSIX_SHARED;
		intrstr = "msix shared";
	} else {
		int (*ih_func)(void *) = virtio_pci_legacy_intr;
		if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
			printf("%s: couldn't map interrupt\n", vsc->sc_dev.dv_xname);
			goto fail_2;
		}
		intrstr = pci_intr_string(pc, ih);
		/*
		 * We always set the IPL_MPSAFE flag in order to do the relatively
		 * expensive ISR read without lock, and then grab the kernel lock in
		 * the interrupt handler.
		 */
		if (vsc->sc_ipl & IPL_MPSAFE)
			ih_func = virtio_pci_legacy_intr_mpsafe;
		sc->sc_ih[0] = pci_intr_establish(pc, ih, vsc->sc_ipl | IPL_MPSAFE,
		    ih_func, sc, vsc->sc_dev.dv_xname);
		if (sc->sc_ih[0] == NULL) {
			printf("%s: couldn't establish interrupt", vsc->sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			goto fail_2;
		}
	}
	printf("%s: %s\n", vsc->sc_dev.dv_xname, intrstr);

	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	return;

fail_2:
	config_detach(vsc->sc_child, 0);
fail_1:
	/* no pci_mapreg_unmap() or pci_intr_unmap() */
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
}
335
336/*
 * Detach: tear down the child driver first (it owns the virtqueues),
 * then release our interrupts and the mapped I/O bar.
 */
int
virtio_pci_detach(struct device *self, int flags)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)self;
	struct virtio_softc *vsc = &sc->sc_sc;
	int ret;

	if (vsc->sc_child != NULL && vsc->sc_child != VIRTIO_CHILD_ERROR) {
		ret = config_detach(vsc->sc_child, flags);
		if (ret != 0)
			return ret;
	}
	/* The child must have freed its virtqueues by now. */
	KASSERT(vsc->sc_child == NULL || vsc->sc_child == VIRTIO_CHILD_ERROR);
	KASSERT(vsc->sc_vqs == NULL);
	virtio_pci_free_irqs(sc);
	if (sc->sc_iosize != 0)
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_iosize);
	sc->sc_iosize = 0;

	return 0;
}
357
358/*
 * (Re)map the device-specific config subregion. Its offset within the
 * I/O bar (sc_devcfg_offset) depends on whether MSI-X is enabled, so
 * this is called again whenever that changes. Returns 0 on success,
 * 1 on failure.
 */
int
virtio_pci_adjust_config_region(struct virtio_pci_softc *sc)
{
	int error;

	sc->sc_devcfg_iot = sc->sc_iot;
	/* The devcfg region extends to the end of the bar. */
	sc->sc_devcfg_iosize = sc->sc_iosize - sc->sc_devcfg_offset;
	error = bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    sc->sc_devcfg_offset, sc->sc_devcfg_iosize, &sc->sc_devcfg_ioh);
	if (error != 0) {
		printf("%s: can't map config i/o space\n",
		    sc->sc_sc.sc_dev.dv_xname);
		return 1;
	}
	return 0;
}
371
372/*
 * Feature negotiation: read the host's feature bits, mask them with the
 * features the driver supports (sc_driver_features, possibly trimmed by
 * UKC flags), and write the result back as the guest features.
 * Prints available / negotiated features if guest_feature_names != NULL and
 * VIRTIO_DEBUG is 1.  Always returns 0.
 */
int
virtio_pci_negotiate_features(struct virtio_softc *vsc,
    const struct virtio_feature_name *guest_feature_names)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t host, neg;

	vsc->sc_active_features = 0;

	/*
	 * We enable indirect descriptors by default. They can be switched
	 * off by setting bit 1 in the driver flags, see config(8)
	 */
	if (!(vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT) &&
	    !(vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_INDIRECT)) {
		vsc->sc_driver_features |= VIRTIO_F_RING_INDIRECT_DESC;
	} else if (guest_feature_names != NULL) {
		printf(" RingIndirectDesc disabled by UKC");
	}

	/*
	 * The driver must add VIRTIO_F_RING_EVENT_IDX if it supports it.
	 * If it did, check if it is disabled by bit 2 in the driver flags.
	 */
	if ((vsc->sc_driver_features & VIRTIO_F_RING_EVENT_IDX) &&
	    ((vsc->sc_dev.dv_cfdata->cf_flags & VIRTIO_CF_NO_EVENT_IDX) ||
	    (vsc->sc_child->dv_cfdata->cf_flags & VIRTIO_CF_NO_EVENT_IDX))) {
		if (guest_feature_names != NULL)
			printf(" RingEventIdx disabled by UKC");
		vsc->sc_driver_features &= ~(VIRTIO_F_RING_EVENT_IDX);
	}

	/* The legacy feature registers are 32 bits wide. */
	host = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
				VIRTIO_CONFIG_DEVICE_FEATURES);
	neg = host & vsc->sc_driver_features;
#if VIRTIO_DEBUG
	if (guest_feature_names)
		virtio_log_features(host, neg, guest_feature_names);
#endif
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			  VIRTIO_CONFIG_GUEST_FEATURES, neg);
	vsc->sc_active_features = neg;
	/* Cache whether indirect descriptors were negotiated. */
	if (neg & VIRTIO_F_RING_INDIRECT_DESC)
		vsc->sc_indirect = 1;
	else
		vsc->sc_indirect = 0;

	return 0;
}
426
427/*
428 * Device configuration registers.
429 */
430/* Read one byte at `index' in the device-specific config space. */
uint8_t
virtio_pci_read_device_config_1(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint8_t val;

	val = bus_space_read_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
	return val;
}
436
437/* Read a 16-bit value at `index' in the device-specific config space. */
uint16_t
virtio_pci_read_device_config_2(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint16_t val;

	val = bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
	return val;
}
443
444/* Read a 32-bit value at `index' in the device-specific config space. */
uint32_t
virtio_pci_read_device_config_4(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint32_t val;

	val = bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
	return val;
}
450
451/*
 * Read a 64-bit value at `index' in the device-specific config space.
 * Done as two 32-bit reads (not atomic), high word first as before.
 */
uint64_t
virtio_pci_read_device_config_8(struct virtio_softc *vsc, int index)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint64_t hi, lo;

	hi = bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
	    index + sizeof(uint32_t));
	lo = bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index);
	return (hi << 32) | lo;
}
463
464/* Write one byte at `index' in the device-specific config space. */
void
virtio_pci_write_device_config_1(struct virtio_softc *vsc, int index,
    uint8_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
}
471
472/* Write a 16-bit value at `index' in the device-specific config space. */
void
virtio_pci_write_device_config_2(struct virtio_softc *vsc, int index,
    uint16_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
}
479
480/* Write a 32-bit value at `index' in the device-specific config space. */
void
virtio_pci_write_device_config_4(struct virtio_softc *vsc,
			     int index, uint32_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, value);
}
487
488/*
 * Write a 64-bit value at `index' in the device-specific config space.
 * Done as two 32-bit writes (not atomic), low word first as before.
 */
void
virtio_pci_write_device_config_8(struct virtio_softc *vsc,
			     int index, uint64_t value)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	uint32_t lo = value & 0xffffffff;
	uint32_t hi = value >> 32;

	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index, lo);
	bus_space_write_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
	    index + sizeof(uint32_t), hi);
}
498
499/*
 * Map and establish MSI-X vector `idx' with the given handler and
 * argument. The handle is stored in sc->sc_ih[idx]. Returns 0 on
 * success, 1 on failure.
 */
int
virtio_pci_msix_establish(struct virtio_pci_softc *sc,
    struct pci_attach_args *pa, int idx, int (*handler)(void *), void *ih_arg)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	pci_intr_handle_t ih;

	if (pci_intr_map_msix(pa, idx, &ih) != 0) {
#if VIRTIO_DEBUG
		printf("%s[%d]: pci_intr_map_msix failed\n",
		    vsc->sc_dev.dv_xname, idx);
#endif
		return 1;
	}
	sc->sc_ih[idx] = pci_intr_establish(sc->sc_pc, ih, vsc->sc_ipl,
	    handler, ih_arg, vsc->sc_dev.dv_xname);
	if (sc->sc_ih[idx] == NULL) {
		printf("%s[%d]: couldn't establish msix interrupt\n",
		    vsc->sc_dev.dv_xname, idx);
		return 1;
	}
	return 0;
}
522
523/*
 * Assign MSI-X vector `vector' to virtqueue `idx' (the queue must be
 * selected before the vector register refers to it).
 */
void
virtio_pci_set_msix_queue_vector(struct virtio_pci_softc *sc, uint32_t idx, uint16_t vector)
{
	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_MSI_QUEUE_VECTOR, vector);
}
531
532/* Assign MSI-X vector `vector' to config-change interrupts. */
void
virtio_pci_set_msix_config_vector(struct virtio_pci_softc *sc, uint16_t vector)
{
	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_MSI_CONFIG_VECTOR, vector);
}
538
539/*
 * Disestablish all interrupts and revert to the non-MSI-X devcfg
 * layout. Safe to call whether or not MSI-X was set up.
 */
void
virtio_pci_free_irqs(struct virtio_pci_softc *sc)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	int i;

	/* sc_devcfg_offset doubles as the "MSI-X enabled" flag. */
	if (sc->sc_devcfg_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI) {
		for (i = 0; i < vsc->sc_nvqs; i++) {
			virtio_pci_set_msix_queue_vector(sc, i,
			    VIRTIO_MSI_NO_VECTOR);
		}
	}

	for (i = 0; i < MAX_MSIX_VECS; i++) {
		if (sc->sc_ih[i]) {
			pci_intr_disestablish(sc->sc_pc, sc->sc_ih[i]);
			sc->sc_ih[i] = NULL;
		}
	}

	/* Back to the non-MSI devcfg offset; remap the subregion. */
	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	virtio_pci_adjust_config_region(sc);
}
562
563/*
 * Set up MSI-X interrupts: vector 0 handles config changes; queue
 * interrupts use either a single shared vector 1 (shared != 0) or one
 * vector per virtqueue (vector i+1 for vq[i]). Returns 0 on success,
 * 1 on failure, in which case all vectors established so far are torn
 * down again.
 */
int
virtio_pci_setup_msix(struct virtio_pci_softc *sc, struct pci_attach_args *pa,
    int shared)
{
	struct virtio_softc *vsc = &sc->sc_sc;
	int i;

	if (virtio_pci_msix_establish(sc, pa, 0, virtio_pci_config_intr, vsc))
		return 1;
	/* With MSI-X enabled, the devcfg region moves within the bar. */
	sc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
	virtio_pci_adjust_config_region(sc);
	virtio_pci_set_msix_config_vector(sc, 0);

	if (shared) {
		if (virtio_pci_msix_establish(sc, pa, 1,
		    virtio_pci_shared_queue_intr, vsc)) {
			goto fail;
		}

		/* All queues raise the single shared vector 1. */
		for (i = 0; i < vsc->sc_nvqs; i++)
			virtio_pci_set_msix_queue_vector(sc, i, 1);
	} else {
		/*
		 * One vector per queue. The loop bound was `i <= sc_nvqs',
		 * which read one element past the end of vsc->sc_vqs[] and
		 * set up one vector too many; fixed to `i < sc_nvqs'.
		 */
		for (i = 0; i < vsc->sc_nvqs; i++) {
			if (virtio_pci_msix_establish(sc, pa, i + 1,
			    virtio_pci_queue_intr, &vsc->sc_vqs[i])) {
				goto fail;
			}
			virtio_pci_set_msix_queue_vector(sc, i, i + 1);
		}
	}

	return 0;
fail:
	virtio_pci_free_irqs(sc);
	return 1;
}
599
600/*
601 * Interrupt handler.
602 */
603
604/*
605 * Only used without MSI-X
606 */
607/*
 * Non-MSI-X interrupt handler. Reading the ISR register also acks the
 * interrupt; bit VIRTIO_CONFIG_ISR_CONFIG_CHANGE signals a config
 * change, and the virtqueues are checked in any case. The ISR read is
 * done without the kernel lock (we always register as IPL_MPSAFE);
 * the lock is taken only once we know the interrupt is ours.
 */
int
virtio_pci_legacy_intr(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_isr_iot, sc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	KERNEL_LOCK();
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (vsc->sc_config_change != NULL)) {
		r = (vsc->sc_config_change)(vsc);
	}
	r |= virtio_check_vqs(vsc);
	KERNEL_UNLOCK();

	return r;
}
628
629/*
 * Like virtio_pci_legacy_intr(), but for children that registered with
 * IPL_MPSAFE: no kernel lock is taken around the handlers.
 */
int
virtio_pci_legacy_intr_mpsafe(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_isr_iot, sc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (vsc->sc_config_change != NULL)) {
		r = (vsc->sc_config_change)(vsc);
	}
	r |= virtio_check_vqs(vsc);
	return r;
}
647
648/*
649 * Only used with MSI-X
650 */
651/*
 * MSI-X config-change handler (vector 0): dispatch to the child's
 * config-change hook, if it registered one.
 */
int
virtio_pci_config_intr(void *arg)
{
	struct virtio_softc *vsc = arg;

	if (vsc->sc_config_change == NULL)
		return 0;
	return vsc->sc_config_change(vsc);
}
660
661/*
662 * Only used with MSI-X
663 */
664/*
 * MSI-X per-queue handler: run the virtqueue's completion callback,
 * if it has one.
 */
int
virtio_pci_queue_intr(void *arg)
{
	struct virtqueue *vq = arg;

	if (vq->vq_done == NULL)
		return 0;
	return vq->vq_done(vq);
}
673
674/* MSI-X shared-queue handler (vector 1): poll all virtqueues. */
int
virtio_pci_shared_queue_intr(void *arg)
{
	struct virtio_softc *vsc = arg;

	return virtio_check_vqs(vsc);
}
681
682/*
683 * Interrupt handler to be used when polling.
684 * We cannot use isr here because it is not defined in MSI-X mode.
685 */
686/*
 * Polling entry point: unconditionally run the config-change hook
 * (if any) and check all virtqueues, without touching the ISR
 * register (which does not exist in MSI-X mode).
 */
int
virtio_pci_poll_intr(void *arg)
{
	struct virtio_pci_softc *sc = arg;
	struct virtio_softc *vsc = &sc->sc_sc;
	int handled = 0;

	if (vsc->sc_config_change != NULL)
		handled = vsc->sc_config_change(vsc);

	handled |= virtio_check_vqs(vsc);

	return handled;
}
700
701/*
 * Notify the device that virtqueue `idx' has new buffers by writing
 * the queue index to the notify register.
 */
void
virtio_pci_kick(struct virtio_softc *vsc, uint16_t idx)
{
	struct virtio_pci_softc *sc = (struct virtio_pci_softc *)vsc;
	bus_space_write_2(sc->sc_notify_iot, sc->sc_notify_ioh, 0, idx);
}
707