/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for the VirtIO PCI interface. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/virtio/pci/virtio_pci.c 252707 2013-07-04 17:57:26Z bryanv $");
31 32#include <sys/param.h> 33#include <sys/systm.h> 34#include <sys/bus.h> 35#include <sys/kernel.h> 36#include <sys/module.h> 37#include <sys/malloc.h> 38 39#include <machine/bus.h> 40#include <machine/resource.h> 41#include <sys/bus.h> 42#include <sys/rman.h> 43 44#include <dev/pci/pcivar.h> 45#include <dev/pci/pcireg.h> 46 47#include <dev/virtio/virtio.h> 48#include <dev/virtio/virtqueue.h> 49#include <dev/virtio/pci/virtio_pci.h> 50 51#include "virtio_bus_if.h" 52#include "virtio_if.h" 53 54struct vtpci_softc { 55 device_t vtpci_dev; 56 struct resource *vtpci_res; 57 struct resource *vtpci_msix_res; 58 uint64_t vtpci_features; 59 uint32_t vtpci_flags; 60#define VTPCI_FLAG_NO_MSI 0x0001 61#define VTPCI_FLAG_NO_MSIX 0x0002 62#define VTPCI_FLAG_LEGACY 0x1000 63#define VTPCI_FLAG_MSI 0x2000 64#define VTPCI_FLAG_MSIX 0x4000 65#define VTPCI_FLAG_SHARED_MSIX 0x8000 66#define VTPCI_FLAG_ITYPE_MASK 0xF000 67 68 /* This "bus" will only ever have one child. */ 69 device_t vtpci_child_dev; 70 struct virtio_feature_desc *vtpci_child_feat_desc; 71 72 /* 73 * Ideally, each virtqueue that the driver provides a callback for 74 * will receive its own MSIX vector. If there are not sufficient 75 * vectors available, we will then attempt to have all the VQs 76 * share one vector. Note that when using MSIX, the configuration 77 * changed notifications must be on their own vector. 78 * 79 * If MSIX is not available, we will attempt to have the whole 80 * device share one MSI vector, and then, finally, one legacy 81 * interrupt. 82 */ 83 int vtpci_nvqs; 84 struct vtpci_virtqueue { 85 struct virtqueue *vq; 86 /* Device did not provide a callback for this virtqueue. */ 87 int no_intr; 88 /* Index into vtpci_intr_res[] below. Unused, then -1. */ 89 int ires_idx; 90 } vtpci_vqx[VIRTIO_MAX_VIRTQUEUES]; 91 92 /* 93 * When using MSIX interrupts, the first element of vtpci_intr_res[] 94 * is always the configuration changed notifications. 
The remaining 95 * element(s) are used for the virtqueues. 96 * 97 * With MSI and legacy interrupts, only the first element of 98 * vtpci_intr_res[] is used. 99 */ 100 int vtpci_nintr_res; 101 struct vtpci_intr_resource { 102 struct resource *irq; 103 int rid; 104 void *intrhand; 105 } vtpci_intr_res[1 + VIRTIO_MAX_VIRTQUEUES]; 106}; 107 108static int vtpci_probe(device_t); 109static int vtpci_attach(device_t); 110static int vtpci_detach(device_t); 111static int vtpci_suspend(device_t); 112static int vtpci_resume(device_t); 113static int vtpci_shutdown(device_t); 114static void vtpci_driver_added(device_t, driver_t *); 115static void vtpci_child_detached(device_t, device_t); 116static int vtpci_read_ivar(device_t, device_t, int, uintptr_t *); 117static int vtpci_write_ivar(device_t, device_t, int, uintptr_t); 118 119static uint64_t vtpci_negotiate_features(device_t, uint64_t); 120static int vtpci_with_feature(device_t, uint64_t); 121static int vtpci_alloc_virtqueues(device_t, int, int, 122 struct vq_alloc_info *); 123static int vtpci_setup_intr(device_t, enum intr_type); 124static void vtpci_stop(device_t); 125static int vtpci_reinit(device_t, uint64_t); 126static void vtpci_reinit_complete(device_t); 127static void vtpci_notify_virtqueue(device_t, uint16_t); 128static uint8_t vtpci_get_status(device_t); 129static void vtpci_set_status(device_t, uint8_t); 130static void vtpci_read_dev_config(device_t, bus_size_t, void *, int); 131static void vtpci_write_dev_config(device_t, bus_size_t, void *, int); 132 133static void vtpci_describe_features(struct vtpci_softc *, const char *, 134 uint64_t); 135static void vtpci_probe_and_attach_child(struct vtpci_softc *); 136 137static int vtpci_alloc_msix(struct vtpci_softc *, int); 138static int vtpci_alloc_msi(struct vtpci_softc *); 139static int vtpci_alloc_intr_msix_pervq(struct vtpci_softc *); 140static int vtpci_alloc_intr_msix_shared(struct vtpci_softc *); 141static int vtpci_alloc_intr_msi(struct vtpci_softc *); 
142static int vtpci_alloc_intr_legacy(struct vtpci_softc *); 143static int vtpci_alloc_intr_resources(struct vtpci_softc *); 144 145static int vtpci_setup_legacy_interrupt(struct vtpci_softc *, 146 enum intr_type); 147static int vtpci_setup_msix_interrupts(struct vtpci_softc *, 148 enum intr_type); 149static int vtpci_setup_interrupts(struct vtpci_softc *, enum intr_type); 150 151static int vtpci_register_msix_vector(struct vtpci_softc *, int, int); 152static int vtpci_set_host_msix_vectors(struct vtpci_softc *); 153static int vtpci_reinit_virtqueue(struct vtpci_softc *, int); 154 155static void vtpci_free_interrupts(struct vtpci_softc *); 156static void vtpci_free_virtqueues(struct vtpci_softc *); 157static void vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *); 158static void vtpci_release_child_resources(struct vtpci_softc *); 159static void vtpci_reset(struct vtpci_softc *); 160 161static void vtpci_select_virtqueue(struct vtpci_softc *, int); 162 163static void vtpci_legacy_intr(void *); 164static int vtpci_vq_shared_intr_filter(void *); 165static void vtpci_vq_shared_intr(void *); 166static int vtpci_vq_intr_filter(void *); 167static void vtpci_vq_intr(void *); 168static void vtpci_config_intr(void *); 169 170#define vtpci_setup_msi_interrupt vtpci_setup_legacy_interrupt 171 172/* 173 * I/O port read/write wrappers. 174 */ 175#define vtpci_read_config_1(sc, o) bus_read_1((sc)->vtpci_res, (o)) 176#define vtpci_read_config_2(sc, o) bus_read_2((sc)->vtpci_res, (o)) 177#define vtpci_read_config_4(sc, o) bus_read_4((sc)->vtpci_res, (o)) 178#define vtpci_write_config_1(sc, o, v) bus_write_1((sc)->vtpci_res, (o), (v)) 179#define vtpci_write_config_2(sc, o, v) bus_write_2((sc)->vtpci_res, (o), (v)) 180#define vtpci_write_config_4(sc, o, v) bus_write_4((sc)->vtpci_res, (o), (v)) 181 182/* Tunables. 
*/ 183static int vtpci_disable_msix = 0; 184TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix); 185 186static device_method_t vtpci_methods[] = { 187 /* Device interface. */ 188 DEVMETHOD(device_probe, vtpci_probe), 189 DEVMETHOD(device_attach, vtpci_attach), 190 DEVMETHOD(device_detach, vtpci_detach), 191 DEVMETHOD(device_suspend, vtpci_suspend), 192 DEVMETHOD(device_resume, vtpci_resume), 193 DEVMETHOD(device_shutdown, vtpci_shutdown), 194 195 /* Bus interface. */ 196 DEVMETHOD(bus_driver_added, vtpci_driver_added), 197 DEVMETHOD(bus_child_detached, vtpci_child_detached), 198 DEVMETHOD(bus_read_ivar, vtpci_read_ivar), 199 DEVMETHOD(bus_write_ivar, vtpci_write_ivar), 200 201 /* VirtIO bus interface. */ 202 DEVMETHOD(virtio_bus_negotiate_features, vtpci_negotiate_features), 203 DEVMETHOD(virtio_bus_with_feature, vtpci_with_feature), 204 DEVMETHOD(virtio_bus_alloc_virtqueues, vtpci_alloc_virtqueues), 205 DEVMETHOD(virtio_bus_setup_intr, vtpci_setup_intr), 206 DEVMETHOD(virtio_bus_stop, vtpci_stop), 207 DEVMETHOD(virtio_bus_reinit, vtpci_reinit), 208 DEVMETHOD(virtio_bus_reinit_complete, vtpci_reinit_complete), 209 DEVMETHOD(virtio_bus_notify_vq, vtpci_notify_virtqueue), 210 DEVMETHOD(virtio_bus_read_device_config, vtpci_read_dev_config), 211 DEVMETHOD(virtio_bus_write_device_config, vtpci_write_dev_config), 212 213 DEVMETHOD_END 214}; 215 216static driver_t vtpci_driver = { 217 "virtio_pci", 218 vtpci_methods, 219 sizeof(struct vtpci_softc) 220}; 221 222devclass_t vtpci_devclass; 223 224DRIVER_MODULE(virtio_pci, pci, vtpci_driver, vtpci_devclass, 0, 0); 225MODULE_VERSION(virtio_pci, 1); 226MODULE_DEPEND(virtio_pci, pci, 1, 1, 1); 227MODULE_DEPEND(virtio_pci, virtio, 1, 1, 1); 228 229static int 230vtpci_probe(device_t dev) 231{ 232 char desc[36]; 233 const char *name; 234 235 if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID) 236 return (ENXIO); 237 238 if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN || 239 pci_get_device(dev) > VIRTIO_PCI_DEVICEID_MAX) 
240 return (ENXIO); 241 242 if (pci_get_revid(dev) != VIRTIO_PCI_ABI_VERSION) 243 return (ENXIO); 244 245 name = virtio_device_name(pci_get_subdevice(dev)); 246 if (name == NULL) 247 name = "Unknown"; 248 249 snprintf(desc, sizeof(desc), "VirtIO PCI %s adapter", name); 250 device_set_desc_copy(dev, desc); 251 252 return (BUS_PROBE_DEFAULT); 253} 254 255static int 256vtpci_attach(device_t dev) 257{ 258 struct vtpci_softc *sc; 259 device_t child; 260 int rid; 261 262 sc = device_get_softc(dev); 263 sc->vtpci_dev = dev; 264 265 pci_enable_busmaster(dev); 266 267 rid = PCIR_BAR(0); 268 sc->vtpci_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, 269 RF_ACTIVE); 270 if (sc->vtpci_res == NULL) { 271 device_printf(dev, "cannot map I/O space\n"); 272 return (ENXIO); 273 } 274 275 if (pci_find_cap(dev, PCIY_MSI, NULL) != 0) 276 sc->vtpci_flags |= VTPCI_FLAG_NO_MSI; 277 278 if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) { 279 rid = PCIR_BAR(1); 280 sc->vtpci_msix_res = bus_alloc_resource_any(dev, 281 SYS_RES_MEMORY, &rid, RF_ACTIVE); 282 } 283 284 if (sc->vtpci_msix_res == NULL) 285 sc->vtpci_flags |= VTPCI_FLAG_NO_MSIX; 286 287 vtpci_reset(sc); 288 289 /* Tell the host we've noticed this device. 
*/ 290 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); 291 292 if ((child = device_add_child(dev, NULL, -1)) == NULL) { 293 device_printf(dev, "cannot create child device\n"); 294 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED); 295 vtpci_detach(dev); 296 return (ENOMEM); 297 } 298 299 sc->vtpci_child_dev = child; 300 vtpci_probe_and_attach_child(sc); 301 302 return (0); 303} 304 305static int 306vtpci_detach(device_t dev) 307{ 308 struct vtpci_softc *sc; 309 device_t child; 310 int error; 311 312 sc = device_get_softc(dev); 313 314 if ((child = sc->vtpci_child_dev) != NULL) { 315 error = device_delete_child(dev, child); 316 if (error) 317 return (error); 318 sc->vtpci_child_dev = NULL; 319 } 320 321 vtpci_reset(sc); 322 323 if (sc->vtpci_msix_res != NULL) { 324 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(1), 325 sc->vtpci_msix_res); 326 sc->vtpci_msix_res = NULL; 327 } 328 329 if (sc->vtpci_res != NULL) { 330 bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0), 331 sc->vtpci_res); 332 sc->vtpci_res = NULL; 333 } 334 335 return (0); 336} 337 338static int 339vtpci_suspend(device_t dev) 340{ 341 342 return (bus_generic_suspend(dev)); 343} 344 345static int 346vtpci_resume(device_t dev) 347{ 348 349 return (bus_generic_resume(dev)); 350} 351 352static int 353vtpci_shutdown(device_t dev) 354{ 355 356 (void) bus_generic_shutdown(dev); 357 /* Forcibly stop the host device. 
*/ 358 vtpci_stop(dev); 359 360 return (0); 361} 362 363static void 364vtpci_driver_added(device_t dev, driver_t *driver) 365{ 366 struct vtpci_softc *sc; 367 368 sc = device_get_softc(dev); 369 370 vtpci_probe_and_attach_child(sc); 371} 372 373static void 374vtpci_child_detached(device_t dev, device_t child) 375{ 376 struct vtpci_softc *sc; 377 378 sc = device_get_softc(dev); 379 380 vtpci_reset(sc); 381 vtpci_release_child_resources(sc); 382} 383 384static int 385vtpci_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) 386{ 387 struct vtpci_softc *sc; 388 389 sc = device_get_softc(dev); 390 391 if (sc->vtpci_child_dev != child) 392 return (ENOENT); 393 394 switch (index) { 395 case VIRTIO_IVAR_DEVTYPE: 396 case VIRTIO_IVAR_SUBDEVICE: 397 *result = pci_get_subdevice(dev); 398 break; 399 case VIRTIO_IVAR_VENDOR: 400 *result = pci_get_vendor(dev); 401 break; 402 case VIRTIO_IVAR_DEVICE: 403 *result = pci_get_device(dev); 404 break; 405 case VIRTIO_IVAR_SUBVENDOR: 406 *result = pci_get_subdevice(dev); 407 break; 408 default: 409 return (ENOENT); 410 } 411 412 return (0); 413} 414 415static int 416vtpci_write_ivar(device_t dev, device_t child, int index, uintptr_t value) 417{ 418 struct vtpci_softc *sc; 419 420 sc = device_get_softc(dev); 421 422 if (sc->vtpci_child_dev != child) 423 return (ENOENT); 424 425 switch (index) { 426 case VIRTIO_IVAR_FEATURE_DESC: 427 sc->vtpci_child_feat_desc = (void *) value; 428 break; 429 default: 430 return (ENOENT); 431 } 432 433 return (0); 434} 435 436static uint64_t 437vtpci_negotiate_features(device_t dev, uint64_t child_features) 438{ 439 struct vtpci_softc *sc; 440 uint64_t host_features, features; 441 442 sc = device_get_softc(dev); 443 444 host_features = vtpci_read_config_4(sc, VIRTIO_PCI_HOST_FEATURES); 445 vtpci_describe_features(sc, "host", host_features); 446 447 /* 448 * Limit negotiated features to what the driver, virtqueue, and 449 * host all support. 
450 */ 451 features = host_features & child_features; 452 features = virtqueue_filter_features(features); 453 sc->vtpci_features = features; 454 455 vtpci_describe_features(sc, "negotiated", features); 456 vtpci_write_config_4(sc, VIRTIO_PCI_GUEST_FEATURES, features); 457 458 return (features); 459} 460 461static int 462vtpci_with_feature(device_t dev, uint64_t feature) 463{ 464 struct vtpci_softc *sc; 465 466 sc = device_get_softc(dev); 467 468 return ((sc->vtpci_features & feature) != 0); 469} 470 471static int 472vtpci_alloc_virtqueues(device_t dev, int flags, int nvqs, 473 struct vq_alloc_info *vq_info) 474{ 475 struct vtpci_softc *sc; 476 struct virtqueue *vq; 477 struct vtpci_virtqueue *vqx; 478 struct vq_alloc_info *info; 479 int idx, error; 480 uint16_t size; 481 482 sc = device_get_softc(dev); 483 error = 0; 484 485 if (sc->vtpci_nvqs != 0) 486 return (EALREADY); 487 if (nvqs <= 0 || nvqs > VIRTIO_MAX_VIRTQUEUES) 488 return (EINVAL); 489
| 31 32#include <sys/param.h> 33#include <sys/systm.h> 34#include <sys/bus.h> 35#include <sys/kernel.h> 36#include <sys/module.h> 37#include <sys/malloc.h> 38 39#include <machine/bus.h> 40#include <machine/resource.h> 41#include <sys/bus.h> 42#include <sys/rman.h> 43 44#include <dev/pci/pcivar.h> 45#include <dev/pci/pcireg.h> 46 47#include <dev/virtio/virtio.h> 48#include <dev/virtio/virtqueue.h> 49#include <dev/virtio/pci/virtio_pci.h> 50 51#include "virtio_bus_if.h" 52#include "virtio_if.h" 53 54struct vtpci_softc { 55 device_t vtpci_dev; 56 struct resource *vtpci_res; 57 struct resource *vtpci_msix_res; 58 uint64_t vtpci_features; 59 uint32_t vtpci_flags; 60#define VTPCI_FLAG_NO_MSI 0x0001 61#define VTPCI_FLAG_NO_MSIX 0x0002 62#define VTPCI_FLAG_LEGACY 0x1000 63#define VTPCI_FLAG_MSI 0x2000 64#define VTPCI_FLAG_MSIX 0x4000 65#define VTPCI_FLAG_SHARED_MSIX 0x8000 66#define VTPCI_FLAG_ITYPE_MASK 0xF000 67 68 /* This "bus" will only ever have one child. */ 69 device_t vtpci_child_dev; 70 struct virtio_feature_desc *vtpci_child_feat_desc; 71 72 /* 73 * Ideally, each virtqueue that the driver provides a callback for 74 * will receive its own MSIX vector. If there are not sufficient 75 * vectors available, we will then attempt to have all the VQs 76 * share one vector. Note that when using MSIX, the configuration 77 * changed notifications must be on their own vector. 78 * 79 * If MSIX is not available, we will attempt to have the whole 80 * device share one MSI vector, and then, finally, one legacy 81 * interrupt. 82 */ 83 int vtpci_nvqs; 84 struct vtpci_virtqueue { 85 struct virtqueue *vq; 86 /* Device did not provide a callback for this virtqueue. */ 87 int no_intr; 88 /* Index into vtpci_intr_res[] below. Unused, then -1. */ 89 int ires_idx; 90 } vtpci_vqx[VIRTIO_MAX_VIRTQUEUES]; 91 92 /* 93 * When using MSIX interrupts, the first element of vtpci_intr_res[] 94 * is always the configuration changed notifications. 
The remaining 95 * element(s) are used for the virtqueues. 96 * 97 * With MSI and legacy interrupts, only the first element of 98 * vtpci_intr_res[] is used. 99 */ 100 int vtpci_nintr_res; 101 struct vtpci_intr_resource { 102 struct resource *irq; 103 int rid; 104 void *intrhand; 105 } vtpci_intr_res[1 + VIRTIO_MAX_VIRTQUEUES]; 106}; 107 108static int vtpci_probe(device_t); 109static int vtpci_attach(device_t); 110static int vtpci_detach(device_t); 111static int vtpci_suspend(device_t); 112static int vtpci_resume(device_t); 113static int vtpci_shutdown(device_t); 114static void vtpci_driver_added(device_t, driver_t *); 115static void vtpci_child_detached(device_t, device_t); 116static int vtpci_read_ivar(device_t, device_t, int, uintptr_t *); 117static int vtpci_write_ivar(device_t, device_t, int, uintptr_t); 118 119static uint64_t vtpci_negotiate_features(device_t, uint64_t); 120static int vtpci_with_feature(device_t, uint64_t); 121static int vtpci_alloc_virtqueues(device_t, int, int, 122 struct vq_alloc_info *); 123static int vtpci_setup_intr(device_t, enum intr_type); 124static void vtpci_stop(device_t); 125static int vtpci_reinit(device_t, uint64_t); 126static void vtpci_reinit_complete(device_t); 127static void vtpci_notify_virtqueue(device_t, uint16_t); 128static uint8_t vtpci_get_status(device_t); 129static void vtpci_set_status(device_t, uint8_t); 130static void vtpci_read_dev_config(device_t, bus_size_t, void *, int); 131static void vtpci_write_dev_config(device_t, bus_size_t, void *, int); 132 133static void vtpci_describe_features(struct vtpci_softc *, const char *, 134 uint64_t); 135static void vtpci_probe_and_attach_child(struct vtpci_softc *); 136 137static int vtpci_alloc_msix(struct vtpci_softc *, int); 138static int vtpci_alloc_msi(struct vtpci_softc *); 139static int vtpci_alloc_intr_msix_pervq(struct vtpci_softc *); 140static int vtpci_alloc_intr_msix_shared(struct vtpci_softc *); 141static int vtpci_alloc_intr_msi(struct vtpci_softc *); 
142static int vtpci_alloc_intr_legacy(struct vtpci_softc *); 143static int vtpci_alloc_intr_resources(struct vtpci_softc *); 144 145static int vtpci_setup_legacy_interrupt(struct vtpci_softc *, 146 enum intr_type); 147static int vtpci_setup_msix_interrupts(struct vtpci_softc *, 148 enum intr_type); 149static int vtpci_setup_interrupts(struct vtpci_softc *, enum intr_type); 150 151static int vtpci_register_msix_vector(struct vtpci_softc *, int, int); 152static int vtpci_set_host_msix_vectors(struct vtpci_softc *); 153static int vtpci_reinit_virtqueue(struct vtpci_softc *, int); 154 155static void vtpci_free_interrupts(struct vtpci_softc *); 156static void vtpci_free_virtqueues(struct vtpci_softc *); 157static void vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *); 158static void vtpci_release_child_resources(struct vtpci_softc *); 159static void vtpci_reset(struct vtpci_softc *); 160 161static void vtpci_select_virtqueue(struct vtpci_softc *, int); 162 163static void vtpci_legacy_intr(void *); 164static int vtpci_vq_shared_intr_filter(void *); 165static void vtpci_vq_shared_intr(void *); 166static int vtpci_vq_intr_filter(void *); 167static void vtpci_vq_intr(void *); 168static void vtpci_config_intr(void *); 169 170#define vtpci_setup_msi_interrupt vtpci_setup_legacy_interrupt 171 172/* 173 * I/O port read/write wrappers. 174 */ 175#define vtpci_read_config_1(sc, o) bus_read_1((sc)->vtpci_res, (o)) 176#define vtpci_read_config_2(sc, o) bus_read_2((sc)->vtpci_res, (o)) 177#define vtpci_read_config_4(sc, o) bus_read_4((sc)->vtpci_res, (o)) 178#define vtpci_write_config_1(sc, o, v) bus_write_1((sc)->vtpci_res, (o), (v)) 179#define vtpci_write_config_2(sc, o, v) bus_write_2((sc)->vtpci_res, (o), (v)) 180#define vtpci_write_config_4(sc, o, v) bus_write_4((sc)->vtpci_res, (o), (v)) 181 182/* Tunables. 
*/ 183static int vtpci_disable_msix = 0; 184TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix); 185 186static device_method_t vtpci_methods[] = { 187 /* Device interface. */ 188 DEVMETHOD(device_probe, vtpci_probe), 189 DEVMETHOD(device_attach, vtpci_attach), 190 DEVMETHOD(device_detach, vtpci_detach), 191 DEVMETHOD(device_suspend, vtpci_suspend), 192 DEVMETHOD(device_resume, vtpci_resume), 193 DEVMETHOD(device_shutdown, vtpci_shutdown), 194 195 /* Bus interface. */ 196 DEVMETHOD(bus_driver_added, vtpci_driver_added), 197 DEVMETHOD(bus_child_detached, vtpci_child_detached), 198 DEVMETHOD(bus_read_ivar, vtpci_read_ivar), 199 DEVMETHOD(bus_write_ivar, vtpci_write_ivar), 200 201 /* VirtIO bus interface. */ 202 DEVMETHOD(virtio_bus_negotiate_features, vtpci_negotiate_features), 203 DEVMETHOD(virtio_bus_with_feature, vtpci_with_feature), 204 DEVMETHOD(virtio_bus_alloc_virtqueues, vtpci_alloc_virtqueues), 205 DEVMETHOD(virtio_bus_setup_intr, vtpci_setup_intr), 206 DEVMETHOD(virtio_bus_stop, vtpci_stop), 207 DEVMETHOD(virtio_bus_reinit, vtpci_reinit), 208 DEVMETHOD(virtio_bus_reinit_complete, vtpci_reinit_complete), 209 DEVMETHOD(virtio_bus_notify_vq, vtpci_notify_virtqueue), 210 DEVMETHOD(virtio_bus_read_device_config, vtpci_read_dev_config), 211 DEVMETHOD(virtio_bus_write_device_config, vtpci_write_dev_config), 212 213 DEVMETHOD_END 214}; 215 216static driver_t vtpci_driver = { 217 "virtio_pci", 218 vtpci_methods, 219 sizeof(struct vtpci_softc) 220}; 221 222devclass_t vtpci_devclass; 223 224DRIVER_MODULE(virtio_pci, pci, vtpci_driver, vtpci_devclass, 0, 0); 225MODULE_VERSION(virtio_pci, 1); 226MODULE_DEPEND(virtio_pci, pci, 1, 1, 1); 227MODULE_DEPEND(virtio_pci, virtio, 1, 1, 1); 228 229static int 230vtpci_probe(device_t dev) 231{ 232 char desc[36]; 233 const char *name; 234 235 if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID) 236 return (ENXIO); 237 238 if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN || 239 pci_get_device(dev) > VIRTIO_PCI_DEVICEID_MAX) 
240 return (ENXIO); 241 242 if (pci_get_revid(dev) != VIRTIO_PCI_ABI_VERSION) 243 return (ENXIO); 244 245 name = virtio_device_name(pci_get_subdevice(dev)); 246 if (name == NULL) 247 name = "Unknown"; 248 249 snprintf(desc, sizeof(desc), "VirtIO PCI %s adapter", name); 250 device_set_desc_copy(dev, desc); 251 252 return (BUS_PROBE_DEFAULT); 253} 254 255static int 256vtpci_attach(device_t dev) 257{ 258 struct vtpci_softc *sc; 259 device_t child; 260 int rid; 261 262 sc = device_get_softc(dev); 263 sc->vtpci_dev = dev; 264 265 pci_enable_busmaster(dev); 266 267 rid = PCIR_BAR(0); 268 sc->vtpci_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, 269 RF_ACTIVE); 270 if (sc->vtpci_res == NULL) { 271 device_printf(dev, "cannot map I/O space\n"); 272 return (ENXIO); 273 } 274 275 if (pci_find_cap(dev, PCIY_MSI, NULL) != 0) 276 sc->vtpci_flags |= VTPCI_FLAG_NO_MSI; 277 278 if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) { 279 rid = PCIR_BAR(1); 280 sc->vtpci_msix_res = bus_alloc_resource_any(dev, 281 SYS_RES_MEMORY, &rid, RF_ACTIVE); 282 } 283 284 if (sc->vtpci_msix_res == NULL) 285 sc->vtpci_flags |= VTPCI_FLAG_NO_MSIX; 286 287 vtpci_reset(sc); 288 289 /* Tell the host we've noticed this device. 
*/ 290 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); 291 292 if ((child = device_add_child(dev, NULL, -1)) == NULL) { 293 device_printf(dev, "cannot create child device\n"); 294 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED); 295 vtpci_detach(dev); 296 return (ENOMEM); 297 } 298 299 sc->vtpci_child_dev = child; 300 vtpci_probe_and_attach_child(sc); 301 302 return (0); 303} 304 305static int 306vtpci_detach(device_t dev) 307{ 308 struct vtpci_softc *sc; 309 device_t child; 310 int error; 311 312 sc = device_get_softc(dev); 313 314 if ((child = sc->vtpci_child_dev) != NULL) { 315 error = device_delete_child(dev, child); 316 if (error) 317 return (error); 318 sc->vtpci_child_dev = NULL; 319 } 320 321 vtpci_reset(sc); 322 323 if (sc->vtpci_msix_res != NULL) { 324 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(1), 325 sc->vtpci_msix_res); 326 sc->vtpci_msix_res = NULL; 327 } 328 329 if (sc->vtpci_res != NULL) { 330 bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0), 331 sc->vtpci_res); 332 sc->vtpci_res = NULL; 333 } 334 335 return (0); 336} 337 338static int 339vtpci_suspend(device_t dev) 340{ 341 342 return (bus_generic_suspend(dev)); 343} 344 345static int 346vtpci_resume(device_t dev) 347{ 348 349 return (bus_generic_resume(dev)); 350} 351 352static int 353vtpci_shutdown(device_t dev) 354{ 355 356 (void) bus_generic_shutdown(dev); 357 /* Forcibly stop the host device. 
*/ 358 vtpci_stop(dev); 359 360 return (0); 361} 362 363static void 364vtpci_driver_added(device_t dev, driver_t *driver) 365{ 366 struct vtpci_softc *sc; 367 368 sc = device_get_softc(dev); 369 370 vtpci_probe_and_attach_child(sc); 371} 372 373static void 374vtpci_child_detached(device_t dev, device_t child) 375{ 376 struct vtpci_softc *sc; 377 378 sc = device_get_softc(dev); 379 380 vtpci_reset(sc); 381 vtpci_release_child_resources(sc); 382} 383 384static int 385vtpci_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) 386{ 387 struct vtpci_softc *sc; 388 389 sc = device_get_softc(dev); 390 391 if (sc->vtpci_child_dev != child) 392 return (ENOENT); 393 394 switch (index) { 395 case VIRTIO_IVAR_DEVTYPE: 396 case VIRTIO_IVAR_SUBDEVICE: 397 *result = pci_get_subdevice(dev); 398 break; 399 case VIRTIO_IVAR_VENDOR: 400 *result = pci_get_vendor(dev); 401 break; 402 case VIRTIO_IVAR_DEVICE: 403 *result = pci_get_device(dev); 404 break; 405 case VIRTIO_IVAR_SUBVENDOR: 406 *result = pci_get_subdevice(dev); 407 break; 408 default: 409 return (ENOENT); 410 } 411 412 return (0); 413} 414 415static int 416vtpci_write_ivar(device_t dev, device_t child, int index, uintptr_t value) 417{ 418 struct vtpci_softc *sc; 419 420 sc = device_get_softc(dev); 421 422 if (sc->vtpci_child_dev != child) 423 return (ENOENT); 424 425 switch (index) { 426 case VIRTIO_IVAR_FEATURE_DESC: 427 sc->vtpci_child_feat_desc = (void *) value; 428 break; 429 default: 430 return (ENOENT); 431 } 432 433 return (0); 434} 435 436static uint64_t 437vtpci_negotiate_features(device_t dev, uint64_t child_features) 438{ 439 struct vtpci_softc *sc; 440 uint64_t host_features, features; 441 442 sc = device_get_softc(dev); 443 444 host_features = vtpci_read_config_4(sc, VIRTIO_PCI_HOST_FEATURES); 445 vtpci_describe_features(sc, "host", host_features); 446 447 /* 448 * Limit negotiated features to what the driver, virtqueue, and 449 * host all support. 
450 */ 451 features = host_features & child_features; 452 features = virtqueue_filter_features(features); 453 sc->vtpci_features = features; 454 455 vtpci_describe_features(sc, "negotiated", features); 456 vtpci_write_config_4(sc, VIRTIO_PCI_GUEST_FEATURES, features); 457 458 return (features); 459} 460 461static int 462vtpci_with_feature(device_t dev, uint64_t feature) 463{ 464 struct vtpci_softc *sc; 465 466 sc = device_get_softc(dev); 467 468 return ((sc->vtpci_features & feature) != 0); 469} 470 471static int 472vtpci_alloc_virtqueues(device_t dev, int flags, int nvqs, 473 struct vq_alloc_info *vq_info) 474{ 475 struct vtpci_softc *sc; 476 struct virtqueue *vq; 477 struct vtpci_virtqueue *vqx; 478 struct vq_alloc_info *info; 479 int idx, error; 480 uint16_t size; 481 482 sc = device_get_softc(dev); 483 error = 0; 484 485 if (sc->vtpci_nvqs != 0) 486 return (EALREADY); 487 if (nvqs <= 0 || nvqs > VIRTIO_MAX_VIRTQUEUES) 488 return (EINVAL); 489
|
490 if (flags & VIRTIO_ALLOC_VQS_DISABLE_MSIX) 491 sc->vtpci_flags |= VTPCI_FLAG_NO_MSIX; 492
| |
493 for (idx = 0; idx < nvqs; idx++) { 494 vqx = &sc->vtpci_vqx[idx]; 495 info = &vq_info[idx]; 496 497 vtpci_select_virtqueue(sc, idx); 498 size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM); 499 500 error = virtqueue_alloc(dev, idx, size, VIRTIO_PCI_VRING_ALIGN, 501 0xFFFFFFFFUL, info, &vq); 502 if (error) { 503 device_printf(dev, 504 "cannot allocate virtqueue %d: %d\n", idx, error); 505 break; 506 } 507 508 vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, 509 virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT); 510 511 vqx->vq = *info->vqai_vq = vq; 512 vqx->no_intr = info->vqai_intr == NULL; 513 514 sc->vtpci_nvqs++; 515 } 516 517 return (error); 518} 519 520static int 521vtpci_setup_intr(device_t dev, enum intr_type type) 522{ 523 struct vtpci_softc *sc; 524 int attempt, error; 525 526 sc = device_get_softc(dev); 527 528 for (attempt = 0; attempt < 5; attempt++) { 529 /* 530 * Start with the most desirable interrupt configuration and 531 * fallback towards less desirable ones. 532 */ 533 switch (attempt) { 534 case 0: 535 error = vtpci_alloc_intr_msix_pervq(sc); 536 break; 537 case 1: 538 error = vtpci_alloc_intr_msix_shared(sc); 539 break; 540 case 2: 541 error = vtpci_alloc_intr_msi(sc); 542 break; 543 case 3: 544 error = vtpci_alloc_intr_legacy(sc); 545 break; 546 default: 547 device_printf(dev, 548 "exhausted all interrupt allocation attempts\n"); 549 return (ENXIO); 550 } 551 552 if (error == 0 && vtpci_setup_interrupts(sc, type) == 0) 553 break; 554 555 vtpci_cleanup_setup_intr_attempt(sc); 556 } 557 558 if (bootverbose) { 559 if (sc->vtpci_flags & VTPCI_FLAG_LEGACY) 560 device_printf(dev, "using legacy interrupt\n"); 561 else if (sc->vtpci_flags & VTPCI_FLAG_MSI) 562 device_printf(dev, "using MSI interrupt\n"); 563 else if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) 564 device_printf(dev, "using shared MSIX interrupts\n"); 565 else 566 device_printf(dev, "using per VQ MSIX interrupts\n"); 567 } 568 569 return (0); 570} 571 572static void 
573vtpci_stop(device_t dev) 574{ 575 576 vtpci_reset(device_get_softc(dev)); 577} 578 579static int 580vtpci_reinit(device_t dev, uint64_t features) 581{ 582 struct vtpci_softc *sc; 583 int idx, error; 584 585 sc = device_get_softc(dev); 586 587 /* 588 * Redrive the device initialization. This is a bit of an abuse of 589 * the specification, but VirtualBox, QEMU/KVM, and BHyVe seem to 590 * play nice. 591 * 592 * We do not allow the host device to change from what was originally 593 * negotiated beyond what the guest driver changed. MSIX state should 594 * not change, number of virtqueues and their size remain the same, etc. 595 * This will need to be rethought when we want to support migration. 596 */ 597 598 if (vtpci_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET) 599 vtpci_stop(dev); 600 601 /* 602 * Quickly drive the status through ACK and DRIVER. The device 603 * does not become usable again until vtpci_reinit_complete(). 604 */ 605 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); 606 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER); 607 608 vtpci_negotiate_features(dev, features); 609 610 for (idx = 0; idx < sc->vtpci_nvqs; idx++) { 611 error = vtpci_reinit_virtqueue(sc, idx); 612 if (error) 613 return (error); 614 } 615 616 if (sc->vtpci_flags & VTPCI_FLAG_MSIX) { 617 error = vtpci_set_host_msix_vectors(sc); 618 if (error) 619 return (error); 620 } 621 622 return (0); 623} 624 625static void 626vtpci_reinit_complete(device_t dev) 627{ 628 629 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK); 630} 631 632static void 633vtpci_notify_virtqueue(device_t dev, uint16_t queue) 634{ 635 struct vtpci_softc *sc; 636 637 sc = device_get_softc(dev); 638 639 vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_NOTIFY, queue); 640} 641 642static uint8_t 643vtpci_get_status(device_t dev) 644{ 645 struct vtpci_softc *sc; 646 647 sc = device_get_softc(dev); 648 649 return (vtpci_read_config_1(sc, VIRTIO_PCI_STATUS)); 650} 651 652static void 653vtpci_set_status(device_t dev, 
uint8_t status) 654{ 655 struct vtpci_softc *sc; 656 657 sc = device_get_softc(dev); 658 659 if (status != VIRTIO_CONFIG_STATUS_RESET) 660 status |= vtpci_get_status(dev); 661 662 vtpci_write_config_1(sc, VIRTIO_PCI_STATUS, status); 663} 664 665static void 666vtpci_read_dev_config(device_t dev, bus_size_t offset, 667 void *dst, int length) 668{ 669 struct vtpci_softc *sc; 670 bus_size_t off; 671 uint8_t *d; 672 int size; 673 674 sc = device_get_softc(dev); 675 off = VIRTIO_PCI_CONFIG(sc) + offset; 676 677 for (d = dst; length > 0; d += size, off += size, length -= size) { 678 if (length >= 4) { 679 size = 4; 680 *(uint32_t *)d = vtpci_read_config_4(sc, off); 681 } else if (length >= 2) { 682 size = 2; 683 *(uint16_t *)d = vtpci_read_config_2(sc, off); 684 } else { 685 size = 1; 686 *d = vtpci_read_config_1(sc, off); 687 } 688 } 689} 690 691static void 692vtpci_write_dev_config(device_t dev, bus_size_t offset, 693 void *src, int length) 694{ 695 struct vtpci_softc *sc; 696 bus_size_t off; 697 uint8_t *s; 698 int size; 699 700 sc = device_get_softc(dev); 701 off = VIRTIO_PCI_CONFIG(sc) + offset; 702 703 for (s = src; length > 0; s += size, off += size, length -= size) { 704 if (length >= 4) { 705 size = 4; 706 vtpci_write_config_4(sc, off, *(uint32_t *)s); 707 } else if (length >= 2) { 708 size = 2; 709 vtpci_write_config_2(sc, off, *(uint16_t *)s); 710 } else { 711 size = 1; 712 vtpci_write_config_1(sc, off, *s); 713 } 714 } 715} 716 717static void 718vtpci_describe_features(struct vtpci_softc *sc, const char *msg, 719 uint64_t features) 720{ 721 device_t dev, child; 722 723 dev = sc->vtpci_dev; 724 child = sc->vtpci_child_dev; 725 726 if (device_is_attached(child) && bootverbose == 0) 727 return; 728 729 virtio_describe(dev, msg, features, sc->vtpci_child_feat_desc); 730} 731 732static void 733vtpci_probe_and_attach_child(struct vtpci_softc *sc) 734{ 735 device_t dev, child; 736 737 dev = sc->vtpci_dev; 738 child = sc->vtpci_child_dev; 739 740 if (child == NULL) 
741 return; 742 743 if (device_get_state(child) != DS_NOTPRESENT) 744 return; 745 746 if (device_probe(child) != 0) 747 return; 748 749 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER); 750 if (device_attach(child) != 0) { 751 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED); 752 vtpci_reset(sc); 753 vtpci_release_child_resources(sc); 754 /* Reset status for future attempt. */ 755 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); 756 } else 757 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK); 758} 759 760static int 761vtpci_alloc_msix(struct vtpci_softc *sc, int nvectors) 762{ 763 device_t dev; 764 int nmsix, cnt, required; 765 766 dev = sc->vtpci_dev; 767 768 /* Allocate an additional vector for the config changes. */ 769 required = nvectors + 1; 770 771 nmsix = pci_msix_count(dev); 772 if (nmsix < required) 773 return (1); 774 775 cnt = required; 776 if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) { 777 sc->vtpci_nintr_res = required; 778 return (0); 779 } 780 781 pci_release_msi(dev); 782 783 return (1); 784} 785 786static int 787vtpci_alloc_msi(struct vtpci_softc *sc) 788{ 789 device_t dev; 790 int nmsi, cnt, required; 791 792 dev = sc->vtpci_dev; 793 required = 1; 794 795 nmsi = pci_msi_count(dev); 796 if (nmsi < required) 797 return (1); 798 799 cnt = required; 800 if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required) { 801 sc->vtpci_nintr_res = required; 802 return (0); 803 } 804 805 pci_release_msi(dev); 806 807 return (1); 808} 809 810static int 811vtpci_alloc_intr_msix_pervq(struct vtpci_softc *sc) 812{ 813 int i, nvectors, error; 814 815 if (vtpci_disable_msix != 0 || 816 sc->vtpci_flags & VTPCI_FLAG_NO_MSIX) 817 return (ENOTSUP); 818 819 for (nvectors = 0, i = 0; i < sc->vtpci_nvqs; i++) { 820 if (sc->vtpci_vqx[i].no_intr == 0) 821 nvectors++; 822 } 823 824 error = vtpci_alloc_msix(sc, nvectors); 825 if (error) 826 return (error); 827 828 sc->vtpci_flags |= VTPCI_FLAG_MSIX; 829 830 return (0); 831} 832 833static int 
834vtpci_alloc_intr_msix_shared(struct vtpci_softc *sc) 835{ 836 int error; 837 838 if (vtpci_disable_msix != 0 || 839 sc->vtpci_flags & VTPCI_FLAG_NO_MSIX) 840 return (ENOTSUP); 841 842 error = vtpci_alloc_msix(sc, 1); 843 if (error) 844 return (error); 845 846 sc->vtpci_flags |= VTPCI_FLAG_MSIX | VTPCI_FLAG_SHARED_MSIX; 847 848 return (0); 849} 850 851static int 852vtpci_alloc_intr_msi(struct vtpci_softc *sc) 853{ 854 int error; 855 856 /* Only BHyVe supports MSI. */ 857 if (sc->vtpci_flags & VTPCI_FLAG_NO_MSI) 858 return (ENOTSUP); 859 860 error = vtpci_alloc_msi(sc); 861 if (error) 862 return (error); 863 864 sc->vtpci_flags |= VTPCI_FLAG_MSI; 865 866 return (0); 867} 868 869static int 870vtpci_alloc_intr_legacy(struct vtpci_softc *sc) 871{ 872 873 sc->vtpci_flags |= VTPCI_FLAG_LEGACY; 874 sc->vtpci_nintr_res = 1; 875 876 return (0); 877} 878 879static int 880vtpci_alloc_intr_resources(struct vtpci_softc *sc) 881{ 882 device_t dev; 883 struct resource *irq; 884 struct vtpci_virtqueue *vqx; 885 int i, rid, flags, res_idx; 886 887 dev = sc->vtpci_dev; 888 889 if (sc->vtpci_flags & VTPCI_FLAG_LEGACY) { 890 rid = 0; 891 flags = RF_ACTIVE | RF_SHAREABLE; 892 } else { 893 rid = 1; 894 flags = RF_ACTIVE; 895 } 896 897 for (i = 0; i < sc->vtpci_nintr_res; i++) { 898 irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, flags); 899 if (irq == NULL) 900 return (ENXIO); 901 902 sc->vtpci_intr_res[i].irq = irq; 903 sc->vtpci_intr_res[i].rid = rid++; 904 } 905 906 /* 907 * Map the virtqueue into the correct index in vq_intr_res[]. The 908 * first index is reserved for configuration changed notifications. 
909 */ 910 for (i = 0, res_idx = 1; i < sc->vtpci_nvqs; i++) { 911 vqx = &sc->vtpci_vqx[i]; 912 913 if (sc->vtpci_flags & VTPCI_FLAG_MSIX) { 914 if (vqx->no_intr != 0) 915 vqx->ires_idx = -1; 916 else if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) 917 vqx->ires_idx = res_idx; 918 else 919 vqx->ires_idx = res_idx++; 920 } else 921 vqx->ires_idx = -1; 922 } 923 924 return (0); 925} 926 927static int 928vtpci_setup_legacy_interrupt(struct vtpci_softc *sc, enum intr_type type) 929{ 930 device_t dev; 931 struct vtpci_intr_resource *ires; 932 int error; 933 934 dev = sc->vtpci_dev; 935 936 ires = &sc->vtpci_intr_res[0]; 937 error = bus_setup_intr(dev, ires->irq, type, NULL, vtpci_legacy_intr, 938 sc, &ires->intrhand); 939 940 return (error); 941} 942 943static int 944vtpci_setup_msix_interrupts(struct vtpci_softc *sc, enum intr_type type) 945{ 946 device_t dev; 947 struct vtpci_intr_resource *ires; 948 struct vtpci_virtqueue *vqx; 949 int i, error; 950 951 dev = sc->vtpci_dev; 952 953 /* 954 * The first MSIX vector is used for configuration changed interrupts. 
955 */ 956 ires = &sc->vtpci_intr_res[0]; 957 error = bus_setup_intr(dev, ires->irq, type, NULL, vtpci_config_intr, 958 sc, &ires->intrhand); 959 if (error) 960 return (error); 961 962 if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) { 963 ires = &sc->vtpci_intr_res[1]; 964 965 error = bus_setup_intr(dev, ires->irq, type, 966 vtpci_vq_shared_intr_filter, vtpci_vq_shared_intr, sc, 967 &ires->intrhand); 968 } else { 969 for (i = 0; i < sc->vtpci_nvqs; i++) { 970 vqx = &sc->vtpci_vqx[i]; 971 if (vqx->ires_idx < 1) 972 continue; 973 974 ires = &sc->vtpci_intr_res[vqx->ires_idx]; 975 error = bus_setup_intr(dev, ires->irq, type, 976 vtpci_vq_intr_filter, vtpci_vq_intr, vqx->vq, 977 &ires->intrhand); 978 if (error) 979 break; 980 } 981 } 982 983 if (error == 0) 984 error = vtpci_set_host_msix_vectors(sc); 985 986 return (error); 987} 988 989static int 990vtpci_setup_interrupts(struct vtpci_softc *sc, enum intr_type type) 991{ 992 int error; 993 994 type |= INTR_MPSAFE; 995 KASSERT(sc->vtpci_flags & VTPCI_FLAG_ITYPE_MASK, 996 ("no interrupt type selected: %#x", sc->vtpci_flags)); 997 998 error = vtpci_alloc_intr_resources(sc); 999 if (error) 1000 return (error); 1001 1002 if (sc->vtpci_flags & VTPCI_FLAG_LEGACY) 1003 error = vtpci_setup_legacy_interrupt(sc, type); 1004 else if (sc->vtpci_flags & VTPCI_FLAG_MSI) 1005 error = vtpci_setup_msi_interrupt(sc, type); 1006 else 1007 error = vtpci_setup_msix_interrupts(sc, type); 1008 1009 return (error); 1010} 1011 1012static int 1013vtpci_register_msix_vector(struct vtpci_softc *sc, int offset, int res_idx) 1014{ 1015 device_t dev; 1016 uint16_t vector, rdvector; 1017 1018 dev = sc->vtpci_dev; 1019 1020 if (res_idx != -1) { 1021 /* Map from guest rid to host vector. */ 1022 vector = sc->vtpci_intr_res[res_idx].rid - 1; 1023 } else 1024 vector = VIRTIO_MSI_NO_VECTOR; 1025 1026 /* 1027 * Assert the first resource is always used for the configuration 1028 * changed interrupts. 
1029 */ 1030 if (res_idx == 0) { 1031 KASSERT(vector == 0 && offset == VIRTIO_MSI_CONFIG_VECTOR, 1032 ("bad first res use vector:%d offset:%d", vector, offset)); 1033 } else 1034 KASSERT(offset == VIRTIO_MSI_QUEUE_VECTOR, ("bad offset")); 1035 1036 vtpci_write_config_2(sc, offset, vector); 1037 1038 /* Read vector to determine if the host had sufficient resources. */ 1039 rdvector = vtpci_read_config_2(sc, offset); 1040 if (rdvector != vector) { 1041 device_printf(dev, 1042 "insufficient host resources for MSIX interrupts\n"); 1043 return (ENODEV); 1044 } 1045 1046 return (0); 1047} 1048 1049static int 1050vtpci_set_host_msix_vectors(struct vtpci_softc *sc) 1051{ 1052 struct vtpci_virtqueue *vqx; 1053 int idx, error; 1054 1055 error = vtpci_register_msix_vector(sc, VIRTIO_MSI_CONFIG_VECTOR, 0); 1056 if (error) 1057 return (error); 1058 1059 for (idx = 0; idx < sc->vtpci_nvqs; idx++) { 1060 vqx = &sc->vtpci_vqx[idx]; 1061 1062 vtpci_select_virtqueue(sc, idx); 1063 error = vtpci_register_msix_vector(sc, VIRTIO_MSI_QUEUE_VECTOR, 1064 vqx->ires_idx); 1065 if (error) 1066 return (error); 1067 } 1068 1069 return (0); 1070} 1071 1072static int 1073vtpci_reinit_virtqueue(struct vtpci_softc *sc, int idx) 1074{ 1075 struct vtpci_virtqueue *vqx; 1076 struct virtqueue *vq; 1077 int error; 1078 uint16_t size; 1079 1080 vqx = &sc->vtpci_vqx[idx]; 1081 vq = vqx->vq; 1082 1083 KASSERT(vq != NULL, ("vq %d not allocated", idx)); 1084 1085 vtpci_select_virtqueue(sc, idx); 1086 size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM); 1087 1088 error = virtqueue_reinit(vq, size); 1089 if (error) 1090 return (error); 1091 1092 vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, 1093 virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT); 1094 1095 return (0); 1096} 1097 1098static void 1099vtpci_free_interrupts(struct vtpci_softc *sc) 1100{ 1101 device_t dev; 1102 struct vtpci_intr_resource *ires; 1103 int i; 1104 1105 dev = sc->vtpci_dev; 1106 1107 for (i = 0; i < sc->vtpci_nintr_res; i++) { 1108 
ires = &sc->vtpci_intr_res[i]; 1109 1110 if (ires->intrhand != NULL) { 1111 bus_teardown_intr(dev, ires->irq, ires->intrhand); 1112 ires->intrhand = NULL; 1113 } 1114 1115 if (ires->irq != NULL) { 1116 bus_release_resource(dev, SYS_RES_IRQ, ires->rid, 1117 ires->irq); 1118 ires->irq = NULL; 1119 } 1120 1121 ires->rid = -1; 1122 } 1123 1124 if (sc->vtpci_flags & (VTPCI_FLAG_MSI | VTPCI_FLAG_MSIX)) 1125 pci_release_msi(dev); 1126 1127 sc->vtpci_nintr_res = 0; 1128 sc->vtpci_flags &= ~VTPCI_FLAG_ITYPE_MASK; 1129} 1130 1131static void 1132vtpci_free_virtqueues(struct vtpci_softc *sc) 1133{ 1134 struct vtpci_virtqueue *vqx; 1135 int i; 1136 1137 for (i = 0; i < sc->vtpci_nvqs; i++) { 1138 vqx = &sc->vtpci_vqx[i]; 1139 1140 virtqueue_free(vqx->vq); 1141 vqx->vq = NULL; 1142 } 1143 1144 sc->vtpci_nvqs = 0; 1145} 1146 1147static void 1148vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *sc) 1149{ 1150 int idx; 1151 1152 if (sc->vtpci_flags & VTPCI_FLAG_MSIX) { 1153 vtpci_write_config_2(sc, VIRTIO_MSI_CONFIG_VECTOR, 1154 VIRTIO_MSI_NO_VECTOR); 1155 1156 for (idx = 0; idx < sc->vtpci_nvqs; idx++) { 1157 vtpci_select_virtqueue(sc, idx); 1158 vtpci_write_config_2(sc, VIRTIO_MSI_QUEUE_VECTOR, 1159 VIRTIO_MSI_NO_VECTOR); 1160 } 1161 } 1162 1163 vtpci_free_interrupts(sc); 1164} 1165 1166static void 1167vtpci_release_child_resources(struct vtpci_softc *sc) 1168{ 1169 1170 vtpci_free_interrupts(sc); 1171 vtpci_free_virtqueues(sc); 1172} 1173 1174static void 1175vtpci_reset(struct vtpci_softc *sc) 1176{ 1177 1178 /* 1179 * Setting the status to RESET sets the host device to 1180 * the original, uninitialized state. 
1181 */ 1182 vtpci_set_status(sc->vtpci_dev, VIRTIO_CONFIG_STATUS_RESET); 1183} 1184 1185static void 1186vtpci_select_virtqueue(struct vtpci_softc *sc, int idx) 1187{ 1188 1189 vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_SEL, idx); 1190} 1191 1192static void 1193vtpci_legacy_intr(void *xsc) 1194{ 1195 struct vtpci_softc *sc; 1196 struct vtpci_virtqueue *vqx; 1197 int i; 1198 uint8_t isr; 1199 1200 sc = xsc; 1201 vqx = &sc->vtpci_vqx[0]; 1202 1203 /* Reading the ISR also clears it. */ 1204 isr = vtpci_read_config_1(sc, VIRTIO_PCI_ISR); 1205 1206 if (isr & VIRTIO_PCI_ISR_CONFIG) 1207 vtpci_config_intr(sc); 1208 1209 if (isr & VIRTIO_PCI_ISR_INTR) { 1210 for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) 1211 virtqueue_intr(vqx->vq); 1212 } 1213} 1214 1215static int 1216vtpci_vq_shared_intr_filter(void *xsc) 1217{ 1218 struct vtpci_softc *sc; 1219 struct vtpci_virtqueue *vqx; 1220 int i, rc; 1221 1222 rc = 0; 1223 sc = xsc; 1224 vqx = &sc->vtpci_vqx[0]; 1225 1226 for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) 1227 rc |= virtqueue_intr_filter(vqx->vq); 1228 1229 return (rc ? FILTER_SCHEDULE_THREAD : FILTER_STRAY); 1230} 1231 1232static void 1233vtpci_vq_shared_intr(void *xsc) 1234{ 1235 struct vtpci_softc *sc; 1236 struct vtpci_virtqueue *vqx; 1237 int i; 1238 1239 sc = xsc; 1240 vqx = &sc->vtpci_vqx[0]; 1241 1242 for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) 1243 virtqueue_intr(vqx->vq); 1244} 1245 1246static int 1247vtpci_vq_intr_filter(void *xvq) 1248{ 1249 struct virtqueue *vq; 1250 int rc; 1251 1252 vq = xvq; 1253 rc = virtqueue_intr_filter(vq); 1254 1255 return (rc ? FILTER_SCHEDULE_THREAD : FILTER_STRAY); 1256} 1257 1258static void 1259vtpci_vq_intr(void *xvq) 1260{ 1261 struct virtqueue *vq; 1262 1263 vq = xvq; 1264 virtqueue_intr(vq); 1265} 1266 1267static void 1268vtpci_config_intr(void *xsc) 1269{ 1270 struct vtpci_softc *sc; 1271 device_t child; 1272 1273 sc = xsc; 1274 child = sc->vtpci_child_dev; 1275 1276 if (child != NULL) 1277 VIRTIO_CONFIG_CHANGE(child); 1278}
| 490 for (idx = 0; idx < nvqs; idx++) { 491 vqx = &sc->vtpci_vqx[idx]; 492 info = &vq_info[idx]; 493 494 vtpci_select_virtqueue(sc, idx); 495 size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM); 496 497 error = virtqueue_alloc(dev, idx, size, VIRTIO_PCI_VRING_ALIGN, 498 0xFFFFFFFFUL, info, &vq); 499 if (error) { 500 device_printf(dev, 501 "cannot allocate virtqueue %d: %d\n", idx, error); 502 break; 503 } 504 505 vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, 506 virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT); 507 508 vqx->vq = *info->vqai_vq = vq; 509 vqx->no_intr = info->vqai_intr == NULL; 510 511 sc->vtpci_nvqs++; 512 } 513 514 return (error); 515} 516 517static int 518vtpci_setup_intr(device_t dev, enum intr_type type) 519{ 520 struct vtpci_softc *sc; 521 int attempt, error; 522 523 sc = device_get_softc(dev); 524 525 for (attempt = 0; attempt < 5; attempt++) { 526 /* 527 * Start with the most desirable interrupt configuration and 528 * fallback towards less desirable ones. 529 */ 530 switch (attempt) { 531 case 0: 532 error = vtpci_alloc_intr_msix_pervq(sc); 533 break; 534 case 1: 535 error = vtpci_alloc_intr_msix_shared(sc); 536 break; 537 case 2: 538 error = vtpci_alloc_intr_msi(sc); 539 break; 540 case 3: 541 error = vtpci_alloc_intr_legacy(sc); 542 break; 543 default: 544 device_printf(dev, 545 "exhausted all interrupt allocation attempts\n"); 546 return (ENXIO); 547 } 548 549 if (error == 0 && vtpci_setup_interrupts(sc, type) == 0) 550 break; 551 552 vtpci_cleanup_setup_intr_attempt(sc); 553 } 554 555 if (bootverbose) { 556 if (sc->vtpci_flags & VTPCI_FLAG_LEGACY) 557 device_printf(dev, "using legacy interrupt\n"); 558 else if (sc->vtpci_flags & VTPCI_FLAG_MSI) 559 device_printf(dev, "using MSI interrupt\n"); 560 else if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) 561 device_printf(dev, "using shared MSIX interrupts\n"); 562 else 563 device_printf(dev, "using per VQ MSIX interrupts\n"); 564 } 565 566 return (0); 567} 568 569static void 
570vtpci_stop(device_t dev) 571{ 572 573 vtpci_reset(device_get_softc(dev)); 574} 575 576static int 577vtpci_reinit(device_t dev, uint64_t features) 578{ 579 struct vtpci_softc *sc; 580 int idx, error; 581 582 sc = device_get_softc(dev); 583 584 /* 585 * Redrive the device initialization. This is a bit of an abuse of 586 * the specification, but VirtualBox, QEMU/KVM, and BHyVe seem to 587 * play nice. 588 * 589 * We do not allow the host device to change from what was originally 590 * negotiated beyond what the guest driver changed. MSIX state should 591 * not change, number of virtqueues and their size remain the same, etc. 592 * This will need to be rethought when we want to support migration. 593 */ 594 595 if (vtpci_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET) 596 vtpci_stop(dev); 597 598 /* 599 * Quickly drive the status through ACK and DRIVER. The device 600 * does not become usable again until vtpci_reinit_complete(). 601 */ 602 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); 603 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER); 604 605 vtpci_negotiate_features(dev, features); 606 607 for (idx = 0; idx < sc->vtpci_nvqs; idx++) { 608 error = vtpci_reinit_virtqueue(sc, idx); 609 if (error) 610 return (error); 611 } 612 613 if (sc->vtpci_flags & VTPCI_FLAG_MSIX) { 614 error = vtpci_set_host_msix_vectors(sc); 615 if (error) 616 return (error); 617 } 618 619 return (0); 620} 621 622static void 623vtpci_reinit_complete(device_t dev) 624{ 625 626 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK); 627} 628 629static void 630vtpci_notify_virtqueue(device_t dev, uint16_t queue) 631{ 632 struct vtpci_softc *sc; 633 634 sc = device_get_softc(dev); 635 636 vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_NOTIFY, queue); 637} 638 639static uint8_t 640vtpci_get_status(device_t dev) 641{ 642 struct vtpci_softc *sc; 643 644 sc = device_get_softc(dev); 645 646 return (vtpci_read_config_1(sc, VIRTIO_PCI_STATUS)); 647} 648 649static void 650vtpci_set_status(device_t dev, 
uint8_t status) 651{ 652 struct vtpci_softc *sc; 653 654 sc = device_get_softc(dev); 655 656 if (status != VIRTIO_CONFIG_STATUS_RESET) 657 status |= vtpci_get_status(dev); 658 659 vtpci_write_config_1(sc, VIRTIO_PCI_STATUS, status); 660} 661 662static void 663vtpci_read_dev_config(device_t dev, bus_size_t offset, 664 void *dst, int length) 665{ 666 struct vtpci_softc *sc; 667 bus_size_t off; 668 uint8_t *d; 669 int size; 670 671 sc = device_get_softc(dev); 672 off = VIRTIO_PCI_CONFIG(sc) + offset; 673 674 for (d = dst; length > 0; d += size, off += size, length -= size) { 675 if (length >= 4) { 676 size = 4; 677 *(uint32_t *)d = vtpci_read_config_4(sc, off); 678 } else if (length >= 2) { 679 size = 2; 680 *(uint16_t *)d = vtpci_read_config_2(sc, off); 681 } else { 682 size = 1; 683 *d = vtpci_read_config_1(sc, off); 684 } 685 } 686} 687 688static void 689vtpci_write_dev_config(device_t dev, bus_size_t offset, 690 void *src, int length) 691{ 692 struct vtpci_softc *sc; 693 bus_size_t off; 694 uint8_t *s; 695 int size; 696 697 sc = device_get_softc(dev); 698 off = VIRTIO_PCI_CONFIG(sc) + offset; 699 700 for (s = src; length > 0; s += size, off += size, length -= size) { 701 if (length >= 4) { 702 size = 4; 703 vtpci_write_config_4(sc, off, *(uint32_t *)s); 704 } else if (length >= 2) { 705 size = 2; 706 vtpci_write_config_2(sc, off, *(uint16_t *)s); 707 } else { 708 size = 1; 709 vtpci_write_config_1(sc, off, *s); 710 } 711 } 712} 713 714static void 715vtpci_describe_features(struct vtpci_softc *sc, const char *msg, 716 uint64_t features) 717{ 718 device_t dev, child; 719 720 dev = sc->vtpci_dev; 721 child = sc->vtpci_child_dev; 722 723 if (device_is_attached(child) && bootverbose == 0) 724 return; 725 726 virtio_describe(dev, msg, features, sc->vtpci_child_feat_desc); 727} 728 729static void 730vtpci_probe_and_attach_child(struct vtpci_softc *sc) 731{ 732 device_t dev, child; 733 734 dev = sc->vtpci_dev; 735 child = sc->vtpci_child_dev; 736 737 if (child == NULL) 
738 return; 739 740 if (device_get_state(child) != DS_NOTPRESENT) 741 return; 742 743 if (device_probe(child) != 0) 744 return; 745 746 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER); 747 if (device_attach(child) != 0) { 748 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED); 749 vtpci_reset(sc); 750 vtpci_release_child_resources(sc); 751 /* Reset status for future attempt. */ 752 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); 753 } else 754 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK); 755} 756 757static int 758vtpci_alloc_msix(struct vtpci_softc *sc, int nvectors) 759{ 760 device_t dev; 761 int nmsix, cnt, required; 762 763 dev = sc->vtpci_dev; 764 765 /* Allocate an additional vector for the config changes. */ 766 required = nvectors + 1; 767 768 nmsix = pci_msix_count(dev); 769 if (nmsix < required) 770 return (1); 771 772 cnt = required; 773 if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) { 774 sc->vtpci_nintr_res = required; 775 return (0); 776 } 777 778 pci_release_msi(dev); 779 780 return (1); 781} 782 783static int 784vtpci_alloc_msi(struct vtpci_softc *sc) 785{ 786 device_t dev; 787 int nmsi, cnt, required; 788 789 dev = sc->vtpci_dev; 790 required = 1; 791 792 nmsi = pci_msi_count(dev); 793 if (nmsi < required) 794 return (1); 795 796 cnt = required; 797 if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required) { 798 sc->vtpci_nintr_res = required; 799 return (0); 800 } 801 802 pci_release_msi(dev); 803 804 return (1); 805} 806 807static int 808vtpci_alloc_intr_msix_pervq(struct vtpci_softc *sc) 809{ 810 int i, nvectors, error; 811 812 if (vtpci_disable_msix != 0 || 813 sc->vtpci_flags & VTPCI_FLAG_NO_MSIX) 814 return (ENOTSUP); 815 816 for (nvectors = 0, i = 0; i < sc->vtpci_nvqs; i++) { 817 if (sc->vtpci_vqx[i].no_intr == 0) 818 nvectors++; 819 } 820 821 error = vtpci_alloc_msix(sc, nvectors); 822 if (error) 823 return (error); 824 825 sc->vtpci_flags |= VTPCI_FLAG_MSIX; 826 827 return (0); 828} 829 830static int 
831vtpci_alloc_intr_msix_shared(struct vtpci_softc *sc) 832{ 833 int error; 834 835 if (vtpci_disable_msix != 0 || 836 sc->vtpci_flags & VTPCI_FLAG_NO_MSIX) 837 return (ENOTSUP); 838 839 error = vtpci_alloc_msix(sc, 1); 840 if (error) 841 return (error); 842 843 sc->vtpci_flags |= VTPCI_FLAG_MSIX | VTPCI_FLAG_SHARED_MSIX; 844 845 return (0); 846} 847 848static int 849vtpci_alloc_intr_msi(struct vtpci_softc *sc) 850{ 851 int error; 852 853 /* Only BHyVe supports MSI. */ 854 if (sc->vtpci_flags & VTPCI_FLAG_NO_MSI) 855 return (ENOTSUP); 856 857 error = vtpci_alloc_msi(sc); 858 if (error) 859 return (error); 860 861 sc->vtpci_flags |= VTPCI_FLAG_MSI; 862 863 return (0); 864} 865 866static int 867vtpci_alloc_intr_legacy(struct vtpci_softc *sc) 868{ 869 870 sc->vtpci_flags |= VTPCI_FLAG_LEGACY; 871 sc->vtpci_nintr_res = 1; 872 873 return (0); 874} 875 876static int 877vtpci_alloc_intr_resources(struct vtpci_softc *sc) 878{ 879 device_t dev; 880 struct resource *irq; 881 struct vtpci_virtqueue *vqx; 882 int i, rid, flags, res_idx; 883 884 dev = sc->vtpci_dev; 885 886 if (sc->vtpci_flags & VTPCI_FLAG_LEGACY) { 887 rid = 0; 888 flags = RF_ACTIVE | RF_SHAREABLE; 889 } else { 890 rid = 1; 891 flags = RF_ACTIVE; 892 } 893 894 for (i = 0; i < sc->vtpci_nintr_res; i++) { 895 irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, flags); 896 if (irq == NULL) 897 return (ENXIO); 898 899 sc->vtpci_intr_res[i].irq = irq; 900 sc->vtpci_intr_res[i].rid = rid++; 901 } 902 903 /* 904 * Map the virtqueue into the correct index in vq_intr_res[]. The 905 * first index is reserved for configuration changed notifications. 
906 */ 907 for (i = 0, res_idx = 1; i < sc->vtpci_nvqs; i++) { 908 vqx = &sc->vtpci_vqx[i]; 909 910 if (sc->vtpci_flags & VTPCI_FLAG_MSIX) { 911 if (vqx->no_intr != 0) 912 vqx->ires_idx = -1; 913 else if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) 914 vqx->ires_idx = res_idx; 915 else 916 vqx->ires_idx = res_idx++; 917 } else 918 vqx->ires_idx = -1; 919 } 920 921 return (0); 922} 923 924static int 925vtpci_setup_legacy_interrupt(struct vtpci_softc *sc, enum intr_type type) 926{ 927 device_t dev; 928 struct vtpci_intr_resource *ires; 929 int error; 930 931 dev = sc->vtpci_dev; 932 933 ires = &sc->vtpci_intr_res[0]; 934 error = bus_setup_intr(dev, ires->irq, type, NULL, vtpci_legacy_intr, 935 sc, &ires->intrhand); 936 937 return (error); 938} 939 940static int 941vtpci_setup_msix_interrupts(struct vtpci_softc *sc, enum intr_type type) 942{ 943 device_t dev; 944 struct vtpci_intr_resource *ires; 945 struct vtpci_virtqueue *vqx; 946 int i, error; 947 948 dev = sc->vtpci_dev; 949 950 /* 951 * The first MSIX vector is used for configuration changed interrupts. 
952 */ 953 ires = &sc->vtpci_intr_res[0]; 954 error = bus_setup_intr(dev, ires->irq, type, NULL, vtpci_config_intr, 955 sc, &ires->intrhand); 956 if (error) 957 return (error); 958 959 if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) { 960 ires = &sc->vtpci_intr_res[1]; 961 962 error = bus_setup_intr(dev, ires->irq, type, 963 vtpci_vq_shared_intr_filter, vtpci_vq_shared_intr, sc, 964 &ires->intrhand); 965 } else { 966 for (i = 0; i < sc->vtpci_nvqs; i++) { 967 vqx = &sc->vtpci_vqx[i]; 968 if (vqx->ires_idx < 1) 969 continue; 970 971 ires = &sc->vtpci_intr_res[vqx->ires_idx]; 972 error = bus_setup_intr(dev, ires->irq, type, 973 vtpci_vq_intr_filter, vtpci_vq_intr, vqx->vq, 974 &ires->intrhand); 975 if (error) 976 break; 977 } 978 } 979 980 if (error == 0) 981 error = vtpci_set_host_msix_vectors(sc); 982 983 return (error); 984} 985 986static int 987vtpci_setup_interrupts(struct vtpci_softc *sc, enum intr_type type) 988{ 989 int error; 990 991 type |= INTR_MPSAFE; 992 KASSERT(sc->vtpci_flags & VTPCI_FLAG_ITYPE_MASK, 993 ("no interrupt type selected: %#x", sc->vtpci_flags)); 994 995 error = vtpci_alloc_intr_resources(sc); 996 if (error) 997 return (error); 998 999 if (sc->vtpci_flags & VTPCI_FLAG_LEGACY) 1000 error = vtpci_setup_legacy_interrupt(sc, type); 1001 else if (sc->vtpci_flags & VTPCI_FLAG_MSI) 1002 error = vtpci_setup_msi_interrupt(sc, type); 1003 else 1004 error = vtpci_setup_msix_interrupts(sc, type); 1005 1006 return (error); 1007} 1008 1009static int 1010vtpci_register_msix_vector(struct vtpci_softc *sc, int offset, int res_idx) 1011{ 1012 device_t dev; 1013 uint16_t vector, rdvector; 1014 1015 dev = sc->vtpci_dev; 1016 1017 if (res_idx != -1) { 1018 /* Map from guest rid to host vector. */ 1019 vector = sc->vtpci_intr_res[res_idx].rid - 1; 1020 } else 1021 vector = VIRTIO_MSI_NO_VECTOR; 1022 1023 /* 1024 * Assert the first resource is always used for the configuration 1025 * changed interrupts. 
1026 */ 1027 if (res_idx == 0) { 1028 KASSERT(vector == 0 && offset == VIRTIO_MSI_CONFIG_VECTOR, 1029 ("bad first res use vector:%d offset:%d", vector, offset)); 1030 } else 1031 KASSERT(offset == VIRTIO_MSI_QUEUE_VECTOR, ("bad offset")); 1032 1033 vtpci_write_config_2(sc, offset, vector); 1034 1035 /* Read vector to determine if the host had sufficient resources. */ 1036 rdvector = vtpci_read_config_2(sc, offset); 1037 if (rdvector != vector) { 1038 device_printf(dev, 1039 "insufficient host resources for MSIX interrupts\n"); 1040 return (ENODEV); 1041 } 1042 1043 return (0); 1044} 1045 1046static int 1047vtpci_set_host_msix_vectors(struct vtpci_softc *sc) 1048{ 1049 struct vtpci_virtqueue *vqx; 1050 int idx, error; 1051 1052 error = vtpci_register_msix_vector(sc, VIRTIO_MSI_CONFIG_VECTOR, 0); 1053 if (error) 1054 return (error); 1055 1056 for (idx = 0; idx < sc->vtpci_nvqs; idx++) { 1057 vqx = &sc->vtpci_vqx[idx]; 1058 1059 vtpci_select_virtqueue(sc, idx); 1060 error = vtpci_register_msix_vector(sc, VIRTIO_MSI_QUEUE_VECTOR, 1061 vqx->ires_idx); 1062 if (error) 1063 return (error); 1064 } 1065 1066 return (0); 1067} 1068 1069static int 1070vtpci_reinit_virtqueue(struct vtpci_softc *sc, int idx) 1071{ 1072 struct vtpci_virtqueue *vqx; 1073 struct virtqueue *vq; 1074 int error; 1075 uint16_t size; 1076 1077 vqx = &sc->vtpci_vqx[idx]; 1078 vq = vqx->vq; 1079 1080 KASSERT(vq != NULL, ("vq %d not allocated", idx)); 1081 1082 vtpci_select_virtqueue(sc, idx); 1083 size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM); 1084 1085 error = virtqueue_reinit(vq, size); 1086 if (error) 1087 return (error); 1088 1089 vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, 1090 virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT); 1091 1092 return (0); 1093} 1094 1095static void 1096vtpci_free_interrupts(struct vtpci_softc *sc) 1097{ 1098 device_t dev; 1099 struct vtpci_intr_resource *ires; 1100 int i; 1101 1102 dev = sc->vtpci_dev; 1103 1104 for (i = 0; i < sc->vtpci_nintr_res; i++) { 1105 
ires = &sc->vtpci_intr_res[i]; 1106 1107 if (ires->intrhand != NULL) { 1108 bus_teardown_intr(dev, ires->irq, ires->intrhand); 1109 ires->intrhand = NULL; 1110 } 1111 1112 if (ires->irq != NULL) { 1113 bus_release_resource(dev, SYS_RES_IRQ, ires->rid, 1114 ires->irq); 1115 ires->irq = NULL; 1116 } 1117 1118 ires->rid = -1; 1119 } 1120 1121 if (sc->vtpci_flags & (VTPCI_FLAG_MSI | VTPCI_FLAG_MSIX)) 1122 pci_release_msi(dev); 1123 1124 sc->vtpci_nintr_res = 0; 1125 sc->vtpci_flags &= ~VTPCI_FLAG_ITYPE_MASK; 1126} 1127 1128static void 1129vtpci_free_virtqueues(struct vtpci_softc *sc) 1130{ 1131 struct vtpci_virtqueue *vqx; 1132 int i; 1133 1134 for (i = 0; i < sc->vtpci_nvqs; i++) { 1135 vqx = &sc->vtpci_vqx[i]; 1136 1137 virtqueue_free(vqx->vq); 1138 vqx->vq = NULL; 1139 } 1140 1141 sc->vtpci_nvqs = 0; 1142} 1143 1144static void 1145vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *sc) 1146{ 1147 int idx; 1148 1149 if (sc->vtpci_flags & VTPCI_FLAG_MSIX) { 1150 vtpci_write_config_2(sc, VIRTIO_MSI_CONFIG_VECTOR, 1151 VIRTIO_MSI_NO_VECTOR); 1152 1153 for (idx = 0; idx < sc->vtpci_nvqs; idx++) { 1154 vtpci_select_virtqueue(sc, idx); 1155 vtpci_write_config_2(sc, VIRTIO_MSI_QUEUE_VECTOR, 1156 VIRTIO_MSI_NO_VECTOR); 1157 } 1158 } 1159 1160 vtpci_free_interrupts(sc); 1161} 1162 1163static void 1164vtpci_release_child_resources(struct vtpci_softc *sc) 1165{ 1166 1167 vtpci_free_interrupts(sc); 1168 vtpci_free_virtqueues(sc); 1169} 1170 1171static void 1172vtpci_reset(struct vtpci_softc *sc) 1173{ 1174 1175 /* 1176 * Setting the status to RESET sets the host device to 1177 * the original, uninitialized state. 
1178 */ 1179 vtpci_set_status(sc->vtpci_dev, VIRTIO_CONFIG_STATUS_RESET); 1180} 1181 1182static void 1183vtpci_select_virtqueue(struct vtpci_softc *sc, int idx) 1184{ 1185 1186 vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_SEL, idx); 1187} 1188 1189static void 1190vtpci_legacy_intr(void *xsc) 1191{ 1192 struct vtpci_softc *sc; 1193 struct vtpci_virtqueue *vqx; 1194 int i; 1195 uint8_t isr; 1196 1197 sc = xsc; 1198 vqx = &sc->vtpci_vqx[0]; 1199 1200 /* Reading the ISR also clears it. */ 1201 isr = vtpci_read_config_1(sc, VIRTIO_PCI_ISR); 1202 1203 if (isr & VIRTIO_PCI_ISR_CONFIG) 1204 vtpci_config_intr(sc); 1205 1206 if (isr & VIRTIO_PCI_ISR_INTR) { 1207 for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) 1208 virtqueue_intr(vqx->vq); 1209 } 1210} 1211 1212static int 1213vtpci_vq_shared_intr_filter(void *xsc) 1214{ 1215 struct vtpci_softc *sc; 1216 struct vtpci_virtqueue *vqx; 1217 int i, rc; 1218 1219 rc = 0; 1220 sc = xsc; 1221 vqx = &sc->vtpci_vqx[0]; 1222 1223 for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) 1224 rc |= virtqueue_intr_filter(vqx->vq); 1225 1226 return (rc ? FILTER_SCHEDULE_THREAD : FILTER_STRAY); 1227} 1228 1229static void 1230vtpci_vq_shared_intr(void *xsc) 1231{ 1232 struct vtpci_softc *sc; 1233 struct vtpci_virtqueue *vqx; 1234 int i; 1235 1236 sc = xsc; 1237 vqx = &sc->vtpci_vqx[0]; 1238 1239 for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) 1240 virtqueue_intr(vqx->vq); 1241} 1242 1243static int 1244vtpci_vq_intr_filter(void *xvq) 1245{ 1246 struct virtqueue *vq; 1247 int rc; 1248 1249 vq = xvq; 1250 rc = virtqueue_intr_filter(vq); 1251 1252 return (rc ? FILTER_SCHEDULE_THREAD : FILTER_STRAY); 1253} 1254 1255static void 1256vtpci_vq_intr(void *xvq) 1257{ 1258 struct virtqueue *vq; 1259 1260 vq = xvq; 1261 virtqueue_intr(vq); 1262} 1263 1264static void 1265vtpci_config_intr(void *xsc) 1266{ 1267 struct vtpci_softc *sc; 1268 device_t child; 1269 1270 sc = xsc; 1271 child = sc->vtpci_child_dev; 1272 1273 if (child != NULL) 1274 VIRTIO_CONFIG_CHANGE(child); 1275}
|