173/* 174 * I/O port read/write wrappers. 175 */ 176#define vtpci_read_config_1(sc, o) bus_read_1((sc)->vtpci_res, (o)) 177#define vtpci_read_config_2(sc, o) bus_read_2((sc)->vtpci_res, (o)) 178#define vtpci_read_config_4(sc, o) bus_read_4((sc)->vtpci_res, (o)) 179#define vtpci_write_config_1(sc, o, v) bus_write_1((sc)->vtpci_res, (o), (v)) 180#define vtpci_write_config_2(sc, o, v) bus_write_2((sc)->vtpci_res, (o), (v)) 181#define vtpci_write_config_4(sc, o, v) bus_write_4((sc)->vtpci_res, (o), (v)) 182 183/* Tunables. */ 184static int vtpci_disable_msix = 0; 185TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix); 186 187static device_method_t vtpci_methods[] = { 188 /* Device interface. */ 189 DEVMETHOD(device_probe, vtpci_probe), 190 DEVMETHOD(device_attach, vtpci_attach), 191 DEVMETHOD(device_detach, vtpci_detach), 192 DEVMETHOD(device_suspend, vtpci_suspend), 193 DEVMETHOD(device_resume, vtpci_resume), 194 DEVMETHOD(device_shutdown, vtpci_shutdown), 195 196 /* Bus interface. */ 197 DEVMETHOD(bus_driver_added, vtpci_driver_added), 198 DEVMETHOD(bus_child_detached, vtpci_child_detached), 199 DEVMETHOD(bus_read_ivar, vtpci_read_ivar), 200 DEVMETHOD(bus_write_ivar, vtpci_write_ivar), 201 202 /* VirtIO bus interface. 
*/ 203 DEVMETHOD(virtio_bus_negotiate_features, vtpci_negotiate_features), 204 DEVMETHOD(virtio_bus_with_feature, vtpci_with_feature), 205 DEVMETHOD(virtio_bus_alloc_virtqueues, vtpci_alloc_virtqueues), 206 DEVMETHOD(virtio_bus_setup_intr, vtpci_setup_intr), 207 DEVMETHOD(virtio_bus_stop, vtpci_stop), 208 DEVMETHOD(virtio_bus_reinit, vtpci_reinit), 209 DEVMETHOD(virtio_bus_reinit_complete, vtpci_reinit_complete), 210 DEVMETHOD(virtio_bus_notify_vq, vtpci_notify_virtqueue), 211 DEVMETHOD(virtio_bus_read_device_config, vtpci_read_dev_config), 212 DEVMETHOD(virtio_bus_write_device_config, vtpci_write_dev_config), 213 214 DEVMETHOD_END 215}; 216 217static driver_t vtpci_driver = { 218 "virtio_pci", 219 vtpci_methods, 220 sizeof(struct vtpci_softc) 221}; 222 223devclass_t vtpci_devclass; 224 225DRIVER_MODULE(virtio_pci, pci, vtpci_driver, vtpci_devclass, 0, 0); 226MODULE_VERSION(virtio_pci, 1); 227MODULE_DEPEND(virtio_pci, pci, 1, 1, 1); 228MODULE_DEPEND(virtio_pci, virtio, 1, 1, 1); 229 230static int 231vtpci_probe(device_t dev) 232{ 233 char desc[36]; 234 const char *name; 235 236 if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID) 237 return (ENXIO); 238 239 if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN || 240 pci_get_device(dev) > VIRTIO_PCI_DEVICEID_MAX) 241 return (ENXIO); 242 243 if (pci_get_revid(dev) != VIRTIO_PCI_ABI_VERSION) 244 return (ENXIO); 245 246 name = virtio_device_name(pci_get_subdevice(dev)); 247 if (name == NULL) 248 name = "Unknown"; 249 250 snprintf(desc, sizeof(desc), "VirtIO PCI %s adapter", name); 251 device_set_desc_copy(dev, desc); 252 253 return (BUS_PROBE_DEFAULT); 254} 255 256static int 257vtpci_attach(device_t dev) 258{ 259 struct vtpci_softc *sc; 260 device_t child; 261 int rid; 262 263 sc = device_get_softc(dev); 264 sc->vtpci_dev = dev; 265 266 pci_enable_busmaster(dev); 267 268 rid = PCIR_BAR(0); 269 sc->vtpci_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, 270 RF_ACTIVE); 271 if (sc->vtpci_res == NULL) { 272 
device_printf(dev, "cannot map I/O space\n"); 273 return (ENXIO); 274 } 275 276 if (pci_find_cap(dev, PCIY_MSI, NULL) != 0) 277 sc->vtpci_flags |= VTPCI_FLAG_NO_MSI; 278 279 if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) { 280 rid = PCIR_BAR(1); 281 sc->vtpci_msix_res = bus_alloc_resource_any(dev, 282 SYS_RES_MEMORY, &rid, RF_ACTIVE); 283 } 284 285 if (sc->vtpci_msix_res == NULL) 286 sc->vtpci_flags |= VTPCI_FLAG_NO_MSIX; 287 288 vtpci_reset(sc); 289 290 /* Tell the host we've noticed this device. */ 291 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); 292 293 if ((child = device_add_child(dev, NULL, -1)) == NULL) { 294 device_printf(dev, "cannot create child device\n"); 295 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED); 296 vtpci_detach(dev); 297 return (ENOMEM); 298 } 299 300 sc->vtpci_child_dev = child; 301 vtpci_probe_and_attach_child(sc); 302 303 return (0); 304} 305 306static int 307vtpci_detach(device_t dev) 308{ 309 struct vtpci_softc *sc; 310 device_t child; 311 int error; 312 313 sc = device_get_softc(dev); 314 315 if ((child = sc->vtpci_child_dev) != NULL) { 316 error = device_delete_child(dev, child); 317 if (error) 318 return (error); 319 sc->vtpci_child_dev = NULL; 320 } 321 322 vtpci_reset(sc); 323 324 if (sc->vtpci_msix_res != NULL) { 325 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(1), 326 sc->vtpci_msix_res); 327 sc->vtpci_msix_res = NULL; 328 } 329 330 if (sc->vtpci_res != NULL) { 331 bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0), 332 sc->vtpci_res); 333 sc->vtpci_res = NULL; 334 } 335 336 return (0); 337} 338 339static int 340vtpci_suspend(device_t dev) 341{ 342 343 return (bus_generic_suspend(dev)); 344} 345 346static int 347vtpci_resume(device_t dev) 348{ 349 350 return (bus_generic_resume(dev)); 351} 352 353static int 354vtpci_shutdown(device_t dev) 355{ 356 357 (void) bus_generic_shutdown(dev); 358 /* Forcibly stop the host device. 
*/ 359 vtpci_stop(dev); 360 361 return (0); 362} 363 364static void 365vtpci_driver_added(device_t dev, driver_t *driver) 366{ 367 struct vtpci_softc *sc; 368 369 sc = device_get_softc(dev); 370 371 vtpci_probe_and_attach_child(sc); 372} 373 374static void 375vtpci_child_detached(device_t dev, device_t child) 376{ 377 struct vtpci_softc *sc; 378 379 sc = device_get_softc(dev); 380 381 vtpci_reset(sc); 382 vtpci_release_child_resources(sc); 383} 384 385static int 386vtpci_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) 387{ 388 struct vtpci_softc *sc; 389 390 sc = device_get_softc(dev); 391 392 if (sc->vtpci_child_dev != child) 393 return (ENOENT); 394 395 switch (index) { 396 case VIRTIO_IVAR_DEVTYPE: 397 case VIRTIO_IVAR_SUBDEVICE: 398 *result = pci_get_subdevice(dev); 399 break; 400 case VIRTIO_IVAR_VENDOR: 401 *result = pci_get_vendor(dev); 402 break; 403 case VIRTIO_IVAR_DEVICE: 404 *result = pci_get_device(dev); 405 break; 406 case VIRTIO_IVAR_SUBVENDOR: 407 *result = pci_get_subdevice(dev); 408 break; 409 default: 410 return (ENOENT); 411 } 412 413 return (0); 414} 415 416static int 417vtpci_write_ivar(device_t dev, device_t child, int index, uintptr_t value) 418{ 419 struct vtpci_softc *sc; 420 421 sc = device_get_softc(dev); 422 423 if (sc->vtpci_child_dev != child) 424 return (ENOENT); 425 426 switch (index) { 427 case VIRTIO_IVAR_FEATURE_DESC: 428 sc->vtpci_child_feat_desc = (void *) value; 429 break; 430 default: 431 return (ENOENT); 432 } 433 434 return (0); 435} 436 437static uint64_t 438vtpci_negotiate_features(device_t dev, uint64_t child_features) 439{ 440 struct vtpci_softc *sc; 441 uint64_t host_features, features; 442 443 sc = device_get_softc(dev); 444 445 host_features = vtpci_read_config_4(sc, VIRTIO_PCI_HOST_FEATURES); 446 vtpci_describe_features(sc, "host", host_features); 447 448 /* 449 * Limit negotiated features to what the driver, virtqueue, and 450 * host all support. 
451 */ 452 features = host_features & child_features; 453 features = virtqueue_filter_features(features); 454 sc->vtpci_features = features; 455 456 vtpci_describe_features(sc, "negotiated", features); 457 vtpci_write_config_4(sc, VIRTIO_PCI_GUEST_FEATURES, features); 458 459 return (features); 460} 461 462static int 463vtpci_with_feature(device_t dev, uint64_t feature) 464{ 465 struct vtpci_softc *sc; 466 467 sc = device_get_softc(dev); 468 469 return ((sc->vtpci_features & feature) != 0); 470} 471 472static int 473vtpci_alloc_virtqueues(device_t dev, int flags, int nvqs, 474 struct vq_alloc_info *vq_info) 475{ 476 struct vtpci_softc *sc; 477 struct virtqueue *vq; 478 struct vtpci_virtqueue *vqx; 479 struct vq_alloc_info *info; 480 int idx, error; 481 uint16_t size; 482 483 sc = device_get_softc(dev); 484 485 if (sc->vtpci_nvqs != 0) 486 return (EALREADY); 487 if (nvqs <= 0) 488 return (EINVAL); 489 490 sc->vtpci_vqs = malloc(nvqs * sizeof(struct vtpci_virtqueue), 491 M_DEVBUF, M_NOWAIT | M_ZERO); 492 if (sc->vtpci_vqs == NULL) 493 return (ENOMEM); 494 495 for (idx = 0; idx < nvqs; idx++) { 496 vqx = &sc->vtpci_vqs[idx]; 497 info = &vq_info[idx]; 498 499 vtpci_select_virtqueue(sc, idx); 500 size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM); 501 502 error = virtqueue_alloc(dev, idx, size, VIRTIO_PCI_VRING_ALIGN, 503 0xFFFFFFFFUL, info, &vq); 504 if (error) { 505 device_printf(dev, 506 "cannot allocate virtqueue %d: %d\n", idx, error); 507 break; 508 } 509 510 vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, 511 virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT); 512 513 vqx->vtv_vq = *info->vqai_vq = vq; 514 vqx->vtv_no_intr = info->vqai_intr == NULL; 515 516 sc->vtpci_nvqs++; 517 } 518 519 if (error) 520 vtpci_free_virtqueues(sc); 521 522 return (error); 523} 524 525static int 526vtpci_setup_intr(device_t dev, enum intr_type type) 527{ 528 struct vtpci_softc *sc; 529 int attempt, error; 530 531 sc = device_get_softc(dev); 532 533 for (attempt = 0; attempt < 5; 
attempt++) { 534 /* 535 * Start with the most desirable interrupt configuration and 536 * fallback towards less desirable ones. 537 */ 538 switch (attempt) { 539 case 0: 540 error = vtpci_alloc_intr_msix_pervq(sc); 541 break; 542 case 1: 543 error = vtpci_alloc_intr_msix_shared(sc); 544 break; 545 case 2: 546 error = vtpci_alloc_intr_msi(sc); 547 break; 548 case 3: 549 error = vtpci_alloc_intr_legacy(sc); 550 break; 551 default: 552 device_printf(dev, 553 "exhausted all interrupt allocation attempts\n"); 554 return (ENXIO); 555 } 556 557 if (error == 0 && vtpci_setup_interrupts(sc, type) == 0) 558 break; 559 560 vtpci_cleanup_setup_intr_attempt(sc); 561 } 562 563 if (bootverbose) { 564 if (sc->vtpci_flags & VTPCI_FLAG_LEGACY) 565 device_printf(dev, "using legacy interrupt\n"); 566 else if (sc->vtpci_flags & VTPCI_FLAG_MSI) 567 device_printf(dev, "using MSI interrupt\n"); 568 else if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) 569 device_printf(dev, "using shared MSIX interrupts\n"); 570 else 571 device_printf(dev, "using per VQ MSIX interrupts\n"); 572 } 573 574 return (0); 575} 576 577static void 578vtpci_stop(device_t dev) 579{ 580 581 vtpci_reset(device_get_softc(dev)); 582} 583 584static int 585vtpci_reinit(device_t dev, uint64_t features) 586{ 587 struct vtpci_softc *sc; 588 int idx, error; 589 590 sc = device_get_softc(dev); 591 592 /* 593 * Redrive the device initialization. This is a bit of an abuse of 594 * the specification, but VirtualBox, QEMU/KVM, and BHyVe seem to 595 * play nice. 596 * 597 * We do not allow the host device to change from what was originally 598 * negotiated beyond what the guest driver changed. MSIX state should 599 * not change, number of virtqueues and their size remain the same, etc. 600 * This will need to be rethought when we want to support migration. 601 */ 602 603 if (vtpci_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET) 604 vtpci_stop(dev); 605 606 /* 607 * Quickly drive the status through ACK and DRIVER. 
The device 608 * does not become usable again until vtpci_reinit_complete(). 609 */ 610 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); 611 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER); 612 613 vtpci_negotiate_features(dev, features); 614 615 for (idx = 0; idx < sc->vtpci_nvqs; idx++) { 616 error = vtpci_reinit_virtqueue(sc, idx); 617 if (error) 618 return (error); 619 } 620 621 if (sc->vtpci_flags & VTPCI_FLAG_MSIX) { 622 error = vtpci_set_host_msix_vectors(sc); 623 if (error) 624 return (error); 625 } 626 627 return (0); 628} 629 630static void 631vtpci_reinit_complete(device_t dev) 632{ 633 634 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK); 635} 636 637static void 638vtpci_notify_virtqueue(device_t dev, uint16_t queue) 639{ 640 struct vtpci_softc *sc; 641 642 sc = device_get_softc(dev); 643 644 vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_NOTIFY, queue); 645} 646 647static uint8_t 648vtpci_get_status(device_t dev) 649{ 650 struct vtpci_softc *sc; 651 652 sc = device_get_softc(dev); 653 654 return (vtpci_read_config_1(sc, VIRTIO_PCI_STATUS)); 655} 656 657static void 658vtpci_set_status(device_t dev, uint8_t status) 659{ 660 struct vtpci_softc *sc; 661 662 sc = device_get_softc(dev); 663 664 if (status != VIRTIO_CONFIG_STATUS_RESET) 665 status |= vtpci_get_status(dev); 666 667 vtpci_write_config_1(sc, VIRTIO_PCI_STATUS, status); 668} 669 670static void 671vtpci_read_dev_config(device_t dev, bus_size_t offset, 672 void *dst, int length) 673{ 674 struct vtpci_softc *sc; 675 bus_size_t off; 676 uint8_t *d; 677 int size; 678 679 sc = device_get_softc(dev); 680 off = VIRTIO_PCI_CONFIG(sc) + offset; 681 682 for (d = dst; length > 0; d += size, off += size, length -= size) { 683 if (length >= 4) { 684 size = 4; 685 *(uint32_t *)d = vtpci_read_config_4(sc, off); 686 } else if (length >= 2) { 687 size = 2; 688 *(uint16_t *)d = vtpci_read_config_2(sc, off); 689 } else { 690 size = 1; 691 *d = vtpci_read_config_1(sc, off); 692 } 693 } 694} 695 696static void 
697vtpci_write_dev_config(device_t dev, bus_size_t offset, 698 void *src, int length) 699{ 700 struct vtpci_softc *sc; 701 bus_size_t off; 702 uint8_t *s; 703 int size; 704 705 sc = device_get_softc(dev); 706 off = VIRTIO_PCI_CONFIG(sc) + offset; 707 708 for (s = src; length > 0; s += size, off += size, length -= size) { 709 if (length >= 4) { 710 size = 4; 711 vtpci_write_config_4(sc, off, *(uint32_t *)s); 712 } else if (length >= 2) { 713 size = 2; 714 vtpci_write_config_2(sc, off, *(uint16_t *)s); 715 } else { 716 size = 1; 717 vtpci_write_config_1(sc, off, *s); 718 } 719 } 720} 721 722static void 723vtpci_describe_features(struct vtpci_softc *sc, const char *msg, 724 uint64_t features) 725{ 726 device_t dev, child; 727 728 dev = sc->vtpci_dev; 729 child = sc->vtpci_child_dev; 730 731 if (device_is_attached(child) && bootverbose == 0) 732 return; 733 734 virtio_describe(dev, msg, features, sc->vtpci_child_feat_desc); 735} 736 737static void 738vtpci_probe_and_attach_child(struct vtpci_softc *sc) 739{ 740 device_t dev, child; 741 742 dev = sc->vtpci_dev; 743 child = sc->vtpci_child_dev; 744 745 if (child == NULL) 746 return; 747 748 if (device_get_state(child) != DS_NOTPRESENT) 749 return; 750 751 if (device_probe(child) != 0) 752 return; 753 754 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER); 755 if (device_attach(child) != 0) { 756 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED); 757 vtpci_reset(sc); 758 vtpci_release_child_resources(sc); 759 /* Reset status for future attempt. */ 760 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); 761 } else { 762 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK); 763 VIRTIO_ATTACH_COMPLETED(child); 764 } 765} 766 767static int 768vtpci_alloc_msix(struct vtpci_softc *sc, int nvectors) 769{ 770 device_t dev; 771 int nmsix, cnt, required; 772 773 dev = sc->vtpci_dev; 774 775 /* Allocate an additional vector for the config changes. 
*/ 776 required = nvectors + 1; 777 778 nmsix = pci_msix_count(dev); 779 if (nmsix < required) 780 return (1); 781 782 cnt = required; 783 if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) { 784 sc->vtpci_nmsix_resources = required; 785 return (0); 786 } 787 788 pci_release_msi(dev); 789 790 return (1); 791} 792 793static int 794vtpci_alloc_msi(struct vtpci_softc *sc) 795{ 796 device_t dev; 797 int nmsi, cnt, required; 798 799 dev = sc->vtpci_dev; 800 required = 1; 801 802 nmsi = pci_msi_count(dev); 803 if (nmsi < required) 804 return (1); 805 806 cnt = required; 807 if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required) 808 return (0); 809 810 pci_release_msi(dev); 811 812 return (1); 813} 814 815static int 816vtpci_alloc_intr_msix_pervq(struct vtpci_softc *sc) 817{ 818 int i, nvectors, error; 819 820 if (vtpci_disable_msix != 0 || 821 sc->vtpci_flags & VTPCI_FLAG_NO_MSIX) 822 return (ENOTSUP); 823 824 for (nvectors = 0, i = 0; i < sc->vtpci_nvqs; i++) { 825 if (sc->vtpci_vqs[i].vtv_no_intr == 0) 826 nvectors++; 827 } 828 829 error = vtpci_alloc_msix(sc, nvectors); 830 if (error) 831 return (error); 832 833 sc->vtpci_flags |= VTPCI_FLAG_MSIX; 834 835 return (0); 836} 837 838static int 839vtpci_alloc_intr_msix_shared(struct vtpci_softc *sc) 840{ 841 int error; 842 843 if (vtpci_disable_msix != 0 || 844 sc->vtpci_flags & VTPCI_FLAG_NO_MSIX) 845 return (ENOTSUP); 846 847 error = vtpci_alloc_msix(sc, 1); 848 if (error) 849 return (error); 850 851 sc->vtpci_flags |= VTPCI_FLAG_MSIX | VTPCI_FLAG_SHARED_MSIX; 852 853 return (0); 854} 855 856static int 857vtpci_alloc_intr_msi(struct vtpci_softc *sc) 858{ 859 int error; 860 861 /* Only BHyVe supports MSI. 
*/ 862 if (sc->vtpci_flags & VTPCI_FLAG_NO_MSI) 863 return (ENOTSUP); 864 865 error = vtpci_alloc_msi(sc); 866 if (error) 867 return (error); 868 869 sc->vtpci_flags |= VTPCI_FLAG_MSI; 870 871 return (0); 872} 873 874static int 875vtpci_alloc_intr_legacy(struct vtpci_softc *sc) 876{ 877 878 sc->vtpci_flags |= VTPCI_FLAG_LEGACY; 879 880 return (0); 881} 882 883static int 884vtpci_alloc_interrupt(struct vtpci_softc *sc, int rid, int flags, 885 struct vtpci_interrupt *intr) 886{ 887 struct resource *irq; 888 889 irq = bus_alloc_resource_any(sc->vtpci_dev, SYS_RES_IRQ, &rid, flags); 890 if (irq == NULL) 891 return (ENXIO); 892 893 intr->vti_irq = irq; 894 intr->vti_rid = rid; 895 896 return (0); 897} 898 899static int 900vtpci_alloc_intr_resources(struct vtpci_softc *sc) 901{ 902 struct vtpci_interrupt *intr; 903 int i, rid, flags, nvq_intrs, error; 904 905 rid = 0; 906 flags = RF_ACTIVE; 907 908 if (sc->vtpci_flags & VTPCI_FLAG_LEGACY) 909 flags |= RF_SHAREABLE; 910 else 911 rid = 1; 912 913 /* 914 * For legacy and MSI interrupts, this single resource handles all 915 * interrupts. For MSIX, this resource is used for the configuration 916 * changed interrupt. 917 */ 918 intr = &sc->vtpci_device_interrupt; 919 error = vtpci_alloc_interrupt(sc, rid, flags, intr); 920 if (error || sc->vtpci_flags & (VTPCI_FLAG_LEGACY | VTPCI_FLAG_MSI)) 921 return (error); 922 923 /* Subtract one for the configuration changed interrupt. 
*/ 924 nvq_intrs = sc->vtpci_nmsix_resources - 1; 925 926 intr = sc->vtpci_msix_vq_interrupts = malloc(nvq_intrs * 927 sizeof(struct vtpci_interrupt), M_DEVBUF, M_NOWAIT | M_ZERO); 928 if (sc->vtpci_msix_vq_interrupts == NULL) 929 return (ENOMEM); 930 931 for (i = 0, rid++; i < nvq_intrs; i++, rid++, intr++) { 932 error = vtpci_alloc_interrupt(sc, rid, flags, intr); 933 if (error) 934 return (error); 935 } 936 937 return (0); 938} 939 940static int 941vtpci_setup_legacy_interrupt(struct vtpci_softc *sc, enum intr_type type) 942{ 943 struct vtpci_interrupt *intr; 944 int error; 945 946 intr = &sc->vtpci_device_interrupt; 947 error = bus_setup_intr(sc->vtpci_dev, intr->vti_irq, type, NULL, 948 vtpci_legacy_intr, sc, &intr->vti_handler); 949 950 return (error); 951} 952 953static int 954vtpci_setup_pervq_msix_interrupts(struct vtpci_softc *sc, enum intr_type type) 955{ 956 struct vtpci_virtqueue *vqx; 957 struct vtpci_interrupt *intr; 958 int i, error; 959 960 intr = sc->vtpci_msix_vq_interrupts; 961 962 for (i = 0; i < sc->vtpci_nvqs; i++) { 963 vqx = &sc->vtpci_vqs[i]; 964 965 if (vqx->vtv_no_intr) 966 continue; 967 968 error = bus_setup_intr(sc->vtpci_dev, intr->vti_irq, type, 969 vtpci_vq_intr_filter, vtpci_vq_intr, vqx->vtv_vq, 970 &intr->vti_handler); 971 if (error) 972 return (error); 973 974 intr++; 975 } 976 977 return (0); 978} 979 980static int 981vtpci_setup_msix_interrupts(struct vtpci_softc *sc, enum intr_type type) 982{ 983 device_t dev; 984 struct vtpci_interrupt *intr; 985 int error; 986 987 dev = sc->vtpci_dev; 988 intr = &sc->vtpci_device_interrupt; 989 990 error = bus_setup_intr(dev, intr->vti_irq, type, NULL, 991 vtpci_config_intr, sc, &intr->vti_handler); 992 if (error) 993 return (error); 994 995 if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) { 996 intr = sc->vtpci_msix_vq_interrupts; 997 error = bus_setup_intr(dev, intr->vti_irq, type, 998 vtpci_vq_shared_intr_filter, vtpci_vq_shared_intr, sc, 999 &intr->vti_handler); 1000 } else 1001 error = 
vtpci_setup_pervq_msix_interrupts(sc, type); 1002 1003 return (error ? error : vtpci_set_host_msix_vectors(sc)); 1004} 1005 1006static int 1007vtpci_setup_interrupts(struct vtpci_softc *sc, enum intr_type type) 1008{ 1009 int error; 1010 1011 type |= INTR_MPSAFE; 1012 KASSERT(sc->vtpci_flags & VTPCI_FLAG_ITYPE_MASK, 1013 ("%s: no interrupt type selected %#x", __func__, sc->vtpci_flags)); 1014 1015 error = vtpci_alloc_intr_resources(sc); 1016 if (error) 1017 return (error); 1018 1019 if (sc->vtpci_flags & VTPCI_FLAG_LEGACY) 1020 error = vtpci_setup_legacy_interrupt(sc, type); 1021 else if (sc->vtpci_flags & VTPCI_FLAG_MSI) 1022 error = vtpci_setup_msi_interrupt(sc, type); 1023 else 1024 error = vtpci_setup_msix_interrupts(sc, type); 1025 1026 return (error); 1027} 1028 1029static int 1030vtpci_register_msix_vector(struct vtpci_softc *sc, int offset, 1031 struct vtpci_interrupt *intr) 1032{ 1033 device_t dev; 1034 uint16_t vector; 1035 1036 dev = sc->vtpci_dev; 1037 1038 if (intr != NULL) { 1039 /* Map from guest rid to host vector. */ 1040 vector = intr->vti_rid - 1; 1041 } else 1042 vector = VIRTIO_MSI_NO_VECTOR; 1043 1044 vtpci_write_config_2(sc, offset, vector); 1045 1046 /* Read vector to determine if the host had sufficient resources. 
*/ 1047 if (vtpci_read_config_2(sc, offset) != vector) { 1048 device_printf(dev, 1049 "insufficient host resources for MSIX interrupts\n"); 1050 return (ENODEV); 1051 } 1052 1053 return (0); 1054} 1055 1056static int 1057vtpci_set_host_msix_vectors(struct vtpci_softc *sc) 1058{ 1059 struct vtpci_interrupt *intr, *tintr; 1060 int idx, offset, error; 1061 1062 intr = &sc->vtpci_device_interrupt; 1063 offset = VIRTIO_MSI_CONFIG_VECTOR; 1064 1065 error = vtpci_register_msix_vector(sc, offset, intr); 1066 if (error) 1067 return (error); 1068 1069 intr = sc->vtpci_msix_vq_interrupts; 1070 offset = VIRTIO_MSI_QUEUE_VECTOR; 1071 1072 for (idx = 0; idx < sc->vtpci_nvqs; idx++) { 1073 vtpci_select_virtqueue(sc, idx); 1074 1075 if (sc->vtpci_vqs[idx].vtv_no_intr) 1076 tintr = NULL; 1077 else 1078 tintr = intr; 1079 1080 error = vtpci_register_msix_vector(sc, offset, tintr); 1081 if (error) 1082 break; 1083 1084 /* 1085 * For shared MSIX, all the virtqueues share the first 1086 * interrupt. 1087 */ 1088 if ((sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) == 0) 1089 intr++; 1090 } 1091 1092 return (error); 1093} 1094 1095static int 1096vtpci_reinit_virtqueue(struct vtpci_softc *sc, int idx) 1097{ 1098 struct vtpci_virtqueue *vqx; 1099 struct virtqueue *vq; 1100 int error; 1101 uint16_t size; 1102 1103 vqx = &sc->vtpci_vqs[idx]; 1104 vq = vqx->vtv_vq; 1105 1106 KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx)); 1107 1108 vtpci_select_virtqueue(sc, idx); 1109 size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM); 1110 1111 error = virtqueue_reinit(vq, size); 1112 if (error) 1113 return (error); 1114 1115 vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, 1116 virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT); 1117 1118 return (0); 1119} 1120 1121static void 1122vtpci_free_interrupt(struct vtpci_softc *sc, struct vtpci_interrupt *intr) 1123{ 1124 device_t dev; 1125 1126 dev = sc->vtpci_dev; 1127 1128 if (intr->vti_handler != NULL) { 1129 bus_teardown_intr(dev, intr->vti_irq, 
intr->vti_handler); 1130 intr->vti_handler = NULL; 1131 } 1132 1133 if (intr->vti_irq != NULL) { 1134 bus_release_resource(dev, SYS_RES_IRQ, intr->vti_rid, 1135 intr->vti_irq); 1136 intr->vti_irq = NULL; 1137 intr->vti_rid = -1; 1138 } 1139} 1140 1141static void 1142vtpci_free_interrupts(struct vtpci_softc *sc) 1143{ 1144 struct vtpci_interrupt *intr; 1145 int i, nvq_intrs; 1146 1147 vtpci_free_interrupt(sc, &sc->vtpci_device_interrupt); 1148 1149 if (sc->vtpci_nmsix_resources != 0) { 1150 nvq_intrs = sc->vtpci_nmsix_resources - 1; 1151 sc->vtpci_nmsix_resources = 0; 1152 1153 intr = sc->vtpci_msix_vq_interrupts; 1154 if (intr != NULL) { 1155 for (i = 0; i < nvq_intrs; i++, intr++) 1156 vtpci_free_interrupt(sc, intr); 1157 1158 free(sc->vtpci_msix_vq_interrupts, M_DEVBUF); 1159 sc->vtpci_msix_vq_interrupts = NULL; 1160 } 1161 } 1162 1163 if (sc->vtpci_flags & (VTPCI_FLAG_MSI | VTPCI_FLAG_MSIX)) 1164 pci_release_msi(sc->vtpci_dev); 1165 1166 sc->vtpci_flags &= ~VTPCI_FLAG_ITYPE_MASK; 1167} 1168 1169static void 1170vtpci_free_virtqueues(struct vtpci_softc *sc) 1171{ 1172 struct vtpci_virtqueue *vqx; 1173 int idx; 1174 1175 for (idx = 0; idx < sc->vtpci_nvqs; idx++) { 1176 vqx = &sc->vtpci_vqs[idx]; 1177 1178 vtpci_select_virtqueue(sc, idx); 1179 vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, 0); 1180 1181 virtqueue_free(vqx->vtv_vq); 1182 vqx->vtv_vq = NULL; 1183 } 1184 1185 free(sc->vtpci_vqs, M_DEVBUF); 1186 sc->vtpci_vqs = NULL; 1187 sc->vtpci_nvqs = 0; 1188} 1189 1190static void 1191vtpci_release_child_resources(struct vtpci_softc *sc) 1192{ 1193 1194 vtpci_free_interrupts(sc); 1195 vtpci_free_virtqueues(sc); 1196} 1197 1198static void 1199vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *sc) 1200{ 1201 int idx; 1202 1203 if (sc->vtpci_flags & VTPCI_FLAG_MSIX) { 1204 vtpci_write_config_2(sc, VIRTIO_MSI_CONFIG_VECTOR, 1205 VIRTIO_MSI_NO_VECTOR); 1206 1207 for (idx = 0; idx < sc->vtpci_nvqs; idx++) { 1208 vtpci_select_virtqueue(sc, idx); 1209 
vtpci_write_config_2(sc, VIRTIO_MSI_QUEUE_VECTOR, 1210 VIRTIO_MSI_NO_VECTOR); 1211 } 1212 } 1213 1214 vtpci_free_interrupts(sc); 1215} 1216 1217static void 1218vtpci_reset(struct vtpci_softc *sc) 1219{ 1220 1221 /* 1222 * Setting the status to RESET sets the host device to 1223 * the original, uninitialized state. 1224 */ 1225 vtpci_set_status(sc->vtpci_dev, VIRTIO_CONFIG_STATUS_RESET); 1226} 1227 1228static void 1229vtpci_select_virtqueue(struct vtpci_softc *sc, int idx) 1230{ 1231 1232 vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_SEL, idx); 1233} 1234 1235static void 1236vtpci_legacy_intr(void *xsc) 1237{ 1238 struct vtpci_softc *sc; 1239 struct vtpci_virtqueue *vqx; 1240 int i; 1241 uint8_t isr; 1242 1243 sc = xsc; 1244 vqx = &sc->vtpci_vqs[0]; 1245 1246 /* Reading the ISR also clears it. */ 1247 isr = vtpci_read_config_1(sc, VIRTIO_PCI_ISR); 1248 1249 if (isr & VIRTIO_PCI_ISR_CONFIG) 1250 vtpci_config_intr(sc); 1251 1252 if (isr & VIRTIO_PCI_ISR_INTR) { 1253 for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) { 1254 if (vqx->vtv_no_intr == 0) 1255 virtqueue_intr(vqx->vtv_vq); 1256 } 1257 } 1258} 1259 1260static int 1261vtpci_vq_shared_intr_filter(void *xsc) 1262{ 1263 struct vtpci_softc *sc; 1264 struct vtpci_virtqueue *vqx; 1265 int i, rc; 1266 1267 rc = 0; 1268 sc = xsc; 1269 vqx = &sc->vtpci_vqs[0]; 1270 1271 for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) { 1272 if (vqx->vtv_no_intr == 0) 1273 rc |= virtqueue_intr_filter(vqx->vtv_vq); 1274 } 1275 1276 return (rc ? 
FILTER_SCHEDULE_THREAD : FILTER_STRAY); 1277} 1278 1279static void 1280vtpci_vq_shared_intr(void *xsc) 1281{ 1282 struct vtpci_softc *sc; 1283 struct vtpci_virtqueue *vqx; 1284 int i; 1285 1286 sc = xsc; 1287 vqx = &sc->vtpci_vqs[0]; 1288 1289 for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) { 1290 if (vqx->vtv_no_intr == 0) 1291 virtqueue_intr(vqx->vtv_vq); 1292 } 1293} 1294 1295static int 1296vtpci_vq_intr_filter(void *xvq) 1297{ 1298 struct virtqueue *vq; 1299 int rc; 1300 1301 vq = xvq; 1302 rc = virtqueue_intr_filter(vq); 1303 1304 return (rc ? FILTER_SCHEDULE_THREAD : FILTER_STRAY); 1305} 1306 1307static void 1308vtpci_vq_intr(void *xvq) 1309{ 1310 struct virtqueue *vq; 1311 1312 vq = xvq; 1313 virtqueue_intr(vq); 1314} 1315 1316static void 1317vtpci_config_intr(void *xsc) 1318{ 1319 struct vtpci_softc *sc; 1320 device_t child; 1321 1322 sc = xsc; 1323 child = sc->vtpci_child_dev; 1324 1325 if (child != NULL) 1326 VIRTIO_CONFIG_CHANGE(child); 1327}
| 175/* 176 * I/O port read/write wrappers. 177 */ 178#define vtpci_read_config_1(sc, o) bus_read_1((sc)->vtpci_res, (o)) 179#define vtpci_read_config_2(sc, o) bus_read_2((sc)->vtpci_res, (o)) 180#define vtpci_read_config_4(sc, o) bus_read_4((sc)->vtpci_res, (o)) 181#define vtpci_write_config_1(sc, o, v) bus_write_1((sc)->vtpci_res, (o), (v)) 182#define vtpci_write_config_2(sc, o, v) bus_write_2((sc)->vtpci_res, (o), (v)) 183#define vtpci_write_config_4(sc, o, v) bus_write_4((sc)->vtpci_res, (o), (v)) 184 185/* Tunables. */ 186static int vtpci_disable_msix = 0; 187TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix); 188 189static device_method_t vtpci_methods[] = { 190 /* Device interface. */ 191 DEVMETHOD(device_probe, vtpci_probe), 192 DEVMETHOD(device_attach, vtpci_attach), 193 DEVMETHOD(device_detach, vtpci_detach), 194 DEVMETHOD(device_suspend, vtpci_suspend), 195 DEVMETHOD(device_resume, vtpci_resume), 196 DEVMETHOD(device_shutdown, vtpci_shutdown), 197 198 /* Bus interface. */ 199 DEVMETHOD(bus_driver_added, vtpci_driver_added), 200 DEVMETHOD(bus_child_detached, vtpci_child_detached), 201 DEVMETHOD(bus_read_ivar, vtpci_read_ivar), 202 DEVMETHOD(bus_write_ivar, vtpci_write_ivar), 203 204 /* VirtIO bus interface. 
*/ 205 DEVMETHOD(virtio_bus_negotiate_features, vtpci_negotiate_features), 206 DEVMETHOD(virtio_bus_with_feature, vtpci_with_feature), 207 DEVMETHOD(virtio_bus_alloc_virtqueues, vtpci_alloc_virtqueues), 208 DEVMETHOD(virtio_bus_setup_intr, vtpci_setup_intr), 209 DEVMETHOD(virtio_bus_stop, vtpci_stop), 210 DEVMETHOD(virtio_bus_reinit, vtpci_reinit), 211 DEVMETHOD(virtio_bus_reinit_complete, vtpci_reinit_complete), 212 DEVMETHOD(virtio_bus_notify_vq, vtpci_notify_virtqueue), 213 DEVMETHOD(virtio_bus_read_device_config, vtpci_read_dev_config), 214 DEVMETHOD(virtio_bus_write_device_config, vtpci_write_dev_config), 215 216 DEVMETHOD_END 217}; 218 219static driver_t vtpci_driver = { 220 "virtio_pci", 221 vtpci_methods, 222 sizeof(struct vtpci_softc) 223}; 224 225devclass_t vtpci_devclass; 226 227DRIVER_MODULE(virtio_pci, pci, vtpci_driver, vtpci_devclass, 0, 0); 228MODULE_VERSION(virtio_pci, 1); 229MODULE_DEPEND(virtio_pci, pci, 1, 1, 1); 230MODULE_DEPEND(virtio_pci, virtio, 1, 1, 1); 231 232static int 233vtpci_probe(device_t dev) 234{ 235 char desc[36]; 236 const char *name; 237 238 if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID) 239 return (ENXIO); 240 241 if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN || 242 pci_get_device(dev) > VIRTIO_PCI_DEVICEID_MAX) 243 return (ENXIO); 244 245 if (pci_get_revid(dev) != VIRTIO_PCI_ABI_VERSION) 246 return (ENXIO); 247 248 name = virtio_device_name(pci_get_subdevice(dev)); 249 if (name == NULL) 250 name = "Unknown"; 251 252 snprintf(desc, sizeof(desc), "VirtIO PCI %s adapter", name); 253 device_set_desc_copy(dev, desc); 254 255 return (BUS_PROBE_DEFAULT); 256} 257 258static int 259vtpci_attach(device_t dev) 260{ 261 struct vtpci_softc *sc; 262 device_t child; 263 int rid; 264 265 sc = device_get_softc(dev); 266 sc->vtpci_dev = dev; 267 268 pci_enable_busmaster(dev); 269 270 rid = PCIR_BAR(0); 271 sc->vtpci_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, 272 RF_ACTIVE); 273 if (sc->vtpci_res == NULL) { 274 
device_printf(dev, "cannot map I/O space\n"); 275 return (ENXIO); 276 } 277 278 if (pci_find_cap(dev, PCIY_MSI, NULL) != 0) 279 sc->vtpci_flags |= VTPCI_FLAG_NO_MSI; 280 281 if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) { 282 rid = PCIR_BAR(1); 283 sc->vtpci_msix_res = bus_alloc_resource_any(dev, 284 SYS_RES_MEMORY, &rid, RF_ACTIVE); 285 } 286 287 if (sc->vtpci_msix_res == NULL) 288 sc->vtpci_flags |= VTPCI_FLAG_NO_MSIX; 289 290 vtpci_reset(sc); 291 292 /* Tell the host we've noticed this device. */ 293 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); 294 295 if ((child = device_add_child(dev, NULL, -1)) == NULL) { 296 device_printf(dev, "cannot create child device\n"); 297 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED); 298 vtpci_detach(dev); 299 return (ENOMEM); 300 } 301 302 sc->vtpci_child_dev = child; 303 vtpci_probe_and_attach_child(sc); 304 305 return (0); 306} 307 308static int 309vtpci_detach(device_t dev) 310{ 311 struct vtpci_softc *sc; 312 device_t child; 313 int error; 314 315 sc = device_get_softc(dev); 316 317 if ((child = sc->vtpci_child_dev) != NULL) { 318 error = device_delete_child(dev, child); 319 if (error) 320 return (error); 321 sc->vtpci_child_dev = NULL; 322 } 323 324 vtpci_reset(sc); 325 326 if (sc->vtpci_msix_res != NULL) { 327 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(1), 328 sc->vtpci_msix_res); 329 sc->vtpci_msix_res = NULL; 330 } 331 332 if (sc->vtpci_res != NULL) { 333 bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0), 334 sc->vtpci_res); 335 sc->vtpci_res = NULL; 336 } 337 338 return (0); 339} 340 341static int 342vtpci_suspend(device_t dev) 343{ 344 345 return (bus_generic_suspend(dev)); 346} 347 348static int 349vtpci_resume(device_t dev) 350{ 351 352 return (bus_generic_resume(dev)); 353} 354 355static int 356vtpci_shutdown(device_t dev) 357{ 358 359 (void) bus_generic_shutdown(dev); 360 /* Forcibly stop the host device. 
*/ 361 vtpci_stop(dev); 362 363 return (0); 364} 365 366static void 367vtpci_driver_added(device_t dev, driver_t *driver) 368{ 369 struct vtpci_softc *sc; 370 371 sc = device_get_softc(dev); 372 373 vtpci_probe_and_attach_child(sc); 374} 375 376static void 377vtpci_child_detached(device_t dev, device_t child) 378{ 379 struct vtpci_softc *sc; 380 381 sc = device_get_softc(dev); 382 383 vtpci_reset(sc); 384 vtpci_release_child_resources(sc); 385} 386 387static int 388vtpci_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) 389{ 390 struct vtpci_softc *sc; 391 392 sc = device_get_softc(dev); 393 394 if (sc->vtpci_child_dev != child) 395 return (ENOENT); 396 397 switch (index) { 398 case VIRTIO_IVAR_DEVTYPE: 399 case VIRTIO_IVAR_SUBDEVICE: 400 *result = pci_get_subdevice(dev); 401 break; 402 case VIRTIO_IVAR_VENDOR: 403 *result = pci_get_vendor(dev); 404 break; 405 case VIRTIO_IVAR_DEVICE: 406 *result = pci_get_device(dev); 407 break; 408 case VIRTIO_IVAR_SUBVENDOR: 409 *result = pci_get_subdevice(dev); 410 break; 411 default: 412 return (ENOENT); 413 } 414 415 return (0); 416} 417 418static int 419vtpci_write_ivar(device_t dev, device_t child, int index, uintptr_t value) 420{ 421 struct vtpci_softc *sc; 422 423 sc = device_get_softc(dev); 424 425 if (sc->vtpci_child_dev != child) 426 return (ENOENT); 427 428 switch (index) { 429 case VIRTIO_IVAR_FEATURE_DESC: 430 sc->vtpci_child_feat_desc = (void *) value; 431 break; 432 default: 433 return (ENOENT); 434 } 435 436 return (0); 437} 438 439static uint64_t 440vtpci_negotiate_features(device_t dev, uint64_t child_features) 441{ 442 struct vtpci_softc *sc; 443 uint64_t host_features, features; 444 445 sc = device_get_softc(dev); 446 447 host_features = vtpci_read_config_4(sc, VIRTIO_PCI_HOST_FEATURES); 448 vtpci_describe_features(sc, "host", host_features); 449 450 /* 451 * Limit negotiated features to what the driver, virtqueue, and 452 * host all support. 
453 */ 454 features = host_features & child_features; 455 features = virtqueue_filter_features(features); 456 sc->vtpci_features = features; 457 458 vtpci_describe_features(sc, "negotiated", features); 459 vtpci_write_config_4(sc, VIRTIO_PCI_GUEST_FEATURES, features); 460 461 return (features); 462} 463 464static int 465vtpci_with_feature(device_t dev, uint64_t feature) 466{ 467 struct vtpci_softc *sc; 468 469 sc = device_get_softc(dev); 470 471 return ((sc->vtpci_features & feature) != 0); 472} 473 474static int 475vtpci_alloc_virtqueues(device_t dev, int flags, int nvqs, 476 struct vq_alloc_info *vq_info) 477{ 478 struct vtpci_softc *sc; 479 struct virtqueue *vq; 480 struct vtpci_virtqueue *vqx; 481 struct vq_alloc_info *info; 482 int idx, error; 483 uint16_t size; 484 485 sc = device_get_softc(dev); 486 487 if (sc->vtpci_nvqs != 0) 488 return (EALREADY); 489 if (nvqs <= 0) 490 return (EINVAL); 491 492 sc->vtpci_vqs = malloc(nvqs * sizeof(struct vtpci_virtqueue), 493 M_DEVBUF, M_NOWAIT | M_ZERO); 494 if (sc->vtpci_vqs == NULL) 495 return (ENOMEM); 496 497 for (idx = 0; idx < nvqs; idx++) { 498 vqx = &sc->vtpci_vqs[idx]; 499 info = &vq_info[idx]; 500 501 vtpci_select_virtqueue(sc, idx); 502 size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM); 503 504 error = virtqueue_alloc(dev, idx, size, VIRTIO_PCI_VRING_ALIGN, 505 0xFFFFFFFFUL, info, &vq); 506 if (error) { 507 device_printf(dev, 508 "cannot allocate virtqueue %d: %d\n", idx, error); 509 break; 510 } 511 512 vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, 513 virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT); 514 515 vqx->vtv_vq = *info->vqai_vq = vq; 516 vqx->vtv_no_intr = info->vqai_intr == NULL; 517 518 sc->vtpci_nvqs++; 519 } 520 521 if (error) 522 vtpci_free_virtqueues(sc); 523 524 return (error); 525} 526 527static int 528vtpci_setup_intr(device_t dev, enum intr_type type) 529{ 530 struct vtpci_softc *sc; 531 int attempt, error; 532 533 sc = device_get_softc(dev); 534 535 for (attempt = 0; attempt < 5; 
attempt++) { 536 /* 537 * Start with the most desirable interrupt configuration and 538 * fallback towards less desirable ones. 539 */ 540 switch (attempt) { 541 case 0: 542 error = vtpci_alloc_intr_msix_pervq(sc); 543 break; 544 case 1: 545 error = vtpci_alloc_intr_msix_shared(sc); 546 break; 547 case 2: 548 error = vtpci_alloc_intr_msi(sc); 549 break; 550 case 3: 551 error = vtpci_alloc_intr_legacy(sc); 552 break; 553 default: 554 device_printf(dev, 555 "exhausted all interrupt allocation attempts\n"); 556 return (ENXIO); 557 } 558 559 if (error == 0 && vtpci_setup_interrupts(sc, type) == 0) 560 break; 561 562 vtpci_cleanup_setup_intr_attempt(sc); 563 } 564 565 if (bootverbose) { 566 if (sc->vtpci_flags & VTPCI_FLAG_LEGACY) 567 device_printf(dev, "using legacy interrupt\n"); 568 else if (sc->vtpci_flags & VTPCI_FLAG_MSI) 569 device_printf(dev, "using MSI interrupt\n"); 570 else if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) 571 device_printf(dev, "using shared MSIX interrupts\n"); 572 else 573 device_printf(dev, "using per VQ MSIX interrupts\n"); 574 } 575 576 return (0); 577} 578 579static void 580vtpci_stop(device_t dev) 581{ 582 583 vtpci_reset(device_get_softc(dev)); 584} 585 586static int 587vtpci_reinit(device_t dev, uint64_t features) 588{ 589 struct vtpci_softc *sc; 590 int idx, error; 591 592 sc = device_get_softc(dev); 593 594 /* 595 * Redrive the device initialization. This is a bit of an abuse of 596 * the specification, but VirtualBox, QEMU/KVM, and BHyVe seem to 597 * play nice. 598 * 599 * We do not allow the host device to change from what was originally 600 * negotiated beyond what the guest driver changed. MSIX state should 601 * not change, number of virtqueues and their size remain the same, etc. 602 * This will need to be rethought when we want to support migration. 603 */ 604 605 if (vtpci_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET) 606 vtpci_stop(dev); 607 608 /* 609 * Quickly drive the status through ACK and DRIVER. 
The device 610 * does not become usable again until vtpci_reinit_complete(). 611 */ 612 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); 613 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER); 614 615 vtpci_negotiate_features(dev, features); 616 617 for (idx = 0; idx < sc->vtpci_nvqs; idx++) { 618 error = vtpci_reinit_virtqueue(sc, idx); 619 if (error) 620 return (error); 621 } 622 623 if (sc->vtpci_flags & VTPCI_FLAG_MSIX) { 624 error = vtpci_set_host_msix_vectors(sc); 625 if (error) 626 return (error); 627 } 628 629 return (0); 630} 631 632static void 633vtpci_reinit_complete(device_t dev) 634{ 635 636 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK); 637} 638 639static void 640vtpci_notify_virtqueue(device_t dev, uint16_t queue) 641{ 642 struct vtpci_softc *sc; 643 644 sc = device_get_softc(dev); 645 646 vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_NOTIFY, queue); 647} 648 649static uint8_t 650vtpci_get_status(device_t dev) 651{ 652 struct vtpci_softc *sc; 653 654 sc = device_get_softc(dev); 655 656 return (vtpci_read_config_1(sc, VIRTIO_PCI_STATUS)); 657} 658 659static void 660vtpci_set_status(device_t dev, uint8_t status) 661{ 662 struct vtpci_softc *sc; 663 664 sc = device_get_softc(dev); 665 666 if (status != VIRTIO_CONFIG_STATUS_RESET) 667 status |= vtpci_get_status(dev); 668 669 vtpci_write_config_1(sc, VIRTIO_PCI_STATUS, status); 670} 671 672static void 673vtpci_read_dev_config(device_t dev, bus_size_t offset, 674 void *dst, int length) 675{ 676 struct vtpci_softc *sc; 677 bus_size_t off; 678 uint8_t *d; 679 int size; 680 681 sc = device_get_softc(dev); 682 off = VIRTIO_PCI_CONFIG(sc) + offset; 683 684 for (d = dst; length > 0; d += size, off += size, length -= size) { 685 if (length >= 4) { 686 size = 4; 687 *(uint32_t *)d = vtpci_read_config_4(sc, off); 688 } else if (length >= 2) { 689 size = 2; 690 *(uint16_t *)d = vtpci_read_config_2(sc, off); 691 } else { 692 size = 1; 693 *d = vtpci_read_config_1(sc, off); 694 } 695 } 696} 697 698static void 
699vtpci_write_dev_config(device_t dev, bus_size_t offset, 700 void *src, int length) 701{ 702 struct vtpci_softc *sc; 703 bus_size_t off; 704 uint8_t *s; 705 int size; 706 707 sc = device_get_softc(dev); 708 off = VIRTIO_PCI_CONFIG(sc) + offset; 709 710 for (s = src; length > 0; s += size, off += size, length -= size) { 711 if (length >= 4) { 712 size = 4; 713 vtpci_write_config_4(sc, off, *(uint32_t *)s); 714 } else if (length >= 2) { 715 size = 2; 716 vtpci_write_config_2(sc, off, *(uint16_t *)s); 717 } else { 718 size = 1; 719 vtpci_write_config_1(sc, off, *s); 720 } 721 } 722} 723 724static void 725vtpci_describe_features(struct vtpci_softc *sc, const char *msg, 726 uint64_t features) 727{ 728 device_t dev, child; 729 730 dev = sc->vtpci_dev; 731 child = sc->vtpci_child_dev; 732 733 if (device_is_attached(child) && bootverbose == 0) 734 return; 735 736 virtio_describe(dev, msg, features, sc->vtpci_child_feat_desc); 737} 738 739static void 740vtpci_probe_and_attach_child(struct vtpci_softc *sc) 741{ 742 device_t dev, child; 743 744 dev = sc->vtpci_dev; 745 child = sc->vtpci_child_dev; 746 747 if (child == NULL) 748 return; 749 750 if (device_get_state(child) != DS_NOTPRESENT) 751 return; 752 753 if (device_probe(child) != 0) 754 return; 755 756 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER); 757 if (device_attach(child) != 0) { 758 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED); 759 vtpci_reset(sc); 760 vtpci_release_child_resources(sc); 761 /* Reset status for future attempt. */ 762 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); 763 } else { 764 vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK); 765 VIRTIO_ATTACH_COMPLETED(child); 766 } 767} 768 769static int 770vtpci_alloc_msix(struct vtpci_softc *sc, int nvectors) 771{ 772 device_t dev; 773 int nmsix, cnt, required; 774 775 dev = sc->vtpci_dev; 776 777 /* Allocate an additional vector for the config changes. 
*/ 778 required = nvectors + 1; 779 780 nmsix = pci_msix_count(dev); 781 if (nmsix < required) 782 return (1); 783 784 cnt = required; 785 if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) { 786 sc->vtpci_nmsix_resources = required; 787 return (0); 788 } 789 790 pci_release_msi(dev); 791 792 return (1); 793} 794 795static int 796vtpci_alloc_msi(struct vtpci_softc *sc) 797{ 798 device_t dev; 799 int nmsi, cnt, required; 800 801 dev = sc->vtpci_dev; 802 required = 1; 803 804 nmsi = pci_msi_count(dev); 805 if (nmsi < required) 806 return (1); 807 808 cnt = required; 809 if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required) 810 return (0); 811 812 pci_release_msi(dev); 813 814 return (1); 815} 816 817static int 818vtpci_alloc_intr_msix_pervq(struct vtpci_softc *sc) 819{ 820 int i, nvectors, error; 821 822 if (vtpci_disable_msix != 0 || 823 sc->vtpci_flags & VTPCI_FLAG_NO_MSIX) 824 return (ENOTSUP); 825 826 for (nvectors = 0, i = 0; i < sc->vtpci_nvqs; i++) { 827 if (sc->vtpci_vqs[i].vtv_no_intr == 0) 828 nvectors++; 829 } 830 831 error = vtpci_alloc_msix(sc, nvectors); 832 if (error) 833 return (error); 834 835 sc->vtpci_flags |= VTPCI_FLAG_MSIX; 836 837 return (0); 838} 839 840static int 841vtpci_alloc_intr_msix_shared(struct vtpci_softc *sc) 842{ 843 int error; 844 845 if (vtpci_disable_msix != 0 || 846 sc->vtpci_flags & VTPCI_FLAG_NO_MSIX) 847 return (ENOTSUP); 848 849 error = vtpci_alloc_msix(sc, 1); 850 if (error) 851 return (error); 852 853 sc->vtpci_flags |= VTPCI_FLAG_MSIX | VTPCI_FLAG_SHARED_MSIX; 854 855 return (0); 856} 857 858static int 859vtpci_alloc_intr_msi(struct vtpci_softc *sc) 860{ 861 int error; 862 863 /* Only BHyVe supports MSI. 
*/ 864 if (sc->vtpci_flags & VTPCI_FLAG_NO_MSI) 865 return (ENOTSUP); 866 867 error = vtpci_alloc_msi(sc); 868 if (error) 869 return (error); 870 871 sc->vtpci_flags |= VTPCI_FLAG_MSI; 872 873 return (0); 874} 875 876static int 877vtpci_alloc_intr_legacy(struct vtpci_softc *sc) 878{ 879 880 sc->vtpci_flags |= VTPCI_FLAG_LEGACY; 881 882 return (0); 883} 884 885static int 886vtpci_alloc_interrupt(struct vtpci_softc *sc, int rid, int flags, 887 struct vtpci_interrupt *intr) 888{ 889 struct resource *irq; 890 891 irq = bus_alloc_resource_any(sc->vtpci_dev, SYS_RES_IRQ, &rid, flags); 892 if (irq == NULL) 893 return (ENXIO); 894 895 intr->vti_irq = irq; 896 intr->vti_rid = rid; 897 898 return (0); 899} 900 901static int 902vtpci_alloc_intr_resources(struct vtpci_softc *sc) 903{ 904 struct vtpci_interrupt *intr; 905 int i, rid, flags, nvq_intrs, error; 906 907 rid = 0; 908 flags = RF_ACTIVE; 909 910 if (sc->vtpci_flags & VTPCI_FLAG_LEGACY) 911 flags |= RF_SHAREABLE; 912 else 913 rid = 1; 914 915 /* 916 * For legacy and MSI interrupts, this single resource handles all 917 * interrupts. For MSIX, this resource is used for the configuration 918 * changed interrupt. 919 */ 920 intr = &sc->vtpci_device_interrupt; 921 error = vtpci_alloc_interrupt(sc, rid, flags, intr); 922 if (error || sc->vtpci_flags & (VTPCI_FLAG_LEGACY | VTPCI_FLAG_MSI)) 923 return (error); 924 925 /* Subtract one for the configuration changed interrupt. 
*/ 926 nvq_intrs = sc->vtpci_nmsix_resources - 1; 927 928 intr = sc->vtpci_msix_vq_interrupts = malloc(nvq_intrs * 929 sizeof(struct vtpci_interrupt), M_DEVBUF, M_NOWAIT | M_ZERO); 930 if (sc->vtpci_msix_vq_interrupts == NULL) 931 return (ENOMEM); 932 933 for (i = 0, rid++; i < nvq_intrs; i++, rid++, intr++) { 934 error = vtpci_alloc_interrupt(sc, rid, flags, intr); 935 if (error) 936 return (error); 937 } 938 939 return (0); 940} 941 942static int 943vtpci_setup_legacy_interrupt(struct vtpci_softc *sc, enum intr_type type) 944{ 945 struct vtpci_interrupt *intr; 946 int error; 947 948 intr = &sc->vtpci_device_interrupt; 949 error = bus_setup_intr(sc->vtpci_dev, intr->vti_irq, type, NULL, 950 vtpci_legacy_intr, sc, &intr->vti_handler); 951 952 return (error); 953} 954 955static int 956vtpci_setup_pervq_msix_interrupts(struct vtpci_softc *sc, enum intr_type type) 957{ 958 struct vtpci_virtqueue *vqx; 959 struct vtpci_interrupt *intr; 960 int i, error; 961 962 intr = sc->vtpci_msix_vq_interrupts; 963 964 for (i = 0; i < sc->vtpci_nvqs; i++) { 965 vqx = &sc->vtpci_vqs[i]; 966 967 if (vqx->vtv_no_intr) 968 continue; 969 970 error = bus_setup_intr(sc->vtpci_dev, intr->vti_irq, type, 971 vtpci_vq_intr_filter, vtpci_vq_intr, vqx->vtv_vq, 972 &intr->vti_handler); 973 if (error) 974 return (error); 975 976 intr++; 977 } 978 979 return (0); 980} 981 982static int 983vtpci_setup_msix_interrupts(struct vtpci_softc *sc, enum intr_type type) 984{ 985 device_t dev; 986 struct vtpci_interrupt *intr; 987 int error; 988 989 dev = sc->vtpci_dev; 990 intr = &sc->vtpci_device_interrupt; 991 992 error = bus_setup_intr(dev, intr->vti_irq, type, NULL, 993 vtpci_config_intr, sc, &intr->vti_handler); 994 if (error) 995 return (error); 996 997 if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) { 998 intr = sc->vtpci_msix_vq_interrupts; 999 error = bus_setup_intr(dev, intr->vti_irq, type, 1000 vtpci_vq_shared_intr_filter, vtpci_vq_shared_intr, sc, 1001 &intr->vti_handler); 1002 } else 1003 error = 
vtpci_setup_pervq_msix_interrupts(sc, type); 1004 1005 return (error ? error : vtpci_set_host_msix_vectors(sc)); 1006} 1007 1008static int 1009vtpci_setup_interrupts(struct vtpci_softc *sc, enum intr_type type) 1010{ 1011 int error; 1012 1013 type |= INTR_MPSAFE; 1014 KASSERT(sc->vtpci_flags & VTPCI_FLAG_ITYPE_MASK, 1015 ("%s: no interrupt type selected %#x", __func__, sc->vtpci_flags)); 1016 1017 error = vtpci_alloc_intr_resources(sc); 1018 if (error) 1019 return (error); 1020 1021 if (sc->vtpci_flags & VTPCI_FLAG_LEGACY) 1022 error = vtpci_setup_legacy_interrupt(sc, type); 1023 else if (sc->vtpci_flags & VTPCI_FLAG_MSI) 1024 error = vtpci_setup_msi_interrupt(sc, type); 1025 else 1026 error = vtpci_setup_msix_interrupts(sc, type); 1027 1028 return (error); 1029} 1030 1031static int 1032vtpci_register_msix_vector(struct vtpci_softc *sc, int offset, 1033 struct vtpci_interrupt *intr) 1034{ 1035 device_t dev; 1036 uint16_t vector; 1037 1038 dev = sc->vtpci_dev; 1039 1040 if (intr != NULL) { 1041 /* Map from guest rid to host vector. */ 1042 vector = intr->vti_rid - 1; 1043 } else 1044 vector = VIRTIO_MSI_NO_VECTOR; 1045 1046 vtpci_write_config_2(sc, offset, vector); 1047 1048 /* Read vector to determine if the host had sufficient resources. 
*/ 1049 if (vtpci_read_config_2(sc, offset) != vector) { 1050 device_printf(dev, 1051 "insufficient host resources for MSIX interrupts\n"); 1052 return (ENODEV); 1053 } 1054 1055 return (0); 1056} 1057 1058static int 1059vtpci_set_host_msix_vectors(struct vtpci_softc *sc) 1060{ 1061 struct vtpci_interrupt *intr, *tintr; 1062 int idx, offset, error; 1063 1064 intr = &sc->vtpci_device_interrupt; 1065 offset = VIRTIO_MSI_CONFIG_VECTOR; 1066 1067 error = vtpci_register_msix_vector(sc, offset, intr); 1068 if (error) 1069 return (error); 1070 1071 intr = sc->vtpci_msix_vq_interrupts; 1072 offset = VIRTIO_MSI_QUEUE_VECTOR; 1073 1074 for (idx = 0; idx < sc->vtpci_nvqs; idx++) { 1075 vtpci_select_virtqueue(sc, idx); 1076 1077 if (sc->vtpci_vqs[idx].vtv_no_intr) 1078 tintr = NULL; 1079 else 1080 tintr = intr; 1081 1082 error = vtpci_register_msix_vector(sc, offset, tintr); 1083 if (error) 1084 break; 1085 1086 /* 1087 * For shared MSIX, all the virtqueues share the first 1088 * interrupt. 1089 */ 1090 if ((sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) == 0) 1091 intr++; 1092 } 1093 1094 return (error); 1095} 1096 1097static int 1098vtpci_reinit_virtqueue(struct vtpci_softc *sc, int idx) 1099{ 1100 struct vtpci_virtqueue *vqx; 1101 struct virtqueue *vq; 1102 int error; 1103 uint16_t size; 1104 1105 vqx = &sc->vtpci_vqs[idx]; 1106 vq = vqx->vtv_vq; 1107 1108 KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx)); 1109 1110 vtpci_select_virtqueue(sc, idx); 1111 size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM); 1112 1113 error = virtqueue_reinit(vq, size); 1114 if (error) 1115 return (error); 1116 1117 vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, 1118 virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT); 1119 1120 return (0); 1121} 1122 1123static void 1124vtpci_free_interrupt(struct vtpci_softc *sc, struct vtpci_interrupt *intr) 1125{ 1126 device_t dev; 1127 1128 dev = sc->vtpci_dev; 1129 1130 if (intr->vti_handler != NULL) { 1131 bus_teardown_intr(dev, intr->vti_irq, 
intr->vti_handler); 1132 intr->vti_handler = NULL; 1133 } 1134 1135 if (intr->vti_irq != NULL) { 1136 bus_release_resource(dev, SYS_RES_IRQ, intr->vti_rid, 1137 intr->vti_irq); 1138 intr->vti_irq = NULL; 1139 intr->vti_rid = -1; 1140 } 1141} 1142 1143static void 1144vtpci_free_interrupts(struct vtpci_softc *sc) 1145{ 1146 struct vtpci_interrupt *intr; 1147 int i, nvq_intrs; 1148 1149 vtpci_free_interrupt(sc, &sc->vtpci_device_interrupt); 1150 1151 if (sc->vtpci_nmsix_resources != 0) { 1152 nvq_intrs = sc->vtpci_nmsix_resources - 1; 1153 sc->vtpci_nmsix_resources = 0; 1154 1155 intr = sc->vtpci_msix_vq_interrupts; 1156 if (intr != NULL) { 1157 for (i = 0; i < nvq_intrs; i++, intr++) 1158 vtpci_free_interrupt(sc, intr); 1159 1160 free(sc->vtpci_msix_vq_interrupts, M_DEVBUF); 1161 sc->vtpci_msix_vq_interrupts = NULL; 1162 } 1163 } 1164 1165 if (sc->vtpci_flags & (VTPCI_FLAG_MSI | VTPCI_FLAG_MSIX)) 1166 pci_release_msi(sc->vtpci_dev); 1167 1168 sc->vtpci_flags &= ~VTPCI_FLAG_ITYPE_MASK; 1169} 1170 1171static void 1172vtpci_free_virtqueues(struct vtpci_softc *sc) 1173{ 1174 struct vtpci_virtqueue *vqx; 1175 int idx; 1176 1177 for (idx = 0; idx < sc->vtpci_nvqs; idx++) { 1178 vqx = &sc->vtpci_vqs[idx]; 1179 1180 vtpci_select_virtqueue(sc, idx); 1181 vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, 0); 1182 1183 virtqueue_free(vqx->vtv_vq); 1184 vqx->vtv_vq = NULL; 1185 } 1186 1187 free(sc->vtpci_vqs, M_DEVBUF); 1188 sc->vtpci_vqs = NULL; 1189 sc->vtpci_nvqs = 0; 1190} 1191 1192static void 1193vtpci_release_child_resources(struct vtpci_softc *sc) 1194{ 1195 1196 vtpci_free_interrupts(sc); 1197 vtpci_free_virtqueues(sc); 1198} 1199 1200static void 1201vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *sc) 1202{ 1203 int idx; 1204 1205 if (sc->vtpci_flags & VTPCI_FLAG_MSIX) { 1206 vtpci_write_config_2(sc, VIRTIO_MSI_CONFIG_VECTOR, 1207 VIRTIO_MSI_NO_VECTOR); 1208 1209 for (idx = 0; idx < sc->vtpci_nvqs; idx++) { 1210 vtpci_select_virtqueue(sc, idx); 1211 
vtpci_write_config_2(sc, VIRTIO_MSI_QUEUE_VECTOR, 1212 VIRTIO_MSI_NO_VECTOR); 1213 } 1214 } 1215 1216 vtpci_free_interrupts(sc); 1217} 1218 1219static void 1220vtpci_reset(struct vtpci_softc *sc) 1221{ 1222 1223 /* 1224 * Setting the status to RESET sets the host device to 1225 * the original, uninitialized state. 1226 */ 1227 vtpci_set_status(sc->vtpci_dev, VIRTIO_CONFIG_STATUS_RESET); 1228} 1229 1230static void 1231vtpci_select_virtqueue(struct vtpci_softc *sc, int idx) 1232{ 1233 1234 vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_SEL, idx); 1235} 1236 1237static void 1238vtpci_legacy_intr(void *xsc) 1239{ 1240 struct vtpci_softc *sc; 1241 struct vtpci_virtqueue *vqx; 1242 int i; 1243 uint8_t isr; 1244 1245 sc = xsc; 1246 vqx = &sc->vtpci_vqs[0]; 1247 1248 /* Reading the ISR also clears it. */ 1249 isr = vtpci_read_config_1(sc, VIRTIO_PCI_ISR); 1250 1251 if (isr & VIRTIO_PCI_ISR_CONFIG) 1252 vtpci_config_intr(sc); 1253 1254 if (isr & VIRTIO_PCI_ISR_INTR) { 1255 for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) { 1256 if (vqx->vtv_no_intr == 0) 1257 virtqueue_intr(vqx->vtv_vq); 1258 } 1259 } 1260} 1261 1262static int 1263vtpci_vq_shared_intr_filter(void *xsc) 1264{ 1265 struct vtpci_softc *sc; 1266 struct vtpci_virtqueue *vqx; 1267 int i, rc; 1268 1269 rc = 0; 1270 sc = xsc; 1271 vqx = &sc->vtpci_vqs[0]; 1272 1273 for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) { 1274 if (vqx->vtv_no_intr == 0) 1275 rc |= virtqueue_intr_filter(vqx->vtv_vq); 1276 } 1277 1278 return (rc ? 
FILTER_SCHEDULE_THREAD : FILTER_STRAY); 1279} 1280 1281static void 1282vtpci_vq_shared_intr(void *xsc) 1283{ 1284 struct vtpci_softc *sc; 1285 struct vtpci_virtqueue *vqx; 1286 int i; 1287 1288 sc = xsc; 1289 vqx = &sc->vtpci_vqs[0]; 1290 1291 for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) { 1292 if (vqx->vtv_no_intr == 0) 1293 virtqueue_intr(vqx->vtv_vq); 1294 } 1295} 1296 1297static int 1298vtpci_vq_intr_filter(void *xvq) 1299{ 1300 struct virtqueue *vq; 1301 int rc; 1302 1303 vq = xvq; 1304 rc = virtqueue_intr_filter(vq); 1305 1306 return (rc ? FILTER_SCHEDULE_THREAD : FILTER_STRAY); 1307} 1308 1309static void 1310vtpci_vq_intr(void *xvq) 1311{ 1312 struct virtqueue *vq; 1313 1314 vq = xvq; 1315 virtqueue_intr(vq); 1316} 1317 1318static void 1319vtpci_config_intr(void *xsc) 1320{ 1321 struct vtpci_softc *sc; 1322 device_t child; 1323 1324 sc = xsc; 1325 child = sc->vtpci_child_dev; 1326 1327 if (child != NULL) 1328 VIRTIO_CONFIG_CHANGE(child); 1329}