/*	$NetBSD: virtio.c,v 1.19 2016/11/29 22:04:42 uwe Exp $	*/

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.19 2016/11/29 22:04:42 uwe Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/module.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/virtioreg.h>
#include <dev/pci/virtiovar.h>

#define MINSEG_INDIRECT		2 /* use indirect if nsegs >= this value */

static int	virtio_match(device_t, cfdata_t, void *);
static void	virtio_attach(device_t, device_t, void *);
static int	virtio_rescan(device_t, const char *, const int *);
static int	virtio_detach(device_t, int);
static int	virtio_intr(void *arg);
static int	virtio_msix_queue_intr(void *);
static int	virtio_msix_config_intr(void *);
static int	virtio_setup_msix_vectors(struct virtio_softc *);
static int	virtio_setup_msix_interrupts(struct virtio_softc *,
		    struct pci_attach_args *);
static int	virtio_setup_intx_interrupt(struct virtio_softc *,
		    struct pci_attach_args *);
static int	virtio_setup_interrupts(struct virtio_softc *);
static void	virtio_soft_intr(void *arg);
static void	virtio_init_vq(struct virtio_softc *,
		    struct virtqueue *, const bool);

CFATTACH_DECL3_NEW(virtio, sizeof(struct virtio_softc),
    virtio_match, virtio_attach, virtio_detach, NULL, virtio_rescan, NULL,
    DVF_DETACH_SHUTDOWN);

static void
virtio_set_status(struct virtio_softc *sc, int status)
{
	int old = 0;

	if (status != 0)
		old = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_DEVICE_STATUS,
	    status|old);
}

#define virtio_device_reset(sc)	virtio_set_status((sc), 0)

static int
virtio_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa;

	pa = (struct pci_attach_args *)aux;
	switch (PCI_VENDOR(pa->pa_id)) {
	case PCI_VENDOR_QUMRANET:
		if ((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
		     PCI_PRODUCT(pa->pa_id)) &&
		    (PCI_PRODUCT(pa->pa_id) <=
		     PCI_PRODUCT_QUMRANET_VIRTIO_103F))
			return 1;
		break;
	}

	return 0;
}

static const char *virtio_device_name[] = {
	"Unknown (0)",			/* 0 */
	"Network",			/* 1 */
	"Block",			/* 2 */
	"Console",			/* 3 */
	"Entropy",			/* 4 */
	"Memory Balloon",		/* 5 */
	"I/O Memory",			/* 6 */
	"Remote Processor Messaging",	/* 7 */
	"SCSI",				/* 8 */
	"9P Transport",			/* 9 */
	"mac80211 wlan",		/* 10 */
};
#define NDEVNAMES	__arraycount(virtio_device_name)

#define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
#define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1

static int
virtio_setup_msix_vectors(struct virtio_softc *sc)
{
	int offset, vector, ret, qid;

	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;

	bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, vector);
	ret = bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
	aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
	    vector, ret);
	if (ret != vector)
		return -1;

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		offset = VIRTIO_CONFIG_QUEUE_SELECT;
		bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, qid);

		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, vector);
		ret = bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
		aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
		    vector, ret);
		if (ret != vector)
			return -1;
	}

	return 0;
}

static int
virtio_setup_msix_interrupts(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;
	int idx;

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
		pci_intr_setattr(pc, &sc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	sc->sc_ihs[idx] = pci_intr_establish_xname(pc, sc->sc_ihp[idx], IPL_NET,
	    virtio_msix_config_intr, sc, device_xname(sc->sc_dev));
	if (sc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self, "couldn't establish MSI-X for config\n");
		goto error;
	}

	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
		pci_intr_setattr(pc, &sc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	sc->sc_ihs[idx] = pci_intr_establish_xname(pc, sc->sc_ihp[idx], IPL_NET,
	    virtio_msix_queue_intr, sc, device_xname(sc->sc_dev));
	if (sc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self, "couldn't establish MSI-X for queues\n");
		goto error;
	}

	if (virtio_setup_msix_vectors(sc) != 0) {
		aprint_error_dev(self, "couldn't setup MSI-X vectors\n");
		goto error;
	}

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, sc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, sc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "queues interrupting at %s\n", intrstr);

	return 0;

error:
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[idx]);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[idx]);

	return -1;
}
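
/*
 * INTx fallback: a single interrupt line carries both config-change and
 * virtqueue notifications; virtio_intr() tells them apart by reading the
 * ISR status register.
 */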

static int
virtio_setup_intx_interrupt(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;

	if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
		pci_intr_setattr(pc, &sc->sc_ihp[0], PCI_INTR_MPSAFE, true);

	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_ihp[0],
	    IPL_NET, virtio_intr, sc, device_xname(sc->sc_dev));
	if (sc->sc_ihs[0] == NULL) {
		aprint_error_dev(self, "couldn't establish INTx\n");
		return -1;
	}

	intrstr = pci_intr_string(pc, sc->sc_ihp[0], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	return 0;
}

static int
virtio_setup_interrupts(struct virtio_softc *sc)
{
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
	int error;
	int nmsix;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;

	nmsix = pci_msix_count(sc->sc_pa.pa_pc, sc->sc_pa.pa_tag);
	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);

	/* We need at least two: one for config and the other for queues */
	if ((sc->sc_flags & VIRTIO_F_PCI_INTR_MSIX) == 0 || nmsix < 2) {
		/* Try INTx only */
		max_type = PCI_INTR_TYPE_INTX;
		counts[PCI_INTR_TYPE_INTX] = 1;
	} else {
		/* Try MSI-X first and INTx second */
		max_type = PCI_INTR_TYPE_MSIX;
		counts[PCI_INTR_TYPE_MSIX] = 2;
		counts[PCI_INTR_TYPE_MSI] = 0;
		counts[PCI_INTR_TYPE_INTX] = 1;
	}

 retry:
	error = pci_intr_alloc(&sc->sc_pa, &sc->sc_ihp, counts, max_type);
	if (error != 0) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return -1;
	}

	if (pci_intr_type(pc, sc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
		sc->sc_ihs = kmem_alloc(sizeof(*sc->sc_ihs) * 2,
		    KM_SLEEP);
		if (sc->sc_ihs == NULL) {
			pci_intr_release(pc, sc->sc_ihp, 2);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		error = virtio_setup_msix_interrupts(sc, &sc->sc_pa);
		if (error != 0) {
			kmem_free(sc->sc_ihs, sizeof(*sc->sc_ihs) * 2);
			pci_intr_release(pc, sc->sc_ihp, 2);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		sc->sc_ihs_num = 2;
		sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
	} else if (pci_intr_type(pc, sc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
		sc->sc_ihs = kmem_alloc(sizeof(*sc->sc_ihs) * 1,
		    KM_SLEEP);
		if (sc->sc_ihs == NULL) {
			pci_intr_release(pc, sc->sc_ihp, 1);
			return -1;
		}

		error = virtio_setup_intx_interrupt(sc, &sc->sc_pa);
		if (error != 0) {
			kmem_free(sc->sc_ihs, sizeof(*sc->sc_ihs) * 1);
			pci_intr_release(pc, sc->sc_ihp, 1);
			return -1;
		}

		sc->sc_ihs_num = 1;
		sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	}

	return 0;
}
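
/*
 * Attach: map BAR0 (the legacy virtio I/O space), reset the device and
 * announce ACKNOWLEDGE and DRIVER status, then look for a matching child
 * driver via virtio_rescan().
 */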

static void
virtio_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_softc *sc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	pcireg_t id;

	revision = PCI_REVISION(pa->pa_class);
	if (revision != 0) {
		aprint_normal(": unknown revision 0x%02x; giving up\n",
		    revision);
		return;
	}
	aprint_normal("\n");
	aprint_naive("\n");

	/* the PCI subsystem ID identifies the virtio device type */
	id = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
	aprint_normal_dev(self, "Virtio %s Device (rev. 0x%02x)\n",
	    (PCI_SUBSYS_ID(id) < NDEVNAMES ?
	     virtio_device_name[PCI_SUBSYS_ID(id)] : "Unknown"),
	    revision);

	sc->sc_dev = self;
	sc->sc_pc = pc;
	sc->sc_tag = tag;
	sc->sc_iot = pa->pa_iot;
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;

	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return;
	}

	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	/* XXX: use softc as aux... */
	sc->sc_childdevid = PCI_SUBSYS_ID(id);
	sc->sc_child = NULL;
	sc->sc_pa = *pa;
	virtio_rescan(self, "virtio", 0);
	return;
}

/* ARGSUSED */
static int
virtio_rescan(device_t self, const char *attr, const int *scan_flags)
{
	struct virtio_softc *sc;
	int r;

	sc = device_private(self);
	if (sc->sc_child)	/* Child already attached? */
		return 0;
	config_found_ia(self, attr, sc, NULL);
	if (sc->sc_child == NULL) {
		aprint_error_dev(self,
		    "no matching child driver; not configured\n");
		return 0;
	}
	if (sc->sc_child == (void*)1) {	/* this indicates an error */
		aprint_error_dev(self,
		    "virtio configuration failed\n");
		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return 0;
	}

	r = virtio_setup_interrupts(sc);
	if (r != 0) {
		aprint_error_dev(self, "failed to setup interrupts\n");
		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return 0;
	}

	sc->sc_soft_ih = NULL;
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_SOFTINT) {
		u_int flags = SOFTINT_NET;
		if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
			flags |= SOFTINT_MPSAFE;

		sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc);
		if (sc->sc_soft_ih == NULL)
			aprint_error(": failed to establish soft interrupt\n");
	}

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);

	return 0;
}
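
/*
 * Detach: the child driver is detached first and must already have freed
 * all of its virtqueues (sc_vqs == NULL); only then are the interrupt
 * handlers and the I/O mapping torn down.
 */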

static int
virtio_detach(device_t self, int flags)
{
	struct virtio_softc *sc = device_private(self);
	int r;
	int i;

	if (sc->sc_child != 0 && sc->sc_child != (void*)1) {
		r = config_detach(sc->sc_child, flags);
		if (r)
			return r;
	}
	KASSERT(sc->sc_child == 0 || sc->sc_child == (void*)1);
	KASSERT(sc->sc_vqs == 0);
	for (i = 0; i < sc->sc_ihs_num; i++) {
		if (sc->sc_ihs[i] == NULL)
			continue;
		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
	}
	pci_intr_release(sc->sc_pc, sc->sc_ihp, sc->sc_ihs_num);
	if (sc->sc_ihs != NULL)
		kmem_free(sc->sc_ihs, sizeof(*sc->sc_ihs) * sc->sc_ihs_num);
	sc->sc_ihs_num = 0;
	if (sc->sc_iosize)
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_iosize);
	sc->sc_iosize = 0;

	return 0;
}

/*
 * Reset the device.
 */
/*
 * To reset the device to a known state, do the following:
 *	virtio_reset(sc);	     // this will stop the device activity
 *	<dequeue finished requests>; // virtio_dequeue() still can be called
 *	<revoke pending requests in the vqs if any>;
 *	virtio_reinit_start(sc);     // dequeue prohibited
 *	newfeatures = virtio_negotiate_features(sc, requestedfeatures);
 *	<some other initialization>;
 *	virtio_reinit_end(sc);	     // device activated; enqueue allowed
 * Once attached, feature negotiation is only allowed again after virtio_reset.
 */
void
virtio_reset(struct virtio_softc *sc)
{
	virtio_device_reset(sc);
}

void
virtio_reinit_start(struct virtio_softc *sc)
{
	int i;

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
	for (i = 0; i < sc->sc_nvqs; i++) {
		int n;
		struct virtqueue *vq = &sc->sc_vqs[i];
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SELECT,
		    vq->vq_index);
		n = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SIZE);
		if (n == 0)	/* vq disappeared */
			continue;
		if (n != vq->vq_num) {
			panic("%s: virtqueue size changed, vq index %d\n",
			    device_xname(sc->sc_dev),
			    vq->vq_index);
		}
		virtio_init_vq(sc, vq, true);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_ADDRESS,
		    (vq->vq_dmamap->dm_segs[0].ds_addr
		     / VIRTIO_PAGE_SIZE));
	}

	/* MSI-X has more than one handler, whereas INTx has just one */
	if (sc->sc_ihs_num > 1) {
		if (virtio_setup_msix_vectors(sc) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't setup MSI-X vectors\n");
			return;
		}
	}
}

void
virtio_reinit_end(struct virtio_softc *sc)
{
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
}

/*
 * Feature negotiation.
 */
uint32_t
virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
{
	uint32_t r;

	if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
	    !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
	r = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_FEATURES);
	r &= guest_features;
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_GUEST_FEATURES, r);
	sc->sc_features = r;
	if (r & VIRTIO_F_RING_INDIRECT_DESC)
		sc->sc_indirect = true;
	else
		sc->sc_indirect = false;

	return r;
}
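
/*
 * A minimal sketch of how a child driver might negotiate features; the
 * feature macro below is hypothetical and "vsc" stands for the parent's
 * virtio_softc:
 *
 *	uint32_t features;
 *
 *	features = virtio_negotiate_features(vsc,
 *	    VIRTIO_EXAMPLE_F_SOME_FEATURE);
 *	if (features & VIRTIO_EXAMPLE_F_SOME_FEATURE) {
 *		// the host accepted the feature
 *	}
 *	// the accepted set is also kept in vsc->sc_features
 */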

/*
 * Device configuration registers.
 */
uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, int index)
{
	return bus_space_read_1(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, int index)
{
	return bus_space_read_2(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, int index)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, int index)
{
	uint64_t r;

	r = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index + sizeof(uint32_t));
	r <<= 32;
	r += bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
	return r;
}

void
virtio_write_device_config_1(struct virtio_softc *sc,
    int index, uint8_t value)
{
	bus_space_write_1(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_2(struct virtio_softc *sc,
    int index, uint16_t value)
{
	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_4(struct virtio_softc *sc,
    int index, uint32_t value)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_8(struct virtio_softc *sc,
    int index, uint64_t value)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index,
	    value & 0xffffffff);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index + sizeof(uint32_t),
	    value >> 32);
}
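
/*
 * The 64-bit accessors above are composed of two 32-bit accesses and are
 * therefore not atomic with respect to the device.  A sketch of a child
 * driver reading one of its device-specific fields (the offset macro is
 * hypothetical):
 *
 *	uint64_t value = virtio_read_device_config_8(vsc,
 *	    VIRTIO_EXAMPLE_CONFIG_SOME_FIELD);
 */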

/*
 * Interrupt handler.
 */
static int
virtio_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (sc->sc_config_change != NULL))
		r = (sc->sc_config_change)(sc);
	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_msix_queue_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_msix_config_intr(void *arg)
{
	struct virtio_softc *sc = arg;

	/* TODO: handle events */
	aprint_debug_dev(sc->sc_dev, "%s\n", __func__);
	return 1;
}

static void
virtio_soft_intr(void *arg)
{
	struct virtio_softc *sc = arg;

	KASSERT(sc->sc_intrhand != NULL);

	(sc->sc_intrhand)(sc);
}

/*
 * dmamap sync operations for a virtqueue.
 */
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	/* availoffset == sizeof(vring_desc)*vq_num */
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
	    ops);
}

static inline void
vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_availoffset,
	    offsetof(struct vring_avail, ring)
	     + vq->vq_num * sizeof(uint16_t),
	    ops);
}

static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_usedoffset,
	    offsetof(struct vring_used, ring)
	     + vq->vq_num * sizeof(struct vring_used_elem),
	    ops);
}

static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    int ops)
{
	int offset = vq->vq_indirectoffset
	    + sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    offset, sizeof(struct vring_desc) * vq->vq_maxnsegs,
	    ops);
}

/*
 * Can be used as sc_intrhand.
 */
/*
 * Scan vq, bus_dmamap_sync for the vqs (not for the payload),
 * and calls (*vq_done)() if some entries are consumed.
 */
int
virtio_vq_intr(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int i, r = 0;

	for (i = 0; i < sc->sc_nvqs; i++) {
		vq = &sc->sc_vqs[i];
		if (vq->vq_queued) {
			vq->vq_queued = 0;
			vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
		}
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		membar_consumer();
		if (vq->vq_used_idx != vq->vq_used->idx) {
			if (vq->vq_done)
				r |= (vq->vq_done)(vq);
		}
	}

	return r;
}
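
/*
 * A minimal sketch of a child driver's (*vq_done)() handler built on
 * virtio_dequeue()/virtio_dequeue_commit(); the request-completion step
 * is driver specific and only hinted at here:
 *
 *	static int
 *	example_vq_done(struct virtqueue *vq)
 *	{
 *		struct virtio_softc *vsc = vq->vq_owner;
 *		int slot, len, more = 0;
 *
 *		while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
 *			// bus_dmamap_sync() the payload and complete
 *			// the request associated with this slot
 *			virtio_dequeue_commit(vsc, vq, slot);
 *			more = 1;
 *		}
 *		return more;
 *	}
 */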

/*
 * Start/stop vq interrupt.  No guarantee.
 */
void
virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}

void
virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}

/*
 * Initialize vq structure.
 */
static void
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq,
    const bool reinit)
{
	int i, j;
	int vq_size = vq->vq_num;

	memset(vq->vq_vaddr, 0, vq->vq_bytesize);

	/* build the indirect descriptor chain */
	if (vq->vq_indirect != NULL) {
		struct vring_desc *vd;

		for (i = 0; i < vq_size; i++) {
			vd = vq->vq_indirect;
			vd += vq->vq_maxnsegs * i;
			for (j = 0; j < vq->vq_maxnsegs-1; j++)
				vd[j].next = j + 1;
		}
	}

	/* free slot management */
	SIMPLEQ_INIT(&vq->vq_freelist);
	for (i = 0; i < vq_size; i++) {
		SIMPLEQ_INSERT_TAIL(&vq->vq_freelist,
		    &vq->vq_entries[i], qe_list);
		vq->vq_entries[i].qe_index = i;
	}
	if (!reinit)
		mutex_init(&vq->vq_freelist_lock, MUTEX_SPIN, sc->sc_ipl);

	/* enqueue/dequeue status */
	vq->vq_avail_idx = 0;
	vq->vq_used_idx = 0;
	vq->vq_queued = 0;
	if (!reinit) {
		mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl);
		mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
	vq->vq_queued++;
}

/*
 * Allocate/free a vq.
 */
int
virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int index,
    int maxsegsize, int maxnsegs, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
	int rsegs, r;
#define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1))&	\
				 ~(VIRTIO_PAGE_SIZE-1))

	memset(vq, 0, sizeof(*vq));

	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, index);
	vq_size = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SIZE);
	if (vq_size == 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue does not exist, index %d for %s\n",
		    index, name);
		goto err;
	}
	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc)*vq_size
	    + sizeof(uint16_t)*(2+vq_size));
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t)*2
	    + sizeof(struct vring_used_elem)*vq_size);
	/* allocsize3: indirect table */
	if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
		allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
	else
		allocsize3 = 0;
	allocsize = allocsize1 + allocsize2 + allocsize3;

	/* alloc and map the memory */
	r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
	    &vq->vq_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s allocation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], 1, allocsize,
	    &vq->vq_vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s map failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
	    BUS_DMA_NOWAIT, &vq->vq_dmamap);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap creation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_vaddr, allocsize, NULL, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap load failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}

	/* set the vq address */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS,
	    (vq->vq_dmamap->dm_segs[0].ds_addr
	     / VIRTIO_PAGE_SIZE));

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_desc = vq->vq_vaddr;
	vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
	vq->vq_avail = (void*)(((char*)vq->vq_desc) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void*)(((char*)vq->vq_desc) + vq->vq_usedoffset);
	if (allocsize3 > 0) {
		vq->vq_indirectoffset = allocsize1 + allocsize2;
		vq->vq_indirect = (void*)(((char*)vq->vq_desc)
		    + vq->vq_indirectoffset);
	}
	vq->vq_bytesize = allocsize;
	vq->vq_maxsegsize = maxsegsize;
	vq->vq_maxnsegs = maxnsegs;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof(struct vq_entry)*vq_size,
	    KM_NOSLEEP);
	if (vq->vq_entries == NULL) {
		r = ENOMEM;
		goto err;
	}

	virtio_init_vq(sc, vq, false);

	aprint_verbose_dev(sc->sc_dev,
	    "allocated %u bytes for virtqueue %d for %s, "
	    "size %d\n", allocsize, index, name, vq_size);
	if (allocsize3 > 0)
		aprint_verbose_dev(sc->sc_dev,
		    "using %d bytes (%d entries) "
		    "indirect descriptors\n",
		    allocsize3, maxnsegs * vq_size);
	return 0;

err:
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, 0);
	if (vq->vq_dmamap)
		bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	if (vq->vq_vaddr)
		bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
	if (vq->vq_segs[0].ds_addr)
		bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	memset(vq, 0, sizeof(*vq));

	return -1;
}
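
/*
 * A minimal sketch of a child driver allocating a virtqueue at attach
 * time; "vsc" is the parent's virtio_softc, "sc" the hypothetical child
 * softc, and the segment limits are illustrative only:
 *
 *	if (virtio_alloc_vq(vsc, &sc->sc_vq, 0, MAXPHYS,
 *	    MAXPHYS / VIRTIO_PAGE_SIZE + 2, "example requests") != 0)
 *		goto err;
 *	sc->sc_vq.vq_done = example_vq_done;
 *	vsc->sc_vqs = &sc->sc_vq;
 *	vsc->sc_nvqs = 1;
 *	vsc->sc_config_change = NULL;
 *	vsc->sc_intrhand = virtio_vq_intr;
 */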

int
virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	struct vq_entry *qe;
	int i = 0;

	/* device must be already deactivated */
	/* confirm the vq is empty */
	SIMPLEQ_FOREACH(qe, &vq->vq_freelist, qe_list) {
		i++;
	}
	if (i != vq->vq_num) {
		printf("%s: freeing non-empty vq, index %d\n",
		    device_xname(sc->sc_dev), vq->vq_index);
		return EBUSY;
	}

	/* tell device that there's no virtqueue any longer */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, vq->vq_index);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, 0);

	kmem_free(vq->vq_entries, sizeof(*vq->vq_entries) * vq->vq_num);
	bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
	bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	mutex_destroy(&vq->vq_freelist_lock);
	mutex_destroy(&vq->vq_uring_lock);
	mutex_destroy(&vq->vq_aring_lock);
	memset(vq, 0, sizeof(*vq));

	return 0;
}

/*
 * Free descriptor management.
 */
static struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
	struct vq_entry *qe;

	mutex_enter(&vq->vq_freelist_lock);
	if (SIMPLEQ_EMPTY(&vq->vq_freelist)) {
		mutex_exit(&vq->vq_freelist_lock);
		return NULL;
	}
	qe = SIMPLEQ_FIRST(&vq->vq_freelist);
	SIMPLEQ_REMOVE_HEAD(&vq->vq_freelist, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return qe;
}

static void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	mutex_enter(&vq->vq_freelist_lock);
	SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return;
}

/*
 * Enqueue several dmamaps as a single request.
 */
/*
 * Typical usage:
 *  <queue size> instances of the following are kept in arrays, one per slot:
 *  - command blocks (in dmamem) should be pre-allocated and mapped
 *  - dmamaps for command blocks should be pre-allocated and loaded
 *  - dmamaps for payload should be pre-allocated
 *	r = virtio_enqueue_prep(sc, vq, &slot);		// allocate a slot
 *	if (r)		// currently 0 or EAGAIN
 *	  return r;
 *	r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
 *	if (r) {
 *	  virtio_enqueue_abort(sc, vq, slot);
 *	  return r;
 *	}
 *	r = virtio_enqueue_reserve(sc, vq, slot,
 *				   dmamap_payload[slot]->dm_nsegs+1);
 *						// ^ +1 for command
 *	if (r) {	// currently 0 or EAGAIN
 *	  bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *	  return r;	// do not call abort()
 *	}
 *	<setup and prepare commands>
 *	bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_sync(dmat, dmamap_payload[slot],...);
 *	virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false);
 *	virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
 *	virtio_enqueue_commit(sc, vq, slot, true);
 */
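
/*
 * virtio_enqueue_prep() and virtio_enqueue_reserve() return EAGAIN when
 * the ring is temporarily out of free slots; callers are expected to try
 * again later, once virtio_dequeue_commit() has returned slots to the
 * freelist.
 */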

/*
 * enqueue_prep: allocate a slot number
 */
int
virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp)
{
	struct vq_entry *qe1;

	KASSERT(slotp != NULL);

	qe1 = vq_alloc_entry(vq);
	if (qe1 == NULL)
		return EAGAIN;
	/* next slot is not allocated yet */
	qe1->qe_next = -1;
	*slotp = qe1->qe_index;

	return 0;
}

/*
 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
 */
int
virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq,
    int slot, int nsegs)
{
	int indirect;
	struct vq_entry *qe1 = &vq->vq_entries[slot];

	KASSERT(qe1->qe_next == -1);
	KASSERT(1 <= nsegs && nsegs <= vq->vq_num);

	if ((vq->vq_indirect != NULL) &&
	    (nsegs >= MINSEG_INDIRECT) &&
	    (nsegs <= vq->vq_maxnsegs))
		indirect = 1;
	else
		indirect = 0;
	qe1->qe_indirect = indirect;

	if (indirect) {
		struct vring_desc *vd;
		int i;

		vd = &vq->vq_desc[qe1->qe_index];
		vd->addr = vq->vq_dmamap->dm_segs[0].ds_addr
		    + vq->vq_indirectoffset;
		vd->addr += sizeof(struct vring_desc)
		    * vq->vq_maxnsegs * qe1->qe_index;
		vd->len = sizeof(struct vring_desc) * nsegs;
		vd->flags = VRING_DESC_F_INDIRECT;

		vd = vq->vq_indirect;
		vd += vq->vq_maxnsegs * qe1->qe_index;
		qe1->qe_desc_base = vd;

		for (i = 0; i < nsegs-1; i++) {
			vd[i].flags = VRING_DESC_F_NEXT;
		}
		vd[i].flags = 0;
		qe1->qe_next = 0;

		return 0;
	} else {
		struct vring_desc *vd;
		struct vq_entry *qe;
		int i, s;

		vd = &vq->vq_desc[0];
		qe1->qe_desc_base = vd;
		qe1->qe_next = qe1->qe_index;
		s = slot;
		for (i = 0; i < nsegs - 1; i++) {
			qe = vq_alloc_entry(vq);
			if (qe == NULL) {
				vd[s].flags = 0;
				virtio_enqueue_abort(sc, vq, slot);
				return EAGAIN;
			}
			vd[s].flags = VRING_DESC_F_NEXT;
			vd[s].next = qe->qe_index;
			s = qe->qe_index;
		}
		vd[s].flags = 0;

		return 0;
	}
}
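
/*
 * For virtio_enqueue() and virtio_enqueue_p(), "write" is from the
 * driver's point of view: when false the descriptor is marked
 * VRING_DESC_F_WRITE, i.e. the device may write into the buffer;
 * when true the buffer is read-only for the device.
 */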

/*
 * enqueue: enqueue a single dmamap.
 */
int
virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int i;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs > 0);

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		vd[s].addr = dmamap->dm_segs[i].ds_addr;
		vd[s].len = dmamap->dm_segs[i].ds_len;
		if (!write)
			vd[s].flags |= VRING_DESC_F_WRITE;
		s = vd[s].next;
	}
	qe1->qe_next = s;

	return 0;
}

int
virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len,
    bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs == 1); /* XXX */
	KASSERT((dmamap->dm_segs[0].ds_len > start) &&
	    (dmamap->dm_segs[0].ds_len >= start + len));

	vd[s].addr = dmamap->dm_segs[0].ds_addr + start;
	vd[s].len = len;
	if (!write)
		vd[s].flags |= VRING_DESC_F_WRITE;
	qe1->qe_next = vd[s].next;

	return 0;
}

/*
 * enqueue_commit: add it to the aring.
 */
int
virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bool notifynow)
{
	struct vq_entry *qe1;

	if (slot < 0) {
		mutex_enter(&vq->vq_aring_lock);
		goto notify;
	}
	vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
	qe1 = &vq->vq_entries[slot];
	if (qe1->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
	mutex_enter(&vq->vq_aring_lock);
	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] = slot;

notify:
	if (notifynow) {
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
		membar_producer();
		vq->vq_avail->idx = vq->vq_avail_idx;
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		membar_producer();
		vq->vq_queued++;
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		membar_consumer();
		if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_QUEUE_NOTIFY,
			    vq->vq_index);
	}
	mutex_exit(&vq->vq_aring_lock);

	return 0;
}

/*
 * enqueue_abort: rollback.
 */
int
virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd;
	int s;

	if (qe->qe_next < 0) {
		vq_free_entry(vq, qe);
		return 0;
	}

	s = slot;
	vd = &vq->vq_desc[0];
	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);
	return 0;
}

/*
 * Dequeue a request.
 */
/*
 * dequeue: dequeue a request from uring; dmamap_sync for uring is
 *	    already done in the interrupt handler.
 */
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
    int *slotp, int *lenp)
{
	uint16_t slot, usedidx;
	struct vq_entry *qe;

	if (vq->vq_used_idx == vq->vq_used->idx)
		return ENOENT;
	mutex_enter(&vq->vq_uring_lock);
	usedidx = vq->vq_used_idx++;
	mutex_exit(&vq->vq_uring_lock);
	usedidx %= vq->vq_num;
	slot = vq->vq_used->ring[usedidx].id;
	qe = &vq->vq_entries[slot];

	if (qe->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);

	if (slotp)
		*slotp = slot;
	if (lenp)
		*lenp = vq->vq_used->ring[usedidx].len;

	return 0;
}

/*
 * dequeue_commit: complete dequeue; the slot is recycled for future use.
 *		   If you forget to call this the slot will be leaked.
 */
int
virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd = &vq->vq_desc[0];
	int s = slot;

	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);

	return 0;
}

MODULE(MODULE_CLASS_DRIVER, virtio, "pci");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}