/*	$NetBSD: virtio.c,v 1.40 2020/05/25 07:37:47 yamaguchi Exp $	*/

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.40 2020/05/25 07:37:47 yamaguchi Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/module.h>

#define VIRTIO_PRIVATE

#include <dev/pci/virtioreg.h>	/* XXX: move to non-pci */
#include <dev/pci/virtiovar.h>	/* XXX: move to non-pci */

#define MINSEG_INDIRECT		2 /* use indirect if nsegs >= this value */

static void	virtio_init_vq(struct virtio_softc *,
		    struct virtqueue *, const bool);

void
virtio_set_status(struct virtio_softc *sc, int status)
{
	sc->sc_ops->set_status(sc, status);
}

/*
 * Reset the device.
 */
/*
 * To reset the device to a known state, do the following:
 *	virtio_reset(sc);	     // this will stop the device activity
 *	<dequeue finished requests>; // virtio_dequeue() still can be called
 *	<revoke pending requests in the vqs if any>;
 *	virtio_reinit_start(sc);     // dequeue prohibited
 *	newfeatures = virtio_negotiate_features(sc, requestedfeatures);
 *	<some other initialization>;
 *	virtio_reinit_end(sc);	     // device activated; enqueue allowed
 * Once attached, feature negotiation can only be allowed after virtio_reset.
 */
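/*
 * A minimal sketch of that sequence, assuming a hypothetical child driver
 * that tracks its own pending requests ("vsc", "vq" and the revocation
 * step are illustrative, not part of this API):
 *
 *	int slot;
 *
 *	virtio_reset(vsc);
 *	while (virtio_dequeue(vsc, vq, &slot, NULL) == 0)
 *		virtio_dequeue_commit(vsc, vq, slot);
 *	// <revoke the requests that never completed>
 *	virtio_reinit_start(vsc);
 *	(void)virtio_negotiate_features(vsc, requestedfeatures);
 *	virtio_reinit_end(vsc);
 */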
void
virtio_reset(struct virtio_softc *sc)
{
	virtio_device_reset(sc);
}

void
virtio_reinit_start(struct virtio_softc *sc)
{
	int i;

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
	for (i = 0; i < sc->sc_nvqs; i++) {
		int n;
		struct virtqueue *vq = &sc->sc_vqs[i];
		n = sc->sc_ops->read_queue_size(sc, vq->vq_index);
		if (n == 0)	/* vq disappeared */
			continue;
		if (n != vq->vq_num) {
			panic("%s: virtqueue size changed, vq index %d\n",
			    device_xname(sc->sc_dev),
			    vq->vq_index);
		}
		virtio_init_vq(sc, vq, true);
		sc->sc_ops->setup_queue(sc, vq->vq_index,
		    vq->vq_dmamap->dm_segs[0].ds_addr / VIRTIO_PAGE_SIZE);
	}
}

void
virtio_reinit_end(struct virtio_softc *sc)
{
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
}

/*
 * Feature negotiation.
 */
uint32_t
virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
{
	uint32_t r;

	if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
	    !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
	r = sc->sc_ops->neg_features(sc, guest_features);
	sc->sc_features = r;
	if (r & VIRTIO_F_RING_INDIRECT_DESC)
		sc->sc_indirect = true;
	else
		sc->sc_indirect = false;

	return r;
}

/*
 * Device configuration registers.
 */
uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, int index)
{
	return sc->sc_ops->read_dev_cfg_1(sc, index);
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, int index)
{
	return sc->sc_ops->read_dev_cfg_2(sc, index);
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, int index)
{
	return sc->sc_ops->read_dev_cfg_4(sc, index);
}

uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, int index)
{
	return sc->sc_ops->read_dev_cfg_8(sc, index);
}

void
virtio_write_device_config_1(struct virtio_softc *sc,
    int index, uint8_t value)
{
	sc->sc_ops->write_dev_cfg_1(sc, index, value);
}

void
virtio_write_device_config_2(struct virtio_softc *sc,
    int index, uint16_t value)
{
	sc->sc_ops->write_dev_cfg_2(sc, index, value);
}

void
virtio_write_device_config_4(struct virtio_softc *sc,
    int index, uint32_t value)
{
	sc->sc_ops->write_dev_cfg_4(sc, index, value);
}

void
virtio_write_device_config_8(struct virtio_softc *sc,
    int index, uint64_t value)
{
	sc->sc_ops->write_dev_cfg_8(sc, index, value);
}
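
/*
 * Config space is read and written in fixed-width units.  As an
 * illustrative (not normative) sketch, a block driver would fetch a
 * 64-bit capacity field living at config offset 0 like this:
 *
 *	uint64_t capacity;
 *
 *	capacity = virtio_read_device_config_8(vsc, 0);
 */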

/*
 * Interrupt handler.
 */
static void
virtio_soft_intr(void *arg)
{
	struct virtio_softc *sc = arg;

	KASSERT(sc->sc_intrhand != NULL);

	(sc->sc_intrhand)(sc);
}

/*
 * dmamap sync operations for a virtqueue.
 */
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	/* availoffset == sizeof(vring_desc)*vq_num */
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
	    ops);
}

static inline void
vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_availoffset,
	    offsetof(struct vring_avail, ring)
	     + vq->vq_num * sizeof(uint16_t),
	    ops);
}

static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_usedoffset,
	    offsetof(struct vring_used, ring)
	     + vq->vq_num * sizeof(struct vring_used_elem),
	    ops);
}

static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    int ops)
{
	int offset = vq->vq_indirectoffset
	    + sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    offset, sizeof(struct vring_desc) * vq->vq_maxnsegs,
	    ops);
}

/*
 * Can be used as sc_intrhand.
 */
/*
 * Scans the vq, does bus_dmamap_sync for the rings (not for the
 * payload), and calls (*vq_done)() if some entries have been consumed.
 */
static int
virtio_vq_intr_common(struct virtqueue *vq)
{
	struct virtio_softc *sc = vq->vq_owner;
	int r = 0;

	if (vq->vq_queued) {
		vq->vq_queued = 0;
		vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
	}
	vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
	membar_consumer();
	if (vq->vq_used_idx != vq->vq_used->idx) {
		if (vq->vq_done)
			r |= (vq->vq_done)(vq);
	}

	return r;
}

int
virtio_vq_intr(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int i, r = 0;

	for (i = 0; i < sc->sc_nvqs; i++) {
		vq = &sc->sc_vqs[i];
		r |= virtio_vq_intr_common(vq);
	}

	return r;
}

static int
virtio_vq_mq_intr(void *arg)
{
	struct virtqueue *vq = arg;

	return virtio_vq_intr_common(vq);
}

/*
 * Start/stop vq interrupt.  No guarantee: the flag is only a hint to
 * the host.
 */
void
virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}

void
virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}
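
/*
 * A hedged sketch of a (*vq_done)() handler that a child driver might
 * install; it masks further interrupts while draining the used ring
 * (all names except the virtio_* calls are hypothetical):
 *
 *	static int
 *	xx_vq_done(struct virtqueue *vq)
 *	{
 *		struct virtio_softc *vsc = vq->vq_owner;
 *		int slot, len, r = 0;
 *
 *		virtio_stop_vq_intr(vsc, vq);
 *		while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
 *			// <sync and complete the request in `slot`>
 *			virtio_dequeue_commit(vsc, vq, slot);
 *			r = 1;
 *		}
 *		virtio_start_vq_intr(vsc, vq);
 *
 *		return r;
 *	}
 */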

/*
 * Initialize vq structure.
 */
static void
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq,
    const bool reinit)
{
	int i, j;
	int vq_size = vq->vq_num;

	memset(vq->vq_vaddr, 0, vq->vq_bytesize);

	/* build the indirect descriptor chain */
	if (vq->vq_indirect != NULL) {
		struct vring_desc *vd;

		for (i = 0; i < vq_size; i++) {
			vd = vq->vq_indirect;
			vd += vq->vq_maxnsegs * i;
			for (j = 0; j < vq->vq_maxnsegs-1; j++) {
				vd[j].next = j + 1;
			}
		}
	}

	/* free slot management */
	SIMPLEQ_INIT(&vq->vq_freelist);
	for (i = 0; i < vq_size; i++) {
		SIMPLEQ_INSERT_TAIL(&vq->vq_freelist,
		    &vq->vq_entries[i], qe_list);
		vq->vq_entries[i].qe_index = i;
	}
	if (!reinit)
		mutex_init(&vq->vq_freelist_lock, MUTEX_SPIN, sc->sc_ipl);

	/* enqueue/dequeue status */
	vq->vq_avail_idx = 0;
	vq->vq_used_idx = 0;
	vq->vq_queued = 0;
	if (!reinit) {
		mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl);
		mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
	vq->vq_queued++;
}

/*
 * Allocate/free a vq.
 */
int
virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int index,
    int maxsegsize, int maxnsegs, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
	int rsegs, r;
#define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1))&	\
				 ~(VIRTIO_PAGE_SIZE-1))

	/* Make sure callers allocate vqs in order */
	KASSERT(sc->sc_nvqs == index);

	memset(vq, 0, sizeof(*vq));

	vq_size = sc->sc_ops->read_queue_size(sc, index);
	if (vq_size == 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue doesn't exist, index %d for %s\n",
		    index, name);
		goto err;
	}
	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc)*vq_size
	    + sizeof(uint16_t)*(2+vq_size));
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t)*2
	    + sizeof(struct vring_used_elem)*vq_size);
	/* allocsize3: indirect table */
	if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
		allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
	else
		allocsize3 = 0;
	allocsize = allocsize1 + allocsize2 + allocsize3;

	/* alloc and map the memory */
	r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
	    &vq->vq_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s allocation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], 1, allocsize,
	    &vq->vq_vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s map failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
	    BUS_DMA_NOWAIT, &vq->vq_dmamap);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap creation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_vaddr, allocsize, NULL, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap load failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}

	/* set the vq address */
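	/*
	 * The legacy interface takes a page frame number rather than a
	 * byte address, hence the division by VIRTIO_PAGE_SIZE below.
	 */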
	sc->sc_ops->setup_queue(sc, index,
	    vq->vq_dmamap->dm_segs[0].ds_addr / VIRTIO_PAGE_SIZE);

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_intrhand = virtio_vq_mq_intr;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_desc = vq->vq_vaddr;
	vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
	vq->vq_avail = (void*)(((char*)vq->vq_desc) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void*)(((char*)vq->vq_desc) + vq->vq_usedoffset);
	if (allocsize3 > 0) {
		vq->vq_indirectoffset = allocsize1 + allocsize2;
		vq->vq_indirect = (void*)(((char*)vq->vq_desc)
		    + vq->vq_indirectoffset);
	}
	vq->vq_bytesize = allocsize;
	vq->vq_maxsegsize = maxsegsize;
	vq->vq_maxnsegs = maxnsegs;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof(struct vq_entry)*vq_size,
	    KM_SLEEP);
	virtio_init_vq(sc, vq, false);

	aprint_verbose_dev(sc->sc_dev,
	    "allocated %u bytes for virtqueue %d for %s, "
	    "size %d\n", allocsize, index, name, vq_size);
	if (allocsize3 > 0)
		aprint_verbose_dev(sc->sc_dev,
		    "using %d bytes (%d entries) "
		    "indirect descriptors\n",
		    allocsize3, maxnsegs * vq_size);

	sc->sc_nvqs++;

	return 0;

err:
	sc->sc_ops->setup_queue(sc, index, 0);
	if (vq->vq_dmamap)
		bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	if (vq->vq_vaddr)
		bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
	if (vq->vq_segs[0].ds_addr)
		bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	memset(vq, 0, sizeof(*vq));

	return -1;
}

int
virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	struct vq_entry *qe;
	int i = 0;

	/* device must be already deactivated */
	/* confirm the vq is empty */
	SIMPLEQ_FOREACH(qe, &vq->vq_freelist, qe_list) {
		i++;
	}
	if (i != vq->vq_num) {
		printf("%s: freeing non-empty vq, index %d\n",
		    device_xname(sc->sc_dev), vq->vq_index);
		return EBUSY;
	}

	/* tell device that there's no virtqueue any longer */
	sc->sc_ops->setup_queue(sc, vq->vq_index, 0);

	kmem_free(vq->vq_entries, sizeof(*vq->vq_entries) * vq->vq_num);
	bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
	bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	mutex_destroy(&vq->vq_freelist_lock);
	mutex_destroy(&vq->vq_uring_lock);
	mutex_destroy(&vq->vq_aring_lock);
	memset(vq, 0, sizeof(*vq));

	sc->sc_nvqs--;

	return 0;
}
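
/*
 * An attach-time allocation sketch for a hypothetical child driver with
 * a single queue ("xsc", XX_MAX_SEGS and xx_vq_done are illustrative):
 *
 *	if (virtio_alloc_vq(vsc, &xsc->sc_vq, 0, MAXPHYS,
 *	    XX_MAX_SEGS + 1, "xx requests") != 0)
 *		return;
 *	xsc->sc_vq.vq_done = xx_vq_done;
 *	virtio_start_vq_intr(vsc, &xsc->sc_vq);
 */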

/*
 * Free descriptor management.
 */
static struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
	struct vq_entry *qe;

	mutex_enter(&vq->vq_freelist_lock);
	if (SIMPLEQ_EMPTY(&vq->vq_freelist)) {
		mutex_exit(&vq->vq_freelist_lock);
		return NULL;
	}
	qe = SIMPLEQ_FIRST(&vq->vq_freelist);
	SIMPLEQ_REMOVE_HEAD(&vq->vq_freelist, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return qe;
}

static void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	mutex_enter(&vq->vq_freelist_lock);
	SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list);
	mutex_exit(&vq->vq_freelist_lock);
}

/*
 * Enqueue several dmamaps as a single request.
 */
/*
 * Typical usage:
 *  <queue size> instances of the following are stored in arrays:
 *  - command blocks (in dmamem) should be pre-allocated and mapped
 *  - dmamaps for command blocks should be pre-allocated and loaded
 *  - dmamaps for payload should be pre-allocated
 *	r = virtio_enqueue_prep(sc, vq, &slot);	    // allocate a slot
 *	if (r)		// currently 0 or EAGAIN
 *		return r;
 *	r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
 *	if (r) {
 *		virtio_enqueue_abort(sc, vq, slot);
 *		return r;
 *	}
 *	r = virtio_enqueue_reserve(sc, vq, slot,
 *	    dmamap_payload[slot]->dm_nsegs+1);
 *						// ^ +1 for command
 *	if (r) {	// currently 0 or EAGAIN
 *		bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *		return r;	// do not call abort()
 *	}
 *	<setup and prepare commands>
 *	bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_sync(dmat, dmamap_payload[slot],...);
 *	virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false);
 *	virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
 *	virtio_enqueue_commit(sc, vq, slot, true);
 */

/*
 * enqueue_prep: allocate a slot number
 */
int
virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp)
{
	struct vq_entry *qe1;

	KASSERT(slotp != NULL);

	qe1 = vq_alloc_entry(vq);
	if (qe1 == NULL)
		return EAGAIN;
	/* next slot is not allocated yet */
	qe1->qe_next = -1;
	*slotp = qe1->qe_index;

	return 0;
}

/*
 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
 */
int
virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq,
    int slot, int nsegs)
{
	int indirect;
	struct vq_entry *qe1 = &vq->vq_entries[slot];

	KASSERT(qe1->qe_next == -1);
	KASSERT(1 <= nsegs && nsegs <= vq->vq_num);

	if ((vq->vq_indirect != NULL) &&
	    (nsegs >= MINSEG_INDIRECT) &&
	    (nsegs <= vq->vq_maxnsegs))
		indirect = 1;
	else
		indirect = 0;
	qe1->qe_indirect = indirect;

	if (indirect) {
		struct vring_desc *vd;
		int i;

		vd = &vq->vq_desc[qe1->qe_index];
		vd->addr = vq->vq_dmamap->dm_segs[0].ds_addr
		    + vq->vq_indirectoffset;
		vd->addr += sizeof(struct vring_desc)
		    * vq->vq_maxnsegs * qe1->qe_index;
		vd->len = sizeof(struct vring_desc) * nsegs;
		vd->flags = VRING_DESC_F_INDIRECT;

		vd = vq->vq_indirect;
		vd += vq->vq_maxnsegs * qe1->qe_index;
		qe1->qe_desc_base = vd;

		for (i = 0; i < nsegs-1; i++) {
			vd[i].flags = VRING_DESC_F_NEXT;
		}
		vd[i].flags = 0;
		qe1->qe_next = 0;

		return 0;
	} else {
		struct vring_desc *vd;
		struct vq_entry *qe;
		int i, s;

		vd = &vq->vq_desc[0];
		qe1->qe_desc_base = vd;
		qe1->qe_next = qe1->qe_index;
		s = slot;
		for (i = 0; i < nsegs - 1; i++) {
			qe = vq_alloc_entry(vq);
			if (qe == NULL) {
				vd[s].flags = 0;
				virtio_enqueue_abort(sc, vq, slot);
				return EAGAIN;
			}
			vd[s].flags = VRING_DESC_F_NEXT;
			vd[s].next = qe->qe_index;
			s = qe->qe_index;
		}
		vd[s].flags = 0;

		return 0;
	}
}

/*
 * enqueue: enqueue a single dmamap.
 */
int
virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int i;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs > 0);

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		vd[s].addr = dmamap->dm_segs[i].ds_addr;
		vd[s].len = dmamap->dm_segs[i].ds_len;
		if (!write)
			vd[s].flags |= VRING_DESC_F_WRITE;
		s = vd[s].next;
	}
	qe1->qe_next = s;

	return 0;
}

int
virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len,
    bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs == 1); /* XXX */
	KASSERT((dmamap->dm_segs[0].ds_len > start) &&
	    (dmamap->dm_segs[0].ds_len >= start + len));

	vd[s].addr = dmamap->dm_segs[0].ds_addr + start;
	vd[s].len = len;
	if (!write)
		vd[s].flags |= VRING_DESC_F_WRITE;
	qe1->qe_next = vd[s].next;

	return 0;
}

/*
 * enqueue_commit: add it to the aring.
 */
int
virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bool notifynow)
{
	struct vq_entry *qe1;

	if (slot < 0) {
		mutex_enter(&vq->vq_aring_lock);
		goto notify;
	}
	vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
	qe1 = &vq->vq_entries[slot];
	if (qe1->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
	mutex_enter(&vq->vq_aring_lock);
	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] = slot;

notify:
	if (notifynow) {
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
		membar_producer();
		vq->vq_avail->idx = vq->vq_avail_idx;
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		membar_producer();
		vq->vq_queued++;
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		membar_consumer();
		if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
			sc->sc_ops->kick(sc, vq->vq_index);
	}
	mutex_exit(&vq->vq_aring_lock);

	return 0;
}
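
/*
 * Passing notifynow = false lets callers batch several requests before
 * a single notification; a slot of -1 only flushes.  A hedged sketch:
 *
 *	for (i = 0; i < n; i++)
 *		virtio_enqueue_commit(vsc, vq, slots[i], false);
 *	virtio_enqueue_commit(vsc, vq, -1, true);   // publish and kick
 */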

/*
 * enqueue_abort: rollback.
 */
int
virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd;
	int s;

	if (qe->qe_next < 0) {
		vq_free_entry(vq, qe);
		return 0;
	}

	s = slot;
	vd = &vq->vq_desc[0];
	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);
	return 0;
}

/*
 * Dequeue a request.
 */
/*
 * dequeue: dequeue a request from uring; dmamap_sync for uring is
 * already done in the interrupt handler.
 */
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
    int *slotp, int *lenp)
{
	uint16_t slot, usedidx;
	struct vq_entry *qe;

	if (vq->vq_used_idx == vq->vq_used->idx)
		return ENOENT;
	mutex_enter(&vq->vq_uring_lock);
	usedidx = vq->vq_used_idx++;
	mutex_exit(&vq->vq_uring_lock);
	usedidx %= vq->vq_num;
	slot = vq->vq_used->ring[usedidx].id;
	qe = &vq->vq_entries[slot];

	if (qe->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);

	if (slotp)
		*slotp = slot;
	if (lenp)
		*lenp = vq->vq_used->ring[usedidx].len;

	return 0;
}

/*
 * dequeue_commit: complete dequeue; the slot is recycled for future use.
 * If you forget to call this, the slot will be leaked.
 */
int
virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd = &vq->vq_desc[0];
	int s = slot;

	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);

	return 0;
}

/*
 * Attach a child, fill all the members.
 */
void
virtio_child_attach_start(struct virtio_softc *sc, device_t child, int ipl,
    struct virtqueue *vqs,
    virtio_callback config_change,
    virtio_callback intr_hand,
    int req_flags, int req_features, const char *feat_bits)
{
	char buf[256];
	int features;

	sc->sc_child = child;
	sc->sc_ipl = ipl;
	sc->sc_vqs = vqs;
	sc->sc_config_change = config_change;
	sc->sc_intrhand = intr_hand;
	sc->sc_flags = req_flags;

	features = virtio_negotiate_features(sc, req_features);
	snprintb(buf, sizeof(buf), feat_bits, features);
	aprint_normal(": Features: %s\n", buf);
	aprint_naive("\n");
}

void
virtio_child_attach_set_vqs(struct virtio_softc *sc,
    struct virtqueue *vqs, int nvq_pairs)
{

	KASSERT(nvq_pairs == 1 ||
	    (sc->sc_flags & VIRTIO_F_PCI_INTR_SOFTINT) == 0);
	if (nvq_pairs > 1)
		sc->sc_child_mq = true;

	sc->sc_vqs = vqs;
}

int
virtio_child_attach_finish(struct virtio_softc *sc)
{
	int r;

	r = sc->sc_ops->setup_interrupts(sc);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev, "failed to setup interrupts\n");
		goto fail;
	}

	KASSERT(sc->sc_soft_ih == NULL);
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_SOFTINT) {
		u_int flags = SOFTINT_NET;
		if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
			flags |= SOFTINT_MPSAFE;

		sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc);
		if (sc->sc_soft_ih == NULL) {
			sc->sc_ops->free_interrupts(sc);
			aprint_error_dev(sc->sc_dev,
			    "failed to establish soft interrupt\n");
			goto fail;
		}
	}

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	return 0;

fail:
	if (sc->sc_soft_ih) {
		softint_disestablish(sc->sc_soft_ih);
		sc->sc_soft_ih = NULL;
	}

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
	return 1;
}

void
virtio_child_detach(struct virtio_softc *sc)
{
	sc->sc_child = NULL;
	sc->sc_vqs = NULL;

	virtio_device_reset(sc);

	sc->sc_ops->free_interrupts(sc);

	if (sc->sc_soft_ih) {
		softint_disestablish(sc->sc_soft_ih);
		sc->sc_soft_ih = NULL;
	}
}

void
virtio_child_attach_failed(struct virtio_softc *sc)
{
	virtio_child_detach(sc);
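
	/* mark the device FAILED: the driver has given up on it */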
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);

	sc->sc_child = VIRTIO_CHILD_FAILED;
}

bus_dma_tag_t
virtio_dmat(struct virtio_softc *sc)
{
	return sc->sc_dmat;
}

device_t
virtio_child(struct virtio_softc *sc)
{
	return sc->sc_child;
}

int
virtio_intrhand(struct virtio_softc *sc)
{
	return (sc->sc_intrhand)(sc);
}

uint32_t
virtio_features(struct virtio_softc *sc)
{
	return sc->sc_features;
}

int
virtiobusprint(void *aux, const char *pnp)
{
	struct virtio_attach_args * const va = aux;

	if (va->sc_childdevid == 0)
		return QUIET;	/* No device present */

	if (pnp)
		aprint_normal("Device ID %d at %s", va->sc_childdevid, pnp);

	return UNCONF;
}

MODULE(MODULE_CLASS_DRIVER, virtio, NULL);

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}