/*	$NetBSD: virtio.c,v 1.33 2018/06/06 17:17:31 jakllsch Exp $	*/

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.33 2018/06/06 17:17:31 jakllsch Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/module.h>

#define VIRTIO_PRIVATE

#include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
#include <dev/pci/virtiovar.h> /* XXX: move to non-pci */

#define MINSEG_INDIRECT		2 /* use indirect if nsegs >= this value */

static void	virtio_init_vq(struct virtio_softc *,
		    struct virtqueue *, const bool);

void
virtio_set_status(struct virtio_softc *sc, int status)
{
	sc->sc_ops->set_status(sc, status);
}

/*
 * Reset the device.
 */
/*
 * To reset the device to a known state, do the following:
 *	virtio_reset(sc);	     // this will stop the device activity
 *	<dequeue finished requests>; // virtio_dequeue() still can be called
 *	<revoke pending requests in the vqs if any>;
 *	virtio_reinit_start(sc);     // dequeue prohibited
 *	newfeatures = virtio_negotiate_features(sc, requestedfeatures);
 *	<some other initialization>;
 *	virtio_reinit_end(sc);	     // device activated; enqueue allowed
 * Once attached, feature negotiation is allowed only after virtio_reset.
 */
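
/*
 * As a concrete illustration of the sequence above, a child driver's
 * reinit path might look like the following sketch; names such as
 * "xxx_softc" and "xxx_features" are placeholders, not part of this API:
 *
 *	struct virtio_softc *vsc = sc->sc_virtio;	// from xxx_softc
 *	int slot;
 *
 *	virtio_reset(vsc);				// stop device activity
 *	while (virtio_dequeue(vsc, &sc->sc_vq, &slot, NULL) == 0)
 *		virtio_dequeue_commit(vsc, &sc->sc_vq, slot);
 *	virtio_reinit_start(vsc);			// dequeue now prohibited
 *	(void)virtio_negotiate_features(vsc, xxx_features);
 *	virtio_reinit_end(vsc);				// enqueue allowed again
 */
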
void
virtio_reset(struct virtio_softc *sc)
{
	virtio_device_reset(sc);
}

void
virtio_reinit_start(struct virtio_softc *sc)
{
	int i;

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
	for (i = 0; i < sc->sc_nvqs; i++) {
		int n;
		struct virtqueue *vq = &sc->sc_vqs[i];
		n = sc->sc_ops->read_queue_size(sc, vq->vq_index);
		if (n == 0)	/* vq disappeared */
			continue;
		if (n != vq->vq_num) {
			panic("%s: virtqueue size changed, vq index %d\n",
			    device_xname(sc->sc_dev),
			    vq->vq_index);
		}
		virtio_init_vq(sc, vq, true);
		sc->sc_ops->setup_queue(sc, vq->vq_index,
		    vq->vq_dmamap->dm_segs[0].ds_addr / VIRTIO_PAGE_SIZE);
	}
}

void
virtio_reinit_end(struct virtio_softc *sc)
{
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
}

/*
 * Feature negotiation.
 */
uint32_t
virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
{
	uint32_t r;

	if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
	    !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
	r = sc->sc_ops->neg_features(sc, guest_features);
	sc->sc_features = r;
	if (r & VIRTIO_F_RING_INDIRECT_DESC)
		sc->sc_indirect = true;
	else
		sc->sc_indirect = false;

	return r;
}

/*
 * Device configuration registers.
 */
uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, int index)
{
	return sc->sc_ops->read_dev_cfg_1(sc, index);
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, int index)
{
	return sc->sc_ops->read_dev_cfg_2(sc, index);
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, int index)
{
	return sc->sc_ops->read_dev_cfg_4(sc, index);
}

uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, int index)
{
	return sc->sc_ops->read_dev_cfg_8(sc, index);
}

void
virtio_write_device_config_1(struct virtio_softc *sc,
    int index, uint8_t value)
{
	sc->sc_ops->write_dev_cfg_1(sc, index, value);
}

void
virtio_write_device_config_2(struct virtio_softc *sc,
    int index, uint16_t value)
{
	sc->sc_ops->write_dev_cfg_2(sc, index, value);
}

void
virtio_write_device_config_4(struct virtio_softc *sc,
    int index, uint32_t value)
{
	sc->sc_ops->write_dev_cfg_4(sc, index, value);
}

void
virtio_write_device_config_8(struct virtio_softc *sc,
    int index, uint64_t value)
{
	sc->sc_ops->write_dev_cfg_8(sc, index, value);
}

/*
 * Interrupt handler.
 */
static void
virtio_soft_intr(void *arg)
{
	struct virtio_softc *sc = arg;

	KASSERT(sc->sc_intrhand != NULL);

	(sc->sc_intrhand)(sc);
}
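
/*
 * Example (sketch): a child driver reads its device-specific
 * configuration through the accessors above.  The offset macro below is
 * hypothetical; real offsets come from the child driver's own headers:
 *
 *	#define XXX_CONFIG_CAPACITY	0	// hypothetical offset
 *
 *	uint64_t capacity;
 *
 *	capacity = virtio_read_device_config_8(vsc, XXX_CONFIG_CAPACITY);
 */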

/*
 * dmamap sync operations for a virtqueue.
 */
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	/* availoffset == sizeof(vring_desc)*vq_num */
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
	    ops);
}

static inline void
vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_availoffset,
	    offsetof(struct vring_avail, ring)
	     + vq->vq_num * sizeof(uint16_t),
	    ops);
}

static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_usedoffset,
	    offsetof(struct vring_used, ring)
	     + vq->vq_num * sizeof(struct vring_used_elem),
	    ops);
}

static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    int ops)
{
	int offset = vq->vq_indirectoffset
	    + sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    offset, sizeof(struct vring_desc) * vq->vq_maxnsegs,
	    ops);
}

/*
 * Can be used as sc_intrhand.
 */
/*
 * Scan each vq, do bus_dmamap_sync for the vrings (not for the payload),
 * and call (*vq_done)() if some entries have been consumed.
 */
int
virtio_vq_intr(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int i, r = 0;

	for (i = 0; i < sc->sc_nvqs; i++) {
		vq = &sc->sc_vqs[i];
		if (vq->vq_queued) {
			vq->vq_queued = 0;
			vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
		}
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		membar_consumer();
		if (vq->vq_used_idx != vq->vq_used->idx) {
			if (vq->vq_done)
				r |= (vq->vq_done)(vq);
		}
	}

	return r;
}

/*
 * Start/stop vq interrupt.  No guarantee.
 */
void
virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}

void
virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}
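
/*
 * Example (sketch): a child driver's vq_done callback can use the
 * start/stop functions above to mitigate interrupts while it polls.
 * Because suppression is only a hint ("no guarantee"), the driver must
 * re-check for completions after re-enabling:
 *
 *	virtio_stop_vq_intr(vsc, vq);
 *	for (;;) {
 *		<dequeue and process all finished requests>;
 *		virtio_start_vq_intr(vsc, vq);
 *		if (<no new requests completed meanwhile>)
 *			break;
 *		virtio_stop_vq_intr(vsc, vq);
 *	}
 */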

/*
 * Initialize vq structure.
 */
static void
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq,
    const bool reinit)
{
	int i, j;
	int vq_size = vq->vq_num;

	memset(vq->vq_vaddr, 0, vq->vq_bytesize);

	/* build the indirect descriptor chain */
	if (vq->vq_indirect != NULL) {
		struct vring_desc *vd;

		for (i = 0; i < vq_size; i++) {
			vd = vq->vq_indirect;
			vd += vq->vq_maxnsegs * i;
			for (j = 0; j < vq->vq_maxnsegs-1; j++) {
				vd[j].next = j + 1;
			}
		}
	}

	/* free slot management */
	SIMPLEQ_INIT(&vq->vq_freelist);
	for (i = 0; i < vq_size; i++) {
		SIMPLEQ_INSERT_TAIL(&vq->vq_freelist,
		    &vq->vq_entries[i], qe_list);
		vq->vq_entries[i].qe_index = i;
	}
	if (!reinit)
		mutex_init(&vq->vq_freelist_lock, MUTEX_SPIN, sc->sc_ipl);

	/* enqueue/dequeue status */
	vq->vq_avail_idx = 0;
	vq->vq_used_idx = 0;
	vq->vq_queued = 0;
	if (!reinit) {
		mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl);
		mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
	vq->vq_queued++;
}

/*
 * Allocate/free a vq.
 */
int
virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int index,
    int maxsegsize, int maxnsegs, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
	int rsegs, r;
#define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1))&	\
				 ~(VIRTIO_PAGE_SIZE-1))

	/* Make sure callers allocate vqs in order */
	KASSERT(sc->sc_nvqs == index);

	memset(vq, 0, sizeof(*vq));

	vq_size = sc->sc_ops->read_queue_size(sc, index);
	if (vq_size == 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue does not exist, index %d for %s\n",
		    index, name);
		goto err;
	}
	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc)*vq_size
	    + sizeof(uint16_t)*(2+vq_size));
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t)*2
	    + sizeof(struct vring_used_elem)*vq_size);
	/* allocsize3: indirect table */
	if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
		allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
	else
		allocsize3 = 0;
	allocsize = allocsize1 + allocsize2 + allocsize3;

	/* alloc and map the memory */
	r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
	    &vq->vq_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s allocation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], 1, allocsize,
	    &vq->vq_vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s map failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
	    BUS_DMA_NOWAIT, &vq->vq_dmamap);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap creation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_vaddr, allocsize, NULL, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap load failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}

	/* set the vq address */
	sc->sc_ops->setup_queue(sc, vq->vq_index,
	    vq->vq_dmamap->dm_segs[0].ds_addr / VIRTIO_PAGE_SIZE);

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_desc = vq->vq_vaddr;
	vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
	vq->vq_avail = (void*)(((char*)vq->vq_desc) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void*)(((char*)vq->vq_desc) + vq->vq_usedoffset);
	if (allocsize3 > 0) {
		vq->vq_indirectoffset = allocsize1 + allocsize2;
		vq->vq_indirect = (void*)(((char*)vq->vq_desc)
		    + vq->vq_indirectoffset);
	}
	vq->vq_bytesize = allocsize;
	vq->vq_maxsegsize = maxsegsize;
	vq->vq_maxnsegs = maxnsegs;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof(struct vq_entry)*vq_size,
	    KM_NOSLEEP);
	if (vq->vq_entries == NULL) {
		r = ENOMEM;
		goto err;
	}

	virtio_init_vq(sc, vq, false);

	aprint_verbose_dev(sc->sc_dev,
	    "allocated %u bytes for virtqueue %d for %s, "
	    "size %d\n", allocsize, index, name, vq_size);
	if (allocsize3 > 0)
		aprint_verbose_dev(sc->sc_dev,
		    "using %d bytes (%d entries) "
		    "indirect descriptors\n",
		    allocsize3, maxnsegs * vq_size);

	sc->sc_nvqs++;

	return 0;

err:
	sc->sc_ops->setup_queue(sc, vq->vq_index, 0);
	if (vq->vq_dmamap)
		bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	if (vq->vq_vaddr)
		bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
	if (vq->vq_segs[0].ds_addr)
		bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	memset(vq, 0, sizeof(*vq));

	return -1;
}

int
virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	struct vq_entry *qe;
	int i = 0;

	/* device must be already deactivated */
	/* confirm the vq is empty */
	SIMPLEQ_FOREACH(qe, &vq->vq_freelist, qe_list) {
		i++;
	}
	if (i != vq->vq_num) {
		printf("%s: freeing non-empty vq, index %d\n",
		    device_xname(sc->sc_dev), vq->vq_index);
		return EBUSY;
	}

	/* tell device that there's no virtqueue any longer */
	sc->sc_ops->setup_queue(sc, vq->vq_index, 0);

	kmem_free(vq->vq_entries, sizeof(*vq->vq_entries) * vq->vq_num);
	bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
	bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	mutex_destroy(&vq->vq_freelist_lock);
	mutex_destroy(&vq->vq_uring_lock);
	mutex_destroy(&vq->vq_aring_lock);
	memset(vq, 0, sizeof(*vq));

	sc->sc_nvqs--;

	return 0;
}

/*
 * Free descriptor management.
 */
static struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
	struct vq_entry *qe;

	mutex_enter(&vq->vq_freelist_lock);
	if (SIMPLEQ_EMPTY(&vq->vq_freelist)) {
		mutex_exit(&vq->vq_freelist_lock);
		return NULL;
	}
	qe = SIMPLEQ_FIRST(&vq->vq_freelist);
	SIMPLEQ_REMOVE_HEAD(&vq->vq_freelist, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return qe;
}

static void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	mutex_enter(&vq->vq_freelist_lock);
	SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list);
	mutex_exit(&vq->vq_freelist_lock);
}
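
/*
 * Example (sketch): a child driver typically allocates its virtqueues in
 * its attach function, in index order; names prefixed "xxx_" are
 * placeholders:
 *
 *	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0,
 *	    MAXPHYS, XXX_MAX_SEGS, "requests") != 0)
 *		goto failed;
 *	sc->sc_vq[0].vq_done = xxx_vq_done;
 *
 * and releases them with virtio_free_vq() at detach time, once the
 * device has been reset and all slots are back on the free list.
 */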

/*
 * Enqueue several dmamaps as a single request.
 */
/*
 * Typical usage:
 *  Up to <queue size> sets of the following are kept in arrays:
 *  - command blocks (in dmamem) should be pre-allocated and mapped
 *  - dmamaps for command blocks should be pre-allocated and loaded
 *  - dmamaps for payload should be pre-allocated
 *	r = virtio_enqueue_prep(sc, vq, &slot);		// allocate a slot
 *	if (r)		// currently 0 or EAGAIN
 *	  return r;
 *	r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
 *	if (r) {
 *	  virtio_enqueue_abort(sc, vq, slot);
 *	  return r;
 *	}
 *	r = virtio_enqueue_reserve(sc, vq, slot,
 *				   dmamap_payload[slot]->dm_nsegs+1);
 *						// ^ +1 for command
 *	if (r) {	// currently 0 or EAGAIN
 *	  bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *	  return r;	// do not call abort()
 *	}
 *	<setup and prepare commands>
 *	bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_sync(dmat, dmamap_payload[slot],...);
 *	virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false);
 *	virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
 *	virtio_enqueue_commit(sc, vq, slot, true);
 */

/*
 * enqueue_prep: allocate a slot number
 */
int
virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp)
{
	struct vq_entry *qe1;

	KASSERT(slotp != NULL);

	qe1 = vq_alloc_entry(vq);
	if (qe1 == NULL)
		return EAGAIN;
	/* next slot is not allocated yet */
	qe1->qe_next = -1;
	*slotp = qe1->qe_index;

	return 0;
}

/*
 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
 */
int
virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq,
    int slot, int nsegs)
{
	int indirect;
	struct vq_entry *qe1 = &vq->vq_entries[slot];

	KASSERT(qe1->qe_next == -1);
	KASSERT(1 <= nsegs && nsegs <= vq->vq_num);

	if ((vq->vq_indirect != NULL) &&
	    (nsegs >= MINSEG_INDIRECT) &&
	    (nsegs <= vq->vq_maxnsegs))
		indirect = 1;
	else
		indirect = 0;
	qe1->qe_indirect = indirect;

	if (indirect) {
		struct vring_desc *vd;
		int i;

		vd = &vq->vq_desc[qe1->qe_index];
		vd->addr = vq->vq_dmamap->dm_segs[0].ds_addr
		    + vq->vq_indirectoffset;
		vd->addr += sizeof(struct vring_desc)
		    * vq->vq_maxnsegs * qe1->qe_index;
		vd->len = sizeof(struct vring_desc) * nsegs;
		vd->flags = VRING_DESC_F_INDIRECT;

		vd = vq->vq_indirect;
		vd += vq->vq_maxnsegs * qe1->qe_index;
		qe1->qe_desc_base = vd;

		for (i = 0; i < nsegs-1; i++) {
			vd[i].flags = VRING_DESC_F_NEXT;
		}
		vd[i].flags = 0;
		qe1->qe_next = 0;

		return 0;
	} else {
		struct vring_desc *vd;
		struct vq_entry *qe;
		int i, s;

		vd = &vq->vq_desc[0];
		qe1->qe_desc_base = vd;
		qe1->qe_next = qe1->qe_index;
		s = slot;
		for (i = 0; i < nsegs - 1; i++) {
			qe = vq_alloc_entry(vq);
			if (qe == NULL) {
				vd[s].flags = 0;
				virtio_enqueue_abort(sc, vq, slot);
				return EAGAIN;
			}
			vd[s].flags = VRING_DESC_F_NEXT;
			vd[s].next = qe->qe_index;
			s = qe->qe_index;
		}
		vd[s].flags = 0;

		return 0;
	}
}

/*
 * enqueue: enqueue a single dmamap.
 */
int
virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int i;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs > 0);

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		vd[s].addr = dmamap->dm_segs[i].ds_addr;
		vd[s].len = dmamap->dm_segs[i].ds_len;
		if (!write)
			vd[s].flags |= VRING_DESC_F_WRITE;
		s = vd[s].next;
	}
	qe1->qe_next = s;

	return 0;
}

int
virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len,
    bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs == 1); /* XXX */
	KASSERT((dmamap->dm_segs[0].ds_len > start) &&
	    (dmamap->dm_segs[0].ds_len >= start + len));

	vd[s].addr = dmamap->dm_segs[0].ds_addr + start;
	vd[s].len = len;
	if (!write)
		vd[s].flags |= VRING_DESC_F_WRITE;
	qe1->qe_next = vd[s].next;

	return 0;
}

/*
 * enqueue_commit: add it to the aring.
 */
int
virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bool notifynow)
{
	struct vq_entry *qe1;

	if (slot < 0) {
		mutex_enter(&vq->vq_aring_lock);
		goto notify;
	}
	vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
	qe1 = &vq->vq_entries[slot];
	if (qe1->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
	mutex_enter(&vq->vq_aring_lock);
	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] = slot;

notify:
	if (notifynow) {
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
		membar_producer();
		vq->vq_avail->idx = vq->vq_avail_idx;
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		membar_producer();
		vq->vq_queued++;
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		membar_consumer();
		if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
			sc->sc_ops->kick(sc, vq->vq_index);
	}
	mutex_exit(&vq->vq_aring_lock);

	return 0;
}

/*
 * enqueue_abort: rollback.
 */
int
virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd;
	int s;

	if (qe->qe_next < 0) {
		vq_free_entry(vq, qe);
		return 0;
	}

	s = slot;
	vd = &vq->vq_desc[0];
	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);
	return 0;
}

/*
 * Dequeue a request.
 */
/*
 * dequeue: dequeue a request from uring; dmamap_sync for uring is
 * already done in the interrupt handler.
 */
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
    int *slotp, int *lenp)
{
	uint16_t slot, usedidx;
	struct vq_entry *qe;

	if (vq->vq_used_idx == vq->vq_used->idx)
		return ENOENT;
	mutex_enter(&vq->vq_uring_lock);
	usedidx = vq->vq_used_idx++;
	mutex_exit(&vq->vq_uring_lock);
	usedidx %= vq->vq_num;
	slot = vq->vq_used->ring[usedidx].id;
	qe = &vq->vq_entries[slot];

	if (qe->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);

	if (slotp)
		*slotp = slot;
	if (lenp)
		*lenp = vq->vq_used->ring[usedidx].len;

	return 0;
}

/*
 * dequeue_commit: complete dequeue; the slot is recycled for future use.
 * If you forget to call this, the slot will be leaked.
 */
int
virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd = &vq->vq_desc[0];
	int s = slot;

	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);

	return 0;
}
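
/*
 * Example (sketch): the dequeue side, usually run from a vq_done
 * callback; "dmamap_payload" follows the naming of the enqueue example
 * above and is a placeholder:
 *
 *	int slot, len;
 *
 *	while (virtio_dequeue(sc, vq, &slot, &len) == 0) {
 *		bus_dmamap_sync(dmat, dmamap_payload[slot], 0, len,
 *		    BUS_DMASYNC_POSTREAD);
 *		bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *		<complete the request for this slot>;
 *		virtio_dequeue_commit(sc, vq, slot);	// recycle the slot
 *	}
 */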

/*
 * Attach a child, fill all the members.
 */
void
virtio_child_attach_start(struct virtio_softc *sc, device_t child, int ipl,
    struct virtqueue *vqs,
    virtio_callback config_change,
    virtio_callback intr_hand,
    int req_flags, int req_features, const char *feat_bits)
{
	char buf[256];
	int features;

	sc->sc_child = child;
	sc->sc_ipl = ipl;
	sc->sc_vqs = vqs;
	sc->sc_config_change = config_change;
	sc->sc_intrhand = intr_hand;
	sc->sc_flags = req_flags;

	features = virtio_negotiate_features(sc, req_features);
	snprintb(buf, sizeof(buf), feat_bits, features);
	aprint_normal(": Features: %s\n", buf);
	aprint_naive("\n");
}

int
virtio_child_attach_finish(struct virtio_softc *sc)
{
	int r;

	r = sc->sc_ops->setup_interrupts(sc);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev, "failed to setup interrupts\n");
		goto fail;
	}

	KASSERT(sc->sc_soft_ih == NULL);
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_SOFTINT) {
		u_int flags = SOFTINT_NET;
		if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
			flags |= SOFTINT_MPSAFE;

		sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc);
		if (sc->sc_soft_ih == NULL) {
			sc->sc_ops->free_interrupts(sc);
			aprint_error_dev(sc->sc_dev,
			    "failed to establish soft interrupt\n");
			goto fail;
		}
	}

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	return 0;

fail:
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
	return 1;
}

void
virtio_child_detach(struct virtio_softc *sc)
{
	sc->sc_child = NULL;
	sc->sc_vqs = NULL;

	virtio_device_reset(sc);

	sc->sc_ops->free_interrupts(sc);

	if (sc->sc_soft_ih) {
		softint_disestablish(sc->sc_soft_ih);
		sc->sc_soft_ih = NULL;
	}
}

void
virtio_child_attach_failed(struct virtio_softc *sc)
{
	virtio_child_detach(sc);

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);

	sc->sc_child = VIRTIO_CHILD_FAILED;
}
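
/*
 * Example (sketch): a child driver's attach function typically uses the
 * above as follows; "xxx_*" names and the feature/flag values are
 * placeholders for the child's own definitions:
 *
 *	virtio_child_attach_start(vsc, self, IPL_NET, sc->sc_vq,
 *	    xxx_config_change, virtio_vq_intr, req_flags,
 *	    XXX_FEATURES, XXX_FLAG_BITS);
 *	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, ...) != 0)
 *		goto failed;
 *	if (virtio_child_attach_finish(vsc) != 0)
 *		goto failed;
 *	return;
 * failed:
 *	virtio_child_attach_failed(vsc);
 */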

bus_dma_tag_t
virtio_dmat(struct virtio_softc *sc)
{
	return sc->sc_dmat;
}

device_t
virtio_child(struct virtio_softc *sc)
{
	return sc->sc_child;
}

int
virtio_intrhand(struct virtio_softc *sc)
{
	return (sc->sc_intrhand)(sc);
}

uint32_t
virtio_features(struct virtio_softc *sc)
{
	return sc->sc_features;
}

MODULE(MODULE_CLASS_DRIVER, virtio, NULL);

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}