/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implements the virtqueue interface as basically described
 * in the original VirtIO paper.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/virtio/virtqueue.c 331722 2018-03-29 02:50:57Z eadler $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/atomic.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>

#include "virtio_bus_if.h"

struct virtqueue {
	device_t		 vq_dev;
	char			 vq_name[VIRTQUEUE_MAX_NAME_SZ];
	uint16_t		 vq_queue_index;
	uint16_t		 vq_nentries;
	uint32_t		 vq_flags;
#define	VIRTQUEUE_FLAG_INDIRECT	 0x0001
#define	VIRTQUEUE_FLAG_EVENT_IDX 0x0002

	int			 vq_alignment;
	int			 vq_ring_size;
	void			*vq_ring_mem;
	int			 vq_max_indirect_size;
	int			 vq_indirect_mem_size;
	virtqueue_intr_t	*vq_intrhand;
	void			*vq_intrhand_arg;

	struct vring		 vq_ring;
	uint16_t		 vq_free_cnt;
	uint16_t		 vq_queued_cnt;
	/*
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t		 vq_desc_head_idx;
	/*
	 * Last consumed descriptor in the used table,
	 * trails vq_ring.used->idx.
	 */
	uint16_t		 vq_used_cons_idx;

	struct vq_desc_extra {
		void		  *cookie;
		struct vring_desc *indirect;
		vm_paddr_t	   indirect_paddr;
		uint16_t	   ndescs;
	} vq_descx[0];
};

/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define	VQ_RING_DESC_CHAIN_END 32768
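
/*
 * Free descriptors are threaded through the descriptor table itself:
 * vq_ring_init() links desc[i].next = i + 1 and terminates the chain
 * with VQ_RING_DESC_CHAIN_END, and vq_ring_free_chain() pushes freed
 * chains back onto the head. A freshly initialized 4-entry ring thus
 * looks like (illustrative sketch only):
 *
 *	vq_desc_head_idx = 0
 *	desc[0].next = 1, desc[1].next = 2, desc[2].next = 3,
 *	desc[3].next = VQ_RING_DESC_CHAIN_END
 */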

#define	VQASSERT(_vq, _exp, _msg, ...)				\
    KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name,	\
	##__VA_ARGS__))

#define	VQ_RING_ASSERT_VALID_IDX(_vq, _idx)			\
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,		\
	"invalid ring index: %d, max: %d", (_idx),		\
	(_vq)->vq_nentries)

#define	VQ_RING_ASSERT_CHAIN_TERM(_vq)				\
    VQASSERT((_vq), (_vq)->vq_desc_head_idx ==			\
	VQ_RING_DESC_CHAIN_END,	"full ring terminated "		\
	"incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)

static int	virtqueue_init_indirect(struct virtqueue *vq, int);
static void	virtqueue_free_indirect(struct virtqueue *vq);
static void	virtqueue_init_indirect_list(struct virtqueue *,
		    struct vring_desc *);

static void	vq_ring_init(struct virtqueue *);
static void	vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t	vq_ring_enqueue_segments(struct virtqueue *,
		    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int	vq_ring_use_indirect(struct virtqueue *, int);
static void	vq_ring_enqueue_indirect(struct virtqueue *, void *,
		    struct sglist *, int, int);
static int	vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static int	vq_ring_must_notify_host(struct virtqueue *);
static void	vq_ring_notify_host(struct virtqueue *);
static void	vq_ring_free_chain(struct virtqueue *, uint16_t);

uint64_t
virtqueue_filter_features(uint64_t features)
{
	uint64_t mask;

	mask = (1 << VIRTIO_TRANSPORT_F_START) - 1;
	mask |= VIRTIO_RING_F_INDIRECT_DESC;
	mask |= VIRTIO_RING_F_EVENT_IDX;

	return (features & mask);
}

int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
	struct virtqueue *vq;
	int error;

	*vqp = NULL;
	error = 0;

	if (size == 0) {
		device_printf(dev,
		    "virtqueue %d (%s) does not exist (size is zero)\n",
		    queue, info->vqai_name);
		return (ENODEV);
	} else if (!powerof2(size)) {
		device_printf(dev,
		    "virtqueue %d (%s) size is not a power of 2: %d\n",
		    queue, info->vqai_name, size);
		return (ENXIO);
	} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
		device_printf(dev, "virtqueue %d (%s) requested too many "
		    "indirect descriptors: %d, max %d\n",
		    queue, info->vqai_name, info->vqai_maxindirsz,
		    VIRTIO_MAX_INDIRECT);
		return (EINVAL);
	}

	vq = malloc(sizeof(struct virtqueue) +
	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (vq == NULL) {
		device_printf(dev, "cannot allocate virtqueue\n");
		return (ENOMEM);
	}

	vq->vq_dev = dev;
	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
	vq->vq_queue_index = queue;
	vq->vq_alignment = align;
	vq->vq_nentries = size;
	vq->vq_free_cnt = size;
	vq->vq_intrhand = info->vqai_intr;
	vq->vq_intrhand_arg = info->vqai_intr_arg;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

	if (info->vqai_maxindirsz > 1) {
		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
		if (error)
			goto fail;
	}

	vq->vq_ring_size = round_page(vring_size(size, align));
	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
	if (vq->vq_ring_mem == NULL) {
		device_printf(dev,
		    "cannot allocate memory for virtqueue ring\n");
		error = ENOMEM;
		goto fail;
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	*vqp = vq;

fail:
	if (error)
		virtqueue_free(vq);

	return (error);
}
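
/*
 * Illustrative sketch only: a driver typically fills in a struct
 * vq_alloc_info (e.g. via the VQ_ALLOC_INFO_INIT() helper from
 * virtqueue.h) and allocates its queues at attach time. The handler
 * name, softc, and the alignment/highaddr values below are
 * hypothetical; real values come from the transport.
 *
 *	struct vq_alloc_info info;
 *	struct virtqueue *vq;
 *	int error;
 *
 *	VQ_ALLOC_INFO_INIT(&info, 0, my_vq_intr, sc, &vq, "%s rx",
 *	    device_get_nameunit(dev));
 *	error = virtqueue_alloc(dev, 0, size, 4096, ~(vm_paddr_t)0,
 *	    &info, &vq);
 */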

static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
	device_t dev;
	struct vq_desc_extra *dxp;
	int i, size;

	dev = vq->vq_dev;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
		/*
		 * Indirect descriptors requested by the driver but not
		 * negotiated. Return zero to keep the initialization
		 * going: we'll run fine without.
		 */
		if (bootverbose)
			device_printf(dev, "virtqueue %d (%s) requested "
			    "indirect descriptors but not negotiated\n",
			    vq->vq_queue_index, vq->vq_name);
		return (0);
	}

	size = indirect_size * sizeof(struct vring_desc);
	vq->vq_max_indirect_size = indirect_size;
	vq->vq_indirect_mem_size = size;
	vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
		if (dxp->indirect == NULL) {
			device_printf(dev, "cannot allocate indirect list\n");
			return (ENOMEM);
		}

		dxp->indirect_paddr = vtophys(dxp->indirect);
		virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	return (0);
}

static void
virtqueue_free_indirect(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	int i;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		if (dxp->indirect == NULL)
			break;

		free(dxp->indirect, M_DEVBUF);
		dxp->indirect = NULL;
		dxp->indirect_paddr = 0;
	}

	vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
	vq->vq_indirect_mem_size = 0;
}

static void
virtqueue_init_indirect_list(struct virtqueue *vq,
    struct vring_desc *indirect)
{
	int i;

	bzero(indirect, vq->vq_indirect_mem_size);

	for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
		indirect[i].next = i + 1;
	indirect[i].next = VQ_RING_DESC_CHAIN_END;
}
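
/*
 * Note on indirect descriptors: when negotiated, a chain of N buffer
 * segments is written into the per-slot dxp->indirect table and costs
 * only a single descriptor in the main ring (vq_ring_enqueue_indirect()
 * sets dxp->ndescs = 1), so the ring can hold more outstanding requests
 * than it has entries' worth of segments.
 */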

int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
	struct vq_desc_extra *dxp;
	int i;

	if (vq->vq_nentries != size) {
		device_printf(vq->vq_dev,
		    "%s: '%s' changed size; old=%hu, new=%hu\n",
		    __func__, vq->vq_name, vq->vq_nentries, size);
		return (EINVAL);
	}

	/* Warn if the virtqueue was not properly cleaned up. */
	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev,
		    "%s: warning '%s' virtqueue not empty, "
		    "leaking %d entries\n", __func__, vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	vq->vq_desc_head_idx = 0;
	vq->vq_used_cons_idx = 0;
	vq->vq_queued_cnt = 0;
	vq->vq_free_cnt = vq->vq_nentries;

	/* To be safe, reset all our allocated memory. */
	bzero(vq->vq_ring_mem, vq->vq_ring_size);
	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];
		dxp->cookie = NULL;
		dxp->ndescs = 0;
		if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
			virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	return (0);
}

void
virtqueue_free(struct virtqueue *vq)
{

	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
		    "leaking %d entries\n", vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
		virtqueue_free_indirect(vq);

	if (vq->vq_ring_mem != NULL) {
		contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
		vq->vq_ring_size = 0;
		vq->vq_ring_mem = NULL;
	}

	free(vq, M_DEVBUF);
}

vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring_mem));
}

vm_paddr_t
virtqueue_desc_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring.desc));
}

vm_paddr_t
virtqueue_avail_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring.avail));
}

vm_paddr_t
virtqueue_used_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring.used));
}

uint16_t
virtqueue_index(struct virtqueue *vq)
{
	return (vq->vq_queue_index);
}

int
virtqueue_size(struct virtqueue *vq)
{

	return (vq->vq_nentries);
}

int
virtqueue_nfree(struct virtqueue *vq)
{

	return (vq->vq_free_cnt);
}

int
virtqueue_empty(struct virtqueue *vq)
{

	return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{

	return (vq->vq_free_cnt == 0);
}

void
virtqueue_notify(struct virtqueue *vq)
{

	/* Ensure updated avail->idx is visible to host. */
	mb();

	if (vq_ring_must_notify_host(vq))
		vq_ring_notify_host(vq);
	vq->vq_queued_cnt = 0;
}

int
virtqueue_nused(struct virtqueue *vq)
{
	uint16_t used_idx, nused;

	used_idx = vq->vq_ring.used->idx;

	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

	return (nused);
}

int
virtqueue_intr_filter(struct virtqueue *vq)
{

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (0);

	virtqueue_disable_intr(vq);

	return (1);
}

void
virtqueue_intr(struct virtqueue *vq)
{

	vq->vq_intrhand(vq->vq_intrhand_arg);
}

int
virtqueue_enable_intr(struct virtqueue *vq)
{

	return (vq_ring_enable_interrupt(vq, 0));
}

int
virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
{
	uint16_t ndesc, avail_idx;

	avail_idx = vq->vq_ring.avail->idx;
	ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);

	switch (hint) {
	case VQ_POSTPONE_SHORT:
		ndesc = ndesc / 4;
		break;
	case VQ_POSTPONE_LONG:
		ndesc = (ndesc * 3) / 4;
		break;
	case VQ_POSTPONE_EMPTIED:
		break;
	}

	return (vq_ring_enable_interrupt(vq, ndesc));
}
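
/*
 * Worked example (illustrative numbers): with EVENT_IDX negotiated and
 * 16 descriptors outstanding, VQ_POSTPONE_LONG yields ndesc = 12, so
 * the used event index is set 12 entries past vq_used_cons_idx and the
 * host is asked not to interrupt until roughly 12 more entries have
 * been consumed. VQ_POSTPONE_EMPTIED leaves ndesc = 16, requesting an
 * interrupt only once all outstanding entries are consumed.
 */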

/*
 * Note this is only considered a hint to the host.
 */
void
virtqueue_disable_intr(struct virtqueue *vq)
{

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx -
		    vq->vq_nentries - 1;
	} else
		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx, idx;

	needed = readable + writable;

	VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
	VQASSERT(vq, needed == sg->sg_nseg,
	    "segment count mismatch, %d, %d", needed, sg->sg_nseg);
	VQASSERT(vq,
	    needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
	    "too many segments to enqueue: %d, %d/%d", needed,
	    vq->vq_nentries, vq->vq_max_indirect_size);

	if (needed < 1)
		return (EINVAL);
	if (vq->vq_free_cnt == 0)
		return (ENOSPC);

	if (vq_ring_use_indirect(vq, needed)) {
		vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
		return (0);
	} else if (vq->vq_free_cnt < needed)
		return (EMSGSIZE);

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = needed;

	idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
	    sg, readable, writable);

	vq->vq_desc_head_idx = idx;
	vq->vq_free_cnt -= needed;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, idx);

	vq_ring_update_avail(vq, head_idx);

	return (0);
}

void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (NULL);

	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

	rmb();
	desc_idx = (uint16_t) uep->id;
	if (len != NULL)
		*len = uep->len;

	vq_ring_free_chain(vq, desc_idx);

	cookie = vq->vq_descx[desc_idx].cookie;
	VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
	vq->vq_descx[desc_idx].cookie = NULL;

	return (cookie);
}

void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
	void *cookie;

	VIRTIO_BUS_POLL(vq->vq_dev);
	while ((cookie = virtqueue_dequeue(vq, len)) == NULL) {
		cpu_spinwait();
		VIRTIO_BUS_POLL(vq->vq_dev);
	}

	return (cookie);
}

void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
	void *cookie;
	int idx;

	cookie = NULL;
	idx = *last;

	while (idx < vq->vq_nentries && cookie == NULL) {
		if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
			vq->vq_descx[idx].cookie = NULL;
			/* Free chain to keep free count consistent. */
			vq_ring_free_chain(vq, idx);
		}
		idx++;
	}

	*last = idx;

	return (cookie);
}
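
/*
 * Illustrative sketch of the expected enqueue/dequeue pattern; the
 * request layout, segment counts, and helper names are hypothetical:
 *
 *	struct sglist_seg segs[2];
 *	struct sglist sg;
 *	uint32_t len;
 *
 *	sglist_init(&sg, 2, segs);
 *	sglist_append(&sg, &req->hdr, sizeof(req->hdr));	(readable)
 *	sglist_append(&sg, req->data, req->datalen);		(writable)
 *	error = virtqueue_enqueue(vq, req, &sg, 1, 1);
 *	if (error == 0)
 *		virtqueue_notify(vq);
 *	...
 *	while ((req = virtqueue_dequeue(vq, &len)) != NULL)
 *		process(req, len);
 *
 * On teardown, still-outstanding cookies are reclaimed with
 * virtqueue_drain():
 *
 *	int last = 0;
 *	while ((req = virtqueue_drain(vq, &last)) != NULL)
 *		free_request(req);
 */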

void
virtqueue_dump(struct virtqueue *vq)
{

	if (vq == NULL)
		return;

	printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
	    "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
	    "used.idx=%d; used_event_idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
	    vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
	    virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
	    vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
	    vq->vq_ring.used->idx,
	    vring_used_event(&vq->vq_ring),
	    vq->vq_ring.avail->flags,
	    vq->vq_ring.used->flags);
}

static void
vq_ring_init(struct virtqueue *vq)
{
	struct vring *vr;
	char *ring_mem;
	int i, size;

	ring_mem = vq->vq_ring_mem;
	size = vq->vq_nentries;
	vr = &vq->vq_ring;

	vring_init(vr, size, ring_mem, vq->vq_alignment);

	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = i + 1;
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}

static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[avail_idx] = desc_idx;

	wmb();
	vq->vq_ring.avail->idx++;

	/* Keep pending count until virtqueue_notify(). */
	vq->vq_queued_cnt++;
}

static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
	struct sglist_seg *seg;
	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	needed = readable + writable;

	for (i = 0, idx = head_idx, seg = sg->sg_segs;
	     i < needed;
	     i++, idx = dp->next, seg++) {
		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
		    "premature end of free desc chain");

		dp = &desc[idx];
		dp->addr = seg->ss_paddr;
		dp->len = seg->ss_len;
		dp->flags = 0;

		if (i < needed - 1)
			dp->flags |= VRING_DESC_F_NEXT;
		if (i >= readable)
			dp->flags |= VRING_DESC_F_WRITE;
	}

	return (idx);
}

static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{

	if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
		return (0);

	if (vq->vq_max_indirect_size < needed)
		return (0);

	if (needed < 2)
		return (0);

	return (1);
}

static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx;

	needed = readable + writable;
	VQASSERT(vq, needed <= vq->vq_max_indirect_size,
	    "enqueuing too many indirect descriptors");

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dp = &vq->vq_ring.desc[head_idx];
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = 1;

	dp->addr = dxp->indirect_paddr;
	dp->len = needed * sizeof(struct vring_desc);
	dp->flags = VRING_DESC_F_INDIRECT;

	vq_ring_enqueue_segments(vq, dxp->indirect, 0,
	    sg, readable, writable);

	vq->vq_desc_head_idx = dp->next;
	vq->vq_free_cnt--;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

	vq_ring_update_avail(vq, head_idx);
}
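
/*
 * Two interrupt/notification suppression schemes are in play below,
 * depending on whether VIRTIO_RING_F_EVENT_IDX was negotiated: the
 * legacy VRING_AVAIL_F_NO_INTERRUPT/VRING_USED_F_NO_NOTIFY flags, or
 * the used/avail event indices consulted via vring_used_event(),
 * vring_avail_event(), and vring_need_event().
 */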

static int
vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{

	/*
	 * Enable interrupts, making sure we get the latest index of
	 * what's already been consumed.
	 */
	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
	else
		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

	mb();

	/*
	 * Enough items may have already been consumed to meet our threshold
	 * since we last checked. Let our caller know so it processes the new
	 * entries.
	 */
	if (virtqueue_nused(vq) > ndesc)
		return (1);

	return (0);
}

static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
	uint16_t new_idx, prev_idx, event_idx;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		new_idx = vq->vq_ring.avail->idx;
		prev_idx = new_idx - vq->vq_queued_cnt;
		event_idx = vring_avail_event(&vq->vq_ring);

		return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
	}

	return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}

static void
vq_ring_notify_host(struct virtqueue *vq)
{

	VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
}

static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);

	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
			dp = &vq->vq_ring.desc[dp->next];
			dxp->ndescs--;
		}
	}

	VQASSERT(vq, dxp->ndescs == 0,
	    "failed to free entire desc chain, remaining: %d", dxp->ndescs);

	/*
	 * We must append the existing free chain, if any, to the end of
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	dp->next = vq->vq_desc_head_idx;
	vq->vq_desc_head_idx = desc_idx;
}