#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>

#include "virtio_bus_if.h"

struct virtqueue {
	device_t	 vq_dev;
	char		 vq_name[VIRTQUEUE_MAX_NAME_SZ];
	uint16_t	 vq_queue_index;
	uint16_t	 vq_nentries;
	uint32_t	 vq_flags;
#define	VIRTQUEUE_FLAG_INDIRECT	 0x0001
#define	VIRTQUEUE_FLAG_EVENT_IDX 0x0002

	int		 vq_alignment;
	int		 vq_ring_size;
	void		*vq_ring_mem;
	int		 vq_max_indirect_size;
	int		 vq_indirect_mem_size;
	virtqueue_intr_t *vq_intrhand;
	void		*vq_intrhand_arg;

	struct vring	 vq_ring;
	uint16_t	 vq_free_cnt;
	uint16_t	 vq_queued_cnt;
	/*
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t	 vq_desc_head_idx;
	/*
	 * Last consumed descriptor in the used table,
	 * trails vq_ring.used->idx.
	 */
	uint16_t	 vq_used_cons_idx;

	struct vq_desc_extra {
		void		  *cookie;
		struct vring_desc *indirect;
		vm_paddr_t	   indirect_paddr;
		uint16_t	   ndescs;
	} vq_descx[0];
};

/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END 32768

#define VQASSERT(_vq, _exp, _msg, ...)				\
    KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name,	\
	##__VA_ARGS__))

#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)			\
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,		\
	"invalid ring index: %d, max: %d", (_idx),		\
	(_vq)->vq_nentries)

#define VQ_RING_ASSERT_CHAIN_TERM(_vq)				\
    VQASSERT((_vq), (_vq)->vq_desc_head_idx ==			\
	VQ_RING_DESC_CHAIN_END, "full ring terminated "		\
	"incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)

static int	virtqueue_init_indirect(struct virtqueue *vq, int);
static void	virtqueue_free_indirect(struct virtqueue *vq);
static void	virtqueue_init_indirect_list(struct virtqueue *,
		    struct vring_desc *);

static void	vq_ring_init(struct virtqueue *);
static void	vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t	vq_ring_enqueue_segments(struct virtqueue *,
		    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int	vq_ring_use_indirect(struct virtqueue *, int);
static void	vq_ring_enqueue_indirect(struct virtqueue *, void *,
		    struct sglist *, int, int);
static int	vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static int	vq_ring_must_notify_host(struct virtqueue *);
static void	vq_ring_notify_host(struct virtqueue *);
static void	vq_ring_free_chain(struct virtqueue *, uint16_t);

uint64_t
virtqueue_filter_features(uint64_t features)
{
	uint64_t mask;

	mask = (1 << VIRTIO_TRANSPORT_F_START) - 1;
	mask |= VIRTIO_RING_F_INDIRECT_DESC;
	mask |= VIRTIO_RING_F_EVENT_IDX;

	return (features & mask);
}

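/*
 * Allocate and initialize a virtqueue: validate the requested size,
 * allocate the descriptor bookkeeping (and, if requested and negotiated,
 * the indirect descriptor tables), and allocate contiguous, page-aligned
 * memory for the ring itself.
 */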
int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
	struct virtqueue *vq;
	int error;

	*vqp = NULL;
	error = 0;

	if (size == 0) {
		device_printf(dev,
		    "virtqueue %d (%s) does not exist (size is zero)\n",
		    queue, info->vqai_name);
		return (ENODEV);
	} else if (!powerof2(size)) {
		device_printf(dev,
		    "virtqueue %d (%s) size is not a power of 2: %d\n",
		    queue, info->vqai_name, size);
		return (ENXIO);
	} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
		device_printf(dev, "virtqueue %d (%s) requested too many "
		    "indirect descriptors: %d, max %d\n",
		    queue, info->vqai_name, info->vqai_maxindirsz,
		    VIRTIO_MAX_INDIRECT);
		return (EINVAL);
	}

	vq = malloc(sizeof(struct virtqueue) +
	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (vq == NULL) {
		device_printf(dev, "cannot allocate virtqueue\n");
		return (ENOMEM);
	}

	vq->vq_dev = dev;
	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
	vq->vq_queue_index = queue;
	vq->vq_alignment = align;
	vq->vq_nentries = size;
	vq->vq_free_cnt = size;
	vq->vq_intrhand = info->vqai_intr;
	vq->vq_intrhand_arg = info->vqai_intr_arg;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

	if (info->vqai_maxindirsz > 1) {
		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
		if (error)
			goto fail;
	}

	vq->vq_ring_size = round_page(vring_size(size, align));
	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
	if (vq->vq_ring_mem == NULL) {
		device_printf(dev,
		    "cannot allocate memory for virtqueue ring\n");
		error = ENOMEM;
		goto fail;
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	*vqp = vq;

fail:
	if (error)
		virtqueue_free(vq);

	return (error);
}

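/*
 * Allocate the per-entry indirect descriptor tables. If the host did not
 * negotiate VIRTIO_RING_F_INDIRECT_DESC, this is not an error: the queue
 * simply falls back to direct descriptors.
 */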
static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
	device_t dev;
	struct vq_desc_extra *dxp;
	int i, size;

	dev = vq->vq_dev;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
		/*
		 * Indirect descriptors requested by the driver but not
		 * negotiated. Return zero to keep the initialization
		 * going: we'll run fine without.
		 */
		if (bootverbose)
			device_printf(dev, "virtqueue %d (%s) requested "
			    "indirect descriptors but not negotiated\n",
			    vq->vq_queue_index, vq->vq_name);
		return (0);
	}

	size = indirect_size * sizeof(struct vring_desc);
	vq->vq_max_indirect_size = indirect_size;
	vq->vq_indirect_mem_size = size;
	vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
		if (dxp->indirect == NULL) {
			device_printf(dev, "cannot allocate indirect list\n");
			return (ENOMEM);
		}

		dxp->indirect_paddr = vtophys(dxp->indirect);
		virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	return (0);
}

static void
virtqueue_free_indirect(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	int i;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		if (dxp->indirect == NULL)
			break;

		free(dxp->indirect, M_DEVBUF);
		dxp->indirect = NULL;
		dxp->indirect_paddr = 0;
	}

	vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
	vq->vq_indirect_mem_size = 0;
}

static void
virtqueue_init_indirect_list(struct virtqueue *vq,
    struct vring_desc *indirect)
{
	int i;

	bzero(indirect, vq->vq_indirect_mem_size);

	for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
		indirect[i].next = i + 1;
	indirect[i].next = VQ_RING_DESC_CHAIN_END;
}

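/*
 * Reset a virtqueue to its freshly allocated state. The size must match the
 * original allocation; any entries still outstanding are leaked.
 */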
int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
	struct vq_desc_extra *dxp;
	int i;

	if (vq->vq_nentries != size) {
		device_printf(vq->vq_dev,
		    "%s: '%s' changed size; old=%hu, new=%hu\n",
		    __func__, vq->vq_name, vq->vq_nentries, size);
		return (EINVAL);
	}

	/* Warn if the virtqueue was not properly cleaned up. */
	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev,
		    "%s: warning '%s' virtqueue not empty, "
		    "leaking %d entries\n", __func__, vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	vq->vq_desc_head_idx = 0;
	vq->vq_used_cons_idx = 0;
	vq->vq_queued_cnt = 0;
	vq->vq_free_cnt = vq->vq_nentries;

	/* To be safe, reset all our allocated memory. */
	bzero(vq->vq_ring_mem, vq->vq_ring_size);
	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];
		dxp->cookie = NULL;
		dxp->ndescs = 0;
		if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
			virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	return (0);
}

void
virtqueue_free(struct virtqueue *vq)
{

	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
		    "leaking %d entries\n", vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
		virtqueue_free_indirect(vq);

	if (vq->vq_ring_mem != NULL) {
		contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
		vq->vq_ring_size = 0;
		vq->vq_ring_mem = NULL;
	}

	free(vq, M_DEVBUF);
}

vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring_mem));
}

int
virtqueue_size(struct virtqueue *vq)
{

	return (vq->vq_nentries);
}

int
virtqueue_empty(struct virtqueue *vq)
{

	return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{

	return (vq->vq_free_cnt == 0);
}

void
virtqueue_notify(struct virtqueue *vq)
{

	/* Ensure updated avail->idx is visible to host. */
	mb();

	if (vq_ring_must_notify_host(vq))
		vq_ring_notify_host(vq);
	vq->vq_queued_cnt = 0;
}

int
virtqueue_nused(struct virtqueue *vq)
{
	uint16_t used_idx, nused;

	used_idx = vq->vq_ring.used->idx;

	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

	return (nused);
}

int
virtqueue_intr_filter(struct virtqueue *vq)
{

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (0);

	virtqueue_disable_intr(vq);

	return (1);
}

void
virtqueue_intr(struct virtqueue *vq)
{

	vq->vq_intrhand(vq->vq_intrhand_arg);
}

int
virtqueue_enable_intr(struct virtqueue *vq)
{

	return (vq_ring_enable_interrupt(vq, 0));
}

int
virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
{
	uint16_t ndesc, avail_idx;

	avail_idx = vq->vq_ring.avail->idx;
	ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);

	switch (hint) {
	case VQ_POSTPONE_SHORT:
		ndesc = ndesc / 4;
		break;
	case VQ_POSTPONE_LONG:
		ndesc = (ndesc * 3) / 4;
		break;
	case VQ_POSTPONE_EMPTIED:
		break;
	}

	return (vq_ring_enable_interrupt(vq, ndesc));
}

/*
 * Note this is only considered a hint to the host.
 */
void
virtqueue_disable_intr(struct virtqueue *vq)
{

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx -
		    vq->vq_nentries - 1;
	} else
		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

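/*
 * Enqueue one request. The readable segments of the scatter/gather list are
 * exposed to the host before the writable segments; the cookie is handed
 * back by virtqueue_dequeue() once the host has consumed the chain.
 */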
int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx, idx;

	needed = readable + writable;

	VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
	VQASSERT(vq, needed == sg->sg_nseg,
	    "segment count mismatch, %d, %d", needed, sg->sg_nseg);
	VQASSERT(vq,
	    needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
	    "too many segments to enqueue: %d, %d/%d", needed,
	    vq->vq_nentries, vq->vq_max_indirect_size);

	if (needed < 1)
		return (EINVAL);
	if (vq->vq_free_cnt == 0)
		return (ENOSPC);

	if (vq_ring_use_indirect(vq, needed)) {
		vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
		return (0);
	} else if (vq->vq_free_cnt < needed)
		return (EMSGSIZE);

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = needed;

	idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
	    sg, readable, writable);

	vq->vq_desc_head_idx = idx;
	vq->vq_free_cnt -= needed;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, idx);

	vq_ring_update_avail(vq, head_idx);

	return (0);
}

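/*
 * Dequeue the next completed request, returning its cookie and, if len is
 * not NULL, the number of bytes the host wrote. Returns NULL when the used
 * ring has no new entries.
 */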
void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (NULL);

	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

	rmb();
	desc_idx = (uint16_t) uep->id;
	if (len != NULL)
		*len = uep->len;

	vq_ring_free_chain(vq, desc_idx);

	cookie = vq->vq_descx[desc_idx].cookie;
	VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
	vq->vq_descx[desc_idx].cookie = NULL;

	return (cookie);
}

void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
	void *cookie;

	while ((cookie = virtqueue_dequeue(vq, len)) == NULL)
		cpu_spinwait();

	return (cookie);
}

void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
	void *cookie;
	int idx;

	cookie = NULL;
	idx = *last;

	while (idx < vq->vq_nentries && cookie == NULL) {
		if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
			vq->vq_descx[idx].cookie = NULL;
			/* Free chain to keep free count consistent. */
			vq_ring_free_chain(vq, idx);
		}
		idx++;
	}

	*last = idx;

	return (cookie);
}

void
virtqueue_dump(struct virtqueue *vq)
{

	if (vq == NULL)
		return;

	printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
	    "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
	    "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
	    vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
	    virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
	    vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
	    vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
	    vq->vq_ring.used->flags);
}

static void
vq_ring_init(struct virtqueue *vq)
{
	struct vring *vr;
	char *ring_mem;
	int i, size;

	ring_mem = vq->vq_ring_mem;
	size = vq->vq_nentries;
	vr = &vq->vq_ring;

	vring_init(vr, size, ring_mem, vq->vq_alignment);

	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = i + 1;
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}

static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[avail_idx] = desc_idx;

	wmb();
	vq->vq_ring.avail->idx++;

	/* Keep pending count until virtqueue_notify(). */
	vq->vq_queued_cnt++;
}

static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
	struct sglist_seg *seg;
	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	needed = readable + writable;

	for (i = 0, idx = head_idx, seg = sg->sg_segs;
	     i < needed;
	     i++, idx = dp->next, seg++) {
		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
		    "premature end of free desc chain");

		dp = &desc[idx];
		dp->addr = seg->ss_paddr;
		dp->len = seg->ss_len;
		dp->flags = 0;

		if (i < needed - 1)
			dp->flags |= VRING_DESC_F_NEXT;
		if (i >= readable)
			dp->flags |= VRING_DESC_F_WRITE;
	}

	return (idx);
}

static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{

	if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
		return (0);

	if (vq->vq_max_indirect_size < needed)
		return (0);

	if (needed < 2)
		return (0);

	return (1);
}

static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx;

	needed = readable + writable;
	VQASSERT(vq, needed <= vq->vq_max_indirect_size,
	    "enqueuing too many indirect descriptors");

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dp = &vq->vq_ring.desc[head_idx];
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = 1;

	dp->addr = dxp->indirect_paddr;
	dp->len = needed * sizeof(struct vring_desc);
	dp->flags = VRING_DESC_F_INDIRECT;

	vq_ring_enqueue_segments(vq, dxp->indirect, 0,
	    sg, readable, writable);

	vq->vq_desc_head_idx = dp->next;
	vq->vq_free_cnt--;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

	vq_ring_update_avail(vq, head_idx);
}

static int
vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{

	/*
	 * Enable interrupts, making sure we get the latest index of
	 * what's already been consumed.
	 */
	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
	else
		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

	mb();

	/*
	 * Enough items may have already been consumed to meet our threshold
	 * since we last checked. Let our caller know so it processes the new
	 * entries.
	 */
	if (virtqueue_nused(vq) > ndesc)
		return (1);

	return (0);
}

static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
	uint16_t new_idx, prev_idx, event_idx;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		new_idx = vq->vq_ring.avail->idx;
		prev_idx = new_idx - vq->vq_queued_cnt;
		event_idx = vring_avail_event(&vq->vq_ring);

		return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
	}

	return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}

static void
vq_ring_notify_host(struct virtqueue *vq)
{

	VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
}

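/*
 * Return the descriptor chain headed by desc_idx to the free list. For an
 * indirect transfer only the head descriptor lives in the ring, so only a
 * single entry is reclaimed in that case.
 */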
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);

	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
			dp = &vq->vq_ring.desc[dp->next];
			dxp->ndescs--;
		}
	}

	VQASSERT(vq, dxp->ndescs == 0,
	    "failed to free entire desc chain, remaining: %d", dxp->ndescs);

	/*
	 * We must append the existing free chain, if any, to the end of
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	dp->next = vq->vq_desc_head_idx;
	vq->vq_desc_head_idx = desc_idx;
}