Lines Matching refs:vq in /freebsd-13-stable/sys/dev/virtio/virtqueue.c

123 static int	virtqueue_init_indirect(struct virtqueue *vq, int);
124 static void virtqueue_free_indirect(struct virtqueue *vq);
153 struct virtqueue *vq;
177 vq = malloc(sizeof(struct virtqueue) +
179 if (vq == NULL) {
184 vq->vq_dev = dev;
185 strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
186 vq->vq_queue_index = queue;
187 vq->vq_notify_offset = notify_offset;
188 vq->vq_alignment = align;
189 vq->vq_nentries = size;
190 vq->vq_free_cnt = size;
191 vq->vq_intrhand = info->vqai_intr;
192 vq->vq_intrhand_arg = info->vqai_intr_arg;
195 vq->vq_flags |= VIRTQUEUE_FLAG_MODERN;
197 vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;
200 error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
205 vq->vq_ring_size = round_page(vring_size(size, align));
206 vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
208 if (vq->vq_ring_mem == NULL) {
215 vq_ring_init(vq);
216 virtqueue_disable_intr(vq);
218 *vqp = vq;
222 virtqueue_free(vq);
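
Lines 153-222 above are the body of virtqueue_alloc(): it records the queue parameters and the caller's interrupt handler (lines 184-192), sets up indirect descriptors when requested (line 200), allocates the ring with contigmalloc() (line 206), and hands the queue back through *vqp (line 218). Drivers do not normally call it directly; they describe each queue with VQ_ALLOC_INFO_INIT() and let the transport reach virtqueue_alloc(). A minimal sketch of that pattern, modeled on the in-tree drivers (all mydev_* names are hypothetical):

    #include <sys/param.h>
    #include <sys/bus.h>

    #include <dev/virtio/virtio.h>
    #include <dev/virtio/virtqueue.h>

    /* Hypothetical driver state; only the virtqueue pointer matters here. */
    struct mydev_softc {
        device_t          mydev_dev;
        struct virtqueue *mydev_vq;
    };

    /* Becomes vq_intrhand/vq_intrhand_arg via vqai_intr/vqai_intr_arg. */
    static void
    mydev_vq_intr(void *xsc)
    {
    }

    static int
    mydev_alloc_virtqueue(struct mydev_softc *sc)
    {
        struct vq_alloc_info vq_info;

        /*
         * Arguments: maximum indirect segments (0 disables indirect
         * descriptors), interrupt handler and argument, where to store
         * the virtqueue pointer, and a printf-style queue name.
         */
        VQ_ALLOC_INFO_INIT(&vq_info, 0, mydev_vq_intr, sc, &sc->mydev_vq,
            "%s request", device_get_nameunit(sc->mydev_dev));

        /* The transport ends up in virtqueue_alloc() shown above. */
        return (virtio_alloc_virtqueues(sc->mydev_dev, 0, 1, &vq_info));
    }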
228 virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
234 dev = vq->vq_dev;
245 vq->vq_queue_index, vq->vq_name);
250 vq->vq_max_indirect_size = indirect_size;
251 vq->vq_indirect_mem_size = size;
252 vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;
254 for (i = 0; i < vq->vq_nentries; i++) {
255 dxp = &vq->vq_descx[i];
264 virtqueue_init_indirect_list(vq, dxp->indirect);
271 virtqueue_free_indirect(struct virtqueue *vq)
276 for (i = 0; i < vq->vq_nentries; i++) {
277 dxp = &vq->vq_descx[i];
287 vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
288 vq->vq_indirect_mem_size = 0;
292 virtqueue_init_indirect_list(struct virtqueue *vq,
297 bzero(indirect, vq->vq_indirect_mem_size);
299 for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
300 indirect[i].next = vq_gtoh16(vq, i + 1);
301 indirect[i].next = vq_gtoh16(vq, VQ_RING_DESC_CHAIN_END);
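
virtqueue_init_indirect_list() (lines 292-301) threads each indirect descriptor table into a free chain: entry i points at entry i+1, and the final entry is terminated with VQ_RING_DESC_CHAIN_END. vq_ring_init() later in the file does the same for the main descriptor table. A standalone userland sketch of the chaining for a four-entry table, omitting the vq_gtoh16() byte-order conversion:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define VQ_RING_DESC_CHAIN_END 32768    /* sentinel used by virtqueue.c */

    struct vring_desc {                     /* trimmed to the field used here */
        uint16_t next;
    };

    int
    main(void)
    {
        struct vring_desc indirect[4];
        int i;

        memset(indirect, 0, sizeof(indirect));
        for (i = 0; i < 4 - 1; i++)
            indirect[i].next = i + 1;
        indirect[i].next = VQ_RING_DESC_CHAIN_END;

        for (i = 0; i < 4; i++)
            printf("%u ", indirect[i].next); /* prints: 1 2 3 32768 */
        printf("\n");
        return (0);
    }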
305 virtqueue_reinit(struct virtqueue *vq, uint16_t size)
310 if (vq->vq_nentries != size) {
311 device_printf(vq->vq_dev,
313 __func__, vq->vq_name, vq->vq_nentries, size);
318 if (vq->vq_free_cnt != vq->vq_nentries) {
319 device_printf(vq->vq_dev,
321 "leaking %d entries\n", __func__, vq->vq_name,
322 vq->vq_nentries - vq->vq_free_cnt);
325 vq->vq_desc_head_idx = 0;
326 vq->vq_used_cons_idx = 0;
327 vq->vq_queued_cnt = 0;
328 vq->vq_free_cnt = vq->vq_nentries;
331 bzero(vq->vq_ring_mem, vq->vq_ring_size);
332 for (i = 0; i < vq->vq_nentries; i++) {
333 dxp = &vq->vq_descx[i];
336 if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
337 virtqueue_init_indirect_list(vq, dxp->indirect);
340 vq_ring_init(vq);
341 virtqueue_disable_intr(vq);
347 virtqueue_free(struct virtqueue *vq)
350 if (vq->vq_free_cnt != vq->vq_nentries) {
351 device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
352 "leaking %d entries\n", vq->vq_name,
353 vq->vq_nentries - vq->vq_free_cnt);
356 if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
357 virtqueue_free_indirect(vq);
359 if (vq->vq_ring_mem != NULL) {
360 contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
361 vq->vq_ring_size = 0;
362 vq->vq_ring_mem = NULL;
365 free(vq, M_DEVBUF);
369 virtqueue_paddr(struct virtqueue *vq)
372 return (vtophys(vq->vq_ring_mem));
376 virtqueue_desc_paddr(struct virtqueue *vq)
379 return (vtophys(vq->vq_ring.desc));
383 virtqueue_avail_paddr(struct virtqueue *vq)
386 return (vtophys(vq->vq_ring.avail));
390 virtqueue_used_paddr(struct virtqueue *vq)
393 return (vtophys(vq->vq_ring.used));
397 virtqueue_index(struct virtqueue *vq)
400 return (vq->vq_queue_index);
404 virtqueue_size(struct virtqueue *vq)
407 return (vq->vq_nentries);
411 virtqueue_nfree(struct virtqueue *vq)
414 return (vq->vq_free_cnt);
418 virtqueue_empty(struct virtqueue *vq)
421 return (vq->vq_nentries == vq->vq_free_cnt);
425 virtqueue_full(struct virtqueue *vq)
428 return (vq->vq_free_cnt == 0);
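
Lines 369-428 are thin accessors: the physical addresses of the ring and of its descriptor, avail, and used parts, the queue index and size, and the free-descriptor count with its empty/full predicates. A typical transmit-start loop gates on them like this (a sketch reusing the hypothetical softc from the allocation example; mydev_next_request() and mydev_submit() are invented):

    /* Queue work until the ring fills or the driver runs dry, then kick. */
    static void
    mydev_start(struct mydev_softc *sc)
    {
        struct virtqueue *vq = sc->mydev_vq;
        void *req;

        while (!virtqueue_full(vq) &&
            (req = mydev_next_request(sc)) != NULL) {
            if (mydev_submit(sc, req) != 0)
                break;
        }
        virtqueue_notify(vq);
    }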
432 virtqueue_notify(struct virtqueue *vq)
438 if (vq_ring_must_notify_host(vq))
439 vq_ring_notify_host(vq);
440 vq->vq_queued_cnt = 0;
444 virtqueue_nused(struct virtqueue *vq)
448 used_idx = vq_htog16(vq, vq->vq_ring.used->idx);
450 nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
451 VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");
457 virtqueue_intr_filter(struct virtqueue *vq)
460 if (vq->vq_used_cons_idx == vq_htog16(vq, vq->vq_ring.used->idx))
463 virtqueue_disable_intr(vq);
469 virtqueue_intr(struct virtqueue *vq)
472 vq->vq_intrhand(vq->vq_intrhand_arg);
476 virtqueue_enable_intr(struct virtqueue *vq)
479 return (vq_ring_enable_interrupt(vq, 0));
483 virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
487 avail_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
488 ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);
501 return (vq_ring_enable_interrupt(vq, ndesc));
508 virtqueue_disable_intr(struct virtqueue *vq)
511 if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
512 vring_used_event(&vq->vq_ring) = vq_gtoh16(vq,
513 vq->vq_used_cons_idx - vq->vq_nentries - 1);
517 vq->vq_ring.avail->flags |= vq_gtoh16(vq, VRING_AVAIL_F_NO_INTERRUPT);
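
virtqueue_disable_intr() (lines 508-517) has two strategies. Without EVENT_IDX it sets VRING_AVAIL_F_NO_INTERRUPT (line 517), which the host treats as a hint. With EVENT_IDX it instead pushes the used-event index as far as possible from the current consumption point (line 513); since ring indices wrap modulo 2^16, "far away" is plain unsigned arithmetic, as this standalone sketch shows:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint16_t used_cons_idx = 10, nentries = 256;

        /* Same expression as line 513; wraps modulo 2^16. */
        uint16_t used_event = used_cons_idx - nentries - 1;

        printf("%u\n", used_event); /* 65289, i.e. 10 - 257 mod 65536 */
        return (0);
    }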
521 virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
530 VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
531 VQASSERT(vq, needed == sg->sg_nseg,
533 VQASSERT(vq,
534 needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
536 vq->vq_nentries, vq->vq_max_indirect_size);
540 if (vq->vq_free_cnt == 0)
543 if (vq_ring_use_indirect(vq, needed)) {
544 vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
546 } else if (vq->vq_free_cnt < needed)
549 head_idx = vq->vq_desc_head_idx;
550 VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
551 dxp = &vq->vq_descx[head_idx];
553 VQASSERT(vq, dxp->cookie == NULL,
558 idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
561 vq->vq_desc_head_idx = idx;
562 vq->vq_free_cnt -= needed;
563 if (vq->vq_free_cnt == 0)
564 VQ_RING_ASSERT_CHAIN_TERM(vq);
566 VQ_RING_ASSERT_VALID_IDX(vq, idx);
568 vq_ring_update_avail(vq, head_idx);
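
virtqueue_enqueue() (lines 521-568) takes a filled sglist plus a split of its segments: the first readable segments are device-read-only and the last writable segments are device-write-only. It consumes either one indirect descriptor (line 543) or a chain from the free list, records cookie in vq_descx[] for later matching (line 551 onward), and publishes the head on the avail ring (line 568). A sketch of the usual submission pattern for the hypothetical driver above (the header/status request layout is made up):

    #include <sys/param.h>
    #include <sys/sglist.h>

    #include <dev/virtio/virtio.h>
    #include <dev/virtio/virtqueue.h>

    /*
     * Enqueue one request built from a device-readable header and a
     * device-writable status byte.
     */
    static int
    mydev_enqueue_one(struct virtqueue *vq, void *req, void *hdr,
        size_t hdrlen, uint8_t *status)
    {
        struct sglist_seg segs[2];
        struct sglist sg;
        int error;

        sglist_init(&sg, 2, segs);
        error = sglist_append(&sg, hdr, hdrlen);   /* readable */
        if (error == 0)
            error = sglist_append(&sg, status, 1); /* writable */
        if (error != 0)
            return (error);

        /*
         * One readable and one writable segment; req is the cookie
         * virtqueue_dequeue() hands back on completion.
         */
        error = virtqueue_enqueue(vq, req, &sg, 1, 1);
        if (error == 0)
            virtqueue_notify(vq);   /* kick the host if it wants kicks */
        return (error);
    }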
574 virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
580 if (vq->vq_used_cons_idx == vq_htog16(vq, vq->vq_ring.used->idx))
583 used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
584 uep = &vq->vq_ring.used->ring[used_idx];
587 desc_idx = (uint16_t) vq_htog32(vq, uep->id);
589 *len = vq_htog32(vq, uep->len);
591 vq_ring_free_chain(vq, desc_idx);
593 cookie = vq->vq_descx[desc_idx].cookie;
594 VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
595 vq->vq_descx[desc_idx].cookie = NULL;
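
virtqueue_dequeue() (lines 574-595) returns NULL while the used index still equals vq_used_cons_idx; otherwise it reads the next used element, frees the descriptor chain back to the free list (line 591), and returns the cookie stored at enqueue time, with *len set to the byte count the device reported. The matching consumer loop, typically run from the queue interrupt handler (mydev_complete() is hypothetical):

    /* Drain all completed requests; continues the sketches above. */
    static void
    mydev_process_completions(struct virtqueue *vq)
    {
        void *cookie;
        uint32_t len;

        while ((cookie = virtqueue_dequeue(vq, &len)) != NULL)
            mydev_complete(cookie, len); /* len: bytes the device wrote */
    }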
601 virtqueue_poll(struct virtqueue *vq, uint32_t *len)
605 VIRTIO_BUS_POLL(vq->vq_dev);
606 while ((cookie = virtqueue_dequeue(vq, len)) == NULL) {
608 VIRTIO_BUS_POLL(vq->vq_dev);
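
virtqueue_poll() (lines 601-608) spins on virtqueue_dequeue(), calling VIRTIO_BUS_POLL() so the transport can make progress; it suits control-style queues whose callers must wait for a synchronous completion. Continuing the hypothetical sketches, and assuming a single outstanding request:

    /* Submit one request and spin until the device marks it used. */
    static int
    mydev_ctrl_request(struct virtqueue *vq, void *req, struct sglist *sg,
        int readable, int writable)
    {
        uint32_t len;
        int error;

        error = virtqueue_enqueue(vq, req, sg, readable, writable);
        if (error != 0)
            return (error);
        virtqueue_notify(vq);

        /* With one request in flight, the first completion must be req. */
        if (virtqueue_poll(vq, &len) != req)
            return (ENXIO); /* hypothetical error policy */
        return (0);
    }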
615 virtqueue_drain(struct virtqueue *vq, int *last)
623 while (idx < vq->vq_nentries && cookie == NULL) {
624 if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
625 vq->vq_descx[idx].cookie = NULL;
627 vq_ring_free_chain(vq, idx);
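
virtqueue_drain() (lines 615-627) scans vq_descx[] from *last forward, freeing each outstanding chain and returning its cookie, and yields NULL once the table is exhausted; *last preserves the scan position between calls. Drivers use it on detach so no buffers leak (mydev_free_request() is hypothetical):

    /* Reclaim every in-flight buffer before freeing the queue. */
    static void
    mydev_drain_virtqueue(struct virtqueue *vq)
    {
        void *cookie;
        int last = 0;

        while ((cookie = virtqueue_drain(vq, &last)) != NULL)
            mydev_free_request(cookie);
    }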
638 virtqueue_dump(struct virtqueue *vq)
641 if (vq == NULL)
647 vq->vq_name, vq->vq_nentries, vq->vq_free_cnt, virtqueue_nused(vq),
648 vq->vq_queued_cnt, vq->vq_desc_head_idx,
649 vq_htog16(vq, vq->vq_ring.avail->idx), vq->vq_used_cons_idx,
650 vq_htog16(vq, vq->vq_ring.used->idx),
651 vq_htog16(vq, vring_used_event(&vq->vq_ring)),
652 vq_htog16(vq, vq->vq_ring.avail->flags),
653 vq_htog16(vq, vq->vq_ring.used->flags));
657 vq_ring_init(struct virtqueue *vq)
663 ring_mem = vq->vq_ring_mem;
664 size = vq->vq_nentries;
665 vr = &vq->vq_ring;
667 vring_init(vr, size, ring_mem, vq->vq_alignment);
670 vr->desc[i].next = vq_gtoh16(vq, i + 1);
671 vr->desc[i].next = vq_gtoh16(vq, VQ_RING_DESC_CHAIN_END);
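
vq_ring_init() (lines 657-671) overlays the vring on the contiguous buffer via vring_init() and chains the descriptor free list exactly like the indirect tables above. The buffer was sized at line 205 with round_page(vring_size(size, align)); a standalone sketch of that arithmetic, using the structure sizes fixed by the virtio ring layout:

    #include <stdint.h>
    #include <stdio.h>

    #define DESC_SZ      16 /* sizeof(struct vring_desc) */
    #define USED_ELEM_SZ 8  /* sizeof(struct vring_used_elem) */

    static size_t
    vring_size_sketch(unsigned int num, unsigned long align)
    {
        size_t size;

        /*
         * Descriptor table plus avail ring (flags, idx, num entries,
         * used_event), rounded up to the alignment...
         */
        size = num * DESC_SZ + sizeof(uint16_t) * (3 + num);
        size = (size + align - 1) & ~(align - 1);
        /* ...then the used ring (flags, idx, num elements, avail_event). */
        size += sizeof(uint16_t) * 3 + num * USED_ELEM_SZ;
        return (size);
    }

    int
    main(void)
    {
        /* 4096 + 518 -> 8192, plus 2054 = 10246; round_page -> 12288. */
        printf("%zu\n", vring_size_sketch(256, 4096));
        return (0);
    }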
675 vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
686 avail_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
687 avail_ring_idx = avail_idx & (vq->vq_nentries - 1);
688 vq->vq_ring.avail->ring[avail_ring_idx] = vq_gtoh16(vq, desc_idx);
691 vq->vq_ring.avail->idx = vq_gtoh16(vq, avail_idx + 1);
694 vq->vq_queued_cnt++;
698 vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
710 i++, idx = vq_htog16(vq, dp->next), seg++) {
711 VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
715 dp->addr = vq_gtoh64(vq, seg->ss_paddr);
716 dp->len = vq_gtoh32(vq, seg->ss_len);
720 dp->flags |= vq_gtoh16(vq, VRING_DESC_F_NEXT);
722 dp->flags |= vq_gtoh16(vq, VRING_DESC_F_WRITE);
729 vq_ring_use_indirect(struct virtqueue *vq, int needed)
732 if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
735 if (vq->vq_max_indirect_size < needed)
745 vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
754 VQASSERT(vq, needed <= vq->vq_max_indirect_size,
757 head_idx = vq->vq_desc_head_idx;
758 VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
759 dp = &vq->vq_ring.desc[head_idx];
760 dxp = &vq->vq_descx[head_idx];
762 VQASSERT(vq, dxp->cookie == NULL,
767 dp->addr = vq_gtoh64(vq, dxp->indirect_paddr);
768 dp->len = vq_gtoh32(vq, needed * sizeof(struct vring_desc));
769 dp->flags = vq_gtoh16(vq, VRING_DESC_F_INDIRECT);
771 vq_ring_enqueue_segments(vq, dxp->indirect, 0,
774 vq->vq_desc_head_idx = vq_htog16(vq, dp->next);
775 vq->vq_free_cnt--;
776 if (vq->vq_free_cnt == 0)
777 VQ_RING_ASSERT_CHAIN_TERM(vq);
779 VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);
781 vq_ring_update_avail(vq, head_idx);
785 vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
792 if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
793 vring_used_event(&vq->vq_ring) =
794 vq_gtoh16(vq, vq->vq_used_cons_idx + ndesc);
796 vq->vq_ring.avail->flags &=
797 vq_gtoh16(vq, ~VRING_AVAIL_F_NO_INTERRUPT);
807 if (virtqueue_nused(vq) > ndesc)
814 vq_ring_must_notify_host(struct virtqueue *vq)
818 if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
819 new_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
820 prev_idx = new_idx - vq->vq_queued_cnt;
821 event_idx = vq_htog16(vq, vring_avail_event(&vq->vq_ring));
826 flags = vq->vq_ring.used->flags;
827 return ((flags & vq_gtoh16(vq, VRING_USED_F_NO_NOTIFY)) == 0);
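
vq_ring_must_notify_host() (lines 814-827) is the mirror image of interrupt suppression. Without EVENT_IDX the host sets VRING_USED_F_NO_NOTIFY when it does not want kicks (line 827). With EVENT_IDX the guest kicks only if the avail index has crossed the host's avail_event value since the previous notify (lines 819-821), the standard vring_need_event() test from the virtio spec, again in modulo-2^16 arithmetic:

    #include <stdint.h>

    /*
     * True when event_idx lies in the window [old_idx, new_idx) of
     * 16-bit ring indices, i.e. the guest has just moved the avail
     * index past the point where the host asked to be kicked. Here
     * old_idx corresponds to new_idx - vq_queued_cnt (line 820).
     */
    static inline int
    vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old_idx)
    {
        return ((uint16_t)(new_idx - event_idx - 1) <
            (uint16_t)(new_idx - old_idx));
    }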
831 vq_ring_notify_host(struct virtqueue *vq)
834 VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index,
835 vq->vq_notify_offset);
839 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
844 VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
845 dp = &vq->vq_ring.desc[desc_idx];
846 dxp = &vq->vq_descx[desc_idx];
848 if (vq->vq_free_cnt == 0)
849 VQ_RING_ASSERT_CHAIN_TERM(vq);
851 vq->vq_free_cnt += dxp->ndescs;
854 if ((dp->flags & vq_gtoh16(vq, VRING_DESC_F_INDIRECT)) == 0) {
855 while (dp->flags & vq_gtoh16(vq, VRING_DESC_F_NEXT)) {
856 uint16_t next_idx = vq_htog16(vq, dp->next);
857 VQ_RING_ASSERT_VALID_IDX(vq, next_idx);
858 dp = &vq->vq_ring.desc[next_idx];
863 VQASSERT(vq, dxp->ndescs == 0,
871 dp->next = vq_gtoh16(vq, vq->vq_desc_head_idx);
872 vq->vq_desc_head_idx = desc_idx;