Lines matching refs:vq in the FreeBSD virtio virtqueue implementation (virtqueue.c); each matched line is prefixed with its source line number.

121 static int	virtqueue_init_indirect(struct virtqueue *vq, int);
122 static void virtqueue_free_indirect(struct virtqueue *vq);
156 struct virtqueue *vq;
180 vq = malloc(sizeof(struct virtqueue) +
182 if (vq == NULL) {
187 vq->vq_dev = dev;
188 strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
189 vq->vq_queue_index = queue;
190 vq->vq_notify_offset = notify_offset;
191 vq->vq_alignment = align;
192 vq->vq_nentries = size;
193 vq->vq_free_cnt = size;
194 vq->vq_intrhand = info->vqai_intr;
195 vq->vq_intrhand_arg = info->vqai_intr_arg;
198 vq->vq_flags |= VIRTQUEUE_FLAG_MODERN;
200 vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;
203 error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
208 vq->vq_ring_size = round_page(vring_size(size, align));
209 vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
211 if (vq->vq_ring_mem == NULL) {
218 vq_ring_init(vq);
219 virtqueue_disable_intr(vq);
221 *vqp = vq;
225 virtqueue_free(vq);
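
The allocation path above (lines 156-225) makes two allocations: one malloc sized to hold the virtqueue structure plus one vq_desc_extra slot per ring entry (line 180), and one contigmalloc for the ring itself so the device sees physically contiguous memory (line 209). A minimal standalone sketch of the trailing-array pattern from line 180, with hypothetical names (my_vq, my_desc_extra):

#include <stdint.h>
#include <stdlib.h>

struct my_desc_extra { void *cookie; uint16_t ndescs; };

struct my_vq {
    uint16_t nentries;
    struct my_desc_extra descx[];   /* one slot per ring entry */
};

static struct my_vq *
my_vq_alloc(uint16_t size)
{
    struct my_vq *vq;

    /* A single allocation covers the header and the per-entry array. */
    vq = calloc(1, sizeof(*vq) + size * sizeof(struct my_desc_extra));
    if (vq == NULL)
        return (NULL);
    vq->nentries = size;
    return (vq);
}
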
231 virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
237 dev = vq->vq_dev;
248 vq->vq_queue_index, vq->vq_name);
253 vq->vq_max_indirect_size = indirect_size;
254 vq->vq_indirect_mem_size = size;
255 vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;
257 for (i = 0; i < vq->vq_nentries; i++) {
258 dxp = &vq->vq_descx[i];
267 virtqueue_init_indirect_list(vq, dxp->indirect);
274 virtqueue_free_indirect(struct virtqueue *vq)
279 for (i = 0; i < vq->vq_nentries; i++) {
280 dxp = &vq->vq_descx[i];
290 vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
291 vq->vq_indirect_mem_size = 0;
295 virtqueue_init_indirect_list(struct virtqueue *vq,
300 bzero(indirect, vq->vq_indirect_mem_size);
302 for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
303 indirect[i].next = vq_gtoh16(vq, i + 1);
304 indirect[i].next = vq_gtoh16(vq, VQ_RING_DESC_CHAIN_END);
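
virtqueue_init_indirect_list() (lines 295-304) threads each indirect table into a singly linked free chain: every descriptor's next field points at its successor, and the last one is terminated with a sentinel. A standalone sketch of that linking, assuming n >= 1 and using a hypothetical sentinel value in place of the driver's VQ_RING_DESC_CHAIN_END:

#include <stdint.h>

#define CHAIN_END 0xffff    /* hypothetical sentinel value */

struct desc { uint64_t addr; uint32_t len; uint16_t flags; uint16_t next; };

static void
chain_init(struct desc *d, int n)
{
    int i;

    for (i = 0; i < n - 1; i++)
        d[i].next = (uint16_t)(i + 1);
    d[i].next = CHAIN_END;  /* i == n - 1 here: terminate the chain */
}
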
308 virtqueue_reinit(struct virtqueue *vq, uint16_t size)
313 if (vq->vq_nentries != size) {
314 device_printf(vq->vq_dev,
316 __func__, vq->vq_name, vq->vq_nentries, size);
321 if (vq->vq_free_cnt != vq->vq_nentries) {
322 device_printf(vq->vq_dev,
324 "leaking %d entries\n", __func__, vq->vq_name,
325 vq->vq_nentries - vq->vq_free_cnt);
328 vq->vq_desc_head_idx = 0;
329 vq->vq_used_cons_idx = 0;
330 vq->vq_queued_cnt = 0;
331 vq->vq_free_cnt = vq->vq_nentries;
334 bzero(vq->vq_ring_mem, vq->vq_ring_size);
335 for (i = 0; i < vq->vq_nentries; i++) {
336 dxp = &vq->vq_descx[i];
339 if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
340 virtqueue_init_indirect_list(vq, dxp->indirect);
343 vq_ring_init(vq);
344 virtqueue_disable_intr(vq);
350 virtqueue_free(struct virtqueue *vq)
353 if (vq->vq_free_cnt != vq->vq_nentries) {
354 device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
355 "leaking %d entries\n", vq->vq_name,
356 vq->vq_nentries - vq->vq_free_cnt);
359 if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
360 virtqueue_free_indirect(vq);
362 if (vq->vq_ring_mem != NULL) {
363 contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
364 vq->vq_ring_size = 0;
365 vq->vq_ring_mem = NULL;
368 free(vq, M_DEVBUF);
372 virtqueue_paddr(struct virtqueue *vq)
375 return (vtophys(vq->vq_ring_mem));
379 virtqueue_desc_paddr(struct virtqueue *vq)
382 return (vtophys(vq->vq_ring.desc));
386 virtqueue_avail_paddr(struct virtqueue *vq)
389 return (vtophys(vq->vq_ring.avail));
393 virtqueue_used_paddr(struct virtqueue *vq)
396 return (vtophys(vq->vq_ring.used));
400 virtqueue_index(struct virtqueue *vq)
403 return (vq->vq_queue_index);
407 virtqueue_size(struct virtqueue *vq)
410 return (vq->vq_nentries);
414 virtqueue_nfree(struct virtqueue *vq)
417 return (vq->vq_free_cnt);
421 virtqueue_empty(struct virtqueue *vq)
424 return (vq->vq_nentries == vq->vq_free_cnt);
428 virtqueue_full(struct virtqueue *vq)
431 return (vq->vq_free_cnt == 0);
435 virtqueue_notify(struct virtqueue *vq)
441 if (vq_ring_must_notify_host(vq))
442 vq_ring_notify_host(vq);
443 vq->vq_queued_cnt = 0;
447 virtqueue_nused(struct virtqueue *vq)
451 used_idx = vq_htog16(vq, vq->vq_ring.used->idx);
453 nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
454 VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");
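
The count at line 453 relies on unsigned 16-bit subtraction wrapping modulo 2^16: used->idx and vq_used_cons_idx both increase without masking, so the difference stays correct even after the device's index overflows. A runnable illustration with hypothetical index values:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint16_t used_idx = 3;          /* device has produced 65539 buffers total */
    uint16_t used_cons_idx = 65533; /* driver has consumed 65533 */
    uint16_t nused = (uint16_t)(used_idx - used_cons_idx);

    printf("%u\n", nused);          /* prints 6: subtraction is modulo 2^16 */
    return (0);
}
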
460 virtqueue_intr_filter(struct virtqueue *vq)
463 if (vq->vq_used_cons_idx == vq_htog16(vq, vq->vq_ring.used->idx))
466 virtqueue_disable_intr(vq);
472 virtqueue_intr(struct virtqueue *vq)
475 vq->vq_intrhand(vq->vq_intrhand_arg);
479 virtqueue_enable_intr(struct virtqueue *vq)
482 return (vq_ring_enable_interrupt(vq, 0));
486 virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
490 avail_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
491 ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);
504 return (vq_ring_enable_interrupt(vq, ndesc));
511 virtqueue_disable_intr(struct virtqueue *vq)
514 if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
515 vring_used_event(&vq->vq_ring) = vq_gtoh16(vq,
516 vq->vq_used_cons_idx - vq->vq_nentries - 1);
520 vq->vq_ring.avail->flags |= vq_gtoh16(vq, VRING_AVAIL_F_NO_INTERRUPT);
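
With VIRTQUEUE_FLAG_EVENT_IDX there is no explicit off switch, so lines 514-516 park used_event at an index the device will not reach for roughly a full ring's worth of completions; otherwise the classic VRING_AVAIL_F_NO_INTERRUPT flag is set (line 520). A sketch of the parking computation, host-endian for clarity:

#include <stdint.h>

static void
park_used_event(uint16_t *used_event, uint16_t used_cons_idx, uint16_t nentries)
{
    /* Wraps modulo 2^16; the event index trails the consumer by nentries + 1. */
    *used_event = (uint16_t)(used_cons_idx - nentries - 1);
}
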
524 virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
533 VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
534 VQASSERT(vq, needed == sg->sg_nseg,
536 VQASSERT(vq,
537 needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
539 vq->vq_nentries, vq->vq_max_indirect_size);
543 if (vq->vq_free_cnt == 0)
546 if (vq_ring_use_indirect(vq, needed)) {
547 vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
549 } else if (vq->vq_free_cnt < needed)
552 head_idx = vq->vq_desc_head_idx;
553 VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
554 dxp = &vq->vq_descx[head_idx];
556 VQASSERT(vq, dxp->cookie == NULL,
561 idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
564 vq->vq_desc_head_idx = idx;
565 vq->vq_free_cnt -= needed;
566 if (vq->vq_free_cnt == 0)
567 VQ_RING_ASSERT_CHAIN_TERM(vq);
569 VQ_RING_ASSERT_VALID_IDX(vq, idx);
571 vq_ring_update_avail(vq, head_idx);
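
A driver feeds virtqueue_enqueue() an sglist with the device-readable segments first, then the device-writable ones, and notifies the host once the chain is published. A hedged kernel-context sketch assuming the sglist(9) interface; my_req, my_resp, and the two-segment layout are hypothetical, and error handling is trimmed:

static int
enqueue_request(struct virtqueue *vq, struct my_req *req, struct my_resp *resp)
{
    struct sglist_seg segs[2];
    struct sglist sg;
    int error;

    sglist_init(&sg, 2, segs);
    sglist_append(&sg, req, sizeof(*req));   /* device-readable */
    sglist_append(&sg, resp, sizeof(*resp)); /* device-writable */

    /* 1 readable segment, then 1 writable, matching the sglist order. */
    error = virtqueue_enqueue(vq, req, &sg, 1, 1);
    if (error == 0)
        virtqueue_notify(vq);
    return (error);
}
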
577 virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
583 if (vq->vq_used_cons_idx == vq_htog16(vq, vq->vq_ring.used->idx))
586 used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
587 uep = &vq->vq_ring.used->ring[used_idx];
590 desc_idx = (uint16_t) vq_htog32(vq, uep->id);
592 *len = vq_htog32(vq, uep->len);
594 vq_ring_free_chain(vq, desc_idx);
596 cookie = vq->vq_descx[desc_idx].cookie;
597 VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
598 vq->vq_descx[desc_idx].cookie = NULL;
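
On the completion side, line 586 masks the monotonically increasing consumer index into the used ring (valid because ring sizes are powers of two), and the cookie stashed at enqueue time is handed back. The usual driver pattern is a loop that drains everything currently completed; process_completion() below is a hypothetical handler:

static void
drain_completions(struct virtqueue *vq)
{
    void *cookie;
    uint32_t len;

    while ((cookie = virtqueue_dequeue(vq, &len)) != NULL)
        process_completion(cookie, len); /* len: bytes the device wrote */
}
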
604 virtqueue_poll(struct virtqueue *vq, uint32_t *len)
608 VIRTIO_BUS_POLL(vq->vq_dev);
609 while ((cookie = virtqueue_dequeue(vq, len)) == NULL) {
611 VIRTIO_BUS_POLL(vq->vq_dev);
618 virtqueue_drain(struct virtqueue *vq, int *last)
626 while (idx < vq->vq_nentries && cookie == NULL) {
627 if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
628 vq->vq_descx[idx].cookie = NULL;
630 vq_ring_free_chain(vq, idx);
641 virtqueue_dump(struct virtqueue *vq)
644 if (vq == NULL)
650 vq->vq_name, vq->vq_nentries, vq->vq_free_cnt, virtqueue_nused(vq),
651 vq->vq_queued_cnt, vq->vq_desc_head_idx,
652 vq_htog16(vq, vq->vq_ring.avail->idx), vq->vq_used_cons_idx,
653 vq_htog16(vq, vq->vq_ring.used->idx),
654 vq_htog16(vq, vring_used_event(&vq->vq_ring)),
655 vq_htog16(vq, vq->vq_ring.avail->flags),
656 vq_htog16(vq, vq->vq_ring.used->flags));
660 vq_ring_init(struct virtqueue *vq)
666 ring_mem = vq->vq_ring_mem;
667 size = vq->vq_nentries;
668 vr = &vq->vq_ring;
670 vring_init(vr, size, ring_mem, vq->vq_alignment);
673 vr->desc[i].next = vq_gtoh16(vq, i + 1);
674 vr->desc[i].next = vq_gtoh16(vq, VQ_RING_DESC_CHAIN_END);
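
vq_ring_init() (lines 660-674) carves the single page-rounded buffer from line 208 into the three split-ring areas via vring_init() and threads the descriptor free list exactly like the indirect tables above. The size being rounded follows the standard split-virtqueue layout; a standalone sketch of that computation, matching the virtio spec's vring_size():

#include <stdint.h>
#include <stdio.h>

struct vring_desc { uint64_t addr; uint32_t len; uint16_t flags, next; };
struct vring_used_elem { uint32_t id, len; };

static size_t
split_vring_size(unsigned int num, unsigned long align)
{
    size_t sz;

    sz = num * sizeof(struct vring_desc)  /* descriptor table */
        + sizeof(uint16_t) * (3 + num);   /* avail: flags, idx, ring[], used_event */
    sz = (sz + align - 1) & ~(align - 1); /* align the used ring */
    sz += sizeof(uint16_t) * 3            /* used: flags, idx, avail_event */
        + num * sizeof(struct vring_used_elem);
    return (sz);
}

int
main(void)
{
    printf("%zu\n", split_vring_size(256, 4096)); /* 10246 for a 256-entry ring */
    return (0);
}
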
678 vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
689 avail_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
690 avail_ring_idx = avail_idx & (vq->vq_nentries - 1);
691 vq->vq_ring.avail->ring[avail_ring_idx] = vq_gtoh16(vq, desc_idx);
694 vq->vq_ring.avail->idx = vq_gtoh16(vq, avail_idx + 1);
697 vq->vq_queued_cnt++;
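
The publish order in vq_ring_update_avail() matters: the ring slot written at line 691 must be visible to the device before the index increment at line 694 (the intervening write barrier does not mention vq and so is absent from this listing). A userland sketch of the same ordering, with a C11 release fence standing in for the kernel barrier:

#include <stdatomic.h>
#include <stdint.h>

struct avail_ring {
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[];
};

static void
publish(struct avail_ring *avail, uint16_t nentries, uint16_t head)
{
    uint16_t idx = avail->idx;

    avail->ring[idx & (nentries - 1)] = head;
    atomic_thread_fence(memory_order_release); /* entry before index */
    avail->idx = (uint16_t)(idx + 1);
}
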
701 vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
709 SDT_PROBE6(virtqueue, , enqueue_segments, entry, vq, desc, head_idx,
716 i++, idx = vq_htog16(vq, dp->next), seg++) {
717 VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
721 dp->addr = vq_gtoh64(vq, seg->ss_paddr);
722 dp->len = vq_gtoh32(vq, seg->ss_len);
726 dp->flags |= vq_gtoh16(vq, VRING_DESC_F_NEXT);
728 dp->flags |= vq_gtoh16(vq, VRING_DESC_F_WRITE);
736 vq_ring_use_indirect(struct virtqueue *vq, int needed)
739 if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
742 if (vq->vq_max_indirect_size < needed)
752 vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
761 VQASSERT(vq, needed <= vq->vq_max_indirect_size,
764 head_idx = vq->vq_desc_head_idx;
765 VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
766 dp = &vq->vq_ring.desc[head_idx];
767 dxp = &vq->vq_descx[head_idx];
769 VQASSERT(vq, dxp->cookie == NULL,
774 dp->addr = vq_gtoh64(vq, dxp->indirect_paddr);
775 dp->len = vq_gtoh32(vq, needed * sizeof(struct vring_desc));
776 dp->flags = vq_gtoh16(vq, VRING_DESC_F_INDIRECT);
778 vq_ring_enqueue_segments(vq, dxp->indirect, 0,
781 vq->vq_desc_head_idx = vq_htog16(vq, dp->next);
782 vq->vq_free_cnt--;
783 if (vq->vq_free_cnt == 0)
784 VQ_RING_ASSERT_CHAIN_TERM(vq);
786 VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);
788 vq_ring_update_avail(vq, head_idx);
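
The indirect path (lines 752-788) spends only one ring slot per request: the main descriptor gets VRING_DESC_F_INDIRECT and points at the preallocated table, its len field encoding how many table entries are in use, while the chain itself is written into the table by the same vq_ring_enqueue_segments() helper. A sketch of the slot setup at lines 774-776, endian conversions omitted:

#include <stdint.h>

#define VRING_DESC_F_INDIRECT 4 /* standard virtio descriptor flag */

struct vring_desc { uint64_t addr; uint32_t len; uint16_t flags, next; };

static void
set_indirect(struct vring_desc *dp, uint64_t table_paddr, int nsegs)
{
    dp->addr = table_paddr;                      /* physical address of the table */
    dp->len = nsegs * sizeof(struct vring_desc); /* bytes of table in use */
    dp->flags = VRING_DESC_F_INDIRECT;           /* whole chain costs one slot */
}
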
792 vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
799 if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
800 vring_used_event(&vq->vq_ring) =
801 vq_gtoh16(vq, vq->vq_used_cons_idx + ndesc);
803 vq->vq_ring.avail->flags &=
804 vq_gtoh16(vq, ~VRING_AVAIL_F_NO_INTERRUPT);
814 if (virtqueue_nused(vq) > ndesc)
821 vq_ring_must_notify_host(struct virtqueue *vq)
825 if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
826 new_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
827 prev_idx = new_idx - vq->vq_queued_cnt;
828 event_idx = vq_htog16(vq, vring_avail_event(&vq->vq_ring));
833 flags = vq->vq_ring.used->flags;
834 return ((flags & vq_gtoh16(vq, VRING_USED_F_NO_NOTIFY)) == 0);
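
With EVENT_IDX negotiated, lines 826-828 gather the three indices for the spec's need-event test: notify only if the device's avail_event fell inside the window of entries queued since the last notification. The comparison is wrap-safe thanks to unsigned 16-bit arithmetic; a standalone sketch matching the spec's vring_need_event():

#include <stdbool.h>
#include <stdint.h>

static bool
need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old_idx)
{
    /* True iff event_idx lies in (old_idx, new_idx], modulo 2^16. */
    return ((uint16_t)(new_idx - event_idx - 1) <
        (uint16_t)(new_idx - old_idx));
}
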
838 vq_ring_notify_host(struct virtqueue *vq)
841 VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index,
842 vq->vq_notify_offset);
846 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
851 VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
852 dp = &vq->vq_ring.desc[desc_idx];
853 dxp = &vq->vq_descx[desc_idx];
855 if (vq->vq_free_cnt == 0)
856 VQ_RING_ASSERT_CHAIN_TERM(vq);
858 vq->vq_free_cnt += dxp->ndescs;
861 if ((dp->flags & vq_gtoh16(vq, VRING_DESC_F_INDIRECT)) == 0) {
862 while (dp->flags & vq_gtoh16(vq, VRING_DESC_F_NEXT)) {
863 uint16_t next_idx = vq_htog16(vq, dp->next);
864 VQ_RING_ASSERT_VALID_IDX(vq, next_idx);
865 dp = &vq->vq_ring.desc[next_idx];
870 VQASSERT(vq, dxp->ndescs == 0,
878 dp->next = vq_gtoh16(vq, vq->vq_desc_head_idx);
879 vq->vq_desc_head_idx = desc_idx;
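
vq_ring_free_chain() walks a completed chain through its VRING_DESC_F_NEXT links to find the tail (lines 861-865), credits ndescs back to the free count, and finally splices the whole chain in front of the free list by pointing the tail at the old head (lines 878-879). A simplified standalone sketch of that splice, ignoring the indirect case, with field names loosely mirroring the listing:

#include <stdint.h>

#define F_NEXT 1 /* standard VRING_DESC_F_NEXT value */

struct desc { uint64_t addr; uint32_t len; uint16_t flags, next; };

static void
free_chain(struct desc *ring, uint16_t *free_head, uint16_t chain_head)
{
    struct desc *dp = &ring[chain_head];

    while (dp->flags & F_NEXT)  /* walk to the chain's tail */
        dp = &ring[dp->next];

    dp->next = *free_head;      /* tail -> old free-list head */
    *free_head = chain_head;    /* freed chain becomes the new head */
}
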