Lines Matching refs:vq

118 static int	virtqueue_init_indirect(struct virtqueue *vq, int);
119 static void	virtqueue_free_indirect(struct virtqueue *vq);
151 struct virtqueue *vq;
175 vq = malloc(sizeof(struct virtqueue) +
177 if (vq == NULL) {
182 vq->vq_dev = dev;
183 strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
184 vq->vq_queue_index = queue;
185 vq->vq_alignment = align;
186 vq->vq_nentries = size;
187 vq->vq_free_cnt = size;
188 vq->vq_intrhand = info->vqai_intr;
189 vq->vq_intrhand_arg = info->vqai_intr_arg;
192 vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;
195 error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
200 vq->vq_ring_size = round_page(vring_size(size, align));
201 vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
203 if (vq->vq_ring_mem == NULL) {
210 vq_ring_init(vq);
211 virtqueue_disable_intr(vq);
213 *vqp = vq;
217 virtqueue_free(vq);
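The lines above are from the virtqueue allocation path: the caller passes a vq_alloc_info describing the queue (name, maximum indirect segment count, interrupt handler and argument), and the function records those fields, optionally sets up indirect descriptors, allocates the ring with contigmalloc(), initializes it, and returns the queue through vqp. A minimal driver-side sketch follows; the handler, softc, and the exact virtqueue_alloc() argument order are assumptions rather than quotes from the header, and in practice the transport code usually makes this call on the driver's behalf:

    static void mydev_rx_intr(void *arg);        /* hypothetical handler */

    struct vq_alloc_info info;
    struct virtqueue *vq;
    int error;

    bzero(&info, sizeof(info));
    strlcpy(info.vqai_name, "mydev rx", sizeof(info.vqai_name));
    info.vqai_maxindirsz = 0;                    /* no indirect descriptors */
    info.vqai_intr = mydev_rx_intr;
    info.vqai_intr_arg = sc;                     /* hypothetical softc */

    /* Argument order assumed: device, queue index, ring size, alignment,
     * highest usable physical address, description, output pointer. */
    error = virtqueue_alloc(dev, 0, size, align, highaddr, &info, &vq);
    if (error != 0)
        device_printf(dev, "cannot allocate virtqueue: %d\n", error);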
223 virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
229 dev = vq->vq_dev;
240 vq->vq_queue_index, vq->vq_name);
245 vq->vq_max_indirect_size = indirect_size;
246 vq->vq_indirect_mem_size = size;
247 vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;
249 for (i = 0; i < vq->vq_nentries; i++) {
250 dxp = &vq->vq_descx[i];
259 virtqueue_init_indirect_list(vq, dxp->indirect);
266 virtqueue_free_indirect(struct virtqueue *vq)
271 for (i = 0; i < vq->vq_nentries; i++) {
272 dxp = &vq->vq_descx[i];
282 vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
283 vq->vq_indirect_mem_size = 0;
287 virtqueue_init_indirect_list(struct virtqueue *vq,
292 bzero(indirect, vq->vq_indirect_mem_size);
294 for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
300 virtqueue_reinit(struct virtqueue *vq, uint16_t size)
305 if (vq->vq_nentries != size) {
306 device_printf(vq->vq_dev,
308 __func__, vq->vq_name, vq->vq_nentries, size);
313 if (vq->vq_free_cnt != vq->vq_nentries) {
314 device_printf(vq->vq_dev,
316 "leaking %d entries\n", __func__, vq->vq_name,
317 vq->vq_nentries - vq->vq_free_cnt);
320 vq->vq_desc_head_idx = 0;
321 vq->vq_used_cons_idx = 0;
322 vq->vq_queued_cnt = 0;
323 vq->vq_free_cnt = vq->vq_nentries;
326 bzero(vq->vq_ring_mem, vq->vq_ring_size);
327 for (i = 0; i < vq->vq_nentries; i++) {
328 dxp = &vq->vq_descx[i];
331 if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
332 virtqueue_init_indirect_list(vq, dxp->indirect);
335 vq_ring_init(vq);
336 virtqueue_disable_intr(vq);
342 virtqueue_free(struct virtqueue *vq)
345 if (vq->vq_free_cnt != vq->vq_nentries) {
346 device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
347 "leaking %d entries\n", vq->vq_name,
348 vq->vq_nentries - vq->vq_free_cnt);
351 if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
352 virtqueue_free_indirect(vq);
354 if (vq->vq_ring_mem != NULL) {
355 contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
356 vq->vq_ring_size = 0;
357 vq->vq_ring_mem = NULL;
360 free(vq, M_DEVBUF);
364 virtqueue_paddr(struct virtqueue *vq)
367 return (vtophys(vq->vq_ring_mem));
371 virtqueue_desc_paddr(struct virtqueue *vq)
374 return (vtophys(vq->vq_ring.desc));
378 virtqueue_avail_paddr(struct virtqueue *vq)
381 return (vtophys(vq->vq_ring.avail));
385 virtqueue_used_paddr(struct virtqueue *vq)
388 return (vtophys(vq->vq_ring.used));
392 virtqueue_index(struct virtqueue *vq)
394 return (vq->vq_queue_index);
398 virtqueue_size(struct virtqueue *vq)
401 return (vq->vq_nentries);
405 virtqueue_nfree(struct virtqueue *vq)
408 return (vq->vq_free_cnt);
412 virtqueue_empty(struct virtqueue *vq)
415 return (vq->vq_nentries == vq->vq_free_cnt);
419 virtqueue_full(struct virtqueue *vq)
422 return (vq->vq_free_cnt == 0);
426 virtqueue_notify(struct virtqueue *vq)
432 if (vq_ring_must_notify_host(vq))
433 vq_ring_notify_host(vq);
434 vq->vq_queued_cnt = 0;
438 virtqueue_nused(struct virtqueue *vq)
442 used_idx = vq->vq_ring.used->idx;
444 nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
445 VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");
451 virtqueue_intr_filter(struct virtqueue *vq)
454 if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
457 virtqueue_disable_intr(vq);
463 virtqueue_intr(struct virtqueue *vq)
466 vq->vq_intrhand(vq->vq_intrhand_arg);
470 virtqueue_enable_intr(struct virtqueue *vq)
473 return (vq_ring_enable_interrupt(vq, 0));
477 virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
481 avail_idx = vq->vq_ring.avail->idx;
482 ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);
495 return (vq_ring_enable_interrupt(vq, ndesc));
502 virtqueue_disable_intr(struct virtqueue *vq)
505 if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
506 vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx -
507 vq->vq_nentries - 1;
509 vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
513 virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
522 VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
523 VQASSERT(vq, needed == sg->sg_nseg,
525 VQASSERT(vq,
526 needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
528 vq->vq_nentries, vq->vq_max_indirect_size);
532 if (vq->vq_free_cnt == 0)
535 if (vq_ring_use_indirect(vq, needed)) {
536 vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
538 } else if (vq->vq_free_cnt < needed)
541 head_idx = vq->vq_desc_head_idx;
542 VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
543 dxp = &vq->vq_descx[head_idx];
545 VQASSERT(vq, dxp->cookie == NULL,
550 idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
553 vq->vq_desc_head_idx = idx;
554 vq->vq_free_cnt -= needed;
555 if (vq->vq_free_cnt == 0)
556 VQ_RING_ASSERT_CHAIN_TERM(vq);
558 VQ_RING_ASSERT_VALID_IDX(vq, idx);
560 vq_ring_update_avail(vq, head_idx);
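virtqueue_enqueue() above takes a cookie identifying the buffer, an sglist describing its segments, and the counts of device-readable and device-writable segments; it builds a descriptor chain in the ring, or a single indirect descriptor when vq_ring_use_indirect() allows it, and publishes the chain head in the avail ring. A sketch of the usual caller, assuming the standard sys/sglist.h helpers and hypothetical hdr/buf/cookie objects:

    struct sglist_seg segs[2];
    struct sglist sg;
    int error;

    sglist_init(&sg, 2, segs);
    sglist_append(&sg, hdr, sizeof(*hdr));       /* device-readable request */
    sglist_append(&sg, buf, buflen);             /* device-writable response */

    /* One readable segment followed by one writable segment. */
    error = virtqueue_enqueue(vq, cookie, &sg, 1, 1);
    if (error == 0)
        virtqueue_notify(vq);                    /* kick the host if it asked to be notified */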
566 virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
572 if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
575 used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
576 uep = &vq->vq_ring.used->ring[used_idx];
583 vq_ring_free_chain(vq, desc_idx);
585 cookie = vq->vq_descx[desc_idx].cookie;
586 VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
587 vq->vq_descx[desc_idx].cookie = NULL;
593 virtqueue_poll(struct virtqueue *vq, uint32_t *len)
597 VIRTIO_BUS_POLL(vq->vq_dev);
598 while ((cookie = virtqueue_dequeue(vq, len)) == NULL) {
600 VIRTIO_BUS_POLL(vq->vq_dev);
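Completions come back through virtqueue_dequeue(), which returns the enqueue cookie and stores the length the device wrote; virtqueue_poll() above is the busy-wait variant for synchronous paths. A common interrupt-side pattern, with a hypothetical process() handler, uses the return value of virtqueue_enable_intr() to catch buffers the host completed while interrupts were still off:

    void *cookie;
    uint32_t len;

    again:
    while ((cookie = virtqueue_dequeue(vq, &len)) != NULL)
        process(cookie, len);                    /* hypothetical per-buffer work */

    /* Non-zero means more entries landed in the used ring while interrupts
     * were being re-enabled; turn them back off and loop. */
    if (virtqueue_enable_intr(vq) != 0) {
        virtqueue_disable_intr(vq);
        goto again;
    }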
607 virtqueue_drain(struct virtqueue *vq, int *last)
615 while (idx < vq->vq_nentries && cookie == NULL) {
616 if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
617 vq->vq_descx[idx].cookie = NULL;
619 vq_ring_free_chain(vq, idx);
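virtqueue_drain() scans the per-descriptor cookie table, returning any buffer still owned by the queue and releasing its descriptor chain; *last keeps the scan position across calls. The usual teardown loop, with a hypothetical release function:

    void *cookie;
    int last = 0;

    while ((cookie = virtqueue_drain(vq, &last)) != NULL)
        mydev_free_buf(sc, cookie);              /* hypothetical buffer release */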
630 virtqueue_dump(struct virtqueue *vq)
633 if (vq == NULL)
639 vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
640 virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
641 vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
642 vq->vq_ring.used->idx,
643 vring_used_event(&vq->vq_ring),
644 vq->vq_ring.avail->flags,
645 vq->vq_ring.used->flags);
649 vq_ring_init(struct virtqueue *vq)
655 ring_mem = vq->vq_ring_mem;
656 size = vq->vq_nentries;
657 vr = &vq->vq_ring;
659 vring_init(vr, size, ring_mem, vq->vq_alignment);
667 vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
678 avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
679 vq->vq_ring.avail->ring[avail_idx] = desc_idx;
682 vq->vq_ring.avail->idx++;
685 vq->vq_queued_cnt++;
689 vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
702 VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
720 vq_ring_use_indirect(struct virtqueue *vq, int needed)
723 if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
726 if (vq->vq_max_indirect_size < needed)
736 vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
745 VQASSERT(vq, needed <= vq->vq_max_indirect_size,
748 head_idx = vq->vq_desc_head_idx;
749 VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
750 dp = &vq->vq_ring.desc[head_idx];
751 dxp = &vq->vq_descx[head_idx];
753 VQASSERT(vq, dxp->cookie == NULL,
762 vq_ring_enqueue_segments(vq, dxp->indirect, 0,
765 vq->vq_desc_head_idx = dp->next;
766 vq->vq_free_cnt--;
767 if (vq->vq_free_cnt == 0)
768 VQ_RING_ASSERT_CHAIN_TERM(vq);
770 VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);
772 vq_ring_update_avail(vq, head_idx);
776 vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
783 if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
784 vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
786 vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
795 if (virtqueue_nused(vq) > ndesc)
802 vq_ring_must_notify_host(struct virtqueue *vq)
806 if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
807 new_idx = vq->vq_ring.avail->idx;
808 prev_idx = new_idx - vq->vq_queued_cnt;
809 event_idx = vring_avail_event(&vq->vq_ring);
814 return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
818 vq_ring_notify_host(struct virtqueue *vq)
821 VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
825 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
830 VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
831 dp = &vq->vq_ring.desc[desc_idx];
832 dxp = &vq->vq_descx[desc_idx];
834 if (vq->vq_free_cnt == 0)
835 VQ_RING_ASSERT_CHAIN_TERM(vq);
837 vq->vq_free_cnt += dxp->ndescs;
842 VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
843 dp = &vq->vq_ring.desc[dp->next];
848 VQASSERT(vq, dxp->ndescs == 0,
856 dp->next = vq->vq_desc_head_idx;
857 vq->vq_desc_head_idx = desc_idx;