Lines Matching defs:vring

91 	struct vring vring;
112 * vring.
125 } vring;
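
Lines 91, 112 and 125 embed the classic split-ring layout in the driver's per-queue state. For reference, struct vring from include/uapi/linux/virtio_ring.h is essentially the following (comments added here):

    struct vring {
        unsigned int num;           /* ring size, always a power of two */
        struct vring_desc *desc;    /* descriptor table: addr/len/flags/next */
        struct vring_avail *avail;  /* driver -> device: ring of chain heads */
        struct vring_used *used;    /* device -> driver: ring of {id, len} */
    };
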
357 * making all of the arch DMA ops work on the vring device itself
523 struct vring_virtqueue *vring = to_vvq(vq);
524 struct vring_desc_extra *extra = vring->split.desc_extra;
580 WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
591 desc = vq->split.vring.desc;
647 vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
662 virtqueue_add_desc_split(_vq, vq->split.vring.desc,
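
Lines 580–662 are the split-ring add path. When VIRTIO_RING_F_INDIRECT_DESC is negotiated and the chain is long, the descriptors are written into a separately allocated table and a single ring slot points at it with VRING_DESC_F_INDIRECT. A minimal sketch of that final link, assuming a little-endian (VIRTIO_F_VERSION_1) device; the kernel's own helpers additionally handle DMA mapping and legacy endianness via cpu_to_virtio*():

    /* Point one ring slot at an out-of-band table of total_sg
     * struct vring_desc entries (sketch only). */
    static void link_indirect(struct vring_desc *slot, uint64_t indir_addr,
                              unsigned int total_sg)
    {
        slot->addr  = cpu_to_le64(indir_addr);
        slot->len   = cpu_to_le32(total_sg * sizeof(struct vring_desc));
        slot->flags = cpu_to_le16(VRING_DESC_F_INDIRECT);
    }
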
687 avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
688 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
694 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
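
Lines 687–694 publish the new chain: the head index goes into the next avail slot, and only then is avail->idx advanced, so the device can never observe the index before the slot contents. The avail_idx_shadow copy exists so the driver never has to read back device-visible memory. A standalone sketch of the same ordering (endianness conversion omitted, little-endian host assumed; the kernel uses virtio_wmb()):

    static void publish_head(struct vring *vr, uint16_t *avail_idx_shadow,
                             uint16_t head)
    {
        uint16_t slot = *avail_idx_shadow & (vr->num - 1); /* num is 2^n */

        vr->avail->ring[slot] = head;            /* expose the chain head */
        __atomic_thread_fence(__ATOMIC_RELEASE); /* slot before idx */
        (*avail_idx_shadow)++;
        vr->avail->idx = *avail_idx_shadow;      /* device may consume now */
    }
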
754 vring_avail_event(&vq->split.vring)),
757 needs_kick = !(vq->split.vring.used->flags &
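
Lines 754–757 decide whether to notify the device: without VIRTIO_RING_F_EVENT_IDX the driver honors VRING_USED_F_NO_NOTIFY in used->flags; with it, the device publishes avail_event and the driver kicks only if that index falls inside the window of entries it just added. The comparison is this UAPI helper, safe across u16 wraparound:

    /* From include/uapi/linux/virtio_ring.h: true iff event_idx lies
     * in (old, new_idx] modulo 2^16. */
    static inline int vring_need_event(__u16 event_idx, __u16 new_idx,
                                       __u16 old)
    {
        return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
    }
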
777 while (vq->split.vring.desc[i].flags & nextflag) {
820 vq->split.vring.used->idx);
848 last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
850 vq->split.vring.used->ring[last_used].id);
852 vq->split.vring.used->ring[last_used].len);
854 if (unlikely(i >= vq->split.vring.num)) {
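
Lines 820–854 consume completions: the device appends {id, len} pairs to the used ring and bumps used->idx; the driver walks behind with last_used_idx. A minimal polling consumer under the same assumptions as the sketches above (the kernel adds virtio_rmb() and endian conversion):

    /* Returns true and fills *id/*len when a new used entry exists. */
    static bool poll_used(struct vring *vr, uint16_t *last_used_idx,
                          uint32_t *id, uint32_t *len)
    {
        if (vr->used->idx == *last_used_idx)
            return false;                        /* nothing new */

        __atomic_thread_fence(__ATOMIC_ACQUIRE); /* idx before entry */

        uint16_t slot = *last_used_idx & (vr->num - 1);
        *id  = vr->used->ring[slot].id;          /* head of the chain */
        *len = vr->used->ring[slot].len;         /* bytes device wrote */
        (*last_used_idx)++;
        return true;
    }
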
872 &vring_used_event(&vq->split.vring),
897 vring_used_event(&vq->split.vring) = 0x0;
899 vq->split.vring.avail->flags =
920 vq->split.vring.avail->flags =
924 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
935 vq->split.vring.used->idx);
953 vq->split.vring.avail->flags =
961 &vring_used_event(&vq->split.vring),
964 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
982 for (i = 0; i < vq->split.vring.num; i++) {
989 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
995 BUG_ON(vq->vq.num_free != vq->split.vring.num);
1015 vring_split->vring.avail->flags = cpu_to_virtio16(vdev,
1024 num = vq->split.vring.num;
1026 vq->split.vring.avail->flags = 0;
1027 vq->split.vring.avail->idx = 0;
1030 vq->split.vring.avail->ring[num] = 0;
1032 vq->split.vring.used->flags = 0;
1033 vq->split.vring.used->idx = 0;
1036 *(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0;
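
The cast on line 1036 (and the extra slot cleared on line 1030) reflect where the EVENT_IDX fields actually live: they are not separate members but alias the spare entry at the tail of the opposite ring. From the UAPI header:

    /* Only meaningful when VIRTIO_RING_F_EVENT_IDX is negotiated. */
    #define vring_used_event(vr)  ((vr)->avail->ring[(vr)->num])
    #define vring_avail_event(vr) (*(__virtio16 *)&(vr)->used->ring[(vr)->num])
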
1056 u32 num = vring_split->vring.num;
1082 vring_split->vring.desc,
1130 vring_init(&vring_split->vring, num, queue, vring_align);
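
vring_init() on lines 1130 and 2865 carves one contiguous allocation into the three areas: descriptors first, then the avail ring with its trailing event slot, then the used ring aligned up to vring_align. The UAPI helper is essentially:

    static inline void vring_init(struct vring *vr, unsigned int num, void *p,
                                  unsigned long align)
    {
        vr->num = num;
        vr->desc = p;
        vr->avail = (struct vring_avail *)((char *)p +
                        num * sizeof(struct vring_desc));
        vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] +
                        sizeof(__virtio16) + align - 1) & ~(align - 1));
    }
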
1197 virtqueue_init(vq, vring_split.vring.num);
1311 BUG_ON(id == vq->packed.vring.num);
1338 vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
1339 vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
1341 vq->packed.vring.desc[head].id = cpu_to_le16(id);
1357 vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
1365 if (n >= vq->packed.vring.num) {
1446 WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
1448 desc = vq->packed.vring.desc;
1460 BUG_ON(id == vq->packed.vring.num);
1493 if (unlikely(++i >= vq->packed.vring.num)) {
1524 vq->packed.vring.desc[head].flags = head_flags;
1545 if (i >= vq->packed.vring.num)
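
Lines 1311–1545 are the packed-ring add path. Its ordering rule shows at line 1524: every descriptor field is written first and the head descriptor's flags word last, because the AVAIL/USED bits inside flags are the single store that hands the whole chain to the device. A self-contained sketch, little-endian assumed:

    #define VRING_PACKED_DESC_F_AVAIL 7
    #define VRING_PACKED_DESC_F_USED  15

    struct vring_packed_desc {      /* layout as in the UAPI header */
        __le64 addr;
        __le32 len;
        __le16 id;
        __le16 flags;
    };

    static void make_avail(struct vring_packed_desc *d, uint64_t addr,
                           uint32_t len, uint16_t id, bool avail_wrap)
    {
        /* the driver sets AVAIL = wrap counter, USED = its complement */
        uint16_t flags = (avail_wrap << VRING_PACKED_DESC_F_AVAIL) |
                         (!avail_wrap << VRING_PACKED_DESC_F_USED);

        d->addr = cpu_to_le64(addr);
        d->len  = cpu_to_le32(len);
        d->id   = cpu_to_le16(id);
        __atomic_thread_fence(__ATOMIC_RELEASE); /* body before flags */
        d->flags = cpu_to_le16(flags);           /* ownership flips here */
    }
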
1578 snapshot.u32 = *(u32 *)vq->packed.vring.device;
1594 event_idx -= vq->packed.vring.num;
1654 flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
1701 id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
1702 *len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
1704 if (unlikely(id >= vq->packed.vring.num)) {
1718 if (unlikely(last_used >= vq->packed.vring.num)) {
1719 last_used -= vq->packed.vring.num;
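
Lines 1654 and 1701–1719 detect completions on the packed ring: a descriptor is used when its AVAIL and USED bits agree with each other and with the driver's used wrap counter, and last_used_idx wraps by subtracting num (flipping the counter) rather than by masking. The kernel's predicate is essentially:

    static bool is_used_desc_packed(const struct vring_virtqueue *vq,
                                    u16 idx, bool used_wrap_counter)
    {
        bool avail, used;
        u16 flags;

        flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
        avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
        used  = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

        return avail == used && used == used_wrap_counter;
    }
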
1733 &vq->packed.vring.driver->off_wrap,
1756 vq->packed.vring.driver->flags =
1773 vq->packed.vring.driver->off_wrap =
1786 vq->packed.vring.driver->flags =
1821 bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
1826 if (used_idx >= vq->packed.vring.num) {
1827 used_idx -= vq->packed.vring.num;
1831 vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
1845 vq->packed.vring.driver->flags =
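
Lines 1821–1845 implement the delayed callback: rather than asking for an interrupt on the next completion, the driver sets the event index roughly 3/4 of the outstanding buffers ahead (e.g. num = 256 with 64 free gives bufs = 144). A sketch of the target computation, with bit 15 of off_wrap carrying the wrap counter:

    static uint16_t delayed_event_off_wrap(uint16_t num, uint16_t num_free,
                                           uint16_t last_used_idx, bool wrap)
    {
        uint16_t bufs = (num - num_free) * 3 / 4; /* 3/4 of in-flight */
        uint16_t used_idx = last_used_idx + bufs;

        if (used_idx >= num) {          /* indices stay within [0, num) */
            used_idx -= num;
            wrap = !wrap;               /* crossing the end flips it */
        }
        return used_idx | ((uint16_t)wrap << 15);
    }
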
1875 for (i = 0; i < vq->packed.vring.num; i++) {
1885 BUG_ON(vq->vq.num_free != vq->packed.vring.num);
1913 if (vring_packed->vring.desc)
1915 vring_packed->vring.desc,
1919 if (vring_packed->vring.driver)
1921 vring_packed->vring.driver,
1925 if (vring_packed->vring.device)
1927 vring_packed->vring.device,
1953 vring_packed->vring.desc = ring;
1966 vring_packed->vring.driver = driver;
1977 vring_packed->vring.device = device;
1980 vring_packed->vring.num = num;
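
Lines 1953–1980 wire up the three packed-ring areas: the descriptor array plus two event-suppression blocks, "driver" (written by the driver, read by the device) and "device" (the reverse, snapshotted as a raw u32 on line 1578). The event block from the UAPI header:

    struct vring_packed_desc_event {
        __le16 off_wrap; /* event offset (bits 0-14), wrap counter (bit 15) */
        __le16 flags;    /* 0x0 enable, 0x1 disable, 0x2 desc (event idx) */
    };
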
1993 u32 num = vring_packed->vring.num;
2027 vring_packed->vring.driver->flags =
2043 memset(vq->packed.vring.device, 0, vq->packed.event_size_in_bytes);
2044 memset(vq->packed.vring.driver, 0, vq->packed.event_size_in_bytes);
2047 memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes);
2049 virtqueue_init(vq, vq->packed.vring.num);
2143 virtqueue_init(vq, vring_packed.vring.num);
2582 "virtio vring IRQ raised before DRIVER_OK");
2656 virtqueue_init(vq, vring_split->vring.num);
2715 * virtqueue_resize - resize the vring of vq
2720 * When it is really necessary to create a new vring, it will set the current vq
2721 * into the reset state. Then call the passed callback to recycle the buffer
2722 * that is no longer used. Only after the new vring is successfully created
2723 * will the old vring be released.
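
A minimal caller sketch; my_recycle and shrink_ring are illustrative names, and recent kernels add a fourth recycle_done parameter, so check the prototype in your tree:

    /* virtqueue_resize() hands back every buffer the device has not yet
     * used, so the driver can free or requeue it before the ring changes. */
    static void my_recycle(struct virtqueue *vq, void *buf)
    {
        kfree(buf);                  /* or return it to a private pool */
    }

    static int shrink_ring(struct virtqueue *vq, u32 new_num)
    {
        return virtqueue_resize(vq, new_num, my_recycle);
    }
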
2750 if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
2767 * virtqueue_set_dma_premapped - set the vring premapped mode
2772 * The vring in premapped mode does not do DMA internally, so the driver must
2775 * the vring, it has to unmap the DMA address.
2785 * -EINVAL: the vring does not use the DMA API, so we cannot enable premapped mode.
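
A hedged sketch of the driver-side flow once premapped mode is enabled; the helpers below (virtqueue_dma_map_single_attrs() and friends) arrived together with premapped support, but exact signatures vary by kernel version:

    static int queue_premapped_buf(struct virtqueue *vq, void *buf, size_t len)
    {
        struct scatterlist sg;
        dma_addr_t addr;

        /* the core no longer maps for us, so map up front */
        addr = virtqueue_dma_map_single_attrs(vq, buf, len, DMA_TO_DEVICE, 0);
        if (virtqueue_dma_mapping_error(vq, addr))
            return -ENOMEM;

        sg_init_one(&sg, buf, len);
        sg_dma_address(&sg) = addr;  /* the vring uses this address as-is */
        sg_dma_len(&sg) = len;

        /* when the buffer comes back used, the driver must unmap it,
         * e.g. with virtqueue_dma_unmap_single_attrs() */
        return virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
    }
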
2794 num = vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
2865 vring_init(&vring_split.vring, num, pages, vring_align);
2880 vq->packed.vring.desc,
2886 vq->packed.vring.driver,
2892 vq->packed.vring.device,
2901 vq->split.vring.desc,
2973 * virtqueue_get_vring_size - return the size of the virtqueue's vring
2974 * @_vq: the struct virtqueue containing the vring of interest.
2976 * Returns the size of the vring. This is mainly used for boasting to Michael.
2984 return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
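
Typical use is sizing driver-private bookkeeping at probe time; struct my_buf_info is a hypothetical per-buffer type:

    static struct my_buf_info *alloc_buf_infos(struct virtqueue *vq)
    {
        unsigned int num = virtqueue_get_vring_size(vq);

        return kcalloc(num, sizeof(struct my_buf_info), GFP_KERNEL);
    }
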
3084 ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
3098 ((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
3103 const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
3105 return &to_vvq(vq)->split.vring;
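
Lines 3084 and 3098 compute the avail/used offsets relative to the descriptor table, which is how transports report legacy ring addresses; virtqueue_get_vring() exposes the same split layout directly. An illustrative debug helper:

    static void dump_split_layout(const struct virtqueue *vq)
    {
        const struct vring *vr = virtqueue_get_vring(vq);

        pr_info("num=%u avail_off=%ld used_off=%ld\n", vr->num,
                (long)((char *)vr->avail - (char *)vr->desc),
                (long)((char *)vr->used - (char *)vr->desc));
    }
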