Lines matching refs: vq

44 static unsigned int virtqueue_attach_desc(struct virtqueue *vq, unsigned int i,
47 struct vring_desc_shadow *desc_shadow = &vq->vring_desc_shadow[i];
48 struct vring_desc *desc = &vq->vring.desc[i];
51 if (IS_ENABLED(CONFIG_BOUNCE_BUFFER) && vq->vring.bouncebufs) {
52 struct bounce_buffer *bb = &vq->vring.bouncebufs[i];
66 vq->vdev->name, sg->length);
80 desc->addr = cpu_to_virtio64(vq->vdev, desc_shadow->addr);
81 desc->len = cpu_to_virtio32(vq->vdev, desc_shadow->len);
82 desc->flags = cpu_to_virtio16(vq->vdev, desc_shadow->flags);
83 desc->next = cpu_to_virtio16(vq->vdev, desc_shadow->next);
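
The attach path at lines 47-83 fills a CPU-endian shadow copy first and only then publishes each field into the device-visible ring through the cpu_to_virtio{16,32,64}() helpers. A minimal sketch of that pattern follows, with stand-in types and an assumed little-endian VIRTIO_F_VERSION_1 device (so the conversions are byte-order no-ops on an LE host):

    #include <stdint.h>

    struct shadow_desc { uint64_t addr; uint32_t len; uint16_t flags, next; };
    struct ring_desc   { uint64_t addr; uint32_t len; uint16_t flags, next; };

    /* Publish one descriptor. The shadow keeps a CPU-byte-order copy the
     * driver can read back later (chain walking, get_buf) without touching
     * or byte-swapping device-visible memory. */
    static void publish_desc(struct ring_desc *d, const struct shadow_desc *s)
    {
            d->addr  = s->addr;   /* cpu_to_virtio64() in the real code */
            d->len   = s->len;    /* cpu_to_virtio32() */
            d->flags = s->flags;  /* cpu_to_virtio16() */
            d->next  = s->next;   /* cpu_to_virtio16() */
    }
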
88 static void virtqueue_detach_desc(struct virtqueue *vq, unsigned int idx)
90 struct vring_desc *desc = &vq->vring.desc[idx];
93 if (!IS_ENABLED(CONFIG_BOUNCE_BUFFER) || !vq->vring.bouncebufs)
96 bb = &vq->vring.bouncebufs[idx];
98 desc->addr = cpu_to_virtio64(vq->vdev, (u64)(uintptr_t)bb->user_buffer);
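
With CONFIG_BOUNCE_BUFFER, attach substitutes a DMA-safe copy for the device, and detach (line 98) rewrites desc->addr back to the caller's pointer so virtqueue_get_buf() can return the original buffer. A toy model of the round trip, assuming illustrative names and 64-byte alignment (the real code copies only in the direction the descriptor actually needs):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct bb { void *user_buffer; void *bounce; size_t len; };

    static uint64_t bb_attach(struct bb *b, void *user, size_t len)
    {
            b->user_buffer = user;
            b->len = len;
            b->bounce = aligned_alloc(64, (len + 63) & ~(size_t)63);
            memcpy(b->bounce, user, len);            /* device-readable case */
            return (uint64_t)(uintptr_t)b->bounce;   /* address the device sees */
    }

    static uint64_t bb_detach(struct bb *b)
    {
            memcpy(b->user_buffer, b->bounce, b->len); /* device-writable case */
            free(b->bounce);
            return (uint64_t)(uintptr_t)b->user_buffer; /* restored for caller */
    }
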
101 int virtqueue_add(struct virtqueue *vq, struct virtio_sg *sgs[],
111 head = vq->free_head;
113 desc = vq->vring.desc;
116 if (vq->num_free < descs_used) {
118 descs_used, vq->num_free);
125 virtio_notify(vq->vdev, vq);
135 i = virtqueue_attach_desc(vq, i, sgs[n], flags);
138 vq->vring_desc_shadow[prev].flags &= ~VRING_DESC_F_NEXT;
139 desc[prev].flags = cpu_to_virtio16(vq->vdev, vq->vring_desc_shadow[prev].flags);
142 vq->num_free -= descs_used;
145 vq->free_head = i;
148 vq->vring_desc_shadow[head].chain_head = true;
154 avail = vq->avail_idx_shadow & (vq->vring.num - 1);
155 vq->vring.avail->ring[avail] = cpu_to_virtio16(vq->vdev, head);
162 vq->avail_idx_shadow++;
163 vq->vring.avail->idx = cpu_to_virtio16(vq->vdev, vq->avail_idx_shadow);
164 vq->num_added++;
170 if (unlikely(vq->num_added == (1 << 16) - 1))
171 virtqueue_kick(vq);
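
Driver-side usage of the add/kick pair above. The listing truncates virtqueue_add()'s parameter list; U-Boot's version takes the sg array plus the counts of device-readable (out) and device-writable (in) lists. Treat that signature, the hypothetical queue_request() helper, and the virtio-blk-like layout as assumptions:

    static int queue_request(struct virtqueue *vq, void *hdr, size_t hdr_len,
                             void *data, size_t data_len, u8 *status)
    {
            struct virtio_sg hdr_sg  = { hdr, hdr_len };           /* device reads  */
            struct virtio_sg data_sg = { data, data_len };         /* device writes */
            struct virtio_sg stat_sg = { status, sizeof(*status) };/* device writes */
            struct virtio_sg *sgs[]  = { &hdr_sg, &data_sg, &stat_sg };
            int ret = virtqueue_add(vq, sgs, 1, 2);  /* 1 out sg, then 2 in sgs */

            if (!ret)
                    virtqueue_kick(vq);  /* may skip the notify; see below */
            return ret;
    }
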
176 static bool virtqueue_kick_prepare(struct virtqueue *vq)
187 old = vq->avail_idx_shadow - vq->num_added;
188 new = vq->avail_idx_shadow;
189 vq->num_added = 0;
191 if (vq->event) {
192 needs_kick = vring_need_event(virtio16_to_cpu(vq->vdev,
193 vring_avail_event(&vq->vring)), new, old);
195 needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(vq->vdev,
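
With VIRTIO_RING_F_EVENT_IDX (lines 191-193), the device publishes the avail index after which it wants to be notified; old = avail_idx_shadow - num_added is the last index the device could have seen before this batch. vring_need_event() tests whether the event index falls in the half-open window (old, new] using wrap-safe u16 arithmetic. The helper below is the definition from the virtio spec; the worked example is mine:

    #include <stdint.h>
    #include <stdio.h>

    static int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
    {
            return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
    }

    int main(void)
    {
            /* avail idx moved 5 -> 8; device asked for a kick past 6: kick. */
            printf("%d\n", vring_need_event(6, 8, 5));  /* prints 1 */
            /* device already saw up to 9: no kick needed. */
            printf("%d\n", vring_need_event(9, 8, 5));  /* prints 0 */
            return 0;
    }
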
202 void virtqueue_kick(struct virtqueue *vq)
204 if (virtqueue_kick_prepare(vq))
205 virtio_notify(vq->vdev, vq);
208 static void detach_buf(struct virtqueue *vq, unsigned int head)
213 vq->vring_desc_shadow[head].chain_head = false;
218 while (vq->vring_desc_shadow[i].flags & VRING_DESC_F_NEXT) {
219 virtqueue_detach_desc(vq, i);
220 i = vq->vring_desc_shadow[i].next;
221 vq->num_free++;
224 virtqueue_detach_desc(vq, i);
225 vq->vring_desc_shadow[i].next = vq->free_head;
226 vq->free_head = head;
229 vq->num_free++;
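
detach_buf() (lines 218-229) walks the chain via the shadow's NEXT flag, then splices the whole chain back onto the free list: the tail's next is pointed at the old free_head and free_head becomes the chain's head. A self-contained toy of that splice, with simplified fields:

    #define F_NEXT 0x1

    struct shadow { unsigned int flags, next; };

    static unsigned int free_head, num_free;

    static void detach_chain(struct shadow *s, unsigned int head)
    {
            unsigned int i = head;

            while (s[i].flags & F_NEXT) {
                    i = s[i].next;   /* advance toward the tail */
                    num_free++;
            }
            s[i].next = free_head;   /* tail points at the old free list */
            free_head = head;        /* chain head becomes the new free head */
            num_free++;              /* account for the tail descriptor */
    }
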
232 static inline bool more_used(const struct virtqueue *vq)
234 return vq->last_used_idx != virtio16_to_cpu(vq->vdev,
235 vq->vring.used->idx);
238 void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len)
243 if (!more_used(vq)) {
245 vq->vdev->name, vq->index);
252 last_used = (vq->last_used_idx & (vq->vring.num - 1));
253 i = virtio32_to_cpu(vq->vdev, vq->vring.used->ring[last_used].id);
255 *len = virtio32_to_cpu(vq->vdev,
256 vq->vring.used->ring[last_used].len);
258 vq->vdev->name, vq->index, i, *len);
261 if (unlikely(i >= vq->vring.num)) {
263 vq->vdev->name, vq->index, i);
267 if (unlikely(!vq->vring_desc_shadow[i].chain_head)) {
269 vq->vdev->name, vq->index, i);
273 detach_buf(vq, i);
274 vq->last_used_idx++;
280 if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
281 virtio_store_mb(&vring_used_event(&vq->vring),
282 cpu_to_virtio16(vq->vdev, vq->last_used_idx));
284 return (void *)(uintptr_t)vq->vring_desc_shadow[i].addr;
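
Completion-side usage: U-Boot drivers are synchronous, so after kicking they typically spin on virtqueue_get_buf() until more_used() lets it return. A hedged sketch in the style of the in-tree block driver:

    unsigned int len;
    void *buf;

    virtqueue_kick(vq);
    while (!(buf = virtqueue_get_buf(vq, &len)))
            ;  /* returns NULL while used->idx == last_used_idx */
    /* buf is the shadow's saved addr (line 284); len is bytes the device wrote */
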
292 struct virtqueue *vq;
297 vq = malloc(sizeof(*vq));
298 if (!vq)
303 free(vq);
307 vq->vdev = vdev;
308 vq->index = index;
309 vq->num_free = vring.num;
310 vq->vring = vring;
311 vq->vring_desc_shadow = vring_desc_shadow;
312 vq->last_used_idx = 0;
313 vq->avail_flags_shadow = 0;
314 vq->avail_idx_shadow = 0;
315 vq->num_added = 0;
316 list_add_tail(&vq->list, &uc_priv->vqs);
318 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
321 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
322 if (!vq->event)
323 vq->vring.avail->flags = cpu_to_virtio16(vdev,
324 vq->avail_flags_shadow);
327 vq->free_head = 0;
329 vq->vring_desc_shadow[i].next = i + 1;
331 return vq;
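
The shadow-array allocation between lines 298 and 303 does not mention vq, so the grep elides it; a hedged reconstruction of that error path (calloc and the exact ordering are assumptions) shows why line 303 frees the freshly allocated vq:

    vq = malloc(sizeof(*vq));
    if (!vq)
            return NULL;

    vring_desc_shadow = calloc(vring.num, sizeof(struct vring_desc_shadow));
    if (!vring_desc_shadow) {
            free(vq);        /* line 303: undo the first allocation */
            return NULL;
    }

Lines 327-329 then thread the initial free list: with next = i + 1 in every shadow entry, all vring.num descriptors form a single chain starting at free_head = 0.
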
340 struct virtqueue *vq;
380 vq = __vring_new_virtqueue(index, vring, udev);
381 if (!vq)
384 debug("(%s): created vring @ %p for vq @ %p with num %u\n", udev->name,
385 queue, vq, num);
387 return vq;
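
Transport-side creation, as a hedged sketch; the parameter list (index, num, vring_align, udev) is inferred from the debug output at lines 384-385 and U-Boot convention, so treat it as an assumption:

    struct virtqueue *vq;

    vq = vring_create_virtqueue(0 /* index */, 256 /* num: power of two */,
                                4096 /* vring_align */, udev);
    if (!vq)
            return -ENOMEM;
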
396 void vring_del_virtqueue(struct virtqueue *vq)
398 virtio_free_pages(vq->vdev, vq->vring.desc,
399 DIV_ROUND_UP(vq->vring.size, PAGE_SIZE));
400 free(vq->vring_desc_shadow);
401 list_del(&vq->list);
402 free(vq->vring.bouncebufs);
403 free(vq);
406 unsigned int virtqueue_get_vring_size(struct virtqueue *vq)
408 return vq->vring.num;
411 ulong virtqueue_get_desc_addr(struct virtqueue *vq)
413 return (ulong)vq->vring.desc;
416 ulong virtqueue_get_avail_addr(struct virtqueue *vq)
418 return (ulong)vq->vring.desc +
419 ((char *)vq->vring.avail - (char *)vq->vring.desc);
422 ulong virtqueue_get_used_addr(struct virtqueue *vq)
424 return (ulong)vq->vring.desc +
425 ((char *)vq->vring.used - (char *)vq->vring.desc);
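
The accessors at lines 411-425 express the avail and used addresses as the desc base plus their offset inside the one contiguous ring allocation, relying on identity-mapped memory. A hedged sketch of a modern virtio-mmio transport consuming them (register offsets are the standard VIRTIO_MMIO_QUEUE_* macros; base is an assumed device base pointer):

    ulong desc  = virtqueue_get_desc_addr(vq);
    ulong avail = virtqueue_get_avail_addr(vq);
    ulong used  = virtqueue_get_used_addr(vq);

    writel((u32)desc,               base + VIRTIO_MMIO_QUEUE_DESC_LOW);
    writel((u32)((u64)desc >> 32),  base + VIRTIO_MMIO_QUEUE_DESC_HIGH);
    writel((u32)avail,              base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
    writel((u32)((u64)avail >> 32), base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);
    writel((u32)used,               base + VIRTIO_MMIO_QUEUE_USED_LOW);
    writel((u32)((u64)used >> 32),  base + VIRTIO_MMIO_QUEUE_USED_HIGH);
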
428 bool virtqueue_poll(struct virtqueue *vq, u16 last_used_idx)
432 return last_used_idx != virtio16_to_cpu(vq->vdev, vq->vring.used->idx);
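
virtqueue_poll() compares a caller-held snapshot against the live used->idx, so the snapshot must be taken before waiting. A hedged wait-loop sketch (busy-waiting is the norm in single-threaded U-Boot):

    u16 before = virtio16_to_cpu(vq->vdev, vq->vring.used->idx);

    virtqueue_kick(vq);
    while (!virtqueue_poll(vq, before))
            ;  /* device has not consumed anything since the snapshot */
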
435 void virtqueue_dump(struct virtqueue *vq)
439 printf("virtqueue %p for dev %s:\n", vq, vq->vdev->name);
441 vq->index, vq->vring.desc, vq->vring.num);
443 vq->free_head, vq->num_added, vq->num_free);
445 vq->last_used_idx, vq->avail_flags_shadow, vq->avail_idx_shadow);
448 for (i = 0; i < vq->vring.num; i++) {
449 struct vring_desc_shadow *desc = &vq->vring_desc_shadow[i];
457 vq->vring.avail->flags, vq->vring.avail->idx);
458 for (i = 0; i < vq->vring.num; i++) {
460 i, vq->vring.avail->ring[i]);
465 vq->vring.used->flags, vq->vring.used->idx);
466 for (i = 0; i < vq->vring.num; i++) {
468 vq->vring.used->ring[i].id, vq->vring.used->ring[i].len);