Lines matching refs: vq

98 	struct vhost_virtqueue *vq;
109 struct vhost_virtqueue vq;
124 * Protected by vq mutex. Writers must also take device mutex. */
137 * Protected by tx vq lock. */
140 * Protected by tx vq lock. */
142 /* Flush in progress. Protected by tx vq lock. */
227 static void vhost_net_enable_zcopy(int vq)
229 vhost_net_zcopy_mask |= 0x1 << vq;
233 vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
244 ubufs->vq = vq;
358 struct vhost_virtqueue *vq)
361 container_of(vq, struct vhost_net_virtqueue, vq);
366 if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
368 if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
369 vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
376 vhost_add_used_and_signal_n(vq->dev, vq,
377 &vq->heads[nvq->done_idx], add);
388 struct vhost_virtqueue *vq = ubufs->vq;
394 vq->heads[ubuf->desc].len = success ?
406 vhost_poll_queue(&vq->poll);
427 struct vhost_virtqueue *vq)
430 container_of(vq, struct vhost_net_virtqueue, vq);
432 if (!vhost_vq_get_backend(vq))
438 struct vhost_virtqueue *vq)
441 container_of(vq, struct vhost_net_virtqueue, vq);
445 sock = vhost_vq_get_backend(vq);
454 struct vhost_virtqueue *vq = &nvq->vq;
455 struct vhost_dev *dev = vq->dev;
460 vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
483 vq_err(&nvq->vq, "Fail to batch sending packets\n");
513 struct vhost_virtqueue *vq)
515 if (!vhost_vq_avail_empty(&net->dev, vq)) {
516 vhost_poll_queue(&vq->poll);
517 } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
518 vhost_disable_notify(&net->dev, vq);
519 vhost_poll_queue(&vq->poll);
532 struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;
534 /* Try to hold the vq mutex of the paired virtqueue. We can't
538 if (!mutex_trylock(&vq->mutex))
541 vhost_disable_notify(&net->dev, vq);
551 if (vhost_vq_has_work(vq)) {
567 vhost_net_busy_poll_try_queue(net, vq);
571 mutex_unlock(&vq->mutex);
580 struct vhost_virtqueue *rvq = &rnvq->vq;
581 struct vhost_virtqueue *tvq = &tnvq->vq;
605 struct vhost_virtqueue *vq = &nvq->vq;
608 min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
611 static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
615 size_t len = iov_length(vq->iov, out);
617 iov_iter_init(iter, ITER_SOURCE, vq->iov, out, len);
629 struct vhost_virtqueue *vq = &nvq->vq;
634 if (ret < 0 || ret == vq->num)
638 vq_err(vq, "Unexpected descriptor format for TX: out %d, int %d\n",
644 *len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out);
646 vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n",
654 static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
657 !vhost_vq_avail_empty(vq->dev, vq);
665 struct vhost_virtqueue *vq = &nvq->vq;
666 struct vhost_net *net = container_of(vq->dev, struct vhost_net,
668 struct socket *sock = vhost_vq_get_backend(vq);
708 vhost16_to_cpu(vq, gso->csum_start) +
709 vhost16_to_cpu(vq, gso->csum_offset) + 2 >
710 vhost16_to_cpu(vq, gso->hdr_len)) {
711 gso->hdr_len = cpu_to_vhost16(vq,
712 vhost16_to_cpu(vq, gso->csum_start) +
713 vhost16_to_cpu(vq, gso->csum_offset) + 2);
715 if (vhost16_to_cpu(vq, gso->hdr_len) > len) {
744 struct vhost_virtqueue *vq = &nvq->vq;
771 if (head == vq->num) {
773 vhost_poll_queue(&vq->poll);
775 vq))) {
776 vhost_disable_notify(&net->dev, vq);
793 vhost_discard_vq_desc(vq, 1);
794 vhost_net_enable_vq(net, vq);
805 if (tx_can_batch(vq, total_len))
814 vhost_discard_vq_desc(vq, 1);
815 vhost_net_enable_vq(net, vq);
823 vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
824 vq->heads[nvq->done_idx].len = 0;
826 } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
834 struct vhost_virtqueue *vq = &nvq->vq;
856 vhost_zerocopy_signal_used(net, vq);
865 if (head == vq->num) {
867 vhost_poll_queue(&vq->poll);
868 } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
869 vhost_disable_notify(&net->dev, vq);
882 vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
883 vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
901 if (tx_can_batch(vq, total_len) &&
913 if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
919 vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
922 vhost_discard_vq_desc(vq, 1);
923 vhost_net_enable_vq(net, vq);
931 vhost_add_used_and_signal(&net->dev, vq, head, 0);
933 vhost_zerocopy_signal_used(net, vq);
935 } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
943 struct vhost_virtqueue *vq = &nvq->vq;
946 mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
947 sock = vhost_vq_get_backend(vq);
951 if (!vq_meta_prefetch(vq))
954 vhost_disable_notify(&net->dev, vq);
955 vhost_net_disable_vq(net, vq);
963 mutex_unlock(&vq->mutex);
992 struct vhost_virtqueue *rvq = &rnvq->vq;
993 struct vhost_virtqueue *tvq = &tnvq->vq;
999 /* Both tx vq and rx socket were polled here */
1009 * vq has read descriptors only.
1010 * @vq - the relevant virtqueue
1018 static int get_rx_bufs(struct vhost_virtqueue *vq,
1041 r = vhost_get_vq_desc(vq, vq->iov + seg,
1042 ARRAY_SIZE(vq->iov) - seg, &out,
1048 if (d == vq->num) {
1053 vq_err(vq, "unexpected descriptor format for RX: "
1062 heads[headcount].id = cpu_to_vhost32(vq, d);
1063 len = iov_length(vq->iov + seg, in);
1064 heads[headcount].len = cpu_to_vhost32(vq, len);
1069 heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
1081 vhost_discard_vq_desc(vq, headcount);
1090 struct vhost_virtqueue *vq = &nvq->vq;
1115 mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
1116 sock = vhost_vq_get_backend(vq);
1120 if (!vq_meta_prefetch(vq))
1123 vhost_disable_notify(&net->dev, vq);
1124 vhost_net_disable_vq(net, vq);
1129 vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
1130 vq->log : NULL;
1131 mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
1140 headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
1149 vhost_poll_queue(&vq->poll);
1150 } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
1153 vhost_disable_notify(&net->dev, vq);
1165 iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, 1, 1);
1172 iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, in, vhost_len);
1188 vhost_discard_vq_desc(vq, headcount);
1195 vq_err(vq, "Unable to write vnet_hdr "
1196 "at addr %p\n", vq->iov->iov_base);
1207 num_buffers = cpu_to_vhost16(vq, headcount);
1211 vq_err(vq, "Failed num_buffers write");
1212 vhost_discard_vq_desc(vq, headcount);
1219 vhost_log_write(vq, vq_log, log, vhost_len,
1220 vq->iov, in);
1222 } while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));
1225 vhost_poll_queue(&vq->poll);
1227 vhost_net_enable_vq(net, vq);
1230 mutex_unlock(&vq->mutex);
1235 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1237 struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);
1244 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1246 struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);
1302 vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
1303 vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
1304 n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
1305 n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
1334 struct vhost_virtqueue *vq)
1338 container_of(vq, struct vhost_net_virtqueue, vq);
1340 mutex_lock(&vq->mutex);
1341 sock = vhost_vq_get_backend(vq);
1342 vhost_net_disable_vq(n, vq);
1343 vhost_vq_set_backend(vq, NULL);
1346 mutex_unlock(&vq->mutex);
1353 *tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
1354 *rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
1361 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
1363 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
1366 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
1369 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
1474 struct vhost_virtqueue *vq;
1488 vq = &n->vqs[index].vq;
1490 mutex_lock(&vq->mutex);
1496 if (!vhost_vq_access_ok(vq)) {
1507 oldsock = vhost_vq_get_backend(vq);
1509 ubufs = vhost_net_ubuf_alloc(vq,
1516 vhost_net_disable_vq(n, vq);
1517 vhost_vq_set_backend(vq, sock);
1519 r = vhost_vq_init_access(vq);
1522 r = vhost_net_enable_vq(n, vq);
1540 mutex_unlock(&vq->mutex);
1544 mutex_lock(&vq->mutex);
1545 vhost_zerocopy_signal_used(n, vq);
1546 mutex_unlock(&vq->mutex);
1558 vhost_vq_set_backend(vq, oldsock);
1559 vhost_net_enable_vq(n, vq);
1566 mutex_unlock(&vq->mutex);
1631 mutex_lock(&n->vqs[i].vq.mutex);
1632 n->vqs[i].vq.acked_features = features;
1635 mutex_unlock(&n->vqs[i].vq.mutex);
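
A recurring idiom in the matches above (for example the container_of() calls at source lines 358-361, 427-441 and 1334-1338) is that struct vhost_virtqueue is embedded as the member vq inside struct vhost_net_virtqueue, and code that is handed only the inner vq pointer recovers the enclosing per-queue state with container_of(). These references appear to come from the Linux vhost-net driver (drivers/vhost/net.c). The sketch below illustrates that pattern in isolation; it is a minimal userspace mock-up with simplified stand-in struct layouts, not the real vhost definitions.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the structures referenced in the listing;
 * the real definitions live in the vhost headers and carry many more fields. */
struct vhost_virtqueue {
	int num;			/* ring size (placeholder field) */
};

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;	/* embedded queue, as in "struct vhost_virtqueue vq;" */
	int done_idx;			/* per-queue bookkeeping (placeholder) */
};

/* Userspace equivalent of the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Mirrors the idiom at source lines 358-361 and 427-441: given only the
 * embedded vhost_virtqueue pointer, recover the enclosing net-specific queue. */
static struct vhost_net_virtqueue *to_net_vq(struct vhost_virtqueue *vq)
{
	return container_of(vq, struct vhost_net_virtqueue, vq);
}

int main(void)
{
	struct vhost_net_virtqueue nvq = { .vq = { .num = 256 }, .done_idx = 0 };
	struct vhost_virtqueue *vq = &nvq.vq;	/* what the generic layer hands back */
	struct vhost_net_virtqueue *recovered = to_net_vq(vq);

	printf("recovered container matches: %s\n", recovered == &nvq ? "yes" : "no");
	return 0;
}

This is why handlers in the listing that receive only a struct vhost_virtqueue * can still reach per-queue fields such as done_idx and upend_idx: the generic vhost core only knows about the embedded vq, and the net driver recovers its own wrapper on demand.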