Lines matching refs:rq — every reference to the receive-queue pointer/array rq in the virtio_net driver; the leading number on each match is its line number in the driver source.

382 	struct receive_queue *rq;
578 static void give_pages(struct receive_queue *rq, struct page *page)
582 /* Find end of list, sew whole thing into vi->rq.pages. */
584 end->private = (unsigned long)rq->pages;
585 rq->pages = page;
588 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
590 struct page *p = rq->pages;
593 rq->pages = (struct page *)p->private;
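
The give_pages()/get_a_page() matches above (578-593) show how the driver pools spare pages: they are chained through page->private with rq->pages as the list head. A minimal userspace sketch of that pattern, using invented fake_page/fake_rq types in place of the kernel's struct page and struct receive_queue:

    #include <stddef.h>

    /* Invented stand-ins for struct page and struct receive_queue. */
    struct fake_page {
        unsigned long private;              /* chains to the next spare page */
    };

    struct fake_rq {
        struct fake_page *pages;            /* head of the spare-page pool */
    };

    /* Sew a page chain onto the front of rq->pages, as give_pages() does. */
    static void give_pages(struct fake_rq *rq, struct fake_page *page)
    {
        struct fake_page *end;

        /* Find end of list, sew whole thing into rq->pages. */
        for (end = page; end->private; end = (struct fake_page *)end->private)
            ;
        end->private = (unsigned long)rq->pages;
        rq->pages = page;
    }

    /* Pop one page off the pool, as get_a_page() does when the pool is non-empty. */
    static struct fake_page *get_a_page(struct fake_rq *rq)
    {
        struct fake_page *p = rq->pages;

        if (p) {
            rq->pages = (struct fake_page *)p->private;
            p->private = 0;                 /* clear the chaining field before reuse */
        }
        return p;
    }

    int main(void)
    {
        struct fake_rq rq = { .pages = NULL };
        struct fake_page a = { 0 }, b = { 0 };

        give_pages(&rq, &a);
        give_pages(&rq, &b);
        return get_a_page(&rq) == &b ? 0 : 1;   /* LIFO: b comes back first */
    }

Reusing page->private for the chain means the pool needs no extra allocation, which is why give_pages() has to walk to the end of an incoming chain before splicing it in.
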
602 struct receive_queue *rq, void *buf)
607 give_pages(rq, buf);
717 struct receive_queue *rq,
753 give_pages(rq, page);
758 skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
804 give_pages(rq, page);
815 static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
831 virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
839 virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
844 static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
848 buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
850 virtnet_rq_unmap(rq, buf, *len);
855 static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
862 head = page_address(rq->alloc_frag.page);
870 sg_init_table(rq->sg, 1);
871 rq->sg[0].dma_address = addr;
872 rq->sg[0].length = len;
875 static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
877 struct page_frag *alloc_frag = &rq->alloc_frag;
891 if (rq->last_dma) {
896 virtnet_rq_unmap(rq, rq->last_dma, 0);
897 rq->last_dma = NULL;
902 addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
904 if (virtqueue_dma_mapping_error(rq->vq, addr))
908 dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
918 rq->last_dma = dma;
941 BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
947 struct receive_queue *rq;
950 rq = &vi->rq[i];
953 virtnet_rq_unmap(rq, buf, 0);
955 virtnet_rq_free_buf(vi, rq, buf);
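
The virtnet_rq_alloc()/virtnet_rq_unmap() matches (815-955) belong to the premapped receive path: the shared alloc_frag page appears to be DMA-mapped once via virtqueue_dma_map_single_attrs(), a small descriptor (the dma variable above, carrying at least addr, len and need_sync) lives at the head of that page, and each buffer carved from it holds the mapping until it is returned. A rough userspace model of just that pinning, with invented frag_dma_* names:

    #include <stdint.h>

    /* Invented model of the per-frag descriptor; the matches above show it
     * carrying at least a device address, a length and a need_sync flag. */
    struct frag_dma {
        uint64_t addr;          /* device address of the mapped region */
        uint32_t len;           /* length of the mapping */
        uint32_t ref;           /* receive buffers still outstanding */
        int need_sync;          /* whether CPU syncs are required */
    };

    /* Set up once per frag page (virtqueue_dma_map_single_attrs() in the driver). */
    static void frag_dma_init(struct frag_dma *dma, uint64_t addr, uint32_t len, int need_sync)
    {
        dma->addr = addr;
        dma->len = len;
        dma->ref = 0;
        dma->need_sync = need_sync;
    }

    /* Every buffer carved out of the page pins the mapping ... */
    static void frag_dma_get(struct frag_dma *dma)
    {
        dma->ref++;
    }

    /* ... and the page can only be unmapped (virtqueue_dma_unmap_single_attrs())
     * once the last buffer has come back through virtnet_rq_unmap(). */
    static int frag_dma_put(struct frag_dma *dma)
    {
        return --dma->ref == 0;
    }

    int main(void)
    {
        struct frag_dma d;

        frag_dma_init(&d, 0x1000, 4096, 1);
        frag_dma_get(&d);
        frag_dma_get(&d);
        frag_dma_put(&d);
        return frag_dma_put(&d) ? 0 : 1;    /* last put releases the mapping */
    }
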
1118 struct receive_queue *rq = vi->rq;
1129 xdp_prog = rcu_access_pointer(rq->xdp_prog);
1260 static struct page *xdp_linearize_page(struct receive_queue *rq,
1285 buf = virtnet_rq_get_buf(rq, &buflen, NULL);
1341 struct receive_queue *rq,
1376 xdp_page = xdp_linearize_page(rq, &num_buf, page,
1387 xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
1428 struct receive_queue *rq,
1452 xdp_prog = rcu_dereference(rq->xdp_prog);
1454 skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
1475 struct receive_queue *rq,
1482 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
1492 give_pages(rq, page);
1496 static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
1505 buf = virtnet_rq_get_buf(rq, &len, NULL);
1570 struct receive_queue *rq,
1589 xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
1613 buf = virtnet_rq_get_buf(rq, &len, &ctx);
1658 struct receive_queue *rq,
1702 xdp_page = xdp_linearize_page(rq, num_buf,
1733 struct receive_queue *rq,
1753 data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
1758 err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
1784 mergeable_buf_free(rq, num_buf, dev, stats);
1793 struct receive_queue *rq,
1824 xdp_prog = rcu_dereference(rq->xdp_prog);
1826 head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
1834 head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
1842 buf = virtnet_rq_get_buf(rq, &len, &ctx);
1896 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
1901 mergeable_buf_free(rq, num_buf, dev, stats);
1938 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
1950 virtnet_rq_free_buf(vi, rq, buf);
1955 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
1958 skb = receive_big(dev, vi, rq, buf, len, stats);
1960 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
1980 skb_record_rx_queue(skb, vq2rxq(rq->vq));
1985 napi_gro_receive(&rq->napi, skb);
1998 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
2010 buf = virtnet_rq_alloc(rq, len, gfp);
2014 virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
2017 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
2019 virtnet_rq_unmap(rq, buf, 0);
2026 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
2033 sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
2035 /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
2037 first = get_a_page(rq, gfp);
2040 give_pages(rq, list);
2043 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
2050 first = get_a_page(rq, gfp);
2052 give_pages(rq, list);
2057 /* rq->sg[0], rq->sg[1] share the same page */
2058 /* a separated rq->sg[0] for header - required in case !any_header_sg */
2059 sg_set_buf(&rq->sg[0], p, vi->hdr_len);
2061 /* rq->sg[1] for data packet, from offset */
2063 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
2067 err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
2070 give_pages(rq, first);
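
add_recvbuf_big() (matches 2026-2070) posts big_packets_num_skbfrags + 2 scatterlist entries per receive buffer: rq->sg[0] covers only the virtio-net header, rq->sg[1] the remainder of the same first page past a (padded) header offset, and every further entry one whole page taken from the chained list. A toy that lays out the same table with plain page/offset/length triples (the TOY_* constants and the toy_sg type are invented stand-ins):

    #include <stdio.h>

    #define TOY_PAGE_SIZE 4096u
    #define TOY_HDR_LEN   12u               /* stand-in for vi->hdr_len */

    struct toy_sg { unsigned int page; unsigned int offset; unsigned int length; };

    /* Fill a toy sg table the way add_recvbuf_big() does: header, first-page
     * remainder, then nfrags whole pages.  Returns the number of entries. */
    static unsigned int toy_fill_big_sg(struct toy_sg *sg, unsigned int nfrags)
    {
        unsigned int i;

        sg[0].page = 0; sg[0].offset = 0;           sg[0].length = TOY_HDR_LEN;
        sg[1].page = 0; sg[1].offset = TOY_HDR_LEN; sg[1].length = TOY_PAGE_SIZE - TOY_HDR_LEN;

        for (i = 0; i < nfrags; i++) {
            sg[2 + i].page = 1 + i;                 /* one page from the chained list */
            sg[2 + i].offset = 0;
            sg[2 + i].length = TOY_PAGE_SIZE;
        }
        return nfrags + 2;
    }

    int main(void)
    {
        struct toy_sg sg[2 + 16];
        unsigned int n = toy_fill_big_sg(sg, 16);

        printf("%u entries, first data entry %u bytes at offset %u\n",
               n, sg[1].length, sg[1].offset);
        return 0;
    }
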
2075 static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
2079 struct virtnet_info *vi = rq->vq->vdev->priv;
2087 rq->min_buf_len, PAGE_SIZE - hdr_len);
2093 struct receive_queue *rq, gfp_t gfp)
2095 struct page_frag *alloc_frag = &rq->alloc_frag;
2108 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
2110 buf = virtnet_rq_alloc(rq, len + room, gfp);
2128 virtnet_rq_init_one_sg(rq, buf, len);
2131 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
2133 virtnet_rq_unmap(rq, buf, 0);
2147 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
2155 err = add_recvbuf_mergeable(vi, rq, gfp);
2157 err = add_recvbuf_big(vi, rq, gfp);
2159 err = add_recvbuf_small(vi, rq, gfp);
2164 } while (rq->vq->num_free);
2165 if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
2168 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
2169 u64_stats_inc(&rq->stats.kicks);
2170 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
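
try_fill_recv() (matches 2147-2170) keeps posting buffers until an allocation fails or rq->vq->num_free runs out, then kicks the device once and counts the kick under the stats seqlock. A schematic of that loop with an invented toy_ring and post_one_buf() standing in for the virtqueue and the add_recvbuf_*() helpers:

    #include <stdbool.h>

    /* Toy ring: pretend each posted buffer consumes one free slot. */
    struct toy_ring {
        int num_free;
        unsigned long kicks;
    };

    /* Stand-in for add_recvbuf_mergeable/big/small(); returns 0 on success. */
    static int post_one_buf(struct toy_ring *r)
    {
        if (!r->num_free)
            return -1;
        r->num_free--;
        return 0;
    }

    /* Shape of try_fill_recv(): fill until full (or until posting fails),
     * then notify the device once. */
    static bool toy_fill(struct toy_ring *r)
    {
        int err;

        do {
            err = post_one_buf(r);
            if (err)
                break;                      /* out of slots (or, in the driver, out of memory) */
        } while (r->num_free);

        /* virtqueue_kick_prepare()/virtqueue_notify(); the driver also bumps
         * rq->stats.kicks under u64_stats_update_begin_irqsave(). */
        r->kicks++;

        /* The driver returns !oom so callers know whether to reschedule refill. */
        return true;
    }

    int main(void)
    {
        struct toy_ring r = { .num_free = 8 };

        return toy_fill(&r) ? 0 : 1;
    }
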
2179 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
2181 rq->calls++;
2182 virtqueue_napi_schedule(&rq->napi, rvq);
2230 struct receive_queue *rq = &vi->rq[i];
2232 napi_disable(&rq->napi);
2233 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
2234 virtnet_napi_enable(rq->vq, &rq->napi);
2244 static int virtnet_receive(struct receive_queue *rq, int budget,
2247 struct virtnet_info *vi = rq->vq->vdev->priv;
2258 (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
2259 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
2264 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
2265 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
2270 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
2271 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
2280 u64_stats_update_begin(&rq->stats.syncp);
2285 item = (u64_stats_t *)((u8 *)&rq->stats + offset);
2290 u64_stats_add(&rq->stats.packets, u64_stats_read(&stats.packets));
2291 u64_stats_add(&rq->stats.bytes, u64_stats_read(&stats.bytes));
2293 u64_stats_update_end(&rq->stats.syncp);
2298 static void virtnet_poll_cleantx(struct receive_queue *rq)
2300 struct virtnet_info *vi = rq->vq->vdev->priv;
2301 unsigned int index = vq2rxq(rq->vq);
2332 static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq)
2336 if (!rq->packets_in_napi)
2339 u64_stats_update_begin(&rq->stats.syncp);
2340 dim_update_sample(rq->calls,
2341 u64_stats_read(&rq->stats.packets),
2342 u64_stats_read(&rq->stats.bytes),
2344 u64_stats_update_end(&rq->stats.syncp);
2346 net_dim(&rq->dim, cur_sample);
2347 rq->packets_in_napi = 0;
2352 struct receive_queue *rq =
2354 struct virtnet_info *vi = rq->vq->vdev->priv;
2360 virtnet_poll_cleantx(rq);
2362 received = virtnet_receive(rq, budget, &xdp_xmit);
2363 rq->packets_in_napi += received;
2370 napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
2375 if (napi_complete && rq->dim_enabled)
2376 virtnet_rx_dim_update(vi, rq);
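
virtnet_rx_dim_update() (matches 2332-2347) feeds dynamic interrupt moderation: if the last NAPI run handled any packets, it snapshots calls/packets/bytes under the stats seqlock into a sample for net_dim() and resets packets_in_napi. A toy model of that bookkeeping with invented toy_rxq/toy_sample types in place of the kernel's DIM structures:

    /* Invented sample/queue types modelling what dim_update_sample() consumes. */
    struct toy_sample {
        unsigned long events;               /* rq->calls: NAPI schedules so far */
        unsigned long packets;
        unsigned long bytes;
    };

    struct toy_rxq {
        unsigned long calls;
        unsigned long packets;
        unsigned long bytes;
        unsigned int packets_in_napi;
    };

    /* Returns 1 if a sample was produced for the DIM algorithm, 0 if skipped. */
    static int toy_rx_dim_update(struct toy_rxq *rq, struct toy_sample *out)
    {
        if (!rq->packets_in_napi)
            return 0;                       /* nothing happened; don't feed DIM */

        /* The driver wraps this read in u64_stats_update_begin()/end(). */
        out->events = rq->calls;
        out->packets = rq->packets;
        out->bytes = rq->bytes;

        rq->packets_in_napi = 0;            /* start counting the next interval */
        return 1;                           /* caller would pass the sample to net_dim() */
    }

    int main(void)
    {
        struct toy_rxq rq = { .calls = 3, .packets = 42, .bytes = 60000, .packets_in_napi = 42 };
        struct toy_sample s;

        return toy_rx_dim_update(&rq, &s) ? 0 : 1;
    }
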
2395 napi_disable(&vi->rq[qp_index].napi);
2396 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
2404 err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
2405 vi->rq[qp_index].napi.napi_id);
2409 err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
2414 virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
2420 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
2434 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2450 cancel_work_sync(&vi->rq[i].dim.work);
2613 struct receive_queue *rq, u32 ring_num)
2618 qindex = rq - vi->rq;
2621 napi_disable(&rq->napi);
2622 cancel_work_sync(&rq->dim.work);
2625 err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
2629 if (!try_fill_recv(vi, rq, GFP_KERNEL))
2633 virtnet_napi_enable(rq->vq, &rq->napi);
2800 struct receive_queue *rq = &vi->rq[i];
2811 start = u64_stats_fetch_begin(&rq->stats.syncp);
2812 rpackets = u64_stats_read(&rq->stats.packets);
2813 rbytes = u64_stats_read(&rq->stats.bytes);
2814 rdrops = u64_stats_read(&rq->stats.drops);
2815 } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
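
The rpackets/rbytes/rdrops reads (matches 2811-2815) follow the usual u64_stats pattern: snapshot the counters, and retry if a writer raced in. A compact single-threaded model of that retry loop with an explicit even/odd sequence counter; the kernel's u64_stats_fetch_begin()/u64_stats_fetch_retry() additionally handle memory ordering and the 32-bit cases:

    /* Toy per-queue stats block protected by an even/odd sequence counter. */
    struct toy_stats {
        unsigned int seq;                   /* odd while a writer is mid-update */
        unsigned long long packets;
        unsigned long long bytes;
        unsigned long long drops;
    };

    static unsigned int toy_fetch_begin(const struct toy_stats *s)
    {
        return s->seq;
    }

    static int toy_fetch_retry(const struct toy_stats *s, unsigned int start)
    {
        /* Retry if a writer was active (odd) or finished an update since. */
        return (start & 1) || s->seq != start;
    }

    static void toy_read(const struct toy_stats *s,
                         unsigned long long *p, unsigned long long *b,
                         unsigned long long *d)
    {
        unsigned int start;

        do {
            start = toy_fetch_begin(s);
            *p = s->packets;
            *b = s->bytes;
            *d = s->drops;
        } while (toy_fetch_retry(s, start));
    }

    int main(void)
    {
        struct toy_stats st = { .seq = 0, .packets = 1, .bytes = 64, .drops = 0 };
        unsigned long long p, b, d;

        toy_read(&st, &p, &b, &d);
        return p == 1 ? 0 : 1;
    }
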
2881 cancel_work_sync(&vi->rq[i].dim.work);
3030 virtqueue_set_affinity(vi->rq[i].vq, NULL);
3067 virtqueue_set_affinity(vi->rq[i].vq, mask);
3160 vi->rq[queue].intr_coal.max_usecs = max_usecs;
3161 vi->rq[queue].intr_coal.max_packets = max_packets;
3190 ring->rx_max_pending = vi->rq[0].vq->num_max;
3192 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
3203 struct receive_queue *rq;
3210 rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
3217 if (ring->rx_pending > vi->rq[0].vq->num_max)
3224 rq = vi->rq + i;
3245 err = virtnet_rx_resize(vi, rq, ring->rx_pending);
3250 mutex_lock(&vi->rq[i].dim_lock);
3254 mutex_unlock(&vi->rq[i].dim_lock);
3469 if (vi->rq[0].xdp_prog)
3728 /* stats_sum_queue - Calculate the sum of the same fields in sq or rq.
4171 struct receive_queue *rq = &vi->rq[i];
4174 stats_base = (const u8 *)&rq->stats;
4176 start = u64_stats_fetch_begin(&rq->stats.syncp);
4178 } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
4272 mutex_lock(&vi->rq[i].dim_lock);
4277 vi->rq[i].dim_enabled = true;
4290 vi->rq[i].dim_enabled = false;
4311 vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
4312 vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
4316 mutex_unlock(&vi->rq[i].dim_lock);
4346 mutex_lock(&vi->rq[queue].dim_lock);
4347 cur_rx_dim = vi->rq[queue].dim_enabled;
4348 max_usecs = vi->rq[queue].intr_coal.max_usecs;
4349 max_packets = vi->rq[queue].intr_coal.max_packets;
4353 mutex_unlock(&vi->rq[queue].dim_lock);
4358 vi->rq[queue].dim_enabled = true;
4359 mutex_unlock(&vi->rq[queue].dim_lock);
4364 vi->rq[queue].dim_enabled = false;
4372 mutex_unlock(&vi->rq[queue].dim_lock);
4398 struct receive_queue *rq = container_of(dim,
4400 struct virtnet_info *vi = rq->vq->vdev->priv;
4405 qnum = rq - vi->rq;
4407 mutex_lock(&rq->dim_lock);
4408 if (!rq->dim_enabled)
4412 if (update_moder.usec != rq->intr_coal.max_usecs ||
4413 update_moder.pkts != rq->intr_coal.max_packets) {
4423 mutex_unlock(&rq->dim_lock);
4561 mutex_lock(&vi->rq[queue].dim_lock);
4562 ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
4565 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
4566 ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
4567 mutex_unlock(&vi->rq[queue].dim_lock);
4741 struct receive_queue *rq = &vi->rq[i];
4747 virtnet_fill_stats(vi, i * 2, &ctx, (void *)&rq->stats, true, 0);
4959 old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
4969 napi_disable(&vi->rq[i].napi);
4976 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
4992 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
5007 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
5019 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
5024 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
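
The xdp_prog matches (4959-5024) show the attach path: NAPI is quiesced, the new program is published per queue with rcu_assign_pointer() (or old_prog is restored on failure), and the poll path reads it with rcu_dereference(). A minimal C11-atomics analogue of just the publish/read ordering, with invented toy_prog/publish_prog/read_prog names; it does not model the RCU grace period the real code still needs before the old program can be released:

    #include <stdatomic.h>

    struct toy_prog { int id; };

    /* Per-queue program slot; in the driver this is rq->xdp_prog (an RCU pointer). */
    static _Atomic(struct toy_prog *) xdp_prog_slot;

    /* Publisher side, roughly rcu_assign_pointer(): release ordering makes the
     * program's contents visible before the pointer itself. */
    static void publish_prog(struct toy_prog *prog)
    {
        atomic_store_explicit(&xdp_prog_slot, prog, memory_order_release);
    }

    /* Reader side, roughly rcu_dereference() in the NAPI poll path. */
    static struct toy_prog *read_prog(void)
    {
        return atomic_load_explicit(&xdp_prog_slot, memory_order_acquire);
    }

    int main(void)
    {
        static struct toy_prog prog = { .id = 1 };

        publish_prog(&prog);                /* the driver also quiesces NAPI around this */
        return read_prog() == &prog ? 0 : 1;
    }
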
5174 __netif_napi_del(&vi->rq[i].napi);
5179 * we need to respect an RCU grace period before freeing vi->rq
5183 kfree(vi->rq);
5194 while (vi->rq[i].pages)
5195 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
5197 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
5198 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
5215 if (vi->rq[i].alloc_frag.page) {
5216 if (vi->rq[i].last_dma)
5217 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
5218 put_page(vi->rq[i].alloc_frag.page);
5243 struct virtqueue *vq = vi->rq[i].vq;
5323 sprintf(vi->rq[i].name, "input.%u", i);
5325 names[rxq2vq(i)] = vi->rq[i].name;
5343 vi->rq[i].vq = vqs[rxq2vq(i)];
5344 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
5377 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
5378 if (!vi->rq)
5383 vi->rq[i].pages = NULL;
5384 netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
5390 INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work);
5391 vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5393 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
5394 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
5397 u64_stats_init(&vi->rq[i].stats.syncp);
5399 mutex_init(&vi->rq[i].dim_lock);
5450 avg = &vi->rq[queue_index].mrg_avg_pkt_len;
5452 get_mergeable_buf_len(&vi->rq[queue_index], avg,
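
Finally, get_mergeable_buf_len() (matches 2075-2108 and 5450-5452) sizes mergeable receive buffers from a moving average of recent packet lengths, clamped between rq->min_buf_len and PAGE_SIZE - hdr_len. A small model of that estimate with an explicit (invented, weight-1/4) EWMA; the kernel uses the ewma_pkt_len helpers and additionally aligns and headroom-adjusts the result:

    #define TOY_PAGE_SIZE 4096u
    #define TOY_HDR_LEN   12u               /* stand-in for vi->hdr_len */

    static unsigned int clamp_len(unsigned int len, unsigned int lo, unsigned int hi)
    {
        if (len < lo)
            return lo;
        if (len > hi)
            return hi;
        return len;
    }

    /* Toy EWMA with weight 1/4: avg += (sample - avg) / 4. */
    static void toy_ewma_add(unsigned int *avg, unsigned int sample)
    {
        *avg = *avg ? *avg + ((int)sample - (int)*avg) / 4 : sample;
    }

    /* Shape of get_mergeable_buf_len(): header plus the average packet size,
     * bounded below by min_buf_len and above by what a page can hold. */
    static unsigned int toy_mergeable_buf_len(unsigned int avg_pkt_len,
                                              unsigned int min_buf_len)
    {
        return TOY_HDR_LEN + clamp_len(avg_pkt_len, min_buf_len,
                                       TOY_PAGE_SIZE - TOY_HDR_LEN);
    }

    int main(void)
    {
        unsigned int avg = 0;

        toy_ewma_add(&avg, 1500);
        toy_ewma_add(&avg, 64);
        return toy_mergeable_buf_len(avg, 128) >= 128 ? 0 : 1;
    }
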