Lines matching refs:rq (references to `rq` in the Linux vmxnet3 driver, drivers/net/vmxnet3/vmxnet3_drv.c)

229 					"%s: rq[%d] error 0x%x\n",
613 vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
617 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
618 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
630 void *data = vmxnet3_pp_get_buff(rq->page_pool,
634 rq->stats.rx_buf_alloc_failure++;
645 rq->stats.rx_buf_alloc_failure++;
657 rq->stats.rx_buf_alloc_failure++;
671 rq->stats.rx_buf_alloc_failure++;
682 rq->stats.rx_buf_alloc_failure++;
1305 struct vmxnet3_rx_queue *rq, int size)
1325 err = xdp_rxq_info_reg(&rq->xdp_rxq, adapter->netdev, rq->qid,
1326 rq->napi.napi_id);
1330 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, pp);
1334 rq->page_pool = pp;
1339 xdp_rxq_info_unreg(&rq->xdp_rxq);
1421 vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
1424 rq->stats.drop_err++;
1426 rq->stats.drop_fcs++;
1428 rq->stats.drop_total++;
1503 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1513 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
1522 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1524 while (rcd->gen == rq->comp_ring.gen) {
1545 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
1546 rcd->rqID != rq->dataRingQid);
1549 ring = rq->rx_ring + ring_idx;
1550 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1552 rbi = rq->buf_info[ring_idx] + idx;
1558 vmxnet3_rx_error(rq, rcd, ctx, adapter);
1574 act = vmxnet3_process_xdp(adapter, rq, rcd, rbi, rxd,
1592 (rcd->rqID != rq->qid &&
1593 rcd->rqID != rq->dataRingQid));
1620 sz = rcd->rxdIdx * rq->data_ring.desc_size;
1621 act = vmxnet3_process_xdp_small(adapter, rq,
1622 &rq->data_ring.base[sz],
1639 rq->stats.rx_buf_alloc_failure++;
1641 rq->stats.drop_total++;
1649 BUG_ON(rcd->len > rq->data_ring.desc_size);
1652 sz = rcd->rxdIdx * rq->data_ring.desc_size;
1654 &rq->data_ring.base[sz], rcd->len);
1669 rq->stats.rx_buf_alloc_failure++;
1671 rq->stats.drop_total++;
1688 skb_record_rx_queue(ctx->skb, rq->qid);
1730 rq->stats.rx_buf_alloc_failure++;
1743 rq->stats.rx_buf_alloc_failure++;
1833 !rq->shared->updateRxProd)
1836 napi_gro_receive(&rq->napi, skb);
1845 ring = rq->rx_ring + ring_idx;
1861 rbi = rq->buf_info[ring_idx] + ring->next2fill;
1892 if (unlikely(rq->shared->updateRxProd) && (ring->next2fill & 0xf) == 0) {
1894 rxprod_reg[ring_idx] + rq->qid * 8,
1898 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1900 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1910 vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1917 if (!rq->rx_ring[0].base)
1921 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1927 rbi = &rq->buf_info[ring_idx][i];
1929 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1933 page_pool_recycle_direct(rq->page_pool,
1951 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1952 rq->rx_ring[ring_idx].next2fill =
1953 rq->rx_ring[ring_idx].next2comp = 0;
1956 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1957 rq->comp_ring.next2proc = 0;
1972 static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1980 if (rq->buf_info[i]) {
1981 for (j = 0; j < rq->rx_ring[i].size; j++)
1982 BUG_ON(rq->buf_info[i][j].page != NULL);
1988 if (rq->rx_ring[i].base) {
1990 rq->rx_ring[i].size
1992 rq->rx_ring[i].base,
1993 rq->rx_ring[i].basePA);
1994 rq->rx_ring[i].base = NULL;
1998 if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
1999 xdp_rxq_info_unreg(&rq->xdp_rxq);
2000 page_pool_destroy(rq->page_pool);
2001 rq->page_pool = NULL;
2003 if (rq->data_ring.base) {
2005 rq->rx_ring[0].size * rq->data_ring.desc_size,
2006 rq->data_ring.base, rq->data_ring.basePA);
2007 rq->data_ring.base = NULL;
2010 if (rq->comp_ring.base) {
2011 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
2013 rq->comp_ring.base, rq->comp_ring.basePA);
2014 rq->comp_ring.base = NULL;
2017 kfree(rq->buf_info[0]);
2018 rq->buf_info[0] = NULL;
2019 rq->buf_info[1] = NULL;
2028 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2030 if (rq->data_ring.base) {
2032 (rq->rx_ring[0].size *
2033 rq->data_ring.desc_size),
2034 rq->data_ring.base,
2035 rq->data_ring.basePA);
2036 rq->data_ring.base = NULL;
2037 rq->data_ring.desc_size = 0;
2043 vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
2049 for (i = 0; i < rq->rx_ring[0].size; i++) {
2053 rq->buf_info[0][i].buf_type = vmxnet3_xdp_enabled(adapter) ?
2056 rq->buf_info[0][i].len = adapter->skb_buf_size;
2058 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
2059 rq->buf_info[0][i].len = PAGE_SIZE;
2062 for (i = 0; i < rq->rx_ring[1].size; i++) {
2063 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
2064 rq->buf_info[1][i].len = PAGE_SIZE;
2069 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
2071 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
2073 rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
2074 rq->rx_ring[i].isOutOfOrder = 0;
2077 err = vmxnet3_create_pp(adapter, rq,
2078 rq->rx_ring[0].size + rq->rx_ring[1].size);
2082 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
2084 xdp_rxq_info_unreg(&rq->xdp_rxq);
2085 page_pool_destroy(rq->page_pool);
2086 rq->page_pool = NULL;
2091 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
2094 rq->comp_ring.next2proc = 0;
2095 memset(rq->comp_ring.base, 0, rq->comp_ring.size *
2097 rq->comp_ring.gen = VMXNET3_INIT_GEN;
2100 rq->rx_ctx.skb = NULL;
2127 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
2135 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
2136 rq->rx_ring[i].base = dma_alloc_coherent(
2138 &rq->rx_ring[i].basePA,
2140 if (!rq->rx_ring[i].base) {
2147 if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
2148 sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
2149 rq->data_ring.base =
2151 &rq->data_ring.basePA,
2153 if (!rq->data_ring.base) {
2159 rq->data_ring.base = NULL;
2160 rq->data_ring.desc_size = 0;
2163 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
2164 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
2165 &rq->comp_ring.basePA,
2167 if (!rq->comp_ring.base) {
2172 bi = kcalloc_node(rq->rx_ring[0].size + rq->rx_ring[1].size,
2173 sizeof(rq->buf_info[0][0]), GFP_KERNEL,
2178 rq->buf_info[0] = bi;
2179 rq->buf_info[1] = bi + rq->rx_ring[0].size;
2184 vmxnet3_rq_destroy(rq, adapter);
2258 struct vmxnet3_rx_queue *rq = container_of(napi,
2260 struct vmxnet3_adapter *adapter = rq->adapter;
2268 &adapter->tx_queue[rq - adapter->rx_queue];
2272 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
2276 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
2322 struct vmxnet3_rx_queue *rq = data;
2323 struct vmxnet3_adapter *adapter = rq->adapter;
2327 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
2328 napi_schedule(&rq->napi);
2513 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2514 rq->qid = i;
2515 rq->qid2 = i + adapter->num_rx_queues;
2516 rq->dataRingQid = i + 2 * adapter->num_rx_queues;
2823 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2825 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2826 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2827 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
2829 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2830 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2831 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
2833 rqc->intrIdx = rq->comp_ring.intr_idx;
2836 cpu_to_le64(rq->data_ring.basePA);
2838 cpu_to_le16(rq->data_ring.desc_size);
3276 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
3278 rq->rx_ring[0].size = ring0_size;
3279 rq->rx_ring[1].size = ring1_size;
3280 rq->comp_ring.size = comp_size;
3317 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
3320 rq->shared = &adapter->rqd_start[i].ctrl;
3321 rq->adapter = adapter;
3322 rq->data_ring.desc_size = rxdata_desc_size;
3323 err = vmxnet3_rq_create(rq, adapter);