Lines matching refs: rxq

548 static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
550 struct net_device *ndev = rxq->ndev;
562 if (mpc->rxbpre_datasize != rxq->datasize) {
564 mpc->rxbpre_datasize, rxq->datasize);
568 if (mpc->rxbpre_alloc_size != rxq->alloc_size) {
570 mpc->rxbpre_alloc_size, rxq->alloc_size);
574 if (mpc->rxbpre_headroom != rxq->headroom) {
576 mpc->rxbpre_headroom, rxq->headroom);
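
The hits above come from mana_get_rxbuf_pre(), which hands out a pre-allocated RX buffer only while the port's cached pre-allocation parameters still match the queue. A condensed sketch of that guard (error logging omitted; the helper name below is ours, not the driver's):

    static bool rxbuf_pre_matches(const struct mana_port_context *mpc,
                                  const struct mana_rxq *rxq)
    {
        /* any mismatch forces the caller back to on-demand allocation */
        return mpc->rxbpre_datasize == rxq->datasize &&
               mpc->rxbpre_alloc_size == rxq->alloc_size &&
               mpc->rxbpre_headroom == rxq->headroom;
    }
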
1259 static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
1265 init_completion(&rxq->fence_event);
1269 req.wq_obj_handle = rxq->rxobj;
1275 rxq->rxq_idx, err);
1282 rxq->rxq_idx, err, resp.hdr.status);
1289 if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
1291 rxq->rxq_idx);
1301 struct mana_rxq *rxq;
1305 rxq = apc->rxqs[rxq_idx];
1306 err = mana_fence_rq(apc, rxq);
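
mana_fence_rq() arms rxq->fence_event, submits a fence request against rxq->rxobj, and waits up to ten seconds for the RX CQE path (see the complete() call further down) to signal it; the caller then walks apc->rxqs applying it per queue. A condensed sketch of the wait pattern; send_fence_request() is a hypothetical stand-in for the request/response exchange shown in the listing:

    static int fence_rq_sketch(struct mana_port_context *apc, struct mana_rxq *rxq)
    {
        int err;

        init_completion(&rxq->fence_event);

        err = send_fence_request(apc, rxq->rxobj);  /* hypothetical helper */
        if (err)
            return err;

        /* woken by complete(&rxq->fence_event) from the RX CQE handler */
        if (!wait_for_completion_timeout(&rxq->fence_event, 10 * HZ))
            return -ETIMEDOUT;

        return 0;
    }
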
1462 static void mana_post_pkt_rxq(struct mana_rxq *rxq)
1468 curr_index = rxq->buf_index++;
1469 if (rxq->buf_index == rxq->num_rx_buf)
1470 rxq->buf_index = 0;
1472 recv_buf_oob = &rxq->rx_oobs[curr_index];
1474 err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
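
mana_post_pkt_rxq() consumes the current receive OOB slot, wraps the ring index, and posts the slot's WQE back to the GDMA receive queue. A sketch reconstructed from the hits above, with the error handling compressed:

    static void post_pkt_rxq_sketch(struct mana_rxq *rxq)
    {
        struct mana_recv_buf_oob *recv_buf_oob;
        u32 curr_index;
        int err;

        curr_index = rxq->buf_index++;
        if (rxq->buf_index == rxq->num_rx_buf)
            rxq->buf_index = 0;             /* wrap the ring index */

        recv_buf_oob = &rxq->rx_oobs[curr_index];

        err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
                                        &recv_buf_oob->wqe_inf);
        WARN_ON_ONCE(err);
    }
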
1482 static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
1485 struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size);
1496 skb_reserve(skb, rxq->headroom);
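
mana_build_skb() wraps the receive buffer in an skb sized by the queue's allocation size and reserves the configured headroom. A minimal sketch, leaving the XDP data-offset adjustment and the skb_put() of the packet length to the caller:

    static struct sk_buff *build_skb_sketch(struct mana_rxq *rxq, void *buf_va)
    {
        struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size);

        if (!skb)
            return NULL;

        /* leave room for the XDP/driver headroom before the payload */
        skb_reserve(skb, rxq->headroom);
        return skb;
    }
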
1503 struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq)
1505 struct mana_stats_rx *rx_stats = &rxq->stats;
1506 struct net_device *ndev = rxq->ndev;
1508 u16 rxq_idx = rxq->rxq_idx;
1515 rxq->rx_cq.work_done++;
1516 napi = &rxq->rx_cq.napi;
1523 act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
1525 if (act == XDP_REDIRECT && !rxq->xdp_rc)
1531 skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp);
1590 page_pool_recycle_direct(rxq->page_pool,
1593 WARN_ON_ONCE(rxq->xdp_save_va);
1595 rxq->xdp_save_va = buf_va;
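
The last few hits show the drop-path buffer disposition inside mana_rx_skb(): a buffer that came from the page_pool is recycled directly, while a page-frag buffer is stashed in rxq->xdp_save_va so the next refill can reuse it. Sketched below with the surrounding statistics and XDP action dispatch omitted:

    static void rx_drop_buf_sketch(struct mana_rxq *rxq, void *buf_va,
                                   bool from_pool)
    {
        if (from_pool) {
            page_pool_recycle_direct(rxq->page_pool,
                                     virt_to_head_page(buf_va));
            return;
        }

        /* only one fragment is cached for reuse at a time */
        WARN_ON_ONCE(rxq->xdp_save_va);
        rxq->xdp_save_va = buf_va;
    }
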
1603 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
1612 if (rxq->xdp_save_va) {
1613 va = rxq->xdp_save_va;
1614 rxq->xdp_save_va = NULL;
1615 } else if (rxq->alloc_size > PAGE_SIZE) {
1617 va = napi_alloc_frag(rxq->alloc_size);
1619 va = netdev_alloc_frag(rxq->alloc_size);
1626 if (compound_order(page) < get_order(rxq->alloc_size)) {
1631 page = page_pool_dev_alloc_pages(rxq->page_pool);
1639 *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
1643 page_pool_put_full_page(rxq->page_pool, page, false);
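
mana_get_rxfrag() picks the buffer source in priority order: a fragment previously saved by the drop path, a page-frag allocation when the required size exceeds one page (NAPI or non-NAPI variant), and otherwise the queue's page_pool; the result is then DMA-mapped past the headroom for datasize bytes. A reconstructed sketch under those assumptions, with the error unwinding compressed:

    static void *get_rxfrag_sketch(struct mana_rxq *rxq, struct device *dev,
                                   dma_addr_t *da, bool *from_pool, bool in_napi)
    {
        struct page *page;
        void *va;

        *from_pool = false;

        if (rxq->xdp_save_va) {
            /* reuse the fragment saved by the RX drop path */
            va = rxq->xdp_save_va;
            rxq->xdp_save_va = NULL;
        } else if (rxq->alloc_size > PAGE_SIZE) {
            va = in_napi ? napi_alloc_frag(rxq->alloc_size) :
                           netdev_alloc_frag(rxq->alloc_size);
            if (!va)
                return NULL;

            /* the frag allocator may return a smaller compound page */
            page = virt_to_head_page(va);
            if (compound_order(page) < get_order(rxq->alloc_size)) {
                put_page(page);
                return NULL;
            }
        } else {
            page = page_pool_dev_alloc_pages(rxq->page_pool);
            if (!page)
                return NULL;

            *from_pool = true;
            va = page_to_virt(page);
        }

        *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
                             DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *da)) {
            if (*from_pool)
                page_pool_put_full_page(rxq->page_pool, page, false);
            else
                put_page(virt_to_head_page(va));
            return NULL;
        }

        return va;
    }
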
1654 static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
1662 va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
1666 dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
1676 static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1680 struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
1681 struct net_device *ndev = rxq->ndev;
1697 rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
1707 complete(&rxq->fence_event);
1722 rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1726 curr = rxq->buf_index;
1727 rxbuf_oob = &rxq->rx_oobs[curr];
1730 mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp);
1735 mana_rx_skb(old_buf, old_fp, oob, rxq);
1738 mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1740 mana_post_pkt_rxq(rxq);
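
The per-CQE receive path ties these pieces together: mana_refill_rx_oob() swaps a fresh buffer into the slot at rxq->buf_index (unmapping the old one), the old buffer is handed to mana_rx_skb(), and the slot is re-posted after advancing the RQ tail. A compressed sketch, omitting CQE decoding, the RX-fence completion, and the error cases; the mana_rx_skb() argument order follows the partial signature visible above:

    static void process_rx_cqe_sketch(struct mana_rxq *rxq, struct device *dev,
                                      struct mana_rxcomp_oob *oob)
    {
        struct mana_recv_buf_oob *rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
        bool old_fp = false;
        void *old_buf = NULL;

        /* old_buf stays NULL if no replacement buffer could be allocated */
        mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp);

        mana_rx_skb(old_buf, old_fp, oob, rxq);

        mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
        mana_post_pkt_rxq(rxq);
    }
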
1746 struct mana_rxq *rxq = cq->rxq;
1752 rxq->xdp_flush = false;
1758 /* verify recv cqe references the right rxq */
1759 if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
1762 mana_process_rx_cqe(rxq, cq, &comp[i]);
1766 struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
1768 mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq);
1771 if (rxq->xdp_flush)
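
The RX CQ poll loop resets the per-poll XDP flush flag, dispatches each completion after checking that it really belongs to this RQ, rings the RQ doorbell for the buffers re-posted along the way, and flushes pending XDP redirects once per poll. A condensed sketch (the real loop also accounts work_done against the NAPI budget, as the rx_cq.work_done hit above suggests):

    static void poll_rx_cq_sketch(struct mana_cq *cq, struct gdma_comp *comp,
                                  int comp_read)
    {
        struct mana_rxq *rxq = cq->rxq;
        struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
        int i;

        rxq->xdp_flush = false;

        for (i = 0; i < comp_read; i++) {
            /* verify the recv cqe references the right rxq */
            if (WARN_ON_ONCE(comp[i].wq_num != rxq->gdma_id))
                return;

            mana_process_rx_cqe(rxq, cq, &comp[i]);
        }

        /* make the re-posted WQEs visible to the hardware */
        mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq);

        if (rxq->xdp_flush)
            xdp_do_flush();
    }
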
1988 struct mana_rxq *rxq, bool validate_state)
1998 if (!rxq)
2001 napi = &rxq->rx_cq.napi;
2008 xdp_rxq_info_unreg(&rxq->xdp_rxq);
2012 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
2014 mana_deinit_cq(apc, &rxq->rx_cq);
2016 if (rxq->xdp_save_va)
2017 put_page(virt_to_head_page(rxq->xdp_save_va));
2019 for (i = 0; i < rxq->num_rx_buf; i++) {
2020 rx_oob = &rxq->rx_oobs[i];
2031 page_pool_put_full_page(rxq->page_pool, page, false);
2038 page_pool_destroy(rxq->page_pool);
2040 if (rxq->gdma_rq)
2041 mana_gd_destroy_queue(gc, rxq->gdma_rq);
2043 kfree(rxq);
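
Teardown runs roughly in reverse order of creation: stop NAPI, unregister the XDP info, destroy the hardware RQ object and the CQ, release any cached XDP fragment, return every posted buffer, and only then destroy the page_pool and the GDMA queue before freeing the rxq itself. The per-buffer release step looks roughly like the sketch below; the buf_va/from_pool field names are assumed from the allocation side, since the listing does not show them here:

    static void release_rx_oob_sketch(struct mana_rxq *rxq,
                                      struct mana_recv_buf_oob *rx_oob)
    {
        struct page *page = virt_to_head_page(rx_oob->buf_va);

        /* pool pages go back to the pool, page frags are simply released */
        if (rx_oob->from_pool)
            page_pool_put_full_page(rxq->page_pool, page, false);
        else
            put_page(page);

        rx_oob->buf_va = NULL;
    }
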
2047 struct mana_rxq *rxq, struct device *dev)
2049 struct mana_port_context *mpc = netdev_priv(rxq->ndev);
2055 va = mana_get_rxbuf_pre(rxq, &da);
2057 va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
2066 rx_oob->sgl[0].size = rxq->datasize;
2076 struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
2084 WARN_ON(rxq->datasize == 0);
2089 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2090 rx_oob = &rxq->rx_oobs[buf_idx];
2095 ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq,
2115 static int mana_push_wqe(struct mana_rxq *rxq)
2121 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2122 rx_oob = &rxq->rx_oobs[buf_idx];
2124 err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
2133 static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
2140 pprm.napi = &rxq->rx_cq.napi;
2141 pprm.netdev = rxq->ndev;
2143 rxq->page_pool = page_pool_create(&pprm);
2145 if (IS_ERR(rxq->page_pool)) {
2146 ret = PTR_ERR(rxq->page_pool);
2147 rxq->page_pool = NULL;
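
mana_create_page_pool() builds one pool per RX queue and ties it to the queue's NAPI context and net_device so frames can be recycled directly from softirq context. A condensed sketch; the pool_size and nid values are illustrative assumptions, only the napi/netdev assignments and the error handling come from the hits above:

    static int create_page_pool_sketch(struct mana_rxq *rxq)
    {
        struct page_pool_params pprm = {};

        pprm.pool_size = rxq->num_rx_buf;   /* assumption: one page per buffer */
        pprm.nid = NUMA_NO_NODE;
        pprm.napi = &rxq->rx_cq.napi;
        pprm.netdev = rxq->ndev;

        rxq->page_pool = page_pool_create(&pprm);
        if (IS_ERR(rxq->page_pool)) {
            int ret = PTR_ERR(rxq->page_pool);

            rxq->page_pool = NULL;
            return ret;
        }

        return 0;
    }
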
2165 struct mana_rxq *rxq;
2170 rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
2172 if (!rxq)
2175 rxq->ndev = ndev;
2176 rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
2177 rxq->rxq_idx = rxq_idx;
2178 rxq->rxobj = INVALID_MANA_HANDLE;
2180 mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
2181 &rxq->headroom);
2184 err = mana_create_page_pool(rxq, gc);
2190 err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2202 err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
2207 cq = &rxq->rx_cq;
2209 cq->rxq = rxq;
2224 wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
2225 wq_spec.queue_size = rxq->gdma_rq->queue_size;
2233 &wq_spec, &cq_spec, &rxq->rxobj);
2237 rxq->gdma_rq->id = wq_spec.queue_index;
2240 rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2243 rxq->gdma_id = rxq->gdma_rq->id;
2246 err = mana_push_wqe(rxq);
2259 WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
2261 WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
2262 rxq->page_pool));
2269 return rxq;
2273 mana_destroy_rxq(apc, rxq, false);
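
mana_create_rxq() allocates the queue with struct_size() for RX_BUFFERS_PER_QUEUE OOB slots, derives datasize/alloc_size/headroom from the MTU, creates the page_pool, the GDMA WQ/CQ pair and the hardware RQ object (rxobj), pushes the initial WQEs, and finally registers the queue for XDP. That last step pairs xdp_rxq_info_reg() with the page_pool memory model so redirected frames are returned to the pool; a minimal sketch (the NAPI id argument of 0 is an assumption):

    static int reg_xdp_sketch(struct mana_rxq *rxq, struct net_device *ndev,
                              u16 rxq_idx)
    {
        int err;

        err = xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx, 0);
        if (err)
            return err;

        /* let the XDP core return redirected pages to this queue's pool */
        return xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
                                          rxq->page_pool);
    }
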
2285 struct mana_rxq *rxq;
2290 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
2291 if (!rxq) {
2296 u64_stats_init(&rxq->stats.syncp);
2298 apc->rxqs[i] = rxq;
2309 struct mana_rxq *rxq;
2313 rxq = apc->rxqs[rxq_idx];
2314 if (!rxq)
2317 mana_destroy_rxq(apc, rxq, true);