Lines Matching rx_q (definitions and uses)

233 struct stmmac_rx_queue *rx_q;
238 rx_q = &priv->dma_conf.rx_queue[queue];
239 if (rx_q->xsk_pool) {
382 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
385 if (rx_q->dirty_rx <= rx_q->cur_rx)
386 dirty = rx_q->cur_rx - rx_q->dirty_rx;
388 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
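
The two branches at 385-388 are the classic ring-occupancy computation: when dirty_rx has not wrapped past cur_rx the distance is a plain subtraction, otherwise the ring size bridges the wraparound. A minimal standalone model (function and parameter names are illustrative, not the driver's):

#include <assert.h>

static unsigned int rx_dirty(unsigned int dirty_rx, unsigned int cur_rx,
                             unsigned int ring_size)
{
    if (dirty_rx <= cur_rx)
        return cur_rx - dirty_rx;
    return ring_size - dirty_rx + cur_rx;
}

int main(void)
{
    assert(rx_dirty(2, 5, 512) == 3);    /* no wraparound */
    assert(rx_dirty(510, 1, 512) == 3);  /* cur_rx wrapped past the end */
    return 0;
}
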
1267 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1272 head_rx = (void *)rx_q->dma_erx;
1275 head_rx = (void *)rx_q->dma_rx;
1281 rx_q->dma_rx_phy, desc_size);
1355 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1361 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1366 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1442 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1443 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1450 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1457 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1480 * @rx_q: RX queue
1484 struct stmmac_rx_queue *rx_q,
1487 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1490 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1494 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
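
The puts at 1490 and 1494 release a slot's primary page and, in split-buffer mode, its secondary page back to the page pool, then clear the pointers. A userspace sketch of the same release-and-clear pattern, with free() standing in for page_pool_put_full_page() and all names illustrative:

#include <stdlib.h>

struct rx_buffer {
    void *page;        /* primary receive page */
    void *sec_page;    /* secondary page, split-buffer mode only */
};

/* free() stands in for page_pool_put_full_page(); free(NULL) is a no-op,
 * so a slot that never got a secondary page is handled transparently. */
static void free_rx_buffer(struct rx_buffer *buf)
{
    free(buf->page);
    buf->page = NULL;
    free(buf->sec_page);
    buf->sec_page = NULL;
}

int main(void)
{
    struct rx_buffer buf = { .page = malloc(2048) };

    free_rx_buffer(&buf);    /* safe with or without a secondary page */
    return 0;
}
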
1555 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1559 stmmac_free_rx_buffer(priv, rx_q, i);
1566 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1574 p = &((rx_q->dma_erx + i)->basic);
1576 p = rx_q->dma_rx + i;
1583 rx_q->buf_alloc_num++;
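
Lines 1574-1576 show how the driver derives a basic-descriptor pointer from either ring layout: the extended-descriptor ring is an array of a larger struct whose first member is the basic descriptor, so both layouts yield a struct dma_desc pointer. A compilable model, with simplified stand-in struct layouts:

#include <stdio.h>

struct dma_desc { unsigned int des0, des1, des2, des3; };
struct dma_extended_desc {
    struct dma_desc basic;    /* first member: a basic view exists */
    unsigned int des4, des5, des6, des7;
};

int main(void)
{
    struct dma_desc rx[4] = { { 0 } };
    struct dma_extended_desc erx[4] = { { { 0 } } };
    int extend_desc = 1;

    for (int i = 0; i < 4; i++) {
        struct dma_desc *p;

        if (extend_desc)
            p = &(erx + i)->basic;    /* basic view of extended slot i */
        else
            p = rx + i;
        p->des3 = 0x80000000;         /* e.g. mark the slot DMA-owned */
    }
    printf("erx[2].basic.des3 = 0x%x\n", erx[2].basic.des3);
    return 0;
}
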
1599 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1603 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1617 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1632 p = (struct dma_desc *)(rx_q->dma_erx + i);
1634 p = rx_q->dma_rx + i;
1636 buf = &rx_q->buf_pool[i];
1638 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1644 rx_q->buf_alloc_num++;
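
In the zero-copy fill loop at 1632-1644, xsk_buff_alloc() can fail mid-ring, so buf_alloc_num records how many slots were actually populated rather than assuming the full ring size. A standalone model of that fill-until-dry pattern (the allocator here is a stand-in):

#include <stdio.h>

static int budget = 3;    /* pretend the pool runs dry after 3 buffers */
static char backing[8];

static void *fake_xsk_buff_alloc(void)
{
    return budget-- > 0 ? backing : NULL;
}

int main(void)
{
    void *slots[8] = { NULL };
    unsigned int buf_alloc_num = 0;

    for (int i = 0; i < 8; i++) {
        slots[i] = fake_xsk_buff_alloc();
        if (!slots[i])
            break;        /* leave the rest for a later refill */
        buf_alloc_num++;
    }
    printf("populated %u of 8 slots\n", buf_alloc_num);
    return 0;
}
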
1672 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1677 (u32)rx_q->dma_rx_phy);
1681 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1683 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1685 if (rx_q->xsk_pool) {
1686 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1691 rx_q->queue_index);
1692 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1694 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1696 rx_q->page_pool));
1699 rx_q->queue_index);
1702 if (rx_q->xsk_pool) {
1716 stmmac_mode_init(priv, rx_q->dma_erx,
1717 rx_q->dma_rx_phy,
1720 stmmac_mode_init(priv, rx_q->dma_rx,
1721 rx_q->dma_rx_phy,
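
Lines 1683-1699 pick the XDP memory model per queue: with an AF_XDP pool attached, the queue registers MEM_TYPE_XSK_BUFF_POOL (the pool itself is wired up separately via xsk_pool_set_rxq_info()), otherwise MEM_TYPE_PAGE_POOL with the queue's page pool as the allocator. A minimal model of that branch, with stand-in types for the xdp_rxq_info_* API:

#include <assert.h>
#include <stddef.h>

enum mem_type { MEM_TYPE_PAGE_POOL, MEM_TYPE_XSK_BUFF_POOL };

struct rxq_model {
    enum mem_type type;
    void *allocator;
};

static void reg_mem_model(struct rxq_model *rxq, const void *xsk_pool,
                          void *page_pool)
{
    if (xsk_pool) {
        rxq->type = MEM_TYPE_XSK_BUFF_POOL;
        rxq->allocator = NULL;        /* pool is attached separately */
    } else {
        rxq->type = MEM_TYPE_PAGE_POOL;
        rxq->allocator = page_pool;   /* pool owns the RX pages */
    }
}

int main(void)
{
    struct rxq_model rxq;
    int pool;

    reg_mem_model(&rxq, NULL, &pool);
    assert(rxq.type == MEM_TYPE_PAGE_POOL && rxq.allocator == &pool);
    reg_mem_model(&rxq, &pool, &pool);
    assert(rxq.type == MEM_TYPE_XSK_BUFF_POOL && !rxq.allocator);
    return 0;
}
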
1751 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1753 if (rx_q->xsk_pool)
1758 rx_q->buf_alloc_num = 0;
1759 rx_q->xsk_pool = NULL;
1916 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1919 if (rx_q->xsk_pool)
1924 rx_q->buf_alloc_num = 0;
1925 rx_q->xsk_pool = NULL;
1931 rx_q->dma_rx, rx_q->dma_rx_phy);
1935 rx_q->dma_erx, rx_q->dma_rx_phy);
1937 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1938 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1940 kfree(rx_q->buf_pool);
1941 if (rx_q->page_pool)
1942 page_pool_destroy(rx_q->page_pool);
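
The teardown at 1931-1942 is deliberately guarded so it is safe on a partially initialized queue: the XDP rxq is unregistered only if it was registered, and the page pool destroyed only if it was created. A userspace sketch of the same idempotent-teardown shape, all helpers stand-ins:

#include <stdbool.h>
#include <stdlib.h>

struct rx_queue_model {
    bool xdp_registered;    /* xdp_rxq_info_is_reg() stand-in */
    void *buf_pool;
    void *page_pool;
};

static void free_rx_queue(struct rx_queue_model *q)
{
    if (q->xdp_registered)          /* unregister only if registered */
        q->xdp_registered = false;

    free(q->buf_pool);              /* kfree() also tolerates NULL */
    q->buf_pool = NULL;

    if (q->page_pool) {             /* destroy only if created */
        free(q->page_pool);
        q->page_pool = NULL;
    }
}

int main(void)
{
    struct rx_queue_model q = { .buf_pool = malloc(8) };

    free_rx_queue(&q);    /* safe even though no page pool exists */
    return 0;
}
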
2017 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2025 rx_q->queue_index = queue;
2026 rx_q->priv_data = priv;
2038 rx_q->page_pool = page_pool_create(&pp_params);
2039 if (IS_ERR(rx_q->page_pool)) {
2040 ret = PTR_ERR(rx_q->page_pool);
2041 rx_q->page_pool = NULL;
2045 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2046 sizeof(*rx_q->buf_pool),
2048 if (!rx_q->buf_pool)
2052 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2055 &rx_q->dma_rx_phy,
2057 if (!rx_q->dma_erx)
2061 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2064 &rx_q->dma_rx_phy,
2066 if (!rx_q->dma_rx)
2076 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2077 rx_q->queue_index,
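
The allocation sequence at 2038-2077 acquires the page pool, the buf_pool bookkeeping array, the coherent descriptor ring, and the XDP rxq registration in order; on failure, the error path releases whatever was already acquired. A self-contained model of the acquire-in-order/release-in-reverse shape (sizes and names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct rx_queue_model {
    void *page_pool;
    void *buf_pool;
    void *dma_rx;
};

static int alloc_rx_queue(struct rx_queue_model *q, size_t ring_size)
{
    q->page_pool = malloc(64);                      /* page_pool_create() */
    if (!q->page_pool)
        goto err;
    q->buf_pool = calloc(ring_size, sizeof(void *));/* kcalloc(buf_pool) */
    if (!q->buf_pool)
        goto err_pool;
    q->dma_rx = calloc(ring_size, 16);              /* dma_alloc_coherent() */
    if (!q->dma_rx)
        goto err_buf;
    return 0;

err_buf:
    free(q->buf_pool);
err_pool:
    free(q->page_pool);
err:
    return -1;
}

int main(void)
{
    struct rx_queue_model q = { 0 };

    if (alloc_rx_queue(&q, 512) == 0) {
        printf("rx queue resources allocated\n");
        free(q.dma_rx);
        free(q.buf_pool);
        free(q.page_pool);
    }
    return 0;
}
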
2398 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2406 if (rx_q->xsk_pool) {
2407 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2873 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2880 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
3006 struct stmmac_rx_queue *rx_q;
3040 rx_q = &priv->dma_conf.rx_queue[chan];
3043 rx_q->dma_rx_phy, chan);
3045 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3046 (rx_q->buf_alloc_num *
3049 rx_q->rx_tail_addr, chan);
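
Lines 3045-3049 program the hardware tail pointer as the ring's DMA base plus the number of populated descriptors times the descriptor size. A worked example, assuming a 16-byte basic descriptor (the driver selects the actual size per descriptor mode):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t dma_rx_phy = 0x40000000;    /* ring base (illustrative) */
    unsigned int buf_alloc_num = 512;    /* populated descriptors */
    unsigned int desc_size = 16;         /* assumed basic-desc size */

    uint32_t rx_tail_addr = dma_rx_phy + buf_alloc_num * desc_size;
    printf("tail = 0x%x\n", (unsigned int)rx_tail_addr);  /* 0x40002000 */
    return 0;
}
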
4798 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4800 unsigned int entry = rx_q->dirty_rx;
4807 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4812 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4814 p = rx_q->dma_rx + entry;
4817 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4823 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4837 stmmac_refill_desc3(priv, rx_q, p);
4839 rx_q->rx_count_frames++;
4840 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4841 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4842 rx_q->rx_count_frames = 0;
4845 use_rx_wd |= rx_q->rx_count_frames > 0;
4854 rx_q->dirty_rx = entry;
4855 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4856 (rx_q->dirty_rx * sizeof(struct dma_desc));
4857 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5166 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5167 unsigned int entry = rx_q->dirty_rx;
5173 while (budget-- > 0 && entry != rx_q->cur_rx) {
5174 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5179 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5187 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5189 rx_desc = rx_q->dma_rx + entry;
5194 stmmac_refill_desc3(priv, rx_q, rx_desc);
5196 rx_q->rx_count_frames++;
5197 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5198 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5199 rx_q->rx_count_frames = 0;
5202 use_rx_wd |= rx_q->rx_count_frames > 0;
5213 rx_q->dirty_rx = entry;
5214 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5215 (rx_q->dirty_rx * sizeof(struct dma_desc));
5216 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5235 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5238 unsigned int next_entry = rx_q->cur_rx;
5251 rx_head = (void *)rx_q->dma_erx;
5254 rx_head = (void *)rx_q->dma_rx;
5259 rx_q->dma_rx_phy, desc_size);
5269 if (!count && rx_q->state_saved) {
5270 error = rx_q->state.error;
5271 len = rx_q->state.len;
5273 rx_q->state_saved = false;
5284 buf = &rx_q->buf_pool[entry];
5293 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5295 p = rx_q->dma_rx + entry;
5304 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5306 next_entry = rx_q->cur_rx;
5309 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5311 np = rx_q->dma_rx + next_entry;
5321 rx_q->dma_erx + entry);
5390 rx_q->state_saved = true;
5391 rx_q->state.error = error;
5392 rx_q->state.len = len;
5404 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5406 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5408 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
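
The cur_rx advances at 5304 and 5498 use STMMAC_GET_ENTRY(). Assuming the usual masked-increment definition over a power-of-two ring size, the next index wraps without a division or branch:

#include <stdio.h>

/* Assumed definition: masked increment over a power-of-two ring size. */
#define GET_ENTRY(x, size)    (((x) + 1) & ((size) - 1))

int main(void)
{
    unsigned int size = 512, entry = 510;

    for (int i = 0; i < 4; i++) {
        printf("%u ", entry);    /* prints: 510 511 0 1 */
        entry = GET_ENTRY(entry, size);
    }
    printf("\n");
    return 0;
}
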
5428 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5432 unsigned int next_entry = rx_q->cur_rx;
5440 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5449 rx_head = (void *)rx_q->dma_erx;
5452 rx_head = (void *)rx_q->dma_rx;
5457 rx_q->dma_rx_phy, desc_size);
5467 if (!count && rx_q->state_saved) {
5468 skb = rx_q->state.skb;
5469 error = rx_q->state.error;
5470 len = rx_q->state.len;
5472 rx_q->state_saved = false;
5485 buf = &rx_q->buf_pool[entry];
5488 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5490 p = rx_q->dma_rx + entry;
5498 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5500 next_entry = rx_q->cur_rx;
5503 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5505 np = rx_q->dma_rx + next_entry;
5510 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5512 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5556 xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5580 page_pool_put_page(rx_q->page_pool,
5623 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5686 rx_q->state_saved = true;
5687 rx_q->state.skb = skb;
5688 rx_q->state.error = error;
5689 rx_q->state.len = len;
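
Lines 5269-5273 and 5686-5689 (and their zero-copy counterparts at 5390-5392) implement the saved-state pattern: when the NAPI budget runs out mid-frame, the partial frame's skb, error flag, and accumulated length are parked on the queue, and the next poll restores them before consuming new descriptors. A standalone model of that resume logic (the harness and sizes are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct rx_state {
    bool saved;
    int error;
    unsigned int len;
};

/* Each descriptor contributes 100 bytes; a frame completes at 300. */
static int poll(struct rx_state *st, int budget, unsigned int *done_len)
{
    int error = 0, count = 0;
    unsigned int len = 0;

    if (!count && st->saved) {    /* resume a frame cut short last time */
        error = st->error;
        len = st->len;
        st->saved = false;
    }
    while (count < budget) {
        len += 100;
        count++;
        if (len >= 300) {         /* frame complete */
            *done_len = len;
            return count;
        }
    }
    st->saved = true;             /* budget exhausted mid-frame */
    st->error = error;
    st->len = len;
    return count;
}

int main(void)
{
    struct rx_state st = { 0 };
    unsigned int done = 0;

    poll(&st, 2, &done);    /* consumes 200B, frame unfinished */
    poll(&st, 2, &done);    /* resumes at 200B, completes at 300B */
    printf("frame len %u\n", done);
    return 0;
}
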
6169 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6171 int chan = rx_q->queue_index;
6174 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
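
Line 6174 recovers the enclosing stmmac_dma_conf from a pointer to one embedded rx_queue element. A self-contained demonstration of that container_of() step, with simplified stand-in structs:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct rx_queue { int queue_index; };
struct dma_conf {
    int dma_rx_size;
    struct rx_queue rx_queue[4];
};

int main(void)
{
    struct dma_conf conf = { .dma_rx_size = 512 };
    struct rx_queue *rx_q = &conf.rx_queue[2];

    /* Subtract the member's offset to recover the parent structure. */
    struct dma_conf *back = container_of(rx_q, struct dma_conf,
                                         rx_queue[2]);
    printf("%d\n", back->dma_rx_size);    /* 512: parent recovered */
    return 0;
}
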
6351 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6357 sysfs_display_ring((void *)rx_q->dma_erx,
6358 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6361 sysfs_display_ring((void *)rx_q->dma_rx,
6362 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6797 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6820 rx_q->dma_rx_phy, rx_q->queue_index);
6822 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6825 rx_q->rx_tail_addr, rx_q->queue_index);
6827 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6828 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6831 rx_q->queue_index);
6835 rx_q->queue_index);
6937 struct stmmac_rx_queue *rx_q;
6971 rx_q = &priv->dma_conf.rx_queue[chan];
6974 rx_q->dma_rx_phy, chan);
6976 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6977 (rx_q->buf_alloc_num *
6980 rx_q->rx_tail_addr, chan);
6982 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6983 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6986 rx_q->queue_index);
6990 rx_q->queue_index);
7043 struct stmmac_rx_queue *rx_q;
7058 rx_q = &priv->dma_conf.rx_queue[queue];
7062 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7924 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7926 rx_q->cur_rx = 0;
7927 rx_q->dirty_rx = 0;