Lines Matching refs:ch (references to struct dpaa2_eth_channel *ch in the dpaa2-eth driver)

177 static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
185 ch->buf_count--;
199 struct dpaa2_eth_channel *ch,
273 ch->buf_count -= i + 2;
307 struct dpaa2_eth_channel *ch,
313 ch->recycled_bufs[ch->recycled_bufs_cnt++] = addr;
314 if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
317 while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
318 ch->recycled_bufs,
319 ch->recycled_bufs_cnt)) == -EBUSY) {
326 dpaa2_eth_free_bufs(priv, ch->recycled_bufs,
327 ch->recycled_bufs_cnt, ch->xsk_zc);
328 ch->buf_count -= ch->recycled_bufs_cnt;
331 ch->recycled_bufs_cnt = 0;
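
The fragments at lines 307-331 show the per-channel buffer recycling path batching released buffers and flushing them to the hardware pool in one command. A hedged reconstruction of that pattern follows; the retry bound (DPAA2_ETH_SWP_BUSY_RETRIES) and the cpu_relax() backoff are assumptions not visible in the listing.

/* Sketch: queue a buffer for recycling and flush to the pool in batches. */
static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
                                  struct dpaa2_eth_channel *ch,
                                  dma_addr_t addr)
{
        int retries = 0;
        int err;

        ch->recycled_bufs[ch->recycled_bufs_cnt++] = addr;
        if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
                return;

        /* Release the whole batch back to the buffer pool; the software
         * portal may be transiently busy, so retry a bounded number of times.
         */
        while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
                                               ch->recycled_bufs,
                                               ch->recycled_bufs_cnt)) == -EBUSY) {
                if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
                        break;
                cpu_relax();
        }

        if (err) {
                /* Could not hand the buffers back: free them instead and
                 * account for the lost pool occupancy.
                 */
                dpaa2_eth_free_bufs(priv, ch->recycled_bufs,
                                    ch->recycled_bufs_cnt, ch->xsk_zc);
                ch->buf_count -= ch->recycled_bufs_cnt;
        }

        ch->recycled_bufs_cnt = 0;
}

The same release-with-retry idiom shows up again in the pool seeding path at lines 1746-1757.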
364 struct dpaa2_eth_channel *ch,
381 ch->stats.xdp_tx++;
384 dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
386 ch->stats.xdp_tx_err++;
392 struct dpaa2_eth_channel *ch,
422 dpaa2_eth_xdp_tx_flush(priv, ch, fq);
426 struct dpaa2_eth_channel *ch,
436 xdp_prog = READ_ONCE(ch->xdp.prog);
441 xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
455 dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
464 dpaa2_eth_recycle_buf(priv, ch, addr);
465 ch->stats.xdp_drop++;
470 ch->buf_count--;
484 ch->buf_count++;
485 dpaa2_eth_recycle_buf(priv, ch, addr);
487 ch->stats.xdp_drop++;
489 ch->stats.xdp_redirect++;
494 ch->xdp.res |= xdp_act;
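
Lines 426-494 reference ch from the XDP hook on the Rx path: the program pointer is read with READ_ONCE(), the xdp_buff is initialised against the channel's xdp_rxq, and the verdict is folded into ch->xdp.res so the poll loop knows what to flush later. A condensed, hedged sketch of that dispatch; the offset math, DMA handling and the XDP_ABORTED/tracing details are simplified or omitted.

/* Sketch of the per-frame XDP dispatch on the Rx path. */
static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
                             struct dpaa2_eth_channel *ch,
                             struct dpaa2_eth_fq *rx_fq,
                             struct dpaa2_fd *fd, void *vaddr)
{
        dma_addr_t addr = dpaa2_fd_get_addr(fd);
        struct bpf_prog *xdp_prog;
        struct xdp_buff xdp;
        u32 xdp_act = XDP_PASS;
        u32 offset;
        int err;

        xdp_prog = READ_ONCE(ch->xdp.prog);
        if (!xdp_prog)
                return XDP_PASS;

        /* Assumed: place xdp.data at the frame start inside the raw buffer,
         * leaving XDP_PACKET_HEADROOM in front of it.
         */
        offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
        xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
        xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
                         dpaa2_fd_get_len(fd), false);

        xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);

        switch (xdp_act) {
        case XDP_PASS:
                break;
        case XDP_TX:
                /* Bounce the frame back out; the actual enqueue is batched
                 * and flushed once per NAPI run (lines 364-422).
                 */
                dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
                break;
        case XDP_REDIRECT:
                /* The buffer leaves this channel's pool accounting... */
                ch->buf_count--;
                err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
                if (unlikely(err)) {
                        /* ...unless the redirect fails, in which case it is
                         * recycled like a dropped frame.
                         */
                        ch->buf_count++;
                        dpaa2_eth_recycle_buf(priv, ch, addr);
                        ch->stats.xdp_drop++;
                } else {
                        ch->stats.xdp_redirect++;
                }
                break;
        case XDP_ABORTED:
        case XDP_DROP:
        default:
                dpaa2_eth_recycle_buf(priv, ch, addr);
                ch->stats.xdp_drop++;
                break;
        }

        ch->xdp.res |= xdp_act;
        return xdp_act;
}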
500 struct dpaa2_eth_channel *ch,
510 skb = napi_alloc_skb(&ch->napi, skb_len);
522 static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
526 struct dpaa2_eth_priv *priv = ch->priv;
532 return dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, fd_vaddr);
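
Lines 500-532 show a copybreak path: small frames are copied into an skb allocated from the channel's NAPI cache instead of wrapping the hardware buffer, which lets the buffer be recycled immediately. A minimal sketch, assuming priv->rx_copybreak holds the length threshold and ignoring headroom and alignment details.

static struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
                                           struct dpaa2_eth_channel *ch,
                                           const struct dpaa2_fd *fd,
                                           u32 fd_length, void *fd_vaddr)
{
        u16 fd_offset = dpaa2_fd_get_offset(fd);
        struct sk_buff *skb;

        /* Allocate from the channel's NAPI cache and copy the frame
         * (the real helper also reserves Tx headroom).
         */
        skb = napi_alloc_skb(&ch->napi, fd_length);
        if (!skb)
                return NULL;

        memcpy(skb_put(skb, fd_length), fd_vaddr + fd_offset, fd_length);
        return skb;
}

static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
                                           const struct dpaa2_fd *fd,
                                           void *fd_vaddr)
{
        struct dpaa2_eth_priv *priv = ch->priv;
        u32 fd_length = dpaa2_fd_get_len(fd);

        if (fd_length > priv->rx_copybreak)
                return NULL;    /* too big to copy: caller wraps the buffer */

        return dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, fd_vaddr);
}

The Rx handler at lines 595-643 tries this first and only builds a linear or fragmented skb around the original buffer when the frame is larger than the threshold.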
536 struct dpaa2_eth_channel *ch,
572 ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
574 list_add_tail(&skb->list, ch->rx_list);
579 struct dpaa2_eth_channel *ch,
608 xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
615 skb = dpaa2_eth_copybreak(ch, fd, vaddr);
619 skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
628 skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
640 dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
643 dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
656 struct dpaa2_eth_channel *ch,
679 skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
683 skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
701 ch->buf_count--;
710 static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
713 struct dpaa2_eth_priv *priv = ch->priv;
721 dq = dpaa2_io_store_next(ch->store, &is_last);
739 fq->consume(priv, ch, fd, fq);
748 ch->stats.frames += cleaned;
749 ch->stats.frames_per_cdan += cleaned;
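
Lines 710-749 reference ch from the loop that drains the channel's dequeue store after a volatile dequeue command. A hedged sketch of that loop; the dpaa2_dq_fd()/dpaa2_dq_fqd_ctx() accessors and the retry bound are taken as assumptions beyond what the listing shows.

/* Sketch: drain the responses from the channel's dequeue store. */
static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
                                    struct dpaa2_eth_fq **src)
{
        struct dpaa2_eth_priv *priv = ch->priv;
        struct dpaa2_eth_fq *fq = NULL;
        const struct dpaa2_fd *fd;
        struct dpaa2_dq *dq;
        int cleaned = 0, retries = 0;
        int is_last;

        do {
                dq = dpaa2_io_store_next(ch->store, &is_last);
                if (unlikely(!dq)) {
                        /* A volatile dequeue command was issued, so keep
                         * polling the store until a valid response shows up.
                         */
                        if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
                                return -ETIMEDOUT;
                        continue;
                }

                /* The FQ context was programmed with the frame queue
                 * pointer, so each response selects its consume handler.
                 */
                fd = dpaa2_dq_fd(dq);
                fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);

                fq->consume(priv, ch, fd, fq);
                cleaned++;
                retries = 0;
        } while (!is_last);

        if (!cleaned)
                return 0;

        ch->stats.frames += cleaned;
        ch->stats.frames_per_cdan += cleaned;

        if (src)
                *src = fq;
        return cleaned;
}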
1120 struct dpaa2_eth_channel *ch,
1211 ch->xsk_tx_pkts_sent++;
1580 struct dpaa2_eth_channel *ch,
1595 ch->stats.bytes_per_cdan += fd_len;
1599 dpaa2_eth_free_tx_fd(priv, ch, fq, fd, true);
1677 struct dpaa2_eth_channel *ch)
1690 if (!ch->xsk_zc) {
1713 ch->bp->bpid);
1715 } else if (xsk_buff_can_alloc(ch->xsk_pool, DPAA2_ETH_BUFS_PER_CMD)) {
1720 batch = xsk_buff_alloc_batch(ch->xsk_pool, xdp_buffs,
1740 ch->bp->bpid);
1746 while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
1757 dpaa2_eth_free_bufs(priv, buf_array, i, ch->xsk_zc);
1764 if (!ch->xsk_zc) {
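
Lines 1677-1764 show dpaa2_eth_add_bufs() seeding the pool either from page-backed buffers or, when ch->xsk_zc is set, from the AF_XDP zero-copy pool, followed by the same busy-retry release as the recycle path. A compressed sketch with the page-backed branch reduced to a comment; xsk_buff_xdp_get_frame_dma() and the retry bound are assumptions.

/* Sketch: seed one batch of Rx buffers into the hardware pool. */
static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
                              struct dpaa2_eth_channel *ch)
{
        struct xdp_buff *xdp_buffs[DPAA2_ETH_BUFS_PER_CMD];
        u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
        int i = 0, err, batch, retries = 0;

        if (!ch->xsk_zc) {
                /* Regular path (elided): allocate and DMA-map one page per
                 * buffer, filling buf_array[0..DPAA2_ETH_BUFS_PER_CMD).
                 */
                return 0;
        } else if (xsk_buff_can_alloc(ch->xsk_pool, DPAA2_ETH_BUFS_PER_CMD)) {
                /* AF_XDP zero-copy: pull a batch of frames from the UMEM. */
                batch = xsk_buff_alloc_batch(ch->xsk_pool, xdp_buffs,
                                             DPAA2_ETH_BUFS_PER_CMD);
                if (!batch)
                        return 0;

                for (i = 0; i < batch; i++)
                        buf_array[i] = xsk_buff_xdp_get_frame_dma(xdp_buffs[i]);
        } else {
                return 0;
        }

        /* Hand the batch to the buffer pool, retrying while the software
         * portal is busy (same pattern as the recycle path above).
         */
        while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
                                               buf_array, i)) == -EBUSY) {
                if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
                        break;
        }
        if (err) {
                /* Release failed: drop the buffers we could not seed. */
                dpaa2_eth_free_bufs(priv, buf_array, i, ch->xsk_zc);
                return 0;
        }

        return i;
}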
1781 struct dpaa2_eth_channel *ch)
1787 new_count = dpaa2_eth_add_bufs(priv, ch);
1788 ch->buf_count += new_count;
1878 struct dpaa2_eth_channel *ch)
1882 if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
1886 new_count = dpaa2_eth_add_bufs(priv, ch);
1891 ch->buf_count += new_count;
1892 } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
1894 if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
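
Lines 1878-1894 are the refill check run from the poll loop: do nothing while the per-channel count is at or above DPAA2_ETH_REFILL_THRESH, otherwise call the seeding helper until the pool is back at DPAA2_ETH_NUM_BUFS. A minimal sketch of that logic; the return-value convention is assumed.

static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
                                 struct dpaa2_eth_channel *ch)
{
        int new_count;

        /* Cheap early exit: the pool is still comfortably seeded. */
        if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
                return 0;

        do {
                new_count = dpaa2_eth_add_bufs(priv, ch);
                if (unlikely(!new_count))
                        break;  /* out of buffers or memory; retry next poll */
                ch->buf_count += new_count;
        } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);

        if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
                return -ENOMEM;

        return 0;
}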
1916 static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
1923 err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
1924 ch->store);
1929 ch->stats.dequeue_portal_busy += dequeues;
1931 ch->stats.pull_err++;
1944 struct dpaa2_eth_channel *ch;
1956 ch = container_of(napi, struct dpaa2_eth_channel, napi);
1957 ch->xdp.res = 0;
1958 priv = ch->priv;
1961 ch->rx_list = &rx_list;
1963 if (ch->xsk_zc) {
1964 work_done_zc = dpaa2_xsk_tx(priv, ch);
1973 err = dpaa2_eth_pull_channel(ch);
1978 dpaa2_eth_refill_pool(priv, ch);
1980 store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
1998 if (ch->xdp.res & XDP_REDIRECT)
2004 if (ch->xdp.res & XDP_REDIRECT)
2008 dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
2009 ch->stats.bytes_per_cdan);
2010 ch->stats.frames_per_cdan = 0;
2011 ch->stats.bytes_per_cdan = 0;
2018 err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
2022 ch->nctx.desired_cpu);
2027 netif_receive_skb_list(ch->rx_list);
2029 if (ch->xsk_tx_pkts_sent) {
2030 xsk_tx_completed(ch->xsk_pool, ch->xsk_tx_pkts_sent);
2031 ch->xsk_tx_pkts_sent = 0;
2042 if (rx_cleaned && ch->xdp.res & XDP_TX)
2043 dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
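
The densest cluster of ch references (lines 1944-2043) is the NAPI poll handler, which ties the pieces above together: pull frames into the channel store, refill the buffer pool, consume frames, and, once under budget, feed interrupt moderation, re-arm the notification and flush deferred XDP and Rx work. A condensed, hedged sketch; budget accounting, Tx-confirmation handling and the AF_XDP branches are simplified to comments.

static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
{
        struct dpaa2_eth_channel *ch;
        struct dpaa2_eth_priv *priv;
        struct dpaa2_eth_fq *fq = NULL;
        int store_cleaned, rx_cleaned = 0, work_done;
        LIST_HEAD(rx_list);
        int err;

        ch = container_of(napi, struct dpaa2_eth_channel, napi);
        ch->xdp.res = 0;
        priv = ch->priv;

        /* Rx skbs are accumulated on this list and delivered in one batch. */
        ch->rx_list = &rx_list;

        /* AF_XDP: the real handler also runs dpaa2_xsk_tx() first when the
         * channel is in zero-copy mode and later reports completions with
         * xsk_tx_completed(ch->xsk_pool, ch->xsk_tx_pkts_sent).
         */

        do {
                err = dpaa2_eth_pull_channel(ch);
                if (unlikely(err))
                        break;

                /* Refill before consuming, so buffers freed on the previous
                 * iteration are available to the hardware again.
                 */
                dpaa2_eth_refill_pool(priv, ch);

                store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
                if (store_cleaned <= 0)
                        break;
                rx_cleaned += store_cleaned;
        } while (rx_cleaned < budget);

        if (rx_cleaned >= budget) {
                work_done = budget;     /* more to do, NAPI will poll again */
                goto out;
        }

        work_done = rx_cleaned;
        if (napi_complete_done(napi, work_done)) {
                /* All caught up: feed interrupt moderation (DIM) and re-arm
                 * the channel data availability notification.
                 */
                dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
                                        ch->stats.bytes_per_cdan);
                ch->stats.frames_per_cdan = 0;
                ch->stats.bytes_per_cdan = 0;

                do {
                        err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
                } while (err == -EBUSY);
        }

out:
        if (ch->xdp.res & XDP_REDIRECT)
                xdp_do_flush();

        netif_receive_skb_list(ch->rx_list);

        if (rx_cleaned && (ch->xdp.res & XDP_TX) && fq)
                dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[fq->flowid]);

        return work_done;
}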
2050 struct dpaa2_eth_channel *ch;
2054 ch = priv->channel[i];
2055 napi_enable(&ch->napi);
2061 struct dpaa2_eth_channel *ch;
2065 ch = priv->channel[i];
2066 napi_disable(&ch->napi);
2734 struct dpaa2_eth_channel *ch;
2770 ch = priv->channel[i];
2771 old = xchg(&ch->xdp.prog, prog);
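
Lines 2734-2771 show how an XDP program is attached: the new bpf_prog pointer is swapped into every channel with xchg(), pairing with the READ_ONCE(ch->xdp.prog) on the hot path at line 436. A minimal sketch of the per-channel swap; the helper name, the priv->num_channels field and the reference counting are assumptions, and the real setup path does additional reconfiguration not shown here.

/* Sketch: publish a new XDP program to every channel. Assumes the caller
 * already holds one reference per channel (e.g. via bpf_prog_add()).
 */
static void dpaa2_eth_swap_xdp_prog(struct dpaa2_eth_priv *priv,
                                    struct bpf_prog *prog)
{
        struct dpaa2_eth_channel *ch;
        struct bpf_prog *old;
        int i;

        for (i = 0; i < priv->num_channels; i++) {
                ch = priv->channel[i];
                old = xchg(&ch->xdp.prog, prog);
                if (old)
                        bpf_prog_put(old);      /* drop the old program's ref */
        }
}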
3038 struct dpaa2_eth_channel *ch;
3040 ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
3043 ch->stats.cdan++;
3048 if (!napi_if_scheduled_mark_missed(&ch->napi))
3049 napi_schedule(&ch->napi);
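
Lines 3038-3049 are the channel data availability notification (CDAN) callback, the piece that actually schedules the poll handler above. A short sketch matching those fragments; the callback name is inferred.

static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
        struct dpaa2_eth_channel *ch;

        ch = container_of(ctx, struct dpaa2_eth_channel, nctx);

        /* Count the notification for per-channel statistics. */
        ch->stats.cdan++;

        /* If NAPI is already running, just mark that more work arrived;
         * otherwise schedule it on this CPU.
         */
        if (!napi_if_scheduled_mark_missed(&ch->napi))
                napi_schedule(&ch->napi);
}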
3264 struct dpaa2_eth_channel *ch;
3269 ch = priv->channel[i];
3270 dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
3271 dpaa2_eth_free_channel(priv, ch);
4792 struct dpaa2_eth_channel *ch;
4795 ch = priv->channel[i];
4797 netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll);
4804 struct dpaa2_eth_channel *ch;
4807 ch = priv->channel[i];
4808 netif_napi_del(&ch->napi);
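
Finally, lines 4792-4808, together with the enable/disable loops at 2050-2066, cover the per-channel NAPI lifecycle. A compact sketch of the add/delete pair; the wrapper names and the priv->num_channels field are assumptions.

/* Sketch: register one NAPI context per channel at setup time... */
static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
{
        struct dpaa2_eth_channel *ch;
        int i;

        for (i = 0; i < priv->num_channels; i++) {
                ch = priv->channel[i];
                /* The 3-argument helper uses the default NAPI weight. */
                netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll);
        }
}

/* ...and tear them down symmetrically on remove. */
static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
{
        struct dpaa2_eth_channel *ch;
        int i;

        for (i = 0; i < priv->num_channels; i++) {
                ch = priv->channel[i];
                netif_napi_del(&ch->napi);
        }
}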