Lines matching references to chan

140 					  u32 rxmode, u32 chan);
2247 * @chan: RX channel index
2251 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2253 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2254 stmmac_start_rx(priv, priv->ioaddr, chan);
2260 * @chan: TX channel index
2264 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2266 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2267 stmmac_start_tx(priv, priv->ioaddr, chan);
2273 * @chan: RX channel index
2277 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2279 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2280 stmmac_stop_rx(priv, priv->ioaddr, chan);
2286 * @chan: TX channel index
2290 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2292 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2293 stmmac_stop_tx(priv, priv->ioaddr, chan);
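The four helpers at lines 2251-2293 are deliberately thin and symmetric: each logs a debug trace and forwards the channel index to the matching hardware callback. A minimal compilable sketch of that shape, where struct nic, the hw_* stubs and printf() are hypothetical stand-ins for the driver's private data, the stmmac_start_rx()/stmmac_stop_rx() callbacks and netdev_dbg():

    #include <stdio.h>

    struct nic { void *ioaddr; };

    /* hypothetical hardware callbacks standing in for stmmac_start_rx()
     * and stmmac_stop_rx() */
    static void hw_start_rx(void *ioaddr, unsigned int chan) { (void)ioaddr; (void)chan; }
    static void hw_stop_rx(void *ioaddr, unsigned int chan)  { (void)ioaddr; (void)chan; }

    static void nic_start_rx_dma(struct nic *priv, unsigned int chan)
    {
            /* trace first, then start the engine, mirroring the driver's order */
            printf("DMA RX processes started in channel %u\n", chan);
            hw_start_rx(priv->ioaddr, chan);
    }

    static void nic_stop_rx_dma(struct nic *priv, unsigned int chan)
    {
            printf("DMA RX processes stopped in channel %u\n", chan);
            hw_stop_rx(priv->ioaddr, chan);
    }

    int main(void)
    {
            struct nic n = { 0 };

            nic_start_rx_dma(&n, 0);
            nic_stop_rx_dma(&n, 0);
            return 0;
    }

The TX pair differs only in direction; keeping four separate wrappers makes per-direction tracing and error handling trivial.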
2301 u32 chan;
2303 for (chan = 0; chan < dma_csr_ch; chan++) {
2304 struct stmmac_channel *ch = &priv->channel[chan];
2308 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2323 u32 chan = 0;
2325 for (chan = 0; chan < rx_channels_count; chan++)
2326 stmmac_start_rx_dma(priv, chan);
2328 for (chan = 0; chan < tx_channels_count; chan++)
2329 stmmac_start_tx_dma(priv, chan);
2342 u32 chan = 0;
2344 for (chan = 0; chan < rx_channels_count; chan++)
2345 stmmac_stop_rx_dma(priv, chan);
2347 for (chan = 0; chan < tx_channels_count; chan++)
2348 stmmac_stop_tx_dma(priv, chan);
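stmmac_start_all_dma() and stmmac_stop_all_dma() (lines 2323-2348) are plain loops over the two channel ranges; RX and TX channel counts may differ, which is why the loops stay separate. A sketch under that assumption, with invented counts and per-channel helpers:

    #define RX_CHANNELS 2U
    #define TX_CHANNELS 4U      /* counts can differ, hence two loops */

    static void start_rx_dma(unsigned int chan) { (void)chan; /* program RX engine */ }
    static void start_tx_dma(unsigned int chan) { (void)chan; /* program TX engine */ }

    static void start_all_dma(void)
    {
            unsigned int chan;

            for (chan = 0; chan < RX_CHANNELS; chan++)
                    start_rx_dma(chan);

            for (chan = 0; chan < TX_CHANNELS; chan++)
                    start_tx_dma(chan);
    }

    int main(void)
    {
            start_all_dma();
            return 0;
    }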
2365 u32 chan = 0;
2397 for (chan = 0; chan < rx_channels_count; chan++) {
2398 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2401 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2403 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2410 chan);
2414 chan);
2418 for (chan = 0; chan < tx_channels_count; chan++) {
2419 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2421 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
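stmmac_dma_operation_mode() (starting at line 2365) pulls each queue's operating mode (mode_to_use) out of platform data before programming the channel, so threshold and store-and-forward operation can be mixed across queues. A compilable sketch of the per-queue lookup; struct plat_data, dma_rx_mode() and the two-queue configuration are invented for illustration:

    #include <stdio.h>

    #define RX_QUEUES 2U

    struct rx_queue_cfg { unsigned char mode_to_use; };
    struct plat_data { struct rx_queue_cfg rx_queues_cfg[RX_QUEUES]; };

    static void dma_rx_mode(unsigned int rxmode, unsigned int chan,
                            unsigned char qmode)
    {
            printf("chan %u: dma mode %u, queue mode %u\n",
                   chan, rxmode, (unsigned int)qmode);
    }

    static void dma_operation_mode(const struct plat_data *plat, unsigned int rxmode)
    {
            unsigned int chan;

            /* each channel gets the global DMA mode plus its own queue mode */
            for (chan = 0; chan < RX_QUEUES; chan++)
                    dma_rx_mode(rxmode, chan,
                                plat->rx_queues_cfg[chan].mode_to_use);
    }

    int main(void)
    {
            const struct plat_data plat = { { { 0 }, { 1 } } };

            dma_operation_mode(&plat, 1);
            return 0;
    }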
2584 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2590 stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2593 chan);
2800 * @chan: channel index
2804 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2806 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2808 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2810 stmmac_stop_tx_dma(priv, chan);
2811 dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2812 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2813 stmmac_reset_tx_queue(priv, chan);
2815 tx_q->dma_tx_phy, chan);
2816 stmmac_start_tx_dma(priv, chan);
2819 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
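stmmac_tx_err() (line 2804) performs a full per-channel TX reset, and the ordering of lines 2808-2819 is the point: the software queue is stopped before the DMA engine, the ring is only touched once the engine is quiet, and transmission resumes last. A sketch of that sequence with hypothetical stubs:

    static void queue_stop(unsigned int chan)            { (void)chan; }
    static void dma_stop_tx(unsigned int chan)           { (void)chan; }
    static void free_tx_buffers(unsigned int chan)       { (void)chan; }
    static void clear_tx_descriptors(unsigned int chan)  { (void)chan; }
    static void reset_tx_indices(unsigned int chan)      { (void)chan; }
    static void init_tx_chan(unsigned int chan)          { (void)chan; }
    static void dma_start_tx(unsigned int chan)          { (void)chan; }
    static void queue_wake(unsigned int chan)            { (void)chan; }

    static void tx_err_recover(unsigned int chan)
    {
            queue_stop(chan);           /* 1. stop feeding the ring          */
            dma_stop_tx(chan);          /* 2. quiesce the hardware           */
            free_tx_buffers(chan);      /* 3. drop in-flight buffers         */
            clear_tx_descriptors(chan); /* 4. descriptors to a known state   */
            reset_tx_indices(chan);     /* 5. cur/dirty indices back to zero */
            init_tx_chan(chan);         /* 6. reprogram the descriptor base  */
            dma_start_tx(chan);         /* 7. restart the engine             */
            queue_wake(chan);           /* 8. resume transmission            */
    }

    int main(void)
    {
            tx_err_recover(0);
            return 0;
    }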
2827 * @chan: channel index
2833 u32 rxmode, u32 chan)
2835 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2836 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2851 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2852 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2869 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2872 &priv->xstats, chan, dir);
2873 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2874 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2875 struct stmmac_channel *ch = &priv->channel[chan];
2883 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2886 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2892 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2895 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
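stmmac_napi_check() (line 2869) masks a channel's RX or TX interrupt before scheduling the corresponding NAPI poller, and only when the channel index falls inside the configured queue range. Masking first guarantees the source stays quiet until the poller unmasks it. A sketch of the dispatch; the status bits, queue counts and stubs are illustrative:

    #define HANDLE_RX 0x1U
    #define HANDLE_TX 0x2U

    static void disable_dma_irq(unsigned int chan, int rx, int tx)
    {
            (void)chan; (void)rx; (void)tx; /* mask per-direction sources */
    }

    static void schedule_rx_napi(unsigned int chan) { (void)chan; }
    static void schedule_tx_napi(unsigned int chan) { (void)chan; }

    static void napi_check(unsigned int chan, unsigned int status,
                           unsigned int rx_queues, unsigned int tx_queues)
    {
            /* mask before scheduling so the IRQ cannot refire while the
             * poller is pending; the poller unmasks when its work is done */
            if ((status & HANDLE_RX) && chan < rx_queues) {
                    disable_dma_irq(chan, 1, 0);
                    schedule_rx_napi(chan);
            }

            if ((status & HANDLE_TX) && chan < tx_queues) {
                    disable_dma_irq(chan, 0, 1);
                    schedule_tx_napi(chan);
            }
    }

    int main(void)
    {
            napi_check(0, HANDLE_RX | HANDLE_TX, 1, 1);
            return 0;
    }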
2917 u32 chan;
2924 for (chan = 0; chan < channels_to_check; chan++)
2925 status[chan] = stmmac_napi_check(priv, chan,
2928 for (chan = 0; chan < tx_channel_count; chan++) {
2929 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2931 stmmac_bump_dma_threshold(priv, chan);
2932 } else if (unlikely(status[chan] == tx_hard_error)) {
2933 stmmac_tx_err(priv, chan);
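stmmac_dma_interrupt() (around line 2917) gathers per-channel status first, then walks the TX channels and grades the failure: a DMA threshold bump for a recoverable condition versus the full stmmac_tx_err() reset for a hard error. A sketch of that two-level dispatch; the status codes and helpers are placeholders:

    #define TX_HARD_ERROR          0x1U
    #define TX_HARD_ERROR_BUMP_TC  0x2U
    #define MAX_CHANNELS           8U

    static void bump_dma_threshold(unsigned int chan) { (void)chan; }
    static void tx_err_recover(unsigned int chan)     { (void)chan; }
    static unsigned int read_channel_status(unsigned int chan) { (void)chan; return 0; }

    static void dma_interrupt(unsigned int channels)
    {
            unsigned int status[MAX_CHANNELS];
            unsigned int chan;

            for (chan = 0; chan < channels; chan++)
                    status[chan] = read_channel_status(chan);

            for (chan = 0; chan < channels; chan++) {
                    if (status[chan] & TX_HARD_ERROR_BUMP_TC)
                            bump_dma_threshold(chan);   /* recoverable  */
                    else if (status[chan] == TX_HARD_ERROR)
                            tx_err_recover(chan);       /* full reset   */
            }
    }

    int main(void)
    {
            dma_interrupt(4);
            return 0;
    }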
3008 u32 chan = 0;
3033 for (chan = 0; chan < dma_csr_ch; chan++) {
3034 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3035 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3039 for (chan = 0; chan < rx_channels_count; chan++) {
3040 rx_q = &priv->dma_conf.rx_queue[chan];
3043 rx_q->dma_rx_phy, chan);
3049 rx_q->rx_tail_addr, chan);
3053 for (chan = 0; chan < tx_channels_count; chan++) {
3054 tx_q = &priv->dma_conf.tx_queue[chan];
3057 tx_q->dma_tx_phy, chan);
3061 tx_q->tx_tail_addr, chan);
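stmmac_init_dma_engine() (from line 3008) initializes every CSR channel with its interrupt masked, then programs each ring in two steps: the descriptor base address (dma_rx_phy/dma_tx_phy) followed by the tail pointer that tells the engine how far it may fetch. A sketch of that ordering with invented types, shown for the RX side only:

    typedef unsigned long long dma_addr_sim; /* stand-in for dma_addr_t */

    struct ring { dma_addr_sim desc_base; dma_addr_sim tail; };

    static void init_chan(unsigned int chan)                       { (void)chan; }
    static void disable_irq(unsigned int chan)                     { (void)chan; }
    static void set_desc_base(unsigned int chan, dma_addr_sim a)   { (void)chan; (void)a; }
    static void set_tail_ptr(unsigned int chan, dma_addr_sim a)    { (void)chan; (void)a; }

    static void init_dma_engine(struct ring *rx, unsigned int rx_cnt)
    {
            unsigned int chan;

            /* bring every channel up with its interrupt masked; IRQs are
             * only enabled once NAPI is ready to consume them */
            for (chan = 0; chan < rx_cnt; chan++) {
                    init_chan(chan);
                    disable_irq(chan);
            }

            for (chan = 0; chan < rx_cnt; chan++) {
                    set_desc_base(chan, rx[chan].desc_base); /* where the ring lives */
                    set_tail_ptr(chan, rx[chan].tail);       /* how far HW may fetch */
            }
    }

    int main(void)
    {
            struct ring rx[2] = { { 0x1000, 0x1FF0 }, { 0x2000, 0x2FF0 } };

            init_dma_engine(rx, 2);
            return 0;
    }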
3132 u32 chan;
3134 for (chan = 0; chan < tx_channel_count; chan++) {
3135 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3137 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3138 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3144 for (chan = 0; chan < rx_channel_count; chan++)
3145 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3152 u32 chan;
3155 for (chan = 0; chan < tx_channels_count; chan++)
3157 (priv->dma_conf.dma_tx_size - 1), chan);
3160 for (chan = 0; chan < rx_channels_count; chan++)
3162 (priv->dma_conf.dma_rx_size - 1), chan);
3217 u32 chan;
3220 chan = priv->plat->rx_queues_cfg[queue].chan;
3221 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
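Line 3220 shows that the RX queue to DMA channel mapping is itself platform data: each RX queue configuration names the channel it drains into, and stmmac_map_mtl_to_dma() programs that association into the MTL layer. A sketch of the lookup; the types and the mapper stub are hypothetical:

    #include <stdio.h>

    #define RX_QUEUES 2U

    struct rx_queue_cfg { unsigned int chan; };
    struct plat_data { struct rx_queue_cfg rx_queues_cfg[RX_QUEUES]; };

    static void map_mtl_to_dma(unsigned int queue, unsigned int chan)
    {
            printf("MTL RX queue %u -> DMA channel %u\n", queue, chan);
    }

    static void map_rx_queues(const struct plat_data *plat)
    {
            unsigned int queue;

            for (queue = 0; queue < RX_QUEUES; queue++)
                    map_mtl_to_dma(queue, plat->rx_queues_cfg[queue].chan);
    }

    int main(void)
    {
            /* a crossed mapping: queue 0 on channel 1 and vice versa */
            const struct plat_data plat = { { { 1 }, { 0 } } };

            map_rx_queues(&plat);
            return 0;
    }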
3403 u32 chan;
3500 for (chan = 0; chan < tx_cnt; chan++) {
3501 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3507 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3513 for (chan = 0; chan < rx_cnt; chan++)
3514 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3522 for (chan = 0; chan < tx_cnt; chan++) {
3523 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3526 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
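In stmmac_hw_setup() (lines 3500-3526), offloads are armed channel by channel: TSO on each TX channel, split-header (SPH) on each RX channel, and TBS only on the TX queues the platform flagged. A sketch of the conditional per-queue toggle; the flags and stubs are illustrative:

    #define TX_QUEUES 2U

    struct tx_queue_cfg { int tbs_en; };

    static void enable_tso(unsigned int chan, int on) { (void)chan; (void)on; }
    static void enable_tbs(unsigned int chan, int on) { (void)chan; (void)on; }

    static void setup_tx_features(const struct tx_queue_cfg *cfg, int tso_supported)
    {
            unsigned int chan;

            for (chan = 0; chan < TX_QUEUES; chan++) {
                    /* TSO is a MAC-wide capability but armed per channel */
                    if (tso_supported)
                            enable_tso(chan, 1);

                    /* TBS is opt-in per queue via platform data */
                    enable_tbs(chan, cfg[chan].tbs_en);
            }
    }

    int main(void)
    {
            const struct tx_queue_cfg cfg[TX_QUEUES] = { { 1 }, { 0 } };

            setup_tx_features(cfg, 1);
            return 0;
    }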
3874 int chan, bfsize, ret;
3903 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3904 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3905 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3949 u32 chan;
4014 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4015 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4064 u32 chan;
4074 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4075 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
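Every teardown path in this listing (lines 4014, 4074, 6909, 7030 and 7871) cancels the per-TX-queue coalescing hrtimer before ring state goes away, so no timer callback can fire into freed memory. A userspace sketch of the same discipline, with a flag standing in for hrtimer_cancel():

    #define TX_QUEUES 4U

    struct tx_queue { int timer_armed; };

    /* stand-in for hrtimer_cancel(): reports whether it was pending */
    static int timer_cancel(struct tx_queue *q)
    {
            int was_armed = q->timer_armed;

            q->timer_armed = 0;
            return was_armed;
    }

    static void teardown(struct tx_queue *txq, unsigned int count)
    {
            unsigned int chan;

            /* cancel every coalescing timer before freeing ring state so
             * no callback can fire into memory that is going away */
            for (chan = 0; chan < count; chan++)
                    timer_cancel(&txq[chan]);

            /* ... now it is safe to free descriptors and buffers ... */
    }

    int main(void)
    {
            struct tx_queue txq[TX_QUEUES] = { { 1 }, { 0 }, { 1 }, { 0 } };

            teardown(txq, TX_QUEUES);
            return 0;
    }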
5714 u32 chan = ch->index;
5717 rxq_stats = &priv->xstats.rxq_stats[chan];
5722 work_done = stmmac_rx(priv, budget, chan);
5727 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5741 u32 chan = ch->index;
5744 txq_stats = &priv->xstats.txq_stats[chan];
5749 work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5756 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5762 stmmac_tx_timer_arm(priv, chan);
5776 u32 chan = ch->index;
5778 rxq_stats = &priv->xstats.rxq_stats[chan];
5783 txq_stats = &priv->xstats.txq_stats[chan];
5788 tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5791 rx_done = stmmac_rx_zc(priv, budget, chan);
5809 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5815 stmmac_tx_timer_arm(priv, chan);
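The three pollers (lines 5714-5815) share one shape: perform at most budget units of work, and only when the budget was not exhausted retire NAPI and unmask the channel interrupt; the TX paths additionally re-arm the coalescing timer when packets are still pending. A sketch of the budget contract; process_rx(), napi_complete_sim() and enable_dma_irq() are stubs for stmmac_rx(), napi_complete_done() and stmmac_enable_dma_irq():

    static int process_rx(unsigned int chan, int budget)
    {
            (void)chan;
            return budget / 2; /* pretend half the budget was used */
    }

    static int napi_complete_sim(unsigned int chan) { (void)chan; return 1; }
    static void enable_dma_irq(unsigned int chan)   { (void)chan; }

    static int rx_poll(unsigned int chan, int budget)
    {
            int work_done = process_rx(chan, budget);

            /* exhausting the budget means more work is queued: return
             * budget and get polled again; otherwise retire NAPI and
             * unmask the interrupt */
            if (work_done < budget && napi_complete_sim(chan))
                    enable_dma_irq(chan);

            return work_done;
    }

    int main(void)
    {
            rx_poll(0, 64);
            return 0;
    }

In the driver the unmask happens under the channel lock taken with spin_lock_irqsave() to serialize against the interrupt path; the sketch elides that locking.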
5965 u32 chan;
5967 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5968 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6144 int chan = tx_q->queue_index;
6148 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6155 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6159 stmmac_bump_dma_threshold(priv, chan);
6161 stmmac_tx_err(priv, chan);
6171 int chan = rx_q->queue_index;
6174 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6181 stmmac_napi_check(priv, chan, DMA_DIR_RX);
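The per-queue MSI handlers (lines 6144-6181) receive a pointer to the queue itself and use container_of() to climb back to the enclosing stmmac_dma_conf without any global lookup. A compilable sketch of the same pointer arithmetic; the struct layout is invented, and a variable array index inside offsetof() is a GCC/Clang extension the driver relies on as well:

    #include <stddef.h>
    #include <stdio.h>

    /* the kernel's container_of(), reduced to offsetof() arithmetic */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    #define TX_QUEUES 4U

    struct tx_queue { unsigned int queue_index; };
    struct dma_conf { struct tx_queue tx_queue[TX_QUEUES]; };

    static void msi_tx_handler(struct tx_queue *tx_q)
    {
            unsigned int chan = tx_q->queue_index;
            /* variable index in offsetof(): GCC/Clang extension */
            struct dma_conf *conf =
                    container_of(tx_q, struct dma_conf, tx_queue[chan]);

            printf("irq for chan %u, conf at %p\n", chan, (void *)conf);
    }

    int main(void)
    {
            struct dma_conf conf = { { { 0 }, { 1 }, { 2 }, { 3 } } };

            msi_tx_handler(&conf.tx_queue[2]);
            return 0;
    }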
6901 u32 chan;
6909 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6910 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6941 u32 chan;
6961 for (chan = 0; chan < dma_csr_ch; chan++) {
6962 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6963 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6970 for (chan = 0; chan < rx_cnt; chan++) {
6971 rx_q = &priv->dma_conf.rx_queue[chan];
6974 rx_q->dma_rx_phy, chan);
6980 rx_q->rx_tail_addr, chan);
6993 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6997 for (chan = 0; chan < tx_cnt; chan++) {
6998 tx_q = &priv->dma_conf.tx_queue[chan];
7001 tx_q->dma_tx_phy, chan);
7005 tx_q->tx_tail_addr, chan);
7030 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7031 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7860 u32 chan;
7871 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7872 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);