Lines matching defs: ch (definitions and uses of the per-DMA-channel variable ch; the fragments below match the Lantiq ETOP Ethernet driver, drivers/net/ethernet/lantiq_etop.c)

97 struct ltq_etop_chan ch[MAX_DMA_CHAN];
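
Everything below dereferences ch through a handful of fields. A minimal sketch of the per-channel struct those accesses imply, assuming the usual lantiq DMA and netdevice headers; field order and anything this listing never touches are guesses:

/* Sketch only: inferred from the accesses in this listing. */
struct ltq_etop_chan {
	int idx;                           /* channel number: even = RX, odd = TX */
	int tx_free;                       /* next TX descriptor to reclaim */
	struct net_device *netdev;         /* owning net_device */
	struct napi_struct napi;           /* per-channel NAPI context */
	struct ltq_dma_channel dma;        /* .nr, .irq, .desc, .desc_base[] */
	struct sk_buff *skb[LTQ_DESC_NUM]; /* skb parked on each descriptor */
};
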
107 ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
109 struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
111 ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
112 if (!ch->skb[ch->dma.desc])
114 ch->dma.desc_base[ch->dma.desc].addr =
115 dma_map_single(&priv->pdev->dev, ch->skb[ch->dma.desc]->data,
117 ch->dma.desc_base[ch->dma.desc].addr =
118 CPHYSADDR(ch->skb[ch->dma.desc]->data);
119 ch->dma.desc_base[ch->dma.desc].ctl =
122 skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN);
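
Lines 107-122 are the RX buffer refill helper. A sketch of how they plausibly fit together; the -ENOMEM return and the exact ctl flags are assumptions, and note that the CPHYSADDR store at lines 117-118 overwrites the address produced by dma_map_single at 114-115 (the mapping call appears to be kept for its cache-maintenance side effect on this non-coherent MIPS platform):

static int ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
{
	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);

	ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
	if (!ch->skb[ch->dma.desc])
		return -ENOMEM;			/* assumed error return */
	/* map for cache maintenance ... */
	ch->dma.desc_base[ch->dma.desc].addr =
		dma_map_single(&priv->pdev->dev, ch->skb[ch->dma.desc]->data,
			       MAX_DMA_DATA_LEN, DMA_FROM_DEVICE);
	/* ... then overwritten with the MIPS physical address (lines 117-118) */
	ch->dma.desc_base[ch->dma.desc].addr =
		CPHYSADDR(ch->skb[ch->dma.desc]->data);
	/* hand the descriptor to hardware; exact flags are assumptions */
	ch->dma.desc_base[ch->dma.desc].ctl =
		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
		MAX_DMA_DATA_LEN;
	skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN);
	return 0;
}
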
127 ltq_etop_hw_receive(struct ltq_etop_chan *ch)
129 struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
130 struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
131 struct sk_buff *skb = ch->skb[ch->dma.desc];
136 if (ltq_etop_alloc_skb(ch)) {
137 netdev_err(ch->netdev,
139 ltq_dma_close(&ch->dma);
141 ch->dma.desc++;
142 ch->dma.desc %= LTQ_DESC_NUM;
146 skb->protocol = eth_type_trans(skb, ch->netdev);
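
Lines 127-146 are the per-descriptor receive path: grab the completed skb, refill the slot, advance the ring index, then push the skb up the stack. A hedged reconstruction; the length math and the netif_receive_skb hand-off are assumptions:

static void ltq_etop_hw_receive(struct ltq_etop_chan *ch)
{
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	struct sk_buff *skb = ch->skb[ch->dma.desc];
	int len = desc->ctl & LTQ_DMA_SIZE_MASK;	/* assumed length field */

	/* refill the slot first; on failure the channel is shut down */
	if (ltq_etop_alloc_skb(ch)) {
		netdev_err(ch->netdev,
			   "failed to allocate new rx buffer\n");
		ltq_dma_close(&ch->dma);
	}
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;	/* ring wrap */

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, ch->netdev);
	netif_receive_skb(skb);
}
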
153 struct ltq_etop_chan *ch = container_of(napi,
158 struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
162 ltq_etop_hw_receive(ch);
166 napi_complete_done(&ch->napi, work_done);
167 ltq_dma_ack_irq(&ch->dma);
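
Lines 153-167 belong to the RX NAPI poll. A sketch assuming the standard NAPI budget contract; the loop structure and the completion test are reconstructions around the matched lines:

static int ltq_etop_poll_rx(struct napi_struct *napi, int budget)
{
	struct ltq_etop_chan *ch = container_of(napi,
				struct ltq_etop_chan, napi);
	int work_done = 0;

	while (work_done < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		/* descriptor complete and back in CPU ownership? */
		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
			break;
		ltq_etop_hw_receive(ch);
		work_done++;
	}
	if (work_done < budget) {
		napi_complete_done(&ch->napi, work_done);
		ltq_dma_ack_irq(&ch->dma);
	}
	return work_done;
}
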
175 struct ltq_etop_chan *ch =
177 struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
179 netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
183 while ((ch->dma.desc_base[ch->tx_free].ctl &
185 dev_kfree_skb_any(ch->skb[ch->tx_free]);
186 ch->skb[ch->tx_free] = NULL;
187 memset(&ch->dma.desc_base[ch->tx_free], 0,
189 ch->tx_free++;
190 ch->tx_free %= LTQ_DESC_NUM;
196 napi_complete(&ch->napi);
197 ltq_dma_ack_irq(&ch->dma);
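
Lines 175-197 are the TX completion poll: walk tx_free through every descriptor the hardware has finished (C set, OWN clear), free the parked skb, clear the descriptor, then wake the queue. A sketch; the driver's locking is elided and the queue-wake condition is an assumption:

static int ltq_etop_poll_tx(struct napi_struct *napi, int budget)
{
	struct ltq_etop_chan *ch =
		container_of(napi, struct ltq_etop_chan, napi);
	struct netdev_queue *txq =
		netdev_get_tx_queue(ch->netdev, ch->idx >> 1); /* odd ch -> queue */

	/* reclaim everything hardware has completed */
	while ((ch->dma.desc_base[ch->tx_free].ctl &
	       (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
		dev_kfree_skb_any(ch->skb[ch->tx_free]);
		ch->skb[ch->tx_free] = NULL;
		memset(&ch->dma.desc_base[ch->tx_free], 0,
		       sizeof(struct ltq_dma_desc));
		ch->tx_free++;
		ch->tx_free %= LTQ_DESC_NUM;
	}

	if (netif_tx_queue_stopped(txq))
		netif_tx_start_queue(txq);
	napi_complete(&ch->napi);
	ltq_dma_ack_irq(&ch->dma);
	return 1;
}
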
205 int ch = irq - LTQ_DMA_CH0_INT;
207 napi_schedule(&priv->ch[ch].napi);
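
Line 205 shows the IRQ-to-channel mapping: DMA channel interrupts are contiguous from LTQ_DMA_CH0_INT, so the handler can index priv->ch[] directly and just kick NAPI. A sketch; the handler name and the IRQ_HANDLED return are assumptions:

static irqreturn_t ltq_etop_dma_irq(int irq, void *_priv)
{
	struct ltq_etop_priv *priv = _priv;
	int ch = irq - LTQ_DMA_CH0_INT;	/* IRQs are contiguous per channel */

	napi_schedule(&priv->ch[ch].napi);
	return IRQ_HANDLED;
}
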
212 ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
216 ltq_dma_free(&ch->dma);
217 if (ch->dma.irq)
218 free_irq(ch->dma.irq, priv);
219 if (IS_RX(ch->idx)) {
223 dev_kfree_skb_any(ch->skb[ch->dma.desc]);
236 ltq_etop_free_channel(dev, &priv->ch[i]);
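
Lines 212-223 tear a channel down (free the descriptor ring, release the IRQ, and for RX channels free the skbs still parked on the ring); line 236 shows the caller looping this helper over all channels. A sketch in which the loop header driving ch->dma.desc is an assumption:

static void ltq_etop_free_channel(struct net_device *dev,
				  struct ltq_etop_chan *ch)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);

	ltq_dma_free(&ch->dma);
	if (ch->dma.irq)
		free_irq(ch->dma.irq, priv);
	if (IS_RX(ch->idx)) {
		/* assumed loop header: walk the ring freeing parked skbs */
		for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
		     ch->dma.desc++)
			dev_kfree_skb_any(ch->skb[ch->dma.desc]);
	}
}
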
272 struct ltq_etop_chan *ch = &priv->ch[i];
274 ch->dma.nr = i;
275 ch->idx = ch->dma.nr;
276 ch->dma.dev = &priv->pdev->dev;
279 ltq_dma_alloc_tx(&ch->dma);
288 ltq_dma_alloc_rx(&ch->dma);
289 for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
290 ch->dma.desc++)
291 if (ltq_etop_alloc_skb(ch))
293 ch->dma.desc = 0;
302 ch->dma.irq = irq;
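
Lines 272-302 initialize each channel: number it, attach the DMA device, allocate a TX or RX ring by parity, prefill the whole RX ring via ltq_etop_alloc_skb, rewind the index, and record the IRQ. A sketch wrapped in a hypothetical helper name (the real code sits inline in the driver's hw_init); the request_irq step is elided:

static int ltq_etop_hw_init_channels(struct ltq_etop_priv *priv)
{
	int i;

	for (i = 0; i < MAX_DMA_CHAN; i++) {
		int irq = LTQ_DMA_CH0_INT + i;	/* assumed mapping, see line 205 */
		struct ltq_etop_chan *ch = &priv->ch[i];

		ch->dma.nr = i;
		ch->idx = ch->dma.nr;
		ch->dma.dev = &priv->pdev->dev;

		if (IS_TX(i)) {
			ltq_dma_alloc_tx(&ch->dma);
		} else if (IS_RX(i)) {
			ltq_dma_alloc_rx(&ch->dma);
			/* prefill the whole RX ring with mapped skbs */
			for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
			     ch->dma.desc++)
				if (ltq_etop_alloc_skb(ch))
					return -ENOMEM;
			ch->dma.desc = 0;	/* rewind to start of ring */
		}
		/* request_irq(irq, ...) elided */
		ch->dma.irq = irq;	/* kept so free_channel can free_irq() */
	}
	return 0;
}
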
441 struct ltq_etop_chan *ch = &priv->ch[i];
445 ltq_dma_open(&ch->dma);
446 ltq_dma_enable_irq(&ch->dma);
447 napi_enable(&ch->napi);
463 struct ltq_etop_chan *ch = &priv->ch[i];
467 napi_disable(&ch->napi);
468 ltq_dma_close(&ch->dma);
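
Lines 441-468 are the open/stop lifecycle: open starts DMA, unmasks the channel interrupt and enables NAPI per channel; stop reverses the order. A sketch; PHY handling, channel filtering and error paths in the real driver are elided assumptions:

static int ltq_etop_open(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < MAX_DMA_CHAN; i++) {
		struct ltq_etop_chan *ch = &priv->ch[i];

		ltq_dma_open(&ch->dma);
		ltq_dma_enable_irq(&ch->dma);
		napi_enable(&ch->napi);
	}
	netif_tx_start_all_queues(dev);
	return 0;
}

static int ltq_etop_stop(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int i;

	netif_tx_stop_all_queues(dev);
	for (i = 0; i < MAX_DMA_CHAN; i++) {
		struct ltq_etop_chan *ch = &priv->ch[i];

		napi_disable(&ch->napi);	/* reverse order of open */
		ltq_dma_close(&ch->dma);
	}
	return 0;
}
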
479 struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1];
480 struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
487 if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
495 ch->skb[ch->dma.desc] = skb;
506 ch->dma.desc++;
507 ch->dma.desc %= LTQ_DESC_NUM;
510 if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
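
Lines 479-510 are the transmit path: TX channels are the odd-numbered ones ((queue << 1) | 1), the skb is parked on the ring until poll_tx reclaims it, and ownership is flipped to hardware last. A sketch; the alignment fix-up, locking and minimum-length padding in the real driver are elided, and the SOP/EOP flags are assumptions:

static netdev_tx_t ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int queue = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
	struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1]; /* TX = odd */
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	int len = skb->len;

	/* ring full: descriptor still live or its skb not yet reclaimed */
	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	ch->skb[ch->dma.desc] = skb;	/* parked until poll_tx frees it */
	desc->addr = dma_map_single(&priv->pdev->dev, skb->data, len,
				    DMA_TO_DEVICE);
	wmb();		/* publish addr before handing ownership to hardware */
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP | len;
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;

	/* stop early if the next descriptor is still owned by hardware */
	if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
		netif_tx_stop_queue(txq);

	return NETDEV_TX_OK;
}
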
703 netif_napi_add_weight(dev, &priv->ch[i].napi,
706 netif_napi_add_weight(dev, &priv->ch[i].napi,
708 priv->ch[i].netdev = dev;
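
Lines 703-708 register a separate NAPI poll per direction at probe time. A sketch; the poll function names follow the fragments above and the weights (8 for TX, 32 for RX) are assumptions:

for (i = 0; i < MAX_DMA_CHAN; i++) {
	if (IS_TX(i))
		netif_napi_add_weight(dev, &priv->ch[i].napi,
				      ltq_etop_poll_tx, 8);
	else if (IS_RX(i))
		netif_napi_add_weight(dev, &priv->ch[i].napi,
				      ltq_etop_poll_rx, 32);
	priv->ch[i].netdev = dev;
}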