Lines Matching defs:tx_queue

133 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
248 if (likely(priv->tx_queue[i]->txcoalescing))
249 gfar_write(baddr + i, priv->tx_queue[i]->txic);
263 if (likely(priv->tx_queue[0]->txcoalescing))
264 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
289 stats->tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
290 stats->tx_packets += priv->tx_queue[i]->stats.tx_packets;
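
Lines 289-290 fold the per-queue byte and packet counters into the device-wide stats, a pattern this listing (apparently the Freescale gianfar Ethernet driver, going by the gfar_ prefix) repeats for every tx_queue[i]. The aggregation in isolation, as a standalone sketch with a hypothetical counter struct:

struct q_stats {
	unsigned long tx_bytes;
	unsigned long tx_packets;
};

/* Fold per-queue counters into the device-wide totals. */
static void sum_tx_stats(struct q_stats *total,
			 const struct q_stats *per_q, int nq)
{
	int i;

	for (i = 0; i < nq; i++) {
		total->tx_bytes   += per_q[i].tx_bytes;
		total->tx_packets += per_q[i].tx_packets;
	}
}
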
415 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
417 if (!priv->tx_queue[i])
420 priv->tx_queue[i]->tx_skbuff = NULL;
421 priv->tx_queue[i]->qindex = i;
422 priv->tx_queue[i]->dev = priv->ndev;
423 spin_lock_init(&(priv->tx_queue[i]->txlock));
449 kfree(priv->tx_queue[i]);
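
Lines 415-449 are the allocate-or-unwind pattern: each gfar_priv_tx_q is kzalloc'd and initialized in a loop, and the failure path (line 449) frees whatever was already built. A minimal standalone sketch of the same shape, assuming a hypothetical txq stand-in rather than the driver's real structure:

#include <stdlib.h>

struct txq {                 /* stand-in for struct gfar_priv_tx_q */
	int qindex;
	void **tx_skbuff;
};

/* Allocate n queues; on failure, release the ones already built. */
static int alloc_tx_queues(struct txq **q, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		q[i] = calloc(1, sizeof(*q[i]));  /* kzalloc analogue */
		if (!q[i])
			goto unwind;
		q[i]->tx_skbuff = NULL;
		q[i]->qindex = i;
	}
	return 0;

unwind:
	while (--i >= 0) {
		free(q[i]);
		q[i] = NULL;
	}
	return -1;                                /* -ENOMEM analogue */
}

Note that tx_skbuff is NULLed and qindex recorded before the loop moves on, so a partially built array is always safe to tear down.
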
561 if (!grp->tx_queue)
562 grp->tx_queue = priv->tx_queue[i];
566 priv->tx_queue[i]->grp = grp;
1062 static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1065 struct gfar_private *priv = netdev_priv(tx_queue->dev);
1068 txbdp = tx_queue->tx_bd_base;
1070 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1071 if (!tx_queue->tx_skbuff[i])
1077 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1085 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1086 tx_queue->tx_skbuff[i] = NULL;
1088 kfree(tx_queue->tx_skbuff);
1089 tx_queue->tx_skbuff = NULL;
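
free_skb_tx_queue() (lines 1062-1089) drains a ring at teardown: it skips empty slots, releases each still-queued skb (after walking its fragments per line 1077, elided below), then frees the tx_skbuff pointer array itself. The shape of that teardown as plain C with a hypothetical slot array:

#include <stdlib.h>

struct ring {
	void **slots;   /* tx_skbuff analogue: one pointer per descriptor */
	int size;       /* tx_ring_size analogue */
};

static void drain_ring(struct ring *r)
{
	int i;

	for (i = 0; i < r->size; i++) {
		if (!r->slots[i])
			continue;        /* nothing pending in this slot */
		free(r->slots[i]);       /* dev_kfree_skb_any analogue */
		r->slots[i] = NULL;
	}
	free(r->slots);                  /* then the array itself */
	r->slots = NULL;
}
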
1126 struct gfar_priv_tx_q *tx_queue = NULL;
1134 tx_queue = priv->tx_queue[i];
1135 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1136 if (tx_queue->tx_skbuff)
1137 free_skb_tx_queue(tx_queue);
1150 priv->tx_queue[0]->tx_bd_base,
1151 priv->tx_queue[0]->tx_bd_dma_base);
1286 struct gfar_priv_tx_q *tx_queue = NULL;
1293 tx_queue = priv->tx_queue[i];
1295 tx_queue->num_txbdfree = tx_queue->tx_ring_size;
1296 tx_queue->dirty_tx = tx_queue->tx_bd_base;
1297 tx_queue->cur_tx = tx_queue->tx_bd_base;
1298 tx_queue->skb_curtx = 0;
1299 tx_queue->skb_dirtytx = 0;
1302 txbdp = tx_queue->tx_bd_base;
1303 for (j = 0; j < tx_queue->tx_ring_size; j++) {
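
Lines 1286-1303 return a ring to its empty state: every descriptor is counted free again, and both ends of the ring (producer cur_tx/skb_curtx, consumer dirty_tx/skb_dirtytx) point back at the base. The invariant is easiest to see as a small state struct mirroring those fields (hypothetical names):

struct txring_state {
	int size;       /* tx_ring_size */
	int free;       /* num_txbdfree */
	int curtx;      /* skb_curtx: next slot the producer will use */
	int dirtytx;    /* skb_dirtytx: next slot the consumer will check */
};

static void txring_reset(struct txring_state *s, int size)
{
	s->size = size;
	s->free = size;   /* empty ring: producer may claim every BD */
	s->curtx = 0;
	s->dirtytx = 0;   /* producer and consumer meet at the base */
}
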
1340 struct gfar_priv_tx_q *tx_queue = NULL;
1345 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
1362 tx_queue = priv->tx_queue[i];
1363 tx_queue->tx_bd_base = vaddr;
1364 tx_queue->tx_bd_dma_base = addr;
1365 tx_queue->dev = ndev;
1367 addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
1368 vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
1384 tx_queue = priv->tx_queue[i];
1385 tx_queue->tx_skbuff =
1386 kmalloc_array(tx_queue->tx_ring_size,
1387 sizeof(*tx_queue->tx_skbuff),
1389 if (!tx_queue->tx_skbuff)
1392 for (j = 0; j < tx_queue->tx_ring_size; j++)
1393 tx_queue->tx_skbuff[j] = NULL;
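
Lines 1340-1393 size one contiguous DMA block for all the rings (total_tx_ring_size), then carve it up: each queue takes its tx_bd_base/tx_bd_dma_base at the current cursor, and both the CPU and bus addresses advance by sizeof(struct txbd8) * tx_ring_size. A standalone sketch of the carve-up, substituting malloc for dma_alloc_coherent and a hypothetical 8-byte descriptor for txbd8:

#include <stdlib.h>
#include <stdint.h>

struct desc {                    /* stand-in for struct txbd8 */
	uint32_t status_len;
	uint32_t buf_addr;
};

struct q {
	struct desc *bd_base;    /* CPU view of this queue's ring */
	uintptr_t bd_dma;        /* device view (faked here) */
	int ring_size;
};

/* Carve one backing block into per-queue descriptor rings. */
static int carve_rings(struct q *qs, int nq, int ring_size)
{
	char *vaddr = malloc((size_t)nq * ring_size * sizeof(struct desc));
	uintptr_t addr = (uintptr_t)vaddr;  /* real code gets this from the DMA API */
	int i;

	if (!vaddr)
		return -1;
	for (i = 0; i < nq; i++) {
		qs[i].ring_size = ring_size;
		qs[i].bd_base = (struct desc *)vaddr;
		qs[i].bd_dma = addr;
		vaddr += sizeof(struct desc) * ring_size;  /* advance both views */
		addr  += sizeof(struct desc) * ring_size;  /* in lockstep */
	}
	return 0;
}
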
1772 struct gfar_priv_tx_q *tx_queue = NULL;
1785 tx_queue = priv->tx_queue[rq];
1787 base = tx_queue->tx_bd_base;
1788 regs = tx_queue->grp->regs;
1821 if (nr_txbds > tx_queue->num_txbdfree) {
1830 tx_queue->stats.tx_bytes += bytes_sent;
1833 tx_queue->stats.tx_packets++;
1835 txbdp = txbdp_start = tx_queue->cur_tx;
1882 tx_queue->tx_ring_size);
1896 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1956 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
1961 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
1962 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
1964 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1971 spin_lock_bh(&tx_queue->txlock);
1973 tx_queue->num_txbdfree -= (nr_txbds);
1974 spin_unlock_bh(&tx_queue->txlock);
1979 if (!tx_queue->num_txbdfree) {
1986 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
1991 txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
1993 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2004 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
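
The transmit path (lines 1772-2004) first refuses the frame when the needed descriptor count exceeds num_txbdfree (line 1821), then queues the skb, advances skb_curtx through TX_RING_MOD_MASK, and debits num_txbdfree under txlock. The mask trick only works because the ring size is a power of two; a runnable demo of the wrap arithmetic (names are analogues, not the driver's macros):

#include <stdio.h>

#define RING_SIZE 256                     /* must be a power of two */
#define RING_MOD_MASK(size) ((size) - 1)  /* TX_RING_MOD_MASK analogue */

int main(void)
{
	unsigned int curtx = RING_SIZE - 1;   /* last slot: about to wrap */

	/* (index + 1) & (size - 1) wraps 255 -> 0 with no division */
	curtx = (curtx + 1) & RING_MOD_MASK(RING_SIZE);
	printf("wrapped index: %u\n", curtx); /* prints 0 */
	return 0;
}

Lines 1971-1974 then hold txlock only around the num_txbdfree decrement, since the cleanup path credits the same counter from another context.
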
2148 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2150 struct net_device *dev = tx_queue->dev;
2155 struct txbd8 *base = tx_queue->tx_bd_base;
2158 int tx_ring_size = tx_queue->tx_ring_size;
2162 int tqi = tx_queue->qindex;
2168 bdp = tx_queue->dirty_tx;
2169 skb_dirtytx = tx_queue->skb_dirtytx;
2171 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2234 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2240 spin_lock(&tx_queue->txlock);
2241 tx_queue->num_txbdfree += nr_txbds;
2242 spin_unlock(&tx_queue->txlock);
2246 if (tx_queue->num_txbdfree &&
2252 tx_queue->skb_dirtytx = skb_dirtytx;
2253 tx_queue->dirty_tx = bdp;
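
gfar_clean_tx_ring() (lines 2148-2253) is the consumer side: it walks forward from dirty_tx while tx_skbuff[skb_dirtytx] is non-NULL, clears each completed slot, and credits the descriptors back to num_txbdfree under the same txlock the transmit path takes. A single-threaded model of the reclaim walk (hypothetical struct; the lock and the hardware done-bit check are elided):

#include <stddef.h>

struct txring {
	void **skbuff;     /* tx_skbuff analogue */
	int size;          /* power-of-two ring size */
	int dirtytx;       /* skb_dirtytx analogue */
	int free;          /* num_txbdfree analogue */
};

/* Reclaim finished slots; returns how many descriptors were freed. */
static int reclaim(struct txring *r)
{
	int howmany = 0;

	while (r->skbuff[r->dirtytx]) {
		r->skbuff[r->dirtytx] = NULL;  /* slot back to empty */
		r->dirtytx = (r->dirtytx + 1) & (r->size - 1);
		howmany++;
	}
	r->free += howmany;  /* credit descriptors back to the producer */
	return howmany;
}
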
2640 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2649 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2650 gfar_clean_tx_ring(tx_queue);
3270 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
3271 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
3272 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
3273 priv->tx_queue[i]->txic = DEFAULT_TXIC;
3351 i, priv->tx_queue[i]->tx_ring_size);