Lines matching refs: rx_queue

109 static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
117 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
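
The check at line 117 is the standard descriptor-ring wrap: the last buffer descriptor carries a wrap flag so the controller returns to rx_bd_base after consuming it. A self-contained model of that pattern (the flag values and rxbd layout below are illustrative stand-ins, not the driver's definitions):

    #include <stdint.h>

    #define RXBD_EMPTY 0x8000u  /* descriptor owned by hardware */
    #define RXBD_WRAP  0x2000u  /* illustrative flag values */

    struct rxbd {
        uint16_t status;
        uint16_t length;
        uint32_t bufptr;
    };

    static void init_rxbd(struct rxbd *base, struct rxbd *bdp,
                          unsigned int ring_size, uint32_t buf)
    {
        uint16_t status = RXBD_EMPTY;

        /* The last descriptor in the ring tells the controller to
         * loop back to the ring base. */
        if (bdp == base + ring_size - 1)
            status |= RXBD_WRAP;

        bdp->bufptr = buf;
        bdp->status = status;
    }
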
139 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
152 gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
255 if (likely(priv->rx_queue[i]->rxcoalescing))
256 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
267 if (unlikely(priv->rx_queue[0]->rxcoalescing))
268 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
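
Lines 255-268 show two coalescing paths: with multiple queues each RX queue programs its own interrupt-coalescing register (baddr + i), while single-queue operation only honors queue 0's setting in the one shared rxic register. A hedged sketch of that dispatch, with plain stores standing in for gfar_write() and the mode test as an assumption:

    struct rxq { int rxcoalescing; unsigned int rxic; };

    static void program_rx_coalescing(struct rxq **q, int nqueues,
                                      volatile unsigned int *per_q_regs,
                                      volatile unsigned int *legacy_rxic)
    {
        if (nqueues > 1) {
            for (int i = 0; i < nqueues; i++)
                if (q[i]->rxcoalescing)
                    per_q_regs[i] = q[i]->rxic;  /* per-queue register */
        } else {
            if (q[0]->rxcoalescing)
                *legacy_rxic = q[0]->rxic;       /* single shared register */
        }
    }
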
283 stats->rx_packets += priv->rx_queue[i]->stats.rx_packets;
284 stats->rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
285 stats->rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
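
Lines 283-285 fold per-queue counters into the device-wide stats; keeping independent counters per RX queue presumably lets the hot path update them without contending on shared fields. Minimal model of the aggregation:

    struct rx_stats { unsigned long rx_packets, rx_bytes, rx_dropped; };

    static void sum_rx_stats(struct rx_stats *total,
                             const struct rx_stats *per_q, int nqueues)
    {
        for (int i = 0; i < nqueues; i++) {
            total->rx_packets += per_q[i].rx_packets;
            total->rx_bytes   += per_q[i].rx_bytes;
            total->rx_dropped += per_q[i].rx_dropped;
        }
    }
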
433 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
435 if (!priv->rx_queue[i])
438 priv->rx_queue[i]->qindex = i;
439 priv->rx_queue[i]->ndev = priv->ndev;
457 kfree(priv->rx_queue[i]);
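
Lines 433-457 allocate one gfar_priv_rx_q per queue with kzalloc and, when an allocation fails mid-loop, free everything allocated so far (the kfree at line 457 sits on that error path). A self-contained model of the unwind pattern, with calloc standing in for kzalloc:

    #include <stdlib.h>

    struct rxq { int qindex; };

    static int alloc_rx_queues(struct rxq **q, int n)
    {
        for (int i = 0; i < n; i++) {
            q[i] = calloc(1, sizeof(*q[i]));   /* kzalloc analogue */
            if (!q[i]) {
                while (--i >= 0)               /* unwind partial allocation */
                    free(q[i]);
                return -1;                     /* the driver returns -ENOMEM */
            }
            q[i]->qindex = i;
        }
        return 0;
    }
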
552 if (!grp->rx_queue)
553 grp->rx_queue = priv->rx_queue[i];
557 priv->rx_queue[i]->grp = grp;
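
Lines 552-557 wire queues to interrupt groups: the first RX queue mapped to a group becomes the group's representative (the one its NAPI poll services, see line 2609), and each queue keeps a back-pointer to its group. The same two assignments, annotated:

    if (!grp->rx_queue)                       /* first RX queue in this group */
        grp->rx_queue = priv->rx_queue[i];    /* becomes the group's queue */
    priv->rx_queue[i]->grp = grp;             /* back-pointer, used for NAPI */
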
1092 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1096 struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
1098 dev_kfree_skb(rx_queue->skb);
1100 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1101 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
1110 dma_unmap_page(rx_queue->dev, rxb->dma,
1117 kfree(rx_queue->rx_buff);
1118 rx_queue->rx_buff = NULL;
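
free_skb_rx_queue() (lines 1092-1118) tears the ring down in three steps: drop any partially assembled skb, DMA-unmap every page the hardware could still write into, then release the rx_buff bookkeeping array and NULL it so later teardown code (line 1143) can tell the ring is already freed. A simplified sketch using the kernel calls visible above; the page field and its freeing are assumptions:

    static void teardown_rx_ring(struct gfar_priv_rx_q *rx_queue)
    {
        dev_kfree_skb(rx_queue->skb);          /* safe on NULL */
        rx_queue->skb = NULL;

        for (int i = 0; i < rx_queue->rx_ring_size; i++) {
            struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];

            if (!rxb->page)                    /* assumed page field */
                continue;
            dma_unmap_page(rx_queue->dev, rxb->dma, PAGE_SIZE,
                           DMA_FROM_DEVICE);
            __free_page(rxb->page);
            rxb->page = NULL;
        }

        kfree(rx_queue->rx_buff);
        rx_queue->rx_buff = NULL;              /* marks ring as freed */
    }
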
1127 struct gfar_priv_rx_q *rx_queue = NULL;
1142 rx_queue = priv->rx_queue[i];
1143 if (rx_queue->rx_buff)
1144 free_skb_rx_queue(rx_queue);
1234 static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
1236 struct gfar_private *priv = netdev_priv(rx_queue->ndev);
1239 netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
1243 static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
1250 i = rx_queue->next_to_use;
1251 bdp = &rx_queue->rx_bd_base[i];
1252 rxb = &rx_queue->rx_buff[i];
1257 if (unlikely(!gfar_new_page(rx_queue, rxb))) {
1258 gfar_rx_alloc_err(rx_queue);
1264 gfar_init_rxbdp(rx_queue, bdp,
1271 if (unlikely(++i == rx_queue->rx_ring_size)) {
1273 bdp = rx_queue->rx_bd_base;
1274 rxb = rx_queue->rx_buff;
1278 rx_queue->next_to_use = i;
1279 rx_queue->next_to_alloc = i;
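
gfar_alloc_rx_buffs() (lines 1243-1279) refills free descriptors starting at next_to_use, bailing out if a page allocation fails and wrapping both the descriptor and buffer cursors at ring_size. The index arithmetic in isolation:

    static void refill_ring(unsigned int *next_to_use,
                            unsigned int ring_size, unsigned int alloc_cnt)
    {
        unsigned int i = *next_to_use;

        while (alloc_cnt--) {
            /* ... allocate a page and re-arm descriptor i here;
             * on allocation failure the driver stops early ... */
            if (++i == ring_size)
                i = 0;                 /* wrap back to the ring base */
        }
        *next_to_use = i;              /* publish the new cursor */
    }
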
1287 struct gfar_priv_rx_q *rx_queue = NULL;
1317 rx_queue = priv->rx_queue[i];
1319 rx_queue->next_to_clean = 0;
1320 rx_queue->next_to_use = 0;
1321 rx_queue->next_to_alloc = 0;
1326 gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
1328 rx_queue->rfbptr = rfbptr;
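
Ring bring-up (lines 1319-1328) zeroes all three cursors and then fills as many descriptors as gfar_rxbd_unused() reports refillable. Rings of this shape conventionally keep one slot empty so "full" and "empty" are distinguishable; a model consistent with how the helper is used here (the exact driver formula may differ):

    static unsigned int rxbd_unused(unsigned int next_to_clean,
                                    unsigned int next_to_use,
                                    unsigned int ring_size)
    {
        if (next_to_clean > next_to_use)
            return next_to_clean - next_to_use - 1;
        return ring_size + next_to_clean - next_to_use - 1;
    }

On a freshly reset ring (all cursors at 0) this yields ring_size - 1, i.e. everything but the reserved slot gets a buffer.
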
1341 struct gfar_priv_rx_q *rx_queue = NULL;
1349 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
1373 rx_queue = priv->rx_queue[i];
1374 rx_queue->rx_bd_base = vaddr;
1375 rx_queue->rx_bd_dma_base = addr;
1376 rx_queue->ndev = ndev;
1377 rx_queue->dev = dev;
1378 addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
1379 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
1397 rx_queue = priv->rx_queue[i];
1398 rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
1399 sizeof(*rx_queue->rx_buff),
1401 if (!rx_queue->rx_buff)
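
Lines 1349-1401 size the whole descriptor area first (total_rx_ring_size), carve one coherent DMA allocation into per-queue rings by walking a CPU pointer and a bus address in lockstep, then kcalloc each queue's rx_buff tracking array. A model of the carving step (the rxbd8 layout is illustrative):

    #include <stdint.h>

    struct rxbd8 { uint16_t status, length; uint32_t bufptr; };

    struct ring {
        struct rxbd8 *bd_base;      /* CPU view of this queue's BDs */
        uint64_t bd_dma_base;       /* device view of the same BDs */
        unsigned int ring_size;
    };

    static void carve_bd_rings(char *vaddr, uint64_t addr,
                               struct ring *rings, int nqueues)
    {
        for (int i = 0; i < nqueues; i++) {
            rings[i].bd_base = (struct rxbd8 *)vaddr;
            rings[i].bd_dma_base = addr;
            vaddr += sizeof(struct rxbd8) * rings[i].ring_size;
            addr  += sizeof(struct rxbd8) * rings[i].ring_size;
        }
    }
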
1489 struct gfar_priv_rx_q *rx_queue = NULL;
1549 rx_queue = priv->rx_queue[i];
1550 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
1551 gfar_write(rx_queue->rfbptr, bdp_dma);
2404 static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
2407 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
2416 gfar_rx_alloc_err(rx_queue);
2423 dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
2428 gfar_reuse_rx_page(rx_queue, rxb);
2431 dma_unmap_page(rx_queue->dev, rxb->dma,
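
gfar_get_next_rxbuff() (lines 2404-2431) avoids a full unmap per frame: it syncs only the received bytes for the CPU, attaches the page to the skb, and then either recycles the page back into the ring via gfar_reuse_rx_page() or, when the page cannot be reused, unmaps it for good. Simplified control flow; the reuse predicate and length mask are assumptions:

    struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
    u32 size = lstatus & 0xffff;            /* illustrative length mask */

    /* Make just the received region visible to the CPU. */
    dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma,
                                  rxb->page_offset, size, DMA_FROM_DEVICE);

    /* ... attach the page fragment to 'skb' ... */

    if (page_can_be_reused(rxb))            /* hypothetical predicate */
        gfar_reuse_rx_page(rx_queue, rxb);  /* flip offset, keep mapping */
    else
        dma_unmap_page(rx_queue->dev, rxb->dma, PAGE_SIZE,
                       DMA_FROM_DEVICE);
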
2501 static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
2504 struct net_device *ndev = rx_queue->ndev;
2508 struct sk_buff *skb = rx_queue->skb;
2509 int cleaned_cnt = gfar_rxbd_unused(rx_queue);
2513 i = rx_queue->next_to_clean;
2519 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
2523 bdp = &rx_queue->rx_bd_base[i];
2534 rx_queue->stats.rx_dropped++;
2543 skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
2550 if (unlikely(++i == rx_queue->rx_ring_size))
2553 rx_queue->next_to_clean = i;
2565 rx_queue->stats.rx_dropped++;
2575 skb_record_rx_queue(skb, rx_queue->qindex);
2580 napi_gro_receive(&rx_queue->grp->napi_rx, skb);
2586 rx_queue->skb = skb;
2588 rx_queue->stats.rx_packets += total_pkts;
2589 rx_queue->stats.rx_bytes += total_bytes;
2592 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
2596 u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
2598 gfar_write(rx_queue->rfbptr, bdp_dma);
2609 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2617 work_done = gfar_clean_rx_ring(rx_queue, budget);
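
gfar_clean_rx_ring() (lines 2501-2598), called from the NAPI poll at line 2617, is the RX fast path: it refills in batches while consuming completed descriptors up to the budget, stashes a partially received frame in rx_queue->skb across poll rounds, publishes the per-queue counters, and finally advances the free-buffer pointer register (rfbptr) so the controller knows how far it may fill. A skeleton of that loop; the helper names, batch threshold, and ownership test are stand-ins:

    int cleaned_cnt = rxbd_unused(rxq);       /* free BDs awaiting refill */
    int i = rxq->next_to_clean, howmany = 0;

    while (howmany < budget) {
        if (cleaned_cnt >= REFILL_BATCH) {    /* assumed batch size */
            alloc_rx_buffs(rxq, cleaned_cnt);
            cleaned_cnt = 0;
        }

        if (hw_owns(&rxq->rx_bd_base[i]))     /* descriptor not done yet */
            break;

        /* ... pull the buffer, build or extend 'skb'; once a frame
         * completes: skb_record_rx_queue() + napi_gro_receive() ... */

        cleaned_cnt++;
        if (++i == rxq->rx_ring_size)
            i = 0;
        howmany++;
    }
    rxq->next_to_clean = i;
    rxq->skb = skb;                           /* carry partial frame over */

    rxq->stats.rx_packets += total_pkts;
    rxq->stats.rx_bytes += total_bytes;

    if (cleaned_cnt)
        alloc_rx_buffs(rxq, cleaned_cnt);     /* final top-up */
    gfar_write(rxq->rfbptr, rxbd_dma_lastfree(rxq)); /* advance HW pointer */
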
3277 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
3278 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
3279 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
3348 i, priv->rx_queue[i]->rx_ring_size);
3426 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;