Lines matching refs: queue

187 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
190 index = macb_tx_ring_wrap(queue->bp, index);
191 index = macb_adj_dma_desc_idx(queue->bp, index);
192 return &queue->tx_ring[index];
195 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
198 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
201 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
205 offset = macb_tx_ring_wrap(queue->bp, index) *
206 macb_dma_desc_get_size(queue->bp);
208 return queue->tx_ring_dma + offset;
216 static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
218 index = macb_rx_ring_wrap(queue->bp, index);
219 index = macb_adj_dma_desc_idx(queue->bp, index);
220 return &queue->rx_ring[index];
223 static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
225 return queue->rx_buffers + queue->bp->rx_buffer_size *
226 macb_rx_ring_wrap(queue->bp, index);
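The accessor helpers above (macb_tx_desc, macb_tx_skb, macb_tx_dma, macb_rx_desc, macb_rx_buffer) all map a free-running producer/consumer index onto a ring slot before touching it. A minimal standalone sketch of that arithmetic, assuming a power-of-two ring size and an optional doubling of the slot index when wider descriptors are in use; the demo_* names are illustrative, not the driver's:

#include <stdbool.h>

/* Illustrative ring description: size is a power of two, and a flag says
 * whether each slot carries an extra descriptor word (64-bit addressing).
 */
struct demo_ring {
	unsigned int size;       /* e.g. 512 */
	bool         wide_desc;  /* descriptors use two words per slot */
};

/* Wrap a free-running index onto the ring with a single mask. */
static unsigned int demo_ring_wrap(const struct demo_ring *r, unsigned int index)
{
	return index & (r->size - 1);
}

/* Scale the wrapped index so it lands on the first word of its slot. */
static unsigned int demo_adj_desc_idx(const struct demo_ring *r, unsigned int index)
{
	return r->wide_desc ? index * 2 : index;
}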
498 struct macb_queue *queue;
501 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
502 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
505 queue_writel(queue, RBQPH,
506 upper_32_bits(queue->rx_ring_dma));
508 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
511 queue_writel(queue, TBQPH,
512 upper_32_bits(queue->tx_ring_dma));
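The loop at lines 501-512 walks every queue's register block and programs its descriptor ring base, splitting the DMA address into a low-word write and, when the controller uses 64-bit descriptor addresses, a separate high-word write. A small sketch of that split with a stand-in register writer (demo_write_reg is hypothetical, not a driver or kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a register write; here it only logs the access. */
static void demo_write_reg(unsigned int offset, uint32_t value)
{
	printf("reg[0x%03x] <= 0x%08x\n", offset, value);
}

/* Program one queue's ring base: low 32 bits always, high 32 bits only when
 * the hardware actually consumes 64-bit descriptor addresses.
 */
static void demo_set_ring_base(unsigned int lo_off, unsigned int hi_off,
                               uint64_t dma_addr, int has_64bit_dma)
{
	demo_write_reg(lo_off, (uint32_t)dma_addr);
	if (has_64bit_dma)
		demo_write_reg(hi_off, (uint32_t)(dma_addr >> 32));
}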
706 struct macb_queue *queue;
711 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
712 queue_writel(queue, IDR,
730 struct macb_queue *queue;
765 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
766 queue_writel(queue, IER,
1086 struct macb_queue *queue = container_of(work, struct macb_queue,
1089 struct macb *bp = queue->bp;
1097 (unsigned int)(queue - bp->queues),
1098 queue->tx_tail, queue->tx_head);
1100 /* Prevent the queue NAPI TX poll from running, as it calls
1106 napi_disable(&queue->napi_tx);
1109 /* Make sure nobody is trying to queue up new packets */
1122 /* Treat frames in TX queue including the ones that caused the error.
1125 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
1128 desc = macb_tx_desc(queue, tail);
1130 tx_skb = macb_tx_skb(queue, tail);
1138 tx_skb = macb_tx_skb(queue, tail);
1150 queue->stats.tx_packets++;
1152 queue->stats.tx_bytes += skb->len;
1169 /* Set end of TX queue */
1170 desc = macb_tx_desc(queue, 0);
1177 /* Reinitialize the TX desc queue */
1178 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
1181 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
1184 queue->tx_head = 0;
1185 queue->tx_tail = 0;
1189 queue_writel(queue, IER, MACB_TX_INT_FLAGS);
1199 napi_enable(&queue->napi_tx);
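macb_tx_error_task (lines 1086-1199) recovers from a transmit error: it parks the TX NAPI context, walks the ring from tx_tail to tx_head releasing every in-flight buffer, re-terminates the ring, resets the pointers, and re-enables the TX interrupts. A compact standalone sketch of that tail-to-head drain, with simplified stand-in structures rather than the driver's own:

struct demo_slot {
	void *pkt;      /* packet owned by this slot, if any */
	int   mapped;   /* nonzero while a DMA mapping is outstanding */
};

struct demo_txq {
	struct demo_slot ring[512]; /* power-of-two ring */
	unsigned int head;          /* next slot the driver will fill */
	unsigned int tail;          /* oldest slot not yet reclaimed */
};

/* Reclaim every in-flight slot after an error, then reset the pointers. */
static void demo_drain_tx_ring(struct demo_txq *q,
                               void (*unmap)(struct demo_slot *),
                               void (*complete)(void *pkt))
{
	unsigned int tail;

	for (tail = q->tail; tail != q->head; tail++) {
		struct demo_slot *slot = &q->ring[tail & 511];

		if (slot->mapped)
			unmap(slot);
		if (slot->pkt) {
			complete(slot->pkt);
			slot->pkt = NULL;
		}
	}
	q->head = 0;
	q->tail = 0;
}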
1232 static int macb_tx_complete(struct macb_queue *queue, int budget)
1234 struct macb *bp = queue->bp;
1235 u16 queue_index = queue - bp->queues;
1240 spin_lock(&queue->tx_ptr_lock);
1241 head = queue->tx_head;
1242 for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
1248 desc = macb_tx_desc(queue, tail);
1263 tx_skb = macb_tx_skb(queue, tail);
1276 queue->stats.tx_packets++;
1278 queue->stats.tx_bytes += skb->len;
1294 queue->tx_tail = tail;
1296 CIRC_CNT(queue->tx_head, queue->tx_tail,
1299 spin_unlock(&queue->tx_ptr_lock);
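macb_tx_complete (lines 1232-1299) reclaims finished descriptors under tx_ptr_lock and leans on the generic circular-buffer accounting from linux/circ_buf.h: CIRC_CNT gives the number of outstanding entries and CIRC_SPACE the number that can still be queued, both assuming a power-of-two ring. The same arithmetic as a standalone sketch:

/* Same arithmetic as the kernel's CIRC_CNT()/CIRC_SPACE() macros for a
 * power-of-two ring: head is where the producer writes next, tail is
 * where the consumer reads next.
 */
#define DEMO_RING_SIZE 512u

static unsigned int demo_circ_cnt(unsigned int head, unsigned int tail)
{
	return (head - tail) & (DEMO_RING_SIZE - 1);    /* entries in flight */
}

static unsigned int demo_circ_space(unsigned int head, unsigned int tail)
{
	/* one slot stays free so that head == tail means "empty", not "full" */
	return demo_circ_cnt(tail, head + 1);
}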
1304 static void gem_rx_refill(struct macb_queue *queue)
1309 struct macb *bp = queue->bp;
1312 while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
1314 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
1319 desc = macb_rx_desc(queue, entry);
1321 if (!queue->rx_skbuff[entry]) {
1339 queue->rx_skbuff[entry] = skb;
1357 queue->rx_prepared_head++;
1363 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
1364 queue, queue->rx_prepared_head, queue->rx_tail);
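gem_rx_refill (lines 1304-1364) is the RX producer: while there is space between rx_prepared_head and rx_tail it allocates a buffer, maps it, remembers it in rx_skbuff[] and hands the descriptor back to the hardware. A hedged standalone sketch of that top-up loop, with allocation and DMA mapping abstracted behind a callback:

struct demo_rxq {
	void        *bufs[512];        /* per-slot buffer pointers */
	unsigned int prepared_head;    /* next slot to hand to hardware */
	unsigned int tail;             /* next slot hardware will complete */
};

static unsigned int demo_rx_space(const struct demo_rxq *q)
{
	/* CIRC_SPACE-style accounting: one slot is always kept free */
	return (q->tail - q->prepared_head - 1u) & 511u;
}

/* Top up the ring until every free slot owns a freshly mapped buffer. */
static void demo_rx_refill(struct demo_rxq *q, void *(*alloc_map)(void))
{
	while (demo_rx_space(q)) {
		unsigned int entry = q->prepared_head & 511;

		if (!q->bufs[entry]) {
			void *buf = alloc_map();

			if (!buf)
				break;          /* retry on the next refill */
			q->bufs[entry] = buf;
			/* the real driver also writes the DMA address into
			 * the descriptor and clears its RX_USED bit here */
		}
		q->prepared_head++;
	}
}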
1368 static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
1374 struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
1388 static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
1391 struct macb *bp = queue->bp;
1403 entry = macb_rx_ring_wrap(bp, queue->rx_tail);
1404 desc = macb_rx_desc(queue, entry);
1420 queue->rx_tail++;
1427 queue->stats.rx_dropped++;
1430 skb = queue->rx_skbuff[entry];
1435 queue->stats.rx_dropped++;
1439 queue->rx_skbuff[entry] = NULL;
1456 queue->stats.rx_packets++;
1458 queue->stats.rx_bytes += skb->len;
1474 gem_rx_refill(queue);
1479 static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
1487 struct macb *bp = queue->bp;
1489 desc = macb_rx_desc(queue, last_frag);
1508 desc = macb_rx_desc(queue, frag);
1536 macb_rx_buffer(queue, frag),
1539 desc = macb_rx_desc(queue, frag);
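gem_rx (lines 1388-1474) completes one full buffer per frame, while macb_rx_frame (lines 1479-1539) handles the older MACB scheme in which a frame spans several small fixed-size rx_buffers and must be copied into one contiguous skb, fragment by fragment. A standalone sketch of that copy-out across fragments (the driver copies into an skb; here it is just a destination buffer):

#include <string.h>

/* Copy a received frame that spans several fixed-size ring buffers into
 * one contiguous destination, walking the fragments in ring order.
 */
static void demo_copy_frame(unsigned char *dst, size_t frame_len,
                            const unsigned char *ring_bufs, size_t buf_size,
                            unsigned int first_frag, unsigned int ring_mask)
{
	size_t offset = 0;
	unsigned int frag = first_frag;

	while (offset < frame_len) {
		size_t chunk = frame_len - offset;

		if (chunk > buf_size)
			chunk = buf_size;
		memcpy(dst + offset,
		       ring_bufs + (size_t)(frag & ring_mask) * buf_size,
		       chunk);
		offset += chunk;
		frag++;
	}
}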
1561 static inline void macb_init_rx_ring(struct macb_queue *queue)
1563 struct macb *bp = queue->bp;
1568 addr = queue->rx_buffers_dma;
1570 desc = macb_rx_desc(queue, i);
1576 queue->rx_tail = 0;
1579 static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
1582 struct macb *bp = queue->bp;
1588 for (tail = queue->rx_tail; budget > 0; tail++) {
1589 struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
1605 discard_partial_frame(queue, first_frag, tail);
1617 dropped = macb_rx_frame(queue, napi, first_frag, tail);
1634 netdev_err(bp->dev, "RX queue corruption: reset it\n");
1641 macb_init_rx_ring(queue);
1642 queue_writel(queue, RBQP, queue->rx_ring_dma);
1651 queue->rx_tail = first_frag;
1653 queue->rx_tail = tail;
1658 static bool macb_rx_pending(struct macb_queue *queue)
1660 struct macb *bp = queue->bp;
1664 entry = macb_rx_ring_wrap(bp, queue->rx_tail);
1665 desc = macb_rx_desc(queue, entry);
1675 struct macb_queue *queue = container_of(napi, struct macb_queue, napi_rx);
1676 struct macb *bp = queue->bp;
1679 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
1681 netdev_vdbg(bp->dev, "RX poll: queue = %u, work_done = %d, budget = %d\n",
1682 (unsigned int)(queue - bp->queues), work_done, budget);
1685 queue_writel(queue, IER, bp->rx_intr_mask);
1697 if (macb_rx_pending(queue)) {
1698 queue_writel(queue, IDR, bp->rx_intr_mask);
1700 queue_writel(queue, ISR, MACB_BIT(RCOMP));
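macb_rx_poll (lines 1675-1700) follows the standard race-free NAPI completion pattern: when the budget is not exhausted it completes NAPI and unmasks the RX interrupt, then re-checks the ring with macb_rx_pending(); if a frame slipped in between, it masks the interrupt again and reschedules itself. A hedged kernel-style sketch of that ordering, where struct demo_queue and the demo_* helpers are placeholders for the driver's queue context and register accessors:

#include <linux/netdevice.h>

/* Hypothetical per-queue context and helpers standing in for the driver's. */
struct demo_queue {
	struct napi_struct napi_rx;
};

static int  demo_process_rx(struct demo_queue *q, int budget);
static void demo_irq_enable(struct demo_queue *q);
static void demo_irq_disable(struct demo_queue *q);
static bool demo_work_pending(struct demo_queue *q);

static int demo_rx_poll(struct napi_struct *napi, int budget)
{
	struct demo_queue *q = container_of(napi, struct demo_queue, napi_rx);
	int work_done = demo_process_rx(q, budget);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		demo_irq_enable(q);             /* unmask the RX interrupt */

		/* A frame may have landed between the last descriptor check
		 * and the unmask; if so, mask again and repoll instead of
		 * waiting for an interrupt that may never fire.
		 */
		if (demo_work_pending(q)) {
			demo_irq_disable(q);
			napi_schedule(napi);
		}
	}
	return work_done;
}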
1711 static void macb_tx_restart(struct macb_queue *queue)
1713 struct macb *bp = queue->bp;
1716 spin_lock(&queue->tx_ptr_lock);
1718 if (queue->tx_head == queue->tx_tail)
1721 tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
1723 head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, queue->tx_head));
1733 spin_unlock(&queue->tx_ptr_lock);
1736 static bool macb_tx_complete_pending(struct macb_queue *queue)
1740 spin_lock(&queue->tx_ptr_lock);
1741 if (queue->tx_head != queue->tx_tail) {
1745 if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED))
1748 spin_unlock(&queue->tx_ptr_lock);
1754 struct macb_queue *queue = container_of(napi, struct macb_queue, napi_tx);
1755 struct macb *bp = queue->bp;
1758 work_done = macb_tx_complete(queue, budget);
1761 if (queue->txubr_pending) {
1762 queue->txubr_pending = false;
1764 macb_tx_restart(queue);
1767 netdev_vdbg(bp->dev, "TX poll: queue = %u, work_done = %d, budget = %d\n",
1768 (unsigned int)(queue - bp->queues), work_done, budget);
1771 queue_writel(queue, IER, MACB_BIT(TCOMP));
1783 if (macb_tx_complete_pending(queue)) {
1784 queue_writel(queue, IDR, MACB_BIT(TCOMP));
1786 queue_writel(queue, ISR, MACB_BIT(TCOMP));
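The TX side mirrors this: macb_tx_poll reclaims descriptors, restarts the transmitter when a TX-used-bit-read (txubr_pending) was flagged while work was still queued, and re-enables TCOMP only after macb_tx_complete_pending() confirms nothing completed in the window. A standalone sketch of that pending check, loosely modelled on testing a per-descriptor USED bit under the tail pointer:

#include <stdbool.h>

#define DEMO_TX_USED (1u << 31)     /* set by hardware when a slot is done */

struct demo_desc {
	unsigned int ctrl;
};

struct demo_txq {
	struct demo_desc ring[512];
	unsigned int head;   /* producer index */
	unsigned int tail;   /* consumer index */
};

/* True when at least one queued descriptor has been completed by the
 * hardware and is waiting to be reclaimed; the real driver performs this
 * test under tx_ptr_lock before re-enabling the completion interrupt.
 */
static bool demo_tx_complete_pending(const struct demo_txq *q)
{
	if (q->head == q->tail)
		return false;                    /* nothing in flight */
	return (q->ring[q->tail & 511].ctrl & DEMO_TX_USED) != 0;
}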
1799 struct macb_queue *queue;
1803 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1804 queue_writel(queue, IDR, bp->rx_intr_mask |
1821 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1822 queue_writel(queue, IER,
1836 struct macb_queue *queue = dev_id;
1837 struct macb *bp = queue->bp;
1840 status = queue_readl(queue, ISR);
1848 queue_writel(queue, IDR, MACB_BIT(WOL));
1850 netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
1851 (unsigned int)(queue - bp->queues),
1854 queue_writel(queue, ISR, MACB_BIT(WOL));
1865 struct macb_queue *queue = dev_id;
1866 struct macb *bp = queue->bp;
1869 status = queue_readl(queue, ISR);
1877 queue_writel(queue, IDR, GEM_BIT(WOL));
1879 netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
1880 (unsigned int)(queue - bp->queues),
1883 queue_writel(queue, ISR, GEM_BIT(WOL));
1894 struct macb_queue *queue = dev_id;
1895 struct macb *bp = queue->bp;
1899 status = queue_readl(queue, ISR);
1909 queue_writel(queue, IDR, -1);
1911 queue_writel(queue, ISR, -1);
1915 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
1916 (unsigned int)(queue - bp->queues),
1926 queue_writel(queue, IDR, bp->rx_intr_mask);
1928 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1930 if (napi_schedule_prep(&queue->napi_rx)) {
1932 __napi_schedule(&queue->napi_rx);
1938 queue_writel(queue, IDR, MACB_BIT(TCOMP));
1940 queue_writel(queue, ISR, MACB_BIT(TCOMP) |
1944 queue->txubr_pending = true;
1948 if (napi_schedule_prep(&queue->napi_tx)) {
1950 __napi_schedule(&queue->napi_tx);
1955 queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
1956 schedule_work(&queue->tx_error_task);
1959 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
1982 queue_writel(queue, ISR, MACB_BIT(RXUBR));
1993 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
2001 queue_writel(queue, ISR, MACB_BIT(HRESP));
2003 status = queue_readl(queue, ISR);
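macb_interrupt (lines 1894-2003) re-reads ISR in a loop and, for each event class, masks the source, acknowledges it where the controller is configured for write-one-to-clear, and defers the heavy lifting: RCOMP schedules the RX NAPI context, TCOMP/TXUBR the TX one, and the TX error bits punt to the tx_error_task workqueue. A hedged kernel-style sketch of that mask, acknowledge and defer shape; everything prefixed demo_ is a placeholder:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct demo_queue {
	struct napi_struct napi_rx;
	struct napi_struct napi_tx;
	struct work_struct tx_error_task;
};

static u32  demo_irq_status(struct demo_queue *q);   /* stand-in for ISR read */
static void demo_irq_mask(struct demo_queue *q, u32 bits);
static void demo_irq_ack(struct demo_queue *q, u32 bits);

#define DEMO_IRQ_RX      0x1u
#define DEMO_IRQ_TX_DONE 0x2u
#define DEMO_IRQ_TX_ERR  0x4u

static irqreturn_t demo_interrupt(int irq, void *dev_id)
{
	struct demo_queue *q = dev_id;
	u32 status = demo_irq_status(q);

	while (status) {
		if (status & DEMO_IRQ_RX) {
			/* mask + ack, then let NAPI do the work in softirq */
			demo_irq_mask(q, DEMO_IRQ_RX);
			demo_irq_ack(q, DEMO_IRQ_RX);
			if (napi_schedule_prep(&q->napi_rx))
				__napi_schedule(&q->napi_rx);
		}
		if (status & DEMO_IRQ_TX_DONE) {
			demo_irq_mask(q, DEMO_IRQ_TX_DONE);
			demo_irq_ack(q, DEMO_IRQ_TX_DONE);
			if (napi_schedule_prep(&q->napi_tx))
				__napi_schedule(&q->napi_tx);
		}
		if (status & DEMO_IRQ_TX_ERR) {
			/* too heavy for hard-IRQ context: hand to a workqueue */
			demo_irq_mask(q, DEMO_IRQ_TX_ERR);
			schedule_work(&q->tx_error_task);
			demo_irq_ack(q, DEMO_IRQ_TX_ERR);
		}
		status = demo_irq_status(q);
	}
	return IRQ_HANDLED;
}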
2018 struct macb_queue *queue;
2023 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2024 macb_interrupt(dev->irq, queue);
2030 struct macb_queue *queue,
2035 unsigned int len, entry, i, tx_head = queue->tx_head;
2062 tx_skb = &queue->tx_skb[entry];
2093 tx_skb = &queue->tx_skb[entry];
2127 * to set the end of TX queue
2132 desc = macb_tx_desc(queue, entry);
2153 tx_skb = &queue->tx_skb[entry];
2154 desc = macb_tx_desc(queue, entry);
2165 if (i == queue->tx_head) {
2185 } while (i != queue->tx_head);
2187 queue->tx_head = tx_head;
2194 for (i = queue->tx_head; i != tx_head; i++) {
2195 tx_skb = macb_tx_skb(queue, i);
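macb_tx_map (lines 2030-2195) turns one skb into a chain of descriptors, header and linear data first, then one entry per fragment, writing the control words back to front so the hardware only ever sees a complete chain; when a DMA mapping fails part-way through, everything mapped so far is unwound. A standalone sketch of that unwind-on-failure idiom:

/* Map a packet's N chunks into ring slots; on failure, release every
 * mapping taken so far so the ring is left exactly as it was found.
 */
struct demo_chunk { const void *data; unsigned int len; };

static int demo_tx_map(struct demo_chunk *chunks, unsigned int nr,
                       int (*map_one)(struct demo_chunk *),
                       void (*unmap_one)(struct demo_chunk *))
{
	unsigned int i;

	for (i = 0; i < nr; i++) {
		if (map_one(&chunks[i]) < 0)
			goto unwind;
	}
	return 0;

unwind:
	while (i--)
		unmap_one(&chunks[i]);
	return -1;
}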
2311 struct macb_queue *queue = &bp->queues[queue_index];
2338 /* only queue eth + ip headers separately for UDP */
2352 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
2374 spin_lock_bh(&queue->tx_ptr_lock);
2377 if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
2381 queue->tx_head, queue->tx_tail);
2387 if (!macb_tx_map(bp, queue, skb, hdrlen)) {
2400 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
2404 spin_unlock_bh(&queue->tx_ptr_lock);
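The transmit path (lines 2311-2404) takes tx_ptr_lock with bottom halves disabled, compares CIRC_SPACE() against the number of descriptors the frame needs, maps the skb, and stops the netdev queue once fewer than one free slot remains so the stack waits for TX completion to wake it. A standalone sketch of that flow-control decision, using the same one-slot-free accounting as above:

#include <stdbool.h>

#define DEMO_RING_SIZE 512u

struct demo_txq {
	unsigned int head;     /* producer index */
	unsigned int tail;     /* consumer index */
	bool         stopped;  /* mirrors the stopped netdev subqueue */
};

static unsigned int demo_tx_space(const struct demo_txq *q)
{
	return (q->tail - q->head - 1u) & (DEMO_RING_SIZE - 1u);
}

/* Returns true if the frame was queued; stops the queue when it becomes
 * full so the caller stops submitting until completion runs.
 */
static bool demo_xmit(struct demo_txq *q, unsigned int desc_needed)
{
	if (demo_tx_space(q) < desc_needed) {
		q->stopped = true;             /* wait for TX completion */
		return false;
	}
	q->head += desc_needed;                /* descriptors handed to HW */
	if (demo_tx_space(q) < 1)
		q->stopped = true;             /* ring is now full */
	return true;
}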
2433 struct macb_queue *queue;
2438 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2439 if (!queue->rx_skbuff)
2443 skb = queue->rx_skbuff[i];
2448 desc = macb_rx_desc(queue, i);
2457 kfree(queue->rx_skbuff);
2458 queue->rx_skbuff = NULL;
2464 struct macb_queue *queue = &bp->queues[0];
2466 if (queue->rx_buffers) {
2469 queue->rx_buffers, queue->rx_buffers_dma);
2470 queue->rx_buffers = NULL;
2476 struct macb_queue *queue;
2482 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2483 kfree(queue->tx_skb);
2484 queue->tx_skb = NULL;
2485 if (queue->tx_ring) {
2488 queue->tx_ring, queue->tx_ring_dma);
2489 queue->tx_ring = NULL;
2491 if (queue->rx_ring) {
2494 queue->rx_ring, queue->rx_ring_dma);
2495 queue->rx_ring = NULL;
2502 struct macb_queue *queue;
2506 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2508 queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
2509 if (!queue->rx_skbuff)
2514 bp->rx_ring_size, queue->rx_skbuff);
2521 struct macb_queue *queue = &bp->queues[0];
2525 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
2526 &queue->rx_buffers_dma, GFP_KERNEL);
2527 if (!queue->rx_buffers)
2532 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
2538 struct macb_queue *queue;
2542 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2544 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2545 &queue->tx_ring_dma,
2547 if (!queue->tx_ring)
2550 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
2551 q, size, (unsigned long)queue->tx_ring_dma,
2552 queue->tx_ring);
2555 queue->tx_skb = kmalloc(size, GFP_KERNEL);
2556 if (!queue->tx_skb)
2560 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2561 &queue->rx_ring_dma, GFP_KERNEL);
2562 if (!queue->rx_ring)
2566 size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
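The allocation paths (lines 2502-2566) give each queue its descriptor rings from dma_alloc_coherent(), which returns both the CPU view and the bus address later written to TBQP/RBQP, while the per-slot bookkeeping arrays are ordinary kmalloc/kzalloc allocations. A hedged kernel-style sketch of one such per-queue allocation; struct demo_queue and its field names are placeholders:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct demo_queue {
	void       *tx_ring;       /* CPU view of the descriptor ring */
	dma_addr_t  tx_ring_dma;   /* bus address programmed into the ring base */
	void      **tx_slots;      /* one tracking pointer per descriptor */
};

static int demo_alloc_queue(struct device *dev, struct demo_queue *q,
                            size_t ring_bytes, unsigned int ring_entries)
{
	q->tx_ring = dma_alloc_coherent(dev, ring_bytes,
					&q->tx_ring_dma, GFP_KERNEL);
	if (!q->tx_ring)
		return -ENOMEM;

	q->tx_slots = kcalloc(ring_entries, sizeof(*q->tx_slots), GFP_KERNEL);
	if (!q->tx_slots) {
		dma_free_coherent(dev, ring_bytes, q->tx_ring, q->tx_ring_dma);
		q->tx_ring = NULL;
		return -ENOMEM;
	}
	return 0;
}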
2580 struct macb_queue *queue;
2585 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2587 desc = macb_tx_desc(queue, i);
2592 queue->tx_head = 0;
2593 queue->tx_tail = 0;
2595 queue->rx_tail = 0;
2596 queue->rx_prepared_head = 0;
2598 gem_rx_refill(queue);
2622 struct macb_queue *queue;
2644 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2645 queue_writel(queue, IDR, -1);
2646 queue_readl(queue, ISR);
2648 queue_writel(queue, ISR, -1);
2727 struct macb_queue *queue;
2735 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2737 queue_writel(queue, RBQS, buffer_size);
2929 struct macb_queue *queue;
2949 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2950 napi_enable(&queue->napi_rx);
2951 napi_enable(&queue->napi_tx);
2976 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2977 napi_disable(&queue->napi_rx);
2978 napi_disable(&queue->napi_tx);
2989 struct macb_queue *queue;
2995 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2996 napi_disable(&queue->napi_rx);
2997 napi_disable(&queue->napi_tx);
3044 struct macb_queue *queue;
3066 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
3067 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
3139 struct macb_queue *queue;
3149 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
3575 "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
3629 "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
3969 /* bit 0 is never set but queue 0 always exists */
4078 struct macb_queue *queue;
4085 /* set the queue register mapping once for all: queue0 has a special
4086 * register mapping but we don't want to test the queue index then
4093 queue = &bp->queues[q];
4094 queue->bp = bp;
4095 spin_lock_init(&queue->tx_ptr_lock);
4096 netif_napi_add(dev, &queue->napi_rx, macb_rx_poll);
4097 netif_napi_add(dev, &queue->napi_tx, macb_tx_poll);
4099 queue->ISR = GEM_ISR(hw_q - 1);
4100 queue->IER = GEM_IER(hw_q - 1);
4101 queue->IDR = GEM_IDR(hw_q - 1);
4102 queue->IMR = GEM_IMR(hw_q - 1);
4103 queue->TBQP = GEM_TBQP(hw_q - 1);
4104 queue->RBQP = GEM_RBQP(hw_q - 1);
4105 queue->RBQS = GEM_RBQS(hw_q - 1);
4108 queue->TBQPH = GEM_TBQPH(hw_q - 1);
4109 queue->RBQPH = GEM_RBQPH(hw_q - 1);
4114 queue->ISR = MACB_ISR;
4115 queue->IER = MACB_IER;
4116 queue->IDR = MACB_IDR;
4117 queue->IMR = MACB_IMR;
4118 queue->TBQP = MACB_TBQP;
4119 queue->RBQP = MACB_RBQP;
4122 queue->TBQPH = MACB_TBQPH;
4123 queue->RBQPH = MACB_RBQPH;
4128 /* get irq: here we use the linux queue index, not the hardware
4129 * queue index. the queue irq definitions in the device tree
4131 * hardware queue mask.
4133 queue->irq = platform_get_irq(pdev, q);
4134 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
4135 IRQF_SHARED, dev->name, queue);
4139 queue->irq, err);
4143 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
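macb_init (lines 4078-4143) binds each logical queue to its hardware register bank (queue 0 keeps the legacy MACB_* offsets, the others use the GEM_*(hw_q - 1) banks), sets up its NAPI contexts and tx_error_task, and requests a separate interrupt line with devm_request_irq(), passing the queue itself as dev_id so the handler gets the right context back without any lookup. A minimal sketch of that per-queue IRQ hookup; only the kernel APIs are real, the demo_* names are placeholders:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

struct demo_queue {
	int irq;
	/* ... per-queue rings, NAPI contexts, register offsets ... */
};

static irqreturn_t demo_interrupt(int irq, void *dev_id);

/* Request one IRQ per queue; dev_id carries the queue so the shared
 * handler can service exactly the queue that raised the interrupt.
 */
static int demo_request_queue_irq(struct platform_device *pdev,
                                  struct demo_queue *q, unsigned int idx,
                                  const char *name)
{
	q->irq = platform_get_irq(pdev, idx);
	if (q->irq < 0)
		return q->irq;

	return devm_request_irq(&pdev->dev, q->irq, demo_interrupt,
				IRQF_SHARED, name, q);
}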
5214 struct macb_queue *queue;
5230 for (q = 0, queue = bp->queues; q < bp->num_queues;
5231 ++q, ++queue) {
5233 queue_writel(queue, IDR, -1);
5234 queue_readl(queue, ISR);
5236 queue_writel(queue, ISR, -1);
5239 * Enable WoL IRQ on queue 0
5273 for (q = 0, queue = bp->queues; q < bp->num_queues;
5274 ++q, ++queue) {
5275 napi_disable(&queue->napi_rx);
5276 napi_disable(&queue->napi_tx);
5306 struct macb_queue *queue;
5330 /* Clear ISR on queue 0 */
5334 /* Replace interrupt handler on queue 0 */
5357 for (q = 0, queue = bp->queues; q < bp->num_queues;
5358 ++q, ++queue) {
5359 napi_enable(&queue->napi_rx);
5360 napi_enable(&queue->napi_tx);