Lines Matching refs:ring

41 static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
46 if (!ring->mmio_base)
49 /* Suspend DMA TX ring first.
53 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
56 val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
67 dev_err(bgmac->dev, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
68 ring->mmio_base, val);
71 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
73 ring->mmio_base + BGMAC_DMA_TX_STATUS,
76 dev_warn(bgmac->dev, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
77 ring->mmio_base);
79 val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
81 dev_err(bgmac->dev, "Reset of DMA TX ring 0x%X failed\n",
82 ring->mmio_base);
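
The reset path above is a two-step pattern: first request a suspend via the TX control register, poll the status register until the engine reports suspended or disabled (or the timeout at line 67 fires), and only then clear the control register and wait for the disable to take effect. A minimal user-space sketch of the poll-with-timeout loop; reg_read() and the status values here are hypothetical stand-ins, not the real BGMAC_DMA_TX_STAT encoding:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical status encoding; the real BGMAC_DMA_TX_STAT* masks differ. */
#define DMA_TX_STAT_MASK      0x0000F000u
#define DMA_TX_STAT_DISABLED  0x00000000u
#define DMA_TX_STAT_SUSPENDED 0x00002000u

static uint32_t fake_status = DMA_TX_STAT_SUSPENDED;

static uint32_t reg_read(void) { return fake_status; } /* ~ bgmac_read() */

/* Poll until the engine reports suspended/disabled, or give up. */
static int wait_ring_idle(int max_tries)
{
    for (int i = 0; i < max_tries; i++) {
        uint32_t val = reg_read() & DMA_TX_STAT_MASK;
        if (val == DMA_TX_STAT_SUSPENDED || val == DMA_TX_STAT_DISABLED)
            return 0;   /* now safe to clear the control register */
        /* the driver would delay here between reads */
    }
    return -1;          /* timeout: log the error, reset anyway */
}

int main(void)
{
    printf("ring idle: %s\n", wait_ring_idle(10) ? "timeout" : "yes");
    return 0;
}
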
87 struct bgmac_dma_ring *ring)
91 ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
107 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
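
bgmac_dma_tx_enable is a read-modify-write of the same control register: read it, adjust bits (the lines elided from this listing tweak other fields), and write it back with the enable bit set. A sketch, with a made-up DMA_TX_ENABLE bit and a plain variable standing in for the MMIO register:

#include <stdint.h>
#include <stdio.h>

#define DMA_TX_ENABLE 0x00000001u   /* hypothetical bit position */

static uint32_t ctl_reg;            /* stands in for the MMIO register */

static void tx_enable(void)
{
    uint32_t ctl = ctl_reg;         /* bgmac_read() in the driver */
    ctl |= DMA_TX_ENABLE;           /* elided lines adjust more fields */
    ctl_reg = ctl;                  /* bgmac_write() in the driver */
}

int main(void)
{
    tx_enable();
    printf("ctl = 0x%08x\n", (unsigned)ctl_reg);
    return 0;
}
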
111 bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
123 slot = &ring->slots[i];
124 dma_desc = &ring->cpu_base[i];
132 struct bgmac_dma_ring *ring,
137 int index = ring->end % BGMAC_TX_RING_SLOTS;
138 struct bgmac_slot_info *slot = &ring->slots[index];
153 /* ring->end - ring->start will return the number of valid slots,
154 * even when ring->end overflows
156 if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
157 netdev_err(bgmac->net_dev, "TX ring is full, queue should be stopped!\n");
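
The comment at lines 153-154 is the key invariant of the TX ring: ring->start and ring->end are free-running u32 counters that are only reduced modulo BGMAC_TX_RING_SLOTS when a slot is actually indexed, so end - start yields the occupancy even after end wraps past 2^32. This works as long as the ring size is a power of two, so that 2^32 is a multiple of it. A small demonstration (the slot count here is an arbitrary stand-in):

#include <stdint.h>
#include <stdio.h>

#define TX_RING_SLOTS 128u  /* stand-in; must be a power of two */

int main(void)
{
    /* Free-running counters parked just below the u32 wrap point. */
    uint32_t start = 0xFFFFFFF0u;
    uint32_t end = start;

    for (int i = 0; i < 40; i++)
        end++;              /* wraps through 0 to 0x00000018 */

    /* Occupancy is still correct despite the overflow... */
    printf("used slots: %u\n", (unsigned)(end - start));    /* 40 */
    /* ...and slot indexing stays consistent because 2^32 is a
     * multiple of the power-of-two ring size. */
    printf("slot index: %u\n", (unsigned)(end % TX_RING_SLOTS));
    return 0;
}
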
171 bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
179 slot = &ring->slots[index];
188 bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
193 ring->end += nr_frags + 1;
197 /* Increase ring->end to point empty slot. We tell hardware the first
200 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
201 ring->index_base +
202 (ring->end % BGMAC_TX_RING_SLOTS) *
205 if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
215 int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
216 struct bgmac_slot_info *slot = &ring->slots[index];
217 u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
224 netdev_err(bgmac->net_dev, "Mapping error of skb on ring 0x%X\n",
225 ring->mmio_base);
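
The doorbell write at lines 200-202 turns the free-running end counter into the byte offset the hardware expects: reduce modulo the ring size, scale by the descriptor size (the multiplicand is elided from this listing, but the analogous RX write at line 374 shows sizeof(struct bgmac_dma_desc)), and add index_base for rings living at unaligned bus addresses. As a sketch, with a stand-in descriptor layout:

#include <stdint.h>
#include <stdio.h>

#define TX_RING_SLOTS 128u          /* stand-in for BGMAC_TX_RING_SLOTS */

struct dma_desc {                   /* stand-in layout, 16 bytes */
    uint32_t ctl0, ctl1, addr_low, addr_high;
};

/* Byte offset written to the TX index (doorbell) register. */
static uint32_t doorbell_offset(uint32_t index_base, uint32_t end)
{
    return index_base +
           (end % TX_RING_SLOTS) * (uint32_t)sizeof(struct dma_desc);
}

int main(void)
{
    /* Aligned ring: index_base is 0, offsets count from the ring start. */
    printf("0x%08x\n", (unsigned)doorbell_offset(0, 5));
    /* Unaligned ring: offsets are relative to the ring's low address. */
    printf("0x%08x\n", (unsigned)doorbell_offset(0x00001000u, 5));
    return 0;
}
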
235 static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
242 empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
244 empty_slot -= ring->index_base;
248 while (ring->start != ring->end) {
249 int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
250 struct bgmac_slot_info *slot = &ring->slots[slot_idx];
257 ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
258 ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
280 ring->start++;
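
bgmac_dma_tx_free is the consumer side: it reads the hardware's current descriptor pointer from the status register, converts it back into a slot index (the intermediate masking and scaling are elided from this listing; only the index_base subtraction at line 244 is visible), then walks ring->start forward, releasing each completed slot until it catches up. A reduced sketch of that walk:

#include <stdint.h>
#include <stdio.h>

#define TX_RING_SLOTS 128u

struct slot { int in_use; };

static struct slot slots[TX_RING_SLOTS];
static uint32_t ring_start, ring_end;   /* free-running, as in the driver */

/* Release everything the hardware is done with, up to hw_empty_slot. */
static void tx_free(uint32_t hw_empty_slot)
{
    while (ring_start != ring_end) {
        uint32_t idx = ring_start % TX_RING_SLOTS;

        if (idx == hw_empty_slot)
            break;              /* hardware hasn't consumed this one yet */
        slots[idx].in_use = 0;  /* the driver unmaps and frees the skb */
        ring_start++;
    }
}

int main(void)
{
    ring_end = 5;               /* five descriptors were posted */
    for (uint32_t i = 0; i < 5; i++)
        slots[i].in_use = 1;
    tx_free(3);                 /* hardware has completed slots 0..2 */
    printf("start=%u end=%u\n", (unsigned)ring_start, (unsigned)ring_end);
    return 0;
}
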
292 static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
294 if (!ring->mmio_base)
297 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
299 ring->mmio_base + BGMAC_DMA_RX_STATUS,
302 dev_err(bgmac->dev, "Reset of ring 0x%X RX failed\n",
303 ring->mmio_base);
307 struct bgmac_dma_ring *ring)
311 ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
330 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
368 struct bgmac_dma_ring *ring)
372 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
373 ring->index_base +
374 ring->end * sizeof(struct bgmac_dma_desc));
378 struct bgmac_dma_ring *ring, int desc_idx)
380 struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
391 dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
392 dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
396 ring->end = desc_idx;
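
Lines 391-392 show how a 64-bit bus address is stored in a descriptor: split into low and high 32-bit halves with lower_32_bits()/upper_32_bits() and converted to little-endian with cpu_to_le32(), since the hardware reads the descriptor in LE layout regardless of host endianness. A user-space equivalent using glibc's htole32()/le32toh():

#include <endian.h>     /* htole32()/le32toh(); Linux/glibc extension */
#include <stdint.h>
#include <stdio.h>

struct dma_desc {       /* stand-in; hardware reads these fields as LE */
    uint32_t ctl0, ctl1, addr_low, addr_high;
};

int main(void)
{
    uint64_t dma_addr = 0x0000000123456000ull;  /* example bus address */
    struct dma_desc d = {0};

    /* Split the 64-bit address into the two le32 descriptor fields,
     * mirroring cpu_to_le32(lower_32_bits(...)/upper_32_bits(...)). */
    d.addr_low  = htole32((uint32_t)(dma_addr & 0xFFFFFFFFu));
    d.addr_high = htole32((uint32_t)(dma_addr >> 32));

    printf("low=0x%08x high=0x%08x\n",
           (unsigned)le32toh(d.addr_low), (unsigned)le32toh(d.addr_high));
    return 0;
}
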
412 static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
418 end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
420 end_slot -= ring->index_base;
424 while (ring->start != end_slot) {
426 struct bgmac_slot_info *slot = &ring->slots[ring->start];
451 ring->start);
459 ring->start);
489 bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);
491 if (++ring->start >= BGMAC_RX_RING_SLOTS)
492 ring->start = 0;
498 bgmac_dma_rx_update_index(bgmac, ring);
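
The RX consume loop mirrors the TX reclaim, but instead of freeing slots it re-arms each descriptor (line 489) and, unlike the TX side, keeps ring->start as a real slot index with an explicit wrap at BGMAC_RX_RING_SLOTS (lines 491-492). A sketch of that loop shape; the NAPI-style budget parameter is an assumption, since the function's remaining arguments are elided from this listing:

#include <stdint.h>
#include <stdio.h>

#define RX_RING_SLOTS 512u      /* stand-in for BGMAC_RX_RING_SLOTS */

static uint32_t ring_start;     /* a real slot index on the RX side */

static void rearm_desc(uint32_t idx)    /* ~ bgmac_dma_rx_setup_desc() */
{
    (void)idx;                  /* would rewrite the addr/ctl fields */
}

/* Consume received buffers up to the hardware's end slot. */
static int rx_read(uint32_t end_slot, int budget)
{
    int handled = 0;

    while (ring_start != end_slot && handled < budget) {
        /* ...hand the buffer up the stack here... */
        rearm_desc(ring_start);
        if (++ring_start >= RX_RING_SLOTS)
            ring_start = 0;     /* explicit wrap, unlike the TX side */
        handled++;
    }
    return handled;
}

int main(void)
{
    ring_start = RX_RING_SLOTS - 2;
    /* Wraps through slots 510, 511, 0, 1, 2 -> handles 5 buffers. */
    printf("handled %d\n", rx_read(3, 16));
    return 0;
}
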
503 /* Does ring support unaligned addressing? */
505 struct bgmac_dma_ring *ring,
510 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
512 if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
516 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
518 if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
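
bgmac_dma_unaligned probes whether the DMA engine accepts ring base addresses that are not aligned to the ring size: write a value containing only low-order address bits into RINGLO and read it back; if anything sticks, unaligned bases are supported, while an aligning core masks the bits off and reads back zero. A sketch with a fake register modelling an aligning core (the mask value is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define RING_ALIGN_MASK 0x00000FFFu /* hypothetical: bits an aligning core drops */

static uint32_t ringlo;             /* fake BGMAC_DMA_*_RINGLO register */

/* Model a core that forces ring bases to alignment. */
static void reg_write(uint32_t v) { ringlo = v & ~RING_ALIGN_MASK; }
static uint32_t reg_read(void)    { return ringlo; }

/* Nonzero if the core preserves low (unaligned) address bits. */
static int dma_unaligned(void)
{
    reg_write(0x00000ABCu & RING_ALIGN_MASK);   /* low-order bits only */
    return reg_read() != 0;
}

int main(void)
{
    printf("unaligned addressing: %s\n", dma_unaligned() ? "yes" : "no");
    return 0;
}
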
526 struct bgmac_dma_ring *ring)
529 struct bgmac_dma_desc *dma_desc = ring->cpu_base;
537 slot = &ring->slots[i];
553 struct bgmac_dma_ring *ring)
560 slot = &ring->slots[i];
573 struct bgmac_dma_ring *ring,
579 if (!ring->cpu_base)
582 /* Free ring of descriptors */
584 dma_free_coherent(dma_dev, size, ring->cpu_base,
585 ring->dma_base);
615 struct bgmac_dma_ring *ring;
618 int size; /* ring size: different for Tx and Rx */
632 ring = &bgmac->tx_ring[i];
633 ring->mmio_base = ring_base[i];
635 /* Alloc ring of descriptors */
637 ring->cpu_base = dma_alloc_coherent(dma_dev, size,
638 &ring->dma_base,
640 if (!ring->cpu_base) {
641 dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n",
642 ring->mmio_base);
646 ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
648 if (ring->unaligned)
649 ring->index_base = lower_32_bits(ring->dma_base);
651 ring->index_base = 0;
657 ring = &bgmac->rx_ring[i];
658 ring->mmio_base = ring_base[i];
660 /* Alloc ring of descriptors */
662 ring->cpu_base = dma_alloc_coherent(dma_dev, size,
663 &ring->dma_base,
665 if (!ring->cpu_base) {
666 dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
667 ring->mmio_base);
671 ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
673 if (ring->unaligned)
674 ring->index_base = lower_32_bits(ring->dma_base);
676 ring->index_base = 0;
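
After each dma_alloc_coherent() the driver records index_base: on cores that passed the unaligned probe, it is the full low 32 bits of the ring's bus address, otherwise 0. That matches how hardware index values are consumed elsewhere in this listing, where index_base is subtracted from the status pointer (lines 244 and 420) and added to the doorbell offset (line 201). A compact sketch of the decision, with hypothetical struct and function names:

#include <stdint.h>
#include <stdio.h>

struct ring {
    uint64_t dma_base;      /* bus address from dma_alloc_coherent() */
    int unaligned;          /* result of the RINGLO probe */
    uint32_t index_base;    /* offset applied to hardware index values */
};

static void ring_setup(struct ring *r, uint64_t dma_base, int unaligned)
{
    r->dma_base = dma_base;
    r->unaligned = unaligned;
    /* Unaligned cores report offsets relative to the ring's low address;
     * the (uint32_t) truncation is the user-space lower_32_bits(). */
    r->index_base = unaligned ? (uint32_t)dma_base : 0;
}

int main(void)
{
    struct ring r;

    ring_setup(&r, 0x00000001FE345000ull, 1);
    printf("index_base=0x%08x\n", (unsigned)r.index_base);
    return 0;
}
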
688 struct bgmac_dma_ring *ring;
692 ring = &bgmac->tx_ring[i];
694 if (!ring->unaligned)
695 bgmac_dma_tx_enable(bgmac, ring);
696 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
697 lower_32_bits(ring->dma_base));
698 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
699 upper_32_bits(ring->dma_base));
700 if (ring->unaligned)
701 bgmac_dma_tx_enable(bgmac, ring);
703 ring->start = 0;
704 ring->end = 0; /* Points the slot that should *not* be read */
710 ring = &bgmac->rx_ring[i];
712 if (!ring->unaligned)
713 bgmac_dma_rx_enable(bgmac, ring);
714 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
715 lower_32_bits(ring->dma_base));
716 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
717 upper_32_bits(ring->dma_base));
718 if (ring->unaligned)
719 bgmac_dma_rx_enable(bgmac, ring);
721 ring->start = 0;
722 ring->end = 0;
724 err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
728 bgmac_dma_rx_setup_desc(bgmac, ring, j);
731 bgmac_dma_rx_update_index(bgmac, ring);
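
Note the ordering in the init path: the paired checks at lines 694/700 (and 712/718 for RX) enable an aligned ring before RINGLO/RINGHI are programmed, but an unaligned ring only after its base address has been written. A sketch of that control flow, with stub register helpers:

#include <stdint.h>
#include <stdio.h>

static void ring_enable(void)
{
    puts("enable");                 /* ~ bgmac_dma_tx/rx_enable() */
}

static void write_base(uint64_t base)
{
    printf("base=0x%llx\n", (unsigned long long)base);  /* RINGLO+RINGHI */
}

/* Enable before or after programming the base, depending on the core. */
static void ring_init(uint64_t dma_base, int unaligned)
{
    if (!unaligned)
        ring_enable();              /* aligned cores: enable first */
    write_base(dma_base);
    if (unaligned)
        ring_enable();              /* unaligned cores: enable last */
}

int main(void)
{
    ring_init(0x12345000ull, 0);    /* enable, then base */
    ring_init(0x12345ABCull, 1);    /* base, then enable */
    return 0;
}
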
1227 struct bgmac_dma_ring *ring;
1230 ring = &bgmac->tx_ring[0];
1231 return bgmac_dma_tx_add(bgmac, ring, skb);