Lines Matching refs:ring

72 struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
78 *meta = &(ring->meta[slot]);
79 desc = ring->descbase;
85 static void op32_fill_descriptor(struct b43_dmaring *ring,
90 struct b43_dmadesc32 *descbase = ring->descbase;
97 B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
99 addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
100 addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);
103 if (slot == ring->nr_slots - 1)
118 static void op32_poke_tx(struct b43_dmaring *ring, int slot)
120 b43_dma_write(ring, B43_DMA32_TXINDEX,
124 static void op32_tx_suspend(struct b43_dmaring *ring)
126 b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
130 static void op32_tx_resume(struct b43_dmaring *ring)
132 b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
136 static int op32_get_current_rxslot(struct b43_dmaring *ring)
140 val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
146 static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
148 b43_dma_write(ring, B43_DMA32_RXINDEX,
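
The op32_* accessors above, like the op64_* group that follows, sit behind a per-ring ops table so the rest of the driver never branches on descriptor width. A minimal userspace sketch of that vtable pattern, with simplified stand-in types (all names here are illustrative, not the driver's):

#include <stdio.h>

struct ring;

struct dma_ops {
	int  (*get_current_rxslot)(struct ring *r);
	void (*set_current_rxslot)(struct ring *r, int slot);
};

struct ring {
	const struct dma_ops *ops;	/* selected once, at ring setup */
	int rxslot;			/* stands in for the hw register */
};

static int  get32(struct ring *r)        { return r->rxslot; }
static void set32(struct ring *r, int s) { r->rxslot = s; }

/* One table per engine width; callers never test 32 vs 64 bit. */
static const struct dma_ops dma32_ops = {
	.get_current_rxslot = get32,
	.set_current_rxslot = set32,
};

int main(void)
{
	struct ring r = { .ops = &dma32_ops, .rxslot = 0 };

	r.ops->set_current_rxslot(&r, 5);
	printf("current rx slot: %d\n", r.ops->get_current_rxslot(&r));
	return 0;
}
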
164 struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
170 *meta = &(ring->meta[slot]);
171 desc = ring->descbase;
177 static void op64_fill_descriptor(struct b43_dmaring *ring,
182 struct b43_dmadesc64 *descbase = ring->descbase;
189 B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
191 addrlo = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
192 addrhi = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_HIGH);
193 addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);
195 if (slot == ring->nr_slots - 1)
213 static void op64_poke_tx(struct b43_dmaring *ring, int slot)
215 b43_dma_write(ring, B43_DMA64_TXINDEX,
219 static void op64_tx_suspend(struct b43_dmaring *ring)
221 b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
225 static void op64_tx_resume(struct b43_dmaring *ring)
227 b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
231 static int op64_get_current_rxslot(struct b43_dmaring *ring)
235 val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
241 static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
243 b43_dma_write(ring, B43_DMA64_RXINDEX,
257 static inline int free_slots(struct b43_dmaring *ring)
259 return (ring->nr_slots - ring->used_slots);
262 static inline int next_slot(struct b43_dmaring *ring, int slot)
264 B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
265 if (slot == ring->nr_slots - 1)
270 static inline int prev_slot(struct b43_dmaring *ring, int slot)
272 B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
274 return ring->nr_slots - 1;
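
Slot indices wrap modulo nr_slots. Note that next_slot() also accepts -1, because an empty TX ring starts with current_slot == -1 (line 866 below). A standalone sketch of the wraparound arithmetic:

#include <assert.h>
#include <stdio.h>

static int next_slot(int nr_slots, int slot)
{
	assert(slot >= -1 && slot <= nr_slots - 1);
	if (slot == nr_slots - 1)
		return 0;		/* wrap back to the first slot */
	return slot + 1;
}

static int prev_slot(int nr_slots, int slot)
{
	assert(slot >= 0 && slot <= nr_slots - 1);
	if (slot == 0)
		return nr_slots - 1;	/* wrap back to the last slot */
	return slot - 1;
}

int main(void)
{
	printf("%d %d\n", next_slot(4, 3), prev_slot(4, 0)); /* prints: 0 3 */
	return 0;
}
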
279 static void update_max_used_slots(struct b43_dmaring *ring,
282 if (current_used_slots <= ring->max_used_slots)
284 ring->max_used_slots = current_used_slots;
285 if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
286 b43dbg(ring->dev->wl,
287 "max_used_slots increased to %d on %s ring %d\n",
288 ring->max_used_slots,
289 ring->tx ? "TX" : "RX", ring->index);
294 void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
300 static inline int request_slot(struct b43_dmaring *ring)
304 B43_WARN_ON(!ring->tx);
305 B43_WARN_ON(ring->stopped);
306 B43_WARN_ON(free_slots(ring) == 0);
308 slot = next_slot(ring, ring->current_slot);
309 ring->current_slot = slot;
310 ring->used_slots++;
312 update_max_used_slots(ring, ring->used_slots);
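
request_slot() is the TX-side producer step in full: advance current_slot with wraparound, count the slot as used, and update the debug high-water mark. A runnable model of that bookkeeping, assuming a simplified ring struct that keeps only the fields named in the listing:

#include <assert.h>
#include <stdio.h>

struct ring {
	int nr_slots;
	int used_slots;
	int current_slot;	/* -1 means "empty ring, nothing queued" */
	int max_used_slots;	/* debug high-water mark */
};

static int request_slot(struct ring *r)
{
	int slot;

	assert(r->nr_slots - r->used_slots > 0);	/* free_slots() > 0 */
	slot = (r->current_slot == r->nr_slots - 1) ? 0 : r->current_slot + 1;
	r->current_slot = slot;
	r->used_slots++;
	if (r->used_slots > r->max_used_slots)
		r->max_used_slots = r->used_slots;
	return slot;
}

int main(void)
{
	struct ring r = { .nr_slots = 4, .current_slot = -1 };
	int first = request_slot(&r);
	int second = request_slot(&r);

	printf("%d %d\n", first, second);	/* prints: 0 1 */
	return 0;
}
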
347 dma_addr_t map_descbuffer(struct b43_dmaring *ring,
353 dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
356 dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
364 void unmap_descbuffer(struct b43_dmaring *ring,
368 dma_unmap_single(ring->dev->dev->dma_dev,
371 dma_unmap_single(ring->dev->dev->dma_dev,
377 void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
380 B43_WARN_ON(ring->tx);
381 dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
386 void sync_descbuffer_for_device(struct b43_dmaring *ring,
389 B43_WARN_ON(ring->tx);
390 dma_sync_single_for_device(ring->dev->dev->dma_dev,
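
map_descbuffer(), unmap_descbuffer() and the two sync helpers wrap the kernel's streaming-DMA API. The direction is DMA_TO_DEVICE for TX buffers (the device reads them) and DMA_FROM_DEVICE for RX buffers (the device writes them), and an RX buffer must be synced back to the CPU before it is parsed. A kernel-context sketch of the pattern; dev, buf and len are placeholders:

#include <linux/dma-mapping.h>

/* TX: CPU fills the buffer, device reads it -> DMA_TO_DEVICE.
 * RX: device fills the buffer, CPU reads it -> DMA_FROM_DEVICE. */
static dma_addr_t map_buf(struct device *dev, void *buf, size_t len, bool tx)
{
	return dma_map_single(dev, buf, len,
			      tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}

static bool map_failed(struct device *dev, dma_addr_t addr)
{
	return dma_mapping_error(dev, addr) != 0;
}

/* Hand ownership of a completed RX buffer back to the CPU before
 * reading it: */
static void rx_buf_to_cpu(struct device *dev, dma_addr_t addr, size_t len)
{
	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
}
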
395 void free_descriptor_buffer(struct b43_dmaring *ring,
399 if (ring->tx)
400 ieee80211_free_txskb(ring->dev->wl->hw, meta->skb);
407 static int alloc_ringmemory(struct b43_dmaring *ring)
416 * more than 256 slots for a ring.
418 u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
421 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
422 ring_mem_size, &(ring->dmabase),
424 if (!ring->descbase)
430 static void free_ringmemory(struct b43_dmaring *ring)
432 u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
434 dma_free_coherent(ring->dev->dev->dma_dev, ring_mem_size,
435 ring->descbase, ring->dmabase);
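
alloc_ringmemory() uses a coherent allocation: one block of descriptor memory that the CPU and the device see consistently, returned both as a kernel virtual address (descbase) and a bus address (dmabase). A kernel-context sketch, assuming a simplified ring struct:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct my_ring {			/* simplified stand-in */
	void *descbase;			/* CPU view of the descriptors */
	dma_addr_t dmabase;		/* device view (bus address) */
};

static int my_alloc_ringmemory(struct device *dev, struct my_ring *ring,
			       size_t ring_mem_size)
{
	ring->descbase = dma_alloc_coherent(dev, ring_mem_size,
					    &ring->dmabase, GFP_KERNEL);
	if (!ring->descbase)
		return -ENOMEM;
	return 0;
}

static void my_free_ringmemory(struct device *dev, struct my_ring *ring,
			       size_t ring_mem_size)
{
	dma_free_coherent(dev, ring_mem_size, ring->descbase, ring->dmabase);
}
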
538 static bool b43_dma_mapping_error(struct b43_dmaring *ring,
542 if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
545 switch (ring->type) {
565 unmap_descbuffer(ring, addr, buffersize, dma_to_device);
570 static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
572 unsigned char *f = skb->data + ring->frameoffset;
577 static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
587 B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
588 frame = skb->data + ring->frameoffset;
592 static int setup_rx_descbuffer(struct b43_dmaring *ring,
599 B43_WARN_ON(ring->tx);
601 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
604 b43_poison_rx_buffer(ring, skb);
605 dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
606 if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
612 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
615 b43_poison_rx_buffer(ring, skb);
616 dmaaddr = map_descbuffer(ring, skb->data,
617 ring->rx_buffersize, 0);
618 if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
619 b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
627 ring->ops->fill_descriptor(ring, desc, dmaaddr,
628 ring->rx_buffersize, 0, 0, 0);
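
The poison helpers write a known pattern into each RX buffer before the device owns it, so the RX path can detect a buffer that the hardware reported as done but never actually filled. A standalone sketch of the idea; the pattern bytes here are arbitrary, not the driver's:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define POISON_LEN 4
static const unsigned char poison[POISON_LEN] = { 0xde, 0xad, 0xbe, 0xef };

static void poison_buf(unsigned char *frame)
{
	memcpy(frame, poison, POISON_LEN);
}

static bool buf_is_poisoned(const unsigned char *frame)
{
	return memcmp(frame, poison, POISON_LEN) == 0;
}

int main(void)
{
	unsigned char frame[16];

	poison_buf(frame);
	/* Still poisoned: the "device" never wrote it; drop the buffer. */
	printf("%d\n", buf_is_poisoned(frame));	/* prints: 1 */
	return 0;
}
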
634 * This is used for an RX ring only.
636 static int alloc_initial_descbuffers(struct b43_dmaring *ring)
642 for (i = 0; i < ring->nr_slots; i++) {
643 desc = ring->ops->idx2desc(ring, i, &meta);
645 err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
647 b43err(ring->dev->wl,
653 ring->used_slots = ring->nr_slots;
660 desc = ring->ops->idx2desc(ring, i, &meta);
662 unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
669 * Reset the controller, write the ring bus address
672 static int dmacontroller_setup(struct b43_dmaring *ring)
677 bool parity = ring->dev->dma.parity;
681 if (ring->tx) {
682 if (ring->type == B43_DMA_64BIT) {
683 u64 ringbase = (u64) (ring->dmabase);
684 addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
685 addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
686 addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);
693 b43_dma_write(ring, B43_DMA64_TXCTL, value);
694 b43_dma_write(ring, B43_DMA64_TXRINGLO, addrlo);
695 b43_dma_write(ring, B43_DMA64_TXRINGHI, addrhi);
697 u32 ringbase = (u32) (ring->dmabase);
698 addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
699 addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
706 b43_dma_write(ring, B43_DMA32_TXCTL, value);
707 b43_dma_write(ring, B43_DMA32_TXRING, addrlo);
710 err = alloc_initial_descbuffers(ring);
713 if (ring->type == B43_DMA_64BIT) {
714 u64 ringbase = (u64) (ring->dmabase);
715 addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
716 addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
717 addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);
719 value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
725 b43_dma_write(ring, B43_DMA64_RXCTL, value);
726 b43_dma_write(ring, B43_DMA64_RXRINGLO, addrlo);
727 b43_dma_write(ring, B43_DMA64_RXRINGHI, addrhi);
728 b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
731 u32 ringbase = (u32) (ring->dmabase);
732 addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
733 addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
735 value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
741 b43_dma_write(ring, B43_DMA32_RXCTL, value);
742 b43_dma_write(ring, B43_DMA32_RXRING, addrlo);
743 b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
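
For 64-bit engines the ring bus address is split into low and high 32-bit halves before being written to the RINGLO/RINGHI registers; the addrext routing bits and the control flags are hardware-specific and omitted here. A standalone sketch of the split:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t ringbase = 0x0000001234567000ULL;	/* example bus address */
	uint32_t addrlo = (uint32_t)(ringbase & 0xffffffffULL);
	uint32_t addrhi = (uint32_t)(ringbase >> 32);

	printf("lo=0x%08" PRIx32 " hi=0x%08" PRIx32 "\n", addrlo, addrhi);
	return 0;
}
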
753 static void dmacontroller_cleanup(struct b43_dmaring *ring)
755 if (ring->tx) {
756 b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
757 ring->type);
758 if (ring->type == B43_DMA_64BIT) {
759 b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
760 b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
762 b43_dma_write(ring, B43_DMA32_TXRING, 0);
764 b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
765 ring->type);
766 if (ring->type == B43_DMA_64BIT) {
767 b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
768 b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
770 b43_dma_write(ring, B43_DMA32_RXRING, 0);
774 static void free_all_descbuffers(struct b43_dmaring *ring)
779 if (!ring->used_slots)
781 for (i = 0; i < ring->nr_slots; i++) {
783 ring->ops->idx2desc(ring, i, &meta);
786 B43_WARN_ON(!ring->tx);
789 if (ring->tx) {
790 unmap_descbuffer(ring, meta->dmaaddr,
793 unmap_descbuffer(ring, meta->dmaaddr,
794 ring->rx_buffersize, 0);
796 free_descriptor_buffer(ring, meta);
837 struct b43_dmaring *ring;
841 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
842 if (!ring)
845 ring->nr_slots = B43_RXRING_SLOTS;
847 ring->nr_slots = B43_TXRING_SLOTS;
849 ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
851 if (!ring->meta)
853 for (i = 0; i < ring->nr_slots; i++)
854 ring->meta[i].skb = B43_DMA_PTR_POISON;
856 ring->type = type;
857 ring->dev = dev;
858 ring->mmio_base = b43_dmacontroller_base(type, controller_index);
859 ring->index = controller_index;
861 ring->ops = &dma64_ops;
863 ring->ops = &dma32_ops;
865 ring->tx = true;
866 ring->current_slot = -1;
868 if (ring->index == 0) {
871 ring->rx_buffersize = B43_DMA0_RX_FW598_BUFSIZE;
872 ring->frameoffset = B43_DMA0_RX_FW598_FO;
876 ring->rx_buffersize = B43_DMA0_RX_FW351_BUFSIZE;
877 ring->frameoffset = B43_DMA0_RX_FW351_FO;
884 ring->last_injected_overflow = jiffies;
891 ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
894 if (!ring->txhdr_cache)
899 ring->txhdr_cache,
903 if (b43_dma_mapping_error(ring, dma_test,
906 kfree(ring->txhdr_cache);
907 ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
910 if (!ring->txhdr_cache)
914 ring->txhdr_cache,
918 if (b43_dma_mapping_error(ring, dma_test,
932 err = alloc_ringmemory(ring);
935 err = dmacontroller_setup(ring);
940 return ring;
943 free_ringmemory(ring);
945 kfree(ring->txhdr_cache);
947 kfree(ring->meta);
949 kfree(ring);
950 ring = NULL;
966 static void b43_destroy_dmaring(struct b43_dmaring *ring,
969 if (!ring)
975 u64 failed_packets = ring->nr_failed_tx_packets;
976 u64 succeed_packets = ring->nr_succeed_tx_packets;
983 average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);
985 b43dbg(ring->dev->wl, "DMA-%u %s: "
988 (unsigned int)(ring->type), ringname,
989 ring->max_used_slots,
990 ring->nr_slots,
1003 dmacontroller_cleanup(ring);
1004 free_all_descbuffers(ring);
1005 free_ringmemory(ring);
1007 kfree(ring->txhdr_cache);
1008 kfree(ring->meta);
1009 kfree(ring);
1012 #define destroy_ring(dma, ring) do { \
1013 b43_destroy_dmaring((dma)->ring, __stringify(ring)); \
1014 (dma)->ring = NULL; \
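
The destroy_ring() macro uses __stringify() so a single argument yields both the struct member and its name for log output. A userspace equivalent of the pattern (the destroy() helper is a stand-in for b43_destroy_dmaring()):

#include <stdio.h>

#define __stringify_1(x) #x
#define __stringify(x)   __stringify_1(x)

struct dma { void *tx_ring_mcast; };

static void destroy(void *ring, const char *name)
{
	printf("destroying %s (%p)\n", name, ring);
}

#define destroy_ring(dma, ring) do {			\
	destroy((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;				\
} while (0)

int main(void)
{
	struct dma d = { .tx_ring_mcast = &d };

	destroy_ring(&d, tx_ring_mcast);	/* logs "tx_ring_mcast" */
	return 0;
}
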
1113 /* No support for the TX status DMA ring. */
1136 static u16 generate_cookie(struct b43_dmaring *ring, int slot)
1148 cookie = (((u16)ring->index + 1) << 12);
1160 struct b43_dmaring *ring = NULL;
1164 ring = dma->tx_ring_AC_BK;
1167 ring = dma->tx_ring_AC_BE;
1170 ring = dma->tx_ring_AC_VI;
1173 ring = dma->tx_ring_AC_VO;
1176 ring = dma->tx_ring_mcast;
1180 if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
1186 return ring;
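
From line 1148 the cookie layout can be read off: the upper four bits carry ring index + 1 (so zero never looks like a valid cookie) and the low 12 bits carry the slot. A standalone sketch of the encode/decode pair; any detail beyond what the listing shows is an assumption:

#include <stdint.h>
#include <stdio.h>

static uint16_t make_cookie(int ring_index, int slot)
{
	return (uint16_t)(((ring_index + 1) << 12) | (slot & 0x0fff));
}

static void parse_cookie(uint16_t cookie, int *ring_index, int *slot)
{
	*ring_index = (cookie >> 12) - 1;
	*slot = cookie & 0x0fff;
}

int main(void)
{
	int ring, slot;

	parse_cookie(make_cookie(2, 37), &ring, &slot);
	printf("ring=%d slot=%d\n", ring, slot);	/* ring=2 slot=37 */
	return 0;
}
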
1189 static int dma_tx_fragment(struct b43_dmaring *ring,
1192 const struct b43_dma_ops *ops = ring->ops;
1202 size_t hdrsize = b43_txhdr_size(ring->dev);
1209 old_top_slot = ring->current_slot;
1210 old_used_slots = ring->used_slots;
1213 slot = request_slot(ring);
1214 desc = ops->idx2desc(ring, slot, &meta_hdr);
1217 header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
1218 cookie = generate_cookie(ring, slot);
1219 err = b43_generate_txhdr(ring->dev, header,
1222 ring->current_slot = old_top_slot;
1223 ring->used_slots = old_used_slots;
1227 meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
1229 if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
1230 ring->current_slot = old_top_slot;
1231 ring->used_slots = old_used_slots;
1234 ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
1238 slot = request_slot(ring);
1239 desc = ops->idx2desc(ring, slot, &meta);
1246 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1248 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
1252 ring->current_slot = old_top_slot;
1253 ring->used_slots = old_used_slots;
1258 meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
1259 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
1262 ring->current_slot = old_top_slot;
1263 ring->used_slots = old_used_slots;
1269 ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);
1274 b43_shm_write16(ring->dev, B43_SHM_SHARED,
1279 ops->poke_tx(ring, next_slot(ring, slot));
1283 unmap_descbuffer(ring, meta_hdr->dmaaddr,
1288 static inline int should_inject_overflow(struct b43_dmaring *ring)
1291 if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
1296 next_overflow = ring->last_injected_overflow + HZ;
1298 ring->last_injected_overflow = jiffies;
1299 b43dbg(ring->dev->wl,
1300 "Injecting TX ring overflow on "
1301 "DMA controller %d\n", ring->index);
1313 struct b43_dmaring *ring;
1322 ring = dev->dma.tx_ring_AC_VO;
1325 ring = dev->dma.tx_ring_AC_VI;
1328 ring = dev->dma.tx_ring_AC_BE;
1331 ring = dev->dma.tx_ring_AC_BK;
1335 ring = dev->dma.tx_ring_AC_BE;
1337 return ring;
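
select_ring_by_priority() maps the 802.11 QoS queue priorities onto the four access-category rings, falling back to best effort for anything unexpected. A standalone sketch of the mapping seen in lines 1322-1335; the case values are assumptions based on mac80211's usual queue numbering, since the listing shows only the assignments:

#include <stdio.h>

enum ac { AC_BK, AC_BE, AC_VI, AC_VO };	/* background .. voice */

static enum ac ring_for_priority(int queue_priority)
{
	switch (queue_priority) {
	case 0:  return AC_VO;	/* voice: most latency-sensitive */
	case 1:  return AC_VI;	/* video */
	case 2:  return AC_BE;	/* best effort */
	case 3:  return AC_BK;	/* background */
	default: return AC_BE;	/* unknown: fall back to best effort */
	}
}

int main(void)
{
	printf("prio 0 -> AC %d\n", ring_for_priority(0)); /* 3 == AC_VO */
	return 0;
}
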
1342 struct b43_dmaring *ring;
1349 /* Frames on the multicast ring are sent after the DTIM */
1350 ring = dev->dma.tx_ring_mcast;
1356 ring = select_ring_by_priority(
1360 B43_WARN_ON(!ring->tx);
1362 if (unlikely(ring->stopped)) {
1373 if (WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME)) {
1381 /* Assign the queue number to the ring (if not already done before)
1382 * so TX status handling can use it. The queue-to-ring mapping is
1384 ring->queue_prio = skb_get_queue_mapping(skb);
1386 err = dma_tx_fragment(ring, skb);
1398 if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
1399 should_inject_overflow(ring)) {
1400 /* This TX ring is full. */
1404 ring->stopped = true;
1406 b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
1418 struct b43_dmaring *ring;
1427 ring = parse_cookie(dev, status->cookie, &slot);
1428 if (unlikely(!ring))
1430 B43_WARN_ON(!ring->tx);
1432 /* Sanity check: TX packets are processed in-order on one ring.
1435 firstused = ring->current_slot - ring->used_slots + 1;
1437 firstused = ring->nr_slots + firstused;
1444 if (slot == next_slot(ring, next_slot(ring, firstused))) {
1453 "Skip on DMA ring %d slot %d.\n",
1454 ring->index, slot);
1464 "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
1465 ring->index, firstused, slot);
1472 ops = ring->ops;
1474 B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
1476 ops->idx2desc(ring, slot, &meta);
1480 "on ring %d\n",
1481 slot, firstused, ring->index);
1489 unmap_descbuffer(ring, meta->dmaaddr,
1494 unmap_descbuffer(ring, meta->dmaaddr,
1506 "at slot %d (first=%d) on ring %d\n",
1507 slot, firstused, ring->index);
1529 ring->nr_succeed_tx_packets++;
1531 ring->nr_failed_tx_packets++;
1532 ring->nr_total_packet_tries += status->frame_count;
1545 "at slot %d (first=%d) on ring %d\n",
1546 slot, firstused, ring->index);
1552 ring->used_slots--;
1559 slot = next_slot(ring, slot);
1563 if (ring->stopped) {
1564 B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
1565 ring->stopped = false;
1568 if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
1569 dev->wl->tx_queue_stopped[ring->queue_prio] = false;
1573 b43_wake_queue(dev, ring->queue_prio);
1575 b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
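
The TX-status path recovers the oldest in-flight slot as current_slot - used_slots + 1, adding nr_slots when the subtraction wraps below zero (lines 1435-1437). A standalone check of that arithmetic:

#include <stdio.h>

static int first_used_slot(int nr_slots, int current_slot, int used_slots)
{
	int firstused = current_slot - used_slots + 1;

	if (firstused < 0)
		firstused += nr_slots;	/* wrapped past slot 0 */
	return firstused;
}

int main(void)
{
	/* 4-slot ring, newest frame in slot 1, three slots in flight:
	 * the oldest in-flight frame sits in slot 3. */
	printf("%d\n", first_used_slot(4, 1, 3));	/* prints: 3 */
	return 0;
}
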
1582 static void dma_rx(struct b43_dmaring *ring, int *slot)
1584 const struct b43_dma_ops *ops = ring->ops;
1593 desc = ops->idx2desc(ring, *slot, &meta);
1595 sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
1613 if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
1616 b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
1620 if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
1630 desc = ops->idx2desc(ring, *slot, &meta);
1632 b43_poison_rx_buffer(ring, meta->skb);
1633 sync_descbuffer_for_device(ring, meta->dmaaddr,
1634 ring->rx_buffersize);
1635 *slot = next_slot(ring, *slot);
1637 tmp -= ring->rx_buffersize;
1641 b43err(ring->dev->wl, "DMA RX buffer too small "
1643 len, ring->rx_buffersize, cnt);
1648 err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
1650 b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
1654 unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
1655 skb_put(skb, len + ring->frameoffset);
1656 skb_pull(skb, ring->frameoffset);
1658 b43_rx(ring->dev, skb, rxhdr);
1664 b43_poison_rx_buffer(ring, skb);
1665 sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
1668 void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
1672 B43_WARN_ON(ring->tx);
1682 current_slot = ring->ops->get_current_rxslot(ring);
1683 previous_slot = prev_slot(ring, current_slot);
1684 ring->ops->set_current_rxslot(ring, previous_slot);
1687 void b43_dma_rx(struct b43_dmaring *ring)
1689 const struct b43_dma_ops *ops = ring->ops;
1693 B43_WARN_ON(ring->tx);
1694 current_slot = ops->get_current_rxslot(ring);
1695 B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));
1697 slot = ring->current_slot;
1698 for (; slot != current_slot; slot = next_slot(ring, slot)) {
1699 dma_rx(ring, &slot);
1700 update_max_used_slots(ring, ++used_slots);
1703 ops->set_current_rxslot(ring, slot);
1704 ring->current_slot = slot;
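
b43_dma_rx() is a classic consumer loop: walk from the driver's saved position up to, but not including, the slot the hardware reports, then write the new position back to the device (lines 1697-1704). A standalone sketch of the loop shape:

#include <stdio.h>

static int next_slot(int nr_slots, int slot)
{
	return slot == nr_slots - 1 ? 0 : slot + 1;
}

int main(void)
{
	int nr_slots = 8;
	int slot = 6;		/* driver's saved current_slot */
	int hw_slot = 2;	/* slot reported by the device (wrapped) */

	for (; slot != hw_slot; slot = next_slot(nr_slots, slot))
		printf("process RX slot %d\n", slot);	/* 6, 7, 0, 1 */

	/* Here the driver writes `slot` back as the new RX index. */
	return 0;
}
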
1707 static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
1709 B43_WARN_ON(!ring->tx);
1710 ring->ops->tx_suspend(ring);
1713 static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
1715 B43_WARN_ON(!ring->tx);
1716 ring->ops->tx_resume(ring);