Lines matching refs: ring (b43legacy DMA code, dma.c)

32 struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
38 *meta = &(ring->meta[slot]);
39 desc = ring->descbase;
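
The op32_idx2desc lines above pair a slot index with both its hardware descriptor and its driver-side metadata. A sketch of how the whole helper likely reads, reconstructed from the matched lines (the local variable and return are assumptions):

static struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
                                                 int slot,
                                                 struct b43legacy_dmadesc_meta **meta)
{
        struct b43legacy_dmadesc32 *desc;

        *meta = &(ring->meta[slot]);    /* per-slot bookkeeping: skb, dmaaddr, flags */
        desc = ring->descbase;          /* coherent array of hardware descriptors */
        desc = &(desc[slot]);

        return desc;
}
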
45 static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
50 struct b43legacy_dmadesc32 *descbase = ring->descbase;
57 B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
62 addr |= ring->dev->dma.translation;
63 ctl = (bufsize - ring->frameoffset)
65 if (slot == ring->nr_slots - 1)
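
op32_fill_descriptor assembles the two 32-bit words of a descriptor: the DMA address gets the SSB translation bits from ring->dev->dma.translation OR'd into it, and the control word carries the usable byte count plus frame-start/frame-end/IRQ flags, with the last slot also flagged as table end so the engine wraps back to slot 0. A sketch of the body under those assumptions; the B43legacy_DMA32_DCTL_* and SSB_DMA_TRANSLATION_* macro names outside the matched lines are assumed:

        slot = (int)(desc - descbase);
        B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

        addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
        addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
                  >> SSB_DMA_TRANSLATION_SHIFT;
        addr |= ring->dev->dma.translation;             /* bus address window for the core */
        ctl = (bufsize - ring->frameoffset)
              & B43legacy_DMA32_DCTL_BYTECNT;           /* bytes the engine may actually use */
        if (slot == ring->nr_slots - 1)
                ctl |= B43legacy_DMA32_DCTL_DTABLEEND;  /* last descriptor: wrap to slot 0 */
        if (start)
                ctl |= B43legacy_DMA32_DCTL_FRAMESTART;
        if (end)
                ctl |= B43legacy_DMA32_DCTL_FRAMEEND;
        if (irq)
                ctl |= B43legacy_DMA32_DCTL_IRQ;
        ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
               & B43legacy_DMA32_DCTL_ADDREXT_MASK;

        desc->control = cpu_to_le32(ctl);
        desc->address = cpu_to_le32(addr);
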
80 static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
82 b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
86 static void op32_tx_suspend(struct b43legacy_dmaring *ring)
88 b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
89 b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
93 static void op32_tx_resume(struct b43legacy_dmaring *ring)
95 b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
96 b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
100 static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
104 val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
110 static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
113 b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
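
The two RX-slot helpers convert between a slot index and the byte offset the 32-bit engine reports and accepts: RXSTATUS holds a descriptor-table byte pointer, so dividing by sizeof(struct b43legacy_dmadesc32) yields the slot, and RXINDEX is written as slot times the descriptor size. A sketch; the B43legacy_DMA32_RXDPTR mask is an assumed name for the pointer field in the matched RXSTATUS read:

static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
{
        u32 val;

        val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
        val &= B43legacy_DMA32_RXDPTR;          /* byte offset of the current descriptor */

        return (val / sizeof(struct b43legacy_dmadesc32));
}

static void op32_set_current_rxslot(struct b43legacy_dmaring *ring, int slot)
{
        b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
                            (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}
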
117 static inline int free_slots(struct b43legacy_dmaring *ring)
119 return (ring->nr_slots - ring->used_slots);
122 static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
124 B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
125 if (slot == ring->nr_slots - 1)
131 static void update_max_used_slots(struct b43legacy_dmaring *ring,
134 if (current_used_slots <= ring->max_used_slots)
136 ring->max_used_slots = current_used_slots;
137 if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
138 b43legacydbg(ring->dev->wl,
139 "max_used_slots increased to %d on %s ring %d\n",
140 ring->max_used_slots,
141 ring->tx ? "TX" : "RX",
142 ring->index);
146 void update_max_used_slots(struct b43legacy_dmaring *ring,
153 int request_slot(struct b43legacy_dmaring *ring)
157 B43legacy_WARN_ON(!ring->tx);
158 B43legacy_WARN_ON(ring->stopped);
159 B43legacy_WARN_ON(free_slots(ring) == 0);
161 slot = next_slot(ring, ring->current_slot);
162 ring->current_slot = slot;
163 ring->used_slots++;
165 update_max_used_slots(ring, ring->used_slots);
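
free_slots, next_slot and request_slot are plain ring-buffer arithmetic over nr_slots entries: current_slot is the last slot handed out (-1 on an empty TX ring), used_slots counts outstanding descriptors, and request_slot advances with wraparound. Reconstructed from the matched lines (a sketch, not necessarily byte-for-byte):

static inline int free_slots(struct b43legacy_dmaring *ring)
{
        return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
{
        B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
        if (slot == ring->nr_slots - 1)
                return 0;               /* wrap around the descriptor table */
        return slot + 1;
}

static int request_slot(struct b43legacy_dmaring *ring)
{
        int slot;

        /* Only valid on a running TX ring that still has room. */
        B43legacy_WARN_ON(!ring->tx);
        B43legacy_WARN_ON(ring->stopped);
        B43legacy_WARN_ON(free_slots(ring) == 0);

        slot = next_slot(ring, ring->current_slot);
        ring->current_slot = slot;
        ring->used_slots++;

        update_max_used_slots(ring, ring->used_slots);
        return slot;
}
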
170 /* Mac80211-queue to b43legacy-ring mapping */
175 struct b43legacy_dmaring *ring;
177 /*FIXME: For now we always run on TX-ring-1 */
186 ring = dev->dma.tx_ring3;
189 ring = dev->dma.tx_ring2;
192 ring = dev->dma.tx_ring1;
195 ring = dev->dma.tx_ring0;
198 ring = dev->dma.tx_ring4;
201 ring = dev->dma.tx_ring5;
205 return ring;
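
priority_to_txring maps mac80211 queue priority (0 = highest) onto the six TX controllers in reverse order, though the FIXME at line 177 suggests the function currently returns dev->dma.tx_ring1 before the switch is ever reached. A sketch of the mapping the matched assignments imply:

static struct b43legacy_dmaring *priority_to_txring(struct b43legacy_wldev *dev,
                                                    int queue_priority)
{
        struct b43legacy_dmaring *ring;

        switch (queue_priority) {       /* 0 = highest priority */
        default:
                B43legacy_WARN_ON(1);
                /* fall through */
        case 0:
                ring = dev->dma.tx_ring3;
                break;
        case 1:
                ring = dev->dma.tx_ring2;
                break;
        case 2:
                ring = dev->dma.tx_ring1;
                break;
        case 3:
                ring = dev->dma.tx_ring0;
                break;
        case 4:
                ring = dev->dma.tx_ring4;
                break;
        case 5:
                ring = dev->dma.tx_ring5;
                break;
        }
        return ring;
}
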
226 dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
234 dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
238 dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
246 void unmap_descbuffer(struct b43legacy_dmaring *ring,
252 dma_unmap_single(ring->dev->dev->dma_dev,
256 dma_unmap_single(ring->dev->dev->dma_dev,
262 void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
266 B43legacy_WARN_ON(ring->tx);
268 dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
273 void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
277 B43legacy_WARN_ON(ring->tx);
279 dma_sync_single_for_device(ring->dev->dev->dma_dev,
284 void free_descriptor_buffer(struct b43legacy_dmaring *ring,
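
map_descbuffer and unmap_descbuffer differ only in the DMA direction, selected from the tx flag (TX frames are mapped to-device, RX buffers from-device); the sync helpers are RX-only, which is why they assert !ring->tx. A sketch assuming that convention:

static dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
                                 unsigned char *buf, size_t len, int tx)
{
        if (tx)
                return dma_map_single(ring->dev->dev->dma_dev,
                                      buf, len, DMA_TO_DEVICE);
        return dma_map_single(ring->dev->dev->dma_dev,
                              buf, len, DMA_FROM_DEVICE);
}

static void unmap_descbuffer(struct b43legacy_dmaring *ring,
                             dma_addr_t addr, size_t len, int tx)
{
        if (tx)
                dma_unmap_single(ring->dev->dev->dma_dev,
                                 addr, len, DMA_TO_DEVICE);
        else
                dma_unmap_single(ring->dev->dev->dma_dev,
                                 addr, len, DMA_FROM_DEVICE);
}
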
297 static int alloc_ringmemory(struct b43legacy_dmaring *ring)
300 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
302 &(ring->dmabase), GFP_KERNEL);
303 if (!ring->descbase)
309 static void free_ringmemory(struct b43legacy_dmaring *ring)
311 dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
312 ring->descbase, ring->dmabase);
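
alloc_ringmemory and free_ringmemory are a matched dma_alloc_coherent/dma_free_coherent pair of B43legacy_DMA_RINGMEMSIZE bytes; descbase is the CPU-visible descriptor array and dmabase the bus address later programmed into the controller. Roughly:

static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
        /* GFP flags must match free_ringmemory(). */
        ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
                                            B43legacy_DMA_RINGMEMSIZE,
                                            &(ring->dmabase), GFP_KERNEL);
        if (!ring->descbase)
                return -ENOMEM;
        return 0;
}

static void free_ringmemory(struct b43legacy_dmaring *ring)
{
        dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
                          ring->descbase, ring->dmabase);
}
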
390 static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
395 if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
398 switch (ring->type) {
414 unmap_descbuffer(ring, addr, buffersize, dma_to_device);
419 static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
429 B43legacy_WARN_ON(ring->tx);
431 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
434 dmaaddr = map_descbuffer(ring, skb->data,
435 ring->rx_buffersize, 0);
436 if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
442 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
445 dmaaddr = map_descbuffer(ring, skb->data,
446 ring->rx_buffersize, 0);
449 if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
456 op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0);
467 * This is used for an RX ring only.
469 static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
476 for (i = 0; i < ring->nr_slots; i++) {
477 desc = op32_idx2desc(ring, i, &meta);
479 err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
481 b43legacyerr(ring->dev->wl,
487 ring->used_slots = ring->nr_slots;
494 desc = op32_idx2desc(ring, i, &meta);
496 unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
503 * Reset the controller, write the ring busaddress
506 static int dmacontroller_setup(struct b43legacy_dmaring *ring)
511 u32 trans = ring->dev->dma.translation;
512 u32 ringbase = (u32)(ring->dmabase);
514 if (ring->tx) {
520 b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value);
521 b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
525 err = alloc_initial_descbuffers(ring);
531 value = (ring->frameoffset <<
536 b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value);
537 b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
540 b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200);
548 static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
550 if (ring->tx) {
551 b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
552 ring->type);
553 b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
555 b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
556 ring->type);
557 b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
561 static void free_all_descbuffers(struct b43legacy_dmaring *ring)
566 if (!ring->used_slots)
568 for (i = 0; i < ring->nr_slots; i++) {
569 op32_idx2desc(ring, i, &meta);
572 B43legacy_WARN_ON(!ring->tx);
575 if (ring->tx)
576 unmap_descbuffer(ring, meta->dmaaddr,
579 unmap_descbuffer(ring, meta->dmaaddr,
580 ring->rx_buffersize, 0);
581 free_descriptor_buffer(ring, meta, 0);
608 struct b43legacy_dmaring *ring;
613 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
614 if (!ring)
616 ring->type = type;
617 ring->dev = dev;
623 ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
625 if (!ring->meta)
628 ring->txhdr_cache = kcalloc(nr_slots,
631 if (!ring->txhdr_cache)
635 dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
639 if (b43legacy_dma_mapping_error(ring, dma_test,
642 kfree(ring->txhdr_cache);
643 ring->txhdr_cache = kcalloc(nr_slots,
646 if (!ring->txhdr_cache)
650 ring->txhdr_cache,
654 if (b43legacy_dma_mapping_error(ring, dma_test,
664 ring->nr_slots = nr_slots;
665 ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
666 ring->index = controller_index;
668 ring->tx = true;
669 ring->current_slot = -1;
671 if (ring->index == 0) {
672 ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
673 ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
674 } else if (ring->index == 3) {
675 ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
676 ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
681 ring->last_injected_overflow = jiffies;
684 err = alloc_ringmemory(ring);
687 err = dmacontroller_setup(ring);
692 return ring;
695 free_ringmemory(ring);
697 kfree(ring->txhdr_cache);
699 kfree(ring->meta);
701 kfree(ring);
702 ring = NULL;
707 static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
709 if (!ring)
712 b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
713 " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
714 (ring->tx) ? "TX" : "RX", ring->max_used_slots,
715 ring->nr_slots);
719 dmacontroller_cleanup(ring);
720 free_all_descbuffers(ring);
721 free_ringmemory(ring);
723 kfree(ring->txhdr_cache);
724 kfree(ring->meta);
725 kfree(ring);
758 struct b43legacy_dmaring *ring;
779 ring = b43legacy_setup_dmaring(dev, 0, 1, type);
780 if (!ring)
782 dma->tx_ring0 = ring;
784 ring = b43legacy_setup_dmaring(dev, 1, 1, type);
785 if (!ring)
787 dma->tx_ring1 = ring;
789 ring = b43legacy_setup_dmaring(dev, 2, 1, type);
790 if (!ring)
792 dma->tx_ring2 = ring;
794 ring = b43legacy_setup_dmaring(dev, 3, 1, type);
795 if (!ring)
797 dma->tx_ring3 = ring;
799 ring = b43legacy_setup_dmaring(dev, 4, 1, type);
800 if (!ring)
802 dma->tx_ring4 = ring;
804 ring = b43legacy_setup_dmaring(dev, 5, 1, type);
805 if (!ring)
807 dma->tx_ring5 = ring;
810 ring = b43legacy_setup_dmaring(dev, 0, 0, type);
811 if (!ring)
813 dma->rx_ring0 = ring;
816 ring = b43legacy_setup_dmaring(dev, 3, 0, type);
817 if (!ring)
819 dma->rx_ring3 = ring;
852 static u16 generate_cookie(struct b43legacy_dmaring *ring,
863 switch (ring->index) {
895 struct b43legacy_dmaring *ring = NULL;
899 ring = dma->tx_ring0;
902 ring = dma->tx_ring1;
905 ring = dma->tx_ring2;
908 ring = dma->tx_ring3;
911 ring = dma->tx_ring4;
914 ring = dma->tx_ring5;
920 B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));
922 return ring;
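
The cookie written into the TX header ties a frame to the controller and slot it occupied, so the TX-status handler (parse_cookie, lines 895-922) can recover both: the upper 4 bits name the ring and the lower 12 bits the slot. A sketch of generate_cookie; the 0xA000-0xF000 constants are assumed values for the per-ring IDs fed by the switch on ring->index at line 863:

static u16 generate_cookie(struct b43legacy_dmaring *ring, int slot)
{
        u16 cookie = 0x1000;            /* never 0; 0 is reserved on the RX path */

        switch (ring->index) {          /* upper nibble = DMA controller ID */
        case 0: cookie = 0xA000; break;
        case 1: cookie = 0xB000; break;
        case 2: cookie = 0xC000; break;
        case 3: cookie = 0xD000; break;
        case 4: cookie = 0xE000; break;
        case 5: cookie = 0xF000; break;
        }
        B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
        cookie |= (u16)slot;            /* lower 12 bits = slot number */

        return cookie;
}
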
925 static int dma_tx_fragment(struct b43legacy_dmaring *ring,
941 old_top_slot = ring->current_slot;
942 old_used_slots = ring->used_slots;
945 slot = request_slot(ring);
946 desc = op32_idx2desc(ring, slot, &meta_hdr);
949 header = &(ring->txhdr_cache[slot * sizeof(
951 err = b43legacy_generate_txhdr(ring->dev, header,
953 generate_cookie(ring, slot));
955 ring->current_slot = old_top_slot;
956 ring->used_slots = old_used_slots;
960 meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
962 if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
964 ring->current_slot = old_top_slot;
965 ring->used_slots = old_used_slots;
968 op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr,
972 slot = request_slot(ring);
973 desc = op32_idx2desc(ring, slot, &meta);
979 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
981 if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
984 ring->current_slot = old_top_slot;
985 ring->used_slots = old_used_slots;
1000 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1001 if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
1002 ring->current_slot = old_top_slot;
1003 ring->used_slots = old_used_slots;
1009 op32_fill_descriptor(ring, desc, meta->dmaaddr,
1014 op32_poke_tx(ring, next_slot(ring, slot));
1020 unmap_descbuffer(ring, meta_hdr->dmaaddr,
1026 int should_inject_overflow(struct b43legacy_dmaring *ring)
1029 if (unlikely(b43legacy_debug(ring->dev,
1035 next_overflow = ring->last_injected_overflow + HZ;
1037 ring->last_injected_overflow = jiffies;
1038 b43legacydbg(ring->dev->wl,
1039 "Injecting TX ring overflow on "
1040 "DMA controller %d\n", ring->index);
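
should_inject_overflow is a debug-only hook: when the DMA-overflow debug feature is enabled, it pretends the TX ring is full at most once per second (last_injected_overflow + HZ) so the queue-stop/wake path in b43legacy_dma_tx gets exercised. A sketch, assuming the usual CONFIG_B43LEGACY_DEBUG guard and the B43legacy_DBG_DMAOVERFLOW feature flag:

static int should_inject_overflow(struct b43legacy_dmaring *ring)
{
#ifdef CONFIG_B43LEGACY_DEBUG
        if (unlikely(b43legacy_debug(ring->dev,
                                     B43legacy_DBG_DMAOVERFLOW))) {
                /* Fake a ring overflow at most once per second. */
                unsigned long next_overflow;

                next_overflow = ring->last_injected_overflow + HZ;
                if (time_after(jiffies, next_overflow)) {
                        ring->last_injected_overflow = jiffies;
                        b43legacydbg(ring->dev->wl,
                                     "Injecting TX ring overflow on "
                                     "DMA controller %d\n", ring->index);
                        return 1;
                }
        }
#endif /* CONFIG_B43LEGACY_DEBUG */
        return 0;
}
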
1051 struct b43legacy_dmaring *ring;
1054 ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
1055 B43legacy_WARN_ON(!ring->tx);
1057 if (unlikely(ring->stopped)) {
1067 if (WARN_ON(free_slots(ring) < SLOTS_PER_PACKET)) {
1076 err = dma_tx_fragment(ring, &skb);
1087 if ((free_slots(ring) < SLOTS_PER_PACKET) ||
1088 should_inject_overflow(ring)) {
1089 /* This TX ring is full. */
1093 ring->stopped = true;
1095 b43legacydbg(dev->wl, "Stopped TX ring %d\n",
1096 ring->index);
1104 struct b43legacy_dmaring *ring;
1110 ring = parse_cookie(dev, status->cookie, &slot);
1111 if (unlikely(!ring))
1113 B43legacy_WARN_ON(!ring->tx);
1115 /* Sanity check: TX packets are processed in-order on one ring.
1118 firstused = ring->current_slot - ring->used_slots + 1;
1120 firstused = ring->nr_slots + firstused;
1126 "ring %d. Expected %d, but got %d\n",
1127 ring->index, firstused, slot);
1132 B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
1133 op32_idx2desc(ring, slot, &meta);
1136 unmap_descbuffer(ring, meta->dmaaddr,
1139 unmap_descbuffer(ring, meta->dmaaddr,
1194 ring->used_slots--;
1198 slot = next_slot(ring, slot);
1201 if (ring->stopped) {
1202 B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
1203 ring->stopped = false;
1206 if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
1207 dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
1211 ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
1213 b43legacydbg(dev->wl, "Woke up TX ring %d\n",
1214 ring->index);
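
The sanity check near line 1118 recovers the oldest slot still owned by the hardware from current_slot and used_slots, wrapping modulo nr_slots; with nr_slots = 128, current_slot = 5 and used_slots = 10, firstused is 5 - 10 + 1 = -4, which wraps to 124, and a TX status naming any other slot means driver and hardware views have diverged. The same computation as an illustrative helper (not a driver function):

static int first_used_slot(const struct b43legacy_dmaring *ring)
{
        int firstused = ring->current_slot - ring->used_slots + 1;

        if (firstused < 0)
                firstused += ring->nr_slots;    /* wrap, e.g. -4 -> 124 on a 128-slot ring */
        return firstused;
}
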
1220 static void dma_rx(struct b43legacy_dmaring *ring,
1231 desc = op32_idx2desc(ring, *slot, &meta);
1233 sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
1236 if (ring->index == 3) {
1249 b43legacy_handle_hwtxstatus(ring->dev, hw);
1251 sync_descbuffer_for_device(ring, meta->dmaaddr,
1252 ring->rx_buffersize);
1268 sync_descbuffer_for_device(ring, meta->dmaaddr,
1269 ring->rx_buffersize);
1273 if (unlikely(len > ring->rx_buffersize)) {
1283 desc = op32_idx2desc(ring, *slot, &meta);
1285 sync_descbuffer_for_device(ring, meta->dmaaddr,
1286 ring->rx_buffersize);
1287 *slot = next_slot(ring, *slot);
1289 tmp -= ring->rx_buffersize;
1293 b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
1295 len, ring->rx_buffersize, cnt);
1300 err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
1302 b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
1304 sync_descbuffer_for_device(ring, dmaaddr,
1305 ring->rx_buffersize);
1309 unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
1310 skb_put(skb, len + ring->frameoffset);
1311 skb_pull(skb, ring->frameoffset);
1313 b43legacy_rx(ring->dev, skb, rxhdr);
1318 void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
1324 B43legacy_WARN_ON(ring->tx);
1325 current_slot = op32_get_current_rxslot(ring);
1327 ring->nr_slots));
1329 slot = ring->current_slot;
1330 for (; slot != current_slot; slot = next_slot(ring, slot)) {
1331 dma_rx(ring, &slot);
1332 update_max_used_slots(ring, ++used_slots);
1334 op32_set_current_rxslot(ring, slot);
1335 ring->current_slot = slot;
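
b43legacy_dma_rx drains the RX ring by walking from the driver's current_slot up to the slot the hardware reports as current, passing each filled buffer to dma_rx() and then telling the controller how far the driver has caught up. Reconstructed from the matched lines (the bounds check at line 1327 is shown in its likely full form):

void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
{
        int slot;
        int current_slot;
        int used_slots = 0;

        B43legacy_WARN_ON(ring->tx);
        current_slot = op32_get_current_rxslot(ring);
        B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
                            ring->nr_slots));

        slot = ring->current_slot;
        for (; slot != current_slot; slot = next_slot(ring, slot)) {
                dma_rx(ring, &slot);                    /* may advance slot for oversized frames */
                update_max_used_slots(ring, ++used_slots);
        }
        op32_set_current_rxslot(ring, slot);
        ring->current_slot = slot;
}
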
1338 static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
1340 B43legacy_WARN_ON(!ring->tx);
1341 op32_tx_suspend(ring);
1344 static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
1346 B43legacy_WARN_ON(!ring->tx);
1347 op32_tx_resume(ring);