/*

  Broadcom BCM43xx wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mbuesch@freenet.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "bcm43xx.h"
#include "bcm43xx_dma.h"
#include "bcm43xx_main.h"
#include "bcm43xx_debugfs.h"
#include "bcm43xx_power.h"
#include "bcm43xx_xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>


static inline int free_slots(struct bcm43xx_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= -1 && slot <= ring->nr_slots - 1);
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= 0 && slot <= ring->nr_slots - 1);
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

/* Request a slot for usage. */
static inline
int request_slot(struct bcm43xx_dmaring *ring)
{
	int slot;

	assert(ring->tx);
	assert(!ring->suspended);
	assert(free_slots(ring) != 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	/* Check the number of available slots and suspend TX,
	 * if we are running low on free slots.
	 */
	if (unlikely(free_slots(ring) < ring->suspend_mark)) {
		netif_stop_queue(ring->bcm->net_dev);
		ring->suspended = 1;
	}
#ifdef CONFIG_BCM43XX_DEBUG
	if (ring->used_slots > ring->max_used_slots)
		ring->max_used_slots = ring->used_slots;
#endif /* CONFIG_BCM43XX_DEBUG */

	return slot;
}

/* Return a slot to the free slots. */
static inline
void return_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(ring->tx);

	ring->used_slots--;

	/* Check if TX is suspended and check if we have
	 * enough free slots to resume it again.
	 */
	if (unlikely(ring->suspended)) {
		if (free_slots(ring) >= ring->resume_mark) {
			ring->suspended = 0;
			netif_wake_queue(ring->bcm->net_dev);
		}
	}
}
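/*
 * The two marks above implement flow-control hysteresis (both are
 * computed from BCM43xx_TXSUSPEND_PERCENT and BCM43xx_TXRESUME_PERCENT
 * in bcm43xx_setup_dmaring()):
 *
 *   request_slot(): free slots fall below suspend_mark -> netif_stop_queue()
 *   return_slot():  free slots reach resume_mark       -> netif_wake_queue()
 *
 * Keeping suspend_mark strictly below resume_mark (asserted at setup
 * time) prevents the queue from flapping between stopped and woken on
 * every completed frame.
 */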
u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
{
	static const u16 map64[] = {
		BCM43xx_MMIO_DMA64_BASE0,
		BCM43xx_MMIO_DMA64_BASE1,
		BCM43xx_MMIO_DMA64_BASE2,
		BCM43xx_MMIO_DMA64_BASE3,
		BCM43xx_MMIO_DMA64_BASE4,
		BCM43xx_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		BCM43xx_MMIO_DMA32_BASE0,
		BCM43xx_MMIO_DMA32_BASE1,
		BCM43xx_MMIO_DMA32_BASE2,
		BCM43xx_MMIO_DMA32_BASE3,
		BCM43xx_MMIO_DMA32_BASE4,
		BCM43xx_MMIO_DMA32_BASE5,
	};

	if (dma64bit) {
		assert(controller_idx >= 0 &&
		       controller_idx < ARRAY_SIZE(map64));
		return map64[controller_idx];
	}
	assert(controller_idx >= 0 &&
	       controller_idx < ARRAY_SIZE(map32));
	return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;
	int direction = PCI_DMA_FROMDEVICE;

	if (tx)
		direction = PCI_DMA_TODEVICE;

	dmaaddr = pci_map_single(ring->bcm->pci_dev,
				 buf, len,
				 direction);

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct bcm43xx_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx) {
		pci_unmap_single(ring->bcm->pci_dev,
				 addr, len,
				 PCI_DMA_TODEVICE);
	} else {
		pci_unmap_single(ring->bcm->pci_dev,
				 addr, len,
				 PCI_DMA_FROMDEVICE);
	}
}

static inline
void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	assert(!ring->tx);

	pci_dma_sync_single_for_cpu(ring->bcm->pci_dev,
				    addr, len, PCI_DMA_FROMDEVICE);
}

static inline
void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	assert(!ring->tx);

	pci_dma_sync_single_for_device(ring->bcm->pci_dev,
				       addr, len, PCI_DMA_FROMDEVICE);
}

/* Unmap and free a descriptor buffer. */
static inline
void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
			    struct bcm43xx_dmadesc_meta *meta,
			    int irq_context)
{
	assert(meta->skb);
	if (irq_context)
		dev_kfree_skb_irq(meta->skb);
	else
		dev_kfree_skb(meta->skb);
	meta->skb = NULL;
}
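/*
 * Buffer DMA lifecycle, as wrapped by the helpers above: a buffer is
 * pci_map_single()'d once when it is attached to a slot; RX buffers
 * are then handed between device and CPU with
 * sync_descbuffer_for_device()/sync_descbuffer_for_cpu() while they
 * stay mapped; pci_unmap_single() runs just before the skb is freed or
 * passed up the stack. Keeping the direction flags inside these
 * wrappers confines TX/RX direction mistakes to one place.
 */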
static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
	ring->descbase = pci_alloc_consistent(ring->bcm->pci_dev, BCM43xx_DMA_RINGMEMSIZE,
					      &(ring->dmabase));
	if (!ring->descbase) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary... */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		rx_ring = kzalloc(BCM43xx_DMA_RINGMEMSIZE, GFP_KERNEL);
		if (!rx_ring)
			goto out_err;

		rx_ring_dma = pci_map_single(ring->bcm->pci_dev, rx_ring,
					     BCM43xx_DMA_RINGMEMSIZE,
					     PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(rx_ring_dma) ||
		    rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
			/* Sigh... */
			if (!pci_dma_mapping_error(rx_ring_dma))
				pci_unmap_single(ring->bcm->pci_dev,
						 rx_ring_dma, BCM43xx_DMA_RINGMEMSIZE,
						 PCI_DMA_BIDIRECTIONAL);
			rx_ring_dma = pci_map_single(ring->bcm->pci_dev,
						     rx_ring, BCM43xx_DMA_RINGMEMSIZE,
						     PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(rx_ring_dma) ||
			    rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
				assert(0);
				if (!pci_dma_mapping_error(rx_ring_dma))
					pci_unmap_single(ring->bcm->pci_dev,
							 rx_ring_dma, BCM43xx_DMA_RINGMEMSIZE,
							 PCI_DMA_BIDIRECTIONAL);
				kfree(rx_ring);
				goto out_err;
			}
		}

		ring->descbase = rx_ring;
		ring->dmabase = rx_ring_dma;
	}
	memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);

	return 0;
out_err:
	printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
	return -ENOMEM;
}

static void free_ringmemory(struct bcm43xx_dmaring *ring)
{
	struct device *dev = &(ring->bcm->pci_dev->dev);

	dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
				   u16 mmio_base, int dma64)
{
	int i;
	u32 value;
	u16 offset;

	offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
	bcm43xx_write32(bcm, mmio_base + offset, 0);
	for (i = 0; i < 1000; i++) {
		offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
		value = bcm43xx_read32(bcm, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_RXSTAT;
			if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= BCM43xx_DMA32_RXSTATE;
			if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		udelay(10);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "Error: Wait on DMA RX status timed out.\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
				   u16 mmio_base, int dma64)
{
	int i;
	u32 value;
	u16 offset;

	for (i = 0; i < 1000; i++) {
		offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
		value = bcm43xx_read32(bcm, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_TXSTAT;
			if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
			    value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
			    value == BCM43xx_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= BCM43xx_DMA32_TXSTATE;
			if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
			    value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
			    value == BCM43xx_DMA32_TXSTAT_STOPPED)
				break;
		}
		udelay(10);
	}
	offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
	bcm43xx_write32(bcm, mmio_base + offset, 0);
	for (i = 0; i < 1000; i++) {
		offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
		value = bcm43xx_read32(bcm, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_TXSTAT;
			if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= BCM43xx_DMA32_TXSTATE;
			if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		udelay(10);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "Error: Wait on DMA TX status timed out.\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	udelay(300);

	return 0;
}
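/*
 * Note on the reset handshake above: the TX reset first waits for the
 * engine to reach a quiescent state (DISABLED, IDLEWAIT or STOPPED),
 * then clears the control register and waits for DISABLED again. In
 * both reset routines the loop counter doubles as the result flag:
 * i == -1 means the expected state was observed before the
 * 1000 * 10us poll budget ran out.
 */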
static void fill_descriptor(struct bcm43xx_dmaring *ring,
			    struct bcm43xx_dmadesc_generic *desc,
			    dma_addr_t dmaaddr,
			    u16 bufsize,
			    int start, int end, int irq)
{
	int slot;

	slot = bcm43xx_dma_desc2idx(ring, desc);
	assert(slot >= 0 && slot < ring->nr_slots);

	if (ring->dma64) {
		u32 ctl0 = 0, ctl1 = 0;
		u32 addrlo, addrhi;
		u32 addrext;

		addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
		addrhi = (((u64)dmaaddr >> 32) & ~BCM43xx_DMA64_ROUTING);
		addrext = (((u64)dmaaddr >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
		addrhi |= ring->routing;
		if (slot == ring->nr_slots - 1)
			ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
		if (start)
			ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
		if (end)
			ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
		if (irq)
			ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
		ctl1 |= (bufsize - ring->frameoffset)
			& BCM43xx_DMA64_DCTL1_BYTECNT;
		ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
			& BCM43xx_DMA64_DCTL1_ADDREXT_MASK;

		desc->dma64.control0 = cpu_to_le32(ctl0);
		desc->dma64.control1 = cpu_to_le32(ctl1);
		desc->dma64.address_low = cpu_to_le32(addrlo);
		desc->dma64.address_high = cpu_to_le32(addrhi);
	} else {
		u32 ctl;
		u32 addr;
		u32 addrext;

		addr = (u32)(dmaaddr & ~BCM43xx_DMA32_ROUTING);
		addrext = (u32)(dmaaddr & BCM43xx_DMA32_ROUTING)
			  >> BCM43xx_DMA32_ROUTING_SHIFT;
		addr |= ring->routing;
		ctl = (bufsize - ring->frameoffset)
		      & BCM43xx_DMA32_DCTL_BYTECNT;
		if (slot == ring->nr_slots - 1)
			ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
		if (start)
			ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
		if (end)
			ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
		if (irq)
			ctl |= BCM43xx_DMA32_DCTL_IRQ;
		ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
		       & BCM43xx_DMA32_DCTL_ADDREXT_MASK;

		desc->dma32.control = cpu_to_le32(ctl);
		desc->dma32.address = cpu_to_le32(addr);
	}
}
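/*
 * Worked example for the 64-bit address split above, assuming the
 * routing ("address translation") field sits in the top bits of the
 * high word selected by BCM43xx_DMA64_ROUTING:
 *
 *   addrlo  = dmaaddr & 0xFFFFFFFF        plain low word
 *   addrhi  = (dmaaddr >> 32) & ~ROUTING  high word, routing bits cleared,
 *                                         then OR'ed with ring->routing
 *   addrext = (dmaaddr >> 32) >> ROUTING_SHIFT
 *                                         displaced high bits, stored in
 *                                         the ADDREXT field of control1
 *
 * The backplane uses the routing bits to decide how to master the
 * access, so real address bits in those positions must be moved into
 * ADDREXT instead of being written to the address register directly.
 */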
static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
			       struct bcm43xx_dmadesc_generic *desc,
			       struct bcm43xx_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct bcm43xx_rxhdr *rxhdr;
	struct bcm43xx_hwxmitstatus *xmitstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	assert(!ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (pci_dma_mapping_error(dmaaddr) ||
	    dmaaddr + ring->rx_buffersize > ring->bcm->dma_mask) {
		/* This one has 30-bit addressing... */
		if (!pci_dma_mapping_error(dmaaddr))
			pci_unmap_single(ring->bcm->pci_dev,
					 dmaaddr, ring->rx_buffersize,
					 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags | GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		dmaaddr = pci_map_single(ring->bcm->pci_dev,
					 skb->data, ring->rx_buffersize,
					 PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(dmaaddr) ||
		    dmaaddr + ring->rx_buffersize > ring->bcm->dma_mask) {
			assert(0);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
	}
	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	skb->dev = ring->bcm->net_dev;

	fill_descriptor(ring, desc, dmaaddr,
			ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
	rxhdr->frame_length = 0;
	rxhdr->flags1 = 0;
	xmitstat = (struct bcm43xx_hwxmitstatus *)(skb->data);
	xmitstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = bcm43xx_dma_idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err)
			goto err_unwind;
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = bcm43xx_dma_idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}
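/*
 * Unlike a TX ring, an RX ring keeps a buffer attached to every slot
 * for its whole lifetime, so used_slots is pinned at nr_slots here and
 * the request_slot()/return_slot() accounting never applies to it.
 */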
/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;

	if (ring->tx) {
		if (ring->dma64) {
			u64 ringbase = (u64)(ring->dmabase);

			addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
			value = BCM43xx_DMA64_TXENABLE;
			value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
				 & BCM43xx_DMA64_TXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
					  (ringbase & 0xFFFFFFFF));
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
					  ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
					  | ring->routing);
		} else {
			u32 ringbase = (u32)(ring->dmabase);

			addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
			value = BCM43xx_DMA32_TXENABLE;
			value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
				 & BCM43xx_DMA32_TXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
					  (ringbase & ~BCM43xx_DMA32_ROUTING)
					  | ring->routing);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->dma64) {
			u64 ringbase = (u64)(ring->dmabase);

			addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
			value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
			value |= BCM43xx_DMA64_RXENABLE;
			value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
				 & BCM43xx_DMA64_RXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
					  (ringbase & 0xFFFFFFFF));
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
					  ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
					  | ring->routing);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
		} else {
			u32 ringbase = (u32)(ring->dmabase);

			addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
			value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
			value |= BCM43xx_DMA32_RXENABLE;
			value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
				 & BCM43xx_DMA32_RXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
					  (ringbase & ~BCM43xx_DMA32_ROUTING)
					  | ring->routing);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
		}
	}

out:
	return err;
}
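/*
 * The ring base written above uses the same split as fill_descriptor():
 * low word to TXRINGLO/RXRINGLO, high word minus the routing field
 * (OR'ed back in from ring->routing) to TXRINGHI/RXRINGHI, and the
 * displaced bits as the ADDREXT part of the control register. The
 * 32-bit engines collapse this to a single TXRING/RXRING register with
 * the routing field in its top bits.
 */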
/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{
	if (ring->tx) {
		bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base, ring->dma64);
		if (ring->dma64) {
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
		} else
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
	} else {
		bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base, ring->dma64);
		if (ring->dma64) {
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
		} else
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
{
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = bcm43xx_dma_idx2desc(ring, i, &meta);

		if (!meta->skb) {
			assert(ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta, 0);
	}
}

/* Main initialization function. */
static
struct bcm43xx_dmaring *bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
					      int controller_index,
					      int for_tx,
					      int dma64)
{
	struct bcm43xx_dmaring *ring;
	int err;
	int nr_slots;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	nr_slots = BCM43xx_RXRING_SLOTS;
	if (for_tx)
		nr_slots = BCM43xx_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;

	ring->routing = BCM43xx_DMA32_CLIENTTRANS;
	if (dma64)
		ring->routing = BCM43xx_DMA64_CLIENTTRANS;

	ring->bcm = bcm;
	ring->nr_slots = nr_slots;
	ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
	ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
	assert(ring->suspend_mark < ring->resume_mark);
	ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
	ring->index = controller_index;
	ring->dma64 = !!dma64;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
		} else
			assert(0);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_meta;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;
	return ring;

out:
	printk(KERN_ERR PFX "Error in bcm43xx_setup_dmaring\n");
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
{
	if (!ring)
		return;

	dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
		(ring->dma64) ? "64" : "32",
		ring->mmio_base,
		(ring->tx) ? "TX" : "RX",
		ring->max_used_slots, ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so there is no need to take care of concurrency with the
	 * RX handler.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->meta);
	kfree(ring);
}

void bcm43xx_dma_free(struct bcm43xx_private *bcm)
{
	struct bcm43xx_dma *dma;

	if (bcm43xx_using_pio(bcm))
		return;
	dma = bcm43xx_current_dma(bcm);

	bcm43xx_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	bcm43xx_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	bcm43xx_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}
int bcm43xx_dma_init(struct bcm43xx_private *bcm)
{
	struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
	struct bcm43xx_dmaring *ring;
	int err;
	int dma64 = 0;

	bcm->dma_mask = bcm43xx_get_supported_dma_mask(bcm);
	if (bcm->dma_mask == DMA_64BIT_MASK)
		dma64 = 1;
	err = pci_set_dma_mask(bcm->pci_dev, bcm->dma_mask);
	if (err)
		goto no_dma;
	err = pci_set_consistent_dma_mask(bcm->pci_dev, bcm->dma_mask);
	if (err)
		goto no_dma;
	/* From here on, a failed ring setup must report -ENOMEM. */
	err = -ENOMEM;

	/* setup TX DMA channels. */
	ring = bcm43xx_setup_dmaring(bcm, 0, 1, dma64);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = bcm43xx_setup_dmaring(bcm, 1, 1, dma64);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = bcm43xx_setup_dmaring(bcm, 2, 1, dma64);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = bcm43xx_setup_dmaring(bcm, 3, 1, dma64);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = bcm43xx_setup_dmaring(bcm, 4, 1, dma64);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = bcm43xx_setup_dmaring(bcm, 5, 1, dma64);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = bcm43xx_setup_dmaring(bcm, 0, 0, dma64);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (bcm->current_core->rev < 5) {
		ring = bcm43xx_setup_dmaring(bcm, 3, 0, dma64);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	dprintk(KERN_INFO PFX "%d-bit DMA initialized\n",
		(bcm->dma_mask == DMA_64BIT_MASK) ? 64 :
		(bcm->dma_mask == DMA_32BIT_MASK) ? 32 : 30);
	err = 0;
out:
	return err;

err_destroy_rx0:
	bcm43xx_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	bcm43xx_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	bcm43xx_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	bcm43xx_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	bcm43xx_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	bcm43xx_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
no_dma:
#ifdef CONFIG_BCM43XX_PIO
	printk(KERN_WARNING PFX "DMA not supported on this device."
			        " Falling back to PIO.\n");
	bcm->__using_pio = 1;
	return -ENOSYS;
#else
	printk(KERN_ERR PFX "FATAL: DMA not supported and PIO not configured. "
			    "Please recompile the driver with PIO support.\n");
	return -ENODEV;
#endif /* CONFIG_BCM43XX_PIO */
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in the RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	assert(((u16)slot & 0xF000) == 0x0000);
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct bcm43xx_dmaring *parse_cookie(struct bcm43xx_private *bcm,
				     u16 cookie, int *slot)
{
	struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
	struct bcm43xx_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		assert(0);
	}
	*slot = (cookie & 0x0FFF);
	assert(*slot >= 0 && *slot < ring->nr_slots);

	return ring;
}
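/*
 * Cookie layout, worked example: a frame queued on TX ring 2, slot 26
 * (0x01A) gets the cookie 0xC000 | 0x01A = 0xC01A. parse_cookie()
 * undoes this by masking with 0xF000 to pick dma->tx_ring2 and with
 * 0x0FFF to recover slot 26.
 */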
static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
				  int slot)
{
	u16 offset;
	int descsize;

	/* Everything is ready to start. Buffers are DMA mapped and
	 * associated with slots.
	 * "slot" is the last slot of the new frame we want to transmit.
	 * Close your seat belts now, please.
	 */
	wmb();
	slot = next_slot(ring, slot);
	offset = (ring->dma64) ? BCM43xx_DMA64_TXINDEX : BCM43xx_DMA32_TXINDEX;
	descsize = (ring->dma64) ? sizeof(struct bcm43xx_dmadesc64)
				 : sizeof(struct bcm43xx_dmadesc32);
	bcm43xx_dma_write(ring, offset,
			  (u32)(slot * descsize));
}

static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
			    struct sk_buff *skb,
			    u8 cur_frag)
{
	int slot;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	dma_addr_t dmaaddr;
	struct sk_buff *bounce_skb;

	assert(skb_shinfo(skb)->nr_frags == 0);

	slot = request_slot(ring);
	desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

	/* Add a device specific TX header. */
	assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
	/* Reserve enough headroom for the device tx header. */
	__skb_push(skb, sizeof(struct bcm43xx_txhdr));
	/* Now calculate and add the tx header.
	 * The tx header includes the PLCP header.
	 */
	bcm43xx_generate_txhdr(ring->bcm,
			       (struct bcm43xx_txhdr *)skb->data,
			       skb->data + sizeof(struct bcm43xx_txhdr),
			       skb->len - sizeof(struct bcm43xx_txhdr),
			       (cur_frag == 0),
			       generate_cookie(ring, slot));
	dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
		/* chip cannot handle DMA to/from > 1GB, use bounce buffer (copied from b44 driver) */
		if (!dma_mapping_error(dmaaddr))
			unmap_descbuffer(ring, dmaaddr, skb->len, 1);
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC|GFP_DMA);
		if (!bounce_skb)
			return;
		dmaaddr = map_descbuffer(ring, bounce_skb->data, skb->len, 1);
		if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
			if (!dma_mapping_error(dmaaddr))
				unmap_descbuffer(ring, dmaaddr, skb->len, 1);
			dev_kfree_skb_any(bounce_skb);
			assert(0);
			return;
		}
		skb_copy_from_linear_data(skb, skb_put(bounce_skb, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;

	fill_descriptor(ring, desc, dmaaddr,
			skb->len, 1, 1, 1);

	/* Now transfer the whole frame. */
	dmacontroller_poke_tx(ring, slot);
}
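/*
 * The TX index register takes a byte offset rather than a slot number,
 * hence the slot * descsize multiplication in dmacontroller_poke_tx().
 * Writing the offset of the slot following the frame's last descriptor
 * hands every descriptor before that offset to the hardware; the wmb()
 * beforehand makes sure the descriptor contents are visible first.
 */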
int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
		   struct ieee80211_txb *txb)
{
	/* We just received a packet from the kernel network subsystem.
	 * Add headers and DMA map the memory. Poke
	 * the device to send the stuff.
	 * Note that this is called from atomic context.
	 */
	struct bcm43xx_dmaring *ring = bcm43xx_current_dma(bcm)->tx_ring1;
	u8 i;
	struct sk_buff *skb;

	assert(ring->tx);
	if (unlikely(free_slots(ring) < txb->nr_frags)) {
		/* The queue should be stopped if we are low on free
		 * slots. If this ever triggers, we have to lower the
		 * suspend_mark.
		 */
		dprintkl(KERN_ERR PFX "Out of DMA descriptor slots!\n");
		return -ENOMEM;
	}

	for (i = 0; i < txb->nr_frags; i++) {
		skb = txb->fragments[i];
		/* Take the skb away from ieee80211_txb_free() */
		txb->fragments[i] = NULL;
		dma_tx_fragment(ring, skb, i);
	}
	ieee80211_txb_free(txb);

	return 0;
}

void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
				   struct bcm43xx_xmitstatus *status)
{
	struct bcm43xx_dmaring *ring;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	int is_last_fragment;
	int slot;
	u32 tmp;

	ring = parse_cookie(bcm, status->cookie, &slot);
	assert(ring);
	assert(ring->tx);
	while (1) {
		assert(slot >= 0 && slot < ring->nr_slots);
		desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

		if (ring->dma64) {
			tmp = le32_to_cpu(desc->dma64.control0);
			is_last_fragment = !!(tmp & BCM43xx_DMA64_DCTL0_FRAMEEND);
		} else {
			tmp = le32_to_cpu(desc->dma32.control);
			is_last_fragment = !!(tmp & BCM43xx_DMA32_DCTL_FRAMEEND);
		}
		unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
		free_descriptor_buffer(ring, meta, 1);
		/* Everything belonging to the slot is unmapped
		 * and freed, so we can return it.
		 */
		return_slot(ring, slot);

		if (is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	bcm->stats.last_tx = jiffies;
}
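/*
 * A frame may occupy several consecutive descriptors
 * (FRAMESTART ... FRAMEEND), so the completion handler above walks
 * forward from the cookie's slot, unmapping and returning each one,
 * until it hits the descriptor with the FRAMEEND bit set.
 */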
static void dma_rx(struct bcm43xx_dmaring *ring,
		   int *slot)
{
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	struct bcm43xx_rxhdr *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
		struct bcm43xx_xmitstatus stat;
		int i = 0;

		stat.cookie = le16_to_cpu(hw->cookie);
		while (stat.cookie == 0) {
			if (unlikely(++i >= 10000)) {
				assert(0);
				break;
			}
			udelay(2);
			barrier();
			stat.cookie = le16_to_cpu(hw->cookie);
		}
		stat.flags = hw->flags;
		stat.cnt1 = hw->cnt1;
		stat.cnt2 = hw->cnt2;
		stat.seq = le16_to_cpu(hw->seq);
		stat.unknown = le16_to_cpu(hw->unknown);

		bcm43xx_debugfs_log_txstat(ring->bcm, &stat);
		bcm43xx_dma_handle_xmitstatus(ring->bcm, &stat);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);

		return;
	}
	rxhdr = (struct bcm43xx_rxhdr *)skb->data;
	len = le16_to_cpu(rxhdr->frame_length);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_length);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		printkl(KERN_ERR PFX "DMA RX buffer too small "
				     "(len: %u, buffer: %u, nr-dropped: %d)\n",
			len, ring->rx_buffersize, cnt);
		goto drop;
	}
	len -= IEEE80211_FCS_LEN;

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	err = bcm43xx_rx(ring->bcm, skb, rxhdr);
	if (err) {
		dev_kfree_skb_irq(skb);
		goto drop;
	}

drop:
	return;
}

void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
{
	u32 status;
	u16 descptr;
	int slot, current_slot;
#ifdef CONFIG_BCM43XX_DEBUG
	int used_slots = 0;
#endif

	assert(!ring->tx);
	if (ring->dma64) {
		status = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
		descptr = (status & BCM43xx_DMA64_RXSTATDPTR);
		current_slot = descptr / sizeof(struct bcm43xx_dmadesc64);
	} else {
		status = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
		descptr = (status & BCM43xx_DMA32_RXDPTR);
		current_slot = descptr / sizeof(struct bcm43xx_dmadesc32);
	}
	assert(current_slot >= 0 && current_slot < ring->nr_slots);

	slot = ring->current_slot;
	for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
#ifdef CONFIG_BCM43XX_DEBUG
		if (++used_slots > ring->max_used_slots)
			ring->max_used_slots = used_slots;
#endif
	}
	if (ring->dma64) {
		bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
				  (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
	} else {
		bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
				  (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
	}
	ring->current_slot = slot;
}

void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
{
	assert(ring->tx);
	bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
	if (ring->dma64) {
		bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
				  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
				  | BCM43xx_DMA64_TXSUSPEND);
	} else {
		bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
				  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
				  | BCM43xx_DMA32_TXSUSPEND);
	}
}

void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
{
	assert(ring->tx);
	if (ring->dma64) {
		bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
				  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
				  & ~BCM43xx_DMA64_TXSUSPEND);
	} else {
		bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
				  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
				  & ~BCM43xx_DMA32_TXSUSPEND);
	}
	bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
}