/*

  Broadcom B43legacy wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43legacy.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/dst.h>

/* 32bit DMA ops. */
static
struct b43legacy_dmadesc_generic *op32_idx2desc(
					struct b43legacy_dmaring *ring,
					int slot,
					struct b43legacy_dmadesc_meta **meta)
{
	struct b43legacy_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43legacy_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
				 struct b43legacy_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43legacy_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
		  >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = (bufsize - ring->frameoffset)
	      & B43legacy_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43legacy_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43legacy_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43legacy_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43legacy_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
	       & B43legacy_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}
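
/* Illustrative example (editorial note, not driver logic): for a ring with
 * frameoffset 0 and a 2048-byte buffer placed in the last slot with start,
 * end and irq all set, the control word above works out to the byte count
 * (2048) OR'd with the DTABLEEND, FRAMESTART, FRAMEEND and IRQ flag bits,
 * plus the displaced high address bits shifted into the ADDREXT field.
 * The exact flag bit positions are defined in dma.h.
 */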

static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static void op32_tx_suspend(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    | B43legacy_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    & ~B43legacy_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
{
	u32 val;

	val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
	val &= B43legacy_DMA32_RXDPTR;

	return (val / sizeof(struct b43legacy_dmadesc32));
}

static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
				    int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static const struct b43legacy_dma_ops dma32_ops = {
	.idx2desc		= op32_idx2desc,
	.fill_descriptor	= op32_fill_descriptor,
	.poke_tx		= op32_poke_tx,
	.tx_suspend		= op32_tx_suspend,
	.tx_resume		= op32_tx_resume,
	.get_current_rxslot	= op32_get_current_rxslot,
	.set_current_rxslot	= op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43legacy_dmadesc_generic *op64_idx2desc(
					struct b43legacy_dmaring *ring,
					int slot,
					struct b43legacy_dmadesc_meta **meta)
{
	struct b43legacy_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43legacy_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43legacy_dmaring *ring,
				 struct b43legacy_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43legacy_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0;
	u32 ctl1 = 0;
	u32 addrlo;
	u32 addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64)dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64)dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
		  >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= ssb_dma_translation(ring->dev->dev);
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43legacy_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43legacy_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43legacy_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43legacy_DMA64_DCTL0_IRQ;
	ctl1 |= (bufsize - ring->frameoffset)
		& B43legacy_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43legacy_DMA64_DCTL1_ADDREXT_SHIFT)
		& B43legacy_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}
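
/* Illustrative note (editorial reading of the two fill_descriptor
 * implementations above): the bits covered by SSB_DMA_TRANSLATION_MASK in a
 * bus address are not handed to the device verbatim -- they are replaced by
 * the core's routing value from ssb_dma_translation(), while the displaced
 * original bits travel separately in the descriptor's ADDREXT field. The
 * 32-bit engine applies this to the whole address, the 64-bit engine to the
 * high dword only.
 */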

static void op64_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA64_TXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc64)));
}

static void op64_tx_suspend(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL)
			    | B43legacy_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL)
			    & ~B43legacy_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43legacy_dmaring *ring)
{
	u32 val;

	val = b43legacy_dma_read(ring, B43legacy_DMA64_RXSTATUS);
	val &= B43legacy_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43legacy_dmadesc64));
}

static void op64_set_current_rxslot(struct b43legacy_dmaring *ring,
				    int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc64)));
}

static const struct b43legacy_dma_ops dma64_ops = {
	.idx2desc		= op64_idx2desc,
	.fill_descriptor	= op64_fill_descriptor,
	.poke_tx		= op64_poke_tx,
	.tx_suspend		= op64_tx_suspend,
	.tx_resume		= op64_tx_resume,
	.get_current_rxslot	= op64_get_current_rxslot,
	.set_current_rxslot	= op64_set_current_rxslot,
};


static inline int free_slots(struct b43legacy_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43LEGACY_DEBUG
static void update_max_used_slots(struct b43legacy_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
		b43legacydbg(ring->dev->wl,
			     "max_used_slots increased to %d on %s ring %d\n",
			     ring->max_used_slots,
			     ring->tx ? "TX" : "RX",
			     ring->index);
}
#else
static inline
void update_max_used_slots(struct b43legacy_dmaring *ring,
			   int current_used_slots)
{ }
#endif /* DEBUG */

/* Request a slot for usage. */
static inline
int request_slot(struct b43legacy_dmaring *ring)
{
	int slot;

	B43legacy_WARN_ON(!ring->tx);
	B43legacy_WARN_ON(ring->stopped);
	B43legacy_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

/* Mac80211-queue to b43legacy-ring mapping */
static struct b43legacy_dmaring *priority_to_txring(
						struct b43legacy_wldev *dev,
						int queue_priority)
{
	struct b43legacy_dmaring *ring;

/*FIXME: For now we always run on TX-ring-1 */
return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		B43legacy_WARN_ON(1);
		/* fallthrough */
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	case 4:
		ring = dev->dma.tx_ring4;
		break;
	case 5:
		ring = dev->dma.tx_ring5;
		break;
	}

	return ring;
}

/* Bcm4301-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct b43legacy_dmaring *ring)
{
	static const u8 idx_to_prio[] =
		{ 3, 2, 1, 0, 4, 5, };

/*FIXME: have only one queue, for now */
return 0;

	return idx_to_prio[ring->index];
}
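
/* Editorial note: with the FIXME early-returns above in place, every
 * mac80211 queue currently maps to tx_ring1 and back to priority 0. The
 * unreachable switch and the idx_to_prio[] table describe the intended
 * final mapping (queue priority 0 -> tx_ring3 ... priority 5 -> tx_ring5,
 * and its inverse).
 */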

static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
					int controller_idx)
{
	static const u16 map64[] = {
		B43legacy_MMIO_DMA64_BASE0,
		B43legacy_MMIO_DMA64_BASE1,
		B43legacy_MMIO_DMA64_BASE2,
		B43legacy_MMIO_DMA64_BASE3,
		B43legacy_MMIO_DMA64_BASE4,
		B43legacy_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43legacy_MMIO_DMA32_BASE0,
		B43legacy_MMIO_DMA32_BASE1,
		B43legacy_MMIO_DMA32_BASE2,
		B43legacy_MMIO_DMA32_BASE3,
		B43legacy_MMIO_DMA32_BASE4,
		B43legacy_MMIO_DMA32_BASE5,
	};

	if (type == B43legacy_DMA_64BIT) {
		B43legacy_WARN_ON(!(controller_idx >= 0 &&
				    controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43legacy_WARN_ON(!(controller_idx >= 0 &&
			    controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;

	if (tx)
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len,
					 DMA_TO_DEVICE);
	else
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len,
					 DMA_FROM_DEVICE);

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43legacy_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx)
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len,
				 DMA_TO_DEVICE);
	else
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len,
				 DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43legacy_dmaring *ring,
			    struct b43legacy_dmadesc_meta *meta,
			    int irq_context)
{
	if (meta->skb) {
		if (irq_context)
			dev_kfree_skb_irq(meta->skb);
		else
			dev_kfree_skb(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
	/* GFP flags must match the flags in free_ringmemory()! */
	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    B43legacy_DMA_RINGMEMSIZE,
					    &(ring->dmabase),
					    GFP_KERNEL);
	if (!ring->descbase) {
		b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
			     " failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43legacy_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct b43legacy_dmaring *ring)
{
	dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}
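
/* Illustrative note: descbase is the CPU-side (kernel virtual) view of the
 * descriptor ring and dmabase the device-side bus address of the very same
 * B43legacy_DMA_RINGMEMSIZE-byte coherent region; dmabase is what later gets
 * programmed into the controller's ring registers in dmacontroller_setup().
 */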

/* Reset the RX DMA channel */
static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43legacy_DMA_64BIT) ?
		 B43legacy_DMA64_RXCTL : B43legacy_DMA32_RXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43legacy_DMA_64BIT) ?
			 B43legacy_DMA64_RXSTATUS : B43legacy_DMA32_RXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		if (type == B43legacy_DMA_64BIT) {
			value &= B43legacy_DMA64_RXSTAT;
			if (value == B43legacy_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43legacy_DMA32_RXSTATE;
			if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43legacy_DMA_64BIT) ?
			 B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		if (type == B43legacy_DMA_64BIT) {
			value &= B43legacy_DMA64_TXSTAT;
			if (value == B43legacy_DMA64_TXSTAT_DISABLED ||
			    value == B43legacy_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43legacy_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43legacy_DMA32_TXSTATE;
			if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
			    value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43legacy_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43legacy_DMA_64BIT) ? B43legacy_DMA64_TXCTL :
		 B43legacy_DMA32_TXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43legacy_DMA_64BIT) ?
			 B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		if (type == B43legacy_DMA_64BIT) {
			value &= B43legacy_DMA64_TXSTAT;
			if (value == B43legacy_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43legacy_DMA32_TXSTATE;
			if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
					dma_addr_t addr,
					size_t buffersize,
					bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return 1;

	switch (ring->type) {
	case B43legacy_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43legacy_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43legacy_DMA_64BIT:
		/* Currently we can't have addresses beyond 64 bits in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}
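
/* Worked example (illustrative): on a 30-bit engine, a buffer mapped at bus
 * address 0x3FFFF000 with buffersize 0x2000 ends at 0x40001000, which
 * exceeds 1ULL << 30 (0x40000000); the mapping is unmapped again and
 * rejected, and callers then retry with GFP_DMA or bounce-buffer the data.
 */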

static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
			       struct b43legacy_dmadesc_generic *desc,
			       struct b43legacy_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct b43legacy_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43legacy_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data,
				 ring->rx_buffersize, 0);
	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43legacy_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
{
	int i;
	int err = -ENOMEM;
	struct b43legacy_dmadesc_generic *desc;
	struct b43legacy_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43legacyerr(ring->dev->wl,
				     "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb(); /* all descbuffer setup before next line */
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}
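
/* Illustrative note: clearing rxhdr->frame_len and txstat->cookie in
 * setup_rx_descbuffer() is what lets dma_rx() later poll those fields to
 * detect when the device has actually finished writing into a freshly
 * recycled buffer -- a zero length/cookie means "not filled in yet".
 */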
704 */ 705static int dmacontroller_setup(struct b43legacy_dmaring *ring) 706{ 707 int err = 0; 708 u32 value; 709 u32 addrext; 710 u32 trans = ssb_dma_translation(ring->dev->dev); 711 712 if (ring->tx) { 713 if (ring->type == B43legacy_DMA_64BIT) { 714 u64 ringbase = (u64)(ring->dmabase); 715 716 addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) 717 >> SSB_DMA_TRANSLATION_SHIFT; 718 value = B43legacy_DMA64_TXENABLE; 719 value |= (addrext << B43legacy_DMA64_TXADDREXT_SHIFT) 720 & B43legacy_DMA64_TXADDREXT_MASK; 721 b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL, 722 value); 723 b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, 724 (ringbase & 0xFFFFFFFF)); 725 b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, 726 ((ringbase >> 32) 727 & ~SSB_DMA_TRANSLATION_MASK) 728 | trans); 729 } else { 730 u32 ringbase = (u32)(ring->dmabase); 731 732 addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) 733 >> SSB_DMA_TRANSLATION_SHIFT; 734 value = B43legacy_DMA32_TXENABLE; 735 value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT) 736 & B43legacy_DMA32_TXADDREXT_MASK; 737 b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, 738 value); 739 b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 740 (ringbase & 741 ~SSB_DMA_TRANSLATION_MASK) 742 | trans); 743 } 744 } else { 745 err = alloc_initial_descbuffers(ring); 746 if (err) 747 goto out; 748 if (ring->type == B43legacy_DMA_64BIT) { 749 u64 ringbase = (u64)(ring->dmabase); 750 751 addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) 752 >> SSB_DMA_TRANSLATION_SHIFT; 753 value = (ring->frameoffset << 754 B43legacy_DMA64_RXFROFF_SHIFT); 755 value |= B43legacy_DMA64_RXENABLE; 756 value |= (addrext << B43legacy_DMA64_RXADDREXT_SHIFT) 757 & B43legacy_DMA64_RXADDREXT_MASK; 758 b43legacy_dma_write(ring, B43legacy_DMA64_RXCTL, 759 value); 760 b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, 761 (ringbase & 0xFFFFFFFF)); 762 b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, 763 ((ringbase >> 32) & 764 ~SSB_DMA_TRANSLATION_MASK) | 765 trans); 766 b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX, 767 200); 768 } else { 769 u32 ringbase = (u32)(ring->dmabase); 770 771 addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) 772 >> SSB_DMA_TRANSLATION_SHIFT; 773 value = (ring->frameoffset << 774 B43legacy_DMA32_RXFROFF_SHIFT); 775 value |= B43legacy_DMA32_RXENABLE; 776 value |= (addrext << 777 B43legacy_DMA32_RXADDREXT_SHIFT) 778 & B43legacy_DMA32_RXADDREXT_MASK; 779 b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, 780 value); 781 b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 782 (ringbase & 783 ~SSB_DMA_TRANSLATION_MASK) 784 | trans); 785 b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 786 200); 787 } 788 } 789 790out: 791 return err; 792} 793 794/* Shutdown the DMA controller. 

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
{
	if (ring->tx) {
		b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		if (ring->type == B43legacy_DMA_64BIT) {
			b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, 0);
			b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, 0);
		} else
			b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
	} else {
		b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		if (ring->type == B43legacy_DMA_64BIT) {
			b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, 0);
			b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, 0);
		} else
			b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43legacy_dmaring *ring)
{
	struct b43legacy_dmadesc_generic *desc;
	struct b43legacy_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43legacy_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		free_descriptor_buffer(ring, meta, 0);
	}
}

static u64 supported_dma_mask(struct b43legacy_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43legacy_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_BIT_MASK(64);
	mmio_base = b43legacy_dmacontroller_base(0, 0);
	b43legacy_write32(dev,
			  mmio_base + B43legacy_DMA32_TXCTL,
			  B43legacy_DMA32_TXADDREXT_MASK);
	tmp = b43legacy_read32(dev, mmio_base +
			       B43legacy_DMA32_TXCTL);
	if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}

static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43legacy_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43legacy_DMA_32BIT;
	if (dmamask == DMA_BIT_MASK(64))
		return B43legacy_DMA_64BIT;
	B43legacy_WARN_ON(1);
	return B43legacy_DMA_30BIT;
}
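
/* Probe summary (illustrative): supported_dma_mask() picks the widest engine
 * the core advertises -- the SSB_TMSHIGH_DMA64 bit means a 64-bit engine; if
 * the ADDREXT bits of the 32-bit TXCTL register stick when written back, the
 * engine is 32-bit; otherwise only 30-bit addressing is available.
 */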

/* Main initialization function. */
static
struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
						  int controller_index,
						  int for_tx,
						  enum b43legacy_dmatype type)
{
	struct b43legacy_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;
	ring->type = type;
	ring->dev = dev;

	nr_slots = B43legacy_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43legacy_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
					  sizeof(struct b43legacy_txhdr_fw3),
					  DMA_TO_DEVICE);

		if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
					ring->txhdr_cache,
					sizeof(struct b43legacy_txhdr_fw3),
					DMA_TO_DEVICE);

			if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1))
				goto err_kfree_txhdr_cache;
		}

		dma_unmap_single(dev->dev->dma_dev, dma_test,
				 sizeof(struct b43legacy_txhdr_fw3),
				 DMA_TO_DEVICE);
	}

	ring->nr_slots = nr_slots;
	ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43legacy_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
		} else
			B43legacy_WARN_ON(1);
	}
	spin_lock_init(&ring->lock);
#ifdef CONFIG_B43LEGACY_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}
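
/* Illustrative note: the dma_map_single()/dma_unmap_single() of one txhdr in
 * the function above is a pure probe -- it checks that txhdr_cache lies
 * within the engine's addressable range and, if not, reallocates the whole
 * cache with GFP_DMA before any real TX header is ever mapped from it.
 */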

/* Main cleanup function. */
static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
{
	if (!ring)
		return;

	b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
		     " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
		     (ring->tx) ? "TX" : "RX", ring->max_used_slots,
		     ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma;

	if (b43legacy_using_pio(dev))
		return;
	dma = &dev->dma;

	b43legacy_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = 0;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask(dev->dev->dma_dev, mask);
		if (!err) {
			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
			if (!err)
				break;
		}
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = 1;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = 1;
			continue;
		}
		b43legacyerr(dev->wl, "The machine/kernel does not support "
			     "the required %u-bit DMA mask\n",
			     (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43legacyinfo(dev->wl, "DMA mask fallback from %u-bit to %u-"
			      "bit\n",
			      (unsigned int)dma_mask_to_engine_type(orig_mask),
			      (unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}
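
/* Fallback chain (illustrative): b43legacy_dma_set_mask() walks
 * 64-bit -> 32-bit -> 30-bit. E.g. if the platform rejects DMA_BIT_MASK(64),
 * the 32-bit mask is tried next and, on success, an info message records the
 * downgrade; only when even DMA_BIT_MASK(30) fails does it give up with
 * -EOPNOTSUPP.
 */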

int b43legacy_dma_init(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring;
	int err;
	u64 dmamask;
	enum b43legacy_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43legacy_dma_set_mask(dev, dmamask);
	if (err) {
#ifdef CONFIG_B43LEGACY_PIO
		b43legacywarn(dev->wl, "DMA for this device not supported. "
			      "Falling back to PIO\n");
		dev->__using_pio = 1;
		return -EAGAIN;
#else
		b43legacyerr(dev->wl, "DMA for this device not supported and "
			     "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 1, type);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43legacy_setup_dmaring(dev, 1, 1, type);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43legacy_setup_dmaring(dev, 2, 1, type);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43legacy_setup_dmaring(dev, 3, 1, type);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43legacy_setup_dmaring(dev, 4, 1, type);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43legacy_setup_dmaring(dev, 5, 1, type);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 0, type);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = b43legacy_setup_dmaring(dev, 3, 0, type);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_rx0:
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}
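
/* Editorial note: rings are created in a fixed order -- TX rings 0-5, then
 * RX ring 0, and RX ring 3 only on core revisions < 5 (that ring carries
 * hardware TX status there; see the ring->index == 3 branch in dma_rx()) --
 * and the error path unwinds them in exactly the reverse order.
 */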

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43legacy_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
				       u16 cookie, int *slot)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}
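
/* Worked example (illustrative): for tx_ring1, slot 12, generate_cookie()
 * yields 0xB000 | 12 = 0xB00C; parse_cookie(0xB00C) masks with 0xF000 to
 * find tx_ring1 and with 0x0FFF to recover slot 12. The two functions are
 * exact inverses, and 0x0000 never occurs because it is reserved as a
 * special value in the RX path.
 */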

static int dma_tx_fragment(struct b43legacy_dmaring *ring,
			   struct sk_buff **in_skb)
{
	struct sk_buff *skb = *in_skb;
	const struct b43legacy_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43legacy_dmadesc_generic *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET  2
	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * sizeof(
			       struct b43legacy_txhdr_fw3)]);
	err = b43legacy_generate_txhdr(ring->dev, header,
				       skb->data, skb->len, info,
				       generate_cookie(ring, slot));
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   sizeof(struct b43legacy_txhdr_fw3), 1);
	if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
		bounce_skb->dev = skb->dev;
		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
		info = IEEE80211_SKB_CB(bounce_skb);

		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		*in_skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr,
			     skb->len, 0, 1, 1);

	wmb(); /* previous stuff MUST be done */
	/* Now transfer the whole frame. */
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct b43legacy_txhdr_fw3), 1);
	return err;
}

static inline
int should_inject_overflow(struct b43legacy_dmaring *ring)
{
#ifdef CONFIG_B43LEGACY_DEBUG
	if (unlikely(b43legacy_debug(ring->dev,
				     B43legacy_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43legacydbg(ring->dev->wl,
				     "Injecting TX ring overflow on "
				     "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43LEGACY_DEBUG */
	return 0;
}
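
/* Illustrative note: every frame costs SLOTS_PER_PACKET (2) ring slots --
 * one descriptor for the 802.11 TX header from txhdr_cache and one for the
 * skb payload -- which is why b43legacy_dma_tx() below stops the mac80211
 * queue as soon as fewer than 2 slots remain free.
 */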

int b43legacy_dma_tx(struct b43legacy_wldev *dev,
		     struct sk_buff *skb)
{
	struct b43legacy_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	unsigned long flags;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
	spin_lock_irqsave(&ring->lock, flags);
	B43legacy_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacyerr(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out_unlock;
	}

	if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43legacyerr(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out_unlock;
	}

	/* dma_tx_fragment might reallocate the skb, so invalidate pointers
	 * pointing into the skb data or cb now. */
	hdr = NULL;
	info = NULL;
	err = dma_tx_fragment(ring, &skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out_unlock;
	}
	if (unlikely(err)) {
		b43legacyerr(dev->wl, "DMA tx mapping failure\n");
		goto out_unlock;
	}
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 1;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Stopped TX ring %d\n",
				     ring->index);
	}
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return err;
}

void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
				   const struct b43legacy_txstatus *status)
{
	const struct b43legacy_dma_ops *ops;
	struct b43legacy_dmaring *ring;
	struct b43legacy_dmadesc_generic *desc;
	struct b43legacy_dmadesc_meta *meta;
	int retry_limit;
	int slot;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43legacy_WARN_ON(!irqs_disabled());
	spin_lock(&ring->lock);

	B43legacy_WARN_ON(!ring->tx);
	ops = ring->ops;
	while (1) {
		B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		desc = ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 sizeof(struct b43legacy_txhdr_fw3),
					 1);

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;
			BUG_ON(!meta->skb);
			info = IEEE80211_SKB_CB(meta->skb);

			/* Preserve the configured retry limit before clearing
			 * the status. The xmit function has overwritten the
			 * rate-control value with the actual retry count
			 * performed by the hardware. */
			retry_limit = info->status.rates[0].count;
			ieee80211_tx_info_clear_status(info);

			if (status->acked)
				info->flags |= IEEE80211_TX_STAT_ACK;

			if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
				/*
				 * If the short retries (RTS, not data frame) have exceeded
				 * the limit, the hw will not have tried the selected rate,
				 * but will have used the fallback rate instead.
				 * Don't let the rate control count attempts for the selected
				 * rate in this case, otherwise the statistics will be off.
				 */
				info->status.rates[0].count = 0;
				info->status.rates[1].count = status->frame_count;
			} else {
				if (status->frame_count > retry_limit) {
					info->status.rates[0].count = retry_limit;
					info->status.rates[1].count = status->frame_count -
							retry_limit;

				} else {
					info->status.rates[0].count = status->frame_count;
					info->status.rates[1].idx = -1;
				}
			}

			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43legacy_WARN_ON(meta->skb != NULL);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 0;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Woke up TX ring %d\n",
				     ring->index);
	}

	spin_unlock(&ring->lock);
}

static void dma_rx(struct b43legacy_dmaring *ring,
		   int *slot)
{
	const struct b43legacy_dma_ops *ops = ring->ops;
	struct b43legacy_dmadesc_generic *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct b43legacy_hwtxstatus *hw =
				(struct b43legacy_hwtxstatus *)skb->data;
		int i = 0;

		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(2);
			barrier();
		}
		b43legacy_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr,
					   ring->rx_buffersize);

		return;
	}
	rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
			     "(len: %u, buffer: %u, nr-dropped: %d)\n",
			     len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
			     " failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43legacy_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
{
	const struct b43legacy_dma_ops *ops = ring->ops;
	int slot;
	int current_slot;
	int used_slots = 0;

	B43legacy_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
			    ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43legacy_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43legacy_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
{
	b43legacy_power_saving_ctl_bits(dev, -1, 1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
{
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
	b43legacy_power_saving_ctl_bits(dev, -1, -1);
}