/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/12.0/sys/dev/ath/if_ath_rx_edma.c 337573 2018-08-10 13:38:23Z kevans $");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>	/* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_athdfs.h>
#include <dev/ath/if_ath_descdma.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_rx_edma.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * some general macros
 *
 * These advance/retreat an index within a power-of-two sized ring
 * (_sz MUST be a power of two for the mask to wrap correctly.)
 */
#define	INCR(_l, _sz)		(_l) ++; (_l) &= ((_sz) - 1)
#define	DECR(_l, _sz)		(_l) --; (_l) &= ((_sz) - 1)

MALLOC_DECLARE(M_ATHDEV);

/*
 * XXX TODO:
 *
 * + Make sure the FIFO is correctly flushed and reinitialised
 *   through a reset;
 * + Verify multi-descriptor frames work!
 * + There's a "memory use after free" which needs to be tracked down
 *   and fixed ASAP.  I've seen this in the legacy path too, so it
 *   may be a generic RX path issue.
 */

/*
 * XXX shuffle the function orders so these pre-declarations aren't
 * required!
 */
static	int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype,
	    int nbufs);
static	int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype);
static	void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf);
static	void ath_edma_recv_proc_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);
static	int ath_edma_recv_proc_deferred_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);

/*
 * Stop RX on the given hardware: disable PCU receive, clear the RX
 * filter, attempt to stop RX DMA and free any per-queue pending
 * (partially reassembled) mbufs.
 *
 * NOTE(review): the 'dodelay' argument is only logged; DELAY(3000)
 * is done unconditionally below.
 */
static void
ath_edma_stoprecv(struct ath_softc *sc, int dodelay)
{
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called, dodelay=%d\n",
	    __func__, dodelay);

	ATH_RX_LOCK(sc);

	ath_hal_stoppcurecv(ah);
	ath_hal_setrxfilter(ah, 0);

	/*
	 * Only mark the RX path stopped if the HAL reports RX DMA
	 * actually stopped; ath_edma_startrecv() keys off this (and
	 * sc_rx_resetted) to decide whether to re-push the FIFO
	 * contents to the hardware.
	 */
	if (ath_hal_stopdmarecv(ah) == AH_TRUE)
		sc->sc_rx_stopped = 1;

	/*
	 * Give the various bus FIFOs (not EDMA descriptor FIFO)
	 * time to finish flushing out data.
	 */
	DELAY(3000);

	/* Flush RX pending for each queue */
	/* XXX should generic-ify this */
	if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
	}

	if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
	}
	ATH_RX_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: done\n", __func__);
}

/*
 * Re-initialise the FIFO given the current buffer contents.
 * Specifically, walk from head -> tail, pushing the FIFO contents
 * back into the FIFO.
 *
 * After the walk, 'i' must equal m_fifo_tail or the software FIFO
 * book-keeping is inconsistent (complained about, not corrected).
 */
static void
ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i, j;

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called\n", __func__);

	ATH_RX_LOCK_ASSERT(sc);

	i = re->m_fifo_head;
	for (j = 0; j < re->m_fifo_depth; j++) {
		bf = re->m_fifo[i];
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: pos=%i, addr=0x%jx\n",
		    __func__,
		    qtype,
		    i,
		    (uintmax_t)bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
		INCR(i, re->m_fifolen);
	}

	/* Ensure this worked out right */
	if (i != re->m_fifo_tail) {
		device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n",
		    __func__,
		    i,
		    re->m_fifo_tail);
	}
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: done\n", __func__);
}

/*
 * Start receive.
 *
 * Re-enables the RX FIFO, re-pushes any buffers still tracked in the
 * software FIFO back to the hardware (if we were reset/stopped),
 * tops up both HP and LP FIFOs, and re-enables PCU receive.
 * Always returns 0.
 */
static int
ath_edma_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_EDMA_RX,
	    "%s: called; resetted=%d, stopped=%d\n", __func__,
	    sc->sc_rx_resetted, sc->sc_rx_stopped);

	ATH_RX_LOCK(sc);

	/*
	 * Sanity check - are we being called whilst RX
	 * isn't stopped?  If so, we may end up pushing
	 * too many entries into the RX FIFO and
	 * badness occurs.
	 */

	/* Enable RX FIFO */
	ath_hal_rxena(ah);

	/*
	 * In theory the hardware has been initialised, right?
	 *
	 * Re-push the existing software FIFO contents so the hardware
	 * descriptor FIFO matches what we're tracking; otherwise warn,
	 * since being called without a reset/stop is unexpected.
	 */
	if (sc->sc_rx_resetted == 1 || sc->sc_rx_stopped == 1) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing HP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP);
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing LP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP);
		sc->sc_rx_resetted = 0;
	} else {
		device_printf(sc->sc_dev,
		    "%s: called without resetting chip? "
		    "resetted=%d, stopped=%d\n",
		    __func__,
		    sc->sc_rx_resetted,
		    sc->sc_rx_stopped);
	}

	/* Add up to m_fifolen entries in each queue */
	/*
	 * These must occur after the above write so the FIFO buffers
	 * are pushed/tracked in the same order as the hardware will
	 * process them.
	 *
	 * XXX TODO: is this really necessary?  We should've stopped
	 * the hardware already and reinitialised it, so it's a no-op.
	 */
	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP,
	    sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen);

	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP,
	    sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen);

	ath_mode_init(sc);
	ath_hal_startpcurecv(ah, (!! sc->sc_scanning));

	/*
	 * We're now doing RX DMA!
	 */
	sc->sc_rx_stopped = 0;

	ATH_RX_UNLOCK(sc);
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: ready\n", __func__);

	return (0);
}

/*
 * Process the given RX FIFO into the deferred list (keeping the
 * chip awake while doing so), then kick the RX taskqueue to do
 * the deferred (net80211-facing) processing.
 */
static void
ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; qtype=%d, dosched=%d\n",
	    __func__, qtype, dosched);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, qtype, dosched);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	/* XXX TODO: methodize */
	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: done\n", __func__);
}

/*
 * As above, but for both the HP and LP RX FIFOs in one call.
 */
static void
ath_edma_recv_sched(struct ath_softc *sc, int dosched)
{

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; dosched=%d\n",
	    __func__, dosched);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	/* XXX TODO: methodize */
	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: done\n", __func__);
}

/*
 * Flush all completed RX frames out of the hardware FIFOs and
 * push them through the deferred-queue processing path.
 *
 * The RX task is cancelled/drained first so it doesn't run
 * concurrently with this flush; dosched=0 in the calls below means
 * no fresh buffers are pushed to the hardware and no rescheduling
 * happens.
 */
static void
ath_edma_recv_flush(struct ath_softc *sc)
{

	DPRINTF(sc, ATH_DEBUG_RECV | ATH_DEBUG_EDMA_RX, "%s: called\n", __func__);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	// XXX TODO: methodize; make it an RX stop/block
	while (taskqueue_cancel(sc->sc_tq, &sc->sc_rxtask, NULL) != 0) {
		taskqueue_drain(sc->sc_tq, &sc->sc_rxtask);
	}

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	/*
	 * Flush any active frames from FIFO -> deferred list
	 */
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0);

	/*
	 * Process what's in the deferred queue
	 */
	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_RECV | ATH_DEBUG_EDMA_RX, "%s: done\n", __func__);
}

/*
 * Process frames from the current queue into the deferred queue.
 *
 * Walks the software FIFO from the head, syncing and processing each
 * completed descriptor (stopping at the first HAL_EINPROGRESS one),
 * moving completed bufs onto sc_rx_rxlist[qtype].  If dosched is set,
 * the FIFO is topped up with fresh buffers afterwards.
 */
static void
ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_rx_status *rs;
	struct ath_desc *ds;
	struct ath_buf *bf;
	struct mbuf *m;
	struct ath_hal *ah = sc->sc_ah;
	uint64_t tsf;
	uint16_t nf;	/* NOTE(review): deferred-queue path uses int16_t for
			 * the noise floor - confirm which is intended. */
	int npkts = 0;

	tsf = ath_hal_gettsf64(ah);
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; qtype=%d, dosched=%d\n", __func__, qtype, dosched);

	ATH_RX_LOCK(sc);

#if 1
	if (sc->sc_rx_resetted == 1) {
		/*
		 * XXX We shouldn't ever be scheduled if
		 * receive has been stopped - so complain
		 * loudly!
		 */
		device_printf(sc->sc_dev,
		    "%s: sc_rx_resetted=1! Bad!\n",
		    __func__);
		ATH_RX_UNLOCK(sc);
		return;
	}
#endif

	do {
		bf = re->m_fifo[re->m_fifo_head];
		/* This shouldn't occur! */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n",
			    __func__,
			    qtype);
			break;
		}
		m = bf->bf_m;
		ds = bf->bf_desc;

		/*
		 * Sync descriptor memory - this also syncs the buffer for us.
		 * EDMA descriptors are in cached memory.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rs = &bf->bf_status.ds_rxstat;
		bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
		    NULL, rs);
		if (bf->bf_rxstatus == HAL_EINPROGRESS)
			break;
#ifdef	ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
#endif /* ATH_DEBUG */
#ifdef	ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
			    sc->sc_rx_statuslen, (char *) ds);
#endif /* ATH_DEBUG_ALQ */

		/*
		 * Completed descriptor.
		 */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: completed!\n", __func__, qtype);
		npkts++;

		/*
		 * We've been synced already, so unmap.
		 */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		/*
		 * Remove the FIFO entry and place it on the completion
		 * queue.
		 */
		re->m_fifo[re->m_fifo_head] = NULL;
		TAILQ_INSERT_TAIL(&sc->sc_rx_rxlist[qtype], bf, bf_list);

		/* Bump the descriptor FIFO stats */
		INCR(re->m_fifo_head, re->m_fifolen);
		re->m_fifo_depth--;
		/* XXX check it doesn't fall below 0 */
	} while (re->m_fifo_depth > 0);

	/* Append some more fresh frames to the FIFO */
	if (dosched)
		ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);

	ATH_RX_UNLOCK(sc);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx proc: npkts=%d\n",
	    npkts);

	return;
}

/*
 * Flush the deferred queue.
 *
 * This destructively flushes the deferred queue - it doesn't
 * call the wireless stack on each mbuf.
 */
static void
ath_edma_flush_deferred_queue(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ATH_RX_LOCK_ASSERT(sc);

	/* Free in one set, inside the lock */
	while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP])) {
		bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP])) {
		bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
}

/*
 * Hand the deferred list for the given queue to the wireless stack.
 *
 * The list is snapshotted (TAILQ_CONCAT) under the RX lock, processed
 * unlocked via ath_rx_pkt(), then the ath_bufs are returned to the
 * free list under the RX lock.  Returns the number of frames
 * ath_rx_pkt() accepted ("good" frames).
 *
 * NOTE(review): 'dosched' is currently unused in this function.
 */
static int
ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	int ngood = 0;
	uint64_t tsf;
	struct ath_buf *bf, *next;
	struct ath_rx_status *rs;
	int16_t nf;
	ath_bufhead rxlist;
	struct mbuf *m;

	TAILQ_INIT(&rxlist);

	nf = ath_hal_getchannoise(sc->sc_ah, sc->sc_curchan);
	/*
	 * XXX TODO: the NF/TSF should be stamped on the bufs themselves,
	 * otherwise we may end up adding in the wrong values if this
	 * is delayed too far..
	 */
	tsf = ath_hal_gettsf64(sc->sc_ah);

	/* Copy the list over */
	ATH_RX_LOCK(sc);
	TAILQ_CONCAT(&rxlist, &sc->sc_rx_rxlist[qtype], bf_list);
	ATH_RX_UNLOCK(sc);

	/* Handle the completed descriptors */
	/*
	 * XXX is this SAFE call needed? The ath_buf entries
	 * aren't modified by ath_rx_pkt, right?
	 */
	TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
		/*
		 * Skip the RX descriptor status - start at the data offset
		 */
		m_adj(bf->bf_m, sc->sc_rx_statuslen);

		/* Handle the frame */

		rs = &bf->bf_status.ds_rxstat;
		m = bf->bf_m;
		bf->bf_m = NULL;	/* mbuf ownership passes to ath_rx_pkt() */
		if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf, m))
			ngood++;
	}

	if (ngood) {
		sc->sc_lastrx = tsf;
	}

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx deferred proc: ngood=%d\n",
	    ngood);

	/* Free in one set, inside the lock */
	ATH_RX_LOCK(sc);
	while (! TAILQ_EMPTY(&rxlist)) {
		bf = TAILQ_FIRST(&rxlist);
		TAILQ_REMOVE(&rxlist, bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	ATH_RX_UNLOCK(sc);

	return (ngood);
}

/*
 * RX taskqueue handler: process both deferred queues (the FIFO ->
 * deferred move was already done by the scheduling paths above),
 * then run fast-frame aging and the DFS tasklet if needed.
 * Skips entirely if a reset is in progress.
 */
static void
ath_edma_recv_tasklet(void *arg, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
#ifdef	IEEE80211_SUPPORT_SUPERG
	struct ieee80211com *ic = &sc->sc_ic;
#endif

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n",
	    __func__,
	    npending);

	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n",
		    __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1);

	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

#ifdef	IEEE80211_SUPPORT_SUPERG
	ieee80211_ff_age_all(ic, 100);
#endif
	if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; done!\n", __func__);
}

/*
 * Allocate an RX mbuf for the given ath_buf and initialise
 * it for EDMA.
 *
 * + Allocate a 4KB mbuf;
 * + Setup the DMA map for the given buffer;
 * + Return that.
 *
 * Returns 0 on success, ENOBUFS if no mbuf could be allocated, or
 * the bus_dmamap_load_mbuf_sg() error on mapping failure (the mbuf
 * is freed in that case).
 */
static int
ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{

	struct mbuf *m;
	int error;
	int len;

	ATH_RX_LOCK_ASSERT(sc);

	m = m_getm(NULL, sc->sc_edma_bufsize, M_NOWAIT, MT_DATA);
	if (! m)
		return (ENOBUFS);		/* XXX ?*/

	/* XXX warn/enforce alignment */

	len = m->m_ext.ext_size;
#if 0
	device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n",
	    __func__,
	    m,
	    len,
	    mtod(m, char *));
#endif

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	/*
	 * Populate ath_buf fields.
	 *
	 * In the EDMA RX layout the descriptor/status block lives at
	 * the front of the receive buffer itself.
	 */
	bf->bf_desc = mtod(m, struct ath_desc *);
	bf->bf_lastds = bf->bf_desc;	/* XXX only really for TX? */
	bf->bf_m = m;

	/*
	 * Zero the descriptor and ensure it makes it out to the
	 * bounce buffer if one is required.
	 *
	 * XXX PREWRITE will copy the whole buffer; we only needed it
	 * to sync the first 32 DWORDS.  Oh well.
	 */
	memset(bf->bf_desc, '\0', sc->sc_rx_statuslen);

	/*
	 * Create DMA mapping.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
	    bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT);

	if (error != 0) {
		device_printf(sc->sc_dev, "%s: failed; error=%d\n",
		    __func__,
		    error);
		m_freem(m);
		return (error);
	}

	/*
	 * Set daddr to the physical mapping page.
	 */
	bf->bf_daddr = bf->bf_segs[0].ds_addr;

	/*
	 * Prepare for the upcoming read.
	 *
	 * We need to both sync some data into the buffer (the zero'ed
	 * descriptor payload) and also prepare for the read that's going
	 * to occur.
	 */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Finish! */
	return (0);
}

/*
 * Allocate a RX buffer.
 *
 * Takes an ath_buf from the free list (sc_rxbuf), attaches a fresh
 * RX mbuf to it and returns it; returns NULL (buf back on the free
 * list) if either step fails.
 */
static struct ath_buf *
ath_edma_rxbuf_alloc(struct ath_softc *sc)
{
	struct ath_buf *bf;
	int error;

	ATH_RX_LOCK_ASSERT(sc);

	/* Allocate buffer */
	bf = TAILQ_FIRST(&sc->sc_rxbuf);
	/* XXX shouldn't happen upon startup? */
	if (bf == NULL) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: nothing on rxbuf?!\n",
		    __func__);
		return (NULL);
	}

	/* Remove it from the free list */
	TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);

	/* Assign RX mbuf to it */
	error = ath_edma_rxbuf_init(sc, bf);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bf=%p, rxbuf alloc failed! error=%d\n",
		    __func__,
		    bf,
		    error);
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
		return (NULL);
	}

	return (bf);
}

/*
 * Return an ath_buf to the free list, unloading/freeing its mbuf
 * first if it still owns one.
 */
static void
ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf)
{

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Only unload the frame if we haven't consumed
	 * the mbuf via ath_rx_pkt().
	 */
	if (bf->bf_m) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}

	/* XXX lock? */
	TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
}

/*
 * Allocate up to 'n' entries and push them onto the hardware FIFO.
 *
 * Return how many entries were successfully pushed onto the
 * FIFO.
 */
static int
ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Allocate buffers until the FIFO is full or nbufs is reached.
	 */
	for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) {
		/* Ensure the FIFO is already blank, complain loudly! */
		if (re->m_fifo[re->m_fifo_tail] != NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: fifo[%d] != NULL (%p)\n",
			    __func__,
			    qtype,
			    re->m_fifo_tail,
			    re->m_fifo[re->m_fifo_tail]);

			/* Free the slot */
			ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]);
			re->m_fifo_depth--;
			/* XXX check it's not < 0 */
			re->m_fifo[re->m_fifo_tail] = NULL;
		}

		bf = ath_edma_rxbuf_alloc(sc);
		/* XXX should ensure the FIFO is not NULL? */
		if (bf == NULL) {
			DPRINTF(sc, ATH_DEBUG_EDMA_RX,
			    "%s: Q%d: alloc failed: i=%d, nbufs=%d?\n",
			    __func__,
			    qtype,
			    i,
			    nbufs);
			break;
		}

		re->m_fifo[re->m_fifo_tail] = bf;

		/* Write to the RX FIFO */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: putrxbuf=%p (0x%jx)\n",
		    __func__,
		    qtype,
		    bf->bf_desc,
		    (uintmax_t) bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);

		re->m_fifo_depth++;
		INCR(re->m_fifo_tail, re->m_fifolen);
	}

	/*
	 * Return how many were allocated.
	 */
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n",
	    __func__,
	    qtype,
	    nbufs,
	    i);
	return (i);
}

/*
 * Free every entry still tracked in the given software FIFO (and any
 * pending partial mbuf), then reset the head/tail/depth book-keeping
 * to empty.  Always returns 0.
 */
static int
ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	for (i = 0; i < re->m_fifolen; i++) {
		if (re->m_fifo[i] != NULL) {
#ifdef	ATH_DEBUG
			struct ath_buf *bf = re->m_fifo[i];

			if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
				ath_printrxbuf(sc, bf, 0, HAL_OK);
#endif
			ath_edma_rxbuf_free(sc, re->m_fifo[i]);
			re->m_fifo[i] = NULL;
			re->m_fifo_depth--;
		}
	}

	if (re->m_rxpending != NULL) {
		m_freem(re->m_rxpending);
		re->m_rxpending = NULL;
	}
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Setup the initial RX FIFO structure.
 *
 * Queries the HAL for the hardware FIFO depth and allocates the
 * software ath_buf pointer array to match.  Returns 0 on success,
 * -EINVAL if the HAL query fails, -ENOMEM on allocation failure.
 */
static int
ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	ATH_RX_LOCK_ASSERT(sc);

	if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
		device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
		    __func__,
		    qtype);
		return (-EINVAL);
	}

	if (bootverbose)
		device_printf(sc->sc_dev,
		    "%s: type=%d, FIFO depth = %d entries\n",
		    __func__,
		    qtype,
		    re->m_fifolen);

	/* Allocate ath_buf FIFO array, pre-zero'ed */
	re->m_fifo = malloc(sizeof(struct ath_buf *) * re->m_fifolen,
	    M_ATHDEV,
	    M_NOWAIT | M_ZERO);
	if (re->m_fifo == NULL) {
		device_printf(sc->sc_dev, "%s: malloc failed\n",
		    __func__);
		return (-ENOMEM);
	}

	/*
	 * Set initial "empty" state.
	 */
	re->m_rxpending = NULL;
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Free the software FIFO pointer array for the given queue.
 * The entries themselves must already have been flushed
 * (see ath_edma_rxfifo_flush()).  Always returns 0.
 */
static int
ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	device_printf(sc->sc_dev, "%s: called; qtype=%d\n",
	    __func__,
	    qtype);

	free(re->m_fifo, M_ATHDEV);

	return (0);
}

/*
 * Set up the RX DMA descriptor area and both (HP/LP) software FIFOs.
 *
 * NOTE(review): the ath_edma_setup_rxfifo() return values are
 * deliberately ignored here (cast to void).
 */
static int
ath_edma_dma_rxsetup(struct ath_softc *sc)
{
	int error;

	/*
	 * Create RX DMA tag and buffers.
	 */
	error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
	    "rx", ath_rxbuf, sc->sc_rx_statuslen);
	if (error != 0)
		return error;

	ATH_RX_LOCK(sc);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	return (0);
}

/*
 * Tear down RX state: flush the deferred queues, flush and free
 * both software FIFOs, then release the descriptor DMA memory.
 * Always returns 0.
 */
static int
ath_edma_dma_rxteardown(struct ath_softc *sc)
{

	ATH_RX_LOCK(sc);
	ath_edma_flush_deferred_queue(sc);
	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);

	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	/* Free RX ath_buf */
	/* Free RX DMA tag */
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);

	return (0);
}

/*
 * Attach-time hook: configure EDMA RX buffer sizing (4KB buffers,
 * HAL-provided status length) and install the EDMA implementations
 * of the sc_rx method table.
 */
void
ath_recv_setup_edma(struct ath_softc *sc)
{

	/* Set buffer size to 4k */
	sc->sc_edma_bufsize = 4096;

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen);

	/* Configure the hardware with the RX buffer size */
	(void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
	    sc->sc_rx_statuslen);

	if (bootverbose) {
		device_printf(sc->sc_dev, "RX status length: %d\n",
		    sc->sc_rx_statuslen);
		device_printf(sc->sc_dev, "RX buffer size: %d\n",
		    sc->sc_edma_bufsize);
	}

	sc->sc_rx.recv_stop = ath_edma_stoprecv;
	sc->sc_rx.recv_start = ath_edma_startrecv;
	sc->sc_rx.recv_flush = ath_edma_recv_flush;
	sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet;
	sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init;

	sc->sc_rx.recv_setup = ath_edma_dma_rxsetup;
	sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown;

	sc->sc_rx.recv_sched = ath_edma_recv_sched;
	sc->sc_rx.recv_sched_queue = ath_edma_recv_sched_queue;
}