if_ath_tx.c revision 248988
/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath_tx.c 248988 2013-04-01 20:57:13Z adrian $");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#include <net80211/ieee80211_ht.h>

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_tx_ht.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * How many retries to perform in software
 */
#define	SWMAX_RETRIES		10

/*
 * What queue to throw the non-QoS TID traffic into
 */
#define	ATH_NONQOS_TID_AC	WME_AC_VO

#if 0
static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
#endif
static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
    int tid);
static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
    int tid);
static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
    struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
    struct ieee80211_node *ni, struct mbuf *m0, int *tid);
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf);

#ifdef ATH_DEBUG_ALQ
void
ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf;
	int i, n;
	const char *ds;

	/* XXX we should skip out early if debugging isn't enabled! */
	bf = bf_first;

	while (bf != NULL) {
		/* XXX should ensure bf_nseg > 0! */
		if (bf->bf_nseg == 0)
			break;
		n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
		for (i = 0, ds = (const char *) bf->bf_desc;
		    i < n;
		    i++, ds += sc->sc_tx_desclen) {
			if_ath_alq_post(&sc->sc_alq,
			    ATH_ALQ_EDMA_TXDESC,
			    sc->sc_tx_desclen,
			    ds);
		}
		bf = bf->bf_next;
	}
}
#endif /* ATH_DEBUG_ALQ */

/*
 * Whether to use the 11n rate scenario functions or not
 */
static inline int
ath_tx_is_11n(struct ath_softc *sc)
{
	return ((sc->sc_ah->ah_magic == 0x20065416) ||
	    (sc->sc_ah->ah_magic == 0x19741014));
}

/*
 * Obtain the current TID from the given frame.
 *
 * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID.)
 * This has implications for which AC/priority the packet is placed
 * in.
 */
static int
ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (! IEEE80211_QOS_HAS_SEQ(wh))
		return IEEE80211_NONQOS_TID;
	else
		return WME_AC_TO_TID(pri);
}
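
/*
 * For reference: net80211's WME_AC_TO_TID maps WME_AC_BE -> TID 0,
 * WME_AC_BK -> TID 1, WME_AC_VI -> TID 5 and WME_AC_VO -> TID 6.
 * So a QoS data frame classified as AC_VO lands in TID 6, whilst a
 * non-QoS frame (eg most management frames) lands in TID 16 and is
 * later forced onto the ATH_NONQOS_TID_AC (VO) queue by
 * ath_tx_getac() below.
 */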

static void
ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;

	wh = mtod(bf->bf_m, struct ieee80211_frame *);
	/* Only update/resync if needed */
	if (bf->bf_state.bfs_isretried == 0) {
		wh->i_fc[1] |= IEEE80211_FC1_RETRY;
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);
	}
	bf->bf_state.bfs_isretried = 1;
	bf->bf_state.bfs_retries++;
}

/*
 * Determine what the correct AC queue for the given frame
 * should be.
 *
 * This code assumes that the TIDs map consistently to
 * the underlying hardware (or software) ath_txq.
 * Since the sender may try to set an AC which is
 * arbitrary, non-QoS TIDs may end up being put on
 * completely different ACs.  There's no way to put a
 * TID into multiple ath_txq's for scheduling, so
 * for now we override the AC/TXQ selection and set
 * non-QoS TID frames into the BE queue.
 *
 * This may be completely incorrect - specifically,
 * some management frames may end up out of order
 * compared to the QoS traffic they're controlling.
 * I'll look into this later.
 */
static int
ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (IEEE80211_QOS_HAS_SEQ(wh))
		return pri;

	return ATH_NONQOS_TID_AC;
}

void
ath_txfrag_cleanup(struct ath_softc *sc,
	ath_bufhead *frags, struct ieee80211_node *ni)
{
	struct ath_buf *bf, *next;

	ATH_TXBUF_LOCK_ASSERT(sc);

	TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
		/* NB: bf assumed clean */
		TAILQ_REMOVE(frags, bf, bf_list);
		ath_returnbuf_head(sc, bf);
		ieee80211_node_decref(ni);
	}
}

/*
 * Setup xmit of a fragmented frame.  Allocate a buffer
 * for each frag and bump the node reference count to
 * reflect the held reference to be setup by ath_tx_start.
 */
int
ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
	struct mbuf *m0, struct ieee80211_node *ni)
{
	struct mbuf *m;
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
		/* XXX non-management? */
		bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
		if (bf == NULL) {	/* out of buffers, cleanup */
			device_printf(sc->sc_dev, "%s: no buffer?\n",
			    __func__);
			ath_txfrag_cleanup(sc, frags, ni);
			break;
		}
		ieee80211_node_incref(ni);
		TAILQ_INSERT_TAIL(frags, bf, bf_list);
	}
	ATH_TXBUF_UNLOCK(sc);

	return !TAILQ_EMPTY(frags);
}
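
/*
 * Note the pairing above: every ieee80211_node_incref() taken in
 * ath_txfrag_setup() is expected to be balanced either by
 * ath_txfrag_cleanup() on the error path, or by the normal
 * completion path once each fragment has been handed to
 * ath_tx_start() (which adopts the held reference).
 */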

/*
 * Reclaim mbuf resources.  For fragmented frames we
 * need to claim each frag chained with m_nextpkt.
 */
void
ath_freetx(struct mbuf *m)
{
	struct mbuf *next;

	do {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m_freem(m);
	} while ((m = next) != NULL);
}

static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
	    bf->bf_segs, &bf->bf_nseg,
	    BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = ATH_MAX_SCATTER + 1;
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ath_freetx(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_MAX_SCATTER) {	/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
		m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER);
		if (m == NULL) {
			ath_freetx(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
		    bf->bf_segs, &bf->bf_nseg,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			ath_freetx(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		ath_freetx(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
	    __func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
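
/*
 * For example: an mbuf chain scattered across more than
 * ATH_MAX_SCATTER segments makes the first load fail with EFBIG;
 * m_collapse() then copies it into (at most) ATH_MAX_SCATTER
 * contiguous clusters and the map is loaded a second time.  Only
 * if that second attempt also fails is the frame dropped and
 * ast_tx_busdma bumped.
 */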

/*
 * Chain together segments+descriptors for a frame - 11n or otherwise.
 *
 * For aggregates, this is called on each frame in the aggregate.
 */
static void
ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
    struct ath_buf *bf, int is_aggr, int is_first_subframe,
    int is_last_subframe)
{
	struct ath_hal *ah = sc->sc_ah;
	char *ds;
	int i, bp, dsp;
	HAL_DMA_ADDR bufAddrList[4];
	uint32_t segLenList[4];
	int numTxMaps = 1;
	int isFirstDesc = 1;

	/*
	 * XXX There's txdma and txdma_mgmt; the descriptor
	 * sizes must match.
	 */
	struct ath_descdma *dd = &sc->sc_txdma;

	/*
	 * Fill in the remainder of the descriptor info.
	 */

	/*
	 * We need the number of TX data pointers in each descriptor.
	 * EDMA and later chips support 4 TX buffers per descriptor;
	 * previous chips just support one.
	 */
	numTxMaps = sc->sc_tx_nmaps;

	/*
	 * For EDMA and later chips ensure the TX map is fully populated
	 * before advancing to the next descriptor.
	 */
	ds = (char *) bf->bf_desc;
	bp = dsp = 0;
	bzero(bufAddrList, sizeof(bufAddrList));
	bzero(segLenList, sizeof(segLenList));
	for (i = 0; i < bf->bf_nseg; i++) {
		bufAddrList[bp] = bf->bf_segs[i].ds_addr;
		segLenList[bp] = bf->bf_segs[i].ds_len;
		bp++;

		/*
		 * Go to the next segment if this isn't the last segment
		 * and there's space in the current TX map.
		 */
		if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
			continue;

		/*
		 * Last segment or we're out of buffer pointers.
		 */
		bp = 0;

		if (i == bf->bf_nseg - 1)
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);
		else
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds,
			    bf->bf_daddr + dd->dd_descsize * (dsp + 1));

		/*
		 * XXX This assumes that bfs_txq is the actual destination
		 * hardware queue at this point.  It may not have been
		 * assigned, it may actually be pointing to the multicast
		 * software TXQ id.  These must be fixed!
		 */
		ath_hal_filltxdesc(ah, (struct ath_desc *) ds
		    , bufAddrList
		    , segLenList
		    , bf->bf_descid		/* XXX desc id */
		    , bf->bf_state.bfs_tx_queue
		    , isFirstDesc		/* first segment */
		    , i == bf->bf_nseg - 1	/* last segment */
		    , (struct ath_desc *) ds0	/* first descriptor */
		);

		/*
		 * Make sure the 11n aggregate fields are cleared.
		 *
		 * XXX TODO: this doesn't need to be called for
		 * aggregate frames, as it'll be called on each
		 * sub-frame.  Since the descriptors are in
		 * non-cacheable memory, this leads to some
		 * rather slow writes on MIPS/ARM platforms.
		 */
		if (ath_tx_is_11n(sc))
			ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);

		/*
		 * If 11n is enabled, set it up as if it's an aggregate
		 * frame.
		 */
		if (is_last_subframe) {
			ath_hal_set11n_aggr_last(sc->sc_ah,
			    (struct ath_desc *) ds);
		} else if (is_aggr) {
			/*
			 * This clears the aggrlen field; so
			 * the caller needs to call set_aggr_first()!
			 *
			 * XXX TODO: don't call this for the first
			 * descriptor in the first frame in an
			 * aggregate!
			 */
			ath_hal_set11n_aggr_middle(sc->sc_ah,
			    (struct ath_desc *) ds,
			    bf->bf_state.bfs_ndelim);
		}
		isFirstDesc = 0;
		bf->bf_lastds = (struct ath_desc *) ds;

		/*
		 * Don't forget to skip to the next descriptor.
		 */
		ds += sc->sc_tx_desclen;
		dsp++;

		/*
		 * .. and don't forget to blank these out!
		 */
		bzero(bufAddrList, sizeof(bufAddrList));
		bzero(segLenList, sizeof(segLenList));
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
}
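
/*
 * Worked example of the loop above: with sc_tx_nmaps == 4 (EDMA)
 * and bf_nseg == 6, descriptor 0 carries segments 0..3 and links to
 * descriptor 1, which carries segments 4..5 and has a NULL link.
 * That matches the descriptor count used by ath_tx_alq_post():
 * n = ((6 - 1) / 4) + 1 = 2.  On pre-EDMA chips (sc_tx_nmaps == 1)
 * each segment simply gets its own linked descriptor.
 */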

/*
 * Set the rate control fields in the given descriptor based on
 * the bf_state fields and node state.
 *
 * The bfs fields should already be set with the relevant rate
 * control information, including whether MRR is to be enabled.
 *
 * Since the FreeBSD HAL currently sets up the first TX rate
 * in ath_hal_setuptxdesc(), this will setup the MRR
 * conditionally for the pre-11n chips, and call ath_buf_set_rate
 * unconditionally for 11n chips.  These require the 11n rate
 * scenario to be set if MCS rates are enabled, so it's easier
 * to just always call it.  The caller can then only set rates 2, 3
 * and 4 if multi-rate retry is needed.
 */
static void
ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf)
{
	struct ath_rc_series *rc = bf->bf_state.bfs_rc;

	/* If mrr is disabled, blank tries 1, 2, 3 */
	if (! bf->bf_state.bfs_ismrr)
		rc[1].tries = rc[2].tries = rc[3].tries = 0;

#if 0
	/*
	 * If NOACK is set, just set ntries=1.
	 */
	else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
		rc[1].tries = rc[2].tries = rc[3].tries = 0;
		rc[0].tries = 1;
	}
#endif

	/*
	 * Always call - that way a retried descriptor will
	 * have the MRR fields overwritten.
	 *
	 * XXX TODO: see if this is really needed - setting up
	 * the first descriptor should set the MRR fields to 0
	 * for us anyway.
	 */
	if (ath_tx_is_11n(sc)) {
		ath_buf_set_rate(sc, ni, bf);
	} else {
		ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
		    , rc[1].ratecode, rc[1].tries
		    , rc[2].ratecode, rc[2].tries
		    , rc[3].ratecode, rc[3].tries
		);
	}
}

/*
 * Setup segments+descriptors for an 11n aggregate.
 * bf_first is the first buffer in the aggregate.
 * The descriptor list must already be linked together using
 * bf->bf_next.
 */
static void
ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf, *bf_prev = NULL;
	struct ath_desc *ds0 = bf_first->bf_desc;

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
	    __func__, bf_first->bf_state.bfs_nframes,
	    bf_first->bf_state.bfs_al);

	bf = bf_first;

	if (bf->bf_state.bfs_txrate0 == 0)
		device_printf(sc->sc_dev, "%s: bf=%p, txrate0=%d\n",
		    __func__, bf, 0);
	if (bf->bf_state.bfs_rc[0].ratecode == 0)
		device_printf(sc->sc_dev, "%s: bf=%p, rix0=%d\n",
		    __func__, bf, 0);

	/*
	 * Setup all descriptors of all subframes - this will
	 * call ath_hal_set11naggrmiddle() on every frame.
	 */
	while (bf != NULL) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
		    "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
		    __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
		    SEQNO(bf->bf_state.bfs_seqno));

		/*
		 * Setup the initial fields for the first descriptor - all
		 * the non-11n specific stuff.
		 */
		ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
		    , bf->bf_state.bfs_pktlen	/* packet length */
		    , bf->bf_state.bfs_hdrlen	/* header length */
		    , bf->bf_state.bfs_atype	/* Atheros packet type */
		    , bf->bf_state.bfs_txpower	/* txpower */
		    , bf->bf_state.bfs_txrate0
		    , bf->bf_state.bfs_try0	/* series 0 rate/tries */
		    , bf->bf_state.bfs_keyix	/* key cache index */
		    , bf->bf_state.bfs_txantenna	/* antenna mode */
		    , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ	/* flags */
		    , bf->bf_state.bfs_ctsrate	/* rts/cts rate */
		    , bf->bf_state.bfs_ctsduration	/* rts/cts duration */
		);

		/*
		 * First descriptor? Setup the rate control and initial
		 * aggregate header information.
		 */
		if (bf == bf_first) {
			/*
			 * setup first desc with rate and aggr info
			 */
			ath_tx_set_ratectrl(sc, bf->bf_node, bf);
		}

		/*
		 * Setup the descriptors for a multi-descriptor frame.
		 * This is both aggregate and non-aggregate aware.
		 */
		ath_tx_chaindesclist(sc, ds0, bf,
		    1, /* is_aggr */
		    !! (bf == bf_first), /* is_first_subframe */
		    !! (bf->bf_next == NULL) /* is_last_subframe */
		);

		if (bf == bf_first) {
			/*
			 * Initialise the first 11n aggregate with the
			 * aggregate length and aggregate enable bits.
			 */
			ath_hal_set11n_aggr_first(sc->sc_ah,
			    ds0,
			    bf->bf_state.bfs_al,
			    bf->bf_state.bfs_ndelim);
		}

		/*
		 * Link the last descriptor of the previous frame
		 * to the beginning descriptor of this frame.
		 */
		if (bf_prev != NULL)
			ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
			    bf->bf_daddr);

		/* Save a copy so we can link the next descriptor in */
		bf_prev = bf;
		bf = bf->bf_next;
	}

	/*
	 * Set the first descriptor bf_lastds field to point to
	 * the last descriptor in the last subframe, that's where
	 * the status update will occur.
	 */
	bf_first->bf_lastds = bf_prev->bf_lastds;

	/*
	 * And bf_last in the first descriptor points to the end of
	 * the aggregate list.
	 */
	bf_first->bf_last = bf_prev;

	/*
	 * For non-AR9300 NICs, which require the rate control
	 * in the final descriptor - let's set that up now.
	 *
	 * This is because the filltxdesc() HAL call doesn't
	 * populate the last segment with rate control information
	 * if firstSeg is also true.  For non-aggregate frames
	 * that is fine, as the first frame already has rate control
	 * info.  But if the last frame in an aggregate has one
	 * descriptor, both firstseg and lastseg will be true and
	 * the rate info isn't copied.
	 *
	 * This is inefficient on MIPS/ARM platforms that have
	 * non-cacheable memory for TX descriptors, but we'll just
	 * make do for now.
	 *
	 * As to why the rate table is stashed in the last descriptor
	 * rather than the first descriptor?  Because proctxdesc()
	 * is called on the final descriptor in an MPDU or A-MPDU -
	 * ie, the one that gets updated by the hardware upon
	 * completion.  That way proctxdesc() doesn't need to know
	 * about the first _and_ last TX descriptor.
	 */
	ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
}
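
/*
 * The resulting A-MPDU layout, roughly:
 *
 *   bf_first -> bf2 -> ... -> bf_last          (bf_next chain)
 *   desc(1a)..desc(1n) -> desc(2a).. -> ...    (settxdesclink chain)
 *
 * bf_first->bf_lastds points at the very last descriptor of the
 * last subframe (where the TX status lands), bf_first->bf_last
 * points at the last ath_buf, and setuplasttxdesc() stashes the
 * rate scenario there for the pre-AR9300 chips.
 */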

/*
 * Hand-off a frame to the multicast TX queue.
 *
 * This is a software TXQ which will be appended to the CAB queue
 * during the beacon setup code.
 *
 * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
 * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated
 * with the actual hardware txq, or all of this will fall apart.
 *
 * XXX It may not be a bad idea to just stuff the QCU ID into bf_state
 * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated
 * correctly.
 */
static void
ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));

	ATH_TXQ_LOCK(txq);
	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
		struct ieee80211_frame *wh;

		/* mark previous frame */
		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		/* link descriptor */
		ath_hal_settxdesclink(sc->sc_ah,
		    bf_last->bf_lastds,
		    bf->bf_daddr);
	}
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_TXQ_UNLOCK(txq);
}
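
/*
 * Setting MORE_DATA on the previously-queued frame matters because
 * these frames go out after the DTIM beacon: a power-save station
 * keeps its receiver on for as long as the "more data" bit is set,
 * so every buffered multicast frame except the final one must carry
 * it.  The dmamap sync is needed since the 802.11 header was just
 * rewritten underneath an already-loaded DMA map.
 */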

/*
 * Hand-off packet to a hardware queue.
 */
static void
ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;

	/*
	 * Insert the frame on the outbound list and pass it on
	 * to the hardware.  Multicast frames buffered for power
	 * save stations (and transmitted from the CAB queue) are
	 * stored on a s/w only queue and loaded on to the CAB
	 * queue in the SWBA handler since frames only go out on
	 * DTIM and to avoid possible races.
	 */
	ATH_TX_LOCK_ASSERT(sc);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
	KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
	    ("ath_tx_handoff_hw called for mcast queue"));

#if 0
	/*
	 * This causes a LOR.  Find out where the PCU lock is being
	 * held whilst the TXQ lock is grabbed - that shouldn't
	 * be occurring.
	 */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt) {
		ATH_PCU_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_RESET,
		    "%s: called with sc_in_reset != 0\n",
		    __func__);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: queued: TXDP[%u] = %p (%p) depth %d\n",
		    __func__, txq->axq_qnum,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    txq->axq_depth);
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		if (bf->bf_state.bfs_aggr)
			txq->axq_aggr_depth++;
		/*
		 * There's no need to update axq_link; the hardware
		 * is in reset and once the reset is complete, any
		 * non-empty queues will simply have DMA restarted.
		 */
		return;
	}
	ATH_PCU_UNLOCK(sc);
#endif

	/* For now, so as not to generate whitespace diffs */
	if (1) {
		ATH_TXQ_LOCK(txq);
#ifdef IEEE80211_SUPPORT_TDMA
		int qbusy;

		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		qbusy = ath_hal_txqenabled(ah, txq->axq_qnum);

		ATH_KTR(sc, ATH_KTR_TX, 4,
		    "ath_tx_handoff: txq=%u, add bf=%p, qbusy=%d, depth=%d",
		    txq->axq_qnum, bf, qbusy, txq->axq_depth);
		if (txq->axq_link == NULL) {
			/*
			 * Be careful writing the address to TXDP.  If
			 * the tx q is enabled then this write will be
			 * ignored.  Normally this is not an issue but
			 * when tdma is in use and the q is beacon gated
			 * this race can occur.  If the q is busy then
			 * defer the work to later--either when another
			 * packet comes along or when we prepare a beacon
			 * frame at SWBA.
			 */
			if (!qbusy) {
				ath_hal_puttxbuf(ah, txq->axq_qnum,
				    bf->bf_daddr);
				txq->axq_flags &= ~ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_XMIT,
				    "%s: TXDP[%u] = %p (%p) lastds=%p depth %d\n",
				    __func__, txq->axq_qnum,
				    (caddr_t)bf->bf_daddr, bf->bf_desc,
				    bf->bf_lastds,
				    txq->axq_depth);
				ATH_KTR(sc, ATH_KTR_TX, 5,
				    "ath_tx_handoff: TXDP[%u] = %p (%p) "
				    "lastds=%p depth %d",
				    txq->axq_qnum,
				    (caddr_t)bf->bf_daddr, bf->bf_desc,
				    bf->bf_lastds,
				    txq->axq_depth);
			} else {
				txq->axq_flags |= ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT,
				    "%s: Q%u busy, defer enable\n", __func__,
				    txq->axq_qnum);
				ATH_KTR(sc, ATH_KTR_TX, 0, "defer enable");
			}
		} else {
			*txq->axq_link = bf->bf_daddr;
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    txq->axq_depth);
			ATH_KTR(sc, ATH_KTR_TX, 5,
			    "ath_tx_handoff: link[%u](%p)=%p (%p) lastds=%p",
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    bf->bf_lastds);

			if ((txq->axq_flags & ATH_TXQ_PUTPENDING) && !qbusy) {
				/*
				 * The q was busy when we previously tried
				 * to write the address of the first buffer
				 * in the chain.  Since it's not busy now
				 * handle this chore.  We are certain the
				 * buffer at the front is the right one since
				 * axq_link is NULL only when the buffer list
				 * is/was empty.
				 */
				ath_hal_puttxbuf(ah, txq->axq_qnum,
				    TAILQ_FIRST(&txq->axq_q)->bf_daddr);
				txq->axq_flags &= ~ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT,
				    "%s: Q%u restarted\n", __func__,
				    txq->axq_qnum);
				ATH_KTR(sc, ATH_KTR_TX, 4,
				    "ath_tx_handoff: txq[%d] restarted, bf=%p "
				    "daddr=%p ds=%p",
				    txq->axq_qnum,
				    bf,
				    (caddr_t)bf->bf_daddr,
				    bf->bf_desc);
			}
		}
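
		/*
		 * The PUTPENDING dance, in short: if the queue was
		 * beacon gated (busy) when we wanted to write TXDP,
		 * the flag is left set; a later handoff - or the
		 * beacon/SWBA restart path - that finds the queue
		 * idle writes the head of axq_q to TXDP and clears
		 * the flag.
		 */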

#else
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		ATH_KTR(sc, ATH_KTR_TX, 3,
		    "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
		    "depth=%d",
		    txq->axq_qnum,
		    bf,
		    txq->axq_depth);
		if (txq->axq_link == NULL) {
			ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: TXDP[%u] = %p (%p) depth %d\n",
			    __func__, txq->axq_qnum,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    txq->axq_depth);
			ATH_KTR(sc, ATH_KTR_TX, 5,
			    "ath_tx_handoff: non-tdma: TXDP[%u] = %p (%p) "
			    "lastds=%p depth %d",
			    txq->axq_qnum,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    bf->bf_lastds,
			    txq->axq_depth);
		} else {
			*txq->axq_link = bf->bf_daddr;
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    txq->axq_depth);
			ATH_KTR(sc, ATH_KTR_TX, 5,
			    "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
			    "lastds=%p",
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    bf->bf_lastds);
		}
#endif /* IEEE80211_SUPPORT_TDMA */
		if (bf->bf_state.bfs_aggr)
			txq->axq_aggr_depth++;
		ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);
		ath_hal_txstart(ah, txq->axq_qnum);
		ATH_TXQ_UNLOCK(txq);
		ATH_KTR(sc, ATH_KTR_TX, 1,
		    "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
	}
}

/*
 * Restart TX DMA for the given TXQ.
 *
 * This must be called whether the queue is empty or not.
 */
static void
ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *bf_last;

	ATH_TXQ_LOCK_ASSERT(txq);

	/* This is always going to be cleared, empty or not */
	txq->axq_flags &= ~ATH_TXQ_PUTPENDING;

	/* XXX make this ATH_TXQ_FIRST */
	bf = TAILQ_FIRST(&txq->axq_q);
	bf_last = ATH_TXQ_LAST(txq, axq_q_s);

	if (bf == NULL)
		return;

	ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
	ath_hal_gettxdesclinkptr(ah, bf_last->bf_lastds, &txq->axq_link);
	ath_hal_txstart(ah, txq->axq_qnum);
}

/*
 * Hand off a packet to the hardware (or mcast queue.)
 *
 * The relevant hardware txq should be locked.
 */
static void
ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

#ifdef ATH_DEBUG_ALQ
	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
		ath_tx_alq_post(sc, bf);
#endif

	if (txq->axq_qnum == ATH_TXQ_SWQ)
		ath_tx_handoff_mcast(sc, txq, bf);
	else
		ath_tx_handoff_hw(sc, txq, bf);
}

static int
ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
    struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
    int *keyix)
{
	DPRINTF(sc, ATH_DEBUG_XMIT,
	    "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n",
	    __func__,
	    *hdrlen,
	    *pktlen,
	    isfrag,
	    iswep,
	    m0);

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame.  The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			return (0);
		}
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index.  When
		 * a s/w mic is done the frame will have had any mic
		 * added to it prior to entry so m0->m_pkthdr.len will
		 * account for it.  Otherwise we need to add it to the
		 * packet length.
		 */
		cip = k->wk_cipher;
		(*hdrlen) += cip->ic_header;
		(*pktlen) += cip->ic_header + cip->ic_trailer;
		/* NB: frags always have any TKIP MIC done in s/w */
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
			(*pktlen) += cip->ic_miclen;
		(*keyix) = k->wk_keyix;
	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
		/*
		 * Use station key cache slot, if assigned.
		 */
		(*keyix) = ni->ni_ucastkey.wk_keyix;
		if ((*keyix) == IEEE80211_KEYIX_NONE)
			(*keyix) = HAL_TXKEYIX_INVALID;
	} else
		(*keyix) = HAL_TXKEYIX_INVALID;

	return (1);
}
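
/*
 * By way of example: a cipher with an 8 byte per-frame header and
 * an 8 byte trailer grows hdrlen by 8 and pktlen by 16, plus
 * ic_miclen when the MIC is done in hardware.  For a s/w MIC (and
 * for all fragments) the MIC bytes are already part of
 * m0->m_pkthdr.len by the time we get here, so they must not be
 * counted twice.
 */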

/*
 * Calculate whether interoperability protection is required for
 * this frame.
 *
 * This requires the rate control information be filled in,
 * as the protection requirement depends upon the current
 * operating mode / PHY.
 */
static void
ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * If 802.11g protection is enabled, determine whether
	 * to use RTS/CTS or just CTS.  Note that this is only
	 * done for OFDM unicast frames.
	 */
	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
	    rt->info[rix].phy == IEEE80211_T_OFDM &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		bf->bf_state.bfs_doprot = 1;
		/* XXX fragments must use CCK rates w/ protection */
		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
			flags |= HAL_TXDESC_RTSENA;
		} else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
			flags |= HAL_TXDESC_CTSENA;
		}
		/*
		 * For frags it would be desirable to use the
		 * highest CCK rate for RTS/CTS.  But stations
		 * farther away may detect it at a lower CCK rate
		 * so use the configured protection rate instead
		 * (for now).
		 */
		sc->sc_stats.ast_tx_protect++;
	}

	/*
	 * If 11n protection is enabled and it's a HT frame,
	 * enable RTS.
	 *
	 * XXX ic_htprotmode or ic_curhtprotmode?
	 * XXX should ic_htprotmode only matter if ic_curhtprotmode
	 * XXX indicates it's not a HT pure environment?
	 */
	if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
	    rt->info[rix].phy == IEEE80211_T_HT &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		flags |= HAL_TXDESC_RTSENA;
		sc->sc_stats.ast_tx_htprotect++;
	}

	bf->bf_state.bfs_txflags = flags;
}
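
/*
 * Concretely: in a mixed 11b/11g BSS, net80211 sets F_USEPROT, so a
 * unicast OFDM data frame gets HAL_TXDESC_RTSENA (or CTSENA for
 * CTS-to-self) and bfs_doprot is latched so that ath_tx_set_rtscts()
 * later swaps in the slower protection rate (sc_protrix) for the
 * RTS/CTS exchange.  CCK frames need no protection since 11b
 * stations can already decode them.
 */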

/*
 * Update the frame duration given the currently selected rate.
 *
 * This also updates the frame duration value, so it will require
 * a DMA flush.
 */
static void
ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	int isfrag = bf->bf_m->m_flags & M_FRAG;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * Calculate duration.  This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
	 */
	if ((flags & HAL_TXDESC_NOACK) == 0 &&
	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
		u_int16_t dur;
		if (shortPreamble)
			dur = rt->info[rix].spAckDuration;
		else
			dur = rt->info[rix].lpAckDuration;
		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
			dur += dur;		/* additional SIFS+ACK */
			KASSERT(bf->bf_m->m_nextpkt != NULL, ("no fragment"));
			/*
			 * Include the size of next fragment so NAV is
			 * updated properly.  The last fragment uses only
			 * the ACK duration
			 *
			 * XXX TODO: ensure that the rate lookup for each
			 * fragment is the same as the rate used by the
			 * first fragment!
			 */
			dur += ath_hal_computetxtime(ah, rt,
			    bf->bf_m->m_nextpkt->m_pkthdr.len,
			    rix, shortPreamble);
		}
		if (isfrag) {
			/*
			 * Force hardware to use computed duration for next
			 * fragment by disabling multi-rate retry which updates
			 * duration based on the multi-rate duration table.
			 */
			bf->bf_state.bfs_ismrr = 0;
			bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
			/* XXX update bfs_rc[0].try? */
		}

		/* Update the duration field itself */
		*(u_int16_t *)wh->i_dur = htole16(dur);
	}
}

static uint8_t
ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
    int cix, int shortPreamble)
{
	uint8_t ctsrate;

	/*
	 * CTS transmit rate is derived from the transmit rate
	 * by looking in the h/w rate table.  We must also factor
	 * in whether or not a short preamble is to be used.
	 */
	/* NB: cix is set above where RTS/CTS is enabled */
	KASSERT(cix != 0xff, ("cix not setup"));
	ctsrate = rt->info[cix].rateCode;

	/* XXX this should only matter for legacy rates */
	if (shortPreamble)
		ctsrate |= rt->info[cix].shortPreamble;

	return (ctsrate);
}

/*
 * Calculate the RTS/CTS duration for legacy frames.
 */
static int
ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
    int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
    int flags)
{
	int ctsduration = 0;

	/* This mustn't be called for HT modes */
	if (rt->info[cix].phy == IEEE80211_T_HT) {
		printf("%s: HT rate where it shouldn't be (0x%x)\n",
		    __func__, rt->info[cix].rateCode);
		return (-1);
	}

	/*
	 * Compute the transmit duration based on the frame
	 * size and the size of an ACK frame.  We call into the
	 * HAL to do the computation since it depends on the
	 * characteristics of the actual PHY being used.
	 *
	 * NB: CTS is assumed the same size as an ACK so we can
	 * use the precalculated ACK durations.
	 */
	if (shortPreamble) {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].spAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, AH_TRUE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].spAckDuration;
	} else {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].lpAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, AH_FALSE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].lpAckDuration;
	}

	return (ctsduration);
}
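
/*
 * In other words, for an RTS-protected frame that expects an ACK:
 *
 *   ctsduration = AckDuration(cix)        (the CTS reply)
 *               + txtime(pktlen, rix)     (the data frame)
 *               + AckDuration(rix)        (the ACK)
 *
 * where each term implicitly includes its leading SIFS.  With
 * CTS-to-self the first term is dropped; with NOACK the last is.
 */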

/*
 * Update the given ath_buf with updated rts/cts setup and duration
 * values.
 *
 * To support rate lookups for each software retry, the rts/cts rate
 * and cts duration must be re-calculated.
 *
 * This function assumes the RTS/CTS flags have been set as needed;
 * mrr has been disabled; and the rate control lookup has been done.
 *
 * XXX TODO: MRR need only be disabled for the pre-11n NICs.
 * XXX The 11n NICs support per-rate RTS/CTS configuration.
 */
static void
ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
{
	uint16_t ctsduration = 0;
	uint8_t ctsrate = 0;
	uint8_t rix = bf->bf_state.bfs_rc[0].rix;
	uint8_t cix = 0;
	const HAL_RATE_TABLE *rt = sc->sc_currates;

	/*
	 * No RTS/CTS enabled? Don't bother.
	 */
	if ((bf->bf_state.bfs_txflags &
	    (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
		/* XXX is this really needed? */
		bf->bf_state.bfs_ctsrate = 0;
		bf->bf_state.bfs_ctsduration = 0;
		return;
	}

	/*
	 * If protection is enabled, use the protection rix control
	 * rate.  Otherwise use the rate0 control rate.
	 */
	if (bf->bf_state.bfs_doprot)
		rix = sc->sc_protrix;
	else
		rix = bf->bf_state.bfs_rc[0].rix;

	/*
	 * If the raw path has hard-coded ctsrate0 to something,
	 * use it.
	 */
	if (bf->bf_state.bfs_ctsrate0 != 0)
		cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
	else
		/* Control rate from above */
		cix = rt->info[rix].controlRate;

	/* Calculate the rtscts rate for the given cix */
	ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
	    bf->bf_state.bfs_shpream);

	/* The 11n chipsets do ctsduration calculations for you */
	if (! ath_tx_is_11n(sc))
		ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
		    bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
		    rt, bf->bf_state.bfs_txflags);

	/* Squirrel away in ath_buf */
	bf->bf_state.bfs_ctsrate = ctsrate;
	bf->bf_state.bfs_ctsduration = ctsduration;

	/*
	 * Must disable multi-rate retry when using RTS/CTS.
	 */
	if (!sc->sc_mrrprot) {
		bf->bf_state.bfs_ismrr = 0;
		bf->bf_state.bfs_try0 =
		    bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY;	/* XXX ew */
	}
}

/*
 * Setup the descriptor chain for a normal or fast-frame
 * frame.
 *
 * XXX TODO: extend to include the destination hardware QCU ID.
 * Make sure that is correct.  Make sure that when being added
 * to the mcastq, the CABQ QCUID is set or things will get a bit
 * odd.
 */
static void
ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_desc *ds = bf->bf_desc;
	struct ath_hal *ah = sc->sc_ah;

	if (bf->bf_state.bfs_txrate0 == 0)
		device_printf(sc->sc_dev, "%s: bf=%p, txrate0=%d\n",
		    __func__, bf, 0);

	ath_hal_setuptxdesc(ah, ds
	    , bf->bf_state.bfs_pktlen		/* packet length */
	    , bf->bf_state.bfs_hdrlen		/* header length */
	    , bf->bf_state.bfs_atype		/* Atheros packet type */
	    , bf->bf_state.bfs_txpower		/* txpower */
	    , bf->bf_state.bfs_txrate0
	    , bf->bf_state.bfs_try0		/* series 0 rate/tries */
	    , bf->bf_state.bfs_keyix		/* key cache index */
	    , bf->bf_state.bfs_txantenna	/* antenna mode */
	    , bf->bf_state.bfs_txflags		/* flags */
	    , bf->bf_state.bfs_ctsrate		/* rts/cts rate */
	    , bf->bf_state.bfs_ctsduration	/* rts/cts duration */
	);

	/*
	 * This will be overridden when the descriptor chain is written.
	 */
	bf->bf_lastds = ds;
	bf->bf_last = bf;

	/* Set rate control and descriptor chain for this frame */
	ath_tx_set_ratectrl(sc, bf->bf_node, bf);
	ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);
}

/*
 * Do a rate lookup.
 *
 * This performs a rate lookup for the given ath_buf only if it's required.
 * Non-data frames and raw frames don't require it.
 *
 * This populates the primary and MRR entries; MRR values are
 * then disabled later on if something requires it (eg RTS/CTS on
 * pre-11n chipsets.)
 *
 * This needs to be done before the RTS/CTS fields are calculated
 * as they may depend upon the rate chosen.
 */
static void
ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf)
{
	uint8_t rate, rix;
	int try0;

	if (! bf->bf_state.bfs_doratelookup)
		return;

	/* Get rid of any previous state */
	bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
	ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
	    bf->bf_state.bfs_pktlen, &rix, &try0, &rate);

	/* In case MRR is disabled, make sure rc[0] is setup correctly */
	bf->bf_state.bfs_rc[0].rix = rix;
	bf->bf_state.bfs_rc[0].ratecode = rate;
	bf->bf_state.bfs_rc[0].tries = try0;

	if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
		ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
		    bf->bf_state.bfs_rc);
	ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));

	sc->sc_txrix = rix;		/* for LED blinking */
	sc->sc_lastdatarix = rix;	/* for fast frames */
	bf->bf_state.bfs_try0 = try0;
	bf->bf_state.bfs_txrate0 = rate;
}

/*
 * Update the CLRDMASK bit in the ath_buf if it needs to be set.
 */
static void
ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);

	ATH_TX_LOCK_ASSERT(sc);

	if (an->clrdmask == 1) {
		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
		an->clrdmask = 0;
	}
}

/*
 * Transmit the given frame to the hardware.
 *
 * The frame must already be setup; rate control must already have
 * been done.
 *
 * XXX since the TXQ lock is being held here (and I dislike holding
 * it for this long when not doing software aggregation), later on
 * break this function into "setup_normal" and "xmit_normal".  The
 * lock only needs to be held for the ath_tx_handoff call.
 */
static void
ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);
	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];

	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * For now, just enable CLRDMASK.  ath_tx_xmit_normal() does
	 * set a completion handler however it doesn't (yet) properly
	 * handle the strict ordering requirements needed for normal,
	 * non-aggregate session frames.
	 *
	 * Once this is implemented, only set CLRDMASK like this for
	 * frames that must go out - eg management/raw frames.
	 */
	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;

	/* Setup the descriptor before handoff */
	ath_tx_do_ratelookup(sc, bf);
	ath_tx_calc_duration(sc, bf);
	ath_tx_calc_protection(sc, bf);
	ath_tx_set_rtscts(sc, bf);
	ath_tx_rate_fill_rcflags(sc, bf);
	ath_tx_setds(sc, bf);

	/* Track per-TID hardware queue depth correctly */
	tid->hwq_depth++;

	/* Assign the completion handler */
	bf->bf_comp = ath_tx_normal_comp;

	/* Hand off to hardware */
	ath_tx_handoff(sc, txq, bf);
}
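
/*
 * The call order above is deliberate: the rate lookup must come
 * first since both the protection decision and the RTS/CTS rate
 * depend upon the chosen rate; ath_tx_set_rtscts() then consumes
 * the flags that ath_tx_calc_protection() set, and ath_tx_setds()
 * comes last as it writes the final values into the descriptor
 * chain.
 */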

/*
 * Do the basic frame setup stuff that's required before the frame
 * is added to a software queue.
 *
 * All frames get mostly the same treatment and it's done once.
 * Retransmits fiddle with things like the rate control setup,
 * setting the retransmit bit in the packet; doing relevant DMA/bus
 * syncing and relinking it (back) into the hardware TX queue.
 *
 * Note that this may cause the mbuf to be reallocated, so
 * m0 may not be valid.
 */
static int
ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
	int error, iswep, ismcast, isfrag, ismrr;
	int keyix, hdrlen, pktlen, try0 = 0;
	u_int8_t rix = 0, txrate = 0;
	struct ath_desc *ds;
	struct ieee80211_frame *wh;
	u_int subtype, flags;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	HAL_BOOL shortPreamble;
	struct ath_node *an;
	u_int pri;

	/*
	 * To ensure that both sequence numbers and the CCMP PN handling
	 * are "correct", make sure that the relevant TID queue is locked.
	 * Otherwise the CCMP PN and seqno may appear out of order, causing
	 * re-ordered frames to have out of order CCMP PNs, resulting
	 * in many, many frame drops.
	 */
	ATH_TX_LOCK_ASSERT(sc);

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	isfrag = m0->m_flags & M_FRAG;
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3);

	/* Handle encryption twiddling if needed */
	if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
	    &pktlen, &keyix)) {
		ath_freetx(m0);
		return EIO;
	}

	/* packet header may have moved, reset our local pointer */
	wh = mtod(m0, struct ieee80211_frame *);

	pktlen += IEEE80211_CRC_LEN;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = ath_tx_dmasetup(sc, bf, m0);
	if (error != 0)
		return error;
	bf->bf_node = ni;		/* NB: held reference */
	m0 = bf->bf_m;			/* NB: may have changed */
	wh = mtod(m0, struct ieee80211_frame *);

	/* setup descriptors */
	ds = bf->bf_desc;
	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));

	/*
	 * NB: the 802.11 layer marks whether or not we should
	 * use short preamble based on the current mode and
	 * negotiated parameters.
	 */
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
		shortPreamble = AH_TRUE;
		sc->sc_stats.ast_tx_shortpre++;
	} else {
		shortPreamble = AH_FALSE;
	}

	an = ATH_NODE(ni);
	//flags = HAL_TXDESC_CLRDMASK;	/* XXX needed for crypto errs */
	flags = 0;
	ismrr = 0;			/* default no multi-rate retry */
	pri = M_WME_GETAC(m0);		/* honor classification */
	/* XXX use txparams instead of fixed values */
	/*
	 * Calculate Atheros packet type from IEEE80211 packet header,
	 * setup for rate calculations, and select h/w transmit queue.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
			atype = HAL_PKT_TYPE_BEACON;
		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			atype = HAL_PKT_TYPE_PROBE_RESP;
		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
			atype = HAL_PKT_TYPE_ATIM;
		else
			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_CTL:
		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_DATA:
		atype = HAL_PKT_TYPE_NORMAL;	/* default */
		/*
		 * Data frames: multicast frames go out at a fixed rate,
		 * EAPOL frames use the mgmt frame rate; otherwise consult
		 * the rate control module for the rate to use.
		 */
		if (ismcast) {
			rix = an->an_mcastrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = 1;
		} else if (m0->m_flags & M_EAPOL) {
			/* XXX? maybe always use long preamble? */
			rix = an->an_mgmtrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = ATH_TXMAXTRY;	/* XXX?too many? */
		} else {
			/*
			 * Do rate lookup on each TX, rather than using
			 * the hard-coded TX information decided here.
			 */
			ismrr = 1;
			bf->bf_state.bfs_doratelookup = 1;
		}
		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
			flags |= HAL_TXDESC_NOACK;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
		    wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		/* XXX statistic */
		ath_freetx(m0);
		return EIO;
	}

	/*
	 * There are two known scenarios where the frame AC doesn't match
	 * what the destination TXQ is.
	 *
	 * + non-QoS frames (eg management?) that the net80211 stack has
	 *   assigned a higher AC to, but since it's a non-QoS TID, it's
	 *   being thrown into TID 16.  TID 16 gets the AC_BE queue.
	 *   It's quite possible that management frames should just be
	 *   direct dispatched to hardware rather than go via the software
	 *   queue; that should be investigated in the future.  There are
	 *   some specific scenarios where this doesn't make sense, mostly
	 *   surrounding ADDBA request/response - hence why that is special
	 *   cased.
	 *
	 * + Multicast frames going into the VAP mcast queue.  That shows up
	 *   as "TXQ 11".
	 *
	 * This driver should eventually support separate TID and TXQ locking,
	 * allowing for arbitrary AC frames to appear on arbitrary software
	 * queues, being queued to the "correct" hardware queue when needed.
	 */
#if 0
	if (txq != sc->sc_ac2q[pri]) {
		device_printf(sc->sc_dev,
		    "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
		    __func__,
		    txq,
		    txq->axq_qnum,
		    pri,
		    sc->sc_ac2q[pri],
		    sc->sc_ac2q[pri]->axq_qnum);
	}
#endif

	/*
	 * Calculate miscellaneous flags.
	 */
	if (ismcast) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
	} else if (pktlen > vap->iv_rtsthreshold &&
	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		sc->sc_stats.ast_tx_rts++;
	}
	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
		sc->sc_stats.ast_tx_noack++;
#ifdef IEEE80211_SUPPORT_TDMA
	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
		DPRINTF(sc, ATH_DEBUG_TDMA,
		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
		sc->sc_stats.ast_tdma_ack++;
		ath_freetx(m0);
		return EIO;
	}
#endif

	/*
	 * Determine if a tx interrupt should be generated for
	 * this descriptor.  We take a tx interrupt to reap
	 * descriptors when the h/w hits an EOL condition or
	 * when the descriptor is specifically marked to generate
	 * an interrupt.  We periodically mark descriptors in this
	 * way to ensure timely replenishing of the supply needed
	 * for sending frames.  Deferring interrupts reduces system
	 * load and potentially allows more concurrent work to be
	 * done, but if done too aggressively it can cause senders
	 * to back up.
	 *
	 * NB: use >= to deal with sc_txintrperiod changing
	 * dynamically through sysctl.
	 */
	if (flags & HAL_TXDESC_INTREQ) {
		txq->axq_intrcnt = 0;
	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
		flags |= HAL_TXDESC_INTREQ;
		txq->axq_intrcnt = 0;
	}

	/* This point forward is actual TX bits */

	/*
	 * At this point we are committed to sending the frame
	 * and we don't need to look at m_nextpkt; clear it in
	 * case this frame is part of frag chain.
	 */
	m0->m_nextpkt = NULL;

	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
		ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
		    sc->sc_hwmap[rix].ieeerate, -1);

	if (ieee80211_radiotap_active_vap(vap)) {
		u_int64_t tsf = ath_hal_gettsf64(ah);

		sc->sc_tx_th.wt_tsf = htole64(tsf);
		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		if (isfrag)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}

	/* Blank the legacy rate array */
	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	/*
	 * ath_buf_set_rate needs at least one rate/try to setup
	 * the rate scenario.
	 */
	bf->bf_state.bfs_rc[0].rix = rix;
	bf->bf_state.bfs_rc[0].tries = try0;
	bf->bf_state.bfs_rc[0].ratecode = txrate;

	/* Store the decided rate index values away */
	bf->bf_state.bfs_pktlen = pktlen;
	bf->bf_state.bfs_hdrlen = hdrlen;
	bf->bf_state.bfs_atype = atype;
	bf->bf_state.bfs_txpower = ni->ni_txpower;
	bf->bf_state.bfs_txrate0 = txrate;
	bf->bf_state.bfs_try0 = try0;
	bf->bf_state.bfs_keyix = keyix;
	bf->bf_state.bfs_txantenna = sc->sc_txantenna;
	bf->bf_state.bfs_txflags = flags;
	bf->bf_state.bfs_shpream = shortPreamble;

	/* XXX this should be done in ath_tx_setrate() */
	bf->bf_state.bfs_ctsrate0 = 0;	/* ie, no hard-coded ctsrate */
	bf->bf_state.bfs_ctsrate = 0;	/* calculated later */
	bf->bf_state.bfs_ctsduration = 0;
	bf->bf_state.bfs_ismrr = ismrr;

	return 0;
}
1817	 */ 1818	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 1819		if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth 1820		    > sc->sc_txq_mcastq_maxdepth) { 1821			sc->sc_stats.ast_tx_mcastq_overflow++; 1822			r = ENOBUFS; 1823		} 1824		if (r != 0) { 1825			m_freem(m0); 1826			return r; 1827		} 1828	} 1829 1830	/* A-MPDU TX */ 1831	is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid); 1832	is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid); 1833	is_ampdu = is_ampdu_tx | is_ampdu_pending; 1834 1835	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n", 1836	    __func__, tid, pri, is_ampdu); 1837 1838	/* Set local packet state, used to queue packets to hardware */ 1839	bf->bf_state.bfs_tid = tid; 1840	bf->bf_state.bfs_tx_queue = txq->axq_qnum; 1841	bf->bf_state.bfs_pri = pri; 1842 1843#if 1 1844	/* 1845	 * When servicing one or more stations in power-save mode, 1846	 * or if there is some mcast data waiting on the mcast 1847	 * queue (to prevent out-of-order delivery), multicast frames 1848	 * must be buffered until after the beacon. 1849	 * 1850	 * TODO: we should lock the mcastq before we check the length. 1851	 */ 1852	if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) { 1853		txq = &avp->av_mcastq; 1854		/* 1855		 * Mark the frame as eventually belonging on the CAB 1856		 * queue, so the descriptor setup functions will 1857		 * correctly initialise the descriptor 'qcuId' field. 1858		 */ 1859		bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum; 1860	} 1861#endif 1862 1863	/* Do the generic frame setup */ 1864	/* XXX should just bzero the bf_state? */ 1865	bf->bf_state.bfs_dobaw = 0; 1866 1867	/* A-MPDU TX? Manually set sequence number */ 1868	/* 1869	 * Don't do it whilst pending; the net80211 layer still 1870	 * assigns them. 1871	 */ 1872	if (is_ampdu_tx) { 1873		/* 1874		 * Always call; this function will 1875		 * handle making sure that null data frames 1876		 * don't get a sequence number from the current 1877		 * TID and thus mess with the BAW. 1878		 */ 1879		seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0); 1880 1881		/* 1882		 * Don't add QoS NULL frames to the BAW. 1883		 */ 1884		if (IEEE80211_QOS_HAS_SEQ(wh) && 1885		    subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) { 1886			bf->bf_state.bfs_dobaw = 1; 1887		} 1888	} 1889 1890	/* 1891	 * If needed, the sequence number has been assigned. 1892	 * Squirrel it away somewhere easy to get to. 1893	 */ 1894	bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT; 1895 1896	/* Is AMPDU pending? Fetch the seqno and print it out */ 1897	if (is_ampdu_pending) 1898		DPRINTF(sc, ATH_DEBUG_SW_TX, 1899		    "%s: tid %d: ampdu pending, seqno %d\n", 1900		    __func__, tid, M_SEQNO_GET(m0)); 1901 1902	/* This also sets up the DMA map */ 1903	r = ath_tx_normal_setup(sc, ni, bf, m0, txq); 1904 1905	if (r != 0) 1906		goto done; 1907 1908	/* At this point m0 could have changed! */ 1909	m0 = bf->bf_m; 1910 1911#if 1 1912	/* 1913	 * If it's a multicast frame, do a direct-dispatch to the 1914	 * destination hardware queue. Don't bother software 1915	 * queuing it. 1916	 */ 1917	/* 1918	 * If it's a BAR frame, do a direct dispatch to the 1919	 * destination hardware queue. Don't bother software 1920	 * queuing it, as the TID will now be paused. 1921	 * Sending a BAR frame can occur from the net80211 txa timer 1922	 * (ie, retries) or from the ath txtask (completion call.) 1923	 * It queues directly to hardware because the TID is paused 1924	 * at this point (and won't be unpaused until the BAR has 1925	 * either been TXed successfully or max retries has been 1926	 * reached.)
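	 *
	 * In short, the dispatch decision made below is:
	 *
	 *	mcast frame (on av_mcastq)	-> direct dispatch, CLRDMASK set
	 *	BAR (CTL/BAR subtype)		-> direct dispatch, CLRDMASK set
	 *	everything else			-> ath_tx_swq() (software queue)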
1927 */ 1928 if (txq == &avp->av_mcastq) { 1929 DPRINTF(sc, ATH_DEBUG_SW_TX, 1930 "%s: bf=%p: mcastq: TX'ing\n", __func__, bf); 1931 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1932 ath_tx_xmit_normal(sc, txq, bf); 1933 } else if (type == IEEE80211_FC0_TYPE_CTL && 1934 subtype == IEEE80211_FC0_SUBTYPE_BAR) { 1935 DPRINTF(sc, ATH_DEBUG_SW_TX, 1936 "%s: BAR: TX'ing direct\n", __func__); 1937 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1938 ath_tx_xmit_normal(sc, txq, bf); 1939 } else { 1940 /* add to software queue */ 1941 DPRINTF(sc, ATH_DEBUG_SW_TX, 1942 "%s: bf=%p: swq: TX'ing\n", __func__, bf); 1943 ath_tx_swq(sc, ni, txq, bf); 1944 } 1945#else 1946 /* 1947 * For now, since there's no software queue, 1948 * direct-dispatch to the hardware. 1949 */ 1950 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1951 ath_tx_xmit_normal(sc, txq, bf); 1952#endif 1953done: 1954 return 0; 1955} 1956 1957static int 1958ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni, 1959 struct ath_buf *bf, struct mbuf *m0, 1960 const struct ieee80211_bpf_params *params) 1961{ 1962 struct ifnet *ifp = sc->sc_ifp; 1963 struct ieee80211com *ic = ifp->if_l2com; 1964 struct ath_hal *ah = sc->sc_ah; 1965 struct ieee80211vap *vap = ni->ni_vap; 1966 int error, ismcast, ismrr; 1967 int keyix, hdrlen, pktlen, try0, txantenna; 1968 u_int8_t rix, txrate; 1969 struct ieee80211_frame *wh; 1970 u_int flags; 1971 HAL_PKT_TYPE atype; 1972 const HAL_RATE_TABLE *rt; 1973 struct ath_desc *ds; 1974 u_int pri; 1975 int o_tid = -1; 1976 int do_override; 1977 1978 ATH_TX_LOCK_ASSERT(sc); 1979 1980 wh = mtod(m0, struct ieee80211_frame *); 1981 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 1982 hdrlen = ieee80211_anyhdrsize(wh); 1983 /* 1984 * Packet length must not include any 1985 * pad bytes; deduct them here. 1986 */ 1987 /* XXX honor IEEE80211_BPF_DATAPAD */ 1988 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; 1989 1990 ATH_KTR(sc, ATH_KTR_TX, 2, 1991 "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf); 1992 1993 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n", 1994 __func__, ismcast); 1995 1996 pri = params->ibp_pri & 3; 1997 /* Override pri if the frame isn't a QoS one */ 1998 if (! IEEE80211_QOS_HAS_SEQ(wh)) 1999 pri = ath_tx_getac(sc, m0); 2000 2001 /* XXX If it's an ADDBA, override the correct queue */ 2002 do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid); 2003 2004 /* Map ADDBA to the correct priority */ 2005 if (do_override) { 2006#if 0 2007 device_printf(sc->sc_dev, 2008 "%s: overriding tid %d pri %d -> %d\n", 2009 __func__, o_tid, pri, TID_TO_WME_AC(o_tid)); 2010#endif 2011 pri = TID_TO_WME_AC(o_tid); 2012 } 2013 2014 /* Handle encryption twiddling if needed */ 2015 if (! ath_tx_tag_crypto(sc, ni, 2016 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, 2017 &hdrlen, &pktlen, &keyix)) { 2018 ath_freetx(m0); 2019 return EIO; 2020 } 2021 /* packet header may have moved, reset our local pointer */ 2022 wh = mtod(m0, struct ieee80211_frame *); 2023 2024 /* Do the generic frame setup */ 2025 /* XXX should just bzero the bf_state? */ 2026 bf->bf_state.bfs_dobaw = 0; 2027 2028 error = ath_tx_dmasetup(sc, bf, m0); 2029 if (error != 0) 2030 return error; 2031 m0 = bf->bf_m; /* NB: may have changed */ 2032 wh = mtod(m0, struct ieee80211_frame *); 2033 bf->bf_node = ni; /* NB: held reference */ 2034 2035 /* Always enable CLRDMASK for raw frames for now.. 
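	 *
	 * (CLRDMASK clears the destination filter mask in hardware, so
	 * these frames will be attempted even if an earlier frame to the
	 * same destination failed and left that destination filtered.)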
*/ 2036 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 2037 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 2038 if (params->ibp_flags & IEEE80211_BPF_RTS) 2039 flags |= HAL_TXDESC_RTSENA; 2040 else if (params->ibp_flags & IEEE80211_BPF_CTS) { 2041 /* XXX assume 11g/11n protection? */ 2042 bf->bf_state.bfs_doprot = 1; 2043 flags |= HAL_TXDESC_CTSENA; 2044 } 2045 /* XXX leave ismcast to injector? */ 2046 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) 2047 flags |= HAL_TXDESC_NOACK; 2048 2049 rt = sc->sc_currates; 2050 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 2051 rix = ath_tx_findrix(sc, params->ibp_rate0); 2052 txrate = rt->info[rix].rateCode; 2053 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 2054 txrate |= rt->info[rix].shortPreamble; 2055 sc->sc_txrix = rix; 2056 try0 = params->ibp_try0; 2057 ismrr = (params->ibp_try1 != 0); 2058 txantenna = params->ibp_pri >> 2; 2059 if (txantenna == 0) /* XXX? */ 2060 txantenna = sc->sc_txantenna; 2061 2062 /* 2063 * Since ctsrate is fixed, store it away for later 2064 * use when the descriptor fields are being set. 2065 */ 2066 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) 2067 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate; 2068 2069 /* 2070 * NB: we mark all packets as type PSPOLL so the h/w won't 2071 * set the sequence number, duration, etc. 2072 */ 2073 atype = HAL_PKT_TYPE_PSPOLL; 2074 2075 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 2076 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, 2077 sc->sc_hwmap[rix].ieeerate, -1); 2078 2079 if (ieee80211_radiotap_active_vap(vap)) { 2080 u_int64_t tsf = ath_hal_gettsf64(ah); 2081 2082 sc->sc_tx_th.wt_tsf = htole64(tsf); 2083 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; 2084 if (wh->i_fc[1] & IEEE80211_FC1_WEP) 2085 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2086 if (m0->m_flags & M_FRAG) 2087 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 2088 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; 2089 sc->sc_tx_th.wt_txpower = ni->ni_txpower; 2090 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 2091 2092 ieee80211_radiotap_tx(vap, m0); 2093 } 2094 2095 /* 2096 * Formulate first tx descriptor with tx controls. 2097 */ 2098 ds = bf->bf_desc; 2099 /* XXX check return value? */ 2100 2101 /* Store the decided rate index values away */ 2102 bf->bf_state.bfs_pktlen = pktlen; 2103 bf->bf_state.bfs_hdrlen = hdrlen; 2104 bf->bf_state.bfs_atype = atype; 2105 bf->bf_state.bfs_txpower = params->ibp_power; 2106 bf->bf_state.bfs_txrate0 = txrate; 2107 bf->bf_state.bfs_try0 = try0; 2108 bf->bf_state.bfs_keyix = keyix; 2109 bf->bf_state.bfs_txantenna = txantenna; 2110 bf->bf_state.bfs_txflags = flags; 2111 bf->bf_state.bfs_shpream = 2112 !! 
(params->ibp_flags & IEEE80211_BPF_SHORTPRE); 2113 2114	/* Set local packet state, used to queue packets to hardware */ 2115	bf->bf_state.bfs_tid = WME_AC_TO_TID(pri); 2116	bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum; 2117	bf->bf_state.bfs_pri = pri; 2118 2119	/* XXX this should be done in ath_tx_setrate() */ 2120	bf->bf_state.bfs_ctsrate = 0; 2121	bf->bf_state.bfs_ctsduration = 0; 2122	bf->bf_state.bfs_ismrr = ismrr; 2123 2124	/* Blank the legacy rate array */ 2125	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 2126 2127	bf->bf_state.bfs_rc[0].rix = 2128	    ath_tx_findrix(sc, params->ibp_rate0); 2129	bf->bf_state.bfs_rc[0].tries = try0; 2130	bf->bf_state.bfs_rc[0].ratecode = txrate; 2131 2132	if (ismrr) { 2133		int rix; 2134 2135		rix = ath_tx_findrix(sc, params->ibp_rate1); 2136		bf->bf_state.bfs_rc[1].rix = rix; 2137		bf->bf_state.bfs_rc[1].tries = params->ibp_try1; 2138 2139		rix = ath_tx_findrix(sc, params->ibp_rate2); 2140		bf->bf_state.bfs_rc[2].rix = rix; 2141		bf->bf_state.bfs_rc[2].tries = params->ibp_try2; 2142 2143		rix = ath_tx_findrix(sc, params->ibp_rate3); 2144		bf->bf_state.bfs_rc[3].rix = rix; 2145		bf->bf_state.bfs_rc[3].tries = params->ibp_try3; 2146	} 2147	/* 2148	 * All the required rate control decisions have been made; 2149	 * fill in the rc flags. 2150	 */ 2151	ath_tx_rate_fill_rcflags(sc, bf); 2152 2153	/* NB: no buffered multicast in power save support */ 2154 2155	/* 2156	 * If we're overriding the ADDBA destination, dump directly 2157	 * into the hardware queue, right after any pending 2158	 * frames to that node. 2159	 */ 2160	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n", 2161	    __func__, do_override); 2162 2163#if 1 2164	if (do_override) { 2165		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2166		ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2167	} else { 2168		/* Queue to software queue */ 2169		ath_tx_swq(sc, ni, sc->sc_ac2q[pri], bf); 2170	} 2171#else 2172	/* Direct-dispatch to the hardware */ 2173	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2174	ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2175#endif 2176	return 0; 2177} 2178 2179/* 2180 * Send a raw frame. 2181 * 2182 * This can be called by net80211. 2183 */ 2184int 2185ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 2186	const struct ieee80211_bpf_params *params) 2187{ 2188	struct ieee80211com *ic = ni->ni_ic; 2189	struct ifnet *ifp = ic->ic_ifp; 2190	struct ath_softc *sc = ifp->if_softc; 2191	struct ath_buf *bf; 2192	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 2193	int error = 0; 2194 2195	ATH_PCU_LOCK(sc); 2196	if (sc->sc_inreset_cnt > 0) { 2197		device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; bailing\n", 2198		    __func__); 2199		error = EIO; 2200		ATH_PCU_UNLOCK(sc); 2201		goto bad0; 2202	} 2203	sc->sc_txstart_cnt++; 2204	ATH_PCU_UNLOCK(sc); 2205 2206	ATH_TX_LOCK(sc); 2207 2208	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) { 2209		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__, 2210		    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ? 2211			"!running" : "invalid"); 2212		m_freem(m); 2213		error = ENETDOWN; 2214		goto bad; 2215	} 2216 2217	/* 2218	 * Enforce how deep the multicast queue can grow. 2219	 * 2220	 * XXX duplicated in ath_tx_start().
2221 */ 2222 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2223 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth 2224 > sc->sc_txq_mcastq_maxdepth) { 2225 sc->sc_stats.ast_tx_mcastq_overflow++; 2226 error = ENOBUFS; 2227 } 2228 2229 if (error != 0) { 2230 m_freem(m); 2231 goto bad; 2232 } 2233 } 2234 2235 /* 2236 * Grab a TX buffer and associated resources. 2237 */ 2238 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); 2239 if (bf == NULL) { 2240 sc->sc_stats.ast_tx_nobuf++; 2241 m_freem(m); 2242 error = ENOBUFS; 2243 goto bad; 2244 } 2245 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n", 2246 m, params, bf); 2247 2248 if (params == NULL) { 2249 /* 2250 * Legacy path; interpret frame contents to decide 2251 * precisely how to send the frame. 2252 */ 2253 if (ath_tx_start(sc, ni, bf, m)) { 2254 error = EIO; /* XXX */ 2255 goto bad2; 2256 } 2257 } else { 2258 /* 2259 * Caller supplied explicit parameters to use in 2260 * sending the frame. 2261 */ 2262 if (ath_tx_raw_start(sc, ni, bf, m, params)) { 2263 error = EIO; /* XXX */ 2264 goto bad2; 2265 } 2266 } 2267 sc->sc_wd_timer = 5; 2268 ifp->if_opackets++; 2269 sc->sc_stats.ast_tx_raw++; 2270 2271 /* 2272 * Update the TIM - if there's anything queued to the 2273 * software queue and power save is enabled, we should 2274 * set the TIM. 2275 */ 2276 ath_tx_update_tim(sc, ni, 1); 2277 2278 ATH_TX_UNLOCK(sc); 2279 2280 ATH_PCU_LOCK(sc); 2281 sc->sc_txstart_cnt--; 2282 ATH_PCU_UNLOCK(sc); 2283 2284 return 0; 2285bad2: 2286 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, " 2287 "bf=%p", 2288 m, 2289 params, 2290 bf); 2291 ATH_TXBUF_LOCK(sc); 2292 ath_returnbuf_head(sc, bf); 2293 ATH_TXBUF_UNLOCK(sc); 2294bad: 2295 2296 ATH_TX_UNLOCK(sc); 2297 2298 ATH_PCU_LOCK(sc); 2299 sc->sc_txstart_cnt--; 2300 ATH_PCU_UNLOCK(sc); 2301bad0: 2302 ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p", 2303 m, params); 2304 ifp->if_oerrors++; 2305 sc->sc_stats.ast_tx_raw_fail++; 2306 ieee80211_free_node(ni); 2307 2308 return error; 2309} 2310 2311/* Some helper functions */ 2312 2313/* 2314 * ADDBA (and potentially others) need to be placed in the same 2315 * hardware queue as the TID/node it's relating to. This is so 2316 * it goes out after any pending non-aggregate frames to the 2317 * same node/TID. 2318 * 2319 * If this isn't done, the ADDBA can go out before the frames 2320 * queued in hardware. Even though these frames have a sequence 2321 * number -earlier- than the ADDBA can be transmitted (but 2322 * no frames whose sequence numbers are after the ADDBA should 2323 * be!) they'll arrive after the ADDBA - and the receiving end 2324 * will simply drop them as being out of the BAW. 2325 * 2326 * The frames can't be appended to the TID software queue - it'll 2327 * never be sent out. So these frames have to be directly 2328 * dispatched to the hardware, rather than queued in software. 2329 * So if this function returns true, the TXQ has to be 2330 * overridden and it has to be directly dispatched. 2331 * 2332 * It's a dirty hack, but someone's gotta do it. 2333 */ 2334 2335/* 2336 * XXX doesn't belong here! 2337 */ 2338static int 2339ieee80211_is_action(struct ieee80211_frame *wh) 2340{ 2341 /* Type: Management frame? */ 2342 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != 2343 IEEE80211_FC0_TYPE_MGT) 2344 return 0; 2345 2346 /* Subtype: Action frame? 
*/ 2347 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) != 2348 IEEE80211_FC0_SUBTYPE_ACTION) 2349 return 0; 2350 2351 return 1; 2352} 2353 2354#define MS(_v, _f) (((_v) & _f) >> _f##_S) 2355/* 2356 * Return an alternate TID for ADDBA request frames. 2357 * 2358 * Yes, this likely should be done in the net80211 layer. 2359 */ 2360static int 2361ath_tx_action_frame_override_queue(struct ath_softc *sc, 2362 struct ieee80211_node *ni, 2363 struct mbuf *m0, int *tid) 2364{ 2365 struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *); 2366 struct ieee80211_action_ba_addbarequest *ia; 2367 uint8_t *frm; 2368 uint16_t baparamset; 2369 2370 /* Not action frame? Bail */ 2371 if (! ieee80211_is_action(wh)) 2372 return 0; 2373 2374 /* XXX Not needed for frames we send? */ 2375#if 0 2376 /* Correct length? */ 2377 if (! ieee80211_parse_action(ni, m)) 2378 return 0; 2379#endif 2380 2381 /* Extract out action frame */ 2382 frm = (u_int8_t *)&wh[1]; 2383 ia = (struct ieee80211_action_ba_addbarequest *) frm; 2384 2385 /* Not ADDBA? Bail */ 2386 if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA) 2387 return 0; 2388 if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST) 2389 return 0; 2390 2391 /* Extract TID, return it */ 2392 baparamset = le16toh(ia->rq_baparamset); 2393 *tid = (int) MS(baparamset, IEEE80211_BAPS_TID); 2394 2395 return 1; 2396} 2397#undef MS 2398 2399/* Per-node software queue operations */ 2400 2401/* 2402 * Add the current packet to the given BAW. 2403 * It is assumed that the current packet 2404 * 2405 * + fits inside the BAW; 2406 * + already has had a sequence number allocated. 2407 * 2408 * Since the BAW status may be modified by both the ath task and 2409 * the net80211/ifnet contexts, the TID must be locked. 2410 */ 2411void 2412ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an, 2413 struct ath_tid *tid, struct ath_buf *bf) 2414{ 2415 int index, cindex; 2416 struct ieee80211_tx_ampdu *tap; 2417 2418 ATH_TX_LOCK_ASSERT(sc); 2419 2420 if (bf->bf_state.bfs_isretried) 2421 return; 2422 2423 tap = ath_tx_get_tx_tid(an, tid->tid); 2424 2425 if (! bf->bf_state.bfs_dobaw) { 2426 device_printf(sc->sc_dev, 2427 "%s: dobaw=0, seqno=%d, window %d:%d\n", 2428 __func__, 2429 SEQNO(bf->bf_state.bfs_seqno), 2430 tap->txa_start, 2431 tap->txa_wnd); 2432 } 2433 2434 if (bf->bf_state.bfs_addedbaw) 2435 device_printf(sc->sc_dev, 2436 "%s: re-added? tid=%d, seqno %d; window %d:%d; " 2437 "baw head=%d tail=%d\n", 2438 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2439 tap->txa_start, tap->txa_wnd, tid->baw_head, 2440 tid->baw_tail); 2441 2442 /* 2443 * Verify that the given sequence number is not outside of the 2444 * BAW. Complain loudly if that's the case. 2445 */ 2446 if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2447 SEQNO(bf->bf_state.bfs_seqno))) { 2448 device_printf(sc->sc_dev, 2449 "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; " 2450 "baw head=%d tail=%d\n", 2451 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2452 tap->txa_start, tap->txa_wnd, tid->baw_head, 2453 tid->baw_tail); 2454 } 2455 2456 /* 2457 * ni->ni_txseqs[] is the currently allocated seqno. 2458 * the txa state contains the current baw start. 
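 *
 * A worked example with illustrative values: if txa_start is 100,
 * baw_head is 0 and this frame's seqno is 103, then
 *
 *	index  = ATH_BA_INDEX(100, 103) = 3
 *	cindex = (0 + 3) & (ATH_TID_MAX_BUFS - 1) = 3
 *
 * and the frame occupies tx_buf[3], three slots past the BAW's
 * left edge.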
2459	 */ 2460	index  = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno)); 2461	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2462	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2463	    "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d " 2464	    "baw head=%d tail=%d\n", 2465	    __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2466	    tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head, 2467	    tid->baw_tail); 2468 2469 2470#if 0 2471	assert(tid->tx_buf[cindex] == NULL); 2472#endif 2473	if (tid->tx_buf[cindex] != NULL) { 2474		device_printf(sc->sc_dev, 2475		    "%s: ba packet dup (index=%d, cindex=%d, " 2476		    "head=%d, tail=%d)\n", 2477		    __func__, index, cindex, tid->baw_head, tid->baw_tail); 2478		device_printf(sc->sc_dev, 2479		    "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n", 2480		    __func__, 2481		    tid->tx_buf[cindex], 2482		    SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno), 2483		    bf, 2484		    SEQNO(bf->bf_state.bfs_seqno) 2485		); 2486	} 2487	tid->tx_buf[cindex] = bf; 2488 2489	if (index >= ((tid->baw_tail - tid->baw_head) & 2490	    (ATH_TID_MAX_BUFS - 1))) { 2491		tid->baw_tail = cindex; 2492		INCR(tid->baw_tail, ATH_TID_MAX_BUFS); 2493	} 2494} 2495 2496/* 2497 * Flip the BAW buffer entry over from the existing one to the new one. 2498 * 2499 * When software retransmitting a (sub-)frame, it is entirely possible that 2500 * the frame ath_buf is marked as BUSY and can't be immediately reused. 2501 * In that instance the buffer is cloned and the new buffer is used for 2502 * retransmit. We thus need to update the ath_buf slot in the BAW buf 2503 * tracking array to maintain consistency. 2504 */ 2505static void 2506ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an, 2507    struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf) 2508{ 2509	int index, cindex; 2510	struct ieee80211_tx_ampdu *tap; 2511	int seqno = SEQNO(old_bf->bf_state.bfs_seqno); 2512 2513	ATH_TX_LOCK_ASSERT(sc); 2514 2515	tap = ath_tx_get_tx_tid(an, tid->tid); 2516	index  = ATH_BA_INDEX(tap->txa_start, seqno); 2517	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2518 2519	/* 2520	 * Just warn for now; if it happens then we should find out 2521	 * about it. It's highly likely the aggregation session will 2522	 * soon hang. 2523	 */ 2524	if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) { 2525		device_printf(sc->sc_dev, "%s: retransmitted buffer" 2526		    " has mismatching seqnos, BA session may hang.\n", 2527		    __func__); 2528		device_printf(sc->sc_dev, "%s: old seqno=%d, new_seqno=%d\n", 2529		    __func__, 2530		    old_bf->bf_state.bfs_seqno, 2531		    new_bf->bf_state.bfs_seqno); 2532	} 2533 2534	if (tid->tx_buf[cindex] != old_bf) { 2535		device_printf(sc->sc_dev, "%s: ath_buf pointer incorrect; " 2536		    "BA session may hang.\n", 2537		    __func__); 2538		device_printf(sc->sc_dev, "%s: old bf=%p, new bf=%p\n", 2539		    __func__, 2540		    old_bf, new_bf); 2541	} 2542 2543	tid->tx_buf[cindex] = new_bf; 2544} 2545 2546/* 2547 * seq_start - left edge of BAW 2548 * seq_next - current/next sequence number to allocate 2549 * 2550 * Since the BAW status may be modified by both the ath task and 2551 * the net80211/ifnet contexts, the TID must be locked.
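 *
 * Completing a frame NULLs its tx_buf[] slot; the loop below then
 * slides txa_start (and baw_head) forward over every contiguous
 * completed slot. E.g. with frames 100..103 outstanding, completing
 * 100 and then 101 advances the left edge straight to 102, whilst
 * completing 102 first advances nothing until 100 and 101 have also
 * completed.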
2552 */ 2553static void 2554ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an, 2555 struct ath_tid *tid, const struct ath_buf *bf) 2556{ 2557 int index, cindex; 2558 struct ieee80211_tx_ampdu *tap; 2559 int seqno = SEQNO(bf->bf_state.bfs_seqno); 2560 2561 ATH_TX_LOCK_ASSERT(sc); 2562 2563 tap = ath_tx_get_tx_tid(an, tid->tid); 2564 index = ATH_BA_INDEX(tap->txa_start, seqno); 2565 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2566 2567 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2568 "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, " 2569 "baw head=%d, tail=%d\n", 2570 __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index, 2571 cindex, tid->baw_head, tid->baw_tail); 2572 2573 /* 2574 * If this occurs then we have a big problem - something else 2575 * has slid tap->txa_start along without updating the BAW 2576 * tracking start/end pointers. Thus the TX BAW state is now 2577 * completely busted. 2578 * 2579 * But for now, since I haven't yet fixed TDMA and buffer cloning, 2580 * it's quite possible that a cloned buffer is making its way 2581 * here and causing it to fire off. Disable TDMA for now. 2582 */ 2583 if (tid->tx_buf[cindex] != bf) { 2584 device_printf(sc->sc_dev, 2585 "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n", 2586 __func__, 2587 bf, SEQNO(bf->bf_state.bfs_seqno), 2588 tid->tx_buf[cindex], 2589 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno)); 2590 } 2591 2592 tid->tx_buf[cindex] = NULL; 2593 2594 while (tid->baw_head != tid->baw_tail && 2595 !tid->tx_buf[tid->baw_head]) { 2596 INCR(tap->txa_start, IEEE80211_SEQ_RANGE); 2597 INCR(tid->baw_head, ATH_TID_MAX_BUFS); 2598 } 2599 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2600 "%s: baw is now %d:%d, baw head=%d\n", 2601 __func__, tap->txa_start, tap->txa_wnd, tid->baw_head); 2602} 2603 2604/* 2605 * Mark the current node/TID as ready to TX. 2606 * 2607 * This is done to make it easy for the software scheduler to 2608 * find which nodes have data to send. 2609 * 2610 * The TXQ lock must be held. 2611 */ 2612static void 2613ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid) 2614{ 2615 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2616 2617 ATH_TX_LOCK_ASSERT(sc); 2618 2619 if (tid->paused) 2620 return; /* paused, can't schedule yet */ 2621 2622 if (tid->sched) 2623 return; /* already scheduled */ 2624 2625 tid->sched = 1; 2626 2627 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); 2628} 2629 2630/* 2631 * Mark the current node as no longer needing to be polled for 2632 * TX packets. 2633 * 2634 * The TXQ lock must be held. 2635 */ 2636static void 2637ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid) 2638{ 2639 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2640 2641 ATH_TX_LOCK_ASSERT(sc); 2642 2643 if (tid->sched == 0) 2644 return; 2645 2646 tid->sched = 0; 2647 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem); 2648} 2649 2650/* 2651 * Assign a sequence number manually to the given frame. 2652 * 2653 * This should only be called for A-MPDU TX frames. 
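 *
 * E.g. a QoS data frame on TID 3 is assigned ni_txseqs[3] (which is
 * then incremented modulo IEEE80211_SEQ_RANGE), whilst a QoS-NULL
 * frame is deliberately given its sequence number from the
 * IEEE80211_NONQOS_TID counter so it never occupies a slot in the
 * TID 3 BAW.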
2654 */ 2655static ieee80211_seq 2656ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni, 2657 struct ath_buf *bf, struct mbuf *m0) 2658{ 2659 struct ieee80211_frame *wh; 2660 int tid, pri; 2661 ieee80211_seq seqno; 2662 uint8_t subtype; 2663 2664 /* TID lookup */ 2665 wh = mtod(m0, struct ieee80211_frame *); 2666 pri = M_WME_GETAC(m0); /* honor classification */ 2667 tid = WME_AC_TO_TID(pri); 2668 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n", 2669 __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); 2670 2671 /* XXX Is it a control frame? Ignore */ 2672 2673 /* Does the packet require a sequence number? */ 2674 if (! IEEE80211_QOS_HAS_SEQ(wh)) 2675 return -1; 2676 2677 ATH_TX_LOCK_ASSERT(sc); 2678 2679 /* 2680 * Is it a QOS NULL Data frame? Give it a sequence number from 2681 * the default TID (IEEE80211_NONQOS_TID.) 2682 * 2683 * The RX path of everything I've looked at doesn't include the NULL 2684 * data frame sequence number in the aggregation state updates, so 2685 * assigning it a sequence number there will cause a BAW hole on the 2686 * RX side. 2687 */ 2688 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2689 if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) { 2690 /* XXX no locking for this TID? This is a bit of a problem. */ 2691 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; 2692 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); 2693 } else { 2694 /* Manually assign sequence number */ 2695 seqno = ni->ni_txseqs[tid]; 2696 INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE); 2697 } 2698 *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); 2699 M_SEQNO_SET(m0, seqno); 2700 2701 /* Return so caller can do something with it if needed */ 2702 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: -> seqno=%d\n", __func__, seqno); 2703 return seqno; 2704} 2705 2706/* 2707 * Attempt to direct dispatch an aggregate frame to hardware. 2708 * If the frame is out of BAW, queue. 2709 * Otherwise, schedule it as a single frame. 2710 */ 2711static void 2712ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an, 2713 struct ath_txq *txq, struct ath_buf *bf) 2714{ 2715 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 2716 struct ieee80211_tx_ampdu *tap; 2717 2718 ATH_TX_LOCK_ASSERT(sc); 2719 2720 tap = ath_tx_get_tx_tid(an, tid->tid); 2721 2722 /* paused? queue */ 2723 if (tid->paused) { 2724 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 2725 /* XXX don't sched - we're paused! */ 2726 return; 2727 } 2728 2729 /* outside baw? queue */ 2730 if (bf->bf_state.bfs_dobaw && 2731 (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2732 SEQNO(bf->bf_state.bfs_seqno)))) { 2733 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 2734 ath_tx_tid_sched(sc, tid); 2735 return; 2736 } 2737 2738 /* 2739 * This is a temporary check and should be removed once 2740 * all the relevant code paths have been fixed. 2741 * 2742 * During aggregate retries, it's possible that the head 2743 * frame will fail (which has the bfs_aggr and bfs_nframes 2744 * fields set for said aggregate) and will be retried as 2745 * a single frame. In this instance, the values should 2746 * be reset or the completion code will get upset with you. 
2747	 */ 2748	if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) { 2749		device_printf(sc->sc_dev, "%s: bfs_aggr=%d, bfs_nframes=%d\n", 2750		    __func__, 2751		    bf->bf_state.bfs_aggr, 2752		    bf->bf_state.bfs_nframes); 2753		bf->bf_state.bfs_aggr = 0; 2754		bf->bf_state.bfs_nframes = 1; 2755	} 2756 2757	/* Update CLRDMASK just before this frame is queued */ 2758	ath_tx_update_clrdmask(sc, tid, bf); 2759 2760	/* Direct dispatch to hardware */ 2761	ath_tx_do_ratelookup(sc, bf); 2762	ath_tx_calc_duration(sc, bf); 2763	ath_tx_calc_protection(sc, bf); 2764	ath_tx_set_rtscts(sc, bf); 2765	ath_tx_rate_fill_rcflags(sc, bf); 2766	ath_tx_setds(sc, bf); 2767 2768	/* Statistics */ 2769	sc->sc_aggr_stats.aggr_low_hwq_single_pkt++; 2770 2771	/* Track per-TID hardware queue depth correctly */ 2772	tid->hwq_depth++; 2773 2774	/* Add to BAW */ 2775	if (bf->bf_state.bfs_dobaw) { 2776		ath_tx_addto_baw(sc, an, tid, bf); 2777		bf->bf_state.bfs_addedbaw = 1; 2778	} 2779 2780	/* Set completion handler, multi-frame aggregate or not */ 2781	bf->bf_comp = ath_tx_aggr_comp; 2782 2783	/* Hand off to hardware */ 2784	ath_tx_handoff(sc, txq, bf); 2785} 2786 2787/* 2788 * Attempt to send the packet. 2789 * If the queue isn't busy, direct-dispatch. 2790 * If the queue is busy enough, queue the given packet on the 2791 * relevant software queue. 2792 */ 2793void 2794ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_txq *txq, 2795    struct ath_buf *bf) 2796{ 2797	struct ath_node *an = ATH_NODE(ni); 2798	struct ieee80211_frame *wh; 2799	struct ath_tid *atid; 2800	int pri, tid; 2801	struct mbuf *m0 = bf->bf_m; 2802 2803	ATH_TX_LOCK_ASSERT(sc); 2804 2805	/* Fetch the TID - non-QoS frames get assigned to TID 16 */ 2806	wh = mtod(m0, struct ieee80211_frame *); 2807	pri = ath_tx_getac(sc, m0); 2808	tid = ath_tx_gettid(sc, m0); 2809	atid = &an->an_tid[tid]; 2810 2811	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n", 2812	    __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); 2813 2814	/* Set local packet state, used to queue packets to hardware */ 2815	/* XXX potentially duplicate info, re-check */ 2816	bf->bf_state.bfs_tid = tid; 2817	bf->bf_state.bfs_tx_queue = txq->axq_qnum; 2818	bf->bf_state.bfs_pri = pri; 2819 2820	/* 2821	 * If the hardware queue isn't busy, direct-dispatch it. 2822	 * If the hardware queue is busy, software queue it. 2823	 * If the TID is paused or the traffic is outside the BAW, software 2824	 * queue it. 2825	 */ 2826	if (atid->paused) { 2827		/* TID is paused, queue */ 2828		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__); 2829		ATH_TID_INSERT_TAIL(atid, bf, bf_list); 2830	} else if (ath_tx_ampdu_pending(sc, an, tid)) { 2831		/* AMPDU pending; queue */ 2832		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__); 2833		ATH_TID_INSERT_TAIL(atid, bf, bf_list); 2834		/* XXX sched? */ 2835	} else if (ath_tx_ampdu_running(sc, an, tid)) { 2836		/* AMPDU running, attempt direct dispatch if possible */ 2837 2838		/* 2839		 * Always queue the frame to the tail of the list. 2840		 */ 2841		ATH_TID_INSERT_TAIL(atid, bf, bf_list); 2842 2843		/* 2844		 * If the hardware queue isn't busy, direct dispatch 2845		 * the head frame in the list. Don't schedule the 2846		 * TID - let it build some more frames first? 2847		 * 2848		 * Otherwise, schedule the TID.
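		 *
		 * ("Busy" below means the combined legacy + EDMA FIFO
		 * depth has reached sc_hwq_limit; an idle queue
		 * direct-dispatches the list head via ath_tx_xmit_aggr(),
		 * a busy one just marks the TID as scheduled and leaves
		 * the frame for the TX scheduler.)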
2849		 */ 2850		if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit) { 2851			bf = ATH_TID_FIRST(atid); 2852			ATH_TID_REMOVE(atid, bf, bf_list); 2853 2854			/* 2855			 * Ensure it's definitely treated as a non-AMPDU 2856			 * frame - this information may have been left 2857			 * over from a previous attempt. 2858			 */ 2859			bf->bf_state.bfs_aggr = 0; 2860			bf->bf_state.bfs_nframes = 1; 2861 2862			/* Queue to the hardware */ 2863			ath_tx_xmit_aggr(sc, an, txq, bf); 2864			DPRINTF(sc, ATH_DEBUG_SW_TX, 2865			    "%s: xmit_aggr\n", 2866			    __func__); 2867		} else { 2868			DPRINTF(sc, ATH_DEBUG_SW_TX, 2869			    "%s: ampdu; swq'ing\n", 2870			    __func__); 2871 2872			ath_tx_tid_sched(sc, atid); 2873		} 2874	} else if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit) { 2875		/* AMPDU not running, attempt direct dispatch */ 2876		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__); 2877		/* See if clrdmask needs to be set */ 2878		ath_tx_update_clrdmask(sc, atid, bf); 2879		ath_tx_xmit_normal(sc, txq, bf); 2880	} else { 2881		/* Busy; queue */ 2882		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__); 2883		ATH_TID_INSERT_TAIL(atid, bf, bf_list); 2884		ath_tx_tid_sched(sc, atid); 2885	} 2886} 2887 2888/* 2889 * Only set the clrdmask bit if none of the node's TIDs are currently 2890 * filtered. 2891 * 2892 * XXX TODO: go through all the callers and check to see 2893 * which are being called in the context of looping over all 2894 * TIDs (eg, if all tids are being paused, resumed, etc.) 2895 * That'll avoid O(n^2) complexity here. 2896 */ 2897static void 2898ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an) 2899{ 2900	int i; 2901 2902	ATH_TX_LOCK_ASSERT(sc); 2903 2904	for (i = 0; i < IEEE80211_TID_SIZE; i++) { 2905		if (an->an_tid[i].isfiltered == 1) 2906			return; 2907	} 2908	an->clrdmask = 1; 2909} 2910 2911/* 2912 * Configure the per-TID node state. 2913 * 2914 * This likely belongs in if_ath_node.c but I can't think of anywhere 2915 * else to put it just yet. 2916 * 2917 * This sets up the SLISTs and the mutex as appropriate. 2918 */ 2919void 2920ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an) 2921{ 2922	int i, j; 2923	struct ath_tid *atid; 2924 2925	for (i = 0; i < IEEE80211_TID_SIZE; i++) { 2926		atid = &an->an_tid[i]; 2927 2928		/* XXX now with this bzero(), is the field 0'ing needed? */ 2929		bzero(atid, sizeof(*atid)); 2930 2931		TAILQ_INIT(&atid->tid_q); 2932		TAILQ_INIT(&atid->filtq.tid_q); 2933		atid->tid = i; 2934		atid->an = an; 2935		for (j = 0; j < ATH_TID_MAX_BUFS; j++) 2936			atid->tx_buf[j] = NULL; 2937		atid->baw_head = atid->baw_tail = 0; 2938		atid->paused = 0; 2939		atid->sched = 0; 2940		atid->hwq_depth = 0; 2941		atid->cleanup_inprogress = 0; 2942		if (i == IEEE80211_NONQOS_TID) 2943			atid->ac = ATH_NONQOS_TID_AC; 2944		else 2945			atid->ac = TID_TO_WME_AC(i); 2946	} 2947	an->clrdmask = 1;	/* Always start by setting this bit */ 2948} 2949 2950/* 2951 * Pause the current TID. This stops packets from being transmitted 2952 * on it. 2953 * 2954 * Since this is called from upper layers as well as the driver, 2955 * it will get the TID lock. 2956 */ 2957static void 2958ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid) 2959{ 2960 2961	ATH_TX_LOCK_ASSERT(sc); 2962	tid->paused++; 2963	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: paused = %d\n", 2964	    __func__, tid->paused); 2965} 2966 2967/* 2968 * Unpause the current TID, and schedule it if needed.
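 *
 * Note that paused is a counter, not a flag: every
 * ath_tx_tid_pause() must be matched by exactly one resume, and the
 * TID only becomes schedulable again once the count drops back to
 * zero. Unbalanced calls either wedge the TID (stuck paused) or
 * underflow the count.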
2969 */ 2970static void 2971ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid) 2972{ 2973	ATH_TX_LOCK_ASSERT(sc); 2974 2975	tid->paused--; 2976 2977	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: unpaused = %d\n", 2978	    __func__, tid->paused); 2979 2980	if (tid->paused) 2981		return; 2982 2983	/* 2984	 * Override the clrdmask configuration for the next frame 2985	 * from this TID, just to get the ball rolling. 2986	 */ 2987	ath_tx_set_clrdmask(sc, tid->an); 2988 2989	if (tid->axq_depth == 0) 2990		return; 2991 2992	/* XXX isfiltered shouldn't ever be 1 at this point */ 2993	if (tid->isfiltered == 1) { 2994		device_printf(sc->sc_dev, "%s: filtered?!\n", __func__); 2995		return; 2996	} 2997 2998	ath_tx_tid_sched(sc, tid); 2999 3000	/* 3001	 * Queue the software TX scheduler. 3002	 */ 3003	ath_tx_swq_kick(sc); 3004} 3005 3006/* 3007 * Add the given ath_buf to the TID filtered frame list. 3008 * This requires the TID be filtered. 3009 */ 3010static void 3011ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid, 3012    struct ath_buf *bf) 3013{ 3014 3015	ATH_TX_LOCK_ASSERT(sc); 3016 3017	if (! tid->isfiltered) 3018		device_printf(sc->sc_dev, "%s: not filtered?!\n", __func__); 3019 3020	DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf); 3021 3022	/* Set the retry bit and bump the retry counter */ 3023	ath_tx_set_retry(sc, bf); 3024	sc->sc_stats.ast_tx_swfiltered++; 3025 3026	ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list); 3027} 3028 3029/* 3030 * Handle a completed filtered frame from the given TID. 3031 * This just enables/pauses the filtered frame state if required 3032 * and appends the filtered frame to the filtered queue. 3033 */ 3034static void 3035ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid, 3036    struct ath_buf *bf) 3037{ 3038 3039	ATH_TX_LOCK_ASSERT(sc); 3040 3041	if (! tid->isfiltered) { 3042		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: filter transition\n", 3043		    __func__); 3044		tid->isfiltered = 1; 3045		ath_tx_tid_pause(sc, tid); 3046	} 3047 3048	/* Add the frame to the filter queue */ 3049	ath_tx_tid_filt_addbuf(sc, tid, bf); 3050} 3051 3052/* 3053 * Complete the filtered frame TX completion. 3054 * 3055 * If there are no more frames in the hardware queue, unpause/unfilter 3056 * the TID if applicable. Otherwise we will wait for a node PS transition 3057 * to unfilter. 3058 */ 3059static void 3060ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid) 3061{ 3062	struct ath_buf *bf; 3063 3064	ATH_TX_LOCK_ASSERT(sc); 3065 3066	if (tid->hwq_depth != 0) 3067		return; 3068 3069	DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: hwq=0, transition back\n", 3070	    __func__); 3071	tid->isfiltered = 0; 3072	/* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */ 3073	ath_tx_set_clrdmask(sc, tid->an); 3074 3075	/* XXX this is really quite inefficient */ 3076	while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) { 3077		ATH_TID_FILT_REMOVE(tid, bf, bf_list); 3078		ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3079	} 3080 3081	ath_tx_tid_resume(sc, tid); 3082} 3083 3084/* 3085 * Called when a single (aggregate or otherwise) frame is completed. 3086 * 3087 * Returns 1 if the buffer could be added to the filtered list 3088 * (cloned or otherwise), 0 if the buffer couldn't be added to the 3089 * filtered list (failed clone; expired retry) and the caller should 3090 * free it and handle it like a failure (eg by sending a BAR.)
3091 */ 3092static int 3093ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid, 3094 struct ath_buf *bf) 3095{ 3096 struct ath_buf *nbf; 3097 int retval; 3098 3099 ATH_TX_LOCK_ASSERT(sc); 3100 3101 /* 3102 * Don't allow a filtered frame to live forever. 3103 */ 3104 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3105 sc->sc_stats.ast_tx_swretrymax++; 3106 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3107 "%s: bf=%p, seqno=%d, exceeded retries\n", 3108 __func__, 3109 bf, 3110 bf->bf_state.bfs_seqno); 3111 return (0); 3112 } 3113 3114 /* 3115 * A busy buffer can't be added to the retry list. 3116 * It needs to be cloned. 3117 */ 3118 if (bf->bf_flags & ATH_BUF_BUSY) { 3119 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3120 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3121 "%s: busy buffer clone: %p -> %p\n", 3122 __func__, bf, nbf); 3123 } else { 3124 nbf = bf; 3125 } 3126 3127 if (nbf == NULL) { 3128 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3129 "%s: busy buffer couldn't be cloned (%p)!\n", 3130 __func__, bf); 3131 retval = 1; 3132 } else { 3133 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3134 retval = 0; 3135 } 3136 ath_tx_tid_filt_comp_complete(sc, tid); 3137 3138 return (retval); 3139} 3140 3141static void 3142ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid, 3143 struct ath_buf *bf_first, ath_bufhead *bf_q) 3144{ 3145 struct ath_buf *bf, *bf_next, *nbf; 3146 3147 ATH_TX_LOCK_ASSERT(sc); 3148 3149 bf = bf_first; 3150 while (bf) { 3151 bf_next = bf->bf_next; 3152 bf->bf_next = NULL; /* Remove it from the aggr list */ 3153 3154 /* 3155 * Don't allow a filtered frame to live forever. 3156 */ 3157 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3158 sc->sc_stats.ast_tx_swretrymax++; 3159 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3160 "%s: bf=%p, seqno=%d, exceeded retries\n", 3161 __func__, 3162 bf, 3163 bf->bf_state.bfs_seqno); 3164 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3165 goto next; 3166 } 3167 3168 if (bf->bf_flags & ATH_BUF_BUSY) { 3169 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3170 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3171 "%s: busy buffer cloned: %p -> %p", 3172 __func__, bf, nbf); 3173 } else { 3174 nbf = bf; 3175 } 3176 3177 /* 3178 * If the buffer couldn't be cloned, add it to bf_q; 3179 * the caller will free the buffer(s) as required. 3180 */ 3181 if (nbf == NULL) { 3182 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3183 "%s: buffer couldn't be cloned! (%p)\n", 3184 __func__, bf); 3185 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3186 } else { 3187 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3188 } 3189next: 3190 bf = bf_next; 3191 } 3192 3193 ath_tx_tid_filt_comp_complete(sc, tid); 3194} 3195 3196/* 3197 * Suspend the queue because we need to TX a BAR. 3198 */ 3199static void 3200ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid) 3201{ 3202 3203 ATH_TX_LOCK_ASSERT(sc); 3204 3205 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3206 "%s: tid=%p, bar_wait=%d, bar_tx=%d, called\n", 3207 __func__, 3208 tid, 3209 tid->bar_wait, 3210 tid->bar_tx); 3211 3212 /* We shouldn't be called when bar_tx is 1 */ 3213 if (tid->bar_tx) { 3214 device_printf(sc->sc_dev, "%s: bar_tx is 1?!\n", 3215 __func__); 3216 } 3217 3218 /* If we've already been called, just be patient. */ 3219 if (tid->bar_wait) 3220 return; 3221 3222 /* Wait! */ 3223 tid->bar_wait = 1; 3224 3225 /* Only one pause, no matter how many frames fail */ 3226 ath_tx_tid_pause(sc, tid); 3227} 3228 3229/* 3230 * We've finished with BAR handling - either we succeeded or 3231 * failed. Either way, unsuspend TX. 
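 *
 * The two flags form a small state machine:
 *
 *	bar_wait=0, bar_tx=0	normal TX
 *	bar_wait=1, bar_tx=0	suspended; draining hwq before the BAR
 *	bar_wait=1, bar_tx=1	BAR handed to net80211, awaiting
 *				completion
 *
 * This routine is the transition from either of the latter two states
 * back to normal TX.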
3232 */ 3233static void 3234ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid) 3235{ 3236 3237 ATH_TX_LOCK_ASSERT(sc); 3238 3239 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3240 "%s: tid=%p, called\n", 3241 __func__, 3242 tid); 3243 3244 if (tid->bar_tx == 0 || tid->bar_wait == 0) { 3245 device_printf(sc->sc_dev, "%s: bar_tx=%d, bar_wait=%d: ?\n", 3246 __func__, tid->bar_tx, tid->bar_wait); 3247 } 3248 3249 tid->bar_tx = tid->bar_wait = 0; 3250 ath_tx_tid_resume(sc, tid); 3251} 3252 3253/* 3254 * Return whether we're ready to TX a BAR frame. 3255 * 3256 * Requires the TID lock be held. 3257 */ 3258static int 3259ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid) 3260{ 3261 3262 ATH_TX_LOCK_ASSERT(sc); 3263 3264 if (tid->bar_wait == 0 || tid->hwq_depth > 0) 3265 return (0); 3266 3267 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: tid=%p (%d), bar ready\n", 3268 __func__, tid, tid->tid); 3269 3270 return (1); 3271} 3272 3273/* 3274 * Check whether the current TID is ready to have a BAR 3275 * TXed and if so, do the TX. 3276 * 3277 * Since the TID/TXQ lock can't be held during a call to 3278 * ieee80211_send_bar(), we have to do the dirty thing of unlocking it, 3279 * sending the BAR and locking it again. 3280 * 3281 * Eventually, the code to send the BAR should be broken out 3282 * from this routine so the lock doesn't have to be reacquired 3283 * just to be immediately dropped by the caller. 3284 */ 3285static void 3286ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid) 3287{ 3288 struct ieee80211_tx_ampdu *tap; 3289 3290 ATH_TX_LOCK_ASSERT(sc); 3291 3292 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3293 "%s: tid=%p, called\n", 3294 __func__, 3295 tid); 3296 3297 tap = ath_tx_get_tx_tid(tid->an, tid->tid); 3298 3299 /* 3300 * This is an error condition! 3301 */ 3302 if (tid->bar_wait == 0 || tid->bar_tx == 1) { 3303 device_printf(sc->sc_dev, 3304 "%s: tid=%p, bar_tx=%d, bar_wait=%d: ?\n", 3305 __func__, 3306 tid, 3307 tid->bar_tx, 3308 tid->bar_wait); 3309 return; 3310 } 3311 3312 /* Don't do anything if we still have pending frames */ 3313 if (tid->hwq_depth > 0) { 3314 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3315 "%s: tid=%p, hwq_depth=%d, waiting\n", 3316 __func__, 3317 tid, 3318 tid->hwq_depth); 3319 return; 3320 } 3321 3322 /* We're now about to TX */ 3323 tid->bar_tx = 1; 3324 3325 /* 3326 * Override the clrdmask configuration for the next frame, 3327 * just to get the ball rolling. 3328 */ 3329 ath_tx_set_clrdmask(sc, tid->an); 3330 3331 /* 3332 * Calculate new BAW left edge, now that all frames have either 3333 * succeeded or failed. 3334 * 3335 * XXX verify this is _actually_ the valid value to begin at! 3336 */ 3337 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3338 "%s: tid=%p, new BAW left edge=%d\n", 3339 __func__, 3340 tid, 3341 tap->txa_start); 3342 3343 /* Try sending the BAR frame */ 3344 /* We can't hold the lock here! */ 3345 3346 ATH_TX_UNLOCK(sc); 3347 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) { 3348 /* Success? Now we wait for notification that it's done */ 3349 ATH_TX_LOCK(sc); 3350 return; 3351 } 3352 3353 /* Failure? 
For now, warn loudly and continue */ 3354 ATH_TX_LOCK(sc); 3355 device_printf(sc->sc_dev, "%s: tid=%p, failed to TX BAR, continue!\n", 3356 __func__, tid); 3357 ath_tx_tid_bar_unsuspend(sc, tid); 3358} 3359 3360static void 3361ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an, 3362 struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf) 3363{ 3364 3365 ATH_TX_LOCK_ASSERT(sc); 3366 3367 /* 3368 * If the current TID is running AMPDU, update 3369 * the BAW. 3370 */ 3371 if (ath_tx_ampdu_running(sc, an, tid->tid) && 3372 bf->bf_state.bfs_dobaw) { 3373 /* 3374 * Only remove the frame from the BAW if it's 3375 * been transmitted at least once; this means 3376 * the frame was in the BAW to begin with. 3377 */ 3378 if (bf->bf_state.bfs_retries > 0) { 3379 ath_tx_update_baw(sc, an, tid, bf); 3380 bf->bf_state.bfs_dobaw = 0; 3381 } 3382#if 0 3383 /* 3384 * This has become a non-fatal error now 3385 */ 3386 if (! bf->bf_state.bfs_addedbaw) 3387 device_printf(sc->sc_dev, 3388 "%s: wasn't added: seqno %d\n", 3389 __func__, SEQNO(bf->bf_state.bfs_seqno)); 3390#endif 3391 } 3392 3393 /* Strip it out of an aggregate list if it was in one */ 3394 bf->bf_next = NULL; 3395 3396 /* Insert on the free queue to be freed by the caller */ 3397 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); 3398} 3399 3400static void 3401ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an, 3402 const char *pfx, struct ath_tid *tid, struct ath_buf *bf) 3403{ 3404 struct ieee80211_node *ni = &an->an_node; 3405 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 3406 struct ieee80211_tx_ampdu *tap; 3407 3408 tap = ath_tx_get_tx_tid(an, tid->tid); 3409 3410 device_printf(sc->sc_dev, 3411 "%s: %s: node %p: bf=%p: addbaw=%d, dobaw=%d, " 3412 "seqno=%d, retry=%d\n", 3413 __func__, pfx, ni, bf, 3414 bf->bf_state.bfs_addedbaw, 3415 bf->bf_state.bfs_dobaw, 3416 SEQNO(bf->bf_state.bfs_seqno), 3417 bf->bf_state.bfs_retries); 3418 device_printf(sc->sc_dev, 3419 "%s: node %p: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n", 3420 __func__, ni, bf, 3421 txq->axq_qnum, 3422 txq->axq_depth, 3423 txq->axq_aggr_depth); 3424 3425 device_printf(sc->sc_dev, 3426 "%s: node %p: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, isfiltered=%d\n", 3427 __func__, ni, bf, 3428 tid->axq_depth, 3429 tid->hwq_depth, 3430 tid->bar_wait, 3431 tid->isfiltered); 3432 device_printf(sc->sc_dev, 3433 "%s: node %p: tid %d: " 3434 "sched=%d, paused=%d, " 3435 "incomp=%d, baw_head=%d, " 3436 "baw_tail=%d txa_start=%d, ni_txseqs=%d\n", 3437 __func__, ni, tid->tid, 3438 tid->sched, tid->paused, 3439 tid->incomp, tid->baw_head, 3440 tid->baw_tail, tap == NULL ? -1 : tap->txa_start, 3441 ni->ni_txseqs[tid->tid]); 3442 3443 /* XXX Dump the frame, see what it is? */ 3444 ieee80211_dump_pkt(ni->ni_ic, 3445 mtod(bf->bf_m, const uint8_t *), 3446 bf->bf_m->m_len, 0, -1); 3447} 3448 3449/* 3450 * Free any packets currently pending in the software TX queue. 3451 * 3452 * This will be called when a node is being deleted. 3453 * 3454 * It can also be called on an active node during an interface 3455 * reset or state transition. 3456 * 3457 * (From Linux/reference): 3458 * 3459 * TODO: For frame(s) that are in the retry state, we will reuse the 3460 * sequence number(s) without setting the retry bit. The 3461 * alternative is to give up on these and BAR the receiver's window 3462 * forward. 
3463 */ 3464static void 3465ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an, 3466 struct ath_tid *tid, ath_bufhead *bf_cq) 3467{ 3468 struct ath_buf *bf; 3469 struct ieee80211_tx_ampdu *tap; 3470 struct ieee80211_node *ni = &an->an_node; 3471 int t; 3472 3473 tap = ath_tx_get_tx_tid(an, tid->tid); 3474 3475 ATH_TX_LOCK_ASSERT(sc); 3476 3477 /* Walk the queue, free frames */ 3478 t = 0; 3479 for (;;) { 3480 bf = ATH_TID_FIRST(tid); 3481 if (bf == NULL) { 3482 break; 3483 } 3484 3485 if (t == 0) { 3486 ath_tx_tid_drain_print(sc, an, "norm", tid, bf); 3487 t = 1; 3488 } 3489 3490 ATH_TID_REMOVE(tid, bf, bf_list); 3491 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); 3492 } 3493 3494 /* And now, drain the filtered frame queue */ 3495 t = 0; 3496 for (;;) { 3497 bf = ATH_TID_FILT_FIRST(tid); 3498 if (bf == NULL) 3499 break; 3500 3501 if (t == 0) { 3502 ath_tx_tid_drain_print(sc, an, "filt", tid, bf); 3503 t = 1; 3504 } 3505 3506 ATH_TID_FILT_REMOVE(tid, bf, bf_list); 3507 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); 3508 } 3509 3510 /* 3511 * Override the clrdmask configuration for the next frame 3512 * in case there is some future transmission, just to get 3513 * the ball rolling. 3514 * 3515 * This won't hurt things if the TID is about to be freed. 3516 */ 3517 ath_tx_set_clrdmask(sc, tid->an); 3518 3519 /* 3520 * Now that it's completed, grab the TID lock and update 3521 * the sequence number and BAW window. 3522 * Because sequence numbers have been assigned to frames 3523 * that haven't been sent yet, it's entirely possible 3524 * we'll be called with some pending frames that have not 3525 * been transmitted. 3526 * 3527 * The cleaner solution is to do the sequence number allocation 3528 * when the packet is first transmitted - and thus the "retries" 3529 * check above would be enough to update the BAW/seqno. 3530 */ 3531 3532 /* But don't do it for non-QoS TIDs */ 3533 if (tap) { 3534#if 0 3535 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3536 "%s: node %p: TID %d: sliding BAW left edge to %d\n", 3537 __func__, an, tid->tid, tap->txa_start); 3538#endif 3539 ni->ni_txseqs[tid->tid] = tap->txa_start; 3540 tid->baw_tail = tid->baw_head; 3541 } 3542} 3543 3544/* 3545 * Flush all software queued packets for the given node. 3546 * 3547 * This occurs when a completion handler frees the last buffer 3548 * for a node, and the node is thus freed. This causes the node 3549 * to be cleaned up, which ends up calling ath_tx_node_flush. 3550 */ 3551void 3552ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an) 3553{ 3554 int tid; 3555 ath_bufhead bf_cq; 3556 struct ath_buf *bf; 3557 3558 TAILQ_INIT(&bf_cq); 3559 3560 ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p", 3561 &an->an_node); 3562 3563 ATH_TX_LOCK(sc); 3564 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 3565 struct ath_tid *atid = &an->an_tid[tid]; 3566 3567 /* Free packets */ 3568 ath_tx_tid_drain(sc, an, atid, &bf_cq); 3569 /* Remove this tid from the list of active tids */ 3570 ath_tx_tid_unsched(sc, atid); 3571 } 3572 ATH_TX_UNLOCK(sc); 3573 3574 /* Handle completed frames */ 3575 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 3576 TAILQ_REMOVE(&bf_cq, bf, bf_list); 3577 ath_tx_default_comp(sc, bf, 0); 3578 } 3579} 3580 3581/* 3582 * Drain all the software TXQs currently with traffic queued. 
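 *
 * As in ath_tx_node_flush(), frames are collected onto a local bf_cq
 * list whilst ATH_TX_LOCK is held and only completed via
 * ath_tx_default_comp() after the lock is dropped, so the completion
 * handlers never run with the TX lock held.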
3583 */ 3584void 3585ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq) 3586{ 3587 struct ath_tid *tid; 3588 ath_bufhead bf_cq; 3589 struct ath_buf *bf; 3590 3591 TAILQ_INIT(&bf_cq); 3592 ATH_TX_LOCK(sc); 3593 3594 /* 3595 * Iterate over all active tids for the given txq, 3596 * flushing and unsched'ing them 3597 */ 3598 while (! TAILQ_EMPTY(&txq->axq_tidq)) { 3599 tid = TAILQ_FIRST(&txq->axq_tidq); 3600 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq); 3601 ath_tx_tid_unsched(sc, tid); 3602 } 3603 3604 ATH_TX_UNLOCK(sc); 3605 3606 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 3607 TAILQ_REMOVE(&bf_cq, bf, bf_list); 3608 ath_tx_default_comp(sc, bf, 0); 3609 } 3610} 3611 3612/* 3613 * Handle completion of non-aggregate session frames. 3614 * 3615 * This (currently) doesn't implement software retransmission of 3616 * non-aggregate frames! 3617 * 3618 * Software retransmission of non-aggregate frames needs to obey 3619 * the strict sequence number ordering, and drop any frames that 3620 * will fail this. 3621 * 3622 * For now, filtered frames and frame transmission will cause 3623 * all kinds of issues. So we don't support them. 3624 * 3625 * So anyone queuing frames via ath_tx_normal_xmit() or 3626 * ath_tx_hw_queue_norm() must override and set CLRDMASK. 3627 */ 3628void 3629ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 3630{ 3631 struct ieee80211_node *ni = bf->bf_node; 3632 struct ath_node *an = ATH_NODE(ni); 3633 int tid = bf->bf_state.bfs_tid; 3634 struct ath_tid *atid = &an->an_tid[tid]; 3635 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 3636 3637 /* The TID state is protected behind the TXQ lock */ 3638 ATH_TX_LOCK(sc); 3639 3640 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n", 3641 __func__, bf, fail, atid->hwq_depth - 1); 3642 3643 atid->hwq_depth--; 3644 3645#if 0 3646 /* 3647 * If the frame was filtered, stick it on the filter frame 3648 * queue and complain about it. It shouldn't happen! 3649 */ 3650 if ((ts->ts_status & HAL_TXERR_FILT) || 3651 (ts->ts_status != 0 && atid->isfiltered)) { 3652 device_printf(sc->sc_dev, 3653 "%s: isfiltered=%d, ts_status=%d: huh?\n", 3654 __func__, 3655 atid->isfiltered, 3656 ts->ts_status); 3657 ath_tx_tid_filt_comp_buf(sc, atid, bf); 3658 } 3659#endif 3660 if (atid->isfiltered) 3661 device_printf(sc->sc_dev, "%s: filtered?!\n", __func__); 3662 if (atid->hwq_depth < 0) 3663 device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n", 3664 __func__, atid->hwq_depth); 3665 3666 /* 3667 * If the queue is filtered, potentially mark it as complete 3668 * and reschedule it as needed. 3669 * 3670 * This is required as there may be a subsequent TX descriptor 3671 * for this end-node that has CLRDMASK set, so it's quite possible 3672 * that a filtered frame will be followed by a non-filtered 3673 * (complete or otherwise) frame. 3674 * 3675 * XXX should we do this before we complete the frame? 3676 */ 3677 if (atid->isfiltered) 3678 ath_tx_tid_filt_comp_complete(sc, atid); 3679 ATH_TX_UNLOCK(sc); 3680 3681 /* 3682 * punt to rate control if we're not being cleaned up 3683 * during a hw queue drain and the frame wanted an ACK. 3684 */ 3685 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 3686 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 3687 ts, bf->bf_state.bfs_pktlen, 3688 1, (ts->ts_status == 0) ? 0 : 1); 3689 3690 ath_tx_default_comp(sc, bf, fail); 3691} 3692 3693/* 3694 * Handle cleanup of aggregate session packets that aren't 3695 * an A-MPDU. 
3696 * 3697 * There's no need to update the BAW here - the session is being 3698 * torn down. 3699 */ 3700static void 3701ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf) 3702{ 3703 struct ieee80211_node *ni = bf->bf_node; 3704 struct ath_node *an = ATH_NODE(ni); 3705 int tid = bf->bf_state.bfs_tid; 3706 struct ath_tid *atid = &an->an_tid[tid]; 3707 3708 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n", 3709 __func__, tid, atid->incomp); 3710 3711 ATH_TX_LOCK(sc); 3712 atid->incomp--; 3713 if (atid->incomp == 0) { 3714 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3715 "%s: TID %d: cleaned up! resume!\n", 3716 __func__, tid); 3717 atid->cleanup_inprogress = 0; 3718 ath_tx_tid_resume(sc, atid); 3719 } 3720 ATH_TX_UNLOCK(sc); 3721 3722 ath_tx_default_comp(sc, bf, 0); 3723} 3724 3725/* 3726 * Performs transmit side cleanup when TID changes from aggregated to 3727 * unaggregated. 3728 * 3729 * - Discard all retry frames from the s/w queue. 3730 * - Fix the tx completion function for all buffers in s/w queue. 3731 * - Count the number of unacked frames, and let transmit completion 3732 * handle it later. 3733 * 3734 * The caller is responsible for pausing the TID. 3735 */ 3736static void 3737ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid) 3738{ 3739 struct ath_tid *atid = &an->an_tid[tid]; 3740 struct ieee80211_tx_ampdu *tap; 3741 struct ath_buf *bf, *bf_next; 3742 ath_bufhead bf_cq; 3743 3744 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 3745 "%s: TID %d: called\n", __func__, tid); 3746 3747 TAILQ_INIT(&bf_cq); 3748 ATH_TX_LOCK(sc); 3749 3750 /* 3751 * Move the filtered frames to the TX queue, before 3752 * we run off and discard/process things. 3753 */ 3754 /* XXX this is really quite inefficient */ 3755 while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) { 3756 ATH_TID_FILT_REMOVE(atid, bf, bf_list); 3757 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 3758 } 3759 3760 /* 3761 * Update the frames in the software TX queue: 3762 * 3763 * + Discard retry frames in the queue 3764 * + Fix the completion function to be non-aggregate 3765 */ 3766 bf = ATH_TID_FIRST(atid); 3767 while (bf) { 3768 if (bf->bf_state.bfs_isretried) { 3769 bf_next = TAILQ_NEXT(bf, bf_list); 3770 ATH_TID_REMOVE(atid, bf, bf_list); 3771 if (bf->bf_state.bfs_dobaw) { 3772 ath_tx_update_baw(sc, an, atid, bf); 3773 if (! bf->bf_state.bfs_addedbaw) 3774 device_printf(sc->sc_dev, 3775 "%s: wasn't added: seqno %d\n", 3776 __func__, 3777 SEQNO(bf->bf_state.bfs_seqno)); 3778 } 3779 bf->bf_state.bfs_dobaw = 0; 3780 /* 3781 * Call the default completion handler with "fail" just 3782 * so upper levels are suitably notified about this. 3783 */ 3784 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 3785 bf = bf_next; 3786 continue; 3787 } 3788 /* Give these the default completion handler */ 3789 bf->bf_comp = ath_tx_normal_comp; 3790 bf = TAILQ_NEXT(bf, bf_list); 3791 } 3792 3793 /* The caller is required to pause the TID */ 3794#if 0 3795 /* Pause the TID */ 3796 ath_tx_tid_pause(sc, atid); 3797#endif 3798 3799 /* 3800 * Calculate what hardware-queued frames exist based 3801 * on the current BAW size. Ie, what frames have been 3802 * added to the TX hardware queue for this TID but 3803 * not yet ACKed. 
3804 */ 3805 tap = ath_tx_get_tx_tid(an, tid); 3806 /* Need the lock - fiddling with BAW */ 3807 while (atid->baw_head != atid->baw_tail) { 3808 if (atid->tx_buf[atid->baw_head]) { 3809 atid->incomp++; 3810 atid->cleanup_inprogress = 1; 3811 atid->tx_buf[atid->baw_head] = NULL; 3812 } 3813 INCR(atid->baw_head, ATH_TID_MAX_BUFS); 3814 INCR(tap->txa_start, IEEE80211_SEQ_RANGE); 3815 } 3816 3817 /* 3818 * If cleanup is required, defer TID scheduling 3819 * until all the HW queued packets have been 3820 * sent. 3821 */ 3822 if (! atid->cleanup_inprogress) 3823 ath_tx_tid_resume(sc, atid); 3824 3825 if (atid->cleanup_inprogress) 3826 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3827 "%s: TID %d: cleanup needed: %d packets\n", 3828 __func__, tid, atid->incomp); 3829 ATH_TX_UNLOCK(sc); 3830 3831 /* Handle completing frames and fail them */ 3832 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 3833 TAILQ_REMOVE(&bf_cq, bf, bf_list); 3834 ath_tx_default_comp(sc, bf, 1); 3835 } 3836} 3837 3838static struct ath_buf * 3839ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an, 3840 struct ath_tid *tid, struct ath_buf *bf) 3841{ 3842 struct ath_buf *nbf; 3843 int error; 3844 3845 /* 3846 * Clone the buffer. This will handle the dma unmap and 3847 * copy the node reference to the new buffer. If this 3848 * works out, 'bf' will have no DMA mapping, no mbuf 3849 * pointer and no node reference. 3850 */ 3851 nbf = ath_buf_clone(sc, bf); 3852 3853#if 0 3854 device_printf(sc->sc_dev, "%s: ATH_BUF_BUSY; cloning\n", 3855 __func__); 3856#endif 3857 3858 if (nbf == NULL) { 3859 /* Failed to clone */ 3860 device_printf(sc->sc_dev, 3861 "%s: failed to clone a busy buffer\n", 3862 __func__); 3863 return NULL; 3864 } 3865 3866 /* Setup the dma for the new buffer */ 3867 error = ath_tx_dmasetup(sc, nbf, nbf->bf_m); 3868 if (error != 0) { 3869 device_printf(sc->sc_dev, 3870 "%s: failed to setup dma for clone\n", 3871 __func__); 3872 /* 3873 * Put this at the head of the list, not tail; 3874 * that way it doesn't interfere with the 3875 * busy buffer logic (which uses the tail of 3876 * the list.) 3877 */ 3878 ATH_TXBUF_LOCK(sc); 3879 ath_returnbuf_head(sc, nbf); 3880 ATH_TXBUF_UNLOCK(sc); 3881 return NULL; 3882 } 3883 3884 /* Update BAW if required, before we free the original buf */ 3885 if (bf->bf_state.bfs_dobaw) 3886 ath_tx_switch_baw_buf(sc, an, tid, bf, nbf); 3887 3888 /* Free original buffer; return new buffer */ 3889 ath_freebuf(sc, bf); 3890 3891 return nbf; 3892} 3893 3894/* 3895 * Handle retrying an unaggregate frame in an aggregate 3896 * session. 3897 * 3898 * If too many retries occur, pause the TID, wait for 3899 * any further retransmits (as there's no reason why 3900 * non-aggregate frames in an aggregate session are 3901 * transmitted in-order; they just have to be in-BAW) 3902 * and then queue a BAR. 3903 */ 3904static void 3905ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf) 3906{ 3907 struct ieee80211_node *ni = bf->bf_node; 3908 struct ath_node *an = ATH_NODE(ni); 3909 int tid = bf->bf_state.bfs_tid; 3910 struct ath_tid *atid = &an->an_tid[tid]; 3911 struct ieee80211_tx_ampdu *tap; 3912 3913 ATH_TX_LOCK(sc); 3914 3915 tap = ath_tx_get_tx_tid(an, tid); 3916 3917 /* 3918 * If the buffer is marked as busy, we can't directly 3919 * reuse it. Instead, try to clone the buffer. 3920 * If the clone is successful, recycle the old buffer. 3921 * If the clone is unsuccessful, set bfs_retries to max 3922 * to force the next bit of code to free the buffer 3923 * for us. 
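 *
 * Summarised as a table (this mirrors the code below, it is not
 * additional behaviour):
 *
 *	busy, clone ok		-> retry continues with the clone
 *	busy, clone failed	-> bfs_retries forced past SWMAX_RETRIES;
 *				   the frame is completed/freed below
 *	not busy		-> retry the original buffer in place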
3924 */ 3925 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 3926 (bf->bf_flags & ATH_BUF_BUSY)) { 3927 struct ath_buf *nbf; 3928 nbf = ath_tx_retry_clone(sc, an, atid, bf); 3929 if (nbf) 3930 /* bf has been freed at this point */ 3931 bf = nbf; 3932 else 3933 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 3934 } 3935 3936 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 3937 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 3938 "%s: exceeded retries; seqno %d\n", 3939 __func__, SEQNO(bf->bf_state.bfs_seqno)); 3940 sc->sc_stats.ast_tx_swretrymax++; 3941 3942 /* Update BAW anyway */ 3943 if (bf->bf_state.bfs_dobaw) { 3944 ath_tx_update_baw(sc, an, atid, bf); 3945 if (! bf->bf_state.bfs_addedbaw) 3946 device_printf(sc->sc_dev, 3947 "%s: wasn't added: seqno %d\n", 3948 __func__, SEQNO(bf->bf_state.bfs_seqno)); 3949 } 3950 bf->bf_state.bfs_dobaw = 0; 3951 3952 /* Suspend the TX queue and get ready to send the BAR */ 3953 ath_tx_tid_bar_suspend(sc, atid); 3954 3955 /* Send the BAR if there are no other frames waiting */ 3956 if (ath_tx_tid_bar_tx_ready(sc, atid)) 3957 ath_tx_tid_bar_tx(sc, atid); 3958 3959 ATH_TX_UNLOCK(sc); 3960 3961 /* Free buffer, bf is free after this call */ 3962 ath_tx_default_comp(sc, bf, 0); 3963 return; 3964 } 3965 3966 /* 3967 * This increments the retry counter as well as 3968 * sets the retry flag in the ath_buf and packet 3969 * body. 3970 */ 3971 ath_tx_set_retry(sc, bf); 3972 sc->sc_stats.ast_tx_swretries++; 3973 3974 /* 3975 * Insert this at the head of the queue, so it's 3976 * retried before any current/subsequent frames. 3977 */ 3978 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 3979 ath_tx_tid_sched(sc, atid); 3980 /* Send the BAR if there are no other frames waiting */ 3981 if (ath_tx_tid_bar_tx_ready(sc, atid)) 3982 ath_tx_tid_bar_tx(sc, atid); 3983 3984 ATH_TX_UNLOCK(sc); 3985} 3986 3987/* 3988 * Common code for aggregate excessive retry/subframe retry. 3989 * If retrying, queues buffers to bf_q. If not, frees the 3990 * buffers. 3991 * 3992 * XXX should unify this with ath_tx_aggr_retry_unaggr() 3993 */ 3994static int 3995ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf, 3996 ath_bufhead *bf_q) 3997{ 3998 struct ieee80211_node *ni = bf->bf_node; 3999 struct ath_node *an = ATH_NODE(ni); 4000 int tid = bf->bf_state.bfs_tid; 4001 struct ath_tid *atid = &an->an_tid[tid]; 4002 4003 ATH_TX_LOCK_ASSERT(sc); 4004 4005 /* XXX clr11naggr should be done for all subframes */ 4006 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 4007 ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0); 4008 4009 /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */ 4010 4011 /* 4012 * If the buffer is marked as busy, we can't directly 4013 * reuse it. Instead, try to clone the buffer. 4014 * If the clone is successful, recycle the old buffer. 4015 * If the clone is unsuccessful, set bfs_retries to max 4016 * to force the next bit of code to free the buffer 4017 * for us. 4018 */ 4019 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 4020 (bf->bf_flags & ATH_BUF_BUSY)) { 4021 struct ath_buf *nbf; 4022 nbf = ath_tx_retry_clone(sc, an, atid, bf); 4023 if (nbf) 4024 /* bf has been freed at this point */ 4025 bf = nbf; 4026 else 4027 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 4028 } 4029 4030 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 4031 sc->sc_stats.ast_tx_swretrymax++; 4032 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 4033 "%s: max retries: seqno %d\n", 4034 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4035 ath_tx_update_baw(sc, an, atid, bf); 4036 if (! 
bf->bf_state.bfs_addedbaw) 4037 device_printf(sc->sc_dev, 4038 "%s: wasn't added: seqno %d\n", 4039 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4040 bf->bf_state.bfs_dobaw = 0; 4041 return 1; 4042 } 4043 4044 ath_tx_set_retry(sc, bf); 4045 sc->sc_stats.ast_tx_swretries++; 4046 bf->bf_next = NULL; /* Just to make sure */ 4047 4048 /* Clear the aggregate state */ 4049 bf->bf_state.bfs_aggr = 0; 4050 bf->bf_state.bfs_ndelim = 0; /* ??? needed? */ 4051 bf->bf_state.bfs_nframes = 1; 4052 4053 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 4054 return 0; 4055} 4056 4057/* 4058 * error pkt completion for an aggregate destination 4059 */ 4060static void 4061ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first, 4062 struct ath_tid *tid) 4063{ 4064 struct ieee80211_node *ni = bf_first->bf_node; 4065 struct ath_node *an = ATH_NODE(ni); 4066 struct ath_buf *bf_next, *bf; 4067 ath_bufhead bf_q; 4068 int drops = 0; 4069 struct ieee80211_tx_ampdu *tap; 4070 ath_bufhead bf_cq; 4071 4072 TAILQ_INIT(&bf_q); 4073 TAILQ_INIT(&bf_cq); 4074 4075 /* 4076 * Update rate control - all frames have failed. 4077 * 4078 * XXX use the length in the first frame in the series; 4079 * XXX just so things are consistent for now. 4080 */ 4081 ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc, 4082 &bf_first->bf_status.ds_txstat, 4083 bf_first->bf_state.bfs_pktlen, 4084 bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes); 4085 4086 ATH_TX_LOCK(sc); 4087 tap = ath_tx_get_tx_tid(an, tid->tid); 4088 sc->sc_stats.ast_tx_aggr_failall++; 4089 4090 /* Retry all subframes */ 4091 bf = bf_first; 4092 while (bf) { 4093 bf_next = bf->bf_next; 4094 bf->bf_next = NULL; /* Remove it from the aggr list */ 4095 sc->sc_stats.ast_tx_aggr_fail++; 4096 if (ath_tx_retry_subframe(sc, bf, &bf_q)) { 4097 drops++; 4098 bf->bf_next = NULL; 4099 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 4100 } 4101 bf = bf_next; 4102 } 4103 4104 /* Prepend all frames to the beginning of the queue */ 4105 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { 4106 TAILQ_REMOVE(&bf_q, bf, bf_list); 4107 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 4108 } 4109 4110 /* 4111 * Schedule the TID to be re-tried. 4112 */ 4113 ath_tx_tid_sched(sc, tid); 4114 4115 /* 4116 * send bar if we dropped any frames 4117 * 4118 * Keep the txq lock held for now, as we need to ensure 4119 * that ni_txseqs[] is consistent (as it's being updated 4120 * in the ifnet TX context or raw TX context.) 4121 */ 4122 if (drops) { 4123 /* Suspend the TX queue and get ready to send the BAR */ 4124 ath_tx_tid_bar_suspend(sc, tid); 4125 } 4126 4127 /* 4128 * Send BAR if required 4129 */ 4130 if (ath_tx_tid_bar_tx_ready(sc, tid)) 4131 ath_tx_tid_bar_tx(sc, tid); 4132 4133 ATH_TX_UNLOCK(sc); 4134 4135 /* Complete frames which errored out */ 4136 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 4137 TAILQ_REMOVE(&bf_cq, bf, bf_list); 4138 ath_tx_default_comp(sc, bf, 0); 4139 } 4140} 4141 4142/* 4143 * Handle clean-up of packets from an aggregate list. 4144 * 4145 * There's no need to update the BAW here - the session is being 4146 * torn down. 
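 *
 * The aggregate is walked via the bf_next links; a minimal sketch of
 * the safe traversal used when the frames are completed (and thus
 * freed) below:
 *
 *	for (bf = bf_first; bf != NULL; bf = bf_next) {
 *		bf_next = bf->bf_next;
 *		...
 *	}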
4147 */ 4148 static void 4149 ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first) 4150 { 4151 struct ath_buf *bf, *bf_next; 4152 struct ieee80211_node *ni = bf_first->bf_node; 4153 struct ath_node *an = ATH_NODE(ni); 4154 int tid = bf_first->bf_state.bfs_tid; 4155 struct ath_tid *atid = &an->an_tid[tid]; 4156 4157 ATH_TX_LOCK(sc); 4158 4159 /* update incomp */ 4160 bf = bf_first; 4161 while (bf) { 4162 atid->incomp--; 4163 bf = bf->bf_next; 4164 } 4165 4166 if (atid->incomp == 0) { 4167 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4168 "%s: TID %d: cleaned up! resume!\n", 4169 __func__, tid); 4170 atid->cleanup_inprogress = 0; 4171 ath_tx_tid_resume(sc, atid); 4172 } 4173 4174 /* Send BAR if required */ 4175 /* XXX why would we send a BAR when transitioning to non-aggregation? */ 4176 /* 4177 * XXX TODO: we should likely just tear down the BAR state here, 4178 * rather than sending a BAR. 4179 */ 4180 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4181 ath_tx_tid_bar_tx(sc, atid); 4182 4183 ATH_TX_UNLOCK(sc); 4184 4185 /* Handle frame completion */ 4186 bf = bf_first; 4187 while (bf) { 4188 bf_next = bf->bf_next; 4189 ath_tx_default_comp(sc, bf, 1); 4190 bf = bf_next; 4191 } 4192} 4193 4194/* 4195 * Handle completion of a set of aggregate frames. 4196 * 4197 * Note: the completion status is taken from the last descriptor in the 4198 * aggregate, not from the last descriptor in the first frame. 4199 */ 4200 static void 4201 ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first, 4202 int fail) 4203 { 4204 //struct ath_desc *ds = bf->bf_lastds; 4205 struct ieee80211_node *ni = bf_first->bf_node; 4206 struct ath_node *an = ATH_NODE(ni); 4207 int tid = bf_first->bf_state.bfs_tid; 4208 struct ath_tid *atid = &an->an_tid[tid]; 4209 struct ath_tx_status ts; 4210 struct ieee80211_tx_ampdu *tap; 4211 ath_bufhead bf_q; 4212 ath_bufhead bf_cq; 4213 int seq_st, tx_ok; 4214 int hasba, isaggr; 4215 uint32_t ba[2]; 4216 struct ath_buf *bf, *bf_next; 4217 int ba_index; 4218 int drops = 0; 4219 int nframes = 0, nbad = 0, nf; 4220 int pktlen; 4221 /* XXX there's too much on the stack? */ 4222 struct ath_rc_series rc[ATH_RC_NUM]; 4223 int txseq; 4224 4225 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n", 4226 __func__, atid->hwq_depth); 4227 4228 /* 4229 * Take a copy; this may be needed -after- bf_first 4230 * has been completed and freed. 4231 */ 4232 ts = bf_first->bf_status.ds_txstat; 4233 4234 TAILQ_INIT(&bf_q); 4235 TAILQ_INIT(&bf_cq); 4236 4237 /* The TID state is kept behind the TXQ lock */ 4238 ATH_TX_LOCK(sc); 4239 4240 atid->hwq_depth--; 4241 if (atid->hwq_depth < 0) 4242 device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n", 4243 __func__, atid->hwq_depth); 4244 4245 /* 4246 * If the TID is filtered, handle completing the filter 4247 * transition before potentially kicking it to the cleanup 4248 * function. 4249 * 4250 * XXX this is duplicate work, ew. 4251 */ 4252 if (atid->isfiltered) 4253 ath_tx_tid_filt_comp_complete(sc, atid); 4254 4255 /* 4256 * Punt cleanup to the relevant function, not our problem now 4257 */ 4258 if (atid->cleanup_inprogress) { 4259 if (atid->isfiltered) 4260 device_printf(sc->sc_dev, 4261 "%s: isfiltered=1, normal_comp?\n", 4262 __func__); 4263 ATH_TX_UNLOCK(sc); 4264 ath_tx_comp_cleanup_aggr(sc, bf_first); 4265 return; 4266 } 4267 4268 /* 4269 * If the frame is filtered, transition to filtered frame 4270 * mode and add this to the filtered frame list. 4271 * 4272 * XXX TODO: figure out how this interoperates with 4273 * BAR, pause and cleanup states.
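 *
 * For reference, "filtered" below covers two cases (this restates
 * the test that follows, it is not additional logic):
 *
 *	(ts.ts_status & HAL_TXERR_FILT)		- explicitly filtered
 *	(ts.ts_status != 0 && atid->isfiltered)	- any error whilst the
 *						  TID is already filtered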
4274 */ 4275 if ((ts.ts_status & HAL_TXERR_FILT) || 4276 (ts.ts_status != 0 && atid->isfiltered)) { 4277 if (fail != 0) 4278 device_printf(sc->sc_dev, 4279 "%s: isfiltered=1, fail=%d\n", __func__, fail); 4280 ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq); 4281 4282 /* Remove from BAW */ 4283 TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) { 4284 if (bf->bf_state.bfs_addedbaw) 4285 drops++; 4286 if (bf->bf_state.bfs_dobaw) { 4287 ath_tx_update_baw(sc, an, atid, bf); 4288 if (! bf->bf_state.bfs_addedbaw) 4289 device_printf(sc->sc_dev, 4290 "%s: wasn't added: seqno %d\n", 4291 __func__, 4292 SEQNO(bf->bf_state.bfs_seqno)); 4293 } 4294 bf->bf_state.bfs_dobaw = 0; 4295 } 4296 /* 4297 * If any intermediate frames in the BAW were dropped when 4298 * handling filtering things, send a BAR. 4299 */ 4300 if (drops) 4301 ath_tx_tid_bar_suspend(sc, atid); 4302 4303 /* 4304 * Finish up by sending a BAR if required and freeing 4305 * the frames outside of the TX lock. 4306 */ 4307 goto finish_send_bar; 4308 } 4309 4310 /* 4311 * XXX for now, use the first frame in the aggregate for 4312 * XXX rate control completion; it's at least consistent. 4313 */ 4314 pktlen = bf_first->bf_state.bfs_pktlen; 4315 4316 /* 4317 * Handle errors first! 4318 * 4319 * Here, handle _any_ error as an "exceeded retries" error. 4320 * Later on (when filtered frames are to be specially handled) 4321 * it'll have to be expanded. 4322 */ 4323#if 0 4324 if (ts.ts_status & HAL_TXERR_XRETRY) { 4325#endif 4326 if (ts.ts_status != 0) { 4327 ATH_TX_UNLOCK(sc); 4328 ath_tx_comp_aggr_error(sc, bf_first, atid); 4329 return; 4330 } 4331 4332 tap = ath_tx_get_tx_tid(an, tid); 4333 4334 /* 4335 * extract the starting sequence and block-ack bitmap 4336 */ 4337 /* XXX endian-ness of seq_st, ba? */ 4338 seq_st = ts.ts_seqnum; 4339 hasba = !! (ts.ts_flags & HAL_TX_BA); 4340 tx_ok = (ts.ts_status == 0); 4341 isaggr = bf_first->bf_state.bfs_aggr; 4342 ba[0] = ts.ts_ba_low; 4343 ba[1] = ts.ts_ba_high; 4344 4345 /* 4346 * Copy the TX completion status and the rate control 4347 * series from the first descriptor, as it may be freed 4348 * before the rate control code can get its grubby fingers 4349 * into things. 4350 */ 4351 memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc)); 4352 4353 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4354 "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, " 4355 "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n", 4356 __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags, 4357 isaggr, seq_st, hasba, ba[0], ba[1]); 4358 4359 /* 4360 * The reference driver doesn't do this; it simply ignores 4361 * this check in its entirety. 4362 * 4363 * I've seen this occur when using iperf to send traffic 4364 * out tid 1 - the aggregate frames are all marked as TID 1, 4365 * but the TXSTATUS has TID=0. So, let's just ignore this 4366 * check. 4367 */ 4368#if 0 4369 /* Occasionally, the MAC sends a tx status for the wrong TID. */ 4370 if (tid != ts.ts_tid) { 4371 device_printf(sc->sc_dev, "%s: tid %d != hw tid %d\n", 4372 __func__, tid, ts.ts_tid); 4373 tx_ok = 0; 4374 } 4375#endif 4376 4377 /* AR5416 BA bug; this requires an interface reset */ 4378 if (isaggr && tx_ok && (!
hasba)) { 4379 device_printf(sc->sc_dev, 4380 "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, " 4381 "seq_st=%d\n", 4382 __func__, hasba, tx_ok, isaggr, seq_st); 4383 /* XXX TODO: schedule an interface reset */ 4384#ifdef ATH_DEBUG 4385 ath_printtxbuf(sc, bf_first, 4386 sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0); 4387#endif 4388 } 4389 4390 /* 4391 * Walk the list of frames, figure out which ones were correctly 4392 * sent and which weren't. 4393 */ 4394 bf = bf_first; 4395 nf = bf_first->bf_state.bfs_nframes; 4396 4397 /* bf_first is going to be invalid once this list is walked */ 4398 bf_first = NULL; 4399 4400 /* 4401 * Walk the list of completed frames and determine 4402 * which need to be completed and which need to be 4403 * retransmitted. 4404 * 4405 * For completed frames, the completion functions need 4406 * to be called at the end of this function as the last 4407 * node reference may free the node. 4408 * 4409 * Finally, since the TXQ lock can't be held during the 4410 * completion callback (to avoid lock recursion), 4411 * the completion calls have to be done outside of the 4412 * lock. 4413 */ 4414 while (bf) { 4415 nframes++; 4416 ba_index = ATH_BA_INDEX(seq_st, 4417 SEQNO(bf->bf_state.bfs_seqno)); 4418 bf_next = bf->bf_next; 4419 bf->bf_next = NULL; /* Remove it from the aggr list */ 4420 4421 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4422 "%s: checking bf=%p seqno=%d; ack=%d\n", 4423 __func__, bf, SEQNO(bf->bf_state.bfs_seqno), 4424 ATH_BA_ISSET(ba, ba_index)); 4425 4426 if (tx_ok && ATH_BA_ISSET(ba, ba_index)) { 4427 sc->sc_stats.ast_tx_aggr_ok++; 4428 ath_tx_update_baw(sc, an, atid, bf); 4429 bf->bf_state.bfs_dobaw = 0; 4430 if (! bf->bf_state.bfs_addedbaw) 4431 device_printf(sc->sc_dev, 4432 "%s: wasn't added: seqno %d\n", 4433 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4434 bf->bf_next = NULL; 4435 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 4436 } else { 4437 sc->sc_stats.ast_tx_aggr_fail++; 4438 if (ath_tx_retry_subframe(sc, bf, &bf_q)) { 4439 drops++; 4440 bf->bf_next = NULL; 4441 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 4442 } 4443 nbad++; 4444 } 4445 bf = bf_next; 4446 } 4447 4448 /* 4449 * Now that the BAW updates have been done, unlock 4450 * 4451 * txseq is grabbed before the lock is released so we 4452 * have a consistent view of what -was- in the BAW. 4453 * Anything after this point will not yet have been 4454 * TXed. 4455 */ 4456 txseq = tap->txa_start; 4457 ATH_TX_UNLOCK(sc); 4458 4459 if (nframes != nf) 4460 device_printf(sc->sc_dev, 4461 "%s: num frames seen=%d; bf nframes=%d\n", 4462 __func__, nframes, nf); 4463 4464 /* 4465 * Now we know how many frames were bad, call the rate 4466 * control code. 4467 */ 4468 if (fail == 0) 4469 ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes, 4470 nbad); 4471 4472 /* 4473 * send bar if we dropped any frames 4474 */ 4475 if (drops) { 4476 /* Suspend the TX queue and get ready to send the BAR */ 4477 ATH_TX_LOCK(sc); 4478 ath_tx_tid_bar_suspend(sc, atid); 4479 ATH_TX_UNLOCK(sc); 4480 } 4481 4482 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4483 "%s: txa_start now %d\n", __func__, tap->txa_start); 4484 4485 ATH_TX_LOCK(sc); 4486 4487 /* Prepend all frames to the beginning of the queue */ 4488 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { 4489 TAILQ_REMOVE(&bf_q, bf, bf_list); 4490 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 4491 } 4492 4493 /* 4494 * Reschedule to grab some further frames. 4495 */ 4496 ath_tx_tid_sched(sc, atid); 4497 4498 /* 4499 * If the queue is filtered, re-schedule as required. 
4500 * 4501 * This is required as there may be a subsequent TX descriptor 4502 * for this end-node that has CLRDMASK set, so it's quite possible 4503 * that a filtered frame will be followed by a non-filtered 4504 * (complete or otherwise) frame. 4505 * 4506 * XXX should we do this before we complete the frame? 4507 */ 4508 if (atid->isfiltered) 4509 ath_tx_tid_filt_comp_complete(sc, atid); 4510 4511finish_send_bar: 4512 4513 /* 4514 * Send BAR if required 4515 */ 4516 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4517 ath_tx_tid_bar_tx(sc, atid); 4518 4519 ATH_TX_UNLOCK(sc); 4520 4521 /* Do deferred completion */ 4522 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 4523 TAILQ_REMOVE(&bf_cq, bf, bf_list); 4524 ath_tx_default_comp(sc, bf, 0); 4525 } 4526} 4527 4528/* 4529 * Handle completion of unaggregated frames in an ADDBA 4530 * session. 4531 * 4532 * Fail is set to 1 if the entry is being freed via a call to 4533 * ath_tx_draintxq(). 4534 */ 4535static void 4536ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail) 4537{ 4538 struct ieee80211_node *ni = bf->bf_node; 4539 struct ath_node *an = ATH_NODE(ni); 4540 int tid = bf->bf_state.bfs_tid; 4541 struct ath_tid *atid = &an->an_tid[tid]; 4542 struct ath_tx_status ts; 4543 int drops = 0; 4544 4545 /* 4546 * Take a copy of this; filtering/cloning the frame may free the 4547 * bf pointer. 4548 */ 4549 ts = bf->bf_status.ds_txstat; 4550 4551 /* 4552 * Update rate control status here, before we possibly 4553 * punt to retry or cleanup. 4554 * 4555 * Do it outside of the TXQ lock. 4556 */ 4557 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 4558 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 4559 &bf->bf_status.ds_txstat, 4560 bf->bf_state.bfs_pktlen, 4561 1, (ts.ts_status == 0) ? 0 : 1); 4562 4563 /* 4564 * This is called early so atid->hwq_depth can be tracked. 4565 * This unfortunately means that it's released and regrabbed 4566 * during retry and cleanup. That's rather inefficient. 4567 */ 4568 ATH_TX_LOCK(sc); 4569 4570 if (tid == IEEE80211_NONQOS_TID) 4571 device_printf(sc->sc_dev, "%s: TID=16!\n", __func__); 4572 4573 DPRINTF(sc, ATH_DEBUG_SW_TX, 4574 "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n", 4575 __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth, 4576 SEQNO(bf->bf_state.bfs_seqno)); 4577 4578 atid->hwq_depth--; 4579 if (atid->hwq_depth < 0) 4580 device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n", 4581 __func__, atid->hwq_depth); 4582 4583 /* 4584 * If the TID is filtered, handle completing the filter 4585 * transition before potentially kicking it to the cleanup 4586 * function. 4587 */ 4588 if (atid->isfiltered) 4589 ath_tx_tid_filt_comp_complete(sc, atid); 4590 4591 /* 4592 * If a cleanup is in progress, punt to comp_cleanup; 4593 * rather than handling it here. It's thus their 4594 * responsibility to clean up, call the completion 4595 * function in net80211, etc. 4596 */ 4597 if (atid->cleanup_inprogress) { 4598 if (atid->isfiltered) 4599 device_printf(sc->sc_dev, 4600 "%s: isfiltered=1, normal_comp?\n", 4601 __func__); 4602 ATH_TX_UNLOCK(sc); 4603 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n", 4604 __func__); 4605 ath_tx_comp_cleanup_unaggr(sc, bf); 4606 return; 4607 } 4608 4609 /* 4610 * XXX TODO: how does cleanup, BAR and filtered frame handling 4611 * overlap? 4612 * 4613 * If the frame is filtered OR if it's any failure but 4614 * the TID is filtered, the frame must be added to the 4615 * filtered frame list. 
4616 * 4617 * However - a busy buffer can't be added to the filtered 4618 * list as it will end up being recycled without having 4619 * been made available for the hardware. 4620 */ 4621 if ((ts.ts_status & HAL_TXERR_FILT) || 4622 (ts.ts_status != 0 && atid->isfiltered)) { 4623 int freeframe; 4624 4625 if (fail != 0) 4626 device_printf(sc->sc_dev, 4627 "%s: isfiltered=1, fail=%d\n", 4628 __func__, 4629 fail); 4630 freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf); 4631 if (freeframe) { 4632 /* Remove from BAW */ 4633 if (bf->bf_state.bfs_addedbaw) 4634 drops++; 4635 if (bf->bf_state.bfs_dobaw) { 4636 ath_tx_update_baw(sc, an, atid, bf); 4637 if (! bf->bf_state.bfs_addedbaw) 4638 device_printf(sc->sc_dev, 4639 "%s: wasn't added: seqno %d\n", 4640 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4641 } 4642 bf->bf_state.bfs_dobaw = 0; 4643 } 4644 4645 /* 4646 * If the frame couldn't be filtered, treat it as a drop and 4647 * prepare to send a BAR. 4648 */ 4649 if (freeframe && drops) 4650 ath_tx_tid_bar_suspend(sc, atid); 4651 4652 /* 4653 * Send BAR if required 4654 */ 4655 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4656 ath_tx_tid_bar_tx(sc, atid); 4657 4658 ATH_TX_UNLOCK(sc); 4659 /* 4660 * If freeframe is set, then the frame couldn't be 4661 * cloned and bf is still valid. Just complete/free it. 4662 */ 4663 if (freeframe) 4664 ath_tx_default_comp(sc, bf, fail); 4665 4666 4667 return; 4668 } 4669 /* 4670 * Don't bother with the retry check if all frames 4671 * are being failed (eg during queue deletion.) 4672 */ 4673#if 0 4674 if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) { 4675#endif 4676 if (fail == 0 && ts.ts_status != 0) { 4677 ATH_TX_UNLOCK(sc); 4678 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n", 4679 __func__); 4680 ath_tx_aggr_retry_unaggr(sc, bf); 4681 return; 4682 } 4683 4684 /* Success? Complete */ 4685 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n", 4686 __func__, tid, SEQNO(bf->bf_state.bfs_seqno)); 4687 if (bf->bf_state.bfs_dobaw) { 4688 ath_tx_update_baw(sc, an, atid, bf); 4689 bf->bf_state.bfs_dobaw = 0; 4690 if (! bf->bf_state.bfs_addedbaw) 4691 device_printf(sc->sc_dev, 4692 "%s: wasn't added: seqno %d\n", 4693 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4694 } 4695 4696 /* 4697 * If the queue is filtered, re-schedule as required. 4698 * 4699 * This is required as there may be a subsequent TX descriptor 4700 * for this end-node that has CLRDMASK set, so it's quite possible 4701 * that a filtered frame will be followed by a non-filtered 4702 * (complete or otherwise) frame. 4703 * 4704 * XXX should we do this before we complete the frame? 4705 */ 4706 if (atid->isfiltered) 4707 ath_tx_tid_filt_comp_complete(sc, atid); 4708 4709 /* 4710 * Send BAR if required 4711 */ 4712 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4713 ath_tx_tid_bar_tx(sc, atid); 4714 4715 ATH_TX_UNLOCK(sc); 4716 4717 ath_tx_default_comp(sc, bf, fail); 4718 /* bf is freed at this point */ 4719} 4720 4721void 4722ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 4723{ 4724 if (bf->bf_state.bfs_aggr) 4725 ath_tx_aggr_comp_aggr(sc, bf, fail); 4726 else 4727 ath_tx_aggr_comp_unaggr(sc, bf, fail); 4728} 4729 4730/* 4731 * Schedule some packets from the given node/TID to the hardware. 4732 * 4733 * This is the aggregate version. 
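 *
 * In outline, the loop below behaves roughly like this (a paraphrase
 * for orientation, not extra behaviour):
 *
 *	while (!tid->paused && the software queue is not empty) {
 *		if (the head frame is outside the BAW)
 *			send it alone, as a non-aggregate frame;
 *		else
 *			form an aggregate via ath_tx_form_aggr();
 *		hand the result to the hardware queue;
 *		if (the BAW closed or the hardware queue is full)
 *			break;
 *	}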
4734 */ 4735 void 4736 ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an, 4737 struct ath_tid *tid) 4738 { 4739 struct ath_buf *bf; 4740 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 4741 struct ieee80211_tx_ampdu *tap; 4742 ATH_AGGR_STATUS status; 4743 ath_bufhead bf_q; 4744 4745 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid); 4746 ATH_TX_LOCK_ASSERT(sc); 4747 4748 tap = ath_tx_get_tx_tid(an, tid->tid); 4749 4750 if (tid->tid == IEEE80211_NONQOS_TID) 4751 device_printf(sc->sc_dev, "%s: called for TID=NONQOS_TID?\n", 4752 __func__); 4753 4754 for (;;) { 4755 status = ATH_AGGR_DONE; 4756 4757 /* 4758 * If the upper layer has paused the TID, don't 4759 * queue any further packets. 4760 * 4761 * This can also occur from the completion task because 4762 * of packet loss; but as it's serialised with this code, 4763 * it won't "appear" halfway through queuing packets. 4764 */ 4765 if (tid->paused) 4766 break; 4767 4768 bf = ATH_TID_FIRST(tid); 4769 if (bf == NULL) { 4770 break; 4771 } 4772 4773 /* 4774 * If the packet doesn't fall within the BAW (e.g. a NULL 4775 * data frame), schedule it directly; continue. 4776 */ 4777 if (! bf->bf_state.bfs_dobaw) { 4778 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4779 "%s: non-baw packet\n", 4780 __func__); 4781 ATH_TID_REMOVE(tid, bf, bf_list); 4782 4783 if (bf->bf_state.bfs_nframes > 1) 4784 device_printf(sc->sc_dev, 4785 "%s: aggr=%d, nframes=%d\n", 4786 __func__, 4787 bf->bf_state.bfs_aggr, 4788 bf->bf_state.bfs_nframes); 4789 4790 /* 4791 * This shouldn't happen - such frames shouldn't 4792 * ever have been queued as an aggregate in the 4793 * first place. However, make sure the fields 4794 * are correctly set up just to be totally sure. 4795 */ 4796 bf->bf_state.bfs_aggr = 0; 4797 bf->bf_state.bfs_nframes = 1; 4798 4799 /* Update CLRDMASK just before this frame is queued */ 4800 ath_tx_update_clrdmask(sc, tid, bf); 4801 4802 ath_tx_do_ratelookup(sc, bf); 4803 ath_tx_calc_duration(sc, bf); 4804 ath_tx_calc_protection(sc, bf); 4805 ath_tx_set_rtscts(sc, bf); 4806 ath_tx_rate_fill_rcflags(sc, bf); 4807 ath_tx_setds(sc, bf); 4808 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 4809 4810 sc->sc_aggr_stats.aggr_nonbaw_pkt++; 4811 4812 /* Queue the packet; continue */ 4813 goto queuepkt; 4814 } 4815 4816 TAILQ_INIT(&bf_q); 4817 4818 /* 4819 * Do a rate control lookup on the first frame in the 4820 * list. The rate control code needs that to occur 4821 * before it can determine whether to TX. 4822 * It's inaccurate because the rate control code doesn't 4823 * really "do" aggregate lookups, so it only considers 4824 * the size of the first frame. 4825 */ 4826 ath_tx_do_ratelookup(sc, bf); 4827 bf->bf_state.bfs_rc[3].rix = 0; 4828 bf->bf_state.bfs_rc[3].tries = 0; 4829 4830 ath_tx_calc_duration(sc, bf); 4831 ath_tx_calc_protection(sc, bf); 4832 4833 ath_tx_set_rtscts(sc, bf); 4834 ath_tx_rate_fill_rcflags(sc, bf); 4835 4836 status = ath_tx_form_aggr(sc, an, tid, &bf_q); 4837 4838 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4839 "%s: ath_tx_form_aggr() status=%d\n", __func__, status); 4840 4841 /* 4842 * No frames to be picked up - out of BAW 4843 */ 4844 if (TAILQ_EMPTY(&bf_q)) 4845 break; 4846 4847 /* 4848 * This assumes that the buffers in the ath_bufhead list 4849 * are already linked together via bf_next pointers.
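 *
 * Ie, after ath_tx_form_aggr() returns, the picture is roughly
 * (an illustration):
 *
 *	bf_q:  bf0 -> bf1 -> ... -> bfN	(chained via bf_next)
 *
 * and only bf0, the head of the chain, is handed to the hardware
 * queue below.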
4850 */ 4851 bf = TAILQ_FIRST(&bf_q); 4852 4853 if (status == ATH_AGGR_8K_LIMITED) 4854 sc->sc_aggr_stats.aggr_rts_aggr_limited++; 4855 4856 /* 4857 * If it's the only frame, send it as a non-aggregate; 4858 * assume that ath_tx_form_aggr() has checked whether 4859 * it's in the BAW and added it appropriately. 4860 */ 4861 if (bf->bf_state.bfs_nframes == 1) { 4862 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4863 "%s: single-frame aggregate\n", __func__); 4864 4865 /* Update CLRDMASK just before this frame is queued */ 4866 ath_tx_update_clrdmask(sc, tid, bf); 4867 4868 bf->bf_state.bfs_aggr = 0; 4869 bf->bf_state.bfs_ndelim = 0; 4870 ath_tx_setds(sc, bf); 4871 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 4872 if (status == ATH_AGGR_BAW_CLOSED) 4873 sc->sc_aggr_stats.aggr_baw_closed_single_pkt++; 4874 else 4875 sc->sc_aggr_stats.aggr_single_pkt++; 4876 } else { 4877 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4878 "%s: multi-frame aggregate: %d frames, " 4879 "length %d\n", 4880 __func__, bf->bf_state.bfs_nframes, 4881 bf->bf_state.bfs_al); 4882 bf->bf_state.bfs_aggr = 1; 4883 sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++; 4884 sc->sc_aggr_stats.aggr_aggr_pkt++; 4885 4886 /* Update CLRDMASK just before this frame is queued */ 4887 ath_tx_update_clrdmask(sc, tid, bf); 4888 4889 /* 4890 * Calculate the duration/protection as required. 4891 */ 4892 ath_tx_calc_duration(sc, bf); 4893 ath_tx_calc_protection(sc, bf); 4894 4895 /* 4896 * Update the rate and rtscts information based on the 4897 * rate decision made by the rate control code; 4898 * the first frame in the aggregate needs it. 4899 */ 4900 ath_tx_set_rtscts(sc, bf); 4901 4902 /* 4903 * Set up the relevant descriptor fields 4904 * for aggregation. The first descriptor 4905 * already points to the rest in the chain. 4906 */ 4907 ath_tx_setds_11n(sc, bf); 4908 4909 } 4910 queuepkt: 4911 /* Set completion handler, multi-frame aggregate or not */ 4912 bf->bf_comp = ath_tx_aggr_comp; 4913 4914 if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID) 4915 device_printf(sc->sc_dev, "%s: TID=16?\n", __func__); 4916 4917 /* Punt to txq */ 4918 ath_tx_handoff(sc, txq, bf); 4919 4920 /* Track outstanding buffer count to hardware */ 4921 /* aggregates are "one" buffer */ 4922 tid->hwq_depth++; 4923 4924 /* 4925 * Break out if ath_tx_form_aggr() indicated 4926 * there can't be any further progress (e.g. the BAW is full). 4927 * Checking for an empty txq is done above. 4928 * 4929 * XXX locking on txq here? 4930 */ 4931 if (txq->axq_aggr_depth >= sc->sc_hwq_limit || 4932 status == ATH_AGGR_BAW_CLOSED) 4933 break; 4934 } 4935} 4936 4937/* 4938 * Schedule some packets from the given node/TID to the hardware. 4939 */ 4940 void 4941 ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an, 4942 struct ath_tid *tid) 4943 { 4944 struct ath_buf *bf; 4945 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 4946 4947 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n", 4948 __func__, an, tid->tid); 4949 4950 ATH_TX_LOCK_ASSERT(sc); 4951 4952 /* Check - if AMPDU is pending or running, print a warning */ 4953 if (ath_tx_ampdu_pending(sc, an, tid->tid)) 4954 device_printf(sc->sc_dev, "%s: tid=%d, ampdu pending?\n", 4955 __func__, tid->tid); 4956 if (ath_tx_ampdu_running(sc, an, tid->tid)) 4957 device_printf(sc->sc_dev, "%s: tid=%d, ampdu running?\n", 4958 __func__, tid->tid); 4959 4960 for (;;) { 4961 4962 /* 4963 * If the upper layers have paused the TID, don't 4964 * queue any further packets.
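 *
 * (Note: tid->paused acts as a nesting pause count - pause and
 * resume calls are paired - so the test below is "non-zero",
 * not a simple boolean check.)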
4965 */ 4966 if (tid->paused) 4967 break; 4968 4969 bf = ATH_TID_FIRST(tid); 4970 if (bf == NULL) { 4971 break; 4972 } 4973 4974 ATH_TID_REMOVE(tid, bf, bf_list); 4975 4976 /* Sanity check! */ 4977 if (tid->tid != bf->bf_state.bfs_tid) { 4978 device_printf(sc->sc_dev, "%s: bfs_tid %d !=" 4979 " tid %d\n", 4980 __func__, bf->bf_state.bfs_tid, tid->tid); 4981 } 4982 /* Normal completion handler */ 4983 bf->bf_comp = ath_tx_normal_comp; 4984 4985 /* 4986 * Override this for now, until the non-aggregate 4987 * completion handler correctly handles software retransmits. 4988 */ 4989 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 4990 4991 /* Update CLRDMASK just before this frame is queued */ 4992 ath_tx_update_clrdmask(sc, tid, bf); 4993 4994 /* Program descriptors + rate control */ 4995 ath_tx_do_ratelookup(sc, bf); 4996 ath_tx_calc_duration(sc, bf); 4997 ath_tx_calc_protection(sc, bf); 4998 ath_tx_set_rtscts(sc, bf); 4999 ath_tx_rate_fill_rcflags(sc, bf); 5000 ath_tx_setds(sc, bf); 5001 5002 /* Track outstanding buffer count to hardware */ 5003 /* aggregates are "one" buffer */ 5004 tid->hwq_depth++; 5005 5006 /* Punt to hardware or software txq */ 5007 ath_tx_handoff(sc, txq, bf); 5008 } 5009} 5010 5011/* 5012 * Schedule some packets to the given hardware queue. 5013 * 5014 * This function walks the list of TIDs (ie, ath_node TIDs 5015 * with queued traffic) and attempts to schedule traffic 5016 * from them. 5017 * 5018 * TID scheduling is implemented as a FIFO, with TIDs being 5019 * added to the end of the queue after some frames have been 5020 * scheduled. 5021 */ 5022void 5023ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq) 5024{ 5025 struct ath_tid *tid, *next, *last; 5026 5027 ATH_TX_LOCK_ASSERT(sc); 5028 5029 /* 5030 * Don't schedule if the hardware queue is busy. 5031 * This (hopefully) gives some more time to aggregate 5032 * some packets in the aggregation queue. 5033 */ 5034 if (txq->axq_aggr_depth >= sc->sc_hwq_limit) { 5035 sc->sc_aggr_stats.aggr_sched_nopkt++; 5036 return; 5037 } 5038 5039 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s); 5040 5041 TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) { 5042 /* 5043 * Suspend paused queues here; they'll be resumed 5044 * once the addba completes or times out. 5045 */ 5046 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n", 5047 __func__, tid->tid, tid->paused); 5048 ath_tx_tid_unsched(sc, tid); 5049 if (tid->paused) { 5050 continue; 5051 } 5052 if (ath_tx_ampdu_running(sc, tid->an, tid->tid)) 5053 ath_tx_tid_hw_queue_aggr(sc, tid->an, tid); 5054 else 5055 ath_tx_tid_hw_queue_norm(sc, tid->an, tid); 5056 5057 /* Not empty? Re-schedule */ 5058 if (tid->axq_depth != 0) 5059 ath_tx_tid_sched(sc, tid); 5060 5061 /* Give the software queue time to aggregate more packets */ 5062 if (txq->axq_aggr_depth >= sc->sc_hwq_limit) { 5063 break; 5064 } 5065 5066 /* 5067 * If this was the last entry on the original list, stop. 5068 * Otherwise nodes that have been rescheduled onto the end 5069 * of the TID FIFO list will just keep being rescheduled. 
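 *
 * Example (illustrative): if TID A is popped, serviced, and then
 * re-appended by ath_tx_tid_sched() whilst we're still walking the
 * list, an unbounded walk would visit A over and over; capturing
 *
 *	last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
 *
 * before the loop bounds the walk to a single pass over the
 * original entries.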
5070 */ 5071 if (tid == last) 5072 break; 5073 } 5074} 5075 5076/* 5077 * TX addba handling 5078 */ 5079 5080/* 5081 * Return net80211 TID struct pointer, or NULL for none 5082 */ 5083struct ieee80211_tx_ampdu * 5084ath_tx_get_tx_tid(struct ath_node *an, int tid) 5085{ 5086 struct ieee80211_node *ni = &an->an_node; 5087 struct ieee80211_tx_ampdu *tap; 5088 5089 if (tid == IEEE80211_NONQOS_TID) 5090 return NULL; 5091 5092 tap = &ni->ni_tx_ampdu[tid]; 5093 return tap; 5094} 5095 5096/* 5097 * Is AMPDU-TX running? 5098 */ 5099static int 5100ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid) 5101{ 5102 struct ieee80211_tx_ampdu *tap; 5103 5104 if (tid == IEEE80211_NONQOS_TID) 5105 return 0; 5106 5107 tap = ath_tx_get_tx_tid(an, tid); 5108 if (tap == NULL) 5109 return 0; /* Not valid; default to not running */ 5110 5111 return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING); 5112} 5113 5114/* 5115 * Is AMPDU-TX negotiation pending? 5116 */ 5117static int 5118ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid) 5119{ 5120 struct ieee80211_tx_ampdu *tap; 5121 5122 if (tid == IEEE80211_NONQOS_TID) 5123 return 0; 5124 5125 tap = ath_tx_get_tx_tid(an, tid); 5126 if (tap == NULL) 5127 return 0; /* Not valid; default to not pending */ 5128 5129 return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND); 5130} 5131 5132/* 5133 * Is AMPDU-TX pending for the given TID? 5134 */ 5135 5136 5137/* 5138 * Method to handle sending an ADDBA request. 5139 * 5140 * We tap this so the relevant flags can be set to pause the TID 5141 * whilst waiting for the response. 5142 * 5143 * XXX there's no timeout handler we can override? 5144 */ 5145int 5146ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5147 int dialogtoken, int baparamset, int batimeout) 5148{ 5149 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5150 int tid = tap->txa_tid; 5151 struct ath_node *an = ATH_NODE(ni); 5152 struct ath_tid *atid = &an->an_tid[tid]; 5153 5154 /* 5155 * XXX danger Will Robinson! 5156 * 5157 * Although the taskqueue may be running and scheduling some more 5158 * packets, these should all be _before_ the addba sequence number. 5159 * However, net80211 will keep self-assigning sequence numbers 5160 * until addba has been negotiated. 5161 * 5162 * In the past, these packets would be "paused" (which still works 5163 * fine, as they're being scheduled to the driver in the same 5164 * serialised method which is calling the addba request routine) 5165 * and when the aggregation session begins, they'll be dequeued 5166 * as aggregate packets and added to the BAW. However, now there's 5167 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these 5168 * packets. Thus they never get included in the BAW tracking and 5169 * this can cause the initial burst of packets after the addba 5170 * negotiation to "hang", as they quickly fall outside the BAW. 5171 * 5172 * The "eventual" solution should be to tag these packets with 5173 * dobaw. Although net80211 has given us a sequence number, 5174 * it'll be "after" the left edge of the BAW and thus it'll 5175 * fall within it. 5176 */ 5177 ATH_TX_LOCK(sc); 5178 /* 5179 * This is a bit annoying. Until net80211 HT code inherits some 5180 * (any) locking, we may have this called in parallel BUT only 5181 * one response/timeout will be called. Grr. 
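 *
 * The pairing to keep in mind (both halves live in this file):
 * addba_tx_pending is set here, under ATH_TX_LOCK, together with
 * the ath_tx_tid_pause(); it's cleared - with a matching
 * ath_tx_tid_resume() - in ath_addba_response() or
 * ath_addba_response_timeout(), whichever fires first.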
5182 */ 5183 if (atid->addba_tx_pending == 0) { 5184 ath_tx_tid_pause(sc, atid); 5185 atid->addba_tx_pending = 1; 5186 } 5187 ATH_TX_UNLOCK(sc); 5188 5189 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5190 "%s: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n", 5191 __func__, dialogtoken, baparamset, batimeout); 5192 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5193 "%s: txa_start=%d, ni_txseqs=%d\n", 5194 __func__, tap->txa_start, ni->ni_txseqs[tid]); 5195 5196 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, 5197 batimeout); 5198} 5199 5200/* 5201 * Handle an ADDBA response. 5202 * 5203 * We unpause the queue so TX'ing can resume. 5204 * 5205 * Any packets TX'ed from this point should be "aggregate" (whether 5206 * aggregate or not) so the BAW is updated. 5207 * 5208 * Note! net80211 keeps self-assigning sequence numbers until 5209 * ampdu is negotiated. This means the initially-negotiated BAW left 5210 * edge won't match the ni->ni_txseq. 5211 * 5212 * So, being very dirty, the BAW left edge is "slid" here to match 5213 * ni->ni_txseq. 5214 * 5215 * What likely SHOULD happen is that all packets subsequent to the 5216 * addba request should be tagged as aggregate and queued as non-aggregate 5217 * frames; thus updating the BAW. For now though, I'll just slide the 5218 * window. 5219 */ 5220int 5221ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5222 int status, int code, int batimeout) 5223{ 5224 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5225 int tid = tap->txa_tid; 5226 struct ath_node *an = ATH_NODE(ni); 5227 struct ath_tid *atid = &an->an_tid[tid]; 5228 int r; 5229 5230 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5231 "%s: called; status=%d, code=%d, batimeout=%d\n", __func__, 5232 status, code, batimeout); 5233 5234 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5235 "%s: txa_start=%d, ni_txseqs=%d\n", 5236 __func__, tap->txa_start, ni->ni_txseqs[tid]); 5237 5238 /* 5239 * Call this first, so the interface flags get updated 5240 * before the TID is unpaused. Otherwise a race condition 5241 * exists where the unpaused TID still doesn't yet have 5242 * IEEE80211_AGGR_RUNNING set. 5243 */ 5244 r = sc->sc_addba_response(ni, tap, status, code, batimeout); 5245 5246 ATH_TX_LOCK(sc); 5247 atid->addba_tx_pending = 0; 5248 /* 5249 * XXX dirty! 5250 * Slide the BAW left edge to wherever net80211 left it for us. 5251 * Read above for more information. 5252 */ 5253 tap->txa_start = ni->ni_txseqs[tid]; 5254 ath_tx_tid_resume(sc, atid); 5255 ATH_TX_UNLOCK(sc); 5256 return r; 5257} 5258 5259 5260/* 5261 * Stop ADDBA on a queue. 5262 * 5263 * This can be called whilst BAR TX is currently active on the queue, 5264 * so make sure this is unblocked before continuing. 5265 */ 5266void 5267ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) 5268{ 5269 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5270 int tid = tap->txa_tid; 5271 struct ath_node *an = ATH_NODE(ni); 5272 struct ath_tid *atid = &an->an_tid[tid]; 5273 5274 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: called\n", __func__); 5275 5276 /* 5277 * Pause TID traffic early, so there aren't any races 5278 * Unblock the pending BAR held traffic, if it's currently paused. 5279 */ 5280 ATH_TX_LOCK(sc); 5281 ath_tx_tid_pause(sc, atid); 5282 if (atid->bar_wait) { 5283 /* 5284 * bar_unsuspend() expects bar_tx == 1, as it should be 5285 * called from the TX completion path. This quietens 5286 * the warning. It's cleared for us anyway. 
5287 */ 5288 atid->bar_tx = 1; 5289 ath_tx_tid_bar_unsuspend(sc, atid); 5290 } 5291 ATH_TX_UNLOCK(sc); 5292 5293 /* There's no need to hold the TXQ lock here */ 5294 sc->sc_addba_stop(ni, tap); 5295 5296 /* 5297 * ath_tx_tid_cleanup will resume the TID if possible, otherwise 5298 * it'll set the cleanup flag, and it'll be unpaused once 5299 * things have been cleaned up. 5300 */ 5301 ath_tx_tid_cleanup(sc, an, tid); 5302} 5303 5304/* 5305 * Note: net80211 bar_timeout() doesn't call this function on BAR failure; 5306 * it simply tears down the aggregation session. Ew. 5307 * 5308 * It however will call ieee80211_ampdu_stop() which will call 5309 * ic->ic_addba_stop(). 5310 * 5311 * XXX This uses a hard-coded max BAR count value; the whole 5312 * XXX BAR TX success or failure should be better handled! 5313 */ 5314void 5315ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5316 int status) 5317{ 5318 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5319 int tid = tap->txa_tid; 5320 struct ath_node *an = ATH_NODE(ni); 5321 struct ath_tid *atid = &an->an_tid[tid]; 5322 int attempts = tap->txa_attempts; 5323 5324 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 5325 "%s: called; tap=%p, atid=%p, txa_tid=%d, atid->tid=%d, status=%d, attempts=%d\n", 5326 __func__, 5327 tap, 5328 atid, 5329 tap->txa_tid, 5330 atid->tid, 5331 status, 5332 attempts); 5333 5334 /* Note: This may update the BAW details */ 5335 sc->sc_bar_response(ni, tap, status); 5336 5337 /* Unpause the TID */ 5338 /* 5339 * XXX if this is attempt=50, the TID will be downgraded 5340 * XXX to a non-aggregate session. So we must unpause the 5341 * XXX TID here or it'll never be done. 5342 * 5343 * Also, don't call it if bar_tx/bar_wait are 0; something 5344 * has beaten us to the punch? (XXX figure out what?) 5345 */ 5346 if (status == 0 || attempts == 50) { 5347 ATH_TX_LOCK(sc); 5348 if (atid->bar_tx == 0 || atid->bar_wait == 0) 5349 device_printf(sc->sc_dev, 5350 "%s: huh? bar_tx=%d, bar_wait=%d\n", 5351 __func__, 5352 atid->bar_tx, atid->bar_wait); 5353 else 5354 ath_tx_tid_bar_unsuspend(sc, atid); 5355 ATH_TX_UNLOCK(sc); 5356 } 5357} 5358 5359/* 5360 * This is called whenever the pending ADDBA request times out. 5361 * Unpause and reschedule the TID. 5362 */ 5363void 5364ath_addba_response_timeout(struct ieee80211_node *ni, 5365 struct ieee80211_tx_ampdu *tap) 5366{ 5367 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5368 int tid = tap->txa_tid; 5369 struct ath_node *an = ATH_NODE(ni); 5370 struct ath_tid *atid = &an->an_tid[tid]; 5371 5372 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5373 "%s: called; resuming\n", __func__); 5374 5375 ATH_TX_LOCK(sc); 5376 atid->addba_tx_pending = 0; 5377 ATH_TX_UNLOCK(sc); 5378 5379 /* Note: This updates the aggregate state to (again) pending */ 5380 sc->sc_addba_response_timeout(ni, tap); 5381 5382 /* Unpause the TID; which reschedules it */ 5383 ATH_TX_LOCK(sc); 5384 ath_tx_tid_resume(sc, atid); 5385 ATH_TX_UNLOCK(sc); 5386} 5387 5388/* 5389 * Check if a node is asleep or not. 5390 */ 5391int 5392ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an) 5393{ 5394 5395 ATH_NODE_LOCK_ASSERT(an); 5396 5397 return (an->an_is_powersave); 5398} 5399 5400/* 5401 * Mark a node as currently "in powersaving." 5402 * This suspends all traffic on the node. 5403 * 5404 * This must be called with the node/tx locks free. 5405 * 5406 * XXX TODO: the locking silliness below is due to how the node 5407 * locking currently works. 
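 * A sketch of the lock ordering this implies (illustrative):
 *
 *	ATH_TX_LOCK(sc) -> ATH_NODE_LOCK(an)	the existing order (ok)
 *	ATH_NODE_LOCK(an) -> ATH_TX_LOCK(sc)	would be a LOR
 *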
Right now, the node lock is grabbed 5408 * to do rate control lookups and these are done with the TX 5409 * queue lock held. This means the node lock can't be grabbed 5410 * first here or a LOR will occur. 5411 * 5412 * Eventually (hopefully!) the TX path code will only grab 5413 * the TXQ lock when transmitting and the ath_node lock when 5414 * doing node/TID operations. There are other complications - 5415 * the sched/unsched operations involve walking the per-txq 5416 * 'active tid' list and this requires both locks to be held. 5417 */ 5418void 5419ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an) 5420{ 5421 struct ath_tid *atid; 5422 struct ath_txq *txq; 5423 int tid; 5424 5425 ATH_NODE_UNLOCK_ASSERT(an); 5426 5427 /* 5428 * It's possible that a parallel call to ath_tx_node_wakeup() 5429 * will unpause these queues. 5430 * 5431 * The node lock can't just be grabbed here, as there's places 5432 * in the driver where the node lock is grabbed _within_ a 5433 * TXQ lock. 5434 * So, we do this delicately and unwind state if needed. 5435 * 5436 * + Pause all the queues 5437 * + Grab the node lock 5438 * + If the queue is already asleep, unpause and quit 5439 * + else just mark as asleep. 5440 * 5441 * A parallel sleep() call will just pause and then 5442 * find they're already paused, so undo it. 5443 * 5444 * A parallel wakeup() call will check if asleep is 1 5445 * and if it's not (ie, it's 0), it'll treat it as already 5446 * being awake. If it's 1, it'll mark it as 0 and then 5447 * unpause everything. 5448 * 5449 * (Talk about a delicate hack.) 5450 */ 5451 5452 /* Suspend all traffic on the node */ 5453 ATH_TX_LOCK(sc); 5454 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 5455 atid = &an->an_tid[tid]; 5456 txq = sc->sc_ac2q[atid->ac]; 5457 5458 ath_tx_tid_pause(sc, atid); 5459 } 5460 ATH_TX_UNLOCK(sc); 5461 5462 ATH_NODE_LOCK(an); 5463 5464 /* In case of concurrency races from net80211.. */ 5465 if (an->an_is_powersave == 1) { 5466 ATH_NODE_UNLOCK(an); 5467 device_printf(sc->sc_dev, 5468 "%s: an=%p: node was already asleep\n", 5469 __func__, an); 5470 ATH_TX_LOCK(sc); 5471 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 5472 atid = &an->an_tid[tid]; 5473 txq = sc->sc_ac2q[atid->ac]; 5474 5475 ath_tx_tid_resume(sc, atid); 5476 } 5477 ATH_TX_UNLOCK(sc); 5478 return; 5479 } 5480 5481 /* Mark node as in powersaving */ 5482 an->an_is_powersave = 1; 5483 5484 ATH_NODE_UNLOCK(an); 5485} 5486 5487/* 5488 * Mark a node as currently "awake." 5489 * This resumes all traffic to the node. 5490 */ 5491void 5492ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an) 5493{ 5494 struct ath_tid *atid; 5495 struct ath_txq *txq; 5496 int tid; 5497 5498 ATH_NODE_UNLOCK_ASSERT(an); 5499 ATH_NODE_LOCK(an); 5500 5501 /* In case of concurrency races from net80211.. 
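 * (eg, two back-to-back wakeup notifications: the second caller
 * sees an_is_powersave == 0 below and simply bails out)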
*/ 5502 if (an->an_is_powersave == 0) { 5503 ATH_NODE_UNLOCK(an); 5504 device_printf(sc->sc_dev, 5505 "%s: an=%p: node was already awake\n", 5506 __func__, an); 5507 return; 5508 } 5509 5510 /* Mark node as awake */ 5511 an->an_is_powersave = 0; 5512 5513 ATH_NODE_UNLOCK(an); 5514 5515 ATH_TX_LOCK(sc); 5516 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 5517 atid = &an->an_tid[tid]; 5518 txq = sc->sc_ac2q[atid->ac]; 5519 5520 ath_tx_tid_resume(sc, atid); 5521 } 5522 ATH_TX_UNLOCK(sc); 5523} 5524 5525static int 5526ath_legacy_dma_txsetup(struct ath_softc *sc) 5527{ 5528 5529 /* nothing new needed */ 5530 return (0); 5531} 5532 5533static int 5534ath_legacy_dma_txteardown(struct ath_softc *sc) 5535{ 5536 5537 /* nothing new needed */ 5538 return (0); 5539} 5540 5541void 5542ath_xmit_setup_legacy(struct ath_softc *sc) 5543{ 5544 /* 5545 * For now, just set the descriptor length to sizeof(ath_desc); 5546 * worry about extracting the real length out of the HAL later. 5547 */ 5548 sc->sc_tx_desclen = sizeof(struct ath_desc); 5549 sc->sc_tx_statuslen = sizeof(struct ath_desc); 5550 sc->sc_tx_nmaps = 1; /* only one buffer per TX desc */ 5551 5552 sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup; 5553 sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown; 5554 sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func; 5555 5556 sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart; 5557 sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff; 5558 5559 sc->sc_tx.xmit_drain = ath_legacy_tx_drain; 5560} 5561