/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath_tx.c 243647 2012-11-28 06:55:34Z adrian $");
| 32__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath_tx.c 243786 2012-12-02 06:24:08Z adrian $");
|

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#include <net80211/ieee80211_ht.h>

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_tx_ht.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * How many retries to perform in software
 */
#define	SWMAX_RETRIES		10

/*
 * What queue to throw the non-QoS TID traffic into
 */
#define	ATH_NONQOS_TID_AC	WME_AC_VO

#if 0
static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
#endif
static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
    int tid);
static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
    int tid);
static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
    struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
    struct ieee80211_node *ni, struct mbuf *m0, int *tid);
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf);

#ifdef ATH_DEBUG_ALQ
void
ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf;
	int i, n;
	const char *ds;

	/* XXX we should skip out early if debugging isn't enabled! */
	bf = bf_first;

	while (bf != NULL) {
		/* XXX should ensure bf_nseg > 0! */
		if (bf->bf_nseg == 0)
			break;
		n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
		for (i = 0, ds = (const char *) bf->bf_desc;
		    i < n;
		    i++, ds += sc->sc_tx_desclen) {
			if_ath_alq_post(&sc->sc_alq,
			    ATH_ALQ_EDMA_TXDESC,
			    sc->sc_tx_desclen,
			    ds);
		}
		bf = bf->bf_next;
	}
}
#endif /* ATH_DEBUG_ALQ */

/*
 * Whether to use the 11n rate scenario functions or not
 */
static inline int
ath_tx_is_11n(struct ath_softc *sc)
{
	return ((sc->sc_ah->ah_magic == 0x20065416) ||
	    (sc->sc_ah->ah_magic == 0x19741014));
}

/*
 * Obtain the current TID from the given frame.
 *
 * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID.)
 * This has implications for which AC/priority the packet is placed
 * in.
 */
static int
ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (! IEEE80211_QOS_HAS_SEQ(wh))
		return IEEE80211_NONQOS_TID;
	else
		return WME_AC_TO_TID(pri);
}

static void
ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;

	wh = mtod(bf->bf_m, struct ieee80211_frame *);
	/* Only update/resync if needed */
	if (bf->bf_state.bfs_isretried == 0) {
		wh->i_fc[1] |= IEEE80211_FC1_RETRY;
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);
	}
	bf->bf_state.bfs_isretried = 1;
	bf->bf_state.bfs_retries++;
}

/*
 * Determine what the correct AC queue for the given frame
 * should be.
 *
 * This code assumes that the TIDs map consistently to
 * the underlying hardware (or software) ath_txq.
 * Since the sender may try to set an AC which is
 * arbitrary, non-QoS TIDs may end up being put on
 * completely different ACs.  There's no way to put a
 * TID into multiple ath_txq's for scheduling, so
 * for now we override the AC/TXQ selection and map
 * non-QoS TID frames into the ATH_NONQOS_TID_AC queue.
 *
 * This may be completely incorrect - specifically,
 * some management frames may end up out of order
 * compared to the QoS traffic they're controlling.
 * I'll look into this later.
 */
static int
ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (IEEE80211_QOS_HAS_SEQ(wh))
		return pri;

	return ATH_NONQOS_TID_AC;
}

void
ath_txfrag_cleanup(struct ath_softc *sc,
	ath_bufhead *frags, struct ieee80211_node *ni)
{
	struct ath_buf *bf, *next;

	ATH_TXBUF_LOCK_ASSERT(sc);

	TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
		/* NB: bf assumed clean */
		TAILQ_REMOVE(frags, bf, bf_list);
		ath_returnbuf_head(sc, bf);
		ieee80211_node_decref(ni);
	}
}

/*
 * Setup xmit of a fragmented frame.  Allocate a buffer
 * for each frag and bump the node reference count to
 * reflect the held reference to be setup by ath_tx_start.
 */
int
ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
	struct mbuf *m0, struct ieee80211_node *ni)
{
	struct mbuf *m;
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
		/* XXX non-management? */
		bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
		if (bf == NULL) {	/* out of buffers, cleanup */
			device_printf(sc->sc_dev, "%s: no buffer?\n",
			    __func__);
			ath_txfrag_cleanup(sc, frags, ni);
			break;
		}
		ieee80211_node_incref(ni);
		TAILQ_INSERT_TAIL(frags, bf, bf_list);
	}
	ATH_TXBUF_UNLOCK(sc);

	return !TAILQ_EMPTY(frags);
}

/*
 * Reclaim mbuf resources.  For fragmented frames we
 * need to claim each frag chained with m_nextpkt.
 */
void
ath_freetx(struct mbuf *m)
{
	struct mbuf *next;

	do {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m_freem(m);
	} while ((m = next) != NULL);
}

static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = ATH_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ath_freetx(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_TXDESC) {		/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
		m = m_collapse(m0, M_DONTWAIT, ATH_TXDESC);
		if (m == NULL) {
			ath_freetx(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			ath_freetx(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		ath_freetx(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}

/*
 * Chain together segments+descriptors for a frame - 11n or otherwise.
 *
 * For aggregates, this is called on each frame in the aggregate.
 */
static void
ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
    struct ath_buf *bf, int is_aggr, int is_first_subframe,
    int is_last_subframe)
{
	struct ath_hal *ah = sc->sc_ah;
	char *ds;
	int i, bp, dsp;
	HAL_DMA_ADDR bufAddrList[4];
	uint32_t segLenList[4];
	int numTxMaps = 1;
	int isFirstDesc = 1;
	int qnum;

	/*
	 * XXX There's txdma and txdma_mgmt; the descriptor
	 * sizes must match.
	 */
	struct ath_descdma *dd = &sc->sc_txdma;

	/*
	 * Fill in the remainder of the descriptor info.
	 */

	/*
	 * For now the HAL doesn't implement halNumTxMaps for non-EDMA
	 * (ie it's 0.)  So just work around it.
	 *
	 * XXX TODO: populate halNumTxMaps for each HAL chip and
	 * then undo this hack.
	 */
	if (sc->sc_ah->ah_magic == 0x19741014)
		numTxMaps = 4;

	/*
	 * For EDMA and later chips ensure the TX map is fully populated
	 * before advancing to the next descriptor.
	 */
	ds = (char *) bf->bf_desc;
	bp = dsp = 0;
	bzero(bufAddrList, sizeof(bufAddrList));
	bzero(segLenList, sizeof(segLenList));
	for (i = 0; i < bf->bf_nseg; i++) {
		bufAddrList[bp] = bf->bf_segs[i].ds_addr;
		segLenList[bp] = bf->bf_segs[i].ds_len;
		bp++;

		/*
		 * Go to the next segment if this isn't the last segment
		 * and there's space in the current TX map.
		 */
		if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
			continue;

		/*
		 * Last segment or we're out of buffer pointers.
		 */
		bp = 0;

		if (i == bf->bf_nseg - 1)
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);
		else
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds,
			    bf->bf_daddr + dd->dd_descsize * (dsp + 1));

		/*
		 * XXX this assumes that bfs_txq is the actual destination
		 * hardware queue at this point.  It may not have been
		 * assigned, it may actually be pointing to the multicast
		 * software TXQ id.  These must be fixed!
		 */
		qnum = bf->bf_state.bfs_txq->axq_qnum;

		ath_hal_filltxdesc(ah, (struct ath_desc *) ds
			, bufAddrList
			, segLenList
			, bf->bf_descid		/* XXX desc id */
			, qnum
			, isFirstDesc		/* first segment */
			, i == bf->bf_nseg - 1	/* last segment */
			, (struct ath_desc *) ds0	/* first descriptor */
		);

		/*
		 * Make sure the 11n aggregate fields are cleared.
		 *
		 * XXX TODO: this doesn't need to be called for
		 * aggregate frames; as it'll be called on all
		 * sub-frames.  Since the descriptors are in
		 * non-cacheable memory, this leads to some
		 * rather slow writes on MIPS/ARM platforms.
		 */
		if (ath_tx_is_11n(sc))
			ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);

		/*
		 * If 11n is enabled, set it up as if it's an aggregate
		 * frame.
		 */
		if (is_last_subframe) {
			ath_hal_set11n_aggr_last(sc->sc_ah,
			    (struct ath_desc *) ds);
		} else if (is_aggr) {
			/*
			 * This clears the aggrlen field; so
			 * the caller needs to call set_aggr_first()!
			 *
			 * XXX TODO: don't call this for the first
			 * descriptor in the first frame in an
			 * aggregate!
			 */
			ath_hal_set11n_aggr_middle(sc->sc_ah,
			    (struct ath_desc *) ds,
			    bf->bf_state.bfs_ndelim);
		}
		isFirstDesc = 0;
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_XMIT)
			ath_printtxbuf(sc, bf, qnum, 0, 0);
#endif
		bf->bf_lastds = (struct ath_desc *) ds;

		/*
		 * Don't forget to skip to the next descriptor.
		 */
		ds += sc->sc_tx_desclen;
		dsp++;

		/*
		 * .. and don't forget to blank these out!
		 */
		bzero(bufAddrList, sizeof(bufAddrList));
		bzero(segLenList, sizeof(segLenList));
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
}

/*
 * Set the rate control fields in the given descriptor based on
 * the bf_state fields and node state.
 *
 * The bfs fields should already be set with the relevant rate
 * control information, including whether MRR is to be enabled.
 *
 * Since the FreeBSD HAL currently sets up the first TX rate
 * in ath_hal_setuptxdesc(), this will setup the MRR
 * conditionally for the pre-11n chips, and call ath_buf_set_rate
 * unconditionally for 11n chips.  These require the 11n rate
 * scenario to be set if MCS rates are enabled, so it's easier
 * to just always call it.  The caller can then only set rates 2, 3
 * and 4 if multi-rate retry is needed.
 */
static void
ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf)
{
	struct ath_rc_series *rc = bf->bf_state.bfs_rc;

	/* If mrr is disabled, blank tries 1, 2, 3 */
	if (! bf->bf_state.bfs_ismrr)
		rc[1].tries = rc[2].tries = rc[3].tries = 0;

#if 0
	/*
	 * If NOACK is set, just set ntries=1.
	 */
	else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
		rc[1].tries = rc[2].tries = rc[3].tries = 0;
		rc[0].tries = 1;
	}
#endif

	/*
	 * Always call - that way a retried descriptor will
	 * have the MRR fields overwritten.
	 *
	 * XXX TODO: see if this is really needed - setting up
	 * the first descriptor should set the MRR fields to 0
	 * for us anyway.
	 */
	if (ath_tx_is_11n(sc)) {
		ath_buf_set_rate(sc, ni, bf);
	} else {
		ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
			, rc[1].ratecode, rc[1].tries
			, rc[2].ratecode, rc[2].tries
			, rc[3].ratecode, rc[3].tries
		);
	}
}

/*
 * Setup segments+descriptors for an 11n aggregate.
 * bf_first is the first buffer in the aggregate.
 * The descriptor list must already be linked together using
 * bf->bf_next.
 */
static void
ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf, *bf_prev = NULL;
	struct ath_desc *ds0 = bf_first->bf_desc;

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
	    __func__, bf_first->bf_state.bfs_nframes,
	    bf_first->bf_state.bfs_al);

	bf = bf_first;

	if (bf->bf_state.bfs_txrate0 == 0)
		device_printf(sc->sc_dev, "%s: bf=%p, txrate0=%d\n",
		    __func__, bf, 0);
	if (bf->bf_state.bfs_rc[0].ratecode == 0)
		device_printf(sc->sc_dev, "%s: bf=%p, rix0=%d\n",
		    __func__, bf, 0);

	/*
	 * Setup all descriptors of all subframes - this will
	 * call ath_hal_set11naggrmiddle() on every frame.
	 */
	while (bf != NULL) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
		    "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
		    __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
		    SEQNO(bf->bf_state.bfs_seqno));

		/*
		 * Setup the initial fields for the first descriptor - all
		 * the non-11n specific stuff.
		 */
		ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
			, bf->bf_state.bfs_pktlen	/* packet length */
			, bf->bf_state.bfs_hdrlen	/* header length */
			, bf->bf_state.bfs_atype	/* Atheros packet type */
			, bf->bf_state.bfs_txpower	/* txpower */
			, bf->bf_state.bfs_txrate0
			, bf->bf_state.bfs_try0		/* series 0 rate/tries */
			, bf->bf_state.bfs_keyix	/* key cache index */
			, bf->bf_state.bfs_txantenna	/* antenna mode */
			, bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ	/* flags */
			, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
			, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
		);

		/*
		 * First descriptor? Setup the rate control and initial
		 * aggregate header information.
		 */
		if (bf == bf_first) {
			/*
			 * setup first desc with rate and aggr info
			 */
			ath_tx_set_ratectrl(sc, bf->bf_node, bf);
		}

		/*
		 * Setup the descriptors for a multi-descriptor frame.
		 * This is both aggregate and non-aggregate aware.
		 */
		ath_tx_chaindesclist(sc, ds0, bf,
		    1, /* is_aggr */
		    !! (bf == bf_first), /* is_first_subframe */
		    !! (bf->bf_next == NULL) /* is_last_subframe */
		    );

		if (bf == bf_first) {
			/*
			 * Initialise the first 11n aggregate with the
			 * aggregate length and aggregate enable bits.
			 */
			ath_hal_set11n_aggr_first(sc->sc_ah,
			    ds0,
			    bf->bf_state.bfs_al,
			    bf->bf_state.bfs_ndelim);
		}

		/*
		 * Link the last descriptor of the previous frame
		 * to the beginning descriptor of this frame.
		 */
		if (bf_prev != NULL)
			ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
			    bf->bf_daddr);

		/* Save a copy so we can link the next descriptor in */
		bf_prev = bf;
		bf = bf->bf_next;
	}

	/*
	 * Set the first descriptor bf_lastds field to point to
	 * the last descriptor in the last subframe, that's where
	 * the status update will occur.
	 */
	bf_first->bf_lastds = bf_prev->bf_lastds;

	/*
	 * And bf_last in the first descriptor points to the end of
	 * the aggregate list.
	 */
	bf_first->bf_last = bf_prev;

	/*
	 * For non-AR9300 NICs, which require the rate control
	 * in the final descriptor - let's set that up now.
	 *
	 * This is because the filltxdesc() HAL call doesn't
	 * populate the last segment with rate control information
	 * if firstSeg is also true.  For non-aggregate frames
	 * that is fine, as the first frame already has rate control
	 * info.  But if the last frame in an aggregate has one
	 * descriptor, both firstseg and lastseg will be true and
	 * the rate info isn't copied.
	 *
	 * This is inefficient on MIPS/ARM platforms that have
	 * non-cacheable memory for TX descriptors, but we'll just
	 * make do for now.
	 *
	 * As to why the rate table is stashed in the last descriptor
	 * rather than the first descriptor?  Because proctxdesc()
	 * is called on the final descriptor in an MPDU or A-MPDU -
	 * ie, the one that gets updated by the hardware upon
	 * completion.  That way proctxdesc() doesn't need to know
	 * about the first _and_ last TX descriptor.
	 */
	ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
}

/*
 * Hand-off a frame to the multicast TX queue.
 *
 * This is a software TXQ which will be appended to the CAB queue
 * during the beacon setup code.
 *
 * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
 * as part of the TX descriptor, bf_state.bfs_txq must be updated
 * with the actual hardware txq, or all of this will fall apart.
 *
 * XXX It may not be a bad idea to just stuff the QCU ID into bf_state
 * and retire bfs_txq; then make sure the CABQ QCU ID is populated
 * correctly.
 */
static void
ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
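	/*
	 * All software queue and hardware queue manipulation is
	 * serialised by the single driver-wide TX lock (rather
	 * than a per-TXQ lock), hence the assertion below.
	 */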
	ATH_TX_LOCK_ASSERT(sc);

	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	     ("%s: busy status 0x%x", __func__, bf->bf_flags));
	if (txq->axq_link != NULL) {
		struct ath_buf *last = ATH_TXQ_LAST(txq, axq_q_s);
		struct ieee80211_frame *wh;

		/* mark previous frame */
		wh = mtod(last->bf_m, struct ieee80211_frame *);
		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
		bus_dmamap_sync(sc->sc_dmat, last->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		/* link descriptor */
		*txq->axq_link = bf->bf_daddr;
	}
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ath_hal_gettxdesclinkptr(sc->sc_ah, bf->bf_lastds, &txq->axq_link);
}

/*
 * Hand-off packet to a hardware queue.
 */
static void
ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;

	/*
	 * Insert the frame on the outbound list and pass it on
	 * to the hardware.  Multicast frames buffered for power
	 * save stations and transmit from the CAB queue are stored
	 * on a s/w only queue and loaded on to the CAB queue in
	 * the SWBA handler since frames only go out on DTIM and
	 * to avoid possible races.
	 */
	ATH_TX_LOCK_ASSERT(sc);
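	/*
	 * Note: this path is only for frames destined for a real
	 * hardware QCU; multicast frames staged for the CAB queue
	 * must go via ath_tx_handoff_mcast() instead, which the
	 * KASSERT below enforces.
	 */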
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	     ("%s: busy status 0x%x", __func__, bf->bf_flags));
	KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
	     ("ath_tx_handoff_hw called for mcast queue"));

#if 0
	/*
	 * This causes a LOR. Find out where the PCU lock is being
	 * held whilst the TXQ lock is grabbed - that shouldn't
	 * be occurring.
	 */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt) {
		ATH_PCU_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_RESET,
		    "%s: called with sc_in_reset != 0\n",
		    __func__);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: queued: TXDP[%u] = %p (%p) depth %d\n",
		    __func__, txq->axq_qnum,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    txq->axq_depth);
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		if (bf->bf_state.bfs_aggr)
			txq->axq_aggr_depth++;
		/*
		 * There's no need to update axq_link; the hardware
		 * is in reset and once the reset is complete, any
		 * non-empty queues will simply have DMA restarted.
		 */
		return;
	}
	ATH_PCU_UNLOCK(sc);
#endif

	/* For now, so as not to generate whitespace diffs */
	if (1) {
#ifdef IEEE80211_SUPPORT_TDMA
		int qbusy;

		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		qbusy = ath_hal_txqenabled(ah, txq->axq_qnum);

		ATH_KTR(sc, ATH_KTR_TX, 4,
		    "ath_tx_handoff: txq=%u, add bf=%p, qbusy=%d, depth=%d",
		    txq->axq_qnum, bf, qbusy, txq->axq_depth);
		if (txq->axq_link == NULL) {
			/*
			 * Be careful writing the address to TXDP.  If
			 * the tx q is enabled then this write will be
			 * ignored.  Normally this is not an issue but
			 * when tdma is in use and the q is beacon gated
			 * this race can occur.  If the q is busy then
			 * defer the work to later--either when another
			 * packet comes along or when we prepare a beacon
			 * frame at SWBA.
			 */
			if (!qbusy) {
				ath_hal_puttxbuf(ah, txq->axq_qnum,
				    bf->bf_daddr);
				txq->axq_flags &= ~ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_XMIT,
				    "%s: TXDP[%u] = %p (%p) lastds=%p depth %d\n",
				    __func__, txq->axq_qnum,
				    (caddr_t)bf->bf_daddr, bf->bf_desc,
				    bf->bf_lastds,
				    txq->axq_depth);
				ATH_KTR(sc, ATH_KTR_TX, 5,
				    "ath_tx_handoff: TXDP[%u] = %p (%p) "
				    "lastds=%p depth %d",
				    txq->axq_qnum,
				    (caddr_t)bf->bf_daddr, bf->bf_desc,
				    bf->bf_lastds,
				    txq->axq_depth);
			} else {
				txq->axq_flags |= ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT,
				    "%s: Q%u busy, defer enable\n", __func__,
				    txq->axq_qnum);
				ATH_KTR(sc, ATH_KTR_TX, 0, "defer enable");
			}
		} else {
			*txq->axq_link = bf->bf_daddr;
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    txq->axq_depth);
			ATH_KTR(sc, ATH_KTR_TX, 5,
			    "ath_tx_handoff: link[%u](%p)=%p (%p) lastds=%p",
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    bf->bf_lastds);

			if ((txq->axq_flags & ATH_TXQ_PUTPENDING) && !qbusy) {
				/*
				 * The q was busy when we previously tried
				 * to write the address of the first buffer
				 * in the chain.  Since it's not busy now
				 * handle this chore.  We are certain the
				 * buffer at the front is the right one since
				 * axq_link is NULL only when the buffer list
				 * is/was empty.
				 */
				ath_hal_puttxbuf(ah, txq->axq_qnum,
					TAILQ_FIRST(&txq->axq_q)->bf_daddr);
				txq->axq_flags &= ~ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT,
				    "%s: Q%u restarted\n", __func__,
				    txq->axq_qnum);
				ATH_KTR(sc, ATH_KTR_TX, 4,
				    "ath_tx_handoff: txq[%d] restarted, bf=%p "
				    "daddr=%p ds=%p",
				    txq->axq_qnum,
				    bf,
				    (caddr_t)bf->bf_daddr,
				    bf->bf_desc);
			}
		}
#else
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		ATH_KTR(sc, ATH_KTR_TX, 3,
		    "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
		    "depth=%d",
		    txq->axq_qnum,
		    bf,
		    txq->axq_depth);
		if (txq->axq_link == NULL) {
			ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: TXDP[%u] = %p (%p) depth %d\n",
			    __func__, txq->axq_qnum,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    txq->axq_depth);
			ATH_KTR(sc, ATH_KTR_TX, 5,
			    "ath_tx_handoff: non-tdma: TXDP[%u] = %p (%p) "
			    "lastds=%p depth %d",
			    txq->axq_qnum,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    bf->bf_lastds,
			    txq->axq_depth);

		} else {
			*txq->axq_link = bf->bf_daddr;
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    txq->axq_depth);
			ATH_KTR(sc, ATH_KTR_TX, 5,
			    "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
			    "lastds=%p",
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    bf->bf_lastds);

		}
#endif /* IEEE80211_SUPPORT_TDMA */
		if (bf->bf_state.bfs_aggr)
			txq->axq_aggr_depth++;
		ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);
		ath_hal_txstart(ah, txq->axq_qnum);
		ATH_KTR(sc, ATH_KTR_TX, 1,
		    "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
	}
}

/*
 * Restart TX DMA for the given TXQ.
 *
 * This must be called whether the queue is empty or not.
 */
static void
ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *bf_last;

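	/*
	 * The head of the queue is re-pushed to TXDP here, which
	 * must not race with a concurrent handoff; the same TX
	 * lock covers both paths.
	 */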
	ATH_TX_LOCK_ASSERT(sc);

	/* This is always going to be cleared, empty or not */
	txq->axq_flags &= ~ATH_TXQ_PUTPENDING;

	/* XXX make this ATH_TXQ_FIRST */
	bf = TAILQ_FIRST(&txq->axq_q);
	bf_last = ATH_TXQ_LAST(txq, axq_q_s);

	if (bf == NULL)
		return;

	ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
	ath_hal_gettxdesclinkptr(ah, bf_last->bf_lastds, &txq->axq_link);
	ath_hal_txstart(ah, txq->axq_qnum);
}

/*
 * Hand off a packet to the hardware (or mcast queue.)
 *
 * The relevant hardware txq should be locked.
 */
static void
ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
| 929 930 /* This is always going to be cleared, empty or not */ 931 txq->axq_flags &= ~ATH_TXQ_PUTPENDING; 932 933 /* XXX make this ATH_TXQ_FIRST */ 934 bf = TAILQ_FIRST(&txq->axq_q); 935 bf_last = ATH_TXQ_LAST(txq, axq_q_s); 936 937 if (bf == NULL) 938 return; 939 940 ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 941 ath_hal_gettxdesclinkptr(ah, bf_last->bf_lastds, &txq->axq_link); 942 ath_hal_txstart(ah, txq->axq_qnum); 943} 944 945/* 946 * Hand off a packet to the hardware (or mcast queue.) 947 * 948 * The relevant hardware txq should be locked. 949 */ 950static void 951ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq, 952 struct ath_buf *bf) 953{
|
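/*
 * [Editorial sketch, reusing the stand-in types and stubs from the
 * previous sketch.]  ath_legacy_tx_dma_restart() above re-arms a
 * queue after a reset: push the first queued buffer's bus address,
 * re-derive the tail link pointer from the last buffer, and restart
 * DMA.  The real code also clears ATH_TXQ_PUTPENDING unconditionally
 * before the empty check; this simplified model assumes the caller
 * already knows whether the queue is empty.
 */
static void
sk_dma_restart(struct sk_txq *q, uint32_t first_daddr,
    struct sk_desc *lastds, int empty)
{
	if (empty)
		return;			/* nothing queued; nothing to re-arm */
	put_txdp(first_daddr);		/* head of the surviving chain */
	q->axq_link = &lastds->ds_link;	/* tail link for future appends */
	tx_start();
}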
953 ATH_TXQ_LOCK_ASSERT(txq);
| 954 ATH_TX_LOCK_ASSERT(sc);
|
954 955#ifdef ATH_DEBUG_ALQ 956 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC)) 957 ath_tx_alq_post(sc, bf); 958#endif 959 960 if (txq->axq_qnum == ATH_TXQ_SWQ) 961 ath_tx_handoff_mcast(sc, txq, bf); 962 else 963 ath_tx_handoff_hw(sc, txq, bf); 964} 965 966static int 967ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni, 968 struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen, 969 int *keyix) 970{ 971 DPRINTF(sc, ATH_DEBUG_XMIT, 972 "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n", 973 __func__, 974 *hdrlen, 975 *pktlen, 976 isfrag, 977 iswep, 978 m0); 979 980 if (iswep) { 981 const struct ieee80211_cipher *cip; 982 struct ieee80211_key *k; 983 984 /* 985 * Construct the 802.11 header+trailer for an encrypted 986 * frame. The only reason this can fail is because of an 987 * unknown or unsupported cipher/key type. 988 */ 989 k = ieee80211_crypto_encap(ni, m0); 990 if (k == NULL) { 991 /* 992 * This can happen when the key is yanked after the 993 * frame was queued. Just discard the frame; the 994 * 802.11 layer counts failures and provides 995 * debugging/diagnostics. 996 */ 997 return (0); 998 } 999 /* 1000 * Adjust the packet + header lengths for the crypto 1001 * additions and calculate the h/w key index. When 1002 * a s/w mic is done the frame will have had any mic 1003 * added to it prior to entry so m0->m_pkthdr.len will 1004 * account for it. Otherwise we need to add it to the 1005 * packet length. 1006 */ 1007 cip = k->wk_cipher; 1008 (*hdrlen) += cip->ic_header; 1009 (*pktlen) += cip->ic_header + cip->ic_trailer; 1010 /* NB: frags always have any TKIP MIC done in s/w */ 1011 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag) 1012 (*pktlen) += cip->ic_miclen; 1013 (*keyix) = k->wk_keyix; 1014 } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) { 1015 /* 1016 * Use station key cache slot, if assigned. 1017 */ 1018 (*keyix) = ni->ni_ucastkey.wk_keyix; 1019 if ((*keyix) == IEEE80211_KEYIX_NONE) 1020 (*keyix) = HAL_TXKEYIX_INVALID; 1021 } else 1022 (*keyix) = HAL_TXKEYIX_INVALID; 1023 1024 return (1); 1025} 1026 1027/* 1028 * Calculate whether interoperability protection is required for 1029 * this frame. 1030 * 1031 * This requires the rate control information be filled in, 1032 * as the protection requirement depends upon the current 1033 * operating mode / PHY. 1034 */ 1035static void 1036ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf) 1037{ 1038 struct ieee80211_frame *wh; 1039 uint8_t rix; 1040 uint16_t flags; 1041 int shortPreamble; 1042 const HAL_RATE_TABLE *rt = sc->sc_currates; 1043 struct ifnet *ifp = sc->sc_ifp; 1044 struct ieee80211com *ic = ifp->if_l2com; 1045 1046 flags = bf->bf_state.bfs_txflags; 1047 rix = bf->bf_state.bfs_rc[0].rix; 1048 shortPreamble = bf->bf_state.bfs_shpream; 1049 wh = mtod(bf->bf_m, struct ieee80211_frame *); 1050 1051 /* 1052 * If 802.11g protection is enabled, determine whether 1053 * to use RTS/CTS or just CTS. Note that this is only 1054 * done for OFDM unicast frames. 
1055 */ 1056 if ((ic->ic_flags & IEEE80211_F_USEPROT) && 1057 rt->info[rix].phy == IEEE80211_T_OFDM && 1058 (flags & HAL_TXDESC_NOACK) == 0) { 1059 bf->bf_state.bfs_doprot = 1; 1060 /* XXX fragments must use CCK rates w/ protection */ 1061 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) { 1062 flags |= HAL_TXDESC_RTSENA; 1063 } else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { 1064 flags |= HAL_TXDESC_CTSENA; 1065 } 1066 /* 1067 * For frags it would be desirable to use the 1068 * highest CCK rate for RTS/CTS. But stations 1069 * farther away may detect it at a lower CCK rate 1070 * so use the configured protection rate instead 1071 * (for now). 1072 */ 1073 sc->sc_stats.ast_tx_protect++; 1074 } 1075 1076 /* 1077 * If 11n protection is enabled and it's a HT frame, 1078 * enable RTS. 1079 * 1080 * XXX ic_htprotmode or ic_curhtprotmode? 1081 * XXX should ic_htprotmode only matter if ic_curhtprotmode 1082 * XXX indicates it's not a HT pure environment? 1083 */ 1084 if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) && 1085 rt->info[rix].phy == IEEE80211_T_HT && 1086 (flags & HAL_TXDESC_NOACK) == 0) { 1087 flags |= HAL_TXDESC_RTSENA; 1088 sc->sc_stats.ast_tx_htprotect++; 1089 } 1090 bf->bf_state.bfs_txflags = flags; 1091} 1092 1093 /* 1094 * Update the frame duration given the currently selected rate. 1095 * 1096 * This also updates the frame duration value, so it will require 1097 * a DMA flush. 1098 */ 1099 static void 1100 ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf) 1101 { 1102 struct ieee80211_frame *wh; 1103 uint8_t rix; 1104 uint16_t flags; 1105 int shortPreamble; 1106 struct ath_hal *ah = sc->sc_ah; 1107 const HAL_RATE_TABLE *rt = sc->sc_currates; 1108 int isfrag = bf->bf_m->m_flags & M_FRAG; 1109 1110 flags = bf->bf_state.bfs_txflags; 1111 rix = bf->bf_state.bfs_rc[0].rix; 1112 shortPreamble = bf->bf_state.bfs_shpream; 1113 wh = mtod(bf->bf_m, struct ieee80211_frame *); 1114 1115 /* 1116 * Calculate duration. This logically belongs in the 802.11 1117 * layer but it lacks sufficient information to calculate it. 1118 */ 1119 if ((flags & HAL_TXDESC_NOACK) == 0 && 1120 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { 1121 u_int16_t dur; 1122 if (shortPreamble) 1123 dur = rt->info[rix].spAckDuration; 1124 else 1125 dur = rt->info[rix].lpAckDuration; 1126 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { 1127 dur += dur; /* additional SIFS+ACK */ 1128 KASSERT(bf->bf_m->m_nextpkt != NULL, ("no fragment")); 1129 /* 1130 * Include the size of next fragment so NAV is 1131 * updated properly. The last fragment uses only 1132 * the ACK duration 1133 * 1134 * XXX TODO: ensure that the rate lookup for each 1135 * fragment is the same as the rate used by the 1136 * first fragment! 1137 */ 1138 dur += ath_hal_computetxtime(ah, rt, 1139 bf->bf_m->m_nextpkt->m_pkthdr.len, 1140 rix, shortPreamble); 1141 } 1142 if (isfrag) { 1143 /* 1144 * Force hardware to use computed duration for next 1145 * fragment by disabling multi-rate retry which updates 1146 * duration based on the multi-rate duration table. 1147 */ 1148 bf->bf_state.bfs_ismrr = 0; 1149 bf->bf_state.bfs_try0 = ATH_TXMGTTRY; 1150 /* XXX update bfs_rc[0].try?
*/ 1151 } 1152 1153 /* Update the duration field itself */ 1154 *(u_int16_t *)wh->i_dur = htole16(dur); 1155 } 1156} 1157 1158static uint8_t 1159ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt, 1160 int cix, int shortPreamble) 1161{ 1162 uint8_t ctsrate; 1163 1164 /* 1165 * CTS transmit rate is derived from the transmit rate 1166 * by looking in the h/w rate table. We must also factor 1167 * in whether or not a short preamble is to be used. 1168 */ 1169 /* NB: cix is set above where RTS/CTS is enabled */ 1170 KASSERT(cix != 0xff, ("cix not setup")); 1171 ctsrate = rt->info[cix].rateCode; 1172 1173 /* XXX this should only matter for legacy rates */ 1174 if (shortPreamble) 1175 ctsrate |= rt->info[cix].shortPreamble; 1176 1177 return (ctsrate); 1178} 1179 1180/* 1181 * Calculate the RTS/CTS duration for legacy frames. 1182 */ 1183static int 1184ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix, 1185 int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt, 1186 int flags) 1187{ 1188 int ctsduration = 0; 1189 1190 /* This mustn't be called for HT modes */ 1191 if (rt->info[cix].phy == IEEE80211_T_HT) { 1192 printf("%s: HT rate where it shouldn't be (0x%x)\n", 1193 __func__, rt->info[cix].rateCode); 1194 return (-1); 1195 } 1196 1197 /* 1198 * Compute the transmit duration based on the frame 1199 * size and the size of an ACK frame. We call into the 1200 * HAL to do the computation since it depends on the 1201 * characteristics of the actual PHY being used. 1202 * 1203 * NB: CTS is assumed the same size as an ACK so we can 1204 * use the precalculated ACK durations. 1205 */ 1206 if (shortPreamble) { 1207 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 1208 ctsduration += rt->info[cix].spAckDuration; 1209 ctsduration += ath_hal_computetxtime(ah, 1210 rt, pktlen, rix, AH_TRUE); 1211 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 1212 ctsduration += rt->info[rix].spAckDuration; 1213 } else { 1214 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 1215 ctsduration += rt->info[cix].lpAckDuration; 1216 ctsduration += ath_hal_computetxtime(ah, 1217 rt, pktlen, rix, AH_FALSE); 1218 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 1219 ctsduration += rt->info[rix].lpAckDuration; 1220 } 1221 1222 return (ctsduration); 1223} 1224 1225/* 1226 * Update the given ath_buf with updated rts/cts setup and duration 1227 * values. 1228 * 1229 * To support rate lookups for each software retry, the rts/cts rate 1230 * and cts duration must be re-calculated. 1231 * 1232 * This function assumes the RTS/CTS flags have been set as needed; 1233 * mrr has been disabled; and the rate control lookup has been done. 1234 * 1235 * XXX TODO: MRR need only be disabled for the pre-11n NICs. 1236 * XXX The 11n NICs support per-rate RTS/CTS configuration. 1237 */ 1238static void 1239ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf) 1240{ 1241 uint16_t ctsduration = 0; 1242 uint8_t ctsrate = 0; 1243 uint8_t rix = bf->bf_state.bfs_rc[0].rix; 1244 uint8_t cix = 0; 1245 const HAL_RATE_TABLE *rt = sc->sc_currates; 1246 1247 /* 1248 * No RTS/CTS enabled? Don't bother. 1249 */ 1250 if ((bf->bf_state.bfs_txflags & 1251 (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) { 1252 /* XXX is this really needed? */ 1253 bf->bf_state.bfs_ctsrate = 0; 1254 bf->bf_state.bfs_ctsduration = 0; 1255 return; 1256 } 1257 1258 /* 1259 * If protection is enabled, use the protection rix control 1260 * rate. Otherwise use the rate0 control rate. 
1261 */ 1262 if (bf->bf_state.bfs_doprot) 1263 rix = sc->sc_protrix; 1264 else 1265 rix = bf->bf_state.bfs_rc[0].rix; 1266 1267 /* 1268 * If the raw path has hard-coded ctsrate0 to something, 1269 * use it. 1270 */ 1271 if (bf->bf_state.bfs_ctsrate0 != 0) 1272 cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0); 1273 else 1274 /* Control rate from above */ 1275 cix = rt->info[rix].controlRate; 1276 1277 /* Calculate the rtscts rate for the given cix */ 1278 ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix, 1279 bf->bf_state.bfs_shpream); 1280 1281 /* The 11n chipsets do ctsduration calculations for you */ 1282 if (! ath_tx_is_11n(sc)) 1283 ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix, 1284 bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen, 1285 rt, bf->bf_state.bfs_txflags); 1286 1287 /* Squirrel away in ath_buf */ 1288 bf->bf_state.bfs_ctsrate = ctsrate; 1289 bf->bf_state.bfs_ctsduration = ctsduration; 1290 1291 /* 1292 * Must disable multi-rate retry when using RTS/CTS. 1293 */ 1294 if (!sc->sc_mrrprot) { 1295 bf->bf_state.bfs_ismrr = 0; 1296 bf->bf_state.bfs_try0 = 1297 bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */ 1298 } 1299} 1300 1301 /* 1302 * Setup the descriptor chain for a normal or fast-frame 1303 * frame. 1304 * 1305 * XXX TODO: extend to include the destination hardware QCU ID. 1306 * Make sure that is correct. Make sure that when being added 1307 * to the mcastq, the CABQ QCUID is set or things will get a bit 1308 * odd. 1309 */ 1310 static void 1311 ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf) 1312 { 1313 struct ath_desc *ds = bf->bf_desc; 1314 struct ath_hal *ah = sc->sc_ah; 1315 1316 if (bf->bf_state.bfs_txrate0 == 0) 1317 device_printf(sc->sc_dev, "%s: bf=%p, txrate0=%d\n", 1318 __func__, bf, 0); 1319 1320 ath_hal_setuptxdesc(ah, ds 1321 , bf->bf_state.bfs_pktlen /* packet length */ 1322 , bf->bf_state.bfs_hdrlen /* header length */ 1323 , bf->bf_state.bfs_atype /* Atheros packet type */ 1324 , bf->bf_state.bfs_txpower /* txpower */ 1325 , bf->bf_state.bfs_txrate0 1326 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ 1327 , bf->bf_state.bfs_keyix /* key cache index */ 1328 , bf->bf_state.bfs_txantenna /* antenna mode */ 1329 , bf->bf_state.bfs_txflags /* flags */ 1330 , bf->bf_state.bfs_ctsrate /* rts/cts rate */ 1331 , bf->bf_state.bfs_ctsduration /* rts/cts duration */ 1332 ); 1333 1334 /* 1335 * This will be overridden when the descriptor chain is written. 1336 */ 1337 bf->bf_lastds = ds; 1338 bf->bf_last = bf; 1339 1340 /* Set rate control and descriptor chain for this frame */ 1341 ath_tx_set_ratectrl(sc, bf->bf_node, bf); 1342 ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0); 1343} 1344 1345 /* 1346 * Do a rate lookup. 1347 * 1348 * This performs a rate lookup for the given ath_buf only if it's required. 1349 * Non-data frames and raw frames don't require it. 1350 * 1351 * This populates the primary and MRR entries; MRR values are 1352 * then disabled later on if something requires it (eg RTS/CTS on 1353 * pre-11n chipsets). 1354 * 1355 * This needs to be done before the RTS/CTS fields are calculated 1356 * as they may depend upon the rate chosen. 1357 */ 1358 static void 1359 ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf) 1360 { 1361 uint8_t rate, rix; 1362 int try0; 1363 1364 if (!
bf->bf_state.bfs_doratelookup) 1365 return; 1366 1367 /* Get rid of any previous state */ 1368 bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 1369 1370 ATH_NODE_LOCK(ATH_NODE(bf->bf_node)); 1371 ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream, 1372 bf->bf_state.bfs_pktlen, &rix, &try0, &rate); 1373 1374 /* In case MRR is disabled, make sure rc[0] is setup correctly */ 1375 bf->bf_state.bfs_rc[0].rix = rix; 1376 bf->bf_state.bfs_rc[0].ratecode = rate; 1377 bf->bf_state.bfs_rc[0].tries = try0; 1378 1379 if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY) 1380 ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix, 1381 bf->bf_state.bfs_rc); 1382 ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node)); 1383 1384 sc->sc_txrix = rix; /* for LED blinking */ 1385 sc->sc_lastdatarix = rix; /* for fast frames */ 1386 bf->bf_state.bfs_try0 = try0; 1387 bf->bf_state.bfs_txrate0 = rate; 1388} 1389 1390/* 1391 * Update the CLRDMASK bit in the ath_buf if it needs to be set. 1392 */ 1393static void 1394ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid, 1395 struct ath_buf *bf) 1396{ 1397
| 955 956#ifdef ATH_DEBUG_ALQ 957 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC)) 958 ath_tx_alq_post(sc, bf); 959#endif 960 961 if (txq->axq_qnum == ATH_TXQ_SWQ) 962 ath_tx_handoff_mcast(sc, txq, bf); 963 else 964 ath_tx_handoff_hw(sc, txq, bf); 965} 966 967static int 968ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni, 969 struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen, 970 int *keyix) 971{ 972 DPRINTF(sc, ATH_DEBUG_XMIT, 973 "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n", 974 __func__, 975 *hdrlen, 976 *pktlen, 977 isfrag, 978 iswep, 979 m0); 980 981 if (iswep) { 982 const struct ieee80211_cipher *cip; 983 struct ieee80211_key *k; 984 985 /* 986 * Construct the 802.11 header+trailer for an encrypted 987 * frame. The only reason this can fail is because of an 988 * unknown or unsupported cipher/key type. 989 */ 990 k = ieee80211_crypto_encap(ni, m0); 991 if (k == NULL) { 992 /* 993 * This can happen when the key is yanked after the 994 * frame was queued. Just discard the frame; the 995 * 802.11 layer counts failures and provides 996 * debugging/diagnostics. 997 */ 998 return (0); 999 } 1000 /* 1001 * Adjust the packet + header lengths for the crypto 1002 * additions and calculate the h/w key index. When 1003 * a s/w mic is done the frame will have had any mic 1004 * added to it prior to entry so m0->m_pkthdr.len will 1005 * account for it. Otherwise we need to add it to the 1006 * packet length. 1007 */ 1008 cip = k->wk_cipher; 1009 (*hdrlen) += cip->ic_header; 1010 (*pktlen) += cip->ic_header + cip->ic_trailer; 1011 /* NB: frags always have any TKIP MIC done in s/w */ 1012 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag) 1013 (*pktlen) += cip->ic_miclen; 1014 (*keyix) = k->wk_keyix; 1015 } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) { 1016 /* 1017 * Use station key cache slot, if assigned. 1018 */ 1019 (*keyix) = ni->ni_ucastkey.wk_keyix; 1020 if ((*keyix) == IEEE80211_KEYIX_NONE) 1021 (*keyix) = HAL_TXKEYIX_INVALID; 1022 } else 1023 (*keyix) = HAL_TXKEYIX_INVALID; 1024 1025 return (1); 1026} 1027 1028/* 1029 * Calculate whether interoperability protection is required for 1030 * this frame. 1031 * 1032 * This requires the rate control information be filled in, 1033 * as the protection requirement depends upon the current 1034 * operating mode / PHY. 1035 */ 1036static void 1037ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf) 1038{ 1039 struct ieee80211_frame *wh; 1040 uint8_t rix; 1041 uint16_t flags; 1042 int shortPreamble; 1043 const HAL_RATE_TABLE *rt = sc->sc_currates; 1044 struct ifnet *ifp = sc->sc_ifp; 1045 struct ieee80211com *ic = ifp->if_l2com; 1046 1047 flags = bf->bf_state.bfs_txflags; 1048 rix = bf->bf_state.bfs_rc[0].rix; 1049 shortPreamble = bf->bf_state.bfs_shpream; 1050 wh = mtod(bf->bf_m, struct ieee80211_frame *); 1051 1052 /* 1053 * If 802.11g protection is enabled, determine whether 1054 * to use RTS/CTS or just CTS. Note that this is only 1055 * done for OFDM unicast frames. 
1056 */ 1057 if ((ic->ic_flags & IEEE80211_F_USEPROT) && 1058 rt->info[rix].phy == IEEE80211_T_OFDM && 1059 (flags & HAL_TXDESC_NOACK) == 0) { 1060 bf->bf_state.bfs_doprot = 1; 1061 /* XXX fragments must use CCK rates w/ protection */ 1062 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) { 1063 flags |= HAL_TXDESC_RTSENA; 1064 } else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { 1065 flags |= HAL_TXDESC_CTSENA; 1066 } 1067 /* 1068 * For frags it would be desirable to use the 1069 * highest CCK rate for RTS/CTS. But stations 1070 * farther away may detect it at a lower CCK rate 1071 * so use the configured protection rate instead 1072 * (for now). 1073 */ 1074 sc->sc_stats.ast_tx_protect++; 1075 } 1076 1077 /* 1078 * If 11n protection is enabled and it's a HT frame, 1079 * enable RTS. 1080 * 1081 * XXX ic_htprotmode or ic_curhtprotmode? 1082 * XXX should ic_htprotmode only matter if ic_curhtprotmode 1083 * XXX indicates it's not a HT pure environment? 1084 */ 1085 if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) && 1086 rt->info[rix].phy == IEEE80211_T_HT && 1087 (flags & HAL_TXDESC_NOACK) == 0) { 1088 flags |= HAL_TXDESC_RTSENA; 1089 sc->sc_stats.ast_tx_htprotect++; 1090 } 1091 bf->bf_state.bfs_txflags = flags; 1092} 1093 1094 /* 1095 * Update the frame duration given the currently selected rate. 1096 * 1097 * This also updates the frame duration value, so it will require 1098 * a DMA flush. 1099 */ 1100 static void 1101 ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf) 1102 { 1103 struct ieee80211_frame *wh; 1104 uint8_t rix; 1105 uint16_t flags; 1106 int shortPreamble; 1107 struct ath_hal *ah = sc->sc_ah; 1108 const HAL_RATE_TABLE *rt = sc->sc_currates; 1109 int isfrag = bf->bf_m->m_flags & M_FRAG; 1110 1111 flags = bf->bf_state.bfs_txflags; 1112 rix = bf->bf_state.bfs_rc[0].rix; 1113 shortPreamble = bf->bf_state.bfs_shpream; 1114 wh = mtod(bf->bf_m, struct ieee80211_frame *); 1115 1116 /* 1117 * Calculate duration. This logically belongs in the 802.11 1118 * layer but it lacks sufficient information to calculate it. 1119 */ 1120 if ((flags & HAL_TXDESC_NOACK) == 0 && 1121 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { 1122 u_int16_t dur; 1123 if (shortPreamble) 1124 dur = rt->info[rix].spAckDuration; 1125 else 1126 dur = rt->info[rix].lpAckDuration; 1127 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { 1128 dur += dur; /* additional SIFS+ACK */ 1129 KASSERT(bf->bf_m->m_nextpkt != NULL, ("no fragment")); 1130 /* 1131 * Include the size of next fragment so NAV is 1132 * updated properly. The last fragment uses only 1133 * the ACK duration 1134 * 1135 * XXX TODO: ensure that the rate lookup for each 1136 * fragment is the same as the rate used by the 1137 * first fragment! 1138 */ 1139 dur += ath_hal_computetxtime(ah, rt, 1140 bf->bf_m->m_nextpkt->m_pkthdr.len, 1141 rix, shortPreamble); 1142 } 1143 if (isfrag) { 1144 /* 1145 * Force hardware to use computed duration for next 1146 * fragment by disabling multi-rate retry which updates 1147 * duration based on the multi-rate duration table. 1148 */ 1149 bf->bf_state.bfs_ismrr = 0; 1150 bf->bf_state.bfs_try0 = ATH_TXMGTTRY; 1151 /* XXX update bfs_rc[0].try?
*/ 1152 } 1153 1154 /* Update the duration field itself */ 1155 *(u_int16_t *)wh->i_dur = htole16(dur); 1156 } 1157} 1158 1159static uint8_t 1160ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt, 1161 int cix, int shortPreamble) 1162{ 1163 uint8_t ctsrate; 1164 1165 /* 1166 * CTS transmit rate is derived from the transmit rate 1167 * by looking in the h/w rate table. We must also factor 1168 * in whether or not a short preamble is to be used. 1169 */ 1170 /* NB: cix is set above where RTS/CTS is enabled */ 1171 KASSERT(cix != 0xff, ("cix not setup")); 1172 ctsrate = rt->info[cix].rateCode; 1173 1174 /* XXX this should only matter for legacy rates */ 1175 if (shortPreamble) 1176 ctsrate |= rt->info[cix].shortPreamble; 1177 1178 return (ctsrate); 1179} 1180 1181/* 1182 * Calculate the RTS/CTS duration for legacy frames. 1183 */ 1184static int 1185ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix, 1186 int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt, 1187 int flags) 1188{ 1189 int ctsduration = 0; 1190 1191 /* This mustn't be called for HT modes */ 1192 if (rt->info[cix].phy == IEEE80211_T_HT) { 1193 printf("%s: HT rate where it shouldn't be (0x%x)\n", 1194 __func__, rt->info[cix].rateCode); 1195 return (-1); 1196 } 1197 1198 /* 1199 * Compute the transmit duration based on the frame 1200 * size and the size of an ACK frame. We call into the 1201 * HAL to do the computation since it depends on the 1202 * characteristics of the actual PHY being used. 1203 * 1204 * NB: CTS is assumed the same size as an ACK so we can 1205 * use the precalculated ACK durations. 1206 */ 1207 if (shortPreamble) { 1208 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 1209 ctsduration += rt->info[cix].spAckDuration; 1210 ctsduration += ath_hal_computetxtime(ah, 1211 rt, pktlen, rix, AH_TRUE); 1212 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 1213 ctsduration += rt->info[rix].spAckDuration; 1214 } else { 1215 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 1216 ctsduration += rt->info[cix].lpAckDuration; 1217 ctsduration += ath_hal_computetxtime(ah, 1218 rt, pktlen, rix, AH_FALSE); 1219 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 1220 ctsduration += rt->info[rix].lpAckDuration; 1221 } 1222 1223 return (ctsduration); 1224} 1225 1226/* 1227 * Update the given ath_buf with updated rts/cts setup and duration 1228 * values. 1229 * 1230 * To support rate lookups for each software retry, the rts/cts rate 1231 * and cts duration must be re-calculated. 1232 * 1233 * This function assumes the RTS/CTS flags have been set as needed; 1234 * mrr has been disabled; and the rate control lookup has been done. 1235 * 1236 * XXX TODO: MRR need only be disabled for the pre-11n NICs. 1237 * XXX The 11n NICs support per-rate RTS/CTS configuration. 1238 */ 1239static void 1240ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf) 1241{ 1242 uint16_t ctsduration = 0; 1243 uint8_t ctsrate = 0; 1244 uint8_t rix = bf->bf_state.bfs_rc[0].rix; 1245 uint8_t cix = 0; 1246 const HAL_RATE_TABLE *rt = sc->sc_currates; 1247 1248 /* 1249 * No RTS/CTS enabled? Don't bother. 1250 */ 1251 if ((bf->bf_state.bfs_txflags & 1252 (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) { 1253 /* XXX is this really needed? */ 1254 bf->bf_state.bfs_ctsrate = 0; 1255 bf->bf_state.bfs_ctsduration = 0; 1256 return; 1257 } 1258 1259 /* 1260 * If protection is enabled, use the protection rix control 1261 * rate. Otherwise use the rate0 control rate. 
1262 */ 1263 if (bf->bf_state.bfs_doprot) 1264 rix = sc->sc_protrix; 1265 else 1266 rix = bf->bf_state.bfs_rc[0].rix; 1267 1268 /* 1269 * If the raw path has hard-coded ctsrate0 to something, 1270 * use it. 1271 */ 1272 if (bf->bf_state.bfs_ctsrate0 != 0) 1273 cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0); 1274 else 1275 /* Control rate from above */ 1276 cix = rt->info[rix].controlRate; 1277 1278 /* Calculate the rtscts rate for the given cix */ 1279 ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix, 1280 bf->bf_state.bfs_shpream); 1281 1282 /* The 11n chipsets do ctsduration calculations for you */ 1283 if (! ath_tx_is_11n(sc)) 1284 ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix, 1285 bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen, 1286 rt, bf->bf_state.bfs_txflags); 1287 1288 /* Squirrel away in ath_buf */ 1289 bf->bf_state.bfs_ctsrate = ctsrate; 1290 bf->bf_state.bfs_ctsduration = ctsduration; 1291 1292 /* 1293 * Must disable multi-rate retry when using RTS/CTS. 1294 */ 1295 if (!sc->sc_mrrprot) { 1296 bf->bf_state.bfs_ismrr = 0; 1297 bf->bf_state.bfs_try0 = 1298 bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */ 1299 } 1300} 1301 1302 /* 1303 * Setup the descriptor chain for a normal or fast-frame 1304 * frame. 1305 * 1306 * XXX TODO: extend to include the destination hardware QCU ID. 1307 * Make sure that is correct. Make sure that when being added 1308 * to the mcastq, the CABQ QCUID is set or things will get a bit 1309 * odd. 1310 */ 1311 static void 1312 ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf) 1313 { 1314 struct ath_desc *ds = bf->bf_desc; 1315 struct ath_hal *ah = sc->sc_ah; 1316 1317 if (bf->bf_state.bfs_txrate0 == 0) 1318 device_printf(sc->sc_dev, "%s: bf=%p, txrate0=%d\n", 1319 __func__, bf, 0); 1320 1321 ath_hal_setuptxdesc(ah, ds 1322 , bf->bf_state.bfs_pktlen /* packet length */ 1323 , bf->bf_state.bfs_hdrlen /* header length */ 1324 , bf->bf_state.bfs_atype /* Atheros packet type */ 1325 , bf->bf_state.bfs_txpower /* txpower */ 1326 , bf->bf_state.bfs_txrate0 1327 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ 1328 , bf->bf_state.bfs_keyix /* key cache index */ 1329 , bf->bf_state.bfs_txantenna /* antenna mode */ 1330 , bf->bf_state.bfs_txflags /* flags */ 1331 , bf->bf_state.bfs_ctsrate /* rts/cts rate */ 1332 , bf->bf_state.bfs_ctsduration /* rts/cts duration */ 1333 ); 1334 1335 /* 1336 * This will be overridden when the descriptor chain is written. 1337 */ 1338 bf->bf_lastds = ds; 1339 bf->bf_last = bf; 1340 1341 /* Set rate control and descriptor chain for this frame */ 1342 ath_tx_set_ratectrl(sc, bf->bf_node, bf); 1343 ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0); 1344} 1345 1346 /* 1347 * Do a rate lookup. 1348 * 1349 * This performs a rate lookup for the given ath_buf only if it's required. 1350 * Non-data frames and raw frames don't require it. 1351 * 1352 * This populates the primary and MRR entries; MRR values are 1353 * then disabled later on if something requires it (eg RTS/CTS on 1354 * pre-11n chipsets). 1355 * 1356 * This needs to be done before the RTS/CTS fields are calculated 1357 * as they may depend upon the rate chosen. 1358 */ 1359 static void 1360 ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf) 1361 { 1362 uint8_t rate, rix; 1363 int try0; 1364 1365 if (!
bf->bf_state.bfs_doratelookup) 1366 return; 1367 1368 /* Get rid of any previous state */ 1369 bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 1370 1371 ATH_NODE_LOCK(ATH_NODE(bf->bf_node)); 1372 ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream, 1373 bf->bf_state.bfs_pktlen, &rix, &try0, &rate); 1374 1375 /* In case MRR is disabled, make sure rc[0] is setup correctly */ 1376 bf->bf_state.bfs_rc[0].rix = rix; 1377 bf->bf_state.bfs_rc[0].ratecode = rate; 1378 bf->bf_state.bfs_rc[0].tries = try0; 1379 1380 if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY) 1381 ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix, 1382 bf->bf_state.bfs_rc); 1383 ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node)); 1384 1385 sc->sc_txrix = rix; /* for LED blinking */ 1386 sc->sc_lastdatarix = rix; /* for fast frames */ 1387 bf->bf_state.bfs_try0 = try0; 1388 bf->bf_state.bfs_txrate0 = rate; 1389} 1390 1391/* 1392 * Update the CLRDMASK bit in the ath_buf if it needs to be set. 1393 */ 1394static void 1395ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid, 1396 struct ath_buf *bf) 1397{ 1398
|
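/*
 * [Editorial sketch.]  The length bookkeeping in ath_tx_tag_crypto()
 * above, isolated: the cipher header grows both the 802.11 header
 * length and the packet length, the trailer grows the packet length
 * only, and the MIC is counted only when it is generated in hardware
 * and the frame is not a fragment (fragments always do the TKIP MIC
 * in software).  The struct and its fields are hypothetical.
 */
struct sk_cipher { int ic_header, ic_trailer, ic_miclen; };

static void
sk_crypto_lengths(const struct sk_cipher *cip, int sw_mic, int isfrag,
    int *hdrlen, int *pktlen)
{
	*hdrlen += cip->ic_header;
	*pktlen += cip->ic_header + cip->ic_trailer;
	if (!sw_mic && !isfrag)
		*pktlen += cip->ic_miclen;	/* h/w MIC adds to the frame */
}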
1398 ATH_TID_LOCK_ASSERT(sc, tid);
| 1399 ATH_TX_LOCK_ASSERT(sc);
|
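/*
 * [Editorial sketch.]  The decision made by ath_tx_calc_protection()
 * above, reduced to its inputs: 802.11g protection applies only to
 * acknowledged OFDM unicast frames, and the protection mode then
 * chooses between full RTS/CTS and CTS-to-self.  The flag values and
 * names are hypothetical, not the HAL_TXDESC_* ones.
 */
#define SK_RTSENA	0x1
#define SK_CTSENA	0x2
enum sk_protmode { SK_PROT_NONE, SK_PROT_CTSONLY, SK_PROT_RTSCTS };

static unsigned
sk_prot_flags(int useprot, int is_ofdm, int noack, enum sk_protmode mode)
{
	if (!useprot || !is_ofdm || noack)
		return (0);
	if (mode == SK_PROT_RTSCTS)
		return (SK_RTSENA);
	if (mode == SK_PROT_CTSONLY)
		return (SK_CTSENA);
	return (0);
}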
1399 1400 if (tid->clrdmask == 1) { 1401 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1402 tid->clrdmask = 0; 1403 } 1404} 1405 1406/* 1407 * Transmit the given frame to the hardware. 1408 * 1409 * The frame must already be setup; rate control must already have 1410 * been done. 1411 * 1412 * XXX since the TXQ lock is being held here (and I dislike holding 1413 * it for this long when not doing software aggregation), later on 1414 * break this function into "setup_normal" and "xmit_normal". The 1415 * lock only needs to be held for the ath_tx_handoff call. 1416 */ 1417static void 1418ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq, 1419 struct ath_buf *bf) 1420{ 1421 struct ath_node *an = ATH_NODE(bf->bf_node); 1422 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 1423
| 1400 1401 if (tid->clrdmask == 1) { 1402 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1403 tid->clrdmask = 0; 1404 } 1405} 1406 1407/* 1408 * Transmit the given frame to the hardware. 1409 * 1410 * The frame must already be setup; rate control must already have 1411 * been done. 1412 * 1413 * XXX since the TXQ lock is being held here (and I dislike holding 1414 * it for this long when not doing software aggregation), later on 1415 * break this function into "setup_normal" and "xmit_normal". The 1416 * lock only needs to be held for the ath_tx_handoff call. 1417 */ 1418static void 1419ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq, 1420 struct ath_buf *bf) 1421{ 1422 struct ath_node *an = ATH_NODE(bf->bf_node); 1423 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 1424
|
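/*
 * [Editorial sketch.]  The NAV arithmetic in ath_tx_calc_duration()
 * above: a frame that expects an ACK reserves one SIFS+ACK at the
 * chosen rate; a non-final fragment reserves a second SIFS+ACK plus
 * the airtime of the next fragment, so the NAV covers the whole
 * exchange.  The sk_* names are hypothetical; durations are the
 * precomputed microsecond values the rate table would supply.
 */
static unsigned
sk_frame_duration(unsigned ack_dur, int more_frag, unsigned next_frag_airtime)
{
	unsigned dur = ack_dur;			/* SIFS + ACK */

	if (more_frag)
		dur += ack_dur + next_frag_airtime; /* next frag + its ACK */
	return (dur);
}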
1424 ATH_TXQ_LOCK_ASSERT(txq);
| 1425 ATH_TX_LOCK_ASSERT(sc);
|
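/*
 * [Editorial sketch.]  The composition performed by
 * ath_tx_calc_ctsduration() above for pre-11n parts: optionally one
 * SIFS+CTS (a CTS is assumed ACK-sized, so the precomputed ACK
 * duration is reused), always the data frame's airtime, and
 * optionally one SIFS+ACK.  Short vs. long preamble only changes
 * which precomputed ACK duration is passed in.
 */
static int
sk_ctsduration(int rts_enabled, int noack, unsigned ack_dur,
    unsigned data_airtime)
{
	int dur = 0;

	if (rts_enabled)
		dur += ack_dur;		/* SIFS + CTS */
	dur += data_airtime;
	if (!noack)
		dur += ack_dur;		/* SIFS + ACK */
	return (dur);
}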
1425 1426 /* 1427 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does 1428 * set a completion handler, however it doesn't (yet) properly 1429 * handle the strict ordering requirements needed for normal, 1430 * non-aggregate session frames. 1431 * 1432 * Once this is implemented, only set CLRDMASK like this for 1433 * frames that must go out - eg management/raw frames. 1434 */ 1435 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1436 1437 /* Setup the descriptor before handoff */ 1438 ath_tx_do_ratelookup(sc, bf); 1439 ath_tx_calc_duration(sc, bf); 1440 ath_tx_calc_protection(sc, bf); 1441 ath_tx_set_rtscts(sc, bf); 1442 ath_tx_rate_fill_rcflags(sc, bf); 1443 ath_tx_setds(sc, bf); 1444 1445 /* Track per-TID hardware queue depth correctly */ 1446 tid->hwq_depth++; 1447 1448 /* Assign the completion handler */ 1449 bf->bf_comp = ath_tx_normal_comp; 1450 1451 /* Hand off to hardware */ 1452 ath_tx_handoff(sc, txq, bf); 1453} 1454 1455 /* 1456 * Do the basic frame setup stuff that's required before the frame 1457 * is added to a software queue. 1458 * 1459 * All frames get mostly the same treatment and it's done once. 1460 * Retransmits fiddle with things like the rate control setup, 1461 * setting the retransmit bit in the packet; doing relevant DMA/bus 1462 * syncing and relinking it (back) into the hardware TX queue. 1463 * 1464 * Note that this may cause the mbuf to be reallocated, so 1465 * m0 may not be valid. 1466 */ 1467 static int 1468 ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni, 1469 struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq) 1470 { 1471 struct ieee80211vap *vap = ni->ni_vap; 1472 struct ath_hal *ah = sc->sc_ah; 1473 struct ifnet *ifp = sc->sc_ifp; 1474 struct ieee80211com *ic = ifp->if_l2com; 1475 const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams; 1476 int error, iswep, ismcast, isfrag, ismrr; 1477 int keyix, hdrlen, pktlen, try0 = 0; 1478 u_int8_t rix = 0, txrate = 0; 1479 struct ath_desc *ds; 1480 struct ieee80211_frame *wh; 1481 u_int subtype, flags; 1482 HAL_PKT_TYPE atype; 1483 const HAL_RATE_TABLE *rt; 1484 HAL_BOOL shortPreamble; 1485 struct ath_node *an; 1486 u_int pri; 1487 1488 /* 1489 * To ensure that both sequence numbers and the CCMP PN handling 1490 * are "correct", make sure that the relevant TID queue is locked. 1491 * Otherwise the CCMP PN and seqno may appear out of order, causing 1492 * re-ordered frames to have out of order CCMP PN's, resulting 1493 * in many, many frame drops. 1494 */
| 1426 1427 /* 1428 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does 1429 * set a completion handler, however it doesn't (yet) properly 1430 * handle the strict ordering requirements needed for normal, 1431 * non-aggregate session frames. 1432 * 1433 * Once this is implemented, only set CLRDMASK like this for 1434 * frames that must go out - eg management/raw frames. 1435 */ 1436 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1437 1438 /* Setup the descriptor before handoff */ 1439 ath_tx_do_ratelookup(sc, bf); 1440 ath_tx_calc_duration(sc, bf); 1441 ath_tx_calc_protection(sc, bf); 1442 ath_tx_set_rtscts(sc, bf); 1443 ath_tx_rate_fill_rcflags(sc, bf); 1444 ath_tx_setds(sc, bf); 1445 1446 /* Track per-TID hardware queue depth correctly */ 1447 tid->hwq_depth++; 1448 1449 /* Assign the completion handler */ 1450 bf->bf_comp = ath_tx_normal_comp; 1451 1452 /* Hand off to hardware */ 1453 ath_tx_handoff(sc, txq, bf); 1454} 1455 1456 /* 1457 * Do the basic frame setup stuff that's required before the frame 1458 * is added to a software queue. 1459 * 1460 * All frames get mostly the same treatment and it's done once. 1461 * Retransmits fiddle with things like the rate control setup, 1462 * setting the retransmit bit in the packet; doing relevant DMA/bus 1463 * syncing and relinking it (back) into the hardware TX queue. 1464 * 1465 * Note that this may cause the mbuf to be reallocated, so 1466 * m0 may not be valid. 1467 */ 1468 static int 1469 ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni, 1470 struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq) 1471 { 1472 struct ieee80211vap *vap = ni->ni_vap; 1473 struct ath_hal *ah = sc->sc_ah; 1474 struct ifnet *ifp = sc->sc_ifp; 1475 struct ieee80211com *ic = ifp->if_l2com; 1476 const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams; 1477 int error, iswep, ismcast, isfrag, ismrr; 1478 int keyix, hdrlen, pktlen, try0 = 0; 1479 u_int8_t rix = 0, txrate = 0; 1480 struct ath_desc *ds; 1481 struct ieee80211_frame *wh; 1482 u_int subtype, flags; 1483 HAL_PKT_TYPE atype; 1484 const HAL_RATE_TABLE *rt; 1485 HAL_BOOL shortPreamble; 1486 struct ath_node *an; 1487 u_int pri; 1488 1489 /* 1490 * To ensure that both sequence numbers and the CCMP PN handling 1491 * are "correct", make sure that the relevant TID queue is locked. 1492 * Otherwise the CCMP PN and seqno may appear out of order, causing 1493 * re-ordered frames to have out of order CCMP PN's, resulting 1494 * in many, many frame drops. 1495 */
|
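/*
 * [Editorial sketch.]  The series fill in ath_tx_do_ratelookup()
 * above: series 0 always mirrors the primary lookup so that a later
 * MRR disable still leaves one valid rate/tries pair, and the
 * extended series are filled only when multi-rate retry is active
 * and try0 isn't already "retry forever".  sk_fill_xtx() is a
 * hypothetical stand-in for ath_rate_getxtxrates().
 */
struct sk_rc { int rix, tries, ratecode; };

static void sk_fill_xtx(struct sk_rc *rc) { (void)rc; }	/* stub */

static void
sk_fill_series(struct sk_rc rc[4], int rix, int ratecode, int try0,
    int ismrr, int try_forever)
{
	rc[0].rix = rix;
	rc[0].ratecode = ratecode;
	rc[0].tries = try0;
	if (ismrr && try0 != try_forever)
		sk_fill_xtx(rc);	/* populate series 1..3 */
}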
1495 ATH_TXQ_LOCK_ASSERT(txq);
| 1496 ATH_TX_LOCK_ASSERT(sc);
|
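/*
 * [Editorial sketch.]  The one-shot latch in ath_tx_update_clrdmask()
 * above: a per-TID "clear destination mask" request is consumed by
 * the first frame that sees it, so exactly one descriptor carries
 * the bit and subsequent frames are unaffected.  The bit value is
 * hypothetical.
 */
#define SK_CLRDMASK	0x4

static void
sk_update_clrdmask(int *tid_clrdmask, unsigned *txflags)
{
	if (*tid_clrdmask) {
		*txflags |= SK_CLRDMASK;
		*tid_clrdmask = 0;	/* one-shot: consumed here */
	}
}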
1496 1497 wh = mtod(m0, struct ieee80211_frame *); 1498 iswep = wh->i_fc[1] & IEEE80211_FC1_WEP; 1499 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 1500 isfrag = m0->m_flags & M_FRAG; 1501 hdrlen = ieee80211_anyhdrsize(wh); 1502 /* 1503 * Packet length must not include any 1504 * pad bytes; deduct them here. 1505 */ 1506 pktlen = m0->m_pkthdr.len - (hdrlen & 3); 1507 1508 /* Handle encryption twiddling if needed */ 1509 if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen, 1510 &pktlen, &keyix)) { 1511 ath_freetx(m0); 1512 return EIO; 1513 } 1514 1515 /* packet header may have moved, reset our local pointer */ 1516 wh = mtod(m0, struct ieee80211_frame *); 1517 1518 pktlen += IEEE80211_CRC_LEN; 1519 1520 /* 1521 * Load the DMA map so any coalescing is done. This 1522 * also calculates the number of descriptors we need. 1523 */ 1524 error = ath_tx_dmasetup(sc, bf, m0); 1525 if (error != 0) 1526 return error; 1527 bf->bf_node = ni; /* NB: held reference */ 1528 m0 = bf->bf_m; /* NB: may have changed */ 1529 wh = mtod(m0, struct ieee80211_frame *); 1530 1531 /* setup descriptors */ 1532 ds = bf->bf_desc; 1533 rt = sc->sc_currates; 1534 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 1535 1536 /* 1537 * NB: the 802.11 layer marks whether or not we should 1538 * use short preamble based on the current mode and 1539 * negotiated parameters. 1540 */ 1541 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 1542 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { 1543 shortPreamble = AH_TRUE; 1544 sc->sc_stats.ast_tx_shortpre++; 1545 } else { 1546 shortPreamble = AH_FALSE; 1547 } 1548 1549 an = ATH_NODE(ni); 1550 //flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 1551 flags = 0; 1552 ismrr = 0; /* default no multi-rate retry*/ 1553 pri = M_WME_GETAC(m0); /* honor classification */ 1554 /* XXX use txparams instead of fixed values */ 1555 /* 1556 * Calculate Atheros packet type from IEEE80211 packet header, 1557 * setup for rate calculations, and select h/w transmit queue. 1558 */ 1559 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { 1560 case IEEE80211_FC0_TYPE_MGT: 1561 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1562 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) 1563 atype = HAL_PKT_TYPE_BEACON; 1564 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 1565 atype = HAL_PKT_TYPE_PROBE_RESP; 1566 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) 1567 atype = HAL_PKT_TYPE_ATIM; 1568 else 1569 atype = HAL_PKT_TYPE_NORMAL; /* XXX */ 1570 rix = an->an_mgmtrix; 1571 txrate = rt->info[rix].rateCode; 1572 if (shortPreamble) 1573 txrate |= rt->info[rix].shortPreamble; 1574 try0 = ATH_TXMGTTRY; 1575 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 1576 break; 1577 case IEEE80211_FC0_TYPE_CTL: 1578 atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */ 1579 rix = an->an_mgmtrix; 1580 txrate = rt->info[rix].rateCode; 1581 if (shortPreamble) 1582 txrate |= rt->info[rix].shortPreamble; 1583 try0 = ATH_TXMGTTRY; 1584 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 1585 break; 1586 case IEEE80211_FC0_TYPE_DATA: 1587 atype = HAL_PKT_TYPE_NORMAL; /* default */ 1588 /* 1589 * Data frames: multicast frames go out at a fixed rate, 1590 * EAPOL frames use the mgmt frame rate; otherwise consult 1591 * the rate control module for the rate to use. 1592 */ 1593 if (ismcast) { 1594 rix = an->an_mcastrix; 1595 txrate = rt->info[rix].rateCode; 1596 if (shortPreamble) 1597 txrate |= rt->info[rix].shortPreamble; 1598 try0 = 1; 1599 } else if (m0->m_flags & M_EAPOL) { 1600 /* XXX? 
maybe always use long preamble? */ 1601 rix = an->an_mgmtrix; 1602 txrate = rt->info[rix].rateCode; 1603 if (shortPreamble) 1604 txrate |= rt->info[rix].shortPreamble; 1605 try0 = ATH_TXMAXTRY; /* XXX? too many? */ 1606 } else { 1607 /* 1608 * Do rate lookup on each TX, rather than using 1609 * the hard-coded TX information decided here. 1610 */ 1611 ismrr = 1; 1612 bf->bf_state.bfs_doratelookup = 1; 1613 } 1614 if (cap->cap_wmeParams[pri].wmep_noackPolicy) 1615 flags |= HAL_TXDESC_NOACK; 1616 break; 1617 default: 1618 if_printf(ifp, "bogus frame type 0x%x (%s)\n", 1619 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); 1620 /* XXX statistic */ 1621 ath_freetx(m0); 1622 return EIO; 1623 } 1624 1625 /* 1626 * There are two known scenarios where the frame AC doesn't match 1627 * what the destination TXQ is. 1628 * 1629 * + non-QoS frames (eg management?) that the net80211 stack has 1630 * assigned a higher AC to, but since it's a non-QoS TID, it's 1631 * being thrown into TID 16. TID 16 gets the AC_BE queue. 1632 * It's quite possible that management frames should just be 1633 * direct dispatched to hardware rather than go via the software 1634 * queue; that should be investigated in the future. There are 1635 * some specific scenarios where this doesn't make sense, mostly 1636 * surrounding ADDBA request/response - hence why that is special 1637 * cased. 1638 * 1639 * + Multicast frames going into the VAP mcast queue. That shows up 1640 * as "TXQ 11". 1641 * 1642 * This driver should eventually support separate TID and TXQ locking, 1643 * allowing for arbitrary AC frames to appear on arbitrary software 1644 * queues, being queued to the "correct" hardware queue when needed. 1645 */ 1646#if 0 1647 if (txq != sc->sc_ac2q[pri]) { 1648 device_printf(sc->sc_dev, 1649 "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n", 1650 __func__, 1651 txq, 1652 txq->axq_qnum, 1653 pri, 1654 sc->sc_ac2q[pri], 1655 sc->sc_ac2q[pri]->axq_qnum); 1656 } 1657#endif 1658 1659 /* 1660 * Calculate miscellaneous flags. 1661 */ 1662 if (ismcast) { 1663 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ 1664 } else if (pktlen > vap->iv_rtsthreshold && 1665 (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) { 1666 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ 1667 sc->sc_stats.ast_tx_rts++; 1668 } 1669 if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */ 1670 sc->sc_stats.ast_tx_noack++; 1671#ifdef IEEE80211_SUPPORT_TDMA 1672 if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) { 1673 DPRINTF(sc, ATH_DEBUG_TDMA, 1674 "%s: discard frame, ACK required w/ TDMA\n", __func__); 1675 sc->sc_stats.ast_tdma_ack++; 1676 ath_freetx(m0); 1677 return EIO; 1678 } 1679#endif 1680 1681 /* 1682 * Determine if a tx interrupt should be generated for 1683 * this descriptor. We take a tx interrupt to reap 1684 * descriptors when the h/w hits an EOL condition or 1685 * when the descriptor is specifically marked to generate 1686 * an interrupt. We periodically mark descriptors in this 1687 * way to ensure timely replenishing of the supply needed 1688 * for sending frames. Deferring interrupts reduces system 1689 * load and potentially allows more concurrent work to be 1690 * done but if done too aggressively can cause senders to 1691 * back up. 1692 * 1693 * NB: use >= to deal with sc_txintrperiod changing 1694 * dynamically through sysctl.
1695 */ 1696 if (flags & HAL_TXDESC_INTREQ) { 1697 txq->axq_intrcnt = 0; 1698 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) { 1699 flags |= HAL_TXDESC_INTREQ; 1700 txq->axq_intrcnt = 0; 1701 } 1702 1703 /* This point forward is actual TX bits */ 1704 1705 /* 1706 * At this point we are committed to sending the frame 1707 * and we don't need to look at m_nextpkt; clear it in 1708 * case this frame is part of a frag chain. 1709 */ 1710 m0->m_nextpkt = NULL; 1711 1712 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 1713 ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len, 1714 sc->sc_hwmap[rix].ieeerate, -1); 1715 1716 if (ieee80211_radiotap_active_vap(vap)) { 1717 u_int64_t tsf = ath_hal_gettsf64(ah); 1718 1719 sc->sc_tx_th.wt_tsf = htole64(tsf); 1720 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; 1721 if (iswep) 1722 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 1723 if (isfrag) 1724 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 1725 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; 1726 sc->sc_tx_th.wt_txpower = ni->ni_txpower; 1727 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 1728 1729 ieee80211_radiotap_tx(vap, m0); 1730 } 1731 1732 /* Blank the legacy rate array */ 1733 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 1734 1735 /* 1736 * ath_buf_set_rate needs at least one rate/try to setup 1737 * the rate scenario. 1738 */ 1739 bf->bf_state.bfs_rc[0].rix = rix; 1740 bf->bf_state.bfs_rc[0].tries = try0; 1741 bf->bf_state.bfs_rc[0].ratecode = txrate; 1742 1743 /* Store the decided rate index values away */ 1744 bf->bf_state.bfs_pktlen = pktlen; 1745 bf->bf_state.bfs_hdrlen = hdrlen; 1746 bf->bf_state.bfs_atype = atype; 1747 bf->bf_state.bfs_txpower = ni->ni_txpower; 1748 bf->bf_state.bfs_txrate0 = txrate; 1749 bf->bf_state.bfs_try0 = try0; 1750 bf->bf_state.bfs_keyix = keyix; 1751 bf->bf_state.bfs_txantenna = sc->sc_txantenna; 1752 bf->bf_state.bfs_txflags = flags; 1753 bf->bf_state.bfs_shpream = shortPreamble; 1754 1755 /* XXX this should be done in ath_tx_setrate() */ 1756 bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */ 1757 bf->bf_state.bfs_ctsrate = 0; /* calculated later */ 1758 bf->bf_state.bfs_ctsduration = 0; 1759 bf->bf_state.bfs_ismrr = ismrr; 1760 1761 return 0; 1762} 1763 1764 /* 1765 * Queue a frame to the hardware or software queue. 1766 * 1767 * This can be called by the net80211 code. 1768 * 1769 * XXX what about locking? Or, push the seqno assign into the 1770 * XXX aggregate scheduler so it's serialised? 1771 * 1772 * XXX When sending management frames via ath_raw_xmit(), 1773 * should CLRDMASK be set unconditionally? 1774 */ 1775 int 1776 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, 1777 struct ath_buf *bf, struct mbuf *m0) 1778 { 1779 struct ieee80211vap *vap = ni->ni_vap; 1780 struct ath_vap *avp = ATH_VAP(vap); 1781 int r = 0; 1782 u_int pri; 1783 int tid; 1784 struct ath_txq *txq; 1785 int ismcast; 1786 const struct ieee80211_frame *wh; 1787 int is_ampdu, is_ampdu_tx, is_ampdu_pending; 1788 ieee80211_seq seqno; 1789 uint8_t type, subtype; 1790
| 1497 1498 wh = mtod(m0, struct ieee80211_frame *); 1499 iswep = wh->i_fc[1] & IEEE80211_FC1_WEP; 1500 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 1501 isfrag = m0->m_flags & M_FRAG; 1502 hdrlen = ieee80211_anyhdrsize(wh); 1503 /* 1504 * Packet length must not include any 1505 * pad bytes; deduct them here. 1506 */ 1507 pktlen = m0->m_pkthdr.len - (hdrlen & 3); 1508 1509 /* Handle encryption twiddling if needed */ 1510 if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen, 1511 &pktlen, &keyix)) { 1512 ath_freetx(m0); 1513 return EIO; 1514 } 1515 1516 /* packet header may have moved, reset our local pointer */ 1517 wh = mtod(m0, struct ieee80211_frame *); 1518 1519 pktlen += IEEE80211_CRC_LEN; 1520 1521 /* 1522 * Load the DMA map so any coalescing is done. This 1523 * also calculates the number of descriptors we need. 1524 */ 1525 error = ath_tx_dmasetup(sc, bf, m0); 1526 if (error != 0) 1527 return error; 1528 bf->bf_node = ni; /* NB: held reference */ 1529 m0 = bf->bf_m; /* NB: may have changed */ 1530 wh = mtod(m0, struct ieee80211_frame *); 1531 1532 /* setup descriptors */ 1533 ds = bf->bf_desc; 1534 rt = sc->sc_currates; 1535 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 1536 1537 /* 1538 * NB: the 802.11 layer marks whether or not we should 1539 * use short preamble based on the current mode and 1540 * negotiated parameters. 1541 */ 1542 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 1543 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { 1544 shortPreamble = AH_TRUE; 1545 sc->sc_stats.ast_tx_shortpre++; 1546 } else { 1547 shortPreamble = AH_FALSE; 1548 } 1549 1550 an = ATH_NODE(ni); 1551 //flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 1552 flags = 0; 1553 ismrr = 0; /* default no multi-rate retry*/ 1554 pri = M_WME_GETAC(m0); /* honor classification */ 1555 /* XXX use txparams instead of fixed values */ 1556 /* 1557 * Calculate Atheros packet type from IEEE80211 packet header, 1558 * setup for rate calculations, and select h/w transmit queue. 1559 */ 1560 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { 1561 case IEEE80211_FC0_TYPE_MGT: 1562 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1563 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) 1564 atype = HAL_PKT_TYPE_BEACON; 1565 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 1566 atype = HAL_PKT_TYPE_PROBE_RESP; 1567 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) 1568 atype = HAL_PKT_TYPE_ATIM; 1569 else 1570 atype = HAL_PKT_TYPE_NORMAL; /* XXX */ 1571 rix = an->an_mgmtrix; 1572 txrate = rt->info[rix].rateCode; 1573 if (shortPreamble) 1574 txrate |= rt->info[rix].shortPreamble; 1575 try0 = ATH_TXMGTTRY; 1576 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 1577 break; 1578 case IEEE80211_FC0_TYPE_CTL: 1579 atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */ 1580 rix = an->an_mgmtrix; 1581 txrate = rt->info[rix].rateCode; 1582 if (shortPreamble) 1583 txrate |= rt->info[rix].shortPreamble; 1584 try0 = ATH_TXMGTTRY; 1585 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 1586 break; 1587 case IEEE80211_FC0_TYPE_DATA: 1588 atype = HAL_PKT_TYPE_NORMAL; /* default */ 1589 /* 1590 * Data frames: multicast frames go out at a fixed rate, 1591 * EAPOL frames use the mgmt frame rate; otherwise consult 1592 * the rate control module for the rate to use. 1593 */ 1594 if (ismcast) { 1595 rix = an->an_mcastrix; 1596 txrate = rt->info[rix].rateCode; 1597 if (shortPreamble) 1598 txrate |= rt->info[rix].shortPreamble; 1599 try0 = 1; 1600 } else if (m0->m_flags & M_EAPOL) { 1601 /* XXX? 
maybe always use long preamble? */ 1602 rix = an->an_mgmtrix; 1603 txrate = rt->info[rix].rateCode; 1604 if (shortPreamble) 1605 txrate |= rt->info[rix].shortPreamble; 1606 try0 = ATH_TXMAXTRY; /* XXX? too many? */ 1607 } else { 1608 /* 1609 * Do rate lookup on each TX, rather than using 1610 * the hard-coded TX information decided here. 1611 */ 1612 ismrr = 1; 1613 bf->bf_state.bfs_doratelookup = 1; 1614 } 1615 if (cap->cap_wmeParams[pri].wmep_noackPolicy) 1616 flags |= HAL_TXDESC_NOACK; 1617 break; 1618 default: 1619 if_printf(ifp, "bogus frame type 0x%x (%s)\n", 1620 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); 1621 /* XXX statistic */ 1622 ath_freetx(m0); 1623 return EIO; 1624 } 1625 1626 /* 1627 * There are two known scenarios where the frame AC doesn't match 1628 * what the destination TXQ is. 1629 * 1630 * + non-QoS frames (eg management?) that the net80211 stack has 1631 * assigned a higher AC to, but since it's a non-QoS TID, it's 1632 * being thrown into TID 16. TID 16 gets the AC_BE queue. 1633 * It's quite possible that management frames should just be 1634 * direct dispatched to hardware rather than go via the software 1635 * queue; that should be investigated in the future. There are 1636 * some specific scenarios where this doesn't make sense, mostly 1637 * surrounding ADDBA request/response - hence why that is special 1638 * cased. 1639 * 1640 * + Multicast frames going into the VAP mcast queue. That shows up 1641 * as "TXQ 11". 1642 * 1643 * This driver should eventually support separate TID and TXQ locking, 1644 * allowing for arbitrary AC frames to appear on arbitrary software 1645 * queues, being queued to the "correct" hardware queue when needed. 1646 */ 1647#if 0 1648 if (txq != sc->sc_ac2q[pri]) { 1649 device_printf(sc->sc_dev, 1650 "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n", 1651 __func__, 1652 txq, 1653 txq->axq_qnum, 1654 pri, 1655 sc->sc_ac2q[pri], 1656 sc->sc_ac2q[pri]->axq_qnum); 1657 } 1658#endif 1659 1660 /* 1661 * Calculate miscellaneous flags. 1662 */ 1663 if (ismcast) { 1664 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ 1665 } else if (pktlen > vap->iv_rtsthreshold && 1666 (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) { 1667 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ 1668 sc->sc_stats.ast_tx_rts++; 1669 } 1670 if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */ 1671 sc->sc_stats.ast_tx_noack++; 1672#ifdef IEEE80211_SUPPORT_TDMA 1673 if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) { 1674 DPRINTF(sc, ATH_DEBUG_TDMA, 1675 "%s: discard frame, ACK required w/ TDMA\n", __func__); 1676 sc->sc_stats.ast_tdma_ack++; 1677 ath_freetx(m0); 1678 return EIO; 1679 } 1680#endif 1681 1682 /* 1683 * Determine if a tx interrupt should be generated for 1684 * this descriptor. We take a tx interrupt to reap 1685 * descriptors when the h/w hits an EOL condition or 1686 * when the descriptor is specifically marked to generate 1687 * an interrupt. We periodically mark descriptors in this 1688 * way to ensure timely replenishing of the supply needed 1689 * for sending frames. Deferring interrupts reduces system 1690 * load and potentially allows more concurrent work to be 1691 * done but if done too aggressively can cause senders to 1692 * back up. 1693 * 1694 * NB: use >= to deal with sc_txintrperiod changing 1695 * dynamically through sysctl.
1696 */ 1697 if (flags & HAL_TXDESC_INTREQ) { 1698 txq->axq_intrcnt = 0; 1699 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) { 1700 flags |= HAL_TXDESC_INTREQ; 1701 txq->axq_intrcnt = 0; 1702 } 1703 1704 /* This point forward is actual TX bits */ 1705 1706 /* 1707 * At this point we are committed to sending the frame 1708 * and we don't need to look at m_nextpkt; clear it in 1709 * case this frame is part of a frag chain. 1710 */ 1711 m0->m_nextpkt = NULL; 1712 1713 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 1714 ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len, 1715 sc->sc_hwmap[rix].ieeerate, -1); 1716 1717 if (ieee80211_radiotap_active_vap(vap)) { 1718 u_int64_t tsf = ath_hal_gettsf64(ah); 1719 1720 sc->sc_tx_th.wt_tsf = htole64(tsf); 1721 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; 1722 if (iswep) 1723 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 1724 if (isfrag) 1725 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 1726 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; 1727 sc->sc_tx_th.wt_txpower = ni->ni_txpower; 1728 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 1729 1730 ieee80211_radiotap_tx(vap, m0); 1731 } 1732 1733 /* Blank the legacy rate array */ 1734 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 1735 1736 /* 1737 * ath_buf_set_rate needs at least one rate/try to setup 1738 * the rate scenario. 1739 */ 1740 bf->bf_state.bfs_rc[0].rix = rix; 1741 bf->bf_state.bfs_rc[0].tries = try0; 1742 bf->bf_state.bfs_rc[0].ratecode = txrate; 1743 1744 /* Store the decided rate index values away */ 1745 bf->bf_state.bfs_pktlen = pktlen; 1746 bf->bf_state.bfs_hdrlen = hdrlen; 1747 bf->bf_state.bfs_atype = atype; 1748 bf->bf_state.bfs_txpower = ni->ni_txpower; 1749 bf->bf_state.bfs_txrate0 = txrate; 1750 bf->bf_state.bfs_try0 = try0; 1751 bf->bf_state.bfs_keyix = keyix; 1752 bf->bf_state.bfs_txantenna = sc->sc_txantenna; 1753 bf->bf_state.bfs_txflags = flags; 1754 bf->bf_state.bfs_shpream = shortPreamble; 1755 1756 /* XXX this should be done in ath_tx_setrate() */ 1757 bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */ 1758 bf->bf_state.bfs_ctsrate = 0; /* calculated later */ 1759 bf->bf_state.bfs_ctsduration = 0; 1760 bf->bf_state.bfs_ismrr = ismrr; 1761 1762 return 0; 1763} 1764 1765 /* 1766 * Queue a frame to the hardware or software queue. 1767 * 1768 * This can be called by the net80211 code. 1769 * 1770 * XXX what about locking? Or, push the seqno assign into the 1771 * XXX aggregate scheduler so it's serialised? 1772 * 1773 * XXX When sending management frames via ath_raw_xmit(), 1774 * should CLRDMASK be set unconditionally? 1775 */ 1776 int 1777 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, 1778 struct ath_buf *bf, struct mbuf *m0) 1779 { 1780 struct ieee80211vap *vap = ni->ni_vap; 1781 struct ath_vap *avp = ATH_VAP(vap); 1782 int r = 0; 1783 u_int pri; 1784 int tid; 1785 struct ath_txq *txq; 1786 int ismcast; 1787 const struct ieee80211_frame *wh; 1788 int is_ampdu, is_ampdu_tx, is_ampdu_pending; 1789 ieee80211_seq seqno; 1790 uint8_t type, subtype; 1791
|
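/*
 * [Editorial sketch.]  The TX interrupt mitigation from
 * ath_tx_normal_setup() above: frames that already force an
 * interrupt reset the counter, otherwise every sc_txintrperiod-th
 * frame is marked, bounding how long completed descriptors can sit
 * unreaped.  Using ">=" (not "==") keeps this safe when the period
 * is lowered at runtime via sysctl.  The flag value is hypothetical.
 */
#define SK_INTREQ	0x8

static unsigned
sk_intr_mitigate(unsigned flags, int *intrcnt, int period)
{
	if (flags & SK_INTREQ)
		*intrcnt = 0;
	else if (++(*intrcnt) >= period) {
		flags |= SK_INTREQ;
		*intrcnt = 0;
	}
	return (flags);
}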
| 1792 ATH_TX_LOCK_ASSERT(sc); 1793
|
1791 /* 1792 * Determine the target hardware queue. 1793 * 1794 * For multicast frames, the txq gets overridden appropriately 1795 * depending upon the state of PS. 1796 * 1797 * For any other frame, we do a TID/QoS lookup inside the frame 1798 * to see what the TID should be. If it's a non-QoS frame, the 1799 * AC and TID are overridden. The TID/TXQ code assumes the 1800 * TID is on a predictable hardware TXQ, so we don't support 1801 * having a node TID queued to multiple hardware TXQs. 1802 * This may change in the future but would require some locking 1803 * fudgery. 1804 */ 1805 pri = ath_tx_getac(sc, m0); 1806 tid = ath_tx_gettid(sc, m0); 1807 1808 txq = sc->sc_ac2q[pri]; 1809 wh = mtod(m0, struct ieee80211_frame *); 1810 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 1811 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 1812 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1813 1814 /* 1815 * Enforce how deep the multicast queue can grow. 1816 * 1817 * XXX duplicated in ath_raw_xmit(). 1818 */ 1819 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1822 if (sc->sc_cabq->axq_depth > sc->sc_txq_mcastq_maxdepth) { 1823 sc->sc_stats.ast_tx_mcastq_overflow++; 1824 r = ENOBUFS; 1825 }
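		/*
		 * NB: ENOBUFS here is deliberate back-pressure - CAB
		 * traffic only drains once per beacon interval, so
		 * without a depth cap a multicast burst could
		 * presumably tie up the whole TX buffer pool.
		 */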
1829		if (r != 0) { 1830			m_freem(m0); 1831			return r; 1832		} 1833	} 1834 1835	/* A-MPDU TX */ 1836	is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid); 1837	is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid); 1838	is_ampdu = is_ampdu_tx | is_ampdu_pending; 1839 1840	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n", 1841	    __func__, tid, pri, is_ampdu); 1842 1843	/* Set local packet state, used to queue packets to hardware */ 1844	bf->bf_state.bfs_tid = tid; 1845	bf->bf_state.bfs_txq = txq; 1846	bf->bf_state.bfs_pri = pri; 1847 1848	/* 1849	 * When servicing one or more stations in power-save mode 1850	 * (or if there is some mcast data waiting on the mcast 1851	 * queue, to prevent out of order delivery) multicast frames 1852	 * must be buffered until after the beacon. 1853	 * 1854	 * TODO: we should lock the mcastq before we check the length. 1855	 */ 1856	if (ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) { 1857		txq = &avp->av_mcastq; 1858		/* 1859		 * Mark the frame as eventually belonging on the CAB 1860		 * queue, so the descriptor setup functions will 1861		 * correctly initialise the descriptor 'qcuId' field. 1862		 */ 1863		bf->bf_state.bfs_txq = sc->sc_cabq; 1864	} 1865 1866	/* Do the generic frame setup */ 1867	/* XXX should just bzero the bf_state? */ 1868	bf->bf_state.bfs_dobaw = 0; 1869
1880 /* A-MPDU TX? Manually set sequence number */ 1881 /* 1882 * Don't do it whilst pending; the net80211 layer still 1883 * assigns them. 1884 */ 1885 if (is_ampdu_tx) { 1886 /* 1887 * Always call; this function will 1888 * handle making sure that null data frames 1889 * don't get a sequence number from the current 1890 * TID and thus mess with the BAW. 1891 */ 1892 seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0); 1893 1894 /* 1895 * Don't add QoS NULL frames to the BAW. 1896 */ 1897 if (IEEE80211_QOS_HAS_SEQ(wh) && 1898 subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) { 1899 bf->bf_state.bfs_dobaw = 1; 1900 } 1901 } 1902 1903 /* 1904 * If needed, the sequence number has been assigned. 1905 * Squirrel it away somewhere easy to get to. 1906 */ 1907 bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT; 1908 1909 /* Is ampdu pending? fetch the seqno and print it out */ 1910 if (is_ampdu_pending) 1911 DPRINTF(sc, ATH_DEBUG_SW_TX, 1912 "%s: tid %d: ampdu pending, seqno %d\n", 1913 __func__, tid, M_SEQNO_GET(m0)); 1914 1915 /* This also sets up the DMA map */ 1916 r = ath_tx_normal_setup(sc, ni, bf, m0, txq); 1917 1918 if (r != 0) 1919 goto done; 1920 1921 /* At this point m0 could have changed! */ 1922 m0 = bf->bf_m; 1923 1924#if 1 1925 /* 1926 * If it's a multicast frame, do a direct-dispatch to the 1927 * destination hardware queue. Don't bother software 1928 * queuing it. 1929 */ 1930 /* 1931 * If it's a BAR frame, do a direct dispatch to the 1932 * destination hardware queue. Don't bother software 1933 * queuing it, as the TID will now be paused. 1934 * Sending a BAR frame can occur from the net80211 txa timer 1935 * (ie, retries) or from the ath txtask (completion call.) 1936 * It queues directly to hardware because the TID is paused 1937 * at this point (and won't be unpaused until the BAR has 1938 * either been TXed successfully or max retries has been 1939 * reached.) 1940 */ 1941 if (txq == &avp->av_mcastq) { 1942 DPRINTF(sc, ATH_DEBUG_SW_TX, 1943 "%s: bf=%p: mcastq: TX'ing\n", __func__, bf); 1944 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1945 ath_tx_xmit_normal(sc, txq, bf); 1946 } else if (type == IEEE80211_FC0_TYPE_CTL && 1947 subtype == IEEE80211_FC0_SUBTYPE_BAR) { 1948 DPRINTF(sc, ATH_DEBUG_SW_TX, 1949 "%s: BAR: TX'ing direct\n", __func__); 1950 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1951 ath_tx_xmit_normal(sc, txq, bf); 1952 } else { 1953 /* add to software queue */ 1954 DPRINTF(sc, ATH_DEBUG_SW_TX, 1955 "%s: bf=%p: swq: TX'ing\n", __func__, bf); 1956 ath_tx_swq(sc, ni, txq, bf); 1957 } 1958#else 1959 /* 1960 * For now, since there's no software queue, 1961 * direct-dispatch to the hardware. 1962 */ 1963 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1964 ath_tx_xmit_normal(sc, txq, bf); 1965#endif 1966done:
1969 return 0; 1970} 1971 1972static int 1973ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni, 1974 struct ath_buf *bf, struct mbuf *m0, 1975 const struct ieee80211_bpf_params *params) 1976{ 1977 struct ifnet *ifp = sc->sc_ifp; 1978 struct ieee80211com *ic = ifp->if_l2com; 1979 struct ath_hal *ah = sc->sc_ah; 1980 struct ieee80211vap *vap = ni->ni_vap; 1981 int error, ismcast, ismrr; 1982 int keyix, hdrlen, pktlen, try0, txantenna; 1983 u_int8_t rix, txrate; 1984 struct ieee80211_frame *wh; 1985 u_int flags; 1986 HAL_PKT_TYPE atype; 1987 const HAL_RATE_TABLE *rt; 1988 struct ath_desc *ds; 1989 u_int pri; 1990 int o_tid = -1; 1991 int do_override; 1992
	ATH_TX_LOCK_ASSERT(sc);
1993 wh = mtod(m0, struct ieee80211_frame *); 1994 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 1995 hdrlen = ieee80211_anyhdrsize(wh); 1996 /* 1997 * Packet length must not include any 1998 * pad bytes; deduct them here. 1999 */ 2000 /* XXX honor IEEE80211_BPF_DATAPAD */ 2001 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; 2002 2003 ATH_KTR(sc, ATH_KTR_TX, 2, 2004 "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf); 2005 2006 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n", 2007 __func__, ismcast); 2008 2009 pri = params->ibp_pri & 3; 2010 /* Override pri if the frame isn't a QoS one */ 2011 if (! IEEE80211_QOS_HAS_SEQ(wh)) 2012 pri = ath_tx_getac(sc, m0); 2013 2014 /* XXX If it's an ADDBA, override the correct queue */ 2015 do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid); 2016 2017 /* Map ADDBA to the correct priority */ 2018 if (do_override) { 2019#if 0 2020 device_printf(sc->sc_dev, 2021 "%s: overriding tid %d pri %d -> %d\n", 2022 __func__, o_tid, pri, TID_TO_WME_AC(o_tid)); 2023#endif 2024 pri = TID_TO_WME_AC(o_tid); 2025 } 2026
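	/*
	 * See ath_tx_action_frame_override_queue() below for why an
	 * ADDBA request has to follow any pending frames for this
	 * node/TID onto the same hardware queue.
	 */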
2029 /* Handle encryption twiddling if needed */ 2030 if (! ath_tx_tag_crypto(sc, ni, 2031 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, 2032 &hdrlen, &pktlen, &keyix)) { 2033 ath_freetx(m0); 2034 return EIO; 2035 } 2036 /* packet header may have moved, reset our local pointer */ 2037 wh = mtod(m0, struct ieee80211_frame *); 2038 2039 /* Do the generic frame setup */ 2040 /* XXX should just bzero the bf_state? */ 2041 bf->bf_state.bfs_dobaw = 0; 2042 2043 error = ath_tx_dmasetup(sc, bf, m0); 2044 if (error != 0) 2045 return error; 2046 m0 = bf->bf_m; /* NB: may have changed */ 2047 wh = mtod(m0, struct ieee80211_frame *); 2048 bf->bf_node = ni; /* NB: held reference */ 2049 2050 /* Always enable CLRDMASK for raw frames for now.. */ 2051 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 2052 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 2053 if (params->ibp_flags & IEEE80211_BPF_RTS) 2054 flags |= HAL_TXDESC_RTSENA; 2055 else if (params->ibp_flags & IEEE80211_BPF_CTS) { 2056 /* XXX assume 11g/11n protection? */ 2057 bf->bf_state.bfs_doprot = 1; 2058 flags |= HAL_TXDESC_CTSENA; 2059 } 2060 /* XXX leave ismcast to injector? */ 2061 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) 2062 flags |= HAL_TXDESC_NOACK; 2063 2064 rt = sc->sc_currates; 2065 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 2066 rix = ath_tx_findrix(sc, params->ibp_rate0); 2067 txrate = rt->info[rix].rateCode; 2068 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 2069 txrate |= rt->info[rix].shortPreamble; 2070 sc->sc_txrix = rix; 2071 try0 = params->ibp_try0; 2072 ismrr = (params->ibp_try1 != 0); 2073 txantenna = params->ibp_pri >> 2; 2074 if (txantenna == 0) /* XXX? */ 2075 txantenna = sc->sc_txantenna; 2076 2077 /* 2078 * Since ctsrate is fixed, store it away for later 2079 * use when the descriptor fields are being set. 2080 */ 2081 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) 2082 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate; 2083 2084 /* 2085 * NB: we mark all packets as type PSPOLL so the h/w won't 2086 * set the sequence number, duration, etc. 2087 */ 2088 atype = HAL_PKT_TYPE_PSPOLL; 2089 2090 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 2091 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, 2092 sc->sc_hwmap[rix].ieeerate, -1); 2093 2094 if (ieee80211_radiotap_active_vap(vap)) { 2095 u_int64_t tsf = ath_hal_gettsf64(ah); 2096 2097 sc->sc_tx_th.wt_tsf = htole64(tsf); 2098 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; 2099 if (wh->i_fc[1] & IEEE80211_FC1_WEP) 2100 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2101 if (m0->m_flags & M_FRAG) 2102 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 2103 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; 2104 sc->sc_tx_th.wt_txpower = ni->ni_txpower; 2105 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 2106 2107 ieee80211_radiotap_tx(vap, m0); 2108 } 2109 2110 /* 2111 * Formulate first tx descriptor with tx controls. 2112 */ 2113 ds = bf->bf_desc; 2114 /* XXX check return value? */ 2115 2116 /* Store the decided rate index values away */ 2117 bf->bf_state.bfs_pktlen = pktlen; 2118 bf->bf_state.bfs_hdrlen = hdrlen; 2119 bf->bf_state.bfs_atype = atype; 2120 bf->bf_state.bfs_txpower = params->ibp_power; 2121 bf->bf_state.bfs_txrate0 = txrate; 2122 bf->bf_state.bfs_try0 = try0; 2123 bf->bf_state.bfs_keyix = keyix; 2124 bf->bf_state.bfs_txantenna = txantenna; 2125 bf->bf_state.bfs_txflags = flags; 2126 bf->bf_state.bfs_shpream = 2127 !! 
(params->ibp_flags & IEEE80211_BPF_SHORTPRE); 2128 2129	/* Set local packet state, used to queue packets to hardware */ 2130	bf->bf_state.bfs_tid = WME_AC_TO_TID(pri); 2131	bf->bf_state.bfs_txq = sc->sc_ac2q[pri]; 2132	bf->bf_state.bfs_pri = pri; 2133 2134	/* XXX this should be done in ath_tx_setrate() */ 2135	bf->bf_state.bfs_ctsrate = 0; 2136	bf->bf_state.bfs_ctsduration = 0; 2137	bf->bf_state.bfs_ismrr = ismrr; 2138 2139	/* Blank the legacy rate array */ 2140	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 2141 2142	bf->bf_state.bfs_rc[0].rix = 2143	    ath_tx_findrix(sc, params->ibp_rate0); 2144	bf->bf_state.bfs_rc[0].tries = try0; 2145	bf->bf_state.bfs_rc[0].ratecode = txrate; 2146 2147	if (ismrr) { 2148		int rix; 2149 2150		rix = ath_tx_findrix(sc, params->ibp_rate1); 2151		bf->bf_state.bfs_rc[1].rix = rix; 2152		bf->bf_state.bfs_rc[1].tries = params->ibp_try1; 2153 2154		rix = ath_tx_findrix(sc, params->ibp_rate2); 2155		bf->bf_state.bfs_rc[2].rix = rix; 2156		bf->bf_state.bfs_rc[2].tries = params->ibp_try2; 2157 2158		rix = ath_tx_findrix(sc, params->ibp_rate3); 2159		bf->bf_state.bfs_rc[3].rix = rix; 2160		bf->bf_state.bfs_rc[3].tries = params->ibp_try3; 2161	} 2162	/* 2163	 * All the required rate control decisions have been made; 2164	 * fill in the rc flags. 2165	 */ 2166	ath_tx_rate_fill_rcflags(sc, bf); 2167 2168	/* NB: no buffered multicast in power save support */ 2169 2170	/* 2171	 * If we're overriding the ADDBA destination, dump directly 2172	 * into the hardware queue, right after any pending 2173	 * frames to that node. 2174	 */ 2175	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n", 2176	    __func__, do_override); 2177 2178#if 1 2179	if (do_override) { 2180		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2181		ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2182	} else { 2183		/* Queue to software queue */ 2184		ath_tx_swq(sc, ni, sc->sc_ac2q[pri], bf); 2185	} 2186#else 2187	/* Direct-dispatch to the hardware */ 2188	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2189	ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2190#endif
2193 return 0; 2194} 2195 2196/* 2197 * Send a raw frame. 2198 * 2199 * This can be called by net80211. 2200 */ 2201int 2202ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 2203 const struct ieee80211_bpf_params *params) 2204{ 2205 struct ieee80211com *ic = ni->ni_ic; 2206 struct ifnet *ifp = ic->ic_ifp; 2207 struct ath_softc *sc = ifp->if_softc; 2208 struct ath_buf *bf; 2209 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 2210 int error = 0; 2211 2212 ATH_PCU_LOCK(sc); 2213 if (sc->sc_inreset_cnt > 0) { 2214 device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; bailing\n", 2215 __func__); 2216 error = EIO; 2217 ATH_PCU_UNLOCK(sc); 2218 goto bad0; 2219 } 2220 sc->sc_txstart_cnt++; 2221 ATH_PCU_UNLOCK(sc); 2222 2223 ATH_TX_LOCK(sc); 2224 2225 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) { 2226 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__, 2227 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ? 2228 "!running" : "invalid"); 2229 m_freem(m); 2230 error = ENETDOWN; 2231 goto bad; 2232 } 2233 2234 /* 2235 * Enforce how deep the multicast queue can grow. 2236 * 2237 * XXX duplicated in ath_tx_start(). 2238 */ 2239 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2242 if (sc->sc_cabq->axq_depth > sc->sc_txq_mcastq_maxdepth) { 2243 sc->sc_stats.ast_tx_mcastq_overflow++; 2244 error = ENOBUFS; 2245 } 2246
2249		if (error != 0) { 2250			m_freem(m); 2251			goto bad; 2252		} 2253	} 2254 2255	/* 2256	 * Grab a TX buffer and associated resources. 2257	 */ 2258	bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); 2259	if (bf == NULL) { 2260		sc->sc_stats.ast_tx_nobuf++; 2261		m_freem(m); 2262		error = ENOBUFS; 2263		goto bad; 2264	} 2265	ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n", 2266	    m, params,  bf); 2267 2268	if (params == NULL) { 2269		/* 2270		 * Legacy path; interpret frame contents to decide 2271		 * precisely how to send the frame. 2272		 */ 2273		if (ath_tx_start(sc, ni, bf, m)) { 2274			error = EIO;		/* XXX */ 2275			goto bad2; 2276		} 2277	} else { 2278		/* 2279		 * Caller supplied explicit parameters to use in 2280		 * sending the frame. 2281		 */ 2282		if (ath_tx_raw_start(sc, ni, bf, m, params)) { 2283			error = EIO;		/* XXX */ 2284			goto bad2; 2285		} 2286	} 2287	sc->sc_wd_timer = 5; 2288	ifp->if_opackets++; 2289	sc->sc_stats.ast_tx_raw++; 2290 2291	/* 2292	 * Update the TIM - if there's anything queued to the 2293	 * software queue and power save is enabled, we should 2294	 * set the TIM. 2295	 */ 2296	ath_tx_update_tim(sc, ni, 1); 2297 2298	ATH_PCU_LOCK(sc); 2299	sc->sc_txstart_cnt--; 2300	ATH_PCU_UNLOCK(sc); 2301 2302	ATH_TX_UNLOCK(sc); 2303 2304	return 0; 2305bad2: 2306	ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, " 2307	    "bf=%p", 2308	    m, 2309	    params, 2310	    bf); 2311	ATH_TXBUF_LOCK(sc); 2312	ath_returnbuf_head(sc, bf); 2313	ATH_TXBUF_UNLOCK(sc); 2314bad: 2315 2316	ATH_TX_UNLOCK(sc); 2317 2318	ATH_PCU_LOCK(sc); 2319	sc->sc_txstart_cnt--; 2320	ATH_PCU_UNLOCK(sc); 2321bad0: 2322	ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p", 2323	    m, params); 2324	ifp->if_oerrors++; 2325	sc->sc_stats.ast_tx_raw_fail++; 2326	ieee80211_free_node(ni); 2327 2328	return error; 2329} 2330 2331/* Some helper functions */ 2332 2333/* 2334 * An ADDBA (and potentially other action frames) needs to be 2335 * placed in the same hardware queue as the TID/node it relates 2336 * to. This is so it goes out after any pending non-aggregate 2337 * frames to the same node/TID. 2338 * 2339 * If this isn't done, the ADDBA can go out before the frames 2340 * queued in hardware. Even though those frames have sequence 2341 * numbers -earlier- than the ADDBA (and no frames whose 2342 * sequence numbers are after the ADDBA should be queued before 2343 * it!) they'll arrive after the ADDBA - and the receiving end 2344 * will simply drop them as being out of the BAW. 2345 * 2346 * The ADDBA can't be appended to the TID software queue - it'd 2347 * never be sent out. So it has to be directly 2348 * dispatched to the hardware, rather than queued in software. 2349 * So if this function returns true, the TXQ has to be 2350 * overridden and the frame has to be directly dispatched. 2351 * 2352 * It's a dirty hack, but someone's gotta do it. 2353 */ 2354 2355/* 2356 * XXX doesn't belong here! 2357 */ 2358static int 2359ieee80211_is_action(struct ieee80211_frame *wh) 2360{ 2361	/* Type: Management frame? */ 2362	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != 2363	    IEEE80211_FC0_TYPE_MGT) 2364		return 0; 2365 2366	/* Subtype: Action frame? */ 2367	if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) != 2368	    IEEE80211_FC0_SUBTYPE_ACTION) 2369		return 0; 2370 2371	return 1; 2372} 2373 2374#define	MS(_v, _f)	(((_v) & _f) >> _f##_S) 2375/* 2376 * Return an alternate TID for ADDBA request frames. 2377 * 2378 * Yes, this likely should be done in the net80211 layer.
2379 */ 2380static int 2381ath_tx_action_frame_override_queue(struct ath_softc *sc, 2382 struct ieee80211_node *ni, 2383 struct mbuf *m0, int *tid) 2384{ 2385 struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *); 2386 struct ieee80211_action_ba_addbarequest *ia; 2387 uint8_t *frm; 2388 uint16_t baparamset; 2389 2390 /* Not action frame? Bail */ 2391 if (! ieee80211_is_action(wh)) 2392 return 0; 2393 2394 /* XXX Not needed for frames we send? */ 2395#if 0 2396 /* Correct length? */ 2397 if (! ieee80211_parse_action(ni, m)) 2398 return 0; 2399#endif 2400 2401 /* Extract out action frame */ 2402 frm = (u_int8_t *)&wh[1]; 2403 ia = (struct ieee80211_action_ba_addbarequest *) frm; 2404 2405 /* Not ADDBA? Bail */ 2406 if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA) 2407 return 0; 2408 if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST) 2409 return 0; 2410 2411 /* Extract TID, return it */ 2412 baparamset = le16toh(ia->rq_baparamset); 2413 *tid = (int) MS(baparamset, IEEE80211_BAPS_TID); 2414 2415 return 1; 2416} 2417#undef MS 2418 2419/* Per-node software queue operations */ 2420 2421/* 2422 * Add the current packet to the given BAW. 2423 * It is assumed that the current packet 2424 * 2425 * + fits inside the BAW; 2426 * + already has had a sequence number allocated. 2427 * 2428 * Since the BAW status may be modified by both the ath task and 2429 * the net80211/ifnet contexts, the TID must be locked. 2430 */ 2431void 2432ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an, 2433 struct ath_tid *tid, struct ath_buf *bf) 2434{ 2435 int index, cindex; 2436 struct ieee80211_tx_ampdu *tap; 2437
	ATH_TX_LOCK_ASSERT(sc);
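	/*
	 * A worked example of the slot arithmetic below:
	 * ATH_BA_INDEX() returns the frame's offset from the BAW left
	 * edge, modulo the 4096-entry sequence space, so txa_start=100
	 * and seqno=103 give index=3; with baw_head=10 the frame then
	 * lands in slot cindex = (10 + 3) & (ATH_TID_MAX_BUFS - 1) of
	 * the tx_buf[] ring.
	 */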
2440 2441 if (bf->bf_state.bfs_isretried) 2442 return; 2443 2444 tap = ath_tx_get_tx_tid(an, tid->tid); 2445 2446 if (! bf->bf_state.bfs_dobaw) { 2447 device_printf(sc->sc_dev, 2448 "%s: dobaw=0, seqno=%d, window %d:%d\n", 2449 __func__, 2450 SEQNO(bf->bf_state.bfs_seqno), 2451 tap->txa_start, 2452 tap->txa_wnd); 2453 } 2454 2455 if (bf->bf_state.bfs_addedbaw) 2456 device_printf(sc->sc_dev, 2457 "%s: re-added? tid=%d, seqno %d; window %d:%d; " 2458 "baw head=%d tail=%d\n", 2459 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2460 tap->txa_start, tap->txa_wnd, tid->baw_head, 2461 tid->baw_tail); 2462 2463 /* 2464 * Verify that the given sequence number is not outside of the 2465 * BAW. Complain loudly if that's the case. 2466 */ 2467 if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2468 SEQNO(bf->bf_state.bfs_seqno))) { 2469 device_printf(sc->sc_dev, 2470 "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; " 2471 "baw head=%d tail=%d\n", 2472 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2473 tap->txa_start, tap->txa_wnd, tid->baw_head, 2474 tid->baw_tail); 2475 } 2476 2477 /* 2478 * ni->ni_txseqs[] is the currently allocated seqno. 2479 * the txa state contains the current baw start. 2480 */ 2481 index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno)); 2482 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2483 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2484 "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d " 2485 "baw head=%d tail=%d\n", 2486 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2487 tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head, 2488 tid->baw_tail); 2489 2490 2491#if 0 2492 assert(tid->tx_buf[cindex] == NULL); 2493#endif 2494 if (tid->tx_buf[cindex] != NULL) { 2495 device_printf(sc->sc_dev, 2496 "%s: ba packet dup (index=%d, cindex=%d, " 2497 "head=%d, tail=%d)\n", 2498 __func__, index, cindex, tid->baw_head, tid->baw_tail); 2499 device_printf(sc->sc_dev, 2500 "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n", 2501 __func__, 2502 tid->tx_buf[cindex], 2503 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno), 2504 bf, 2505 SEQNO(bf->bf_state.bfs_seqno) 2506 ); 2507 } 2508 tid->tx_buf[cindex] = bf; 2509 2510 if (index >= ((tid->baw_tail - tid->baw_head) & 2511 (ATH_TID_MAX_BUFS - 1))) { 2512 tid->baw_tail = cindex; 2513 INCR(tid->baw_tail, ATH_TID_MAX_BUFS); 2514 } 2515} 2516 2517/* 2518 * Flip the BAW buffer entry over from the existing one to the new one. 2519 * 2520 * When software retransmitting a (sub-)frame, it is entirely possible that 2521 * the frame ath_buf is marked as BUSY and can't be immediately reused. 2522 * In that instance the buffer is cloned and the new buffer is used for 2523 * retransmit. We thus need to update the ath_buf slot in the BAW buf 2524 * tracking array to maintain consistency. 2525 */ 2526static void 2527ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an, 2528 struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf) 2529{ 2530 int index, cindex; 2531 struct ieee80211_tx_ampdu *tap; 2532 int seqno = SEQNO(old_bf->bf_state.bfs_seqno); 2533
	ATH_TX_LOCK_ASSERT(sc);
2536 2537	tap = ath_tx_get_tx_tid(an, tid->tid); 2538	index  = ATH_BA_INDEX(tap->txa_start, seqno); 2539	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2540 2541	/* 2542	 * Just warn for now; if it happens then we should find out 2543	 * about it. It's highly likely the aggregation session will 2544	 * soon hang. 2545	 */ 2546	if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) { 2547		device_printf(sc->sc_dev, "%s: retransmitted buffer" 2548		    " has mismatched seqnos; BA session may hang.\n", 2549		    __func__); 2550		device_printf(sc->sc_dev, "%s: old seqno=%d, new_seqno=%d\n", 2551		    __func__, 2552		    old_bf->bf_state.bfs_seqno, 2553		    new_bf->bf_state.bfs_seqno); 2554	} 2555 2556	if (tid->tx_buf[cindex] != old_bf) { 2557		device_printf(sc->sc_dev, "%s: ath_buf pointer incorrect; " 2558		    "BA session may hang.\n", 2559		    __func__); 2560		device_printf(sc->sc_dev, "%s: old bf=%p, new bf=%p\n", 2561		    __func__, 2562		    old_bf, new_bf); 2563	} 2564 2565	tid->tx_buf[cindex] = new_bf; 2566} 2567 2568/* 2569 * seq_start - left edge of BAW 2570 * seq_next - current/next sequence number to allocate 2571 * 2572 * Since the BAW status may be modified by both the ath task and 2573 * the net80211/ifnet contexts, the TID must be locked. 2574 */ 2575static void 2576ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an, 2577    struct ath_tid *tid, const struct ath_buf *bf) 2578{ 2579	int index, cindex; 2580	struct ieee80211_tx_ampdu *tap; 2581	int seqno = SEQNO(bf->bf_state.bfs_seqno); 2582
	ATH_TX_LOCK_ASSERT(sc);
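	/*
	 * Note that the left edge only advances over completed (NULL)
	 * slots: an out-of-order completion leaves a hole at baw_head
	 * and the window stalls there until that slot's frame also
	 * completes.
	 */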
2585 2586 tap = ath_tx_get_tx_tid(an, tid->tid); 2587 index = ATH_BA_INDEX(tap->txa_start, seqno); 2588 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2589 2590 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2591 "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, " 2592 "baw head=%d, tail=%d\n", 2593 __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index, 2594 cindex, tid->baw_head, tid->baw_tail); 2595 2596 /* 2597 * If this occurs then we have a big problem - something else 2598 * has slid tap->txa_start along without updating the BAW 2599 * tracking start/end pointers. Thus the TX BAW state is now 2600 * completely busted. 2601 * 2602 * But for now, since I haven't yet fixed TDMA and buffer cloning, 2603 * it's quite possible that a cloned buffer is making its way 2604 * here and causing it to fire off. Disable TDMA for now. 2605 */ 2606 if (tid->tx_buf[cindex] != bf) { 2607 device_printf(sc->sc_dev, 2608 "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n", 2609 __func__, 2610 bf, SEQNO(bf->bf_state.bfs_seqno), 2611 tid->tx_buf[cindex], 2612 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno)); 2613 } 2614 2615 tid->tx_buf[cindex] = NULL; 2616 2617 while (tid->baw_head != tid->baw_tail && 2618 !tid->tx_buf[tid->baw_head]) { 2619 INCR(tap->txa_start, IEEE80211_SEQ_RANGE); 2620 INCR(tid->baw_head, ATH_TID_MAX_BUFS); 2621 } 2622 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2623 "%s: baw is now %d:%d, baw head=%d\n", 2624 __func__, tap->txa_start, tap->txa_wnd, tid->baw_head); 2625} 2626 2627/* 2628 * Mark the current node/TID as ready to TX. 2629 * 2630 * This is done to make it easy for the software scheduler to 2631 * find which nodes have data to send. 2632 * 2633 * The TXQ lock must be held. 2634 */ 2635static void 2636ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid) 2637{ 2638 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2639
	ATH_TX_LOCK_ASSERT(sc);
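	/*
	 * "Scheduling" here just flags the TID and appends it to the
	 * owning hardware queue's TID list (axq_tidq); the TX
	 * scheduler later walks that list to pull frames off the
	 * per-TID software queues.
	 */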
2641 2642 if (tid->paused) 2643 return; /* paused, can't schedule yet */ 2644 2645 if (tid->sched) 2646 return; /* already scheduled */ 2647 2648 tid->sched = 1; 2649 2650 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); 2651} 2652 2653/* 2654 * Mark the current node as no longer needing to be polled for 2655 * TX packets. 2656 * 2657 * The TXQ lock must be held. 2658 */ 2659static void 2660ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid) 2661{ 2662 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2663
	ATH_TX_LOCK_ASSERT(sc);
2665 2666 if (tid->sched == 0) 2667 return; 2668 2669 tid->sched = 0; 2670 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem); 2671} 2672 2673/* 2674 * Assign a sequence number manually to the given frame. 2675 * 2676 * This should only be called for A-MPDU TX frames. 2677 */ 2678static ieee80211_seq 2679ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni, 2680 struct ath_buf *bf, struct mbuf *m0) 2681{ 2682 struct ieee80211_frame *wh; 2683 int tid, pri; 2684 ieee80211_seq seqno; 2685 uint8_t subtype; 2686 2687 /* TID lookup */ 2688 wh = mtod(m0, struct ieee80211_frame *); 2689 pri = M_WME_GETAC(m0); /* honor classification */ 2690 tid = WME_AC_TO_TID(pri); 2691 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n", 2692 __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); 2693 2694 /* XXX Is it a control frame? Ignore */ 2695 2696 /* Does the packet require a sequence number? */ 2697 if (! IEEE80211_QOS_HAS_SEQ(wh)) 2698 return -1; 2699
	ATH_TX_LOCK_ASSERT(sc);
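	/*
	 * NB: sequence numbers occupy the top 12 bits of i_seq (the
	 * low four bits are the fragment number) and wrap at
	 * IEEE80211_SEQ_RANGE (4096) via INCR(); the value is also
	 * stashed in the mbuf with M_SEQNO_SET() so the BAW code can
	 * fetch it later without re-parsing the header.
	 */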
2701 2702 /* 2703 * Is it a QOS NULL Data frame? Give it a sequence number from 2704 * the default TID (IEEE80211_NONQOS_TID.) 2705 * 2706 * The RX path of everything I've looked at doesn't include the NULL 2707 * data frame sequence number in the aggregation state updates, so 2708 * assigning it a sequence number there will cause a BAW hole on the 2709 * RX side. 2710 */ 2711 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2712 if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) { 2713 /* XXX no locking for this TID? This is a bit of a problem. */ 2714 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; 2715 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); 2716 } else { 2717 /* Manually assign sequence number */ 2718 seqno = ni->ni_txseqs[tid]; 2719 INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE); 2720 } 2721 *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); 2722 M_SEQNO_SET(m0, seqno); 2723 2724 /* Return so caller can do something with it if needed */ 2725 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: -> seqno=%d\n", __func__, seqno); 2726 return seqno; 2727} 2728 2729/* 2730 * Attempt to direct dispatch an aggregate frame to hardware. 2731 * If the frame is out of BAW, queue. 2732 * Otherwise, schedule it as a single frame. 2733 */ 2734static void 2735ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an, 2736 struct ath_txq *txq, struct ath_buf *bf) 2737{ 2738 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 2739// struct ath_txq *txq = bf->bf_state.bfs_txq; 2740 struct ieee80211_tx_ampdu *tap; 2741 2742 if (txq != bf->bf_state.bfs_txq) { 2743 device_printf(sc->sc_dev, "%s: txq %d != bfs_txq %d!\n", 2744 __func__, 2745 txq->axq_qnum, 2746 bf->bf_state.bfs_txq->axq_qnum); 2747 } 2748
	ATH_TX_LOCK_ASSERT(sc);
| 2726 ATH_TX_LOCK_ASSERT(sc);
|
2751 2752 tap = ath_tx_get_tx_tid(an, tid->tid); 2753 2754 /* paused? queue */ 2755 if (tid->paused) { 2756 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 2757 /* XXX don't sched - we're paused! */ 2758 return; 2759 } 2760 2761 /* outside baw? queue */ 2762 if (bf->bf_state.bfs_dobaw && 2763 (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2764 SEQNO(bf->bf_state.bfs_seqno)))) { 2765 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 2766 ath_tx_tid_sched(sc, tid); 2767 return; 2768 } 2769 2770 /* 2771 * This is a temporary check and should be removed once 2772 * all the relevant code paths have been fixed. 2773 * 2774 * During aggregate retries, it's possible that the head 2775 * frame will fail (which has the bfs_aggr and bfs_nframes 2776 * fields set for said aggregate) and will be retried as 2777 * a single frame. In this instance, the values should 2778 * be reset or the completion code will get upset with you. 2779 */ 2780 if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) { 2781 device_printf(sc->sc_dev, "%s: bfs_aggr=%d, bfs_nframes=%d\n", 2782 __func__, 2783 bf->bf_state.bfs_aggr, 2784 bf->bf_state.bfs_nframes); 2785 bf->bf_state.bfs_aggr = 0; 2786 bf->bf_state.bfs_nframes = 1; 2787 } 2788 2789 /* Update CLRDMASK just before this frame is queued */ 2790 ath_tx_update_clrdmask(sc, tid, bf); 2791 2792 /* Direct dispatch to hardware */ 2793 ath_tx_do_ratelookup(sc, bf); 2794 ath_tx_calc_duration(sc, bf); 2795 ath_tx_calc_protection(sc, bf); 2796 ath_tx_set_rtscts(sc, bf); 2797 ath_tx_rate_fill_rcflags(sc, bf); 2798 ath_tx_setds(sc, bf); 2799 2800 /* Statistics */ 2801 sc->sc_aggr_stats.aggr_low_hwq_single_pkt++; 2802 2803 /* Track per-TID hardware queue depth correctly */ 2804 tid->hwq_depth++; 2805 2806 /* Add to BAW */ 2807 if (bf->bf_state.bfs_dobaw) { 2808 ath_tx_addto_baw(sc, an, tid, bf); 2809 bf->bf_state.bfs_addedbaw = 1; 2810 } 2811 2812 /* Set completion handler, multi-frame aggregate or not */ 2813 bf->bf_comp = ath_tx_aggr_comp; 2814 2815 /* Hand off to hardware */ 2816 ath_tx_handoff(sc, txq, bf); 2817} 2818 2819/* 2820 * Attempt to send the packet. 2821 * If the queue isn't busy, direct-dispatch. 2822 * If the queue is busy enough, queue the given packet on the 2823 * relevant software queue. 2824 */ 2825void 2826ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_txq *txq, 2827 struct ath_buf *bf) 2828{ 2829 struct ath_node *an = ATH_NODE(ni); 2830 struct ieee80211_frame *wh; 2831 struct ath_tid *atid; 2832 int pri, tid; 2833 struct mbuf *m0 = bf->bf_m; 2834
| 2727 2728 tap = ath_tx_get_tx_tid(an, tid->tid); 2729 2730 /* paused? queue */ 2731 if (tid->paused) { 2732 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 2733 /* XXX don't sched - we're paused! */ 2734 return; 2735 } 2736 2737 /* outside baw? queue */ 2738 if (bf->bf_state.bfs_dobaw && 2739 (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2740 SEQNO(bf->bf_state.bfs_seqno)))) { 2741 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 2742 ath_tx_tid_sched(sc, tid); 2743 return; 2744 } 2745 2746 /* 2747 * This is a temporary check and should be removed once 2748 * all the relevant code paths have been fixed. 2749 * 2750 * During aggregate retries, it's possible that the head 2751 * frame will fail (which has the bfs_aggr and bfs_nframes 2752 * fields set for said aggregate) and will be retried as 2753 * a single frame. In this instance, the values should 2754 * be reset or the completion code will get upset with you. 2755 */ 2756 if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) { 2757 device_printf(sc->sc_dev, "%s: bfs_aggr=%d, bfs_nframes=%d\n", 2758 __func__, 2759 bf->bf_state.bfs_aggr, 2760 bf->bf_state.bfs_nframes); 2761 bf->bf_state.bfs_aggr = 0; 2762 bf->bf_state.bfs_nframes = 1; 2763 } 2764 2765 /* Update CLRDMASK just before this frame is queued */ 2766 ath_tx_update_clrdmask(sc, tid, bf); 2767 2768 /* Direct dispatch to hardware */ 2769 ath_tx_do_ratelookup(sc, bf); 2770 ath_tx_calc_duration(sc, bf); 2771 ath_tx_calc_protection(sc, bf); 2772 ath_tx_set_rtscts(sc, bf); 2773 ath_tx_rate_fill_rcflags(sc, bf); 2774 ath_tx_setds(sc, bf); 2775 2776 /* Statistics */ 2777 sc->sc_aggr_stats.aggr_low_hwq_single_pkt++; 2778 2779 /* Track per-TID hardware queue depth correctly */ 2780 tid->hwq_depth++; 2781 2782 /* Add to BAW */ 2783 if (bf->bf_state.bfs_dobaw) { 2784 ath_tx_addto_baw(sc, an, tid, bf); 2785 bf->bf_state.bfs_addedbaw = 1; 2786 } 2787 2788 /* Set completion handler, multi-frame aggregate or not */ 2789 bf->bf_comp = ath_tx_aggr_comp; 2790 2791 /* Hand off to hardware */ 2792 ath_tx_handoff(sc, txq, bf); 2793} 2794 2795/* 2796 * Attempt to send the packet. 2797 * If the queue isn't busy, direct-dispatch. 2798 * If the queue is busy enough, queue the given packet on the 2799 * relevant software queue. 2800 */ 2801void 2802ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_txq *txq, 2803 struct ath_buf *bf) 2804{ 2805 struct ath_node *an = ATH_NODE(ni); 2806 struct ieee80211_frame *wh; 2807 struct ath_tid *atid; 2808 int pri, tid; 2809 struct mbuf *m0 = bf->bf_m; 2810
|
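/*
 * Illustrative sketch (not driver code): the "outside baw? queue"
 * test above.  A sequence number is inside the block-ack window when
 * its modulo-4096 distance from the window's left edge (txa_start)
 * is less than the window size (txa_wnd).  Placeholder names:
 */
static int
baw_within(int start, int wnd, int seqno)
{

	/* Wrapped distance from the left edge of the window */
	return (((seqno - start) & 4095) < wnd);
}
/* e.g. start=4090, wnd=64: seqno 5 has distance 11 -> inside. */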
2835 ATH_TXQ_LOCK_ASSERT(txq);
| 2811 ATH_TX_LOCK_ASSERT(sc);
|
2836 2837 /* Fetch the TID - non-QoS frames get assigned to TID 16 */ 2838 wh = mtod(m0, struct ieee80211_frame *); 2839 pri = ath_tx_getac(sc, m0); 2840 tid = ath_tx_gettid(sc, m0); 2841 atid = &an->an_tid[tid]; 2842
| 2812 2813 /* Fetch the TID - non-QoS frames get assigned to TID 16 */ 2814 wh = mtod(m0, struct ieee80211_frame *); 2815 pri = ath_tx_getac(sc, m0); 2816 tid = ath_tx_gettid(sc, m0); 2817 atid = &an->an_tid[tid]; 2818
|
2843 ATH_TID_LOCK_ASSERT(sc, atid); 2844
| |
2845 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n", 2846 __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); 2847 2848 /* Set local packet state, used to queue packets to hardware */ 2849 /* XXX potentially duplicate info, re-check */ 2850 /* XXX remember, txq must be the hardware queue, not the av_mcastq */ 2851 bf->bf_state.bfs_tid = tid; 2852 bf->bf_state.bfs_txq = txq; 2853 bf->bf_state.bfs_pri = pri; 2854 2855 /* 2856 * If the hardware queue isn't busy, direct dispatch it. 2857 * If the hardware queue is busy, software queue it. 2858 * If the TID is paused or the traffic is outside the BAW, software 2859 * queue it. 2860 */ 2861 if (atid->paused) { 2862 /* TID is paused, queue */ 2863 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__); 2864 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 2865 } else if (ath_tx_ampdu_pending(sc, an, tid)) { 2866 /* AMPDU pending; queue */ 2867 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__); 2868 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 2869 /* XXX sched? */ 2870 } else if (ath_tx_ampdu_running(sc, an, tid)) { 2871 /* AMPDU running, attempt direct dispatch if possible */ 2872 2873 /* 2874 * Always queue the frame to the tail of the list. 2875 */ 2876 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 2877 2878 /* 2879 * If the hardware queue isn't busy, direct dispatch 2880 * the head frame in the list. Don't schedule the 2881 * TID - let it build some more frames first? 2882 * 2883 * Otherwise, schedule the TID. 2884 */ 2885 if (txq->axq_depth < sc->sc_hwq_limit) { 2886 bf = ATH_TID_FIRST(atid); 2887 ATH_TID_REMOVE(atid, bf, bf_list); 2888 2889 /* 2890 * Ensure it's definitely treated as a non-AMPDU 2891 * frame - this information may have been left 2892 * over from a previous attempt. 2893 */ 2894 bf->bf_state.bfs_aggr = 0; 2895 bf->bf_state.bfs_nframes = 1; 2896 2897 /* Queue to the hardware */ 2898 ath_tx_xmit_aggr(sc, an, txq, bf); 2899 DPRINTF(sc, ATH_DEBUG_SW_TX, 2900 "%s: xmit_aggr\n", 2901 __func__); 2902 } else { 2903 DPRINTF(sc, ATH_DEBUG_SW_TX, 2904 "%s: ampdu; swq'ing\n", 2905 __func__); 2906 2907 ath_tx_tid_sched(sc, atid); 2908 } 2909 } else if (txq->axq_depth < sc->sc_hwq_limit) { 2910 /* AMPDU not running, attempt direct dispatch */ 2911 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__); 2912 /* See if clrdmask needs to be set */ 2913 ath_tx_update_clrdmask(sc, atid, bf); 2914 ath_tx_xmit_normal(sc, txq, bf); 2915 } else { 2916 /* Busy; queue */ 2917 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__); 2918 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 2919 ath_tx_tid_sched(sc, atid); 2920 } 2921} 2922 2923/* 2924 * Configure the per-TID node state. 2925 * 2926 * This likely belongs in if_ath_node.c but I can't think of anywhere 2927 * else to put it just yet. 2928 * 2929 * This sets up the TAILQs and the per-TID state as appropriate. 2930 */ 2931void 2932ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an) 2933{ 2934 int i, j; 2935 struct ath_tid *atid; 2936 2937 for (i = 0; i < IEEE80211_TID_SIZE; i++) { 2938 atid = &an->an_tid[i]; 2939 2940 /* XXX now with this bzero(), is the field 0'ing needed? 
*/ 2941 bzero(atid, sizeof(*atid)); 2942 2943 TAILQ_INIT(&atid->tid_q); 2944 TAILQ_INIT(&atid->filtq.tid_q); 2945 atid->tid = i; 2946 atid->an = an; 2947 for (j = 0; j < ATH_TID_MAX_BUFS; j++) 2948 atid->tx_buf[j] = NULL; 2949 atid->baw_head = atid->baw_tail = 0; 2950 atid->paused = 0; 2951 atid->sched = 0; 2952 atid->hwq_depth = 0; 2953 atid->cleanup_inprogress = 0; 2954 atid->clrdmask = 1; /* Always start by setting this bit */ 2955 if (i == IEEE80211_NONQOS_TID) 2956 atid->ac = ATH_NONQOS_TID_AC; 2957 else 2958 atid->ac = TID_TO_WME_AC(i); 2959 } 2960} 2961 2962/* 2963 * Pause the current TID. This stops packets from being transmitted 2964 * on it. 2965 * 2966 * Since this is called from upper layers as well as the driver, 2967 * it is called with the TID lock held. 2968 */ 2969static void 2970ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid) 2971{ 2972
 | 2819 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n", 2820 __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); 2821 2822 /* Set local packet state, used to queue packets to hardware */ 2823 /* XXX potentially duplicate info, re-check */ 2824 /* XXX remember, txq must be the hardware queue, not the av_mcastq */ 2825 bf->bf_state.bfs_tid = tid; 2826 bf->bf_state.bfs_txq = txq; 2827 bf->bf_state.bfs_pri = pri; 2828 2829 /* 2830 * If the hardware queue isn't busy, direct dispatch it. 2831 * If the hardware queue is busy, software queue it. 2832 * If the TID is paused or the traffic is outside the BAW, software 2833 * queue it. 2834 */ 2835 if (atid->paused) { 2836 /* TID is paused, queue */ 2837 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__); 2838 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 2839 } else if (ath_tx_ampdu_pending(sc, an, tid)) { 2840 /* AMPDU pending; queue */ 2841 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__); 2842 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 2843 /* XXX sched? */ 2844 } else if (ath_tx_ampdu_running(sc, an, tid)) { 2845 /* AMPDU running, attempt direct dispatch if possible */ 2846 2847 /* 2848 * Always queue the frame to the tail of the list. 2849 */ 2850 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 2851 2852 /* 2853 * If the hardware queue isn't busy, direct dispatch 2854 * the head frame in the list. Don't schedule the 2855 * TID - let it build some more frames first? 2856 * 2857 * Otherwise, schedule the TID. 2858 */ 2859 if (txq->axq_depth < sc->sc_hwq_limit) { 2860 bf = ATH_TID_FIRST(atid); 2861 ATH_TID_REMOVE(atid, bf, bf_list); 2862 2863 /* 2864 * Ensure it's definitely treated as a non-AMPDU 2865 * frame - this information may have been left 2866 * over from a previous attempt. 2867 */ 2868 bf->bf_state.bfs_aggr = 0; 2869 bf->bf_state.bfs_nframes = 1; 2870 2871 /* Queue to the hardware */ 2872 ath_tx_xmit_aggr(sc, an, txq, bf); 2873 DPRINTF(sc, ATH_DEBUG_SW_TX, 2874 "%s: xmit_aggr\n", 2875 __func__); 2876 } else { 2877 DPRINTF(sc, ATH_DEBUG_SW_TX, 2878 "%s: ampdu; swq'ing\n", 2879 __func__); 2880 2881 ath_tx_tid_sched(sc, atid); 2882 } 2883 } else if (txq->axq_depth < sc->sc_hwq_limit) { 2884 /* AMPDU not running, attempt direct dispatch */ 2885 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__); 2886 /* See if clrdmask needs to be set */ 2887 ath_tx_update_clrdmask(sc, atid, bf); 2888 ath_tx_xmit_normal(sc, txq, bf); 2889 } else { 2890 /* Busy; queue */ 2891 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__); 2892 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 2893 ath_tx_tid_sched(sc, atid); 2894 } 2895} 2896 2897/* 2898 * Configure the per-TID node state. 2899 * 2900 * This likely belongs in if_ath_node.c but I can't think of anywhere 2901 * else to put it just yet. 2902 * 2903 * This sets up the TAILQs and the per-TID state as appropriate. 2904 */ 2905void 2906ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an) 2907{ 2908 int i, j; 2909 struct ath_tid *atid; 2910 2911 for (i = 0; i < IEEE80211_TID_SIZE; i++) { 2912 atid = &an->an_tid[i]; 2913 2914 /* XXX now with this bzero(), is the field 0'ing needed? 
*/ 2915 bzero(atid, sizeof(*atid)); 2916 2917 TAILQ_INIT(&atid->tid_q); 2918 TAILQ_INIT(&atid->filtq.tid_q); 2919 atid->tid = i; 2920 atid->an = an; 2921 for (j = 0; j < ATH_TID_MAX_BUFS; j++) 2922 atid->tx_buf[j] = NULL; 2923 atid->baw_head = atid->baw_tail = 0; 2924 atid->paused = 0; 2925 atid->sched = 0; 2926 atid->hwq_depth = 0; 2927 atid->cleanup_inprogress = 0; 2928 atid->clrdmask = 1; /* Always start by setting this bit */ 2929 if (i == IEEE80211_NONQOS_TID) 2930 atid->ac = ATH_NONQOS_TID_AC; 2931 else 2932 atid->ac = TID_TO_WME_AC(i); 2933 } 2934} 2935 2936/* 2937 * Pause the current TID. This stops packets from being transmitted 2938 * on it. 2939 * 2940 * Since this is called from upper layers as well as the driver, 2941 * it is called with the TID lock held. 2942 */ 2943static void 2944ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid) 2945{ 2946
|
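/*
 * Illustrative sketch (not driver code): the queueing policy of
 * ath_tx_swq() above, restated as a pure decision function so the
 * ordering of the checks is explicit.  In the AMPDU-running case the
 * driver actually tail-queues the frame and then direct-dispatches
 * the head of the list; that detail is elided.  Placeholder names:
 */
enum swq_action {
	SWQ_SWQUEUE,		/* software queue, don't schedule */
	SWQ_SWQUEUE_SCHED,	/* software queue and schedule the TID */
	SWQ_DIRECT		/* direct dispatch to the hardware */
};

static enum swq_action
swq_decide(int paused, int ampdu_pending, int ampdu_running,
    int axq_depth, int hwq_limit)
{

	if (paused)
		return (SWQ_SWQUEUE);		/* wait for resume */
	if (ampdu_pending)
		return (SWQ_SWQUEUE);		/* ADDBA still in flight */
	if (ampdu_running)
		return (axq_depth < hwq_limit ?
		    SWQ_DIRECT : SWQ_SWQUEUE_SCHED);
	return (axq_depth < hwq_limit ? SWQ_DIRECT : SWQ_SWQUEUE_SCHED);
}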
2973 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
| 2947 ATH_TX_LOCK_ASSERT(sc);
|
2974 tid->paused++; 2975 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: paused = %d\n", 2976 __func__, tid->paused); 2977} 2978 2979/* 2980 * Unpause the current TID, and schedule it if needed. 2981 */ 2982static void 2983ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid) 2984{
| 2948 tid->paused++; 2949 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: paused = %d\n", 2950 __func__, tid->paused); 2951} 2952 2953/* 2954 * Unpause the current TID, and schedule it if needed. 2955 */ 2956static void 2957ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid) 2958{
|
2985 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
| |
2986
| 2959
|
| 2960 ATH_TX_LOCK_ASSERT(sc); 2961
|
2987 tid->paused--; 2988 2989 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: unpaused = %d\n", 2990 __func__, tid->paused); 2991 2992 if (tid->paused) 2993 return; 2994 2995 /* 2996 * Override the clrdmask configuration for the next frame 2997 * from this TID, just to get the ball rolling. 2998 */ 2999 tid->clrdmask = 1; 3000 3001 if (tid->axq_depth == 0) 3002 return; 3003 3004 /* XXX isfiltered should never be 1 at this point */ 3005 if (tid->isfiltered == 1) { 3006 device_printf(sc->sc_dev, "%s: filtered?!\n", __func__); 3007 return; 3008 } 3009 3010 ath_tx_tid_sched(sc, tid); 3011 /* Punt some frames to the hardware if needed */ 3012 //ath_txq_sched(sc, sc->sc_ac2q[tid->ac]); 3013 taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask); 3014} 3015 3016/* 3017 * Add the given ath_buf to the TID filtered frame list. 3018 * This requires the TID be filtered. 3019 */ 3020static void 3021ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid, 3022 struct ath_buf *bf) 3023{ 3024
 | 2962 tid->paused--; 2963 2964 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: unpaused = %d\n", 2965 __func__, tid->paused); 2966 2967 if (tid->paused) 2968 return; 2969 2970 /* 2971 * Override the clrdmask configuration for the next frame 2972 * from this TID, just to get the ball rolling. 2973 */ 2974 tid->clrdmask = 1; 2975 2976 if (tid->axq_depth == 0) 2977 return; 2978 2979 /* XXX isfiltered should never be 1 at this point */ 2980 if (tid->isfiltered == 1) { 2981 device_printf(sc->sc_dev, "%s: filtered?!\n", __func__); 2982 return; 2983 } 2984 2985 ath_tx_tid_sched(sc, tid); 2986 /* Punt some frames to the hardware if needed */ 2987 //ath_txq_sched(sc, sc->sc_ac2q[tid->ac]); 2988 taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask); 2989} 2990 2991/* 2992 * Add the given ath_buf to the TID filtered frame list. 2993 * This requires the TID be filtered. 2994 */ 2995static void 2996ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid, 2997 struct ath_buf *bf) 2998{ 2999
|
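/*
 * Illustrative sketch (not driver code): the pause/resume pair above
 * is a counter, not a flag.  Every pause must eventually be matched
 * by exactly one resume, and the TID only becomes eligible for
 * scheduling again when the count drains back to zero:
 */
struct pause_count { int paused; };

static void
pause_tid(struct pause_count *p)
{

	p->paused++;		/* several callers may each hold a pause */
}

static int
resume_tid(struct pause_count *p)
{

	/* Returns 1 when the last outstanding pause is released */
	return (--p->paused == 0);
}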
3025 ATH_TID_LOCK_ASSERT(sc, tid);
| 3000 ATH_TX_LOCK_ASSERT(sc); 3001
|
3026 if (! tid->isfiltered) 3027 device_printf(sc->sc_dev, "%s: not filtered?!\n", __func__); 3028 3029 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf); 3030 3031 /* Set the retry bit and bump the retry counter */ 3032 ath_tx_set_retry(sc, bf); 3033 sc->sc_stats.ast_tx_swfiltered++; 3034 3035 ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list); 3036} 3037 3038/* 3039 * Handle a completed filtered frame from the given TID. 3040 * This just enables/pauses the filtered frame state if required 3041 * and appends the filtered frame to the filtered queue. 3042 */ 3043static void 3044ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid, 3045 struct ath_buf *bf) 3046{ 3047
| 3002 if (! tid->isfiltered) 3003 device_printf(sc->sc_dev, "%s: not filtered?!\n", __func__); 3004 3005 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf); 3006 3007 /* Set the retry bit and bump the retry counter */ 3008 ath_tx_set_retry(sc, bf); 3009 sc->sc_stats.ast_tx_swfiltered++; 3010 3011 ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list); 3012} 3013 3014/* 3015 * Handle a completed filtered frame from the given TID. 3016 * This just enables/pauses the filtered frame state if required 3017 * and appends the filtered frame to the filtered queue. 3018 */ 3019static void 3020ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid, 3021 struct ath_buf *bf) 3022{ 3023
|
3048 ATH_TID_LOCK_ASSERT(sc, tid);
| 3024 ATH_TX_LOCK_ASSERT(sc);
|
3049 3050 if (! tid->isfiltered) { 3051 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: filter transition\n", 3052 __func__); 3053 tid->isfiltered = 1; 3054 ath_tx_tid_pause(sc, tid); 3055 } 3056 3057 /* Add the frame to the filter queue */ 3058 ath_tx_tid_filt_addbuf(sc, tid, bf); 3059} 3060 3061/* 3062 * Complete the filtered frame TX completion. 3063 * 3064 * If there are no more frames in the hardware queue, unpause/unfilter 3065 * the TID if applicable. Otherwise we will wait for a node PS transition 3066 * to unfilter. 3067 */ 3068static void 3069ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid) 3070{ 3071 struct ath_buf *bf; 3072
| 3025 3026 if (! tid->isfiltered) { 3027 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: filter transition\n", 3028 __func__); 3029 tid->isfiltered = 1; 3030 ath_tx_tid_pause(sc, tid); 3031 } 3032 3033 /* Add the frame to the filter queue */ 3034 ath_tx_tid_filt_addbuf(sc, tid, bf); 3035} 3036 3037/* 3038 * Complete the filtered frame TX completion. 3039 * 3040 * If there are no more frames in the hardware queue, unpause/unfilter 3041 * the TID if applicable. Otherwise we will wait for a node PS transition 3042 * to unfilter. 3043 */ 3044static void 3045ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid) 3046{ 3047 struct ath_buf *bf; 3048
|
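/*
 * Illustrative sketch (not driver code): the filtered state above is
 * entered at most once per episode.  Only the first filtered
 * completion flips isfiltered and takes the single matching pause;
 * every later filtered frame is just appended.  Placeholder names:
 */
static void
filt_enter(int *isfiltered, int *paused)
{

	if (*isfiltered == 0) {
		*isfiltered = 1;
		(*paused)++;	/* the one pause for this episode */
	}
	/* ... then append the frame to the filtered list ... */
}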
3073 ATH_TID_LOCK_ASSERT(sc, tid);
| 3049 ATH_TX_LOCK_ASSERT(sc);
|
3074 3075 if (tid->hwq_depth != 0) 3076 return; 3077 3078 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: hwq=0, transition back\n", 3079 __func__); 3080 tid->isfiltered = 0; 3081 tid->clrdmask = 1; 3082 3083 /* XXX this is really quite inefficient */ 3084 while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) { 3085 ATH_TID_FILT_REMOVE(tid, bf, bf_list); 3086 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3087 } 3088 3089 ath_tx_tid_resume(sc, tid); 3090} 3091 3092/* 3093 * Called when a single (aggregate or otherwise) frame is completed. 3094 * 3095 * Returns 1 if the buffer could be added to the filtered list 3096 * (cloned or otherwise), 0 if the buffer couldn't be added to the 3097 * filtered list (failed clone; expired retry) and the caller should 3098 * free it and handle it like a failure (eg by sending a BAR.) 3099 */ 3100static int 3101ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid, 3102 struct ath_buf *bf) 3103{ 3104 struct ath_buf *nbf; 3105 int retval; 3106
| 3050 3051 if (tid->hwq_depth != 0) 3052 return; 3053 3054 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: hwq=0, transition back\n", 3055 __func__); 3056 tid->isfiltered = 0; 3057 tid->clrdmask = 1; 3058 3059 /* XXX this is really quite inefficient */ 3060 while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) { 3061 ATH_TID_FILT_REMOVE(tid, bf, bf_list); 3062 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3063 } 3064 3065 ath_tx_tid_resume(sc, tid); 3066} 3067 3068/* 3069 * Called when a single (aggregate or otherwise) frame is completed. 3070 * 3071 * Returns 1 if the buffer could be added to the filtered list 3072 * (cloned or otherwise), 0 if the buffer couldn't be added to the 3073 * filtered list (failed clone; expired retry) and the caller should 3074 * free it and handle it like a failure (eg by sending a BAR.) 3075 */ 3076static int 3077ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid, 3078 struct ath_buf *bf) 3079{ 3080 struct ath_buf *nbf; 3081 int retval; 3082
|
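/*
 * Illustrative sketch (not driver code): the requeue loop above pops
 * the filtered list from its *tail* and pushes onto the *head* of
 * the software queue, which keeps the filtered frames in their
 * original order and ahead of any traffic queued meanwhile:
 */
#include <stdio.h>

int
main(void)
{
	int filtq[] = { 1, 2, 3 };	/* filtered frames, oldest first */
	int swq[8] = { 4, 5 };		/* frames queued while filtered */
	int n = 2, i, j;

	for (i = 2; i >= 0; i--) {	/* FILT_LAST -> INSERT_HEAD */
		for (j = n; j > 0; j--)
			swq[j] = swq[j - 1];
		swq[0] = filtq[i];
		n++;
	}
	for (i = 0; i < n; i++)
		printf("%d ", swq[i]);	/* prints: 1 2 3 4 5 */
	printf("\n");
	return (0);
}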
3107 ATH_TID_LOCK_ASSERT(sc, tid);
| 3083 ATH_TX_LOCK_ASSERT(sc);
|
3108 3109 /* 3110 * Don't allow a filtered frame to live forever. 3111 */ 3112 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3113 sc->sc_stats.ast_tx_swretrymax++; 3114 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3115 "%s: bf=%p, seqno=%d, exceeded retries\n", 3116 __func__, 3117 bf, 3118 bf->bf_state.bfs_seqno); 3119 return (0); 3120 } 3121 3122 /* 3123 * A busy buffer can't be added to the retry list. 3124 * It needs to be cloned. 3125 */ 3126 if (bf->bf_flags & ATH_BUF_BUSY) { 3127 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3128 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3129 "%s: busy buffer clone: %p -> %p\n", 3130 __func__, bf, nbf); 3131 } else { 3132 nbf = bf; 3133 } 3134 3135 if (nbf == NULL) { 3136 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3137 "%s: busy buffer couldn't be cloned (%p)!\n", 3138 __func__, bf); 3139 retval = 1; 3140 } else { 3141 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3142 retval = 0; 3143 } 3144 ath_tx_tid_filt_comp_complete(sc, tid); 3145 3146 return (retval); 3147} 3148 3149static void 3150ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid, 3151 struct ath_buf *bf_first, ath_bufhead *bf_q) 3152{ 3153 struct ath_buf *bf, *bf_next, *nbf; 3154
| 3084 3085 /* 3086 * Don't allow a filtered frame to live forever. 3087 */ 3088 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3089 sc->sc_stats.ast_tx_swretrymax++; 3090 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3091 "%s: bf=%p, seqno=%d, exceeded retries\n", 3092 __func__, 3093 bf, 3094 bf->bf_state.bfs_seqno); 3095 return (0); 3096 } 3097 3098 /* 3099 * A busy buffer can't be added to the retry list. 3100 * It needs to be cloned. 3101 */ 3102 if (bf->bf_flags & ATH_BUF_BUSY) { 3103 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3104 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3105 "%s: busy buffer clone: %p -> %p\n", 3106 __func__, bf, nbf); 3107 } else { 3108 nbf = bf; 3109 } 3110 3111 if (nbf == NULL) { 3112 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3113 "%s: busy buffer couldn't be cloned (%p)!\n", 3114 __func__, bf); 3115 retval = 1; 3116 } else { 3117 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3118 retval = 0; 3119 } 3120 ath_tx_tid_filt_comp_complete(sc, tid); 3121 3122 return (retval); 3123} 3124 3125static void 3126ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid, 3127 struct ath_buf *bf_first, ath_bufhead *bf_q) 3128{ 3129 struct ath_buf *bf, *bf_next, *nbf; 3130
|
3155 ATH_TID_LOCK_ASSERT(sc, tid);
| 3131 ATH_TX_LOCK_ASSERT(sc);
|
3156 3157 bf = bf_first; 3158 while (bf) { 3159 bf_next = bf->bf_next; 3160 bf->bf_next = NULL; /* Remove it from the aggr list */ 3161 3162 /* 3163 * Don't allow a filtered frame to live forever. 3164 */ 3165 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3166 sc->sc_stats.ast_tx_swretrymax++; 3167 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3168 "%s: bf=%p, seqno=%d, exceeded retries\n", 3169 __func__, 3170 bf, 3171 bf->bf_state.bfs_seqno); 3172 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3173 goto next; 3174 } 3175 3176 if (bf->bf_flags & ATH_BUF_BUSY) { 3177 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3178 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3179 "%s: busy buffer cloned: %p -> %p\n", 3180 __func__, bf, nbf); 3181 } else { 3182 nbf = bf; 3183 } 3184 3185 /* 3186 * If the buffer couldn't be cloned, add it to bf_q; 3187 * the caller will free the buffer(s) as required. 3188 */ 3189 if (nbf == NULL) { 3190 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3191 "%s: buffer couldn't be cloned! (%p)\n", 3192 __func__, bf); 3193 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3194 } else { 3195 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3196 } 3197next: 3198 bf = bf_next; 3199 } 3200 3201 ath_tx_tid_filt_comp_complete(sc, tid); 3202} 3203 3204/* 3205 * Suspend the queue because we need to TX a BAR. 3206 */ 3207static void 3208ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid) 3209{
 | 3132 3133 bf = bf_first; 3134 while (bf) { 3135 bf_next = bf->bf_next; 3136 bf->bf_next = NULL; /* Remove it from the aggr list */ 3137 3138 /* 3139 * Don't allow a filtered frame to live forever. 3140 */ 3141 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3142 sc->sc_stats.ast_tx_swretrymax++; 3143 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3144 "%s: bf=%p, seqno=%d, exceeded retries\n", 3145 __func__, 3146 bf, 3147 bf->bf_state.bfs_seqno); 3148 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3149 goto next; 3150 } 3151 3152 if (bf->bf_flags & ATH_BUF_BUSY) { 3153 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3154 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3155 "%s: busy buffer cloned: %p -> %p\n", 3156 __func__, bf, nbf); 3157 } else { 3158 nbf = bf; 3159 } 3160 3161 /* 3162 * If the buffer couldn't be cloned, add it to bf_q; 3163 * the caller will free the buffer(s) as required. 3164 */ 3165 if (nbf == NULL) { 3166 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3167 "%s: buffer couldn't be cloned! (%p)\n", 3168 __func__, bf); 3169 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3170 } else { 3171 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3172 } 3173next: 3174 bf = bf_next; 3175 } 3176 3177 ath_tx_tid_filt_comp_complete(sc, tid); 3178} 3179 3180/* 3181 * Suspend the queue because we need to TX a BAR. 3182 */ 3183static void 3184ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid) 3185{
|
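/*
 * Illustrative sketch (not driver code): the clone-or-reuse decision
 * used for retries above.  A buffer the hardware may still touch
 * (ATH_BUF_BUSY) can't be edited in place, so a clone is retried
 * instead; a NULL clone means the caller must fail the frame (and
 * likely send a BAR).  Placeholder names:
 */
struct buf;
typedef struct buf *(*clone_fn)(struct buf *);

static struct buf *
retry_buf(struct buf *bf, int busy, clone_fn clone)
{

	if (busy)
		return (clone(bf));	/* may be NULL: caller fails it */
	return (bf);			/* safe to reuse in place */
}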
3210 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
| |
3211
| 3186
|
| 3187 ATH_TX_LOCK_ASSERT(sc); 3188
|
3212 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3213 "%s: tid=%p, bar_wait=%d, bar_tx=%d, called\n", 3214 __func__, 3215 tid, 3216 tid->bar_wait, 3217 tid->bar_tx); 3218 3219 /* We shouldn't be called when bar_tx is 1 */ 3220 if (tid->bar_tx) { 3221 device_printf(sc->sc_dev, "%s: bar_tx is 1?!\n", 3222 __func__); 3223 } 3224 3225 /* If we've already been called, just be patient. */ 3226 if (tid->bar_wait) 3227 return; 3228 3229 /* Wait! */ 3230 tid->bar_wait = 1; 3231 3232 /* Only one pause, no matter how many frames fail */ 3233 ath_tx_tid_pause(sc, tid); 3234} 3235 3236/* 3237 * We've finished with BAR handling - either we succeeded or 3238 * failed. Either way, unsuspend TX. 3239 */ 3240static void 3241ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid) 3242{
| 3189 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3190 "%s: tid=%p, bar_wait=%d, bar_tx=%d, called\n", 3191 __func__, 3192 tid, 3193 tid->bar_wait, 3194 tid->bar_tx); 3195 3196 /* We shouldn't be called when bar_tx is 1 */ 3197 if (tid->bar_tx) { 3198 device_printf(sc->sc_dev, "%s: bar_tx is 1?!\n", 3199 __func__); 3200 } 3201 3202 /* If we've already been called, just be patient. */ 3203 if (tid->bar_wait) 3204 return; 3205 3206 /* Wait! */ 3207 tid->bar_wait = 1; 3208 3209 /* Only one pause, no matter how many frames fail */ 3210 ath_tx_tid_pause(sc, tid); 3211} 3212 3213/* 3214 * We've finished with BAR handling - either we succeeded or 3215 * failed. Either way, unsuspend TX. 3216 */ 3217static void 3218ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid) 3219{
|
3243 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
| |
3244
| 3220
|
| 3221 ATH_TX_LOCK_ASSERT(sc); 3222
|
3245 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3246 "%s: tid=%p, called\n", 3247 __func__, 3248 tid); 3249 3250 if (tid->bar_tx == 0 || tid->bar_wait == 0) { 3251 device_printf(sc->sc_dev, "%s: bar_tx=%d, bar_wait=%d: ?\n", 3252 __func__, tid->bar_tx, tid->bar_wait); 3253 } 3254 3255 tid->bar_tx = tid->bar_wait = 0; 3256 ath_tx_tid_resume(sc, tid); 3257} 3258 3259/* 3260 * Return whether we're ready to TX a BAR frame. 3261 * 3262 * Requires the TID lock be held. 3263 */ 3264static int 3265ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid) 3266{ 3267
| 3223 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3224 "%s: tid=%p, called\n", 3225 __func__, 3226 tid); 3227 3228 if (tid->bar_tx == 0 || tid->bar_wait == 0) { 3229 device_printf(sc->sc_dev, "%s: bar_tx=%d, bar_wait=%d: ?\n", 3230 __func__, tid->bar_tx, tid->bar_wait); 3231 } 3232 3233 tid->bar_tx = tid->bar_wait = 0; 3234 ath_tx_tid_resume(sc, tid); 3235} 3236 3237/* 3238 * Return whether we're ready to TX a BAR frame. 3239 * 3240 * Requires the TID lock be held. 3241 */ 3242static int 3243ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid) 3244{ 3245
|
3268 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
| 3246 ATH_TX_LOCK_ASSERT(sc);
|
3269 3270 if (tid->bar_wait == 0 || tid->hwq_depth > 0) 3271 return (0); 3272 3273 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: tid=%p (%d), bar ready\n", 3274 __func__, tid, tid->tid); 3275 3276 return (1); 3277} 3278 3279/* 3280 * Check whether the current TID is ready to have a BAR 3281 * TXed and if so, do the TX. 3282 * 3283 * Since the TID/TXQ lock can't be held during a call to 3284 * ieee80211_send_bar(), we have to do the dirty thing of unlocking it, 3285 * sending the BAR and locking it again. 3286 * 3287 * Eventually, the code to send the BAR should be broken out 3288 * from this routine so the lock doesn't have to be reacquired 3289 * just to be immediately dropped by the caller. 3290 */ 3291static void 3292ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid) 3293{ 3294 struct ieee80211_tx_ampdu *tap; 3295
| 3247 3248 if (tid->bar_wait == 0 || tid->hwq_depth > 0) 3249 return (0); 3250 3251 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: tid=%p (%d), bar ready\n", 3252 __func__, tid, tid->tid); 3253 3254 return (1); 3255} 3256 3257/* 3258 * Check whether the current TID is ready to have a BAR 3259 * TXed and if so, do the TX. 3260 * 3261 * Since the TID/TXQ lock can't be held during a call to 3262 * ieee80211_send_bar(), we have to do the dirty thing of unlocking it, 3263 * sending the BAR and locking it again. 3264 * 3265 * Eventually, the code to send the BAR should be broken out 3266 * from this routine so the lock doesn't have to be reacquired 3267 * just to be immediately dropped by the caller. 3268 */ 3269static void 3270ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid) 3271{ 3272 struct ieee80211_tx_ampdu *tap; 3273
|
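/*
 * Illustrative sketch (not driver code): bar_wait/bar_tx as the tiny
 * state machine the functions above implement.  Suspend sets
 * bar_wait (taking the single pause); the BAR may only be sent once
 * bar_wait is set, bar_tx is clear and the hardware queue for the
 * TID has drained; unsuspend clears both and resumes.  Placeholder
 * names throughout:
 */
struct bar_state { int bar_wait, bar_tx; };

static void
bar_suspend(struct bar_state *b)
{

	if (b->bar_wait == 0)
		b->bar_wait = 1;	/* pause the TID exactly once */
}

static int
bar_tx_ready(const struct bar_state *b, int hwq_depth)
{

	return (b->bar_wait != 0 && b->bar_tx == 0 && hwq_depth == 0);
}

static void
bar_unsuspend(struct bar_state *b)
{

	b->bar_tx = b->bar_wait = 0;	/* resume the TID here */
}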
3296 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
| 3274 ATH_TX_LOCK_ASSERT(sc);
|
3297 3298 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3299 "%s: tid=%p, called\n", 3300 __func__, 3301 tid); 3302 3303 tap = ath_tx_get_tx_tid(tid->an, tid->tid); 3304 3305 /* 3306 * This is an error condition! 3307 */ 3308 if (tid->bar_wait == 0 || tid->bar_tx == 1) { 3309 device_printf(sc->sc_dev, 3310 "%s: tid=%p, bar_tx=%d, bar_wait=%d: ?\n", 3311 __func__, 3312 tid, 3313 tid->bar_tx, 3314 tid->bar_wait); 3315 return; 3316 } 3317 3318 /* Don't do anything if we still have pending frames */ 3319 if (tid->hwq_depth > 0) { 3320 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3321 "%s: tid=%p, hwq_depth=%d, waiting\n", 3322 __func__, 3323 tid, 3324 tid->hwq_depth); 3325 return; 3326 } 3327 3328 /* We're now about to TX */ 3329 tid->bar_tx = 1; 3330 3331 /* 3332 * Override the clrdmask configuration for the next frame, 3333 * just to get the ball rolling. 3334 */ 3335 tid->clrdmask = 1; 3336 3337 /* 3338 * Calculate new BAW left edge, now that all frames have either 3339 * succeeded or failed. 3340 * 3341 * XXX verify this is _actually_ the valid value to begin at! 3342 */ 3343 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3344 "%s: tid=%p, new BAW left edge=%d\n", 3345 __func__, 3346 tid, 3347 tap->txa_start); 3348 3349 /* Try sending the BAR frame */ 3350 /* We can't hold the lock here! */ 3351
| 3275 3276 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3277 "%s: tid=%p, called\n", 3278 __func__, 3279 tid); 3280 3281 tap = ath_tx_get_tx_tid(tid->an, tid->tid); 3282 3283 /* 3284 * This is an error condition! 3285 */ 3286 if (tid->bar_wait == 0 || tid->bar_tx == 1) { 3287 device_printf(sc->sc_dev, 3288 "%s: tid=%p, bar_tx=%d, bar_wait=%d: ?\n", 3289 __func__, 3290 tid, 3291 tid->bar_tx, 3292 tid->bar_wait); 3293 return; 3294 } 3295 3296 /* Don't do anything if we still have pending frames */ 3297 if (tid->hwq_depth > 0) { 3298 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3299 "%s: tid=%p, hwq_depth=%d, waiting\n", 3300 __func__, 3301 tid, 3302 tid->hwq_depth); 3303 return; 3304 } 3305 3306 /* We're now about to TX */ 3307 tid->bar_tx = 1; 3308 3309 /* 3310 * Override the clrdmask configuration for the next frame, 3311 * just to get the ball rolling. 3312 */ 3313 tid->clrdmask = 1; 3314 3315 /* 3316 * Calculate new BAW left edge, now that all frames have either 3317 * succeeded or failed. 3318 * 3319 * XXX verify this is _actually_ the valid value to begin at! 3320 */ 3321 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3322 "%s: tid=%p, new BAW left edge=%d\n", 3323 __func__, 3324 tid, 3325 tap->txa_start); 3326 3327 /* Try sending the BAR frame */ 3328 /* We can't hold the lock here! */ 3329
|
3352 ATH_TXQ_UNLOCK(sc->sc_ac2q[tid->ac]);
| 3330 ATH_TX_UNLOCK(sc);
|
3353 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) { 3354 /* Success? Now we wait for notification that it's done */
| 3331 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) { 3332 /* Success? Now we wait for notification that it's done */
|
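/*
 * Illustrative sketch (not driver code): the drop/re-take pattern
 * around ieee80211_send_bar() above, with a pthread mutex standing
 * in for the TX lock.  Anything cached before the drop must be
 * revalidated afterwards, since other threads may run in the gap:
 */
#include <pthread.h>

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called with tx_lock held; send() stands in for ieee80211_send_bar() */
static int
call_without_lock(int (*send)(void))
{
	int error;

	pthread_mutex_unlock(&tx_lock);
	error = send();			/* may re-enter TX paths */
	pthread_mutex_lock(&tx_lock);
	/* revalidate any state sampled before the unlock */
	return (error);
}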
3355 ATH_TXQ_LOCK(sc->sc_ac2q[tid->ac]);
| 3333 ATH_TX_LOCK(sc);
|
3356 return; 3357 } 3358 3359 /* Failure? For now, warn loudly and continue */
| 3334 return; 3335 } 3336 3337 /* Failure? For now, warn loudly and continue */
|
3360 ATH_TXQ_LOCK(sc->sc_ac2q[tid->ac]);
| 3338 ATH_TX_LOCK(sc);
|
3361 device_printf(sc->sc_dev, "%s: tid=%p, failed to TX BAR, continue!\n", 3362 __func__, tid); 3363 ath_tx_tid_bar_unsuspend(sc, tid); 3364} 3365 3366static void 3367ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an, 3368 struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf) 3369{ 3370
| 3339 device_printf(sc->sc_dev, "%s: tid=%p, failed to TX BAR, continue!\n", 3340 __func__, tid); 3341 ath_tx_tid_bar_unsuspend(sc, tid); 3342} 3343 3344static void 3345ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an, 3346 struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf) 3347{ 3348
|
3371 ATH_TID_LOCK_ASSERT(sc, tid);
| 3349 ATH_TX_LOCK_ASSERT(sc);
|
3372 3373 /* 3374 * If the current TID is running AMPDU, update 3375 * the BAW. 3376 */ 3377 if (ath_tx_ampdu_running(sc, an, tid->tid) && 3378 bf->bf_state.bfs_dobaw) { 3379 /* 3380 * Only remove the frame from the BAW if it's 3381 * been transmitted at least once; this means 3382 * the frame was in the BAW to begin with. 3383 */ 3384 if (bf->bf_state.bfs_retries > 0) { 3385 ath_tx_update_baw(sc, an, tid, bf); 3386 bf->bf_state.bfs_dobaw = 0; 3387 } 3388 /* 3389 * This has become a non-fatal error now 3390 */ 3391 if (! bf->bf_state.bfs_addedbaw) 3392 device_printf(sc->sc_dev, 3393 "%s: wasn't added: seqno %d\n", 3394 __func__, SEQNO(bf->bf_state.bfs_seqno)); 3395 } 3396 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); 3397} 3398 3399static void 3400ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an, 3401 const char *pfx, struct ath_tid *tid, struct ath_buf *bf) 3402{ 3403 struct ieee80211_node *ni = &an->an_node; 3404 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 3405 struct ieee80211_tx_ampdu *tap; 3406 3407 tap = ath_tx_get_tx_tid(an, tid->tid); 3408 3409 device_printf(sc->sc_dev, 3410 "%s: %s: node %p: bf=%p: addbaw=%d, dobaw=%d, " 3411 "seqno=%d, retry=%d\n", 3412 __func__, pfx, ni, bf, 3413 bf->bf_state.bfs_addedbaw, 3414 bf->bf_state.bfs_dobaw, 3415 SEQNO(bf->bf_state.bfs_seqno), 3416 bf->bf_state.bfs_retries); 3417 device_printf(sc->sc_dev, 3418 "%s: node %p: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n", 3419 __func__, ni, bf, 3420 txq->axq_qnum, 3421 txq->axq_depth, 3422 txq->axq_aggr_depth); 3423 3424 device_printf(sc->sc_dev, 3425 "%s: node %p: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, isfiltered=%d\n", 3426 __func__, ni, bf, 3427 tid->axq_depth, 3428 tid->hwq_depth, 3429 tid->bar_wait, 3430 tid->isfiltered); 3431 device_printf(sc->sc_dev, 3432 "%s: node %p: tid %d: " 3433 "sched=%d, paused=%d, " 3434 "incomp=%d, baw_head=%d, " 3435 "baw_tail=%d txa_start=%d, ni_txseqs=%d\n", 3436 __func__, ni, tid->tid, 3437 tid->sched, tid->paused, 3438 tid->incomp, tid->baw_head, 3439 tid->baw_tail, tap == NULL ? -1 : tap->txa_start, 3440 ni->ni_txseqs[tid->tid]); 3441 3442 /* XXX Dump the frame, see what it is? */ 3443 ieee80211_dump_pkt(ni->ni_ic, 3444 mtod(bf->bf_m, const uint8_t *), 3445 bf->bf_m->m_len, 0, -1); 3446} 3447 3448/* 3449 * Free any packets currently pending in the software TX queue. 3450 * 3451 * This will be called when a node is being deleted. 3452 * 3453 * It can also be called on an active node during an interface 3454 * reset or state transition. 3455 * 3456 * (From Linux/reference): 3457 * 3458 * TODO: For frame(s) that are in the retry state, we will reuse the 3459 * sequence number(s) without setting the retry bit. The 3460 * alternative is to give up on these and BAR the receiver's window 3461 * forward. 3462 */ 3463static void 3464ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an, 3465 struct ath_tid *tid, ath_bufhead *bf_cq) 3466{ 3467 struct ath_buf *bf; 3468 struct ieee80211_tx_ampdu *tap; 3469 struct ieee80211_node *ni = &an->an_node; 3470 int t; 3471 3472 tap = ath_tx_get_tx_tid(an, tid->tid); 3473
| 3350 3351 /* 3352 * If the current TID is running AMPDU, update 3353 * the BAW. 3354 */ 3355 if (ath_tx_ampdu_running(sc, an, tid->tid) && 3356 bf->bf_state.bfs_dobaw) { 3357 /* 3358 * Only remove the frame from the BAW if it's 3359 * been transmitted at least once; this means 3360 * the frame was in the BAW to begin with. 3361 */ 3362 if (bf->bf_state.bfs_retries > 0) { 3363 ath_tx_update_baw(sc, an, tid, bf); 3364 bf->bf_state.bfs_dobaw = 0; 3365 } 3366 /* 3367 * This has become a non-fatal error now 3368 */ 3369 if (! bf->bf_state.bfs_addedbaw) 3370 device_printf(sc->sc_dev, 3371 "%s: wasn't added: seqno %d\n", 3372 __func__, SEQNO(bf->bf_state.bfs_seqno)); 3373 } 3374 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); 3375} 3376 3377static void 3378ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an, 3379 const char *pfx, struct ath_tid *tid, struct ath_buf *bf) 3380{ 3381 struct ieee80211_node *ni = &an->an_node; 3382 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 3383 struct ieee80211_tx_ampdu *tap; 3384 3385 tap = ath_tx_get_tx_tid(an, tid->tid); 3386 3387 device_printf(sc->sc_dev, 3388 "%s: %s: node %p: bf=%p: addbaw=%d, dobaw=%d, " 3389 "seqno=%d, retry=%d\n", 3390 __func__, pfx, ni, bf, 3391 bf->bf_state.bfs_addedbaw, 3392 bf->bf_state.bfs_dobaw, 3393 SEQNO(bf->bf_state.bfs_seqno), 3394 bf->bf_state.bfs_retries); 3395 device_printf(sc->sc_dev, 3396 "%s: node %p: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n", 3397 __func__, ni, bf, 3398 txq->axq_qnum, 3399 txq->axq_depth, 3400 txq->axq_aggr_depth); 3401 3402 device_printf(sc->sc_dev, 3403 "%s: node %p: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, isfiltered=%d\n", 3404 __func__, ni, bf, 3405 tid->axq_depth, 3406 tid->hwq_depth, 3407 tid->bar_wait, 3408 tid->isfiltered); 3409 device_printf(sc->sc_dev, 3410 "%s: node %p: tid %d: " 3411 "sched=%d, paused=%d, " 3412 "incomp=%d, baw_head=%d, " 3413 "baw_tail=%d txa_start=%d, ni_txseqs=%d\n", 3414 __func__, ni, tid->tid, 3415 tid->sched, tid->paused, 3416 tid->incomp, tid->baw_head, 3417 tid->baw_tail, tap == NULL ? -1 : tap->txa_start, 3418 ni->ni_txseqs[tid->tid]); 3419 3420 /* XXX Dump the frame, see what it is? */ 3421 ieee80211_dump_pkt(ni->ni_ic, 3422 mtod(bf->bf_m, const uint8_t *), 3423 bf->bf_m->m_len, 0, -1); 3424} 3425 3426/* 3427 * Free any packets currently pending in the software TX queue. 3428 * 3429 * This will be called when a node is being deleted. 3430 * 3431 * It can also be called on an active node during an interface 3432 * reset or state transition. 3433 * 3434 * (From Linux/reference): 3435 * 3436 * TODO: For frame(s) that are in the retry state, we will reuse the 3437 * sequence number(s) without setting the retry bit. The 3438 * alternative is to give up on these and BAR the receiver's window 3439 * forward. 3440 */ 3441static void 3442ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an, 3443 struct ath_tid *tid, ath_bufhead *bf_cq) 3444{ 3445 struct ath_buf *bf; 3446 struct ieee80211_tx_ampdu *tap; 3447 struct ieee80211_node *ni = &an->an_node; 3448 int t; 3449 3450 tap = ath_tx_get_tx_tid(an, tid->tid); 3451
|
3474 ATH_TID_LOCK_ASSERT(sc, tid);
| 3452 ATH_TX_LOCK_ASSERT(sc);
|
3475 3476 /* Walk the queue, free frames */ 3477 t = 0; 3478 for (;;) { 3479 bf = ATH_TID_FIRST(tid); 3480 if (bf == NULL) { 3481 break; 3482 } 3483 3484 if (t == 0) { 3485 ath_tx_tid_drain_print(sc, an, "norm", tid, bf); 3486 t = 1; 3487 } 3488 3489 ATH_TID_REMOVE(tid, bf, bf_list); 3490 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); 3491 } 3492 3493 /* And now, drain the filtered frame queue */ 3494 t = 0; 3495 for (;;) { 3496 bf = ATH_TID_FILT_FIRST(tid); 3497 if (bf == NULL) 3498 break; 3499 3500 if (t == 0) { 3501 ath_tx_tid_drain_print(sc, an, "filt", tid, bf); 3502 t = 1; 3503 } 3504 3505 ATH_TID_FILT_REMOVE(tid, bf, bf_list); 3506 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); 3507 } 3508 3509 /* 3510 * Override the clrdmask configuration for the next frame 3511 * in case there is some future transmission, just to get 3512 * the ball rolling. 3513 * 3514 * This won't hurt things if the TID is about to be freed. 3515 */ 3516 tid->clrdmask = 1; 3517 3518 /* 3519 * Now that it's completed, grab the TID lock and update 3520 * the sequence number and BAW window. 3521 * Because sequence numbers have been assigned to frames 3522 * that haven't been sent yet, it's entirely possible 3523 * we'll be called with some pending frames that have not 3524 * been transmitted. 3525 * 3526 * The cleaner solution is to do the sequence number allocation 3527 * when the packet is first transmitted - and thus the "retries" 3528 * check above would be enough to update the BAW/seqno. 3529 */ 3530 3531 /* But don't do it for non-QoS TIDs */ 3532 if (tap) { 3533#if 0 3534 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3535 "%s: node %p: TID %d: sliding BAW left edge to %d\n", 3536 __func__, an, tid->tid, tap->txa_start); 3537#endif 3538 ni->ni_txseqs[tid->tid] = tap->txa_start; 3539 tid->baw_tail = tid->baw_head; 3540 } 3541} 3542 3543/* 3544 * Flush all software queued packets for the given node. 3545 * 3546 * This occurs when a completion handler frees the last buffer 3547 * for a node, and the node is thus freed. This causes the node 3548 * to be cleaned up, which ends up calling ath_tx_node_flush. 3549 */ 3550void 3551ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an) 3552{ 3553 int tid; 3554 ath_bufhead bf_cq; 3555 struct ath_buf *bf; 3556 3557 TAILQ_INIT(&bf_cq); 3558 3559 ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p", 3560 &an->an_node); 3561
| 3453 3454 /* Walk the queue, free frames */ 3455 t = 0; 3456 for (;;) { 3457 bf = ATH_TID_FIRST(tid); 3458 if (bf == NULL) { 3459 break; 3460 } 3461 3462 if (t == 0) { 3463 ath_tx_tid_drain_print(sc, an, "norm", tid, bf); 3464 t = 1; 3465 } 3466 3467 ATH_TID_REMOVE(tid, bf, bf_list); 3468 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); 3469 } 3470 3471 /* And now, drain the filtered frame queue */ 3472 t = 0; 3473 for (;;) { 3474 bf = ATH_TID_FILT_FIRST(tid); 3475 if (bf == NULL) 3476 break; 3477 3478 if (t == 0) { 3479 ath_tx_tid_drain_print(sc, an, "filt", tid, bf); 3480 t = 1; 3481 } 3482 3483 ATH_TID_FILT_REMOVE(tid, bf, bf_list); 3484 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); 3485 } 3486 3487 /* 3488 * Override the clrdmask configuration for the next frame 3489 * in case there is some future transmission, just to get 3490 * the ball rolling. 3491 * 3492 * This won't hurt things if the TID is about to be freed. 3493 */ 3494 tid->clrdmask = 1; 3495 3496 /* 3497 * Now that it's completed, grab the TID lock and update 3498 * the sequence number and BAW window. 3499 * Because sequence numbers have been assigned to frames 3500 * that haven't been sent yet, it's entirely possible 3501 * we'll be called with some pending frames that have not 3502 * been transmitted. 3503 * 3504 * The cleaner solution is to do the sequence number allocation 3505 * when the packet is first transmitted - and thus the "retries" 3506 * check above would be enough to update the BAW/seqno. 3507 */ 3508 3509 /* But don't do it for non-QoS TIDs */ 3510 if (tap) { 3511#if 0 3512 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3513 "%s: node %p: TID %d: sliding BAW left edge to %d\n", 3514 __func__, an, tid->tid, tap->txa_start); 3515#endif 3516 ni->ni_txseqs[tid->tid] = tap->txa_start; 3517 tid->baw_tail = tid->baw_head; 3518 } 3519} 3520 3521/* 3522 * Flush all software queued packets for the given node. 3523 * 3524 * This occurs when a completion handler frees the last buffer 3525 * for a node, and the node is thus freed. This causes the node 3526 * to be cleaned up, which ends up calling ath_tx_node_flush. 3527 */ 3528void 3529ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an) 3530{ 3531 int tid; 3532 ath_bufhead bf_cq; 3533 struct ath_buf *bf; 3534 3535 TAILQ_INIT(&bf_cq); 3536 3537 ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p", 3538 &an->an_node); 3539
|
| 3540 ATH_TX_LOCK(sc);
|
3562 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 3563 struct ath_tid *atid = &an->an_tid[tid];
| 3541 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 3542 struct ath_tid *atid = &an->an_tid[tid];
|
3564 struct ath_txq *txq = sc->sc_ac2q[atid->ac];
| |
3565
| 3543
|
3566 ATH_TXQ_LOCK(txq);
| |
3567 /* Free packets */ 3568 ath_tx_tid_drain(sc, an, atid, &bf_cq); 3569 /* Remove this tid from the list of active tids */ 3570 ath_tx_tid_unsched(sc, atid);
| 3544 /* Free packets */ 3545 ath_tx_tid_drain(sc, an, atid, &bf_cq); 3546 /* Remove this tid from the list of active tids */ 3547 ath_tx_tid_unsched(sc, atid);
|
3571 ATH_TXQ_UNLOCK(txq);
| |
3572 }
| 3548 }
|
| 3549 ATH_TX_UNLOCK(sc);
|
3573 3574 /* Handle completed frames */ 3575 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 3576 TAILQ_REMOVE(&bf_cq, bf, bf_list); 3577 ath_tx_default_comp(sc, bf, 0); 3578 } 3579} 3580 3581/* 3582 * Drain all the software TXQs currently with traffic queued. 3583 */ 3584void 3585ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq) 3586{ 3587 struct ath_tid *tid; 3588 ath_bufhead bf_cq; 3589 struct ath_buf *bf; 3590 3591 TAILQ_INIT(&bf_cq);
| 3550 3551 /* Handle completed frames */ 3552 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 3553 TAILQ_REMOVE(&bf_cq, bf, bf_list); 3554 ath_tx_default_comp(sc, bf, 0); 3555 } 3556} 3557 3558/* 3559 * Drain all the software TXQs currently with traffic queued. 3560 */ 3561void 3562ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq) 3563{ 3564 struct ath_tid *tid; 3565 ath_bufhead bf_cq; 3566 struct ath_buf *bf; 3567 3568 TAILQ_INIT(&bf_cq);
|
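/*
 * Illustrative sketch (not driver code): the flush above and the
 * drain below share one shape - gather completed buffers onto a
 * private list under the lock, then run the completion handlers
 * after the lock is dropped, since the handlers may re-enter TX.
 * Placeholder names, using the same sys/queue.h API family:
 */
#include <sys/queue.h>

struct xbuf { TAILQ_ENTRY(xbuf) link; };
TAILQ_HEAD(xbufhead, xbuf);

static void
drain_then_complete(struct xbufhead *src, void (*lock)(void),
    void (*unlock)(void), void (*comp)(struct xbuf *))
{
	struct xbufhead cq = TAILQ_HEAD_INITIALIZER(cq);
	struct xbuf *b;

	lock();
	TAILQ_CONCAT(&cq, src, link);	/* gather under the lock */
	unlock();

	/* run completions with no locks held */
	while ((b = TAILQ_FIRST(&cq)) != NULL) {
		TAILQ_REMOVE(&cq, b, link);
		comp(b);
	}
}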
3592 ATH_TXQ_LOCK(txq);
| 3569 ATH_TX_LOCK(sc);
|
3593 3594 /* 3595 * Iterate over all active tids for the given txq, 3596 * flushing and unsched'ing them 3597 */ 3598 while (! TAILQ_EMPTY(&txq->axq_tidq)) { 3599 tid = TAILQ_FIRST(&txq->axq_tidq); 3600 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq); 3601 ath_tx_tid_unsched(sc, tid); 3602 } 3603
| 3570 3571 /* 3572 * Iterate over all active tids for the given txq, 3573 * flushing and unsched'ing them 3574 */ 3575 while (! TAILQ_EMPTY(&txq->axq_tidq)) { 3576 tid = TAILQ_FIRST(&txq->axq_tidq); 3577 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq); 3578 ath_tx_tid_unsched(sc, tid); 3579 } 3580
|
3604 ATH_TXQ_UNLOCK(txq);
| 3581 ATH_TX_UNLOCK(sc);
|
3605 3606 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 3607 TAILQ_REMOVE(&bf_cq, bf, bf_list); 3608 ath_tx_default_comp(sc, bf, 0); 3609 } 3610} 3611 3612/* 3613 * Handle completion of non-aggregate session frames. 3614 * 3615 * This (currently) doesn't implement software retransmission of 3616 * non-aggregate frames! 3617 * 3618 * Software retransmission of non-aggregate frames needs to obey 3619 * the strict sequence number ordering, and drop any frames that 3620 * will fail this. 3621 * 3622 * For now, filtered frames and frame transmission will cause 3623 * all kinds of issues. So we don't support them. 3624 * 3625 * So anyone queuing frames via ath_tx_normal_xmit() or 3626 * ath_tx_hw_queue_norm() must override and set CLRDMASK. 3627 */ 3628void 3629ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 3630{ 3631 struct ieee80211_node *ni = bf->bf_node; 3632 struct ath_node *an = ATH_NODE(ni); 3633 int tid = bf->bf_state.bfs_tid; 3634 struct ath_tid *atid = &an->an_tid[tid]; 3635 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 3636 3637 /* The TID state is protected behind the TXQ lock */
| 3582 3583 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 3584 TAILQ_REMOVE(&bf_cq, bf, bf_list); 3585 ath_tx_default_comp(sc, bf, 0); 3586 } 3587} 3588 3589/* 3590 * Handle completion of non-aggregate session frames. 3591 * 3592 * This (currently) doesn't implement software retransmission of 3593 * non-aggregate frames! 3594 * 3595 * Software retransmission of non-aggregate frames needs to obey 3596 * the strict sequence number ordering, and drop any frames that 3597 * will fail this. 3598 * 3599 * For now, filtered frames and frame transmission will cause 3600 * all kinds of issues. So we don't support them. 3601 * 3602 * So anyone queuing frames via ath_tx_normal_xmit() or 3603 * ath_tx_hw_queue_norm() must override and set CLRDMASK. 3604 */ 3605void 3606ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 3607{ 3608 struct ieee80211_node *ni = bf->bf_node; 3609 struct ath_node *an = ATH_NODE(ni); 3610 int tid = bf->bf_state.bfs_tid; 3611 struct ath_tid *atid = &an->an_tid[tid]; 3612 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 3613 3614 /* The TID state is protected behind the TXQ lock */
|
3638 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
| 3615 ATH_TX_LOCK(sc);
|
3639 3640 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n", 3641 __func__, bf, fail, atid->hwq_depth - 1); 3642 3643 atid->hwq_depth--; 3644 3645#if 0 3646 /* 3647 * If the frame was filtered, stick it on the filter frame 3648 * queue and complain about it. It shouldn't happen! 3649 */ 3650 if ((ts->ts_status & HAL_TXERR_FILT) || 3651 (ts->ts_status != 0 && atid->isfiltered)) { 3652 device_printf(sc->sc_dev, 3653 "%s: isfiltered=%d, ts_status=%d: huh?\n", 3654 __func__, 3655 atid->isfiltered, 3656 ts->ts_status); 3657 ath_tx_tid_filt_comp_buf(sc, atid, bf); 3658 } 3659#endif 3660 if (atid->isfiltered) 3661 device_printf(sc->sc_dev, "%s: filtered?!\n", __func__); 3662 if (atid->hwq_depth < 0) 3663 device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n", 3664 __func__, atid->hwq_depth); 3665 3666 /* 3667 * If the queue is filtered, potentially mark it as complete 3668 * and reschedule it as needed. 3669 * 3670 * This is required as there may be a subsequent TX descriptor 3671 * for this end-node that has CLRDMASK set, so it's quite possible 3672 * that a filtered frame will be followed by a non-filtered 3673 * (complete or otherwise) frame. 3674 * 3675 * XXX should we do this before we complete the frame? 3676 */ 3677 if (atid->isfiltered) 3678 ath_tx_tid_filt_comp_complete(sc, atid);
| 3616 3617 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n", 3618 __func__, bf, fail, atid->hwq_depth - 1); 3619 3620 atid->hwq_depth--; 3621 3622#if 0 3623 /* 3624 * If the frame was filtered, stick it on the filter frame 3625 * queue and complain about it. It shouldn't happen! 3626 */ 3627 if ((ts->ts_status & HAL_TXERR_FILT) || 3628 (ts->ts_status != 0 && atid->isfiltered)) { 3629 device_printf(sc->sc_dev, 3630 "%s: isfiltered=%d, ts_status=%d: huh?\n", 3631 __func__, 3632 atid->isfiltered, 3633 ts->ts_status); 3634 ath_tx_tid_filt_comp_buf(sc, atid, bf); 3635 } 3636#endif 3637 if (atid->isfiltered) 3638 device_printf(sc->sc_dev, "%s: filtered?!\n", __func__); 3639 if (atid->hwq_depth < 0) 3640 device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n", 3641 __func__, atid->hwq_depth); 3642 3643 /* 3644 * If the queue is filtered, potentially mark it as complete 3645 * and reschedule it as needed. 3646 * 3647 * This is required as there may be a subsequent TX descriptor 3648 * for this end-node that has CLRDMASK set, so it's quite possible 3649 * that a filtered frame will be followed by a non-filtered 3650 * (complete or otherwise) frame. 3651 * 3652 * XXX should we do this before we complete the frame? 3653 */ 3654 if (atid->isfiltered) 3655 ath_tx_tid_filt_comp_complete(sc, atid);
|
3679 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
| 3656 ATH_TX_UNLOCK(sc);
|
3680 3681 /* 3682 * punt to rate control if we're not being cleaned up 3683 * during a hw queue drain and the frame wanted an ACK. 3684 */ 3685 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 3686 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 3687 ts, bf->bf_state.bfs_pktlen, 3688 1, (ts->ts_status == 0) ? 0 : 1); 3689 3690 ath_tx_default_comp(sc, bf, fail); 3691} 3692 3693/* 3694 * Handle cleanup of aggregate session packets that aren't 3695 * an A-MPDU. 3696 * 3697 * There's no need to update the BAW here - the session is being 3698 * torn down. 3699 */ 3700static void 3701ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf) 3702{ 3703 struct ieee80211_node *ni = bf->bf_node; 3704 struct ath_node *an = ATH_NODE(ni); 3705 int tid = bf->bf_state.bfs_tid; 3706 struct ath_tid *atid = &an->an_tid[tid]; 3707 3708 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n", 3709 __func__, tid, atid->incomp); 3710
| 3657 3658 /* 3659 * punt to rate control if we're not being cleaned up 3660 * during a hw queue drain and the frame wanted an ACK. 3661 */ 3662 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 3663 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 3664 ts, bf->bf_state.bfs_pktlen, 3665 1, (ts->ts_status == 0) ? 0 : 1); 3666 3667 ath_tx_default_comp(sc, bf, fail); 3668} 3669 3670/* 3671 * Handle cleanup of aggregate session packets that aren't 3672 * an A-MPDU. 3673 * 3674 * There's no need to update the BAW here - the session is being 3675 * torn down. 3676 */ 3677static void 3678ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf) 3679{ 3680 struct ieee80211_node *ni = bf->bf_node; 3681 struct ath_node *an = ATH_NODE(ni); 3682 int tid = bf->bf_state.bfs_tid; 3683 struct ath_tid *atid = &an->an_tid[tid]; 3684 3685 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n", 3686 __func__, tid, atid->incomp); 3687
|
3711 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
| 3688 ATH_TX_LOCK(sc);
|
3712 atid->incomp--; 3713 if (atid->incomp == 0) { 3714 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3715 "%s: TID %d: cleaned up! resume!\n", 3716 __func__, tid); 3717 atid->cleanup_inprogress = 0; 3718 ath_tx_tid_resume(sc, atid); 3719 }
| 3689 atid->incomp--; 3690 if (atid->incomp == 0) { 3691 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3692 "%s: TID %d: cleaned up! resume!\n", 3693 __func__, tid); 3694 atid->cleanup_inprogress = 0; 3695 ath_tx_tid_resume(sc, atid); 3696 }
|
3720 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
| 3697 ATH_TX_UNLOCK(sc);
|
3721 3722 ath_tx_default_comp(sc, bf, 0); 3723} 3724 3725/* 3726 * Performs transmit side cleanup when TID changes from aggregated to 3727 * unaggregated. 3728 * 3729 * - Discard all retry frames from the s/w queue. 3730 * - Fix the tx completion function for all buffers in s/w queue. 3731 * - Count the number of unacked frames, and let transmit completion 3732 * handle it later. 3733 * 3734 * The caller is responsible for pausing the TID. 3735 */ 3736static void 3737ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid) 3738{ 3739 struct ath_tid *atid = &an->an_tid[tid]; 3740 struct ieee80211_tx_ampdu *tap; 3741 struct ath_buf *bf, *bf_next; 3742 ath_bufhead bf_cq; 3743 3744 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 3745 "%s: TID %d: called\n", __func__, tid); 3746 3747 TAILQ_INIT(&bf_cq);

	ath_tx_default_comp(sc, bf, 0);
}

/*
 * Performs transmit side cleanup when TID changes from aggregated to
 * unaggregated.
 *
 * - Discard all retry frames from the s/w queue.
 * - Fix the tx completion function for all buffers in s/w queue.
 * - Count the number of unacked frames, and let transmit completion
 *   handle it later.
 *
 * The caller is responsible for pausing the TID.
 */
static void
ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid)
{
	struct ath_tid *atid = &an->an_tid[tid];
	struct ieee80211_tx_ampdu *tap;
	struct ath_buf *bf, *bf_next;
	ath_bufhead bf_cq;

	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
	    "%s: TID %d: called\n", __func__, tid);

	TAILQ_INIT(&bf_cq);
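
	/*
	 * All of the software queue, filtered frame queue and BAW
	 * manipulation below has to be done atomically, so hold
	 * the TX lock across the whole walk.
	 */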
	ATH_TX_LOCK(sc);

	/*
	 * Move the filtered frames to the TX queue, before
	 * we run off and discard/process things.
	 */
	/* XXX this is really quite inefficient */
	while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) {
		ATH_TID_FILT_REMOVE(atid, bf, bf_list);
		ATH_TID_INSERT_HEAD(atid, bf, bf_list);
	}

	/*
	 * Update the frames in the software TX queue:
	 *
	 * + Discard retry frames in the queue
	 * + Fix the completion function to be non-aggregate
	 */
	bf = ATH_TID_FIRST(atid);
	while (bf) {
		if (bf->bf_state.bfs_isretried) {
			bf_next = TAILQ_NEXT(bf, bf_list);
			ATH_TID_REMOVE(atid, bf, bf_list);
			atid->axq_depth--;
			if (bf->bf_state.bfs_dobaw) {
				ath_tx_update_baw(sc, an, atid, bf);
				if (! bf->bf_state.bfs_addedbaw)
					device_printf(sc->sc_dev,
					    "%s: wasn't added: seqno %d\n",
					    __func__,
					    SEQNO(bf->bf_state.bfs_seqno));
			}
			bf->bf_state.bfs_dobaw = 0;
			/*
			 * Call the default completion handler with "fail" just
			 * so upper levels are suitably notified about this.
			 */
			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
			bf = bf_next;
			continue;
		}
		/* Give these the default completion handler */
		bf->bf_comp = ath_tx_normal_comp;
		bf = TAILQ_NEXT(bf, bf_list);
	}

	/* The caller is required to pause the TID */
#if 0
	/* Pause the TID */
	ath_tx_tid_pause(sc, atid);
#endif

	/*
	 * Calculate what hardware-queued frames exist based
	 * on the current BAW size. Ie, what frames have been
	 * added to the TX hardware queue for this TID but
	 * not yet ACKed.
	 */
	tap = ath_tx_get_tx_tid(an, tid);
	/* Need the lock - fiddling with BAW */
	while (atid->baw_head != atid->baw_tail) {
		if (atid->tx_buf[atid->baw_head]) {
			atid->incomp++;
			atid->cleanup_inprogress = 1;
			atid->tx_buf[atid->baw_head] = NULL;
		}
		INCR(atid->baw_head, ATH_TID_MAX_BUFS);
		INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
	}

	/*
	 * If cleanup is required, defer TID scheduling
	 * until all the HW queued packets have been
	 * sent.
	 */
	if (! atid->cleanup_inprogress)
		ath_tx_tid_resume(sc, atid);

	if (atid->cleanup_inprogress)
		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
		    "%s: TID %d: cleanup needed: %d packets\n",
		    __func__, tid, atid->incomp);
	ATH_TX_UNLOCK(sc);

	/* Handle completing frames and fail them */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}
}

static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf)
{
	struct ath_buf *nbf;
	int error;

	nbf = ath_buf_clone(sc, bf);

#if 0
	device_printf(sc->sc_dev, "%s: ATH_BUF_BUSY; cloning\n",
	    __func__);
#endif

	if (nbf == NULL) {
		/* Failed to clone */
		device_printf(sc->sc_dev,
		    "%s: failed to clone a busy buffer\n",
		    __func__);
		return NULL;
	}

	/* Setup the dma for the new buffer */
	error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed to setup dma for clone\n",
		    __func__);
		/*
		 * Put this at the head of the list, not tail;
		 * that way it doesn't interfere with the
		 * busy buffer logic (which uses the tail of
		 * the list.)
		 */
		ATH_TXBUF_LOCK(sc);
		ath_returnbuf_head(sc, nbf);
		ATH_TXBUF_UNLOCK(sc);
		return NULL;
	}

	/* Update BAW if required, before we free the original buf */
	if (bf->bf_state.bfs_dobaw)
		ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);

	/* Free current buffer; return the older buffer */
	bf->bf_m = NULL;
	bf->bf_node = NULL;
	ath_freebuf(sc, bf);

	return nbf;
}

/*
 * Handle retrying an unaggregate frame in an aggregate
 * session.
 *
 * If too many retries occur, pause the TID, wait for
 * any further retransmits (as there's no reason why
 * non-aggregate frames in an aggregate session are
 * transmitted in-order; they just have to be in-BAW)
 * and then queue a BAR.
 */
static void
ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_node *ni = bf->bf_node;
	struct ath_node *an = ATH_NODE(ni);
	int tid = bf->bf_state.bfs_tid;
	struct ath_tid *atid = &an->an_tid[tid];
	struct ieee80211_tx_ampdu *tap;

	ATH_TX_LOCK(sc);

	tap = ath_tx_get_tx_tid(an, tid);

	/*
	 * If the buffer is marked as busy, we can't directly
	 * reuse it. Instead, try to clone the buffer.
	 * If the clone is successful, recycle the old buffer.
	 * If the clone is unsuccessful, set bfs_retries to max
	 * to force the next bit of code to free the buffer
	 * for us.
	 */
	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
	    (bf->bf_flags & ATH_BUF_BUSY)) {
		struct ath_buf *nbf;
		nbf = ath_tx_retry_clone(sc, an, atid, bf);
		if (nbf)
			/* bf has been freed at this point */
			bf = nbf;
		else
			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
	}

	if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
		    "%s: exceeded retries; seqno %d\n",
		    __func__, SEQNO(bf->bf_state.bfs_seqno));
		sc->sc_stats.ast_tx_swretrymax++;

		/* Update BAW anyway */
		if (bf->bf_state.bfs_dobaw) {
			ath_tx_update_baw(sc, an, atid, bf);
			if (! bf->bf_state.bfs_addedbaw)
				device_printf(sc->sc_dev,
				    "%s: wasn't added: seqno %d\n",
				    __func__, SEQNO(bf->bf_state.bfs_seqno));
		}
		bf->bf_state.bfs_dobaw = 0;

		/* Suspend the TX queue and get ready to send the BAR */
		ath_tx_tid_bar_suspend(sc, atid);

		/* Send the BAR if there are no other frames waiting */
		if (ath_tx_tid_bar_tx_ready(sc, atid))
			ath_tx_tid_bar_tx(sc, atid);

		ATH_TX_UNLOCK(sc);

		/* Free buffer, bf is free after this call */
		ath_tx_default_comp(sc, bf, 0);
		return;
	}

	/*
	 * This increments the retry counter as well as
	 * sets the retry flag in the ath_buf and packet
	 * body.
	 */
	ath_tx_set_retry(sc, bf);
	sc->sc_stats.ast_tx_swretries++;

	/*
	 * Insert this at the head of the queue, so it's
	 * retried before any current/subsequent frames.
	 */
	ATH_TID_INSERT_HEAD(atid, bf, bf_list);
	ath_tx_tid_sched(sc, atid);
	/* Send the BAR if there are no other frames waiting */
	if (ath_tx_tid_bar_tx_ready(sc, atid))
		ath_tx_tid_bar_tx(sc, atid);

	ATH_TX_UNLOCK(sc);
}

/*
 * Common code for aggregate excessive retry/subframe retry.
 * If retrying, queues buffers to bf_q. If not, frees the
 * buffers.
 *
 * XXX should unify this with ath_tx_aggr_retry_unaggr()
 */
static int
ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,
    ath_bufhead *bf_q)
{
	struct ieee80211_node *ni = bf->bf_node;
	struct ath_node *an = ATH_NODE(ni);
	int tid = bf->bf_state.bfs_tid;
	struct ath_tid *atid = &an->an_tid[tid];

	ATH_TX_LOCK_ASSERT(sc);
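
	/*
	 * This subframe is leaving the aggregate, so clear the
	 * aggregate-related descriptor state before it is retried
	 * as a standalone frame.
	 */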
	/* XXX clr11naggr should be done for all subframes */
	ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
	ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);

	/* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */

	/*
	 * If the buffer is marked as busy, we can't directly
	 * reuse it. Instead, try to clone the buffer.
	 * If the clone is successful, recycle the old buffer.
	 * If the clone is unsuccessful, set bfs_retries to max
	 * to force the next bit of code to free the buffer
	 * for us.
	 */
	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
	    (bf->bf_flags & ATH_BUF_BUSY)) {
		struct ath_buf *nbf;
		nbf = ath_tx_retry_clone(sc, an, atid, bf);
		if (nbf)
			/* bf has been freed at this point */
			bf = nbf;
		else
			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
	}

	if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
		sc->sc_stats.ast_tx_swretrymax++;
		DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
		    "%s: max retries: seqno %d\n",
		    __func__, SEQNO(bf->bf_state.bfs_seqno));
		ath_tx_update_baw(sc, an, atid, bf);
		if (! bf->bf_state.bfs_addedbaw)
			device_printf(sc->sc_dev,
			    "%s: wasn't added: seqno %d\n",
			    __func__, SEQNO(bf->bf_state.bfs_seqno));
		bf->bf_state.bfs_dobaw = 0;
		return 1;
	}

	ath_tx_set_retry(sc, bf);
	sc->sc_stats.ast_tx_swretries++;
	bf->bf_next = NULL;		/* Just to make sure */

	/* Clear the aggregate state */
	bf->bf_state.bfs_aggr = 0;
	bf->bf_state.bfs_ndelim = 0;	/* ??? needed? */
	bf->bf_state.bfs_nframes = 1;

	TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
	return 0;
}

/*
 * error pkt completion for an aggregate destination
 */
static void
ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first,
    struct ath_tid *tid)
{
	struct ieee80211_node *ni = bf_first->bf_node;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_buf *bf_next, *bf;
	ath_bufhead bf_q;
	int drops = 0;
	struct ieee80211_tx_ampdu *tap;
	ath_bufhead bf_cq;

	TAILQ_INIT(&bf_q);
	TAILQ_INIT(&bf_cq);

	/*
	 * Update rate control - all frames have failed.
	 *
	 * XXX use the length in the first frame in the series;
	 * XXX just so things are consistent for now.
	 */
	ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
	    &bf_first->bf_status.ds_txstat,
	    bf_first->bf_state.bfs_pktlen,
	    bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);

	ATH_TX_LOCK(sc);
	tap = ath_tx_get_tx_tid(an, tid->tid);
	sc->sc_stats.ast_tx_aggr_failall++;

	/* Retry all subframes */
	bf = bf_first;
	while (bf) {
		bf_next = bf->bf_next;
		bf->bf_next = NULL;	/* Remove it from the aggr list */
		sc->sc_stats.ast_tx_aggr_fail++;
		if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
			drops++;
			bf->bf_next = NULL;
			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
		}
		bf = bf_next;
	}

	/* Prepend all frames to the beginning of the queue */
	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
		TAILQ_REMOVE(&bf_q, bf, bf_list);
		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
	}

	/*
	 * Schedule the TID to be re-tried.
	 */
	ath_tx_tid_sched(sc, tid);

	/*
	 * send bar if we dropped any frames
	 *
	 * Keep the txq lock held for now, as we need to ensure
	 * that ni_txseqs[] is consistent (as it's being updated
	 * in the ifnet TX context or raw TX context.)
	 */
	if (drops) {
		/* Suspend the TX queue and get ready to send the BAR */
		ath_tx_tid_bar_suspend(sc, tid);
	}

	/*
	 * Send BAR if required
	 */
	if (ath_tx_tid_bar_tx_ready(sc, tid))
		ath_tx_tid_bar_tx(sc, tid);

	ATH_TX_UNLOCK(sc);

	/* Complete frames which errored out */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 0);
	}
}

/*
 * Handle clean-up of packets from an aggregate list.
 *
 * There's no need to update the BAW here - the session is being
 * torn down.
 */
static void
ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf, *bf_next;
	struct ieee80211_node *ni = bf_first->bf_node;
	struct ath_node *an = ATH_NODE(ni);
	int tid = bf_first->bf_state.bfs_tid;
	struct ath_tid *atid = &an->an_tid[tid];

	bf = bf_first;

	ATH_TX_LOCK(sc);

	/* update incomp */
	while (bf) {
		atid->incomp--;
		bf = bf->bf_next;
	}

	if (atid->incomp == 0) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
		    "%s: TID %d: cleaned up! resume!\n",
		    __func__, tid);
		atid->cleanup_inprogress = 0;
		ath_tx_tid_resume(sc, atid);
	}

	/* Send BAR if required */
	/* XXX why would we send a BAR when transitioning to non-aggregation? */
	if (ath_tx_tid_bar_tx_ready(sc, atid))
		ath_tx_tid_bar_tx(sc, atid);

	ATH_TX_UNLOCK(sc);

	/* Handle frame completion */
	while (bf) {
		bf_next = bf->bf_next;
		ath_tx_default_comp(sc, bf, 1);
		bf = bf_next;
	}
}

/*
 * Handle completion of a set of aggregate frames.
 *
 * XXX for now, simply complete each sub-frame.
 *
 * Note: the completion handler is the last descriptor in the aggregate,
 * not the last descriptor in the first frame.
 */
static void
ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,
    int fail)
{
	//struct ath_desc *ds = bf->bf_lastds;
	struct ieee80211_node *ni = bf_first->bf_node;
	struct ath_node *an = ATH_NODE(ni);
	int tid = bf_first->bf_state.bfs_tid;
	struct ath_tid *atid = &an->an_tid[tid];
	struct ath_tx_status ts;
	struct ieee80211_tx_ampdu *tap;
	ath_bufhead bf_q;
	ath_bufhead bf_cq;
	int seq_st, tx_ok;
	int hasba, isaggr;
	uint32_t ba[2];
	struct ath_buf *bf, *bf_next;
	int ba_index;
	int drops = 0;
	int nframes = 0, nbad = 0, nf;
	int pktlen;
	/* XXX there's too much on the stack? */
	struct ath_rc_series rc[ATH_RC_NUM];
	int txseq;

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
	    __func__, atid->hwq_depth);

	/*
	 * Take a copy; this may be needed -after- bf_first
	 * has been completed and freed.
	 */
	ts = bf_first->bf_status.ds_txstat;

	TAILQ_INIT(&bf_q);
	TAILQ_INIT(&bf_cq);

	/* The TID state is kept behind the TXQ lock */
	ATH_TX_LOCK(sc);

	atid->hwq_depth--;
	if (atid->hwq_depth < 0)
		device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n",
		    __func__, atid->hwq_depth);

	/*
	 * If the TID is filtered, handle completing the filter
	 * transition before potentially kicking it to the cleanup
	 * function.
	 *
	 * XXX this is duplicate work, ew.
	 */
	if (atid->isfiltered)
		ath_tx_tid_filt_comp_complete(sc, atid);

	/*
	 * Punt cleanup to the relevant function, not our problem now
	 */
	if (atid->cleanup_inprogress) {
		if (atid->isfiltered)
			device_printf(sc->sc_dev,
			    "%s: isfiltered=1, normal_comp?\n",
			    __func__);
		ATH_TX_UNLOCK(sc);
		ath_tx_comp_cleanup_aggr(sc, bf_first);
		return;
	}

	/*
	 * If the frame is filtered, transition to filtered frame
	 * mode and add this to the filtered frame list.
	 *
	 * XXX TODO: figure out how this interoperates with
	 * BAR, pause and cleanup states.
	 */
	if ((ts.ts_status & HAL_TXERR_FILT) ||
	    (ts.ts_status != 0 && atid->isfiltered)) {
		if (fail != 0)
			device_printf(sc->sc_dev,
			    "%s: isfiltered=1, fail=%d\n", __func__, fail);
		ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);

		/* Remove from BAW */
		TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) {
			if (bf->bf_state.bfs_addedbaw)
				drops++;
			if (bf->bf_state.bfs_dobaw) {
				ath_tx_update_baw(sc, an, atid, bf);
				if (! bf->bf_state.bfs_addedbaw)
					device_printf(sc->sc_dev,
					    "%s: wasn't added: seqno %d\n",
					    __func__,
					    SEQNO(bf->bf_state.bfs_seqno));
			}
			bf->bf_state.bfs_dobaw = 0;
		}
		/*
		 * If any intermediate frames in the BAW were dropped when
		 * handling filtering things, send a BAR.
		 */
		if (drops)
			ath_tx_tid_bar_suspend(sc, atid);

		/*
		 * Finish up by sending a BAR if required and freeing
		 * the frames outside of the TX lock.
		 */
		goto finish_send_bar;
	}

	/*
	 * XXX for now, use the first frame in the aggregate for
	 * XXX rate control completion; it's at least consistent.
	 */
	pktlen = bf_first->bf_state.bfs_pktlen;

	/*
	 * Handle errors first!
	 *
	 * Here, handle _any_ error as an "exceeded retries" error.
	 * Later on (when filtered frames are to be specially handled)
	 * it'll have to be expanded.
	 */
#if 0
	if (ts.ts_status & HAL_TXERR_XRETRY) {
#endif
	if (ts.ts_status != 0) {
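		/*
		 * ath_tx_comp_aggr_error() acquires the TX lock itself
		 * and completes frames, so the lock has to be dropped
		 * before punting the whole list to it.
		 */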
		ATH_TX_UNLOCK(sc);
		ath_tx_comp_aggr_error(sc, bf_first, atid);
		return;
	}

	tap = ath_tx_get_tx_tid(an, tid);

	/*
	 * extract starting sequence and block-ack bitmap
	 */
	/* XXX endian-ness of seq_st, ba? */
	seq_st = ts.ts_seqnum;
	hasba = !! (ts.ts_flags & HAL_TX_BA);
	tx_ok = (ts.ts_status == 0);
	isaggr = bf_first->bf_state.bfs_aggr;
	ba[0] = ts.ts_ba_low;
	ba[1] = ts.ts_ba_high;

	/*
	 * Copy the TX completion status and the rate control
	 * series from the first descriptor, as it may be freed
	 * before the rate control code can get its grubby fingers
	 * into things.
	 */
	memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
	    "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, "
	    "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n",
	    __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
	    isaggr, seq_st, hasba, ba[0], ba[1]);

	/* Occasionally, the MAC sends a tx status for the wrong TID. */
	if (tid != ts.ts_tid) {
		device_printf(sc->sc_dev, "%s: tid %d != hw tid %d\n",
		    __func__, tid, ts.ts_tid);
		tx_ok = 0;
	}

	/* AR5416 BA bug; this requires an interface reset */
	if (isaggr && tx_ok && (! hasba)) {
		device_printf(sc->sc_dev,
		    "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, "
		    "seq_st=%d\n",
		    __func__, hasba, tx_ok, isaggr, seq_st);
		/* XXX TODO: schedule an interface reset */
#ifdef ATH_DEBUG
		ath_printtxbuf(sc, bf_first,
		    sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
#endif
	}

	/*
	 * Walk the list of frames, figure out which ones were correctly
	 * sent and which weren't.
	 */
	bf = bf_first;
	nf = bf_first->bf_state.bfs_nframes;

	/* bf_first is going to be invalid once this list is walked */
	bf_first = NULL;

	/*
	 * Walk the list of completed frames and determine
	 * which need to be completed and which need to be
	 * retransmitted.
	 *
	 * For completed frames, the completion functions need
	 * to be called at the end of this function as the last
	 * node reference may free the node.
	 *
	 * Finally, since the TXQ lock can't be held during the
	 * completion callback (to avoid lock recursion),
	 * the completion calls have to be done outside of the
	 * lock.
	 */
	while (bf) {
		nframes++;
		ba_index = ATH_BA_INDEX(seq_st,
		    SEQNO(bf->bf_state.bfs_seqno));
		bf_next = bf->bf_next;
		bf->bf_next = NULL;	/* Remove it from the aggr list */

		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
		    "%s: checking bf=%p seqno=%d; ack=%d\n",
		    __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
		    ATH_BA_ISSET(ba, ba_index));

		if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
			sc->sc_stats.ast_tx_aggr_ok++;
			ath_tx_update_baw(sc, an, atid, bf);
			bf->bf_state.bfs_dobaw = 0;
			if (! bf->bf_state.bfs_addedbaw)
				device_printf(sc->sc_dev,
				    "%s: wasn't added: seqno %d\n",
				    __func__, SEQNO(bf->bf_state.bfs_seqno));
			bf->bf_next = NULL;
			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
		} else {
			sc->sc_stats.ast_tx_aggr_fail++;
			if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
				drops++;
				bf->bf_next = NULL;
				TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
			}
			nbad++;
		}
		bf = bf_next;
	}

	/*
	 * Now that the BAW updates have been done, unlock
	 *
	 * txseq is grabbed before the lock is released so we
	 * have a consistent view of what -was- in the BAW.
	 * Anything after this point will not yet have been
	 * TXed.
	 */
	txseq = tap->txa_start;
	ATH_TX_UNLOCK(sc);
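
	/*
	 * Sanity check: the number of subframes just walked should
	 * match the count recorded when the aggregate was formed.
	 */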
	if (nframes != nf)
		device_printf(sc->sc_dev,
		    "%s: num frames seen=%d; bf nframes=%d\n",
		    __func__, nframes, nf);

	/*
	 * Now we know how many frames were bad, call the rate
	 * control code.
	 */
	if (fail == 0)
		ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes,
		    nbad);

	/*
	 * send bar if we dropped any frames
	 */
	if (drops) {
		/* Suspend the TX queue and get ready to send the BAR */
		ATH_TX_LOCK(sc);
		ath_tx_tid_bar_suspend(sc, atid);
		ATH_TX_UNLOCK(sc);
	}

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
	    "%s: txa_start now %d\n", __func__, tap->txa_start);

	ATH_TX_LOCK(sc);

	/* Prepend all frames to the beginning of the queue */
	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
		TAILQ_REMOVE(&bf_q, bf, bf_list);
		ATH_TID_INSERT_HEAD(atid, bf, bf_list);
	}

	/*
	 * Reschedule to grab some further frames.
	 */
	ath_tx_tid_sched(sc, atid);

	/*
	 * If the queue is filtered, re-schedule as required.
	 *
	 * This is required as there may be a subsequent TX descriptor
	 * for this end-node that has CLRDMASK set, so it's quite possible
	 * that a filtered frame will be followed by a non-filtered
	 * (complete or otherwise) frame.
	 *
	 * XXX should we do this before we complete the frame?
	 */
	if (atid->isfiltered)
		ath_tx_tid_filt_comp_complete(sc, atid);

finish_send_bar:

	/*
	 * Send BAR if required
	 */
	if (ath_tx_tid_bar_tx_ready(sc, atid))
		ath_tx_tid_bar_tx(sc, atid);

	ATH_TX_UNLOCK(sc);

	/* Do deferred completion */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 0);
	}
}

/*
 * Handle completion of unaggregated frames in an ADDBA
 * session.
 *
 * Fail is set to 1 if the entry is being freed via a call to
 * ath_tx_draintxq().
 */
static void
ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
{
	struct ieee80211_node *ni = bf->bf_node;
	struct ath_node *an = ATH_NODE(ni);
	int tid = bf->bf_state.bfs_tid;
	struct ath_tid *atid = &an->an_tid[tid];
	struct ath_tx_status ts;
	int drops = 0;

	/*
	 * Take a copy of this; filtering/cloning the frame may free the
	 * bf pointer.
	 */
	ts = bf->bf_status.ds_txstat;

	/*
	 * Update rate control status here, before we possibly
	 * punt to retry or cleanup.
	 *
	 * Do it outside of the TXQ lock.
	 */
	if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
		ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
		    &bf->bf_status.ds_txstat,
		    bf->bf_state.bfs_pktlen,
		    1, (ts.ts_status == 0) ? 0 : 1);

	/*
	 * This is called early so atid->hwq_depth can be tracked.
	 * This unfortunately means that it's released and regrabbed
	 * during retry and cleanup. That's rather inefficient.
	 */
	ATH_TX_LOCK(sc);
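
	/*
	 * Non-QoS traffic (TID 16) should never take the ADDBA
	 * session completion path; log it if it somehow does.
	 */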
	if (tid == IEEE80211_NONQOS_TID)
		device_printf(sc->sc_dev, "%s: TID=16!\n", __func__);

	DPRINTF(sc, ATH_DEBUG_SW_TX,
	    "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n",
	    __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
	    SEQNO(bf->bf_state.bfs_seqno));

	atid->hwq_depth--;
	if (atid->hwq_depth < 0)
		device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n",
		    __func__, atid->hwq_depth);

	/*
	 * If the TID is filtered, handle completing the filter
	 * transition before potentially kicking it to the cleanup
	 * function.
	 */
	if (atid->isfiltered)
		ath_tx_tid_filt_comp_complete(sc, atid);

	/*
	 * If a cleanup is in progress, punt to comp_cleanup
	 * rather than handling it here. It's thus their
	 * responsibility to clean up, call the completion
	 * function in net80211, etc.
	 */
	if (atid->cleanup_inprogress) {
		if (atid->isfiltered)
			device_printf(sc->sc_dev,
			    "%s: isfiltered=1, normal_comp?\n",
			    __func__);
		ATH_TX_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n",
		    __func__);
		ath_tx_comp_cleanup_unaggr(sc, bf);
		return;
	}

	/*
	 * XXX TODO: how does cleanup, BAR and filtered frame handling
	 * overlap?
	 *
	 * If the frame is filtered OR if it's any failure but
	 * the TID is filtered, the frame must be added to the
	 * filtered frame list.
	 *
	 * However - a busy buffer can't be added to the filtered
	 * list as it will end up being recycled without having
	 * been made available for the hardware.
	 */
	if ((ts.ts_status & HAL_TXERR_FILT) ||
	    (ts.ts_status != 0 && atid->isfiltered)) {
		int freeframe;

		if (fail != 0)
			device_printf(sc->sc_dev,
			    "%s: isfiltered=1, fail=%d\n",
			    __func__,
			    fail);
		freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
		if (freeframe) {
			/* Remove from BAW */
			if (bf->bf_state.bfs_addedbaw)
				drops++;
			if (bf->bf_state.bfs_dobaw) {
				ath_tx_update_baw(sc, an, atid, bf);
				if (! bf->bf_state.bfs_addedbaw)
					device_printf(sc->sc_dev,
					    "%s: wasn't added: seqno %d\n",
					    __func__,
					    SEQNO(bf->bf_state.bfs_seqno));
			}
			bf->bf_state.bfs_dobaw = 0;
		}

		/*
		 * If the frame couldn't be filtered, treat it as a drop and
		 * prepare to send a BAR.
		 */
		if (freeframe && drops)
			ath_tx_tid_bar_suspend(sc, atid);

		/*
		 * Send BAR if required
		 */
		if (ath_tx_tid_bar_tx_ready(sc, atid))
			ath_tx_tid_bar_tx(sc, atid);

		ATH_TX_UNLOCK(sc);
		/*
		 * If freeframe is set, then the frame couldn't be
		 * cloned and bf is still valid. Just complete/free it.
		 */
		if (freeframe)
			ath_tx_default_comp(sc, bf, fail);

		return;
	}
	/*
	 * Don't bother with the retry check if all frames
	 * are being failed (eg during queue deletion.)
	 */
#if 0
	if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
#endif
	if (fail == 0 && ts.ts_status != 0) {
		ATH_TX_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n",
		    __func__);
		ath_tx_aggr_retry_unaggr(sc, bf);
		return;
	}

	/* Success? Complete */
	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
	    __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
	if (bf->bf_state.bfs_dobaw) {
		ath_tx_update_baw(sc, an, atid, bf);
		bf->bf_state.bfs_dobaw = 0;
		if (! bf->bf_state.bfs_addedbaw)
			device_printf(sc->sc_dev,
			    "%s: wasn't added: seqno %d\n",
			    __func__, SEQNO(bf->bf_state.bfs_seqno));
	}

	/*
	 * If the queue is filtered, re-schedule as required.
	 *
	 * This is required as there may be a subsequent TX descriptor
	 * for this end-node that has CLRDMASK set, so it's quite possible
	 * that a filtered frame will be followed by a non-filtered
	 * (complete or otherwise) frame.
	 *
	 * XXX should we do this before we complete the frame?
	 */
	if (atid->isfiltered)
		ath_tx_tid_filt_comp_complete(sc, atid);

	/*
	 * Send BAR if required
	 */
	if (ath_tx_tid_bar_tx_ready(sc, atid))
		ath_tx_tid_bar_tx(sc, atid);

|
4699 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
| 4676 ATH_TX_UNLOCK(sc);
|
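/*
 * Editor's note: a self-contained sketch of the modulo-4096 sequence
 * arithmetic behind the BAW bookkeeping above (ath_tx_update_baw() and
 * friends).  The driver itself uses net80211's seqno macros; the SKETCH_*
 * names here are hypothetical so the snippet stands alone.
 */
#define	SKETCH_SEQ_RANGE	4096
#define	SKETCH_SEQ_SUB(a, b) \
	(((a) - (b) + SKETCH_SEQ_RANGE) % SKETCH_SEQ_RANGE)

/*
 * A frame lies inside the block-ack window when its distance from the
 * window's left edge is less than the window size (typically 64).
 */
static int
sketch_baw_within(int left_edge, int wnd_size, int seqno)
{
	return (SKETCH_SEQ_SUB(seqno, left_edge) < wnd_size);
}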
4700 4701 ath_tx_default_comp(sc, bf, fail); 4702 /* bf is freed at this point */ 4703} 4704 4705void 4706ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 4707{ 4708 if (bf->bf_state.bfs_aggr) 4709 ath_tx_aggr_comp_aggr(sc, bf, fail); 4710 else 4711 ath_tx_aggr_comp_unaggr(sc, bf, fail); 4712} 4713 4714/* 4715 * Schedule some packets from the given node/TID to the hardware. 4716 * 4717 * This is the aggregate version. 4718 */ 4719void 4720ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an, 4721 struct ath_tid *tid) 4722{ 4723 struct ath_buf *bf; 4724 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 4725 struct ieee80211_tx_ampdu *tap; 4726 ATH_AGGR_STATUS status; 4727 ath_bufhead bf_q; 4728 4729 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
| 4677 4678 ath_tx_default_comp(sc, bf, fail); 4679 /* bf is freed at this point */ 4680} 4681 4682void 4683ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 4684{ 4685 if (bf->bf_state.bfs_aggr) 4686 ath_tx_aggr_comp_aggr(sc, bf, fail); 4687 else 4688 ath_tx_aggr_comp_unaggr(sc, bf, fail); 4689} 4690 4691/* 4692 * Schedule some packets from the given node/TID to the hardware. 4693 * 4694 * This is the aggregate version. 4695 */ 4696void 4697ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an, 4698 struct ath_tid *tid) 4699{ 4700 struct ath_buf *bf; 4701 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 4702 struct ieee80211_tx_ampdu *tap; 4703 ATH_AGGR_STATUS status; 4704 ath_bufhead bf_q; 4705 4706 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
|
4730 ATH_TXQ_LOCK_ASSERT(txq);
| 4707 ATH_TX_LOCK_ASSERT(sc);
|
4731 4732 tap = ath_tx_get_tx_tid(an, tid->tid); 4733 4734 if (tid->tid == IEEE80211_NONQOS_TID) 4735 device_printf(sc->sc_dev, "%s: called for TID=NONQOS_TID?\n", 4736 __func__); 4737 4738 for (;;) { 4739 status = ATH_AGGR_DONE; 4740 4741 /* 4742 * If the upper layer has paused the TID, don't 4743 * queue any further packets. 4744 * 4745 * This can also occur from the completion task because 4746 * of packet loss; but as it's serialised with this code, 4747 * it won't "appear" half way through queuing packets. 4748 */ 4749 if (tid->paused) 4750 break; 4751 4752 bf = ATH_TID_FIRST(tid); 4753 if (bf == NULL) { 4754 break; 4755 } 4756 4757 /* 4758 * If the packet doesn't fall within the BAW (eg a NULL 4759 * data frame), schedule it directly; continue. 4760 */ 4761 if (! bf->bf_state.bfs_dobaw) { 4762 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4763 "%s: non-baw packet\n", 4764 __func__); 4765 ATH_TID_REMOVE(tid, bf, bf_list); 4766 4767 if (bf->bf_state.bfs_nframes > 1) 4768 device_printf(sc->sc_dev, 4769 "%s: aggr=%d, nframes=%d\n", 4770 __func__, 4771 bf->bf_state.bfs_aggr, 4772 bf->bf_state.bfs_nframes); 4773 4774 /* 4775 * This shouldn't happen - such frames shouldn't 4776 * ever have been queued as an aggregate in the 4777 * first place. However, make sure the fields 4778 * are correctly setup just to be totally sure. 4779 */ 4780 bf->bf_state.bfs_aggr = 0; 4781 bf->bf_state.bfs_nframes = 1; 4782 4783 /* Update CLRDMASK just before this frame is queued */ 4784 ath_tx_update_clrdmask(sc, tid, bf); 4785 4786 ath_tx_do_ratelookup(sc, bf); 4787 ath_tx_calc_duration(sc, bf); 4788 ath_tx_calc_protection(sc, bf); 4789 ath_tx_set_rtscts(sc, bf); 4790 ath_tx_rate_fill_rcflags(sc, bf); 4791 ath_tx_setds(sc, bf); 4792 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 4793 4794 sc->sc_aggr_stats.aggr_nonbaw_pkt++; 4795 4796 /* Queue the packet; continue */ 4797 goto queuepkt; 4798 } 4799 4800 TAILQ_INIT(&bf_q); 4801 4802 /* 4803 * Do a rate control lookup on the first frame in the 4804 * list. The rate control code needs that to occur 4805 * before it can determine whether to TX. 4806 * It's inaccurate because the rate control code doesn't 4807 * really "do" aggregate lookups, so it only considers 4808 * the size of the first frame. 4809 */ 4810 ath_tx_do_ratelookup(sc, bf); 4811 bf->bf_state.bfs_rc[3].rix = 0; 4812 bf->bf_state.bfs_rc[3].tries = 0; 4813 4814 ath_tx_calc_duration(sc, bf); 4815 ath_tx_calc_protection(sc, bf); 4816 4817 ath_tx_set_rtscts(sc, bf); 4818 ath_tx_rate_fill_rcflags(sc, bf); 4819 4820 status = ath_tx_form_aggr(sc, an, tid, &bf_q); 4821 4822 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4823 "%s: ath_tx_form_aggr() status=%d\n", __func__, status); 4824 4825 /* 4826 * No frames to be picked up - out of BAW 4827 */ 4828 if (TAILQ_EMPTY(&bf_q)) 4829 break; 4830 4831 /* 4832 * This assumes that the descriptor list in the ath_bufhead 4833 * is already linked together via bf_next pointers. 4834 */ 4835 bf = TAILQ_FIRST(&bf_q); 4836 4837 if (status == ATH_AGGR_8K_LIMITED) 4838 sc->sc_aggr_stats.aggr_rts_aggr_limited++; 4839 4840 /* 4841 * If it's the only frame, send as non-aggregate; 4842 * assume that ath_tx_form_aggr() has checked 4843 * whether it's in the BAW and added it appropriately. 
4844 */ 4845 if (bf->bf_state.bfs_nframes == 1) { 4846 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4847 "%s: single-frame aggregate\n", __func__); 4848 4849 /* Update CLRDMASK just before this frame is queued */ 4850 ath_tx_update_clrdmask(sc, tid, bf); 4851 4852 bf->bf_state.bfs_aggr = 0; 4853 bf->bf_state.bfs_ndelim = 0; 4854 ath_tx_setds(sc, bf); 4855 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 4856 if (status == ATH_AGGR_BAW_CLOSED) 4857 sc->sc_aggr_stats.aggr_baw_closed_single_pkt++; 4858 else 4859 sc->sc_aggr_stats.aggr_single_pkt++; 4860 } else { 4861 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4862 "%s: multi-frame aggregate: %d frames, " 4863 "length %d\n", 4864 __func__, bf->bf_state.bfs_nframes, 4865 bf->bf_state.bfs_al); 4866 bf->bf_state.bfs_aggr = 1; 4867 sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++; 4868 sc->sc_aggr_stats.aggr_aggr_pkt++; 4869 4870 /* Update CLRDMASK just before this frame is queued */ 4871 ath_tx_update_clrdmask(sc, tid, bf); 4872 4873 /* 4874 * Calculate the duration/protection as required. 4875 */ 4876 ath_tx_calc_duration(sc, bf); 4877 ath_tx_calc_protection(sc, bf); 4878 4879 /* 4880 * Update the rate and rtscts information based on the 4881 * rate decision made by the rate control code; 4882 * the first frame in the aggregate needs it. 4883 */ 4884 ath_tx_set_rtscts(sc, bf); 4885 4886 /* 4887 * Setup the relevant descriptor fields 4888 * for aggregation. The first descriptor 4889 * already points to the rest in the chain. 4890 */ 4891 ath_tx_setds_11n(sc, bf); 4892 4893 } 4894 queuepkt: 4895 //txq = bf->bf_state.bfs_txq; 4896 4897 /* Set completion handler, multi-frame aggregate or not */ 4898 bf->bf_comp = ath_tx_aggr_comp; 4899 4900 if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID) 4901 device_printf(sc->sc_dev, "%s: TID=16?\n", __func__); 4902 4903 /* Punt to txq */ 4904 ath_tx_handoff(sc, txq, bf); 4905 4906 /* Track outstanding buffer count to hardware */ 4907 /* aggregates are "one" buffer */ 4908 tid->hwq_depth++; 4909 4910 /* 4911 * Break out if ath_tx_form_aggr() indicated 4912 * there can't be any further progress (eg BAW is full.) 4913 * Checking for an empty txq is done above. 4914 * 4915 * XXX locking on txq here? 4916 */ 4917 if (txq->axq_aggr_depth >= sc->sc_hwq_limit || 4918 status == ATH_AGGR_BAW_CLOSED) 4919 break; 4920 } 4921} 4922 4923/* 4924 * Schedule some packets from the given node/TID to the hardware. 4925 */ 4926void 4927ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an, 4928 struct ath_tid *tid) 4929{ 4930 struct ath_buf *bf; 4931 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 4932 4933 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n", 4934 __func__, an, tid->tid); 4935
| 4708 4709 tap = ath_tx_get_tx_tid(an, tid->tid); 4710 4711 if (tid->tid == IEEE80211_NONQOS_TID) 4712 device_printf(sc->sc_dev, "%s: called for TID=NONQOS_TID?\n", 4713 __func__); 4714 4715 for (;;) { 4716 status = ATH_AGGR_DONE; 4717 4718 /* 4719 * If the upper layer has paused the TID, don't 4720 * queue any further packets. 4721 * 4722 * This can also occur from the completion task because 4723 * of packet loss; but as it's serialised with this code, 4724 * it won't "appear" half way through queuing packets. 4725 */ 4726 if (tid->paused) 4727 break; 4728 4729 bf = ATH_TID_FIRST(tid); 4730 if (bf == NULL) { 4731 break; 4732 } 4733 4734 /* 4735 * If the packet doesn't fall within the BAW (eg a NULL 4736 * data frame), schedule it directly; continue. 4737 */ 4738 if (! bf->bf_state.bfs_dobaw) { 4739 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4740 "%s: non-baw packet\n", 4741 __func__); 4742 ATH_TID_REMOVE(tid, bf, bf_list); 4743 4744 if (bf->bf_state.bfs_nframes > 1) 4745 device_printf(sc->sc_dev, 4746 "%s: aggr=%d, nframes=%d\n", 4747 __func__, 4748 bf->bf_state.bfs_aggr, 4749 bf->bf_state.bfs_nframes); 4750 4751 /* 4752 * This shouldn't happen - such frames shouldn't 4753 * ever have been queued as an aggregate in the 4754 * first place. However, make sure the fields 4755 * are correctly setup just to be totally sure. 4756 */ 4757 bf->bf_state.bfs_aggr = 0; 4758 bf->bf_state.bfs_nframes = 1; 4759 4760 /* Update CLRDMASK just before this frame is queued */ 4761 ath_tx_update_clrdmask(sc, tid, bf); 4762 4763 ath_tx_do_ratelookup(sc, bf); 4764 ath_tx_calc_duration(sc, bf); 4765 ath_tx_calc_protection(sc, bf); 4766 ath_tx_set_rtscts(sc, bf); 4767 ath_tx_rate_fill_rcflags(sc, bf); 4768 ath_tx_setds(sc, bf); 4769 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 4770 4771 sc->sc_aggr_stats.aggr_nonbaw_pkt++; 4772 4773 /* Queue the packet; continue */ 4774 goto queuepkt; 4775 } 4776 4777 TAILQ_INIT(&bf_q); 4778 4779 /* 4780 * Do a rate control lookup on the first frame in the 4781 * list. The rate control code needs that to occur 4782 * before it can determine whether to TX. 4783 * It's inaccurate because the rate control code doesn't 4784 * really "do" aggregate lookups, so it only considers 4785 * the size of the first frame. 4786 */ 4787 ath_tx_do_ratelookup(sc, bf); 4788 bf->bf_state.bfs_rc[3].rix = 0; 4789 bf->bf_state.bfs_rc[3].tries = 0; 4790 4791 ath_tx_calc_duration(sc, bf); 4792 ath_tx_calc_protection(sc, bf); 4793 4794 ath_tx_set_rtscts(sc, bf); 4795 ath_tx_rate_fill_rcflags(sc, bf); 4796 4797 status = ath_tx_form_aggr(sc, an, tid, &bf_q); 4798 4799 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4800 "%s: ath_tx_form_aggr() status=%d\n", __func__, status); 4801 4802 /* 4803 * No frames to be picked up - out of BAW 4804 */ 4805 if (TAILQ_EMPTY(&bf_q)) 4806 break; 4807 4808 /* 4809 * This assumes that the descriptor list in the ath_bufhead 4810 * is already linked together via bf_next pointers. 4811 */ 4812 bf = TAILQ_FIRST(&bf_q); 4813 4814 if (status == ATH_AGGR_8K_LIMITED) 4815 sc->sc_aggr_stats.aggr_rts_aggr_limited++; 4816 4817 /* 4818 * If it's the only frame, send as non-aggregate; 4819 * assume that ath_tx_form_aggr() has checked 4820 * whether it's in the BAW and added it appropriately. 
4821 */ 4822 if (bf->bf_state.bfs_nframes == 1) { 4823 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4824 "%s: single-frame aggregate\n", __func__); 4825 4826 /* Update CLRDMASK just before this frame is queued */ 4827 ath_tx_update_clrdmask(sc, tid, bf); 4828 4829 bf->bf_state.bfs_aggr = 0; 4830 bf->bf_state.bfs_ndelim = 0; 4831 ath_tx_setds(sc, bf); 4832 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 4833 if (status == ATH_AGGR_BAW_CLOSED) 4834 sc->sc_aggr_stats.aggr_baw_closed_single_pkt++; 4835 else 4836 sc->sc_aggr_stats.aggr_single_pkt++; 4837 } else { 4838 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4839 "%s: multi-frame aggregate: %d frames, " 4840 "length %d\n", 4841 __func__, bf->bf_state.bfs_nframes, 4842 bf->bf_state.bfs_al); 4843 bf->bf_state.bfs_aggr = 1; 4844 sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++; 4845 sc->sc_aggr_stats.aggr_aggr_pkt++; 4846 4847 /* Update CLRDMASK just before this frame is queued */ 4848 ath_tx_update_clrdmask(sc, tid, bf); 4849 4850 /* 4851 * Calculate the duration/protection as required. 4852 */ 4853 ath_tx_calc_duration(sc, bf); 4854 ath_tx_calc_protection(sc, bf); 4855 4856 /* 4857 * Update the rate and rtscts information based on the 4858 * rate decision made by the rate control code; 4859 * the first frame in the aggregate needs it. 4860 */ 4861 ath_tx_set_rtscts(sc, bf); 4862 4863 /* 4864 * Setup the relevant descriptor fields 4865 * for aggregation. The first descriptor 4866 * already points to the rest in the chain. 4867 */ 4868 ath_tx_setds_11n(sc, bf); 4869 4870 } 4871 queuepkt: 4872 //txq = bf->bf_state.bfs_txq; 4873 4874 /* Set completion handler, multi-frame aggregate or not */ 4875 bf->bf_comp = ath_tx_aggr_comp; 4876 4877 if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID) 4878 device_printf(sc->sc_dev, "%s: TID=16?\n", __func__); 4879 4880 /* Punt to txq */ 4881 ath_tx_handoff(sc, txq, bf); 4882 4883 /* Track outstanding buffer count to hardware */ 4884 /* aggregates are "one" buffer */ 4885 tid->hwq_depth++; 4886 4887 /* 4888 * Break out if ath_tx_form_aggr() indicated 4889 * there can't be any further progress (eg BAW is full.) 4890 * Checking for an empty txq is done above. 4891 * 4892 * XXX locking on txq here? 4893 */ 4894 if (txq->axq_aggr_depth >= sc->sc_hwq_limit || 4895 status == ATH_AGGR_BAW_CLOSED) 4896 break; 4897 } 4898} 4899 4900/* 4901 * Schedule some packets from the given node/TID to the hardware. 4902 */ 4903void 4904ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an, 4905 struct ath_tid *tid) 4906{ 4907 struct ath_buf *bf; 4908 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 4909 4910 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n", 4911 __func__, an, tid->tid); 4912
|
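/*
 * Editor's note: bfs_rc[] above is a four-entry multi-rate-retry series.
 * Zeroing series 3 before forming an aggregate - as the code above does -
 * presumably keeps the final fallback series out of A-MPDU transmission;
 * that reading is the editor's assumption, not stated in the source.  The
 * struct below is a hypothetical stand-in mirroring only the fields used.
 */
struct sketch_rc_series {
	int	rix;		/* rate table index */
	int	tries;		/* TX attempts at this rate */
};

static void
sketch_clear_final_series(struct sketch_rc_series rc[4])
{
	/* Series 0..2 stay as the rate control module filled them in. */
	rc[3].rix = 0;
	rc[3].tries = 0;
}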
4936 ATH_TID_LOCK_ASSERT(sc, tid);
| 4913 ATH_TX_LOCK_ASSERT(sc);
|
4937 4938 /* Check - is AMPDU pending or running? then print out something */ 4939 if (ath_tx_ampdu_pending(sc, an, tid->tid)) 4940 device_printf(sc->sc_dev, "%s: tid=%d, ampdu pending?\n", 4941 __func__, tid->tid); 4942 if (ath_tx_ampdu_running(sc, an, tid->tid)) 4943 device_printf(sc->sc_dev, "%s: tid=%d, ampdu running?\n", 4944 __func__, tid->tid); 4945 4946 for (;;) { 4947 4948 /* 4949 * If the upper layers have paused the TID, don't 4950 * queue any further packets. 4951 */ 4952 if (tid->paused) 4953 break; 4954 4955 bf = ATH_TID_FIRST(tid); 4956 if (bf == NULL) { 4957 break; 4958 } 4959 4960 ATH_TID_REMOVE(tid, bf, bf_list); 4961 4962 KASSERT(txq == bf->bf_state.bfs_txq, ("txqs not equal!\n")); 4963 4964 /* Sanity check! */ 4965 if (tid->tid != bf->bf_state.bfs_tid) { 4966 device_printf(sc->sc_dev, "%s: bfs_tid %d !=" 4967 " tid %d\n", 4968 __func__, bf->bf_state.bfs_tid, tid->tid); 4969 } 4970 /* Normal completion handler */ 4971 bf->bf_comp = ath_tx_normal_comp; 4972 4973 /* 4974 * Override this for now, until the non-aggregate 4975 * completion handler correctly handles software retransmits. 4976 */ 4977 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 4978 4979 /* Update CLRDMASK just before this frame is queued */ 4980 ath_tx_update_clrdmask(sc, tid, bf); 4981 4982 /* Program descriptors + rate control */ 4983 ath_tx_do_ratelookup(sc, bf); 4984 ath_tx_calc_duration(sc, bf); 4985 ath_tx_calc_protection(sc, bf); 4986 ath_tx_set_rtscts(sc, bf); 4987 ath_tx_rate_fill_rcflags(sc, bf); 4988 ath_tx_setds(sc, bf); 4989 4990 /* Track outstanding buffer count to hardware */ 4991 /* aggregates are "one" buffer */ 4992 tid->hwq_depth++; 4993 4994 /* Punt to hardware or software txq */ 4995 ath_tx_handoff(sc, txq, bf); 4996 } 4997} 4998 4999/* 5000 * Schedule some packets to the given hardware queue. 5001 * 5002 * This function walks the list of TIDs (ie, ath_node TIDs 5003 * with queued traffic) and attempts to schedule traffic 5004 * from them. 5005 * 5006 * TID scheduling is implemented as a FIFO, with TIDs being 5007 * added to the end of the queue after some frames have been 5008 * scheduled. 5009 */ 5010void 5011ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq) 5012{ 5013 struct ath_tid *tid, *next, *last; 5014
| 4914 4915 /* Check - is AMPDU pending or running? then print out something */ 4916 if (ath_tx_ampdu_pending(sc, an, tid->tid)) 4917 device_printf(sc->sc_dev, "%s: tid=%d, ampdu pending?\n", 4918 __func__, tid->tid); 4919 if (ath_tx_ampdu_running(sc, an, tid->tid)) 4920 device_printf(sc->sc_dev, "%s: tid=%d, ampdu running?\n", 4921 __func__, tid->tid); 4922 4923 for (;;) { 4924 4925 /* 4926 * If the upper layers have paused the TID, don't 4927 * queue any further packets. 4928 */ 4929 if (tid->paused) 4930 break; 4931 4932 bf = ATH_TID_FIRST(tid); 4933 if (bf == NULL) { 4934 break; 4935 } 4936 4937 ATH_TID_REMOVE(tid, bf, bf_list); 4938 4939 KASSERT(txq == bf->bf_state.bfs_txq, ("txqs not equal!\n")); 4940 4941 /* Sanity check! */ 4942 if (tid->tid != bf->bf_state.bfs_tid) { 4943 device_printf(sc->sc_dev, "%s: bfs_tid %d !=" 4944 " tid %d\n", 4945 __func__, bf->bf_state.bfs_tid, tid->tid); 4946 } 4947 /* Normal completion handler */ 4948 bf->bf_comp = ath_tx_normal_comp; 4949 4950 /* 4951 * Override this for now, until the non-aggregate 4952 * completion handler correctly handles software retransmits. 4953 */ 4954 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 4955 4956 /* Update CLRDMASK just before this frame is queued */ 4957 ath_tx_update_clrdmask(sc, tid, bf); 4958 4959 /* Program descriptors + rate control */ 4960 ath_tx_do_ratelookup(sc, bf); 4961 ath_tx_calc_duration(sc, bf); 4962 ath_tx_calc_protection(sc, bf); 4963 ath_tx_set_rtscts(sc, bf); 4964 ath_tx_rate_fill_rcflags(sc, bf); 4965 ath_tx_setds(sc, bf); 4966 4967 /* Track outstanding buffer count to hardware */ 4968 /* aggregates are "one" buffer */ 4969 tid->hwq_depth++; 4970 4971 /* Punt to hardware or software txq */ 4972 ath_tx_handoff(sc, txq, bf); 4973 } 4974} 4975 4976/* 4977 * Schedule some packets to the given hardware queue. 4978 * 4979 * This function walks the list of TIDs (ie, ath_node TIDs 4980 * with queued traffic) and attempts to schedule traffic 4981 * from them. 4982 * 4983 * TID scheduling is implemented as a FIFO, with TIDs being 4984 * added to the end of the queue after some frames have been 4985 * scheduled. 4986 */ 4987void 4988ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq) 4989{ 4990 struct ath_tid *tid, *next, *last; 4991
|
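/*
 * Editor's note: HAL_TXDESC_CLRDMASK, as the flag name suggests, asks the
 * MAC to clear the destination filter mask when this descriptor is sent,
 * so a station that was being filtered starts receiving frames again.  A
 * minimal sketch of the "set it on the first frame after a filter/pause
 * event" pattern used above; both names below are hypothetical.
 */
#define	SKETCH_TXDESC_CLRDMASK	0x0001

static void
sketch_update_clrdmask(int *txflags, int *clrdmask_pending)
{
	if (*clrdmask_pending) {
		*txflags |= SKETCH_TXDESC_CLRDMASK;
		*clrdmask_pending = 0;
	}
}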
5015 ATH_TXQ_LOCK_ASSERT(txq);
| 4992 ATH_TX_LOCK_ASSERT(sc);
|
5016 5017 /* 5018 * Don't schedule if the hardware queue is busy. 5019 * This (hopefully) gives some more time to aggregate 5020 * some packets in the aggregation queue. 5021 */ 5022 if (txq->axq_aggr_depth >= sc->sc_hwq_limit) { 5023 sc->sc_aggr_stats.aggr_sched_nopkt++; 5024 return; 5025 } 5026 5027 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s); 5028 5029 TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) { 5030 /* 5031 * Suspend paused queues here; they'll be resumed 5032 * once the addba completes or times out. 5033 */ 5034 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n", 5035 __func__, tid->tid, tid->paused); 5036 ath_tx_tid_unsched(sc, tid); 5037 if (tid->paused) { 5038 continue; 5039 } 5040 if (ath_tx_ampdu_running(sc, tid->an, tid->tid)) 5041 ath_tx_tid_hw_queue_aggr(sc, tid->an, tid); 5042 else 5043 ath_tx_tid_hw_queue_norm(sc, tid->an, tid); 5044 5045 /* Not empty? Re-schedule */ 5046 if (tid->axq_depth != 0) 5047 ath_tx_tid_sched(sc, tid); 5048 5049 /* Give the software queue time to aggregate more packets */ 5050 if (txq->axq_aggr_depth >= sc->sc_hwq_limit) { 5051 break; 5052 } 5053 5054 /* 5055 * If this was the last entry on the original list, stop. 5056 * Otherwise nodes that have been rescheduled onto the end 5057 * of the TID FIFO list will just keep being rescheduled. 5058 */ 5059 if (tid == last) 5060 break; 5061 } 5062} 5063 5064/* 5065 * TX addba handling 5066 */ 5067 5068/* 5069 * Return net80211 TID struct pointer, or NULL for none 5070 */ 5071struct ieee80211_tx_ampdu * 5072ath_tx_get_tx_tid(struct ath_node *an, int tid) 5073{ 5074 struct ieee80211_node *ni = &an->an_node; 5075 struct ieee80211_tx_ampdu *tap; 5076 5077 if (tid == IEEE80211_NONQOS_TID) 5078 return NULL; 5079 5080 tap = &ni->ni_tx_ampdu[tid]; 5081 return tap; 5082} 5083 5084/* 5085 * Is AMPDU-TX running? 5086 */ 5087static int 5088ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid) 5089{ 5090 struct ieee80211_tx_ampdu *tap; 5091 5092 if (tid == IEEE80211_NONQOS_TID) 5093 return 0; 5094 5095 tap = ath_tx_get_tx_tid(an, tid); 5096 if (tap == NULL) 5097 return 0; /* Not valid; default to not running */ 5098 5099 return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING); 5100} 5101 5102/* 5103 * Is AMPDU-TX negotiation pending? 5104 */ 5105static int 5106ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid) 5107{ 5108 struct ieee80211_tx_ampdu *tap; 5109 5110 if (tid == IEEE80211_NONQOS_TID) 5111 return 0; 5112 5113 tap = ath_tx_get_tx_tid(an, tid); 5114 if (tap == NULL) 5115 return 0; /* Not valid; default to not pending */ 5116 5117 return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND); 5118} 5119 5120/* 5121 * Is AMPDU-TX pending for the given TID? 5122 */ 5123 5124 5125/* 5126 * Method to handle sending an ADDBA request. 5127 * 5128 * We tap this so the relevant flags can be set to pause the TID 5129 * whilst waiting for the response. 5130 * 5131 * XXX there's no timeout handler we can override? 5132 */ 5133int 5134ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5135 int dialogtoken, int baparamset, int batimeout) 5136{ 5137 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5138 int tid = tap->txa_tid; 5139 struct ath_node *an = ATH_NODE(ni); 5140 struct ath_tid *atid = &an->an_tid[tid]; 5141 5142 /* 5143 * XXX danger Will Robinson! 5144 * 5145 * Although the taskqueue may be running and scheduling some more 5146 * packets, these should all be _before_ the addba sequence number. 
5147 * However, net80211 will keep self-assigning sequence numbers 5148 * until addba has been negotiated. 5149 * 5150 * In the past, these packets would be "paused" (which still works 5151 * fine, as they're being scheduled to the driver in the same 5152 * serialised method which is calling the addba request routine) 5153 * and when the aggregation session begins, they'll be dequeued 5154 * as aggregate packets and added to the BAW. However, now there's 5155 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these 5156 * packets. Thus they never get included in the BAW tracking and 5157 * this can cause the initial burst of packets after the addba 5158 * negotiation to "hang", as they quickly fall outside the BAW. 5159 * 5160 * The "eventual" solution should be to tag these packets with 5161 * dobaw. Although net80211 has given us a sequence number, 5162 * it'll be "after" the left edge of the BAW and thus it'll 5163 * fall within it. 5164 */
| 4993 4994 /* 4995 * Don't schedule if the hardware queue is busy. 4996 * This (hopefully) gives some more time to aggregate 4997 * some packets in the aggregation queue. 4998 */ 4999 if (txq->axq_aggr_depth >= sc->sc_hwq_limit) { 5000 sc->sc_aggr_stats.aggr_sched_nopkt++; 5001 return; 5002 } 5003 5004 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s); 5005 5006 TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) { 5007 /* 5008 * Suspend paused queues here; they'll be resumed 5009 * once the addba completes or times out. 5010 */ 5011 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n", 5012 __func__, tid->tid, tid->paused); 5013 ath_tx_tid_unsched(sc, tid); 5014 if (tid->paused) { 5015 continue; 5016 } 5017 if (ath_tx_ampdu_running(sc, tid->an, tid->tid)) 5018 ath_tx_tid_hw_queue_aggr(sc, tid->an, tid); 5019 else 5020 ath_tx_tid_hw_queue_norm(sc, tid->an, tid); 5021 5022 /* Not empty? Re-schedule */ 5023 if (tid->axq_depth != 0) 5024 ath_tx_tid_sched(sc, tid); 5025 5026 /* Give the software queue time to aggregate more packets */ 5027 if (txq->axq_aggr_depth >= sc->sc_hwq_limit) { 5028 break; 5029 } 5030 5031 /* 5032 * If this was the last entry on the original list, stop. 5033 * Otherwise nodes that have been rescheduled onto the end 5034 * of the TID FIFO list will just keep being rescheduled. 5035 */ 5036 if (tid == last) 5037 break; 5038 } 5039} 5040 5041/* 5042 * TX addba handling 5043 */ 5044 5045/* 5046 * Return net80211 TID struct pointer, or NULL for none 5047 */ 5048struct ieee80211_tx_ampdu * 5049ath_tx_get_tx_tid(struct ath_node *an, int tid) 5050{ 5051 struct ieee80211_node *ni = &an->an_node; 5052 struct ieee80211_tx_ampdu *tap; 5053 5054 if (tid == IEEE80211_NONQOS_TID) 5055 return NULL; 5056 5057 tap = &ni->ni_tx_ampdu[tid]; 5058 return tap; 5059} 5060 5061/* 5062 * Is AMPDU-TX running? 5063 */ 5064static int 5065ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid) 5066{ 5067 struct ieee80211_tx_ampdu *tap; 5068 5069 if (tid == IEEE80211_NONQOS_TID) 5070 return 0; 5071 5072 tap = ath_tx_get_tx_tid(an, tid); 5073 if (tap == NULL) 5074 return 0; /* Not valid; default to not running */ 5075 5076 return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING); 5077} 5078 5079/* 5080 * Is AMPDU-TX negotiation pending? 5081 */ 5082static int 5083ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid) 5084{ 5085 struct ieee80211_tx_ampdu *tap; 5086 5087 if (tid == IEEE80211_NONQOS_TID) 5088 return 0; 5089 5090 tap = ath_tx_get_tx_tid(an, tid); 5091 if (tap == NULL) 5092 return 0; /* Not valid; default to not pending */ 5093 5094 return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND); 5095} 5096 5097/* 5098 * Is AMPDU-TX pending for the given TID? 5099 */ 5100 5101 5102/* 5103 * Method to handle sending an ADDBA request. 5104 * 5105 * We tap this so the relevant flags can be set to pause the TID 5106 * whilst waiting for the response. 5107 * 5108 * XXX there's no timeout handler we can override? 5109 */ 5110int 5111ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5112 int dialogtoken, int baparamset, int batimeout) 5113{ 5114 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5115 int tid = tap->txa_tid; 5116 struct ath_node *an = ATH_NODE(ni); 5117 struct ath_tid *atid = &an->an_tid[tid]; 5118 5119 /* 5120 * XXX danger Will Robinson! 5121 * 5122 * Although the taskqueue may be running and scheduling some more 5123 * packets, these should all be _before_ the addba sequence number. 
5124 * However, net80211 will keep self-assigning sequence numbers 5125 * until addba has been negotiated. 5126 * 5127 * In the past, these packets would be "paused" (which still works 5128 * fine, as they're being scheduled to the driver in the same 5129 * serialised method which is calling the addba request routine) 5130 * and when the aggregation session begins, they'll be dequeued 5131 * as aggregate packets and added to the BAW. However, now there's 5132 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these 5133 * packets. Thus they never get included in the BAW tracking and 5134 * this can cause the initial burst of packets after the addba 5135 * negotiation to "hang", as they quickly fall outside the BAW. 5136 * 5137 * The "eventual" solution should be to tag these packets with 5138 * dobaw. Although net80211 has given us a sequence number, 5139 * it'll be "after" the left edge of the BAW and thus it'll 5140 * fall within it. 5141 */
|
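/*
 * Editor's note: a self-contained sketch of the FIFO TID scheduling policy
 * ath_txq_sched() above implements: pop each TID from the head, let it
 * queue traffic, re-append it if frames remain, and stop once the snapshot
 * of the original tail is reached so re-appended TIDs don't spin the loop
 * forever.  The sched_* names are hypothetical.
 */
#include <sys/queue.h>

struct sched_tid {
	TAILQ_ENTRY(sched_tid) qelem;
	int queued_frames;
};
TAILQ_HEAD(sched_tidq, sched_tid);

static void
sketch_sched(struct sched_tidq *tidq)
{
	struct sched_tid *tid, *next, *last;

	last = TAILQ_LAST(tidq, sched_tidq);
	TAILQ_FOREACH_SAFE(tid, tidq, qelem, next) {
		TAILQ_REMOVE(tidq, tid, qelem);
		/* ... hand some of this TID's frames to the hardware ... */
		if (tid->queued_frames != 0)
			TAILQ_INSERT_TAIL(tidq, tid, qelem);
		if (tid == last)	/* end of the original list */
			break;
	}
}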
5165 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
| 5142 ATH_TX_LOCK(sc);
|
5166 /* 5167 * This is a bit annoying. Until net80211 HT code inherits some 5168 * (any) locking, we may have this called in parallel BUT only 5169 * one response/timeout will be called. Grr. 5170 */ 5171 if (atid->addba_tx_pending == 0) { 5172 ath_tx_tid_pause(sc, atid); 5173 atid->addba_tx_pending = 1; 5174 }
| 5143 /* 5144 * This is a bit annoying. Until net80211 HT code inherits some 5145 * (any) locking, we may have this called in parallel BUT only 5146 * one response/timeout will be called. Grr. 5147 */ 5148 if (atid->addba_tx_pending == 0) { 5149 ath_tx_tid_pause(sc, atid); 5150 atid->addba_tx_pending = 1; 5151 }
|
5175 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
| 5152 ATH_TX_UNLOCK(sc);
|
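/*
 * Editor's note: ath_tx_tid_pause()/ath_tx_tid_resume() appear to act as a
 * pause reference count, which is why the addba_tx_pending guard above
 * matters: the TID must be paused exactly once per outstanding request.  A
 * minimal sketch under that assumption; names are hypothetical.
 */
static void
sketch_tid_pause(int *pause_count)
{
	(*pause_count)++;
}

static void
sketch_tid_resume(int *pause_count, void (*reschedule)(void))
{
	if (*pause_count > 0 && --(*pause_count) == 0)
		reschedule();	/* TID becomes eligible for TX again */
}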
5176 5177 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5178 "%s: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n", 5179 __func__, dialogtoken, baparamset, batimeout); 5180 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5181 "%s: txa_start=%d, ni_txseqs=%d\n", 5182 __func__, tap->txa_start, ni->ni_txseqs[tid]); 5183 5184 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, 5185 batimeout); 5186} 5187 5188/* 5189 * Handle an ADDBA response. 5190 * 5191 * We unpause the queue so TX'ing can resume. 5192 * 5193 * Any packets TX'ed from this point should be "aggregate" (whether 5194 * aggregate or not) so the BAW is updated. 5195 * 5196 * Note! net80211 keeps self-assigning sequence numbers until 5197 * ampdu is negotiated. This means the initially-negotiated BAW left 5198 * edge won't match the ni->ni_txseq. 5199 * 5200 * So, being very dirty, the BAW left edge is "slid" here to match 5201 * ni->ni_txseq. 5202 * 5203 * What likely SHOULD happen is that all packets subsequent to the 5204 * addba request should be tagged as aggregate and queued as non-aggregate 5205 * frames; thus updating the BAW. For now though, I'll just slide the 5206 * window. 5207 */ 5208int 5209ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5210 int status, int code, int batimeout) 5211{ 5212 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5213 int tid = tap->txa_tid; 5214 struct ath_node *an = ATH_NODE(ni); 5215 struct ath_tid *atid = &an->an_tid[tid]; 5216 int r; 5217 5218 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5219 "%s: called; status=%d, code=%d, batimeout=%d\n", __func__, 5220 status, code, batimeout); 5221 5222 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5223 "%s: txa_start=%d, ni_txseqs=%d\n", 5224 __func__, tap->txa_start, ni->ni_txseqs[tid]); 5225 5226 /* 5227 * Call this first, so the interface flags get updated 5228 * before the TID is unpaused. Otherwise a race condition 5229 * exists where the unpaused TID still doesn't yet have 5230 * IEEE80211_AGGR_RUNNING set. 5231 */ 5232 r = sc->sc_addba_response(ni, tap, status, code, batimeout); 5233
| 5153 5154 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5155 "%s: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n", 5156 __func__, dialogtoken, baparamset, batimeout); 5157 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5158 "%s: txa_start=%d, ni_txseqs=%d\n", 5159 __func__, tap->txa_start, ni->ni_txseqs[tid]); 5160 5161 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, 5162 batimeout); 5163} 5164 5165/* 5166 * Handle an ADDBA response. 5167 * 5168 * We unpause the queue so TX'ing can resume. 5169 * 5170 * Any packets TX'ed from this point should be "aggregate" (whether 5171 * aggregate or not) so the BAW is updated. 5172 * 5173 * Note! net80211 keeps self-assigning sequence numbers until 5174 * ampdu is negotiated. This means the initially-negotiated BAW left 5175 * edge won't match the ni->ni_txseq. 5176 * 5177 * So, being very dirty, the BAW left edge is "slid" here to match 5178 * ni->ni_txseq. 5179 * 5180 * What likely SHOULD happen is that all packets subsequent to the 5181 * addba request should be tagged as aggregate and queued as non-aggregate 5182 * frames; thus updating the BAW. For now though, I'll just slide the 5183 * window. 5184 */ 5185int 5186ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5187 int status, int code, int batimeout) 5188{ 5189 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5190 int tid = tap->txa_tid; 5191 struct ath_node *an = ATH_NODE(ni); 5192 struct ath_tid *atid = &an->an_tid[tid]; 5193 int r; 5194 5195 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5196 "%s: called; status=%d, code=%d, batimeout=%d\n", __func__, 5197 status, code, batimeout); 5198 5199 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5200 "%s: txa_start=%d, ni_txseqs=%d\n", 5201 __func__, tap->txa_start, ni->ni_txseqs[tid]); 5202 5203 /* 5204 * Call this first, so the interface flags get updated 5205 * before the TID is unpaused. Otherwise a race condition 5206 * exists where the unpaused TID still doesn't yet have 5207 * IEEE80211_AGGR_RUNNING set. 5208 */ 5209 r = sc->sc_addba_response(ni, tap, status, code, batimeout); 5210
|
5234 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
| 5211 ATH_TX_LOCK(sc);
|
5235 atid->addba_tx_pending = 0; 5236 /* 5237 * XXX dirty! 5238 * Slide the BAW left edge to wherever net80211 left it for us. 5239 * Read above for more information. 5240 */ 5241 tap->txa_start = ni->ni_txseqs[tid]; 5242 ath_tx_tid_resume(sc, atid);
| 5212 atid->addba_tx_pending = 0; 5213 /* 5214 * XXX dirty! 5215 * Slide the BAW left edge to wherever net80211 left it for us. 5216 * Read above for more information. 5217 */ 5218 tap->txa_start = ni->ni_txseqs[tid]; 5219 ath_tx_tid_resume(sc, atid);
|
5243 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
| 5220 ATH_TX_UNLOCK(sc);
|
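/*
 * Editor's note: a sketch of the "slide the BAW left edge" step performed
 * above.  Moving txa_start up to the node's current ni_txseqs value drops
 * the pre-addba seqnos (self-assigned by net80211 and never BAW-tracked)
 * out of the window instead of letting them pin its left edge.  Plain ints
 * stand in for the real fields.
 */
static void
sketch_baw_slide(int *txa_start, int ni_txseq)
{
	*txa_start = ni_txseq;	/* adopt net80211's current edge */
}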
5244 return r; 5245} 5246 5247 5248/* 5249 * Stop ADDBA on a queue. 5250 * 5251 * This can be called whilst BAR TX is currently active on the queue, 5252 * so make sure this is unblocked before continuing. 5253 */ 5254void 5255ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) 5256{ 5257 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5258 int tid = tap->txa_tid; 5259 struct ath_node *an = ATH_NODE(ni); 5260 struct ath_tid *atid = &an->an_tid[tid]; 5261 5262 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: called\n", __func__); 5263 5264 /* 5265 * Pause TID traffic early, so there aren't any races. 5266 * Unblock the pending BAR held traffic, if it's currently paused. 5267 */
| 5221 return r; 5222} 5223 5224 5225/* 5226 * Stop ADDBA on a queue. 5227 * 5228 * This can be called whilst BAR TX is currently active on the queue, 5229 * so make sure this is unblocked before continuing. 5230 */ 5231void 5232ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) 5233{ 5234 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5235 int tid = tap->txa_tid; 5236 struct ath_node *an = ATH_NODE(ni); 5237 struct ath_tid *atid = &an->an_tid[tid]; 5238 5239 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: called\n", __func__); 5240 5241 /* 5242 * Pause TID traffic early, so there aren't any races. 5243 * Unblock the pending BAR held traffic, if it's currently paused. 5244 */
|
5268 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
| 5245 ATH_TX_LOCK(sc);
|
5269 ath_tx_tid_pause(sc, atid); 5270 if (atid->bar_wait) { 5271 /* 5272 * bar_unsuspend() expects bar_tx == 1, as it should be 5273 * called from the TX completion path. This quietens 5274 * the warning. It's cleared for us anyway. 5275 */ 5276 atid->bar_tx = 1; 5277 ath_tx_tid_bar_unsuspend(sc, atid); 5278 }
| 5246 ath_tx_tid_pause(sc, atid); 5247 if (atid->bar_wait) { 5248 /* 5249 * bar_unsuspend() expects bar_tx == 1, as it should be 5250 * called from the TX completion path. This quietens 5251 * the warning. It's cleared for us anyway. 5252 */ 5253 atid->bar_tx = 1; 5254 ath_tx_tid_bar_unsuspend(sc, atid); 5255 }
|
5279 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
| 5256 ATH_TX_UNLOCK(sc);
|
5280 5281 /* There's no need to hold the TXQ lock here */ 5282 sc->sc_addba_stop(ni, tap); 5283 5284 /* 5285 * ath_tx_tid_cleanup will resume the TID if possible, otherwise 5286 * it'll set the cleanup flag, and it'll be unpaused once 5287 * things have been cleaned up. 5288 */ 5289 ath_tx_tid_cleanup(sc, an, tid); 5290} 5291 5292/* 5293 * Note: net80211 bar_timeout() doesn't call this function on BAR failure; 5294 * it simply tears down the aggregation session. Ew. 5295 * 5296 * It however will call ieee80211_ampdu_stop() which will call 5297 * ic->ic_addba_stop(). 5298 * 5299 * XXX This uses a hard-coded max BAR count value; the whole 5300 * XXX BAR TX success or failure should be better handled! 5301 */ 5302void 5303ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5304 int status) 5305{ 5306 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5307 int tid = tap->txa_tid; 5308 struct ath_node *an = ATH_NODE(ni); 5309 struct ath_tid *atid = &an->an_tid[tid]; 5310 int attempts = tap->txa_attempts; 5311 5312 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 5313 "%s: called; tap=%p, atid=%p, txa_tid=%d, atid->tid=%d, status=%d, attempts=%d\n", 5314 __func__, 5315 tap, 5316 atid, 5317 tap->txa_tid, 5318 atid->tid, 5319 status, 5320 attempts); 5321 5322 /* Note: This may update the BAW details */ 5323 sc->sc_bar_response(ni, tap, status); 5324 5325 /* Unpause the TID */ 5326 /* 5327 * XXX if this is attempt=50, the TID will be downgraded 5328 * XXX to a non-aggregate session. So we must unpause the 5329 * XXX TID here or it'll never be done. 5330 * 5331 * Also, don't call it if bar_tx/bar_wait are 0; something 5332 * has beaten us to the punch? (XXX figure out what?) 5333 */ 5334 if (status == 0 || attempts == 50) {
| 5257 5258 /* There's no need to hold the TXQ lock here */ 5259 sc->sc_addba_stop(ni, tap); 5260 5261 /* 5262 * ath_tx_tid_cleanup will resume the TID if possible, otherwise 5263 * it'll set the cleanup flag, and it'll be unpaused once 5264 * things have been cleaned up. 5265 */ 5266 ath_tx_tid_cleanup(sc, an, tid); 5267} 5268 5269/* 5270 * Note: net80211 bar_timeout() doesn't call this function on BAR failure; 5271 * it simply tears down the aggregation session. Ew. 5272 * 5273 * It however will call ieee80211_ampdu_stop() which will call 5274 * ic->ic_addba_stop(). 5275 * 5276 * XXX This uses a hard-coded max BAR count value; the whole 5277 * XXX BAR TX success or failure should be better handled! 5278 */ 5279void 5280ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5281 int status) 5282{ 5283 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5284 int tid = tap->txa_tid; 5285 struct ath_node *an = ATH_NODE(ni); 5286 struct ath_tid *atid = &an->an_tid[tid]; 5287 int attempts = tap->txa_attempts; 5288 5289 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 5290 "%s: called; tap=%p, atid=%p, txa_tid=%d, atid->tid=%d, status=%d, attempts=%d\n", 5291 __func__, 5292 tap, 5293 atid, 5294 tap->txa_tid, 5295 atid->tid, 5296 status, 5297 attempts); 5298 5299 /* Note: This may update the BAW details */ 5300 sc->sc_bar_response(ni, tap, status); 5301 5302 /* Unpause the TID */ 5303 /* 5304 * XXX if this is attempt=50, the TID will be downgraded 5305 * XXX to a non-aggregate session. So we must unpause the 5306 * XXX TID here or it'll never be done. 5307 * 5308 * Also, don't call it if bar_tx/bar_wait are 0; something 5309 * has beaten us to the punch? (XXX figure out what?) 5310 */ 5311 if (status == 0 || attempts == 50) {
|
5335 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
| 5312 ATH_TX_LOCK(sc);
|
5336 if (atid->bar_tx == 0 || atid->bar_wait == 0) 5337 device_printf(sc->sc_dev, 5338 "%s: huh? bar_tx=%d, bar_wait=%d\n", 5339 __func__, 5340 atid->bar_tx, atid->bar_wait); 5341 else 5342 ath_tx_tid_bar_unsuspend(sc, atid);
| 5313 if (atid->bar_tx == 0 || atid->bar_wait == 0) 5314 device_printf(sc->sc_dev, 5315 "%s: huh? bar_tx=%d, bar_wait=%d\n", 5316 __func__, 5317 atid->bar_tx, atid->bar_wait); 5318 else 5319 ath_tx_tid_bar_unsuspend(sc, atid);
|
5343 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
| 5320 ATH_TX_UNLOCK(sc);
|
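/*
 * Editor's note: bar_wait/bar_tx above form a small state machine -
 * bar_wait latches "a BAR is owed", bar_tx latches "the BAR is in flight".
 * A sketch of the unsuspend decision in ath_bar_response(); the 50-attempt
 * cap simply mirrors the hard-coded check above.  Names are hypothetical.
 */
struct sketch_bar_state {
	int bar_wait;		/* BAR owed for this TID */
	int bar_tx;		/* BAR currently in flight */
};

static int
sketch_bar_should_unsuspend(const struct sketch_bar_state *b,
    int status, int attempts)
{
	/* Unpause on success, or when net80211 gives up on the session. */
	if (status != 0 && attempts != 50)
		return (0);
	return (b->bar_tx != 0 && b->bar_wait != 0);
}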
5344 } 5345} 5346 5347/* 5348 * This is called whenever the pending ADDBA request times out. 5349 * Unpause and reschedule the TID. 5350 */ 5351void 5352ath_addba_response_timeout(struct ieee80211_node *ni, 5353 struct ieee80211_tx_ampdu *tap) 5354{ 5355 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5356 int tid = tap->txa_tid; 5357 struct ath_node *an = ATH_NODE(ni); 5358 struct ath_tid *atid = &an->an_tid[tid]; 5359 5360 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5361 "%s: called; resuming\n", __func__); 5362
| 5321 } 5322} 5323 5324/* 5325 * This is called whenever the pending ADDBA request times out. 5326 * Unpause and reschedule the TID. 5327 */ 5328void 5329ath_addba_response_timeout(struct ieee80211_node *ni, 5330 struct ieee80211_tx_ampdu *tap) 5331{ 5332 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5333 int tid = tap->txa_tid; 5334 struct ath_node *an = ATH_NODE(ni); 5335 struct ath_tid *atid = &an->an_tid[tid]; 5336 5337 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5338 "%s: called; resuming\n", __func__); 5339
|
5363 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
| 5340 ATH_TX_LOCK(sc);
|
5364 atid->addba_tx_pending = 0;
| 5341 atid->addba_tx_pending = 0;
|
5365 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
| 5342 ATH_TX_UNLOCK(sc);
|
5366 5367 /* Note: This updates the aggregate state to (again) pending */ 5368 sc->sc_addba_response_timeout(ni, tap); 5369 5370 /* Unpause the TID; which reschedules it */
| 5343 5344 /* Note: This updates the aggregate state to (again) pending */ 5345 sc->sc_addba_response_timeout(ni, tap); 5346 5347 /* Unpause the TID; which reschedules it */
|
5371 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
| 5348 ATH_TX_LOCK(sc);
|
5372 ath_tx_tid_resume(sc, atid);
| 5349 ath_tx_tid_resume(sc, atid);
|
5373 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
| 5350 ATH_TX_UNLOCK(sc);
|
5374} 5375 5376/* 5377 * Check if a node is asleep or not. 5378 */ 5379int 5380ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an) 5381{ 5382 5383 ATH_NODE_LOCK_ASSERT(an); 5384 5385 return (an->an_is_powersave); 5386} 5387 5388/* 5389 * Mark a node as currently "in powersaving." 5390 * This suspends all traffic on the node. 5391 * 5392 * This must be called with the node/tx locks free. 5393 * 5394 * XXX TODO: the locking silliness below is due to how the node 5395 * locking currently works. Right now, the node lock is grabbed 5396 * to do rate control lookups and these are done with the TX 5397 * queue lock held. This means the node lock can't be grabbed 5398 * first here or a LOR will occur. 5399 * 5400 * Eventually (hopefully!) the TX path code will only grab 5401 * the TXQ lock when transmitting and the ath_node lock when 5402 * doing node/TID operations. There are other complications - 5403 * the sched/unsched operations involve walking the per-txq 5404 * 'active tid' list and this requires both locks to be held. 5405 */ 5406void 5407ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an) 5408{ 5409 struct ath_tid *atid; 5410 struct ath_txq *txq; 5411 int tid; 5412 5413 ATH_NODE_UNLOCK_ASSERT(an); 5414 5415 /* 5416 * It's possible that a parallel call to ath_tx_node_wakeup() 5417 * will unpause these queues. 5418 * 5419 * The node lock can't just be grabbed here, as there are places 5420 * in the driver where the node lock is grabbed _within_ a 5421 * TXQ lock. 5422 * So, we do this delicately and unwind state if needed. 5423 * 5424 * + Pause all the queues 5425 * + Grab the node lock 5426 * + If the queue is already asleep, unpause and quit 5427 * + else just mark as asleep. 5428 * 5429 * A parallel sleep() call will just pause and then 5430 * find they're already paused, so undo it. 5431 * 5432 * A parallel wakeup() call will check if asleep is 1 5433 * and if it's not (ie, it's 0), it'll treat it as already 5434 * being awake. If it's 1, it'll mark it as 0 and then 5435 * unpause everything. 5436 * 5437 * (Talk about a delicate hack.) 5438 */ 5439 5440 /* Suspend all traffic on the node */
| 5351} 5352 5353/* 5354 * Check if a node is asleep or not. 5355 */ 5356int 5357ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an) 5358{ 5359 5360 ATH_NODE_LOCK_ASSERT(an); 5361 5362 return (an->an_is_powersave); 5363} 5364 5365/* 5366 * Mark a node as currently "in powersaving." 5367 * This suspends all traffic on the node. 5368 * 5369 * This must be called with the node/tx locks free. 5370 * 5371 * XXX TODO: the locking silliness below is due to how the node 5372 * locking currently works. Right now, the node lock is grabbed 5373 * to do rate control lookups and these are done with the TX 5374 * queue lock held. This means the node lock can't be grabbed 5375 * first here or a LOR will occur. 5376 * 5377 * Eventually (hopefully!) the TX path code will only grab 5378 * the TXQ lock when transmitting and the ath_node lock when 5379 * doing node/TID operations. There are other complications - 5380 * the sched/unsched operations involve walking the per-txq 5381 * 'active tid' list and this requires both locks to be held. 5382 */ 5383void 5384ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an) 5385{ 5386 struct ath_tid *atid; 5387 struct ath_txq *txq; 5388 int tid; 5389 5390 ATH_NODE_UNLOCK_ASSERT(an); 5391 5392 /* 5393 * It's possible that a parallel call to ath_tx_node_wakeup() 5394 * will unpause these queues. 5395 * 5396 * The node lock can't just be grabbed here, as there are places 5397 * in the driver where the node lock is grabbed _within_ a 5398 * TXQ lock. 5399 * So, we do this delicately and unwind state if needed. 5400 * 5401 * + Pause all the queues 5402 * + Grab the node lock 5403 * + If the queue is already asleep, unpause and quit 5404 * + else just mark as asleep. 5405 * 5406 * A parallel sleep() call will just pause and then 5407 * find they're already paused, so undo it. 5408 * 5409 * A parallel wakeup() call will check if asleep is 1 5410 * and if it's not (ie, it's 0), it'll treat it as already 5411 * being awake. If it's 1, it'll mark it as 0 and then 5412 * unpause everything. 5413 * 5414 * (Talk about a delicate hack.) 5415 */ 5416 5417 /* Suspend all traffic on the node */
|
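/*
 * Editor's note: a compact sketch of the "delicate hack" spelled out above:
 * pause every TID first, then take the node lock, and unwind the pause if
 * another thread already put the node to sleep.  The lock and pause helpers
 * below are hypothetical stubs standing in for the real primitives.
 */
struct sketch_node {
	int is_powersave;
	int pause_count;
};

static void sketch_pause_all_tids(struct sketch_node *n)  { n->pause_count++; }
static void sketch_resume_all_tids(struct sketch_node *n) { n->pause_count--; }
static void sketch_node_lock(struct sketch_node *n)   { (void)n; }
static void sketch_node_unlock(struct sketch_node *n) { (void)n; }

static void
sketch_node_sleep(struct sketch_node *n)
{
	sketch_pause_all_tids(n);	/* safe without the node lock */
	sketch_node_lock(n);
	if (n->is_powersave) {		/* lost the race - undo the pause */
		sketch_node_unlock(n);
		sketch_resume_all_tids(n);
		return;
	}
	n->is_powersave = 1;		/* won the race - stay paused */
	sketch_node_unlock(n);
}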
| 5418 ATH_TX_LOCK(sc);
|
5441 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 5442 atid = &an->an_tid[tid]; 5443 txq = sc->sc_ac2q[atid->ac]; 5444
| 5419 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 5420 atid = &an->an_tid[tid]; 5421 txq = sc->sc_ac2q[atid->ac]; 5422
|
5445 ATH_TXQ_LOCK(txq);
| |
5446 ath_tx_tid_pause(sc, atid);
| 5423 ath_tx_tid_pause(sc, atid);
|
5447 ATH_TXQ_UNLOCK(txq);
| |
5448 }
| 5424 }
|
| 5425 ATH_TX_UNLOCK(sc);
|
5449 5450 ATH_NODE_LOCK(an); 5451 5452 /* In case of concurrency races from net80211.. */ 5453 if (an->an_is_powersave == 1) { 5454 ATH_NODE_UNLOCK(an); 5455 device_printf(sc->sc_dev, 5456 "%s: an=%p: node was already asleep\n", 5457 __func__, an);
| 5426 5427 ATH_NODE_LOCK(an); 5428 5429 /* In case of concurrency races from net80211.. */ 5430 if (an->an_is_powersave == 1) { 5431 ATH_NODE_UNLOCK(an); 5432 device_printf(sc->sc_dev, 5433 "%s: an=%p: node was already asleep\n", 5434 __func__, an);
|
| 5435 ATH_TX_LOCK(sc);
|
5458 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 5459 atid = &an->an_tid[tid]; 5460 txq = sc->sc_ac2q[atid->ac]; 5461
| 5436 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 5437 atid = &an->an_tid[tid]; 5438 txq = sc->sc_ac2q[atid->ac]; 5439
|
5462 ATH_TXQ_LOCK(txq);
| |
5463 ath_tx_tid_resume(sc, atid);
| 5440 ath_tx_tid_resume(sc, atid);
|
5464 ATH_TXQ_UNLOCK(txq);
| |
5465 }
| 5441 }
|
| 5442 ATH_TX_UNLOCK(sc);
|
5466 return; 5467 } 5468 5469 /* Mark node as in powersaving */ 5470 an->an_is_powersave = 1; 5471 5472 ATH_NODE_UNLOCK(an); 5473} 5474 5475/* 5476 * Mark a node as currently "awake." 5477 * This resumes all traffic to the node. 5478 */ 5479void 5480ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an) 5481{ 5482 struct ath_tid *atid; 5483 struct ath_txq *txq; 5484 int tid; 5485 5486 ATH_NODE_UNLOCK_ASSERT(an); 5487 ATH_NODE_LOCK(an); 5488 5489 /* In case of concurrency races from net80211.. */ 5490 if (an->an_is_powersave == 0) { 5491 ATH_NODE_UNLOCK(an); 5492 device_printf(sc->sc_dev, 5493 "%s: an=%p: node was already awake\n", 5494 __func__, an); 5495 return; 5496 } 5497 5498 /* Mark node as awake */ 5499 an->an_is_powersave = 0; 5500 5501 ATH_NODE_UNLOCK(an); 5502
| 5443 return; 5444 } 5445 5446 /* Mark node as in powersaving */ 5447 an->an_is_powersave = 1; 5448 5449 ATH_NODE_UNLOCK(an); 5450} 5451 5452/* 5453 * Mark a node as currently "awake." 5454 * This resumes all traffic to the node. 5455 */ 5456void 5457ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an) 5458{ 5459 struct ath_tid *atid; 5460 struct ath_txq *txq; 5461 int tid; 5462 5463 ATH_NODE_UNLOCK_ASSERT(an); 5464 ATH_NODE_LOCK(an); 5465 5466 /* In case of concurrency races from net80211.. */ 5467 if (an->an_is_powersave == 0) { 5468 ATH_NODE_UNLOCK(an); 5469 device_printf(sc->sc_dev, 5470 "%s: an=%p: node was already awake\n", 5471 __func__, an); 5472 return; 5473 } 5474 5475 /* Mark node as awake */ 5476 an->an_is_powersave = 0; 5477 5478 ATH_NODE_UNLOCK(an); 5479
|
| 5480 ATH_TX_LOCK(sc);
|
5503 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 5504 atid = &an->an_tid[tid]; 5505 txq = sc->sc_ac2q[atid->ac]; 5506
| 5481 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 5482 atid = &an->an_tid[tid]; 5483 txq = sc->sc_ac2q[atid->ac]; 5484
|
5507 ATH_TXQ_LOCK(txq);
| |
5508 ath_tx_tid_resume(sc, atid);
| 5485 ath_tx_tid_resume(sc, atid);
|
5509 ATH_TXQ_UNLOCK(txq);
| |
5510 }
| 5486 }
|
| 5487 ATH_TX_UNLOCK(sc);
|
5511} 5512 5513static int 5514ath_legacy_dma_txsetup(struct ath_softc *sc) 5515{ 5516 5517 /* nothing new needed */ 5518 return (0); 5519} 5520 5521static int 5522ath_legacy_dma_txteardown(struct ath_softc *sc) 5523{ 5524 5525 /* nothing new needed */ 5526 return (0); 5527} 5528 5529void 5530ath_xmit_setup_legacy(struct ath_softc *sc) 5531{ 5532 /* 5533 * For now, just set the descriptor length to sizeof(ath_desc); 5534 * worry about extracting the real length out of the HAL later. 5535 */ 5536 sc->sc_tx_desclen = sizeof(struct ath_desc); 5537 sc->sc_tx_statuslen = sizeof(struct ath_desc); 5538 sc->sc_tx_nmaps = 1; /* only one buffer per TX desc */ 5539 5540 sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup; 5541 sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown; 5542 sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func; 5543 5544 sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart; 5545 sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff; 5546 5547 sc->sc_tx.xmit_drain = ath_legacy_tx_drain; 5548}
| 5488} 5489 5490static int 5491ath_legacy_dma_txsetup(struct ath_softc *sc) 5492{ 5493 5494 /* nothing new needed */ 5495 return (0); 5496} 5497 5498static int 5499ath_legacy_dma_txteardown(struct ath_softc *sc) 5500{ 5501 5502 /* nothing new needed */ 5503 return (0); 5504} 5505 5506void 5507ath_xmit_setup_legacy(struct ath_softc *sc) 5508{ 5509 /* 5510 * For now, just set the descriptor length to sizeof(ath_desc); 5511 * worry about extracting the real length out of the HAL later. 5512 */ 5513 sc->sc_tx_desclen = sizeof(struct ath_desc); 5514 sc->sc_tx_statuslen = sizeof(struct ath_desc); 5515 sc->sc_tx_nmaps = 1; /* only one buffer per TX desc */ 5516 5517 sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup; 5518 sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown; 5519 sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func; 5520 5521 sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart; 5522 sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff; 5523 5524 sc->sc_tx.xmit_drain = ath_legacy_tx_drain; 5525}
|
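/*
 * Editor's note: ath_xmit_setup_legacy() above populates a method table so
 * callers stay agnostic of the underlying TX DMA scheme (the EDMA path on
 * newer chips installs its own methods).  A minimal sketch of the pattern
 * with hypothetical names; the real sc_tx structure carries more methods
 * than shown here.
 */
struct sketch_tx_methods {
	int  (*xmit_setup)(void *sc);
	int  (*xmit_teardown)(void *sc);
	void (*xmit_handoff)(void *sc, void *txq, void *bf);
};

static int
sketch_legacy_setup(void *sc)
{
	(void)sc;
	return (0);	/* nothing extra needed for legacy DMA */
}

static int
sketch_legacy_teardown(void *sc)
{
	(void)sc;
	return (0);
}

static void
sketch_legacy_handoff(void *sc, void *txq, void *bf)
{
	(void)sc; (void)txq; (void)bf;	/* would chain bf onto txq's list */
}

static void
sketch_attach_legacy(struct sketch_tx_methods *m)
{
	m->xmit_setup = sketch_legacy_setup;
	m->xmit_teardown = sketch_legacy_teardown;
	m->xmit_handoff = sketch_legacy_handoff;
}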