/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME          16

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },	/* 0: BPSK */
	{    52,  108 },	/* 1: QPSK 1/2 */
	{    78,  162 },	/* 2: QPSK 3/4 */
	{   104,  216 },	/* 3: 16-QAM 1/2 */
	{   156,  324 },	/* 4: 16-QAM 3/4 */
	{   208,  432 },	/* 5: 64-QAM 2/3 */
	{   234,  486 },	/* 6: 64-QAM 3/4 */
	{   260,  540 },	/* 7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
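	/*
	 * (Each entry is roughly PHY rate x 4 ms in bytes, clamped to
	 * 65532: e.g. HT20 MCS 0 runs 26 bits per 4 us symbol, i.e.
	 * 6.5 Mb/s, which is ~3250 bytes in 4 ms, listed as 3212 after
	 * overhead. The SGI tables scale by roughly 10/9, since 3.6 us
	 * symbols replace 4 us ones.)
	 */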
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf)) {
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
		}
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tid->tx_buf[cindex] = NULL;

	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	BUG_ON(tid->tx_buf[cindex] != NULL);
	tid->tx_buf[cindex] = bf;

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
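 * (A BAR frame would tell the receiver to move its block-ack reorder
 * window past the abandoned sequence numbers.)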
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;
	tbf->bf_dmacontext = bf->bf_dmacontext;

	return tbf;
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	int nframes;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));
	nframes = bf->bf_nframes;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
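			/*
			 * (On non-EDMA hardware the last subframe may be a
			 * stale holding descriptor still owned by the DMA
			 * engine; such a buffer must stay on the txq and is
			 * reclaimed later, so only move it when safe.)
			 */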
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (bf->bf_tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA issue
			 * happens. The chip needs to be reset, but the AP
			 * code may have synchronization issues when
			 * performing an internal reset in this routine.
			 * Only enable the reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				bf->bf_nframes = nframes;
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
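					 * (The stale holding descriptor
					 * itself cannot be re-queued while
					 * the hardware may still read it,
					 * hence the clone for the software
					 * retry.)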
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4 ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate then
	 * avoid aggregating this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * The h/w can accept aggregates up to 16-bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
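 * (For example, at a 4 us density on HT20 MCS 7 with one stream,
 * NUM_SYMBOLS_PER_USEC(4) = 1 symbol of 260 bits, so subframes shorter
 * than (1 * 260) / 8 = 32 bytes get padded out with 4-byte delimiters.)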
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
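		 * (bpad below charges 4 bytes per delimiter, ndelim << 2,
		 * plus whatever padding brings the previous subframe up to
		 * a 4-byte boundary.)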
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
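	 * (AGGR_CLEANUP defers the teardown to ath_tx_complete_aggr(),
	 * which drops the flag once baw_head catches up with baw_tail.)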
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *txtid;

	if (!(sc->sc_flags & SC_OP_TXAGGR))
		return false;

	txtid = ATH_AN_2_TID(an, tidno);

	if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
		return true;
	return false;
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
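	 * (EDMA chips report per-frame completion status separately
	 * instead, so they take plain TXOK/TXERR interrupts, as set
	 * up below.)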
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_class = subtype;
		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
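 * (With retry_tx false, the last buffer of each aggregate is marked
 * bf_tx_aborted below, so completion drops the subframes instead of
 * rescheduling them.)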
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}

void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

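		/*
		 * (Frames are still pending after stoptxdma: the DMA
		 * engine appears stuck, and a full chip reset is the
		 * only way left to recover.)
		 */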
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to stop TX DMA. Resetting hardware!\n");

		spin_lock_bh(&sc->sc_pcu_lock);
		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
		spin_unlock_bh(&sc->sc_pcu_lock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "HAL AC %u out of range, max %zu!\n",
			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
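	 * (EDMA parts push the chain into one of ATH_TXFIFO_DEPTH FIFO
	 * slots; legacy parts either write the first descriptor address
	 * to TXDP or patch it into the previous chain via axq_link.)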
1257 */ 1258 1259 if (list_empty(head)) 1260 return; 1261 1262 bf = list_first_entry(head, struct ath_buf, list); 1263 1264 ath_print(common, ATH_DBG_QUEUE, 1265 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth); 1266 1267 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 1268 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) { 1269 list_splice_tail_init(head, &txq->txq_fifo_pending); 1270 return; 1271 } 1272 if (!list_empty(&txq->txq_fifo[txq->txq_headidx])) 1273 ath_print(common, ATH_DBG_XMIT, 1274 "Initializing tx fifo %d which " 1275 "is non-empty\n", 1276 txq->txq_headidx); 1277 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]); 1278 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]); 1279 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH); 1280 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 1281 ath_print(common, ATH_DBG_XMIT, 1282 "TXDP[%u] = %llx (%p)\n", 1283 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); 1284 } else { 1285 list_splice_tail_init(head, &txq->axq_q); 1286 1287 if (txq->axq_link == NULL) { 1288 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 1289 ath_print(common, ATH_DBG_XMIT, 1290 "TXDP[%u] = %llx (%p)\n", 1291 txq->axq_qnum, ito64(bf->bf_daddr), 1292 bf->bf_desc); 1293 } else { 1294 *txq->axq_link = bf->bf_daddr; 1295 ath_print(common, ATH_DBG_XMIT, 1296 "link[%u] (%p)=%llx (%p)\n", 1297 txq->axq_qnum, txq->axq_link, 1298 ito64(bf->bf_daddr), bf->bf_desc); 1299 } 1300 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc, 1301 &txq->axq_link); 1302 ath9k_hw_txstart(ah, txq->axq_qnum); 1303 } 1304 txq->axq_depth++; 1305} 1306 1307static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, 1308 struct list_head *bf_head, 1309 struct ath_tx_control *txctl) 1310{ 1311 struct ath_buf *bf; 1312 1313 bf = list_first_entry(bf_head, struct ath_buf, list); 1314 bf->bf_state.bf_type |= BUF_AMPDU; 1315 TX_STAT_INC(txctl->txq->axq_qnum, a_queued); 1316 1317 /* 1318 * Do not queue to h/w when any of the following conditions is true: 1319 * - there are pending frames in software queue 1320 * - the TID is currently paused for ADDBA/BAR request 1321 * - seqno is not within block-ack window 1322 * - h/w queue depth exceeds low water mark 1323 */ 1324 if (!list_empty(&tid->buf_q) || tid->paused || 1325 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) || 1326 txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) { 1327 /* 1328 * Add this frame to software queue for scheduling later 1329 * for aggregation. 
1330 */ 1331 list_move_tail(&bf->list, &tid->buf_q); 1332 ath_tx_queue_tid(txctl->txq, tid); 1333 return; 1334 } 1335 1336 /* Add sub-frame to BAW */ 1337 ath_tx_addto_baw(sc, tid, bf); 1338 1339 /* Queue to h/w without aggregation */ 1340 bf->bf_nframes = 1; 1341 bf->bf_lastbf = bf; 1342 ath_buf_set_rate(sc, bf); 1343 ath_tx_txqaddbuf(sc, txctl->txq, bf_head); 1344} 1345 1346static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq, 1347 struct ath_atx_tid *tid, 1348 struct list_head *bf_head) 1349{ 1350 struct ath_buf *bf; 1351 1352 bf = list_first_entry(bf_head, struct ath_buf, list); 1353 bf->bf_state.bf_type &= ~BUF_AMPDU; 1354 1355 /* update starting sequence number for subsequent ADDBA request */ 1356 INCR(tid->seq_start, IEEE80211_SEQ_MAX); 1357 1358 bf->bf_nframes = 1; 1359 bf->bf_lastbf = bf; 1360 ath_buf_set_rate(sc, bf); 1361 ath_tx_txqaddbuf(sc, txq, bf_head); 1362 TX_STAT_INC(txq->axq_qnum, queued); 1363} 1364 1365static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, 1366 struct list_head *bf_head) 1367{ 1368 struct ath_buf *bf; 1369 1370 bf = list_first_entry(bf_head, struct ath_buf, list); 1371 1372 bf->bf_lastbf = bf; 1373 bf->bf_nframes = 1; 1374 ath_buf_set_rate(sc, bf); 1375 ath_tx_txqaddbuf(sc, txq, bf_head); 1376 TX_STAT_INC(txq->axq_qnum, queued); 1377} 1378 1379static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb) 1380{ 1381 struct ieee80211_hdr *hdr; 1382 enum ath9k_pkt_type htype; 1383 __le16 fc; 1384 1385 hdr = (struct ieee80211_hdr *)skb->data; 1386 fc = hdr->frame_control; 1387 1388 if (ieee80211_is_beacon(fc)) 1389 htype = ATH9K_PKT_TYPE_BEACON; 1390 else if (ieee80211_is_probe_resp(fc)) 1391 htype = ATH9K_PKT_TYPE_PROBE_RESP; 1392 else if (ieee80211_is_atim(fc)) 1393 htype = ATH9K_PKT_TYPE_ATIM; 1394 else if (ieee80211_is_pspoll(fc)) 1395 htype = ATH9K_PKT_TYPE_PSPOLL; 1396 else 1397 htype = ATH9K_PKT_TYPE_NORMAL; 1398 1399 return htype; 1400} 1401 1402static int get_hw_crypto_keytype(struct sk_buff *skb) 1403{ 1404 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1405 1406 if (tx_info->control.hw_key) { 1407 if (tx_info->control.hw_key->alg == ALG_WEP) 1408 return ATH9K_KEY_TYPE_WEP; 1409 else if (tx_info->control.hw_key->alg == ALG_TKIP) 1410 return ATH9K_KEY_TYPE_TKIP; 1411 else if (tx_info->control.hw_key->alg == ALG_CCMP) 1412 return ATH9K_KEY_TYPE_AES; 1413 } 1414 1415 return ATH9K_KEY_TYPE_CLEAR; 1416} 1417 1418static void assign_aggr_tid_seqno(struct sk_buff *skb, 1419 struct ath_buf *bf) 1420{ 1421 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1422 struct ieee80211_hdr *hdr; 1423 struct ath_node *an; 1424 struct ath_atx_tid *tid; 1425 __le16 fc; 1426 u8 *qc; 1427 1428 if (!tx_info->control.sta) 1429 return; 1430 1431 an = (struct ath_node *)tx_info->control.sta->drv_priv; 1432 hdr = (struct ieee80211_hdr *)skb->data; 1433 fc = hdr->frame_control; 1434 1435 if (ieee80211_is_data_qos(fc)) { 1436 qc = ieee80211_get_qos_ctl(hdr); 1437 bf->bf_tidno = qc[0] & 0xf; 1438 } 1439 1440 /* 1441 * For HT capable stations, we save tidno for later use. 1442 * We also override seqno set by upper layer with the one 1443 * in tx aggregation state. 
1444 */ 1445 tid = ATH_AN_2_TID(an, bf->bf_tidno); 1446 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); 1447 bf->bf_seqno = tid->seq_next; 1448 INCR(tid->seq_next, IEEE80211_SEQ_MAX); 1449} 1450 1451static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc) 1452{ 1453 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1454 int flags = 0; 1455 1456 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */ 1457 flags |= ATH9K_TXDESC_INTREQ; 1458 1459 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) 1460 flags |= ATH9K_TXDESC_NOACK; 1461 1462 if (use_ldpc) 1463 flags |= ATH9K_TXDESC_LDPC; 1464 1465 return flags; 1466} 1467 1468/* 1469 * rix - rate index 1470 * pktlen - total bytes (delims + data + fcs + pads + pad delims) 1471 * width - 0 for 20 MHz, 1 for 40 MHz 1472 * half_gi - to use 4us v/s 3.6 us for symbol time 1473 */ 1474static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf, 1475 int width, int half_gi, bool shortPreamble) 1476{ 1477 u32 nbits, nsymbits, duration, nsymbols; 1478 int streams, pktlen; 1479 1480 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen; 1481 1482 /* find number of symbols: PLCP + data */ 1483 streams = HT_RC_2_STREAMS(rix); 1484 nbits = (pktlen << 3) + OFDM_PLCP_BITS; 1485 nsymbits = bits_per_symbol[rix % 8][width] * streams; 1486 nsymbols = (nbits + nsymbits - 1) / nsymbits; 1487 1488 if (!half_gi) 1489 duration = SYMBOL_TIME(nsymbols); 1490 else 1491 duration = SYMBOL_TIME_HALFGI(nsymbols); 1492 1493 /* addup duration for legacy/ht training and signal fields */ 1494 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); 1495 1496 return duration; 1497} 1498 1499static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf) 1500{ 1501 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1502 struct ath9k_11n_rate_series series[4]; 1503 struct sk_buff *skb; 1504 struct ieee80211_tx_info *tx_info; 1505 struct ieee80211_tx_rate *rates; 1506 const struct ieee80211_rate *rate; 1507 struct ieee80211_hdr *hdr; 1508 int i, flags = 0; 1509 u8 rix = 0, ctsrate = 0; 1510 bool is_pspoll; 1511 1512 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4); 1513 1514 skb = bf->bf_mpdu; 1515 tx_info = IEEE80211_SKB_CB(skb); 1516 rates = tx_info->control.rates; 1517 hdr = (struct ieee80211_hdr *)skb->data; 1518 is_pspoll = ieee80211_is_pspoll(hdr->frame_control); 1519 1520 /* 1521 * We check if Short Preamble is needed for the CTS rate by 1522 * checking the BSS's global flag. 1523 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used. 
1524 */ 1525 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info); 1526 ctsrate = rate->hw_value; 1527 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT) 1528 ctsrate |= rate->hw_value_short; 1529 1530 for (i = 0; i < 4; i++) { 1531 bool is_40, is_sgi, is_sp; 1532 int phy; 1533 1534 if (!rates[i].count || (rates[i].idx < 0)) 1535 continue; 1536 1537 rix = rates[i].idx; 1538 series[i].Tries = rates[i].count; 1539 series[i].ChSel = common->tx_chainmask; 1540 1541 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) || 1542 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) { 1543 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; 1544 flags |= ATH9K_TXDESC_RTSENA; 1545 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { 1546 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; 1547 flags |= ATH9K_TXDESC_CTSENA; 1548 } 1549 1550 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 1551 series[i].RateFlags |= ATH9K_RATESERIES_2040; 1552 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) 1553 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI; 1554 1555 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI); 1556 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH); 1557 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE); 1558 1559 if (rates[i].flags & IEEE80211_TX_RC_MCS) { 1560 /* MCS rates */ 1561 series[i].Rate = rix | 0x80; 1562 series[i].PktDuration = ath_pkt_duration(sc, rix, bf, 1563 is_40, is_sgi, is_sp); 1564 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC)) 1565 series[i].RateFlags |= ATH9K_RATESERIES_STBC; 1566 continue; 1567 } 1568 1569 /* legcay rates */ 1570 if ((tx_info->band == IEEE80211_BAND_2GHZ) && 1571 !(rate->flags & IEEE80211_RATE_ERP_G)) 1572 phy = WLAN_RC_PHY_CCK; 1573 else 1574 phy = WLAN_RC_PHY_OFDM; 1575 1576 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx]; 1577 series[i].Rate = rate->hw_value; 1578 if (rate->hw_value_short) { 1579 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 1580 series[i].Rate |= rate->hw_value_short; 1581 } else { 1582 is_sp = false; 1583 } 1584 1585 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah, 1586 phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp); 1587 } 1588 1589 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */ 1590 if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit)) 1591 flags &= ~ATH9K_TXDESC_RTSENA; 1592 1593 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. 
	 */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);

	if (sc->config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
}

static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
			       struct sk_buff *skb,
			       struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int hdrlen;
	__le16 fc;
	int padpos, padsize;
	bool use_ldpc = false;

	tx_info->pad[0] = 0;
	switch (txctl->frame_type) {
	case ATH9K_IFT_NOT_INTERNAL:
		break;
	case ATH9K_IFT_PAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
		/* fall through */
	case ATH9K_IFT_UNPAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
		break;
	}
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;

	ATH_TXBUF_RESET(bf);

	bf->aphy = aphy;
	bf->bf_frmlen = skb->len + FCS_LEN;
	/* Remove the padding size from bf_frmlen, if any */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		bf->bf_frmlen -= padsize;
	}

	if (!txctl->paprd && conf_is_ht(&hw->conf)) {
		bf->bf_state.bf_type |= BUF_HT;
		if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
			use_ldpc = true;
	}

	bf->bf_state.bfs_paprd = txctl->paprd;
	if (txctl->paprd)
		bf->bf_state.bfs_paprd_timestamp = jiffies;
	bf->bf_flags = setup_tx_flags(skb, use_ldpc);

	bf->bf_keytype = get_hw_crypto_keytype(skb);
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
	} else {
		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
	}

	if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
	    (sc->sc_flags & SC_OP_TXAGGR))
		assign_aggr_tid_seqno(skb, bf);

	bf->bf_mpdu = skb;

	bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
					   skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
		bf->bf_mpdu = NULL;
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "dma_mapping_error() on TX\n");
		return -ENOMEM;
	}

	bf->bf_buf_addr = bf->bf_dmacontext;

	/* tag if this is a nullfunc frame to enable PS when AP acks it */
	if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
		bf->bf_isnullfunc = true;
		sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
	} else
		bf->bf_isnullfunc = false;

	bf->bf_tx_aborted = false;

	return 0;
}

static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_node *an = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_atx_tid *tid;
	struct ath_hw *ah = sc->sc_ah;
	int frm_type;
	__le16 fc;

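	/*
	 * (Fill the descriptor, then hand the frame to the matching
	 * scheduler below: the AMPDU path, the HT-normal path, or plain
	 * single-frame transmit.)
	 */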
	frm_type = get_hw_packet_type(skb);
	fc = hdr->frame_control;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);

	ds = bf->bf_desc;
	ath9k_hw_set_desc_link(ah, ds, 0);

	ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
			       bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds,		/* first descriptor */
			    bf->bf_buf_addr,
			    txctl->txq->axq_qnum);

	if (bf->bf_state.bfs_paprd)
		ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);

	spin_lock_bh(&txctl->txq->axq_lock);

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
	    tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);

		if (!ieee80211_is_data_qos(fc)) {
			ath_tx_send_normal(sc, txctl->txq, &bf_head);
			goto tx_done;
		}

		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
			/*
			 * Try aggregation if it's a unicast data frame
			 * and the destination is HT capable.
			 */
			ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
		} else {
			/*
			 * Send this frame as regular when ADDBA
			 * exchange is neither complete nor pending.
			 */
			ath_tx_send_ht_normal(sc, txctl->txq,
					      tid, &bf_head);
		}
	} else {
		ath_tx_send_normal(sc, txctl->txq, &bf_head);
	}

tx_done:
	spin_unlock_bh(&txctl->txq->axq_lock);
}

/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq = txctl->txq;
	struct ath_buf *bf;
	int q, r;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
		return -1;
	}

	r = ath_tx_setup_buffer(hw, bf, skb, txctl);
	if (unlikely(r)) {
		ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");

		/*
		 * This TX queue will be resumed in ath_tx_processq(); we
		 * guarantee this will happen because we know beforehand
		 * that TX completion will run on at least one buffer on
		 * the queue.
		 */
		spin_lock_bh(&txq->axq_lock);
		if (!txq->stopped && txq->axq_depth > 1) {
			ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
			txq->stopped = 1;
		}
		spin_unlock_bh(&txq->axq_lock);

		ath_tx_return_buffer(sc, bf);

		return r;
	}

	q = skb_get_queue_mapping(skb);
	if (q >= 4)
		q = 0;

	spin_lock_bh(&txq->axq_lock);
	if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
		ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
		txq->stopped = 1;
	}
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}

void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	int padpos, padsize;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath_tx_control txctl;

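	/*
	 * (CAB, "content after beacon", frames are multicast/broadcast
	 * frames buffered for power-save stations; they go out on the
	 * dedicated cabq right after the beacon.)
	 */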
	memset(&txctl, 0, sizeof(struct ath_tx_control));

	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize) {
			ath_print(common, ATH_DBG_XMIT,
				  "TX CABQ padding failed\n");
			dev_kfree_skb_any(skb);
			return;
		}
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	txctl.txq = sc->beacon.cabq;

	ath_print(common, ATH_DBG_XMIT,
		  "transmitting CABQ packet, skb: %p\n", skb);

	if (ath_tx_start(hw, skb, &txctl) != 0) {
		ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
		goto exit;
	}

	return;
exit:
	dev_kfree_skb_any(skb);
}

/*****************/
/* TX Completion */
/*****************/

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    struct ath_wiphy *aphy, int tx_flags)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;

	ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (aphy)
		hw = aphy->hw;

	if (tx_flags & ATH_TX_BAR)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
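		 * (The header was padded to a 4-byte boundary for DMA when
		 * the frame was queued; padpos & 3 recovers the pad size.)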
1897 */ 1898 memmove(skb->data + padsize, skb->data, padpos); 1899 skb_pull(skb, padsize); 1900 } 1901 1902 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) { 1903 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK; 1904 ath_print(common, ATH_DBG_PS, 1905 "Going back to sleep after having " 1906 "received TX status (0x%lx)\n", 1907 sc->ps_flags & (PS_WAIT_FOR_BEACON | 1908 PS_WAIT_FOR_CAB | 1909 PS_WAIT_FOR_PSPOLL_DATA | 1910 PS_WAIT_FOR_TX_ACK)); 1911 } 1912 1913 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL)) 1914 ath9k_tx_status(hw, skb); 1915 else { 1916 q = skb_get_queue_mapping(skb); 1917 if (q >= 4) 1918 q = 0; 1919 1920 if (--sc->tx.pending_frames[q] < 0) 1921 sc->tx.pending_frames[q] = 0; 1922 1923 ieee80211_tx_status(hw, skb); 1924 } 1925} 1926 1927static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 1928 struct ath_txq *txq, struct list_head *bf_q, 1929 struct ath_tx_status *ts, int txok, int sendbar) 1930{ 1931 struct sk_buff *skb = bf->bf_mpdu; 1932 unsigned long flags; 1933 int tx_flags = 0; 1934 1935 if (sendbar) 1936 tx_flags = ATH_TX_BAR; 1937 1938 if (!txok) { 1939 tx_flags |= ATH_TX_ERROR; 1940 1941 if (bf_isxretried(bf)) 1942 tx_flags |= ATH_TX_XRETRY; 1943 } 1944 1945 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE); 1946 1947 if (bf->bf_state.bfs_paprd) { 1948 if (time_after(jiffies, 1949 bf->bf_state.bfs_paprd_timestamp + 1950 msecs_to_jiffies(ATH_PAPRD_TIMEOUT))) 1951 dev_kfree_skb_any(skb); 1952 else 1953 complete(&sc->paprd_complete); 1954 } else { 1955 ath_tx_complete(sc, skb, bf->aphy, tx_flags); 1956 ath_debug_stat_tx(sc, txq, bf, ts); 1957 } 1958 1959 /* 1960 * Return the list of ath_buf of this mpdu to free queue 1961 */ 1962 spin_lock_irqsave(&sc->tx.txbuflock, flags); 1963 list_splice_tail_init(bf_q, &sc->tx.txbuf); 1964 spin_unlock_irqrestore(&sc->tx.txbuflock, flags); 1965} 1966 1967static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf, 1968 struct ath_tx_status *ts, int txok) 1969{ 1970 u16 seq_st = 0; 1971 u32 ba[WME_BA_BMP_SIZE >> 5]; 1972 int ba_index; 1973 int nbad = 0; 1974 int isaggr = 0; 1975 1976 if (bf->bf_lastbf->bf_tx_aborted) 1977 return 0; 1978 1979 isaggr = bf_isaggr(bf); 1980 if (isaggr) { 1981 seq_st = ts->ts_seqnum; 1982 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3); 1983 } 1984 1985 while (bf) { 1986 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno); 1987 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index))) 1988 nbad++; 1989 1990 bf = bf->bf_next; 1991 } 1992 1993 return nbad; 1994} 1995 1996static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts, 1997 int nbad, int txok, bool update_rc) 1998{ 1999 struct sk_buff *skb = bf->bf_mpdu; 2000 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2001 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 2002 struct ieee80211_hw *hw = bf->aphy->hw; 2003 u8 i, tx_rateindex; 2004 2005 if (txok) 2006 tx_info->status.ack_signal = ts->ts_rssi; 2007 2008 tx_rateindex = ts->ts_rateindex; 2009 WARN_ON(tx_rateindex >= hw->max_rates); 2010 2011 if (ts->ts_status & ATH9K_TXERR_FILT) 2012 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 2013 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) { 2014 tx_info->flags |= IEEE80211_TX_STAT_AMPDU; 2015 2016 BUG_ON(nbad > bf->bf_nframes); 2017 2018 tx_info->status.ampdu_len = bf->bf_nframes; 2019 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad; 2020 } 2021 2022 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 && 2023 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) { 
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = bf->aphy->hw;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > bf->bf_nframes);

		tx_info->status.ampdu_len = bf->bf_nframes;
		tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
	}

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
		if (ieee80211_is_data(hdr->frame_control)) {
			if (ts->ts_flags &
			    (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
				tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
			if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
			    (ts->ts_status & ATH9K_TXERR_FIFO))
				tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
		}
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}

static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
{
	int qnum;

	qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
	if (qnum == -1)
		return;

	spin_lock_bh(&txq->axq_lock);
	if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
		if (ath_mac80211_start_queue(sc, qnum))
			txq->stopped = 0;
	}
	spin_unlock_bh(&txq->axq_lock);
}
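
/*
 * Illustrative sketch (not driver code): ath_wake_mac80211_queue() is
 * the wake half of a simple high-water-mark scheme - the tx path is
 * assumed to stop a mac80211 queue once its pending_frames count
 * reaches ATH_MAX_QDEPTH, and the completion path restarts it when
 * the count has dropped below the limit again.  The model below uses
 * made-up names and a made-up limit.
 */
#if 0	/* illustrative sketch, not built with the driver */
#include <stdbool.h>
#include <stdio.h>

#define MAX_QDEPTH	128	/* stands in for ATH_MAX_QDEPTH */

struct queue_model {
	int pending;
	bool stopped;
};

static void model_enqueue(struct queue_model *q)
{
	if (++q->pending >= MAX_QDEPTH)
		q->stopped = true;	/* tx path stops the queue */
}

static void model_complete(struct queue_model *q)
{
	if (--q->pending < 0)
		q->pending = 0;
	if (q->stopped && q->pending < MAX_QDEPTH)
		q->stopped = false;	/* mirrors ath_wake_mac80211_queue() */
}

int main(void)
{
	struct queue_model q = { 0, false };
	int i;

	for (i = 0; i < MAX_QDEPTH; i++)
		model_enqueue(&q);
	printf("stopped = %d\n", q.stopped);	/* 1 */
	model_complete(&q);
	printf("stopped = %d\n", q.stopped);	/* 0 */
	return 0;
}
#endif
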
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int txok;
	int status;

	ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		  txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		  txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition where a BH gets scheduled
		 * after software writes TxE and before the hardware
		 * re-loads the last descriptor to get the newly chained
		 * one.  Software must keep the last DONE descriptor as a
		 * holding descriptor - it does so by marking it with the
		 * STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		/*
		 * The nullfunc frame has been ACKed, so the chip can now
		 * be put into power save (which disables RX).
		 */
		if (bf->bf_isnullfunc &&
		    (ts.ts_status & ATH9K_TX_ACKED)) {
			if ((sc->ps_flags & PS_ENABLED))
				ath9k_enable_ps(sc);
			else
				sc->ps_flags |= PS_NULLFUNC_COMPLETED;
		}

		/*
		 * Remove the ath_bufs of the same transmit unit from txq,
		 * but leave the last descriptor behind as the holding
		 * descriptor for the hardware.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
					  &txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
		txq->axq_tx_inprogress = false;
		if (bf_held)
			list_del(&bf_held->list);
		spin_unlock_bh(&txq->axq_lock);

		if (bf_held)
			ath_tx_return_buffer(sc, bf_held);

		if (!bf_isampdu(bf)) {
			/*
			 * This frame was sent out as a single frame;
			 * use the hardware retry status for it.
			 */
			if (ts.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

		ath_wake_mac80211_queue(sc, txq);

		spin_lock_bh(&txq->axq_lock);
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

static void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			  "tx hung, resetting the chip\n");
		ath9k_ps_wakeup(sc);
		ath_reset(sc, true);
		ath9k_ps_restore(sc);
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
				     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}

void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}
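
/*
 * Illustrative sketch (not driver code): ath_tx_tasklet() seeds the
 * QCU mask with every queue bit set and lets ath9k_hw_gettxintrtxqs()
 * narrow it down to the QCUs that actually raised a TX interrupt;
 * only those queues are then processed.  The loop below shows the
 * same bit test against a hard-coded mask and a made-up queue count.
 */
#if 0	/* illustrative sketch, not built with the driver */
#include <stdio.h>
#include <stdint.h>

#define NUM_TX_QUEUES	10	/* stands in for ATH9K_NUM_TX_QUEUES */

int main(void)
{
	uint32_t qcumask = (1 << 1) | (1 << 3);	/* say QCUs 1 and 3 fired */
	int i;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		if (qcumask & (1 << i))
			printf("process tx queue %d\n", i);
	return 0;
}
#endif
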
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status txs;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;
	int txok;

	for (;;) {
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_print(common, ATH_DBG_XMIT,
				  "Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (txs.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[txs.qid];

		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);
		INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		txq->axq_depth--;
		txq->axq_tx_inprogress = false;
		spin_unlock_bh(&txq->axq_lock);

		txok = !(txs.ts_status & ATH9K_TXERR_MASK);

		/*
		 * Make sure the nullfunc frame is ACKed before configuring
		 * the hardware into PS mode.
		 */
		if (bf->bf_isnullfunc && txok) {
			if ((sc->ps_flags & PS_ENABLED))
				ath9k_enable_ps(sc);
			else
				sc->ps_flags |= PS_NULLFUNC_COMPLETED;
		}

		if (!bf_isampdu(bf)) {
			if (txs.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head,
					    &txs, txok, 0);

		ath_wake_mac80211_queue(sc, txq);

		spin_lock_bh(&txq->axq_lock);
		if (!list_empty(&txq->txq_fifo_pending)) {
			INIT_LIST_HEAD(&bf_head);
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head, &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			ath_tx_txqaddbuf(sc, txq, &bf_head);
		} else if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

/*****************/
/* Init, Cleanup */
/*****************/

static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}

static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_descdma *dd = &sc->txsdma;

	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
}

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}
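
/*
 * Illustrative sketch (not driver code): the EDMA completion path
 * above consumes txq->txq_fifo[] as a ring, advancing txq_tailidx
 * modulo ATH_TXFIFO_DEPTH through INCR().  The model below assumes
 * INCR() is the usual power-of-two wrap (increment, then mask) and
 * uses a made-up depth.
 */
#if 0	/* illustrative sketch, not built with the driver */
#include <stdio.h>

#define TXFIFO_DEPTH	8	/* stands in for ATH_TXFIFO_DEPTH */
#define MODEL_INCR(_l, _sz)	do { (_l)++; (_l) &= ((_sz) - 1); } while (0)

int main(void)
{
	int tailidx = 6;
	int i;

	for (i = 0; i < 4; i++) {
		printf("consume fifo slot %d\n", tailidx);
		MODEL_INCR(tailidx, TXFIFO_DEPTH);	/* 6, 7, 0, 1 */
	}
	return 0;
}
#endif
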
void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		ac->qnum = sc->tx.hwq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}

void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int i, tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {
		i = tid->ac->qnum;

		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		txq = &sc->tx.txq[i];
		ac = tid->ac;

		spin_lock_bh(&txq->axq_lock);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
	}
}
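
/*
 * Illustrative sketch (not driver code): ath_tx_node_init() hangs
 * each TID off one of the four WME access categories through
 * TID_TO_WME_AC().  The sketch below assumes the macro follows the
 * standard 802.1d-style pairing for QoS TIDs 0-7: 0,3 -> BE,
 * 1,2 -> BK, 4,5 -> VI, 6,7 -> VO.
 */
#if 0	/* illustrative sketch, not built with the driver */
#include <stdio.h>

enum { AC_BE, AC_BK, AC_VI, AC_VO };

static int model_tid_to_ac(int tid)
{
	if (tid == 0 || tid == 3)
		return AC_BE;
	if (tid == 1 || tid == 2)
		return AC_BK;
	if (tid == 4 || tid == 5)
		return AC_VI;
	return AC_VO;
}

int main(void)
{
	static const char *name[] = { "BE", "BK", "VI", "VO" };
	int tid;

	for (tid = 0; tid < 8; tid++)
		printf("tid %d -> AC_%s\n", tid, name[model_tid_to_ac(tid)]);
	return 0;
}
#endif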