Lines matching defs:sc: every match references the driver context (struct ath_softc *sc); the leading number on each line is its position in the source file.

123 static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
125 static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
127 static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
129 static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
131 static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
134 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
139 ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
152 n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
155 i++, ds += sc->sc_tx_desclen) {
156 if_ath_alq_post(&sc->sc_alq,
158 sc->sc_tx_desclen,
170 ath_tx_is_11n(struct ath_softc *sc)
172 return ((sc->sc_ah->ah_magic == 0x20065416) ||
173 (sc->sc_ah->ah_magic == 0x19741014));
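
The two ah_magic values matched above are how this file tells the 11n-capable HALs apart from the legacy ones; most descriptor-writing paths below branch on this predicate. Reconstructed with comments (the chip-family attributions are an assumption, not something the listing states):

    /*
     * Does the attached HAL speak 11n? The magic numbers appear to
     * identify the AR5416-era and AR9300-era HAL modules (assumption).
     */
    static int
    ath_tx_is_11n(struct ath_softc *sc)
    {
        return ((sc->sc_ah->ah_magic == 0x20065416) ||
            (sc->sc_ah->ah_magic == 0x19741014));
    }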
183 ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
198 ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
206 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
235 ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
256 ath_txfrag_cleanup(struct ath_softc *sc,
261 ATH_TXBUF_LOCK_ASSERT(sc);
266 ath_returnbuf_head(sc, bf);
277 ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
283 ATH_TXBUF_LOCK(sc);
286 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
288 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n",
290 ath_txfrag_cleanup(sc, frags, ni);
296 ATH_TXBUF_UNLOCK(sc);
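
ath_txfrag_setup() pre-allocates one ath_buf per additional 802.11 fragment under ATH_TXBUF_LOCK and unwinds through ath_txfrag_cleanup() if any allocation fails, so a fragment burst is either fully provisioned or not sent at all. A sketch of that all-or-nothing loop, assuming fragments are chained through m_nextpkt and that a node reference is taken per fragment (both details are assumptions here):

    ATH_TXBUF_LOCK(sc);
    for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
        bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
        if (bf == NULL) {
            DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n", __func__);
            /* Give back everything grabbed so far. */
            ath_txfrag_cleanup(sc, frags, ni);
            break;
        }
        (void) ieee80211_ref_node(ni);  /* assumed per-fragment ref */
        TAILQ_INSERT_TAIL(frags, bf, bf_list);
    }
    ATH_TXBUF_UNLOCK(sc);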
302 ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
311 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
318 sc->sc_stats.ast_tx_busdma++;
328 sc->sc_stats.ast_tx_linear++;
332 sc->sc_stats.ast_tx_nombuf++;
336 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
340 sc->sc_stats.ast_tx_busdma++;
347 sc->sc_stats.ast_tx_nodata++;
351 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
353 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
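
The ath_tx_dmasetup() matches trace the standard busdma retry dance: load the mbuf chain, and if it produced too many scatter/gather segments, linearize it and load exactly once more, bumping a distinct statistic at each exit. A sketch of the control flow, assuming the usual EFBIG convention and an m_collapse()-style linearization; the segment-limit name is illustrative and mbuf cleanup on the error paths is elided:

    error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
        bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT);
    if (error != 0 && error != EFBIG) {
        sc->sc_stats.ast_tx_busdma++;       /* unrecoverable busdma error */
        return (error);
    }
    if (error == EFBIG) {                   /* too many segments */
        sc->sc_stats.ast_tx_linear++;
        m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER);  /* assumed limit */
        if (m == NULL) {
            sc->sc_stats.ast_tx_nombuf++;   /* could not linearize */
            return (ENOMEM);
        }
        m0 = m;
        error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
            bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT);
        if (error != 0) {
            sc->sc_stats.ast_tx_busdma++;   /* retry failed too */
            return (error);
        }
    }
    if (bf->bf_nseg == 0) {                 /* null packet, nothing to send */
        sc->sc_stats.ast_tx_nodata++;
        return (EIO);
    }
    bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);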
365 ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
369 struct ath_hal *ah = sc->sc_ah;
381 struct ath_descdma *dd = &sc->sc_txdma;
392 numTxMaps = sc->sc_tx_nmaps;
450 if (ath_tx_is_11n(sc))
451 ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);
458 ath_hal_set11n_aggr_last(sc->sc_ah,
469 ath_hal_set11n_aggr_middle(sc->sc_ah,
479 ds += sc->sc_tx_desclen;
488 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
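
ath_tx_chaindesclist() then walks the DMA segments, emitting one hardware descriptor per sc_tx_nmaps buffer pointers, stepping by the chip-specific sc_tx_desclen, and fixing up the 11n aggregate flags on each descriptor as it goes. The shape of the loop, heavily simplified: the buffer/length fills and the exact first/middle/last conditions are elided, and is_aggr, is_last_desc, and ndelim stand in for state the real code tracks:

    ds = (char *) bf->bf_desc;
    for (i = 0; i < bf->bf_nseg; i += numTxMaps, ds += sc->sc_tx_desclen) {
        /* ... fill up to numTxMaps buffer/len pairs from bf->bf_segs ... */
        if (ath_tx_is_11n(sc)) {
            if (! is_aggr)
                ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);
            else if (is_last_desc)
                ath_hal_set11n_aggr_last(sc->sc_ah,
                    (struct ath_desc *) ds);
            else
                ath_hal_set11n_aggr_middle(sc->sc_ah,
                    (struct ath_desc *) ds, ndelim);
        }
    }
    bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);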
507 ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
534 if (ath_tx_is_11n(sc)) {
535 ath_buf_set_rate(sc, ni, bf);
537 ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
552 ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
557 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
564 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",
567 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",
575 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
584 ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
606 ath_tx_set_ratectrl(sc, bf->bf_node, bf);
613 ath_tx_chaindesclist(sc, ds0, bf,
624 ath_hal_set11n_aggr_first(sc->sc_ah,
635 ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
679 ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);
681 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
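
ath_tx_setds_11n() stitches an A-MPDU together: every subframe gets its own descriptor chain, the first subframe is flagged with ath_hal_set11n_aggr_first(), each subsequent subframe is linked from the previous one's last descriptor, and after the walk the final descriptor is rewritten via ath_hal_setuplasttxdesc() so TX status is reported on the last frame. In outline, assuming subframes hang off bf->bf_next and that bfs_al/bfs_ndelim carry the aggregate length and delimiter count (partly assumed names); the per-subframe rate bookkeeping is elided:

    bf_prev = NULL;
    for (bf = bf_first; bf != NULL; bf = bf->bf_next) {
        /* ... ath_hal_setuptxdesc() + ath_tx_chaindesclist() ... */
        if (bf == bf_first)
            ath_hal_set11n_aggr_first(sc->sc_ah, bf->bf_desc,
                bf->bf_state.bfs_al, bf->bf_state.bfs_ndelim);
        if (bf_prev != NULL)
            ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
                bf->bf_daddr);
        bf_prev = bf;
    }
    /* Status lands on the last subframe's descriptor. */
    ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);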
699 ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
702 ATH_TX_LOCK_ASSERT(sc);
711 if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
712 DPRINTF(sc, ATH_DEBUG_XMIT,
726 bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
730 ath_hal_settxdesclink(sc->sc_ah,
742 ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
745 struct ath_hal *ah = sc->sc_ah;
756 ATH_TX_LOCK_ASSERT(sc);
767 if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) {
768 device_printf(sc->sc_dev,
805 ATH_KTR(sc, ATH_KTR_TX, 3,
820 DPRINTF(sc, ATH_DEBUG_XMIT,
825 ATH_KTR(sc, ATH_KTR_TX, 5,
848 DPRINTF(sc, ATH_DEBUG_XMIT,
853 ATH_KTR(sc, ATH_KTR_TX, 5,
867 DPRINTF(sc, ATH_DEBUG_XMIT,
901 ATH_KTR(sc, ATH_KTR_TX, 1,
911 ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
924 DPRINTF(sc, ATH_DEBUG_RESET,
933 if (sc->sc_debug & ATH_DEBUG_RESET)
934 ath_tx_dump(sc, txq);
946 ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
949 ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
951 ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
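
ath_legacy_tx_dma_restart() is the post-reset/drain recovery path: it pushes the first queued buffer's physical address back into the hardware queue, re-derives the link pointer from the last descriptor so later handoffs can chain onto it, and kicks the queue. Roughly (the list head/tail accessors are assumptions):

    bf = TAILQ_FIRST(&txq->axq_q);
    if (bf == NULL)
        return;
    bf_last = ATH_TXQ_LAST(txq, axq_q_s);   /* assumed tail accessor */
    ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
    ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
        &txq->axq_link);
    ath_hal_txstart(sc->sc_ah, txq->axq_qnum);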
960 ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
963 ATH_TX_LOCK_ASSERT(sc);
966 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
967 ath_tx_alq_post(sc, bf);
971 ath_tx_handoff_mcast(sc, txq, bf);
973 ath_tx_handoff_hw(sc, txq, bf);
977 ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
981 DPRINTF(sc, ATH_DEBUG_XMIT,
1046 ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
1052 const HAL_RATE_TABLE *rt = sc->sc_currates;
1053 struct ieee80211com *ic = &sc->sc_ic;
1090 sc->sc_stats.ast_tx_protect++;
1105 sc->sc_stats.ast_tx_htprotect++;
1119 ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
1125 struct ath_hal *ah = sc->sc_ah;
1126 const HAL_RATE_TABLE *rt = sc->sc_currates;
1259 ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
1265 const HAL_RATE_TABLE *rt = sc->sc_currates;
1283 rix = sc->sc_protrix;
1292 cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
1298 ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
1302 if (! ath_tx_is_11n(sc))
1303 ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
1314 if (!sc->sc_mrrprot) {
1331 ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
1334 struct ath_hal *ah = sc->sc_ah;
1337 DPRINTF(sc, ATH_DEBUG_XMIT,
1361 ath_tx_set_ratectrl(sc, bf->bf_node, bf);
1362 ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);
1379 ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf, int tid,
1394 ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
1403 ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
1407 sc->sc_txrix = rix; /* for LED blinking */
1408 sc->sc_lastdatarix = rix; /* for fast frames */
1418 ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
1423 ATH_TX_LOCK_ASSERT(sc);
1444 ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,
1461 DPRINTF(sc, ATH_DEBUG_SW_TX,
1468 DPRINTF(sc, ATH_DEBUG_SW_TX,
1479 DPRINTF(sc, ATH_DEBUG_XMIT,
1505 ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
1511 ATH_TX_LOCK_ASSERT(sc);
1525 ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen, false);
1526 ath_tx_calc_duration(sc, bf);
1527 ath_tx_calc_protection(sc, bf);
1528 ath_tx_set_rtscts(sc, bf);
1529 ath_tx_rate_fill_rcflags(sc, bf);
1530 ath_tx_setds(sc, bf);
1539 ath_tx_handoff(sc, txq, bf);
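
The ath_tx_xmit_normal() matches spell out the fixed per-frame setup order that recurs on every transmit path in this file (ath_tx_xmit_aggr() and both hw_queue routines below repeat it): rate lookup, duration, protection, RTS/CTS, rate-control flags, descriptor write, hardware handoff. Condensed, with each step's role noted (the one-line comments are my reading of the function names):

    ath_tx_do_ratelookup(sc, bf, tid->tid,
        bf->bf_state.bfs_pktlen, false);  /* choose the TX rate series */
    ath_tx_calc_duration(sc, bf);         /* 802.11 duration/NAV field */
    ath_tx_calc_protection(sc, bf);       /* does this need RTS/CTS? */
    ath_tx_set_rtscts(sc, bf);            /* protection rate + CTS duration */
    ath_tx_rate_fill_rcflags(sc, bf);     /* per-series rate-control flags */
    ath_tx_setds(sc, bf);                 /* write the TX descriptor(s) */
    ath_tx_handoff(sc, txq, bf);          /* hand to the hardware queue */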
1555 ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
1559 struct ieee80211com *ic = &sc->sc_ic;
1581 ATH_TX_LOCK_ASSERT(sc);
1595 if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
1610 error = ath_tx_dmasetup(sc, bf, m0);
1620 rt = sc->sc_currates;
1621 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
1631 sc->sc_stats.ast_tx_shortpre++;
1641 pri = ath_tx_getac(sc, m0); /* honor classification */
1710 device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
1740 if (txq != sc->sc_ac2q[pri]) {
1741 DPRINTF(sc, ATH_DEBUG_XMIT,
1747 sc->sc_ac2q[pri],
1748 sc->sc_ac2q[pri]->axq_qnum);
1760 sc->sc_stats.ast_tx_rts++;
1763 sc->sc_stats.ast_tx_noack++;
1765 if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
1766 DPRINTF(sc, ATH_DEBUG_TDMA,
1768 sc->sc_stats.ast_tdma_ack++;
1780 device_printf(sc->sc_dev,
1829 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
1843 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
1845 sc->sc_hwmap[rix].ieeerate, -1);
1848 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
1850 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
1852 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
1853 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
1854 sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
1855 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
1879 bf->bf_state.bfs_txantenna = sc->sc_txantenna;
1904 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
1920 ATH_TX_LOCK_ASSERT(sc);
1947 pri = ath_tx_getac(sc, m0);
1948 tid = ath_tx_gettid(sc, m0);
1950 txq = sc->sc_ac2q[pri];
1962 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
1963 > sc->sc_txq_mcastq_maxdepth) {
1964 sc->sc_stats.ast_tx_mcastq_overflow++;
1990 sc->sc_txq_node_psq_maxdepth) {
1991 sc->sc_stats.ast_tx_node_psq_overflow++;
1997 is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
1998 is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
2001 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
2018 if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
2025 bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
2048 seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);
2069 DPRINTF(sc, ATH_DEBUG_SW_TX,
2074 r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
2109 DPRINTF(sc, ATH_DEBUG_SW_TX,
2112 ath_tx_xmit_normal(sc, txq, bf);
2113 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2115 ath_tx_swq(sc, ni, txq, queue_to_head, bf);
2118 ath_tx_xmit_normal(sc, txq, bf);
2131 ath_tx_leak_count_update(sc, tid, bf);
2132 ath_tx_xmit_normal(sc, txq, bf);
2139 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
2143 struct ieee80211com *ic = &sc->sc_ic;
2160 ATH_TX_LOCK_ASSERT(sc);
2175 ATH_KTR(sc, ATH_KTR_TX, 2,
2178 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n",
2184 pri = ath_tx_getac(sc, m0);
2187 do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid);
2192 DPRINTF(sc, ATH_DEBUG_XMIT,
2208 if (! ath_tx_tag_crypto(sc, ni,
2221 error = ath_tx_dmasetup(sc, bf, m0);
2243 rt = sc->sc_currates;
2244 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
2247 rix = ath_tx_findrix(sc, params->ibp_rate0);
2264 device_printf(sc->sc_dev,
2274 sc->sc_txrix = rix;
2278 txantenna = sc->sc_txantenna;
2293 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
2295 sc->sc_hwmap[rix].ieeerate, -1);
2298 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
2300 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2302 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
2303 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
2304 sc->sc_tx_th.wt_txpower = MIN(params->ibp_power,
2306 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
2333 bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;
2351 rix = ath_tx_findrix(sc, params->ibp_rate1);
2355 rix = ath_tx_findrix(sc, params->ibp_rate2);
2359 rix = ath_tx_findrix(sc, params->ibp_rate3);
2367 ath_tx_rate_fill_rcflags(sc, bf);
2376 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",
2391 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2392 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2395 ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);
2398 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2408 ath_tx_leak_count_update(sc, tid, bf);
2409 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2424 struct ath_softc *sc = ic->ic_softc;
2429 ATH_PCU_LOCK(sc);
2430 if (sc->sc_inreset_cnt > 0) {
2431 DPRINTF(sc, ATH_DEBUG_XMIT,
2434 ATH_PCU_UNLOCK(sc);
2437 sc->sc_txstart_cnt++;
2438 ATH_PCU_UNLOCK(sc);
2441 ATH_LOCK(sc);
2442 ath_power_set_power_state(sc, HAL_PM_AWAKE);
2443 ATH_UNLOCK(sc);
2445 ATH_TX_LOCK(sc);
2447 if (!sc->sc_running || sc->sc_invalid) {
2448 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, r/i: %d/%d",
2449 __func__, sc->sc_running, sc->sc_invalid);
2461 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
2462 > sc->sc_txq_mcastq_maxdepth) {
2463 sc->sc_stats.ast_tx_mcastq_overflow++;
2476 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
2478 sc->sc_stats.ast_tx_nobuf++;
2483 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n",
2491 if (ath_tx_start(sc, ni, bf, m)) {
2500 if (ath_tx_raw_start(sc, ni, bf, m, params)) {
2505 sc->sc_wd_timer = 5;
2506 sc->sc_stats.ast_tx_raw++;
2513 ath_tx_update_tim(sc, ni, 1);
2515 ATH_TX_UNLOCK(sc);
2517 ATH_PCU_LOCK(sc);
2518 sc->sc_txstart_cnt--;
2519 ATH_PCU_UNLOCK(sc);
2522 ATH_LOCK(sc);
2523 ath_power_restore_power_state(sc);
2524 ATH_UNLOCK(sc);
2529 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, "
2534 ATH_TXBUF_LOCK(sc);
2535 ath_returnbuf_head(sc, bf);
2536 ATH_TXBUF_UNLOCK(sc);
2539 ATH_TX_UNLOCK(sc);
2541 ATH_PCU_LOCK(sc);
2542 sc->sc_txstart_cnt--;
2543 ATH_PCU_UNLOCK(sc);
2546 ATH_LOCK(sc);
2547 ath_power_restore_power_state(sc);
2548 ATH_UNLOCK(sc);
2551 ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p",
2553 sc->sc_stats.ast_tx_raw_fail++;
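
The ath_raw_xmit() matches lay out the entry/exit discipline shared by the driver's TX entry points: refuse work while a reset is in flight (otherwise take a txstart reference) under ATH_PCU_LOCK, force the chip awake under ATH_LOCK, do the real work under ATH_TX_LOCK, then undo all of it in reverse order on both the success and failure paths. A skeleton, assuming this ordering is the intended lock hierarchy (the error value is illustrative):

    ATH_PCU_LOCK(sc);
    if (sc->sc_inreset_cnt > 0) {
        ATH_PCU_UNLOCK(sc);
        return (ENETDOWN);              /* assumed errno */
    }
    sc->sc_txstart_cnt++;               /* hold off resets while we TX */
    ATH_PCU_UNLOCK(sc);

    ATH_LOCK(sc);
    ath_power_set_power_state(sc, HAL_PM_AWAKE);
    ATH_UNLOCK(sc);

    ATH_TX_LOCK(sc);
    /* ... buffer allocation, ath_tx_start() or ath_tx_raw_start() ... */
    ATH_TX_UNLOCK(sc);

    ATH_PCU_LOCK(sc);
    sc->sc_txstart_cnt--;
    ATH_PCU_UNLOCK(sc);

    ATH_LOCK(sc);
    ath_power_restore_power_state(sc);
    ATH_UNLOCK(sc);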
2607 ath_tx_action_frame_override_queue(struct ath_softc *sc,
2657 ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
2663 ATH_TX_LOCK_ASSERT(sc);
2671 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2678 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2691 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2705 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2716 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2720 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2748 ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an,
2755 ATH_TX_LOCK_ASSERT(sc);
2767 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2771 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2777 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2780 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2795 ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
2802 ATH_TX_LOCK_ASSERT(sc);
2808 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2825 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2840 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2846 ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,
2851 ATH_TX_LOCK_ASSERT(sc);
2865 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
2878 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
2886 ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)
2889 ATH_TX_LOCK_ASSERT(sc);
2908 ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
2910 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2912 ATH_TX_LOCK_ASSERT(sc);
2919 if (! ath_tx_tid_can_tx_or_sched(sc, tid))
2961 ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)
2963 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2965 ATH_TX_LOCK_ASSERT(sc);
2984 ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
2995 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, qos has seq=%d\n",
3004 ATH_TX_LOCK_ASSERT(sc);
3036 DPRINTF(sc, ATH_DEBUG_SW_TX,
3048 ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an,
3054 ATH_TX_LOCK_ASSERT(sc);
3059 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
3070 ath_tx_tid_sched(sc, tid);
3085 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
3093 ath_tx_update_clrdmask(sc, tid, bf);
3096 ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen,
3098 ath_tx_calc_duration(sc, bf);
3099 ath_tx_calc_protection(sc, bf);
3100 ath_tx_set_rtscts(sc, bf);
3101 ath_tx_rate_fill_rcflags(sc, bf);
3102 ath_tx_setds(sc, bf);
3105 sc->sc_aggr_stats.aggr_low_hwq_single_pkt++;
3112 ath_tx_addto_baw(sc, an, tid, bf);
3124 ath_tx_leak_count_update(sc, tid, bf);
3127 ath_tx_handoff(sc, txq, bf);
3137 ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
3146 ATH_TX_LOCK_ASSERT(sc);
3150 pri = ath_tx_getac(sc, m0);
3151 tid = ath_tx_gettid(sc, m0);
3154 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
3172 if (! ath_tx_tid_can_tx_or_sched(sc, atid)) {
3174 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__);
3183 } else if (ath_tx_ampdu_pending(sc, an, tid)) {
3185 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__);
3188 } else if (ath_tx_ampdu_running(sc, an, tid)) {
3235 ath_tx_xmit_aggr(sc, an, txq, bf);
3236 DPRINTF(sc, ATH_DEBUG_SW_TX,
3240 DPRINTF(sc, ATH_DEBUG_SW_TX,
3244 ath_tx_tid_sched(sc, atid);
3260 } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3261 (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3263 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);
3265 ath_tx_update_clrdmask(sc, atid, bf);
3272 ath_tx_leak_count_update(sc, atid, bf);
3277 ath_tx_xmit_normal(sc, txq, bf);
3280 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__);
3282 ath_tx_tid_sched(sc, atid);
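
ath_tx_swq() is the traffic cop in front of the per-TID software queues; the matches above give its decision ladder. Paraphrased, with the conditions in the order they appear (the comments summarize each branch):

    if (! ath_tx_tid_can_tx_or_sched(sc, atid)) {
        /* TID paused: software-queue the frame, don't schedule. */
    } else if (ath_tx_ampdu_pending(sc, an, tid)) {
        /* ADDBA exchange in flight: hold the frame until it resolves. */
    } else if (ath_tx_ampdu_running(sc, an, tid)) {
        /* Aggregation session up: try a direct aggregate transmit,
         * falling back to queueing and scheduling the TID. */
    } else if (txq->axq_depth + txq->fifo.axq_depth <
            sc->sc_hwq_limit_nonaggr &&
        txq->axq_aggr_depth < sc->sc_hwq_limit_aggr) {
        /* Hardware queue shallow enough: bypass the software queue. */
    } else {
        /* Hardware queue deep: software-queue and schedule the TID. */
    }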
3296 ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an)
3300 ATH_TX_LOCK_ASSERT(sc);
3318 ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an)
3356 ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
3359 ATH_TX_LOCK_ASSERT(sc);
3361 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, paused = %d\n",
3372 ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)
3374 ATH_TX_LOCK_ASSERT(sc);
3382 device_printf(sc->sc_dev,
3391 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3405 ath_tx_set_clrdmask(sc, tid->an);
3412 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n",
3417 ath_tx_tid_sched(sc, tid);
3422 ath_tx_swq_kick(sc);
3430 ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid,
3434 ATH_TX_LOCK_ASSERT(sc);
3437 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n",
3440 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf);
3443 ath_tx_set_retry(sc, bf);
3444 sc->sc_stats.ast_tx_swfiltered++;
3455 ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid,
3459 ATH_TX_LOCK_ASSERT(sc);
3462 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n",
3465 ath_tx_tid_pause(sc, tid);
3469 ath_tx_tid_filt_addbuf(sc, tid, bf);
3480 ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid)
3485 ATH_TX_LOCK_ASSERT(sc);
3490 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n",
3498 ath_tx_set_clrdmask(sc, tid->an);
3508 ath_tx_tid_resume(sc, tid);
3523 ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid,
3529 ATH_TX_LOCK_ASSERT(sc);
3535 sc->sc_stats.ast_tx_swretrymax++;
3536 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3550 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3551 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3559 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3564 ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3568 ath_tx_tid_filt_comp_complete(sc, tid);
3574 ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid,
3579 ATH_TX_LOCK_ASSERT(sc);
3590 sc->sc_stats.ast_tx_swretrymax++;
3591 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3602 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3603 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3615 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3620 ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3626 ath_tx_tid_filt_comp_complete(sc, tid);
3633 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid)
3636 ATH_TX_LOCK_ASSERT(sc);
3638 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3647 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3659 ath_tx_tid_pause(sc, tid);
3667 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid)
3670 ATH_TX_LOCK_ASSERT(sc);
3672 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3680 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3687 ath_tx_tid_resume(sc, tid);
3696 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid)
3699 ATH_TX_LOCK_ASSERT(sc);
3704 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3727 ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid)
3731 ATH_TX_LOCK_ASSERT(sc);
3733 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3746 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3755 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3772 ath_tx_set_clrdmask(sc, tid->an);
3780 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3791 ATH_TX_UNLOCK(sc);
3794 ATH_TX_LOCK(sc);
3799 ATH_TX_LOCK(sc);
3800 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3804 ath_tx_tid_bar_unsuspend(sc, tid);
3808 ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an,
3812 ATH_TX_LOCK_ASSERT(sc);
3818 if (ath_tx_ampdu_running(sc, an, tid->tid) &&
3826 ath_tx_update_baw(sc, an, tid, bf);
3834 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW
3848 ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,
3855 txq = sc->sc_ac2q[tid->ac];
3858 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3870 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3880 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3892 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3908 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
3930 ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,
3940 ATH_TX_LOCK_ASSERT(sc);
3951 ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
3956 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3967 ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
3972 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3982 ath_tx_set_clrdmask(sc, tid->an);
4000 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4020 ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
4089 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
4097 ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",
4100 ATH_TX_LOCK(sc);
4101 DPRINTF(sc, ATH_DEBUG_NODE,
4118 ath_tx_tid_drain(sc, an, atid, &bf_cq);
4121 ath_tx_tid_unsched(sc, atid);
4124 ath_tx_tid_reset(sc, atid);
4131 ATH_TX_UNLOCK(sc);
4136 ath_tx_default_comp(sc, bf, 0);
4144 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4151 ATH_TX_LOCK(sc);
4159 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
4160 ath_tx_tid_unsched(sc, tid);
4163 ATH_TX_UNLOCK(sc);
4167 ath_tx_default_comp(sc, bf, 0);
4188 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4197 ATH_TX_LOCK(sc);
4199 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
4211 DPRINTF(sc, ATH_DEBUG_SW_TX,
4216 ath_tx_tid_filt_comp_buf(sc, atid, bf);
4220 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__);
4222 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
4230 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4234 ath_tx_tid_resume(sc, atid);
4250 ath_tx_tid_filt_comp_complete(sc, atid);
4251 ATH_TX_UNLOCK(sc);
4258 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
4264 ath_tx_default_comp(sc, bf, fail);
4275 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4282 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",
4285 ATH_TX_LOCK(sc);
4290 ath_tx_update_baw(sc, an, atid, bf);
4292 DPRINTF(sc, ATH_DEBUG_SW_TX,
4298 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4302 ath_tx_tid_resume(sc, atid);
4304 ATH_TX_UNLOCK(sc);
4306 ath_tx_default_comp(sc, bf, 0);
4315 ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an,
4321 ATH_TX_LOCK_ASSERT(sc);
4344 ath_tx_update_baw(sc, an, atid, bf);
4380 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid,
4386 ATH_TX_LOCK_ASSERT(sc);
4388 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4420 ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq);
4443 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4451 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
4463 nbf = ath_buf_clone(sc, bf);
4466 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n",
4472 DPRINTF(sc, ATH_DEBUG_XMIT,
4479 error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
4481 DPRINTF(sc, ATH_DEBUG_XMIT,
4490 ATH_TXBUF_LOCK(sc);
4491 ath_returnbuf_head(sc, nbf);
4492 ATH_TXBUF_UNLOCK(sc);
4498 ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);
4501 ath_freebuf(sc, bf);
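
ath_tx_retry_clone() covers the ATH_BUF_BUSY case: when a completed buffer cannot be reused because the hardware may still walk its descriptor, the frame is cloned into a fresh ath_buf, re-DMA-mapped, swapped into the block-ack window slot in place of the original, and the original freed; failure at any step returns the clone to the free list and drops the frame. In outline (the bfs_dobaw guard is an assumption):

    nbf = ath_buf_clone(sc, bf);
    if (nbf == NULL)
        return (NULL);                  /* caller drops the frame */
    error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
    if (error != 0) {
        ATH_TXBUF_LOCK(sc);
        ath_returnbuf_head(sc, nbf);
        ATH_TXBUF_UNLOCK(sc);
        return (NULL);
    }
    if (bf->bf_state.bfs_dobaw)         /* assumed: only BAW frames */
        ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);
    ath_freebuf(sc, bf);                /* original is gone; use nbf */
    return (nbf);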
4517 ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4525 ATH_TX_LOCK(sc);
4540 nbf = ath_tx_retry_clone(sc, an, atid, bf);
4549 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4552 sc->sc_stats.ast_tx_swretrymax++;
4556 ath_tx_update_baw(sc, an, atid, bf);
4558 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4565 ath_tx_tid_bar_suspend(sc, atid);
4568 if (ath_tx_tid_bar_tx_ready(sc, atid))
4569 ath_tx_tid_bar_tx(sc, atid);
4571 ATH_TX_UNLOCK(sc);
4574 ath_tx_default_comp(sc, bf, 0);
4583 ath_tx_set_retry(sc, bf);
4584 sc->sc_stats.ast_tx_swretries++;
4591 ath_tx_tid_sched(sc, atid);
4593 if (ath_tx_tid_bar_tx_ready(sc, atid))
4594 ath_tx_tid_bar_tx(sc, atid);
4596 ATH_TX_UNLOCK(sc);
4607 ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,
4615 ATH_TX_LOCK_ASSERT(sc);
4618 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
4619 ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);
4621 /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */
4634 nbf = ath_tx_retry_clone(sc, an, atid, bf);
4643 sc->sc_stats.ast_tx_swretrymax++;
4644 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4647 ath_tx_update_baw(sc, an, atid, bf);
4649 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4656 ath_tx_set_retry(sc, bf);
4657 sc->sc_stats.ast_tx_swretries++;
4673 ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first,
4690 ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
4696 ATH_TX_LOCK(sc);
4698 sc->sc_stats.ast_tx_aggr_failall++;
4705 sc->sc_stats.ast_tx_aggr_fail++;
4706 if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4723 ath_tx_tid_sched(sc, tid);
4734 ath_tx_tid_bar_suspend(sc, tid);
4740 if (ath_tx_tid_bar_tx_ready(sc, tid))
4741 ath_tx_tid_bar_tx(sc, tid);
4743 ATH_TX_UNLOCK(sc);
4748 ath_tx_default_comp(sc, bf, 0);
4759 ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
4767 ATH_TX_LOCK(sc);
4777 ath_tx_update_baw(sc, an, atid, bf);
4779 DPRINTF(sc, ATH_DEBUG_SW_TX,
4787 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4791 ath_tx_tid_resume(sc, atid);
4800 if (ath_tx_tid_bar_tx_ready(sc, atid))
4801 ath_tx_tid_bar_tx(sc, atid);
4803 ATH_TX_UNLOCK(sc);
4810 ath_tx_default_comp(sc, bf, 1);
4822 ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,
4847 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
4862 ATH_TX_LOCK(sc);
4866 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n",
4877 ath_tx_tid_filt_comp_complete(sc, atid);
4884 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4887 ATH_TX_UNLOCK(sc);
4888 ath_tx_comp_cleanup_aggr(sc, bf_first);
4902 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4904 ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);
4911 ath_tx_update_baw(sc, an, atid, bf);
4913 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4925 ath_tx_tid_bar_suspend(sc, atid);
4951 ATH_TX_UNLOCK(sc);
4952 ath_tx_comp_aggr_error(sc, bf_first, atid);
4977 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4995 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n",
5003 device_printf(sc->sc_dev,
5007 taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
5013 ath_printtxbuf(sc, bf_first,
5014 sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
5049 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5055 sc->sc_stats.ast_tx_aggr_ok++;
5056 ath_tx_update_baw(sc, an, atid, bf);
5059 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5065 sc->sc_stats.ast_tx_aggr_fail++;
5066 if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
5085 ATH_TX_UNLOCK(sc);
5088 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5097 ath_tx_update_ratectrl(sc, ni, rc, &ts, agglen, rc_agglen,
5106 ATH_TX_LOCK(sc);
5107 ath_tx_tid_bar_suspend(sc, atid);
5108 ATH_TX_UNLOCK(sc);
5111 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5114 ATH_TX_LOCK(sc);
5125 ath_tx_tid_sched(sc, atid);
5138 ath_tx_tid_filt_comp_complete(sc, atid);
5145 if (ath_tx_tid_bar_tx_ready(sc, atid))
5146 ath_tx_tid_bar_tx(sc, atid);
5148 ATH_TX_UNLOCK(sc);
5153 ath_tx_default_comp(sc, bf, 0);
5165 ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
5187 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
5198 ATH_TX_LOCK(sc);
5201 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__);
5203 DPRINTF(sc, ATH_DEBUG_SW_TX,
5210 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
5219 ath_tx_tid_filt_comp_complete(sc, atid);
5229 DPRINTF(sc, ATH_DEBUG_SW_TX,
5232 ATH_TX_UNLOCK(sc);
5233 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n",
5235 ath_tx_comp_cleanup_unaggr(sc, bf);
5256 DPRINTF(sc, ATH_DEBUG_SW_TX,
5259 freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
5269 ath_tx_update_baw(sc, an, atid, bf);
5271 DPRINTF(sc, ATH_DEBUG_SW_TX,
5283 ath_tx_tid_bar_suspend(sc, atid);
5288 if (ath_tx_tid_bar_tx_ready(sc, atid))
5289 ath_tx_tid_bar_tx(sc, atid);
5291 ATH_TX_UNLOCK(sc);
5297 ath_tx_default_comp(sc, bf, fail);
5309 ATH_TX_UNLOCK(sc);
5310 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n",
5312 ath_tx_aggr_retry_unaggr(sc, bf);
5317 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
5320 ath_tx_update_baw(sc, an, atid, bf);
5323 DPRINTF(sc, ATH_DEBUG_SW_TX,
5339 ath_tx_tid_filt_comp_complete(sc, atid);
5344 if (ath_tx_tid_bar_tx_ready(sc, atid))
5345 ath_tx_tid_bar_tx(sc, atid);
5347 ATH_TX_UNLOCK(sc);
5349 ath_tx_default_comp(sc, bf, fail);
5354 ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
5357 ath_tx_aggr_comp_aggr(sc, bf, fail);
5359 ath_tx_aggr_comp_unaggr(sc, bf, fail);
5371 ath_tx_tid_swq_depth_bytes(struct ath_softc *sc, struct ath_node *an,
5378 ATH_TX_LOCK_ASSERT(sc);
5427 ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
5431 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5437 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
5438 ATH_TX_LOCK_ASSERT(sc);
5448 DPRINTF(sc, ATH_DEBUG_SW_TX,
5462 if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5475 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5481 DPRINTF(sc, ATH_DEBUG_SW_TX,
5497 ath_tx_update_clrdmask(sc, tid, bf);
5499 ath_tx_do_ratelookup(sc, bf, tid->tid,
5501 ath_tx_calc_duration(sc, bf);
5502 ath_tx_calc_protection(sc, bf);
5503 ath_tx_set_rtscts(sc, bf);
5504 ath_tx_rate_fill_rcflags(sc, bf);
5505 ath_tx_setds(sc, bf);
5506 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5508 sc->sc_aggr_stats.aggr_nonbaw_pkt++;
5521 swq_pktbytes = ath_tx_tid_swq_depth_bytes(sc, an, tid);
5522 ath_tx_do_ratelookup(sc, bf, tid->tid, swq_pktbytes, true);
5529 ath_tx_calc_duration(sc, bf);
5530 ath_tx_calc_protection(sc, bf);
5532 ath_tx_set_rtscts(sc, bf);
5533 ath_tx_rate_fill_rcflags(sc, bf);
5535 status = ath_tx_form_aggr(sc, an, tid, &bf_q);
5537 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5553 sc->sc_aggr_stats.aggr_rts_aggr_limited++;
5561 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5565 ath_tx_update_clrdmask(sc, tid, bf);
5569 ath_tx_setds(sc, bf);
5570 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5572 sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
5574 sc->sc_aggr_stats.aggr_single_pkt++;
5576 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5582 sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
5583 sc->sc_aggr_stats.aggr_aggr_pkt++;
5586 ath_tx_update_clrdmask(sc, tid, bf);
5591 ath_tx_calc_duration(sc, bf);
5592 ath_tx_calc_protection(sc, bf);
5599 ath_tx_set_rtscts(sc, bf);
5606 ath_tx_setds_11n(sc, bf);
5613 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__);
5621 ath_tx_leak_count_update(sc, tid, bf);
5624 ath_tx_handoff(sc, txq, bf);
5638 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5658 ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
5662 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5664 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
5667 ATH_TX_LOCK_ASSERT(sc);
5670 if (ath_tx_ampdu_pending(sc, an, tid->tid))
5671 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n",
5673 if (ath_tx_ampdu_running(sc, an, tid->tid))
5674 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n",
5685 if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5697 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !="
5711 ath_tx_update_clrdmask(sc, tid, bf);
5714 ath_tx_do_ratelookup(sc, bf, tid->tid,
5716 ath_tx_calc_duration(sc, bf);
5717 ath_tx_calc_protection(sc, bf);
5718 ath_tx_set_rtscts(sc, bf);
5719 ath_tx_rate_fill_rcflags(sc, bf);
5720 ath_tx_setds(sc, bf);
5727 ath_tx_leak_count_update(sc, tid, bf);
5734 ath_tx_handoff(sc, txq, bf);
5750 ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
5754 ATH_TX_LOCK_ASSERT(sc);
5779 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr) {
5780 sc->sc_aggr_stats.aggr_sched_nopkt++;
5790 if (txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_nonaggr) {
5791 sc->sc_aggr_stats.aggr_sched_nopkt++;
5802 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
5804 ath_tx_tid_unsched(sc, tid);
5809 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
5812 if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
5813 ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
5815 ath_tx_tid_hw_queue_norm(sc, tid->an, tid);
5819 ath_tx_tid_sched(sc, tid);
5827 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5830 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
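
ath_txq_sched() drains the TXQ's list of scheduled TIDs: it returns early when the hardware queue is already deep (aggregate and non-aggregate traffic each have a limit), unschedules each TID before servicing it, dispatches to the aggregate or normal hw_queue routine depending on whether an A-MPDU session is running, and stops once the depth limits are hit again. A skeleton; the TID list name, traversal macro, and requeue test are assumptions:

    if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
        txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_nonaggr) {
        sc->sc_aggr_stats.aggr_sched_nopkt++;
        return;
    }
    TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
        ath_tx_tid_unsched(sc, tid);
        if (! ath_tx_tid_can_tx_or_sched(sc, tid))
            continue;
        if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
            ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
        else
            ath_tx_tid_hw_queue_norm(sc, tid->an, tid);
        if (tid->axq_depth != 0)        /* assumed "still has frames" test */
            ath_tx_tid_sched(sc, tid);
        if (txq->axq_aggr_depth + txq->fifo.axq_depth >=
            sc->sc_hwq_limit_aggr)
            break;
    }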
5873 ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
5891 ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
5921 struct ath_softc *sc = ni->ni_ic->ic_softc;
5949 ATH_TX_LOCK(sc);
5956 ath_tx_tid_pause(sc, atid);
5959 ATH_TX_UNLOCK(sc);
5961 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5967 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5971 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
5999 struct ath_softc *sc = ni->ni_ic->ic_softc;
6005 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
6011 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
6021 r = sc->sc_addba_response(ni, tap, status, code, batimeout);
6023 ATH_TX_LOCK(sc);
6031 ath_tx_tid_resume(sc, atid);
6032 ATH_TX_UNLOCK(sc);
6045 struct ath_softc *sc = ni->ni_ic->ic_softc;
6052 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n",
6061 ATH_TX_LOCK(sc);
6062 ath_tx_tid_pause(sc, atid);
6070 ath_tx_tid_bar_unsuspend(sc, atid);
6072 ATH_TX_UNLOCK(sc);
6075 sc->sc_addba_stop(ni, tap);
6083 ATH_TX_LOCK(sc);
6095 ath_tx_tid_resume(sc, atid);
6097 ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
6102 ath_tx_tid_resume(sc, atid);
6104 ATH_TX_UNLOCK(sc);
6109 ath_tx_default_comp(sc, bf, 1);
6121 ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
6130 ATH_TX_UNLOCK_ASSERT(sc);
6132 ATH_TX_LOCK(sc);
6137 DPRINTF(sc, ATH_DEBUG_NODE,
6148 ath_tx_tid_pause(sc, tid);
6149 ath_tx_tid_cleanup(sc, an, i, &bf_cq);
6154 ath_tx_tid_resume(sc, tid);
6157 ATH_TX_UNLOCK(sc);
6162 ath_tx_default_comp(sc, bf, 1);
6180 struct ath_softc *sc = ni->ni_ic->ic_softc;
6187 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
6210 ATH_TX_LOCK(sc);
6212 sc->sc_bar_response(ni, tap, status);
6214 device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",
6221 ATH_TX_UNLOCK(sc);
6233 ATH_TX_LOCK(sc);
6235 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
6240 ath_tx_tid_bar_unsuspend(sc, atid);
6241 ATH_TX_UNLOCK(sc);
6253 struct ath_softc *sc = ni->ni_ic->ic_softc;
6258 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
6265 ATH_TX_LOCK(sc);
6267 ATH_TX_UNLOCK(sc);
6270 sc->sc_addba_response_timeout(ni, tap);
6273 ATH_TX_LOCK(sc);
6274 ath_tx_tid_resume(sc, atid);
6275 ATH_TX_UNLOCK(sc);
6282 ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
6285 ATH_TX_LOCK_ASSERT(sc);
6309 ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
6315 ATH_TX_UNLOCK_ASSERT(sc);
6318 ATH_TX_LOCK(sc);
6321 DPRINTF(sc, ATH_DEBUG_XMIT,
6324 ATH_TX_UNLOCK(sc);
6330 txq = sc->sc_ac2q[atid->ac];
6332 ath_tx_tid_pause(sc, atid);
6338 ATH_TX_UNLOCK(sc);
6346 ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
6352 ATH_TX_UNLOCK_ASSERT(sc);
6354 ATH_TX_LOCK(sc);
6358 ATH_TX_UNLOCK(sc);
6359 DPRINTF(sc, ATH_DEBUG_XMIT,
6374 txq = sc->sc_ac2q[atid->ac];
6376 ath_tx_tid_resume(sc, atid);
6378 ATH_TX_UNLOCK(sc);
6382 ath_legacy_dma_txsetup(struct ath_softc *sc)
6390 ath_legacy_dma_txteardown(struct ath_softc *sc)
6398 ath_xmit_setup_legacy(struct ath_softc *sc)
6404 sc->sc_tx_desclen = sizeof(struct ath_desc);
6405 sc->sc_tx_statuslen = sizeof(struct ath_desc);
6406 sc->sc_tx_nmaps = 1; /* only one buffer per TX desc */
6408 sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
6409 sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
6410 sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;
6412 sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
6413 sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;
6415 sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
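
ath_xmit_setup_legacy() is where the legacy (non-EDMA) descriptor geometry and the TX method table get wired up; an EDMA attach path would presumably install its own versions of the same hooks. The rest of the driver then dispatches through the table rather than naming the legacy routines directly, presumably via thin wrappers such as the ath_tx_handoff() calls seen throughout this listing:

    /* Hedged sketch of how the per-chip TX hooks are consumed. */
    sc->sc_tx.xmit_setup(sc);               /* allocate TX descriptor DMA */
    sc->sc_tx.xmit_handoff(sc, txq, bf);    /* ath_legacy_xmit_handoff here */
    sc->sc_tx.xmit_dma_restart(sc, txq);    /* re-arm a queue after reset */
    sc->sc_tx.xmit_teardown(sc);            /* free TX descriptor DMA */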