Lines matching defs:sc (struct ath_softc *sc) in drivers/net/wireless/ath/ath9k/xmit.c

56 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
58 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
61 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
65 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
67 static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
70 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
72 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
107 void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
110 struct ieee80211_hw *hw = sc->hw;
122 void ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
127 ieee80211_schedule_txq(sc->hw, queue);
132 struct ath_softc *sc = hw->priv;
133 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
141 ath_txq_lock(sc, txq);
142 ath_txq_schedule(sc, txq);
143 ath_txq_unlock(sc, txq);
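
The trio above (ath_txq_lock(), ath_txq_schedule(), ath_txq_unlock()) is the per-queue locking discipline used throughout this file; ath_txq_unlock_complete() additionally delivers completions that were parked while the lock was held. A minimal userspace sketch of that deferral pattern, with a pthread mutex standing in for the kernel spinlock; every name below is an illustrative stand-in, not the driver's:

	#include <pthread.h>
	#include <stddef.h>

	struct item { struct item *next; };	/* completed-frame stand-in */

	struct txq {
		pthread_mutex_t lock;
		struct item *complete_q;	/* completions parked under the lock */
	};

	static void deliver(struct item *it)	/* ieee80211_tx_status() stand-in */
	{
		(void)it;
	}

	static void txq_unlock_complete(struct txq *q)
	{
		struct item *list = q->complete_q;	/* detach while still locked */

		q->complete_q = NULL;
		pthread_mutex_unlock(&q->lock);

		while (list) {			/* deliver with the lock dropped */
			struct item *next = list->next;

			deliver(list);
			list = next;
		}
	}

Delivering after the unlock matters because a status callback can re-enter the TX path and would otherwise deadlock on the queue lock.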
211 static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
220 txq = sc->tx.txq_map[q];
227 ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
237 struct ath_softc *sc = tid->an->sc;
238 struct ieee80211_hw *hw = sc->hw;
258 if (tid->txq == sc->tx.txq_map[q]) {
279 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
297 ath_txq_skb_done(sc, txq, skb);
298 ieee80211_free_txskb(sc->hw, skb);
303 ath_tx_update_baw(sc, tid, bf);
308 ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
312 ath_txq_unlock(sc, txq);
314 ath_txq_lock(sc, txq);
318 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
341 static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
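
ath_tx_addto_baw() and ath_tx_update_baw() maintain the block-ack window of an aggregation session: a frame's slot is marked in flight when it is queued, and the window start slides forward as the oldest outstanding frames complete. A deliberately simplified single-window bitmap sketch of that bookkeeping (the driver indexes a per-TID buffer bitmap relative to its baw_head; everything below is hypothetical):

	#define BAW_WIN		64		/* in-flight window size */
	#define SEQ_SPACE	4096		/* 12-bit 802.11 sequence space */

	static unsigned long long baw_map;	/* bit n => seqno baw_head + n in flight */
	static unsigned int baw_head;		/* lowest seqno still outstanding */

	static void baw_add(unsigned int seqno)
	{
		unsigned int off = (seqno - baw_head) & (SEQ_SPACE - 1);

		if (off < BAW_WIN)		/* caller keeps seqno inside the window */
			baw_map |= 1ULL << off;
	}

	static void baw_complete(unsigned int seqno)
	{
		unsigned int off = (seqno - baw_head) & (SEQ_SPACE - 1);

		if (off < BAW_WIN)
			baw_map &= ~(1ULL << off);

		/* slide the window start past slots that have completed */
		while (baw_map && !(baw_map & 1ULL)) {
			baw_map >>= 1;
			baw_head = (baw_head + 1) & (SEQ_SPACE - 1);
		}
	}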
363 static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
381 ath_tx_complete(sc, skb, ATH_TX_ERROR, txq, NULL);
386 ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
390 static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
398 TX_STAT_INC(sc, txq->axq_qnum, a_retries);
406 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
410 static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
414 spin_lock_bh(&sc->tx.txbuflock);
416 if (unlikely(list_empty(&sc->tx.txbuf))) {
417 spin_unlock_bh(&sc->tx.txbuflock);
421 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
424 spin_unlock_bh(&sc->tx.txbuflock);
429 static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
431 spin_lock_bh(&sc->tx.txbuflock);
432 list_add_tail(&bf->list, &sc->tx.txbuf);
433 spin_unlock_bh(&sc->tx.txbuflock);
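
ath_tx_get_buffer() and ath_tx_return_buffer() implement a spinlock-protected free list of preallocated TX buffers: get pops the first entry (or returns NULL when the list is empty), return pushes the buffer back. A minimal userspace analogue with a pthread mutex in place of sc->tx.txbuflock and a singly linked list in place of the kernel's list_head; names are illustrative:

	#include <pthread.h>
	#include <stddef.h>

	struct buf { struct buf *next; };

	static struct buf *txbuf_free;	/* sc->tx.txbuf analogue */
	static pthread_mutex_t txbuf_lock = PTHREAD_MUTEX_INITIALIZER;

	static struct buf *buf_get(void)	/* ath_tx_get_buffer() analogue */
	{
		struct buf *bf;

		pthread_mutex_lock(&txbuf_lock);
		bf = txbuf_free;		/* NULL if the pool is exhausted */
		if (bf)
			txbuf_free = bf->next;
		pthread_mutex_unlock(&txbuf_lock);
		return bf;
	}

	static void buf_return(struct buf *bf)	/* ath_tx_return_buffer() analogue */
	{
		pthread_mutex_lock(&txbuf_lock);
		bf->next = txbuf_free;		/* LIFO push; the driver appends */
		txbuf_free = bf;
		pthread_mutex_unlock(&txbuf_lock);
	}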
436 static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
440 tbf = ath_tx_get_buffer(sc);
448 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
455 static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
487 static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
527 ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, ts, 0);
564 if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
571 ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
600 ath_tx_set_retry(sc, txq, bf->bf_mpdu,
624 ath_tx_update_baw(sc, tid, bf);
628 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
631 ath_dynack_sample_tx_ts(sc->sc_ah,
636 ath_tx_complete_buf(sc, bf, txq, &bf_head, sta, ts,
647 tbf = ath_clone_txbuf(sc, bf_last);
654 ath_tx_update_baw(sc, tid, bf);
656 ath_tx_complete_buf(sc, bf, txq,
684 ath_tx_queue_tid(sc, tid);
696 ath_txq_unlock(sc, txq);
698 ath_txq_lock(sc, txq);
702 ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
711 static void ath_tx_count_airtime(struct ath_softc *sc,
722 int rate_dur = ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc, i);
729 static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
733 struct ieee80211_hw *hw = sc->hw;
748 ts->duration = ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc,
755 tid = ath_get_skb_tid(sc, an, bf->bf_mpdu);
756 ath_tx_count_airtime(sc, sta, bf, ts, tid->tidno);
766 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
767 ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts,
770 ath_tx_complete_buf(sc, bf, txq, bf_head, sta, ts, txok);
772 ath_tx_complete_aggr(sc, txq, bf, bf_head, sta, tid, ts, txok);
775 ath_txq_schedule(sc, txq);
800 static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
840 frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx];
857 bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
871 static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
892 !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
899 if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
900 (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
942 ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
960 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
965 ath_txq_skb_done(sc, txq, skb);
966 ieee80211_free_txskb(sc->hw, skb);
1015 ath_tx_update_baw(sc, tid, bf);
1016 ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
1021 ath_tx_addto_baw(sc, tid, bf);
1031 ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
1046 aggr_limit = ath_lookup_rate(sc, bf, tid);
1073 ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
1089 ret = ath_tx_get_tid_subframe(sc, txq, tid, &bf);
1104 TX_STAT_INC(sc, txq->axq_qnum, a_aggr);
1117 u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
1157 void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
1166 cur_ht20 = sc->tx.max_aggr_framelen[queue][MCS_HT20];
1167 cur_ht20_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI];
1168 cur_ht40 = sc->tx.max_aggr_framelen[queue][MCS_HT40];
1169 cur_ht40_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI];
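
ath_update_max_aggr_framelen() precomputes, per queue and per HT mode (HT20/HT40, with and without short GI), the largest frame length that still fits in the queue's txop; ath_lookup_rate() then only has to index max_aggr_framelen[q][modeidx][mcs]. The bound is rate-times-time arithmetic, roughly as below; this is an illustrative sketch, not the driver's table builder, which goes through its hardware rate/duration helpers:

	/* Bytes transmittable at rate_kbps within txop_us:
	 * kbit/s * us = millibits, so divide by 8000 for bytes. */
	static unsigned int max_framelen(unsigned int rate_kbps, unsigned int txop_us)
	{
		return (unsigned int)(((unsigned long long)rate_kbps * txop_us) / 8000);
	}

For example, 65000 kbps (MCS 7, HT20, long GI) over a 4 ms budget caps an aggregate at 65000 * 4000 / 8000 = 32500 bytes, in line with the 4 ms limit (max_4ms_framelen) that ath_lookup_rate() enforces.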
1178 static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
1185 struct ath_hw *ah = sc->sc_ah;
1188 if (sc->tx99_state || !ah->tpc_enabled)
1264 static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
1267 struct ath_hw *ah = sc->sc_ah;
1275 u32 rts_thresh = sc->hw->wiphy->rts_threshold;
1328 info->rates[i].ChSel = ath_txchainmask_reduction(sc,
1330 info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
1340 info->txpower[i] = ath_get_rate_txpower(sc, bf, rix,
1364 info->rates[i].ChSel = ath_txchainmask_reduction(sc,
1367 info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1371 info->txpower[i] = ath_get_rate_txpower(sc, bf, rix, false,
1376 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
1407 static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
1410 struct ath_hw *ah = sc->sc_ah;
1413 u32 rts_thresh = sc->hw->wiphy->rts_threshold;
1431 info.link = (sc->tx99_state) ? bf->bf_daddr : 0;
1436 if (!sc->tx99_state)
1439 txq == sc->tx.uapsdq)
1470 ath_buf_set_rate(sc, bf, &info, len, rts);
1500 ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
1519 ret = ath_tx_get_tid_subframe(sc, txq, tid, &bf);
1533 static int ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
1544 ret = ath_tx_get_tid_subframe(sc, txq, tid, &bf);
1558 aggr_len = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf);
1560 ath_tx_form_burst(sc, txq, tid, &bf_q, bf);
1570 ath_tx_fill_desc(sc, bf, txq, aggr_len);
1571 ath_tx_txqaddbuf(sc, txq, &bf_q, false);
1575 int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1578 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1590 ath_txq_lock(sc, txq);
1610 ath_txq_unlock_complete(sc, txq);
1615 void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1617 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1624 ath_txq_lock(sc, txq);
1626 ath_tx_flush_tid(sc, txtid);
1627 ath_txq_unlock_complete(sc, txq);
1630 void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
1633 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1648 void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
1650 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1661 ath_txq_lock(sc, txq);
1664 ath_tx_queue_tid(sc, tid);
1665 ath_txq_schedule(sc, txq);
1667 ath_txq_unlock_complete(sc, txq);
1674 ath9k_set_moredata(struct ath_softc *sc, struct ath_buf *bf, bool val)
1689 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
1700 struct ath_softc *sc = hw->priv;
1702 struct ath_txq *txq = sc->tx.uapsdq;
1717 ath_txq_lock(sc, tid->txq);
1719 ret = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq,
1724 ath9k_set_moredata(sc, bf, true);
1734 TX_STAT_INC(sc, txq->axq_qnum, a_queued_hw);
1739 ath_txq_unlock_complete(sc, tid->txq);
1746 ath9k_set_moredata(sc, bf_tail, false);
1752 ath_txq_lock(sc, txq);
1753 ath_tx_fill_desc(sc, bf, txq, 0);
1754 ath_tx_txqaddbuf(sc, txq, &bf_q, false);
1755 ath_txq_unlock(sc, txq);
1762 struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1764 struct ath_hw *ah = sc->sc_ah;
1813 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1814 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
1825 sc->tx.txqsetup |= 1<<axq_qnum;
1831 return &sc->tx.txq[axq_qnum];
1834 int ath_txq_update(struct ath_softc *sc, int qnum,
1837 struct ath_hw *ah = sc->sc_ah;
1841 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
1851 ath_err(ath9k_hw_common(sc->sc_ah),
1861 int ath_cabq_update(struct ath_softc *sc)
1864 struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
1865 int qnum = sc->beacon.cabq->axq_qnum;
1867 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1871 ath_txq_update(sc, qnum, &qi);
1876 static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1893 ath_tx_return_buffer(sc, bf);
1899 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
1909 void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
1912 ath_txq_lock(sc, txq);
1914 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1918 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]);
1927 ath_drain_txq_list(sc, txq, &txq->axq_q);
1929 ath_txq_unlock_complete(sc, txq);
1933 bool ath_drain_all_txq(struct ath_softc *sc)
1935 struct ath_hw *ah = sc->sc_ah;
1936 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1948 if (!ATH_TXQ_SETUP(sc, i))
1951 if (!sc->tx.txq[i].axq_depth)
1954 if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
1959 RESET_STAT_INC(sc, RESET_TX_DMA_ERROR);
1965 if (!ATH_TXQ_SETUP(sc, i))
1968 txq = &sc->tx.txq[i];
1969 ath_draintxq(sc, txq);
1975 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1977 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1978 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
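
ath_txq_setup() records each hardware queue it brings up by setting bit 1<<axq_qnum in sc->tx.txqsetup; ath_tx_cleanupq() clears it again, and the ATH_TXQ_SETUP() macro tests it so the drain, cleanup, and tasklet paths skip queues that were never initialized. A self-contained sketch of that bookkeeping, including the ath_tx_tasklet()-style walk against the interrupt's queue mask (all names hypothetical):

	static unsigned int txqsetup;	/* sc->tx.txqsetup analogue */

	#define TXQ_SETUP(q)	(txqsetup & (1u << (q)))	/* ATH_TXQ_SETUP() */

	static void txq_setup(int q)	{ txqsetup |=  1u << q; }
	static void txq_cleanup(int q)	{ txqsetup &= ~(1u << q); }

	static void process_queue(int q) { (void)q; }	/* ath_tx_processq() stand-in */

	/* ath_tx_tasklet() pattern: service only queues that are both
	 * set up and flagged in the interrupt status mask (qcumask). */
	static void tx_tasklet(unsigned int qcumask)
	{
		int i;

		for (i = 0; i < 10; i++)	/* stand-in for the chip's queue count */
			if (TXQ_SETUP(i) && (qcumask & (1u << i)))
				process_queue(i);
	}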
1984 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1986 struct ieee80211_hw *hw = sc->hw;
1987 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1999 spin_lock_bh(&sc->chan_lock);
2002 if (sc->cur_chan->stopped)
2010 ret = ath_tx_sched_aggr(sc, txq, tid);
2019 spin_unlock_bh(&sc->chan_lock);
2023 void ath_txq_schedule_all(struct ath_softc *sc)
2029 txq = sc->tx.txq_map[i];
2032 ath_txq_schedule(sc, txq);
2045 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
2048 struct ath_hw *ah = sc->sc_ah;
2088 TX_STAT_INC(sc, txq->axq_qnum, puttxbuf);
2094 if (!edma || sc->tx99_state) {
2095 TX_STAT_INC(sc, txq->axq_qnum, txstart);
2112 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
2125 ath_tx_addto_baw(sc, tid, bf);
2130 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
2131 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
2132 TX_STAT_INC(sc, txq->axq_qnum, queued);
2171 struct ath_softc *sc;
2173 sc = hw->priv;
2175 txpower = sc->cur_chan->cur_txpower;
2198 u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
2200 struct ath_hw *ah = sc->sc_ah;
2217 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
2222 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2229 bf = ath_tx_get_buffer(sc);
2253 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
2255 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
2258 ath_err(ath9k_hw_common(sc->sc_ah),
2260 ath_tx_return_buffer(sc, bf);
2299 struct ath_softc *sc = hw->priv;
2314 ath_assign_seq(ath9k_hw_common(sc->sc_ah), skb);
2345 struct ath_softc *sc = hw->priv;
2367 txq = sc->tx.uapsdq;
2371 tid = ath_get_skb_tid(sc, an, skb);
2374 ath_txq_lock(sc, txq);
2375 if (txq == sc->tx.txq_map[q]) {
2380 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
2382 ath_txq_skb_done(sc, txq, skb);
2386 ieee80211_free_txskb(sc->hw, skb);
2396 ath_tx_send_normal(sc, txq, tid, skb);
2399 ath_txq_unlock(sc, txq);
2407 struct ath_softc *sc = hw->priv;
2409 .txq = sc->beacon.cabq
2419 sc->cur_chan->beacon.beacon_interval * 1000 *
2420 sc->cur_chan->beacon.dtim_period / ATH_BCBUF;
2428 bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
2434 ath_buf_set_rate(sc, bf, &info, fi->framelen, false);
2456 ath9k_set_moredata(sc, bf, false);
2459 ath_txq_lock(sc, txctl.txq);
2460 ath_tx_fill_desc(sc, bf, txctl.txq, 0);
2461 ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
2462 TX_STAT_INC(sc, txctl.txq->axq_qnum, queued);
2463 ath_txq_unlock(sc, txctl.txq);
2470 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
2475 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2482 if (sc->sc_ah->caldata)
2483 set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);
2505 spin_lock_irqsave(&sc->sc_pm_lock, flags);
2506 if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
2507 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
2510 sc->ps_flags & (PS_WAIT_FOR_BEACON |
2515 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
2517 ath_txq_skb_done(sc, txq, skb);
2522 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
2538 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
2540 if (sc->tx99_state)
2549 complete(&sc->paprd_complete);
2551 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
2552 ath_tx_complete(sc, skb, tx_flags, txq, sta);
2563 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2564 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2565 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
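
Note the bulk return here: list_splice_tail_init() moves the whole bf_q chain onto sc->tx.txbuf in one O(1) operation, under the irqsave form of the lock because this path can run from completion context. Continuing the hypothetical buf_get()/buf_return() analogue sketched earlier:

	/* Return an entire chain of buffers in one locked operation,
	 * as list_splice_tail_init() does above. */
	static void buf_return_chain(struct buf *head, struct buf *tail)
	{
		pthread_mutex_lock(&txbuf_lock);
		tail->next = txbuf_free;	/* splice the chain in O(1) */
		txbuf_free = head;
		pthread_mutex_unlock(&txbuf_lock);
	}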
2578 static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2585 struct ieee80211_hw *hw = sc->hw;
2586 struct ath_hw *ah = sc->sc_ah;
2629 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
2635 static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2637 struct ath_hw *ah = sc->sc_ah;
2646 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2649 ath_txq_lock(sc, txq);
2656 ath_txq_schedule(sc, txq);
2687 TX_STAT_INC(sc, txq->axq_qnum, txprocdesc);
2702 ath_tx_return_buffer(sc, bf_held);
2705 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
2707 ath_txq_unlock_complete(sc, txq);
2710 void ath_tx_tasklet(struct ath_softc *sc)
2712 struct ath_hw *ah = sc->sc_ah;
2718 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2719 ath_tx_processq(sc, &sc->tx.txq[i]);
2724 void ath_tx_edma_tasklet(struct ath_softc *sc)
2727 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2728 struct ath_hw *ah = sc->sc_ah;
2749 if (ts.qid == sc->beacon.beaconq) {
2750 sc->beacon.tx_processed = true;
2751 sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
2754 ath_chanctx_event(sc, NULL,
2758 ath9k_csa_update(sc);
2762 txq = &sc->tx.txq[ts.qid];
2764 ath_txq_lock(sc, txq);
2766 TX_STAT_INC(sc, txq->axq_qnum, txprocdesc);
2770 ath_txq_unlock(sc, txq);
2777 ath_tx_return_buffer(sc, bf);
2794 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2803 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
2804 ath_txq_unlock_complete(sc, txq);
2813 static int ath_txstatus_setup(struct ath_softc *sc, int size)
2815 struct ath_descdma *dd = &sc->txsdma;
2816 u8 txs_len = sc->sc_ah->caps.txs_len;
2819 dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
2827 static int ath_tx_edma_init(struct ath_softc *sc)
2831 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2833 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2834 sc->txsdma.dd_desc_paddr,
2840 int ath_tx_init(struct ath_softc *sc, int nbufs)
2842 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2845 spin_lock_init(&sc->tx.txbuflock);
2847 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2855 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2863 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2864 error = ath_tx_edma_init(sc);
2869 void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2886 tid->txq = sc->tx.txq_map[acno];
2893 void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2905 ath_txq_lock(sc, txq);
2910 ath_tid_drain(sc, txq, tid);
2913 ath_txq_unlock(sc, txq);
2924 int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
2929 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2951 bf = ath_tx_setup_buffer(sc, txctl->txq, NULL, skb);
2957 ath_set_rates(sc->tx99_vif, NULL, bf);
2959 ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, bf->bf_daddr);
2960 ath9k_hw_tx99_start(sc->sc_ah, txctl->txq->axq_qnum);
2962 ath_tx_send_normal(sc, txctl->txq, NULL, skb);