Lines Matching defs:txq

677  * with the actual hardware txq, or all of this will fall apart.
684 ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
700 txq->axq_qnum);
703 ATH_TXQ_LOCK(txq);
704 if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
705 struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
719 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
720 ATH_TXQ_UNLOCK(txq);
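
The fragments at 684-720 above are from ath_tx_handoff_mcast(): under the txq lock the current tail is inspected, the new buffer is appended, and the lock is dropped. A minimal userspace sketch of that pattern, assuming a pthread mutex stands in for ATH_TXQ_LOCK and the struct names (mc_buf, mc_txq) are stand-ins rather than the driver's real definitions:

    #include <sys/queue.h>
    #include <pthread.h>
    #include <stdint.h>

    struct mc_buf {
            TAILQ_ENTRY(mc_buf) bf_list;
            uint32_t bf_daddr;              /* descriptor bus address */
    };

    struct mc_txq {
            TAILQ_HEAD(mc_bufq, mc_buf) axq_q;
            pthread_mutex_t axq_lock;       /* stands in for ATH_TXQ_LOCK */
            int axq_depth;
    };

    /* Mirror of the mcast handoff: lock, inspect the tail, append, unlock. */
    static void
    mc_handoff_mcast(struct mc_txq *txq, struct mc_buf *bf)
    {
            struct mc_buf *bf_last;

            pthread_mutex_lock(&txq->axq_lock);             /* 703 */
            bf_last = TAILQ_LAST(&txq->axq_q, mc_bufq);     /* 704-705 */
            if (bf_last != NULL) {
                    /* the driver chains bf's descriptor behind bf_last here */
            }
            TAILQ_INSERT_TAIL(&txq->axq_q, bf, bf_list);    /* 719 */
            txq->axq_depth++;
            pthread_mutex_unlock(&txq->axq_lock);           /* 720 */
    }
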
727 ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
744 KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
764 ATH_TXQ_LOCK(txq);
789 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
791 "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
793 txq->axq_qnum,
795 txq->axq_depth);
803 if (txq->axq_link != NULL) {
804 *txq->axq_link = bf->bf_daddr;
807 txq->axq_qnum, txq->axq_link,
809 txq->axq_depth);
813 txq->axq_qnum, txq->axq_link,
829 if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
830 bf_first = TAILQ_FIRST(&txq->axq_q);
831 txq->axq_flags |= ATH_TXQ_PUTRUNNING;
832 ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
835 __func__, txq->axq_qnum,
837 txq->axq_depth);
841 txq->axq_qnum,
844 txq->axq_depth);
851 if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
855 txq->axq_qnum);
862 txq->axq_aggr_depth++;
867 ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);
884 ath_hal_txstart(ah, txq->axq_qnum);
885 ATH_TXQ_UNLOCK(txq);
887 "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
896 ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
900 ATH_TXQ_LOCK_ASSERT(txq);
903 bf = TAILQ_FIRST(&txq->axq_q);
904 bf_last = ATH_TXQ_LAST(txq, axq_q_s);
912 txq->axq_qnum,
919 ath_tx_dump(sc, txq);
926 KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
929 txq->axq_qnum));
931 ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
932 txq->axq_flags |= ATH_TXQ_PUTRUNNING;
935 &txq->axq_link);
936 ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
942 * The relevant hardware txq should be locked.
945 ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
955 if (txq->axq_qnum == ATH_TXQ_SWQ)
956 ath_tx_handoff_mcast(sc, txq, bf);
958 ath_tx_handoff_hw(sc, txq, bf);
1476 ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
1510 ath_tx_handoff(sc, txq, bf);
1527 struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
1705 if (txq != sc->sc_ac2q[pri]) {
1707 "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
1709 txq,
1710 txq->axq_qnum,
1765 txq->axq_intrcnt = 0;
1766 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
1768 txq->axq_intrcnt = 0;
1849 struct ath_txq *txq;
1862 * For multicast frames, the txq gets overridden appropriately
1876 txq = sc->sc_ac2q[pri];
1932 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
1945 txq = &avp->av_mcastq;
1995 r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
2029 if (txq == &avp->av_mcastq) {
2033 ath_tx_xmit_normal(sc, txq, bf);
2036 ath_tx_swq(sc, ni, txq, queue_to_head, bf);
2039 ath_tx_xmit_normal(sc, txq, bf);
2053 ath_tx_xmit_normal(sc, txq, bf);
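
The ath_tx_start fragments (1849-2053) sketch queue selection and dispatch: the frame's priority picks txq through sc_ac2q[pri] (1876), bfs_tx_queue records the chosen hardware queue number (1932), multicast frames get redirected to the per-VAP av_mcastq per the comment at 1862 (1945), and frames on the mcast queue are handed off directly with ath_tx_xmit_normal() while others may go through ath_tx_swq(). A hypothetical condensation of the dispatch at 2029-2053; the boolean parameters summarize conditions the listing does not show:

    enum tx_dispatch { TX_XMIT_NORMAL, TX_VIA_SWQ };

    /*
     * "is_mcastq" corresponds to the txq == &avp->av_mcastq test at 2029;
     * "use_swq" stands in for whether the frame is routed via the
     * per-node/per-TID software queue.
     */
    static enum tx_dispatch
    tx_start_dispatch(int is_mcastq, int use_swq)
    {
            if (is_mcastq)
                    return (TX_XMIT_NORMAL);        /* 2033 */
            if (use_swq)
                    return (TX_VIA_SWQ);            /* 2036 */
            return (TX_XMIT_NORMAL);                /* 2039, 2053 */
    }
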
2815 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2838 TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
2840 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2856 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2868 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2876 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
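
The fragments at 2815-2876 come from the TID scheduling helpers: each hardware txq keeps a list of TIDs with pending traffic (axq_tidq), scheduling a TID inserts it at the head or tail of that list, and unscheduling removes it. A self-contained sketch of that bookkeeping, assuming a `sched` flag and an `at_head` argument as simplifications of state the listing only hints at:

    #include <sys/queue.h>

    struct sched_tid {
            TAILQ_ENTRY(sched_tid) axq_qelem;
            int sched;                      /* already on the txq's TID list? */
    };

    struct sched_txq {
            TAILQ_HEAD(, sched_tid) axq_tidq;
    };

    static void
    tid_sched(struct sched_txq *txq, struct sched_tid *tid, int at_head)
    {
            if (tid->sched)
                    return;
            tid->sched = 1;
            if (at_head)
                    TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);  /* 2838 */
            else
                    TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);  /* 2840, 2856 */
    }

    static void
    tid_unsched(struct sched_txq *txq, struct sched_tid *tid)
    {
            if (!tid->sched)
                    return;
            tid->sched = 0;
            TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);               /* 2876 */
    }
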
2942 struct ath_txq *txq, struct ath_buf *bf)
3019 ath_tx_handoff(sc, txq, bf);
3030 struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
3052 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
3101 if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_aggr) {
3114 ath_tx_xmit_aggr(sc, an, txq, bf);
3139 } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3140 (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3156 ath_tx_xmit_normal(sc, txq, bf);
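
The ath_tx_swq fragments (3030-3156) show the depth gates that decide between direct dispatch and software queueing: a frame goes straight to the hardware queue only while that queue is shallow, with separate thresholds for aggregate and non-aggregate traffic. A minimal sketch of just those two tests; everything else in the function is elided:

    /*
     * Depth gates from 3101 and 3139-3140.  Returns nonzero when the frame
     * may be handed directly to the hardware queue instead of being left
     * on the per-TID software queue for the scheduler.
     */
    static int
    swq_direct_ok(int axq_depth, int fifo_depth, int axq_aggr_depth,
        int hwq_limit_aggr, int hwq_limit_nonaggr, int aggr_session)
    {
            if (aggr_session)
                    return (axq_depth + fifo_depth < hwq_limit_aggr);   /* 3101 */
            return (axq_depth + fifo_depth < hwq_limit_nonaggr &&       /* 3139 */
                axq_aggr_depth < hwq_limit_aggr);                       /* 3140 */
    }
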
3731 struct ath_txq *txq;
3734 txq = sc->sc_ac2q[tid->ac];
3750 "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3756 txq->axq_qnum,
3757 txq->axq_depth,
3758 txq->axq_aggr_depth);
4023 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4033 * Iterate over all active tids for the given txq,
4036 while (! TAILQ_EMPTY(&txq->axq_tidq)) {
4037 tid = TAILQ_FIRST(&txq->axq_tidq);
4608 * Keep the txq lock held for now, as we need to ensure
5243 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5433 /* Punt to txq */
5434 ath_tx_handoff(sc, txq, bf);
5443 * Checking for an empty txq is done above.
5445 * XXX locking on txq here?
5448 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5472 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5543 /* Punt to hardware or software txq */
5544 ath_tx_handoff(sc, txq, bf);
5560 ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
5575 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5579 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5584 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
5586 TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
5616 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5619 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
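
The ath_txq_sched fragments (5560-5619) show the scheduler's back-pressure: the walk over the txq's TID list stops feeding the hardware once either depth limit is reached, checked before the loop (5575, 5579) and again after each TID is serviced (5616, 5619). A small sketch of that gate, with the fields passed in as plain integers:

    /* Returns nonzero when ath_txq_sched() would stop dispatching. */
    static int
    txq_sched_blocked(int axq_aggr_depth, int fifo_depth, int axq_depth,
        int hwq_limit_aggr, int hwq_limit_nonaggr)
    {
            if (axq_aggr_depth + fifo_depth >= hwq_limit_aggr)  /* 5575, 5616 */
                    return (1);
            if (axq_depth >= hwq_limit_nonaggr)                 /* 5579, 5619 */
                    return (1);
            return (0);
    }
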
6096 * the sched/unsched operations involve walking the per-txq
6103 struct ath_txq *txq;
6121 txq = sc->sc_ac2q[atid->ac];
6140 struct ath_txq *txq;
6165 txq = sc->sc_ac2q[atid->ac];