Lines Matching defs:txq

677  * with the actual hardware txq, or all of this will fall apart.
684 ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
700 txq->axq_qnum);
703 ATH_TXQ_LOCK(txq);
704 if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
705 struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
719 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
720 ATH_TXQ_UNLOCK(txq);
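
The matches at 684-720 above are ath_tx_handoff_mcast(): under the queue lock, the last staged buffer's final descriptor is linked to the incoming buffer before the new buffer is appended, so the whole chain can later be pushed to hardware in one go. A minimal user-space sketch of that linking pattern follows; the sk_* types are simplified stand-ins (no locking, no DMA maps), not the driver's real structures, and the BSD queue(3) macros are assumed.

    #include <sys/queue.h>
    #include <stdint.h>

    struct sk_buf {
        TAILQ_ENTRY(sk_buf) bf_list;
        uint32_t bf_daddr;          /* DMA address of the first descriptor */
        uint32_t *bf_lastds_link;   /* link word of the last descriptor */
    };
    TAILQ_HEAD(sk_bufq, sk_buf);

    struct sk_txq {
        struct sk_bufq axq_q;       /* software (multicast) staging queue */
        int axq_depth;
    };

    /* Chain bf onto the staging queue, hooking it to the previous tail. */
    static void
    sk_handoff_mcast(struct sk_txq *txq, struct sk_buf *bf)
    {
        struct sk_buf *bf_last = TAILQ_LAST(&txq->axq_q, sk_bufq);

        if (bf_last != NULL)
            *bf_last->bf_lastds_link = bf->bf_daddr;

        TAILQ_INSERT_TAIL(&txq->axq_q, bf, bf_list);
        txq->axq_depth++;
    }
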
727 ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
744 KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
764 ATH_TXQ_LOCK(txq);
789 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
791 "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
793 txq->axq_qnum,
795 txq->axq_depth);
803 if (txq->axq_link != NULL) {
804 *txq->axq_link = bf->bf_daddr;
807 txq->axq_qnum, txq->axq_link,
809 txq->axq_depth);
813 txq->axq_qnum, txq->axq_link,
829 if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
830 bf_first = TAILQ_FIRST(&txq->axq_q);
831 txq->axq_flags |= ATH_TXQ_PUTRUNNING;
832 ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
835 __func__, txq->axq_qnum,
837 txq->axq_depth);
841 txq->axq_qnum,
844 txq->axq_depth);
851 if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
855 txq->axq_qnum);
862 txq->axq_aggr_depth++;
867 ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);
884 ath_hal_txstart(ah, txq->axq_qnum);
885 ATH_TXQ_UNLOCK(txq);
887 "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
896 ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
900 ATH_TXQ_LOCK_ASSERT(txq);
903 bf = TAILQ_FIRST(&txq->axq_q);
904 bf_last = ATH_TXQ_LAST(txq, axq_q_s);
912 txq->axq_qnum,
919 ath_tx_dump(sc, txq);
926 KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
929 txq->axq_qnum));
931 ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
932 txq->axq_flags |= ATH_TXQ_PUTRUNNING;
935 &txq->axq_link);
936 ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
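
ath_legacy_tx_dma_restart() (lines 896-936) re-pushes a queue whose buffer list is intact but whose DMA engine has stopped, asserting that PUTRUNNING is clear before the head is handed back to the hardware. A hedged sketch of the same sequence, again with stand-in types and stubbed HAL calls:

    #include <sys/queue.h>
    #include <stdint.h>
    #include <assert.h>

    #define SK_PUTRUNNING 0x0001

    struct sk_buf {
        TAILQ_ENTRY(sk_buf) bf_list;
        uint32_t bf_daddr;
        uint32_t *bf_lastds_link;
    };
    TAILQ_HEAD(sk_bufq, sk_buf);

    struct sk_txq {
        struct sk_bufq axq_q;
        uint32_t *axq_link;
        uint32_t axq_flags;
        int axq_qnum;
    };

    static void sk_hal_puttxbuf(int qnum, uint32_t daddr) { (void)qnum; (void)daddr; }
    static void sk_hal_txstart(int qnum) { (void)qnum; }

    /* Restart DMA on a non-empty queue after the list has been rebuilt. */
    static void
    sk_dma_restart(struct sk_txq *txq)
    {
        struct sk_buf *bf = TAILQ_FIRST(&txq->axq_q);
        struct sk_buf *bf_last = TAILQ_LAST(&txq->axq_q, sk_bufq);

        if (bf == NULL)
            return;

        /* The driver KASSERTs this; a plain assert stands in here. */
        assert((txq->axq_flags & SK_PUTRUNNING) == 0);

        sk_hal_puttxbuf(txq->axq_qnum, bf->bf_daddr);
        txq->axq_flags |= SK_PUTRUNNING;

        /* Track the tail's link word so later handoffs can append. */
        txq->axq_link = bf_last->bf_lastds_link;

        sk_hal_txstart(txq->axq_qnum);
    }
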
942 * The relevant hardware txq should be locked.
945 ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
955 if (txq->axq_qnum == ATH_TXQ_SWQ)
956 ath_tx_handoff_mcast(sc, txq, bf);
958 ath_tx_handoff_hw(sc, txq, bf);
1476 ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
1510 ath_tx_handoff(sc, txq, bf);
1527 struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
1706 if (txq != sc->sc_ac2q[pri]) {
1708 "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
1710 txq,
1711 txq->axq_qnum,
1766 txq->axq_intrcnt = 0;
1767 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
1769 txq->axq_intrcnt = 0;
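
Lines 1766-1769 show the TX interrupt mitigation counter: a completion interrupt is only requested every sc_txintrperiod frames once the queue is busy. The exact condition on the first branch isn't visible in this listing, so the shallow-queue threshold below is an assumption; field and parameter names are stand-ins.

    #include <stdbool.h>

    struct sk_txq {
        int axq_depth;      /* frames currently queued to hardware */
        int axq_intrcnt;    /* frames since the last requested interrupt */
    };

    /* Return true if this frame's descriptor should request a TX interrupt. */
    static bool
    sk_want_tx_intr(struct sk_txq *txq, int txintrperiod)
    {
        if (txq->axq_depth < txintrperiod) {    /* threshold assumed */
            /* Shallow queue: interrupt so it drains promptly. */
            txq->axq_intrcnt = 0;
            return true;
        }
        if (++txq->axq_intrcnt >= txintrperiod) {
            /* Busy queue: only every txintrperiod-th frame interrupts. */
            txq->axq_intrcnt = 0;
            return true;
        }
        return false;
    }
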
1853 struct ath_txq *txq;
1866 * For multicast frames, the txq gets overridden appropriately
1880 txq = sc->sc_ac2q[pri];
1936 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
1949 txq = &avp->av_mcastq;
1999 r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
2033 if (txq == &avp->av_mcastq) {
2037 ath_tx_xmit_normal(sc, txq, bf);
2040 ath_tx_swq(sc, ni, txq, queue_to_head, bf);
2043 ath_tx_xmit_normal(sc, txq, bf);
2057 ath_tx_xmit_normal(sc, txq, bf);
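
The ath_tx_start() matches (1853-2057) show the queue choice: the WME priority normally selects a hardware queue via sc_ac2q[], but (per the comment at 1866) multicast frames can be redirected to the per-VAP staging queue av_mcastq, and the chosen queue number is stamped into bf_state.bfs_tx_queue before dispatch. A small sketch of just that selection; the multicast-override condition is reduced to a flag and the names are stand-ins.

    #include <stdbool.h>

    struct sk_txq { int axq_qnum; };

    struct sk_softc {
        struct sk_txq *ac2q[4];     /* WME access category -> hw queue (size illustrative) */
    };

    struct sk_vap {
        struct sk_txq mcastq;       /* per-VAP multicast staging queue */
    };

    struct sk_buf {
        int bfs_tx_queue;           /* remembered destination queue number */
    };

    static struct sk_txq *
    sk_select_txq(struct sk_softc *sc, struct sk_vap *avp, struct sk_buf *bf,
        int pri, bool stage_mcast)
    {
        struct sk_txq *txq = sc->ac2q[pri];

        if (stage_mcast)            /* condition simplified from the driver's checks */
            txq = &avp->mcastq;

        bf->bfs_tx_queue = txq->axq_qnum;
        return txq;
    }
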
2823 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2846 TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
2848 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2864 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2876 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2884 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
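
Lines 2823-2884 are the TID scheduling helpers: the sched path puts a TID on the owning queue's axq_tidq list (at the head or the tail; the head-insert condition isn't shown in this listing), and the unsched path removes it. A stand-alone sketch of that bookkeeping, with sk_* stand-in types:

    #include <sys/queue.h>
    #include <stdbool.h>

    struct sk_tid {
        TAILQ_ENTRY(sk_tid) axq_qelem;
        bool sched;
    };
    TAILQ_HEAD(sk_tidq, sk_tid);

    struct sk_txq {
        struct sk_tidq axq_tidq;    /* TIDs awaiting service on this queue */
    };

    /* Queue a TID for service; "to_head" lets urgent TIDs jump the line. */
    static void
    sk_tid_sched(struct sk_txq *txq, struct sk_tid *tid, bool to_head)
    {
        if (tid->sched)
            return;
        tid->sched = true;
        if (to_head)
            TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
        else
            TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
    }

    static void
    sk_tid_unsched(struct sk_txq *txq, struct sk_tid *tid)
    {
        if (!tid->sched)
            return;
        tid->sched = false;
        TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
    }
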
2950 struct ath_txq *txq, struct ath_buf *bf)
3027 ath_tx_handoff(sc, txq, bf);
3038 struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
3060 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
3109 if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_aggr) {
3122 ath_tx_xmit_aggr(sc, an, txq, bf);
3147 } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3148 (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3164 ath_tx_xmit_normal(sc, txq, bf);
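
The ath_tx_swq() matches (3038-3164) show the direct-dispatch decision: a frame goes straight to the hardware queue only while axq_depth plus the FIFO depth stays under the relevant hwq limit, otherwise it is held on the software queue. A sketch of those depth checks; whether the frame belongs to an A-MPDU session is reduced to a flag, and the names are stand-ins.

    #include <stdbool.h>

    struct sk_txq {
        int axq_depth;          /* frames on the hardware list */
        int fifo_depth;         /* frames pushed into the (EDMA) FIFO */
        int axq_aggr_depth;     /* aggregates currently in flight */
    };

    enum sk_dispatch { SK_DISPATCH_AGGR, SK_DISPATCH_NORMAL, SK_DISPATCH_SWQ };

    static enum sk_dispatch
    sk_choose_dispatch(const struct sk_txq *txq, bool is_ampdu,
        int hwq_limit_aggr, int hwq_limit_nonaggr)
    {
        int depth = txq->axq_depth + txq->fifo_depth;

        if (is_ampdu && depth < hwq_limit_aggr)
            return SK_DISPATCH_AGGR;        /* room for another aggregate */

        if (!is_ampdu && depth < hwq_limit_nonaggr &&
            txq->axq_aggr_depth < hwq_limit_aggr)
            return SK_DISPATCH_NORMAL;      /* direct-dispatch a single frame */

        return SK_DISPATCH_SWQ;             /* hardware busy; software-queue it */
    }
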
3739 struct ath_txq *txq;
3742 txq = sc->sc_ac2q[tid->ac];
3758 "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3764 txq->axq_qnum,
3765 txq->axq_depth,
3766 txq->axq_aggr_depth);
4031 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4041 * Iterate over all active tids for the given txq,
4044 while (! TAILQ_EMPTY(&txq->axq_tidq)) {
4045 tid = TAILQ_FIRST(&txq->axq_tidq);
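
ath_tx_txq_drain() (lines 4031-4045) empties a queue by repeatedly taking the first TID off axq_tidq and flushing it. A minimal sketch of that loop shape; the per-TID flush itself is elided and the types are stand-ins.

    #include <sys/queue.h>

    struct sk_tid {
        TAILQ_ENTRY(sk_tid) axq_qelem;
    };
    TAILQ_HEAD(sk_tidq, sk_tid);

    struct sk_txq {
        struct sk_tidq axq_tidq;
    };

    /* Walk and empty the per-queue TID list, one TID at a time. */
    static void
    sk_txq_drain_tids(struct sk_txq *txq)
    {
        struct sk_tid *tid;

        while (!TAILQ_EMPTY(&txq->axq_tidq)) {
            tid = TAILQ_FIRST(&txq->axq_tidq);
            /* ...flush or requeue the TID's pending frames here... */
            TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
        }
    }
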
4616 * Keep the txq lock held for now, as we need to ensure
5251 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5441 /* Punt to txq */
5442 ath_tx_handoff(sc, txq, bf);
5451 * Checking for an empty txq is done above.
5453 * XXX locking on txq here?
5456 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5480 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5551 /* Punt to hardware or software txq */
5552 ath_tx_handoff(sc, txq, bf);
5568 ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
5583 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5587 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5592 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
5594 TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
5624 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5627 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
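
ath_txq_sched() (lines 5568-5627) makes a single pass over the scheduled TIDs, bailing out early whenever the hardware queue depth crosses the aggregate or non-aggregate limit, and stopping once it has serviced the TID that was last when the pass began. A sketch of that loop using the FreeBSD queue(3) macros; sk_service_tid() is a placeholder for the per-TID work and the types are stand-ins.

    #include <sys/queue.h>

    struct sk_tid {
        TAILQ_ENTRY(sk_tid) axq_qelem;
    };
    TAILQ_HEAD(sk_tidq, sk_tid);

    struct sk_txq {
        struct sk_tidq axq_tidq;
        int axq_depth;
        int axq_aggr_depth;
        int fifo_depth;
    };

    /* Placeholder: would hand the TID's frames to the hardware queue. */
    static void sk_service_tid(struct sk_txq *txq, struct sk_tid *tid)
    { (void)txq; (void)tid; }

    static void
    sk_txq_sched(struct sk_txq *txq, int hwq_limit_aggr, int hwq_limit_nonaggr)
    {
        struct sk_tid *tid, *next, *last;

        /* Don't bother if the hardware queue is already deep enough. */
        if (txq->axq_aggr_depth + txq->fifo_depth >= hwq_limit_aggr)
            return;
        if (txq->axq_depth >= hwq_limit_nonaggr)
            return;

        /* Only make one pass over the TIDs scheduled right now. */
        last = TAILQ_LAST(&txq->axq_tidq, sk_tidq);

        TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
            sk_service_tid(txq, tid);

            /* Stop early once the hardware queue fills back up. */
            if (txq->axq_aggr_depth + txq->fifo_depth >= hwq_limit_aggr)
                break;
            if (txq->axq_depth >= hwq_limit_nonaggr)
                break;

            if (tid == last)
                break;
        }
    }
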
6104 * the sched/unsched operations involve walking the per-txq
6111 struct ath_txq *txq;
6129 txq = sc->sc_ac2q[atid->ac];
6148 struct ath_txq *txq;
6173 txq = sc->sc_ac2q[atid->ac];