Lines Matching refs:txq (FreeBSD ath(4) driver TX path, sys/dev/ath/if_ath_tx.c)

692  * with the actual hardware txq, or all of this will fall apart.
699 ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
715 txq->axq_qnum);
718 ATH_TXQ_LOCK(txq);
719 if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
720 struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
734 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
735 ATH_TXQ_UNLOCK(txq);
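
The hits at 699-735 are from ath_tx_handoff_mcast(), which stages a frame on the per-VAP software multicast (CAB) queue rather than a hardware queue. A condensed sketch of that pattern, assuming the driver's own queue macros and HAL wrappers (ATH_TXQ_LOCK, ATH_TXQ_LAST, ATH_TXQ_INSERT_TAIL, ath_hal_settxdesclink) and eliding the MORE_DATA and busdma bookkeeping the real function also does:

static void
ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TXQ_LOCK(txq);

	/*
	 * If a frame is already staged on the multicast queue, chain
	 * this buffer's descriptor onto the previous tail so the whole
	 * burst goes out as one DMA list after the next beacon.
	 */
	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);

		/* (the real code also sets MORE_DATA on bf_last here) */
		ath_hal_settxdesclink(sc->sc_ah, bf_last->bf_lastds,
		    bf->bf_daddr);
	}
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_TXQ_UNLOCK(txq);
}
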
742 ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
759 KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
779 ATH_TXQ_LOCK(txq);
804 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
806 "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
808 txq->axq_qnum,
810 txq->axq_depth);
818 if (txq->axq_link != NULL) {
819 *txq->axq_link = bf->bf_daddr;
822 txq->axq_qnum, txq->axq_link,
824 txq->axq_depth);
828 txq->axq_qnum, txq->axq_link,
844 if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
845 bf_first = TAILQ_FIRST(&txq->axq_q);
846 txq->axq_flags |= ATH_TXQ_PUTRUNNING;
847 ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
850 __func__, txq->axq_qnum,
852 txq->axq_depth);
856 txq->axq_qnum,
859 txq->axq_depth);
866 if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
870 txq->axq_qnum);
877 txq->axq_aggr_depth++;
882 ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);
899 ath_hal_txstart(ah, txq->axq_qnum);
900 ATH_TXQ_UNLOCK(txq);
902 "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
911 ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
915 ATH_TXQ_LOCK_ASSERT(txq);
918 bf = TAILQ_FIRST(&txq->axq_q);
919 bf_last = ATH_TXQ_LAST(txq, axq_q_s);
927 txq->axq_qnum,
934 ath_tx_dump(sc, txq);
941 KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
944 txq->axq_qnum));
946 ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
947 txq->axq_flags |= ATH_TXQ_PUTRUNNING;
950 &txq->axq_link);
951 ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
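
ath_legacy_tx_dma_restart() (911-951) re-arms a stopped queue that still has buffers on it: push the head of the pending list back to the MAC, mark PUTRUNNING, refresh the cached link pointer from the last descriptor, and restart DMA. A sketch with the debug dump (ath_tx_dump) elided:

static void
ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_buf *bf, *bf_last;

	ATH_TXQ_LOCK_ASSERT(txq);

	bf = TAILQ_FIRST(&txq->axq_q);
	bf_last = ATH_TXQ_LAST(txq, axq_q_s);
	if (bf == NULL)
		return;

	/* The list must not already have been handed to the hardware. */
	KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
	    ("%s: Q%d: called with PUTRUNNING=1", __func__, txq->axq_qnum));

	/* Re-point the MAC at the head of the list and restart DMA. */
	ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
	txq->axq_flags |= ATH_TXQ_PUTRUNNING;
	ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
	    &txq->axq_link);
	ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
}
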
957 * The relevant hardware txq should be locked.
960 ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
970 if (txq->axq_qnum == ATH_TXQ_SWQ)
971 ath_tx_handoff_mcast(sc, txq, bf);
973 ath_tx_handoff_hw(sc, txq, bf);
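
The dispatch itself (957-973) is small: ath_legacy_xmit_handoff() routes frames for the pseudo queue number ATH_TXQ_SWQ to the multicast path and everything else to the hardware path. Roughly:

static void
ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	/* ATH_TXQ_SWQ marks the per-VAP software (multicast/CAB) queue. */
	if (txq->axq_qnum == ATH_TXQ_SWQ)
		ath_tx_handoff_mcast(sc, txq, bf);
	else
		ath_tx_handoff_hw(sc, txq, bf);
}
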
1505 ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
1539 ath_tx_handoff(sc, txq, bf);
1556 struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
1740 if (txq != sc->sc_ac2q[pri]) {
1742 "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
1744 txq,
1745 txq->axq_qnum,
1828 txq->axq_intrcnt = 0;
1829 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
1831 txq->axq_intrcnt = 0;
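
The axq_intrcnt hits at 1828-1831 are TX interrupt mitigation inside ath_tx_normal_setup(): rather than requesting a completion interrupt on every descriptor, one is requested every sc_txintrperiod frames. A sketch of that counter logic, assuming the surrounding 'flags' / HAL_TXDESC_INTREQ handling in the same function:

	/*
	 * Request a TX completion interrupt only every sc_txintrperiod
	 * frames (or when one is already being forced), so descriptor
	 * reaping happens in batches rather than per frame.
	 */
	if (flags & HAL_TXDESC_INTREQ) {
		txq->axq_intrcnt = 0;
	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
		flags |= HAL_TXDESC_INTREQ;
		txq->axq_intrcnt = 0;
	}
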
1912 struct ath_txq *txq;
1925 * For multicast frames, the txq gets overridden appropriately
1950 txq = sc->sc_ac2q[pri];
2006 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
2019 txq = &avp->av_mcastq;
2074 r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
2108 if (txq == &avp->av_mcastq) {
2112 ath_tx_xmit_normal(sc, txq, bf);
2115 ath_tx_swq(sc, ni, txq, queue_to_head, bf);
2118 ath_tx_xmit_normal(sc, txq, bf);
2132 ath_tx_xmit_normal(sc, txq, bf);
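
The hits from 1912 through 2132 are the queue selection and dispatch in ath_tx_start(): the WME priority picks a hardware queue via sc_ac2q[], the queue number is recorded in the buffer state, multicast traffic for a beaconing VAP is redirected to the per-VAP software queue (avp->av_mcastq), and the frame then goes either straight to the hardware or through the per-TID software queue. A heavily simplified sketch of that flow; 'do_override' and 'swq_this_frame' are placeholders for the real multicast/power-save gating tests, which are elided here:

	/* Map the WME access category to a hardware TX queue. */
	txq = sc->sc_ac2q[pri];

	/* Remember which hardware queue this buffer is destined for. */
	bf->bf_state.bfs_tx_queue = txq->axq_qnum;

	/*
	 * Multicast frames on a beaconing VAP are staged on the per-VAP
	 * software queue and burst out after the beacon (CAB) instead of
	 * going directly to a hardware queue.  (Gating conditions elided.)
	 */
	if (ismcast && do_override)
		txq = &avp->av_mcastq;

	/* Rates, descriptors, etc. for a normal (non-aggregate) frame. */
	r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
	/* (error handling elided) */

	if (txq == &avp->av_mcastq) {
		/* Software multicast queue: direct dispatch. */
		ath_tx_xmit_normal(sc, txq, bf);
	} else if (swq_this_frame) {
		/* Stage on the per-TID software queue. */
		ath_tx_swq(sc, ni, txq, queue_to_head, bf);
	} else {
		/* Otherwise direct dispatch to the hardware queue. */
		ath_tx_xmit_normal(sc, txq, bf);
	}
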
2910 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2933 TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
2935 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2951 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2963 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2971 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
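
The TAILQ hits at 2910-2971 are the per-TID scheduler hooks: ath_tx_tid_sched() puts an ath_tid on its hardware queue's axq_tidq list (head or tail, depending on the power-save leak handling above) and ath_tx_tid_unsched() removes it. A reduced sketch, with the head-versus-tail choice collapsed to a plain tail insert:

static void
ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
{
	struct ath_txq *txq = sc->sc_ac2q[tid->ac];

	ATH_TX_LOCK_ASSERT(sc);

	/* Paused TIDs and TIDs already on the list stay where they are. */
	if (tid->paused)
		return;
	if (tid->sched)
		return;
	tid->sched = 1;

	TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
}

static void
ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)
{
	struct ath_txq *txq = sc->sc_ac2q[tid->ac];

	ATH_TX_LOCK_ASSERT(sc);

	if (tid->sched == 0)
		return;
	tid->sched = 0;

	TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
}
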
3049 struct ath_txq *txq, struct ath_buf *bf)
3127 ath_tx_handoff(sc, txq, bf);
3138 struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
3160 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
3222 if (txq->axq_depth + txq->fifo.axq_depth == 0) {
3235 ath_tx_xmit_aggr(sc, an, txq, bf);
3260 } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3261 (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3277 ath_tx_xmit_normal(sc, txq, bf);
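
ath_tx_swq() (3138-3277) makes the direct-dispatch versus software-queue decision shown in the hits: an idle hardware queue lets an aggregate go straight out, a shallow queue still allows direct dispatch of a normal frame, and anything else is staged on the per-TID queue. The decision ladder, flattened and with the paused/BAW/pending-ADDBA cases omitted ('an' and 'atid' are the node and TID state looked up earlier in the function):

	if (txq->axq_depth + txq->fifo.axq_depth == 0) {
		/* Hardware queue (and EDMA FIFO) idle: dispatch at once. */
		ath_tx_xmit_aggr(sc, an, txq, bf);
	} else if (txq->axq_depth + txq->fifo.axq_depth <
	    sc->sc_hwq_limit_nonaggr &&
	    txq->axq_aggr_depth < sc->sc_hwq_limit_aggr) {
		/* Queue shallow enough: direct dispatch, no aggregation. */
		ath_tx_xmit_normal(sc, txq, bf);
	} else {
		/* Hardware busy: stage on the per-TID software queue. */
		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
		ath_tx_tid_sched(sc, atid);
	}
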
3852 struct ath_txq *txq;
3855 txq = sc->sc_ac2q[tid->ac];
3871 "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3877 txq->axq_qnum,
3878 txq->axq_depth,
3879 txq->axq_aggr_depth);
4144 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4154 * Iterate over all active tids for the given txq,
4157 while (! TAILQ_EMPTY(&txq->axq_tidq)) {
4158 tid = TAILQ_FIRST(&txq->axq_tidq);
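
ath_tx_txq_drain() (4144-4158) walks that same axq_tidq list to flush every TID still scheduled on a hardware queue. A sketch of the loop, assuming the driver's ath_tx_tid_drain()/ath_tx_tid_unsched() helpers and deferring buffer completion until after the TX lock is dropped:

void
ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_tid *tid;
	struct ath_buf *bf;
	ath_bufhead bf_cq;

	TAILQ_INIT(&bf_cq);
	ATH_TX_LOCK(sc);

	/*
	 * Iterate over all active tids for the given txq,
	 * flushing and unscheduling them.
	 */
	while (! TAILQ_EMPTY(&txq->axq_tidq)) {
		tid = TAILQ_FIRST(&txq->axq_tidq);
		ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
		ath_tx_tid_unsched(sc, tid);
	}

	ATH_TX_UNLOCK(sc);

	/* Complete the drained frames outside the TX lock. */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 0);
	}
}
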
4728 * Keep the txq lock held for now, as we need to ensure
5431 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5623 /* Punt to txq */
5624 ath_tx_handoff(sc, txq, bf);
5633 * Checking for an empty txq is done above.
5635 * XXX locking on txq here?
5638 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5662 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5733 /* Punt to hardware or software txq */
5734 ath_tx_handoff(sc, txq, bf);
5750 ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
5759 * There's no FIFO, so txq->axq_depth is what's been scheduled
5769 * The FIFO depth is what's in the hardware; the txq->axq_depth
5779 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr) {
5790 if (txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_nonaggr) {
5795 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
5797 TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
5827 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5830 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
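
ath_txq_sched() (5750-5830) is the consumer of the axq_tidq list: when the hardware queue has room (the sc_hwq_limit_aggr / sc_hwq_limit_nonaggr checks above), it walks the scheduled TIDs and lets each push frames, stopping early once the queue fills up again. A reduced sketch, with the EDMA FIFO accounting details and statistics elided:

void
ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_tid *tid, *next, *last;

	ATH_TX_LOCK_ASSERT(sc);

	/* Leave room both for aggregates and for plain frames. */
	if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr)
		return;
	if (txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_nonaggr)
		return;

	last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);

	TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
		/* Each scheduled TID gets a chance to queue frames. */
		if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
			ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
		else
			ath_tx_tid_hw_queue_norm(sc, tid->an, tid);

		/* Stop once the hardware queue is deep enough again. */
		if (txq->axq_aggr_depth + txq->fifo.axq_depth >=
		    sc->sc_hwq_limit_aggr)
			break;
		if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr)
			break;

		/* Don't loop past the TIDs queued when the walk began. */
		if (tid == last)
			break;
	}
}
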
6305 * the sched/unsched operations involve walking the per-txq
6312 struct ath_txq *txq;
6330 txq = sc->sc_ac2q[atid->ac];
6349 struct ath_txq *txq;
6374 txq = sc->sc_ac2q[atid->ac];