1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
5 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer,
13 *    without modification.
14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
16 *    redistribution must be conditioned upon including a substantially
17 *    similar Disclaimer requirement for further binary redistribution.
18 *
19 * NO WARRANTY
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
23 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
24 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
25 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
28 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGES.
31 */
32
33#include <sys/cdefs.h>
34/*
35 * Driver for the Atheros Wireless LAN controller.
36 *
37 * This software is derived from work of Atsushi Onoe; his contribution
38 * is greatly appreciated.
39 */
40
41#include "opt_inet.h"
42#include "opt_ath.h"
43#include "opt_wlan.h"
44
45#include <sys/param.h>
46#include <sys/systm.h>
47#include <sys/sysctl.h>
48#include <sys/mbuf.h>
49#include <sys/malloc.h>
50#include <sys/lock.h>
51#include <sys/mutex.h>
52#include <sys/kernel.h>
53#include <sys/socket.h>
54#include <sys/sockio.h>
55#include <sys/errno.h>
56#include <sys/callout.h>
57#include <sys/bus.h>
58#include <sys/endian.h>
59#include <sys/kthread.h>
60#include <sys/taskqueue.h>
61#include <sys/priv.h>
62#include <sys/ktr.h>
63
64#include <machine/bus.h>
65
66#include <net/if.h>
67#include <net/if_var.h>
68#include <net/if_dl.h>
69#include <net/if_media.h>
70#include <net/if_types.h>
71#include <net/if_arp.h>
72#include <net/ethernet.h>
73#include <net/if_llc.h>
74
75#include <net80211/ieee80211_var.h>
76#include <net80211/ieee80211_regdomain.h>
77#ifdef IEEE80211_SUPPORT_SUPERG
78#include <net80211/ieee80211_superg.h>
79#endif
80#ifdef IEEE80211_SUPPORT_TDMA
81#include <net80211/ieee80211_tdma.h>
82#endif
83#include <net80211/ieee80211_ht.h>
84
85#include <net/bpf.h>
86
87#ifdef INET
88#include <netinet/in.h>
89#include <netinet/if_ether.h>
90#endif
91
92#include <dev/ath/if_athvar.h>
93#include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
94#include <dev/ath/ath_hal/ah_diagcodes.h>
95
96#include <dev/ath/if_ath_debug.h>
97
98#ifdef ATH_TX99_DIAG
99#include <dev/ath/ath_tx99/ath_tx99.h>
100#endif
101
102#include <dev/ath/if_ath_misc.h>
103#include <dev/ath/if_ath_tx.h>
104#include <dev/ath/if_ath_tx_ht.h>
105
106#ifdef	ATH_DEBUG_ALQ
107#include <dev/ath/if_ath_alq.h>
108#endif
109
110/*
111 * How many retries to perform in software
112 */
113#define	SWMAX_RETRIES		10
114
115/*
116 * What queue to throw the non-QoS TID traffic into
117 */
118#define	ATH_NONQOS_TID_AC	WME_AC_VO
119
120#if 0
121static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
122#endif
123static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
124    int tid);
125static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
126    int tid);
127static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
128    struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
129static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
130    struct ieee80211_node *ni, struct mbuf *m0, int *tid);
131static struct ath_buf *
132ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
133    struct ath_tid *tid, struct ath_buf *bf);
134
135#ifdef	ATH_DEBUG_ALQ
136void
137ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
138{
139	struct ath_buf *bf;
140	int i, n;
141	const char *ds;
142
143	/* XXX we should skip out early if debugging isn't enabled! */
144	bf = bf_first;
145
146	while (bf != NULL) {
147		/* XXX should ensure bf_nseg > 0! */
148		if (bf->bf_nseg == 0)
149			break;
150		n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
151		for (i = 0, ds = (const char *) bf->bf_desc;
152		    i < n;
153		    i++, ds += sc->sc_tx_desclen) {
154			if_ath_alq_post(&sc->sc_alq,
155			    ATH_ALQ_EDMA_TXDESC,
156			    sc->sc_tx_desclen,
157			    ds);
158		}
159		bf = bf->bf_next;
160	}
161}
162#endif /* ATH_DEBUG_ALQ */
163
164/*
165 * Whether to use the 11n rate scenario functions or not
166 */
167static inline int
168ath_tx_is_11n(struct ath_softc *sc)
169{
170	return ((sc->sc_ah->ah_magic == 0x20065416) ||
171		    (sc->sc_ah->ah_magic == 0x19741014));
172}
173
174/*
175 * Obtain the current TID from the given frame.
176 *
177 * Non-QoS frames get mapped to a TID so frames consistently
178 * go on a sensible queue.
179 */
180static int
181ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
182{
183	const struct ieee80211_frame *wh;
184
185	wh = mtod(m0, const struct ieee80211_frame *);
186
187	/* Non-QoS: map frame to a TID queue for software queueing */
188	if (! IEEE80211_QOS_HAS_SEQ(wh))
189		return (WME_AC_TO_TID(M_WME_GETAC(m0)));
190
191	/* QoS - fetch the TID from the header, ignore mbuf WME */
192	return (ieee80211_gettid(wh));
193}
194
195static void
196ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
197{
198	struct ieee80211_frame *wh;
199
200	wh = mtod(bf->bf_m, struct ieee80211_frame *);
201	/* Only update/resync if needed */
202	if (bf->bf_state.bfs_isretried == 0) {
203		wh->i_fc[1] |= IEEE80211_FC1_RETRY;
204		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
205		    BUS_DMASYNC_PREWRITE);
206	}
207	bf->bf_state.bfs_isretried = 1;
208	bf->bf_state.bfs_retries ++;
209}
210
211/*
212 * Determine what the correct AC queue for the given frame
213 * should be.
214 *
215 * For QoS frames, obey the TID.  That way things like
216 * management frames that are related to a given TID
217 * are thus serialised with the rest of the TID traffic,
218 * regardless of net80211 overriding priority.
219 *
220 * For non-QoS frames, return the mbuf WMI priority.
221 *
222 * This has implications that higher priority non-QoS traffic
223 * may end up being scheduled before other non-QoS traffic,
224 * leading to out-of-sequence packets being emitted.
225 *
226 * (It'd be nice to log/count this so we can see if it
227 * really is a problem.)
228 *
229 * TODO: maybe we should throw multicast traffic, QoS or
230 * otherwise, into a separate TX queue?
231 */
232static int
233ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
234{
235	const struct ieee80211_frame *wh;
236
237	wh = mtod(m0, const struct ieee80211_frame *);
238
239	/*
240	 * QoS data frame (sequence number or otherwise) -
241	 * return hardware queue mapping for the underlying
242	 * TID.
243	 */
244	if (IEEE80211_QOS_HAS_SEQ(wh))
245		return TID_TO_WME_AC(ieee80211_gettid(wh));
246
247	/*
248	 * Otherwise - return mbuf QoS pri.
249	 */
250	return (M_WME_GETAC(m0));
251}
252
253void
254ath_txfrag_cleanup(struct ath_softc *sc,
255	ath_bufhead *frags, struct ieee80211_node *ni)
256{
257	struct ath_buf *bf, *next;
258
259	ATH_TXBUF_LOCK_ASSERT(sc);
260
261	TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
262		/* NB: bf assumed clean */
263		TAILQ_REMOVE(frags, bf, bf_list);
264		ath_returnbuf_head(sc, bf);
265		ieee80211_node_decref(ni);
266	}
267}
268
269/*
270 * Setup xmit of a fragmented frame.  Allocate a buffer
271 * for each frag and bump the node reference count to
272 * reflect the held reference to be setup by ath_tx_start.
273 */
274int
275ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
276	struct mbuf *m0, struct ieee80211_node *ni)
277{
278	struct mbuf *m;
279	struct ath_buf *bf;
280
281	ATH_TXBUF_LOCK(sc);
282	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
283		/* XXX non-management? */
284		bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
285		if (bf == NULL) {	/* out of buffers, cleanup */
286			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n",
287			    __func__);
288			ath_txfrag_cleanup(sc, frags, ni);
289			break;
290		}
291		(void) ieee80211_ref_node(ni);
292		TAILQ_INSERT_TAIL(frags, bf, bf_list);
293	}
294	ATH_TXBUF_UNLOCK(sc);
295
296	return !TAILQ_EMPTY(frags);
297}
298
299static int
300ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
301{
302	struct mbuf *m;
303	int error;
304
305	/*
306	 * Load the DMA map so any coalescing is done.  This
307	 * also calculates the number of descriptors we need.
308	 */
309	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
310				     bf->bf_segs, &bf->bf_nseg,
311				     BUS_DMA_NOWAIT);
312	if (error == EFBIG) {
313		/* XXX packet requires too many descriptors */
314		bf->bf_nseg = ATH_MAX_SCATTER + 1;
315	} else if (error != 0) {
316		sc->sc_stats.ast_tx_busdma++;
317		ieee80211_free_mbuf(m0);
318		return error;
319	}
320	/*
321	 * Discard null packets and check for packets that
322	 * require too many TX descriptors.  We try to convert
323	 * the latter to a cluster.
324	 */
325	if (bf->bf_nseg > ATH_MAX_SCATTER) {		/* too many desc's, linearize */
326		sc->sc_stats.ast_tx_linear++;
327		m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER);
328		if (m == NULL) {
329			ieee80211_free_mbuf(m0);
330			sc->sc_stats.ast_tx_nombuf++;
331			return ENOMEM;
332		}
333		m0 = m;
334		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
335					     bf->bf_segs, &bf->bf_nseg,
336					     BUS_DMA_NOWAIT);
337		if (error != 0) {
338			sc->sc_stats.ast_tx_busdma++;
339			ieee80211_free_mbuf(m0);
340			return error;
341		}
342		KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,
343		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
344	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
345		sc->sc_stats.ast_tx_nodata++;
346		ieee80211_free_mbuf(m0);
347		return EIO;
348	}
349	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
350		__func__, m0, m0->m_pkthdr.len);
351	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
352	bf->bf_m = m0;
353
354	return 0;
355}
356
357/*
358 * Chain together segments+descriptors for a frame - 11n or otherwise.
359 *
360 * For aggregates, this is called on each frame in the aggregate.
361 */
362static void
363ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
364    struct ath_buf *bf, bool is_aggr, int is_first_subframe,
365    int is_last_subframe)
366{
367	struct ath_hal *ah = sc->sc_ah;
368	char *ds;
369	int i, bp, dsp;
370	HAL_DMA_ADDR bufAddrList[4];
371	uint32_t segLenList[4];
372	int numTxMaps = 1;
373	int isFirstDesc = 1;
374
375	/*
376	 * XXX There's txdma and txdma_mgmt; the descriptor
377	 * sizes must match.
378	 */
379	struct ath_descdma *dd = &sc->sc_txdma;
380
381	/*
382	 * Fillin the remainder of the descriptor info.
383	 */
384
385	/*
386	 * We need the number of TX data pointers in each descriptor.
387	 * EDMA and later chips support 4 TX buffers per descriptor;
388	 * previous chips just support one.
389	 */
390	numTxMaps = sc->sc_tx_nmaps;
391
392	/*
393	 * For EDMA and later chips ensure the TX map is fully populated
394	 * before advancing to the next descriptor.
395	 */
396	ds = (char *) bf->bf_desc;
397	bp = dsp = 0;
398	bzero(bufAddrList, sizeof(bufAddrList));
399	bzero(segLenList, sizeof(segLenList));
400	for (i = 0; i < bf->bf_nseg; i++) {
401		bufAddrList[bp] = bf->bf_segs[i].ds_addr;
402		segLenList[bp] = bf->bf_segs[i].ds_len;
403		bp++;
404
405		/*
406		 * Go to the next segment if this isn't the last segment
407		 * and there's space in the current TX map.
408		 */
409		if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
410			continue;
411
412		/*
413		 * Last segment or we're out of buffer pointers.
414		 */
415		bp = 0;
416
417		if (i == bf->bf_nseg - 1)
418			ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);
419		else
420			ath_hal_settxdesclink(ah, (struct ath_desc *) ds,
421			    bf->bf_daddr + dd->dd_descsize * (dsp + 1));
422
423		/*
424		 * XXX This assumes that bfs_txq is the actual destination
425		 * hardware queue at this point.  It may not have been
426		 * assigned, it may actually be pointing to the multicast
427		 * software TXQ id.  These must be fixed!
428		 */
429		ath_hal_filltxdesc(ah, (struct ath_desc *) ds
430			, bufAddrList
431			, segLenList
432			, bf->bf_descid		/* XXX desc id */
433			, bf->bf_state.bfs_tx_queue
434			, isFirstDesc		/* first segment */
435			, i == bf->bf_nseg - 1	/* last segment */
436			, (struct ath_desc *) ds0	/* first descriptor */
437		);
438
439		/*
440		 * Make sure the 11n aggregate fields are cleared.
441		 *
442		 * XXX TODO: this doesn't need to be called for
443		 * aggregate frames; as it'll be called on all
444		 * sub-frames.  Since the descriptors are in
445		 * non-cacheable memory, this leads to some
446		 * rather slow writes on MIPS/ARM platforms.
447		 */
448		if (ath_tx_is_11n(sc))
449			ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);
450
451		/*
452		 * If 11n is enabled, set it up as if it's an aggregate
453		 * frame.
454		 */
455		if (is_last_subframe) {
456			ath_hal_set11n_aggr_last(sc->sc_ah,
457			    (struct ath_desc *) ds);
458		} else if (is_aggr) {
459			/*
460			 * This clears the aggrlen field; so
461			 * the caller needs to call set_aggr_first()!
462			 *
463			 * XXX TODO: don't call this for the first
464			 * descriptor in the first frame in an
465			 * aggregate!
466			 */
467			ath_hal_set11n_aggr_middle(sc->sc_ah,
468			    (struct ath_desc *) ds,
469			    bf->bf_state.bfs_ndelim);
470		}
471		isFirstDesc = 0;
472		bf->bf_lastds = (struct ath_desc *) ds;
473
474		/*
475		 * Don't forget to skip to the next descriptor.
476		 */
477		ds += sc->sc_tx_desclen;
478		dsp++;
479
480		/*
481		 * .. and don't forget to blank these out!
482		 */
483		bzero(bufAddrList, sizeof(bufAddrList));
484		bzero(segLenList, sizeof(segLenList));
485	}
486	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
487}
488
489/*
490 * Set the rate control fields in the given descriptor based on
491 * the bf_state fields and node state.
492 *
493 * The bfs fields should already be set with the relevant rate
494 * control information, including whether MRR is to be enabled.
495 *
496 * Since the FreeBSD HAL currently sets up the first TX rate
497 * in ath_hal_setuptxdesc(), this will setup the MRR
498 * conditionally for the pre-11n chips, and call ath_buf_set_rate
499 * unconditionally for 11n chips. These require the 11n rate
500 * scenario to be set if MCS rates are enabled, so it's easier
501 * to just always call it. The caller can then only set rates 2, 3
502 * and 4 if multi-rate retry is needed.
503 */
504static void
505ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
506    struct ath_buf *bf)
507{
508	struct ath_rc_series *rc = bf->bf_state.bfs_rc;
509
510	/* If mrr is disabled, blank tries 1, 2, 3 */
511	if (! bf->bf_state.bfs_ismrr)
512		rc[1].tries = rc[2].tries = rc[3].tries = 0;
513
514#if 0
515	/*
516	 * If NOACK is set, just set ntries=1.
517	 */
518	else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
519		rc[1].tries = rc[2].tries = rc[3].tries = 0;
520		rc[0].tries = 1;
521	}
522#endif
523
524	/*
525	 * Always call - that way a retried descriptor will
526	 * have the MRR fields overwritten.
527	 *
528	 * XXX TODO: see if this is really needed - setting up
529	 * the first descriptor should set the MRR fields to 0
530	 * for us anyway.
531	 */
532	if (ath_tx_is_11n(sc)) {
533		ath_buf_set_rate(sc, ni, bf);
534	} else {
535		ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
536			, rc[1].ratecode, rc[1].tries
537			, rc[2].ratecode, rc[2].tries
538			, rc[3].ratecode, rc[3].tries
539		);
540	}
541}
542
543/*
544 * Setup segments+descriptors for an 11n aggregate.
545 * bf_first is the first buffer in the aggregate.
546 * The descriptor list must already been linked together using
547 * bf->bf_next.
548 */
549static void
550ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
551{
552	struct ath_buf *bf, *bf_prev = NULL;
553	struct ath_desc *ds0 = bf_first->bf_desc;
554
555	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
556	    __func__, bf_first->bf_state.bfs_nframes,
557	    bf_first->bf_state.bfs_al);
558
559	bf = bf_first;
560
561	if (bf->bf_state.bfs_txrate0 == 0)
562		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",
563		    __func__, bf, 0);
564	if (bf->bf_state.bfs_rc[0].ratecode == 0)
565		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",
566		    __func__, bf, 0);
567
568	/*
569	 * Setup all descriptors of all subframes - this will
570	 * call ath_hal_set11naggrmiddle() on every frame.
571	 */
572	while (bf != NULL) {
573		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
574		    "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
575		    __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
576		    SEQNO(bf->bf_state.bfs_seqno));
577
578		/*
579		 * Setup the initial fields for the first descriptor - all
580		 * the non-11n specific stuff.
581		 */
582		ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
583			, bf->bf_state.bfs_pktlen	/* packet length */
584			, bf->bf_state.bfs_hdrlen	/* header length */
585			, bf->bf_state.bfs_atype	/* Atheros packet type */
586			, bf->bf_state.bfs_txpower	/* txpower */
587			, bf->bf_state.bfs_txrate0
588			, bf->bf_state.bfs_try0		/* series 0 rate/tries */
589			, bf->bf_state.bfs_keyix	/* key cache index */
590			, bf->bf_state.bfs_txantenna	/* antenna mode */
591			, bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ	/* flags */
592			, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
593			, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
594		);
595
596		/*
597		 * First descriptor? Setup the rate control and initial
598		 * aggregate header information.
599		 */
600		if (bf == bf_first) {
601			/*
602			 * setup first desc with rate and aggr info
603			 */
604			ath_tx_set_ratectrl(sc, bf->bf_node, bf);
605		}
606
607		/*
608		 * Setup the descriptors for a multi-descriptor frame.
609		 * This is both aggregate and non-aggregate aware.
610		 */
611		ath_tx_chaindesclist(sc, ds0, bf,
612		    1, /* is_aggr */
613		    !! (bf == bf_first), /* is_first_subframe */
614		    !! (bf->bf_next == NULL) /* is_last_subframe */
615		    );
616
617		if (bf == bf_first) {
618			/*
619			 * Initialise the first 11n aggregate with the
620			 * aggregate length and aggregate enable bits.
621			 */
622			ath_hal_set11n_aggr_first(sc->sc_ah,
623			    ds0,
624			    bf->bf_state.bfs_al,
625			    bf->bf_state.bfs_ndelim);
626		}
627
628		/*
629		 * Link the last descriptor of the previous frame
630		 * to the beginning descriptor of this frame.
631		 */
632		if (bf_prev != NULL)
633			ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
634			    bf->bf_daddr);
635
636		/* Save a copy so we can link the next descriptor in */
637		bf_prev = bf;
638		bf = bf->bf_next;
639	}
640
641	/*
642	 * Set the first descriptor bf_lastds field to point to
643	 * the last descriptor in the last subframe, that's where
644	 * the status update will occur.
645	 */
646	bf_first->bf_lastds = bf_prev->bf_lastds;
647
648	/*
649	 * And bf_last in the first descriptor points to the end of
650	 * the aggregate list.
651	 */
652	bf_first->bf_last = bf_prev;
653
654	/*
655	 * For non-AR9300 NICs, which require the rate control
656	 * in the final descriptor - let's set that up now.
657	 *
658	 * This is because the filltxdesc() HAL call doesn't
659	 * populate the last segment with rate control information
660	 * if firstSeg is also true.  For non-aggregate frames
661	 * that is fine, as the first frame already has rate control
662	 * info.  But if the last frame in an aggregate has one
663	 * descriptor, both firstseg and lastseg will be true and
664	 * the rate info isn't copied.
665	 *
666	 * This is inefficient on MIPS/ARM platforms that have
667	 * non-cachable memory for TX descriptors, but we'll just
668	 * make do for now.
669	 *
670	 * As to why the rate table is stashed in the last descriptor
671	 * rather than the first descriptor?  Because proctxdesc()
672	 * is called on the final descriptor in an MPDU or A-MPDU -
673	 * ie, the one that gets updated by the hardware upon
674	 * completion.  That way proctxdesc() doesn't need to know
675	 * about the first _and_ last TX descriptor.
676	 */
677	ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);
678
679	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
680}
681
682/*
683 * Hand-off a frame to the multicast TX queue.
684 *
685 * This is a software TXQ which will be appended to the CAB queue
686 * during the beacon setup code.
687 *
688 * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
689 * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated
690 * with the actual hardware txq, or all of this will fall apart.
691 *
692 * XXX It may not be a bad idea to just stuff the QCU ID into bf_state
693 * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated
694 * correctly.
695 */
696static void
697ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
698    struct ath_buf *bf)
699{
700	ATH_TX_LOCK_ASSERT(sc);
701
702	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
703	     ("%s: busy status 0x%x", __func__, bf->bf_flags));
704
705	/*
706	 * Ensure that the tx queue is the cabq, so things get
707	 * mapped correctly.
708	 */
709	if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
710		DPRINTF(sc, ATH_DEBUG_XMIT,
711		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
712		    __func__, bf, bf->bf_state.bfs_tx_queue,
713		    txq->axq_qnum);
714	}
715
716	ATH_TXQ_LOCK(txq);
717	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
718		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
719		struct ieee80211_frame *wh;
720
721		/* mark previous frame */
722		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
723		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
724		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
725		    BUS_DMASYNC_PREWRITE);
726
727		/* link descriptor */
728		ath_hal_settxdesclink(sc->sc_ah,
729		    bf_last->bf_lastds,
730		    bf->bf_daddr);
731	}
732	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
733	ATH_TXQ_UNLOCK(txq);
734}
735
736/*
737 * Hand-off packet to a hardware queue.
738 */
739static void
740ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
741    struct ath_buf *bf)
742{
743	struct ath_hal *ah = sc->sc_ah;
744	struct ath_buf *bf_first;
745
746	/*
747	 * Insert the frame on the outbound list and pass it on
748	 * to the hardware.  Multicast frames buffered for power
749	 * save stations and transmit from the CAB queue are stored
750	 * on a s/w only queue and loaded on to the CAB queue in
751	 * the SWBA handler since frames only go out on DTIM and
752	 * to avoid possible races.
753	 */
754	ATH_TX_LOCK_ASSERT(sc);
755	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
756	     ("%s: busy status 0x%x", __func__, bf->bf_flags));
757	KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
758	     ("ath_tx_handoff_hw called for mcast queue"));
759
760	/*
761	 * XXX We should instead just verify that sc_txstart_cnt
762	 * or ath_txproc_cnt > 0.  That would mean that
763	 * the reset is going to be waiting for us to complete.
764	 */
765	if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) {
766		device_printf(sc->sc_dev,
767		    "%s: TX dispatch without holding txcount/txstart refcnt!\n",
768		    __func__);
769	}
770
771	/*
772	 * XXX .. this is going to cause the hardware to get upset;
773	 * so we really should find some way to drop or queue
774	 * things.
775	 */
776
777	ATH_TXQ_LOCK(txq);
778
779	/*
780	 * XXX TODO: if there's a holdingbf, then
781	 * ATH_TXQ_PUTRUNNING should be clear.
782	 *
783	 * If there is a holdingbf and the list is empty,
784	 * then axq_link should be pointing to the holdingbf.
785	 *
786	 * Otherwise it should point to the last descriptor
787	 * in the last ath_buf.
788	 *
789	 * In any case, we should really ensure that we
790	 * update the previous descriptor link pointer to
791	 * this descriptor, regardless of all of the above state.
792	 *
793	 * For now this is captured by having axq_link point
794	 * to either the holdingbf (if the TXQ list is empty)
795	 * or the end of the list (if the TXQ list isn't empty.)
796	 * I'd rather just kill axq_link here and do it as above.
797	 */
798
799	/*
800	 * Append the frame to the TX queue.
801	 */
802	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
803	ATH_KTR(sc, ATH_KTR_TX, 3,
804	    "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
805	    "depth=%d",
806	    txq->axq_qnum,
807	    bf,
808	    txq->axq_depth);
809
810	/*
811	 * If there's a link pointer, update it.
812	 *
813	 * XXX we should replace this with the above logic, just
814	 * to kill axq_link with fire.
815	 */
816	if (txq->axq_link != NULL) {
817		*txq->axq_link = bf->bf_daddr;
818		DPRINTF(sc, ATH_DEBUG_XMIT,
819		    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
820		    txq->axq_qnum, txq->axq_link,
821		    (caddr_t)bf->bf_daddr, bf->bf_desc,
822		    txq->axq_depth);
823		ATH_KTR(sc, ATH_KTR_TX, 5,
824		    "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
825		    "lastds=%d",
826		    txq->axq_qnum, txq->axq_link,
827		    (caddr_t)bf->bf_daddr, bf->bf_desc,
828		    bf->bf_lastds);
829	}
830
831	/*
832	 * If we've not pushed anything into the hardware yet,
833	 * push the head of the queue into the TxDP.
834	 *
835	 * Once we've started DMA, there's no guarantee that
836	 * updating the TxDP with a new value will actually work.
837	 * So we just don't do that - if we hit the end of the list,
838	 * we keep that buffer around (the "holding buffer") and
839	 * re-start DMA by updating the link pointer of _that_
840	 * descriptor and then restart DMA.
841	 */
842	if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
843		bf_first = TAILQ_FIRST(&txq->axq_q);
844		txq->axq_flags |= ATH_TXQ_PUTRUNNING;
845		ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
846		DPRINTF(sc, ATH_DEBUG_XMIT,
847		    "%s: TXDP[%u] = %p (%p) depth %d\n",
848		    __func__, txq->axq_qnum,
849		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
850		    txq->axq_depth);
851		ATH_KTR(sc, ATH_KTR_TX, 5,
852		    "ath_tx_handoff: TXDP[%u] = %p (%p) "
853		    "lastds=%p depth %d",
854		    txq->axq_qnum,
855		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
856		    bf_first->bf_lastds,
857		    txq->axq_depth);
858	}
859
860	/*
861	 * Ensure that the bf TXQ matches this TXQ, so later
862	 * checking and holding buffer manipulation is sane.
863	 */
864	if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
865		DPRINTF(sc, ATH_DEBUG_XMIT,
866		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
867		    __func__, bf, bf->bf_state.bfs_tx_queue,
868		    txq->axq_qnum);
869	}
870
871	/*
872	 * Track aggregate queue depth.
873	 */
874	if (bf->bf_state.bfs_aggr)
875		txq->axq_aggr_depth++;
876
877	/*
878	 * Update the link pointer.
879	 */
880	ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);
881
882	/*
883	 * Start DMA.
884	 *
885	 * If we wrote a TxDP above, DMA will start from here.
886	 *
887	 * If DMA is running, it'll do nothing.
888	 *
889	 * If the DMA engine hit the end of the QCU list (ie LINK=NULL,
890	 * or VEOL) then it stops at the last transmitted write.
891	 * We then append a new frame by updating the link pointer
892	 * in that descriptor and then kick TxE here; it will re-read
893	 * that last descriptor and find the new descriptor to transmit.
894	 *
895	 * This is why we keep the holding descriptor around.
896	 */
897	ath_hal_txstart(ah, txq->axq_qnum);
898	ATH_TXQ_UNLOCK(txq);
899	ATH_KTR(sc, ATH_KTR_TX, 1,
900	    "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
901}
902
903/*
904 * Restart TX DMA for the given TXQ.
905 *
906 * This must be called whether the queue is empty or not.
907 */
908static void
909ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
910{
911	struct ath_buf *bf, *bf_last;
912
913	ATH_TXQ_LOCK_ASSERT(txq);
914
915	/* XXX make this ATH_TXQ_FIRST */
916	bf = TAILQ_FIRST(&txq->axq_q);
917	bf_last = ATH_TXQ_LAST(txq, axq_q_s);
918
919	if (bf == NULL)
920		return;
921
922	DPRINTF(sc, ATH_DEBUG_RESET,
923	    "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n",
924	    __func__,
925	    txq->axq_qnum,
926	    bf,
927	    bf_last,
928	    (uint32_t) bf->bf_daddr);
929
930#ifdef	ATH_DEBUG
931	if (sc->sc_debug & ATH_DEBUG_RESET)
932		ath_tx_dump(sc, txq);
933#endif
934
935	/*
936	 * This is called from a restart, so DMA is known to be
937	 * completely stopped.
938	 */
939	KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
940	    ("%s: Q%d: called with PUTRUNNING=1\n",
941	    __func__,
942	    txq->axq_qnum));
943
944	ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
945	txq->axq_flags |= ATH_TXQ_PUTRUNNING;
946
947	ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
948	    &txq->axq_link);
949	ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
950}
951
952/*
953 * Hand off a packet to the hardware (or mcast queue.)
954 *
955 * The relevant hardware txq should be locked.
956 */
957static void
958ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
959    struct ath_buf *bf)
960{
961	ATH_TX_LOCK_ASSERT(sc);
962
963#ifdef	ATH_DEBUG_ALQ
964	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
965		ath_tx_alq_post(sc, bf);
966#endif
967
968	if (txq->axq_qnum == ATH_TXQ_SWQ)
969		ath_tx_handoff_mcast(sc, txq, bf);
970	else
971		ath_tx_handoff_hw(sc, txq, bf);
972}
973
974static int
975ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
976    struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
977    int *keyix)
978{
979	DPRINTF(sc, ATH_DEBUG_XMIT,
980	    "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n",
981	    __func__,
982	    *hdrlen,
983	    *pktlen,
984	    isfrag,
985	    iswep,
986	    m0);
987
988	if (iswep) {
989		const struct ieee80211_cipher *cip;
990		struct ieee80211_key *k;
991
992		/*
993		 * Construct the 802.11 header+trailer for an encrypted
994		 * frame. The only reason this can fail is because of an
995		 * unknown or unsupported cipher/key type.
996		 */
997		k = ieee80211_crypto_encap(ni, m0);
998		if (k == NULL) {
999			/*
1000			 * This can happen when the key is yanked after the
1001			 * frame was queued.  Just discard the frame; the
1002			 * 802.11 layer counts failures and provides
1003			 * debugging/diagnostics.
1004			 */
1005			return (0);
1006		}
1007		/*
1008		 * Adjust the packet + header lengths for the crypto
1009		 * additions and calculate the h/w key index.  When
1010		 * a s/w mic is done the frame will have had any mic
1011		 * added to it prior to entry so m0->m_pkthdr.len will
1012		 * account for it. Otherwise we need to add it to the
1013		 * packet length.
1014		 */
1015		cip = k->wk_cipher;
1016		(*hdrlen) += cip->ic_header;
1017		(*pktlen) += cip->ic_header + cip->ic_trailer;
1018		/* NB: frags always have any TKIP MIC done in s/w */
1019		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
1020			(*pktlen) += cip->ic_miclen;
1021		(*keyix) = k->wk_keyix;
1022	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
1023		/*
1024		 * Use station key cache slot, if assigned.
1025		 */
1026		(*keyix) = ni->ni_ucastkey.wk_keyix;
1027		if ((*keyix) == IEEE80211_KEYIX_NONE)
1028			(*keyix) = HAL_TXKEYIX_INVALID;
1029	} else
1030		(*keyix) = HAL_TXKEYIX_INVALID;
1031
1032	return (1);
1033}
1034
1035/*
1036 * Calculate whether interoperability protection is required for
1037 * this frame.
1038 *
1039 * This requires the rate control information be filled in,
1040 * as the protection requirement depends upon the current
1041 * operating mode / PHY.
1042 */
1043static void
1044ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
1045{
1046	struct ieee80211_frame *wh;
1047	uint8_t rix;
1048	uint16_t flags;
1049	int shortPreamble;
1050	const HAL_RATE_TABLE *rt = sc->sc_currates;
1051	struct ieee80211com *ic = &sc->sc_ic;
1052
1053	flags = bf->bf_state.bfs_txflags;
1054	rix = bf->bf_state.bfs_rc[0].rix;
1055	shortPreamble = bf->bf_state.bfs_shpream;
1056	wh = mtod(bf->bf_m, struct ieee80211_frame *);
1057
1058	/* Disable frame protection for TOA probe frames */
1059	if (bf->bf_flags & ATH_BUF_TOA_PROBE) {
1060		/* XXX count */
1061		flags &= ~(HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA);
1062		bf->bf_state.bfs_doprot = 0;
1063		goto finish;
1064	}
1065
1066	/*
1067	 * If 802.11g protection is enabled, determine whether
1068	 * to use RTS/CTS or just CTS.  Note that this is only
1069	 * done for OFDM unicast frames.
1070	 */
1071	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
1072	    rt->info[rix].phy == IEEE80211_T_OFDM &&
1073	    (flags & HAL_TXDESC_NOACK) == 0) {
1074		bf->bf_state.bfs_doprot = 1;
1075		/* XXX fragments must use CCK rates w/ protection */
1076		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
1077			flags |= HAL_TXDESC_RTSENA;
1078		} else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
1079			flags |= HAL_TXDESC_CTSENA;
1080		}
1081		/*
1082		 * For frags it would be desirable to use the
1083		 * highest CCK rate for RTS/CTS.  But stations
1084		 * farther away may detect it at a lower CCK rate
1085		 * so use the configured protection rate instead
1086		 * (for now).
1087		 */
1088		sc->sc_stats.ast_tx_protect++;
1089	}
1090
1091	/*
1092	 * If 11n protection is enabled and it's a HT frame,
1093	 * enable RTS.
1094	 *
1095	 * XXX ic_htprotmode or ic_curhtprotmode?
1096	 * XXX should it_htprotmode only matter if ic_curhtprotmode
1097	 * XXX indicates it's not a HT pure environment?
1098	 */
1099	if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
1100	    rt->info[rix].phy == IEEE80211_T_HT &&
1101	    (flags & HAL_TXDESC_NOACK) == 0) {
1102		flags |= HAL_TXDESC_RTSENA;
1103		sc->sc_stats.ast_tx_htprotect++;
1104	}
1105
1106finish:
1107	bf->bf_state.bfs_txflags = flags;
1108}
1109
1110/*
1111 * Update the frame duration given the currently selected rate.
1112 *
1113 * This also updates the frame duration value, so it will require
1114 * a DMA flush.
1115 */
1116static void
1117ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
1118{
1119	struct ieee80211_frame *wh;
1120	uint8_t rix;
1121	uint16_t flags;
1122	int shortPreamble;
1123	struct ath_hal *ah = sc->sc_ah;
1124	const HAL_RATE_TABLE *rt = sc->sc_currates;
1125	int isfrag = bf->bf_m->m_flags & M_FRAG;
1126
1127	flags = bf->bf_state.bfs_txflags;
1128	rix = bf->bf_state.bfs_rc[0].rix;
1129	shortPreamble = bf->bf_state.bfs_shpream;
1130	wh = mtod(bf->bf_m, struct ieee80211_frame *);
1131
1132	/*
1133	 * Calculate duration.  This logically belongs in the 802.11
1134	 * layer but it lacks sufficient information to calculate it.
1135	 */
1136	if ((flags & HAL_TXDESC_NOACK) == 0 &&
1137	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
1138		u_int16_t dur;
1139		if (shortPreamble)
1140			dur = rt->info[rix].spAckDuration;
1141		else
1142			dur = rt->info[rix].lpAckDuration;
1143		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
1144			dur += dur;		/* additional SIFS+ACK */
1145			/*
1146			 * Include the size of next fragment so NAV is
1147			 * updated properly.  The last fragment uses only
1148			 * the ACK duration
1149			 *
1150			 * XXX TODO: ensure that the rate lookup for each
1151			 * fragment is the same as the rate used by the
1152			 * first fragment!
1153			 */
1154			dur += ath_hal_computetxtime(ah,
1155			    rt,
1156			    bf->bf_nextfraglen,
1157			    rix, shortPreamble,
1158			    AH_TRUE);
1159		}
1160		if (isfrag) {
1161			/*
1162			 * Force hardware to use computed duration for next
1163			 * fragment by disabling multi-rate retry which updates
1164			 * duration based on the multi-rate duration table.
1165			 */
1166			bf->bf_state.bfs_ismrr = 0;
1167			bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
1168			/* XXX update bfs_rc[0].try? */
1169		}
1170
1171		/* Update the duration field itself */
1172		*(u_int16_t *)wh->i_dur = htole16(dur);
1173	}
1174}
1175
1176static uint8_t
1177ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
1178    int cix, int shortPreamble)
1179{
1180	uint8_t ctsrate;
1181
1182	/*
1183	 * CTS transmit rate is derived from the transmit rate
1184	 * by looking in the h/w rate table.  We must also factor
1185	 * in whether or not a short preamble is to be used.
1186	 */
1187	/* NB: cix is set above where RTS/CTS is enabled */
1188	KASSERT(cix != 0xff, ("cix not setup"));
1189	ctsrate = rt->info[cix].rateCode;
1190
1191	/* XXX this should only matter for legacy rates */
1192	if (shortPreamble)
1193		ctsrate |= rt->info[cix].shortPreamble;
1194
1195	return (ctsrate);
1196}
1197
1198/*
1199 * Calculate the RTS/CTS duration for legacy frames.
1200 */
1201static int
1202ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
1203    int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
1204    int flags)
1205{
1206	int ctsduration = 0;
1207
1208	/* This mustn't be called for HT modes */
1209	if (rt->info[cix].phy == IEEE80211_T_HT) {
1210		printf("%s: HT rate where it shouldn't be (0x%x)\n",
1211		    __func__, rt->info[cix].rateCode);
1212		return (-1);
1213	}
1214
1215	/*
1216	 * Compute the transmit duration based on the frame
1217	 * size and the size of an ACK frame.  We call into the
1218	 * HAL to do the computation since it depends on the
1219	 * characteristics of the actual PHY being used.
1220	 *
1221	 * NB: CTS is assumed the same size as an ACK so we can
1222	 *     use the precalculated ACK durations.
1223	 */
1224	if (shortPreamble) {
1225		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
1226			ctsduration += rt->info[cix].spAckDuration;
1227		ctsduration += ath_hal_computetxtime(ah,
1228			rt, pktlen, rix, AH_TRUE, AH_TRUE);
1229		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
1230			ctsduration += rt->info[rix].spAckDuration;
1231	} else {
1232		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
1233			ctsduration += rt->info[cix].lpAckDuration;
1234		ctsduration += ath_hal_computetxtime(ah,
1235			rt, pktlen, rix, AH_FALSE, AH_TRUE);
1236		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
1237			ctsduration += rt->info[rix].lpAckDuration;
1238	}
1239
1240	return (ctsduration);
1241}
1242
1243/*
1244 * Update the given ath_buf with updated rts/cts setup and duration
1245 * values.
1246 *
1247 * To support rate lookups for each software retry, the rts/cts rate
1248 * and cts duration must be re-calculated.
1249 *
1250 * This function assumes the RTS/CTS flags have been set as needed;
1251 * mrr has been disabled; and the rate control lookup has been done.
1252 *
1253 * XXX TODO: MRR need only be disabled for the pre-11n NICs.
1254 * XXX The 11n NICs support per-rate RTS/CTS configuration.
1255 */
1256static void
1257ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
1258{
1259	uint16_t ctsduration = 0;
1260	uint8_t ctsrate = 0;
1261	uint8_t rix = bf->bf_state.bfs_rc[0].rix;
1262	uint8_t cix = 0;
1263	const HAL_RATE_TABLE *rt = sc->sc_currates;
1264
1265	/*
1266	 * No RTS/CTS enabled? Don't bother.
1267	 */
1268	if ((bf->bf_state.bfs_txflags &
1269	    (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
1270		/* XXX is this really needed? */
1271		bf->bf_state.bfs_ctsrate = 0;
1272		bf->bf_state.bfs_ctsduration = 0;
1273		return;
1274	}
1275
1276	/*
1277	 * If protection is enabled, use the protection rix control
1278	 * rate. Otherwise use the rate0 control rate.
1279	 */
1280	if (bf->bf_state.bfs_doprot)
1281		rix = sc->sc_protrix;
1282	else
1283		rix = bf->bf_state.bfs_rc[0].rix;
1284
1285	/*
1286	 * If the raw path has hard-coded ctsrate0 to something,
1287	 * use it.
1288	 */
1289	if (bf->bf_state.bfs_ctsrate0 != 0)
1290		cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
1291	else
1292		/* Control rate from above */
1293		cix = rt->info[rix].controlRate;
1294
1295	/* Calculate the rtscts rate for the given cix */
1296	ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
1297	    bf->bf_state.bfs_shpream);
1298
1299	/* The 11n chipsets do ctsduration calculations for you */
1300	if (! ath_tx_is_11n(sc))
1301		ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
1302		    bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
1303		    rt, bf->bf_state.bfs_txflags);
1304
1305	/* Squirrel away in ath_buf */
1306	bf->bf_state.bfs_ctsrate = ctsrate;
1307	bf->bf_state.bfs_ctsduration = ctsduration;
1308
1309	/*
1310	 * Must disable multi-rate retry when using RTS/CTS.
1311	 */
1312	if (!sc->sc_mrrprot) {
1313		bf->bf_state.bfs_ismrr = 0;
1314		bf->bf_state.bfs_try0 =
1315		    bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */
1316	}
1317}
1318
1319/*
1320 * Setup the descriptor chain for a normal or fast-frame
1321 * frame.
1322 *
1323 * XXX TODO: extend to include the destination hardware QCU ID.
1324 * Make sure that is correct.  Make sure that when being added
1325 * to the mcastq, the CABQ QCUID is set or things will get a bit
1326 * odd.
1327 */
1328static void
1329ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
1330{
1331	struct ath_desc *ds = bf->bf_desc;
1332	struct ath_hal *ah = sc->sc_ah;
1333
1334	if (bf->bf_state.bfs_txrate0 == 0)
1335		DPRINTF(sc, ATH_DEBUG_XMIT,
1336		    "%s: bf=%p, txrate0=%d\n", __func__, bf, 0);
1337
1338	ath_hal_setuptxdesc(ah, ds
1339		, bf->bf_state.bfs_pktlen	/* packet length */
1340		, bf->bf_state.bfs_hdrlen	/* header length */
1341		, bf->bf_state.bfs_atype	/* Atheros packet type */
1342		, bf->bf_state.bfs_txpower	/* txpower */
1343		, bf->bf_state.bfs_txrate0
1344		, bf->bf_state.bfs_try0		/* series 0 rate/tries */
1345		, bf->bf_state.bfs_keyix	/* key cache index */
1346		, bf->bf_state.bfs_txantenna	/* antenna mode */
1347		, bf->bf_state.bfs_txflags	/* flags */
1348		, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
1349		, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
1350	);
1351
1352	/*
1353	 * This will be overridden when the descriptor chain is written.
1354	 */
1355	bf->bf_lastds = ds;
1356	bf->bf_last = bf;
1357
1358	/* Set rate control and descriptor chain for this frame */
1359	ath_tx_set_ratectrl(sc, bf->bf_node, bf);
1360	ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);
1361}
1362
1363/*
1364 * Do a rate lookup.
1365 *
1366 * This performs a rate lookup for the given ath_buf only if it's required.
1367 * Non-data frames and raw frames don't require it.
1368 *
1369 * This populates the primary and MRR entries; MRR values are
1370 * then disabled later on if something requires it (eg RTS/CTS on
1371 * pre-11n chipsets.
1372 *
1373 * This needs to be done before the RTS/CTS fields are calculated
1374 * as they may depend upon the rate chosen.
1375 */
1376static void
1377ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf, int tid,
1378    int pktlen, int is_aggr)
1379{
1380	uint8_t rate, rix;
1381	int try0;
1382	int maxdur; // Note: Unused for now
1383	int maxpktlen;
1384
1385	if (! bf->bf_state.bfs_doratelookup)
1386		return;
1387
1388	/* Get rid of any previous state */
1389	bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1390
1391	ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
1392	ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
1393	    pktlen, tid, is_aggr, &rix, &try0, &rate, &maxdur, &maxpktlen);
1394
1395	/* In case MRR is disabled, make sure rc[0] is setup correctly */
1396	bf->bf_state.bfs_rc[0].rix = rix;
1397	bf->bf_state.bfs_rc[0].ratecode = rate;
1398	bf->bf_state.bfs_rc[0].tries = try0;
1399
1400	if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
1401		ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
1402		    is_aggr, bf->bf_state.bfs_rc);
1403	ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));
1404
1405	sc->sc_txrix = rix;	/* for LED blinking */
1406	sc->sc_lastdatarix = rix;	/* for fast frames */
1407	bf->bf_state.bfs_try0 = try0;
1408	bf->bf_state.bfs_txrate0 = rate;
1409	bf->bf_state.bfs_rc_maxpktlen = maxpktlen;
1410}
1411
1412/*
1413 * Update the CLRDMASK bit in the ath_buf if it needs to be set.
1414 */
1415static void
1416ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
1417    struct ath_buf *bf)
1418{
1419	struct ath_node *an = ATH_NODE(bf->bf_node);
1420
1421	ATH_TX_LOCK_ASSERT(sc);
1422
1423	if (an->clrdmask == 1) {
1424		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
1425		an->clrdmask = 0;
1426	}
1427}
1428
1429/*
1430 * Return whether this frame should be software queued or
1431 * direct dispatched.
1432 *
1433 * When doing powersave, BAR frames should be queued but other management
1434 * frames should be directly sent.
1435 *
1436 * When not doing powersave, stick BAR frames into the hardware queue
1437 * so it goes out even though the queue is paused.
1438 *
1439 * For now, management frames are also software queued by default.
1440 */
1441static int
1442ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,
1443    struct mbuf *m0, int *queue_to_head)
1444{
1445	struct ieee80211_node *ni = &an->an_node;
1446	struct ieee80211_frame *wh;
1447	uint8_t type, subtype;
1448
1449	wh = mtod(m0, struct ieee80211_frame *);
1450	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1451	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1452
1453	(*queue_to_head) = 0;
1454
1455	/* If it's not in powersave - direct-dispatch BAR */
1456	if ((ATH_NODE(ni)->an_is_powersave == 0)
1457	    && type == IEEE80211_FC0_TYPE_CTL &&
1458	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
1459		DPRINTF(sc, ATH_DEBUG_SW_TX,
1460		    "%s: BAR: TX'ing direct\n", __func__);
1461		return (0);
1462	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
1463	    && type == IEEE80211_FC0_TYPE_CTL &&
1464	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
1465		/* BAR TX whilst asleep; queue */
1466		DPRINTF(sc, ATH_DEBUG_SW_TX,
1467		    "%s: swq: TX'ing\n", __func__);
1468		(*queue_to_head) = 1;
1469		return (1);
1470	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
1471	    && (type == IEEE80211_FC0_TYPE_MGT ||
1472	        type == IEEE80211_FC0_TYPE_CTL)) {
1473		/*
1474		 * Other control/mgmt frame; bypass software queuing
1475		 * for now!
1476		 */
1477		DPRINTF(sc, ATH_DEBUG_XMIT,
1478		    "%s: %6D: Node is asleep; sending mgmt "
1479		    "(type=%d, subtype=%d)\n",
1480		    __func__, ni->ni_macaddr, ":", type, subtype);
1481		return (0);
1482	} else {
1483		return (1);
1484	}
1485}
1486
1487/*
1488 * Transmit the given frame to the hardware.
1489 *
1490 * The frame must already be setup; rate control must already have
1491 * been done.
1492 *
1493 * XXX since the TXQ lock is being held here (and I dislike holding
1494 * it for this long when not doing software aggregation), later on
1495 * break this function into "setup_normal" and "xmit_normal". The
1496 * lock only needs to be held for the ath_tx_handoff call.
1497 *
1498 * XXX we don't update the leak count here - if we're doing
1499 * direct frame dispatch, we need to be able to do it without
1500 * decrementing the leak count (eg multicast queue frames.)
1501 */
1502static void
1503ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
1504    struct ath_buf *bf)
1505{
1506	struct ath_node *an = ATH_NODE(bf->bf_node);
1507	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
1508
1509	ATH_TX_LOCK_ASSERT(sc);
1510
1511	/*
1512	 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does
1513	 * set a completion handler however it doesn't (yet) properly
1514	 * handle the strict ordering requirements needed for normal,
1515	 * non-aggregate session frames.
1516	 *
1517	 * Once this is implemented, only set CLRDMASK like this for
1518	 * frames that must go out - eg management/raw frames.
1519	 */
1520	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
1521
1522	/* Setup the descriptor before handoff */
1523	ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen, false);
1524	ath_tx_calc_duration(sc, bf);
1525	ath_tx_calc_protection(sc, bf);
1526	ath_tx_set_rtscts(sc, bf);
1527	ath_tx_rate_fill_rcflags(sc, bf);
1528	ath_tx_setds(sc, bf);
1529
1530	/* Track per-TID hardware queue depth correctly */
1531	tid->hwq_depth++;
1532
1533	/* Assign the completion handler */
1534	bf->bf_comp = ath_tx_normal_comp;
1535
1536	/* Hand off to hardware */
1537	ath_tx_handoff(sc, txq, bf);
1538}
1539
1540/*
1541 * Do the basic frame setup stuff that's required before the frame
1542 * is added to a software queue.
1543 *
1544 * All frames get mostly the same treatment and it's done once.
1545 * Retransmits fiddle with things like the rate control setup,
1546 * setting the retransmit bit in the packet; doing relevant DMA/bus
1547 * syncing and relinking it (back) into the hardware TX queue.
1548 *
1549 * Note that this may cause the mbuf to be reallocated, so
1550 * m0 may not be valid.
1551 */
1552static int
1553ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
1554    struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
1555{
1556	struct ieee80211vap *vap = ni->ni_vap;
1557	struct ieee80211com *ic = &sc->sc_ic;
1558	int error, iswep, ismcast, isfrag, ismrr;
1559	int keyix, hdrlen, pktlen, try0 = 0;
1560	u_int8_t rix = 0, txrate = 0;
1561	struct ath_desc *ds;
1562	struct ieee80211_frame *wh;
1563	u_int subtype, flags;
1564	HAL_PKT_TYPE atype;
1565	const HAL_RATE_TABLE *rt;
1566	HAL_BOOL shortPreamble;
1567	struct ath_node *an;
1568
1569	/* XXX TODO: this pri is only used for non-QoS check, right? */
1570	u_int pri;
1571
1572	/*
1573	 * To ensure that both sequence numbers and the CCMP PN handling
1574	 * is "correct", make sure that the relevant TID queue is locked.
1575	 * Otherwise the CCMP PN and seqno may appear out of order, causing
1576	 * re-ordered frames to have out of order CCMP PN's, resulting
1577	 * in many, many frame drops.
1578	 */
1579	ATH_TX_LOCK_ASSERT(sc);
1580
1581	wh = mtod(m0, struct ieee80211_frame *);
1582	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
1583	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1584	isfrag = m0->m_flags & M_FRAG;
1585	hdrlen = ieee80211_anyhdrsize(wh);
1586	/*
1587	 * Packet length must not include any
1588	 * pad bytes; deduct them here.
1589	 */
1590	pktlen = m0->m_pkthdr.len - (hdrlen & 3);
1591
1592	/* Handle encryption twiddling if needed */
1593	if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
1594	    &pktlen, &keyix)) {
1595		ieee80211_free_mbuf(m0);
1596		return EIO;
1597	}
1598
1599	/* packet header may have moved, reset our local pointer */
1600	wh = mtod(m0, struct ieee80211_frame *);
1601
1602	pktlen += IEEE80211_CRC_LEN;
1603
1604	/*
1605	 * Load the DMA map so any coalescing is done.  This
1606	 * also calculates the number of descriptors we need.
1607	 */
1608	error = ath_tx_dmasetup(sc, bf, m0);
1609	if (error != 0)
1610		return error;
1611	KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
1612	bf->bf_node = ni;			/* NB: held reference */
1613	m0 = bf->bf_m;				/* NB: may have changed */
1614	wh = mtod(m0, struct ieee80211_frame *);
1615
1616	/* setup descriptors */
1617	ds = bf->bf_desc;
1618	rt = sc->sc_currates;
1619	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
1620
1621	/*
1622	 * NB: the 802.11 layer marks whether or not we should
1623	 * use short preamble based on the current mode and
1624	 * negotiated parameters.
1625	 */
1626	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
1627	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
1628		shortPreamble = AH_TRUE;
1629		sc->sc_stats.ast_tx_shortpre++;
1630	} else {
1631		shortPreamble = AH_FALSE;
1632	}
1633
1634	an = ATH_NODE(ni);
1635	//flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
1636	flags = 0;
1637	ismrr = 0;				/* default no multi-rate retry*/
1638
1639	pri = ath_tx_getac(sc, m0);			/* honor classification */
1640	/* XXX use txparams instead of fixed values */
1641	/*
1642	 * Calculate Atheros packet type from IEEE80211 packet header,
1643	 * setup for rate calculations, and select h/w transmit queue.
1644	 */
1645	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
1646	case IEEE80211_FC0_TYPE_MGT:
1647		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1648		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
1649			atype = HAL_PKT_TYPE_BEACON;
1650		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
1651			atype = HAL_PKT_TYPE_PROBE_RESP;
1652		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
1653			atype = HAL_PKT_TYPE_ATIM;
1654		else
1655			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
1656		rix = an->an_mgmtrix;
1657		txrate = rt->info[rix].rateCode;
1658		if (shortPreamble)
1659			txrate |= rt->info[rix].shortPreamble;
1660		try0 = ATH_TXMGTTRY;
1661		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
1662		break;
1663	case IEEE80211_FC0_TYPE_CTL:
1664		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
1665		rix = an->an_mgmtrix;
1666		txrate = rt->info[rix].rateCode;
1667		if (shortPreamble)
1668			txrate |= rt->info[rix].shortPreamble;
1669		try0 = ATH_TXMGTTRY;
1670		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
1671		break;
1672	case IEEE80211_FC0_TYPE_DATA:
1673		atype = HAL_PKT_TYPE_NORMAL;		/* default */
1674		/*
1675		 * Data frames: multicast frames go out at a fixed rate,
1676		 * EAPOL frames use the mgmt frame rate; otherwise consult
1677		 * the rate control module for the rate to use.
1678		 */
1679		if (ismcast) {
1680			rix = an->an_mcastrix;
1681			txrate = rt->info[rix].rateCode;
1682			if (shortPreamble)
1683				txrate |= rt->info[rix].shortPreamble;
1684			try0 = 1;
1685		} else if (m0->m_flags & M_EAPOL) {
1686			/* XXX? maybe always use long preamble? */
1687			rix = an->an_mgmtrix;
1688			txrate = rt->info[rix].rateCode;
1689			if (shortPreamble)
1690				txrate |= rt->info[rix].shortPreamble;
1691			try0 = ATH_TXMAXTRY;	/* XXX?too many? */
1692		} else {
1693			/*
1694			 * Do rate lookup on each TX, rather than using
1695			 * the hard-coded TX information decided here.
1696			 */
1697			ismrr = 1;
1698			bf->bf_state.bfs_doratelookup = 1;
1699		}
1700
1701		/*
1702		 * Check whether to set NOACK for this WME category or not.
1703		 */
1704		if (ieee80211_wme_vap_ac_is_noack(vap, pri))
1705			flags |= HAL_TXDESC_NOACK;
1706		break;
1707	default:
1708		device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
1709		    wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
1710		/* XXX statistic */
1711		/* XXX free tx dmamap */
1712		ieee80211_free_mbuf(m0);
1713		return EIO;
1714	}
1715
1716	/*
1717	 * There are two known scenarios where the frame AC doesn't match
1718	 * what the destination TXQ is.
1719	 *
1720	 * + non-QoS frames (eg management?) that the net80211 stack has
1721	 *   assigned a higher AC to, but since it's a non-QoS TID, it's
1722	 *   being thrown into TID 16.  TID 16 gets the AC_BE queue.
1723	 *   It's quite possible that management frames should just be
1724	 *   direct dispatched to hardware rather than go via the software
1725	 *   queue; that should be investigated in the future.  There are
1726	 *   some specific scenarios where this doesn't make sense, mostly
1727	 *   surrounding ADDBA request/response - hence why that is special
1728	 *   cased.
1729	 *
1730	 * + Multicast frames going into the VAP mcast queue.  That shows up
1731	 *   as "TXQ 11".
1732	 *
1733	 * This driver should eventually support separate TID and TXQ locking,
1734	 * allowing for arbitrary AC frames to appear on arbitrary software
1735	 * queues, being queued to the "correct" hardware queue when needed.
1736	 */
1737#if 0
1738	if (txq != sc->sc_ac2q[pri]) {
1739		DPRINTF(sc, ATH_DEBUG_XMIT,
1740		    "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
1741		    __func__,
1742		    txq,
1743		    txq->axq_qnum,
1744		    pri,
1745		    sc->sc_ac2q[pri],
1746		    sc->sc_ac2q[pri]->axq_qnum);
1747	}
1748#endif
1749
1750	/*
1751	 * Calculate miscellaneous flags.
1752	 */
1753	if (ismcast) {
1754		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
1755	} else if (pktlen > vap->iv_rtsthreshold &&
1756	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
1757		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
1758		sc->sc_stats.ast_tx_rts++;
1759	}
1760	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
1761		sc->sc_stats.ast_tx_noack++;
1762#ifdef IEEE80211_SUPPORT_TDMA
1763	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
1764		DPRINTF(sc, ATH_DEBUG_TDMA,
1765		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
1766		sc->sc_stats.ast_tdma_ack++;
1767		/* XXX free tx dmamap */
1768		ieee80211_free_mbuf(m0);
1769		return EIO;
1770	}
1771#endif
1772
1773	/*
1774	 * If it's a frame to do location reporting on,
1775	 * communicate it to the HAL.
1776	 */
1777	if (ieee80211_get_toa_params(m0, NULL)) {
1778		device_printf(sc->sc_dev,
1779		    "%s: setting TX positioning bit\n", __func__);
1780		flags |= HAL_TXDESC_POS;
1781
1782		/*
1783		 * Note: The hardware reports timestamps for
1784		 * each of the RX'ed packets as part of the packet
1785		 * exchange.  So this means things like RTS/CTS
1786		 * exchanges, as well as the final ACK.
1787		 *
1788		 * So, if you send a RTS-protected NULL data frame,
1789		 * you'll get an RX report for the RTS response, then
1790		 * an RX report for the NULL frame, and then the TX
1791		 * completion at the end.
1792		 *
1793		 * NOTE: it doesn't work right for CCK frames;
1794		 * there's no channel info data provided unless
1795		 * it's OFDM or HT.  Will have to dig into it.
1796		 */
1797		flags &= ~(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA);
1798		bf->bf_flags |= ATH_BUF_TOA_PROBE;
1799	}
1800
1801#if 0
1802	/*
1803	 * Placeholder: if you want to transmit with the azimuth
1804	 * timestamp in the end of the payload, here's where you
1805	 * should set the TXDESC field.
1806	 */
1807	flags |= HAL_TXDESC_HWTS;
1808#endif
1809
1810	/*
1811	 * Determine if a tx interrupt should be generated for
1812	 * this descriptor.  We take a tx interrupt to reap
1813	 * descriptors when the h/w hits an EOL condition or
1814	 * when the descriptor is specifically marked to generate
1815	 * an interrupt.  We periodically mark descriptors in this
1816	 * way to insure timely replenishing of the supply needed
1817	 * for sending frames.  Defering interrupts reduces system
1818	 * load and potentially allows more concurrent work to be
1819	 * done but if done to aggressively can cause senders to
1820	 * backup.
1821	 *
1822	 * NB: use >= to deal with sc_txintrperiod changing
1823	 *     dynamically through sysctl.
1824	 */
1825	if (flags & HAL_TXDESC_INTREQ) {
1826		txq->axq_intrcnt = 0;
1827	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
1828		flags |= HAL_TXDESC_INTREQ;
1829		txq->axq_intrcnt = 0;
1830	}
1831
1832	/* This point forward is actual TX bits */
1833
1834	/*
1835	 * At this point we are committed to sending the frame
1836	 * and we don't need to look at m_nextpkt; clear it in
1837	 * case this frame is part of frag chain.
1838	 */
1839	m0->m_nextpkt = NULL;
1840
1841	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
1842		ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
1843		    sc->sc_hwmap[rix].ieeerate, -1);
1844
1845	if (ieee80211_radiotap_active_vap(vap)) {
1846		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
1847		if (iswep)
1848			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
1849		if (isfrag)
1850			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
1851		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
1852		sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
1853		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
1854
1855		ieee80211_radiotap_tx(vap, m0);
1856	}
1857
1858	/* Blank the legacy rate array */
1859	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1860
1861	/*
1862	 * ath_buf_set_rate needs at least one rate/try to setup
1863	 * the rate scenario.
1864	 */
1865	bf->bf_state.bfs_rc[0].rix = rix;
1866	bf->bf_state.bfs_rc[0].tries = try0;
1867	bf->bf_state.bfs_rc[0].ratecode = txrate;
1868
1869	/* Store the decided rate index values away */
1870	bf->bf_state.bfs_pktlen = pktlen;
1871	bf->bf_state.bfs_hdrlen = hdrlen;
1872	bf->bf_state.bfs_atype = atype;
1873	bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni);
1874	bf->bf_state.bfs_txrate0 = txrate;
1875	bf->bf_state.bfs_try0 = try0;
1876	bf->bf_state.bfs_keyix = keyix;
1877	bf->bf_state.bfs_txantenna = sc->sc_txantenna;
1878	bf->bf_state.bfs_txflags = flags;
1879	bf->bf_state.bfs_shpream = shortPreamble;
1880
1881	/* XXX this should be done in ath_tx_setrate() */
1882	bf->bf_state.bfs_ctsrate0 = 0;	/* ie, no hard-coded ctsrate */
1883	bf->bf_state.bfs_ctsrate = 0;	/* calculated later */
1884	bf->bf_state.bfs_ctsduration = 0;
1885	bf->bf_state.bfs_ismrr = ismrr;
1886
1887	return 0;
1888}
1889
1890/*
1891 * Queue a frame to the hardware or software queue.
1892 *
1893 * This can be called by the net80211 code.
1894 *
1895 * XXX what about locking? Or, push the seqno assign into the
1896 * XXX aggregate scheduler so its serialised?
1897 *
1898 * XXX When sending management frames via ath_raw_xmit(),
1899 *     should CLRDMASK be set unconditionally?
1900 */
1901int
1902ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
1903    struct ath_buf *bf, struct mbuf *m0)
1904{
1905	struct ieee80211vap *vap = ni->ni_vap;
1906	struct ath_vap *avp = ATH_VAP(vap);
1907	int r = 0;
1908	u_int pri;
1909	int tid;
1910	struct ath_txq *txq;
1911	int ismcast;
1912	const struct ieee80211_frame *wh;
1913	int is_ampdu, is_ampdu_tx, is_ampdu_pending;
1914	ieee80211_seq seqno;
1915	uint8_t type, subtype;
1916	int queue_to_head;
1917
1918	ATH_TX_LOCK_ASSERT(sc);
1919
1920	/*
1921	 * Determine the target hardware queue.
1922	 *
1923	 * For multicast frames, the txq gets overridden appropriately
1924	 * depending upon the state of PS.  If powersave is enabled
1925	 * then they get added to the cabq for later transmit.
1926	 *
1927	 * The "fun" issue here is that group addressed frames should
1928	 * have the sequence number from a different pool, rather than
1929	 * the per-TID pool.  That means that even QoS group addressed
1930	 * frames will have a sequence number from that global value,
1931	 * which means if we transmit different group addressed frames
1932	 * at different traffic priorities, the sequence numbers will
1933	 * all be out of whack.  So - chances are, the right thing
1934	 * to do here is to always put group addressed frames into the BE
1935	 * queue, and ignore the TID for queue selection.
1936	 *
1937	 * For any other frame, we do a TID/QoS lookup inside the frame
1938	 * to see what the TID should be. If it's a non-QoS frame, the
1939	 * AC and TID are overridden. The TID/TXQ code assumes the
1940	 * TID is on a predictable hardware TXQ, so we don't support
1941	 * having a node TID queued to multiple hardware TXQs.
1942	 * This may change in the future but would require some locking
1943	 * fudgery.
1944	 */
1945	pri = ath_tx_getac(sc, m0);
1946	tid = ath_tx_gettid(sc, m0);
1947
1948	txq = sc->sc_ac2q[pri];
1949	wh = mtod(m0, struct ieee80211_frame *);
1950	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1951	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1952	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1953
1954	/*
1955	 * Enforce how deep the multicast queue can grow.
1956	 *
1957	 * XXX duplicated in ath_raw_xmit().
1958	 */
1959	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1960		if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
1961		    > sc->sc_txq_mcastq_maxdepth) {
1962			sc->sc_stats.ast_tx_mcastq_overflow++;
1963			m_freem(m0);
1964			return (ENOBUFS);
1965		}
1966	}
1967
1968	/*
1969	 * Enforce how deep the unicast queue can grow.
1970	 *
1971	 * If the node is in power save then we don't want
1972	 * the software queue to grow too deep, or a node may
1973	 * end up consuming all of the ath_buf entries.
1974	 *
1975	 * For now, only do this for DATA frames.
1976	 *
1977	 * We will want to cap how many management/control
1978	 * frames get punted to the software queue so it doesn't
1979	 * fill up.  But the correct solution isn't yet obvious.
1980	 * In any case, this check should at least let frames pass
1981	 * that we are direct-dispatching.
1982	 *
1983	 * XXX TODO: duplicate this to the raw xmit path!
1984	 */
1985	if (type == IEEE80211_FC0_TYPE_DATA &&
1986	    ATH_NODE(ni)->an_is_powersave &&
1987	    ATH_NODE(ni)->an_swq_depth >
1988	     sc->sc_txq_node_psq_maxdepth) {
1989		sc->sc_stats.ast_tx_node_psq_overflow++;
1990		m_freem(m0);
1991		return (ENOBUFS);
1992	}
1993
1994	/* A-MPDU TX */
1995	is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
1996	is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
1997	is_ampdu = is_ampdu_tx | is_ampdu_pending;
1998
1999	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
2000	    __func__, tid, pri, is_ampdu);
2001
2002	/* Set local packet state, used to queue packets to hardware */
2003	bf->bf_state.bfs_tid = tid;
2004	bf->bf_state.bfs_tx_queue = txq->axq_qnum;
2005	bf->bf_state.bfs_pri = pri;
2006
2007#if 1
2008	/*
2009	 * When servicing one or more stations in power-save mode
2010	 * (or) if there is some mcast data waiting on the mcast
2011	 * queue (to prevent out of order delivery) multicast frames
2012	 * must be bufferd until after the beacon.
2013	 *
2014	 * TODO: we should lock the mcastq before we check the length.
2015	 */
2016	if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
2017		txq = &avp->av_mcastq;
2018		/*
2019		 * Mark the frame as eventually belonging on the CAB
2020		 * queue, so the descriptor setup functions will
2021		 * correctly initialise the descriptor 'qcuId' field.
2022		 */
2023		bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
2024	}
2025#endif
2026
2027	/* Do the generic frame setup */
2028	/* XXX should just bzero the bf_state? */
2029	bf->bf_state.bfs_dobaw = 0;
2030
2031	/* A-MPDU TX? Manually set sequence number */
2032	/*
2033	 * Don't do it whilst pending; the net80211 layer still
2034	 * assigns them.
2035	 *
2036	 * Don't assign A-MPDU sequence numbers to group address
2037	 * frames; they come from a different sequence number space.
2038	 */
2039	if (is_ampdu_tx && (! IEEE80211_IS_MULTICAST(wh->i_addr1))) {
2040		/*
2041		 * Always call; this function will
2042		 * handle making sure that null data frames
2043		 * and group-addressed frames don't get a sequence number
2044		 * from the current TID and thus mess with the BAW.
2045		 */
2046		seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);
2047
2048		/*
2049		 * Don't add QoS NULL frames and group-addressed frames
2050		 * to the BAW.
2051		 */
2052		if (IEEE80211_QOS_HAS_SEQ(wh) &&
2053		    (! IEEE80211_IS_MULTICAST(wh->i_addr1)) &&
2054		    (subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL)) {
2055			bf->bf_state.bfs_dobaw = 1;
2056		}
2057	}
2058
2059	/*
2060	 * If needed, the sequence number has been assigned.
2061	 * Squirrel it away somewhere easy to get to.
2062	 */
2063	bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;
2064
2065	/* Is ampdu pending? fetch the seqno and print it out */
2066	if (is_ampdu_pending)
2067		DPRINTF(sc, ATH_DEBUG_SW_TX,
2068		    "%s: tid %d: ampdu pending, seqno %d\n",
2069		    __func__, tid, M_SEQNO_GET(m0));
2070
2071	/* This also sets up the DMA map; crypto; frame parameters, etc */
2072	r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
2073
2074	if (r != 0)
2075		goto done;
2076
2077	/* At this point m0 could have changed! */
2078	m0 = bf->bf_m;
2079
2080#if 1
2081	/*
2082	 * If it's a multicast frame, do a direct-dispatch to the
2083	 * destination hardware queue. Don't bother software
2084	 * queuing it.
2085	 */
2086	/*
2087	 * If it's a BAR frame, do a direct dispatch to the
2088	 * destination hardware queue. Don't bother software
2089	 * queuing it, as the TID will now be paused.
2090	 * Sending a BAR frame can occur from the net80211 txa timer
2091	 * (ie, retries) or from the ath txtask (completion call.)
2092	 * It queues directly to hardware because the TID is paused
2093	 * at this point (and won't be unpaused until the BAR has
2094	 * either been TXed successfully or max retries has been
2095	 * reached.)
2096	 */
2097	/*
2098	 * Until things are better debugged - if this node is asleep
2099	 * and we're sending it a non-BAR frame, direct dispatch it.
2100	 * Why? Because we need to figure out what's actually being
2101	 * sent - eg, during reassociation/reauthentication after
2102	 * the node (last) disappeared whilst asleep, the driver should
2103	 * have unpaused/unsleep'ed the node.  So until that is
2104	 * sorted out, use this workaround.
2105	 */
2106	if (txq == &avp->av_mcastq) {
2107		DPRINTF(sc, ATH_DEBUG_SW_TX,
2108		    "%s: bf=%p: mcastq: TX'ing\n", __func__, bf);
2109		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2110		ath_tx_xmit_normal(sc, txq, bf);
2111	} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2112	    &queue_to_head)) {
2113		ath_tx_swq(sc, ni, txq, queue_to_head, bf);
2114	} else {
2115		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2116		ath_tx_xmit_normal(sc, txq, bf);
2117	}
2118#else
2119	/*
2120	 * For now, since there's no software queue,
2121	 * direct-dispatch to the hardware.
2122	 */
2123	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2124	/*
2125	 * Update the current leak count if
2126	 * we're leaking frames; and set the
2127	 * MORE flag as appropriate.
2128	 */
2129	ath_tx_leak_count_update(sc, tid, bf);
2130	ath_tx_xmit_normal(sc, txq, bf);
2131#endif
2132done:
2133	return 0;
2134}
2135
2136static int
2137ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
2138	struct ath_buf *bf, struct mbuf *m0,
2139	const struct ieee80211_bpf_params *params)
2140{
2141	struct ieee80211com *ic = &sc->sc_ic;
2142	struct ieee80211vap *vap = ni->ni_vap;
2143	int error, ismcast, ismrr;
2144	int keyix, hdrlen, pktlen, try0, txantenna;
2145	u_int8_t rix, txrate;
2146	struct ieee80211_frame *wh;
2147	u_int flags;
2148	HAL_PKT_TYPE atype;
2149	const HAL_RATE_TABLE *rt;
2150	struct ath_desc *ds;
2151	u_int pri;
2152	int o_tid = -1;
2153	int do_override;
2154	uint8_t type, subtype;
2155	int queue_to_head;
2156	struct ath_node *an = ATH_NODE(ni);
2157
2158	ATH_TX_LOCK_ASSERT(sc);
2159
2160	wh = mtod(m0, struct ieee80211_frame *);
2161	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
2162	hdrlen = ieee80211_anyhdrsize(wh);
2163	/*
2164	 * Packet length must not include any
2165	 * pad bytes; deduct them here.
2166	 */
2167	/* XXX honor IEEE80211_BPF_DATAPAD */
2168	pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;
2169
2170	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2171	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2172
2173	ATH_KTR(sc, ATH_KTR_TX, 2,
2174	     "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf);
2175
2176	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n",
2177	    __func__, ismcast);
2178
2179	pri = params->ibp_pri & 3;
2180	/* Override pri if the frame isn't a QoS one */
2181	if (! IEEE80211_QOS_HAS_SEQ(wh))
2182		pri = ath_tx_getac(sc, m0);
2183
2184	/* XXX If it's an ADDBA, override the correct queue */
2185	do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid);
2186
2187	/* Map ADDBA to the correct priority */
2188	if (do_override) {
2189#if 1
2190		DPRINTF(sc, ATH_DEBUG_XMIT,
2191		    "%s: overriding tid %d pri %d -> %d\n",
2192		    __func__, o_tid, pri, TID_TO_WME_AC(o_tid));
2193#endif
2194		pri = TID_TO_WME_AC(o_tid);
2195	}
2196
2197	/*
2198	 * "pri" is the hardware queue to transmit on.
2199	 *
2200	 * Look at the description in ath_tx_start() to understand
2201	 * what needs to be "fixed" here so we just use the TID
2202	 * for QoS frames.
2203	 */
2204
2205	/* Handle encryption twiddling if needed */
2206	if (! ath_tx_tag_crypto(sc, ni,
2207	    m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
2208	    &hdrlen, &pktlen, &keyix)) {
2209		ieee80211_free_mbuf(m0);
2210		return EIO;
2211	}
2212	/* packet header may have moved, reset our local pointer */
2213	wh = mtod(m0, struct ieee80211_frame *);
2214
2215	/* Do the generic frame setup */
2216	/* XXX should just bzero the bf_state? */
2217	bf->bf_state.bfs_dobaw = 0;
2218
2219	error = ath_tx_dmasetup(sc, bf, m0);
2220	if (error != 0)
2221		return error;
2222	m0 = bf->bf_m;				/* NB: may have changed */
2223	wh = mtod(m0, struct ieee80211_frame *);
2224	KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
2225	bf->bf_node = ni;			/* NB: held reference */
2226
2227	/* Always enable CLRDMASK for raw frames for now.. */
2228	flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
2229	flags |= HAL_TXDESC_INTREQ;		/* force interrupt */
2230	if (params->ibp_flags & IEEE80211_BPF_RTS)
2231		flags |= HAL_TXDESC_RTSENA;
2232	else if (params->ibp_flags & IEEE80211_BPF_CTS) {
2233		/* XXX assume 11g/11n protection? */
2234		bf->bf_state.bfs_doprot = 1;
2235		flags |= HAL_TXDESC_CTSENA;
2236	}
2237	/* XXX leave ismcast to injector? */
2238	if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
2239		flags |= HAL_TXDESC_NOACK;
2240
2241	rt = sc->sc_currates;
2242	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
2243
2244	/* Fetch first rate information */
2245	rix = ath_tx_findrix(sc, params->ibp_rate0);
2246	try0 = params->ibp_try0;
2247
2248	/*
2249	 * Override EAPOL rate as appropriate.
2250	 */
2251	if (m0->m_flags & M_EAPOL) {
2252		/* XXX? maybe always use long preamble? */
2253		rix = an->an_mgmtrix;
2254		try0 = ATH_TXMAXTRY;	/* XXX?too many? */
2255	}
2256
2257	/*
2258	 * If it's a frame to do location reporting on,
2259	 * communicate it to the HAL.
2260	 */
2261	if (ieee80211_get_toa_params(m0, NULL)) {
2262		device_printf(sc->sc_dev,
2263		    "%s: setting TX positioning bit\n", __func__);
2264		flags |= HAL_TXDESC_POS;
2265		flags &= ~(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA);
2266		bf->bf_flags |= ATH_BUF_TOA_PROBE;
2267	}
2268
2269	txrate = rt->info[rix].rateCode;
2270	if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
2271		txrate |= rt->info[rix].shortPreamble;
2272	sc->sc_txrix = rix;
2273	ismrr = (params->ibp_try1 != 0);
2274	txantenna = params->ibp_pri >> 2;
2275	if (txantenna == 0)			/* XXX? */
2276		txantenna = sc->sc_txantenna;
2277
2278	/*
2279	 * Since ctsrate is fixed, store it away for later
2280	 * use when the descriptor fields are being set.
2281	 */
2282	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA))
2283		bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate;
2284
2285	/*
2286	 * NB: we mark all packets as type PSPOLL so the h/w won't
2287	 * set the sequence number, duration, etc.
2288	 */
2289	atype = HAL_PKT_TYPE_PSPOLL;
2290
2291	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
2292		ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
2293		    sc->sc_hwmap[rix].ieeerate, -1);
2294
2295	if (ieee80211_radiotap_active_vap(vap)) {
2296		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
2297		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
2298			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2299		if (m0->m_flags & M_FRAG)
2300			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
2301		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
2302		sc->sc_tx_th.wt_txpower = MIN(params->ibp_power,
2303		    ieee80211_get_node_txpower(ni));
2304		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
2305
2306		ieee80211_radiotap_tx(vap, m0);
2307	}
2308
2309	/*
2310	 * Formulate first tx descriptor with tx controls.
2311	 */
2312	ds = bf->bf_desc;
2313	/* XXX check return value? */
2314
2315	/* Store the decided rate index values away */
2316	bf->bf_state.bfs_pktlen = pktlen;
2317	bf->bf_state.bfs_hdrlen = hdrlen;
2318	bf->bf_state.bfs_atype = atype;
2319	bf->bf_state.bfs_txpower = MIN(params->ibp_power,
2320	    ieee80211_get_node_txpower(ni));
2321	bf->bf_state.bfs_txrate0 = txrate;
2322	bf->bf_state.bfs_try0 = try0;
2323	bf->bf_state.bfs_keyix = keyix;
2324	bf->bf_state.bfs_txantenna = txantenna;
2325	bf->bf_state.bfs_txflags = flags;
2326	bf->bf_state.bfs_shpream =
2327	    !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE);
2328
2329	/* Set local packet state, used to queue packets to hardware */
2330	bf->bf_state.bfs_tid = WME_AC_TO_TID(pri);
2331	bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;
2332	bf->bf_state.bfs_pri = pri;
2333
2334	/* XXX this should be done in ath_tx_setrate() */
2335	bf->bf_state.bfs_ctsrate = 0;
2336	bf->bf_state.bfs_ctsduration = 0;
2337	bf->bf_state.bfs_ismrr = ismrr;
2338
2339	/* Blank the legacy rate array */
2340	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
2341
2342	bf->bf_state.bfs_rc[0].rix = rix;
2343	bf->bf_state.bfs_rc[0].tries = try0;
2344	bf->bf_state.bfs_rc[0].ratecode = txrate;
2345
2346	if (ismrr) {
2347		int rix;
2348
2349		rix = ath_tx_findrix(sc, params->ibp_rate1);
2350		bf->bf_state.bfs_rc[1].rix = rix;
2351		bf->bf_state.bfs_rc[1].tries = params->ibp_try1;
2352
2353		rix = ath_tx_findrix(sc, params->ibp_rate2);
2354		bf->bf_state.bfs_rc[2].rix = rix;
2355		bf->bf_state.bfs_rc[2].tries = params->ibp_try2;
2356
2357		rix = ath_tx_findrix(sc, params->ibp_rate3);
2358		bf->bf_state.bfs_rc[3].rix = rix;
2359		bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
2360	}
2361	/*
2362	 * All the required rate control decisions have been made;
2363	 * fill in the rc flags.
2364	 */
2365	ath_tx_rate_fill_rcflags(sc, bf);
2366
2367	/* NB: no buffered multicast in power save support */
2368
2369	/*
2370	 * If we're overiding the ADDBA destination, dump directly
2371	 * into the hardware queue, right after any pending
2372	 * frames to that node are.
2373	 */
2374	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",
2375	    __func__, do_override);
2376
2377#if 1
2378	/*
2379	 * Put addba frames in the right place in the right TID/HWQ.
2380	 */
2381	if (do_override) {
2382		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2383		/*
2384		 * XXX if it's addba frames, should we be leaking
2385		 * them out via the frame leak method?
2386		 * XXX for now let's not risk it; but we may wish
2387		 * to investigate this later.
2388		 */
2389		ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2390	} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2391	    &queue_to_head)) {
2392		/* Queue to software queue */
2393		ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);
2394	} else {
2395		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2396		ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2397	}
2398#else
2399	/* Direct-dispatch to the hardware */
2400	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2401	/*
2402	 * Update the current leak count if
2403	 * we're leaking frames; and set the
2404	 * MORE flag as appropriate.
2405	 */
2406	ath_tx_leak_count_update(sc, tid, bf);
2407	ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2408#endif
2409	return 0;
2410}
2411
2412/*
2413 * Send a raw frame.
2414 *
2415 * This can be called by net80211.
2416 */
2417int
2418ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
2419	const struct ieee80211_bpf_params *params)
2420{
2421	struct ieee80211com *ic = ni->ni_ic;
2422	struct ath_softc *sc = ic->ic_softc;
2423	struct ath_buf *bf;
2424	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
2425	int error = 0;
2426
2427	ATH_PCU_LOCK(sc);
2428	if (sc->sc_inreset_cnt > 0) {
2429		DPRINTF(sc, ATH_DEBUG_XMIT,
2430		    "%s: sc_inreset_cnt > 0; bailing\n", __func__);
2431		error = EIO;
2432		ATH_PCU_UNLOCK(sc);
2433		goto badbad;
2434	}
2435	sc->sc_txstart_cnt++;
2436	ATH_PCU_UNLOCK(sc);
2437
2438	/* Wake the hardware up already */
2439	ATH_LOCK(sc);
2440	ath_power_set_power_state(sc, HAL_PM_AWAKE);
2441	ATH_UNLOCK(sc);
2442
2443	ATH_TX_LOCK(sc);
2444
2445	if (!sc->sc_running || sc->sc_invalid) {
2446		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, r/i: %d/%d",
2447		    __func__, sc->sc_running, sc->sc_invalid);
2448		m_freem(m);
2449		error = ENETDOWN;
2450		goto bad;
2451	}
2452
2453	/*
2454	 * Enforce how deep the multicast queue can grow.
2455	 *
2456	 * XXX duplicated in ath_tx_start().
2457	 */
2458	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2459		if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
2460		    > sc->sc_txq_mcastq_maxdepth) {
2461			sc->sc_stats.ast_tx_mcastq_overflow++;
2462			error = ENOBUFS;
2463		}
2464
2465		if (error != 0) {
2466			m_freem(m);
2467			goto bad;
2468		}
2469	}
2470
2471	/*
2472	 * Grab a TX buffer and associated resources.
2473	 */
2474	bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
2475	if (bf == NULL) {
2476		sc->sc_stats.ast_tx_nobuf++;
2477		m_freem(m);
2478		error = ENOBUFS;
2479		goto bad;
2480	}
2481	ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n",
2482	    m, params,  bf);
2483
2484	if (params == NULL) {
2485		/*
2486		 * Legacy path; interpret frame contents to decide
2487		 * precisely how to send the frame.
2488		 */
2489		if (ath_tx_start(sc, ni, bf, m)) {
2490			error = EIO;		/* XXX */
2491			goto bad2;
2492		}
2493	} else {
2494		/*
2495		 * Caller supplied explicit parameters to use in
2496		 * sending the frame.
2497		 */
2498		if (ath_tx_raw_start(sc, ni, bf, m, params)) {
2499			error = EIO;		/* XXX */
2500			goto bad2;
2501		}
2502	}
2503	sc->sc_wd_timer = 5;
2504	sc->sc_stats.ast_tx_raw++;
2505
2506	/*
2507	 * Update the TIM - if there's anything queued to the
2508	 * software queue and power save is enabled, we should
2509	 * set the TIM.
2510	 */
2511	ath_tx_update_tim(sc, ni, 1);
2512
2513	ATH_TX_UNLOCK(sc);
2514
2515	ATH_PCU_LOCK(sc);
2516	sc->sc_txstart_cnt--;
2517	ATH_PCU_UNLOCK(sc);
2518
2519	/* Put the hardware back to sleep if required */
2520	ATH_LOCK(sc);
2521	ath_power_restore_power_state(sc);
2522	ATH_UNLOCK(sc);
2523
2524	return 0;
2525
2526bad2:
2527	ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, "
2528	    "bf=%p",
2529	    m,
2530	    params,
2531	    bf);
2532	ATH_TXBUF_LOCK(sc);
2533	ath_returnbuf_head(sc, bf);
2534	ATH_TXBUF_UNLOCK(sc);
2535
2536bad:
2537	ATH_TX_UNLOCK(sc);
2538
2539	ATH_PCU_LOCK(sc);
2540	sc->sc_txstart_cnt--;
2541	ATH_PCU_UNLOCK(sc);
2542
2543	/* Put the hardware back to sleep if required */
2544	ATH_LOCK(sc);
2545	ath_power_restore_power_state(sc);
2546	ATH_UNLOCK(sc);
2547
2548badbad:
2549	ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p",
2550	    m, params);
2551	sc->sc_stats.ast_tx_raw_fail++;
2552
2553	return error;
2554}
2555
2556/* Some helper functions */
2557
2558/*
2559 * ADDBA (and potentially others) need to be placed in the same
2560 * hardware queue as the TID/node it's relating to. This is so
2561 * it goes out after any pending non-aggregate frames to the
2562 * same node/TID.
2563 *
2564 * If this isn't done, the ADDBA can go out before the frames
2565 * queued in hardware. Even though these frames have a sequence
2566 * number -earlier- than the ADDBA can be transmitted (but
2567 * no frames whose sequence numbers are after the ADDBA should
2568 * be!) they'll arrive after the ADDBA - and the receiving end
2569 * will simply drop them as being out of the BAW.
2570 *
2571 * The frames can't be appended to the TID software queue - it'll
2572 * never be sent out. So these frames have to be directly
2573 * dispatched to the hardware, rather than queued in software.
2574 * So if this function returns true, the TXQ has to be
2575 * overridden and it has to be directly dispatched.
2576 *
2577 * It's a dirty hack, but someone's gotta do it.
2578 */
2579
2580/*
2581 * XXX doesn't belong here!
2582 */
2583static int
2584ieee80211_is_action(struct ieee80211_frame *wh)
2585{
2586	/* Type: Management frame? */
2587	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
2588	    IEEE80211_FC0_TYPE_MGT)
2589		return 0;
2590
2591	/* Subtype: Action frame? */
2592	if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) !=
2593	    IEEE80211_FC0_SUBTYPE_ACTION)
2594		return 0;
2595
2596	return 1;
2597}
2598
2599/*
2600 * Return an alternate TID for ADDBA request frames.
2601 *
2602 * Yes, this likely should be done in the net80211 layer.
2603 */
2604static int
2605ath_tx_action_frame_override_queue(struct ath_softc *sc,
2606    struct ieee80211_node *ni,
2607    struct mbuf *m0, int *tid)
2608{
2609	struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *);
2610	struct ieee80211_action_ba_addbarequest *ia;
2611	uint8_t *frm;
2612	uint16_t baparamset;
2613
2614	/* Not action frame? Bail */
2615	if (! ieee80211_is_action(wh))
2616		return 0;
2617
2618	/* XXX Not needed for frames we send? */
2619#if 0
2620	/* Correct length? */
2621	if (! ieee80211_parse_action(ni, m))
2622		return 0;
2623#endif
2624
2625	/* Extract out action frame */
2626	frm = (u_int8_t *)&wh[1];
2627	ia = (struct ieee80211_action_ba_addbarequest *) frm;
2628
2629	/* Not ADDBA? Bail */
2630	if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA)
2631		return 0;
2632	if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST)
2633		return 0;
2634
2635	/* Extract TID, return it */
2636	baparamset = le16toh(ia->rq_baparamset);
2637	*tid = (int) _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_TID);
2638
2639	return 1;
2640}
2641
2642/* Per-node software queue operations */
2643
2644/*
2645 * Add the current packet to the given BAW.
2646 * It is assumed that the current packet
2647 *
2648 * + fits inside the BAW;
2649 * + already has had a sequence number allocated.
2650 *
2651 * Since the BAW status may be modified by both the ath task and
2652 * the net80211/ifnet contexts, the TID must be locked.
2653 */
2654void
2655ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
2656    struct ath_tid *tid, struct ath_buf *bf)
2657{
2658	int index, cindex;
2659	struct ieee80211_tx_ampdu *tap;
2660
2661	ATH_TX_LOCK_ASSERT(sc);
2662
2663	if (bf->bf_state.bfs_isretried)
2664		return;
2665
2666	tap = ath_tx_get_tx_tid(an, tid->tid);
2667
2668	if (! bf->bf_state.bfs_dobaw) {
2669		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2670		    "%s: dobaw=0, seqno=%d, window %d:%d\n",
2671		    __func__, SEQNO(bf->bf_state.bfs_seqno),
2672		    tap->txa_start, tap->txa_wnd);
2673	}
2674
2675	if (bf->bf_state.bfs_addedbaw)
2676		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2677		    "%s: re-added? tid=%d, seqno %d; window %d:%d; "
2678		    "baw head=%d tail=%d\n",
2679		    __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2680		    tap->txa_start, tap->txa_wnd, tid->baw_head,
2681		    tid->baw_tail);
2682
2683	/*
2684	 * Verify that the given sequence number is not outside of the
2685	 * BAW.  Complain loudly if that's the case.
2686	 */
2687	if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
2688	    SEQNO(bf->bf_state.bfs_seqno))) {
2689		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2690		    "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; "
2691		    "baw head=%d tail=%d\n",
2692		    __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2693		    tap->txa_start, tap->txa_wnd, tid->baw_head,
2694		    tid->baw_tail);
2695	}
2696
2697	/*
2698	 * ni->ni_txseqs[] is the currently allocated seqno.
2699	 * the txa state contains the current baw start.
2700	 */
2701	index  = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));
2702	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2703	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2704	    "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d "
2705	    "baw head=%d tail=%d\n",
2706	    __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2707	    tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head,
2708	    tid->baw_tail);
2709
2710#if 0
2711	assert(tid->tx_buf[cindex] == NULL);
2712#endif
2713	if (tid->tx_buf[cindex] != NULL) {
2714		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2715		    "%s: ba packet dup (index=%d, cindex=%d, "
2716		    "head=%d, tail=%d)\n",
2717		    __func__, index, cindex, tid->baw_head, tid->baw_tail);
2718		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2719		    "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n",
2720		    __func__,
2721		    tid->tx_buf[cindex],
2722		    SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno),
2723		    bf,
2724		    SEQNO(bf->bf_state.bfs_seqno)
2725		);
2726	}
2727	tid->tx_buf[cindex] = bf;
2728
2729	if (index >= ((tid->baw_tail - tid->baw_head) &
2730	    (ATH_TID_MAX_BUFS - 1))) {
2731		tid->baw_tail = cindex;
2732		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
2733	}
2734}
2735
2736/*
2737 * Flip the BAW buffer entry over from the existing one to the new one.
2738 *
2739 * When software retransmitting a (sub-)frame, it is entirely possible that
2740 * the frame ath_buf is marked as BUSY and can't be immediately reused.
2741 * In that instance the buffer is cloned and the new buffer is used for
2742 * retransmit. We thus need to update the ath_buf slot in the BAW buf
2743 * tracking array to maintain consistency.
2744 */
2745static void
2746ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an,
2747    struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf)
2748{
2749	int index, cindex;
2750	struct ieee80211_tx_ampdu *tap;
2751	int seqno = SEQNO(old_bf->bf_state.bfs_seqno);
2752
2753	ATH_TX_LOCK_ASSERT(sc);
2754
2755	tap = ath_tx_get_tx_tid(an, tid->tid);
2756	index  = ATH_BA_INDEX(tap->txa_start, seqno);
2757	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2758
2759	/*
2760	 * Just warn for now; if it happens then we should find out
2761	 * about it. It's highly likely the aggregation session will
2762	 * soon hang.
2763	 */
2764	if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {
2765		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2766		    "%s: retransmitted buffer"
2767		    " has mismatching seqno's, BA session may hang.\n",
2768		    __func__);
2769		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2770		    "%s: old seqno=%d, new_seqno=%d\n", __func__,
2771		    old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno);
2772	}
2773
2774	if (tid->tx_buf[cindex] != old_bf) {
2775		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2776		    "%s: ath_buf pointer incorrect; "
2777		    " has m BA session may hang.\n", __func__);
2778		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2779		    "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf);
2780	}
2781
2782	tid->tx_buf[cindex] = new_bf;
2783}
2784
2785/*
2786 * seq_start - left edge of BAW
2787 * seq_next - current/next sequence number to allocate
2788 *
2789 * Since the BAW status may be modified by both the ath task and
2790 * the net80211/ifnet contexts, the TID must be locked.
2791 */
2792static void
2793ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
2794    struct ath_tid *tid, const struct ath_buf *bf)
2795{
2796	int index, cindex;
2797	struct ieee80211_tx_ampdu *tap;
2798	int seqno = SEQNO(bf->bf_state.bfs_seqno);
2799
2800	ATH_TX_LOCK_ASSERT(sc);
2801
2802	tap = ath_tx_get_tx_tid(an, tid->tid);
2803	index  = ATH_BA_INDEX(tap->txa_start, seqno);
2804	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2805
2806	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2807	    "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, "
2808	    "baw head=%d, tail=%d\n",
2809	    __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
2810	    cindex, tid->baw_head, tid->baw_tail);
2811
2812	/*
2813	 * If this occurs then we have a big problem - something else
2814	 * has slid tap->txa_start along without updating the BAW
2815	 * tracking start/end pointers. Thus the TX BAW state is now
2816	 * completely busted.
2817	 *
2818	 * But for now, since I haven't yet fixed TDMA and buffer cloning,
2819	 * it's quite possible that a cloned buffer is making its way
2820	 * here and causing it to fire off. Disable TDMA for now.
2821	 */
2822	if (tid->tx_buf[cindex] != bf) {
2823		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2824		    "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
2825		    __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
2826		    tid->tx_buf[cindex],
2827		    (tid->tx_buf[cindex] != NULL) ?
2828		      SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1);
2829	}
2830
2831	tid->tx_buf[cindex] = NULL;
2832
2833	while (tid->baw_head != tid->baw_tail &&
2834	    !tid->tx_buf[tid->baw_head]) {
2835		INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
2836		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
2837	}
2838	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2839	    "%s: tid=%d: baw is now %d:%d, baw head=%d\n",
2840	    __func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head);
2841}
2842
2843static void
2844ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,
2845    struct ath_buf *bf)
2846{
2847	struct ieee80211_frame *wh;
2848
2849	ATH_TX_LOCK_ASSERT(sc);
2850
2851	if (tid->an->an_leak_count > 0) {
2852		wh = mtod(bf->bf_m, struct ieee80211_frame *);
2853
2854		/*
2855		 * Update MORE based on the software/net80211 queue states.
2856		 */
2857		if ((tid->an->an_stack_psq > 0)
2858		    || (tid->an->an_swq_depth > 0))
2859			wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
2860		else
2861			wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;
2862
2863		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
2864		    "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n",
2865		    __func__,
2866		    tid->an->an_node.ni_macaddr,
2867		    ":",
2868		    tid->an->an_leak_count,
2869		    tid->an->an_stack_psq,
2870		    tid->an->an_swq_depth,
2871		    !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));
2872
2873		/*
2874		 * Re-sync the underlying buffer.
2875		 */
2876		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
2877		    BUS_DMASYNC_PREWRITE);
2878
2879		tid->an->an_leak_count --;
2880	}
2881}
2882
2883static int
2884ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)
2885{
2886
2887	ATH_TX_LOCK_ASSERT(sc);
2888
2889	if (tid->an->an_leak_count > 0) {
2890		return (1);
2891	}
2892	if (tid->paused)
2893		return (0);
2894	return (1);
2895}
2896
2897/*
2898 * Mark the current node/TID as ready to TX.
2899 *
2900 * This is done to make it easy for the software scheduler to
2901 * find which nodes have data to send.
2902 *
2903 * The TXQ lock must be held.
2904 */
2905void
2906ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
2907{
2908	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2909
2910	ATH_TX_LOCK_ASSERT(sc);
2911
2912	/*
2913	 * If we are leaking out a frame to this destination
2914	 * for PS-POLL, ensure that we allow scheduling to
2915	 * occur.
2916	 */
2917	if (! ath_tx_tid_can_tx_or_sched(sc, tid))
2918		return;		/* paused, can't schedule yet */
2919
2920	if (tid->sched)
2921		return;		/* already scheduled */
2922
2923	tid->sched = 1;
2924
2925#if 0
2926	/*
2927	 * If this is a sleeping node we're leaking to, given
2928	 * it a higher priority.  This is so bad for QoS it hurts.
2929	 */
2930	if (tid->an->an_leak_count) {
2931		TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
2932	} else {
2933		TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2934	}
2935#endif
2936
2937	/*
2938	 * We can't do the above - it'll confuse the TXQ software
2939	 * scheduler which will keep checking the _head_ TID
2940	 * in the list to see if it has traffic.  If we queue
2941	 * a TID to the head of the list and it doesn't transmit,
2942	 * we'll check it again.
2943	 *
2944	 * So, get the rest of this leaking frames support working
2945	 * and reliable first and _then_ optimise it so they're
2946	 * pushed out in front of any other pending software
2947	 * queued nodes.
2948	 */
2949	TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2950}
2951
2952/*
2953 * Mark the current node as no longer needing to be polled for
2954 * TX packets.
2955 *
2956 * The TXQ lock must be held.
2957 */
2958static void
2959ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)
2960{
2961	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2962
2963	ATH_TX_LOCK_ASSERT(sc);
2964
2965	if (tid->sched == 0)
2966		return;
2967
2968	tid->sched = 0;
2969	TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
2970}
2971
2972/*
2973 * Assign a sequence number manually to the given frame.
2974 *
2975 * This should only be called for A-MPDU TX frames.
2976 *
2977 * Note: for group addressed frames, the sequence number
2978 * should be from NONQOS_TID, and net80211 should have
2979 * already assigned it for us.
2980 */
2981static ieee80211_seq
2982ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
2983    struct ath_buf *bf, struct mbuf *m0)
2984{
2985	struct ieee80211_frame *wh;
2986	int tid;
2987	ieee80211_seq seqno;
2988	uint8_t subtype;
2989
2990	wh = mtod(m0, struct ieee80211_frame *);
2991	tid = ieee80211_gettid(wh);
2992
2993	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, qos has seq=%d\n",
2994	    __func__, tid, IEEE80211_QOS_HAS_SEQ(wh));
2995
2996	/* XXX Is it a control frame? Ignore */
2997
2998	/* Does the packet require a sequence number? */
2999	if (! IEEE80211_QOS_HAS_SEQ(wh))
3000		return -1;
3001
3002	ATH_TX_LOCK_ASSERT(sc);
3003
3004	/*
3005	 * Is it a QOS NULL Data frame? Give it a sequence number from
3006	 * the default TID (IEEE80211_NONQOS_TID.)
3007	 *
3008	 * The RX path of everything I've looked at doesn't include the NULL
3009	 * data frame sequence number in the aggregation state updates, so
3010	 * assigning it a sequence number there will cause a BAW hole on the
3011	 * RX side.
3012	 */
3013	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3014	if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) {
3015		/* XXX no locking for this TID? This is a bit of a problem. */
3016		seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
3017		INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
3018	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3019		/*
3020		 * group addressed frames get a sequence number from
3021		 * a different sequence number space.
3022		 */
3023		seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
3024		INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
3025	} else {
3026		/* Manually assign sequence number */
3027		seqno = ni->ni_txseqs[tid];
3028		INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE);
3029	}
3030	*(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
3031	M_SEQNO_SET(m0, seqno);
3032
3033	/* Return so caller can do something with it if needed */
3034	DPRINTF(sc, ATH_DEBUG_SW_TX,
3035	    "%s:  -> subtype=0x%x, tid=%d, seqno=%d\n",
3036	    __func__, subtype, tid, seqno);
3037	return seqno;
3038}
3039
3040/*
3041 * Attempt to direct dispatch an aggregate frame to hardware.
3042 * If the frame is out of BAW, queue.
3043 * Otherwise, schedule it as a single frame.
3044 */
3045static void
3046ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an,
3047    struct ath_txq *txq, struct ath_buf *bf)
3048{
3049	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
3050	struct ieee80211_tx_ampdu *tap;
3051
3052	ATH_TX_LOCK_ASSERT(sc);
3053
3054	tap = ath_tx_get_tx_tid(an, tid->tid);
3055
3056	/* paused? queue */
3057	if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
3058		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3059		/* XXX don't sched - we're paused! */
3060		return;
3061	}
3062
3063	/* outside baw? queue */
3064	if (bf->bf_state.bfs_dobaw &&
3065	    (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
3066	    SEQNO(bf->bf_state.bfs_seqno)))) {
3067		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3068		ath_tx_tid_sched(sc, tid);
3069		return;
3070	}
3071
3072	/*
3073	 * This is a temporary check and should be removed once
3074	 * all the relevant code paths have been fixed.
3075	 *
3076	 * During aggregate retries, it's possible that the head
3077	 * frame will fail (which has the bfs_aggr and bfs_nframes
3078	 * fields set for said aggregate) and will be retried as
3079	 * a single frame.  In this instance, the values should
3080	 * be reset or the completion code will get upset with you.
3081	 */
3082	if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) {
3083		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
3084		    "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__,
3085		    bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes);
3086		bf->bf_state.bfs_aggr = 0;
3087		bf->bf_state.bfs_nframes = 1;
3088	}
3089
3090	/* Update CLRDMASK just before this frame is queued */
3091	ath_tx_update_clrdmask(sc, tid, bf);
3092
3093	/* Direct dispatch to hardware */
3094	ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen,
3095	    false);
3096	ath_tx_calc_duration(sc, bf);
3097	ath_tx_calc_protection(sc, bf);
3098	ath_tx_set_rtscts(sc, bf);
3099	ath_tx_rate_fill_rcflags(sc, bf);
3100	ath_tx_setds(sc, bf);
3101
3102	/* Statistics */
3103	sc->sc_aggr_stats.aggr_low_hwq_single_pkt++;
3104
3105	/* Track per-TID hardware queue depth correctly */
3106	tid->hwq_depth++;
3107
3108	/* Add to BAW */
3109	if (bf->bf_state.bfs_dobaw) {
3110		ath_tx_addto_baw(sc, an, tid, bf);
3111		bf->bf_state.bfs_addedbaw = 1;
3112	}
3113
3114	/* Set completion handler, multi-frame aggregate or not */
3115	bf->bf_comp = ath_tx_aggr_comp;
3116
3117	/*
3118	 * Update the current leak count if
3119	 * we're leaking frames; and set the
3120	 * MORE flag as appropriate.
3121	 */
3122	ath_tx_leak_count_update(sc, tid, bf);
3123
3124	/* Hand off to hardware */
3125	ath_tx_handoff(sc, txq, bf);
3126}
3127
3128/*
3129 * Attempt to send the packet.
3130 * If the queue isn't busy, direct-dispatch.
3131 * If the queue is busy enough, queue the given packet on the
3132 *  relevant software queue.
3133 */
3134void
3135ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
3136    struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
3137{
3138	struct ath_node *an = ATH_NODE(ni);
3139	struct ieee80211_frame *wh;
3140	struct ath_tid *atid;
3141	int pri, tid;
3142	struct mbuf *m0 = bf->bf_m;
3143
3144	ATH_TX_LOCK_ASSERT(sc);
3145
3146	/* Fetch the TID - non-QoS frames get assigned to TID 16 */
3147	wh = mtod(m0, struct ieee80211_frame *);
3148	pri = ath_tx_getac(sc, m0);
3149	tid = ath_tx_gettid(sc, m0);
3150	atid = &an->an_tid[tid];
3151
3152	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
3153	    __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
3154
3155	/* Set local packet state, used to queue packets to hardware */
3156	/* XXX potentially duplicate info, re-check */
3157	bf->bf_state.bfs_tid = tid;
3158	bf->bf_state.bfs_tx_queue = txq->axq_qnum;
3159	bf->bf_state.bfs_pri = pri;
3160
3161	/*
3162	 * If the hardware queue isn't busy, queue it directly.
3163	 * If the hardware queue is busy, queue it.
3164	 * If the TID is paused or the traffic it outside BAW, software
3165	 * queue it.
3166	 *
3167	 * If the node is in power-save and we're leaking a frame,
3168	 * leak a single frame.
3169	 */
3170	if (! ath_tx_tid_can_tx_or_sched(sc, atid)) {
3171		/* TID is paused, queue */
3172		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__);
3173		/*
3174		 * If the caller requested that it be sent at a high
3175		 * priority, queue it at the head of the list.
3176		 */
3177		if (queue_to_head)
3178			ATH_TID_INSERT_HEAD(atid, bf, bf_list);
3179		else
3180			ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3181	} else if (ath_tx_ampdu_pending(sc, an, tid)) {
3182		/* AMPDU pending; queue */
3183		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__);
3184		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3185		/* XXX sched? */
3186	} else if (ath_tx_ampdu_running(sc, an, tid)) {
3187		/*
3188		 * AMPDU running, queue single-frame if the hardware queue
3189		 * isn't busy.
3190		 *
3191		 * If the hardware queue is busy, sending an aggregate frame
3192		 * then just hold off so we can queue more aggregate frames.
3193		 *
3194		 * Otherwise we may end up with single frames leaking through
3195		 * because we are dispatching them too quickly.
3196		 *
3197		 * TODO: maybe we should treat this as two policies - minimise
3198		 * latency, or maximise throughput.  Then for BE/BK we can
3199		 * maximise throughput, and VO/VI (if AMPDU is enabled!)
3200		 * minimise latency.
3201		 */
3202
3203		/*
3204		 * Always queue the frame to the tail of the list.
3205		 */
3206		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3207
3208		/*
3209		 * If the hardware queue isn't busy, direct dispatch
3210		 * the head frame in the list.
3211		 *
3212		 * Note: if we're say, configured to do ADDBA but not A-MPDU
3213		 * then maybe we want to still queue two non-aggregate frames
3214		 * to the hardware.  Again with the per-TID policy
3215		 * configuration..)
3216		 *
3217		 * Otherwise, schedule the TID.
3218		 */
3219		/* XXX TXQ locking */
3220		if (txq->axq_depth + txq->fifo.axq_depth == 0) {
3221			bf = ATH_TID_FIRST(atid);
3222			ATH_TID_REMOVE(atid, bf, bf_list);
3223
3224			/*
3225			 * Ensure it's definitely treated as a non-AMPDU
3226			 * frame - this information may have been left
3227			 * over from a previous attempt.
3228			 */
3229			bf->bf_state.bfs_aggr = 0;
3230			bf->bf_state.bfs_nframes = 1;
3231
3232			/* Queue to the hardware */
3233			ath_tx_xmit_aggr(sc, an, txq, bf);
3234			DPRINTF(sc, ATH_DEBUG_SW_TX,
3235			    "%s: xmit_aggr\n",
3236			    __func__);
3237		} else {
3238			DPRINTF(sc, ATH_DEBUG_SW_TX,
3239			    "%s: ampdu; swq'ing\n",
3240			    __func__);
3241
3242			ath_tx_tid_sched(sc, atid);
3243		}
3244	/*
3245	 * If we're not doing A-MPDU, be prepared to direct dispatch
3246	 * up to both limits if possible.  This particular corner
3247	 * case may end up with packet starvation between aggregate
3248	 * traffic and non-aggregate traffic: we want to ensure
3249	 * that non-aggregate stations get a few frames queued to the
3250	 * hardware before the aggregate station(s) get their chance.
3251	 *
3252	 * So if you only ever see a couple of frames direct dispatched
3253	 * to the hardware from a non-AMPDU client, check both here
3254	 * and in the software queue dispatcher to ensure that those
3255	 * non-AMPDU stations get a fair chance to transmit.
3256	 */
3257	/* XXX TXQ locking */
3258	} else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3259		    (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3260		/* AMPDU not running, attempt direct dispatch */
3261		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);
3262		/* See if clrdmask needs to be set */
3263		ath_tx_update_clrdmask(sc, atid, bf);
3264
3265		/*
3266		 * Update the current leak count if
3267		 * we're leaking frames; and set the
3268		 * MORE flag as appropriate.
3269		 */
3270		ath_tx_leak_count_update(sc, atid, bf);
3271
3272		/*
3273		 * Dispatch the frame.
3274		 */
3275		ath_tx_xmit_normal(sc, txq, bf);
3276	} else {
3277		/* Busy; queue */
3278		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__);
3279		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3280		ath_tx_tid_sched(sc, atid);
3281	}
3282}
3283
3284/*
3285 * Only set the clrdmask bit if none of the nodes are currently
3286 * filtered.
3287 *
3288 * XXX TODO: go through all the callers and check to see
3289 * which are being called in the context of looping over all
3290 * TIDs (eg, if all tids are being paused, resumed, etc.)
3291 * That'll avoid O(n^2) complexity here.
3292 */
3293static void
3294ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an)
3295{
3296	int i;
3297
3298	ATH_TX_LOCK_ASSERT(sc);
3299
3300	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3301		if (an->an_tid[i].isfiltered == 1)
3302			return;
3303	}
3304	an->clrdmask = 1;
3305}
3306
3307/*
3308 * Configure the per-TID node state.
3309 *
3310 * This likely belongs in if_ath_node.c but I can't think of anywhere
3311 * else to put it just yet.
3312 *
3313 * This sets up the SLISTs and the mutex as appropriate.
3314 */
3315void
3316ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an)
3317{
3318	int i, j;
3319	struct ath_tid *atid;
3320
3321	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3322		atid = &an->an_tid[i];
3323
3324		/* XXX now with this bzer(), is the field 0'ing needed? */
3325		bzero(atid, sizeof(*atid));
3326
3327		TAILQ_INIT(&atid->tid_q);
3328		TAILQ_INIT(&atid->filtq.tid_q);
3329		atid->tid = i;
3330		atid->an = an;
3331		for (j = 0; j < ATH_TID_MAX_BUFS; j++)
3332			atid->tx_buf[j] = NULL;
3333		atid->baw_head = atid->baw_tail = 0;
3334		atid->paused = 0;
3335		atid->sched = 0;
3336		atid->hwq_depth = 0;
3337		atid->cleanup_inprogress = 0;
3338		if (i == IEEE80211_NONQOS_TID)
3339			atid->ac = ATH_NONQOS_TID_AC;
3340		else
3341			atid->ac = TID_TO_WME_AC(i);
3342	}
3343	an->clrdmask = 1;	/* Always start by setting this bit */
3344}
3345
3346/*
3347 * Pause the current TID. This stops packets from being transmitted
3348 * on it.
3349 *
3350 * Since this is also called from upper layers as well as the driver,
3351 * it will get the TID lock.
3352 */
3353static void
3354ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
3355{
3356
3357	ATH_TX_LOCK_ASSERT(sc);
3358	tid->paused++;
3359	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, paused = %d\n",
3360	    __func__,
3361	    tid->an->an_node.ni_macaddr, ":",
3362	    tid->tid,
3363	    tid->paused);
3364}
3365
3366/*
3367 * Unpause the current TID, and schedule it if needed.
3368 */
3369static void
3370ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)
3371{
3372	ATH_TX_LOCK_ASSERT(sc);
3373
3374	/*
3375	 * There's some odd places where ath_tx_tid_resume() is called
3376	 * when it shouldn't be; this works around that particular issue
3377	 * until it's actually resolved.
3378	 */
3379	if (tid->paused == 0) {
3380		device_printf(sc->sc_dev,
3381		    "%s: [%6D]: tid=%d, paused=0?\n",
3382		    __func__,
3383		    tid->an->an_node.ni_macaddr, ":",
3384		    tid->tid);
3385	} else {
3386		tid->paused--;
3387	}
3388
3389	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3390	    "%s: [%6D]: tid=%d, unpaused = %d\n",
3391	    __func__,
3392	    tid->an->an_node.ni_macaddr, ":",
3393	    tid->tid,
3394	    tid->paused);
3395
3396	if (tid->paused)
3397		return;
3398
3399	/*
3400	 * Override the clrdmask configuration for the next frame
3401	 * from this TID, just to get the ball rolling.
3402	 */
3403	ath_tx_set_clrdmask(sc, tid->an);
3404
3405	if (tid->axq_depth == 0)
3406		return;
3407
3408	/* XXX isfiltered shouldn't ever be 0 at this point */
3409	if (tid->isfiltered == 1) {
3410		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n",
3411		    __func__);
3412		return;
3413	}
3414
3415	ath_tx_tid_sched(sc, tid);
3416
3417	/*
3418	 * Queue the software TX scheduler.
3419	 */
3420	ath_tx_swq_kick(sc);
3421}
3422
3423/*
3424 * Add the given ath_buf to the TID filtered frame list.
3425 * This requires the TID be filtered.
3426 */
3427static void
3428ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid,
3429    struct ath_buf *bf)
3430{
3431
3432	ATH_TX_LOCK_ASSERT(sc);
3433
3434	if (!tid->isfiltered)
3435		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n",
3436		    __func__);
3437
3438	DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf);
3439
3440	/* Set the retry bit and bump the retry counter */
3441	ath_tx_set_retry(sc, bf);
3442	sc->sc_stats.ast_tx_swfiltered++;
3443
3444	ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list);
3445}
3446
3447/*
3448 * Handle a completed filtered frame from the given TID.
3449 * This just enables/pauses the filtered frame state if required
3450 * and appends the filtered frame to the filtered queue.
3451 */
3452static void
3453ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid,
3454    struct ath_buf *bf)
3455{
3456
3457	ATH_TX_LOCK_ASSERT(sc);
3458
3459	if (! tid->isfiltered) {
3460		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n",
3461		    __func__, tid->tid);
3462		tid->isfiltered = 1;
3463		ath_tx_tid_pause(sc, tid);
3464	}
3465
3466	/* Add the frame to the filter queue */
3467	ath_tx_tid_filt_addbuf(sc, tid, bf);
3468}
3469
3470/*
3471 * Complete the filtered frame TX completion.
3472 *
3473 * If there are no more frames in the hardware queue, unpause/unfilter
3474 * the TID if applicable.  Otherwise we will wait for a node PS transition
3475 * to unfilter.
3476 */
3477static void
3478ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid)
3479{
3480	struct ath_buf *bf;
3481	int do_resume = 0;
3482
3483	ATH_TX_LOCK_ASSERT(sc);
3484
3485	if (tid->hwq_depth != 0)
3486		return;
3487
3488	DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n",
3489	    __func__, tid->tid);
3490	if (tid->isfiltered == 1) {
3491		tid->isfiltered = 0;
3492		do_resume = 1;
3493	}
3494
3495	/* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */
3496	ath_tx_set_clrdmask(sc, tid->an);
3497
3498	/* XXX this is really quite inefficient */
3499	while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) {
3500		ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3501		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3502	}
3503
3504	/* And only resume if we had paused before */
3505	if (do_resume)
3506		ath_tx_tid_resume(sc, tid);
3507}
3508
3509/*
3510 * Called when a single (aggregate or otherwise) frame is completed.
3511 *
3512 * Returns 0 if the buffer could be added to the filtered list
3513 * (cloned or otherwise), 1 if the buffer couldn't be added to the
3514 * filtered list (failed clone; expired retry) and the caller should
3515 * free it and handle it like a failure (eg by sending a BAR.)
3516 *
3517 * since the buffer may be cloned, bf must be not touched after this
3518 * if the return value is 0.
3519 */
3520static int
3521ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid,
3522    struct ath_buf *bf)
3523{
3524	struct ath_buf *nbf;
3525	int retval;
3526
3527	ATH_TX_LOCK_ASSERT(sc);
3528
3529	/*
3530	 * Don't allow a filtered frame to live forever.
3531	 */
3532	if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3533		sc->sc_stats.ast_tx_swretrymax++;
3534		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3535		    "%s: bf=%p, seqno=%d, exceeded retries\n",
3536		    __func__,
3537		    bf,
3538		    SEQNO(bf->bf_state.bfs_seqno));
3539		retval = 1; /* error */
3540		goto finish;
3541	}
3542
3543	/*
3544	 * A busy buffer can't be added to the retry list.
3545	 * It needs to be cloned.
3546	 */
3547	if (bf->bf_flags & ATH_BUF_BUSY) {
3548		nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3549		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3550		    "%s: busy buffer clone: %p -> %p\n",
3551		    __func__, bf, nbf);
3552	} else {
3553		nbf = bf;
3554	}
3555
3556	if (nbf == NULL) {
3557		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3558		    "%s: busy buffer couldn't be cloned (%p)!\n",
3559		    __func__, bf);
3560		retval = 1; /* error */
3561	} else {
3562		ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3563		retval = 0; /* ok */
3564	}
3565finish:
3566	ath_tx_tid_filt_comp_complete(sc, tid);
3567
3568	return (retval);
3569}
3570
3571static void
3572ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid,
3573    struct ath_buf *bf_first, ath_bufhead *bf_q)
3574{
3575	struct ath_buf *bf, *bf_next, *nbf;
3576
3577	ATH_TX_LOCK_ASSERT(sc);
3578
3579	bf = bf_first;
3580	while (bf) {
3581		bf_next = bf->bf_next;
3582		bf->bf_next = NULL;	/* Remove it from the aggr list */
3583
3584		/*
3585		 * Don't allow a filtered frame to live forever.
3586		 */
3587		if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3588			sc->sc_stats.ast_tx_swretrymax++;
3589			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3590			    "%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n",
3591			    __func__,
3592			    tid->tid,
3593			    bf,
3594			    SEQNO(bf->bf_state.bfs_seqno));
3595			TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3596			goto next;
3597		}
3598
3599		if (bf->bf_flags & ATH_BUF_BUSY) {
3600			nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3601			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3602			    "%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n",
3603			    __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno));
3604		} else {
3605			nbf = bf;
3606		}
3607
3608		/*
3609		 * If the buffer couldn't be cloned, add it to bf_q;
3610		 * the caller will free the buffer(s) as required.
3611		 */
3612		if (nbf == NULL) {
3613			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3614			    "%s: tid=%d, buffer couldn't be cloned! (%p) seqno=%d\n",
3615			    __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno));
3616			TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3617		} else {
3618			ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3619		}
3620next:
3621		bf = bf_next;
3622	}
3623
3624	ath_tx_tid_filt_comp_complete(sc, tid);
3625}
3626
3627/*
3628 * Suspend the queue because we need to TX a BAR.
3629 */
3630static void
3631ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid)
3632{
3633
3634	ATH_TX_LOCK_ASSERT(sc);
3635
3636	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3637	    "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n",
3638	    __func__,
3639	    tid->tid,
3640	    tid->bar_wait,
3641	    tid->bar_tx);
3642
3643	/* We shouldn't be called when bar_tx is 1 */
3644	if (tid->bar_tx) {
3645		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3646		    "%s: bar_tx is 1?!\n", __func__);
3647	}
3648
3649	/* If we've already been called, just be patient. */
3650	if (tid->bar_wait)
3651		return;
3652
3653	/* Wait! */
3654	tid->bar_wait = 1;
3655
3656	/* Only one pause, no matter how many frames fail */
3657	ath_tx_tid_pause(sc, tid);
3658}
3659
3660/*
3661 * We've finished with BAR handling - either we succeeded or
3662 * failed. Either way, unsuspend TX.
3663 */
3664static void
3665ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid)
3666{
3667
3668	ATH_TX_LOCK_ASSERT(sc);
3669
3670	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3671	    "%s: %6D: TID=%d, called\n",
3672	    __func__,
3673	    tid->an->an_node.ni_macaddr,
3674	    ":",
3675	    tid->tid);
3676
3677	if (tid->bar_tx == 0 || tid->bar_wait == 0) {
3678		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3679		    "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3680		    __func__, tid->an->an_node.ni_macaddr, ":",
3681		    tid->tid, tid->bar_tx, tid->bar_wait);
3682	}
3683
3684	tid->bar_tx = tid->bar_wait = 0;
3685	ath_tx_tid_resume(sc, tid);
3686}
3687
3688/*
3689 * Return whether we're ready to TX a BAR frame.
3690 *
3691 * Requires the TID lock be held.
3692 */
3693static int
3694ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid)
3695{
3696
3697	ATH_TX_LOCK_ASSERT(sc);
3698
3699	if (tid->bar_wait == 0 || tid->hwq_depth > 0)
3700		return (0);
3701
3702	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3703	    "%s: %6D: TID=%d, bar ready\n",
3704	    __func__,
3705	    tid->an->an_node.ni_macaddr,
3706	    ":",
3707	    tid->tid);
3708
3709	return (1);
3710}
3711
3712/*
3713 * Check whether the current TID is ready to have a BAR
3714 * TXed and if so, do the TX.
3715 *
3716 * Since the TID/TXQ lock can't be held during a call to
3717 * ieee80211_send_bar(), we have to do the dirty thing of unlocking it,
3718 * sending the BAR and locking it again.
3719 *
3720 * Eventually, the code to send the BAR should be broken out
3721 * from this routine so the lock doesn't have to be reacquired
3722 * just to be immediately dropped by the caller.
3723 */
3724static void
3725ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid)
3726{
3727	struct ieee80211_tx_ampdu *tap;
3728
3729	ATH_TX_LOCK_ASSERT(sc);
3730
3731	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3732	    "%s: %6D: TID=%d, called\n",
3733	    __func__,
3734	    tid->an->an_node.ni_macaddr,
3735	    ":",
3736	    tid->tid);
3737
3738	tap = ath_tx_get_tx_tid(tid->an, tid->tid);
3739
3740	/*
3741	 * This is an error condition!
3742	 */
3743	if (tid->bar_wait == 0 || tid->bar_tx == 1) {
3744		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3745		    "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3746		    __func__, tid->an->an_node.ni_macaddr, ":",
3747		    tid->tid, tid->bar_tx, tid->bar_wait);
3748		return;
3749	}
3750
3751	/* Don't do anything if we still have pending frames */
3752	if (tid->hwq_depth > 0) {
3753		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3754		    "%s: %6D: TID=%d, hwq_depth=%d, waiting\n",
3755		    __func__,
3756		    tid->an->an_node.ni_macaddr,
3757		    ":",
3758		    tid->tid,
3759		    tid->hwq_depth);
3760		return;
3761	}
3762
3763	/* We're now about to TX */
3764	tid->bar_tx = 1;
3765
3766	/*
3767	 * Override the clrdmask configuration for the next frame,
3768	 * just to get the ball rolling.
3769	 */
3770	ath_tx_set_clrdmask(sc, tid->an);
3771
3772	/*
3773	 * Calculate new BAW left edge, now that all frames have either
3774	 * succeeded or failed.
3775	 *
3776	 * XXX verify this is _actually_ the valid value to begin at!
3777	 */
3778	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3779	    "%s: %6D: TID=%d, new BAW left edge=%d\n",
3780	    __func__,
3781	    tid->an->an_node.ni_macaddr,
3782	    ":",
3783	    tid->tid,
3784	    tap->txa_start);
3785
3786	/* Try sending the BAR frame */
3787	/* We can't hold the lock here! */
3788
3789	ATH_TX_UNLOCK(sc);
3790	if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) {
3791		/* Success? Now we wait for notification that it's done */
3792		ATH_TX_LOCK(sc);
3793		return;
3794	}
3795
3796	/* Failure? For now, warn loudly and continue */
3797	ATH_TX_LOCK(sc);
3798	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3799	    "%s: %6D: TID=%d, failed to TX BAR, continue!\n",
3800	    __func__, tid->an->an_node.ni_macaddr, ":",
3801	    tid->tid);
3802	ath_tx_tid_bar_unsuspend(sc, tid);
3803}
3804
3805static void
3806ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an,
3807    struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)
3808{
3809
3810	ATH_TX_LOCK_ASSERT(sc);
3811
3812	/*
3813	 * If the current TID is running AMPDU, update
3814	 * the BAW.
3815	 */
3816	if (ath_tx_ampdu_running(sc, an, tid->tid) &&
3817	    bf->bf_state.bfs_dobaw) {
3818		/*
3819		 * Only remove the frame from the BAW if it's
3820		 * been transmitted at least once; this means
3821		 * the frame was in the BAW to begin with.
3822		 */
3823		if (bf->bf_state.bfs_retries > 0) {
3824			ath_tx_update_baw(sc, an, tid, bf);
3825			bf->bf_state.bfs_dobaw = 0;
3826		}
3827#if 0
3828		/*
3829		 * This has become a non-fatal error now
3830		 */
3831		if (! bf->bf_state.bfs_addedbaw)
3832			DPRINTF(sc, ATH_DEBUG_SW_TX_BAW
3833			    "%s: wasn't added: seqno %d\n",
3834			    __func__, SEQNO(bf->bf_state.bfs_seqno));
3835#endif
3836	}
3837
3838	/* Strip it out of an aggregate list if it was in one */
3839	bf->bf_next = NULL;
3840
3841	/* Insert on the free queue to be freed by the caller */
3842	TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
3843}
3844
3845static void
3846ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,
3847    const char *pfx, struct ath_tid *tid, struct ath_buf *bf)
3848{
3849	struct ieee80211_node *ni = &an->an_node;
3850	struct ath_txq *txq;
3851	struct ieee80211_tx_ampdu *tap;
3852
3853	txq = sc->sc_ac2q[tid->ac];
3854	tap = ath_tx_get_tx_tid(an, tid->tid);
3855
3856	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3857	    "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, "
3858	    "seqno=%d, retry=%d\n",
3859	    __func__,
3860	    pfx,
3861	    ni->ni_macaddr,
3862	    ":",
3863	    bf,
3864	    bf->bf_state.bfs_addedbaw,
3865	    bf->bf_state.bfs_dobaw,
3866	    SEQNO(bf->bf_state.bfs_seqno),
3867	    bf->bf_state.bfs_retries);
3868	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3869	    "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3870	    __func__,
3871	    pfx,
3872	    ni->ni_macaddr,
3873	    ":",
3874	    bf,
3875	    txq->axq_qnum,
3876	    txq->axq_depth,
3877	    txq->axq_aggr_depth);
3878	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3879	    "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, "
3880	      "isfiltered=%d\n",
3881	    __func__,
3882	    pfx,
3883	    ni->ni_macaddr,
3884	    ":",
3885	    bf,
3886	    tid->axq_depth,
3887	    tid->hwq_depth,
3888	    tid->bar_wait,
3889	    tid->isfiltered);
3890	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3891	    "%s: %s: %6D: tid %d: "
3892	    "sched=%d, paused=%d, "
3893	    "incomp=%d, baw_head=%d, "
3894	    "baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
3895	     __func__,
3896	     pfx,
3897	     ni->ni_macaddr,
3898	     ":",
3899	     tid->tid,
3900	     tid->sched, tid->paused,
3901	     tid->incomp, tid->baw_head,
3902	     tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
3903	     ni->ni_txseqs[tid->tid]);
3904
3905	/* XXX Dump the frame, see what it is? */
3906	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
3907		ieee80211_dump_pkt(ni->ni_ic,
3908		    mtod(bf->bf_m, const uint8_t *),
3909		    bf->bf_m->m_len, 0, -1);
3910}
3911
3912/*
3913 * Free any packets currently pending in the software TX queue.
3914 *
3915 * This will be called when a node is being deleted.
3916 *
3917 * It can also be called on an active node during an interface
3918 * reset or state transition.
3919 *
3920 * (From Linux/reference):
3921 *
3922 * TODO: For frame(s) that are in the retry state, we will reuse the
3923 * sequence number(s) without setting the retry bit. The
3924 * alternative is to give up on these and BAR the receiver's window
3925 * forward.
3926 */
3927static void
3928ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,
3929    struct ath_tid *tid, ath_bufhead *bf_cq)
3930{
3931	struct ath_buf *bf;
3932	struct ieee80211_tx_ampdu *tap;
3933	struct ieee80211_node *ni = &an->an_node;
3934	int t;
3935
3936	tap = ath_tx_get_tx_tid(an, tid->tid);
3937
3938	ATH_TX_LOCK_ASSERT(sc);
3939
3940	/* Walk the queue, free frames */
3941	t = 0;
3942	for (;;) {
3943		bf = ATH_TID_FIRST(tid);
3944		if (bf == NULL) {
3945			break;
3946		}
3947
3948		if (t == 0) {
3949			ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
3950//			t = 1;
3951		}
3952
3953		ATH_TID_REMOVE(tid, bf, bf_list);
3954		ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3955	}
3956
3957	/* And now, drain the filtered frame queue */
3958	t = 0;
3959	for (;;) {
3960		bf = ATH_TID_FILT_FIRST(tid);
3961		if (bf == NULL)
3962			break;
3963
3964		if (t == 0) {
3965			ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
3966//			t = 1;
3967		}
3968
3969		ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3970		ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3971	}
3972
3973	/*
3974	 * Override the clrdmask configuration for the next frame
3975	 * in case there is some future transmission, just to get
3976	 * the ball rolling.
3977	 *
3978	 * This won't hurt things if the TID is about to be freed.
3979	 */
3980	ath_tx_set_clrdmask(sc, tid->an);
3981
3982	/*
3983	 * Now that it's completed, grab the TID lock and update
3984	 * the sequence number and BAW window.
3985	 * Because sequence numbers have been assigned to frames
3986	 * that haven't been sent yet, it's entirely possible
3987	 * we'll be called with some pending frames that have not
3988	 * been transmitted.
3989	 *
3990	 * The cleaner solution is to do the sequence number allocation
3991	 * when the packet is first transmitted - and thus the "retries"
3992	 * check above would be enough to update the BAW/seqno.
3993	 */
3994
3995	/* But don't do it for non-QoS TIDs */
3996	if (tap) {
3997#if 1
3998		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3999		    "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n",
4000		    __func__,
4001		    ni->ni_macaddr,
4002		    ":",
4003		    an,
4004		    tid->tid,
4005		    tap->txa_start);
4006#endif
4007		ni->ni_txseqs[tid->tid] = tap->txa_start;
4008		tid->baw_tail = tid->baw_head;
4009	}
4010}
4011
4012/*
4013 * Reset the TID state.  This must be only called once the node has
4014 * had its frames flushed from this TID, to ensure that no other
4015 * pause / unpause logic can kick in.
4016 */
4017static void
4018ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
4019{
4020
4021#if 0
4022	tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
4023	tid->paused = tid->sched = tid->addba_tx_pending = 0;
4024	tid->incomp = tid->cleanup_inprogress = 0;
4025#endif
4026
4027	/*
4028	 * If we have a bar_wait set, we need to unpause the TID
4029	 * here.  Otherwise once cleanup has finished, the TID won't
4030	 * have the right paused counter.
4031	 *
4032	 * XXX I'm not going through resume here - I don't want the
4033	 * node to be rescheuled just yet.  This however should be
4034	 * methodized!
4035	 */
4036	if (tid->bar_wait) {
4037		if (tid->paused > 0) {
4038			tid->paused --;
4039		}
4040	}
4041
4042	/*
4043	 * XXX same with a currently filtered TID.
4044	 *
4045	 * Since this is being called during a flush, we assume that
4046	 * the filtered frame list is actually empty.
4047	 *
4048	 * XXX TODO: add in a check to ensure that the filtered queue
4049	 * depth is actually 0!
4050	 */
4051	if (tid->isfiltered) {
4052		if (tid->paused > 0) {
4053			tid->paused --;
4054		}
4055	}
4056
4057	/*
4058	 * Clear BAR, filtered frames, scheduled and ADDBA pending.
4059	 * The TID may be going through cleanup from the last association
4060	 * where things in the BAW are still in the hardware queue.
4061	 */
4062	tid->bar_wait = 0;
4063	tid->bar_tx = 0;
4064	tid->isfiltered = 0;
4065	tid->sched = 0;
4066	tid->addba_tx_pending = 0;
4067
4068	/*
4069	 * XXX TODO: it may just be enough to walk the HWQs and mark
4070	 * frames for that node as non-aggregate; or mark the ath_node
4071	 * with something that indicates that aggregation is no longer
4072	 * occurring.  Then we can just toss the BAW complaints and
4073	 * do a complete hard reset of state here - no pause, no
4074	 * complete counter, etc.
4075	 */
4076
4077}
4078
4079/*
4080 * Flush all software queued packets for the given node.
4081 *
4082 * This occurs when a completion handler frees the last buffer
4083 * for a node, and the node is thus freed. This causes the node
4084 * to be cleaned up, which ends up calling ath_tx_node_flush.
4085 */
4086void
4087ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
4088{
4089	int tid;
4090	ath_bufhead bf_cq;
4091	struct ath_buf *bf;
4092
4093	TAILQ_INIT(&bf_cq);
4094
4095	ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",
4096	    &an->an_node);
4097
4098	ATH_TX_LOCK(sc);
4099	DPRINTF(sc, ATH_DEBUG_NODE,
4100	    "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, "
4101	    "swq_depth=%d, clrdmask=%d, leak_count=%d\n",
4102	    __func__,
4103	    an->an_node.ni_macaddr,
4104	    ":",
4105	    an->an_is_powersave,
4106	    an->an_stack_psq,
4107	    an->an_tim_set,
4108	    an->an_swq_depth,
4109	    an->clrdmask,
4110	    an->an_leak_count);
4111
4112	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
4113		struct ath_tid *atid = &an->an_tid[tid];
4114
4115		/* Free packets */
4116		ath_tx_tid_drain(sc, an, atid, &bf_cq);
4117
4118		/* Remove this tid from the list of active tids */
4119		ath_tx_tid_unsched(sc, atid);
4120
4121		/* Reset the per-TID pause, BAR, etc state */
4122		ath_tx_tid_reset(sc, atid);
4123	}
4124
4125	/*
4126	 * Clear global leak count
4127	 */
4128	an->an_leak_count = 0;
4129	ATH_TX_UNLOCK(sc);
4130
4131	/* Handle completed frames */
4132	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4133		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4134		ath_tx_default_comp(sc, bf, 0);
4135	}
4136}
4137
4138/*
4139 * Drain all the software TXQs currently with traffic queued.
4140 */
4141void
4142ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4143{
4144	struct ath_tid *tid;
4145	ath_bufhead bf_cq;
4146	struct ath_buf *bf;
4147
4148	TAILQ_INIT(&bf_cq);
4149	ATH_TX_LOCK(sc);
4150
4151	/*
4152	 * Iterate over all active tids for the given txq,
4153	 * flushing and unsched'ing them
4154	 */
4155	while (! TAILQ_EMPTY(&txq->axq_tidq)) {
4156		tid = TAILQ_FIRST(&txq->axq_tidq);
4157		ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
4158		ath_tx_tid_unsched(sc, tid);
4159	}
4160
4161	ATH_TX_UNLOCK(sc);
4162
4163	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4164		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4165		ath_tx_default_comp(sc, bf, 0);
4166	}
4167}
4168
4169/*
4170 * Handle completion of non-aggregate session frames.
4171 *
4172 * This (currently) doesn't implement software retransmission of
4173 * non-aggregate frames!
4174 *
4175 * Software retransmission of non-aggregate frames needs to obey
4176 * the strict sequence number ordering, and drop any frames that
4177 * will fail this.
4178 *
4179 * For now, filtered frames and frame transmission will cause
4180 * all kinds of issues.  So we don't support them.
4181 *
4182 * So anyone queuing frames via ath_tx_normal_xmit() or
4183 * ath_tx_hw_queue_norm() must override and set CLRDMASK.
4184 */
4185void
4186ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4187{
4188	struct ieee80211_node *ni = bf->bf_node;
4189	struct ath_node *an = ATH_NODE(ni);
4190	int tid = bf->bf_state.bfs_tid;
4191	struct ath_tid *atid = &an->an_tid[tid];
4192	struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4193
4194	/* The TID state is protected behind the TXQ lock */
4195	ATH_TX_LOCK(sc);
4196
4197	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
4198	    __func__, bf, fail, atid->hwq_depth - 1);
4199
4200	atid->hwq_depth--;
4201
4202#if 0
4203	/*
4204	 * If the frame was filtered, stick it on the filter frame
4205	 * queue and complain about it.  It shouldn't happen!
4206	 */
4207	if ((ts->ts_status & HAL_TXERR_FILT) ||
4208	    (ts->ts_status != 0 && atid->isfiltered)) {
4209		DPRINTF(sc, ATH_DEBUG_SW_TX,
4210		    "%s: isfiltered=%d, ts_status=%d: huh?\n",
4211		    __func__,
4212		    atid->isfiltered,
4213		    ts->ts_status);
4214		ath_tx_tid_filt_comp_buf(sc, atid, bf);
4215	}
4216#endif
4217	if (atid->isfiltered)
4218		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__);
4219	if (atid->hwq_depth < 0)
4220		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
4221		    __func__, atid->hwq_depth);
4222
4223	/* If the TID is being cleaned up, track things */
4224	/* XXX refactor! */
4225	if (atid->cleanup_inprogress) {
4226		atid->incomp--;
4227		if (atid->incomp == 0) {
4228			DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4229			    "%s: TID %d: cleaned up! resume!\n",
4230			    __func__, tid);
4231			atid->cleanup_inprogress = 0;
4232			ath_tx_tid_resume(sc, atid);
4233		}
4234	}
4235
4236	/*
4237	 * If the queue is filtered, potentially mark it as complete
4238	 * and reschedule it as needed.
4239	 *
4240	 * This is required as there may be a subsequent TX descriptor
4241	 * for this end-node that has CLRDMASK set, so it's quite possible
4242	 * that a filtered frame will be followed by a non-filtered
4243	 * (complete or otherwise) frame.
4244	 *
4245	 * XXX should we do this before we complete the frame?
4246	 */
4247	if (atid->isfiltered)
4248		ath_tx_tid_filt_comp_complete(sc, atid);
4249	ATH_TX_UNLOCK(sc);
4250
4251	/*
4252	 * punt to rate control if we're not being cleaned up
4253	 * during a hw queue drain and the frame wanted an ACK.
4254	 */
4255	if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
4256		ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
4257		    ts,
4258		    bf->bf_state.bfs_pktlen,
4259		    bf->bf_state.bfs_pktlen,
4260		    1, (ts->ts_status == 0) ? 0 : 1);
4261
4262	ath_tx_default_comp(sc, bf, fail);
4263}
4264
4265/*
4266 * Handle cleanup of aggregate session packets that aren't
4267 * an A-MPDU.
4268 *
4269 * There's no need to update the BAW here - the session is being
4270 * torn down.
4271 */
4272static void
4273ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4274{
4275	struct ieee80211_node *ni = bf->bf_node;
4276	struct ath_node *an = ATH_NODE(ni);
4277	int tid = bf->bf_state.bfs_tid;
4278	struct ath_tid *atid = &an->an_tid[tid];
4279
4280	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",
4281	    __func__, tid, atid->incomp);
4282
4283	ATH_TX_LOCK(sc);
4284	atid->incomp--;
4285
4286	/* XXX refactor! */
4287	if (bf->bf_state.bfs_dobaw) {
4288		ath_tx_update_baw(sc, an, atid, bf);
4289		if (!bf->bf_state.bfs_addedbaw)
4290			DPRINTF(sc, ATH_DEBUG_SW_TX,
4291			    "%s: wasn't added: seqno %d\n",
4292			    __func__, SEQNO(bf->bf_state.bfs_seqno));
4293	}
4294
4295	if (atid->incomp == 0) {
4296		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4297		    "%s: TID %d: cleaned up! resume!\n",
4298		    __func__, tid);
4299		atid->cleanup_inprogress = 0;
4300		ath_tx_tid_resume(sc, atid);
4301	}
4302	ATH_TX_UNLOCK(sc);
4303
4304	ath_tx_default_comp(sc, bf, 0);
4305}
4306
4307/*
4308 * This as it currently stands is a bit dumb.  Ideally we'd just
4309 * fail the frame the normal way and have it permanently fail
4310 * via the normal aggregate completion path.
4311 */
4312static void
4313ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an,
4314    int tid, struct ath_buf *bf_head, ath_bufhead *bf_cq)
4315{
4316	struct ath_tid *atid = &an->an_tid[tid];
4317	struct ath_buf *bf, *bf_next;
4318
4319	ATH_TX_LOCK_ASSERT(sc);
4320
4321	/*
4322	 * Remove this frame from the queue.
4323	 */
4324	ATH_TID_REMOVE(atid, bf_head, bf_list);
4325
4326	/*
4327	 * Loop over all the frames in the aggregate.
4328	 */
4329	bf = bf_head;
4330	while (bf != NULL) {
4331		bf_next = bf->bf_next;	/* next aggregate frame, or NULL */
4332
4333		/*
4334		 * If it's been added to the BAW we need to kick
4335		 * it out of the BAW before we continue.
4336		 *
4337		 * XXX if it's an aggregate, assert that it's in the
4338		 * BAW - we shouldn't have it be in an aggregate
4339		 * otherwise!
4340		 */
4341		if (bf->bf_state.bfs_addedbaw) {
4342			ath_tx_update_baw(sc, an, atid, bf);
4343			bf->bf_state.bfs_dobaw = 0;
4344		}
4345
4346		/*
4347		 * Give it the default completion handler.
4348		 */
4349		bf->bf_comp = ath_tx_normal_comp;
4350		bf->bf_next = NULL;
4351
4352		/*
4353		 * Add it to the list to free.
4354		 */
4355		TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
4356
4357		/*
4358		 * Now advance to the next frame in the aggregate.
4359		 */
4360		bf = bf_next;
4361	}
4362}
4363
4364/*
4365 * Performs transmit side cleanup when TID changes from aggregated to
4366 * unaggregated and during reassociation.
4367 *
4368 * For now, this just tosses everything from the TID software queue
4369 * whether or not it has been retried and marks the TID as
4370 * pending completion if there's anything for this TID queued to
4371 * the hardware.
4372 *
4373 * The caller is responsible for pausing the TID and unpausing the
4374 * TID if no cleanup was required. Otherwise the cleanup path will
4375 * unpause the TID once the last hardware queued frame is completed.
4376 */
4377static void
4378ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid,
4379    ath_bufhead *bf_cq)
4380{
4381	struct ath_tid *atid = &an->an_tid[tid];
4382	struct ath_buf *bf, *bf_next;
4383
4384	ATH_TX_LOCK_ASSERT(sc);
4385
4386	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4387	    "%s: TID %d: called; inprogress=%d\n", __func__, tid,
4388	    atid->cleanup_inprogress);
4389
4390	/*
4391	 * Move the filtered frames to the TX queue, before
4392	 * we run off and discard/process things.
4393	 */
4394
4395	/* XXX this is really quite inefficient */
4396	while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) {
4397		ATH_TID_FILT_REMOVE(atid, bf, bf_list);
4398		ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4399	}
4400
4401	/*
4402	 * Update the frames in the software TX queue:
4403	 *
4404	 * + Discard retry frames in the queue
4405	 * + Fix the completion function to be non-aggregate
4406	 */
4407	bf = ATH_TID_FIRST(atid);
4408	while (bf) {
4409		/*
4410		 * Grab the next frame in the list, we may
4411		 * be fiddling with the list.
4412		 */
4413		bf_next = TAILQ_NEXT(bf, bf_list);
4414
4415		/*
4416		 * Free the frame and all subframes.
4417		 */
4418		ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq);
4419
4420		/*
4421		 * Next frame!
4422		 */
4423		bf = bf_next;
4424	}
4425
4426	/*
4427	 * If there's anything in the hardware queue we wait
4428	 * for the TID HWQ to empty.
4429	 */
4430	if (atid->hwq_depth > 0) {
4431		/*
4432		 * XXX how about we kill atid->incomp, and instead
4433		 * replace it with a macro that checks that atid->hwq_depth
4434		 * is 0?
4435		 */
4436		atid->incomp = atid->hwq_depth;
4437		atid->cleanup_inprogress = 1;
4438	}
4439
4440	if (atid->cleanup_inprogress)
4441		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4442		    "%s: TID %d: cleanup needed: %d packets\n",
4443		    __func__, tid, atid->incomp);
4444
4445	/* Owner now must free completed frames */
4446}
4447
4448static struct ath_buf *
4449ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
4450    struct ath_tid *tid, struct ath_buf *bf)
4451{
4452	struct ath_buf *nbf;
4453	int error;
4454
4455	/*
4456	 * Clone the buffer.  This will handle the dma unmap and
4457	 * copy the node reference to the new buffer.  If this
4458	 * works out, 'bf' will have no DMA mapping, no mbuf
4459	 * pointer and no node reference.
4460	 */
4461	nbf = ath_buf_clone(sc, bf);
4462
4463#if 0
4464	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n",
4465	    __func__);
4466#endif
4467
4468	if (nbf == NULL) {
4469		/* Failed to clone */
4470		DPRINTF(sc, ATH_DEBUG_XMIT,
4471		    "%s: failed to clone a busy buffer\n",
4472		    __func__);
4473		return NULL;
4474	}
4475
4476	/* Setup the dma for the new buffer */
4477	error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
4478	if (error != 0) {
4479		DPRINTF(sc, ATH_DEBUG_XMIT,
4480		    "%s: failed to setup dma for clone\n",
4481		    __func__);
4482		/*
4483		 * Put this at the head of the list, not tail;
4484		 * that way it doesn't interfere with the
4485		 * busy buffer logic (which uses the tail of
4486		 * the list.)
4487		 */
4488		ATH_TXBUF_LOCK(sc);
4489		ath_returnbuf_head(sc, nbf);
4490		ATH_TXBUF_UNLOCK(sc);
4491		return NULL;
4492	}
4493
4494	/* Update BAW if required, before we free the original buf */
4495	if (bf->bf_state.bfs_dobaw)
4496		ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);
4497
4498	/* Free original buffer; return new buffer */
4499	ath_freebuf(sc, bf);
4500
4501	return nbf;
4502}
4503
4504/*
4505 * Handle retrying an unaggregate frame in an aggregate
4506 * session.
4507 *
4508 * If too many retries occur, pause the TID, wait for
4509 * any further retransmits (as there's no reason why
4510 * non-aggregate frames in an aggregate session are
4511 * transmitted in-order; they just have to be in-BAW)
4512 * and then queue a BAR.
4513 */
4514static void
4515ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4516{
4517	struct ieee80211_node *ni = bf->bf_node;
4518	struct ath_node *an = ATH_NODE(ni);
4519	int tid = bf->bf_state.bfs_tid;
4520	struct ath_tid *atid = &an->an_tid[tid];
4521	struct ieee80211_tx_ampdu *tap;
4522
4523	ATH_TX_LOCK(sc);
4524
4525	tap = ath_tx_get_tx_tid(an, tid);
4526
4527	/*
4528	 * If the buffer is marked as busy, we can't directly
4529	 * reuse it. Instead, try to clone the buffer.
4530	 * If the clone is successful, recycle the old buffer.
4531	 * If the clone is unsuccessful, set bfs_retries to max
4532	 * to force the next bit of code to free the buffer
4533	 * for us.
4534	 */
4535	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4536	    (bf->bf_flags & ATH_BUF_BUSY)) {
4537		struct ath_buf *nbf;
4538		nbf = ath_tx_retry_clone(sc, an, atid, bf);
4539		if (nbf)
4540			/* bf has been freed at this point */
4541			bf = nbf;
4542		else
4543			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4544	}
4545
4546	if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4547		DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4548		    "%s: exceeded retries; seqno %d\n",
4549		    __func__, SEQNO(bf->bf_state.bfs_seqno));
4550		sc->sc_stats.ast_tx_swretrymax++;
4551
4552		/* Update BAW anyway */
4553		if (bf->bf_state.bfs_dobaw) {
4554			ath_tx_update_baw(sc, an, atid, bf);
4555			if (! bf->bf_state.bfs_addedbaw)
4556				DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4557				    "%s: wasn't added: seqno %d\n",
4558				    __func__, SEQNO(bf->bf_state.bfs_seqno));
4559		}
4560		bf->bf_state.bfs_dobaw = 0;
4561
4562		/* Suspend the TX queue and get ready to send the BAR */
4563		ath_tx_tid_bar_suspend(sc, atid);
4564
4565		/* Send the BAR if there are no other frames waiting */
4566		if (ath_tx_tid_bar_tx_ready(sc, atid))
4567			ath_tx_tid_bar_tx(sc, atid);
4568
4569		ATH_TX_UNLOCK(sc);
4570
4571		/* Free buffer, bf is free after this call */
4572		ath_tx_default_comp(sc, bf, 0);
4573		return;
4574	}
4575
4576	/*
4577	 * This increments the retry counter as well as
4578	 * sets the retry flag in the ath_buf and packet
4579	 * body.
4580	 */
4581	ath_tx_set_retry(sc, bf);
4582	sc->sc_stats.ast_tx_swretries++;
4583
4584	/*
4585	 * Insert this at the head of the queue, so it's
4586	 * retried before any current/subsequent frames.
4587	 */
4588	ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4589	ath_tx_tid_sched(sc, atid);
4590	/* Send the BAR if there are no other frames waiting */
4591	if (ath_tx_tid_bar_tx_ready(sc, atid))
4592		ath_tx_tid_bar_tx(sc, atid);
4593
4594	ATH_TX_UNLOCK(sc);
4595}
4596
4597/*
4598 * Common code for aggregate excessive retry/subframe retry.
4599 * If retrying, queues buffers to bf_q. If not, frees the
4600 * buffers.
4601 *
4602 * XXX should unify this with ath_tx_aggr_retry_unaggr()
4603 */
4604static int
4605ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,
4606    ath_bufhead *bf_q)
4607{
4608	struct ieee80211_node *ni = bf->bf_node;
4609	struct ath_node *an = ATH_NODE(ni);
4610	int tid = bf->bf_state.bfs_tid;
4611	struct ath_tid *atid = &an->an_tid[tid];
4612
4613	ATH_TX_LOCK_ASSERT(sc);
4614
4615	/* XXX clr11naggr should be done for all subframes */
4616	ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
4617	ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);
4618
4619	/* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */
4620
4621	/*
4622	 * If the buffer is marked as busy, we can't directly
4623	 * reuse it. Instead, try to clone the buffer.
4624	 * If the clone is successful, recycle the old buffer.
4625	 * If the clone is unsuccessful, set bfs_retries to max
4626	 * to force the next bit of code to free the buffer
4627	 * for us.
4628	 */
4629	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4630	    (bf->bf_flags & ATH_BUF_BUSY)) {
4631		struct ath_buf *nbf;
4632		nbf = ath_tx_retry_clone(sc, an, atid, bf);
4633		if (nbf)
4634			/* bf has been freed at this point */
4635			bf = nbf;
4636		else
4637			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4638	}
4639
4640	if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4641		sc->sc_stats.ast_tx_swretrymax++;
4642		DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4643		    "%s: max retries: seqno %d\n",
4644		    __func__, SEQNO(bf->bf_state.bfs_seqno));
4645		ath_tx_update_baw(sc, an, atid, bf);
4646		if (!bf->bf_state.bfs_addedbaw)
4647			DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4648			    "%s: wasn't added: seqno %d\n",
4649			    __func__, SEQNO(bf->bf_state.bfs_seqno));
4650		bf->bf_state.bfs_dobaw = 0;
4651		return 1;
4652	}
4653
4654	ath_tx_set_retry(sc, bf);
4655	sc->sc_stats.ast_tx_swretries++;
4656	bf->bf_next = NULL;		/* Just to make sure */
4657
4658	/* Clear the aggregate state */
4659	bf->bf_state.bfs_aggr = 0;
4660	bf->bf_state.bfs_ndelim = 0;	/* ??? needed? */
4661	bf->bf_state.bfs_nframes = 1;
4662
4663	TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
4664	return 0;
4665}
4666
4667/*
4668 * error pkt completion for an aggregate destination
4669 */
4670static void
4671ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first,
4672    struct ath_tid *tid)
4673{
4674	struct ieee80211_node *ni = bf_first->bf_node;
4675	struct ath_node *an = ATH_NODE(ni);
4676	struct ath_buf *bf_next, *bf;
4677	ath_bufhead bf_q;
4678	int drops = 0;
4679	struct ieee80211_tx_ampdu *tap;
4680	ath_bufhead bf_cq;
4681
4682	TAILQ_INIT(&bf_q);
4683	TAILQ_INIT(&bf_cq);
4684
4685	/*
4686	 * Update rate control - all frames have failed.
4687	 */
4688	ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
4689	    &bf_first->bf_status.ds_txstat,
4690	    bf_first->bf_state.bfs_al,
4691	    bf_first->bf_state.bfs_rc_maxpktlen,
4692	    bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);
4693
4694	ATH_TX_LOCK(sc);
4695	tap = ath_tx_get_tx_tid(an, tid->tid);
4696	sc->sc_stats.ast_tx_aggr_failall++;
4697
4698	/* Retry all subframes */
4699	bf = bf_first;
4700	while (bf) {
4701		bf_next = bf->bf_next;
4702		bf->bf_next = NULL;	/* Remove it from the aggr list */
4703		sc->sc_stats.ast_tx_aggr_fail++;
4704		if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4705			drops++;
4706			bf->bf_next = NULL;
4707			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4708		}
4709		bf = bf_next;
4710	}
4711
4712	/* Prepend all frames to the beginning of the queue */
4713	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4714		TAILQ_REMOVE(&bf_q, bf, bf_list);
4715		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
4716	}
4717
4718	/*
4719	 * Schedule the TID to be re-tried.
4720	 */
4721	ath_tx_tid_sched(sc, tid);
4722
4723	/*
4724	 * send bar if we dropped any frames
4725	 *
4726	 * Keep the txq lock held for now, as we need to ensure
4727	 * that ni_txseqs[] is consistent (as it's being updated
4728	 * in the ifnet TX context or raw TX context.)
4729	 */
4730	if (drops) {
4731		/* Suspend the TX queue and get ready to send the BAR */
4732		ath_tx_tid_bar_suspend(sc, tid);
4733	}
4734
4735	/*
4736	 * Send BAR if required
4737	 */
4738	if (ath_tx_tid_bar_tx_ready(sc, tid))
4739		ath_tx_tid_bar_tx(sc, tid);
4740
4741	ATH_TX_UNLOCK(sc);
4742
4743	/* Complete frames which errored out */
4744	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4745		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4746		ath_tx_default_comp(sc, bf, 0);
4747	}
4748}
4749
4750/*
4751 * Handle clean-up of packets from an aggregate list.
4752 *
4753 * There's no need to update the BAW here - the session is being
4754 * torn down.
4755 */
4756static void
4757ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
4758{
4759	struct ath_buf *bf, *bf_next;
4760	struct ieee80211_node *ni = bf_first->bf_node;
4761	struct ath_node *an = ATH_NODE(ni);
4762	int tid = bf_first->bf_state.bfs_tid;
4763	struct ath_tid *atid = &an->an_tid[tid];
4764
4765	ATH_TX_LOCK(sc);
4766
4767	/* update incomp */
4768	atid->incomp--;
4769
4770	/* Update the BAW */
4771	bf = bf_first;
4772	while (bf) {
4773		/* XXX refactor! */
4774		if (bf->bf_state.bfs_dobaw) {
4775			ath_tx_update_baw(sc, an, atid, bf);
4776			if (!bf->bf_state.bfs_addedbaw)
4777				DPRINTF(sc, ATH_DEBUG_SW_TX,
4778				    "%s: wasn't added: seqno %d\n",
4779				    __func__, SEQNO(bf->bf_state.bfs_seqno));
4780		}
4781		bf = bf->bf_next;
4782	}
4783
4784	if (atid->incomp == 0) {
4785		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4786		    "%s: TID %d: cleaned up! resume!\n",
4787		    __func__, tid);
4788		atid->cleanup_inprogress = 0;
4789		ath_tx_tid_resume(sc, atid);
4790	}
4791
4792	/* Send BAR if required */
4793	/* XXX why would we send a BAR when transitioning to non-aggregation? */
4794	/*
4795	 * XXX TODO: we should likely just tear down the BAR state here,
4796	 * rather than sending a BAR.
4797	 */
4798	if (ath_tx_tid_bar_tx_ready(sc, atid))
4799		ath_tx_tid_bar_tx(sc, atid);
4800
4801	ATH_TX_UNLOCK(sc);
4802
4803	/* Handle frame completion as individual frames */
4804	bf = bf_first;
4805	while (bf) {
4806		bf_next = bf->bf_next;
4807		bf->bf_next = NULL;
4808		ath_tx_default_comp(sc, bf, 1);
4809		bf = bf_next;
4810	}
4811}
4812
4813/*
4814 * Handle completion of an set of aggregate frames.
4815 *
4816 * Note: the completion handler is the last descriptor in the aggregate,
4817 * not the last descriptor in the first frame.
4818 */
4819static void
4820ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,
4821    int fail)
4822{
4823	//struct ath_desc *ds = bf->bf_lastds;
4824	struct ieee80211_node *ni = bf_first->bf_node;
4825	struct ath_node *an = ATH_NODE(ni);
4826	int tid = bf_first->bf_state.bfs_tid;
4827	struct ath_tid *atid = &an->an_tid[tid];
4828	struct ath_tx_status ts;
4829	struct ieee80211_tx_ampdu *tap;
4830	ath_bufhead bf_q;
4831	ath_bufhead bf_cq;
4832	int seq_st, tx_ok;
4833	int hasba, isaggr;
4834	uint32_t ba[2];
4835	struct ath_buf *bf, *bf_next;
4836	int ba_index;
4837	int drops = 0;
4838	int nframes = 0, nbad = 0, nf;
4839	int pktlen;
4840	int agglen, rc_agglen;
4841	/* XXX there's too much on the stack? */
4842	struct ath_rc_series rc[ATH_RC_NUM];
4843	int txseq;
4844
4845	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
4846	    __func__, atid->hwq_depth);
4847
4848	/*
4849	 * Take a copy; this may be needed -after- bf_first
4850	 * has been completed and freed.
4851	 */
4852	ts = bf_first->bf_status.ds_txstat;
4853	agglen = bf_first->bf_state.bfs_al;
4854	rc_agglen = bf_first->bf_state.bfs_rc_maxpktlen;
4855
4856	TAILQ_INIT(&bf_q);
4857	TAILQ_INIT(&bf_cq);
4858
4859	/* The TID state is kept behind the TXQ lock */
4860	ATH_TX_LOCK(sc);
4861
4862	atid->hwq_depth--;
4863	if (atid->hwq_depth < 0)
4864		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n",
4865		    __func__, atid->hwq_depth);
4866
4867	/*
4868	 * If the TID is filtered, handle completing the filter
4869	 * transition before potentially kicking it to the cleanup
4870	 * function.
4871	 *
4872	 * XXX this is duplicate work, ew.
4873	 */
4874	if (atid->isfiltered)
4875		ath_tx_tid_filt_comp_complete(sc, atid);
4876
4877	/*
4878	 * Punt cleanup to the relevant function, not our problem now
4879	 */
4880	if (atid->cleanup_inprogress) {
4881		if (atid->isfiltered)
4882			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4883			    "%s: isfiltered=1, normal_comp?\n",
4884			    __func__);
4885		ATH_TX_UNLOCK(sc);
4886		ath_tx_comp_cleanup_aggr(sc, bf_first);
4887		return;
4888	}
4889
4890	/*
4891	 * If the frame is filtered, transition to filtered frame
4892	 * mode and add this to the filtered frame list.
4893	 *
4894	 * XXX TODO: figure out how this interoperates with
4895	 * BAR, pause and cleanup states.
4896	 */
4897	if ((ts.ts_status & HAL_TXERR_FILT) ||
4898	    (ts.ts_status != 0 && atid->isfiltered)) {
4899		if (fail != 0)
4900			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4901			    "%s: isfiltered=1, fail=%d\n", __func__, fail);
4902		ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);
4903
4904		/* Remove from BAW */
4905		TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) {
4906			if (bf->bf_state.bfs_addedbaw)
4907				drops++;
4908			if (bf->bf_state.bfs_dobaw) {
4909				ath_tx_update_baw(sc, an, atid, bf);
4910				if (!bf->bf_state.bfs_addedbaw)
4911					DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4912					    "%s: wasn't added: seqno %d\n",
4913					    __func__,
4914					    SEQNO(bf->bf_state.bfs_seqno));
4915			}
4916			bf->bf_state.bfs_dobaw = 0;
4917		}
4918		/*
4919		 * If any intermediate frames in the BAW were dropped when
4920		 * handling filtering things, send a BAR.
4921		 */
4922		if (drops)
4923			ath_tx_tid_bar_suspend(sc, atid);
4924
4925		/*
4926		 * Finish up by sending a BAR if required and freeing
4927		 * the frames outside of the TX lock.
4928		 */
4929		goto finish_send_bar;
4930	}
4931
4932	/*
4933	 * XXX for now, use the first frame in the aggregate for
4934	 * XXX rate control completion; it's at least consistent.
4935	 */
4936	pktlen = bf_first->bf_state.bfs_pktlen;
4937
4938	/*
4939	 * Handle errors first!
4940	 *
4941	 * Here, handle _any_ error as a "exceeded retries" error.
4942	 * Later on (when filtered frames are to be specially handled)
4943	 * it'll have to be expanded.
4944	 */
4945#if 0
4946	if (ts.ts_status & HAL_TXERR_XRETRY) {
4947#endif
4948	if (ts.ts_status != 0) {
4949		ATH_TX_UNLOCK(sc);
4950		ath_tx_comp_aggr_error(sc, bf_first, atid);
4951		return;
4952	}
4953
4954	tap = ath_tx_get_tx_tid(an, tid);
4955
4956	/*
4957	 * extract starting sequence and block-ack bitmap
4958	 */
4959	/* XXX endian-ness of seq_st, ba? */
4960	seq_st = ts.ts_seqnum;
4961	hasba = !! (ts.ts_flags & HAL_TX_BA);
4962	tx_ok = (ts.ts_status == 0);
4963	isaggr = bf_first->bf_state.bfs_aggr;
4964	ba[0] = ts.ts_ba_low;
4965	ba[1] = ts.ts_ba_high;
4966
4967	/*
4968	 * Copy the TX completion status and the rate control
4969	 * series from the first descriptor, as it may be freed
4970	 * before the rate control code can get its grubby fingers
4971	 * into things.
4972	 */
4973	memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));
4974
4975	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4976	    "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, "
4977	    "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n",
4978	    __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
4979	    isaggr, seq_st, hasba, ba[0], ba[1]);
4980
4981	/*
4982	 * The reference driver doesn't do this; it simply ignores
4983	 * this check in its entirety.
4984	 *
4985	 * I've seen this occur when using iperf to send traffic
4986	 * out tid 1 - the aggregate frames are all marked as TID 1,
4987	 * but the TXSTATUS has TID=0.  So, let's just ignore this
4988	 * check.
4989	 */
4990#if 0
4991	/* Occasionally, the MAC sends a tx status for the wrong TID. */
4992	if (tid != ts.ts_tid) {
4993		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n",
4994		    __func__, tid, ts.ts_tid);
4995		tx_ok = 0;
4996	}
4997#endif
4998
4999	/* AR5416 BA bug; this requires an interface reset */
5000	if (isaggr && tx_ok && (! hasba)) {
5001		device_printf(sc->sc_dev,
5002		    "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, "
5003		    "seq_st=%d\n",
5004		    __func__, hasba, tx_ok, isaggr, seq_st);
5005		taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
5006		/* And as we can't really trust the BA here .. */
5007		ba[0] = 0;
5008		ba[1] = 0;
5009		seq_st = 0;
5010#ifdef ATH_DEBUG
5011		ath_printtxbuf(sc, bf_first,
5012		    sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
5013#endif
5014	}
5015
5016	/*
5017	 * Walk the list of frames, figure out which ones were correctly
5018	 * sent and which weren't.
5019	 */
5020	bf = bf_first;
5021	nf = bf_first->bf_state.bfs_nframes;
5022
5023	/* bf_first is going to be invalid once this list is walked */
5024	bf_first = NULL;
5025
5026	/*
5027	 * Walk the list of completed frames and determine
5028	 * which need to be completed and which need to be
5029	 * retransmitted.
5030	 *
5031	 * For completed frames, the completion functions need
5032	 * to be called at the end of this function as the last
5033	 * node reference may free the node.
5034	 *
5035	 * Finally, since the TXQ lock can't be held during the
5036	 * completion callback (to avoid lock recursion),
5037	 * the completion calls have to be done outside of the
5038	 * lock.
5039	 */
5040	while (bf) {
5041		nframes++;
5042		ba_index = ATH_BA_INDEX(seq_st,
5043		    SEQNO(bf->bf_state.bfs_seqno));
5044		bf_next = bf->bf_next;
5045		bf->bf_next = NULL;	/* Remove it from the aggr list */
5046
5047		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5048		    "%s: checking bf=%p seqno=%d; ack=%d\n",
5049		    __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
5050		    ATH_BA_ISSET(ba, ba_index));
5051
5052		if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
5053			sc->sc_stats.ast_tx_aggr_ok++;
5054			ath_tx_update_baw(sc, an, atid, bf);
5055			bf->bf_state.bfs_dobaw = 0;
5056			if (!bf->bf_state.bfs_addedbaw)
5057				DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5058				    "%s: wasn't added: seqno %d\n",
5059				    __func__, SEQNO(bf->bf_state.bfs_seqno));
5060			bf->bf_next = NULL;
5061			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
5062		} else {
5063			sc->sc_stats.ast_tx_aggr_fail++;
5064			if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
5065				drops++;
5066				bf->bf_next = NULL;
5067				TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
5068			}
5069			nbad++;
5070		}
5071		bf = bf_next;
5072	}
5073
5074	/*
5075	 * Now that the BAW updates have been done, unlock
5076	 *
5077	 * txseq is grabbed before the lock is released so we
5078	 * have a consistent view of what -was- in the BAW.
5079	 * Anything after this point will not yet have been
5080	 * TXed.
5081	 */
5082	txseq = tap->txa_start;
5083	ATH_TX_UNLOCK(sc);
5084
5085	if (nframes != nf)
5086		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5087		    "%s: num frames seen=%d; bf nframes=%d\n",
5088		    __func__, nframes, nf);
5089
5090	/*
5091	 * Now we know how many frames were bad, call the rate
5092	 * control code.
5093	 */
5094	if (fail == 0) {
5095		ath_tx_update_ratectrl(sc, ni, rc, &ts, agglen, rc_agglen,
5096		    nframes, nbad);
5097	}
5098
5099	/*
5100	 * send bar if we dropped any frames
5101	 */
5102	if (drops) {
5103		/* Suspend the TX queue and get ready to send the BAR */
5104		ATH_TX_LOCK(sc);
5105		ath_tx_tid_bar_suspend(sc, atid);
5106		ATH_TX_UNLOCK(sc);
5107	}
5108
5109	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5110	    "%s: txa_start now %d\n", __func__, tap->txa_start);
5111
5112	ATH_TX_LOCK(sc);
5113
5114	/* Prepend all frames to the beginning of the queue */
5115	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
5116		TAILQ_REMOVE(&bf_q, bf, bf_list);
5117		ATH_TID_INSERT_HEAD(atid, bf, bf_list);
5118	}
5119
5120	/*
5121	 * Reschedule to grab some further frames.
5122	 */
5123	ath_tx_tid_sched(sc, atid);
5124
5125	/*
5126	 * If the queue is filtered, re-schedule as required.
5127	 *
5128	 * This is required as there may be a subsequent TX descriptor
5129	 * for this end-node that has CLRDMASK set, so it's quite possible
5130	 * that a filtered frame will be followed by a non-filtered
5131	 * (complete or otherwise) frame.
5132	 *
5133	 * XXX should we do this before we complete the frame?
5134	 */
5135	if (atid->isfiltered)
5136		ath_tx_tid_filt_comp_complete(sc, atid);
5137
5138finish_send_bar:
5139
5140	/*
5141	 * Send BAR if required
5142	 */
5143	if (ath_tx_tid_bar_tx_ready(sc, atid))
5144		ath_tx_tid_bar_tx(sc, atid);
5145
5146	ATH_TX_UNLOCK(sc);
5147
5148	/* Do deferred completion */
5149	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5150		TAILQ_REMOVE(&bf_cq, bf, bf_list);
5151		ath_tx_default_comp(sc, bf, 0);
5152	}
5153}
5154
5155/*
5156 * Handle completion of unaggregated frames in an ADDBA
5157 * session.
5158 *
5159 * Fail is set to 1 if the entry is being freed via a call to
5160 * ath_tx_draintxq().
5161 */
5162static void
5163ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
5164{
5165	struct ieee80211_node *ni = bf->bf_node;
5166	struct ath_node *an = ATH_NODE(ni);
5167	int tid = bf->bf_state.bfs_tid;
5168	struct ath_tid *atid = &an->an_tid[tid];
5169	struct ath_tx_status ts;
5170	int drops = 0;
5171
5172	/*
5173	 * Take a copy of this; filtering/cloning the frame may free the
5174	 * bf pointer.
5175	 */
5176	ts = bf->bf_status.ds_txstat;
5177
5178	/*
5179	 * Update rate control status here, before we possibly
5180	 * punt to retry or cleanup.
5181	 *
5182	 * Do it outside of the TXQ lock.
5183	 */
5184	if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
5185		ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
5186		    &bf->bf_status.ds_txstat,
5187		    bf->bf_state.bfs_pktlen,
5188		    bf->bf_state.bfs_pktlen,
5189		    1, (ts.ts_status == 0) ? 0 : 1);
5190
5191	/*
5192	 * This is called early so atid->hwq_depth can be tracked.
5193	 * This unfortunately means that it's released and regrabbed
5194	 * during retry and cleanup. That's rather inefficient.
5195	 */
5196	ATH_TX_LOCK(sc);
5197
5198	if (tid == IEEE80211_NONQOS_TID)
5199		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__);
5200
5201	DPRINTF(sc, ATH_DEBUG_SW_TX,
5202	    "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n",
5203	    __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
5204	    SEQNO(bf->bf_state.bfs_seqno));
5205
5206	atid->hwq_depth--;
5207	if (atid->hwq_depth < 0)
5208		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
5209		    __func__, atid->hwq_depth);
5210
5211	/*
5212	 * If the TID is filtered, handle completing the filter
5213	 * transition before potentially kicking it to the cleanup
5214	 * function.
5215	 */
5216	if (atid->isfiltered)
5217		ath_tx_tid_filt_comp_complete(sc, atid);
5218
5219	/*
5220	 * If a cleanup is in progress, punt to comp_cleanup;
5221	 * rather than handling it here. It's thus their
5222	 * responsibility to clean up, call the completion
5223	 * function in net80211, etc.
5224	 */
5225	if (atid->cleanup_inprogress) {
5226		if (atid->isfiltered)
5227			DPRINTF(sc, ATH_DEBUG_SW_TX,
5228			    "%s: isfiltered=1, normal_comp?\n",
5229			    __func__);
5230		ATH_TX_UNLOCK(sc);
5231		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n",
5232		    __func__);
5233		ath_tx_comp_cleanup_unaggr(sc, bf);
5234		return;
5235	}
5236
5237	/*
5238	 * XXX TODO: how does cleanup, BAR and filtered frame handling
5239	 * overlap?
5240	 *
5241	 * If the frame is filtered OR if it's any failure but
5242	 * the TID is filtered, the frame must be added to the
5243	 * filtered frame list.
5244	 *
5245	 * However - a busy buffer can't be added to the filtered
5246	 * list as it will end up being recycled without having
5247	 * been made available for the hardware.
5248	 */
5249	if ((ts.ts_status & HAL_TXERR_FILT) ||
5250	    (ts.ts_status != 0 && atid->isfiltered)) {
5251		int freeframe;
5252
5253		if (fail != 0)
5254			DPRINTF(sc, ATH_DEBUG_SW_TX,
5255			    "%s: isfiltered=1, fail=%d\n",
5256			    __func__, fail);
5257		freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
5258		/*
5259		 * If freeframe=0 then bf is no longer ours; don't
5260		 * touch it.
5261		 */
5262		if (freeframe) {
5263			/* Remove from BAW */
5264			if (bf->bf_state.bfs_addedbaw)
5265				drops++;
5266			if (bf->bf_state.bfs_dobaw) {
5267				ath_tx_update_baw(sc, an, atid, bf);
5268				if (!bf->bf_state.bfs_addedbaw)
5269					DPRINTF(sc, ATH_DEBUG_SW_TX,
5270					    "%s: wasn't added: seqno %d\n",
5271					    __func__, SEQNO(bf->bf_state.bfs_seqno));
5272			}
5273			bf->bf_state.bfs_dobaw = 0;
5274		}
5275
5276		/*
5277		 * If the frame couldn't be filtered, treat it as a drop and
5278		 * prepare to send a BAR.
5279		 */
5280		if (freeframe && drops)
5281			ath_tx_tid_bar_suspend(sc, atid);
5282
5283		/*
5284		 * Send BAR if required
5285		 */
5286		if (ath_tx_tid_bar_tx_ready(sc, atid))
5287			ath_tx_tid_bar_tx(sc, atid);
5288
5289		ATH_TX_UNLOCK(sc);
5290		/*
5291		 * If freeframe is set, then the frame couldn't be
5292		 * cloned and bf is still valid.  Just complete/free it.
5293		 */
5294		if (freeframe)
5295			ath_tx_default_comp(sc, bf, fail);
5296
5297		return;
5298	}
5299	/*
5300	 * Don't bother with the retry check if all frames
5301	 * are being failed (eg during queue deletion.)
5302	 */
5303#if 0
5304	if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
5305#endif
5306	if (fail == 0 && ts.ts_status != 0) {
5307		ATH_TX_UNLOCK(sc);
5308		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n",
5309		    __func__);
5310		ath_tx_aggr_retry_unaggr(sc, bf);
5311		return;
5312	}
5313
5314	/* Success? Complete */
5315	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
5316	    __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
5317	if (bf->bf_state.bfs_dobaw) {
5318		ath_tx_update_baw(sc, an, atid, bf);
5319		bf->bf_state.bfs_dobaw = 0;
5320		if (!bf->bf_state.bfs_addedbaw)
5321			DPRINTF(sc, ATH_DEBUG_SW_TX,
5322			    "%s: wasn't added: seqno %d\n",
5323			    __func__, SEQNO(bf->bf_state.bfs_seqno));
5324	}
5325
5326	/*
5327	 * If the queue is filtered, re-schedule as required.
5328	 *
5329	 * This is required as there may be a subsequent TX descriptor
5330	 * for this end-node that has CLRDMASK set, so it's quite possible
5331	 * that a filtered frame will be followed by a non-filtered
5332	 * (complete or otherwise) frame.
5333	 *
5334	 * XXX should we do this before we complete the frame?
5335	 */
5336	if (atid->isfiltered)
5337		ath_tx_tid_filt_comp_complete(sc, atid);
5338
5339	/*
5340	 * Send BAR if required
5341	 */
5342	if (ath_tx_tid_bar_tx_ready(sc, atid))
5343		ath_tx_tid_bar_tx(sc, atid);
5344
5345	ATH_TX_UNLOCK(sc);
5346
5347	ath_tx_default_comp(sc, bf, fail);
5348	/* bf is freed at this point */
5349}
5350
5351void
5352ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
5353{
5354	if (bf->bf_state.bfs_aggr)
5355		ath_tx_aggr_comp_aggr(sc, bf, fail);
5356	else
5357		ath_tx_aggr_comp_unaggr(sc, bf, fail);
5358}
5359
5360/*
5361 * Grab the software queue depth that we COULD transmit.
5362 *
5363 * This includes checks if it's in the BAW, whether it's a frame
5364 * that is supposed to be in the BAW.  Other checks could be done;
5365 * but for now let's try and avoid doing the whole of ath_tx_form_aggr()
5366 * here.
5367 */
5368static int
5369ath_tx_tid_swq_depth_bytes(struct ath_softc *sc, struct ath_node *an,
5370    struct ath_tid *tid)
5371{
5372	struct ath_buf *bf;
5373	struct ieee80211_tx_ampdu *tap;
5374	int nbytes = 0;
5375
5376	ATH_TX_LOCK_ASSERT(sc);
5377
5378	tap = ath_tx_get_tx_tid(an, tid->tid);
5379
5380	/*
5381	 * Iterate over each buffer and sum the pkt_len.
5382	 * Bail if we exceed ATH_AGGR_MAXSIZE bytes; we won't
5383	 * ever queue more than that in a single frame.
5384	 */
5385	TAILQ_FOREACH(bf, &tid->tid_q, bf_list) {
5386		/*
5387		 * TODO: I'm not sure if we're going to hit cases where
5388		 * no frames get sent because the list is empty.
5389		 */
5390
5391		/* Check if it's in the BAW */
5392		if (tap != NULL && (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
5393		    SEQNO(bf->bf_state.bfs_seqno)))) {
5394			break;
5395		}
5396
5397		/* Check if it's even supposed to be in the BAW */
5398		if (! bf->bf_state.bfs_dobaw) {
5399			break;
5400		}
5401
5402		nbytes += bf->bf_state.bfs_pktlen;
5403		if (nbytes >= ATH_AGGR_MAXSIZE)
5404			break;
5405
5406		/*
5407		 * Check if we're likely going to leak a frame
5408		 * as part of a PSPOLL.  Break out at this point;
5409		 * we're only going to send a single frame anyway.
5410		 */
5411		if (an->an_leak_count) {
5412			break;
5413		}
5414	}
5415
5416	return MIN(nbytes, ATH_AGGR_MAXSIZE);
5417}
5418
5419/*
5420 * Schedule some packets from the given node/TID to the hardware.
5421 *
5422 * This is the aggregate version.
5423 */
5424void
5425ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
5426    struct ath_tid *tid)
5427{
5428	struct ath_buf *bf;
5429	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5430	struct ieee80211_tx_ampdu *tap;
5431	ATH_AGGR_STATUS status;
5432	ath_bufhead bf_q;
5433	int swq_pktbytes;
5434
5435	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
5436	ATH_TX_LOCK_ASSERT(sc);
5437
5438	/*
5439	 * XXX TODO: If we're called for a queue that we're leaking frames to,
5440	 * ensure we only leak one.
5441	 */
5442
5443	tap = ath_tx_get_tx_tid(an, tid->tid);
5444
5445	if (tid->tid == IEEE80211_NONQOS_TID)
5446		DPRINTF(sc, ATH_DEBUG_SW_TX,
5447		    "%s: called for TID=NONQOS_TID?\n", __func__);
5448
5449	for (;;) {
5450		status = ATH_AGGR_DONE;
5451
5452		/*
5453		 * If the upper layer has paused the TID, don't
5454		 * queue any further packets.
5455		 *
5456		 * This can also occur from the completion task because
5457		 * of packet loss; but as its serialised with this code,
5458		 * it won't "appear" half way through queuing packets.
5459		 */
5460		if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5461			break;
5462
5463		bf = ATH_TID_FIRST(tid);
5464		if (bf == NULL) {
5465			break;
5466		}
5467
5468		/*
5469		 * If the packet doesn't fall within the BAW (eg a NULL
5470		 * data frame), schedule it directly; continue.
5471		 */
5472		if (! bf->bf_state.bfs_dobaw) {
5473			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5474			    "%s: non-baw packet\n",
5475			    __func__);
5476			ATH_TID_REMOVE(tid, bf, bf_list);
5477
5478			if (bf->bf_state.bfs_nframes > 1)
5479				DPRINTF(sc, ATH_DEBUG_SW_TX,
5480				    "%s: aggr=%d, nframes=%d\n",
5481				    __func__,
5482				    bf->bf_state.bfs_aggr,
5483				    bf->bf_state.bfs_nframes);
5484
5485			/*
5486			 * This shouldn't happen - such frames shouldn't
5487			 * ever have been queued as an aggregate in the
5488			 * first place.  However, make sure the fields
5489			 * are correctly setup just to be totally sure.
5490			 */
5491			bf->bf_state.bfs_aggr = 0;
5492			bf->bf_state.bfs_nframes = 1;
5493
5494			/* Update CLRDMASK just before this frame is queued */
5495			ath_tx_update_clrdmask(sc, tid, bf);
5496
5497			ath_tx_do_ratelookup(sc, bf, tid->tid,
5498			    bf->bf_state.bfs_pktlen, false);
5499			ath_tx_calc_duration(sc, bf);
5500			ath_tx_calc_protection(sc, bf);
5501			ath_tx_set_rtscts(sc, bf);
5502			ath_tx_rate_fill_rcflags(sc, bf);
5503			ath_tx_setds(sc, bf);
5504			ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5505
5506			sc->sc_aggr_stats.aggr_nonbaw_pkt++;
5507
5508			/* Queue the packet; continue */
5509			goto queuepkt;
5510		}
5511
5512		TAILQ_INIT(&bf_q);
5513
5514		/*
5515		 * Loop over the swq to find out how long
5516		 * each packet is (up until 64k) and provide that
5517		 * to the rate control lookup.
5518		 */
5519		swq_pktbytes = ath_tx_tid_swq_depth_bytes(sc, an, tid);
5520		ath_tx_do_ratelookup(sc, bf, tid->tid, swq_pktbytes, true);
5521
5522		/*
5523		 * Note this only is used for the fragment paths and
5524		 * should really be rethought out if we want to do
5525		 * things like an RTS burst across >1 aggregate.
5526		 */
5527		ath_tx_calc_duration(sc, bf);
5528		ath_tx_calc_protection(sc, bf);
5529
5530		ath_tx_set_rtscts(sc, bf);
5531		ath_tx_rate_fill_rcflags(sc, bf);
5532
5533		status = ath_tx_form_aggr(sc, an, tid, &bf_q);
5534
5535		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5536		    "%s: ath_tx_form_aggr() status=%d\n", __func__, status);
5537
5538		/*
5539		 * No frames to be picked up - out of BAW
5540		 */
5541		if (TAILQ_EMPTY(&bf_q))
5542			break;
5543
5544		/*
5545		 * This assumes that the descriptor list in the ath_bufhead
5546		 * are already linked together via bf_next pointers.
5547		 */
5548		bf = TAILQ_FIRST(&bf_q);
5549
5550		if (status == ATH_AGGR_8K_LIMITED)
5551			sc->sc_aggr_stats.aggr_rts_aggr_limited++;
5552
5553		/*
5554		 * If it's the only frame send as non-aggregate
5555		 * assume that ath_tx_form_aggr() has checked
5556		 * whether it's in the BAW and added it appropriately.
5557		 */
5558		if (bf->bf_state.bfs_nframes == 1) {
5559			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5560			    "%s: single-frame aggregate\n", __func__);
5561
5562			/* Update CLRDMASK just before this frame is queued */
5563			ath_tx_update_clrdmask(sc, tid, bf);
5564
5565			bf->bf_state.bfs_aggr = 0;
5566			bf->bf_state.bfs_ndelim = 0;
5567			ath_tx_setds(sc, bf);
5568			ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5569			if (status == ATH_AGGR_BAW_CLOSED)
5570				sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
5571			else
5572				sc->sc_aggr_stats.aggr_single_pkt++;
5573		} else {
5574			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5575			    "%s: multi-frame aggregate: %d frames, "
5576			    "length %d\n",
5577			     __func__, bf->bf_state.bfs_nframes,
5578			    bf->bf_state.bfs_al);
5579			bf->bf_state.bfs_aggr = 1;
5580			sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
5581			sc->sc_aggr_stats.aggr_aggr_pkt++;
5582
5583			/* Update CLRDMASK just before this frame is queued */
5584			ath_tx_update_clrdmask(sc, tid, bf);
5585
5586			/*
5587			 * Calculate the duration/protection as required.
5588			 */
5589			ath_tx_calc_duration(sc, bf);
5590			ath_tx_calc_protection(sc, bf);
5591
5592			/*
5593			 * Update the rate and rtscts information based on the
5594			 * rate decision made by the rate control code;
5595			 * the first frame in the aggregate needs it.
5596			 */
5597			ath_tx_set_rtscts(sc, bf);
5598
5599			/*
5600			 * Setup the relevant descriptor fields
5601			 * for aggregation. The first descriptor
5602			 * already points to the rest in the chain.
5603			 */
5604			ath_tx_setds_11n(sc, bf);
5605		}
5606	queuepkt:
5607		/* Set completion handler, multi-frame aggregate or not */
5608		bf->bf_comp = ath_tx_aggr_comp;
5609
5610		if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
5611			DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__);
5612
5613		/*
5614		 * Update leak count and frame config if were leaking frames.
5615		 *
5616		 * XXX TODO: it should update all frames in an aggregate
5617		 * correctly!
5618		 */
5619		ath_tx_leak_count_update(sc, tid, bf);
5620
5621		/* Punt to txq */
5622		ath_tx_handoff(sc, txq, bf);
5623
5624		/* Track outstanding buffer count to hardware */
5625		/* aggregates are "one" buffer */
5626		tid->hwq_depth++;
5627
5628		/*
5629		 * Break out if ath_tx_form_aggr() indicated
5630		 * there can't be any further progress (eg BAW is full.)
5631		 * Checking for an empty txq is done above.
5632		 *
5633		 * XXX locking on txq here?
5634		 */
5635		/* XXX TXQ locking */
5636		if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5637		    (status == ATH_AGGR_BAW_CLOSED ||
5638		     status == ATH_AGGR_LEAK_CLOSED))
5639			break;
5640	}
5641}
5642
5643/*
5644 * Schedule some packets from the given node/TID to the hardware.
5645 *
5646 * XXX TODO: this routine doesn't enforce the maximum TXQ depth.
5647 * It just dumps frames into the TXQ.  We should limit how deep
5648 * the transmit queue can grow for frames dispatched to the given
5649 * TXQ.
5650 *
5651 * To avoid locking issues, either we need to own the TXQ lock
5652 * at this point, or we need to pass in the maximum frame count
5653 * from the caller.
5654 */
5655void
5656ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
5657    struct ath_tid *tid)
5658{
5659	struct ath_buf *bf;
5660	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5661
5662	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
5663	    __func__, an, tid->tid);
5664
5665	ATH_TX_LOCK_ASSERT(sc);
5666
5667	/* Check - is AMPDU pending or running? then print out something */
5668	if (ath_tx_ampdu_pending(sc, an, tid->tid))
5669		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n",
5670		    __func__, tid->tid);
5671	if (ath_tx_ampdu_running(sc, an, tid->tid))
5672		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n",
5673		    __func__, tid->tid);
5674
5675	for (;;) {
5676		/*
5677		 * If the upper layers have paused the TID, don't
5678		 * queue any further packets.
5679		 *
5680		 * XXX if we are leaking frames, make sure we decrement
5681		 * that counter _and_ we continue here.
5682		 */
5683		if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5684			break;
5685
5686		bf = ATH_TID_FIRST(tid);
5687		if (bf == NULL) {
5688			break;
5689		}
5690
5691		ATH_TID_REMOVE(tid, bf, bf_list);
5692
5693		/* Sanity check! */
5694		if (tid->tid != bf->bf_state.bfs_tid) {
5695			DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !="
5696			    " tid %d\n", __func__, bf->bf_state.bfs_tid,
5697			    tid->tid);
5698		}
5699		/* Normal completion handler */
5700		bf->bf_comp = ath_tx_normal_comp;
5701
5702		/*
5703		 * Override this for now, until the non-aggregate
5704		 * completion handler correctly handles software retransmits.
5705		 */
5706		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
5707
5708		/* Update CLRDMASK just before this frame is queued */
5709		ath_tx_update_clrdmask(sc, tid, bf);
5710
5711		/* Program descriptors + rate control */
5712		ath_tx_do_ratelookup(sc, bf, tid->tid,
5713		    bf->bf_state.bfs_pktlen, false);
5714		ath_tx_calc_duration(sc, bf);
5715		ath_tx_calc_protection(sc, bf);
5716		ath_tx_set_rtscts(sc, bf);
5717		ath_tx_rate_fill_rcflags(sc, bf);
5718		ath_tx_setds(sc, bf);
5719
5720		/*
5721		 * Update the current leak count if
5722		 * we're leaking frames; and set the
5723		 * MORE flag as appropriate.
5724		 */
5725		ath_tx_leak_count_update(sc, tid, bf);
5726
5727		/* Track outstanding buffer count to hardware */
5728		/* aggregates are "one" buffer */
5729		tid->hwq_depth++;
5730
5731		/* Punt to hardware or software txq */
5732		ath_tx_handoff(sc, txq, bf);
5733	}
5734}
5735
5736/*
5737 * Schedule some packets to the given hardware queue.
5738 *
5739 * This function walks the list of TIDs (ie, ath_node TIDs
5740 * with queued traffic) and attempts to schedule traffic
5741 * from them.
5742 *
5743 * TID scheduling is implemented as a FIFO, with TIDs being
5744 * added to the end of the queue after some frames have been
5745 * scheduled.
5746 */
5747void
5748ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
5749{
5750	struct ath_tid *tid, *next, *last;
5751
5752	ATH_TX_LOCK_ASSERT(sc);
5753
5754	/*
5755	 * For non-EDMA chips, aggr frames that have been built are
5756	 * in axq_aggr_depth, whether they've been scheduled or not.
5757	 * There's no FIFO, so txq->axq_depth is what's been scheduled
5758	 * to the hardware.
5759	 *
5760	 * For EDMA chips, we do it in two stages.  The existing code
5761	 * builds a list of frames to go to the hardware and the EDMA
5762	 * code turns it into a single entry to push into the FIFO.
5763	 * That way we don't take up one packet per FIFO slot.
5764	 * We do push one aggregate per FIFO slot though, just to keep
5765	 * things simple.
5766	 *
5767	 * The FIFO depth is what's in the hardware; the txq->axq_depth
5768	 * is what's been scheduled to the FIFO.
5769	 *
5770	 * fifo.axq_depth is the number of frames (or aggregates) pushed
5771	 *  into the EDMA FIFO.  For multi-frame lists, this is the number
5772	 *  of frames pushed in.
5773	 * axq_fifo_depth is the number of FIFO slots currently busy.
5774	 */
5775
5776	/* For EDMA and non-EDMA, check built/scheduled against aggr limit */
5777	if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr) {
5778		sc->sc_aggr_stats.aggr_sched_nopkt++;
5779		return;
5780	}
5781
5782	/*
5783	 * For non-EDMA chips, axq_depth is the "what's scheduled to
5784	 * the hardware list".  For EDMA it's "What's built for the hardware"
5785	 * and fifo.axq_depth is how many frames have been dispatched
5786	 * already to the hardware.
5787	 */
5788	if (txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_nonaggr) {
5789		sc->sc_aggr_stats.aggr_sched_nopkt++;
5790		return;
5791	}
5792
5793	last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
5794
5795	TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
5796		/*
5797		 * Suspend paused queues here; they'll be resumed
5798		 * once the addba completes or times out.
5799		 */
5800		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
5801		    __func__, tid->tid, tid->paused);
5802		ath_tx_tid_unsched(sc, tid);
5803		/*
5804		 * This node may be in power-save and we're leaking
5805		 * a frame; be careful.
5806		 */
5807		if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
5808			goto loop_done;
5809		}
5810		if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
5811			ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
5812		else
5813			ath_tx_tid_hw_queue_norm(sc, tid->an, tid);
5814
5815		/* Not empty? Re-schedule */
5816		if (tid->axq_depth != 0)
5817			ath_tx_tid_sched(sc, tid);
5818
5819		/*
5820		 * Give the software queue time to aggregate more
5821		 * packets.  If we aren't running aggregation then
5822		 * we should still limit the hardware queue depth.
5823		 */
5824		/* XXX TXQ locking */
5825		if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5826			break;
5827		}
5828		if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5829			break;
5830		}
5831loop_done:
5832		/*
5833		 * If this was the last entry on the original list, stop.
5834		 * Otherwise nodes that have been rescheduled onto the end
5835		 * of the TID FIFO list will just keep being rescheduled.
5836		 *
5837		 * XXX What should we do about nodes that were paused
5838		 * but are pending a leaking frame in response to a ps-poll?
5839		 * They'll be put at the front of the list; so they'll
5840		 * prematurely trigger this condition! Ew.
5841		 */
5842		if (tid == last)
5843			break;
5844	}
5845}
5846
5847/*
5848 * TX addba handling
5849 */
5850
5851/*
5852 * Return net80211 TID struct pointer, or NULL for none
5853 */
5854struct ieee80211_tx_ampdu *
5855ath_tx_get_tx_tid(struct ath_node *an, int tid)
5856{
5857	struct ieee80211_node *ni = &an->an_node;
5858	struct ieee80211_tx_ampdu *tap;
5859
5860	if (tid == IEEE80211_NONQOS_TID)
5861		return NULL;
5862
5863	tap = &ni->ni_tx_ampdu[tid];
5864	return tap;
5865}
5866
5867/*
5868 * Is AMPDU-TX running?
5869 */
5870static int
5871ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
5872{
5873	struct ieee80211_tx_ampdu *tap;
5874
5875	if (tid == IEEE80211_NONQOS_TID)
5876		return 0;
5877
5878	tap = ath_tx_get_tx_tid(an, tid);
5879	if (tap == NULL)
5880		return 0;	/* Not valid; default to not running */
5881
5882	return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
5883}
5884
5885/*
5886 * Is AMPDU-TX negotiation pending?
5887 */
5888static int
5889ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
5890{
5891	struct ieee80211_tx_ampdu *tap;
5892
5893	if (tid == IEEE80211_NONQOS_TID)
5894		return 0;
5895
5896	tap = ath_tx_get_tx_tid(an, tid);
5897	if (tap == NULL)
5898		return 0;	/* Not valid; default to not pending */
5899
5900	return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
5901}
5902
5903/*
5904 * Is AMPDU-TX pending for the given TID?
5905 */
5906
5907/*
5908 * Method to handle sending an ADDBA request.
5909 *
5910 * We tap this so the relevant flags can be set to pause the TID
5911 * whilst waiting for the response.
5912 *
5913 * XXX there's no timeout handler we can override?
5914 */
5915int
5916ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5917    int dialogtoken, int baparamset, int batimeout)
5918{
5919	struct ath_softc *sc = ni->ni_ic->ic_softc;
5920	int tid = tap->txa_tid;
5921	struct ath_node *an = ATH_NODE(ni);
5922	struct ath_tid *atid = &an->an_tid[tid];
5923
5924	/*
5925	 * XXX danger Will Robinson!
5926	 *
5927	 * Although the taskqueue may be running and scheduling some more
5928	 * packets, these should all be _before_ the addba sequence number.
5929	 * However, net80211 will keep self-assigning sequence numbers
5930	 * until addba has been negotiated.
5931	 *
5932	 * In the past, these packets would be "paused" (which still works
5933	 * fine, as they're being scheduled to the driver in the same
5934	 * serialised method which is calling the addba request routine)
5935	 * and when the aggregation session begins, they'll be dequeued
5936	 * as aggregate packets and added to the BAW. However, now there's
5937	 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
5938	 * packets. Thus they never get included in the BAW tracking and
5939	 * this can cause the initial burst of packets after the addba
5940	 * negotiation to "hang", as they quickly fall outside the BAW.
5941	 *
5942	 * The "eventual" solution should be to tag these packets with
5943	 * dobaw. Although net80211 has given us a sequence number,
5944	 * it'll be "after" the left edge of the BAW and thus it'll
5945	 * fall within it.
5946	 */
5947	ATH_TX_LOCK(sc);
5948	/*
5949	 * This is a bit annoying.  Until net80211 HT code inherits some
5950	 * (any) locking, we may have this called in parallel BUT only
5951	 * one response/timeout will be called.  Grr.
5952	 */
5953	if (atid->addba_tx_pending == 0) {
5954		ath_tx_tid_pause(sc, atid);
5955		atid->addba_tx_pending = 1;
5956	}
5957	ATH_TX_UNLOCK(sc);
5958
5959	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5960	    "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
5961	    __func__,
5962	    ni->ni_macaddr,
5963	    ":",
5964	    dialogtoken, baparamset, batimeout);
5965	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5966	    "%s: txa_start=%d, ni_txseqs=%d\n",
5967	    __func__, tap->txa_start, ni->ni_txseqs[tid]);
5968
5969	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
5970	    batimeout);
5971}
5972
5973/*
5974 * Handle an ADDBA response.
5975 *
5976 * We unpause the queue so TX'ing can resume.
5977 *
5978 * Any packets TX'ed from this point should be "aggregate" (whether
5979 * aggregate or not) so the BAW is updated.
5980 *
5981 * Note! net80211 keeps self-assigning sequence numbers until
5982 * ampdu is negotiated. This means the initially-negotiated BAW left
5983 * edge won't match the ni->ni_txseq.
5984 *
5985 * So, being very dirty, the BAW left edge is "slid" here to match
5986 * ni->ni_txseq.
5987 *
5988 * What likely SHOULD happen is that all packets subsequent to the
5989 * addba request should be tagged as aggregate and queued as non-aggregate
5990 * frames; thus updating the BAW. For now though, I'll just slide the
5991 * window.
5992 */
5993int
5994ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5995    int status, int code, int batimeout)
5996{
5997	struct ath_softc *sc = ni->ni_ic->ic_softc;
5998	int tid = tap->txa_tid;
5999	struct ath_node *an = ATH_NODE(ni);
6000	struct ath_tid *atid = &an->an_tid[tid];
6001	int r;
6002
6003	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
6004	    "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__,
6005	    ni->ni_macaddr,
6006	    ":",
6007	    status, code, batimeout);
6008
6009	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
6010	    "%s: txa_start=%d, ni_txseqs=%d\n",
6011	    __func__, tap->txa_start, ni->ni_txseqs[tid]);
6012
6013	/*
6014	 * Call this first, so the interface flags get updated
6015	 * before the TID is unpaused. Otherwise a race condition
6016	 * exists where the unpaused TID still doesn't yet have
6017	 * IEEE80211_AGGR_RUNNING set.
6018	 */
6019	r = sc->sc_addba_response(ni, tap, status, code, batimeout);
6020
6021	ATH_TX_LOCK(sc);
6022	atid->addba_tx_pending = 0;
6023	/*
6024	 * XXX dirty!
6025	 * Slide the BAW left edge to wherever net80211 left it for us.
6026	 * Read above for more information.
6027	 */
6028	tap->txa_start = ni->ni_txseqs[tid];
6029	ath_tx_tid_resume(sc, atid);
6030	ATH_TX_UNLOCK(sc);
6031	return r;
6032}
6033
6034/*
6035 * Stop ADDBA on a queue.
6036 *
6037 * This can be called whilst BAR TX is currently active on the queue,
6038 * so make sure this is unblocked before continuing.
6039 */
6040void
6041ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
6042{
6043	struct ath_softc *sc = ni->ni_ic->ic_softc;
6044	int tid = tap->txa_tid;
6045	struct ath_node *an = ATH_NODE(ni);
6046	struct ath_tid *atid = &an->an_tid[tid];
6047	ath_bufhead bf_cq;
6048	struct ath_buf *bf;
6049
6050	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n",
6051	    __func__,
6052	    ni->ni_macaddr,
6053	    ":");
6054
6055	/*
6056	 * Pause TID traffic early, so there aren't any races
6057	 * Unblock the pending BAR held traffic, if it's currently paused.
6058	 */
6059	ATH_TX_LOCK(sc);
6060	ath_tx_tid_pause(sc, atid);
6061	if (atid->bar_wait) {
6062		/*
6063		 * bar_unsuspend() expects bar_tx == 1, as it should be
6064		 * called from the TX completion path.  This quietens
6065		 * the warning.  It's cleared for us anyway.
6066		 */
6067		atid->bar_tx = 1;
6068		ath_tx_tid_bar_unsuspend(sc, atid);
6069	}
6070	ATH_TX_UNLOCK(sc);
6071
6072	/* There's no need to hold the TXQ lock here */
6073	sc->sc_addba_stop(ni, tap);
6074
6075	/*
6076	 * ath_tx_tid_cleanup will resume the TID if possible, otherwise
6077	 * it'll set the cleanup flag, and it'll be unpaused once
6078	 * things have been cleaned up.
6079	 */
6080	TAILQ_INIT(&bf_cq);
6081	ATH_TX_LOCK(sc);
6082
6083	/*
6084	 * In case there's a followup call to this, only call it
6085	 * if we don't have a cleanup in progress.
6086	 *
6087	 * Since we've paused the queue above, we need to make
6088	 * sure we unpause if there's already a cleanup in
6089	 * progress - it means something else is also doing
6090	 * this stuff, so we don't need to also keep it paused.
6091	 */
6092	if (atid->cleanup_inprogress) {
6093		ath_tx_tid_resume(sc, atid);
6094	} else {
6095		ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
6096		/*
6097		 * Unpause the TID if no cleanup is required.
6098		 */
6099		if (! atid->cleanup_inprogress)
6100			ath_tx_tid_resume(sc, atid);
6101	}
6102	ATH_TX_UNLOCK(sc);
6103
6104	/* Handle completing frames and fail them */
6105	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
6106		TAILQ_REMOVE(&bf_cq, bf, bf_list);
6107		ath_tx_default_comp(sc, bf, 1);
6108	}
6109
6110}
6111
6112/*
6113 * Handle a node reassociation.
6114 *
6115 * We may have a bunch of frames queued to the hardware; those need
6116 * to be marked as cleanup.
6117 */
6118void
6119ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
6120{
6121	struct ath_tid *tid;
6122	int i;
6123	ath_bufhead bf_cq;
6124	struct ath_buf *bf;
6125
6126	TAILQ_INIT(&bf_cq);
6127
6128	ATH_TX_UNLOCK_ASSERT(sc);
6129
6130	ATH_TX_LOCK(sc);
6131	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
6132		tid = &an->an_tid[i];
6133		if (tid->hwq_depth == 0)
6134			continue;
6135		DPRINTF(sc, ATH_DEBUG_NODE,
6136		    "%s: %6D: TID %d: cleaning up TID\n",
6137		    __func__,
6138		    an->an_node.ni_macaddr,
6139		    ":",
6140		    i);
6141		/*
6142		 * In case there's a followup call to this, only call it
6143		 * if we don't have a cleanup in progress.
6144		 */
6145		if (! tid->cleanup_inprogress) {
6146			ath_tx_tid_pause(sc, tid);
6147			ath_tx_tid_cleanup(sc, an, i, &bf_cq);
6148			/*
6149			 * Unpause the TID if no cleanup is required.
6150			 */
6151			if (! tid->cleanup_inprogress)
6152				ath_tx_tid_resume(sc, tid);
6153		}
6154	}
6155	ATH_TX_UNLOCK(sc);
6156
6157	/* Handle completing frames and fail them */
6158	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
6159		TAILQ_REMOVE(&bf_cq, bf, bf_list);
6160		ath_tx_default_comp(sc, bf, 1);
6161	}
6162}
6163
6164/*
6165 * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
6166 * it simply tears down the aggregation session. Ew.
6167 *
6168 * It however will call ieee80211_ampdu_stop() which will call
6169 * ic->ic_addba_stop().
6170 *
6171 * XXX This uses a hard-coded max BAR count value; the whole
6172 * XXX BAR TX success or failure should be better handled!
6173 */
6174void
6175ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
6176    int status)
6177{
6178	struct ath_softc *sc = ni->ni_ic->ic_softc;
6179	int tid = tap->txa_tid;
6180	struct ath_node *an = ATH_NODE(ni);
6181	struct ath_tid *atid = &an->an_tid[tid];
6182	int attempts = tap->txa_attempts;
6183	int old_txa_start;
6184
6185	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
6186	    "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n",
6187	    __func__,
6188	    ni->ni_macaddr,
6189	    ":",
6190	    tap->txa_tid,
6191	    atid->tid,
6192	    status,
6193	    attempts,
6194	    tap->txa_start,
6195	    tap->txa_seqpending);
6196
6197	/* Note: This may update the BAW details */
6198	/*
6199	 * XXX What if this does slide the BAW along? We need to somehow
6200	 * XXX either fix things when it does happen, or prevent the
6201	 * XXX seqpending value to be anything other than exactly what
6202	 * XXX the hell we want!
6203	 *
6204	 * XXX So for now, how I do this inside the TX lock for now
6205	 * XXX and just correct it afterwards? The below condition should
6206	 * XXX never happen and if it does I need to fix all kinds of things.
6207	 */
6208	ATH_TX_LOCK(sc);
6209	old_txa_start = tap->txa_start;
6210	sc->sc_bar_response(ni, tap, status);
6211	if (tap->txa_start != old_txa_start) {
6212		device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",
6213		    __func__,
6214		    tid,
6215		    tap->txa_start,
6216		    old_txa_start);
6217	}
6218	tap->txa_start = old_txa_start;
6219	ATH_TX_UNLOCK(sc);
6220
6221	/* Unpause the TID */
6222	/*
6223	 * XXX if this is attempt=50, the TID will be downgraded
6224	 * XXX to a non-aggregate session. So we must unpause the
6225	 * XXX TID here or it'll never be done.
6226	 *
6227	 * Also, don't call it if bar_tx/bar_wait are 0; something
6228	 * has beaten us to the punch? (XXX figure out what?)
6229	 */
6230	if (status == 0 || attempts == 50) {
6231		ATH_TX_LOCK(sc);
6232		if (atid->bar_tx == 0 || atid->bar_wait == 0)
6233			DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
6234			    "%s: huh? bar_tx=%d, bar_wait=%d\n",
6235			    __func__,
6236			    atid->bar_tx, atid->bar_wait);
6237		else
6238			ath_tx_tid_bar_unsuspend(sc, atid);
6239		ATH_TX_UNLOCK(sc);
6240	}
6241}
6242
6243/*
6244 * This is called whenever the pending ADDBA request times out.
6245 * Unpause and reschedule the TID.
6246 */
6247void
6248ath_addba_response_timeout(struct ieee80211_node *ni,
6249    struct ieee80211_tx_ampdu *tap)
6250{
6251	struct ath_softc *sc = ni->ni_ic->ic_softc;
6252	int tid = tap->txa_tid;
6253	struct ath_node *an = ATH_NODE(ni);
6254	struct ath_tid *atid = &an->an_tid[tid];
6255
6256	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
6257	    "%s: %6D: TID=%d, called; resuming\n",
6258	    __func__,
6259	    ni->ni_macaddr,
6260	    ":",
6261	    tid);
6262
6263	ATH_TX_LOCK(sc);
6264	atid->addba_tx_pending = 0;
6265	ATH_TX_UNLOCK(sc);
6266
6267	/* Note: This updates the aggregate state to (again) pending */
6268	sc->sc_addba_response_timeout(ni, tap);
6269
6270	/* Unpause the TID; which reschedules it */
6271	ATH_TX_LOCK(sc);
6272	ath_tx_tid_resume(sc, atid);
6273	ATH_TX_UNLOCK(sc);
6274}
6275
6276/*
6277 * Check if a node is asleep or not.
6278 */
6279int
6280ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
6281{
6282
6283	ATH_TX_LOCK_ASSERT(sc);
6284
6285	return (an->an_is_powersave);
6286}
6287
6288/*
6289 * Mark a node as currently "in powersaving."
6290 * This suspends all traffic on the node.
6291 *
6292 * This must be called with the node/tx locks free.
6293 *
6294 * XXX TODO: the locking silliness below is due to how the node
6295 * locking currently works.  Right now, the node lock is grabbed
6296 * to do rate control lookups and these are done with the TX
6297 * queue lock held.  This means the node lock can't be grabbed
6298 * first here or a LOR will occur.
6299 *
6300 * Eventually (hopefully!) the TX path code will only grab
6301 * the TXQ lock when transmitting and the ath_node lock when
6302 * doing node/TID operations.  There are other complications -
6303 * the sched/unsched operations involve walking the per-txq
6304 * 'active tid' list and this requires both locks to be held.
6305 */
6306void
6307ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
6308{
6309	struct ath_tid *atid;
6310	struct ath_txq *txq;
6311	int tid;
6312
6313	ATH_TX_UNLOCK_ASSERT(sc);
6314
6315	/* Suspend all traffic on the node */
6316	ATH_TX_LOCK(sc);
6317
6318	if (an->an_is_powersave) {
6319		DPRINTF(sc, ATH_DEBUG_XMIT,
6320		    "%s: %6D: node was already asleep!\n",
6321		    __func__, an->an_node.ni_macaddr, ":");
6322		ATH_TX_UNLOCK(sc);
6323		return;
6324	}
6325
6326	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
6327		atid = &an->an_tid[tid];
6328		txq = sc->sc_ac2q[atid->ac];
6329
6330		ath_tx_tid_pause(sc, atid);
6331	}
6332
6333	/* Mark node as in powersaving */
6334	an->an_is_powersave = 1;
6335
6336	ATH_TX_UNLOCK(sc);
6337}
6338
6339/*
6340 * Mark a node as currently "awake."
6341 * This resumes all traffic to the node.
6342 */
6343void
6344ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
6345{
6346	struct ath_tid *atid;
6347	struct ath_txq *txq;
6348	int tid;
6349
6350	ATH_TX_UNLOCK_ASSERT(sc);
6351
6352	ATH_TX_LOCK(sc);
6353
6354	/* !? */
6355	if (an->an_is_powersave == 0) {
6356		ATH_TX_UNLOCK(sc);
6357		DPRINTF(sc, ATH_DEBUG_XMIT,
6358		    "%s: an=%p: node was already awake\n",
6359		    __func__, an);
6360		return;
6361	}
6362
6363	/* Mark node as awake */
6364	an->an_is_powersave = 0;
6365	/*
6366	 * Clear any pending leaked frame requests
6367	 */
6368	an->an_leak_count = 0;
6369
6370	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
6371		atid = &an->an_tid[tid];
6372		txq = sc->sc_ac2q[atid->ac];
6373
6374		ath_tx_tid_resume(sc, atid);
6375	}
6376	ATH_TX_UNLOCK(sc);
6377}
6378
6379static int
6380ath_legacy_dma_txsetup(struct ath_softc *sc)
6381{
6382
6383	/* nothing new needed */
6384	return (0);
6385}
6386
6387static int
6388ath_legacy_dma_txteardown(struct ath_softc *sc)
6389{
6390
6391	/* nothing new needed */
6392	return (0);
6393}
6394
6395void
6396ath_xmit_setup_legacy(struct ath_softc *sc)
6397{
6398	/*
6399	 * For now, just set the descriptor length to sizeof(ath_desc);
6400	 * worry about extracting the real length out of the HAL later.
6401	 */
6402	sc->sc_tx_desclen = sizeof(struct ath_desc);
6403	sc->sc_tx_statuslen = sizeof(struct ath_desc);
6404	sc->sc_tx_nmaps = 1;	/* only one buffer per TX desc */
6405
6406	sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
6407	sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
6408	sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;
6409
6410	sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
6411	sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;
6412
6413	sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
6414}
6415