Deleted Added
full compact
if_ath_tx.c (227360) if_ath_tx.c (227364)
1/*-
2 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 14 unchanged lines hidden (view full) ---

23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 */
29
30#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 14 unchanged lines hidden (view full) ---

23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath_tx.c 227360 2011-11-08 21:25:36Z adrian $");
31__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath_tx.c 227364 2011-11-08 22:43:13Z adrian $");
32
33/*
34 * Driver for the Atheros Wireless LAN controller.
35 *
36 * This software is derived from work of Atsushi Onoe; his contribution
37 * is greatly appreciated.
38 */
39

--- 32 unchanged lines hidden (view full) ---

72#include <net80211/ieee80211_var.h>
73#include <net80211/ieee80211_regdomain.h>
74#ifdef IEEE80211_SUPPORT_SUPERG
75#include <net80211/ieee80211_superg.h>
76#endif
77#ifdef IEEE80211_SUPPORT_TDMA
78#include <net80211/ieee80211_tdma.h>
79#endif
32
33/*
34 * Driver for the Atheros Wireless LAN controller.
35 *
36 * This software is derived from work of Atsushi Onoe; his contribution
37 * is greatly appreciated.
38 */
39

--- 32 unchanged lines hidden (view full) ---

72#include <net80211/ieee80211_var.h>
73#include <net80211/ieee80211_regdomain.h>
74#ifdef IEEE80211_SUPPORT_SUPERG
75#include <net80211/ieee80211_superg.h>
76#endif
77#ifdef IEEE80211_SUPPORT_TDMA
78#include <net80211/ieee80211_tdma.h>
79#endif
80#include <net80211/ieee80211_ht.h>
80
81#include <net/bpf.h>
82
83#ifdef INET
84#include <netinet/in.h>
85#include <netinet/if_ether.h>
86#endif
87

--- 7 unchanged lines hidden (view full) ---

95#include <dev/ath/ath_tx99/ath_tx99.h>
96#endif
97
98#include <dev/ath/if_ath_misc.h>
99#include <dev/ath/if_ath_tx.h>
100#include <dev/ath/if_ath_tx_ht.h>
101
102/*
81
82#include <net/bpf.h>
83
84#ifdef INET
85#include <netinet/in.h>
86#include <netinet/if_ether.h>
87#endif
88

--- 7 unchanged lines hidden (view full) ---

96#include <dev/ath/ath_tx99/ath_tx99.h>
97#endif
98
99#include <dev/ath/if_ath_misc.h>
100#include <dev/ath/if_ath_tx.h>
101#include <dev/ath/if_ath_tx_ht.h>
102
103/*
104 * How many retries to perform in software
105 */
106#define SWMAX_RETRIES 10
107
108static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
109 int tid);
110static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
111 int tid);
112static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
113 struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
114static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
115 struct ieee80211_node *ni, struct mbuf *m0, int *tid);
116
117/*
103 * Whether to use the 11n rate scenario functions or not
104 */
105static inline int
106ath_tx_is_11n(struct ath_softc *sc)
107{
108 return (sc->sc_ah->ah_magic == 0x20065416);
109}
110
118 * Whether to use the 11n rate scenario functions or not
119 */
120static inline int
121ath_tx_is_11n(struct ath_softc *sc)
122{
123 return (sc->sc_ah->ah_magic == 0x20065416);
124}
125
126/*
127 * Obtain the current TID from the given frame.
128 *
129 * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID.)
130 * This has implications for which AC/priority the packet is placed
131 * in.
132 */
133static int
134ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
135{
136 const struct ieee80211_frame *wh;
137 int pri = M_WME_GETAC(m0);
138
139 wh = mtod(m0, const struct ieee80211_frame *);
140 if (! IEEE80211_QOS_HAS_SEQ(wh))
141 return IEEE80211_NONQOS_TID;
142 else
143 return WME_AC_TO_TID(pri);
144}
145
146/*
147 * Determine what the correct AC queue for the given frame
148 * should be.
149 *
150 * This code assumes that the TIDs map consistently to
151 * the underlying hardware (or software) ath_txq.
152 * Since the sender may try to set an AC which is
153 * arbitrary, non-QoS TIDs may end up being put on
154 * completely different ACs. There's no way to put a
155 * TID into multiple ath_txq's for scheduling, so
156 * for now we override the AC/TXQ selection and set
157 * non-QOS TID frames into the BE queue.
158 *
159 * This may be completely incorrect - specifically,
160 * some management frames may end up out of order
161 * compared to the QoS traffic they're controlling.
162 * I'll look into this later.
163 */
164static int
165ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
166{
167 const struct ieee80211_frame *wh;
168 int pri = M_WME_GETAC(m0);
169 wh = mtod(m0, const struct ieee80211_frame *);
170 if (IEEE80211_QOS_HAS_SEQ(wh))
171 return pri;
172
173 return WME_AC_BE;
174}
175
111void
112ath_txfrag_cleanup(struct ath_softc *sc,
113 ath_bufhead *frags, struct ieee80211_node *ni)
114{
115 struct ath_buf *bf, *next;
116
117 ATH_TXBUF_LOCK_ASSERT(sc);
118

--- 105 unchanged lines hidden (view full) ---

224
225 return 0;
226}
227
228/*
229 * Chain together segments+descriptors for a non-11n frame.
230 */
231static void
176void
177ath_txfrag_cleanup(struct ath_softc *sc,
178 ath_bufhead *frags, struct ieee80211_node *ni)
179{
180 struct ath_buf *bf, *next;
181
182 ATH_TXBUF_LOCK_ASSERT(sc);
183

--- 105 unchanged lines hidden (view full) ---

289
290 return 0;
291}
292
293/*
294 * Chain together segments+descriptors for a non-11n frame.
295 */
296static void
232ath_tx_chaindesclist(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
297ath_tx_chaindesclist(struct ath_softc *sc, struct ath_buf *bf)
233{
234 struct ath_hal *ah = sc->sc_ah;
235 struct ath_desc *ds, *ds0;
236 int i;
237
238 /*
239 * Fillin the remainder of the descriptor info.
240 */

--- 13 unchanged lines hidden (view full) ---

254 DPRINTF(sc, ATH_DEBUG_XMIT,
255 "%s: %d: %08x %08x %08x %08x %08x %08x\n",
256 __func__, i, ds->ds_link, ds->ds_data,
257 ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]);
258 bf->bf_lastds = ds;
259 }
260}
261
298{
299 struct ath_hal *ah = sc->sc_ah;
300 struct ath_desc *ds, *ds0;
301 int i;
302
303 /*
304 * Fillin the remainder of the descriptor info.
305 */

--- 13 unchanged lines hidden (view full) ---

319 DPRINTF(sc, ATH_DEBUG_XMIT,
320 "%s: %d: %08x %08x %08x %08x %08x %08x\n",
321 __func__, i, ds->ds_link, ds->ds_data,
322 ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]);
323 bf->bf_lastds = ds;
324 }
325}
326
327/*
328 * Fill in the descriptor list for a aggregate subframe.
329 *
330 * The subframe is returned with the ds_link field in the last subframe
331 * pointing to 0.
332 */
262static void
333static void
263ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
334ath_tx_chaindesclist_subframe(struct ath_softc *sc, struct ath_buf *bf)
264{
265 struct ath_hal *ah = sc->sc_ah;
335{
336 struct ath_hal *ah = sc->sc_ah;
337 struct ath_desc *ds, *ds0;
338 int i;
266
339
267 /* Fill in the details in the descriptor list */
268 ath_tx_chaindesclist(sc, txq, bf);
340 ds0 = ds = bf->bf_desc;
269
270 /*
341
342 /*
343 * There's no need to call ath_hal_setupfirsttxdesc here;
344 * That's only going to occur for the first frame in an aggregate.
345 */
346 for (i = 0; i < bf->bf_nseg; i++, ds++) {
347 ds->ds_data = bf->bf_segs[i].ds_addr;
348 if (i == bf->bf_nseg - 1)
349 ds->ds_link = 0;
350 else
351 ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1);
352
353 /*
354 * This performs the setup for an aggregate frame.
355 * This includes enabling the aggregate flags if needed.
356 */
357 ath_hal_chaintxdesc(ah, ds,
358 bf->bf_state.bfs_pktlen,
359 bf->bf_state.bfs_hdrlen,
360 HAL_PKT_TYPE_AMPDU, /* forces aggregate bits to be set */
361 bf->bf_state.bfs_keyix,
362 0, /* cipher, calculated from keyix */
363 bf->bf_state.bfs_ndelim,
364 bf->bf_segs[i].ds_len, /* segment length */
365 i == 0, /* first segment */
366 i == bf->bf_nseg - 1 /* last segment */
367 );
368
369 DPRINTF(sc, ATH_DEBUG_XMIT,
370 "%s: %d: %08x %08x %08x %08x %08x %08x\n",
371 __func__, i, ds->ds_link, ds->ds_data,
372 ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]);
373 bf->bf_lastds = ds;
374 }
375}
376
377/*
378 * Setup segments+descriptors for an 11n aggregate.
379 * bf_first is the first buffer in the aggregate.
380 * The descriptor list must already been linked together using
381 * bf->bf_next.
382 */
383static void
384ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
385{
386 struct ath_buf *bf, *bf_prev = NULL;
387
388 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
389 __func__, bf_first->bf_state.bfs_nframes,
390 bf_first->bf_state.bfs_al);
391
392 /*
393 * Setup all descriptors of all subframes.
394 */
395 bf = bf_first;
396 while (bf != NULL) {
397 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
398 "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
399 __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
400 SEQNO(bf->bf_state.bfs_seqno));
401
402 /* Sub-frame setup */
403 ath_tx_chaindesclist_subframe(sc, bf);
404
405 /*
406 * Link the last descriptor of the previous frame
407 * to the beginning descriptor of this frame.
408 */
409 if (bf_prev != NULL)
410 bf_prev->bf_lastds->ds_link = bf->bf_daddr;
411
412 /* Save a copy so we can link the next descriptor in */
413 bf_prev = bf;
414 bf = bf->bf_next;
415 }
416
417 /*
418 * Setup first descriptor of first frame.
419 * chaintxdesc() overwrites the descriptor entries;
420 * setupfirsttxdesc() merges in things.
421 * Otherwise various fields aren't set correctly (eg flags).
422 */
423 ath_hal_setupfirsttxdesc(sc->sc_ah,
424 bf_first->bf_desc,
425 bf_first->bf_state.bfs_al,
426 bf_first->bf_state.bfs_flags | HAL_TXDESC_INTREQ,
427 bf_first->bf_state.bfs_txpower,
428 bf_first->bf_state.bfs_txrate0,
429 bf_first->bf_state.bfs_try0,
430 bf_first->bf_state.bfs_txantenna,
431 bf_first->bf_state.bfs_ctsrate,
432 bf_first->bf_state.bfs_ctsduration);
433
434 /*
435 * Setup the last descriptor in the list.
436 * bf_prev points to the last; bf is NULL here.
437 */
438 ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_desc, bf_first->bf_desc);
439
440 /*
441 * Set the first descriptor bf_lastds field to point to
442 * the last descriptor in the last subframe, that's where
443 * the status update will occur.
444 */
445 bf_first->bf_lastds = bf_prev->bf_lastds;
446
447 /*
448 * And bf_last in the first descriptor points to the end of
449 * the aggregate list.
450 */
451 bf_first->bf_last = bf_prev;
452
453 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
454}
455
456static void
457ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
458 struct ath_buf *bf)
459{
460 ATH_TXQ_LOCK_ASSERT(txq);
461 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
462 ("%s: busy status 0x%x", __func__, bf->bf_flags));
463 if (txq->axq_link != NULL) {
464 struct ath_buf *last = ATH_TXQ_LAST(txq, axq_q_s);
465 struct ieee80211_frame *wh;
466
467 /* mark previous frame */
468 wh = mtod(last->bf_m, struct ieee80211_frame *);
469 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
470 bus_dmamap_sync(sc->sc_dmat, last->bf_dmamap,
471 BUS_DMASYNC_PREWRITE);
472
473 /* link descriptor */
474 *txq->axq_link = bf->bf_daddr;
475 }
476 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
477 txq->axq_link = &bf->bf_lastds->ds_link;
478}
479
480
481
482/*
483 * Hand-off packet to a hardware queue.
484 */
485static void
486ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
487{
488 struct ath_hal *ah = sc->sc_ah;
489
490 /*
271 * Insert the frame on the outbound list and pass it on
272 * to the hardware. Multicast frames buffered for power
273 * save stations and transmit from the CAB queue are stored
274 * on a s/w only queue and loaded on to the CAB queue in
275 * the SWBA handler since frames only go out on DTIM and
276 * to avoid possible races.
277 */
491 * Insert the frame on the outbound list and pass it on
492 * to the hardware. Multicast frames buffered for power
493 * save stations and transmit from the CAB queue are stored
494 * on a s/w only queue and loaded on to the CAB queue in
495 * the SWBA handler since frames only go out on DTIM and
496 * to avoid possible races.
497 */
278 ATH_TXQ_LOCK(txq);
498 ATH_TXQ_LOCK_ASSERT(txq);
279 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
499 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
280 ("busy status 0x%x", bf->bf_flags));
281 if (txq->axq_qnum != ATH_TXQ_SWQ) {
500 ("%s: busy status 0x%x", __func__, bf->bf_flags));
501 KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
502 ("ath_tx_handoff_hw called for mcast queue"));
503
504 /* For now, so not to generate whitespace diffs */
505 if (1) {
282#ifdef IEEE80211_SUPPORT_TDMA
283 int qbusy;
284
285 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
286 qbusy = ath_hal_txqenabled(ah, txq->axq_qnum);
287 if (txq->axq_link == NULL) {
288 /*
289 * Be careful writing the address to TXDP. If

--- 59 unchanged lines hidden (view full) ---

349 txq->axq_qnum, txq->axq_link,
350 (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth);
351 }
352#endif /* IEEE80211_SUPPORT_TDMA */
353 if (bf->bf_state.bfs_aggr)
354 txq->axq_aggr_depth++;
355 txq->axq_link = &bf->bf_lastds->ds_link;
356 ath_hal_txstart(ah, txq->axq_qnum);
506#ifdef IEEE80211_SUPPORT_TDMA
507 int qbusy;
508
509 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
510 qbusy = ath_hal_txqenabled(ah, txq->axq_qnum);
511 if (txq->axq_link == NULL) {
512 /*
513 * Be careful writing the address to TXDP. If

--- 59 unchanged lines hidden (view full) ---

573 txq->axq_qnum, txq->axq_link,
574 (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth);
575 }
576#endif /* IEEE80211_SUPPORT_TDMA */
577 if (bf->bf_state.bfs_aggr)
578 txq->axq_aggr_depth++;
579 txq->axq_link = &bf->bf_lastds->ds_link;
580 ath_hal_txstart(ah, txq->axq_qnum);
357 } else {
358 if (txq->axq_link != NULL) {
359 struct ath_buf *last = ATH_TXQ_LAST(txq, axq_q_s);
360 struct ieee80211_frame *wh;
581 }
582}
361
583
362 /* mark previous frame */
363 wh = mtod(last->bf_m, struct ieee80211_frame *);
364 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
365 bus_dmamap_sync(sc->sc_dmat, last->bf_dmamap,
366 BUS_DMASYNC_PREWRITE);
584/*
585 * Restart TX DMA for the given TXQ.
586 *
587 * This must be called whether the queue is empty or not.
588 */
589void
590ath_txq_restart_dma(struct ath_softc *sc, struct ath_txq *txq)
591{
592 struct ath_hal *ah = sc->sc_ah;
593 struct ath_buf *bf;
367
594
368 /* link descriptor */
369 *txq->axq_link = bf->bf_daddr;
370 }
371 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
372 txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
373 }
374 ATH_TXQ_UNLOCK(txq);
595 ATH_TXQ_LOCK_ASSERT(txq);
596
597 /* This is always going to be cleared, empty or not */
598 txq->axq_flags &= ~ATH_TXQ_PUTPENDING;
599
600 bf = TAILQ_FIRST(&txq->axq_q);
601 if (bf == NULL)
602 return;
603
604 ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
605 txq->axq_link = &bf->bf_lastds->ds_link;
606 ath_hal_txstart(ah, txq->axq_qnum);
375}
376
607}
608
609/*
610 * Hand off a packet to the hardware (or mcast queue.)
611 *
612 * The relevant hardware txq should be locked.
613 */
614static void
615ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
616{
617 ATH_TXQ_LOCK_ASSERT(txq);
618
619 if (txq->axq_qnum == ATH_TXQ_SWQ)
620 ath_tx_handoff_mcast(sc, txq, bf);
621 else
622 ath_tx_handoff_hw(sc, txq, bf);
623}
624
377static int
378ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
379 struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen, int *keyix)
380{
381 if (iswep) {
382 const struct ieee80211_cipher *cip;
383 struct ieee80211_key *k;
384

--- 37 unchanged lines hidden (view full) ---

422 } else
423 (*keyix) = HAL_TXKEYIX_INVALID;
424
425 return 1;
426}
427
428static uint8_t
429ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
625static int
626ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
627 struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen, int *keyix)
628{
629 if (iswep) {
630 const struct ieee80211_cipher *cip;
631 struct ieee80211_key *k;
632

--- 37 unchanged lines hidden (view full) ---

670 } else
671 (*keyix) = HAL_TXKEYIX_INVALID;
672
673 return 1;
674}
675
676static uint8_t
677ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
430 int rix, int cix, int shortPreamble)
678 int cix, int shortPreamble)
431{
432 uint8_t ctsrate;
433
434 /*
435 * CTS transmit rate is derived from the transmit rate
436 * by looking in the h/w rate table. We must also factor
437 * in whether or not a short preamble is to be used.
438 */
439 /* NB: cix is set above where RTS/CTS is enabled */
440 KASSERT(cix != 0xff, ("cix not setup"));
441 ctsrate = rt->info[cix].rateCode;
442
443 /* XXX this should only matter for legacy rates */
444 if (shortPreamble)
445 ctsrate |= rt->info[cix].shortPreamble;
446
447 return ctsrate;
448}
449
679{
680 uint8_t ctsrate;
681
682 /*
683 * CTS transmit rate is derived from the transmit rate
684 * by looking in the h/w rate table. We must also factor
685 * in whether or not a short preamble is to be used.
686 */
687 /* NB: cix is set above where RTS/CTS is enabled */
688 KASSERT(cix != 0xff, ("cix not setup"));
689 ctsrate = rt->info[cix].rateCode;
690
691 /* XXX this should only matter for legacy rates */
692 if (shortPreamble)
693 ctsrate |= rt->info[cix].shortPreamble;
694
695 return ctsrate;
696}
697
450
451/*
452 * Calculate the RTS/CTS duration for legacy frames.
453 */
454static int
455ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
456 int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
457 int flags)
458{

--- 29 unchanged lines hidden (view full) ---

488 rt, pktlen, rix, AH_FALSE);
489 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
490 ctsduration += rt->info[rix].lpAckDuration;
491 }
492
493 return ctsduration;
494}
495
698/*
699 * Calculate the RTS/CTS duration for legacy frames.
700 */
701static int
702ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
703 int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
704 int flags)
705{

--- 29 unchanged lines hidden (view full) ---

735 rt, pktlen, rix, AH_FALSE);
736 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
737 ctsduration += rt->info[rix].lpAckDuration;
738 }
739
740 return ctsduration;
741}
742
496int
497ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf,
498 struct mbuf *m0)
743/*
744 * Update the given ath_buf with updated rts/cts setup and duration
745 * values.
746 *
747 * To support rate lookups for each software retry, the rts/cts rate
748 * and cts duration must be re-calculated.
749 *
750 * This function assumes the RTS/CTS flags have been set as needed;
751 * mrr has been disabled; and the rate control lookup has been done.
752 *
753 * XXX TODO: MRR need only be disabled for the pre-11n NICs.
754 * XXX The 11n NICs support per-rate RTS/CTS configuration.
755 */
756static void
757ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
499{
758{
759 uint16_t ctsduration = 0;
760 uint8_t ctsrate = 0;
761 uint8_t rix = bf->bf_state.bfs_rc[0].rix;
762 uint8_t cix = 0;
763 const HAL_RATE_TABLE *rt = sc->sc_currates;
764
765 /*
766 * No RTS/CTS enabled? Don't bother.
767 */
768 if ((bf->bf_state.bfs_flags &
769 (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
770 /* XXX is this really needed? */
771 bf->bf_state.bfs_ctsrate = 0;
772 bf->bf_state.bfs_ctsduration = 0;
773 return;
774 }
775
776 /*
777 * If protection is enabled, use the protection rix control
778 * rate. Otherwise use the rate0 control rate.
779 */
780 if (bf->bf_state.bfs_doprot)
781 rix = sc->sc_protrix;
782 else
783 rix = bf->bf_state.bfs_rc[0].rix;
784
785 /*
786 * If the raw path has hard-coded ctsrate0 to something,
787 * use it.
788 */
789 if (bf->bf_state.bfs_ctsrate0 != 0)
790 cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
791 else
792 /* Control rate from above */
793 cix = rt->info[rix].controlRate;
794
795 /* Calculate the rtscts rate for the given cix */
796 ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
797 bf->bf_state.bfs_shpream);
798
799 /* The 11n chipsets do ctsduration calculations for you */
800 if (! ath_tx_is_11n(sc))
801 ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
802 bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
803 rt, bf->bf_state.bfs_flags);
804
805 /* Squirrel away in ath_buf */
806 bf->bf_state.bfs_ctsrate = ctsrate;
807 bf->bf_state.bfs_ctsduration = ctsduration;
808
809 /*
810 * Must disable multi-rate retry when using RTS/CTS.
811 * XXX TODO: only for pre-11n NICs.
812 */
813 bf->bf_state.bfs_ismrr = 0;
814 bf->bf_state.bfs_try0 =
815 bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */
816}
817
818/*
819 * Setup the descriptor chain for a normal or fast-frame
820 * frame.
821 */
822static void
823ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
824{
825 struct ath_desc *ds = bf->bf_desc;
826 struct ath_hal *ah = sc->sc_ah;
827
828 ath_hal_setuptxdesc(ah, ds
829 , bf->bf_state.bfs_pktlen /* packet length */
830 , bf->bf_state.bfs_hdrlen /* header length */
831 , bf->bf_state.bfs_atype /* Atheros packet type */
832 , bf->bf_state.bfs_txpower /* txpower */
833 , bf->bf_state.bfs_txrate0
834 , bf->bf_state.bfs_try0 /* series 0 rate/tries */
835 , bf->bf_state.bfs_keyix /* key cache index */
836 , bf->bf_state.bfs_txantenna /* antenna mode */
837 , bf->bf_state.bfs_flags /* flags */
838 , bf->bf_state.bfs_ctsrate /* rts/cts rate */
839 , bf->bf_state.bfs_ctsduration /* rts/cts duration */
840 );
841
842 /*
843 * This will be overriden when the descriptor chain is written.
844 */
845 bf->bf_lastds = ds;
846 bf->bf_last = bf;
847
848 /* XXX TODO: Setup descriptor chain */
849}
850
851/*
852 * Do a rate lookup.
853 *
854 * This performs a rate lookup for the given ath_buf only if it's required.
855 * Non-data frames and raw frames don't require it.
856 *
857 * This populates the primary and MRR entries; MRR values are
858 * then disabled later on if something requires it (eg RTS/CTS on
859 * pre-11n chipsets.
860 *
861 * This needs to be done before the RTS/CTS fields are calculated
862 * as they may depend upon the rate chosen.
863 */
864static void
865ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf)
866{
867 uint8_t rate, rix;
868 int try0;
869
870 if (! bf->bf_state.bfs_doratelookup)
871 return;
872
873 /* Get rid of any previous state */
874 bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
875
876 ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
877 ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
878 bf->bf_state.bfs_pktlen, &rix, &try0, &rate);
879
880 /* In case MRR is disabled, make sure rc[0] is setup correctly */
881 bf->bf_state.bfs_rc[0].rix = rix;
882 bf->bf_state.bfs_rc[0].ratecode = rate;
883 bf->bf_state.bfs_rc[0].tries = try0;
884
885 if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
886 ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
887 bf->bf_state.bfs_rc);
888 ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));
889
890 sc->sc_txrix = rix; /* for LED blinking */
891 sc->sc_lastdatarix = rix; /* for fast frames */
892 bf->bf_state.bfs_try0 = try0;
893 bf->bf_state.bfs_txrate0 = rate;
894}
895
896/*
897 * Set the rate control fields in the given descriptor based on
898 * the bf_state fields and node state.
899 *
900 * The bfs fields should already be set with the relevant rate
901 * control information, including whether MRR is to be enabled.
902 *
903 * Since the FreeBSD HAL currently sets up the first TX rate
904 * in ath_hal_setuptxdesc(), this will setup the MRR
905 * conditionally for the pre-11n chips, and call ath_buf_set_rate
906 * unconditionally for 11n chips. These require the 11n rate
907 * scenario to be set if MCS rates are enabled, so it's easier
908 * to just always call it. The caller can then only set rates 2, 3
909 * and 4 if multi-rate retry is needed.
910 */
911static void
912ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
913 struct ath_buf *bf)
914{
915 struct ath_rc_series *rc = bf->bf_state.bfs_rc;
916
917 /* If mrr is disabled, blank tries 1, 2, 3 */
918 if (! bf->bf_state.bfs_ismrr)
919 rc[1].tries = rc[2].tries = rc[3].tries = 0;
920
921 /*
922 * Always call - that way a retried descriptor will
923 * have the MRR fields overwritten.
924 *
925 * XXX TODO: see if this is really needed - setting up
926 * the first descriptor should set the MRR fields to 0
927 * for us anyway.
928 */
929 if (ath_tx_is_11n(sc)) {
930 ath_buf_set_rate(sc, ni, bf);
931 } else {
932 ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
933 , rc[1].ratecode, rc[1].tries
934 , rc[2].ratecode, rc[2].tries
935 , rc[3].ratecode, rc[3].tries
936 );
937 }
938}
939
940/*
941 * Transmit the given frame to the hardware.
942 *
943 * The frame must already be setup; rate control must already have
944 * been done.
945 *
946 * XXX since the TXQ lock is being held here (and I dislike holding
947 * it for this long when not doing software aggregation), later on
948 * break this function into "setup_normal" and "xmit_normal". The
949 * lock only needs to be held for the ath_tx_handoff call.
950 */
951static void
952ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
953 struct ath_buf *bf)
954{
955
956 ATH_TXQ_LOCK_ASSERT(txq);
957
958 /* Setup the descriptor before handoff */
959 ath_tx_do_ratelookup(sc, bf);
960 ath_tx_rate_fill_rcflags(sc, bf);
961 ath_tx_set_rtscts(sc, bf);
962 ath_tx_setds(sc, bf);
963 ath_tx_set_ratectrl(sc, bf->bf_node, bf);
964 ath_tx_chaindesclist(sc, bf);
965
966 /* Hand off to hardware */
967 ath_tx_handoff(sc, txq, bf);
968}
969
970
971
972static int
973ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
974 struct ath_buf *bf, struct mbuf *m0)
975{
500 struct ieee80211vap *vap = ni->ni_vap;
501 struct ath_vap *avp = ATH_VAP(vap);
502 struct ath_hal *ah = sc->sc_ah;
503 struct ifnet *ifp = sc->sc_ifp;
504 struct ieee80211com *ic = ifp->if_l2com;
505 const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
506 int error, iswep, ismcast, isfrag, ismrr;
976 struct ieee80211vap *vap = ni->ni_vap;
977 struct ath_vap *avp = ATH_VAP(vap);
978 struct ath_hal *ah = sc->sc_ah;
979 struct ifnet *ifp = sc->sc_ifp;
980 struct ieee80211com *ic = ifp->if_l2com;
981 const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
982 int error, iswep, ismcast, isfrag, ismrr;
507 int keyix, hdrlen, pktlen, try0;
508 u_int8_t rix, txrate, ctsrate;
509 u_int8_t cix = 0xff; /* NB: silence compiler */
983 int keyix, hdrlen, pktlen, try0 = 0;
984 u_int8_t rix = 0, txrate = 0;
510 struct ath_desc *ds;
511 struct ath_txq *txq;
512 struct ieee80211_frame *wh;
985 struct ath_desc *ds;
986 struct ath_txq *txq;
987 struct ieee80211_frame *wh;
513 u_int subtype, flags, ctsduration;
988 u_int subtype, flags;
514 HAL_PKT_TYPE atype;
515 const HAL_RATE_TABLE *rt;
516 HAL_BOOL shortPreamble;
517 struct ath_node *an;
518 u_int pri;
989 HAL_PKT_TYPE atype;
990 const HAL_RATE_TABLE *rt;
991 HAL_BOOL shortPreamble;
992 struct ath_node *an;
993 u_int pri;
519 uint8_t try[4], rate[4];
520
994
521 bzero(try, sizeof(try));
522 bzero(rate, sizeof(rate));
523
524 wh = mtod(m0, struct ieee80211_frame *);
525 iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
526 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
527 isfrag = m0->m_flags & M_FRAG;
528 hdrlen = ieee80211_anyhdrsize(wh);
529 /*
530 * Packet length must not include any
531 * pad bytes; deduct them here.
532 */
533 pktlen = m0->m_pkthdr.len - (hdrlen & 3);
534
535 /* Handle encryption twiddling if needed */
995 wh = mtod(m0, struct ieee80211_frame *);
996 iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
997 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
998 isfrag = m0->m_flags & M_FRAG;
999 hdrlen = ieee80211_anyhdrsize(wh);
1000 /*
1001 * Packet length must not include any
1002 * pad bytes; deduct them here.
1003 */
1004 pktlen = m0->m_pkthdr.len - (hdrlen & 3);
1005
1006 /* Handle encryption twiddling if needed */
536 if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen, &pktlen, &keyix)) {
1007 if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
1008 &pktlen, &keyix)) {
537 ath_freetx(m0);
538 return EIO;
539 }
540
541 /* packet header may have moved, reset our local pointer */
542 wh = mtod(m0, struct ieee80211_frame *);
543
544 pktlen += IEEE80211_CRC_LEN;

--- 79 unchanged lines hidden (view full) ---

624 } else if (m0->m_flags & M_EAPOL) {
625 /* XXX? maybe always use long preamble? */
626 rix = an->an_mgmtrix;
627 txrate = rt->info[rix].rateCode;
628 if (shortPreamble)
629 txrate |= rt->info[rix].shortPreamble;
630 try0 = ATH_TXMAXTRY; /* XXX?too many? */
631 } else {
1009 ath_freetx(m0);
1010 return EIO;
1011 }
1012
1013 /* packet header may have moved, reset our local pointer */
1014 wh = mtod(m0, struct ieee80211_frame *);
1015
1016 pktlen += IEEE80211_CRC_LEN;

--- 79 unchanged lines hidden (view full) ---

1096 } else if (m0->m_flags & M_EAPOL) {
1097 /* XXX? maybe always use long preamble? */
1098 rix = an->an_mgmtrix;
1099 txrate = rt->info[rix].rateCode;
1100 if (shortPreamble)
1101 txrate |= rt->info[rix].shortPreamble;
1102 try0 = ATH_TXMAXTRY; /* XXX?too many? */
1103 } else {
632 ath_rate_findrate(sc, an, shortPreamble, pktlen,
633 &rix, &try0, &txrate);
634 sc->sc_txrix = rix; /* for LED blinking */
635 sc->sc_lastdatarix = rix; /* for fast frames */
636 if (try0 != ATH_TXMAXTRY)
637 ismrr = 1;
1104 /*
1105 * Do rate lookup on each TX, rather than using
1106 * the hard-coded TX information decided here.
1107 */
1108 ismrr = 1;
1109 bf->bf_state.bfs_doratelookup = 1;
638 }
639 if (cap->cap_wmeParams[pri].wmep_noackPolicy)
640 flags |= HAL_TXDESC_NOACK;
641 break;
642 default:
643 if_printf(ifp, "bogus frame type 0x%x (%s)\n",
644 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
645 /* XXX statistic */

--- 14 unchanged lines hidden (view full) ---

660 /*
661 * Calculate miscellaneous flags.
662 */
663 if (ismcast) {
664 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */
665 } else if (pktlen > vap->iv_rtsthreshold &&
666 (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
667 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */
1110 }
1111 if (cap->cap_wmeParams[pri].wmep_noackPolicy)
1112 flags |= HAL_TXDESC_NOACK;
1113 break;
1114 default:
1115 if_printf(ifp, "bogus frame type 0x%x (%s)\n",
1116 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
1117 /* XXX statistic */

--- 14 unchanged lines hidden (view full) ---

1132 /*
1133 * Calculate miscellaneous flags.
1134 */
1135 if (ismcast) {
1136 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */
1137 } else if (pktlen > vap->iv_rtsthreshold &&
1138 (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
1139 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */
668 cix = rt->info[rix].controlRate;
669 sc->sc_stats.ast_tx_rts++;
670 }
671 if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */
672 sc->sc_stats.ast_tx_noack++;
673#ifdef IEEE80211_SUPPORT_TDMA
674 if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
675 DPRINTF(sc, ATH_DEBUG_TDMA,
676 "%s: discard frame, ACK required w/ TDMA\n", __func__);

--- 6 unchanged lines hidden (view full) ---

683 /*
684 * If 802.11g protection is enabled, determine whether
685 * to use RTS/CTS or just CTS. Note that this is only
686 * done for OFDM unicast frames.
687 */
688 if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
689 rt->info[rix].phy == IEEE80211_T_OFDM &&
690 (flags & HAL_TXDESC_NOACK) == 0) {
1140 sc->sc_stats.ast_tx_rts++;
1141 }
1142 if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */
1143 sc->sc_stats.ast_tx_noack++;
1144#ifdef IEEE80211_SUPPORT_TDMA
1145 if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
1146 DPRINTF(sc, ATH_DEBUG_TDMA,
1147 "%s: discard frame, ACK required w/ TDMA\n", __func__);

--- 6 unchanged lines hidden (view full) ---

1154 /*
1155 * If 802.11g protection is enabled, determine whether
1156 * to use RTS/CTS or just CTS. Note that this is only
1157 * done for OFDM unicast frames.
1158 */
1159 if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
1160 rt->info[rix].phy == IEEE80211_T_OFDM &&
1161 (flags & HAL_TXDESC_NOACK) == 0) {
1162 bf->bf_state.bfs_doprot = 1;
691 /* XXX fragments must use CCK rates w/ protection */
1163 /* XXX fragments must use CCK rates w/ protection */
692 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
1164 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
693 flags |= HAL_TXDESC_RTSENA;
1165 flags |= HAL_TXDESC_RTSENA;
694 else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
1166 } else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
695 flags |= HAL_TXDESC_CTSENA;
1167 flags |= HAL_TXDESC_CTSENA;
696 if (isfrag) {
697 /*
698 * For frags it would be desirable to use the
699 * highest CCK rate for RTS/CTS. But stations
700 * farther away may detect it at a lower CCK rate
701 * so use the configured protection rate instead
702 * (for now).
703 */
704 cix = rt->info[sc->sc_protrix].controlRate;
705 } else
706 cix = rt->info[sc->sc_protrix].controlRate;
1168 }
1169 /*
1170 * For frags it would be desirable to use the
1171 * highest CCK rate for RTS/CTS. But stations
1172 * farther away may detect it at a lower CCK rate
1173 * so use the configured protection rate instead
1174 * (for now).
1175 */
707 sc->sc_stats.ast_tx_protect++;
708 }
709
710#if 0
711 /*
712 * If 11n protection is enabled and it's a HT frame,
713 * enable RTS.
714 *

--- 41 unchanged lines hidden (view full) ---

756 */
757 ismrr = 0;
758 try0 = ATH_TXMGTTRY; /* XXX? */
759 }
760 *(u_int16_t *)wh->i_dur = htole16(dur);
761 }
762
763 /*
1176 sc->sc_stats.ast_tx_protect++;
1177 }
1178
1179#if 0
1180 /*
1181 * If 11n protection is enabled and it's a HT frame,
1182 * enable RTS.
1183 *

--- 41 unchanged lines hidden (view full) ---

1225 */
1226 ismrr = 0;
1227 try0 = ATH_TXMGTTRY; /* XXX? */
1228 }
1229 *(u_int16_t *)wh->i_dur = htole16(dur);
1230 }
1231
1232 /*
764 * Calculate RTS/CTS rate and duration if needed.
1233 * Determine if a tx interrupt should be generated for
1234 * this descriptor. We take a tx interrupt to reap
1235 * descriptors when the h/w hits an EOL condition or
1236 * when the descriptor is specifically marked to generate
1237 * an interrupt. We periodically mark descriptors in this
1238 * way to insure timely replenishing of the supply needed
1239 * for sending frames. Defering interrupts reduces system
1240 * load and potentially allows more concurrent work to be
1241 * done but if done to aggressively can cause senders to
1242 * backup.
1243 *
1244 * NB: use >= to deal with sc_txintrperiod changing
1245 * dynamically through sysctl.
765 */
1246 */
766 ctsduration = 0;
767 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
768 ctsrate = ath_tx_get_rtscts_rate(ah, rt, rix, cix, shortPreamble);
1247 if (flags & HAL_TXDESC_INTREQ) {
1248 txq->axq_intrcnt = 0;
1249 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
1250 flags |= HAL_TXDESC_INTREQ;
1251 txq->axq_intrcnt = 0;
1252 }
769
1253
770 /* The 11n chipsets do ctsduration calculations for you */
771 if (! ath_tx_is_11n(sc))
772 ctsduration = ath_tx_calc_ctsduration(ah, rix, cix, shortPreamble,
773 pktlen, rt, flags);
774 /*
775 * Must disable multi-rate retry when using RTS/CTS.
776 */
777 ismrr = 0;
778 try0 = ATH_TXMGTTRY; /* XXX */
779 } else
780 ctsrate = 0;
1254 /* This point forward is actual TX bits */
781
782 /*
783 * At this point we are committed to sending the frame
784 * and we don't need to look at m_nextpkt; clear it in
785 * case this frame is part of frag chain.
786 */
787 m0->m_nextpkt = NULL;
788

--- 12 unchanged lines hidden (view full) ---

801 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
802 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
803 sc->sc_tx_th.wt_txpower = ni->ni_txpower;
804 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
805
806 ieee80211_radiotap_tx(vap, m0);
807 }
808
1255
1256 /*
1257 * At this point we are committed to sending the frame
1258 * and we don't need to look at m_nextpkt; clear it in
1259 * case this frame is part of frag chain.
1260 */
1261 m0->m_nextpkt = NULL;
1262

--- 12 unchanged lines hidden (view full) ---

1275 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
1276 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
1277 sc->sc_tx_th.wt_txpower = ni->ni_txpower;
1278 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
1279
1280 ieee80211_radiotap_tx(vap, m0);
1281 }
1282
1283 /* Blank the legacy rate array */
1284 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1285
809 /*
1286 /*
810 * Determine if a tx interrupt should be generated for
811 * this descriptor. We take a tx interrupt to reap
812 * descriptors when the h/w hits an EOL condition or
813 * when the descriptor is specifically marked to generate
814 * an interrupt. We periodically mark descriptors in this
815 * way to insure timely replenishing of the supply needed
816 * for sending frames. Defering interrupts reduces system
817 * load and potentially allows more concurrent work to be
818 * done but if done to aggressively can cause senders to
819 * backup.
1287 * ath_buf_set_rate needs at least one rate/try to setup
1288 * the rate scenario.
1289 */
1290 bf->bf_state.bfs_rc[0].rix = rix;
1291 bf->bf_state.bfs_rc[0].tries = try0;
1292 bf->bf_state.bfs_rc[0].ratecode = txrate;
1293
1294 /* Store the decided rate index values away */
1295 bf->bf_state.bfs_pktlen = pktlen;
1296 bf->bf_state.bfs_hdrlen = hdrlen;
1297 bf->bf_state.bfs_atype = atype;
1298 bf->bf_state.bfs_txpower = ni->ni_txpower;
1299 bf->bf_state.bfs_txrate0 = txrate;
1300 bf->bf_state.bfs_try0 = try0;
1301 bf->bf_state.bfs_keyix = keyix;
1302 bf->bf_state.bfs_txantenna = sc->sc_txantenna;
1303 bf->bf_state.bfs_flags = flags;
1304 bf->bf_txflags = flags;
1305 bf->bf_state.bfs_shpream = shortPreamble;
1306
1307 /* XXX this should be done in ath_tx_setrate() */
1308 bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */
1309 bf->bf_state.bfs_ctsrate = 0; /* calculated later */
1310 bf->bf_state.bfs_ctsduration = 0;
1311 bf->bf_state.bfs_ismrr = ismrr;
1312
1313 return 0;
1314}
1315
1316/*
1317 * Direct-dispatch the current frame to the hardware.
1318 *
1319 * This can be called by the net80211 code.
1320 *
1321 * XXX what about locking? Or, push the seqno assign into the
1322 * XXX aggregate scheduler so its serialised?
1323 */
1324int
1325ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
1326 struct ath_buf *bf, struct mbuf *m0)
1327{
1328 struct ieee80211vap *vap = ni->ni_vap;
1329 struct ath_vap *avp = ATH_VAP(vap);
1330 int r;
1331 u_int pri;
1332 int tid;
1333 struct ath_txq *txq;
1334 int ismcast;
1335 const struct ieee80211_frame *wh;
1336 int is_ampdu, is_ampdu_tx, is_ampdu_pending;
1337 ieee80211_seq seqno;
1338 uint8_t type, subtype;
1339
1340 /*
1341 * Determine the target hardware queue.
820 *
1342 *
821 * NB: use >= to deal with sc_txintrperiod changing
822 * dynamically through sysctl.
1343 * For multicast frames, the txq gets overridden to be the
1344 * software TXQ and it's done via direct-dispatch.
1345 *
1346 * For any other frame, we do a TID/QoS lookup inside the frame
1347 * to see what the TID should be. If it's a non-QoS frame, the
1348 * AC and TID are overridden. The TID/TXQ code assumes the
1349 * TID is on a predictable hardware TXQ, so we don't support
1350 * having a node TID queued to multiple hardware TXQs.
1351 * This may change in the future but would require some locking
1352 * fudgery.
823 */
1353 */
824 if (flags & HAL_TXDESC_INTREQ) {
825 txq->axq_intrcnt = 0;
826 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
827 flags |= HAL_TXDESC_INTREQ;
828 txq->axq_intrcnt = 0;
829 }
1354 pri = ath_tx_getac(sc, m0);
1355 tid = ath_tx_gettid(sc, m0);
830
1356
831 if (ath_tx_is_11n(sc)) {
832 rate[0] = rix;
833 try[0] = try0;
1357 txq = sc->sc_ac2q[pri];
1358 wh = mtod(m0, struct ieee80211_frame *);
1359 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1360 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1361 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1362
1363 /* A-MPDU TX */
1364 is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
1365 is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
1366 is_ampdu = is_ampdu_tx | is_ampdu_pending;
1367
1368 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
1369 __func__, tid, pri, is_ampdu);
1370
1371 /* Multicast frames go onto the software multicast queue */
1372 if (ismcast)
1373 txq = &avp->av_mcastq;
1374
1375 if ((! is_ampdu) && (vap->iv_ps_sta || avp->av_mcastq.axq_depth))
1376 txq = &avp->av_mcastq;
1377
1378 /* Do the generic frame setup */
1379 /* XXX should just bzero the bf_state? */
1380 bf->bf_state.bfs_dobaw = 0;
1381
1382 /* A-MPDU TX? Manually set sequence number */
1383 /* Don't do it whilst pending; the net80211 layer still assigns them */
1384 /* XXX do we need locking here? */
1385 if (is_ampdu_tx) {
1386 ATH_TXQ_LOCK(txq);
1387 /*
1388 * Always call; this function will
1389 * handle making sure that null data frames
1390 * don't get a sequence number from the current
1391 * TID and thus mess with the BAW.
1392 */
1393 seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);
1394 if (IEEE80211_QOS_HAS_SEQ(wh) &&
1395 subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) {
1396 bf->bf_state.bfs_dobaw = 1;
1397 }
1398 ATH_TXQ_UNLOCK(txq);
834 }
835
836 /*
1399 }
1400
1401 /*
837 * Formulate first tx descriptor with tx controls.
1402 * If needed, the sequence number has been assigned.
1403 * Squirrel it away somewhere easy to get to.
838 */
1404 */
839 /* XXX check return value? */
840 /* XXX is this ok to call for 11n descriptors? */
841 /* XXX or should it go through the first, next, last 11n calls? */
842 ath_hal_setuptxdesc(ah, ds
843 , pktlen /* packet length */
844 , hdrlen /* header length */
845 , atype /* Atheros packet type */
846 , ni->ni_txpower /* txpower */
847 , txrate, try0 /* series 0 rate/tries */
848 , keyix /* key cache index */
849 , sc->sc_txantenna /* antenna mode */
850 , flags /* flags */
851 , ctsrate /* rts/cts rate */
852 , ctsduration /* rts/cts duration */
853 );
854 bf->bf_txflags = flags;
1405 bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;
1406
1407 /* Is ampdu pending? fetch the seqno and print it out */
1408 if (is_ampdu_pending)
1409 DPRINTF(sc, ATH_DEBUG_SW_TX,
1410 "%s: tid %d: ampdu pending, seqno %d\n",
1411 __func__, tid, M_SEQNO_GET(m0));
1412
1413 /* This also sets up the DMA map */
1414 r = ath_tx_normal_setup(sc, ni, bf, m0);
1415
1416 if (r != 0)
1417 return r;
1418
1419 /* At this point m0 could have changed! */
1420 m0 = bf->bf_m;
1421
1422#if 1
855 /*
1423 /*
856 * Setup the multi-rate retry state only when we're
857 * going to use it. This assumes ath_hal_setuptxdesc
858 * initializes the descriptors (so we don't have to)
859 * when the hardware supports multi-rate retry and
860 * we don't use it.
1424 * If it's a multicast frame, do a direct-dispatch to the
1425 * destination hardware queue. Don't bother software
1426 * queuing it.
861 */
1427 */
862 if (ismrr) {
863 if (ath_tx_is_11n(sc))
864 ath_rate_getxtxrates(sc, an, rix, rate, try);
865 else
866 ath_rate_setupxtxdesc(sc, an, ds, shortPreamble, rix);
867 }
1428 /*
1429 * If it's a BAR frame, do a direct dispatch to the
1430 * destination hardware queue. Don't bother software
1431 * queuing it, as the TID will now be paused.
1432 * Sending a BAR frame can occur from the net80211 txa timer
1433 * (ie, retries) or from the ath txtask (completion call.)
1434 * It queues directly to hardware because the TID is paused
1435 * at this point (and won't be unpaused until the BAR has
1436 * either been TXed successfully or max retries has been
1437 * reached.)
1438 */
1439 if (txq == &avp->av_mcastq) {
1440 ATH_TXQ_LOCK(txq);
1441 ath_tx_xmit_normal(sc, txq, bf);
1442 ATH_TXQ_UNLOCK(txq);
1443 } else if (type == IEEE80211_FC0_TYPE_CTL &&
1444 subtype == IEEE80211_FC0_SUBTYPE_BAR) {
1445 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
1446 "%s: BAR: TX'ing direct\n", __func__);
1447 ATH_TXQ_LOCK(txq);
1448 ath_tx_xmit_normal(sc, txq, bf);
1449 ATH_TXQ_UNLOCK(txq);
1450 } else {
1451 /* add to software queue */
1452 ath_tx_swq(sc, ni, txq, bf);
1453 }
1454#else
1455 /*
1456 * For now, since there's no software queue,
1457 * direct-dispatch to the hardware.
1458 */
1459 ATH_TXQ_LOCK(txq);
1460 ath_tx_xmit_normal(sc, txq, bf);
1461 ATH_TXQ_UNLOCK(txq);
1462#endif
868
1463
869 if (ath_tx_is_11n(sc)) {
870 ath_buf_set_rate(sc, ni, bf, pktlen, flags, ctsrate, (atype == HAL_PKT_TYPE_PSPOLL), rate, try);
871 }
872
873 ath_tx_handoff(sc, txq, bf);
874 return 0;
875}
876
877static int
878ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
879 struct ath_buf *bf, struct mbuf *m0,
880 const struct ieee80211_bpf_params *params)
881{
882 struct ifnet *ifp = sc->sc_ifp;
883 struct ieee80211com *ic = ifp->if_l2com;
884 struct ath_hal *ah = sc->sc_ah;
885 struct ieee80211vap *vap = ni->ni_vap;
886 int error, ismcast, ismrr;
887 int keyix, hdrlen, pktlen, try0, txantenna;
1464 return 0;
1465}
1466
1467static int
1468ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
1469 struct ath_buf *bf, struct mbuf *m0,
1470 const struct ieee80211_bpf_params *params)
1471{
1472 struct ifnet *ifp = sc->sc_ifp;
1473 struct ieee80211com *ic = ifp->if_l2com;
1474 struct ath_hal *ah = sc->sc_ah;
1475 struct ieee80211vap *vap = ni->ni_vap;
1476 int error, ismcast, ismrr;
1477 int keyix, hdrlen, pktlen, try0, txantenna;
888 u_int8_t rix, cix, txrate, ctsrate, rate1, rate2, rate3;
1478 u_int8_t rix, txrate;
889 struct ieee80211_frame *wh;
1479 struct ieee80211_frame *wh;
890 u_int flags, ctsduration;
1480 u_int flags;
891 HAL_PKT_TYPE atype;
892 const HAL_RATE_TABLE *rt;
893 struct ath_desc *ds;
894 u_int pri;
1481 HAL_PKT_TYPE atype;
1482 const HAL_RATE_TABLE *rt;
1483 struct ath_desc *ds;
1484 u_int pri;
895 uint8_t try[4], rate[4];
1485 int o_tid = -1;
1486 int do_override;
896
1487
897 bzero(try, sizeof(try));
898 bzero(rate, sizeof(rate));
899
900 wh = mtod(m0, struct ieee80211_frame *);
901 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
902 hdrlen = ieee80211_anyhdrsize(wh);
903 /*
904 * Packet length must not include any
905 * pad bytes; deduct them here.
906 */
907 /* XXX honor IEEE80211_BPF_DATAPAD */
908 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;
909
1488 wh = mtod(m0, struct ieee80211_frame *);
1489 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1490 hdrlen = ieee80211_anyhdrsize(wh);
1491 /*
1492 * Packet length must not include any
1493 * pad bytes; deduct them here.
1494 */
1495 /* XXX honor IEEE80211_BPF_DATAPAD */
1496 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;
1497
1498
1499 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n",
1500 __func__, ismcast);
1501
910 /* Handle encryption twiddling if needed */
1502 /* Handle encryption twiddling if needed */
911 if (! ath_tx_tag_crypto(sc, ni, m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, &hdrlen, &pktlen, &keyix)) {
1503 if (! ath_tx_tag_crypto(sc, ni,
1504 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
1505 &hdrlen, &pktlen, &keyix)) {
912 ath_freetx(m0);
913 return EIO;
914 }
915 /* packet header may have moved, reset our local pointer */
916 wh = mtod(m0, struct ieee80211_frame *);
917
1506 ath_freetx(m0);
1507 return EIO;
1508 }
1509 /* packet header may have moved, reset our local pointer */
1510 wh = mtod(m0, struct ieee80211_frame *);
1511
1512 /* Do the generic frame setup */
1513 /* XXX should just bzero the bf_state? */
1514 bf->bf_state.bfs_dobaw = 0;
1515
918 error = ath_tx_dmasetup(sc, bf, m0);
919 if (error != 0)
920 return error;
921 m0 = bf->bf_m; /* NB: may have changed */
922 wh = mtod(m0, struct ieee80211_frame *);
923 bf->bf_node = ni; /* NB: held reference */
924
925 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */
926 flags |= HAL_TXDESC_INTREQ; /* force interrupt */
927 if (params->ibp_flags & IEEE80211_BPF_RTS)
928 flags |= HAL_TXDESC_RTSENA;
1516 error = ath_tx_dmasetup(sc, bf, m0);
1517 if (error != 0)
1518 return error;
1519 m0 = bf->bf_m; /* NB: may have changed */
1520 wh = mtod(m0, struct ieee80211_frame *);
1521 bf->bf_node = ni; /* NB: held reference */
1522
1523 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */
1524 flags |= HAL_TXDESC_INTREQ; /* force interrupt */
1525 if (params->ibp_flags & IEEE80211_BPF_RTS)
1526 flags |= HAL_TXDESC_RTSENA;
929 else if (params->ibp_flags & IEEE80211_BPF_CTS)
1527 else if (params->ibp_flags & IEEE80211_BPF_CTS) {
1528 /* XXX assume 11g/11n protection? */
1529 bf->bf_state.bfs_doprot = 1;
930 flags |= HAL_TXDESC_CTSENA;
1530 flags |= HAL_TXDESC_CTSENA;
1531 }
931 /* XXX leave ismcast to injector? */
932 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
933 flags |= HAL_TXDESC_NOACK;
934
935 rt = sc->sc_currates;
936 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
937 rix = ath_tx_findrix(sc, params->ibp_rate0);
938 txrate = rt->info[rix].rateCode;
939 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
940 txrate |= rt->info[rix].shortPreamble;
941 sc->sc_txrix = rix;
942 try0 = params->ibp_try0;
943 ismrr = (params->ibp_try1 != 0);
944 txantenna = params->ibp_pri >> 2;
945 if (txantenna == 0) /* XXX? */
946 txantenna = sc->sc_txantenna;
947
1532 /* XXX leave ismcast to injector? */
1533 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
1534 flags |= HAL_TXDESC_NOACK;
1535
1536 rt = sc->sc_currates;
1537 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
1538 rix = ath_tx_findrix(sc, params->ibp_rate0);
1539 txrate = rt->info[rix].rateCode;
1540 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
1541 txrate |= rt->info[rix].shortPreamble;
1542 sc->sc_txrix = rix;
1543 try0 = params->ibp_try0;
1544 ismrr = (params->ibp_try1 != 0);
1545 txantenna = params->ibp_pri >> 2;
1546 if (txantenna == 0) /* XXX? */
1547 txantenna = sc->sc_txantenna;
1548
948 ctsduration = 0;
949 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
950 cix = ath_tx_findrix(sc, params->ibp_ctsrate);
951 ctsrate = ath_tx_get_rtscts_rate(ah, rt, rix, cix, params->ibp_flags & IEEE80211_BPF_SHORTPRE);
952 /* The 11n chipsets do ctsduration calculations for you */
953 if (! ath_tx_is_11n(sc))
954 ctsduration = ath_tx_calc_ctsduration(ah, rix, cix,
955 params->ibp_flags & IEEE80211_BPF_SHORTPRE, pktlen,
956 rt, flags);
957 /*
958 * Must disable multi-rate retry when using RTS/CTS.
959 */
960 ismrr = 0; /* XXX */
961 } else
962 ctsrate = 0;
1549 /*
1550 * Since ctsrate is fixed, store it away for later
1551 * use when the descriptor fields are being set.
1552 */
1553 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA))
1554 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate;
963
964 pri = params->ibp_pri & 3;
1555
1556 pri = params->ibp_pri & 3;
1557 /* Override pri if the frame isn't a QoS one */
1558 if (! IEEE80211_QOS_HAS_SEQ(wh))
1559 pri = ath_tx_getac(sc, m0);
1560
965 /*
966 * NB: we mark all packets as type PSPOLL so the h/w won't
967 * set the sequence number, duration, etc.
968 */
969 atype = HAL_PKT_TYPE_PSPOLL;
970
971 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
972 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,

--- 15 unchanged lines hidden (view full) ---

988 ieee80211_radiotap_tx(vap, m0);
989 }
990
991 /*
992 * Formulate first tx descriptor with tx controls.
993 */
994 ds = bf->bf_desc;
995 /* XXX check return value? */
1561 /*
1562 * NB: we mark all packets as type PSPOLL so the h/w won't
1563 * set the sequence number, duration, etc.
1564 */
1565 atype = HAL_PKT_TYPE_PSPOLL;
1566
1567 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
1568 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,

--- 15 unchanged lines hidden (view full) ---

1584 ieee80211_radiotap_tx(vap, m0);
1585 }
1586
1587 /*
1588 * Formulate first tx descriptor with tx controls.
1589 */
1590 ds = bf->bf_desc;
1591 /* XXX check return value? */
996 ath_hal_setuptxdesc(ah, ds
997 , pktlen /* packet length */
998 , hdrlen /* header length */
999 , atype /* Atheros packet type */
1000 , params->ibp_power /* txpower */
1001 , txrate, try0 /* series 0 rate/tries */
1002 , keyix /* key cache index */
1003 , txantenna /* antenna mode */
1004 , flags /* flags */
1005 , ctsrate /* rts/cts rate */
1006 , ctsduration /* rts/cts duration */
1007 );
1592
1593 /* Store the decided rate index values away */
1594 bf->bf_state.bfs_pktlen = pktlen;
1595 bf->bf_state.bfs_hdrlen = hdrlen;
1596 bf->bf_state.bfs_atype = atype;
1597 bf->bf_state.bfs_txpower = params->ibp_power;
1598 bf->bf_state.bfs_txrate0 = txrate;
1599 bf->bf_state.bfs_try0 = try0;
1600 bf->bf_state.bfs_keyix = keyix;
1601 bf->bf_state.bfs_txantenna = txantenna;
1602 bf->bf_state.bfs_flags = flags;
1008 bf->bf_txflags = flags;
1603 bf->bf_txflags = flags;
1604 bf->bf_state.bfs_shpream =
1605 !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE);
1009
1606
1010 if (ath_tx_is_11n(sc)) {
1011 rate[0] = ath_tx_findrix(sc, params->ibp_rate0);
1012 try[0] = params->ibp_try0;
1607 /* XXX this should be done in ath_tx_setrate() */
1608 bf->bf_state.bfs_ctsrate = 0;
1609 bf->bf_state.bfs_ctsduration = 0;
1610 bf->bf_state.bfs_ismrr = ismrr;
1013
1611
1014 if (ismrr) {
1015 /* Remember, rate[] is actually an array of rix's -adrian */
1016 rate[0] = ath_tx_findrix(sc, params->ibp_rate0);
1017 rate[1] = ath_tx_findrix(sc, params->ibp_rate1);
1018 rate[2] = ath_tx_findrix(sc, params->ibp_rate2);
1019 rate[3] = ath_tx_findrix(sc, params->ibp_rate3);
1612 /* Blank the legacy rate array */
1613 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1020
1614
1021 try[0] = params->ibp_try0;
1022 try[1] = params->ibp_try1;
1023 try[2] = params->ibp_try2;
1024 try[3] = params->ibp_try3;
1025 }
1026 } else {
1027 if (ismrr) {
1028 rix = ath_tx_findrix(sc, params->ibp_rate1);
1029 rate1 = rt->info[rix].rateCode;
1030 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
1031 rate1 |= rt->info[rix].shortPreamble;
1032 if (params->ibp_try2) {
1033 rix = ath_tx_findrix(sc, params->ibp_rate2);
1034 rate2 = rt->info[rix].rateCode;
1035 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
1036 rate2 |= rt->info[rix].shortPreamble;
1037 } else
1038 rate2 = 0;
1039 if (params->ibp_try3) {
1040 rix = ath_tx_findrix(sc, params->ibp_rate3);
1041 rate3 = rt->info[rix].rateCode;
1042 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
1043 rate3 |= rt->info[rix].shortPreamble;
1044 } else
1045 rate3 = 0;
1046 ath_hal_setupxtxdesc(ah, ds
1047 , rate1, params->ibp_try1 /* series 1 */
1048 , rate2, params->ibp_try2 /* series 2 */
1049 , rate3, params->ibp_try3 /* series 3 */
1050 );
1051 }
1052 }
1615 bf->bf_state.bfs_rc[0].rix =
1616 ath_tx_findrix(sc, params->ibp_rate0);
1617 bf->bf_state.bfs_rc[0].tries = try0;
1618 bf->bf_state.bfs_rc[0].ratecode = txrate;
1053
1619
1054 if (ath_tx_is_11n(sc)) {
1055 /*
1056 * notice that rix doesn't include any of the "magic" flags txrate
1057 * does for communicating "other stuff" to the HAL.
1058 */
1059 ath_buf_set_rate(sc, ni, bf, pktlen, flags, ctsrate, (atype == HAL_PKT_TYPE_PSPOLL), rate, try);
1620 if (ismrr) {
1621 int rix;
1622
1623 rix = ath_tx_findrix(sc, params->ibp_rate1);
1624 bf->bf_state.bfs_rc[1].rix = rix;
1625 bf->bf_state.bfs_rc[1].tries = params->ibp_try1;
1626
1627 rix = ath_tx_findrix(sc, params->ibp_rate2);
1628 bf->bf_state.bfs_rc[2].rix = rix;
1629 bf->bf_state.bfs_rc[2].tries = params->ibp_try2;
1630
1631 rix = ath_tx_findrix(sc, params->ibp_rate3);
1632 bf->bf_state.bfs_rc[3].rix = rix;
1633 bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
1060 }
1634 }
1635 /*
1636 * All the required rate control decisions have been made;
1637 * fill in the rc flags.
1638 */
1639 ath_tx_rate_fill_rcflags(sc, bf);
1061
1062 /* NB: no buffered multicast in power save support */
1640
1641 /* NB: no buffered multicast in power save support */
1063 ath_tx_handoff(sc, sc->sc_ac2q[pri], bf);
1642
1643 /* XXX If it's an ADDBA, override the correct queue */
1644 do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid);
1645
1646 /* Map ADDBA to the correct priority */
1647 if (do_override) {
1648#if 0
1649 device_printf(sc->sc_dev,
1650 "%s: overriding tid %d pri %d -> %d\n",
1651 __func__, o_tid, pri, TID_TO_WME_AC(o_tid));
1652#endif
1653 pri = TID_TO_WME_AC(o_tid);
1654 }
1655
1656 /*
1657 * If we're overiding the ADDBA destination, dump directly
1658 * into the hardware queue, right after any pending
1659 * frames to that node are.
1660 */
1661 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",
1662 __func__, do_override);
1663
1664 if (do_override) {
1665 ATH_TXQ_LOCK(sc->sc_ac2q[pri]);
1666 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
1667 ATH_TXQ_UNLOCK(sc->sc_ac2q[pri]);
1668 } else {
1669 /* Queue to software queue */
1670 ath_tx_swq(sc, ni, sc->sc_ac2q[pri], bf);
1671 }
1672
1064 return 0;
1065}
1066
1673 return 0;
1674}
1675
1676/*
1677 * Send a raw frame.
1678 *
1679 * This can be called by net80211.
1680 */
1067int
1068ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
1069 const struct ieee80211_bpf_params *params)
1070{
1071 struct ieee80211com *ic = ni->ni_ic;
1072 struct ifnet *ifp = ic->ic_ifp;
1073 struct ath_softc *sc = ifp->if_softc;
1074 struct ath_buf *bf;

--- 47 unchanged lines hidden (view full) ---

1122 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
1123 ATH_TXBUF_UNLOCK(sc);
1124bad:
1125 ifp->if_oerrors++;
1126 sc->sc_stats.ast_tx_raw_fail++;
1127 ieee80211_free_node(ni);
1128 return error;
1129}
1681int
1682ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
1683 const struct ieee80211_bpf_params *params)
1684{
1685 struct ieee80211com *ic = ni->ni_ic;
1686 struct ifnet *ifp = ic->ic_ifp;
1687 struct ath_softc *sc = ifp->if_softc;
1688 struct ath_buf *bf;

--- 47 unchanged lines hidden (view full) ---

1736 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
1737 ATH_TXBUF_UNLOCK(sc);
1738bad:
1739 ifp->if_oerrors++;
1740 sc->sc_stats.ast_tx_raw_fail++;
1741 ieee80211_free_node(ni);
1742 return error;
1743}
1744
1745/* Some helper functions */
1746
1747/*
1748 * ADDBA (and potentially others) need to be placed in the same
1749 * hardware queue as the TID/node it's relating to. This is so
1750 * it goes out after any pending non-aggregate frames to the
1751 * same node/TID.
1752 *
1753 * If this isn't done, the ADDBA can go out before the frames
1754 * queued in hardware. Even though these frames have a sequence
1755 * number -earlier- than the ADDBA can be transmitted (but
1756 * no frames whose sequence numbers are after the ADDBA should
1757 * be!) they'll arrive after the ADDBA - and the receiving end
1758 * will simply drop them as being out of the BAW.
1759 *
1760 * The frames can't be appended to the TID software queue - it'll
1761 * never be sent out. So these frames have to be directly
1762 * dispatched to the hardware, rather than queued in software.
1763 * So if this function returns true, the TXQ has to be
1764 * overridden and it has to be directly dispatched.
1765 *
1766 * It's a dirty hack, but someone's gotta do it.
1767 */
1768
1769/*
1770 * XXX doesn't belong here!
1771 */
1772static int
1773ieee80211_is_action(struct ieee80211_frame *wh)
1774{
1775 /* Type: Management frame? */
1776 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
1777 IEEE80211_FC0_TYPE_MGT)
1778 return 0;
1779
1780 /* Subtype: Action frame? */
1781 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) !=
1782 IEEE80211_FC0_SUBTYPE_ACTION)
1783 return 0;
1784
1785 return 1;
1786}
1787
1788#define MS(_v, _f) (((_v) & _f) >> _f##_S)
1789/*
1790 * Return an alternate TID for ADDBA request frames.
1791 *
1792 * Yes, this likely should be done in the net80211 layer.
1793 */
1794static int
1795ath_tx_action_frame_override_queue(struct ath_softc *sc,
1796 struct ieee80211_node *ni,
1797 struct mbuf *m0, int *tid)
1798{
1799 struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *);
1800 struct ieee80211_action_ba_addbarequest *ia;
1801 uint8_t *frm;
1802 uint16_t baparamset;
1803
1804 /* Not action frame? Bail */
1805 if (! ieee80211_is_action(wh))
1806 return 0;
1807
1808 /* XXX Not needed for frames we send? */
1809#if 0
1810 /* Correct length? */
1811 if (! ieee80211_parse_action(ni, m))
1812 return 0;
1813#endif
1814
1815 /* Extract out action frame */
1816 frm = (u_int8_t *)&wh[1];
1817 ia = (struct ieee80211_action_ba_addbarequest *) frm;
1818
1819 /* Not ADDBA? Bail */
1820 if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA)
1821 return 0;
1822 if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST)
1823 return 0;
1824
1825 /* Extract TID, return it */
1826 baparamset = le16toh(ia->rq_baparamset);
1827 *tid = (int) MS(baparamset, IEEE80211_BAPS_TID);
1828
1829 return 1;
1830}
1831#undef MS
1832
1833/* Per-node software queue operations */
1834
1835/*
1836 * Add the current packet to the given BAW.
1837 * It is assumed that the current packet
1838 *
1839 * + fits inside the BAW;
1840 * + already has had a sequence number allocated.
1841 *
1842 * Since the BAW status may be modified by both the ath task and
1843 * the net80211/ifnet contexts, the TID must be locked.
1844 */
void
ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf)
{
	int index, cindex;
	struct ieee80211_tx_ampdu *tap;

	ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);

	/* A retried frame is already tracked in the BAW; don't re-add it */
	if (bf->bf_state.bfs_isretried)
		return;

	tap = ath_tx_get_tx_tid(an, tid->tid);

	/* Diagnostic: this buffer should not already be marked as in-BAW */
	if (bf->bf_state.bfs_addedbaw)
		device_printf(sc->sc_dev,
		    "%s: re-added? tid=%d, seqno %d; window %d:%d; baw head=%d tail=%d\n",
		    __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
		    tap->txa_start, tap->txa_wnd, tid->baw_head, tid->baw_tail);

	/*
	 * ni->ni_txseqs[] is the currently allocated seqno.
	 * the txa state contains the current baw start.
	 *
	 * index is the offset of this frame's seqno from the BAW left
	 * edge (txa_start); cindex is the matching slot in the circular
	 * tid->tx_buf[] tracking array.
	 */
	index  = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
	    "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d baw head=%d tail=%d\n",
	    __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
	    tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head, tid->baw_tail);


#if 0
	assert(tid->tx_buf[cindex] == NULL);
#endif
	/*
	 * If the slot is already occupied, BAW accounting has gone
	 * wrong somewhere; log both buffers' state for debugging.
	 */
	if (tid->tx_buf[cindex] != NULL) {
		device_printf(sc->sc_dev,
		    "%s: ba packet dup (index=%d, cindex=%d, "
		    "head=%d, tail=%d)\n",
		    __func__, index, cindex, tid->baw_head, tid->baw_tail);
		device_printf(sc->sc_dev,
		    "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n",
		    __func__,
		    tid->tx_buf[cindex],
		    SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno),
		    bf,
		    SEQNO(bf->bf_state.bfs_seqno)
		);
	}
	tid->tx_buf[cindex] = bf;

	/* Advance the BAW tail if this slot lies beyond it */
	if (index >= ((tid->baw_tail - tid->baw_head) & (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
1901
1902/*
1903 * seq_start - left edge of BAW
1904 * seq_next - current/next sequence number to allocate
1905 *
1906 * Since the BAW status may be modified by both the ath task and
1907 * the net80211/ifnet contexts, the TID must be locked.
1908 */
static void
ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, const struct ath_buf *bf)
{
	int index, cindex;
	struct ieee80211_tx_ampdu *tap;
	int seqno = SEQNO(bf->bf_state.bfs_seqno);

	ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);

	/* Locate this frame's slot relative to the BAW left edge */
	tap = ath_tx_get_tx_tid(an, tid->tid);
	index = ATH_BA_INDEX(tap->txa_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
	    "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, baw head=%d, tail=%d\n",
	    __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
	    cindex, tid->baw_head, tid->baw_tail);

	/*
	 * If this occurs then we have a big problem - something else
	 * has slid tap->txa_start along without updating the BAW
	 * tracking start/end pointers. Thus the TX BAW state is now
	 * completely busted.
	 *
	 * But for now, since I haven't yet fixed TDMA and buffer cloning,
	 * it's quite possible that a cloned buffer is making its way
	 * here and causing it to fire off. Disable TDMA for now.
	 */
	if (tid->tx_buf[cindex] != bf) {
		device_printf(sc->sc_dev,
		    "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
		    __func__,
		    bf, SEQNO(bf->bf_state.bfs_seqno),
		    tid->tx_buf[cindex],
		    SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno));
	}

	/* Free the slot for this completed frame */
	tid->tx_buf[cindex] = NULL;

	/*
	 * Slide the BAW left edge (txa_start) and ring head forward
	 * over any leading run of completed (NULL) slots.
	 */
	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
		INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: baw is now %d:%d, baw head=%d\n",
	    __func__, tap->txa_start, tap->txa_wnd, tid->baw_head);
}
1956
1957/*
1958 * Mark the current node/TID as ready to TX.
1959 *
1960 * This is done to make it easy for the software scheduler to
1961 * find which nodes have data to send.
1962 *
1963 * The TXQ lock must be held.
1964 */
1965static void
1966ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
1967{
1968 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
1969
1970 ATH_TXQ_LOCK_ASSERT(txq);
1971
1972 if (tid->paused)
1973 return; /* paused, can't schedule yet */
1974
1975 if (tid->sched)
1976 return; /* already scheduled */
1977
1978 tid->sched = 1;
1979
1980 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
1981}
1982
1983/*
1984 * Mark the current node as no longer needing to be polled for
1985 * TX packets.
1986 *
1987 * The TXQ lock must be held.
1988 */
1989static void
1990ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)
1991{
1992 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
1993
1994 ATH_TXQ_LOCK_ASSERT(txq);
1995
1996 if (tid->sched == 0)
1997 return;
1998
1999 tid->sched = 0;
2000 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
2001}
2002
2003/*
2004 * Assign a sequence number manually to the given frame.
2005 *
2006 * This should only be called for A-MPDU TX frames.
2007 */
static ieee80211_seq
ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf, struct mbuf *m0)
{
	struct ieee80211_frame *wh;
	int tid, pri;
	ieee80211_seq seqno;
	uint8_t subtype;

	/* TID lookup - map the WME access category back to a TID */
	wh = mtod(m0, struct ieee80211_frame *);
	pri = M_WME_GETAC(m0);	/* honor classification */
	tid = WME_AC_TO_TID(pri);
	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n",
	    __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));

	/* XXX Is it a control frame? Ignore */

	/* Does the packet require a sequence number? */
	if (! IEEE80211_QOS_HAS_SEQ(wh))
		return -1;

	/*
	 * Is it a QOS NULL Data frame? Give it a sequence number from
	 * the default TID (IEEE80211_NONQOS_TID.)
	 *
	 * The RX path of everything I've looked at doesn't include the NULL
	 * data frame sequence number in the aggregation state updates, so
	 * assigning it a sequence number there will cause a BAW hole on the
	 * RX side.
	 */
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
	if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) {
		seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
		INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
	} else {
		/* Manually assign sequence number from the per-TID counter */
		seqno = ni->ni_txseqs[tid];
		INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE);
	}
	/* Stamp the seqno into the 802.11 header and the mbuf */
	*(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
	M_SEQNO_SET(m0, seqno);

	/* Return so caller can do something with it if needed */
	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: -> seqno=%d\n", __func__, seqno);
	return seqno;
}
2055
2056/*
2057 * Attempt to direct dispatch an aggregate frame to hardware.
2058 * If the frame is out of BAW, queue.
2059 * Otherwise, schedule it as a single frame.
2060 */
static void
ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an, struct ath_buf *bf)
{
	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
	struct ath_txq *txq = bf->bf_state.bfs_txq;
	struct ieee80211_tx_ampdu *tap;

	ATH_TXQ_LOCK_ASSERT(txq);

	tap = ath_tx_get_tx_tid(an, tid->tid);

	/* paused? queue (no scheduling; resume will kick the TID) */
	if (tid->paused) {
		ATH_TXQ_INSERT_TAIL(tid, bf, bf_list);
		return;
	}

	/* outside baw? software-queue and schedule for later */
	if (bf->bf_state.bfs_dobaw &&
	    (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
	    SEQNO(bf->bf_state.bfs_seqno)))) {
		ATH_TXQ_INSERT_TAIL(tid, bf, bf_list);
		ath_tx_tid_sched(sc, tid);
		return;
	}

	/* Direct dispatch to hardware: rate lookup, descriptor setup */
	ath_tx_do_ratelookup(sc, bf);
	ath_tx_rate_fill_rcflags(sc, bf);
	ath_tx_set_rtscts(sc, bf);
	ath_tx_setds(sc, bf);
	ath_tx_set_ratectrl(sc, bf->bf_node, bf);
	ath_tx_chaindesclist(sc, bf);

	/* Statistics */
	sc->sc_aggr_stats.aggr_low_hwq_single_pkt++;

	/* Track per-TID hardware queue depth correctly */
	tid->hwq_depth++;

	/* Add to BAW, since this frame is being sent as part of the session */
	if (bf->bf_state.bfs_dobaw) {
		ath_tx_addto_baw(sc, an, tid, bf);
		bf->bf_state.bfs_addedbaw = 1;
	}

	/* Set completion handler, multi-frame aggregate or not */
	bf->bf_comp = ath_tx_aggr_comp;

	/* Hand off to hardware */
	ath_tx_handoff(sc, txq, bf);
}
2113
2114/*
2115 * Attempt to send the packet.
2116 * If the queue isn't busy, direct-dispatch.
2117 * If the queue is busy enough, queue the given packet on the
2118 * relevant software queue.
2119 */
void
ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(ni);
	struct ieee80211_frame *wh;
	struct ath_tid *atid;
	int pri, tid;
	struct mbuf *m0 = bf->bf_m;

	/* Fetch the TID - non-QoS frames get assigned to TID 16 */
	wh = mtod(m0, struct ieee80211_frame *);
	pri = ath_tx_getac(sc, m0);
	tid = ath_tx_gettid(sc, m0);
	atid = &an->an_tid[tid];

	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
	    __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));

	/* Set local packet state, used to queue packets to hardware */
	bf->bf_state.bfs_tid = tid;
	bf->bf_state.bfs_txq = txq;
	bf->bf_state.bfs_pri = pri;

	/*
	 * If the hardware queue isn't busy, queue it directly.
	 * If the hardware queue is busy, queue it.
	 * If the TID is paused or the traffic it outside BAW, software
	 * queue it.
	 */
	ATH_TXQ_LOCK(txq);
	if (atid->paused) {
		/* TID is paused, queue */
		ATH_TXQ_INSERT_TAIL(atid, bf, bf_list);
	} else if (ath_tx_ampdu_pending(sc, an, tid)) {
		/* AMPDU pending; queue */
		ATH_TXQ_INSERT_TAIL(atid, bf, bf_list);
		/* XXX sched? */
	} else if (ath_tx_ampdu_running(sc, an, tid)) {
		/* AMPDU running, attempt direct dispatch if possible */
		if (txq->axq_depth < sc->sc_hwq_limit)
			ath_tx_xmit_aggr(sc, an, bf);
		else {
			/* Hardware queue full; software-queue and schedule */
			ATH_TXQ_INSERT_TAIL(atid, bf, bf_list);
			ath_tx_tid_sched(sc, atid);
		}
	} else if (txq->axq_depth < sc->sc_hwq_limit) {
		/* AMPDU not running, attempt direct dispatch */
		ath_tx_xmit_normal(sc, txq, bf);
	} else {
		/* Busy; queue */
		ATH_TXQ_INSERT_TAIL(atid, bf, bf_list);
		ath_tx_tid_sched(sc, atid);
	}
	ATH_TXQ_UNLOCK(txq);
}
2176
2177/*
2178 * Do the basic frame setup stuff that's required before the frame
2179 * is added to a software queue.
2180 *
2181 * All frames get mostly the same treatment and it's done once.
2182 * Retransmits fiddle with things like the rate control setup,
2183 * setting the retransmit bit in the packet; doing relevant DMA/bus
2184 * syncing and relinking it (back) into the hardware TX queue.
2185 *
2186 * Note that this may cause the mbuf to be reallocated, so
2187 * m0 may not be valid.
2188 */
2189
2190
2191/*
2192 * Configure the per-TID node state.
2193 *
2194 * This likely belongs in if_ath_node.c but I can't think of anywhere
2195 * else to put it just yet.
2196 *
2197 * This sets up the SLISTs and the mutex as appropriate.
2198 */
2199void
2200ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an)
2201{
2202 int i, j;
2203 struct ath_tid *atid;
2204
2205 for (i = 0; i < IEEE80211_TID_SIZE; i++) {
2206 atid = &an->an_tid[i];
2207 TAILQ_INIT(&atid->axq_q);
2208 atid->tid = i;
2209 atid->an = an;
2210 for (j = 0; j < ATH_TID_MAX_BUFS; j++)
2211 atid->tx_buf[j] = NULL;
2212 atid->baw_head = atid->baw_tail = 0;
2213 atid->paused = 0;
2214 atid->sched = 0;
2215 atid->hwq_depth = 0;
2216 atid->cleanup_inprogress = 0;
2217 if (i == IEEE80211_NONQOS_TID)
2218 atid->ac = WME_AC_BE;
2219 else
2220 atid->ac = TID_TO_WME_AC(i);
2221 }
2222}
2223
2224/*
2225 * Pause the current TID. This stops packets from being transmitted
2226 * on it.
2227 *
2228 * Since this is also called from upper layers as well as the driver,
2229 * it will get the TID lock.
2230 */
2231static void
2232ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
2233{
2234 ATH_TXQ_LOCK(sc->sc_ac2q[tid->ac]);
2235 tid->paused++;
2236 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: paused = %d\n",
2237 __func__, tid->paused);
2238 ATH_TXQ_UNLOCK(sc->sc_ac2q[tid->ac]);
2239}
2240
2241/*
2242 * Unpause the current TID, and schedule it if needed.
2243 */
2244static void
2245ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)
2246{
2247 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
2248
2249 tid->paused--;
2250
2251 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: unpaused = %d\n",
2252 __func__, tid->paused);
2253
2254 if (tid->paused || tid->axq_depth == 0) {
2255 return;
2256 }
2257
2258 ath_tx_tid_sched(sc, tid);
2259 /* Punt some frames to the hardware if needed */
2260 ath_txq_sched(sc, sc->sc_ac2q[tid->ac]);
2261}
2262
2263/*
2264 * Free any packets currently pending in the software TX queue.
2265 *
2266 * This will be called when a node is being deleted.
2267 *
2268 * It can also be called on an active node during an interface
2269 * reset or state transition.
2270 *
2271 * (From Linux/reference):
2272 *
2273 * TODO: For frame(s) that are in the retry state, we will reuse the
2274 * sequence number(s) without setting the retry bit. The
2275 * alternative is to give up on these and BAR the receiver's window
2276 * forward.
2277 */
static void
ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid,
    ath_bufhead *bf_cq)
{
	struct ath_buf *bf;
	struct ieee80211_tx_ampdu *tap;
	struct ieee80211_node *ni = &an->an_node;
	int t = 0;	/* have we printed the one-shot debug dump yet? */
	struct ath_txq *txq = sc->sc_ac2q[tid->ac];

	tap = ath_tx_get_tx_tid(an, tid->tid);

	ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);

	/* Walk the queue, moving frames onto the caller's completion list */
	for (;;) {
		bf = TAILQ_FIRST(&tid->axq_q);
		if (bf == NULL) {
			break;
		}

		/* Dump TID state once, on the first drained frame */
		if (t == 0) {
			device_printf(sc->sc_dev,
			    "%s: node %p: tid %d: txq_depth=%d, "
			    "txq_aggr_depth=%d, sched=%d, paused=%d, "
			    "hwq_depth=%d, incomp=%d, baw_head=%d, baw_tail=%d "
			    "txa_start=%d, ni_txseqs=%d\n",
			    __func__, ni, tid->tid, txq->axq_depth,
			    txq->axq_aggr_depth, tid->sched, tid->paused,
			    tid->hwq_depth, tid->incomp, tid->baw_head,
			    tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
			    ni->ni_txseqs[tid->tid]);
			t = 1;
		}


		/*
		 * If the current TID is running AMPDU, update
		 * the BAW.
		 */
		if (ath_tx_ampdu_running(sc, an, tid->tid) &&
		    bf->bf_state.bfs_dobaw) {
			/*
			 * Only remove the frame from the BAW if it's
			 * been transmitted at least once; this means
			 * the frame was in the BAW to begin with.
			 */
			if (bf->bf_state.bfs_retries > 0) {
				ath_tx_update_baw(sc, an, tid, bf);
				bf->bf_state.bfs_dobaw = 0;
			}
			/*
			 * This has become a non-fatal error now
			 */
			if (! bf->bf_state.bfs_addedbaw)
				device_printf(sc->sc_dev,
				    "%s: wasn't added: seqno %d\n",
				    __func__, SEQNO(bf->bf_state.bfs_seqno));
		}
		ATH_TXQ_REMOVE(tid, bf, bf_list);
		TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
	}

	/*
	 * Now that it's completed, grab the TID lock and update
	 * the sequence number and BAW window.
	 * Because sequence numbers have been assigned to frames
	 * that haven't been sent yet, it's entirely possible
	 * we'll be called with some pending frames that have not
	 * been transmitted.
	 *
	 * The cleaner solution is to do the sequence number allocation
	 * when the packet is first transmitted - and thus the "retries"
	 * check above would be enough to update the BAW/seqno.
	 */

	/* But don't do it for non-QoS TIDs */
	if (tap) {
#if 0
		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
		    "%s: node %p: TID %d: sliding BAW left edge to %d\n",
		    __func__, an, tid->tid, tap->txa_start);
#endif
		/* Resync the TX seqno and BAW tracking to the BAW left edge */
		ni->ni_txseqs[tid->tid] = tap->txa_start;
		tid->baw_tail = tid->baw_head;
	}
}
2365
2366/*
2367 * Flush all software queued packets for the given node.
2368 *
2369 * This occurs when a completion handler frees the last buffer
2370 * for a node, and the node is thus freed. This causes the node
2371 * to be cleaned up, which ends up calling ath_tx_node_flush.
2372 */
2373void
2374ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
2375{
2376 int tid;
2377 ath_bufhead bf_cq;
2378 struct ath_buf *bf;
2379
2380 TAILQ_INIT(&bf_cq);
2381
2382 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
2383 struct ath_tid *atid = &an->an_tid[tid];
2384 struct ath_txq *txq = sc->sc_ac2q[atid->ac];
2385
2386 /* Remove this tid from the list of active tids */
2387 ATH_TXQ_LOCK(txq);
2388 ath_tx_tid_unsched(sc, atid);
2389
2390 /* Free packets */
2391 ath_tx_tid_drain(sc, an, atid, &bf_cq);
2392 ATH_TXQ_UNLOCK(txq);
2393 }
2394
2395 /* Handle completed frames */
2396 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
2397 TAILQ_REMOVE(&bf_cq, bf, bf_list);
2398 ath_tx_default_comp(sc, bf, 0);
2399 }
2400}
2401
2402/*
2403 * Drain all the software TXQs currently with traffic queued.
2404 */
2405void
2406ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
2407{
2408 struct ath_tid *tid;
2409 ath_bufhead bf_cq;
2410 struct ath_buf *bf;
2411
2412 TAILQ_INIT(&bf_cq);
2413 ATH_TXQ_LOCK(txq);
2414
2415 /*
2416 * Iterate over all active tids for the given txq,
2417 * flushing and unsched'ing them
2418 */
2419 while (! TAILQ_EMPTY(&txq->axq_tidq)) {
2420 tid = TAILQ_FIRST(&txq->axq_tidq);
2421 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
2422 ath_tx_tid_unsched(sc, tid);
2423 }
2424
2425 ATH_TXQ_UNLOCK(txq);
2426
2427 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
2428 TAILQ_REMOVE(&bf_cq, bf, bf_list);
2429 ath_tx_default_comp(sc, bf, 0);
2430 }
2431}
2432
2433/*
2434 * Handle completion of non-aggregate session frames.
2435 */
void
ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
{
	struct ieee80211_node *ni = bf->bf_node;
	struct ath_node *an = ATH_NODE(ni);
	int tid = bf->bf_state.bfs_tid;
	struct ath_tid *atid = &an->an_tid[tid];
	struct ath_tx_status *ts = &bf->bf_status.ds_txstat;

	/* The TID state is protected behind the TXQ lock */
	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);

	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
	    __func__, bf, fail, atid->hwq_depth - 1);

	/* One fewer frame outstanding on the hardware queue for this TID */
	atid->hwq_depth--;
	if (atid->hwq_depth < 0)
		device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n",
		    __func__, atid->hwq_depth);
	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);

	/*
	 * punt to rate control if we're not being cleaned up
	 * during a hw queue drain and the frame wanted an ACK.
	 */
	if (fail == 0 && ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0))
		ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
		    ts, bf->bf_state.bfs_pktlen,
		    1, (ts->ts_status == 0) ? 0 : 1);

	/* Frees the buffer and notifies upper layers */
	ath_tx_default_comp(sc, bf, fail);
}
2468
2469/*
2470 * Handle cleanup of aggregate session packets that aren't
2471 * an A-MPDU.
2472 *
2473 * There's no need to update the BAW here - the session is being
2474 * torn down.
2475 */
static void
ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_node *ni = bf->bf_node;
	struct ath_node *an = ATH_NODE(ni);
	int tid = bf->bf_state.bfs_tid;
	struct ath_tid *atid = &an->an_tid[tid];

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",
	    __func__, tid, atid->incomp);

	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
	/* One fewer outstanding frame from before the cleanup began */
	atid->incomp--;
	if (atid->incomp == 0) {
		/* Last outstanding frame: cleanup done, unpause the TID */
		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
		    "%s: TID %d: cleaned up! resume!\n",
		    __func__, tid);
		atid->cleanup_inprogress = 0;
		ath_tx_tid_resume(sc, atid);
	}
	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);

	ath_tx_default_comp(sc, bf, 0);
}
2500
2501/*
2502 * Performs transmit side cleanup when TID changes from aggregated to
2503 * unaggregated.
2504 *
2505 * - Discard all retry frames from the s/w queue.
2506 * - Fix the tx completion function for all buffers in s/w queue.
2507 * - Count the number of unacked frames, and let transmit completion
2508 * handle it later.
2509 *
2510 * The caller is responsible for pausing the TID.
2511 */
static void
ath_tx_cleanup(struct ath_softc *sc, struct ath_node *an, int tid)
{
	struct ath_tid *atid = &an->an_tid[tid];
	struct ieee80211_tx_ampdu *tap;
	struct ath_buf *bf, *bf_next;
	ath_bufhead bf_cq;	/* retry frames to fail-complete at the end */

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: TID %d: called\n", __func__, tid);

	TAILQ_INIT(&bf_cq);
	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);

	/*
	 * Update the frames in the software TX queue:
	 *
	 * + Discard retry frames in the queue
	 * + Fix the completion function to be non-aggregate
	 */
	bf = TAILQ_FIRST(&atid->axq_q);
	while (bf) {
		if (bf->bf_state.bfs_isretried) {
			/* Retried frame: unlink, fix BAW, queue for failure */
			bf_next = TAILQ_NEXT(bf, bf_list);
			TAILQ_REMOVE(&atid->axq_q, bf, bf_list);
			atid->axq_depth--;
			if (bf->bf_state.bfs_dobaw) {
				ath_tx_update_baw(sc, an, atid, bf);
				if (! bf->bf_state.bfs_addedbaw)
					device_printf(sc->sc_dev,
					    "%s: wasn't added: seqno %d\n",
					    __func__, SEQNO(bf->bf_state.bfs_seqno));
			}
			bf->bf_state.bfs_dobaw = 0;
			/*
			 * Call the default completion handler with "fail" just
			 * so upper levels are suitably notified about this.
			 */
			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
			bf = bf_next;
			continue;
		}
		/* Give these the default completion handler */
		bf->bf_comp = ath_tx_normal_comp;
		bf = TAILQ_NEXT(bf, bf_list);
	}

	/* The caller is required to pause the TID */
#if 0
	/* Pause the TID */
	ath_tx_tid_pause(sc, atid);
#endif

	/*
	 * Calculate what hardware-queued frames exist based
	 * on the current BAW size. Ie, what frames have been
	 * added to the TX hardware queue for this TID but
	 * not yet ACKed.
	 */
	tap = ath_tx_get_tx_tid(an, tid);
	/* Need the lock - fiddling with BAW */
	while (atid->baw_head != atid->baw_tail) {
		if (atid->tx_buf[atid->baw_head]) {
			/* Still in flight; count it and mark cleanup pending */
			atid->incomp++;
			atid->cleanup_inprogress = 1;
			atid->tx_buf[atid->baw_head] = NULL;
		}
		INCR(atid->baw_head, ATH_TID_MAX_BUFS);
		INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
	}

	/*
	 * If cleanup is required, defer TID scheduling
	 * until all the HW queued packets have been
	 * sent.
	 */
	if (! atid->cleanup_inprogress)
		ath_tx_tid_resume(sc, atid);

	if (atid->cleanup_inprogress)
		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
		    "%s: TID %d: cleanup needed: %d packets\n",
		    __func__, tid, atid->incomp);
	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);

	/* Handle completing frames and fail them */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}
}
2603
2604static void
2605ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
2606{
2607 struct ieee80211_frame *wh;
2608
2609 wh = mtod(bf->bf_m, struct ieee80211_frame *);
2610 /* Only update/resync if needed */
2611 if (bf->bf_state.bfs_isretried == 0) {
2612 wh->i_fc[1] |= IEEE80211_FC1_RETRY;
2613 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
2614 BUS_DMASYNC_PREWRITE);
2615 }
2616 sc->sc_stats.ast_tx_swretries++;
2617 bf->bf_state.bfs_isretried = 1;
2618 bf->bf_state.bfs_retries ++;
2619}
2620
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *nbf;
	int error;

	/* Clone the buffer (takes over the mbuf/node references) */
	nbf = ath_buf_clone(sc, bf);

#if 0
	device_printf(sc->sc_dev, "%s: ATH_BUF_BUSY; cloning\n",
	    __func__);
#endif

	if (nbf == NULL) {
		/* Failed to clone */
		device_printf(sc->sc_dev,
		    "%s: failed to clone a busy buffer\n",
		    __func__);
		return NULL;
	}

	/* Setup the dma for the new buffer */
	error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed to setup dma for clone\n",
		    __func__);
		/*
		 * Put this at the head of the list, not tail;
		 * that way it doesn't interfere with the
		 * busy buffer logic (which uses the tail of
		 * the list.)
		 */
		ATH_TXBUF_LOCK(sc);
		TAILQ_INSERT_HEAD(&sc->sc_txbuf, nbf, bf_list);
		ATH_TXBUF_UNLOCK(sc);
		return NULL;
	}

	/*
	 * Free current buffer; return the older buffer.
	 * Clear bf_m/bf_node first so ath_freebuf() doesn't release
	 * the mbuf/node references now owned by the clone.
	 */
	bf->bf_m = NULL;
	bf->bf_node = NULL;
	ath_freebuf(sc, bf);
	return nbf;
}
2666
2667/*
2668 * Handle retrying an unaggregate frame in an aggregate
2669 * session.
2670 *
2671 * If too many retries occur, pause the TID, wait for
2672 * any further retransmits (as there's no reason why
2673 * non-aggregate frames in an aggregate session are
2674 * transmitted in-order; they just have to be in-BAW)
2675 * and then queue a BAR.
2676 */
static void
ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_node *ni = bf->bf_node;
	struct ath_node *an = ATH_NODE(ni);
	int tid = bf->bf_state.bfs_tid;
	struct ath_tid *atid = &an->an_tid[tid];
	struct ieee80211_tx_ampdu *tap;
	int txseq;

	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);

	tap = ath_tx_get_tx_tid(an, tid);

	/*
	 * If the buffer is marked as busy, we can't directly
	 * reuse it. Instead, try to clone the buffer.
	 * If the clone is successful, recycle the old buffer.
	 * If the clone is unsuccessful, set bfs_retries to max
	 * to force the next bit of code to free the buffer
	 * for us.
	 */
	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
	    (bf->bf_flags & ATH_BUF_BUSY)) {
		struct ath_buf *nbf;
		nbf = ath_tx_retry_clone(sc, bf);
		if (nbf)
			/* bf has been freed at this point */
			bf = nbf;
		else
			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
	}

	/* Too many retries: drop the frame and punch the BAW forward */
	if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
		    "%s: exceeded retries; seqno %d\n",
		    __func__, SEQNO(bf->bf_state.bfs_seqno));
		sc->sc_stats.ast_tx_swretrymax++;

		/* Update BAW anyway */
		if (bf->bf_state.bfs_dobaw) {
			ath_tx_update_baw(sc, an, atid, bf);
			if (! bf->bf_state.bfs_addedbaw)
				device_printf(sc->sc_dev,
				    "%s: wasn't added: seqno %d\n",
				    __func__, SEQNO(bf->bf_state.bfs_seqno));
		}
		bf->bf_state.bfs_dobaw = 0;

		/* Send BAR frame */
		/*
		 * This'll end up going into net80211 and back out
		 * again, via ic->ic_raw_xmit().
		 */
		txseq = tap->txa_start;
		ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);

		device_printf(sc->sc_dev,
		    "%s: TID %d: send BAR; seq %d\n", __func__, tid, txseq);

		/* XXX TODO: send BAR */

		/* Free buffer, bf is free after this call */
		ath_tx_default_comp(sc, bf, 0);
		return;
	}

	/*
	 * This increments the retry counter as well as
	 * sets the retry flag in the ath_buf and packet
	 * body.
	 */
	ath_tx_set_retry(sc, bf);

	/*
	 * Insert this at the head of the queue, so it's
	 * retried before any current/subsequent frames.
	 */
	ATH_TXQ_INSERT_HEAD(atid, bf, bf_list);
	ath_tx_tid_sched(sc, atid);

	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
}
2760
2761/*
2762 * Common code for aggregate excessive retry/subframe retry.
2763 * If retrying, queues buffers to bf_q. If not, frees the
2764 * buffers.
2765 *
2766 * XXX should unify this with ath_tx_aggr_retry_unaggr()
2767 */
static int
ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,
    ath_bufhead *bf_q)
{
	struct ieee80211_node *ni = bf->bf_node;
	struct ath_node *an = ATH_NODE(ni);
	int tid = bf->bf_state.bfs_tid;
	struct ath_tid *atid = &an->an_tid[tid];

	ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[atid->ac]);

	/* Strip aggregate-specific descriptor state before retrying */
	ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
	ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);
	/* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */

	/*
	 * If the buffer is marked as busy, we can't directly
	 * reuse it. Instead, try to clone the buffer.
	 * If the clone is successful, recycle the old buffer.
	 * If the clone is unsuccessful, set bfs_retries to max
	 * to force the next bit of code to free the buffer
	 * for us.
	 */
	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
	    (bf->bf_flags & ATH_BUF_BUSY)) {
		struct ath_buf *nbf;
		nbf = ath_tx_retry_clone(sc, bf);
		if (nbf)
			/* bf has been freed at this point */
			bf = nbf;
		else
			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
	}

	/* Exhausted retries: drop the frame from the BAW; caller frees it */
	if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
		sc->sc_stats.ast_tx_swretrymax++;
		DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
		    "%s: max retries: seqno %d\n",
		    __func__, SEQNO(bf->bf_state.bfs_seqno));
		ath_tx_update_baw(sc, an, atid, bf);
		if (! bf->bf_state.bfs_addedbaw)
			device_printf(sc->sc_dev,
			    "%s: wasn't added: seqno %d\n",
			    __func__, SEQNO(bf->bf_state.bfs_seqno));
		bf->bf_state.bfs_dobaw = 0;
		return 1;
	}

	/* Still retryable: mark and queue on the caller's retry list */
	ath_tx_set_retry(sc, bf);
	bf->bf_next = NULL;		/* Just to make sure */

	TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
	return 0;
}
2822
2823/*
2824 * error pkt completion for an aggregate destination
2825 */
static void
ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first,
    struct ath_tid *tid)
{
	struct ieee80211_node *ni = bf_first->bf_node;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_buf *bf_next, *bf;
	ath_bufhead bf_q;	/* sub-frames to be retried */
	int drops = 0;
	struct ieee80211_tx_ampdu *tap;
	ath_bufhead bf_cq;	/* sub-frames to be failed/completed */

	TAILQ_INIT(&bf_q);
	TAILQ_INIT(&bf_cq);
	sc->sc_stats.ast_tx_aggrfail++;

	/*
	 * Update rate control - all frames have failed.
	 *
	 * XXX use the length in the first frame in the series;
	 * XXX just so things are consistent for now.
	 */
	ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
	    &bf_first->bf_status.ds_txstat,
	    bf_first->bf_state.bfs_pktlen,
	    bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);

	ATH_TXQ_LOCK(sc->sc_ac2q[tid->ac]);
	tap = ath_tx_get_tx_tid(an, tid->tid);

	/* Retry all subframes; collect the ones that exceed max retries */
	bf = bf_first;
	while (bf) {
		bf_next = bf->bf_next;
		bf->bf_next = NULL;	/* Remove it from the aggr list */
		if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
			drops++;
			bf->bf_next = NULL;
			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
		}
		bf = bf_next;
	}

	/* Prepend all frames to the beginning of the queue */
	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
		TAILQ_REMOVE(&bf_q, bf, bf_list);
		ATH_TXQ_INSERT_HEAD(tid, bf, bf_list);
	}

	ath_tx_tid_sched(sc, tid);

	/*
	 * send bar if we dropped any frames
	 *
	 * Keep the txq lock held for now, as we need to ensure
	 * that ni_txseqs[] is consistent (as it's being updated
	 * in the ifnet TX context or raw TX context.)
	 */
	if (drops) {
		int txseq = tap->txa_start;
		ATH_TXQ_UNLOCK(sc->sc_ac2q[tid->ac]);
		device_printf(sc->sc_dev,
		    "%s: TID %d: send BAR; seq %d\n",
		    __func__, tid->tid, txseq);

		/* XXX TODO: send BAR */
	} else {
		ATH_TXQ_UNLOCK(sc->sc_ac2q[tid->ac]);
	}

	/* Complete frames which errored out */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 0);
	}
}
2902
2903/*
2904 * Handle clean-up of packets from an aggregate list.
2905 *
2906 * There's no need to update the BAW here - the session is being
2907 * torn down.
2908 */
2909static void
2910ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
2911{
2912 struct ath_buf *bf, *bf_next;
2913 struct ieee80211_node *ni = bf_first->bf_node;
2914 struct ath_node *an = ATH_NODE(ni);
2915 int tid = bf_first->bf_state.bfs_tid;
2916 struct ath_tid *atid = &an->an_tid[tid];
2917
2918 bf = bf_first;
2919
2920 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
2921
2922 /* update incomp */
2923 while (bf) {
2924 atid->incomp--;
2925 bf = bf->bf_next;
2926 }
2927
2928 if (atid->incomp == 0) {
2929 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
2930 "%s: TID %d: cleaned up! resume!\n",
2931 __func__, tid);
2932 atid->cleanup_inprogress = 0;
2933 ath_tx_tid_resume(sc, atid);
2934 }
2935 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
2936
2937 /* Handle frame completion */
2938 while (bf) {
2939 bf_next = bf->bf_next;
2940 ath_tx_default_comp(sc, bf, 1);
2941 bf = bf_next;
2942 }
2943}
2944
2945/*
2946 * Handle completion of an set of aggregate frames.
2947 *
2948 * XXX for now, simply complete each sub-frame.
2949 *
2950 * Note: the completion handler is the last descriptor in the aggregate,
2951 * not the last descriptor in the first frame.
2952 */
2953static void
2954ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first, int fail)
2955{
2956 //struct ath_desc *ds = bf->bf_lastds;
2957 struct ieee80211_node *ni = bf_first->bf_node;
2958 struct ath_node *an = ATH_NODE(ni);
2959 int tid = bf_first->bf_state.bfs_tid;
2960 struct ath_tid *atid = &an->an_tid[tid];
2961 struct ath_tx_status ts;
2962 struct ieee80211_tx_ampdu *tap;
2963 ath_bufhead bf_q;
2964 ath_bufhead bf_cq;
2965 int seq_st, tx_ok;
2966 int hasba, isaggr;
2967 uint32_t ba[2];
2968 struct ath_buf *bf, *bf_next;
2969 int ba_index;
2970 int drops = 0;
2971 int nframes = 0, nbad = 0, nf;
2972 int pktlen;
2973 /* XXX there's too much on the stack? */
2974 struct ath_rc_series rc[4];
2975 int txseq;
2976
2977 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
2978 __func__, atid->hwq_depth);
2979
2980 /* The TID state is kept behind the TXQ lock */
2981 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
2982
2983 atid->hwq_depth--;
2984 if (atid->hwq_depth < 0)
2985 device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n",
2986 __func__, atid->hwq_depth);
2987
2988 /*
2989 * Punt cleanup to the relevant function, not our problem now
2990 */
2991 if (atid->cleanup_inprogress) {
2992 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
2993 ath_tx_comp_cleanup_aggr(sc, bf_first);
2994 return;
2995 }
2996
2997 /*
2998 * Take a copy; this may be needed -after- bf_first
2999 * has been completed and freed.
3000 */
3001 ts = bf_first->bf_status.ds_txstat;
3002 /*
3003 * XXX for now, use the first frame in the aggregate for
3004 * XXX rate control completion; it's at least consistent.
3005 */
3006 pktlen = bf_first->bf_state.bfs_pktlen;
3007
3008 /*
3009 * handle errors first
3010 */
3011 if (ts.ts_status & HAL_TXERR_XRETRY) {
3012 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
3013 ath_tx_comp_aggr_error(sc, bf_first, atid);
3014 return;
3015 }
3016
3017 TAILQ_INIT(&bf_q);
3018 TAILQ_INIT(&bf_cq);
3019 tap = ath_tx_get_tx_tid(an, tid);
3020
3021 /*
3022 * extract starting sequence and block-ack bitmap
3023 */
3024 /* XXX endian-ness of seq_st, ba? */
3025 seq_st = ts.ts_seqnum;
3026 hasba = !! (ts.ts_flags & HAL_TX_BA);
3027 tx_ok = (ts.ts_status == 0);
3028 isaggr = bf_first->bf_state.bfs_aggr;
3029 ba[0] = ts.ts_ba_low;
3030 ba[1] = ts.ts_ba_high;
3031
3032 /*
3033 * Copy the TX completion status and the rate control
3034 * series from the first descriptor, as it may be freed
3035 * before the rate control code can get its grubby fingers
3036 * into things.
3037 */
3038 memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));
3039
3040 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
3041 "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n",
3042 __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
3043 isaggr, seq_st, hasba, ba[0], ba[1]);
3044
3045 /* Occasionally, the MAC sends a tx status for the wrong TID. */
3046 if (tid != ts.ts_tid) {
3047 device_printf(sc->sc_dev, "%s: tid %d != hw tid %d\n",
3048 __func__, tid, ts.ts_tid);
3049 tx_ok = 0;
3050 }
3051
3052 /* AR5416 BA bug; this requires an interface reset */
3053 if (isaggr && tx_ok && (! hasba)) {
3054 device_printf(sc->sc_dev,
3055 "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, seq_st=%d\n",
3056 __func__, hasba, tx_ok, isaggr, seq_st);
3057 /* XXX TODO: schedule an interface reset */
3058 }
3059
3060 /*
3061 * Walk the list of frames, figure out which ones were correctly
3062 * sent and which weren't.
3063 */
3064 bf = bf_first;
3065 nf = bf_first->bf_state.bfs_nframes;
3066
3067 /* bf_first is going to be invalid once this list is walked */
3068 bf_first = NULL;
3069
3070 /*
3071 * Walk the list of completed frames and determine
3072 * which need to be completed and which need to be
3073 * retransmitted.
3074 *
3075 * For completed frames, the completion functions need
3076 * to be called at the end of this function as the last
3077 * node reference may free the node.
3078 *
3079 * Finally, since the TXQ lock can't be held during the
3080 * completion callback (to avoid lock recursion),
3081 * the completion calls have to be done outside of the
3082 * lock.
3083 */
3084 while (bf) {
3085 nframes++;
3086 ba_index = ATH_BA_INDEX(seq_st, SEQNO(bf->bf_state.bfs_seqno));
3087 bf_next = bf->bf_next;
3088 bf->bf_next = NULL; /* Remove it from the aggr list */
3089
3090 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
3091 "%s: checking bf=%p seqno=%d; ack=%d\n",
3092 __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
3093 ATH_BA_ISSET(ba, ba_index));
3094
3095 if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
3096 ath_tx_update_baw(sc, an, atid, bf);
3097 bf->bf_state.bfs_dobaw = 0;
3098 if (! bf->bf_state.bfs_addedbaw)
3099 device_printf(sc->sc_dev,
3100 "%s: wasn't added: seqno %d\n",
3101 __func__, SEQNO(bf->bf_state.bfs_seqno));
3102 bf->bf_next = NULL;
3103 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
3104 } else {
3105 if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
3106 drops++;
3107 bf->bf_next = NULL;
3108 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
3109 }
3110 nbad++;
3111 }
3112 bf = bf_next;
3113 }
3114
3115 /*
3116 * Now that the BAW updates have been done, unlock
3117 *
3118 * txseq is grabbed before the lock is released so we
3119 * have a consistent view of what -was- in the BAW.
3120 * Anything after this point will not yet have been
3121 * TXed.
3122 */
3123 txseq = tap->txa_start;
3124 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
3125
3126 if (nframes != nf)
3127 device_printf(sc->sc_dev,
3128 "%s: num frames seen=%d; bf nframes=%d\n",
3129 __func__, nframes, nf);
3130
3131 /*
3132 * Now we know how many frames were bad, call the rate
3133 * control code.
3134 */
3135 if (fail == 0)
3136 ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes, nbad);
3137
3138 /*
3139 * send bar if we dropped any frames
3140 */
3141 if (drops) {
3142 device_printf(sc->sc_dev,
3143 "%s: TID %d: send BAR; seq %d\n", __func__, tid, txseq);
3144 /* XXX TODO: send BAR */
3145 }
3146
3147 /* Prepend all frames to the beginning of the queue */
3148 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
3149 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
3150 TAILQ_REMOVE(&bf_q, bf, bf_list);
3151 ATH_TXQ_INSERT_HEAD(atid, bf, bf_list);
3152 }
3153 ath_tx_tid_sched(sc, atid);
3154 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
3155
3156 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
3157 "%s: txa_start now %d\n", __func__, tap->txa_start);
3158
3159 /* Do deferred completion */
3160 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
3161 TAILQ_REMOVE(&bf_cq, bf, bf_list);
3162 ath_tx_default_comp(sc, bf, 0);
3163 }
3164}
3165
3166/*
3167 * Handle completion of unaggregated frames in an ADDBA
3168 * session.
3169 *
3170 * Fail is set to 1 if the entry is being freed via a call to
3171 * ath_tx_draintxq().
3172 */
3173static void
3174ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
3175{
3176 struct ieee80211_node *ni = bf->bf_node;
3177 struct ath_node *an = ATH_NODE(ni);
3178 int tid = bf->bf_state.bfs_tid;
3179 struct ath_tid *atid = &an->an_tid[tid];
3180 struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
3181
3182 /*
3183 * Update rate control status here, before we possibly
3184 * punt to retry or cleanup.
3185 *
3186 * Do it outside of the TXQ lock.
3187 */
3188 if (fail == 0 && ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0))
3189 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
3190 &bf->bf_status.ds_txstat,
3191 bf->bf_state.bfs_pktlen,
3192 1, (ts->ts_status == 0) ? 0 : 1);
3193
3194 /*
3195 * This is called early so atid->hwq_depth can be tracked.
3196 * This unfortunately means that it's released and regrabbed
3197 * during retry and cleanup. That's rather inefficient.
3198 */
3199 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
3200
3201 if (tid == IEEE80211_NONQOS_TID)
3202 device_printf(sc->sc_dev, "%s: TID=16!\n", __func__);
3203
3204 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: tid=%d, hwq_depth=%d\n",
3205 __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth);
3206
3207 atid->hwq_depth--;
3208 if (atid->hwq_depth < 0)
3209 device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n",
3210 __func__, atid->hwq_depth);
3211
3212 /*
3213 * If a cleanup is in progress, punt to comp_cleanup;
3214 * rather than handling it here. It's thus their
3215 * responsibility to clean up, call the completion
3216 * function in net80211, etc.
3217 */
3218 if (atid->cleanup_inprogress) {
3219 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
3220 ath_tx_comp_cleanup_unaggr(sc, bf);
3221 return;
3222 }
3223
3224 /*
3225 * Don't bother with the retry check if all frames
3226 * are being failed (eg during queue deletion.)
3227 */
3228 if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
3229 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
3230 ath_tx_aggr_retry_unaggr(sc, bf);
3231 return;
3232 }
3233
3234 /* Success? Complete */
3235 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
3236 __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
3237 if (bf->bf_state.bfs_dobaw) {
3238 ath_tx_update_baw(sc, an, atid, bf);
3239 bf->bf_state.bfs_dobaw = 0;
3240 if (! bf->bf_state.bfs_addedbaw)
3241 device_printf(sc->sc_dev,
3242 "%s: wasn't added: seqno %d\n",
3243 __func__, SEQNO(bf->bf_state.bfs_seqno));
3244 }
3245
3246 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
3247
3248 ath_tx_default_comp(sc, bf, fail);
3249 /* bf is freed at this point */
3250}
3251
3252void
3253ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
3254{
3255 if (bf->bf_state.bfs_aggr)
3256 ath_tx_aggr_comp_aggr(sc, bf, fail);
3257 else
3258 ath_tx_aggr_comp_unaggr(sc, bf, fail);
3259}
3260
3261/*
3262 * Schedule some packets from the given node/TID to the hardware.
3263 *
3264 * This is the aggregate version.
3265 */
3266void
3267ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
3268 struct ath_tid *tid)
3269{
3270 struct ath_buf *bf;
3271 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
3272 struct ieee80211_tx_ampdu *tap;
3273 struct ieee80211_node *ni = &an->an_node;
3274 ATH_AGGR_STATUS status;
3275 ath_bufhead bf_q;
3276
3277 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
3278 ATH_TXQ_LOCK_ASSERT(txq);
3279
3280 tap = ath_tx_get_tx_tid(an, tid->tid);
3281
3282 if (tid->tid == IEEE80211_NONQOS_TID)
3283 device_printf(sc->sc_dev, "%s: called for TID=NONQOS_TID?\n",
3284 __func__);
3285
3286 for (;;) {
3287 status = ATH_AGGR_DONE;
3288
3289 /*
3290 * If the upper layer has paused the TID, don't
3291 * queue any further packets.
3292 *
3293 * This can also occur from the completion task because
3294 * of packet loss; but as its serialised with this code,
3295 * it won't "appear" half way through queuing packets.
3296 */
3297 if (tid->paused)
3298 break;
3299
3300 bf = TAILQ_FIRST(&tid->axq_q);
3301 if (bf == NULL) {
3302 break;
3303 }
3304
3305 /*
3306 * If the packet doesn't fall within the BAW (eg a NULL
3307 * data frame), schedule it directly; continue.
3308 */
3309 if (! bf->bf_state.bfs_dobaw) {
3310 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: non-baw packet\n",
3311 __func__);
3312 ATH_TXQ_REMOVE(tid, bf, bf_list);
3313 bf->bf_state.bfs_aggr = 0;
3314 ath_tx_do_ratelookup(sc, bf);
3315 ath_tx_rate_fill_rcflags(sc, bf);
3316 ath_tx_set_rtscts(sc, bf);
3317 ath_tx_setds(sc, bf);
3318 ath_tx_chaindesclist(sc, bf);
3319 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
3320 ath_tx_set_ratectrl(sc, ni, bf);
3321
3322 sc->sc_aggr_stats.aggr_nonbaw_pkt++;
3323
3324 /* Queue the packet; continue */
3325 goto queuepkt;
3326 }
3327
3328 TAILQ_INIT(&bf_q);
3329
3330 /*
3331 * Do a rate control lookup on the first frame in the
3332 * list. The rate control code needs that to occur
3333 * before it can determine whether to TX.
3334 * It's inaccurate because the rate control code doesn't
3335 * really "do" aggregate lookups, so it only considers
3336 * the size of the first frame.
3337 */
3338 ath_tx_do_ratelookup(sc, bf);
3339 bf->bf_state.bfs_rc[3].rix = 0;
3340 bf->bf_state.bfs_rc[3].tries = 0;
3341 ath_tx_rate_fill_rcflags(sc, bf);
3342
3343 status = ath_tx_form_aggr(sc, an, tid, &bf_q);
3344
3345 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
3346 "%s: ath_tx_form_aggr() status=%d\n", __func__, status);
3347
3348 /*
3349 * No frames to be picked up - out of BAW
3350 */
3351 if (TAILQ_EMPTY(&bf_q))
3352 break;
3353
3354 /*
3355 * This assumes that the descriptor list in the ath_bufhead
3356 * are already linked together via bf_next pointers.
3357 */
3358 bf = TAILQ_FIRST(&bf_q);
3359
3360 /*
3361 * If it's the only frame send as non-aggregate
3362 * assume that ath_tx_form_aggr() has checked
3363 * whether it's in the BAW and added it appropriately.
3364 */
3365 if (bf->bf_state.bfs_nframes == 1) {
3366 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
3367 "%s: single-frame aggregate\n", __func__);
3368 bf->bf_state.bfs_aggr = 0;
3369 ath_tx_set_rtscts(sc, bf);
3370 ath_tx_setds(sc, bf);
3371 ath_tx_chaindesclist(sc, bf);
3372 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
3373 ath_tx_set_ratectrl(sc, ni, bf);
3374 if (status == ATH_AGGR_BAW_CLOSED)
3375 sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
3376 else
3377 sc->sc_aggr_stats.aggr_single_pkt++;
3378 } else {
3379 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
3380 "%s: multi-frame aggregate: %d frames, length %d\n",
3381 __func__, bf->bf_state.bfs_nframes,
3382 bf->bf_state.bfs_al);
3383 bf->bf_state.bfs_aggr = 1;
3384 sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
3385 sc->sc_aggr_stats.aggr_aggr_pkt++;
3386
3387 /*
3388 * Update the rate and rtscts information based on the
3389 * rate decision made by the rate control code;
3390 * the first frame in the aggregate needs it.
3391 */
3392 ath_tx_set_rtscts(sc, bf);
3393
3394 /*
3395 * Setup the relevant descriptor fields
3396 * for aggregation. The first descriptor
3397 * already points to the rest in the chain.
3398 */
3399 ath_tx_setds_11n(sc, bf);
3400
3401 /*
3402 * setup first desc with rate and aggr info
3403 */
3404 ath_tx_set_ratectrl(sc, ni, bf);
3405 }
3406 queuepkt:
3407 //txq = bf->bf_state.bfs_txq;
3408
3409 /* Set completion handler, multi-frame aggregate or not */
3410 bf->bf_comp = ath_tx_aggr_comp;
3411
3412 if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
3413 device_printf(sc->sc_dev, "%s: TID=16?\n", __func__);
3414
3415 /* Punt to txq */
3416 ath_tx_handoff(sc, txq, bf);
3417
3418 /* Track outstanding buffer count to hardware */
3419 /* aggregates are "one" buffer */
3420 tid->hwq_depth++;
3421
3422 /*
3423 * Break out if ath_tx_form_aggr() indicated
3424 * there can't be any further progress (eg BAW is full.)
3425 * Checking for an empty txq is done above.
3426 *
3427 * XXX locking on txq here?
3428 */
3429 if (txq->axq_aggr_depth >= sc->sc_hwq_limit ||
3430 status == ATH_AGGR_BAW_CLOSED)
3431 break;
3432 }
3433}
3434
3435/*
3436 * Schedule some packets from the given node/TID to the hardware.
3437 */
3438void
3439ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
3440 struct ath_tid *tid)
3441{
3442 struct ath_buf *bf;
3443 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
3444 struct ieee80211_node *ni = &an->an_node;
3445
3446 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
3447 __func__, an, tid->tid);
3448
3449 ATH_TXQ_LOCK_ASSERT(txq);
3450
3451 /* Check - is AMPDU pending or running? then print out something */
3452 if (ath_tx_ampdu_pending(sc, an, tid->tid))
3453 device_printf(sc->sc_dev, "%s: tid=%d, ampdu pending?\n",
3454 __func__, tid->tid);
3455 if (ath_tx_ampdu_running(sc, an, tid->tid))
3456 device_printf(sc->sc_dev, "%s: tid=%d, ampdu running?\n",
3457 __func__, tid->tid);
3458
3459 for (;;) {
3460
3461 /*
3462 * If the upper layers have paused the TID, don't
3463 * queue any further packets.
3464 */
3465 if (tid->paused)
3466 break;
3467
3468 bf = TAILQ_FIRST(&tid->axq_q);
3469 if (bf == NULL) {
3470 break;
3471 }
3472
3473 ATH_TXQ_REMOVE(tid, bf, bf_list);
3474
3475 KASSERT(txq == bf->bf_state.bfs_txq, ("txqs not equal!\n"));
3476
3477 /* Sanity check! */
3478 if (tid->tid != bf->bf_state.bfs_tid) {
3479 device_printf(sc->sc_dev, "%s: bfs_tid %d !="
3480 " tid %d\n",
3481 __func__, bf->bf_state.bfs_tid, tid->tid);
3482 }
3483 /* Normal completion handler */
3484 bf->bf_comp = ath_tx_normal_comp;
3485
3486 /* Program descriptors + rate control */
3487 ath_tx_do_ratelookup(sc, bf);
3488 ath_tx_rate_fill_rcflags(sc, bf);
3489 ath_tx_set_rtscts(sc, bf);
3490 ath_tx_setds(sc, bf);
3491 ath_tx_chaindesclist(sc, bf);
3492 ath_tx_set_ratectrl(sc, ni, bf);
3493
3494 /* Track outstanding buffer count to hardware */
3495 /* aggregates are "one" buffer */
3496 tid->hwq_depth++;
3497
3498 /* Punt to hardware or software txq */
3499 ath_tx_handoff(sc, txq, bf);
3500 }
3501}
3502
3503/*
3504 * Schedule some packets to the given hardware queue.
3505 *
3506 * This function walks the list of TIDs (ie, ath_node TIDs
3507 * with queued traffic) and attempts to schedule traffic
3508 * from them.
3509 *
3510 * TID scheduling is implemented as a FIFO, with TIDs being
3511 * added to the end of the queue after some frames have been
3512 * scheduled.
3513 */
3514void
3515ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
3516{
3517 struct ath_tid *tid, *next, *last;
3518
3519 ATH_TXQ_LOCK_ASSERT(txq);
3520
3521 /*
3522 * Don't schedule if the hardware queue is busy.
3523 * This (hopefully) gives some more time to aggregate
3524 * some packets in the aggregation queue.
3525 */
3526 if (txq->axq_aggr_depth >= sc->sc_hwq_limit) {
3527 sc->sc_aggr_stats.aggr_sched_nopkt++;
3528 return;
3529 }
3530
3531 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
3532
3533 TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
3534 /*
3535 * Suspend paused queues here; they'll be resumed
3536 * once the addba completes or times out.
3537 */
3538 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
3539 __func__, tid->tid, tid->paused);
3540 ath_tx_tid_unsched(sc, tid);
3541 if (tid->paused) {
3542 continue;
3543 }
3544 if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
3545 ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
3546 else
3547 ath_tx_tid_hw_queue_norm(sc, tid->an, tid);
3548
3549 /* Not empty? Re-schedule */
3550 if (tid->axq_depth != 0)
3551 ath_tx_tid_sched(sc, tid);
3552
3553 /* Give the software queue time to aggregate more packets */
3554 if (txq->axq_aggr_depth >= sc->sc_hwq_limit) {
3555 break;
3556 }
3557
3558 /*
3559 * If this was the last entry on the original list, stop.
3560 * Otherwise nodes that have been rescheduled onto the end
3561 * of the TID FIFO list will just keep being rescheduled.
3562 */
3563 if (tid == last)
3564 break;
3565 }
3566}
3567
3568/*
3569 * TX addba handling
3570 */
3571
3572/*
3573 * Return net80211 TID struct pointer, or NULL for none
3574 */
3575struct ieee80211_tx_ampdu *
3576ath_tx_get_tx_tid(struct ath_node *an, int tid)
3577{
3578 struct ieee80211_node *ni = &an->an_node;
3579 struct ieee80211_tx_ampdu *tap;
3580 int ac;
3581
3582 if (tid == IEEE80211_NONQOS_TID)
3583 return NULL;
3584
3585 ac = TID_TO_WME_AC(tid);
3586
3587 tap = &ni->ni_tx_ampdu[ac];
3588 return tap;
3589}
3590
3591/*
3592 * Is AMPDU-TX running?
3593 */
3594static int
3595ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
3596{
3597 struct ieee80211_tx_ampdu *tap;
3598
3599 if (tid == IEEE80211_NONQOS_TID)
3600 return 0;
3601
3602 tap = ath_tx_get_tx_tid(an, tid);
3603 if (tap == NULL)
3604 return 0; /* Not valid; default to not running */
3605
3606 return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
3607}
3608
3609/*
3610 * Is AMPDU-TX negotiation pending?
3611 */
3612static int
3613ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
3614{
3615 struct ieee80211_tx_ampdu *tap;
3616
3617 if (tid == IEEE80211_NONQOS_TID)
3618 return 0;
3619
3620 tap = ath_tx_get_tx_tid(an, tid);
3621 if (tap == NULL)
3622 return 0; /* Not valid; default to not pending */
3623
3624 return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
3625}
3626
3627/*
3628 * Is AMPDU-TX pending for the given TID?
3629 */
3630
3631
3632/*
3633 * Method to handle sending an ADDBA request.
3634 *
3635 * We tap this so the relevant flags can be set to pause the TID
3636 * whilst waiting for the response.
3637 *
3638 * XXX there's no timeout handler we can override?
3639 */
3640int
3641ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
3642 int dialogtoken, int baparamset, int batimeout)
3643{
3644 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3645 int tid = WME_AC_TO_TID(tap->txa_ac);
3646 struct ath_node *an = ATH_NODE(ni);
3647 struct ath_tid *atid = &an->an_tid[tid];
3648
3649 /*
3650 * XXX danger Will Robinson!
3651 *
3652 * Although the taskqueue may be running and scheduling some more
3653 * packets, these should all be _before_ the addba sequence number.
3654 * However, net80211 will keep self-assigning sequence numbers
3655 * until addba has been negotiated.
3656 *
3657 * In the past, these packets would be "paused" (which still works
3658 * fine, as they're being scheduled to the driver in the same
3659 * serialised method which is calling the addba request routine)
3660 * and when the aggregation session begins, they'll be dequeued
3661 * as aggregate packets and added to the BAW. However, now there's
3662 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
3663 * packets. Thus they never get included in the BAW tracking and
3664 * this can cause the initial burst of packets after the addba
3665 * negotiation to "hang", as they quickly fall outside the BAW.
3666 *
3667 * The "eventual" solution should be to tag these packets with
3668 * dobaw. Although net80211 has given us a sequence number,
3669 * it'll be "after" the left edge of the BAW and thus it'll
3670 * fall within it.
3671 */
3672 ath_tx_tid_pause(sc, atid);
3673
3674 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3675 "%s: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
3676 __func__, dialogtoken, baparamset, batimeout);
3677 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3678 "%s: txa_start=%d, ni_txseqs=%d\n",
3679 __func__, tap->txa_start, ni->ni_txseqs[tid]);
3680
3681 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
3682 batimeout);
3683}
3684
3685/*
3686 * Handle an ADDBA response.
3687 *
3688 * We unpause the queue so TX'ing can resume.
3689 *
3690 * Any packets TX'ed from this point should be "aggregate" (whether
3691 * aggregate or not) so the BAW is updated.
3692 *
3693 * Note! net80211 keeps self-assigning sequence numbers until
3694 * ampdu is negotiated. This means the initially-negotiated BAW left
3695 * edge won't match the ni->ni_txseq.
3696 *
3697 * So, being very dirty, the BAW left edge is "slid" here to match
3698 * ni->ni_txseq.
3699 *
3700 * What likely SHOULD happen is that all packets subsequent to the
3701 * addba request should be tagged as aggregate and queued as non-aggregate
3702 * frames; thus updating the BAW. For now though, I'll just slide the
3703 * window.
3704 */
3705int
3706ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
3707 int status, int code, int batimeout)
3708{
3709 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3710 int tid = WME_AC_TO_TID(tap->txa_ac);
3711 struct ath_node *an = ATH_NODE(ni);
3712 struct ath_tid *atid = &an->an_tid[tid];
3713 int r;
3714
3715 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3716 "%s: called; status=%d, code=%d, batimeout=%d\n", __func__,
3717 status, code, batimeout);
3718
3719 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3720 "%s: txa_start=%d, ni_txseqs=%d\n",
3721 __func__, tap->txa_start, ni->ni_txseqs[tid]);
3722
3723 /*
3724 * Call this first, so the interface flags get updated
3725 * before the TID is unpaused. Otherwise a race condition
3726 * exists where the unpaused TID still doesn't yet have
3727 * IEEE80211_AGGR_RUNNING set.
3728 */
3729 r = sc->sc_addba_response(ni, tap, status, code, batimeout);
3730
3731 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
3732 /*
3733 * XXX dirty!
3734 * Slide the BAW left edge to wherever net80211 left it for us.
3735 * Read above for more information.
3736 */
3737 tap->txa_start = ni->ni_txseqs[tid];
3738 ath_tx_tid_resume(sc, atid);
3739 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
3740 return r;
3741}
3742
3743
3744/*
3745 * Stop ADDBA on a queue.
3746 */
3747void
3748ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3749{
3750 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3751 int tid = WME_AC_TO_TID(tap->txa_ac);
3752 struct ath_node *an = ATH_NODE(ni);
3753 struct ath_tid *atid = &an->an_tid[tid];
3754
3755 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: called\n", __func__);
3756
3757 /* Pause TID traffic early, so there aren't any races */
3758 ath_tx_tid_pause(sc, atid);
3759
3760 /* There's no need to hold the TXQ lock here */
3761 sc->sc_addba_stop(ni, tap);
3762
3763 /*
3764 * ath_tx_cleanup will resume the TID if possible, otherwise
3765 * it'll set the cleanup flag, and it'll be unpaused once
3766 * things have been cleaned up.
3767 */
3768 ath_tx_cleanup(sc, an, tid);
3769}
3770
3771/*
3772 * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
3773 * it simply tears down the aggregation session. Ew.
3774 *
3775 * It however will call ieee80211_ampdu_stop() which will call
3776 * ic->ic_addba_stop().
3777 *
3778 * XXX This uses a hard-coded max BAR count value; the whole
3779 * XXX BAR TX success or failure should be better handled!
3780 */
3781void
3782ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
3783 int status)
3784{
3785 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3786 int tid = WME_AC_TO_TID(tap->txa_ac);
3787 struct ath_node *an = ATH_NODE(ni);
3788 struct ath_tid *atid = &an->an_tid[tid];
3789 int attempts = tap->txa_attempts;
3790
3791 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3792 "%s: called; status=%d\n", __func__, status);
3793
3794 /* Note: This may update the BAW details */
3795 sc->sc_bar_response(ni, tap, status);
3796
3797 /* Unpause the TID */
3798 /*
3799 * XXX if this is attempt=50, the TID will be downgraded
3800 * XXX to a non-aggregate session. So we must unpause the
3801 * XXX TID here or it'll never be done.
3802 */
3803 if (status == 0 || attempts == 50) {
3804 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
3805 ath_tx_tid_resume(sc, atid);
3806 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
3807 }
3808}
3809
3810/*
3811 * This is called whenever the pending ADDBA request times out.
3812 * Unpause and reschedule the TID.
3813 */
3814void
3815ath_addba_response_timeout(struct ieee80211_node *ni,
3816 struct ieee80211_tx_ampdu *tap)
3817{
3818 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3819 int tid = WME_AC_TO_TID(tap->txa_ac);
3820 struct ath_node *an = ATH_NODE(ni);
3821 struct ath_tid *atid = &an->an_tid[tid];
3822
3823 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3824 "%s: called; resuming\n", __func__);
3825
3826 /* Note: This updates the aggregate state to (again) pending */
3827 sc->sc_addba_response_timeout(ni, tap);
3828
3829 /* Unpause the TID; which reschedules it */
3830 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
3831 ath_tx_tid_resume(sc, atid);
3832 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
3833}