if_ath_tx.c revision 218157
/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath_tx.c 218157 2011-02-01 07:50:26Z adrian $");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
38 */ 39 40#include "opt_inet.h" 41#include "opt_ath.h" 42#include "opt_wlan.h" 43 44#include <sys/param.h> 45#include <sys/systm.h> 46#include <sys/sysctl.h> 47#include <sys/mbuf.h> 48#include <sys/malloc.h> 49#include <sys/lock.h> 50#include <sys/mutex.h> 51#include <sys/kernel.h> 52#include <sys/socket.h> 53#include <sys/sockio.h> 54#include <sys/errno.h> 55#include <sys/callout.h> 56#include <sys/bus.h> 57#include <sys/endian.h> 58#include <sys/kthread.h> 59#include <sys/taskqueue.h> 60#include <sys/priv.h> 61 62#include <machine/bus.h> 63 64#include <net/if.h> 65#include <net/if_dl.h> 66#include <net/if_media.h> 67#include <net/if_types.h> 68#include <net/if_arp.h> 69#include <net/ethernet.h> 70#include <net/if_llc.h> 71 72#include <net80211/ieee80211_var.h> 73#include <net80211/ieee80211_regdomain.h> 74#ifdef IEEE80211_SUPPORT_SUPERG 75#include <net80211/ieee80211_superg.h> 76#endif 77#ifdef IEEE80211_SUPPORT_TDMA 78#include <net80211/ieee80211_tdma.h> 79#endif 80 81#include <net/bpf.h> 82 83#ifdef INET 84#include <netinet/in.h> 85#include <netinet/if_ether.h> 86#endif 87 88#include <dev/ath/if_athvar.h> 89#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */ 90#include <dev/ath/ath_hal/ah_diagcodes.h> 91 92#include <dev/ath/if_ath_debug.h> 93 94#ifdef ATH_TX99_DIAG 95#include <dev/ath/ath_tx99/ath_tx99.h> 96#endif 97 98#include <dev/ath/if_ath_misc.h> 99#include <dev/ath/if_ath_tx.h> 100 101/* 102 * Whether to use the 11n rate scenario functions or not 103 */ 104static inline int 105ath_tx_is_11n(struct ath_softc *sc) 106{ 107 return (sc->sc_ah->ah_magic == 0x20065416); 108} 109 110void 111ath_txfrag_cleanup(struct ath_softc *sc, 112 ath_bufhead *frags, struct ieee80211_node *ni) 113{ 114 struct ath_buf *bf, *next; 115 116 ATH_TXBUF_LOCK_ASSERT(sc); 117 118 STAILQ_FOREACH_SAFE(bf, frags, bf_list, next) { 119 /* NB: bf assumed clean */ 120 STAILQ_REMOVE_HEAD(frags, bf_list); 121 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 122 
ieee80211_node_decref(ni); 123 } 124} 125 126/* 127 * Setup xmit of a fragmented frame. Allocate a buffer 128 * for each frag and bump the node reference count to 129 * reflect the held reference to be setup by ath_tx_start. 130 */ 131int 132ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags, 133 struct mbuf *m0, struct ieee80211_node *ni) 134{ 135 struct mbuf *m; 136 struct ath_buf *bf; 137 138 ATH_TXBUF_LOCK(sc); 139 for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) { 140 bf = _ath_getbuf_locked(sc); 141 if (bf == NULL) { /* out of buffers, cleanup */ 142 ath_txfrag_cleanup(sc, frags, ni); 143 break; 144 } 145 ieee80211_node_incref(ni); 146 STAILQ_INSERT_TAIL(frags, bf, bf_list); 147 } 148 ATH_TXBUF_UNLOCK(sc); 149 150 return !STAILQ_EMPTY(frags); 151} 152 153/* 154 * Reclaim mbuf resources. For fragmented frames we 155 * need to claim each frag chained with m_nextpkt. 156 */ 157void 158ath_freetx(struct mbuf *m) 159{ 160 struct mbuf *next; 161 162 do { 163 next = m->m_nextpkt; 164 m->m_nextpkt = NULL; 165 m_freem(m); 166 } while ((m = next) != NULL); 167} 168 169static int 170ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0) 171{ 172 struct mbuf *m; 173 int error; 174 175 /* 176 * Load the DMA map so any coalescing is done. This 177 * also calculates the number of descriptors we need. 178 */ 179 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, 180 bf->bf_segs, &bf->bf_nseg, 181 BUS_DMA_NOWAIT); 182 if (error == EFBIG) { 183 /* XXX packet requires too many descriptors */ 184 bf->bf_nseg = ATH_TXDESC+1; 185 } else if (error != 0) { 186 sc->sc_stats.ast_tx_busdma++; 187 ath_freetx(m0); 188 return error; 189 } 190 /* 191 * Discard null packets and check for packets that 192 * require too many TX descriptors. We try to convert 193 * the latter to a cluster. 
194 */ 195 if (bf->bf_nseg > ATH_TXDESC) { /* too many desc's, linearize */ 196 sc->sc_stats.ast_tx_linear++; 197 m = m_collapse(m0, M_DONTWAIT, ATH_TXDESC); 198 if (m == NULL) { 199 ath_freetx(m0); 200 sc->sc_stats.ast_tx_nombuf++; 201 return ENOMEM; 202 } 203 m0 = m; 204 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, 205 bf->bf_segs, &bf->bf_nseg, 206 BUS_DMA_NOWAIT); 207 if (error != 0) { 208 sc->sc_stats.ast_tx_busdma++; 209 ath_freetx(m0); 210 return error; 211 } 212 KASSERT(bf->bf_nseg <= ATH_TXDESC, 213 ("too many segments after defrag; nseg %u", bf->bf_nseg)); 214 } else if (bf->bf_nseg == 0) { /* null packet, discard */ 215 sc->sc_stats.ast_tx_nodata++; 216 ath_freetx(m0); 217 return EIO; 218 } 219 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n", 220 __func__, m0, m0->m_pkthdr.len); 221 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 222 bf->bf_m = m0; 223 224 return 0; 225} 226 227static void 228ath_tx_chaindesclist(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) 229{ 230 struct ath_hal *ah = sc->sc_ah; 231 struct ath_desc *ds, *ds0; 232 int i; 233 234 /* 235 * Fillin the remainder of the descriptor info. 
236 */ 237 ds0 = ds = bf->bf_desc; 238 for (i = 0; i < bf->bf_nseg; i++, ds++) { 239 ds->ds_data = bf->bf_segs[i].ds_addr; 240 if (i == bf->bf_nseg - 1) 241 ds->ds_link = 0; 242 else 243 ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1); 244 ath_hal_filltxdesc(ah, ds 245 , bf->bf_segs[i].ds_len /* segment length */ 246 , i == 0 /* first segment */ 247 , i == bf->bf_nseg - 1 /* last segment */ 248 , ds0 /* first descriptor */ 249 ); 250 DPRINTF(sc, ATH_DEBUG_XMIT, 251 "%s: %d: %08x %08x %08x %08x %08x %08x\n", 252 __func__, i, ds->ds_link, ds->ds_data, 253 ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]); 254 } 255 256} 257 258static void 259ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) 260{ 261 struct ath_hal *ah = sc->sc_ah; 262 263 /* Fill in the details in the descriptor list */ 264 ath_tx_chaindesclist(sc, txq, bf); 265 266 /* 267 * Insert the frame on the outbound list and pass it on 268 * to the hardware. Multicast frames buffered for power 269 * save stations and transmit from the CAB queue are stored 270 * on a s/w only queue and loaded on to the CAB queue in 271 * the SWBA handler since frames only go out on DTIM and 272 * to avoid possible races. 273 */ 274 ATH_TXQ_LOCK(txq); 275 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, 276 ("busy status 0x%x", bf->bf_flags)); 277 if (txq->axq_qnum != ATH_TXQ_SWQ) { 278#ifdef IEEE80211_SUPPORT_TDMA 279 int qbusy; 280 281 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 282 qbusy = ath_hal_txqenabled(ah, txq->axq_qnum); 283 if (txq->axq_link == NULL) { 284 /* 285 * Be careful writing the address to TXDP. If 286 * the tx q is enabled then this write will be 287 * ignored. Normally this is not an issue but 288 * when tdma is in use and the q is beacon gated 289 * this race can occur. If the q is busy then 290 * defer the work to later--either when another 291 * packet comes along or when we prepare a beacon 292 * frame at SWBA. 
293 */ 294 if (!qbusy) { 295 ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 296 txq->axq_flags &= ~ATH_TXQ_PUTPENDING; 297 DPRINTF(sc, ATH_DEBUG_XMIT, 298 "%s: TXDP[%u] = %p (%p) depth %d\n", 299 __func__, txq->axq_qnum, 300 (caddr_t)bf->bf_daddr, bf->bf_desc, 301 txq->axq_depth); 302 } else { 303 txq->axq_flags |= ATH_TXQ_PUTPENDING; 304 DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT, 305 "%s: Q%u busy, defer enable\n", __func__, 306 txq->axq_qnum); 307 } 308 } else { 309 *txq->axq_link = bf->bf_daddr; 310 DPRINTF(sc, ATH_DEBUG_XMIT, 311 "%s: link[%u](%p)=%p (%p) depth %d\n", __func__, 312 txq->axq_qnum, txq->axq_link, 313 (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth); 314 if ((txq->axq_flags & ATH_TXQ_PUTPENDING) && !qbusy) { 315 /* 316 * The q was busy when we previously tried 317 * to write the address of the first buffer 318 * in the chain. Since it's not busy now 319 * handle this chore. We are certain the 320 * buffer at the front is the right one since 321 * axq_link is NULL only when the buffer list 322 * is/was empty. 
323 */ 324 ath_hal_puttxbuf(ah, txq->axq_qnum, 325 STAILQ_FIRST(&txq->axq_q)->bf_daddr); 326 txq->axq_flags &= ~ATH_TXQ_PUTPENDING; 327 DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT, 328 "%s: Q%u restarted\n", __func__, 329 txq->axq_qnum); 330 } 331 } 332#else 333 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 334 if (txq->axq_link == NULL) { 335 ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 336 DPRINTF(sc, ATH_DEBUG_XMIT, 337 "%s: TXDP[%u] = %p (%p) depth %d\n", 338 __func__, txq->axq_qnum, 339 (caddr_t)bf->bf_daddr, bf->bf_desc, 340 txq->axq_depth); 341 } else { 342 *txq->axq_link = bf->bf_daddr; 343 DPRINTF(sc, ATH_DEBUG_XMIT, 344 "%s: link[%u](%p)=%p (%p) depth %d\n", __func__, 345 txq->axq_qnum, txq->axq_link, 346 (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth); 347 } 348#endif /* IEEE80211_SUPPORT_TDMA */ 349 txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link; 350 ath_hal_txstart(ah, txq->axq_qnum); 351 } else { 352 if (txq->axq_link != NULL) { 353 struct ath_buf *last = ATH_TXQ_LAST(txq); 354 struct ieee80211_frame *wh; 355 356 /* mark previous frame */ 357 wh = mtod(last->bf_m, struct ieee80211_frame *); 358 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; 359 bus_dmamap_sync(sc->sc_dmat, last->bf_dmamap, 360 BUS_DMASYNC_PREWRITE); 361 362 /* link descriptor */ 363 *txq->axq_link = bf->bf_daddr; 364 } 365 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 366 txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link; 367 } 368 ATH_TXQ_UNLOCK(txq); 369} 370 371static int 372ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni, 373 struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen, int *keyix) 374{ 375 if (iswep) { 376 const struct ieee80211_cipher *cip; 377 struct ieee80211_key *k; 378 379 /* 380 * Construct the 802.11 header+trailer for an encrypted 381 * frame. The only reason this can fail is because of an 382 * unknown or unsupported cipher/key type. 
383 */ 384 k = ieee80211_crypto_encap(ni, m0); 385 if (k == NULL) { 386 /* 387 * This can happen when the key is yanked after the 388 * frame was queued. Just discard the frame; the 389 * 802.11 layer counts failures and provides 390 * debugging/diagnostics. 391 */ 392 return 0; 393 } 394 /* 395 * Adjust the packet + header lengths for the crypto 396 * additions and calculate the h/w key index. When 397 * a s/w mic is done the frame will have had any mic 398 * added to it prior to entry so m0->m_pkthdr.len will 399 * account for it. Otherwise we need to add it to the 400 * packet length. 401 */ 402 cip = k->wk_cipher; 403 (*hdrlen) += cip->ic_header; 404 (*pktlen) += cip->ic_header + cip->ic_trailer; 405 /* NB: frags always have any TKIP MIC done in s/w */ 406 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag) 407 (*pktlen) += cip->ic_miclen; 408 (*keyix) = k->wk_keyix; 409 } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) { 410 /* 411 * Use station key cache slot, if assigned. 412 */ 413 (*keyix) = ni->ni_ucastkey.wk_keyix; 414 if ((*keyix) == IEEE80211_KEYIX_NONE) 415 (*keyix) = HAL_TXKEYIX_INVALID; 416 } else 417 (*keyix) = HAL_TXKEYIX_INVALID; 418 419 return 1; 420} 421 422static void 423ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix, 424 int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt, 425 int flags, u_int8_t *ctsrate, int *ctsduration) 426{ 427 /* 428 * CTS transmit rate is derived from the transmit rate 429 * by looking in the h/w rate table. We must also factor 430 * in whether or not a short preamble is to be used. 431 */ 432 /* NB: cix is set above where RTS/CTS is enabled */ 433 KASSERT(cix != 0xff, ("cix not setup")); 434 (*ctsrate) = rt->info[cix].rateCode; 435 /* 436 * Compute the transmit duration based on the frame 437 * size and the size of an ACK frame. We call into the 438 * HAL to do the computation since it depends on the 439 * characteristics of the actual PHY being used. 
440 * 441 * NB: CTS is assumed the same size as an ACK so we can 442 * use the precalculated ACK durations. 443 */ 444 if (shortPreamble) { 445 (*ctsrate) |= rt->info[cix].shortPreamble; 446 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 447 (*ctsduration) += rt->info[cix].spAckDuration; 448 (*ctsduration) += ath_hal_computetxtime(ah, 449 rt, pktlen, rix, AH_TRUE); 450 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 451 (*ctsduration) += rt->info[rix].spAckDuration; 452 } else { 453 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 454 (*ctsduration) += rt->info[cix].lpAckDuration; 455 (*ctsduration) += ath_hal_computetxtime(ah, 456 rt, pktlen, rix, AH_FALSE); 457 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 458 (*ctsduration) += rt->info[rix].lpAckDuration; 459 } 460} 461 462int 463ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, 464 struct mbuf *m0) 465{ 466 struct ieee80211vap *vap = ni->ni_vap; 467 struct ath_vap *avp = ATH_VAP(vap); 468 struct ath_hal *ah = sc->sc_ah; 469 struct ifnet *ifp = sc->sc_ifp; 470 struct ieee80211com *ic = ifp->if_l2com; 471 const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams; 472 int error, iswep, ismcast, isfrag, ismrr; 473 int keyix, hdrlen, pktlen, try0; 474 u_int8_t rix, txrate, ctsrate; 475 u_int8_t cix = 0xff; /* NB: silence compiler */ 476 struct ath_desc *ds; 477 struct ath_txq *txq; 478 struct ieee80211_frame *wh; 479 u_int subtype, flags, ctsduration; 480 HAL_PKT_TYPE atype; 481 const HAL_RATE_TABLE *rt; 482 HAL_BOOL shortPreamble; 483 struct ath_node *an; 484 u_int pri; 485 486 wh = mtod(m0, struct ieee80211_frame *); 487 iswep = wh->i_fc[1] & IEEE80211_FC1_WEP; 488 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 489 isfrag = m0->m_flags & M_FRAG; 490 hdrlen = ieee80211_anyhdrsize(wh); 491 /* 492 * Packet length must not include any 493 * pad bytes; deduct them here. 
494 */ 495 pktlen = m0->m_pkthdr.len - (hdrlen & 3); 496 497 /* Handle encryption twiddling if needed */ 498 if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen, &pktlen, &keyix)) { 499 ath_freetx(m0); 500 return EIO; 501 } 502 503 /* packet header may have moved, reset our local pointer */ 504 wh = mtod(m0, struct ieee80211_frame *); 505 506 pktlen += IEEE80211_CRC_LEN; 507 508 /* 509 * Load the DMA map so any coalescing is done. This 510 * also calculates the number of descriptors we need. 511 */ 512 error = ath_tx_dmasetup(sc, bf, m0); 513 if (error != 0) 514 return error; 515 bf->bf_node = ni; /* NB: held reference */ 516 m0 = bf->bf_m; /* NB: may have changed */ 517 wh = mtod(m0, struct ieee80211_frame *); 518 519 /* setup descriptors */ 520 ds = bf->bf_desc; 521 rt = sc->sc_currates; 522 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 523 524 /* 525 * NB: the 802.11 layer marks whether or not we should 526 * use short preamble based on the current mode and 527 * negotiated parameters. 528 */ 529 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 530 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { 531 shortPreamble = AH_TRUE; 532 sc->sc_stats.ast_tx_shortpre++; 533 } else { 534 shortPreamble = AH_FALSE; 535 } 536 537 an = ATH_NODE(ni); 538 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 539 ismrr = 0; /* default no multi-rate retry*/ 540 pri = M_WME_GETAC(m0); /* honor classification */ 541 /* XXX use txparams instead of fixed values */ 542 /* 543 * Calculate Atheros packet type from IEEE80211 packet header, 544 * setup for rate calculations, and select h/w transmit queue. 
545 */ 546 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { 547 case IEEE80211_FC0_TYPE_MGT: 548 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 549 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) 550 atype = HAL_PKT_TYPE_BEACON; 551 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 552 atype = HAL_PKT_TYPE_PROBE_RESP; 553 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) 554 atype = HAL_PKT_TYPE_ATIM; 555 else 556 atype = HAL_PKT_TYPE_NORMAL; /* XXX */ 557 rix = an->an_mgmtrix; 558 txrate = rt->info[rix].rateCode; 559 if (shortPreamble) 560 txrate |= rt->info[rix].shortPreamble; 561 try0 = ATH_TXMGTTRY; 562 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 563 break; 564 case IEEE80211_FC0_TYPE_CTL: 565 atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */ 566 rix = an->an_mgmtrix; 567 txrate = rt->info[rix].rateCode; 568 if (shortPreamble) 569 txrate |= rt->info[rix].shortPreamble; 570 try0 = ATH_TXMGTTRY; 571 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 572 break; 573 case IEEE80211_FC0_TYPE_DATA: 574 atype = HAL_PKT_TYPE_NORMAL; /* default */ 575 /* 576 * Data frames: multicast frames go out at a fixed rate, 577 * EAPOL frames use the mgmt frame rate; otherwise consult 578 * the rate control module for the rate to use. 579 */ 580 if (ismcast) { 581 rix = an->an_mcastrix; 582 txrate = rt->info[rix].rateCode; 583 if (shortPreamble) 584 txrate |= rt->info[rix].shortPreamble; 585 try0 = 1; 586 } else if (m0->m_flags & M_EAPOL) { 587 /* XXX? maybe always use long preamble? */ 588 rix = an->an_mgmtrix; 589 txrate = rt->info[rix].rateCode; 590 if (shortPreamble) 591 txrate |= rt->info[rix].shortPreamble; 592 try0 = ATH_TXMAXTRY; /* XXX?too many? 
*/ 593 } else { 594 ath_rate_findrate(sc, an, shortPreamble, pktlen, 595 &rix, &try0, &txrate); 596 sc->sc_txrix = rix; /* for LED blinking */ 597 sc->sc_lastdatarix = rix; /* for fast frames */ 598 if (try0 != ATH_TXMAXTRY) 599 ismrr = 1; 600 } 601 if (cap->cap_wmeParams[pri].wmep_noackPolicy) 602 flags |= HAL_TXDESC_NOACK; 603 break; 604 default: 605 if_printf(ifp, "bogus frame type 0x%x (%s)\n", 606 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); 607 /* XXX statistic */ 608 ath_freetx(m0); 609 return EIO; 610 } 611 txq = sc->sc_ac2q[pri]; 612 613 /* 614 * When servicing one or more stations in power-save mode 615 * (or) if there is some mcast data waiting on the mcast 616 * queue (to prevent out of order delivery) multicast 617 * frames must be buffered until after the beacon. 618 */ 619 if (ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) 620 txq = &avp->av_mcastq; 621 622 /* 623 * Calculate miscellaneous flags. 624 */ 625 if (ismcast) { 626 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ 627 } else if (pktlen > vap->iv_rtsthreshold && 628 (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) { 629 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ 630 cix = rt->info[rix].controlRate; 631 sc->sc_stats.ast_tx_rts++; 632 } 633 if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */ 634 sc->sc_stats.ast_tx_noack++; 635#ifdef IEEE80211_SUPPORT_TDMA 636 if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) { 637 DPRINTF(sc, ATH_DEBUG_TDMA, 638 "%s: discard frame, ACK required w/ TDMA\n", __func__); 639 sc->sc_stats.ast_tdma_ack++; 640 ath_freetx(m0); 641 return EIO; 642 } 643#endif 644 645 /* 646 * If 802.11g protection is enabled, determine whether 647 * to use RTS/CTS or just CTS. Note that this is only 648 * done for OFDM unicast frames. 
649 */ 650 if ((ic->ic_flags & IEEE80211_F_USEPROT) && 651 rt->info[rix].phy == IEEE80211_T_OFDM && 652 (flags & HAL_TXDESC_NOACK) == 0) { 653 /* XXX fragments must use CCK rates w/ protection */ 654 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 655 flags |= HAL_TXDESC_RTSENA; 656 else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 657 flags |= HAL_TXDESC_CTSENA; 658 if (isfrag) { 659 /* 660 * For frags it would be desirable to use the 661 * highest CCK rate for RTS/CTS. But stations 662 * farther away may detect it at a lower CCK rate 663 * so use the configured protection rate instead 664 * (for now). 665 */ 666 cix = rt->info[sc->sc_protrix].controlRate; 667 } else 668 cix = rt->info[sc->sc_protrix].controlRate; 669 sc->sc_stats.ast_tx_protect++; 670 } 671 672 /* 673 * Calculate duration. This logically belongs in the 802.11 674 * layer but it lacks sufficient information to calculate it. 675 */ 676 if ((flags & HAL_TXDESC_NOACK) == 0 && 677 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { 678 u_int16_t dur; 679 if (shortPreamble) 680 dur = rt->info[rix].spAckDuration; 681 else 682 dur = rt->info[rix].lpAckDuration; 683 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { 684 dur += dur; /* additional SIFS+ACK */ 685 KASSERT(m0->m_nextpkt != NULL, ("no fragment")); 686 /* 687 * Include the size of next fragment so NAV is 688 * updated properly. The last fragment uses only 689 * the ACK duration 690 */ 691 dur += ath_hal_computetxtime(ah, rt, 692 m0->m_nextpkt->m_pkthdr.len, 693 rix, shortPreamble); 694 } 695 if (isfrag) { 696 /* 697 * Force hardware to use computed duration for next 698 * fragment by disabling multi-rate retry which updates 699 * duration based on the multi-rate duration table. 700 */ 701 ismrr = 0; 702 try0 = ATH_TXMGTTRY; /* XXX? */ 703 } 704 *(u_int16_t *)wh->i_dur = htole16(dur); 705 } 706 707 /* 708 * Calculate RTS/CTS rate and duration if needed. 
709 */ 710 ctsduration = 0; 711 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) { 712 (void) ath_tx_calc_ctsduration(ah, rix, cix, shortPreamble, pktlen, 713 rt, flags, &ctsrate, &ctsduration); 714 /* 715 * Must disable multi-rate retry when using RTS/CTS. 716 */ 717 ismrr = 0; 718 try0 = ATH_TXMGTTRY; /* XXX */ 719 } else 720 ctsrate = 0; 721 722 /* 723 * At this point we are committed to sending the frame 724 * and we don't need to look at m_nextpkt; clear it in 725 * case this frame is part of frag chain. 726 */ 727 m0->m_nextpkt = NULL; 728 729 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 730 ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len, 731 sc->sc_hwmap[rix].ieeerate, -1); 732 733 if (ieee80211_radiotap_active_vap(vap)) { 734 u_int64_t tsf = ath_hal_gettsf64(ah); 735 736 sc->sc_tx_th.wt_tsf = htole64(tsf); 737 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; 738 if (iswep) 739 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 740 if (isfrag) 741 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 742 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; 743 sc->sc_tx_th.wt_txpower = ni->ni_txpower; 744 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 745 746 ieee80211_radiotap_tx(vap, m0); 747 } 748 749 /* 750 * Determine if a tx interrupt should be generated for 751 * this descriptor. We take a tx interrupt to reap 752 * descriptors when the h/w hits an EOL condition or 753 * when the descriptor is specifically marked to generate 754 * an interrupt. We periodically mark descriptors in this 755 * way to insure timely replenishing of the supply needed 756 * for sending frames. Defering interrupts reduces system 757 * load and potentially allows more concurrent work to be 758 * done but if done to aggressively can cause senders to 759 * backup. 760 * 761 * NB: use >= to deal with sc_txintrperiod changing 762 * dynamically through sysctl. 
763 */ 764 if (flags & HAL_TXDESC_INTREQ) { 765 txq->axq_intrcnt = 0; 766 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) { 767 flags |= HAL_TXDESC_INTREQ; 768 txq->axq_intrcnt = 0; 769 } 770 771 /* 772 * Formulate first tx descriptor with tx controls. 773 */ 774 /* XXX check return value? */ 775 ath_hal_setuptxdesc(ah, ds 776 , pktlen /* packet length */ 777 , hdrlen /* header length */ 778 , atype /* Atheros packet type */ 779 , ni->ni_txpower /* txpower */ 780 , txrate, try0 /* series 0 rate/tries */ 781 , keyix /* key cache index */ 782 , sc->sc_txantenna /* antenna mode */ 783 , flags /* flags */ 784 , ctsrate /* rts/cts rate */ 785 , ctsduration /* rts/cts duration */ 786 ); 787 bf->bf_txflags = flags; 788 /* 789 * Setup the multi-rate retry state only when we're 790 * going to use it. This assumes ath_hal_setuptxdesc 791 * initializes the descriptors (so we don't have to) 792 * when the hardware supports multi-rate retry and 793 * we don't use it. 794 */ 795 if (ismrr) 796 ath_rate_setupxtxdesc(sc, an, ds, shortPreamble, rix); 797 798 ath_tx_handoff(sc, txq, bf); 799 return 0; 800} 801 802static int 803ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni, 804 struct ath_buf *bf, struct mbuf *m0, 805 const struct ieee80211_bpf_params *params) 806{ 807 struct ifnet *ifp = sc->sc_ifp; 808 struct ieee80211com *ic = ifp->if_l2com; 809 struct ath_hal *ah = sc->sc_ah; 810 struct ieee80211vap *vap = ni->ni_vap; 811 int error, ismcast, ismrr; 812 int keyix, hdrlen, pktlen, try0, txantenna; 813 u_int8_t rix, cix, txrate, ctsrate, rate1, rate2, rate3; 814 struct ieee80211_frame *wh; 815 u_int flags, ctsduration; 816 HAL_PKT_TYPE atype; 817 const HAL_RATE_TABLE *rt; 818 struct ath_desc *ds; 819 u_int pri; 820 821 wh = mtod(m0, struct ieee80211_frame *); 822 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 823 hdrlen = ieee80211_anyhdrsize(wh); 824 /* 825 * Packet length must not include any 826 * pad bytes; deduct them here. 
827 */ 828 /* XXX honor IEEE80211_BPF_DATAPAD */ 829 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; 830 831 /* Handle encryption twiddling if needed */ 832 if (! ath_tx_tag_crypto(sc, ni, m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, &hdrlen, &pktlen, &keyix)) { 833 ath_freetx(m0); 834 return EIO; 835 } 836 /* packet header may have moved, reset our local pointer */ 837 wh = mtod(m0, struct ieee80211_frame *); 838 839 error = ath_tx_dmasetup(sc, bf, m0); 840 if (error != 0) 841 return error; 842 m0 = bf->bf_m; /* NB: may have changed */ 843 wh = mtod(m0, struct ieee80211_frame *); 844 bf->bf_node = ni; /* NB: held reference */ 845 846 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 847 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 848 if (params->ibp_flags & IEEE80211_BPF_RTS) 849 flags |= HAL_TXDESC_RTSENA; 850 else if (params->ibp_flags & IEEE80211_BPF_CTS) 851 flags |= HAL_TXDESC_CTSENA; 852 /* XXX leave ismcast to injector? */ 853 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) 854 flags |= HAL_TXDESC_NOACK; 855 856 rt = sc->sc_currates; 857 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 858 rix = ath_tx_findrix(sc, params->ibp_rate0); 859 txrate = rt->info[rix].rateCode; 860 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 861 txrate |= rt->info[rix].shortPreamble; 862 sc->sc_txrix = rix; 863 try0 = params->ibp_try0; 864 ismrr = (params->ibp_try1 != 0); 865 txantenna = params->ibp_pri >> 2; 866 if (txantenna == 0) /* XXX? */ 867 txantenna = sc->sc_txantenna; 868 869 ctsduration = 0; 870 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) { 871 cix = ath_tx_findrix(sc, params->ibp_ctsrate); 872 (void) ath_tx_calc_ctsduration(ah, rix, cix, 873 params->ibp_flags & IEEE80211_BPF_SHORTPRE, pktlen, 874 rt, flags, &ctsrate, &ctsduration); 875 /* 876 * Must disable multi-rate retry when using RTS/CTS. 
877 */ 878 ismrr = 0; /* XXX */ 879 } else 880 ctsrate = 0; 881 882 pri = params->ibp_pri & 3; 883 /* 884 * NB: we mark all packets as type PSPOLL so the h/w won't 885 * set the sequence number, duration, etc. 886 */ 887 atype = HAL_PKT_TYPE_PSPOLL; 888 889 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 890 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, 891 sc->sc_hwmap[rix].ieeerate, -1); 892 893 if (ieee80211_radiotap_active_vap(vap)) { 894 u_int64_t tsf = ath_hal_gettsf64(ah); 895 896 sc->sc_tx_th.wt_tsf = htole64(tsf); 897 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; 898 if (wh->i_fc[1] & IEEE80211_FC1_WEP) 899 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 900 if (m0->m_flags & M_FRAG) 901 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 902 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; 903 sc->sc_tx_th.wt_txpower = ni->ni_txpower; 904 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 905 906 ieee80211_radiotap_tx(vap, m0); 907 } 908 909 /* 910 * Formulate first tx descriptor with tx controls. 911 */ 912 ds = bf->bf_desc; 913 /* XXX check return value? 
*/ 914 ath_hal_setuptxdesc(ah, ds 915 , pktlen /* packet length */ 916 , hdrlen /* header length */ 917 , atype /* Atheros packet type */ 918 , params->ibp_power /* txpower */ 919 , txrate, try0 /* series 0 rate/tries */ 920 , keyix /* key cache index */ 921 , txantenna /* antenna mode */ 922 , flags /* flags */ 923 , ctsrate /* rts/cts rate */ 924 , ctsduration /* rts/cts duration */ 925 ); 926 bf->bf_txflags = flags; 927 928 if (ismrr) { 929 rix = ath_tx_findrix(sc, params->ibp_rate1); 930 rate1 = rt->info[rix].rateCode; 931 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 932 rate1 |= rt->info[rix].shortPreamble; 933 if (params->ibp_try2) { 934 rix = ath_tx_findrix(sc, params->ibp_rate2); 935 rate2 = rt->info[rix].rateCode; 936 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 937 rate2 |= rt->info[rix].shortPreamble; 938 } else 939 rate2 = 0; 940 if (params->ibp_try3) { 941 rix = ath_tx_findrix(sc, params->ibp_rate3); 942 rate3 = rt->info[rix].rateCode; 943 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 944 rate3 |= rt->info[rix].shortPreamble; 945 } else 946 rate3 = 0; 947 ath_hal_setupxtxdesc(ah, ds 948 , rate1, params->ibp_try1 /* series 1 */ 949 , rate2, params->ibp_try2 /* series 2 */ 950 , rate3, params->ibp_try3 /* series 3 */ 951 ); 952 } 953 954 /* NB: no buffered multicast in power save support */ 955 ath_tx_handoff(sc, sc->sc_ac2q[pri], bf); 956 return 0; 957} 958 959int 960ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 961 const struct ieee80211_bpf_params *params) 962{ 963 struct ieee80211com *ic = ni->ni_ic; 964 struct ifnet *ifp = ic->ic_ifp; 965 struct ath_softc *sc = ifp->if_softc; 966 struct ath_buf *bf; 967 int error; 968 969 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) { 970 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__, 971 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ? 
972 "!running" : "invalid"); 973 m_freem(m); 974 error = ENETDOWN; 975 goto bad; 976 } 977 /* 978 * Grab a TX buffer and associated resources. 979 */ 980 bf = ath_getbuf(sc); 981 if (bf == NULL) { 982 sc->sc_stats.ast_tx_nobuf++; 983 m_freem(m); 984 error = ENOBUFS; 985 goto bad; 986 } 987 988 if (params == NULL) { 989 /* 990 * Legacy path; interpret frame contents to decide 991 * precisely how to send the frame. 992 */ 993 if (ath_tx_start(sc, ni, bf, m)) { 994 error = EIO; /* XXX */ 995 goto bad2; 996 } 997 } else { 998 /* 999 * Caller supplied explicit parameters to use in 1000 * sending the frame. 1001 */ 1002 if (ath_tx_raw_start(sc, ni, bf, m, params)) { 1003 error = EIO; /* XXX */ 1004 goto bad2; 1005 } 1006 } 1007 sc->sc_wd_timer = 5; 1008 ifp->if_opackets++; 1009 sc->sc_stats.ast_tx_raw++; 1010 1011 return 0; 1012bad2: 1013 ATH_TXBUF_LOCK(sc); 1014 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 1015 ATH_TXBUF_UNLOCK(sc); 1016bad: 1017 ifp->if_oerrors++; 1018 sc->sc_stats.ast_tx_raw_fail++; 1019 ieee80211_free_node(ni); 1020 return error; 1021} 1022