/* if_ath.c — FreeBSD revision 184357 */
1/*- 2 * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 13 * redistribution must be conditioned upon including a substantially 14 * similar Disclaimer requirement for further binary redistribution. 15 * 16 * NO WARRANTY 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 27 * THE POSSIBILITY OF SUCH DAMAGES. 28 */ 29 30#include <sys/cdefs.h> 31__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 184357 2008-10-27 17:43:23Z sam $"); 32 33/* 34 * Driver for the Atheros Wireless LAN controller. 35 * 36 * This software is derived from work of Atsushi Onoe; his contribution 37 * is greatly appreciated. 
38 */ 39 40#include "opt_inet.h" 41#include "opt_ath.h" 42 43#include <sys/param.h> 44#include <sys/systm.h> 45#include <sys/sysctl.h> 46#include <sys/mbuf.h> 47#include <sys/malloc.h> 48#include <sys/lock.h> 49#include <sys/mutex.h> 50#include <sys/kernel.h> 51#include <sys/socket.h> 52#include <sys/sockio.h> 53#include <sys/errno.h> 54#include <sys/callout.h> 55#include <sys/bus.h> 56#include <sys/endian.h> 57#include <sys/kthread.h> 58#include <sys/taskqueue.h> 59 60#include <machine/bus.h> 61 62#include <net/if.h> 63#include <net/if_dl.h> 64#include <net/if_media.h> 65#include <net/if_types.h> 66#include <net/if_arp.h> 67#include <net/ethernet.h> 68#include <net/if_llc.h> 69 70#include <net80211/ieee80211_var.h> 71 72#include <net/bpf.h> 73 74#ifdef INET 75#include <netinet/in.h> 76#include <netinet/if_ether.h> 77#endif 78 79#include <dev/ath/if_athvar.h> 80#include <contrib/dev/ath/ah_desc.h> 81#include <contrib/dev/ath/ah_devid.h> /* XXX for softled */ 82 83#ifdef ATH_TX99_DIAG 84#include <dev/ath/ath_tx99/ath_tx99.h> 85#endif 86 87/* 88 * We require a HAL w/ the changes for split tx/rx MIC. 89 */ 90CTASSERT(HAL_ABI_VERSION > 0x06052200); 91 92/* 93 * ATH_BCBUF determines the number of vap's that can transmit 94 * beacons and also (currently) the number of vap's that can 95 * have unique mac addresses/bssid. When staggering beacons 96 * 4 is probably a good max as otherwise the beacons become 97 * very closely spaced and there is limited time for cab q traffic 98 * to go out. You can burst beacons instead but that is not good 99 * for stations in power save and at some point you really want 100 * another radio (and channel). 101 * 102 * The limit on the number of mac addresses is tied to our use of 103 * the U/L bit and tracking addresses in a byte; it would be 104 * worthwhile to allow more for applications like proxy sta. 
105 */ 106CTASSERT(ATH_BCBUF <= 8); 107 108/* unaligned little endian access */ 109#define LE_READ_2(p) \ 110 ((u_int16_t) \ 111 ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8))) 112#define LE_READ_4(p) \ 113 ((u_int32_t) \ 114 ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8) | \ 115 (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24))) 116 117enum { 118 ATH_LED_TX, 119 ATH_LED_RX, 120 ATH_LED_POLL, 121}; 122 123static struct ieee80211vap *ath_vap_create(struct ieee80211com *, 124 const char name[IFNAMSIZ], int unit, int opmode, 125 int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], 126 const uint8_t mac[IEEE80211_ADDR_LEN]); 127static void ath_vap_delete(struct ieee80211vap *); 128static void ath_init(void *); 129static void ath_stop_locked(struct ifnet *); 130static void ath_stop(struct ifnet *); 131static void ath_start(struct ifnet *); 132static int ath_reset(struct ifnet *); 133static int ath_reset_vap(struct ieee80211vap *, u_long); 134static int ath_media_change(struct ifnet *); 135static void ath_watchdog(struct ifnet *); 136static int ath_ioctl(struct ifnet *, u_long, caddr_t); 137static void ath_fatal_proc(void *, int); 138static void ath_rxorn_proc(void *, int); 139static void ath_bmiss_vap(struct ieee80211vap *); 140static void ath_bmiss_proc(void *, int); 141static int ath_keyset(struct ath_softc *, const struct ieee80211_key *, 142 struct ieee80211_node *); 143static int ath_key_alloc(struct ieee80211vap *, 144 struct ieee80211_key *, 145 ieee80211_keyix *, ieee80211_keyix *); 146static int ath_key_delete(struct ieee80211vap *, 147 const struct ieee80211_key *); 148static int ath_key_set(struct ieee80211vap *, const struct ieee80211_key *, 149 const u_int8_t mac[IEEE80211_ADDR_LEN]); 150static void ath_key_update_begin(struct ieee80211vap *); 151static void ath_key_update_end(struct ieee80211vap *); 152static void ath_update_mcast(struct ifnet *); 153static void ath_update_promisc(struct ifnet *); 154static void 
ath_mode_init(struct ath_softc *); 155static void ath_setslottime(struct ath_softc *); 156static void ath_updateslot(struct ifnet *); 157static int ath_beaconq_setup(struct ath_hal *); 158static int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *); 159static void ath_beacon_update(struct ieee80211vap *, int item); 160static void ath_beacon_setup(struct ath_softc *, struct ath_buf *); 161static void ath_beacon_proc(void *, int); 162static struct ath_buf *ath_beacon_generate(struct ath_softc *, 163 struct ieee80211vap *); 164static void ath_bstuck_proc(void *, int); 165static void ath_beacon_return(struct ath_softc *, struct ath_buf *); 166static void ath_beacon_free(struct ath_softc *); 167static void ath_beacon_config(struct ath_softc *, struct ieee80211vap *); 168static void ath_descdma_cleanup(struct ath_softc *sc, 169 struct ath_descdma *, ath_bufhead *); 170static int ath_desc_alloc(struct ath_softc *); 171static void ath_desc_free(struct ath_softc *); 172static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *, 173 const uint8_t [IEEE80211_ADDR_LEN]); 174static void ath_node_free(struct ieee80211_node *); 175static void ath_node_getsignal(const struct ieee80211_node *, 176 int8_t *, int8_t *); 177static int ath_rxbuf_init(struct ath_softc *, struct ath_buf *); 178static void ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, 179 int subtype, int rssi, int noise, u_int32_t rstamp); 180static void ath_setdefantenna(struct ath_softc *, u_int); 181static void ath_rx_proc(void *, int); 182static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int); 183static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype); 184static int ath_tx_setup(struct ath_softc *, int, int); 185static int ath_wme_update(struct ieee80211com *); 186static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *); 187static void ath_tx_cleanup(struct ath_softc *); 188static void ath_freetx(struct mbuf *); 189static int 
ath_tx_start(struct ath_softc *, struct ieee80211_node *, 190 struct ath_buf *, struct mbuf *); 191static void ath_tx_proc_q0(void *, int); 192static void ath_tx_proc_q0123(void *, int); 193static void ath_tx_proc(void *, int); 194static void ath_tx_draintxq(struct ath_softc *, struct ath_txq *); 195static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); 196static void ath_draintxq(struct ath_softc *); 197static void ath_stoprecv(struct ath_softc *); 198static int ath_startrecv(struct ath_softc *); 199static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *); 200static void ath_scan_start(struct ieee80211com *); 201static void ath_scan_end(struct ieee80211com *); 202static void ath_set_channel(struct ieee80211com *); 203static void ath_calibrate(void *); 204static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int); 205static void ath_setup_stationkey(struct ieee80211_node *); 206static void ath_newassoc(struct ieee80211_node *, int); 207static int ath_setregdomain(struct ieee80211com *, 208 struct ieee80211_regdomain *, int, 209 struct ieee80211_channel []); 210static void ath_getradiocaps(struct ieee80211com *, int *, 211 struct ieee80211_channel []); 212static int ath_getchannels(struct ath_softc *); 213static void ath_led_event(struct ath_softc *, int); 214 215static int ath_rate_setup(struct ath_softc *, u_int mode); 216static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); 217 218static void ath_sysctlattach(struct ath_softc *); 219static int ath_raw_xmit(struct ieee80211_node *, 220 struct mbuf *, const struct ieee80211_bpf_params *); 221static void ath_bpfattach(struct ath_softc *); 222static void ath_announce(struct ath_softc *); 223 224SYSCTL_DECL(_hw_ath); 225 226/* XXX validate sysctl values */ 227static int ath_calinterval = 30; /* calibrate every 30 secs */ 228SYSCTL_INT(_hw_ath, OID_AUTO, calibrate, CTLFLAG_RW, &ath_calinterval, 229 0, "chip calibration interval (secs)"); 230 
231static int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */ 232SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf, 233 0, "rx buffers allocated"); 234TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf); 235static int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */ 236SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf, 237 0, "tx buffers allocated"); 238TUNABLE_INT("hw.ath.txbuf", &ath_txbuf); 239 240#ifdef ATH_DEBUG 241enum { 242 ATH_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ 243 ATH_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */ 244 ATH_DEBUG_RECV = 0x00000004, /* basic recv operation */ 245 ATH_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */ 246 ATH_DEBUG_RATE = 0x00000010, /* rate control */ 247 ATH_DEBUG_RESET = 0x00000020, /* reset processing */ 248 ATH_DEBUG_MODE = 0x00000040, /* mode init/setup */ 249 ATH_DEBUG_BEACON = 0x00000080, /* beacon handling */ 250 ATH_DEBUG_WATCHDOG = 0x00000100, /* watchdog timeout */ 251 ATH_DEBUG_INTR = 0x00001000, /* ISR */ 252 ATH_DEBUG_TX_PROC = 0x00002000, /* tx ISR proc */ 253 ATH_DEBUG_RX_PROC = 0x00004000, /* rx ISR proc */ 254 ATH_DEBUG_BEACON_PROC = 0x00008000, /* beacon ISR proc */ 255 ATH_DEBUG_CALIBRATE = 0x00010000, /* periodic calibration */ 256 ATH_DEBUG_KEYCACHE = 0x00020000, /* key cache management */ 257 ATH_DEBUG_STATE = 0x00040000, /* 802.11 state transitions */ 258 ATH_DEBUG_NODE = 0x00080000, /* node management */ 259 ATH_DEBUG_LED = 0x00100000, /* led management */ 260 ATH_DEBUG_FF = 0x00200000, /* fast frames */ 261 ATH_DEBUG_DFS = 0x00400000, /* DFS processing */ 262 ATH_DEBUG_REGDOMAIN = 0x02000000, /* regulatory processing */ 263 ATH_DEBUG_FATAL = 0x80000000, /* fatal errors */ 264 ATH_DEBUG_ANY = 0xffffffff 265}; 266static int ath_debug = 0; 267SYSCTL_INT(_hw_ath, OID_AUTO, debug, CTLFLAG_RW, &ath_debug, 268 0, "control debugging printfs"); 269TUNABLE_INT("hw.ath.debug", &ath_debug); 270 271#define IFF_DUMPPKTS(sc, m) \ 272 ((sc->sc_debug & (m)) || \ 273 
(sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2)) 274#define DPRINTF(sc, m, fmt, ...) do { \ 275 if (sc->sc_debug & (m)) \ 276 printf(fmt, __VA_ARGS__); \ 277} while (0) 278#define KEYPRINTF(sc, ix, hk, mac) do { \ 279 if (sc->sc_debug & ATH_DEBUG_KEYCACHE) \ 280 ath_keyprint(sc, __func__, ix, hk, mac); \ 281} while (0) 282static void ath_printrxbuf(const struct ath_buf *bf, u_int ix, int); 283static void ath_printtxbuf(const struct ath_buf *bf, u_int qnum, u_int ix, int done); 284#else 285#define IFF_DUMPPKTS(sc, m) \ 286 ((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2)) 287#define DPRINTF(sc, m, fmt, ...) do { \ 288 (void) sc; \ 289} while (0) 290#define KEYPRINTF(sc, k, ix, mac) do { \ 291 (void) sc; \ 292} while (0) 293#endif 294 295MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers"); 296 297int 298ath_attach(u_int16_t devid, struct ath_softc *sc) 299{ 300 struct ifnet *ifp; 301 struct ieee80211com *ic; 302 struct ath_hal *ah = NULL; 303 HAL_STATUS status; 304 int error = 0, i; 305 306 DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid); 307 308 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 309 if (ifp == NULL) { 310 device_printf(sc->sc_dev, "can not if_alloc()\n"); 311 error = ENOSPC; 312 goto bad; 313 } 314 ic = ifp->if_l2com; 315 316 /* set these up early for if_printf use */ 317 if_initname(ifp, device_get_name(sc->sc_dev), 318 device_get_unit(sc->sc_dev)); 319 320 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, &status); 321 if (ah == NULL) { 322 if_printf(ifp, "unable to attach hardware; HAL status %u\n", 323 status); 324 error = ENXIO; 325 goto bad; 326 } 327 if (ah->ah_abi != HAL_ABI_VERSION) { 328 if_printf(ifp, "HAL ABI mismatch detected " 329 "(HAL:0x%x != driver:0x%x)\n", 330 ah->ah_abi, HAL_ABI_VERSION); 331 error = ENXIO; 332 goto bad; 333 } 334 sc->sc_ah = ah; 335 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */ 336#ifdef ATH_DEBUG 337 sc->sc_debug = ath_debug; 
338#endif 339 340 /* 341 * Check if the MAC has multi-rate retry support. 342 * We do this by trying to setup a fake extended 343 * descriptor. MAC's that don't have support will 344 * return false w/o doing anything. MAC's that do 345 * support it will return true w/o doing anything. 346 */ 347 sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0); 348 349 /* 350 * Check if the device has hardware counters for PHY 351 * errors. If so we need to enable the MIB interrupt 352 * so we can act on stat triggers. 353 */ 354 if (ath_hal_hwphycounters(ah)) 355 sc->sc_needmib = 1; 356 357 /* 358 * Get the hardware key cache size. 359 */ 360 sc->sc_keymax = ath_hal_keycachesize(ah); 361 if (sc->sc_keymax > ATH_KEYMAX) { 362 if_printf(ifp, "Warning, using only %u of %u key cache slots\n", 363 ATH_KEYMAX, sc->sc_keymax); 364 sc->sc_keymax = ATH_KEYMAX; 365 } 366 /* 367 * Reset the key cache since some parts do not 368 * reset the contents on initial power up. 369 */ 370 for (i = 0; i < sc->sc_keymax; i++) 371 ath_hal_keyreset(ah, i); 372 373 /* 374 * Collect the default channel list. 375 */ 376 error = ath_getchannels(sc); 377 if (error != 0) 378 goto bad; 379 380 /* 381 * Setup rate tables for all potential media types. 382 */ 383 ath_rate_setup(sc, IEEE80211_MODE_11A); 384 ath_rate_setup(sc, IEEE80211_MODE_11B); 385 ath_rate_setup(sc, IEEE80211_MODE_11G); 386 ath_rate_setup(sc, IEEE80211_MODE_TURBO_A); 387 ath_rate_setup(sc, IEEE80211_MODE_TURBO_G); 388 ath_rate_setup(sc, IEEE80211_MODE_STURBO_A); 389 ath_rate_setup(sc, IEEE80211_MODE_11NA); 390 ath_rate_setup(sc, IEEE80211_MODE_11NG); 391 ath_rate_setup(sc, IEEE80211_MODE_HALF); 392 ath_rate_setup(sc, IEEE80211_MODE_QUARTER); 393 394 /* NB: setup here so ath_rate_update is happy */ 395 ath_setcurmode(sc, IEEE80211_MODE_11A); 396 397 /* 398 * Allocate tx+rx descriptors and populate the lists. 
399 */ 400 error = ath_desc_alloc(sc); 401 if (error != 0) { 402 if_printf(ifp, "failed to allocate descriptors: %d\n", error); 403 goto bad; 404 } 405 callout_init(&sc->sc_cal_ch, CALLOUT_MPSAFE); 406 407 ATH_TXBUF_LOCK_INIT(sc); 408 409 sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT, 410 taskqueue_thread_enqueue, &sc->sc_tq); 411 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, 412 "%s taskq", ifp->if_xname); 413 414 TASK_INIT(&sc->sc_rxtask, 0, ath_rx_proc, sc); 415 TASK_INIT(&sc->sc_rxorntask, 0, ath_rxorn_proc, sc); 416 TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc); 417 TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc); 418 419 /* 420 * Allocate hardware transmit queues: one queue for 421 * beacon frames and one data queue for each QoS 422 * priority. Note that the hal handles reseting 423 * these queues at the needed time. 424 * 425 * XXX PS-Poll 426 */ 427 sc->sc_bhalq = ath_beaconq_setup(ah); 428 if (sc->sc_bhalq == (u_int) -1) { 429 if_printf(ifp, "unable to setup a beacon xmit queue!\n"); 430 error = EIO; 431 goto bad2; 432 } 433 sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0); 434 if (sc->sc_cabq == NULL) { 435 if_printf(ifp, "unable to setup CAB xmit queue!\n"); 436 error = EIO; 437 goto bad2; 438 } 439 /* NB: insure BK queue is the lowest priority h/w queue */ 440 if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) { 441 if_printf(ifp, "unable to setup xmit queue for %s traffic!\n", 442 ieee80211_wme_acnames[WME_AC_BK]); 443 error = EIO; 444 goto bad2; 445 } 446 if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) || 447 !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) || 448 !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) { 449 /* 450 * Not enough hardware tx queues to properly do WME; 451 * just punt and assign them all to the same h/w queue. 452 * We could do a better job of this if, for example, 453 * we allocate queues when we switch from station to 454 * AP mode. 
455 */ 456 if (sc->sc_ac2q[WME_AC_VI] != NULL) 457 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); 458 if (sc->sc_ac2q[WME_AC_BE] != NULL) 459 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); 460 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; 461 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; 462 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; 463 } 464 465 /* 466 * Special case certain configurations. Note the 467 * CAB queue is handled by these specially so don't 468 * include them when checking the txq setup mask. 469 */ 470 switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) { 471 case 0x01: 472 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc); 473 break; 474 case 0x0f: 475 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc); 476 break; 477 default: 478 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc); 479 break; 480 } 481 482 /* 483 * Setup rate control. Some rate control modules 484 * call back to change the anntena state so expose 485 * the necessary entry points. 486 * XXX maybe belongs in struct ath_ratectrl? 487 */ 488 sc->sc_setdefantenna = ath_setdefantenna; 489 sc->sc_rc = ath_rate_attach(sc); 490 if (sc->sc_rc == NULL) { 491 error = EIO; 492 goto bad2; 493 } 494 495 sc->sc_blinking = 0; 496 sc->sc_ledstate = 1; 497 sc->sc_ledon = 0; /* low true */ 498 sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */ 499 callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE); 500 /* 501 * Auto-enable soft led processing for IBM cards and for 502 * 5211 minipci cards. Users can also manually enable/disable 503 * support with a sysctl. 
504 */ 505 sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID); 506 if (sc->sc_softled) { 507 ath_hal_gpioCfgOutput(ah, sc->sc_ledpin); 508 ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon); 509 } 510 511 ifp->if_softc = sc; 512 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; 513 ifp->if_start = ath_start; 514 ifp->if_watchdog = ath_watchdog; 515 ifp->if_ioctl = ath_ioctl; 516 ifp->if_init = ath_init; 517 IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); 518 ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; 519 IFQ_SET_READY(&ifp->if_snd); 520 521 ic->ic_ifp = ifp; 522 /* XXX not right but it's not used anywhere important */ 523 ic->ic_phytype = IEEE80211_T_OFDM; 524 ic->ic_opmode = IEEE80211_M_STA; 525 ic->ic_caps = 526 IEEE80211_C_STA /* station mode */ 527 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ 528 | IEEE80211_C_HOSTAP /* hostap mode */ 529 | IEEE80211_C_MONITOR /* monitor mode */ 530 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 531 | IEEE80211_C_WDS /* 4-address traffic works */ 532 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 533 | IEEE80211_C_SHSLOT /* short slot time supported */ 534 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ 535 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 536 | IEEE80211_C_TXFRAG /* handle tx frags */ 537 ; 538 /* 539 * Query the hal to figure out h/w crypto support. 
540 */ 541 if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP)) 542 ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP; 543 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB)) 544 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB; 545 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM)) 546 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM; 547 if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP)) 548 ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP; 549 if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) { 550 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP; 551 /* 552 * Check if h/w does the MIC and/or whether the 553 * separate key cache entries are required to 554 * handle both tx+rx MIC keys. 555 */ 556 if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC)) 557 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 558 /* 559 * If the h/w supports storing tx+rx MIC keys 560 * in one cache slot automatically enable use. 561 */ 562 if (ath_hal_hastkipsplit(ah) || 563 !ath_hal_settkipsplit(ah, AH_FALSE)) 564 sc->sc_splitmic = 1; 565 /* 566 * If the h/w can do TKIP MIC together with WME then 567 * we use it; otherwise we force the MIC to be done 568 * in software by the net80211 layer. 569 */ 570 if (ath_hal_haswmetkipmic(ah)) 571 sc->sc_wmetkipmic = 1; 572 } 573 sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR); 574 sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah); 575 /* 576 * Mark key cache slots associated with global keys 577 * as in use. If we knew TKIP was not to be used we 578 * could leave the +32, +64, and +32+64 slots free. 579 */ 580 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 581 setbit(sc->sc_keymap, i); 582 setbit(sc->sc_keymap, i+64); 583 if (sc->sc_splitmic) { 584 setbit(sc->sc_keymap, i+32); 585 setbit(sc->sc_keymap, i+32+64); 586 } 587 } 588 /* 589 * TPC support can be done either with a global cap or 590 * per-packet support. The latter is not available on 591 * all parts. We're a bit pedantic here as all parts 592 * support a global cap. 
593 */ 594 if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah)) 595 ic->ic_caps |= IEEE80211_C_TXPMGT; 596 597 /* 598 * Mark WME capability only if we have sufficient 599 * hardware queues to do proper priority scheduling. 600 */ 601 if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK]) 602 ic->ic_caps |= IEEE80211_C_WME; 603 /* 604 * Check for misc other capabilities. 605 */ 606 if (ath_hal_hasbursting(ah)) 607 ic->ic_caps |= IEEE80211_C_BURST; 608 sc->sc_hasbmask = ath_hal_hasbssidmask(ah); 609 sc->sc_hastsfadd = ath_hal_hastsfadjust(ah); 610 if (ath_hal_hasfastframes(ah)) 611 ic->ic_caps |= IEEE80211_C_FF; 612 if (ath_hal_getwirelessmodes(ah, ic->ic_regdomain.country) & (HAL_MODE_108G|HAL_MODE_TURBO)) 613 ic->ic_caps |= IEEE80211_C_TURBOP; 614 615 /* 616 * Indicate we need the 802.11 header padded to a 617 * 32-bit boundary for 4-address and QoS frames. 618 */ 619 ic->ic_flags |= IEEE80211_F_DATAPAD; 620 621 /* 622 * Query the hal about antenna support. 623 */ 624 sc->sc_defant = ath_hal_getdefantenna(ah); 625 626 /* 627 * Not all chips have the VEOL support we want to 628 * use with IBSS beacons; check here for it. 629 */ 630 sc->sc_hasveol = ath_hal_hasveol(ah); 631 632 /* get mac address from hardware */ 633 ath_hal_getmac(ah, ic->ic_myaddr); 634 if (sc->sc_hasbmask) 635 ath_hal_getbssidmask(ah, sc->sc_hwbssidmask); 636 637 /* NB: used to size node table key mapping array */ 638 ic->ic_max_keyix = sc->sc_keymax; 639 /* call MI attach routine. 
*/ 640 ieee80211_ifattach(ic); 641 ic->ic_setregdomain = ath_setregdomain; 642 ic->ic_getradiocaps = ath_getradiocaps; 643 sc->sc_opmode = HAL_M_STA; 644 645 /* override default methods */ 646 ic->ic_newassoc = ath_newassoc; 647 ic->ic_updateslot = ath_updateslot; 648 ic->ic_wme.wme_update = ath_wme_update; 649 ic->ic_vap_create = ath_vap_create; 650 ic->ic_vap_delete = ath_vap_delete; 651 ic->ic_raw_xmit = ath_raw_xmit; 652 ic->ic_update_mcast = ath_update_mcast; 653 ic->ic_update_promisc = ath_update_promisc; 654 ic->ic_node_alloc = ath_node_alloc; 655 sc->sc_node_free = ic->ic_node_free; 656 ic->ic_node_free = ath_node_free; 657 ic->ic_node_getsignal = ath_node_getsignal; 658 ic->ic_scan_start = ath_scan_start; 659 ic->ic_scan_end = ath_scan_end; 660 ic->ic_set_channel = ath_set_channel; 661 662 ath_bpfattach(sc); 663 /* 664 * Setup dynamic sysctl's now that country code and 665 * regdomain are available from the hal. 666 */ 667 ath_sysctlattach(sc); 668 669 if (bootverbose) 670 ieee80211_announce(ic); 671 ath_announce(sc); 672 return 0; 673bad2: 674 ath_tx_cleanup(sc); 675 ath_desc_free(sc); 676bad: 677 if (ah) 678 ath_hal_detach(ah); 679 if (ifp != NULL) 680 if_free(ifp); 681 sc->sc_invalid = 1; 682 return error; 683} 684 685int 686ath_detach(struct ath_softc *sc) 687{ 688 struct ifnet *ifp = sc->sc_ifp; 689 690 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 691 __func__, ifp->if_flags); 692 693 /* 694 * NB: the order of these is important: 695 * o stop the chip so no more interrupts will fire 696 * o call the 802.11 layer before detaching the hal to 697 * insure callbacks into the driver to delete global 698 * key cache entries can be handled 699 * o free the taskqueue which drains any pending tasks 700 * o reclaim the bpf tap now that we know nothing will use 701 * it (e.g. 
rx processing from the task q thread)
 *	 o reclaim the tx queue data structures after calling
 *	   the 802.11 layer as we'll get called back to reclaim
 *	   node state and potentially want to use them
 *	 o to cleanup the tx queues the hal is called, so detach
 *	   it last
 * Other than that, it's straightforward...
 */
	ath_stop(ifp);
	ieee80211_ifdetach(ifp->if_l2com);
	taskqueue_free(sc->sc_tq);
	bpfdetach(ifp);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ath_rate_detach(sc->sc_rc);
	ath_desc_free(sc);
	ath_tx_cleanup(sc);
	/* NB: must be last; the tx queue cleanup above still uses the hal */
	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */
	if_free(ifp);

	return 0;
}

/*
 * MAC address handling for multiple BSS on the same radio.
 * The first vap uses the MAC address from the EEPROM.  For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
 */
static void
assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && sc->sc_hasbmask) {
		/* NB: we only do this if h/w supports multiple bssid */
		/* find the first unused bssid slot (bits 0..7 of sc_bssidmask) */
		for (i = 0; i < 8; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		/*
		 * NOTE(review): if all 8 slots are in use the loop exits with
		 * i == 8, which sets bits beyond the 6-bit index and marks a
		 * bit outside sc_bssidmask below; callers appear to bound the
		 * number of vaps (CTASSERT(ATH_BCBUF <= 8)) — confirm.
		 */
		if (i != 0)
			mac[0] |= (i << 2)|0x2;	/* index in bits 2..7, U/L bit set */
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;	/* mark slot in use */
	sc->sc_hwbssidmask[0] &= ~mac[0];	/* widen h/w bssid mask to cover new addr */
	if (i == 0)
		sc->sc_nbssid0++;	/* count users of the base (EEPROM) address */
}

/*
 * Release a MAC address previously handed out by assign_address and
 * recompute the hardware bssid mask from the addresses still in use.
 */
static void
reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	int i = mac[0] >> 2;	/* recover slot index from the address byte */
	uint8_t mask;

	/* slot 0 (base address) is only freed when its last user goes away */
	if (i != 0 || --sc->sc_nbssid0 == 0) {
		sc->sc_bssidmask &= ~(1<<i);
		/* recalculate bssid mask from remaining addresses */
		mask = 0xff;
		for (i = 1; i < 8; i++)
			if (sc->sc_bssidmask & (1<<i))
				mask &= ~((i<<2)|0x2);
		sc->sc_hwbssidmask[0] |= mask;
	}
}

/*
 * Assign a beacon xmit slot. 
We try to space out 771 * assignments so when beacons are staggered the 772 * traffic coming out of the cab q has maximal time 773 * to go out before the next beacon is scheduled. 774 */ 775static int 776assign_bslot(struct ath_softc *sc) 777{ 778 u_int slot, free; 779 780 free = 0; 781 for (slot = 0; slot < ATH_BCBUF; slot++) 782 if (sc->sc_bslot[slot] == NULL) { 783 if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL && 784 sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL) 785 return slot; 786 free = slot; 787 /* NB: keep looking for a double slot */ 788 } 789 return free; 790} 791 792static struct ieee80211vap * 793ath_vap_create(struct ieee80211com *ic, 794 const char name[IFNAMSIZ], int unit, int opmode, int flags, 795 const uint8_t bssid[IEEE80211_ADDR_LEN], 796 const uint8_t mac0[IEEE80211_ADDR_LEN]) 797{ 798 struct ath_softc *sc = ic->ic_ifp->if_softc; 799 struct ath_vap *avp; 800 struct ieee80211vap *vap; 801 uint8_t mac[IEEE80211_ADDR_LEN]; 802 int ic_opmode, needbeacon, error; 803 804 avp = (struct ath_vap *) malloc(sizeof(struct ath_vap), 805 M_80211_VAP, M_WAITOK | M_ZERO); 806 needbeacon = 0; 807 IEEE80211_ADDR_COPY(mac, mac0); 808 809 ATH_LOCK(sc); 810 switch (opmode) { 811 case IEEE80211_M_STA: 812 if (sc->sc_nstavaps != 0) { /* XXX only 1 sta for now */ 813 device_printf(sc->sc_dev, "only 1 sta vap supported\n"); 814 goto bad; 815 } 816 if (sc->sc_nvaps) { 817 /* 818 * When there are multiple vaps we must fall 819 * back to s/w beacon miss handling. 820 */ 821 flags |= IEEE80211_CLONE_NOBEACONS; 822 } 823 if (flags & IEEE80211_CLONE_NOBEACONS) { 824 sc->sc_swbmiss = 1; 825 ic_opmode = IEEE80211_M_HOSTAP; 826 } else 827 ic_opmode = opmode; 828 break; 829 case IEEE80211_M_IBSS: 830 if (sc->sc_nvaps != 0) { /* XXX only 1 for now */ 831 device_printf(sc->sc_dev, 832 "only 1 ibss vap supported\n"); 833 goto bad; 834 } 835 ic_opmode = opmode; 836 needbeacon = 1; 837 break; 838 case IEEE80211_M_AHDEMO: 839 /* fall thru... 
*/ 840 case IEEE80211_M_MONITOR: 841 if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) { 842 /* XXX not right for monitor mode */ 843 ic_opmode = ic->ic_opmode; 844 } else 845 ic_opmode = opmode; 846 break; 847 case IEEE80211_M_HOSTAP: 848 needbeacon = 1; 849 /* fall thru... */ 850 case IEEE80211_M_WDS: 851 if (sc->sc_nvaps && ic->ic_opmode == IEEE80211_M_STA) { 852 device_printf(sc->sc_dev, 853 "wds not supported in sta mode\n"); 854 goto bad; 855 } 856 if (opmode == IEEE80211_M_WDS) { 857 /* 858 * Silently remove any request for a unique 859 * bssid; WDS vap's always share the local 860 * mac address. 861 */ 862 flags &= ~IEEE80211_CLONE_BSSID; 863 } 864 ic_opmode = IEEE80211_M_HOSTAP; 865 break; 866 default: 867 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); 868 goto bad; 869 } 870 /* 871 * Check that a beacon buffer is available; the code below assumes it. 872 */ 873 if (needbeacon & STAILQ_EMPTY(&sc->sc_bbuf)) { 874 device_printf(sc->sc_dev, "no beacon buffer available\n"); 875 goto bad; 876 } 877 878 /* STA, AHDEMO? 
*/ 879 if (opmode == IEEE80211_M_HOSTAP) { 880 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID); 881 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); 882 } 883 884 vap = &avp->av_vap; 885 /* XXX can't hold mutex across if_alloc */ 886 ATH_UNLOCK(sc); 887 error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, 888 bssid, mac); 889 ATH_LOCK(sc); 890 if (error != 0) { 891 device_printf(sc->sc_dev, "%s: error %d creating vap\n", 892 __func__, error); 893 goto bad2; 894 } 895 896 /* h/w crypto support */ 897 vap->iv_key_alloc = ath_key_alloc; 898 vap->iv_key_delete = ath_key_delete; 899 vap->iv_key_set = ath_key_set; 900 vap->iv_key_update_begin = ath_key_update_begin; 901 vap->iv_key_update_end = ath_key_update_end; 902 903 /* override various methods */ 904 avp->av_recv_mgmt = vap->iv_recv_mgmt; 905 vap->iv_recv_mgmt = ath_recv_mgmt; 906 vap->iv_reset = ath_reset_vap; 907 vap->iv_update_beacon = ath_beacon_update; 908 avp->av_newstate = vap->iv_newstate; 909 vap->iv_newstate = ath_newstate; 910 avp->av_bmiss = vap->iv_bmiss; 911 vap->iv_bmiss = ath_bmiss_vap; 912 913 avp->av_bslot = -1; 914 if (needbeacon) { 915 /* 916 * Allocate beacon state and setup the q for buffered 917 * multicast frames. We know a beacon buffer is 918 * available because we checked above. 919 */ 920 avp->av_bcbuf = STAILQ_FIRST(&sc->sc_bbuf); 921 STAILQ_REMOVE_HEAD(&sc->sc_bbuf, bf_list); 922 if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) { 923 /* 924 * Assign the vap to a beacon xmit slot. As above 925 * this cannot fail to find a free one. 926 */ 927 avp->av_bslot = assign_bslot(sc); 928 KASSERT(sc->sc_bslot[avp->av_bslot] == NULL, 929 ("beacon slot %u not empty", avp->av_bslot)); 930 sc->sc_bslot[avp->av_bslot] = vap; 931 sc->sc_nbcnvaps++; 932 } 933 if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) { 934 /* 935 * Multple vaps are to transmit beacons and we 936 * have h/w support for TSF adjusting; enable 937 * use of staggered beacons. 
			 */
			sc->sc_stagbeacons = 1;
		}
		ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
	}

	ic->ic_opmode = ic_opmode;
	if (opmode != IEEE80211_M_WDS) {
		/* NB: WDS vap's do not count against the vap/sta totals */
		sc->sc_nvaps++;
		if (opmode == IEEE80211_M_STA)
			sc->sc_nstavaps++;
	}
	switch (ic_opmode) {
	case IEEE80211_M_IBSS:
		sc->sc_opmode = HAL_M_IBSS;
		break;
	case IEEE80211_M_STA:
		sc->sc_opmode = HAL_M_STA;
		break;
	case IEEE80211_M_AHDEMO:
	case IEEE80211_M_HOSTAP:
		sc->sc_opmode = HAL_M_HOSTAP;
		break;
	case IEEE80211_M_MONITOR:
		sc->sc_opmode = HAL_M_MONITOR;
		break;
	default:
		/* XXX should not happen */
		break;
	}
	if (sc->sc_hastsfadd) {
		/*
		 * Configure whether or not TSF adjust should be done.
		 */
		ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
	}
	ATH_UNLOCK(sc);

	/* complete setup */
	ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
	return vap;
bad2:
	reclaim_address(sc, mac);
	ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
bad:
	free(avp, M_80211_VAP);
	ATH_UNLOCK(sc);
	return NULL;
}

/*
 * Tear down a vap created by ath_vap_create: quiesce the hardware,
 * detach the net80211 state, and reclaim beacon/mac-address/counter
 * bookkeeping held by the driver for this vap.
 */
static void
ath_vap_delete(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp = ATH_VAP(vap);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Quiesce the hardware while we remove the vap.  In
		 * particular we need to reclaim all references to
		 * the vap state by any frames pending on the tx queues.
		 */
		ath_hal_intrset(ah, 0);		/* disable interrupts */
		ath_draintxq(sc);		/* stop xmit side */
		ath_stoprecv(sc);		/* stop recv side */
	}

	ieee80211_vap_detach(vap);
	ATH_LOCK(sc);
	/*
	 * Reclaim beacon state.  Note this must be done before
	 * the vap instance is reclaimed as we may have a reference
	 * to it in the buffer for the beacon frame.
	 */
	if (avp->av_bcbuf != NULL) {
		if (avp->av_bslot != -1) {
			sc->sc_bslot[avp->av_bslot] = NULL;
			sc->sc_nbcnvaps--;
		}
		ath_beacon_return(sc, avp->av_bcbuf);
		avp->av_bcbuf = NULL;
		if (sc->sc_nbcnvaps == 0) {
			/* last beaconing vap gone; disable staggering/tsfadd */
			sc->sc_stagbeacons = 0;
			if (sc->sc_hastsfadd)
				ath_hal_settsfadjust(sc->sc_ah, 0);
		}
		/*
		 * Reclaim any pending mcast frames for the vap.
		 */
		ath_tx_draintxq(sc, &avp->av_mcastq);
		ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);
	}
	/*
	 * Update bookkeeping.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		sc->sc_nstavaps--;
		if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
			sc->sc_swbmiss = 0;
	} else if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
		/* give back the mac address assigned in ath_vap_create */
		reclaim_address(sc, vap->iv_myaddr);
		ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
	}
	if (vap->iv_opmode != IEEE80211_M_WDS)
		sc->sc_nvaps--;
	ATH_UNLOCK(sc);
	free(avp, M_80211_VAP);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Restart rx+tx machines if still running (RUNNING will
		 * be reset if we just destroyed the last vap).
		 */
		if (ath_startrecv(sc) != 0)
			if_printf(ifp, "%s: unable to restart recv logic\n",
			    __func__);
		if (sc->sc_beacons)
			ath_beacon_config(sc, NULL);
		ath_hal_intrset(ah, sc->sc_imask);
	}
}

/*
 * Suspend: record whether the interface was up (for ath_resume)
 * and bring the device down.
 */
void
ath_suspend(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;
	if (ic->ic_opmode == IEEE80211_M_STA)
		ath_stop(ifp);
	else
		ieee80211_suspend_all(ic);
	/*
	 * NB: don't worry about putting the chip in low power
	 * mode; pci will power off our socket on suspend and
	 * cardbus detaches the device.
	 */
}

/*
 * Reset the key cache since some parts do not reset the
 * contents on resume.  First we clear all entries, then
 * re-load keys that the 802.11 layer assumes are setup
 * in h/w.
 */
static void
ath_reset_keycache(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	int i;

	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);
	ieee80211_crypto_reload_keys(ic);
}

/*
 * Resume: re-initialize the hardware, reload the key cache, and
 * bring the interface back up if it was up at suspend time.
 */
void
ath_resume(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * Must reset the chip before we reload the
	 * keycache as we were powered down on suspend.
	 */
	ath_hal_reset(ah, sc->sc_opmode, &sc->sc_curchan, AH_FALSE, &status);
	ath_reset_keycache(sc);
	if (sc->sc_resume_up) {
		if (ic->ic_opmode == IEEE80211_M_STA) {
			ath_init(sc);
			/* nudge net80211 in case we lost the AP while down */
			ieee80211_beacon_miss(ic);
		} else
			ieee80211_resume_all(ic);
	}
	if (sc->sc_softled) {
		/* restore software-LED GPIO state lost across the reset */
		ath_hal_gpioCfgOutput(ah, sc->sc_ledpin);
		ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
	}
}

/*
 * Shutdown: just stop the device; no need for low-power handling
 * since the machine is rebooting.
 */
void
ath_shutdown(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	ath_stop(ifp);
	/* NB: no point powering down chip as we're about to reboot */
}

/*
 * Interrupt handler.  Most of the actual processing is deferred.
 */
void
ath_intr(void *arg)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	HAL_INT status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	if (!ath_hal_intrpend(ah))		/* shared irq, not for us */
		return;
	if ((ifp->if_flags & IFF_UP) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		/* NB: this inner declaration shadows the outer 'status' */
		HAL_INT status;

		DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
			__func__, ifp->if_flags);
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.  Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to insure we only process bits we requested.
	 */
	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
	status &= sc->sc_imask;			/* discard unasked for bits */
	if (status & HAL_INT_FATAL) {
		sc->sc_stats.ast_hardware++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		ath_fatal_proc(sc, 0);
	} else if (status & HAL_INT_RXORN) {
		sc->sc_stats.ast_rxorn++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxorntask);
	} else {
		if (status & HAL_INT_SWBA) {
			/*
			 * Software beacon alert--time to send a beacon.
			 * Handle beacon transmission directly; deferring
			 * this is too slow to meet timing constraints
			 * under load.
			 */
			ath_beacon_proc(sc, 0);
		}
		if (status & HAL_INT_RXEOL) {
			/*
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			sc->sc_stats.ast_rxeol++;
			sc->sc_rxlink = NULL;
		}
		if (status & HAL_INT_TXURN) {
			sc->sc_stats.ast_txurn++;
			/* bump tx trigger level */
			ath_hal_updatetxtriglevel(ah, AH_TRUE);
		}
		if (status & HAL_INT_RX)
			taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
		if (status & HAL_INT_TX)
			taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
		if (status & HAL_INT_BMISS) {
			sc->sc_stats.ast_bmiss++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
		}
		if (status & HAL_INT_MIB) {
			sc->sc_stats.ast_mib++;
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to fire.
			 */
			ath_hal_intrset(ah, 0);
			/*
			 * Let the hal handle the event.  We assume it will
			 * clear whatever condition caused the interrupt.
			 */
			ath_hal_mibevent(ah, &sc->sc_halstats);
			ath_hal_intrset(ah, sc->sc_imask);
		}
	}
}

/*
 * Deferred handler for fatal hardware interrupts: dump any h/w
 * diagnostic state the hal provides, then do a full chip reset.
 */
static void
ath_fatal_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t *state;
	u_int32_t len;
	void *sp;

	if_printf(ifp, "hardware error; resetting\n");
	/*
	 * Fatal errors are unrecoverable.  Typically these
	 * are caused by DMA errors.  Collect h/w state from
	 * the hal so we can diagnose what's going on.
	 */
	if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
		KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
		state = sp;
		if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
		    state[0], state[1], state[2], state[3],
		    state[4], state[5]);
	}
	ath_reset(ifp);
}

/*
 * Deferred handler for rx FIFO overrun: recover with a chip reset.
 */
static void
ath_rxorn_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;

	if_printf(ifp, "rx FIFO overrun; resetting\n");
	ath_reset(ifp);
}

/*
 * Per-vap beacon-miss handler installed in iv_bmiss; filters out
 * phantom bmiss interrupts before chaining to the saved net80211
 * handler (av_bmiss).
 */
static void
ath_bmiss_vap(struct ieee80211vap *vap)
{
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	u_int64_t lastrx = sc->sc_lastrx;
	u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
	/* threshold in usecs: beacon intervals (TU) scaled by 1024 */
	u_int bmisstimeout =
		vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;

	DPRINTF(sc, ATH_DEBUG_BEACON,
	    "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
	    __func__, (unsigned long long) tsf,
	    (unsigned long long)(tsf - lastrx),
	    (unsigned long long) lastrx, bmisstimeout);
	/*
	 * Workaround phantom bmiss interrupts by sanity-checking
	 * the time of our last rx'd frame.  If it is within the
	 * beacon miss interval then ignore the interrupt.  If it's
	 * truly a bmiss we'll get another interrupt soon and that'll
	 * be dispatched up for processing.
	 */
	if (tsf - lastrx > bmisstimeout)
		ATH_VAP(vap)->av_bmiss(vap);
	else
		sc->sc_stats.ast_bmiss_phantom++;
}

/*
 * Deferred (taskqueue) beacon-miss handler; hands the event to
 * net80211 for state machine processing.
 */
static void
ath_bmiss_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
	ieee80211_beacon_miss(ifp->if_l2com);
}

/*
 * Convert net80211 channel to a HAL channel with the flags
 * constrained to reflect the current operating mode and
 * the frequency possibly mapped for GSM channels.
 */
static void
ath_mapchan(HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
{
#define	N(a)	(sizeof(a) / sizeof(a[0]))
	/* phy mode -> base HAL channel flags; 0 marks unsupported modes */
	static const u_int modeflags[IEEE80211_MODE_MAX] = {
		0,			/* IEEE80211_MODE_AUTO */
		CHANNEL_A,		/* IEEE80211_MODE_11A */
		CHANNEL_B,		/* IEEE80211_MODE_11B */
		CHANNEL_PUREG,		/* IEEE80211_MODE_11G */
		0,			/* IEEE80211_MODE_FH */
		CHANNEL_108A,		/* IEEE80211_MODE_TURBO_A */
		CHANNEL_108G,		/* IEEE80211_MODE_TURBO_G */
		CHANNEL_ST,		/* IEEE80211_MODE_STURBO_A */
		CHANNEL_A,		/* IEEE80211_MODE_11NA */
		CHANNEL_PUREG,		/* IEEE80211_MODE_11NG */
	};
	enum ieee80211_phymode mode = ieee80211_chan2mode(chan);

	KASSERT(mode < N(modeflags), ("unexpected phy mode %u", mode));
	KASSERT(modeflags[mode] != 0, ("mode %u undefined", mode));
	hc->channelFlags = modeflags[mode];
	if (IEEE80211_IS_CHAN_HALF(chan))
		hc->channelFlags |= CHANNEL_HALF;
	if (IEEE80211_IS_CHAN_QUARTER(chan))
		hc->channelFlags |= CHANNEL_QUARTER;
	if (IEEE80211_IS_CHAN_HT20(chan))
		hc->channelFlags |= CHANNEL_HT20;
	if (IEEE80211_IS_CHAN_HT40D(chan))
		hc->channelFlags |= CHANNEL_HT40MINUS;
	if (IEEE80211_IS_CHAN_HT40U(chan))
		hc->channelFlags |= CHANNEL_HT40PLUS;

	/* GSM channels are remapped into the 2.4GHz range for the hal */
	hc->channel = IEEE80211_IS_CHAN_GSM(chan) ?
		2422 + (922 - chan->ic_freq) : chan->ic_freq;
#undef N
}

/*
 * Handle TKIP MIC setup to deal hardware that doesn't do MIC
 * calcs together with WME.  If necessary disable the crypto
 * hardware and mark the 802.11 state so keys will be setup
 * with the MIC work done in software.
 */
static void
ath_settkipmic(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
		if (ic->ic_flags & IEEE80211_F_WME) {
			/* WME + no h/w WME-MIC support: do MIC in s/w */
			ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
			ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
		} else {
			ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		}
	}
}

/*
 * Bring the hardware up: reset the chip for the current channel,
 * start the receive engine, and enable interrupts.  Invoked both
 * at attach/ifconfig-up time and from ath_resume.
 */
static void
ath_init(void *arg)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	ATH_LOCK(sc);
	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop_locked(ifp);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ath_mapchan(&sc->sc_curchan, ic->ic_curchan);
	ath_settkipmic(sc);
	if (!ath_hal_reset(ah, sc->sc_opmode, &sc->sc_curchan, AH_FALSE, &status)) {
		if_printf(ifp, "unable to reset hardware; hal status %u\n",
			status);
		ATH_UNLOCK(sc);
		return;
	}
	ath_chan_change(sc, ic->ic_curchan);

	/*
	 * Likewise this is set during reset so update
	 * state cached in the driver.
	 */
	sc->sc_diversity = ath_hal_getdiversity(ah);
	sc->sc_calinterval = 1;		/* restart calibration schedule */
	sc->sc_caltries = 0;

	/*
	 * Setup the hardware after reset: the key cache
	 * is filled as needed and the receive engine is
	 * set going.  Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		ATH_UNLOCK(sc);
		return;
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = HAL_INT_RX | HAL_INT_TX
		  | HAL_INT_RXEOL | HAL_INT_RXORN
		  | HAL_INT_FATAL | HAL_INT_GLOBAL;
	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
		sc->sc_imask |= HAL_INT_MIB;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ath_hal_intrset(ah, sc->sc_imask);

	ATH_UNLOCK(sc);

#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->start(sc->sc_tx99);
	else
#endif
	ieee80211_start_all(ic);		/* start all vap's */
}

/*
 * Stop the device (locked variant); caller must hold the softc lock.
 */
static void
ath_stop_locked(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	ATH_LOCK_ASSERT(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Shutdown the hardware and driver:
		 *    reset 802.11 state machine
		 *    turn off timers
		 *    disable interrupts
		 *    turn off the radio
		 *    clear transmit machinery
		 *    clear receive machinery
		 *    drain and release tx queues
		 *    reclaim beacon resources
		 *    power down hardware
		 *
		 * Note that some of this work is not possible if the
		 * hardware is gone (invalid).
		 */
#ifdef ATH_TX99_DIAG
		if (sc->sc_tx99 != NULL)
			sc->sc_tx99->stop(sc->sc_tx99);
#endif
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		ifp->if_timer = 0;
		if (!sc->sc_invalid) {
			if (sc->sc_softled) {
				callout_stop(&sc->sc_ledtimer);
				ath_hal_gpioset(ah, sc->sc_ledpin,
					!sc->sc_ledon);
				sc->sc_blinking = 0;
			}
			ath_hal_intrset(ah, 0);
		}
		ath_draintxq(sc);
		if (!sc->sc_invalid) {
			ath_stoprecv(sc);
			ath_hal_phydisable(ah);
		} else
			sc->sc_rxlink = NULL;
		ath_beacon_free(sc);	/* XXX not needed */
	}
}

/*
 * Stop the device; acquires the softc lock around ath_stop_locked.
 */
static void
ath_stop(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;

	ATH_LOCK(sc);
	ath_stop_locked(ifp);
	ATH_UNLOCK(sc);
}

/*
 * Reset the hardware w/o losing operational state.  This is
 * basically a more efficient way of doing ath_stop, ath_init,
 * followed by state transitions to the current 802.11
 * operational state.  Used to recover from various errors and
 * to reset or reload hardware state.
 */
static int
ath_reset(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	/*
	 * Convert to a HAL channel description with the flags
	 * constrained to reflect the current operating mode.
	 */
	ath_mapchan(&sc->sc_curchan, ic->ic_curchan);

	ath_hal_intrset(ah, 0);		/* disable interrupts */
	ath_draintxq(sc);		/* stop xmit side */
	ath_stoprecv(sc);		/* stop recv side */
	ath_settkipmic(sc);		/* configure TKIP MIC handling */
	/* NB: indicate channel change so we do a full reset */
	if (!ath_hal_reset(ah, sc->sc_opmode, &sc->sc_curchan, AH_TRUE, &status))
		if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
			__func__, status);
	sc->sc_diversity = ath_hal_getdiversity(ah);
	sc->sc_calinterval = 1;		/* restart calibration schedule */
	sc->sc_caltries = 0;
	if (ath_startrecv(sc) != 0)	/* restart recv */
		if_printf(ifp, "%s: unable to start recv logic\n", __func__);
	/*
	 * We may be doing a reset in response to an ioctl
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_chan_change(sc, ic->ic_curchan);
	if (sc->sc_beacons)
		ath_beacon_config(sc, NULL);	/* restart beacons */
	ath_hal_intrset(ah, sc->sc_imask);

	ath_start(ifp);			/* restart xmit */
	return 0;
}

/*
 * Per-vap reset method (iv_reset).  Most parameter changes need a
 * full chip reset; tx power is special-cased since it can be set
 * directly when per-packet TPC is not in use.
 */
static int
ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	switch (cmd) {
	case IEEE80211_IOC_TXPOWER:
		/*
		 * If per-packet TPC is enabled, then we have nothing
		 * to do; otherwise we need to force the global limit.
		 * All this can happen directly; no need to reset.
		 */
		if (!ath_hal_gettpc(ah))
			ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
		return 0;
	}
	return ath_reset(ifp);
}

/*
 * Fast-frame flush predicate that never stops early; used to drain
 * the whole staging queue.
 */
static int
ath_ff_always(struct ath_txq *txq, struct ath_buf *bf)
{
	return 0;
}

#if 0
static int
ath_ff_ageflushtestdone(struct ath_txq *txq, struct ath_buf *bf)
{
	return (txq->axq_curage - bf->bf_age) < ATH_FF_STAGEMAX;
}
#endif

/*
 * Flush FF staging queue.
 */
static void
ath_ff_stageq_flush(struct ath_softc *sc, struct ath_txq *txq,
	int (*ath_ff_flushdonetest)(struct ath_txq *txq, struct ath_buf *bf))
{
	struct ath_buf *bf;
	struct ieee80211_node *ni;
	int pktlen, pri;

	for (;;) {
		ATH_TXQ_LOCK(txq);
		/*
		 * Go from the back (oldest) to front so we can
		 * stop early based on the age of the entry.
		 */
		bf = TAILQ_LAST(&txq->axq_stageq, axq_headtype);
		if (bf == NULL || ath_ff_flushdonetest(txq, bf)) {
			ATH_TXQ_UNLOCK(txq);
			break;
		}

		ni = bf->bf_node;
		pri = M_WME_GETAC(bf->bf_m);
		KASSERT(ATH_NODE(ni)->an_ff_buf[pri],
		    ("no bf on staging queue %p", bf));
		ATH_NODE(ni)->an_ff_buf[pri] = NULL;
		TAILQ_REMOVE(&txq->axq_stageq, bf, bf_stagelist);

		ATH_TXQ_UNLOCK(txq);

		DPRINTF(sc, ATH_DEBUG_FF, "%s: flush frame, age %u\n",
		    __func__, bf->bf_age);

		sc->sc_stats.ast_ff_flush++;

		/* encap and xmit */
		bf->bf_m = ieee80211_encap(ni, bf->bf_m);
		if (bf->bf_m == NULL) {
			DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
				"%s: discard, encapsulation failure\n",
				__func__);
			sc->sc_stats.ast_tx_encap++;
			goto bad;
		}
		/* NOTE(review): pktlen is currently a dead store (see #if 0) */
		pktlen = bf->bf_m->m_pkthdr.len; /* NB: don't reference below */
		if (ath_tx_start(sc, ni, bf, bf->bf_m) == 0) {
#if 0 /*XXX*/
			ifp->if_opackets++;
#endif
			continue;
		}
	bad:
		/* reclaim node reference, mbuf, and tx buffer */
		if (ni != NULL)
			ieee80211_free_node(ni);
		bf->bf_node = NULL;
		if (bf->bf_m != NULL) {
			m_freem(bf->bf_m);
			bf->bf_m = NULL;
		}

		ATH_TXBUF_LOCK(sc);
		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
		ATH_TXBUF_UNLOCK(sc);
	}
}

/*
 * Estimate the on-air time of a frame if it were aggregated with
 * any frame already staged for the same WME access class.
 */
static __inline u_int32_t
ath_ff_approx_txtime(struct ath_softc *sc, struct ath_node *an, struct mbuf *m)
{
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	u_int32_t framelen;
	struct ath_buf *bf;

	/*
	 * Approximate the frame length to be transmitted. A swag to add
	 * the following maximal values to the skb payload:
	 *   - 32: 802.11 encap + CRC
	 *   - 24: encryption overhead (if wep bit)
	 *   - 4 + 6: fast-frame header and padding
	 *   - 16: 2 LLC FF tunnel headers
	 *   - 14: 1 802.3 FF tunnel header (skb already accounts for 2nd)
	 */
	framelen = m->m_pkthdr.len + 32 + 4 + 6 + 16 + 14;
	if (ic->ic_flags & IEEE80211_F_PRIVACY)
		framelen += 24;
	bf = an->an_ff_buf[M_WME_GETAC(m)];
	if (bf != NULL)
		framelen += bf->bf_m->m_pkthdr.len;
	return ath_hal_computetxtime(sc->sc_ah, sc->sc_currates, framelen,
			sc->sc_lastdatarix, AH_FALSE);
}

/*
 * Determine if a data frame may be aggregated via ff tunnelling.
 * Note the caller is responsible for checking if the destination
 * supports fast frames.
 *
 * NB: allowing EAPOL frames to be aggregated with other unicast traffic.
 *     Do 802.1x EAPOL frames proceed in the clear?  Then they couldn't
 *     be aggregated with other types of frames when encryption is on?
 *
 * NB: assumes lock on an_ff_buf effectively held by txq lock mechanism.
 */
static __inline int
ath_ff_can_aggregate(struct ath_softc *sc,
	struct ath_node *an, struct mbuf *m, int *flushq)
{
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	struct ath_txq *txq;
	u_int32_t txoplimit;
	u_int pri;

	*flushq = 0;

	/*
	 * If there is no frame to combine with and the txq has
	 * fewer frames than the minimum required; then do not
	 * attempt to aggregate this frame.
	 */
	pri = M_WME_GETAC(m);
	txq = sc->sc_ac2q[pri];
	if (an->an_ff_buf[pri] == NULL && txq->axq_depth < sc->sc_fftxqmin)
		return 0;
	/*
	 * When not in station mode never aggregate a multicast
	 * frame; this insures, for example, that a combined frame
	 * does not require multiple encryption keys when using
	 * 802.1x/WPA.
	 */
	if (ic->ic_opmode != IEEE80211_M_STA &&
	    ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost))
		return 0;
	/*
	 * Consult the max bursting interval to insure a combined
	 * frame fits within the TxOp window.
	 */
	txoplimit = IEEE80211_TXOP_TO_US(
		ic->ic_wme.wme_chanParams.cap_wmeParams[pri].wmep_txopLimit);
	if (txoplimit != 0 && ath_ff_approx_txtime(sc, an, m) > txoplimit) {
		DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
			"%s: FF TxOp violation\n", __func__);
		if (an->an_ff_buf[pri] != NULL)
			*flushq = 1;
		return 0;
	}
	return 1;		/* try to aggregate */
}

/*
 * Check if the supplied frame can be partnered with an existing
 * or pending frame.  Return a reference to any frame that should be
 * sent on return; otherwise return NULL.
 */
static struct mbuf *
ath_ff_check(struct ath_softc *sc, struct ath_txq *txq,
	struct ath_buf *bf, struct mbuf *m, struct ieee80211_node *ni)
{
	struct ath_node *an = ATH_NODE(ni);
	struct ath_buf *bfstaged;
	int ff_flush, pri;

	/*
	 * Check if the supplied frame can be aggregated.
	 *
	 * NB: we use the txq lock to protect references to
	 *     an->an_ff_txbuf in ath_ff_can_aggregate().
	 */
	ATH_TXQ_LOCK(txq);
	pri = M_WME_GETAC(m);
	if (ath_ff_can_aggregate(sc, an, m, &ff_flush)) {
		/* NOTE(review): shadows the function-scope 'bfstaged' */
		struct ath_buf *bfstaged = an->an_ff_buf[pri];
		if (bfstaged != NULL) {
			/*
			 * A frame is available for partnering; remove
			 * it, chain it to this one, and encapsulate.
			 */
			an->an_ff_buf[pri] = NULL;
			TAILQ_REMOVE(&txq->axq_stageq, bfstaged, bf_stagelist);
			ATH_TXQ_UNLOCK(txq);

			/*
			 * Chain mbufs and add FF magic.
			 */
			DPRINTF(sc, ATH_DEBUG_FF,
				"[%s] aggregate fast-frame, age %u\n",
				ether_sprintf(ni->ni_macaddr), txq->axq_curage);
			m->m_nextpkt = NULL;
			bfstaged->bf_m->m_nextpkt = m;
			m = bfstaged->bf_m;
			bfstaged->bf_m = NULL;
			m->m_flags |= M_FF;
			/*
			 * Release the node reference held while
			 * the packet sat on an_ff_buf[]
			 */
			bfstaged->bf_node = NULL;
			ieee80211_free_node(ni);

			/*
			 * Return bfstaged to the free list.
			 */
			ATH_TXBUF_LOCK(sc);
			STAILQ_INSERT_TAIL(&sc->sc_txbuf, bfstaged, bf_list);
			ATH_TXBUF_UNLOCK(sc);

			return m;		/* ready to go */
		} else {
			/*
			 * No frame available, queue this frame to wait
			 * for a partner.  Note that we hold the buffer
			 * and a reference to the node; we need the
			 * buffer in particular so we're certain we
			 * can flush the frame at a later time.
			 */
			DPRINTF(sc, ATH_DEBUG_FF,
				"[%s] stage fast-frame, age %u\n",
				ether_sprintf(ni->ni_macaddr), txq->axq_curage);

			bf->bf_m = m;
			bf->bf_node = ni;	/* NB: held reference */
			bf->bf_age = txq->axq_curage;
			an->an_ff_buf[pri] = bf;
			TAILQ_INSERT_HEAD(&txq->axq_stageq, bf, bf_stagelist);
			ATH_TXQ_UNLOCK(txq);

			return NULL;		/* consumed */
		}
	}
	/*
	 * Frame could not be aggregated, it needs to be returned
	 * to the caller for immediate transmission.  In addition
	 * we check if we should first flush a frame from the
	 * staging queue before sending this one.
	 *
	 * NB: ath_ff_can_aggregate only marks ff_flush if a frame
	 *     is present to flush.
	 */
	if (ff_flush) {
		int pktlen;

		bfstaged = an->an_ff_buf[pri];
		an->an_ff_buf[pri] = NULL;
		TAILQ_REMOVE(&txq->axq_stageq, bfstaged, bf_stagelist);
		ATH_TXQ_UNLOCK(txq);

		DPRINTF(sc, ATH_DEBUG_FF, "[%s] flush staged frame\n",
			ether_sprintf(an->an_node.ni_macaddr));

		/* encap and xmit */
		bfstaged->bf_m = ieee80211_encap(ni, bfstaged->bf_m);
		if (bfstaged->bf_m == NULL) {
			DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
				"%s: discard, encap failure\n", __func__);
			sc->sc_stats.ast_tx_encap++;
			goto ff_flushbad;
		}
		pktlen = bfstaged->bf_m->m_pkthdr.len;
		if (ath_tx_start(sc, ni, bfstaged, bfstaged->bf_m)) {
			DPRINTF(sc, ATH_DEBUG_XMIT,
				"%s: discard, xmit failure\n", __func__);
	ff_flushbad:
			/*
			 * Unable to transmit frame that was on the staging
			 * queue.  Reclaim the node reference and other
			 * resources.
			 */
			if (ni != NULL)
				ieee80211_free_node(ni);
			bfstaged->bf_node = NULL;
			if (bfstaged->bf_m != NULL) {
				m_freem(bfstaged->bf_m);
				bfstaged->bf_m = NULL;
			}

			ATH_TXBUF_LOCK(sc);
			STAILQ_INSERT_TAIL(&sc->sc_txbuf, bfstaged, bf_list);
			ATH_TXBUF_UNLOCK(sc);
		} else {
#if 0
			ifp->if_opackets++;
#endif
		}
	} else {
		if (an->an_ff_buf[pri] != NULL) {
			/*
			 * XXX: out-of-order condition only occurs for AP
			 * mode and multicast.  There may be no valid way
			 * to get this condition.
			 */
			DPRINTF(sc, ATH_DEBUG_FF, "[%s] out-of-order frame\n",
				ether_sprintf(an->an_node.ni_macaddr));
			/* XXX stat */
		}
		ATH_TXQ_UNLOCK(txq);
	}
	return m;
}

/*
 * Cleanup driver resources when we run out of buffers
 * while processing fragments; return the tx buffers
 * allocated and drop node references.
 */
static void
ath_txfrag_cleanup(struct ath_softc *sc,
	ath_bufhead *frags, struct ieee80211_node *ni)
{
	struct ath_buf *bf, *next;

	ATH_TXBUF_LOCK_ASSERT(sc);

	STAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
		/* NB: bf assumed clean */
		STAILQ_REMOVE_HEAD(frags, bf_list);
		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
		ieee80211_node_decref(ni);
	}
}

/*
 * Setup xmit of a fragmented frame.  Allocate a buffer
 * for each frag and bump the node reference count to
 * reflect the held reference to be setup by ath_tx_start.
 */
static int
ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
	struct mbuf *m0, struct ieee80211_node *ni)
{
	struct mbuf *m;
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
		bf = STAILQ_FIRST(&sc->sc_txbuf);
		if (bf == NULL) {	/* out of buffers, cleanup */
			ath_txfrag_cleanup(sc, frags, ni);
			break;
		}
		STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
		ieee80211_node_incref(ni);
		STAILQ_INSERT_TAIL(frags, bf, bf_list);
	}
	ATH_TXBUF_UNLOCK(sc);

	/* nonzero iff a buffer was reserved for every fragment */
	return !STAILQ_EMPTY(frags);
}

/*
 * Interface transmit entry point: drain if_snd, handle fast-frame
 * staging/aggregation and fragment bookkeeping, and hand each frame
 * to ath_tx_start.
 */
static void
ath_start(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_node *ni;
	struct ath_buf *bf;
	struct mbuf *m, *next;
	struct ath_txq *txq;
	ath_bufhead frags;
	int pri;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	for (;;) {
		/*
		 * Grab a TX buffer and associated resources.
		 */
		ATH_TXBUF_LOCK(sc);
		bf = STAILQ_FIRST(&sc->sc_txbuf);
		if (bf != NULL)
			STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
		ATH_TXBUF_UNLOCK(sc);
		if (bf == NULL) {
			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: out of xmit buffers\n",
				__func__);
			sc->sc_stats.ast_tx_qstop++;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* queue empty; return the unused buffer */
			ATH_TXBUF_LOCK(sc);
			STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
			ATH_TXBUF_UNLOCK(sc);
			break;
		}
		STAILQ_INIT(&frags);
		/* NB: node reference is stashed in rcvif by net80211 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		pri = M_WME_GETAC(m);
		txq = sc->sc_ac2q[pri];
		if (ni->ni_ath_flags & IEEE80211_NODE_FF) {
			/*
			 * Check queue length; if too deep drop this
			 * frame (tail drop considered good).
			 */
			if (txq->axq_depth >= sc->sc_fftxqmax) {
				DPRINTF(sc, ATH_DEBUG_FF,
					"[%s] tail drop on q %u depth %u\n",
					ether_sprintf(ni->ni_macaddr),
					txq->axq_qnum, txq->axq_depth);
				sc->sc_stats.ast_tx_qfull++;
				m_freem(m);
				goto reclaim;
			}
			m = ath_ff_check(sc, txq, bf, m, ni);
			if (m == NULL) {
				/* NB: ni ref & bf held on stageq */
				continue;
			}
		}
		ifp->if_opackets++;
		/*
		 * Encapsulate the packet in prep for transmission.
		 */
		m = ieee80211_encap(ni, m);
		if (m == NULL) {
			DPRINTF(sc, ATH_DEBUG_XMIT,
				"%s: encapsulation failure\n", __func__);
			sc->sc_stats.ast_tx_encap++;
			goto bad;
		}
		/*
		 * Check for fragmentation.  If this frame
		 * has been broken up verify we have enough
		 * buffers to send all the fragments so all
		 * go out or none...
		 */
		if ((m->m_flags & M_FRAG) &&
		    !ath_txfrag_setup(sc, &frags, m, ni)) {
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: out of txfrag buffers\n", __func__);
			ic->ic_stats.is_tx_nobuf++;	/* XXX */
			ath_freetx(m);
			goto bad;
		}
	nextfrag:
		/*
		 * Pass the frame to the h/w for transmission.
		 * Fragmented frames have each frag chained together
		 * with m_nextpkt.  We know there are sufficient ath_buf's
		 * to send all the frags because of work done by
		 * ath_txfrag_setup.  We leave m_nextpkt set while
		 * calling ath_tx_start so it can use it to extend the
		 * the tx duration to cover the subsequent frag and
		 * so it can reclaim all the mbufs in case of an error;
		 * ath_tx_start clears m_nextpkt once it commits to
		 * handing the frame to the hardware.
		 */
		next = m->m_nextpkt;
		if (ath_tx_start(sc, ni, bf, m)) {
	bad:
			ifp->if_oerrors++;
	reclaim:
			bf->bf_m = NULL;
			bf->bf_node = NULL;
			ATH_TXBUF_LOCK(sc);
			STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
			ath_txfrag_cleanup(sc, &frags, ni);
			ATH_TXBUF_UNLOCK(sc);
			if (ni != NULL)
				ieee80211_free_node(ni);
			continue;
		}
		if (next != NULL) {
			/*
			 * Beware of state changing between frags.
			 * XXX check sta power-save state?
			 */
			if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
				DPRINTF(sc, ATH_DEBUG_XMIT,
				    "%s: flush fragmented packet, state %s\n",
				    __func__,
				    ieee80211_state_name[ni->ni_vap->iv_state]);
				ath_freetx(next);
				goto reclaim;
			}
			m = next;
			bf = STAILQ_FIRST(&frags);
			KASSERT(bf != NULL, ("no buf for txfrag"));
			STAILQ_REMOVE_HEAD(&frags, bf_list);
			goto nextfrag;
		}

		ifp->if_timer = 5;	/* arm watchdog */
#if 0
		/*
		 * Flush stale frames from the fast-frame staging queue.
		 */
		if (ic->ic_opmode != IEEE80211_M_STA)
			ath_ff_stageq_flush(sc, txq, ath_ff_ageflushtestdone);
#endif
	}
}

/*
 * Media-change method installed via ieee80211_vap_attach.
 */
static int
ath_media_change(struct ifnet *ifp)
{
	int error = ieee80211_media_change(ifp);
	/* NB: only the fixed rate can change and that doesn't need a reset */
	return (error == ENETRESET ? 0 : error);
}

#ifdef ATH_DEBUG
/*
 * Debug helper: pretty-print a hal key cache entry.
 */
static void
ath_keyprint(struct ath_softc *sc, const char *tag, u_int ix,
	const HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
{
	static const char *ciphers[] = {
		"WEP",
		"AES-OCB",
		"AES-CCM",
		"CKIP",
		"TKIP",
		"CLR",
	};
	int i, n;

	/* NB: assumes kv_type < 6; no bounds check on ciphers[] here */
	printf("%s: [%02u] %-7s ", tag, ix, ciphers[hk->kv_type]);
	for (i = 0, n = hk->kv_len; i < n; i++)
		printf("%02x", hk->kv_val[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->kv_type == HAL_CIPHER_TKIP) {
		printf(" %s ", sc->sc_splitmic ? "mic" : "rxmic");
		for (i = 0; i < sizeof(hk->kv_mic); i++)
			printf("%02x", hk->kv_mic[i]);
		if (!sc->sc_splitmic) {
			printf(" txmic ");
			for (i = 0; i < sizeof(hk->kv_txmic); i++)
				printf("%02x", hk->kv_txmic[i]);
		}
	}
	printf("\n");
}
#endif

/*
 * Set a TKIP key into the hardware.  This handles the
 * potential distribution of key state to multiple key
 * cache slots for TKIP.
 */
static int
ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k,
	HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
{
#define	IEEE80211_KEY_XR	(IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV)
	static const u_int8_t zerobssid[IEEE80211_ADDR_LEN];
	struct ath_hal *ah = sc->sc_ah;

	KASSERT(k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP,
		("got a non-TKIP key, cipher %u", k->wk_cipher->ic_cipher));
	if ((k->wk_flags & IEEE80211_KEY_XR) == IEEE80211_KEY_XR) {
		if (sc->sc_splitmic) {
			/*
			 * TX key goes at first index, RX key at the rx index.
			 * The hal handles the MIC keys at index+64.
			 */
			memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_mic));
			KEYPRINTF(sc, k->wk_keyix, hk, zerobssid);
			if (!ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid))
				return 0;

			memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
			KEYPRINTF(sc, k->wk_keyix+32, hk, mac);
			/* XXX delete tx key on failure? */
			return ath_hal_keyset(ah, k->wk_keyix+32, hk, mac);
		} else {
			/*
			 * Room for both TX+RX MIC keys in one key cache
			 * slot, just set key at the first index; the hal
			 * will handle the rest.
			 */
			memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
			memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
			KEYPRINTF(sc, k->wk_keyix, hk, mac);
			return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
		}
	} else if (k->wk_flags & IEEE80211_KEY_XMIT) {
		if (sc->sc_splitmic) {
			/*
			 * NB: must pass MIC key in expected location when
			 * the keycache only holds one MIC key per entry.
			 */
			memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_txmic));
		} else
			memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
		KEYPRINTF(sc, k->wk_keyix, hk, mac);
		return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
	} else if (k->wk_flags & IEEE80211_KEY_RECV) {
		/* RX-only key: only the receive MIC is needed */
		memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
		KEYPRINTF(sc, k->wk_keyix, hk, mac);
		return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
	}
	/* neither XMIT nor RECV set: nothing to install */
	return 0;
#undef IEEE80211_KEY_XR
}

/*
 * Set a net80211 key into the hardware.  This handles the
 * potential distribution of key state to multiple key
 * cache slots for TKIP with hardware MIC support.
 */
static int
ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
	struct ieee80211_node *bss)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	/* map net80211 cipher numbers to HAL cipher codes */
	static const u_int8_t ciphermap[] = {
		HAL_CIPHER_WEP,		/* IEEE80211_CIPHER_WEP */
		HAL_CIPHER_TKIP,	/* IEEE80211_CIPHER_TKIP */
		HAL_CIPHER_AES_OCB,	/* IEEE80211_CIPHER_AES_OCB */
		HAL_CIPHER_AES_CCM,	/* IEEE80211_CIPHER_AES_CCM */
		(u_int8_t) -1,		/* 4 is not allocated */
		HAL_CIPHER_CKIP,	/* IEEE80211_CIPHER_CKIP */
		HAL_CIPHER_CLR,		/* IEEE80211_CIPHER_NONE */
	};
	struct ath_hal *ah = sc->sc_ah;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	u_int8_t gmac[IEEE80211_ADDR_LEN];
	const u_int8_t *mac;
	HAL_KEYVAL hk;

	memset(&hk, 0, sizeof(hk));
	/*
	 * Software crypto uses a "clear key" so non-crypto
	 * state kept in the key cache are maintained and
	 * so that rx frames have an entry to match.
	 */
	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
		KASSERT(cip->ic_cipher < N(ciphermap),
			("invalid cipher type %u", cip->ic_cipher));
		hk.kv_type = ciphermap[cip->ic_cipher];
		hk.kv_len = k->wk_keylen;
		memcpy(hk.kv_val, k->wk_key, k->wk_keylen);
	} else
		hk.kv_type = HAL_CIPHER_CLR;

	if ((k->wk_flags & IEEE80211_KEY_GROUP) && sc->sc_mcastkey) {
		/*
		 * Group keys on hardware that supports multicast frame
		 * key search use a mac that is the sender's address with
		 * the high bit set instead of the app-specified address.
		 */
		IEEE80211_ADDR_COPY(gmac, bss->ni_macaddr);
		gmac[0] |= 0x80;
		mac = gmac;
	} else
		mac = k->wk_macaddr;

	if (hk.kv_type == HAL_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
		/* hardware MIC: may need multiple key cache slots */
		return ath_keyset_tkip(sc, k, &hk, mac);
	} else {
		KEYPRINTF(sc, k->wk_keyix, &hk, mac);
		return ath_hal_keyset(ah, k->wk_keyix, &hk, mac);
	}
#undef N
}

/*
 * Allocate tx/rx key slots for TKIP.  We allocate two slots for
 * each key, one for decrypt/encrypt and the other for the MIC.
 */
static u_int16_t
key_alloc_2pair(struct ath_softc *sc,
	ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	u_int i, keyix;

	KASSERT(sc->sc_splitmic, ("key cache !split"));
	/* XXX could optimize */
	/*
	 * Scan the first quarter of the keymap; a candidate slot
	 * keyix is usable only if keyix+32 (RX key), keyix+64
	 * (TX MIC) and keyix+32+64 (RX MIC) are also free.
	 */
	for (i = 0; i < N(sc->sc_keymap)/4; i++) {
		u_int8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots in this byte are free.
			 */
			keyix = i*NBBY;
			while (b & 1) {
		again:
				keyix++;
				b >>= 1;
			}
			/* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
			if (isset(sc->sc_keymap, keyix+32) ||
			    isset(sc->sc_keymap, keyix+64) ||
			    isset(sc->sc_keymap, keyix+32+64)) {
				/* full pair unavailable */
				/* XXX statistic */
				if (keyix == (i+1)*NBBY) {
					/* no slots were appropriate, advance */
					continue;
				}
				goto again;
			}
			/* claim all four related slots */
			setbit(sc->sc_keymap, keyix);
			setbit(sc->sc_keymap, keyix+64);
			setbit(sc->sc_keymap, keyix+32);
			setbit(sc->sc_keymap, keyix+32+64);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
				"%s: key pair %u,%u %u,%u\n",
				__func__, keyix, keyix+64,
				keyix+32, keyix+32+64);
			*txkeyix = keyix;
			*rxkeyix = keyix+32;
			return 1;
		}
	}
	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
	return 0;
#undef N
}

/*
 * Allocate tx/rx key slots for TKIP.
 We allocate two slots for
 * each key, one for decrypt/encrypt and the other for the MIC.
 */
static u_int16_t
key_alloc_pair(struct ath_softc *sc,
	ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	u_int i, keyix;

	KASSERT(!sc->sc_splitmic, ("key cache split"));
	/* XXX could optimize */
	/*
	 * Non-split MIC hardware: need keyix (cipher key) and
	 * keyix+64 (MIC key) free; tx and rx share the same slot.
	 */
	for (i = 0; i < N(sc->sc_keymap)/4; i++) {
		u_int8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots in this byte are free.
			 */
			keyix = i*NBBY;
			while (b & 1) {
		again:
				keyix++;
				b >>= 1;
			}
			if (isset(sc->sc_keymap, keyix+64)) {
				/* full pair unavailable */
				/* XXX statistic */
				if (keyix == (i+1)*NBBY) {
					/* no slots were appropriate, advance */
					continue;
				}
				goto again;
			}
			setbit(sc->sc_keymap, keyix);
			setbit(sc->sc_keymap, keyix+64);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
				"%s: key pair %u,%u\n",
				__func__, keyix, keyix+64);
			*txkeyix = *rxkeyix = keyix;
			return 1;
		}
	}
	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
	return 0;
#undef N
}

/*
 * Allocate a single key cache slot.
 */
static int
key_alloc_single(struct ath_softc *sc,
	ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	u_int i, keyix;

	/* XXX try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
	for (i = 0; i < N(sc->sc_keymap); i++) {
		u_int8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots are free.
			 */
			keyix = i*NBBY;
			while (b & 1)
				keyix++, b >>= 1;
			setbit(sc->sc_keymap, keyix);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key %u\n",
				__func__, keyix);
			*txkeyix = *rxkeyix = keyix;
			return 1;
		}
	}
	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of space\n", __func__);
	return 0;
#undef N
}

/*
 * Allocate one or more key cache slots for a unicast key.  The
 * key itself is needed only to identify the cipher.  For hardware
 * TKIP with split cipher+MIC keys we allocate two key cache slot
 * pairs so that we can setup separate TX and RX MIC keys.  Note
 * that the MIC key for a TKIP key at slot i is assumed by the
 * hardware to be at slot i+64.  This limits TKIP keys to the first
 * 64 entries.
 */
static int
ath_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
{
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;

	/*
	 * Group key allocation must be handled specially for
	 * parts that do not support multicast key cache search
	 * functionality.  For those parts the key id must match
	 * the h/w key index so lookups find the right key.  On
	 * parts w/ the key search facility we install the sender's
	 * mac address (with the high bit set) and let the hardware
	 * find the key w/o using the key id.  This is preferred as
	 * it permits us to support multiple users for adhoc and/or
	 * multi-station operation.
	 */
	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||	/* global key */
	    ((k->wk_flags & IEEE80211_KEY_GROUP) && !sc->sc_mcastkey)) {
		if (!(&vap->iv_nw_keys[0] <= k &&
		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
			/* should not happen */
			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
				"%s: bogus group key\n", __func__);
			return 0;
		}
		/*
		 * XXX we pre-allocate the global keys so
		 * have no way to check if they've already been allocated.
		 */
		/* global keys map 1:1 onto slots 0..IEEE80211_WEP_NKID-1 */
		*keyix = *rxkeyix = k - vap->iv_nw_keys;
		return 1;
	}

	/*
	 * We allocate two pair for TKIP when using the h/w to do
	 * the MIC.  For everything else, including software crypto,
	 * we allocate a single entry.  Note that s/w crypto requires
	 * a pass-through slot on the 5211 and 5212.  The 5210 does
	 * not support pass-through cache entries and we map all
	 * those requests to slot 0.
	 */
	if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
		return key_alloc_single(sc, keyix, rxkeyix);
	} else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
		if (sc->sc_splitmic)
			return key_alloc_2pair(sc, keyix, rxkeyix);
		else
			return key_alloc_pair(sc, keyix, rxkeyix);
	} else {
		return key_alloc_single(sc, keyix, rxkeyix);
	}
}

/*
 * Delete an entry in the key cache allocated by ath_key_alloc.
 */
static int
ath_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	u_int keyix = k->wk_keyix;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, keyix);

	ath_hal_keyreset(ah, keyix);
	/*
	 * Handle split tx/rx keying required for TKIP with h/w MIC.
	 */
	if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
		ath_hal_keyreset(ah, keyix+32);		/* RX key */
	if (keyix >= IEEE80211_WEP_NKID) {
		/*
		 * Don't touch keymap entries for global keys so
		 * they are never considered for dynamic allocation.
		 */
		clrbit(sc->sc_keymap, keyix);
		if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
		    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
			clrbit(sc->sc_keymap, keyix+64);	/* TX key MIC */
			if (sc->sc_splitmic) {
				/* +32 for RX key, +32+64 for RX key MIC */
				clrbit(sc->sc_keymap, keyix+32);
				clrbit(sc->sc_keymap, keyix+32+64);
			}
		}
	}
	return 1;
}

/*
 * Set the key cache contents for the specified key.  Key cache
 * slot(s) must already have been allocated by ath_key_alloc.
 */
static int
ath_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
	const u_int8_t mac[IEEE80211_ADDR_LEN])
{
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;

	/* NB: mac is unused; ath_keyset derives it from the key/bss */
	return ath_keyset(sc, k, vap->iv_bss);
}

/*
 * Block/unblock tx+rx processing while a key change is done.
 * We assume the caller serializes key management operations
 * so we only need to worry about synchronization with other
 * uses that originate in the driver.
 */
static void
ath_key_update_begin(struct ieee80211vap *vap)
{
	struct ifnet *ifp = vap->iv_ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	/* stop deferred rx/tx processing and hold off new tx */
	taskqueue_block(sc->sc_tq);
	IF_LOCK(&ifp->if_snd);		/* NB: doesn't block mgmt frames */
}

static void
ath_key_update_end(struct ieee80211vap *vap)
{
	struct ifnet *ifp = vap->iv_ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	/* release in reverse order of ath_key_update_begin */
	IF_UNLOCK(&ifp->if_snd);
	taskqueue_unblock(sc->sc_tq);
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o accept PHY error frames when hardware doesn't have MIB support
 *   to count and we need them for ANI (sta mode only at the moment)
 *   and we are not scanning (ANI is disabled)
 *   NB: only with recent hal's; older hal's add rx filter bits out
 *       of sight and we need to blindly preserve them
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode
 *   - when in monitor mode
 *   - if interface marked PROMISC (assumes bridge setting is filtered)
 * o accept beacons:
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when scanning
 *   - when doing s/w beacon miss (e.g. for ap+sta)
 *   - when operating in ap mode in 11g to detect overlapping bss that
 *     require protection
 * o accept control frames:
 *   - when in monitor mode
 * XXX BAR frames for 11n
 * XXX HT protection for 11n
 */
static u_int32_t
ath_calcrxfilter(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	u_int32_t rfilt;

#if HAL_ABI_VERSION < 0x08011600
	/* older hal: preserve PHY error/radar bits it may have set */
	rfilt = (ath_hal_getrxfilter(sc->sc_ah) &
		(HAL_RX_FILTER_PHYRADAR | HAL_RX_FILTER_PHYERR))
	      | HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
#else
	rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
	if (ic->ic_opmode == IEEE80211_M_STA &&
	    !sc->sc_needmib && !sc->sc_scanning)
		rfilt |= HAL_RX_FILTER_PHYERR;
#endif
	if (ic->ic_opmode != IEEE80211_M_STA)
		rfilt |= HAL_RX_FILTER_PROBEREQ;
	if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC))
		rfilt |= HAL_RX_FILTER_PROM;
	if (ic->ic_opmode == IEEE80211_M_STA ||
	    ic->ic_opmode == IEEE80211_M_IBSS ||
	    sc->sc_swbmiss || sc->sc_scanning)
		rfilt |= HAL_RX_FILTER_BEACON;
	/*
	 * NB: We don't recalculate the rx filter when
	 * ic_protmode changes; otherwise we could do
	 * this only when ic_protmode != NONE.
	 */
	if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
	    IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
		rfilt |= HAL_RX_FILTER_BEACON;
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		rfilt |= HAL_RX_FILTER_CONTROL;
	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n",
	    __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags);
	return rfilt;
}

/*
 * Callback from net80211 when promiscuous state changes;
 * recompute and install the hardware rx filter.
 */
static void
ath_update_promisc(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	u_int32_t rfilt;

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath_hal_setrxfilter(sc->sc_ah, rfilt);

	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
}

/*
 * Callback from net80211 when the multicast address list changes;
 * recompute the 64-bit hardware multicast hash filter.
 */
static void
ath_update_mcast(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	u_int32_t mfilt[2];

	/* calculate and install multicast filter */
	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		struct ifmultiaddr *ifma;
		/*
		 * Merge multicast addresses to form the hardware filter.
		 */
		mfilt[0] = mfilt[1] = 0;
		IF_ADDR_LOCK(ifp);	/* XXX need some fiddling to remove?
 */
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			caddr_t dl;
			u_int32_t val;
			u_int8_t pos;

			/* calculate XOR of eight 6bit values */
			dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
			val = LE_READ_4(dl + 0);
			pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			val = LE_READ_4(dl + 3);
			pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			pos &= 0x3f;
			/* 6-bit hash selects one of 64 filter bits */
			mfilt[pos / 32] |= (1 << (pos % 32));
		}
		IF_ADDR_UNLOCK(ifp);
	} else
		mfilt[0] = mfilt[1] = ~0;	/* accept all multicast */
	ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
	DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
	    __func__, mfilt[0], mfilt[1]);
}

/*
 * Program the rx filter, operating mode, station mac address
 * and multicast filter into the hardware.
 */
static void
ath_mode_init(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t rfilt;

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath_hal_setrxfilter(ah, rfilt);

	/* configure operational mode */
	ath_hal_setopmode(ah);

	/*
	 * Handle any link-level address change.  Note that we only
	 * need to force ic_myaddr; any other addresses are handled
	 * as a byproduct of the ifnet code marking the interface
	 * down then up.
	 *
	 * XXX should get from lladdr instead of arpcom but that's more work
	 */
	IEEE80211_ADDR_COPY(ic->ic_myaddr, IF_LLADDR(ifp));
	ath_hal_setmac(ah, ic->ic_myaddr);

	/* calculate and install multicast filter */
	ath_update_mcast(ifp);
}

/*
 * Set the slot time based on the current setting.
 */
static void
ath_setslottime(struct ath_softc *sc)
{
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	u_int usec;

	if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
		usec = 13;	/* half-rate channel */
	else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
		usec = 21;	/* quarter-rate channel */
	else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
		/* honor short/long slot time only in 11g */
		/* XXX shouldn't honor on pure g or turbo g channel */
		if (ic->ic_flags & IEEE80211_F_SHSLOT)
			usec = HAL_SLOT_TIME_9;
		else
			usec = HAL_SLOT_TIME_20;
	} else
		usec = HAL_SLOT_TIME_9;

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);

	ath_hal_setslottime(ah, usec);
	sc->sc_updateslot = OK;		/* no update pending */
}

/*
 * Callback from the 802.11 layer to update the
 * slot time based on the current setting.
 */
static void
ath_updateslot(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;

	/*
	 * When not coordinating the BSS, change the hardware
	 * immediately.  For other operation we defer the change
	 * until beacon updates have propagated to the stations.
	 */
	if (ic->ic_opmode == IEEE80211_M_HOSTAP)
		sc->sc_updateslot = UPDATE;	/* applied in ath_beacon_proc */
	else
		ath_setslottime(sc);
}

/*
 * Setup a h/w transmit queue for beacons.
 */
static int
ath_beaconq_setup(struct ath_hal *ah)
{
	HAL_TXQ_INFO qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
	/* NB: for dynamic turbo, don't enable any other interrupts */
	qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE;
	return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
}

/*
 * Setup the transmit queue parameters for the beacon queue.
 */
static int
ath_beaconq_config(struct ath_softc *sc)
{
#define	ATH_EXPONENT_TO_VALUE(v)	((1<<(v))-1)
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;

	ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
	if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
		/*
		 * Always burst out beacon and CAB traffic.
		 */
		qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
		qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT;
		qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT;
	} else {
		struct wmeParams *wmep =
			&ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
		/*
		 * Adhoc mode; important thing is to use 2x cwmin.
		 */
		qi.tqi_aifs = wmep->wmep_aifsn;
		qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
		qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
	}

	if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
		device_printf(sc->sc_dev, "unable to update parameters for "
			"beacon hardware queue!\n");
		return 0;
	} else {
		ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
		return 1;
	}
#undef ATH_EXPONENT_TO_VALUE
}

/*
 * Allocate and setup an initial beacon frame.
 */
static int
ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_buf *bf;
	struct mbuf *m;
	int error;

	bf = avp->av_bcbuf;
	/* release any previously-held beacon frame and node ref */
	if (bf->bf_m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}
	if (bf->bf_node != NULL) {
		ieee80211_free_node(bf->bf_node);
		bf->bf_node = NULL;
	}

	/*
	 * NB: the beacon data buffer must be 32-bit aligned;
	 * we assume the mbuf routines will return us something
	 * with this alignment (perhaps should assert).
	 */
	m = ieee80211_beacon_alloc(ni, &avp->av_boff);
	if (m == NULL) {
		device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__);
		sc->sc_stats.ast_be_nombuf++;
		return ENOMEM;
	}
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n",
		    __func__, error);
		m_freem(m);
		return error;
	}

	/*
	 * Calculate a TSF adjustment factor required for staggered
	 * beacons.  Note that we assume the format of the beacon
	 * frame leaves the tstamp field immediately following the
	 * header.
	 */
	if (sc->sc_stagbeacons && avp->av_bslot > 0) {
		uint64_t tsfadjust;
		struct ieee80211_frame *wh;

		/*
		 * The beacon interval is in TU's; the TSF is in usecs.
		 * We figure out how many TU's to add to align the timestamp
		 * then convert to TSF units and handle byte swapping before
		 * inserting it in the frame.  The hardware will then add this
		 * each time a beacon frame is sent.  Note that we align vap's
		 * 1..N and leave vap 0 untouched.  This means vap 0 has a
		 * timestamp in one beacon interval while the others get a
		 * timestamp aligned to the next interval.
		 */
		tsfadjust = ni->ni_intval *
		    (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF;
		tsfadjust = htole64(tsfadjust << 10);	/* TU -> TSF */

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: %s beacons bslot %d intval %u tsfadjust %llu\n",
		    __func__, sc->sc_stagbeacons ? "stagger" : "burst",
		    avp->av_bslot, ni->ni_intval,
		    (long long unsigned) le64toh(tsfadjust));

		/* tstamp lives immediately after the 802.11 header */
		wh = mtod(m, struct ieee80211_frame *);
		memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust));
	}
	bf->bf_m = m;
	bf->bf_node = ieee80211_ref_node(ni);

	return 0;
}

/*
 * Setup the beacon frame for transmit.
 */
static void
ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
{
#define	USE_SHPREAMBLE(_ic) \
	(((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
		== IEEE80211_F_SHPREAMBLE)
	struct ieee80211_node *ni = bf->bf_node;
	struct ieee80211com *ic = ni->ni_ic;
	struct mbuf *m = bf->bf_m;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	int flags, antenna;
	const HAL_RATE_TABLE *rt;
	u_int8_t rix, rate;

	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
		__func__, m, m->m_len);

	/* setup descriptors */
	ds = bf->bf_desc;

	flags = HAL_TXDESC_NOACK;
	if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
		ds->ds_link = bf->bf_daddr;	/* self-linked */
		flags |= HAL_TXDESC_VEOL;
		/*
		 * Let hardware handle antenna switching.
		 */
		antenna = sc->sc_txantenna;
	} else {
		ds->ds_link = 0;
		/*
		 * Switch antenna every 4 beacons.
		 * XXX assumes two antennae
		 */
		if (sc->sc_txantenna != 0)
			antenna = sc->sc_txantenna;
		else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0)
			antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1);
		else
			antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
	}

	KASSERT(bf->bf_nseg == 1,
		("multi-segment beacon frame; nseg %u", bf->bf_nseg));
	ds->ds_data = bf->bf_segs[0].ds_addr;
	/*
	 * Calculate rate code.
	 * XXX everything at min xmit rate
	 */
	rix = 0;
	rt = sc->sc_currates;
	rate = rt->info[rix].rateCode;
	if (USE_SHPREAMBLE(ic))
		rate |= rt->info[rix].shortPreamble;
	ath_hal_setuptxdesc(ah, ds
		, m->m_len + IEEE80211_CRC_LEN	/* frame length */
		, sizeof(struct ieee80211_frame)/* header length */
		, HAL_PKT_TYPE_BEACON		/* Atheros packet type */
		, ni->ni_txpower		/* txpower XXX */
		, rate, 1			/* series 0 rate/tries */
		, HAL_TXKEYIX_INVALID		/* no encryption */
		, antenna			/* antenna mode */
		, flags				/* no ack, veol for beacons */
		, 0				/* rts/cts rate */
		, 0				/* rts/cts duration */
	);
	/* NB: beacon's BufLen must be a multiple of 4 bytes */
	ath_hal_filltxdesc(ah, ds
		, roundup(m->m_len, 4)		/* buffer length */
		, AH_TRUE			/* first segment */
		, AH_TRUE			/* last segment */
		, ds				/* first descriptor */
	);
#if 0
	ath_desc_swap(ds);
#endif
#undef USE_SHPREAMBLE
}

/*
 * Callback from net80211 when a dynamic beacon element changes;
 * mark the item so the next ath_beacon_generate refreshes it.
 */
static void
ath_beacon_update(struct ieee80211vap *vap, int item)
{
	struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff;

	setbit(bo->bo_flags, item);
}

/*
 * Append the contents of src to dst; both queues
 * are assumed to be locked.
 */
static void
ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
{
	STAILQ_CONCAT(&dst->axq_q, &src->axq_q);
	dst->axq_link = src->axq_link;
	src->axq_link = NULL;
	dst->axq_depth += src->axq_depth;
	src->axq_depth = 0;
}

/*
 * Transmit a beacon frame at SWBA.  Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
 */
static void
ath_beacon_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211vap *vap;
	struct ath_buf *bf;
	int slot, otherant;
	uint32_t bfaddr;

	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n",
		__func__, pending);
	/*
	 * Check if the previous beacon has gone out.  If
	 * not don't try to post another, skip this period
	 * and wait for the next.  Missed beacons indicate
	 * a problem and should not occur.  If we miss too
	 * many consecutive beacons reset the device.
	 */
	if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
		sc->sc_bmisscount++;
		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: missed %u consecutive beacons\n",
			__func__, sc->sc_bmisscount);
		if (sc->sc_bmisscount > 3)		/* NB: 3 is a guess */
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
		return;
	}
	if (sc->sc_bmisscount != 0) {
		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: resume beacon xmit after %u misses\n",
			__func__, sc->sc_bmisscount);
		sc->sc_bmisscount = 0;
	}

	if (sc->sc_stagbeacons) {			/* staggered beacons */
		struct ieee80211com *ic = sc->sc_ifp->if_l2com;
		uint32_t tsftu;

		/* pick the vap whose slot this TU falls in */
		tsftu = ath_hal_gettsf32(ah) >> 10;
		/* XXX lintval */
		slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval;
		vap = sc->sc_bslot[(slot+1) % ATH_BCBUF];
		bfaddr = 0;
		if (vap != NULL && vap->iv_state == IEEE80211_S_RUN) {
			bf = ath_beacon_generate(sc, vap);
			if (bf != NULL)
				bfaddr = bf->bf_daddr;
		}
	} else {					/* burst'd beacons */
		uint32_t *bflink = &bfaddr;

		/* chain all running vaps' beacons via ds_link */
		for (slot = 0; slot < ATH_BCBUF; slot++) {
			vap = sc->sc_bslot[slot];
			if (vap != NULL && vap->iv_state == IEEE80211_S_RUN) {
				bf = ath_beacon_generate(sc, vap);
				if (bf != NULL) {
					*bflink = bf->bf_daddr;
					bflink = &bf->bf_desc->ds_link;
				}
			}
		}
		*bflink = 0;				/* terminate list */
	}

	/*
	 * Handle slot time change when a non-ERP station joins/leaves
	 * an 11g network.  The 802.11 layer notifies us via callback,
	 * we mark updateslot, then wait one beacon before effecting
	 * the change.  This gives associated stations at least one
	 * beacon interval to note the state change.
	 */
	/* XXX locking */
	if (sc->sc_updateslot == UPDATE) {
		sc->sc_updateslot = COMMIT;	/* commit next beacon */
		sc->sc_slotupdate = slot;
	} else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
		ath_setslottime(sc);		/* commit change to h/w */

	/*
	 * Check recent per-antenna transmit statistics and flip
	 * the default antenna if noticeably more frames went out
	 * on the non-default antenna.
	 * XXX assumes 2 antennae
	 */
	if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) {
		otherant = sc->sc_defant & 1 ? 2 : 1;
		if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
			ath_setdefantenna(sc, otherant);
		sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
	}

	if (bfaddr != 0) {
		/*
		 * Stop any current dma and put the new frame on the queue.
		 * This should never fail since we check above that no frames
		 * are still pending on the queue.
		 */
		if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: beacon queue %u did not stop?\n",
				__func__, sc->sc_bhalq);
		}
		/* NB: cabq traffic should already be queued and primed */
		ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr);
		ath_hal_txstart(ah, sc->sc_bhalq);

		sc->sc_stats.ast_be_xmit++;
	}
}

static struct ath_buf *
ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_txq *cabq = sc->sc_cabq;
	struct ath_buf *bf;
	struct mbuf *m;
	int nmcastq, error;

	KASSERT(vap->iv_state == IEEE80211_S_RUN,
	    ("not running, state %d", vap->iv_state));
	KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));

	/*
	 * Update dynamic beacon contents.  If this returns
	 * non-zero then we need to remap the memory because
	 * the beacon frame changed size (probably because
	 * of the TIM bitmap).
3128 */ 3129 bf = avp->av_bcbuf; 3130 m = bf->bf_m; 3131 nmcastq = avp->av_mcastq.axq_depth; 3132 if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) { 3133 /* XXX too conservative? */ 3134 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3135 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, 3136 bf->bf_segs, &bf->bf_nseg, 3137 BUS_DMA_NOWAIT); 3138 if (error != 0) { 3139 if_printf(vap->iv_ifp, 3140 "%s: bus_dmamap_load_mbuf_sg failed, error %u\n", 3141 __func__, error); 3142 return NULL; 3143 } 3144 } 3145 if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) { 3146 DPRINTF(sc, ATH_DEBUG_BEACON, 3147 "%s: cabq did not drain, mcastq %u cabq %u\n", 3148 __func__, nmcastq, cabq->axq_depth); 3149 sc->sc_stats.ast_cabq_busy++; 3150 if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) { 3151 /* 3152 * CABQ traffic from a previous vap is still pending. 3153 * We must drain the q before this beacon frame goes 3154 * out as otherwise this vap's stations will get cab 3155 * frames from a different vap. 3156 * XXX could be slow causing us to miss DBA 3157 */ 3158 ath_tx_draintxq(sc, cabq); 3159 } 3160 } 3161 ath_beacon_setup(sc, bf); 3162 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 3163 3164 /* 3165 * Enable the CAB queue before the beacon queue to 3166 * insure cab frames are triggered by this beacon. 3167 */ 3168 if (avp->av_boff.bo_tim[4] & 1) { 3169 struct ath_hal *ah = sc->sc_ah; 3170 3171 /* NB: only at DTIM */ 3172 ATH_TXQ_LOCK(cabq); 3173 ATH_TXQ_LOCK(&avp->av_mcastq); 3174 if (nmcastq) { 3175 struct ath_buf *bfm; 3176 3177 /* 3178 * Move frames from the s/w mcast q to the h/w cab q. 
3179 * XXX MORE_DATA bit 3180 */ 3181 bfm = STAILQ_FIRST(&avp->av_mcastq.axq_q); 3182 if (cabq->axq_link != NULL) { 3183 *cabq->axq_link = bfm->bf_daddr; 3184 } else 3185 ath_hal_puttxbuf(ah, cabq->axq_qnum, 3186 bfm->bf_daddr); 3187 ath_txqmove(cabq, &avp->av_mcastq); 3188 3189 sc->sc_stats.ast_cabq_xmit += nmcastq; 3190 } 3191 /* NB: gated by beacon so safe to start here */ 3192 ath_hal_txstart(ah, cabq->axq_qnum); 3193 ATH_TXQ_UNLOCK(cabq); 3194 ATH_TXQ_UNLOCK(&avp->av_mcastq); 3195 } 3196 return bf; 3197} 3198 3199static void 3200ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap) 3201{ 3202 struct ath_vap *avp = ATH_VAP(vap); 3203 struct ath_hal *ah = sc->sc_ah; 3204 struct ath_buf *bf; 3205 struct mbuf *m; 3206 int error; 3207 3208 KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer")); 3209 3210 /* 3211 * Update dynamic beacon contents. If this returns 3212 * non-zero then we need to remap the memory because 3213 * the beacon frame changed size (probably because 3214 * of the TIM bitmap). 3215 */ 3216 bf = avp->av_bcbuf; 3217 m = bf->bf_m; 3218 if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) { 3219 /* XXX too conservative? */ 3220 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3221 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, 3222 bf->bf_segs, &bf->bf_nseg, 3223 BUS_DMA_NOWAIT); 3224 if (error != 0) { 3225 if_printf(vap->iv_ifp, 3226 "%s: bus_dmamap_load_mbuf_sg failed, error %u\n", 3227 __func__, error); 3228 return; 3229 } 3230 } 3231 ath_beacon_setup(sc, bf); 3232 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 3233 3234 /* NB: caller is known to have already stopped tx dma */ 3235 ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr); 3236 ath_hal_txstart(ah, sc->sc_bhalq); 3237} 3238 3239/* 3240 * Reset the hardware after detecting beacons have stopped. 
3241 */ 3242static void 3243ath_bstuck_proc(void *arg, int pending) 3244{ 3245 struct ath_softc *sc = arg; 3246 struct ifnet *ifp = sc->sc_ifp; 3247 3248 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n", 3249 sc->sc_bmisscount); 3250 ath_reset(ifp); 3251} 3252 3253/* 3254 * Reclaim beacon resources and return buffer to the pool. 3255 */ 3256static void 3257ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf) 3258{ 3259 3260 if (bf->bf_m != NULL) { 3261 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3262 m_freem(bf->bf_m); 3263 bf->bf_m = NULL; 3264 } 3265 if (bf->bf_node != NULL) { 3266 ieee80211_free_node(bf->bf_node); 3267 bf->bf_node = NULL; 3268 } 3269 STAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list); 3270} 3271 3272/* 3273 * Reclaim beacon resources. 3274 */ 3275static void 3276ath_beacon_free(struct ath_softc *sc) 3277{ 3278 struct ath_buf *bf; 3279 3280 STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) { 3281 if (bf->bf_m != NULL) { 3282 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3283 m_freem(bf->bf_m); 3284 bf->bf_m = NULL; 3285 } 3286 if (bf->bf_node != NULL) { 3287 ieee80211_free_node(bf->bf_node); 3288 bf->bf_node = NULL; 3289 } 3290 } 3291} 3292 3293/* 3294 * Configure the beacon and sleep timers. 3295 * 3296 * When operating as an AP this resets the TSF and sets 3297 * up the hardware to notify us when we need to issue beacons. 3298 * 3299 * When operating in station mode this sets up the beacon 3300 * timers according to the timestamp of the last received 3301 * beacon and the current TSF, configures PCF and DTIM 3302 * handling, programs the sleep registers so the hardware 3303 * will wakeup in time to receive beacons, and configures 3304 * the beacon miss handling so we'll receive a BMISS 3305 * interrupt when we stop seeing beacons from the AP 3306 * we've associated with. 
3307 */ 3308static void 3309ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap) 3310{ 3311#define TSF_TO_TU(_h,_l) \ 3312 ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10)) 3313#define FUDGE 2 3314 struct ath_hal *ah = sc->sc_ah; 3315 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 3316 struct ieee80211_node *ni; 3317 u_int32_t nexttbtt, intval, tsftu; 3318 u_int64_t tsf; 3319 3320 if (vap == NULL) 3321 vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */ 3322 ni = vap->iv_bss; 3323 3324 /* extract tstamp from last beacon and convert to TU */ 3325 nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4), 3326 LE_READ_4(ni->ni_tstamp.data)); 3327 if (ic->ic_opmode == IEEE80211_M_HOSTAP) { 3328 /* 3329 * For multi-bss ap support beacons are either staggered 3330 * evenly over N slots or burst together. For the former 3331 * arrange for the SWBA to be delivered for each slot. 3332 * Slots that are not occupied will generate nothing. 3333 */ 3334 /* NB: the beacon interval is kept internally in TU's */ 3335 intval = ni->ni_intval & HAL_BEACON_PERIOD; 3336 if (sc->sc_stagbeacons) 3337 intval /= ATH_BCBUF; 3338 } else { 3339 /* NB: the beacon interval is kept internally in TU's */ 3340 intval = ni->ni_intval & HAL_BEACON_PERIOD; 3341 } 3342 if (nexttbtt == 0) /* e.g. for ap mode */ 3343 nexttbtt = intval; 3344 else if (intval) /* NB: can be 0 for monitor mode */ 3345 nexttbtt = roundup(nexttbtt, intval); 3346 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n", 3347 __func__, nexttbtt, intval, ni->ni_intval); 3348 if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) { 3349 HAL_BEACON_STATE bs; 3350 int dtimperiod, dtimcount; 3351 int cfpperiod, cfpcount; 3352 3353 /* 3354 * Setup dtim and cfp parameters according to 3355 * last beacon we received (which may be none). 
3356 */ 3357 dtimperiod = ni->ni_dtim_period; 3358 if (dtimperiod <= 0) /* NB: 0 if not known */ 3359 dtimperiod = 1; 3360 dtimcount = ni->ni_dtim_count; 3361 if (dtimcount >= dtimperiod) /* NB: sanity check */ 3362 dtimcount = 0; /* XXX? */ 3363 cfpperiod = 1; /* NB: no PCF support yet */ 3364 cfpcount = 0; 3365 /* 3366 * Pull nexttbtt forward to reflect the current 3367 * TSF and calculate dtim+cfp state for the result. 3368 */ 3369 tsf = ath_hal_gettsf64(ah); 3370 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE; 3371 do { 3372 nexttbtt += intval; 3373 if (--dtimcount < 0) { 3374 dtimcount = dtimperiod - 1; 3375 if (--cfpcount < 0) 3376 cfpcount = cfpperiod - 1; 3377 } 3378 } while (nexttbtt < tsftu); 3379 memset(&bs, 0, sizeof(bs)); 3380 bs.bs_intval = intval; 3381 bs.bs_nexttbtt = nexttbtt; 3382 bs.bs_dtimperiod = dtimperiod*intval; 3383 bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval; 3384 bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod; 3385 bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod; 3386 bs.bs_cfpmaxduration = 0; 3387#if 0 3388 /* 3389 * The 802.11 layer records the offset to the DTIM 3390 * bitmap while receiving beacons; use it here to 3391 * enable h/w detection of our AID being marked in 3392 * the bitmap vector (to indicate frames for us are 3393 * pending at the AP). 3394 * XXX do DTIM handling in s/w to WAR old h/w bugs 3395 * XXX enable based on h/w rev for newer chips 3396 */ 3397 bs.bs_timoffset = ni->ni_timoff; 3398#endif 3399 /* 3400 * Calculate the number of consecutive beacons to miss 3401 * before taking a BMISS interrupt. 3402 * Note that we clamp the result to at most 10 beacons. 3403 */ 3404 bs.bs_bmissthreshold = vap->iv_bmissthreshold; 3405 if (bs.bs_bmissthreshold > 10) 3406 bs.bs_bmissthreshold = 10; 3407 else if (bs.bs_bmissthreshold <= 0) 3408 bs.bs_bmissthreshold = 1; 3409 3410 /* 3411 * Calculate sleep duration. The configuration is 3412 * given in ms. We insure a multiple of the beacon 3413 * period is used. 
Also, if the sleep duration is 3414 * greater than the DTIM period then it makes senses 3415 * to make it a multiple of that. 3416 * 3417 * XXX fixed at 100ms 3418 */ 3419 bs.bs_sleepduration = 3420 roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval); 3421 if (bs.bs_sleepduration > bs.bs_dtimperiod) 3422 bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod); 3423 3424 DPRINTF(sc, ATH_DEBUG_BEACON, 3425 "%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n" 3426 , __func__ 3427 , tsf, tsftu 3428 , bs.bs_intval 3429 , bs.bs_nexttbtt 3430 , bs.bs_dtimperiod 3431 , bs.bs_nextdtim 3432 , bs.bs_bmissthreshold 3433 , bs.bs_sleepduration 3434 , bs.bs_cfpperiod 3435 , bs.bs_cfpmaxduration 3436 , bs.bs_cfpnext 3437 , bs.bs_timoffset 3438 ); 3439 ath_hal_intrset(ah, 0); 3440 ath_hal_beacontimers(ah, &bs); 3441 sc->sc_imask |= HAL_INT_BMISS; 3442 ath_hal_intrset(ah, sc->sc_imask); 3443 } else { 3444 ath_hal_intrset(ah, 0); 3445 if (nexttbtt == intval) 3446 intval |= HAL_BEACON_RESET_TSF; 3447 if (ic->ic_opmode == IEEE80211_M_IBSS) { 3448 /* 3449 * In IBSS mode enable the beacon timers but only 3450 * enable SWBA interrupts if we need to manually 3451 * prepare beacon frames. Otherwise we use a 3452 * self-linked tx descriptor and let the hardware 3453 * deal with things. 3454 */ 3455 intval |= HAL_BEACON_ENA; 3456 if (!sc->sc_hasveol) 3457 sc->sc_imask |= HAL_INT_SWBA; 3458 if ((intval & HAL_BEACON_RESET_TSF) == 0) { 3459 /* 3460 * Pull nexttbtt forward to reflect 3461 * the current TSF. 3462 */ 3463 tsf = ath_hal_gettsf64(ah); 3464 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE; 3465 do { 3466 nexttbtt += intval; 3467 } while (nexttbtt < tsftu); 3468 } 3469 ath_beaconq_config(sc); 3470 } else if (ic->ic_opmode == IEEE80211_M_HOSTAP) { 3471 /* 3472 * In AP mode we enable the beacon timers and 3473 * SWBA interrupts to prepare beacon frames. 
3474 */ 3475 intval |= HAL_BEACON_ENA; 3476 sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */ 3477 ath_beaconq_config(sc); 3478 } 3479 ath_hal_beaconinit(ah, nexttbtt, intval); 3480 sc->sc_bmisscount = 0; 3481 ath_hal_intrset(ah, sc->sc_imask); 3482 /* 3483 * When using a self-linked beacon descriptor in 3484 * ibss mode load it once here. 3485 */ 3486 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) 3487 ath_beacon_start_adhoc(sc, vap); 3488 } 3489 sc->sc_syncbeacon = 0; 3490#undef FUDGE 3491#undef TSF_TO_TU 3492} 3493 3494static void 3495ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 3496{ 3497 bus_addr_t *paddr = (bus_addr_t*) arg; 3498 KASSERT(error == 0, ("error %u on bus_dma callback", error)); 3499 *paddr = segs->ds_addr; 3500} 3501 3502static int 3503ath_descdma_setup(struct ath_softc *sc, 3504 struct ath_descdma *dd, ath_bufhead *head, 3505 const char *name, int nbuf, int ndesc) 3506{ 3507#define DS2PHYS(_dd, _ds) \ 3508 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 3509 struct ifnet *ifp = sc->sc_ifp; 3510 struct ath_desc *ds; 3511 struct ath_buf *bf; 3512 int i, bsize, error; 3513 3514 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n", 3515 __func__, name, nbuf, ndesc); 3516 3517 dd->dd_name = name; 3518 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc; 3519 3520 /* 3521 * Setup DMA descriptor area. 
3522 */ 3523 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 3524 PAGE_SIZE, 0, /* alignment, bounds */ 3525 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 3526 BUS_SPACE_MAXADDR, /* highaddr */ 3527 NULL, NULL, /* filter, filterarg */ 3528 dd->dd_desc_len, /* maxsize */ 3529 1, /* nsegments */ 3530 dd->dd_desc_len, /* maxsegsize */ 3531 BUS_DMA_ALLOCNOW, /* flags */ 3532 NULL, /* lockfunc */ 3533 NULL, /* lockarg */ 3534 &dd->dd_dmat); 3535 if (error != 0) { 3536 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); 3537 return error; 3538 } 3539 3540 /* allocate descriptors */ 3541 error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap); 3542 if (error != 0) { 3543 if_printf(ifp, "unable to create dmamap for %s descriptors, " 3544 "error %u\n", dd->dd_name, error); 3545 goto fail0; 3546 } 3547 3548 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, 3549 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, 3550 &dd->dd_dmamap); 3551 if (error != 0) { 3552 if_printf(ifp, "unable to alloc memory for %u %s descriptors, " 3553 "error %u\n", nbuf * ndesc, dd->dd_name, error); 3554 goto fail1; 3555 } 3556 3557 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, 3558 dd->dd_desc, dd->dd_desc_len, 3559 ath_load_cb, &dd->dd_desc_paddr, 3560 BUS_DMA_NOWAIT); 3561 if (error != 0) { 3562 if_printf(ifp, "unable to map %s descriptors, error %u\n", 3563 dd->dd_name, error); 3564 goto fail2; 3565 } 3566 3567 ds = dd->dd_desc; 3568 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", 3569 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, 3570 (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); 3571 3572 /* allocate rx buffers */ 3573 bsize = sizeof(struct ath_buf) * nbuf; 3574 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 3575 if (bf == NULL) { 3576 if_printf(ifp, "malloc of %s buffers failed, size %u\n", 3577 dd->dd_name, bsize); 3578 goto fail3; 3579 } 3580 dd->dd_bufptr = bf; 3581 3582 STAILQ_INIT(head); 3583 for (i 
= 0; i < nbuf; i++, bf++, ds += ndesc) { 3584 bf->bf_desc = ds; 3585 bf->bf_daddr = DS2PHYS(dd, ds); 3586 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 3587 &bf->bf_dmamap); 3588 if (error != 0) { 3589 if_printf(ifp, "unable to create dmamap for %s " 3590 "buffer %u, error %u\n", dd->dd_name, i, error); 3591 ath_descdma_cleanup(sc, dd, head); 3592 return error; 3593 } 3594 STAILQ_INSERT_TAIL(head, bf, bf_list); 3595 } 3596 return 0; 3597fail3: 3598 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 3599fail2: 3600 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 3601fail1: 3602 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 3603fail0: 3604 bus_dma_tag_destroy(dd->dd_dmat); 3605 memset(dd, 0, sizeof(*dd)); 3606 return error; 3607#undef DS2PHYS 3608} 3609 3610static void 3611ath_descdma_cleanup(struct ath_softc *sc, 3612 struct ath_descdma *dd, ath_bufhead *head) 3613{ 3614 struct ath_buf *bf; 3615 struct ieee80211_node *ni; 3616 3617 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 3618 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 3619 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 3620 bus_dma_tag_destroy(dd->dd_dmat); 3621 3622 STAILQ_FOREACH(bf, head, bf_list) { 3623 if (bf->bf_m) { 3624 m_freem(bf->bf_m); 3625 bf->bf_m = NULL; 3626 } 3627 if (bf->bf_dmamap != NULL) { 3628 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 3629 bf->bf_dmamap = NULL; 3630 } 3631 ni = bf->bf_node; 3632 bf->bf_node = NULL; 3633 if (ni != NULL) { 3634 /* 3635 * Reclaim node reference. 
3636 */ 3637 ieee80211_free_node(ni); 3638 } 3639 } 3640 3641 STAILQ_INIT(head); 3642 free(dd->dd_bufptr, M_ATHDEV); 3643 memset(dd, 0, sizeof(*dd)); 3644} 3645 3646static int 3647ath_desc_alloc(struct ath_softc *sc) 3648{ 3649 int error; 3650 3651 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, 3652 "rx", ath_rxbuf, 1); 3653 if (error != 0) 3654 return error; 3655 3656 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 3657 "tx", ath_txbuf, ATH_TXDESC); 3658 if (error != 0) { 3659 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 3660 return error; 3661 } 3662 3663 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, 3664 "beacon", ATH_BCBUF, 1); 3665 if (error != 0) { 3666 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3667 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 3668 return error; 3669 } 3670 return 0; 3671} 3672 3673static void 3674ath_desc_free(struct ath_softc *sc) 3675{ 3676 3677 if (sc->sc_bdma.dd_desc_len != 0) 3678 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); 3679 if (sc->sc_txdma.dd_desc_len != 0) 3680 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3681 if (sc->sc_rxdma.dd_desc_len != 0) 3682 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 3683} 3684 3685static struct ieee80211_node * 3686ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 3687{ 3688 struct ieee80211com *ic = vap->iv_ic; 3689 struct ath_softc *sc = ic->ic_ifp->if_softc; 3690 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; 3691 struct ath_node *an; 3692 3693 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); 3694 if (an == NULL) { 3695 /* XXX stat+msg */ 3696 return NULL; 3697 } 3698 ath_rate_node_init(sc, an); 3699 3700 DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an); 3701 return &an->an_node; 3702} 3703 3704static void 3705ath_node_free(struct ieee80211_node *ni) 3706{ 3707 struct ieee80211com *ic = ni->ni_ic; 3708 struct ath_softc *sc = 
ic->ic_ifp->if_softc; 3709 3710 DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni); 3711 3712 ath_rate_node_cleanup(sc, ATH_NODE(ni)); 3713 sc->sc_node_free(ni); 3714} 3715 3716static void 3717ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) 3718{ 3719 struct ieee80211com *ic = ni->ni_ic; 3720 struct ath_softc *sc = ic->ic_ifp->if_softc; 3721 struct ath_hal *ah = sc->sc_ah; 3722 HAL_CHANNEL hchan; 3723 3724 *rssi = ic->ic_node_getrssi(ni); 3725 if (ni->ni_chan != IEEE80211_CHAN_ANYC) { 3726 ath_mapchan(&hchan, ni->ni_chan); 3727 *noise = ath_hal_getchannoise(ah, &hchan); 3728 } else 3729 *noise = -95; /* nominally correct */ 3730} 3731 3732static int 3733ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf) 3734{ 3735 struct ath_hal *ah = sc->sc_ah; 3736 int error; 3737 struct mbuf *m; 3738 struct ath_desc *ds; 3739 3740 m = bf->bf_m; 3741 if (m == NULL) { 3742 /* 3743 * NB: by assigning a page to the rx dma buffer we 3744 * implicitly satisfy the Atheros requirement that 3745 * this buffer be cache-line-aligned and sized to be 3746 * multiple of the cache line size. Not doing this 3747 * causes weird stuff to happen (for the 5210 at least). 
3748 */ 3749 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 3750 if (m == NULL) { 3751 DPRINTF(sc, ATH_DEBUG_ANY, 3752 "%s: no mbuf/cluster\n", __func__); 3753 sc->sc_stats.ast_rx_nombuf++; 3754 return ENOMEM; 3755 } 3756 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; 3757 3758 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, 3759 bf->bf_dmamap, m, 3760 bf->bf_segs, &bf->bf_nseg, 3761 BUS_DMA_NOWAIT); 3762 if (error != 0) { 3763 DPRINTF(sc, ATH_DEBUG_ANY, 3764 "%s: bus_dmamap_load_mbuf_sg failed; error %d\n", 3765 __func__, error); 3766 sc->sc_stats.ast_rx_busdma++; 3767 m_freem(m); 3768 return error; 3769 } 3770 KASSERT(bf->bf_nseg == 1, 3771 ("multi-segment packet; nseg %u", bf->bf_nseg)); 3772 bf->bf_m = m; 3773 } 3774 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD); 3775 3776 /* 3777 * Setup descriptors. For receive we always terminate 3778 * the descriptor list with a self-linked entry so we'll 3779 * not get overrun under high load (as can happen with a 3780 * 5212 when ANI processing enables PHY error frames). 3781 * 3782 * To insure the last descriptor is self-linked we create 3783 * each descriptor as self-linked and add it to the end. As 3784 * each additional descriptor is added the previous self-linked 3785 * entry is ``fixed'' naturally. This should be safe even 3786 * if DMA is happening. When processing RX interrupts we 3787 * never remove/process the last, self-linked, entry on the 3788 * descriptor list. This insures the hardware always has 3789 * someplace to write a new frame. 3790 */ 3791 ds = bf->bf_desc; 3792 ds->ds_link = bf->bf_daddr; /* link to self */ 3793 ds->ds_data = bf->bf_segs[0].ds_addr; 3794 ath_hal_setuprxdesc(ah, ds 3795 , m->m_len /* buffer size */ 3796 , 0 3797 ); 3798 3799 if (sc->sc_rxlink != NULL) 3800 *sc->sc_rxlink = bf->bf_daddr; 3801 sc->sc_rxlink = &ds->ds_link; 3802 return 0; 3803} 3804 3805/* 3806 * Extend 15-bit time stamp from rx descriptor to 3807 * a full 64-bit TSF using the specified TSF. 
3808 */ 3809static __inline u_int64_t 3810ath_extend_tsf(u_int32_t rstamp, u_int64_t tsf) 3811{ 3812 if ((tsf & 0x7fff) < rstamp) 3813 tsf -= 0x8000; 3814 return ((tsf &~ 0x7fff) | rstamp); 3815} 3816 3817/* 3818 * Intercept management frames to collect beacon rssi data 3819 * and to do ibss merges. 3820 */ 3821static void 3822ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, 3823 int subtype, int rssi, int noise, u_int32_t rstamp) 3824{ 3825 struct ieee80211vap *vap = ni->ni_vap; 3826 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 3827 3828 /* 3829 * Call up first so subsequent work can use information 3830 * potentially stored in the node (e.g. for ibss merge). 3831 */ 3832 ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, noise, rstamp); 3833 switch (subtype) { 3834 case IEEE80211_FC0_SUBTYPE_BEACON: 3835 /* update rssi statistics for use by the hal */ 3836 ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi); 3837 if (sc->sc_syncbeacon && 3838 ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) { 3839 /* 3840 * Resync beacon timers using the tsf of the beacon 3841 * frame we just received. 3842 */ 3843 ath_beacon_config(sc, vap); 3844 } 3845 /* fall thru... */ 3846 case IEEE80211_FC0_SUBTYPE_PROBE_RESP: 3847 if (vap->iv_opmode == IEEE80211_M_IBSS && 3848 vap->iv_state == IEEE80211_S_RUN) { 3849 u_int64_t tsf = ath_extend_tsf(rstamp, 3850 ath_hal_gettsf64(sc->sc_ah)); 3851 /* 3852 * Handle ibss merge as needed; check the tsf on the 3853 * frame before attempting the merge. The 802.11 spec 3854 * says the station should change it's bssid to match 3855 * the oldest station with the same ssid, where oldest 3856 * is determined by the tsf. Note that hardware 3857 * reconfiguration happens through callback to 3858 * ath_newstate as the state machine will go from 3859 * RUN -> RUN when this happens. 
3860 */ 3861 if (le64toh(ni->ni_tstamp.tsf) >= tsf) { 3862 DPRINTF(sc, ATH_DEBUG_STATE, 3863 "ibss merge, rstamp %u tsf %ju " 3864 "tstamp %ju\n", rstamp, (uintmax_t)tsf, 3865 (uintmax_t)ni->ni_tstamp.tsf); 3866 (void) ieee80211_ibss_merge(ni); 3867 } 3868 } 3869 break; 3870 } 3871} 3872 3873/* 3874 * Set the default antenna. 3875 */ 3876static void 3877ath_setdefantenna(struct ath_softc *sc, u_int antenna) 3878{ 3879 struct ath_hal *ah = sc->sc_ah; 3880 3881 /* XXX block beacon interrupts */ 3882 ath_hal_setdefantenna(ah, antenna); 3883 if (sc->sc_defant != antenna) 3884 sc->sc_stats.ast_ant_defswitch++; 3885 sc->sc_defant = antenna; 3886 sc->sc_rxotherant = 0; 3887} 3888 3889static int 3890ath_rx_tap(struct ifnet *ifp, struct mbuf *m, 3891 const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf) 3892{ 3893#define CHAN_HT htole32(CHANNEL_HT20|CHANNEL_HT40PLUS|CHANNEL_HT40MINUS) 3894#define CHAN_HT20 htole32(IEEE80211_CHAN_HT20) 3895#define CHAN_HT40U htole32(IEEE80211_CHAN_HT40U) 3896#define CHAN_HT40D htole32(IEEE80211_CHAN_HT40D) 3897 struct ath_softc *sc = ifp->if_softc; 3898 u_int8_t rix; 3899 3900 /* 3901 * Discard anything shorter than an ack or cts. 
3902 */ 3903 if (m->m_pkthdr.len < IEEE80211_ACK_LEN) { 3904 DPRINTF(sc, ATH_DEBUG_RECV, "%s: runt packet %d\n", 3905 __func__, m->m_pkthdr.len); 3906 sc->sc_stats.ast_rx_tooshort++; 3907 return 0; 3908 } 3909 rix = rs->rs_rate; 3910 sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate; 3911 sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags; 3912#if HAL_ABI_VERSION >= 0x07050400 3913 sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT; 3914 if (sc->sc_rx_th.wr_rate & 0x80) { /* HT rate */ 3915 if ((rs->rs_flags & HAL_RX_2040) == 0) 3916 sc->sc_rx_th.wr_chan_flags |= CHAN_HT20; 3917 else if (sc->sc_curchan.channelFlags & CHANNEL_HT40PLUS) 3918 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U; 3919 else 3920 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D; 3921 if ((rs->rs_flags & HAL_RX_GI) == 0) 3922 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI; 3923 } 3924#endif 3925 sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(rs->rs_tstamp, tsf)); 3926 if (rs->rs_status & HAL_RXERR_CRC) 3927 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS; 3928 /* XXX propagate other error flags from descriptor */ 3929 sc->sc_rx_th.wr_antsignal = rs->rs_rssi + nf; 3930 sc->sc_rx_th.wr_antnoise = nf; 3931 sc->sc_rx_th.wr_antenna = rs->rs_antenna; 3932 3933 bpf_mtap2(ifp->if_bpf, &sc->sc_rx_th, sc->sc_rx_th_len, m); 3934 3935 return 1; 3936#undef CHAN_HT20 3937#undef CHAN_HT40U 3938#undef CHAN_HT40D 3939#undef CHAN_HT 3940} 3941 3942static void 3943ath_handle_micerror(struct ieee80211com *ic, 3944 struct ieee80211_frame *wh, int keyix) 3945{ 3946 struct ieee80211_node *ni; 3947 3948 /* XXX recheck MIC to deal w/ chips that lie */ 3949 /* XXX discard MIC errors on !data frames */ 3950 ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh); 3951 if (ni != NULL) { 3952 ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix); 3953 ieee80211_free_node(ni); 3954 } 3955} 3956 3957static void 3958ath_rx_proc(void *arg, int npending) 3959{ 3960#define PA2DESC(_sc, _pa) \ 3961 ((struct ath_desc 
*)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ 3962 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) 3963 struct ath_softc *sc = arg; 3964 struct ath_buf *bf; 3965 struct ifnet *ifp = sc->sc_ifp; 3966 struct ieee80211com *ic = ifp->if_l2com; 3967 struct ath_hal *ah = sc->sc_ah; 3968 struct ath_desc *ds; 3969 struct ath_rx_status *rs; 3970 struct mbuf *m; 3971 struct ieee80211_node *ni; 3972 int len, type, ngood; 3973 u_int phyerr; 3974 HAL_STATUS status; 3975 int16_t nf; 3976 u_int64_t tsf; 3977 3978 DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending); 3979 ngood = 0; 3980 nf = ath_hal_getchannoise(ah, &sc->sc_curchan); 3981 tsf = ath_hal_gettsf64(ah); 3982 do { 3983 bf = STAILQ_FIRST(&sc->sc_rxbuf); 3984 if (bf == NULL) { /* NB: shouldn't happen */ 3985 if_printf(ifp, "%s: no buffer!\n", __func__); 3986 break; 3987 } 3988 m = bf->bf_m; 3989 if (m == NULL) { /* NB: shouldn't happen */ 3990 /* 3991 * If mbuf allocation failed previously there 3992 * will be no mbuf; try again to re-populate it. 3993 */ 3994 /* XXX make debug msg */ 3995 if_printf(ifp, "%s: no mbuf!\n", __func__); 3996 STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list); 3997 goto rx_next; 3998 } 3999 ds = bf->bf_desc; 4000 if (ds->ds_link == bf->bf_daddr) { 4001 /* NB: never process the self-linked entry at the end */ 4002 break; 4003 } 4004 /* XXX sync descriptor memory */ 4005 /* 4006 * Must provide the virtual address of the current 4007 * descriptor, the physical address, and the virtual 4008 * address of the next descriptor in the h/w chain. 4009 * This allows the HAL to look ahead to see if the 4010 * hardware is done with a descriptor by checking the 4011 * done bit in the following descriptor and the address 4012 * of the current descriptor the DMA engine is working 4013 * on. All this is necessary because of our use of 4014 * a self-linked list to avoid rx overruns. 
4015 */ 4016 rs = &bf->bf_status.ds_rxstat; 4017 status = ath_hal_rxprocdesc(ah, ds, 4018 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs); 4019#ifdef ATH_DEBUG 4020 if (sc->sc_debug & ATH_DEBUG_RECV_DESC) 4021 ath_printrxbuf(bf, 0, status == HAL_OK); 4022#endif 4023 if (status == HAL_EINPROGRESS) 4024 break; 4025 STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list); 4026 if (rs->rs_status != 0) { 4027 if (rs->rs_status & HAL_RXERR_CRC) 4028 sc->sc_stats.ast_rx_crcerr++; 4029 if (rs->rs_status & HAL_RXERR_FIFO) 4030 sc->sc_stats.ast_rx_fifoerr++; 4031 if (rs->rs_status & HAL_RXERR_PHY) { 4032 sc->sc_stats.ast_rx_phyerr++; 4033 phyerr = rs->rs_phyerr & 0x1f; 4034 sc->sc_stats.ast_rx_phy[phyerr]++; 4035 goto rx_error; /* NB: don't count in ierrors */ 4036 } 4037 if (rs->rs_status & HAL_RXERR_DECRYPT) { 4038 /* 4039 * Decrypt error. If the error occurred 4040 * because there was no hardware key, then 4041 * let the frame through so the upper layers 4042 * can process it. This is necessary for 5210 4043 * parts which have no way to setup a ``clear'' 4044 * key cache entry. 4045 * 4046 * XXX do key cache faulting 4047 */ 4048 if (rs->rs_keyix == HAL_RXKEYIX_INVALID) 4049 goto rx_accept; 4050 sc->sc_stats.ast_rx_badcrypt++; 4051 } 4052 if (rs->rs_status & HAL_RXERR_MIC) { 4053 sc->sc_stats.ast_rx_badmic++; 4054 /* 4055 * Do minimal work required to hand off 4056 * the 802.11 header for notifcation. 4057 */ 4058 /* XXX frag's and qos frames */ 4059 len = rs->rs_datalen; 4060 if (len >= sizeof (struct ieee80211_frame)) { 4061 bus_dmamap_sync(sc->sc_dmat, 4062 bf->bf_dmamap, 4063 BUS_DMASYNC_POSTREAD); 4064 ath_handle_micerror(ic, 4065 mtod(m, struct ieee80211_frame *), 4066 sc->sc_splitmic ? 4067 rs->rs_keyix-32 : rs->rs_keyix); 4068 } 4069 } 4070 ifp->if_ierrors++; 4071rx_error: 4072 /* 4073 * Cleanup any pending partial frame. 
4074 */ 4075 if (sc->sc_rxpending != NULL) { 4076 m_freem(sc->sc_rxpending); 4077 sc->sc_rxpending = NULL; 4078 } 4079 /* 4080 * When a tap is present pass error frames 4081 * that have been requested. By default we 4082 * pass decrypt+mic errors but others may be 4083 * interesting (e.g. crc). 4084 */ 4085 if (bpf_peers_present(ifp->if_bpf) && 4086 (rs->rs_status & sc->sc_monpass)) { 4087 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 4088 BUS_DMASYNC_POSTREAD); 4089 /* NB: bpf needs the mbuf length setup */ 4090 len = rs->rs_datalen; 4091 m->m_pkthdr.len = m->m_len = len; 4092 (void) ath_rx_tap(ifp, m, rs, tsf, nf); 4093 } 4094 /* XXX pass MIC errors up for s/w reclaculation */ 4095 goto rx_next; 4096 } 4097rx_accept: 4098 /* 4099 * Sync and unmap the frame. At this point we're 4100 * committed to passing the mbuf somewhere so clear 4101 * bf_m; this means a new mbuf must be allocated 4102 * when the rx descriptor is setup again to receive 4103 * another frame. 4104 */ 4105 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 4106 BUS_DMASYNC_POSTREAD); 4107 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 4108 bf->bf_m = NULL; 4109 4110 len = rs->rs_datalen; 4111 m->m_len = len; 4112 4113 if (rs->rs_more) { 4114 /* 4115 * Frame spans multiple descriptors; save 4116 * it for the next completed descriptor, it 4117 * will be used to construct a jumbogram. 4118 */ 4119 if (sc->sc_rxpending != NULL) { 4120 /* NB: max frame size is currently 2 clusters */ 4121 sc->sc_stats.ast_rx_toobig++; 4122 m_freem(sc->sc_rxpending); 4123 } 4124 m->m_pkthdr.rcvif = ifp; 4125 m->m_pkthdr.len = len; 4126 sc->sc_rxpending = m; 4127 goto rx_next; 4128 } else if (sc->sc_rxpending != NULL) { 4129 /* 4130 * This is the second part of a jumbogram, 4131 * chain it to the first mbuf, adjust the 4132 * frame length, and clear the rxpending state. 
4133 */ 4134 sc->sc_rxpending->m_next = m; 4135 sc->sc_rxpending->m_pkthdr.len += len; 4136 m = sc->sc_rxpending; 4137 sc->sc_rxpending = NULL; 4138 } else { 4139 /* 4140 * Normal single-descriptor receive; setup 4141 * the rcvif and packet length. 4142 */ 4143 m->m_pkthdr.rcvif = ifp; 4144 m->m_pkthdr.len = len; 4145 } 4146 4147 ifp->if_ipackets++; 4148 sc->sc_stats.ast_ant_rx[rs->rs_antenna]++; 4149 4150 if (bpf_peers_present(ifp->if_bpf) && 4151 !ath_rx_tap(ifp, m, rs, tsf, nf)) { 4152 m_freem(m); /* XXX reclaim */ 4153 goto rx_next; 4154 } 4155 4156 /* 4157 * From this point on we assume the frame is at least 4158 * as large as ieee80211_frame_min; verify that. 4159 */ 4160 if (len < IEEE80211_MIN_LEN) { 4161 DPRINTF(sc, ATH_DEBUG_RECV, "%s: short packet %d\n", 4162 __func__, len); 4163 sc->sc_stats.ast_rx_tooshort++; 4164 m_freem(m); 4165 goto rx_next; 4166 } 4167 4168 if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) { 4169 ieee80211_dump_pkt(ic, mtod(m, caddr_t), len, 4170 sc->sc_hwmap[rs->rs_rate].ieeerate, 4171 rs->rs_rssi); 4172 } 4173 4174 m_adj(m, -IEEE80211_CRC_LEN); 4175 4176 /* 4177 * Locate the node for sender, track state, and then 4178 * pass the (referenced) node up to the 802.11 layer 4179 * for its use. 4180 */ 4181 ni = ieee80211_find_rxnode_withkey(ic, 4182 mtod(m, const struct ieee80211_frame_min *), 4183 rs->rs_keyix == HAL_RXKEYIX_INVALID ? 4184 IEEE80211_KEYIX_NONE : rs->rs_keyix); 4185 if (ni != NULL) { 4186 /* 4187 * Sending station is known, dispatch directly. 4188 */ 4189 type = ieee80211_input(ni, m, 4190 rs->rs_rssi, nf, rs->rs_tstamp); 4191 ieee80211_free_node(ni); 4192 /* 4193 * Arrange to update the last rx timestamp only for 4194 * frames from our ap when operating in station mode. 4195 * This assumes the rx key is always setup when 4196 * associated. 
 */
			if (ic->ic_opmode == IEEE80211_M_STA &&
			    rs->rs_keyix != HAL_RXKEYIX_INVALID)
				ngood++;
		} else {
			type = ieee80211_input_all(ic, m,
			    rs->rs_rssi, nf, rs->rs_tstamp);
		}
		/*
		 * Track rx rssi and do any rx antenna management.
		 */
		ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi);
		if (sc->sc_diversity) {
			/*
			 * When using fast diversity, change the default rx
			 * antenna if diversity chooses the other antenna 3
			 * times in a row.
			 */
			if (sc->sc_defant != rs->rs_antenna) {
				if (++sc->sc_rxotherant >= 3)
					ath_setdefantenna(sc, rs->rs_antenna);
			} else
				sc->sc_rxotherant = 0;
		}
		if (sc->sc_softled) {
			/*
			 * Blink for any data frame.  Otherwise do a
			 * heartbeat-style blink when idle.  The latter
			 * is mainly for station mode where we depend on
			 * periodic beacon frames to trigger the poll event.
			 */
			if (type == IEEE80211_FC0_TYPE_DATA) {
				sc->sc_rxrate = rs->rs_rate;
				ath_led_event(sc, ATH_LED_RX);
			} else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
				ath_led_event(sc, ATH_LED_POLL);
		}
rx_next:
		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
	} while (ath_rxbuf_init(sc, bf) == 0);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan);
	if (ngood)
		sc->sc_lastrx = tsf;

	/* kick the tx machinery in case it stalled while we were busy */
	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
	    !IFQ_IS_EMPTY(&ifp->if_snd))
		ath_start(ifp);

#undef PA2DESC
}

/*
 * Reset the s/w state of a transmit queue: empty buffer and staging
 * lists, no link pointer, freshly initialized lock, zeroed counters.
 * NB: touches no h/w state; the h/w queue is set up by the caller.
 */
static void
ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
{
	txq->axq_qnum = qnum;
	txq->axq_depth = 0;
	txq->axq_intrcnt = 0;
	txq->axq_link = NULL;
	STAILQ_INIT(&txq->axq_q);
	ATH_TXQ_LOCK_INIT(sc, txq);
	TAILQ_INIT(&txq->axq_stageq);
	txq->axq_curage = 0;
}

/*
 * Setup a h/w transmit queue.
 */
static struct ath_txq *
ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;
	int qnum;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	/* NB: aifs/cwmin/cwmax are left to the hal's per-queue defaults */
	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE;
	qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= N(sc->sc_txq)) {
		device_printf(sc->sc_dev,
			"hal qnum %u out of range, max %zu!\n",
			qnum, N(sc->sc_txq));
		ath_hal_releasetxqueue(ah, qnum);
		return NULL;
	}
	/* only initialize s/w state once per h/w queue */
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
		sc->sc_txqsetup |= 1<<qnum;
	}
	return &sc->sc_txq[qnum];
#undef N
}

/*
 * Setup a hardware data transmit queue for the specified
 * access control.  The hal may not support all requested
 * queues in which case it will return a reference to a
 * previously setup queue.  We record the mapping from ac's
 * to h/w queues for use by ath_tx_start and also track
 * the set of h/w queues being used to optimize work in the
 * transmit interrupt handler and related routines.
 * Returns 1 on success, 0 on failure.
 */
static int
ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	struct ath_txq *txq;

	if (ac >= N(sc->sc_ac2q)) {
		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
			ac, N(sc->sc_ac2q));
		return 0;
	}
	txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->sc_ac2q[ac] = txq;
		return 1;
	} else
		return 0;
#undef N
}

/*
 * Update WME parameters for a transmit queue.
 * Returns 1 if the h/w accepted the new parameters, 0 otherwise.
 */
static int
ath_txq_update(struct ath_softc *sc, int ac)
{
	/* convert a log2 contention window exponent to the cw value */
#define	ATH_EXPONENT_TO_VALUE(v)	((1<<v)-1)
	/* txop limit is in units of 32us; convert to microseconds */
#define	ATH_TXOP_TO_US(v)		(v<<5)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_txq *txq = sc->sc_ac2q[ac];
	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;

	/* read-modify-write: preserve settings not derived from WME */
	ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
	qi.tqi_aifs = wmep->wmep_aifsn;
	qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
	qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
	qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);

	if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
		if_printf(ifp, "unable to update hardware queue "
			"parameters for %s traffic!\n",
			ieee80211_wme_acnames[ac]);
		return 0;
	} else {
		ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
		return 1;
	}
#undef ATH_TXOP_TO_US
#undef ATH_EXPONENT_TO_VALUE
}

/*
 * Callback from the 802.11 layer to update WME parameters.
 */
static int
ath_wme_update(struct ieee80211com *ic)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	/* NB: EIO if any AC failed to update, 0 when all succeeded */
	return !ath_txq_update(sc, WME_AC_BE) ||
	    !ath_txq_update(sc, WME_AC_BK) ||
	    !ath_txq_update(sc, WME_AC_VI) ||
	    !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
}

/*
 * Reclaim resources for a setup queue.
 */
static void
ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{

	ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	ATH_TXQ_LOCK_DESTROY(txq);
	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
}

/*
 * Reclaim all tx queue resources.
 */
static void
ath_tx_cleanup(struct ath_softc *sc)
{
	int i;

	ATH_TXBUF_LOCK_DESTROY(sc);
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
}

/*
 * Return h/w rate index for an IEEE rate (w/o basic rate bit).
 */
static int
ath_tx_findrix(const HAL_RATE_TABLE *rt, int rate)
{
	int i;

	for (i = 0; i < rt->rateCount; i++)
		if ((rt->info[i].dot11Rate & IEEE80211_RATE_VAL) == rate)
			return i;
	return 0;		/* NB: lowest rate */
}

/*
 * Reclaim mbuf resources.  For fragmented frames we
 * need to claim each frag chained with m_nextpkt.
 */
static void
ath_freetx(struct mbuf *m)
{
	struct mbuf *next;

	do {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m_freem(m);
	} while ((m = next) != NULL);
}

/*
 * DMA-map an outbound mbuf chain into bf, collapsing the chain
 * with m_collapse if it needs more than ATH_TXDESC segments.
 * On success bf->bf_m/bf_segs/bf_nseg are valid and the map is
 * synced for PREWRITE; on failure the mbuf chain is freed and
 * an errno is returned.
 */
static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = ATH_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ath_freetx(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_TXDESC) {		/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
		m = m_collapse(m0, M_DONTWAIT, ATH_TXDESC);
		if (m == NULL) {
			ath_freetx(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			ath_freetx(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		ath_freetx(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}

/*
 * Complete the descriptor chain for bf and hand the frame to the
 * h/w queue (or, for the s/w mcast queue, stage it for the SWBA
 * handler); see the comments in the body for the queueing rules.
 */
static void
ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds, *ds0;
	int i;

	/*
	 * Fillin the remainder of the descriptor info.
 */
	ds0 = ds = bf->bf_desc;
	for (i = 0; i < bf->bf_nseg; i++, ds++) {
		ds->ds_data = bf->bf_segs[i].ds_addr;
		if (i == bf->bf_nseg - 1)
			ds->ds_link = 0;
		else
			ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1);
		ath_hal_filltxdesc(ah, ds
			, bf->bf_segs[i].ds_len	/* segment length */
			, i == 0		/* first segment */
			, i == bf->bf_nseg - 1	/* last segment */
			, ds0			/* first descriptor */
		);
		DPRINTF(sc, ATH_DEBUG_XMIT,
			"%s: %d: %08x %08x %08x %08x %08x %08x\n",
			__func__, i, ds->ds_link, ds->ds_data,
			ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]);
	}
	/*
	 * Insert the frame on the outbound list and pass it on
	 * to the hardware.  Multicast frames buffered for power
	 * save stations and transmit from the CAB queue are stored
	 * on a s/w only queue and loaded on to the CAB queue in
	 * the SWBA handler since frames only go out on DTIM and
	 * to avoid possible races.
	 */
	ATH_TXQ_LOCK(txq);
	if (txq->axq_qnum != ATH_TXQ_SWQ) {
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		if (txq->axq_link == NULL) {
			/* queue was idle; point the h/w at our descriptor */
			ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: TXDP[%u] = %p (%p) depth %d\n", __func__,
			    txq->axq_qnum, (caddr_t)bf->bf_daddr, bf->bf_desc,
			    txq->axq_depth);
		} else {
			/* chain off the previous frame's last descriptor */
			*txq->axq_link = bf->bf_daddr;
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth);
		}
		txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
		ath_hal_txstart(ah, txq->axq_qnum);
	} else {
		if (txq->axq_link != NULL) {
			struct ath_buf *last = ATH_TXQ_LAST(txq);
			struct ieee80211_frame *wh;

			/* mark previous frame */
			wh = mtod(last->bf_m, struct ieee80211_frame *);
			wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
			bus_dmamap_sync(sc->sc_dmat, last->bf_dmamap,
			    BUS_DMASYNC_PREWRITE);

			/* link descriptor */
			*txq->axq_link = bf->bf_daddr;
		}
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
	}
	ATH_TXQ_UNLOCK(txq);
}

/*
 * Prepare a frame for transmit: apply crypto encapsulation, DMA-map
 * the mbuf, select rate/flags/duration, formulate the tx descriptor
 * and hand the buffer to the appropriate h/w queue.  Returns 0 on
 * success; on error the mbuf (chain) is freed and an errno returned.
 * NB: on success ownership of m0 passes to bf/the hardware.
 */
static int
ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf,
    struct mbuf *m0)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
	int error, iswep, ismcast, isfrag, ismrr;
	int keyix, hdrlen, pktlen, try0;
	u_int8_t rix, txrate, ctsrate;
	u_int8_t cix = 0xff;		/* NB: silence compiler */
	struct ath_desc *ds;
	struct ath_txq *txq;
	struct ieee80211_frame *wh;
	u_int subtype, flags, ctsduration;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	HAL_BOOL shortPreamble;
	struct ath_node *an;
	u_int pri;

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	isfrag = m0->m_flags & M_FRAG;
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3);

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			ath_freetx(m0);
			return EIO;
		}
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index.  When
		 * a s/w mic is done the frame will have had any mic
		 * added to it prior to entry so m0->m_pkthdr.len will
		 * account for it. Otherwise we need to add it to the
		 * packet length.
		 */
		cip = k->wk_cipher;
		hdrlen += cip->ic_header;
		pktlen += cip->ic_header + cip->ic_trailer;
		/* NB: frags always have any TKIP MIC done in s/w */
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
			pktlen += cip->ic_miclen;
		keyix = k->wk_keyix;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
		/*
		 * Use station key cache slot, if assigned.
		 */
		keyix = ni->ni_ucastkey.wk_keyix;
		if (keyix == IEEE80211_KEYIX_NONE)
			keyix = HAL_TXKEYIX_INVALID;
	} else
		keyix = HAL_TXKEYIX_INVALID;

	pktlen += IEEE80211_CRC_LEN;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = ath_tx_dmasetup(sc, bf, m0);
	if (error != 0)
		return error;
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	wh = mtod(m0, struct ieee80211_frame *);

	/* setup descriptors */
	ds = bf->bf_desc;
	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));

	/*
	 * NB: the 802.11 layer marks whether or not we should
	 * use short preamble based on the current mode and
	 * negotiated parameters.
	 */
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
		shortPreamble = AH_TRUE;
		sc->sc_stats.ast_tx_shortpre++;
	} else {
		shortPreamble = AH_FALSE;
	}

	an = ATH_NODE(ni);
	flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
	ismrr = 0;				/* default no multi-rate retry*/
	pri = M_WME_GETAC(m0);			/* honor classification */
	/* XXX use txparams instead of fixed values */
	/*
	 * Calculate Atheros packet type from IEEE80211 packet header,
	 * setup for rate calculations, and select h/w transmit queue.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
			atype = HAL_PKT_TYPE_BEACON;
		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			atype = HAL_PKT_TYPE_PROBE_RESP;
		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
			atype = HAL_PKT_TYPE_ATIM;
		else
			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_CTL:
		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_DATA:
		atype = HAL_PKT_TYPE_NORMAL;	/* default */
		/*
		 * Data frames: multicast frames go out at a fixed rate,
		 * EAPOL frames use the mgmt frame rate; otherwise consult
		 * the rate control module for the rate to use.
		 */
		if (ismcast) {
			rix = an->an_mcastrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = 1;
		} else if (m0->m_flags & M_EAPOL) {
			/* XXX? maybe always use long preamble? */
			rix = an->an_mgmtrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = ATH_TXMAXTRY;	/* XXX?too many? */
		} else {
			ath_rate_findrate(sc, an, shortPreamble, pktlen,
				&rix, &try0, &txrate);
			sc->sc_txrate = txrate;		/* for LED blinking */
			sc->sc_lastdatarix = rix;	/* for fast frames */
			if (try0 != ATH_TXMAXTRY)
				ismrr = 1;
		}
		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
			flags |= HAL_TXDESC_NOACK;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		/* XXX statistic */
		ath_freetx(m0);
		return EIO;
	}
	txq = sc->sc_ac2q[pri];

	/*
	 * When servicing one or more stations in power-save mode
	 * (or) if there is some mcast data waiting on the mcast
	 * queue (to prevent out of order delivery) multicast
	 * frames must be buffered until after the beacon.
	 */
	if (ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth))
		txq = &avp->av_mcastq;

	/*
	 * Calculate miscellaneous flags.
	 */
	if (ismcast) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
	} else if (pktlen > vap->iv_rtsthreshold &&
	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		cix = rt->info[rix].controlRate;
		sc->sc_stats.ast_tx_rts++;
	}
	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
		sc->sc_stats.ast_tx_noack++;

	/*
	 * If 802.11g protection is enabled, determine whether
	 * to use RTS/CTS or just CTS.  Note that this is only
	 * done for OFDM unicast frames.
	 */
	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
	    rt->info[rix].phy == IEEE80211_T_OFDM &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		/* XXX fragments must use CCK rates w/ protection */
		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
			flags |= HAL_TXDESC_RTSENA;
		else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
			flags |= HAL_TXDESC_CTSENA;
		if (isfrag) {
			/*
			 * For frags it would be desirable to use the
			 * highest CCK rate for RTS/CTS.  But stations
			 * farther away may detect it at a lower CCK rate
			 * so use the configured protection rate instead
			 * (for now).
			 */
			cix = rt->info[sc->sc_protrix].controlRate;
		} else
			/*
			 * NOTE(review): both branches currently pick the
			 * configured protection rate; the split is kept
			 * in anticipation of the frag policy above.
			 */
			cix = rt->info[sc->sc_protrix].controlRate;
		sc->sc_stats.ast_tx_protect++;
	}

	/*
	 * Calculate duration.  This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
	 */
	if ((flags & HAL_TXDESC_NOACK) == 0 &&
	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
		u_int16_t dur;
		if (shortPreamble)
			dur = rt->info[rix].spAckDuration;
		else
			dur = rt->info[rix].lpAckDuration;
		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
			dur += dur;		/* additional SIFS+ACK */
			KASSERT(m0->m_nextpkt != NULL, ("no fragment"));
			/*
			 * Include the size of next fragment so NAV is
			 * updated properly.  The last fragment uses only
			 * the ACK duration
			 */
			dur += ath_hal_computetxtime(ah, rt,
					m0->m_nextpkt->m_pkthdr.len,
					rix, shortPreamble);
		}
		if (isfrag) {
			/*
			 * Force hardware to use computed duration for next
			 * fragment by disabling multi-rate retry which updates
			 * duration based on the multi-rate duration table.
			 */
			ismrr = 0;
			try0 = ATH_TXMGTTRY;	/* XXX? */
		}
		*(u_int16_t *)wh->i_dur = htole16(dur);
	}

	/*
	 * Calculate RTS/CTS rate and duration if needed.
	 */
	ctsduration = 0;
	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
		/*
		 * CTS transmit rate is derived from the transmit rate
		 * by looking in the h/w rate table.  We must also factor
		 * in whether or not a short preamble is to be used.
		 */
		/* NB: cix is set above where RTS/CTS is enabled */
		KASSERT(cix != 0xff, ("cix not setup"));
		ctsrate = rt->info[cix].rateCode;
		/*
		 * Compute the transmit duration based on the frame
		 * size and the size of an ACK frame.  We call into the
		 * HAL to do the computation since it depends on the
		 * characteristics of the actual PHY being used.
		 *
		 * NB: CTS is assumed the same size as an ACK so we can
		 *     use the precalculated ACK durations.
		 */
		if (shortPreamble) {
			ctsrate |= rt->info[cix].shortPreamble;
			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
				ctsduration += rt->info[cix].spAckDuration;
			ctsduration += ath_hal_computetxtime(ah,
				rt, pktlen, rix, AH_TRUE);
			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
				ctsduration += rt->info[rix].spAckDuration;
		} else {
			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
				ctsduration += rt->info[cix].lpAckDuration;
			ctsduration += ath_hal_computetxtime(ah,
				rt, pktlen, rix, AH_FALSE);
			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
				ctsduration += rt->info[rix].lpAckDuration;
		}
		/*
		 * Must disable multi-rate retry when using RTS/CTS.
		 */
		ismrr = 0;
		try0 = ATH_TXMGTTRY;		/* XXX */
	} else
		ctsrate = 0;

	/*
	 * At this point we are committed to sending the frame
	 * and we don't need to look at m_nextpkt; clear it in
	 * case this frame is part of frag chain.
	 */
	m0->m_nextpkt = NULL;

	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
		ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
		    sc->sc_hwmap[txrate].ieeerate, -1);

	if (bpf_peers_present(ifp->if_bpf)) {
		u_int64_t tsf = ath_hal_gettsf64(ah);

		sc->sc_tx_th.wt_tsf = htole64(tsf);
		sc->sc_tx_th.wt_flags = sc->sc_hwmap[txrate].txflags;
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		if (isfrag)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
		sc->sc_tx_th.wt_rate = sc->sc_hwmap[txrate].ieeerate;
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		bpf_mtap2(ifp->if_bpf, &sc->sc_tx_th, sc->sc_tx_th_len, m0);
	}

	/*
	 * Determine if a tx interrupt should be generated for
	 * this descriptor.  We take a tx interrupt to reap
	 * descriptors when the h/w hits an EOL condition or
	 * when the descriptor is specifically marked to generate
	 * an interrupt.  We periodically mark descriptors in this
	 * way to insure timely replenishing of the supply needed
	 * for sending frames.  Defering interrupts reduces system
	 * load and potentially allows more concurrent work to be
	 * done but if done to aggressively can cause senders to
	 * backup.
	 *
	 * NB: use >= to deal with sc_txintrperiod changing
	 *     dynamically through sysctl.
	 */
	if (flags & HAL_TXDESC_INTREQ) {
		txq->axq_intrcnt = 0;
	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
		flags |= HAL_TXDESC_INTREQ;
		txq->axq_intrcnt = 0;
	}

	/*
	 * Formulate first tx descriptor with tx controls.
	 */
	/* XXX check return value? */
	ath_hal_setuptxdesc(ah, ds
		, pktlen		/* packet length */
		, hdrlen		/* header length */
		, atype			/* Atheros packet type */
		, ni->ni_txpower	/* txpower */
		, txrate, try0		/* series 0 rate/tries */
		, keyix			/* key cache index */
		, sc->sc_txantenna	/* antenna mode */
		, flags			/* flags */
		, ctsrate		/* rts/cts rate */
		, ctsduration		/* rts/cts duration */
	);
	bf->bf_txflags = flags;
	/*
	 * Setup the multi-rate retry state only when we're
	 * going to use it.  This assumes ath_hal_setuptxdesc
	 * initializes the descriptors (so we don't have to)
	 * when the hardware supports multi-rate retry and
	 * we don't use it.
	 */
	if (ismrr)
		ath_rate_setupxtxdesc(sc, an, ds, shortPreamble, rix);

	ath_tx_handoff(sc, txq, bf);
	return 0;
}

/*
 * Process completed xmit descriptors from the specified queue.
 * Returns the count of frames known to have been ack'd (used by
 * callers to refresh sc_lastrx for the phantom-bmiss workaround).
 */
static int
ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_buf *bf;
	struct ath_desc *ds, *ds0;
	struct ath_tx_status *ts;
	struct ieee80211_node *ni;
	struct ath_node *an;
	int sr, lr, pri, nacked;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
		__func__, txq->axq_qnum,
		(caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);
	nacked = 0;
	for (;;) {
		ATH_TXQ_LOCK(txq);
		txq->axq_intrcnt = 0;	/* reset periodic desc intr count */
		bf = STAILQ_FIRST(&txq->axq_q);
		if (bf == NULL) {
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		/* NB: status is in the last descriptor of the frame */
		ds0 = &bf->bf_desc[0];
		ds = &bf->bf_desc[bf->bf_nseg - 1];
		ts = &bf->bf_status.ds_txstat;
		status = ath_hal_txprocdesc(ah, ds, ts);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
			ath_printtxbuf(bf,
			    txq->axq_qnum, 0, status == HAL_OK);
#endif
		if (status == HAL_EINPROGRESS) {
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		ATH_TXQ_REMOVE_HEAD(txq, bf_list);
		if (txq->axq_depth == 0)
			txq->axq_link = NULL;
		ATH_TXQ_UNLOCK(txq);

		ni = bf->bf_node;
		if (ni != NULL) {
			an = ATH_NODE(ni);
			if (ts->ts_status == 0) {
				/* frame transmitted ok; update statistics */
				u_int8_t txant = ts->ts_antenna;
				sc->sc_stats.ast_ant_tx[txant]++;
				sc->sc_ant_tx[txant]++;
				if (ts->ts_rate & HAL_TXSTAT_ALTRATE)
					sc->sc_stats.ast_tx_altrate++;
				sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
				ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
					ts->ts_rssi);
				pri = M_WME_GETAC(bf->bf_m);
				if (pri >= WME_AC_VO)
					ic->ic_wme.wme_hipri_traffic++;
				if ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)
					ni->ni_inact = ni->ni_inact_reload;
			} else {
				if (ts->ts_status & HAL_TXERR_XRETRY)
					sc->sc_stats.ast_tx_xretries++;
				if (ts->ts_status & HAL_TXERR_FIFO)
					sc->sc_stats.ast_tx_fifoerr++;
				if (ts->ts_status & HAL_TXERR_FILT)
					sc->sc_stats.ast_tx_filtered++;
				if (bf->bf_m->m_flags & M_FF)
					sc->sc_stats.ast_ff_txerr++;
			}
			sr = ts->ts_shortretry;
			lr = ts->ts_longretry;
			sc->sc_stats.ast_tx_shortretry += sr;
			sc->sc_stats.ast_tx_longretry += lr;
			/*
			 * Hand the descriptor to the rate control algorithm.
			 */
			if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
			    (bf->bf_txflags & HAL_TXDESC_NOACK) == 0) {
				/*
				 * If frame was ack'd update the last rx time
				 * used to workaround phantom bmiss interrupts.
				 */
				if (ts->ts_status == 0)
					nacked++;
				ath_rate_tx_complete(sc, an, bf);
			}
			/*
			 * Do any tx complete callback.  Note this must
			 * be done before releasing the node reference.
			 */
			if (bf->bf_m->m_flags & M_TXCB)
				ieee80211_process_callback(ni, bf->bf_m,
					ts->ts_status);
			/*
			 * Reclaim reference to node.
			 *
			 * NB: the node may be reclaimed here if, for example
			 *     this is a DEAUTH message that was sent and the
			 *     node was timed out due to inactivity.
			 */
			ieee80211_free_node(ni);
		}
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		m_freem(bf->bf_m);
		bf->bf_m = NULL;
		bf->bf_node = NULL;

		/* recycle the buffer onto the free list */
		ATH_TXBUF_LOCK(sc);
		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
		ATH_TXBUF_UNLOCK(sc);
	}
	/*
	 * Flush fast-frame staging queue when traffic slows.
	 */
	if (txq->axq_depth <= 1)
		ath_ff_stageq_flush(sc, txq, ath_ff_always);
	return nacked;
}

/*
 * Check whether h/w queue qnum is among those the hal reports for
 * the tx interrupt.  NB(review): txqs appears to be in/out — it is
 * seeded with our queue's bit before the hal call; confirm exact
 * semantics against ath_hal_gettxintrtxqs.
 */
static __inline int
txqactive(struct ath_hal *ah, int qnum)
{
	u_int32_t txqs = 1<<qnum;
	ath_hal_gettxintrtxqs(ah, &txqs);
	return (txqs & (1<<qnum));
}

/*
 * Deferred processing of transmit interrupt; special-cased
 * for a single hardware transmit queue (e.g. 5210 and 5211).
 */
static void
ath_tx_proc_q0(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;

	if (txqactive(sc->sc_ah, 0) && ath_tx_processq(sc, &sc->sc_txq[0]))
		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
	if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
		ath_tx_processq(sc, sc->sc_cabq);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_timer = 0;

	if (sc->sc_softled)
		ath_led_event(sc, ATH_LED_TX);

	ath_start(ifp);
}

/*
 * Deferred processing of transmit interrupt; special-cased
 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
 */
static void
ath_tx_proc_q0123(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	int nacked;

	/*
	 * Process each active queue.
5152 */ 5153 nacked = 0; 5154 if (txqactive(sc->sc_ah, 0)) 5155 nacked += ath_tx_processq(sc, &sc->sc_txq[0]); 5156 if (txqactive(sc->sc_ah, 1)) 5157 nacked += ath_tx_processq(sc, &sc->sc_txq[1]); 5158 if (txqactive(sc->sc_ah, 2)) 5159 nacked += ath_tx_processq(sc, &sc->sc_txq[2]); 5160 if (txqactive(sc->sc_ah, 3)) 5161 nacked += ath_tx_processq(sc, &sc->sc_txq[3]); 5162 if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum)) 5163 ath_tx_processq(sc, sc->sc_cabq); 5164 if (nacked) 5165 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 5166 5167 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5168 ifp->if_timer = 0; 5169 5170 if (sc->sc_softled) 5171 ath_led_event(sc, ATH_LED_TX); 5172 5173 ath_start(ifp); 5174} 5175 5176/* 5177 * Deferred processing of transmit interrupt. 5178 */ 5179static void 5180ath_tx_proc(void *arg, int npending) 5181{ 5182 struct ath_softc *sc = arg; 5183 struct ifnet *ifp = sc->sc_ifp; 5184 int i, nacked; 5185 5186 /* 5187 * Process each active queue. 5188 */ 5189 nacked = 0; 5190 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 5191 if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i)) 5192 nacked += ath_tx_processq(sc, &sc->sc_txq[i]); 5193 if (nacked) 5194 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 5195 5196 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5197 ifp->if_timer = 0; 5198 5199 if (sc->sc_softled) 5200 ath_led_event(sc, ATH_LED_TX); 5201 5202 ath_start(ifp); 5203} 5204 5205static void 5206ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) 5207{ 5208#ifdef ATH_DEBUG 5209 struct ath_hal *ah = sc->sc_ah; 5210#endif 5211 struct ieee80211_node *ni; 5212 struct ath_buf *bf; 5213 u_int ix; 5214 5215 /* 5216 * NB: this assumes output has been stopped and 5217 * we do not need to block ath_tx_tasklet 5218 */ 5219 for (ix = 0;; ix++) { 5220 ATH_TXQ_LOCK(txq); 5221 bf = STAILQ_FIRST(&txq->axq_q); 5222 if (bf == NULL) { 5223 txq->axq_link = NULL; 5224 ATH_TXQ_UNLOCK(txq); 5225 break; 5226 } 5227 ATH_TXQ_REMOVE_HEAD(txq, bf_list); 5228 ATH_TXQ_UNLOCK(txq); 5229#ifdef 
ATH_DEBUG 5230 if (sc->sc_debug & ATH_DEBUG_RESET) { 5231 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 5232 5233 ath_printtxbuf(bf, txq->axq_qnum, ix, 5234 ath_hal_txprocdesc(ah, bf->bf_desc, 5235 &bf->bf_status.ds_txstat) == HAL_OK); 5236 ieee80211_dump_pkt(ic, mtod(bf->bf_m, caddr_t), 5237 bf->bf_m->m_len, 0, -1); 5238 } 5239#endif /* ATH_DEBUG */ 5240 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 5241 ni = bf->bf_node; 5242 bf->bf_node = NULL; 5243 if (ni != NULL) { 5244 /* 5245 * Do any callback and reclaim the node reference. 5246 */ 5247 if (bf->bf_m->m_flags & M_TXCB) 5248 ieee80211_process_callback(ni, bf->bf_m, -1); 5249 ieee80211_free_node(ni); 5250 } 5251 m_freem(bf->bf_m); 5252 bf->bf_m = NULL; 5253 5254 ATH_TXBUF_LOCK(sc); 5255 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 5256 ATH_TXBUF_UNLOCK(sc); 5257 } 5258} 5259 5260static void 5261ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) 5262{ 5263 struct ath_hal *ah = sc->sc_ah; 5264 5265 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 5266 __func__, txq->axq_qnum, 5267 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), 5268 txq->axq_link); 5269 (void) ath_hal_stoptxdma(ah, txq->axq_qnum); 5270} 5271 5272/* 5273 * Drain the transmit queues and reclaim resources. 
5274 */ 5275static void 5276ath_draintxq(struct ath_softc *sc) 5277{ 5278 struct ath_hal *ah = sc->sc_ah; 5279 struct ifnet *ifp = sc->sc_ifp; 5280 int i; 5281 5282 /* XXX return value */ 5283 if (!sc->sc_invalid) { 5284 /* don't touch the hardware if marked invalid */ 5285 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 5286 __func__, sc->sc_bhalq, 5287 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq), 5288 NULL); 5289 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); 5290 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 5291 if (ATH_TXQ_SETUP(sc, i)) 5292 ath_tx_stopdma(sc, &sc->sc_txq[i]); 5293 } 5294 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 5295 if (ATH_TXQ_SETUP(sc, i)) 5296 ath_tx_draintxq(sc, &sc->sc_txq[i]); 5297#ifdef ATH_DEBUG 5298 if (sc->sc_debug & ATH_DEBUG_RESET) { 5299 struct ath_buf *bf = STAILQ_FIRST(&sc->sc_bbuf); 5300 if (bf != NULL && bf->bf_m != NULL) { 5301 ath_printtxbuf(bf, sc->sc_bhalq, 0, 5302 ath_hal_txprocdesc(ah, bf->bf_desc, 5303 &bf->bf_status.ds_txstat) == HAL_OK); 5304 ieee80211_dump_pkt(ifp->if_l2com, mtod(bf->bf_m, caddr_t), 5305 bf->bf_m->m_len, 0, -1); 5306 } 5307 } 5308#endif /* ATH_DEBUG */ 5309 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5310 ifp->if_timer = 0; 5311} 5312 5313/* 5314 * Disable the receive h/w in preparation for a reset. 
5315 */ 5316static void 5317ath_stoprecv(struct ath_softc *sc) 5318{ 5319#define PA2DESC(_sc, _pa) \ 5320 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ 5321 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) 5322 struct ath_hal *ah = sc->sc_ah; 5323 5324 ath_hal_stoppcurecv(ah); /* disable PCU */ 5325 ath_hal_setrxfilter(ah, 0); /* clear recv filter */ 5326 ath_hal_stopdmarecv(ah); /* disable DMA engine */ 5327 DELAY(3000); /* 3ms is long enough for 1 frame */ 5328#ifdef ATH_DEBUG 5329 if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) { 5330 struct ath_buf *bf; 5331 u_int ix; 5332 5333 printf("%s: rx queue %p, link %p\n", __func__, 5334 (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink); 5335 ix = 0; 5336 STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 5337 struct ath_desc *ds = bf->bf_desc; 5338 struct ath_rx_status *rs = &bf->bf_status.ds_rxstat; 5339 HAL_STATUS status = ath_hal_rxprocdesc(ah, ds, 5340 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs); 5341 if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL)) 5342 ath_printrxbuf(bf, ix, status == HAL_OK); 5343 ix++; 5344 } 5345 } 5346#endif 5347 if (sc->sc_rxpending != NULL) { 5348 m_freem(sc->sc_rxpending); 5349 sc->sc_rxpending = NULL; 5350 } 5351 sc->sc_rxlink = NULL; /* just in case */ 5352#undef PA2DESC 5353} 5354 5355/* 5356 * Enable the receive h/w following a reset. 5357 */ 5358static int 5359ath_startrecv(struct ath_softc *sc) 5360{ 5361 struct ath_hal *ah = sc->sc_ah; 5362 struct ath_buf *bf; 5363 5364 sc->sc_rxlink = NULL; 5365 sc->sc_rxpending = NULL; 5366 STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 5367 int error = ath_rxbuf_init(sc, bf); 5368 if (error != 0) { 5369 DPRINTF(sc, ATH_DEBUG_RECV, 5370 "%s: ath_rxbuf_init failed %d\n", 5371 __func__, error); 5372 return error; 5373 } 5374 } 5375 5376 bf = STAILQ_FIRST(&sc->sc_rxbuf); 5377 ath_hal_putrxbuf(ah, bf->bf_daddr); 5378 ath_hal_rxena(ah); /* enable recv descriptors */ 5379 ath_mode_init(sc); /* set filters, etc. 
*/ 5380 ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */ 5381 return 0; 5382} 5383 5384/* 5385 * Update internal state after a channel change. 5386 */ 5387static void 5388ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) 5389{ 5390 enum ieee80211_phymode mode; 5391 5392 /* 5393 * Change channels and update the h/w rate map 5394 * if we're switching; e.g. 11a to 11b/g. 5395 */ 5396 if (IEEE80211_IS_CHAN_HALF(chan)) 5397 mode = IEEE80211_MODE_HALF; 5398 else if (IEEE80211_IS_CHAN_QUARTER(chan)) 5399 mode = IEEE80211_MODE_QUARTER; 5400 else 5401 mode = ieee80211_chan2mode(chan); 5402 if (mode != sc->sc_curmode) 5403 ath_setcurmode(sc, mode); 5404 5405 sc->sc_rx_th.wr_chan_flags = htole32(chan->ic_flags); 5406 sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags; 5407 sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq); 5408 sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq; 5409 sc->sc_rx_th.wr_chan_ieee = chan->ic_ieee; 5410 sc->sc_tx_th.wt_chan_ieee = sc->sc_rx_th.wr_chan_ieee; 5411 sc->sc_rx_th.wr_chan_maxpow = chan->ic_maxregpower; 5412 sc->sc_tx_th.wt_chan_maxpow = sc->sc_rx_th.wr_chan_maxpow; 5413} 5414 5415/* 5416 * Set/change channels. If the channel is really being changed, 5417 * it's done by reseting the chip. To accomplish this we must 5418 * first cleanup any pending DMA, then restart stuff after a la 5419 * ath_init. 5420 */ 5421static int 5422ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 5423{ 5424 struct ifnet *ifp = sc->sc_ifp; 5425 struct ieee80211com *ic = ifp->if_l2com; 5426 struct ath_hal *ah = sc->sc_ah; 5427 HAL_CHANNEL hchan; 5428 5429 /* 5430 * Convert to a HAL channel description with 5431 * the flags constrained to reflect the current 5432 * operating mode. 
5433 */ 5434 ath_mapchan(&hchan, chan); 5435 5436 DPRINTF(sc, ATH_DEBUG_RESET, 5437 "%s: %u (%u MHz, hal flags 0x%x) -> %u (%u MHz, hal flags 0x%x)\n", 5438 __func__, 5439 ath_hal_mhz2ieee(ah, sc->sc_curchan.channel, 5440 sc->sc_curchan.channelFlags), 5441 sc->sc_curchan.channel, sc->sc_curchan.channelFlags, 5442 ath_hal_mhz2ieee(ah, hchan.channel, hchan.channelFlags), 5443 hchan.channel, hchan.channelFlags); 5444 if (hchan.channel != sc->sc_curchan.channel || 5445 hchan.channelFlags != sc->sc_curchan.channelFlags) { 5446 HAL_STATUS status; 5447 5448 /* 5449 * To switch channels clear any pending DMA operations; 5450 * wait long enough for the RX fifo to drain, reset the 5451 * hardware at the new frequency, and then re-enable 5452 * the relevant bits of the h/w. 5453 */ 5454 ath_hal_intrset(ah, 0); /* disable interrupts */ 5455 ath_draintxq(sc); /* clear pending tx frames */ 5456 ath_stoprecv(sc); /* turn off frame recv */ 5457 if (!ath_hal_reset(ah, sc->sc_opmode, &hchan, AH_TRUE, &status)) { 5458 if_printf(ifp, "%s: unable to reset " 5459 "channel %u (%u Mhz, flags 0x%x hal flags 0x%x), " 5460 "hal status %u\n", __func__, 5461 ieee80211_chan2ieee(ic, chan), chan->ic_freq, 5462 chan->ic_flags, hchan.channelFlags, status); 5463 return EIO; 5464 } 5465 sc->sc_curchan = hchan; 5466 sc->sc_diversity = ath_hal_getdiversity(ah); 5467 sc->sc_calinterval = 1; 5468 sc->sc_caltries = 0; 5469 5470 /* 5471 * Re-enable rx framework. 5472 */ 5473 if (ath_startrecv(sc) != 0) { 5474 if_printf(ifp, "%s: unable to restart recv logic\n", 5475 __func__); 5476 return EIO; 5477 } 5478 5479 /* 5480 * Change channels and update the h/w rate map 5481 * if we're switching; e.g. 11a to 11b/g. 5482 */ 5483 ath_chan_change(sc, chan); 5484 5485 /* 5486 * Re-enable interrupts. 5487 */ 5488 ath_hal_intrset(ah, sc->sc_imask); 5489 } 5490 return 0; 5491} 5492 5493/* 5494 * Periodically recalibrate the PHY to account 5495 * for temperature/environment changes. 
5496 */ 5497static void 5498ath_calibrate(void *arg) 5499{ 5500 struct ath_softc *sc = arg; 5501 struct ath_hal *ah = sc->sc_ah; 5502 HAL_BOOL iqCalDone; 5503 5504 sc->sc_stats.ast_per_cal++; 5505 5506 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 5507 /* 5508 * Rfgain is out of bounds, reset the chip 5509 * to load new gain values. 5510 */ 5511 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 5512 "%s: rfgain change\n", __func__); 5513 sc->sc_stats.ast_per_rfgain++; 5514 ath_reset(sc->sc_ifp); 5515 } 5516 if (!ath_hal_calibrate(ah, &sc->sc_curchan, &iqCalDone)) { 5517 DPRINTF(sc, ATH_DEBUG_ANY, 5518 "%s: calibration of channel %u failed\n", 5519 __func__, sc->sc_curchan.channel); 5520 sc->sc_stats.ast_per_calfail++; 5521 } 5522 /* 5523 * Calibrate noise floor data again in case of change. 5524 */ 5525 ath_hal_process_noisefloor(ah); 5526 /* 5527 * Poll more frequently when the IQ calibration is in 5528 * progress to speedup loading the final settings. 5529 * We temper this aggressive polling with an exponential 5530 * back off after 4 tries up to ath_calinterval. 5531 */ 5532 if (iqCalDone || sc->sc_calinterval >= ath_calinterval) { 5533 sc->sc_caltries = 0; 5534 sc->sc_calinterval = ath_calinterval; 5535 } else if (sc->sc_caltries > 4) { 5536 sc->sc_caltries = 0; 5537 sc->sc_calinterval <<= 1; 5538 if (sc->sc_calinterval > ath_calinterval) 5539 sc->sc_calinterval = ath_calinterval; 5540 } 5541 KASSERT(0 < sc->sc_calinterval && sc->sc_calinterval <= ath_calinterval, 5542 ("bad calibration interval %u", sc->sc_calinterval)); 5543 5544 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 5545 "%s: next +%u (%siqCalDone tries %u)\n", __func__, 5546 sc->sc_calinterval, iqCalDone ? 
"" : "!", sc->sc_caltries); 5547 sc->sc_caltries++; 5548 callout_reset(&sc->sc_cal_ch, sc->sc_calinterval * hz, 5549 ath_calibrate, sc); 5550} 5551 5552static void 5553ath_scan_start(struct ieee80211com *ic) 5554{ 5555 struct ifnet *ifp = ic->ic_ifp; 5556 struct ath_softc *sc = ifp->if_softc; 5557 struct ath_hal *ah = sc->sc_ah; 5558 u_int32_t rfilt; 5559 5560 /* XXX calibration timer? */ 5561 5562 sc->sc_scanning = 1; 5563 sc->sc_syncbeacon = 0; 5564 rfilt = ath_calcrxfilter(sc); 5565 ath_hal_setrxfilter(ah, rfilt); 5566 ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0); 5567 5568 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", 5569 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr)); 5570} 5571 5572static void 5573ath_scan_end(struct ieee80211com *ic) 5574{ 5575 struct ifnet *ifp = ic->ic_ifp; 5576 struct ath_softc *sc = ifp->if_softc; 5577 struct ath_hal *ah = sc->sc_ah; 5578 u_int32_t rfilt; 5579 5580 sc->sc_scanning = 0; 5581 rfilt = ath_calcrxfilter(sc); 5582 ath_hal_setrxfilter(ah, rfilt); 5583 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 5584 5585 ath_hal_process_noisefloor(ah); 5586 5587 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 5588 __func__, rfilt, ether_sprintf(sc->sc_curbssid), 5589 sc->sc_curaid); 5590} 5591 5592static void 5593ath_set_channel(struct ieee80211com *ic) 5594{ 5595 struct ifnet *ifp = ic->ic_ifp; 5596 struct ath_softc *sc = ifp->if_softc; 5597 5598 (void) ath_chan_set(sc, ic->ic_curchan); 5599 /* 5600 * If we are returning to our bss channel then mark state 5601 * so the next recv'd beacon's tsf will be used to sync the 5602 * beacon timers. Note that since we only hear beacons in 5603 * sta/ibss mode this has no effect in other operating modes. 5604 */ 5605 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan) 5606 sc->sc_syncbeacon = 1; 5607} 5608 5609/* 5610 * Walk the vap list and check if there any vap's in RUN state. 
5611 */ 5612static int 5613ath_isanyrunningvaps(struct ieee80211vap *this) 5614{ 5615 struct ieee80211com *ic = this->iv_ic; 5616 struct ieee80211vap *vap; 5617 5618 IEEE80211_LOCK_ASSERT(ic); 5619 5620 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { 5621 if (vap != this && vap->iv_state == IEEE80211_S_RUN) 5622 return 1; 5623 } 5624 return 0; 5625} 5626 5627static int 5628ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 5629{ 5630 struct ieee80211com *ic = vap->iv_ic; 5631 struct ath_softc *sc = ic->ic_ifp->if_softc; 5632 struct ath_vap *avp = ATH_VAP(vap); 5633 struct ath_hal *ah = sc->sc_ah; 5634 struct ieee80211_node *ni = NULL; 5635 int i, error, stamode; 5636 u_int32_t rfilt; 5637 static const HAL_LED_STATE leds[] = { 5638 HAL_LED_INIT, /* IEEE80211_S_INIT */ 5639 HAL_LED_SCAN, /* IEEE80211_S_SCAN */ 5640 HAL_LED_AUTH, /* IEEE80211_S_AUTH */ 5641 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ 5642 HAL_LED_RUN, /* IEEE80211_S_CAC */ 5643 HAL_LED_RUN, /* IEEE80211_S_RUN */ 5644 HAL_LED_RUN, /* IEEE80211_S_CSA */ 5645 HAL_LED_RUN, /* IEEE80211_S_SLEEP */ 5646 }; 5647 5648 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, 5649 ieee80211_state_name[vap->iv_state], 5650 ieee80211_state_name[nstate]); 5651 5652 callout_stop(&sc->sc_cal_ch); 5653 ath_hal_setledstate(ah, leds[nstate]); /* set LED */ 5654 5655 if (nstate == IEEE80211_S_SCAN) { 5656 /* 5657 * Scanning: turn off beacon miss and don't beacon. 5658 * Mark beacon state so when we reach RUN state we'll 5659 * [re]setup beacons. Unblock the task q thread so 5660 * deferred interrupt processing is done. 
5661 */ 5662 ath_hal_intrset(ah, 5663 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); 5664 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 5665 sc->sc_beacons = 0; 5666 taskqueue_unblock(sc->sc_tq); 5667 } 5668 5669 ni = vap->iv_bss; 5670 rfilt = ath_calcrxfilter(sc); 5671 stamode = (vap->iv_opmode == IEEE80211_M_STA || 5672 vap->iv_opmode == IEEE80211_M_IBSS); 5673 if (stamode && nstate == IEEE80211_S_RUN) { 5674 sc->sc_curaid = ni->ni_associd; 5675 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid); 5676 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 5677 } 5678 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 5679 __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid); 5680 ath_hal_setrxfilter(ah, rfilt); 5681 5682 /* XXX is this to restore keycache on resume? */ 5683 if (vap->iv_opmode != IEEE80211_M_STA && 5684 (vap->iv_flags & IEEE80211_F_PRIVACY)) { 5685 for (i = 0; i < IEEE80211_WEP_NKID; i++) 5686 if (ath_hal_keyisvalid(ah, i)) 5687 ath_hal_keysetmac(ah, i, ni->ni_bssid); 5688 } 5689 5690 /* 5691 * Invoke the parent method to do net80211 work. 5692 */ 5693 error = avp->av_newstate(vap, nstate, arg); 5694 if (error != 0) 5695 goto bad; 5696 5697 if (nstate == IEEE80211_S_RUN) { 5698 /* NB: collect bss node again, it may have changed */ 5699 ni = vap->iv_bss; 5700 5701 DPRINTF(sc, ATH_DEBUG_STATE, 5702 "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s " 5703 "capinfo 0x%04x chan %d\n", __func__, 5704 vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), 5705 ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); 5706 5707 switch (vap->iv_opmode) { 5708 case IEEE80211_M_HOSTAP: 5709 case IEEE80211_M_IBSS: 5710 /* 5711 * Allocate and setup the beacon frame. 5712 * 5713 * Stop any previous beacon DMA. This may be 5714 * necessary, for example, when an ibss merge 5715 * causes reconfiguration; there will be a state 5716 * transition from RUN->RUN that means we may 5717 * be called with beacon transmission active. 
5718 */ 5719 ath_hal_stoptxdma(ah, sc->sc_bhalq); 5720 5721 error = ath_beacon_alloc(sc, ni); 5722 if (error != 0) 5723 goto bad; 5724 /* 5725 * If joining an adhoc network defer beacon timer 5726 * configuration to the next beacon frame so we 5727 * have a current TSF to use. Otherwise we're 5728 * starting an ibss/bss so there's no need to delay; 5729 * if this is the first vap moving to RUN state, then 5730 * beacon state needs to be [re]configured. 5731 */ 5732 if (vap->iv_opmode == IEEE80211_M_IBSS && 5733 ni->ni_tstamp.tsf != 0) { 5734 sc->sc_syncbeacon = 1; 5735 } else if (!sc->sc_beacons) { 5736 ath_beacon_config(sc, vap); 5737 sc->sc_beacons = 1; 5738 } 5739 break; 5740 case IEEE80211_M_STA: 5741 /* 5742 * Defer beacon timer configuration to the next 5743 * beacon frame so we have a current TSF to use 5744 * (any TSF collected when scanning is likely old). 5745 */ 5746 sc->sc_syncbeacon = 1; 5747 break; 5748 case IEEE80211_M_MONITOR: 5749 /* 5750 * Monitor mode vaps have only INIT->RUN and RUN->RUN 5751 * transitions so we must re-enable interrupts here to 5752 * handle the case of a single monitor mode vap. 5753 */ 5754 ath_hal_intrset(ah, sc->sc_imask); 5755 break; 5756 case IEEE80211_M_WDS: 5757 break; 5758 default: 5759 break; 5760 } 5761 /* 5762 * Let the hal process statistics collected during a 5763 * scan so it can provide calibrated noise floor data. 5764 */ 5765 ath_hal_process_noisefloor(ah); 5766 /* 5767 * Reset rssi stats; maybe not the best place... 5768 */ 5769 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; 5770 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; 5771 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; 5772 /* 5773 * Finally, start any timers and the task q thread 5774 * (in case we didn't go through SCAN state). 
5775 */ 5776 if (sc->sc_calinterval != 0) { 5777 /* start periodic recalibration timer */ 5778 callout_reset(&sc->sc_cal_ch, sc->sc_calinterval * hz, 5779 ath_calibrate, sc); 5780 } 5781 taskqueue_unblock(sc->sc_tq); 5782 } else if (nstate == IEEE80211_S_INIT) { 5783 /* 5784 * If there are no vaps left in RUN state then 5785 * shutdown host/driver operation: 5786 * o disable interrupts 5787 * o disable the task queue thread 5788 * o mark beacon processing as stopped 5789 */ 5790 if (!ath_isanyrunningvaps(vap)) { 5791 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 5792 /* disable interrupts */ 5793 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL); 5794 taskqueue_block(sc->sc_tq); 5795 sc->sc_beacons = 0; 5796 } 5797 } 5798bad: 5799 return error; 5800} 5801 5802/* 5803 * Allocate a key cache slot to the station so we can 5804 * setup a mapping from key index to node. The key cache 5805 * slot is needed for managing antenna state and for 5806 * compression when stations do not use crypto. We do 5807 * it uniliaterally here; if crypto is employed this slot 5808 * will be reassigned. 5809 */ 5810static void 5811ath_setup_stationkey(struct ieee80211_node *ni) 5812{ 5813 struct ieee80211vap *vap = ni->ni_vap; 5814 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 5815 ieee80211_keyix keyix, rxkeyix; 5816 5817 if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) { 5818 /* 5819 * Key cache is full; we'll fall back to doing 5820 * the more expensive lookup in software. Note 5821 * this also means no h/w compression. 5822 */ 5823 /* XXX msg+statistic */ 5824 } else { 5825 /* XXX locking? */ 5826 ni->ni_ucastkey.wk_keyix = keyix; 5827 ni->ni_ucastkey.wk_rxkeyix = rxkeyix; 5828 IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr); 5829 /* NB: this will create a pass-thru key entry */ 5830 ath_keyset(sc, &ni->ni_ucastkey, vap->iv_bss); 5831 } 5832} 5833 5834/* 5835 * Setup driver-specific state for a newly associated node. 
5836 * Note that we're called also on a re-associate, the isnew 5837 * param tells us if this is the first time or not. 5838 */ 5839static void 5840ath_newassoc(struct ieee80211_node *ni, int isnew) 5841{ 5842 struct ath_node *an = ATH_NODE(ni); 5843 struct ieee80211vap *vap = ni->ni_vap; 5844 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 5845 const struct ieee80211_txparam *tp = ni->ni_txparms; 5846 5847 an->an_mcastrix = ath_tx_findrix(sc->sc_currates, tp->mcastrate); 5848 an->an_mgmtrix = ath_tx_findrix(sc->sc_currates, tp->mgmtrate); 5849 5850 ath_rate_newassoc(sc, an, isnew); 5851 if (isnew && 5852 (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey && 5853 ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE) 5854 ath_setup_stationkey(ni); 5855} 5856 5857static int 5858getchannels(struct ath_softc *sc, int *nchans, struct ieee80211_channel chans[], 5859 int cc, int ecm, int outdoor) 5860{ 5861 struct ath_hal *ah = sc->sc_ah; 5862 HAL_CHANNEL *halchans; 5863 int i, nhalchans, error; 5864 5865 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: cc %u outdoor %u ecm %u\n", 5866 __func__, cc, outdoor, ecm); 5867 5868 halchans = malloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL), 5869 M_TEMP, M_NOWAIT | M_ZERO); 5870 if (halchans == NULL) { 5871 device_printf(sc->sc_dev, 5872 "%s: unable to allocate channel table\n", __func__); 5873 return ENOMEM; 5874 } 5875 error = 0; 5876 if (!ath_hal_init_channels(ah, halchans, IEEE80211_CHAN_MAX, &nhalchans, 5877 NULL, 0, NULL, cc, HAL_MODE_ALL, outdoor, ecm)) { 5878 u_int32_t rd; 5879 (void) ath_hal_getregdomain(ah, &rd); 5880 device_printf(sc->sc_dev, "ath_hal_init_channels failed, " 5881 "rd %d cc %u outdoor %u ecm %u\n", rd, cc, outdoor, ecm); 5882 error = EINVAL; 5883 goto done; 5884 } 5885 if (nchans == NULL) /* no table requested */ 5886 goto done; 5887 5888 /* 5889 * Convert HAL channels to ieee80211 ones. 
5890 */ 5891 for (i = 0; i < nhalchans; i++) { 5892 HAL_CHANNEL *c = &halchans[i]; 5893 struct ieee80211_channel *ichan = &chans[i]; 5894 5895 ichan->ic_ieee = ath_hal_mhz2ieee(ah, c->channel, 5896 c->channelFlags); 5897 if (bootverbose) 5898 device_printf(sc->sc_dev, "hal channel %u/%x -> %u " 5899 "maxpow %d minpow %d maxreg %d\n", 5900 c->channel, c->channelFlags, ichan->ic_ieee, 5901 c->maxTxPower, c->minTxPower, c->maxRegTxPower); 5902 ichan->ic_freq = c->channel; 5903 5904 if ((c->channelFlags & CHANNEL_PUREG) == CHANNEL_PUREG) { 5905 /* 5906 * Except for AR5211, HAL's PUREG means mixed 5907 * DSSS and OFDM. 5908 */ 5909 ichan->ic_flags = c->channelFlags &~ CHANNEL_PUREG; 5910 ichan->ic_flags |= IEEE80211_CHAN_G; 5911 } else { 5912 ichan->ic_flags = c->channelFlags; 5913 } 5914 5915 if (ath_hal_isgsmsku(ah)) { 5916 /* remap to true frequencies */ 5917 ichan->ic_freq = 922 + (2422 - ichan->ic_freq); 5918 ichan->ic_flags |= IEEE80211_CHAN_GSM; 5919 ichan->ic_ieee = ieee80211_mhz2ieee(ichan->ic_freq, 5920 ichan->ic_flags); 5921 } 5922 ichan->ic_maxregpower = c->maxRegTxPower; /* dBm */ 5923 /* XXX: old hal's don't provide maxTxPower for some parts */ 5924 ichan->ic_maxpower = (c->maxTxPower != 0) ? 
5925 c->maxTxPower : 2*c->maxRegTxPower; /* 1/2 dBm */ 5926 ichan->ic_minpower = c->minTxPower; /* 1/2 dBm */ 5927 } 5928 *nchans = nhalchans; 5929done: 5930 free(halchans, M_TEMP); 5931 return error; 5932} 5933 5934/* XXX hard to include ieee80211_regdomain.h right now */ 5935#define SKU_DEBUG 0x1ff 5936 5937static void 5938ath_maprd(const struct ieee80211_regdomain *rd, 5939 u_int32_t *ath_rd, u_int32_t *ath_cc) 5940{ 5941 /* map SKU's to Atheros sku's */ 5942 switch (rd->regdomain) { 5943 case SKU_DEBUG: 5944 if (rd->country == 0) { 5945 *ath_rd = 0; 5946 *ath_cc = CTRY_DEBUG; 5947 return; 5948 } 5949 break; 5950 } 5951 *ath_rd = rd->regdomain; 5952 *ath_cc = rd->country; 5953} 5954 5955static int 5956ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 5957 int nchans, struct ieee80211_channel chans[]) 5958{ 5959 struct ath_softc *sc = ic->ic_ifp->if_softc; 5960 struct ath_hal *ah = sc->sc_ah; 5961 u_int32_t ord, regdomain, cc; 5962 int error; 5963 5964 (void) ath_hal_getregdomain(ah, &ord); 5965 ath_maprd(rd, ®domain, &cc); 5966 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 5967 "%s: rd %u cc %u location %c ecm %u (mapped rd %u cc %u)\n", 5968 __func__, rd->regdomain, rd->country, rd->location, rd->ecm, 5969 regdomain, cc); 5970 ath_hal_setregdomain(ah, regdomain); 5971 5972 error = getchannels(sc, &nchans, chans, cc, 5973 rd->ecm ? AH_TRUE : AH_FALSE, 5974 rd->location != 'I' ? AH_TRUE : AH_FALSE); 5975 if (error != 0) { 5976 /* 5977 * Restore previous state. 5978 */ 5979 ath_hal_setregdomain(ah, ord); 5980 (void) getchannels(sc, NULL, NULL, ic->ic_regdomain.country, 5981 ic->ic_regdomain.ecm ? AH_TRUE : AH_FALSE, 5982 ic->ic_regdomain.location != 'I' ? 
AH_TRUE : AH_FALSE); 5983 return error; 5984 } 5985 return 0; 5986} 5987 5988static void 5989ath_getradiocaps(struct ieee80211com *ic, 5990 int *nchans, struct ieee80211_channel chans[]) 5991{ 5992 struct ath_softc *sc = ic->ic_ifp->if_softc; 5993 struct ath_hal *ah = sc->sc_ah; 5994 u_int32_t ord; 5995 5996 (void) ath_hal_getregdomain(ah, &ord); 5997 5998 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d, ord %u\n", 5999 __func__, 0, CTRY_DEBUG, ord); 6000 6001 ath_hal_setregdomain(ah, 0); 6002 /* XXX not quite right but close enough for now */ 6003 getchannels(sc, nchans, chans, CTRY_DEBUG, AH_TRUE, AH_FALSE); 6004 6005 /* NB: restore previous state */ 6006 ath_hal_setregdomain(ah, ord); 6007 (void) getchannels(sc, NULL, NULL, ic->ic_regdomain.country, 6008 ic->ic_regdomain.ecm ? AH_TRUE : AH_FALSE, 6009 ic->ic_regdomain.location != 'I' ? AH_TRUE : AH_FALSE); 6010} 6011 6012static void 6013ath_mapsku(u_int32_t ath_rd, u_int32_t ath_cc, struct ieee80211_regdomain *rd) 6014{ 6015 rd->isocc[0] = ' '; /* XXX don't know */ 6016 rd->isocc[1] = ' '; 6017 6018 /* map Atheros sku's to SKU's */ 6019 switch (ath_rd) { 6020 case 0: 6021 if (ath_cc == CTRY_DEBUG) { 6022 rd->regdomain = SKU_DEBUG; 6023 rd->country = 0; 6024 return; 6025 } 6026 break; 6027 } 6028 /* XXX net80211 types too small */ 6029 rd->regdomain = (uint16_t) ath_rd; 6030 rd->country = (uint16_t) ath_cc; 6031} 6032 6033static int 6034ath_getchannels(struct ath_softc *sc) 6035{ 6036 struct ifnet *ifp = sc->sc_ifp; 6037 struct ieee80211com *ic = ifp->if_l2com; 6038 struct ath_hal *ah = sc->sc_ah; 6039 int error; 6040 6041 /* 6042 * Convert HAL channels to ieee80211 ones. 
6043 */ 6044 error = getchannels(sc, &ic->ic_nchans, ic->ic_channels, 6045 CTRY_DEFAULT, AH_TRUE, AH_FALSE); 6046 (void) ath_hal_getregdomain(ah, &sc->sc_eerd); 6047 ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */ 6048 if (error) { 6049 if_printf(ifp, "%s: unable to collect channel list from hal, " 6050 "error %d\n", __func__, error); 6051 if (error == EINVAL) { 6052 if_printf(ifp, "%s: regdomain likely %u country code %u\n", 6053 __func__, sc->sc_eerd, sc->sc_eecc); 6054 } 6055 return error; 6056 } 6057 ic->ic_regdomain.ecm = 1; 6058 ic->ic_regdomain.location = 'I'; 6059 ath_mapsku(sc->sc_eerd, sc->sc_eecc, &ic->ic_regdomain); 6060 6061 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 6062 "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c ecm %u\n", 6063 __func__, sc->sc_eerd, sc->sc_eecc, 6064 ic->ic_regdomain.regdomain, ic->ic_regdomain.country, 6065 ic->ic_regdomain.location, ic->ic_regdomain.ecm); 6066 return 0; 6067} 6068 6069static void 6070ath_led_done(void *arg) 6071{ 6072 struct ath_softc *sc = arg; 6073 6074 sc->sc_blinking = 0; 6075} 6076 6077/* 6078 * Turn the LED off: flip the pin and then set a timer so no 6079 * update will happen for the specified duration. 6080 */ 6081static void 6082ath_led_off(void *arg) 6083{ 6084 struct ath_softc *sc = arg; 6085 6086 ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon); 6087 callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, ath_led_done, sc); 6088} 6089 6090/* 6091 * Blink the LED according to the specified on/off times. 
6092 */ 6093static void 6094ath_led_blink(struct ath_softc *sc, int on, int off) 6095{ 6096 DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n", __func__, on, off); 6097 ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, sc->sc_ledon); 6098 sc->sc_blinking = 1; 6099 sc->sc_ledoff = off; 6100 callout_reset(&sc->sc_ledtimer, on, ath_led_off, sc); 6101} 6102 6103static void 6104ath_led_event(struct ath_softc *sc, int event) 6105{ 6106 6107 sc->sc_ledevent = ticks; /* time of last event */ 6108 if (sc->sc_blinking) /* don't interrupt active blink */ 6109 return; 6110 switch (event) { 6111 case ATH_LED_POLL: 6112 ath_led_blink(sc, sc->sc_hwmap[0].ledon, 6113 sc->sc_hwmap[0].ledoff); 6114 break; 6115 case ATH_LED_TX: 6116 ath_led_blink(sc, sc->sc_hwmap[sc->sc_txrate].ledon, 6117 sc->sc_hwmap[sc->sc_txrate].ledoff); 6118 break; 6119 case ATH_LED_RX: 6120 ath_led_blink(sc, sc->sc_hwmap[sc->sc_rxrate].ledon, 6121 sc->sc_hwmap[sc->sc_rxrate].ledoff); 6122 break; 6123 } 6124} 6125 6126static int 6127ath_rate_setup(struct ath_softc *sc, u_int mode) 6128{ 6129 struct ath_hal *ah = sc->sc_ah; 6130 const HAL_RATE_TABLE *rt; 6131 6132 switch (mode) { 6133 case IEEE80211_MODE_11A: 6134 rt = ath_hal_getratetable(ah, HAL_MODE_11A); 6135 break; 6136 case IEEE80211_MODE_HALF: 6137 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE); 6138 break; 6139 case IEEE80211_MODE_QUARTER: 6140 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE); 6141 break; 6142 case IEEE80211_MODE_11B: 6143 rt = ath_hal_getratetable(ah, HAL_MODE_11B); 6144 break; 6145 case IEEE80211_MODE_11G: 6146 rt = ath_hal_getratetable(ah, HAL_MODE_11G); 6147 break; 6148 case IEEE80211_MODE_TURBO_A: 6149 rt = ath_hal_getratetable(ah, HAL_MODE_108A); 6150#if HAL_ABI_VERSION < 0x07013100 6151 if (rt == NULL) /* XXX bandaid for old hal's */ 6152 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); 6153#endif 6154 break; 6155 case IEEE80211_MODE_TURBO_G: 6156 rt = ath_hal_getratetable(ah, HAL_MODE_108G); 6157 break; 6158 case 
IEEE80211_MODE_STURBO_A: 6159 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); 6160 break; 6161 case IEEE80211_MODE_11NA: 6162 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20); 6163 break; 6164 case IEEE80211_MODE_11NG: 6165 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20); 6166 break; 6167 default: 6168 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", 6169 __func__, mode); 6170 return 0; 6171 } 6172 sc->sc_rates[mode] = rt; 6173 return (rt != NULL); 6174} 6175 6176static void 6177ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 6178{ 6179#define N(a) (sizeof(a)/sizeof(a[0])) 6180 /* NB: on/off times from the Atheros NDIS driver, w/ permission */ 6181 static const struct { 6182 u_int rate; /* tx/rx 802.11 rate */ 6183 u_int16_t timeOn; /* LED on time (ms) */ 6184 u_int16_t timeOff; /* LED off time (ms) */ 6185 } blinkrates[] = { 6186 { 108, 40, 10 }, 6187 { 96, 44, 11 }, 6188 { 72, 50, 13 }, 6189 { 48, 57, 14 }, 6190 { 36, 67, 16 }, 6191 { 24, 80, 20 }, 6192 { 22, 100, 25 }, 6193 { 18, 133, 34 }, 6194 { 12, 160, 40 }, 6195 { 10, 200, 50 }, 6196 { 6, 240, 58 }, 6197 { 4, 267, 66 }, 6198 { 2, 400, 100 }, 6199 { 0, 500, 130 }, 6200 /* XXX half/quarter rates */ 6201 }; 6202 const HAL_RATE_TABLE *rt; 6203 int i, j; 6204 6205 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); 6206 rt = sc->sc_rates[mode]; 6207 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); 6208 for (i = 0; i < rt->rateCount; i++) 6209 sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i; 6210 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); 6211 for (i = 0; i < 32; i++) { 6212 u_int8_t ix = rt->rateCodeToIndex[i]; 6213 if (ix == 0xff) { 6214 sc->sc_hwmap[i].ledon = (500 * hz) / 1000; 6215 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000; 6216 continue; 6217 } 6218 sc->sc_hwmap[i].ieeerate = 6219 rt->info[ix].dot11Rate & IEEE80211_RATE_VAL; 6220 if (rt->info[ix].phy == IEEE80211_T_HT) 6221 sc->sc_hwmap[i].ieeerate |= 0x80; /* MCS */ 6222 sc->sc_hwmap[i].txflags = 
IEEE80211_RADIOTAP_F_DATAPAD; 6223 if (rt->info[ix].shortPreamble || 6224 rt->info[ix].phy == IEEE80211_T_OFDM) 6225 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE; 6226 /* NB: receive frames include FCS */ 6227 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags | 6228 IEEE80211_RADIOTAP_F_FCS; 6229 /* setup blink rate table to avoid per-packet lookup */ 6230 for (j = 0; j < N(blinkrates)-1; j++) 6231 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate) 6232 break; 6233 /* NB: this uses the last entry if the rate isn't found */ 6234 /* XXX beware of overlow */ 6235 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000; 6236 sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000; 6237 } 6238 sc->sc_currates = rt; 6239 sc->sc_curmode = mode; 6240 /* 6241 * All protection frames are transmited at 2Mb/s for 6242 * 11g, otherwise at 1Mb/s. 6243 */ 6244 if (mode == IEEE80211_MODE_11G) 6245 sc->sc_protrix = ath_tx_findrix(rt, 2*2); 6246 else 6247 sc->sc_protrix = ath_tx_findrix(rt, 2*1); 6248 /* NB: caller is responsible for reseting rate control state */ 6249#undef N 6250} 6251 6252#ifdef ATH_DEBUG 6253static void 6254ath_printrxbuf(const struct ath_buf *bf, u_int ix, int done) 6255{ 6256 const struct ath_rx_status *rs = &bf->bf_status.ds_rxstat; 6257 const struct ath_desc *ds; 6258 int i; 6259 6260 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { 6261 printf("R[%2u] (DS.V:%p DS.P:%p) L:%08x D:%08x%s\n" 6262 " %08x %08x %08x %08x\n", 6263 ix, ds, (const struct ath_desc *)bf->bf_daddr + i, 6264 ds->ds_link, ds->ds_data, 6265 !done ? "" : (rs->rs_status == 0) ? 
" *" : " !", 6266 ds->ds_ctl0, ds->ds_ctl1, 6267 ds->ds_hw[0], ds->ds_hw[1]); 6268 } 6269} 6270 6271static void 6272ath_printtxbuf(const struct ath_buf *bf, u_int qnum, u_int ix, int done) 6273{ 6274 const struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 6275 const struct ath_desc *ds; 6276 int i; 6277 6278 printf("Q%u[%3u]", qnum, ix); 6279 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { 6280 printf(" (DS.V:%p DS.P:%p) L:%08x D:%08x F:04%x%s\n" 6281 " %08x %08x %08x %08x %08x %08x\n", 6282 ds, (const struct ath_desc *)bf->bf_daddr + i, 6283 ds->ds_link, ds->ds_data, bf->bf_txflags, 6284 !done ? "" : (ts->ts_status == 0) ? " *" : " !", 6285 ds->ds_ctl0, ds->ds_ctl1, 6286 ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3]); 6287 } 6288} 6289#endif /* ATH_DEBUG */ 6290 6291static void 6292ath_watchdog(struct ifnet *ifp) 6293{ 6294 struct ath_softc *sc = ifp->if_softc; 6295 6296 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) { 6297 if_printf(ifp, "device timeout\n"); 6298 ath_reset(ifp); 6299 ifp->if_oerrors++; 6300 sc->sc_stats.ast_watchdog++; 6301 } 6302} 6303 6304#ifdef ATH_DIAGAPI 6305/* 6306 * Diagnostic interface to the HAL. This is used by various 6307 * tools to do things like retrieve register contents for 6308 * debugging. The mechanism is intentionally opaque so that 6309 * it can change frequently w/o concern for compatiblity. 6310 */ 6311static int 6312ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad) 6313{ 6314 struct ath_hal *ah = sc->sc_ah; 6315 u_int id = ad->ad_id & ATH_DIAG_ID; 6316 void *indata = NULL; 6317 void *outdata = NULL; 6318 u_int32_t insize = ad->ad_in_size; 6319 u_int32_t outsize = ad->ad_out_size; 6320 int error = 0; 6321 6322 if (ad->ad_id & ATH_DIAG_IN) { 6323 /* 6324 * Copy in data. 
6325 */ 6326 indata = malloc(insize, M_TEMP, M_NOWAIT); 6327 if (indata == NULL) { 6328 error = ENOMEM; 6329 goto bad; 6330 } 6331 error = copyin(ad->ad_in_data, indata, insize); 6332 if (error) 6333 goto bad; 6334 } 6335 if (ad->ad_id & ATH_DIAG_DYN) { 6336 /* 6337 * Allocate a buffer for the results (otherwise the HAL 6338 * returns a pointer to a buffer where we can read the 6339 * results). Note that we depend on the HAL leaving this 6340 * pointer for us to use below in reclaiming the buffer; 6341 * may want to be more defensive. 6342 */ 6343 outdata = malloc(outsize, M_TEMP, M_NOWAIT); 6344 if (outdata == NULL) { 6345 error = ENOMEM; 6346 goto bad; 6347 } 6348 } 6349 if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) { 6350 if (outsize < ad->ad_out_size) 6351 ad->ad_out_size = outsize; 6352 if (outdata != NULL) 6353 error = copyout(outdata, ad->ad_out_data, 6354 ad->ad_out_size); 6355 } else { 6356 error = EINVAL; 6357 } 6358bad: 6359 if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL) 6360 free(indata, M_TEMP); 6361 if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL) 6362 free(outdata, M_TEMP); 6363 return error; 6364} 6365#endif /* ATH_DIAGAPI */ 6366 6367static int 6368ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 6369{ 6370#define IS_RUNNING(ifp) \ 6371 ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) 6372 struct ath_softc *sc = ifp->if_softc; 6373 struct ieee80211com *ic = ifp->if_l2com; 6374 struct ifreq *ifr = (struct ifreq *)data; 6375 int error = 0; 6376 6377 switch (cmd) { 6378 case SIOCSIFFLAGS: 6379 ATH_LOCK(sc); 6380 if (IS_RUNNING(ifp)) { 6381 /* 6382 * To avoid rescanning another access point, 6383 * do not call ath_init() here. Instead, 6384 * only reflect promisc mode settings. 6385 */ 6386 ath_mode_init(sc); 6387 } else if (ifp->if_flags & IFF_UP) { 6388 /* 6389 * Beware of being called during attach/detach 6390 * to reset promiscuous mode. 
In that case we 6391 * will still be marked UP but not RUNNING. 6392 * However trying to re-init the interface 6393 * is the wrong thing to do as we've already 6394 * torn down much of our state. There's 6395 * probably a better way to deal with this. 6396 */ 6397 if (!sc->sc_invalid) 6398 ath_init(sc); /* XXX lose error */ 6399 } else { 6400 ath_stop_locked(ifp); 6401#ifdef notyet 6402 /* XXX must wakeup in places like ath_vap_delete */ 6403 if (!sc->sc_invalid) 6404 ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP); 6405#endif 6406 } 6407 ATH_UNLOCK(sc); 6408 break; 6409 case SIOCGIFMEDIA: 6410 case SIOCSIFMEDIA: 6411 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 6412 break; 6413 case SIOCGATHSTATS: 6414 /* NB: embed these numbers to get a consistent view */ 6415 sc->sc_stats.ast_tx_packets = ifp->if_opackets; 6416 sc->sc_stats.ast_rx_packets = ifp->if_ipackets; 6417#if 0 6418 ieee80211_getsignal(ic, &sc->sc_stats.ast_rx_rssi, 6419 &sc->sc_stats.ast_rx_noise); 6420#endif 6421 sc->sc_stats.ast_tx_rate = sc->sc_hwmap[sc->sc_txrate].ieeerate; 6422 return copyout(&sc->sc_stats, 6423 ifr->ifr_data, sizeof (sc->sc_stats)); 6424#ifdef ATH_DIAGAPI 6425 case SIOCGATHDIAG: 6426 error = ath_ioctl_diag(sc, (struct ath_diag *) ifr); 6427 break; 6428#endif 6429 case SIOCGIFADDR: 6430 error = ether_ioctl(ifp, cmd, data); 6431 break; 6432 default: 6433 error = EINVAL; 6434 break; 6435 } 6436 return error; 6437#undef IS_RUNNING 6438} 6439 6440static int 6441ath_sysctl_slottime(SYSCTL_HANDLER_ARGS) 6442{ 6443 struct ath_softc *sc = arg1; 6444 u_int slottime = ath_hal_getslottime(sc->sc_ah); 6445 int error; 6446 6447 error = sysctl_handle_int(oidp, &slottime, 0, req); 6448 if (error || !req->newptr) 6449 return error; 6450 return !ath_hal_setslottime(sc->sc_ah, slottime) ? 
EINVAL : 0; 6451} 6452 6453static int 6454ath_sysctl_acktimeout(SYSCTL_HANDLER_ARGS) 6455{ 6456 struct ath_softc *sc = arg1; 6457 u_int acktimeout = ath_hal_getacktimeout(sc->sc_ah); 6458 int error; 6459 6460 error = sysctl_handle_int(oidp, &acktimeout, 0, req); 6461 if (error || !req->newptr) 6462 return error; 6463 return !ath_hal_setacktimeout(sc->sc_ah, acktimeout) ? EINVAL : 0; 6464} 6465 6466static int 6467ath_sysctl_ctstimeout(SYSCTL_HANDLER_ARGS) 6468{ 6469 struct ath_softc *sc = arg1; 6470 u_int ctstimeout = ath_hal_getctstimeout(sc->sc_ah); 6471 int error; 6472 6473 error = sysctl_handle_int(oidp, &ctstimeout, 0, req); 6474 if (error || !req->newptr) 6475 return error; 6476 return !ath_hal_setctstimeout(sc->sc_ah, ctstimeout) ? EINVAL : 0; 6477} 6478 6479static int 6480ath_sysctl_softled(SYSCTL_HANDLER_ARGS) 6481{ 6482 struct ath_softc *sc = arg1; 6483 int softled = sc->sc_softled; 6484 int error; 6485 6486 error = sysctl_handle_int(oidp, &softled, 0, req); 6487 if (error || !req->newptr) 6488 return error; 6489 softled = (softled != 0); 6490 if (softled != sc->sc_softled) { 6491 if (softled) { 6492 /* NB: handle any sc_ledpin change */ 6493 ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin); 6494 ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, 6495 !sc->sc_ledon); 6496 } 6497 sc->sc_softled = softled; 6498 } 6499 return 0; 6500} 6501 6502static int 6503ath_sysctl_ledpin(SYSCTL_HANDLER_ARGS) 6504{ 6505 struct ath_softc *sc = arg1; 6506 int ledpin = sc->sc_ledpin; 6507 int error; 6508 6509 error = sysctl_handle_int(oidp, &ledpin, 0, req); 6510 if (error || !req->newptr) 6511 return error; 6512 if (ledpin != sc->sc_ledpin) { 6513 sc->sc_ledpin = ledpin; 6514 if (sc->sc_softled) { 6515 ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin); 6516 ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, 6517 !sc->sc_ledon); 6518 } 6519 } 6520 return 0; 6521} 6522 6523static int 6524ath_sysctl_txantenna(SYSCTL_HANDLER_ARGS) 6525{ 6526 struct ath_softc *sc = arg1; 6527 u_int txantenna = 
ath_hal_getantennaswitch(sc->sc_ah); 6528 int error; 6529 6530 error = sysctl_handle_int(oidp, &txantenna, 0, req); 6531 if (!error && req->newptr) { 6532 /* XXX assumes 2 antenna ports */ 6533 if (txantenna < HAL_ANT_VARIABLE || txantenna > HAL_ANT_FIXED_B) 6534 return EINVAL; 6535 ath_hal_setantennaswitch(sc->sc_ah, txantenna); 6536 /* 6537 * NB: with the switch locked this isn't meaningful, 6538 * but set it anyway so things like radiotap get 6539 * consistent info in their data. 6540 */ 6541 sc->sc_txantenna = txantenna; 6542 } 6543 return error; 6544} 6545 6546static int 6547ath_sysctl_rxantenna(SYSCTL_HANDLER_ARGS) 6548{ 6549 struct ath_softc *sc = arg1; 6550 u_int defantenna = ath_hal_getdefantenna(sc->sc_ah); 6551 int error; 6552 6553 error = sysctl_handle_int(oidp, &defantenna, 0, req); 6554 if (!error && req->newptr) 6555 ath_hal_setdefantenna(sc->sc_ah, defantenna); 6556 return error; 6557} 6558 6559static int 6560ath_sysctl_diversity(SYSCTL_HANDLER_ARGS) 6561{ 6562 struct ath_softc *sc = arg1; 6563 u_int diversity = ath_hal_getdiversity(sc->sc_ah); 6564 int error; 6565 6566 error = sysctl_handle_int(oidp, &diversity, 0, req); 6567 if (error || !req->newptr) 6568 return error; 6569 if (!ath_hal_setdiversity(sc->sc_ah, diversity)) 6570 return EINVAL; 6571 sc->sc_diversity = diversity; 6572 return 0; 6573} 6574 6575static int 6576ath_sysctl_diag(SYSCTL_HANDLER_ARGS) 6577{ 6578 struct ath_softc *sc = arg1; 6579 u_int32_t diag; 6580 int error; 6581 6582 if (!ath_hal_getdiag(sc->sc_ah, &diag)) 6583 return EINVAL; 6584 error = sysctl_handle_int(oidp, &diag, 0, req); 6585 if (error || !req->newptr) 6586 return error; 6587 return !ath_hal_setdiag(sc->sc_ah, diag) ? 
EINVAL : 0; 6588} 6589 6590static int 6591ath_sysctl_tpscale(SYSCTL_HANDLER_ARGS) 6592{ 6593 struct ath_softc *sc = arg1; 6594 struct ifnet *ifp = sc->sc_ifp; 6595 u_int32_t scale; 6596 int error; 6597 6598 (void) ath_hal_gettpscale(sc->sc_ah, &scale); 6599 error = sysctl_handle_int(oidp, &scale, 0, req); 6600 if (error || !req->newptr) 6601 return error; 6602 return !ath_hal_settpscale(sc->sc_ah, scale) ? EINVAL : 6603 (ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0; 6604} 6605 6606static int 6607ath_sysctl_tpc(SYSCTL_HANDLER_ARGS) 6608{ 6609 struct ath_softc *sc = arg1; 6610 u_int tpc = ath_hal_gettpc(sc->sc_ah); 6611 int error; 6612 6613 error = sysctl_handle_int(oidp, &tpc, 0, req); 6614 if (error || !req->newptr) 6615 return error; 6616 return !ath_hal_settpc(sc->sc_ah, tpc) ? EINVAL : 0; 6617} 6618 6619static int 6620ath_sysctl_rfkill(SYSCTL_HANDLER_ARGS) 6621{ 6622 struct ath_softc *sc = arg1; 6623 struct ifnet *ifp = sc->sc_ifp; 6624 struct ath_hal *ah = sc->sc_ah; 6625 u_int rfkill = ath_hal_getrfkill(ah); 6626 int error; 6627 6628 error = sysctl_handle_int(oidp, &rfkill, 0, req); 6629 if (error || !req->newptr) 6630 return error; 6631 if (rfkill == ath_hal_getrfkill(ah)) /* unchanged */ 6632 return 0; 6633 if (!ath_hal_setrfkill(ah, rfkill)) 6634 return EINVAL; 6635 return (ifp->if_drv_flags & IFF_DRV_RUNNING) ? 
ath_reset(ifp) : 0; 6636} 6637 6638static int 6639ath_sysctl_rfsilent(SYSCTL_HANDLER_ARGS) 6640{ 6641 struct ath_softc *sc = arg1; 6642 u_int rfsilent; 6643 int error; 6644 6645 (void) ath_hal_getrfsilent(sc->sc_ah, &rfsilent); 6646 error = sysctl_handle_int(oidp, &rfsilent, 0, req); 6647 if (error || !req->newptr) 6648 return error; 6649 if (!ath_hal_setrfsilent(sc->sc_ah, rfsilent)) 6650 return EINVAL; 6651 sc->sc_rfsilentpin = rfsilent & 0x1c; 6652 sc->sc_rfsilentpol = (rfsilent & 0x2) != 0; 6653 return 0; 6654} 6655 6656static int 6657ath_sysctl_tpack(SYSCTL_HANDLER_ARGS) 6658{ 6659 struct ath_softc *sc = arg1; 6660 u_int32_t tpack; 6661 int error; 6662 6663 (void) ath_hal_gettpack(sc->sc_ah, &tpack); 6664 error = sysctl_handle_int(oidp, &tpack, 0, req); 6665 if (error || !req->newptr) 6666 return error; 6667 return !ath_hal_settpack(sc->sc_ah, tpack) ? EINVAL : 0; 6668} 6669 6670static int 6671ath_sysctl_tpcts(SYSCTL_HANDLER_ARGS) 6672{ 6673 struct ath_softc *sc = arg1; 6674 u_int32_t tpcts; 6675 int error; 6676 6677 (void) ath_hal_gettpcts(sc->sc_ah, &tpcts); 6678 error = sysctl_handle_int(oidp, &tpcts, 0, req); 6679 if (error || !req->newptr) 6680 return error; 6681 return !ath_hal_settpcts(sc->sc_ah, tpcts) ? EINVAL : 0; 6682} 6683 6684static int 6685ath_sysctl_intmit(SYSCTL_HANDLER_ARGS) 6686{ 6687 struct ath_softc *sc = arg1; 6688 int intmit, error; 6689 6690 intmit = ath_hal_getintmit(sc->sc_ah); 6691 error = sysctl_handle_int(oidp, &intmit, 0, req); 6692 if (error || !req->newptr) 6693 return error; 6694 return !ath_hal_setintmit(sc->sc_ah, intmit) ? 
EINVAL : 0; 6695} 6696 6697static void 6698ath_sysctlattach(struct ath_softc *sc) 6699{ 6700 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 6701 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 6702 struct ath_hal *ah = sc->sc_ah; 6703 6704 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6705 "countrycode", CTLFLAG_RD, &sc->sc_eecc, 0, 6706 "EEPROM country code"); 6707 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6708 "regdomain", CTLFLAG_RD, &sc->sc_eerd, 0, 6709 "EEPROM regdomain code"); 6710#ifdef ATH_DEBUG 6711 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6712 "debug", CTLFLAG_RW, &sc->sc_debug, 0, 6713 "control debugging printfs"); 6714#endif 6715 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6716 "slottime", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 6717 ath_sysctl_slottime, "I", "802.11 slot time (us)"); 6718 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6719 "acktimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 6720 ath_sysctl_acktimeout, "I", "802.11 ACK timeout (us)"); 6721 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6722 "ctstimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 6723 ath_sysctl_ctstimeout, "I", "802.11 CTS timeout (us)"); 6724 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6725 "softled", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 6726 ath_sysctl_softled, "I", "enable/disable software LED support"); 6727 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6728 "ledpin", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 6729 ath_sysctl_ledpin, "I", "GPIO pin connected to LED"); 6730 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6731 "ledon", CTLFLAG_RW, &sc->sc_ledon, 0, 6732 "setting to turn LED on"); 6733 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6734 "ledidle", CTLFLAG_RW, &sc->sc_ledidle, 0, 6735 "idle time for inactivity LED (ticks)"); 6736 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6737 "txantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 6738 ath_sysctl_txantenna, "I", "antenna 
switch"); 6739 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6740 "rxantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 6741 ath_sysctl_rxantenna, "I", "default/rx antenna"); 6742 if (ath_hal_hasdiversity(ah)) 6743 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6744 "diversity", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 6745 ath_sysctl_diversity, "I", "antenna diversity"); 6746 sc->sc_txintrperiod = ATH_TXINTR_PERIOD; 6747 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6748 "txintrperiod", CTLFLAG_RW, &sc->sc_txintrperiod, 0, 6749 "tx descriptor batching"); 6750 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6751 "diag", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 6752 ath_sysctl_diag, "I", "h/w diagnostic control"); 6753 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6754 "tpscale", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 6755 ath_sysctl_tpscale, "I", "tx power scaling"); 6756 if (ath_hal_hastpc(ah)) { 6757 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6758 "tpc", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 6759 ath_sysctl_tpc, "I", "enable/disable per-packet TPC"); 6760 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6761 "tpack", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 6762 ath_sysctl_tpack, "I", "tx power for ack frames"); 6763 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6764 "tpcts", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 6765 ath_sysctl_tpcts, "I", "tx power for cts frames"); 6766 } 6767 if (ath_hal_hasfastframes(sc->sc_ah)) { 6768 sc->sc_fftxqmin = ATH_FF_TXQMIN; 6769 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6770 "fftxqmin", CTLFLAG_RW, &sc->sc_fftxqmin, 0, 6771 "min frames before fast-frame staging"); 6772 sc->sc_fftxqmax = ATH_FF_TXQMAX; 6773 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6774 "fftxqmax", CTLFLAG_RW, &sc->sc_fftxqmax, 0, 6775 "max queued frames before tail drop"); 6776 } 6777 if (ath_hal_hasrfsilent(ah)) { 6778 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6779 "rfsilent", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 6780 
ath_sysctl_rfsilent, "I", "h/w RF silent config"); 6781 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6782 "rfkill", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 6783 ath_sysctl_rfkill, "I", "enable/disable RF kill switch"); 6784 } 6785 if (ath_hal_hasintmit(ah)) { 6786 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6787 "intmit", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 6788 ath_sysctl_intmit, "I", "interference mitigation"); 6789 } 6790 sc->sc_monpass = HAL_RXERR_DECRYPT | HAL_RXERR_MIC; 6791 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6792 "monpass", CTLFLAG_RW, &sc->sc_monpass, 0, 6793 "mask of error frames to pass when monitoring"); 6794} 6795 6796static void 6797ath_bpfattach(struct ath_softc *sc) 6798{ 6799 struct ifnet *ifp = sc->sc_ifp; 6800 6801 bpfattach(ifp, DLT_IEEE802_11_RADIO, 6802 sizeof(struct ieee80211_frame) + sizeof(sc->sc_tx_th)); 6803 /* 6804 * Initialize constant fields. 6805 * XXX make header lengths a multiple of 32-bits so subsequent 6806 * headers are properly aligned; this is a kludge to keep 6807 * certain applications happy. 6808 * 6809 * NB: the channel is setup each time we transition to the 6810 * RUN state to avoid filling it in for each frame. 
6811 */ 6812 sc->sc_tx_th_len = roundup(sizeof(sc->sc_tx_th), sizeof(u_int32_t)); 6813 sc->sc_tx_th.wt_ihdr.it_len = htole16(sc->sc_tx_th_len); 6814 sc->sc_tx_th.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT); 6815 6816 sc->sc_rx_th_len = roundup(sizeof(sc->sc_rx_th), sizeof(u_int32_t)); 6817 sc->sc_rx_th.wr_ihdr.it_len = htole16(sc->sc_rx_th_len); 6818 sc->sc_rx_th.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT); 6819} 6820 6821static int 6822ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni, 6823 struct ath_buf *bf, struct mbuf *m0, 6824 const struct ieee80211_bpf_params *params) 6825{ 6826 struct ifnet *ifp = sc->sc_ifp; 6827 struct ieee80211com *ic = ifp->if_l2com; 6828 struct ath_hal *ah = sc->sc_ah; 6829 int error, ismcast, ismrr; 6830 int hdrlen, pktlen, try0, txantenna; 6831 u_int8_t rix, cix, txrate, ctsrate, rate1, rate2, rate3; 6832 struct ieee80211_frame *wh; 6833 u_int flags, ctsduration; 6834 HAL_PKT_TYPE atype; 6835 const HAL_RATE_TABLE *rt; 6836 struct ath_desc *ds; 6837 u_int pri; 6838 6839 wh = mtod(m0, struct ieee80211_frame *); 6840 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 6841 hdrlen = ieee80211_anyhdrsize(wh); 6842 /* 6843 * Packet length must not include any 6844 * pad bytes; deduct them here. 6845 */ 6846 /* XXX honor IEEE80211_BPF_DATAPAD */ 6847 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; 6848 6849 error = ath_tx_dmasetup(sc, bf, m0); 6850 if (error != 0) 6851 return error; 6852 m0 = bf->bf_m; /* NB: may have changed */ 6853 wh = mtod(m0, struct ieee80211_frame *); 6854 bf->bf_node = ni; /* NB: held reference */ 6855 6856 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 6857 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 6858 if (params->ibp_flags & IEEE80211_BPF_RTS) 6859 flags |= HAL_TXDESC_RTSENA; 6860 else if (params->ibp_flags & IEEE80211_BPF_CTS) 6861 flags |= HAL_TXDESC_CTSENA; 6862 /* XXX leave ismcast to injector? 
*/ 6863 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) 6864 flags |= HAL_TXDESC_NOACK; 6865 6866 rt = sc->sc_currates; 6867 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 6868 rix = ath_tx_findrix(rt, params->ibp_rate0); 6869 txrate = rt->info[rix].rateCode; 6870 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 6871 txrate |= rt->info[rix].shortPreamble; 6872 sc->sc_txrate = txrate; 6873 try0 = params->ibp_try0; 6874 ismrr = (params->ibp_try1 != 0); 6875 txantenna = params->ibp_pri >> 2; 6876 if (txantenna == 0) /* XXX? */ 6877 txantenna = sc->sc_txantenna; 6878 ctsduration = 0; 6879 if (flags & (HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA)) { 6880 cix = ath_tx_findrix(rt, params->ibp_ctsrate); 6881 ctsrate = rt->info[cix].rateCode; 6882 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) { 6883 ctsrate |= rt->info[cix].shortPreamble; 6884 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 6885 ctsduration += rt->info[cix].spAckDuration; 6886 ctsduration += ath_hal_computetxtime(ah, 6887 rt, pktlen, rix, AH_TRUE); 6888 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 6889 ctsduration += rt->info[rix].spAckDuration; 6890 } else { 6891 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 6892 ctsduration += rt->info[cix].lpAckDuration; 6893 ctsduration += ath_hal_computetxtime(ah, 6894 rt, pktlen, rix, AH_FALSE); 6895 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 6896 ctsduration += rt->info[rix].lpAckDuration; 6897 } 6898 ismrr = 0; /* XXX */ 6899 } else 6900 ctsrate = 0; 6901 pri = params->ibp_pri & 3; 6902 /* 6903 * NB: we mark all packets as type PSPOLL so the h/w won't 6904 * set the sequence number, duration, etc. 
6905 */ 6906 atype = HAL_PKT_TYPE_PSPOLL; 6907 6908 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 6909 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, 6910 sc->sc_hwmap[txrate].ieeerate, -1); 6911 6912 if (bpf_peers_present(ifp->if_bpf)) { 6913 u_int64_t tsf = ath_hal_gettsf64(ah); 6914 6915 sc->sc_tx_th.wt_tsf = htole64(tsf); 6916 sc->sc_tx_th.wt_flags = sc->sc_hwmap[txrate].txflags; 6917 if (wh->i_fc[1] & IEEE80211_FC1_WEP) 6918 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 6919 sc->sc_tx_th.wt_rate = sc->sc_hwmap[txrate].ieeerate; 6920 sc->sc_tx_th.wt_txpower = ni->ni_txpower; 6921 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 6922 6923 bpf_mtap2(ifp->if_bpf, &sc->sc_tx_th, sc->sc_tx_th_len, m0); 6924 } 6925 6926 /* 6927 * Formulate first tx descriptor with tx controls. 6928 */ 6929 ds = bf->bf_desc; 6930 /* XXX check return value? */ 6931 ath_hal_setuptxdesc(ah, ds 6932 , pktlen /* packet length */ 6933 , hdrlen /* header length */ 6934 , atype /* Atheros packet type */ 6935 , params->ibp_power /* txpower */ 6936 , txrate, try0 /* series 0 rate/tries */ 6937 , HAL_TXKEYIX_INVALID /* key cache index */ 6938 , txantenna /* antenna mode */ 6939 , flags /* flags */ 6940 , ctsrate /* rts/cts rate */ 6941 , ctsduration /* rts/cts duration */ 6942 ); 6943 bf->bf_txflags = flags; 6944 6945 if (ismrr) { 6946 rix = ath_tx_findrix(rt, params->ibp_rate1); 6947 rate1 = rt->info[rix].rateCode; 6948 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 6949 rate1 |= rt->info[rix].shortPreamble; 6950 if (params->ibp_try2) { 6951 rix = ath_tx_findrix(rt, params->ibp_rate2); 6952 rate2 = rt->info[rix].rateCode; 6953 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 6954 rate2 |= rt->info[rix].shortPreamble; 6955 } else 6956 rate2 = 0; 6957 if (params->ibp_try3) { 6958 rix = ath_tx_findrix(rt, params->ibp_rate3); 6959 rate3 = rt->info[rix].rateCode; 6960 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 6961 rate3 |= rt->info[rix].shortPreamble; 6962 } else 6963 rate3 = 0; 6964 
ath_hal_setupxtxdesc(ah, ds 6965 , rate1, params->ibp_try1 /* series 1 */ 6966 , rate2, params->ibp_try2 /* series 2 */ 6967 , rate3, params->ibp_try3 /* series 3 */ 6968 ); 6969 } 6970 6971 /* NB: no buffered multicast in power save support */ 6972 ath_tx_handoff(sc, sc->sc_ac2q[pri], bf); 6973 return 0; 6974} 6975 6976static int 6977ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 6978 const struct ieee80211_bpf_params *params) 6979{ 6980 struct ieee80211com *ic = ni->ni_ic; 6981 struct ifnet *ifp = ic->ic_ifp; 6982 struct ath_softc *sc = ifp->if_softc; 6983 struct ath_buf *bf; 6984 6985 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) { 6986 ieee80211_free_node(ni); 6987 m_freem(m); 6988 return ENETDOWN; 6989 } 6990 /* 6991 * Grab a TX buffer and associated resources. 6992 */ 6993 ATH_TXBUF_LOCK(sc); 6994 bf = STAILQ_FIRST(&sc->sc_txbuf); 6995 if (bf != NULL) 6996 STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list); 6997 ATH_TXBUF_UNLOCK(sc); 6998 if (bf == NULL) { 6999 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: out of xmit buffers\n", 7000 __func__); 7001 sc->sc_stats.ast_tx_qstop++; 7002 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 7003 ieee80211_free_node(ni); 7004 m_freem(m); 7005 return ENOBUFS; 7006 } 7007 7008 ifp->if_opackets++; 7009 sc->sc_stats.ast_tx_raw++; 7010 7011 if (params == NULL) { 7012 /* 7013 * Legacy path; interpret frame contents to decide 7014 * precisely how to send the frame. 7015 */ 7016 if (ath_tx_start(sc, ni, bf, m)) 7017 goto bad; 7018 } else { 7019 /* 7020 * Caller supplied explicit parameters to use in 7021 * sending the frame. 7022 */ 7023 if (ath_tx_raw_start(sc, ni, bf, m, params)) 7024 goto bad; 7025 } 7026 ifp->if_timer = 5; 7027 7028 return 0; 7029bad: 7030 ifp->if_oerrors++; 7031 ATH_TXBUF_LOCK(sc); 7032 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 7033 ATH_TXBUF_UNLOCK(sc); 7034 ieee80211_free_node(ni); 7035 return EIO; /* XXX */ 7036} 7037 7038/* 7039 * Announce various information on device/driver attach. 
7040 */ 7041static void 7042ath_announce(struct ath_softc *sc) 7043{ 7044#define HAL_MODE_DUALBAND (HAL_MODE_11A|HAL_MODE_11B) 7045 struct ifnet *ifp = sc->sc_ifp; 7046 struct ath_hal *ah = sc->sc_ah; 7047 u_int modes, cc; 7048 7049 if_printf(ifp, "mac %d.%d phy %d.%d", 7050 ah->ah_macVersion, ah->ah_macRev, 7051 ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf); 7052 /* 7053 * Print radio revision(s). We check the wireless modes 7054 * to avoid falsely printing revs for inoperable parts. 7055 * Dual-band radio revs are returned in the 5Ghz rev number. 7056 */ 7057 ath_hal_getcountrycode(ah, &cc); 7058 modes = ath_hal_getwirelessmodes(ah, cc); 7059 if ((modes & HAL_MODE_DUALBAND) == HAL_MODE_DUALBAND) { 7060 if (ah->ah_analog5GhzRev && ah->ah_analog2GhzRev) 7061 printf(" 5ghz radio %d.%d 2ghz radio %d.%d", 7062 ah->ah_analog5GhzRev >> 4, 7063 ah->ah_analog5GhzRev & 0xf, 7064 ah->ah_analog2GhzRev >> 4, 7065 ah->ah_analog2GhzRev & 0xf); 7066 else 7067 printf(" radio %d.%d", ah->ah_analog5GhzRev >> 4, 7068 ah->ah_analog5GhzRev & 0xf); 7069 } else 7070 printf(" radio %d.%d", ah->ah_analog5GhzRev >> 4, 7071 ah->ah_analog5GhzRev & 0xf); 7072 printf("\n"); 7073 if (bootverbose) { 7074 int i; 7075 for (i = 0; i <= WME_AC_VO; i++) { 7076 struct ath_txq *txq = sc->sc_ac2q[i]; 7077 if_printf(ifp, "Use hw queue %u for %s traffic\n", 7078 txq->axq_qnum, ieee80211_wme_acnames[i]); 7079 } 7080 if_printf(ifp, "Use hw queue %u for CAB traffic\n", 7081 sc->sc_cabq->axq_qnum); 7082 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq); 7083 } 7084 if (ath_rxbuf != ATH_RXBUF) 7085 if_printf(ifp, "using %u rx buffers\n", ath_rxbuf); 7086 if (ath_txbuf != ATH_TXBUF) 7087 if_printf(ifp, "using %u tx buffers\n", ath_txbuf); 7088#undef HAL_MODE_DUALBAND 7089} 7090