/* if_ath.c revision 234085 */
1/*- 2 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 13 * redistribution must be conditioned upon including a substantially 14 * similar Disclaimer requirement for further binary redistribution. 15 * 16 * NO WARRANTY 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 27 * THE POSSIBILITY OF SUCH DAMAGES. 28 */ 29 30#include <sys/cdefs.h> 31__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 234085 2012-04-10 06:25:11Z adrian $"); 32 33/* 34 * Driver for the Atheros Wireless LAN controller. 35 * 36 * This software is derived from work of Atsushi Onoe; his contribution 37 * is greatly appreciated. 38 */ 39 40#include "opt_inet.h" 41#include "opt_ath.h" 42/* 43 * This is needed for register operations which are performed 44 * by the driver - eg, calls to ath_hal_gettsf32(). 
45 * 46 * It's also required for any AH_DEBUG checks in here, eg the 47 * module dependencies. 48 */ 49#include "opt_ah.h" 50#include "opt_wlan.h" 51 52#include <sys/param.h> 53#include <sys/systm.h> 54#include <sys/sysctl.h> 55#include <sys/mbuf.h> 56#include <sys/malloc.h> 57#include <sys/lock.h> 58#include <sys/mutex.h> 59#include <sys/kernel.h> 60#include <sys/socket.h> 61#include <sys/sockio.h> 62#include <sys/errno.h> 63#include <sys/callout.h> 64#include <sys/bus.h> 65#include <sys/endian.h> 66#include <sys/kthread.h> 67#include <sys/taskqueue.h> 68#include <sys/priv.h> 69#include <sys/module.h> 70#include <sys/ktr.h> 71#include <sys/smp.h> /* for mp_ncpus */ 72 73#include <machine/bus.h> 74 75#include <net/if.h> 76#include <net/if_dl.h> 77#include <net/if_media.h> 78#include <net/if_types.h> 79#include <net/if_arp.h> 80#include <net/ethernet.h> 81#include <net/if_llc.h> 82 83#include <net80211/ieee80211_var.h> 84#include <net80211/ieee80211_regdomain.h> 85#ifdef IEEE80211_SUPPORT_SUPERG 86#include <net80211/ieee80211_superg.h> 87#endif 88#ifdef IEEE80211_SUPPORT_TDMA 89#include <net80211/ieee80211_tdma.h> 90#endif 91 92#include <net/bpf.h> 93 94#ifdef INET 95#include <netinet/in.h> 96#include <netinet/if_ether.h> 97#endif 98 99#include <dev/ath/if_athvar.h> 100#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */ 101#include <dev/ath/ath_hal/ah_diagcodes.h> 102 103#include <dev/ath/if_ath_debug.h> 104#include <dev/ath/if_ath_misc.h> 105#include <dev/ath/if_ath_tx.h> 106#include <dev/ath/if_ath_sysctl.h> 107#include <dev/ath/if_ath_led.h> 108#include <dev/ath/if_ath_keycache.h> 109#include <dev/ath/if_athdfs.h> 110 111#ifdef ATH_TX99_DIAG 112#include <dev/ath/ath_tx99/ath_tx99.h> 113#endif 114 115#define ATH_KTR_INTR KTR_SPARE4 116#define ATH_KTR_ERR KTR_SPARE3 117 118/* 119 * ATH_BCBUF determines the number of vap's that can transmit 120 * beacons and also (currently) the number of vap's that can 121 * have unique mac addresses/bssid. 
When staggering beacons 122 * 4 is probably a good max as otherwise the beacons become 123 * very closely spaced and there is limited time for cab q traffic 124 * to go out. You can burst beacons instead but that is not good 125 * for stations in power save and at some point you really want 126 * another radio (and channel). 127 * 128 * The limit on the number of mac addresses is tied to our use of 129 * the U/L bit and tracking addresses in a byte; it would be 130 * worthwhile to allow more for applications like proxy sta. 131 */ 132CTASSERT(ATH_BCBUF <= 8); 133 134#if __FreeBSD_version > 1000003 135static struct ieee80211vap *ath_vap_create(struct ieee80211com *, 136 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 137 const uint8_t [IEEE80211_ADDR_LEN], 138 const uint8_t [IEEE80211_ADDR_LEN]); 139#else 140static struct ieee80211vap *ath_vap_create(struct ieee80211com *, 141 const char [IFNAMSIZ], int, int, int, 142 const uint8_t [IEEE80211_ADDR_LEN], 143 const uint8_t [IEEE80211_ADDR_LEN]); 144#endif 145static void ath_vap_delete(struct ieee80211vap *); 146static void ath_init(void *); 147static void ath_stop_locked(struct ifnet *); 148static void ath_stop(struct ifnet *); 149static void ath_start(struct ifnet *); 150static int ath_reset_vap(struct ieee80211vap *, u_long); 151static int ath_media_change(struct ifnet *); 152static void ath_watchdog(void *); 153static int ath_ioctl(struct ifnet *, u_long, caddr_t); 154static void ath_fatal_proc(void *, int); 155static void ath_bmiss_vap(struct ieee80211vap *); 156static void ath_bmiss_proc(void *, int); 157static void ath_key_update_begin(struct ieee80211vap *); 158static void ath_key_update_end(struct ieee80211vap *); 159static void ath_update_mcast(struct ifnet *); 160static void ath_update_promisc(struct ifnet *); 161static void ath_mode_init(struct ath_softc *); 162static void ath_setslottime(struct ath_softc *); 163static void ath_updateslot(struct ifnet *); 164static int ath_beaconq_setup(struct 
ath_hal *); 165static int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *); 166static void ath_beacon_update(struct ieee80211vap *, int item); 167static void ath_beacon_setup(struct ath_softc *, struct ath_buf *); 168static void ath_beacon_proc(void *, int); 169static struct ath_buf *ath_beacon_generate(struct ath_softc *, 170 struct ieee80211vap *); 171static void ath_bstuck_proc(void *, int); 172static void ath_reset_proc(void *, int); 173static void ath_beacon_return(struct ath_softc *, struct ath_buf *); 174static void ath_beacon_free(struct ath_softc *); 175static void ath_beacon_config(struct ath_softc *, struct ieee80211vap *); 176static void ath_descdma_cleanup(struct ath_softc *sc, 177 struct ath_descdma *, ath_bufhead *); 178static int ath_desc_alloc(struct ath_softc *); 179static void ath_desc_free(struct ath_softc *); 180static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *, 181 const uint8_t [IEEE80211_ADDR_LEN]); 182static void ath_node_cleanup(struct ieee80211_node *); 183static void ath_node_free(struct ieee80211_node *); 184static void ath_node_getsignal(const struct ieee80211_node *, 185 int8_t *, int8_t *); 186static int ath_rxbuf_init(struct ath_softc *, struct ath_buf *); 187static void ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, 188 int subtype, int rssi, int nf); 189static void ath_setdefantenna(struct ath_softc *, u_int); 190static void ath_rx_proc(struct ath_softc *sc, int); 191static void ath_rx_tasklet(void *, int); 192static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int); 193static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype); 194static int ath_tx_setup(struct ath_softc *, int, int); 195static int ath_wme_update(struct ieee80211com *); 196static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *); 197static void ath_tx_cleanup(struct ath_softc *); 198static void ath_tx_proc_q0(void *, int); 199static void ath_tx_proc_q0123(void *, int); 200static 
void ath_tx_proc(void *, int); 201static void ath_txq_sched_tasklet(void *, int); 202static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); 203static void ath_draintxq(struct ath_softc *, ATH_RESET_TYPE reset_type); 204static void ath_stoprecv(struct ath_softc *, int); 205static int ath_startrecv(struct ath_softc *); 206static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *); 207static void ath_scan_start(struct ieee80211com *); 208static void ath_scan_end(struct ieee80211com *); 209static void ath_set_channel(struct ieee80211com *); 210#ifdef ATH_ENABLE_11N 211static void ath_update_chw(struct ieee80211com *); 212#endif /* ATH_ENABLE_11N */ 213static void ath_calibrate(void *); 214static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int); 215static void ath_setup_stationkey(struct ieee80211_node *); 216static void ath_newassoc(struct ieee80211_node *, int); 217static int ath_setregdomain(struct ieee80211com *, 218 struct ieee80211_regdomain *, int, 219 struct ieee80211_channel []); 220static void ath_getradiocaps(struct ieee80211com *, int, int *, 221 struct ieee80211_channel []); 222static int ath_getchannels(struct ath_softc *); 223 224static int ath_rate_setup(struct ath_softc *, u_int mode); 225static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); 226 227static void ath_announce(struct ath_softc *); 228 229static void ath_dfs_tasklet(void *, int); 230 231#ifdef IEEE80211_SUPPORT_TDMA 232static void ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, 233 u_int32_t bintval); 234static void ath_tdma_bintvalsetup(struct ath_softc *sc, 235 const struct ieee80211_tdma_state *tdma); 236static void ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap); 237static void ath_tdma_update(struct ieee80211_node *ni, 238 const struct ieee80211_tdma_param *tdma, int); 239static void ath_tdma_beacon_send(struct ath_softc *sc, 240 struct ieee80211vap *vap); 241 242#define 
TDMA_EP_MULTIPLIER (1<<10) /* pow2 to optimize out * and / */ 243#define TDMA_LPF_LEN 6 244#define TDMA_DUMMY_MARKER 0x127 245#define TDMA_EP_MUL(x, mul) ((x) * (mul)) 246#define TDMA_IN(x) (TDMA_EP_MUL((x), TDMA_EP_MULTIPLIER)) 247#define TDMA_LPF(x, y, len) \ 248 ((x != TDMA_DUMMY_MARKER) ? (((x) * ((len)-1) + (y)) / (len)) : (y)) 249#define TDMA_SAMPLE(x, y) do { \ 250 x = TDMA_LPF((x), TDMA_IN(y), TDMA_LPF_LEN); \ 251} while (0) 252#define TDMA_EP_RND(x,mul) \ 253 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul)) 254#define TDMA_AVG(x) TDMA_EP_RND(x, TDMA_EP_MULTIPLIER) 255#endif /* IEEE80211_SUPPORT_TDMA */ 256 257SYSCTL_DECL(_hw_ath); 258 259/* XXX validate sysctl values */ 260static int ath_longcalinterval = 30; /* long cals every 30 secs */ 261SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval, 262 0, "long chip calibration interval (secs)"); 263static int ath_shortcalinterval = 100; /* short cals every 100 ms */ 264SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval, 265 0, "short chip calibration interval (msecs)"); 266static int ath_resetcalinterval = 20*60; /* reset cal state 20 mins */ 267SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval, 268 0, "reset chip calibration results (secs)"); 269static int ath_anicalinterval = 100; /* ANI calibration - 100 msec */ 270SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval, 271 0, "ANI calibration (msecs)"); 272 273static int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */ 274SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf, 275 0, "rx buffers allocated"); 276TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf); 277static int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */ 278SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf, 279 0, "tx buffers allocated"); 280TUNABLE_INT("hw.ath.txbuf", &ath_txbuf); 281 282static int ath_bstuck_threshold = 4; /* max missed beacons */ 
283SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold, 284 0, "max missed beacon xmits before chip reset"); 285 286MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers"); 287 288#define HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20) 289#define HAL_MODE_HT40 \ 290 (HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \ 291 HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS) 292int 293ath_attach(u_int16_t devid, struct ath_softc *sc) 294{ 295 struct ifnet *ifp; 296 struct ieee80211com *ic; 297 struct ath_hal *ah = NULL; 298 HAL_STATUS status; 299 int error = 0, i; 300 u_int wmodes; 301 uint8_t macaddr[IEEE80211_ADDR_LEN]; 302 int rx_chainmask, tx_chainmask; 303 304 DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid); 305 306 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 307 if (ifp == NULL) { 308 device_printf(sc->sc_dev, "can not if_alloc()\n"); 309 error = ENOSPC; 310 goto bad; 311 } 312 ic = ifp->if_l2com; 313 314 /* set these up early for if_printf use */ 315 if_initname(ifp, device_get_name(sc->sc_dev), 316 device_get_unit(sc->sc_dev)); 317 318 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, 319 sc->sc_eepromdata, &status); 320 if (ah == NULL) { 321 if_printf(ifp, "unable to attach hardware; HAL status %u\n", 322 status); 323 error = ENXIO; 324 goto bad; 325 } 326 sc->sc_ah = ah; 327 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */ 328#ifdef ATH_DEBUG 329 sc->sc_debug = ath_debug; 330#endif 331 332 /* 333 * Check if the MAC has multi-rate retry support. 334 * We do this by trying to setup a fake extended 335 * descriptor. MAC's that don't have support will 336 * return false w/o doing anything. MAC's that do 337 * support it will return true w/o doing anything. 338 */ 339 sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0); 340 341 /* 342 * Check if the device has hardware counters for PHY 343 * errors. If so we need to enable the MIB interrupt 344 * so we can act on stat triggers. 
345 */ 346 if (ath_hal_hwphycounters(ah)) 347 sc->sc_needmib = 1; 348 349 /* 350 * Get the hardware key cache size. 351 */ 352 sc->sc_keymax = ath_hal_keycachesize(ah); 353 if (sc->sc_keymax > ATH_KEYMAX) { 354 if_printf(ifp, "Warning, using only %u of %u key cache slots\n", 355 ATH_KEYMAX, sc->sc_keymax); 356 sc->sc_keymax = ATH_KEYMAX; 357 } 358 /* 359 * Reset the key cache since some parts do not 360 * reset the contents on initial power up. 361 */ 362 for (i = 0; i < sc->sc_keymax; i++) 363 ath_hal_keyreset(ah, i); 364 365 /* 366 * Collect the default channel list. 367 */ 368 error = ath_getchannels(sc); 369 if (error != 0) 370 goto bad; 371 372 /* 373 * Setup rate tables for all potential media types. 374 */ 375 ath_rate_setup(sc, IEEE80211_MODE_11A); 376 ath_rate_setup(sc, IEEE80211_MODE_11B); 377 ath_rate_setup(sc, IEEE80211_MODE_11G); 378 ath_rate_setup(sc, IEEE80211_MODE_TURBO_A); 379 ath_rate_setup(sc, IEEE80211_MODE_TURBO_G); 380 ath_rate_setup(sc, IEEE80211_MODE_STURBO_A); 381 ath_rate_setup(sc, IEEE80211_MODE_11NA); 382 ath_rate_setup(sc, IEEE80211_MODE_11NG); 383 ath_rate_setup(sc, IEEE80211_MODE_HALF); 384 ath_rate_setup(sc, IEEE80211_MODE_QUARTER); 385 386 /* NB: setup here so ath_rate_update is happy */ 387 ath_setcurmode(sc, IEEE80211_MODE_11A); 388 389 /* 390 * Allocate tx+rx descriptors and populate the lists. 
391 */ 392 error = ath_desc_alloc(sc); 393 if (error != 0) { 394 if_printf(ifp, "failed to allocate descriptors: %d\n", error); 395 goto bad; 396 } 397 callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0); 398 callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0); 399 400 ATH_TXBUF_LOCK_INIT(sc); 401 402 sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT, 403 taskqueue_thread_enqueue, &sc->sc_tq); 404 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, 405 "%s taskq", ifp->if_xname); 406 407 TASK_INIT(&sc->sc_rxtask, 0, ath_rx_tasklet, sc); 408 TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc); 409 TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc); 410 TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc); 411 TASK_INIT(&sc->sc_txqtask,0, ath_txq_sched_tasklet, sc); 412 413 /* 414 * Allocate hardware transmit queues: one queue for 415 * beacon frames and one data queue for each QoS 416 * priority. Note that the hal handles resetting 417 * these queues at the needed time. 418 * 419 * XXX PS-Poll 420 */ 421 sc->sc_bhalq = ath_beaconq_setup(ah); 422 if (sc->sc_bhalq == (u_int) -1) { 423 if_printf(ifp, "unable to setup a beacon xmit queue!\n"); 424 error = EIO; 425 goto bad2; 426 } 427 sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0); 428 if (sc->sc_cabq == NULL) { 429 if_printf(ifp, "unable to setup CAB xmit queue!\n"); 430 error = EIO; 431 goto bad2; 432 } 433 /* NB: insure BK queue is the lowest priority h/w queue */ 434 if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) { 435 if_printf(ifp, "unable to setup xmit queue for %s traffic!\n", 436 ieee80211_wme_acnames[WME_AC_BK]); 437 error = EIO; 438 goto bad2; 439 } 440 if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) || 441 !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) || 442 !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) { 443 /* 444 * Not enough hardware tx queues to properly do WME; 445 * just punt and assign them all to the same h/w queue. 
446 * We could do a better job of this if, for example, 447 * we allocate queues when we switch from station to 448 * AP mode. 449 */ 450 if (sc->sc_ac2q[WME_AC_VI] != NULL) 451 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); 452 if (sc->sc_ac2q[WME_AC_BE] != NULL) 453 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); 454 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; 455 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; 456 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; 457 } 458 459 /* 460 * Special case certain configurations. Note the 461 * CAB queue is handled by these specially so don't 462 * include them when checking the txq setup mask. 463 */ 464 switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) { 465 case 0x01: 466 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc); 467 break; 468 case 0x0f: 469 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc); 470 break; 471 default: 472 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc); 473 break; 474 } 475 476 /* 477 * Setup rate control. Some rate control modules 478 * call back to change the anntena state so expose 479 * the necessary entry points. 480 * XXX maybe belongs in struct ath_ratectrl? 481 */ 482 sc->sc_setdefantenna = ath_setdefantenna; 483 sc->sc_rc = ath_rate_attach(sc); 484 if (sc->sc_rc == NULL) { 485 error = EIO; 486 goto bad2; 487 } 488 489 /* Attach DFS module */ 490 if (! ath_dfs_attach(sc)) { 491 device_printf(sc->sc_dev, 492 "%s: unable to attach DFS\n", __func__); 493 error = EIO; 494 goto bad2; 495 } 496 497 /* Start DFS processing tasklet */ 498 TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc); 499 500 /* Configure LED state */ 501 sc->sc_blinking = 0; 502 sc->sc_ledstate = 1; 503 sc->sc_ledon = 0; /* low true */ 504 sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */ 505 callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE); 506 507 /* 508 * Don't setup hardware-based blinking. 
509 * 510 * Although some NICs may have this configured in the 511 * default reset register values, the user may wish 512 * to alter which pins have which function. 513 * 514 * The reference driver attaches the MAC network LED to GPIO1 and 515 * the MAC power LED to GPIO2. However, the DWA-552 cardbus 516 * NIC has these reversed. 517 */ 518 sc->sc_hardled = (1 == 0); 519 sc->sc_led_net_pin = -1; 520 sc->sc_led_pwr_pin = -1; 521 /* 522 * Auto-enable soft led processing for IBM cards and for 523 * 5211 minipci cards. Users can also manually enable/disable 524 * support with a sysctl. 525 */ 526 sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID); 527 ath_led_config(sc); 528 ath_hal_setledstate(ah, HAL_LED_INIT); 529 530 ifp->if_softc = sc; 531 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; 532 ifp->if_start = ath_start; 533 ifp->if_ioctl = ath_ioctl; 534 ifp->if_init = ath_init; 535 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 536 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 537 IFQ_SET_READY(&ifp->if_snd); 538 539 ic->ic_ifp = ifp; 540 /* XXX not right but it's not used anywhere important */ 541 ic->ic_phytype = IEEE80211_T_OFDM; 542 ic->ic_opmode = IEEE80211_M_STA; 543 ic->ic_caps = 544 IEEE80211_C_STA /* station mode */ 545 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ 546 | IEEE80211_C_HOSTAP /* hostap mode */ 547 | IEEE80211_C_MONITOR /* monitor mode */ 548 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 549 | IEEE80211_C_WDS /* 4-address traffic works */ 550 | IEEE80211_C_MBSS /* mesh point link mode */ 551 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 552 | IEEE80211_C_SHSLOT /* short slot time supported */ 553 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ 554 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 555 | IEEE80211_C_TXFRAG /* handle tx frags */ 556#ifdef ATH_ENABLE_DFS 557 | IEEE80211_C_DFS /* Enable radar detection */ 558#endif 559 ; 560 /* 561 * Query the hal to figure out h/w crypto support. 
562 */ 563 if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP)) 564 ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP; 565 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB)) 566 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB; 567 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM)) 568 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM; 569 if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP)) 570 ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP; 571 if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) { 572 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP; 573 /* 574 * Check if h/w does the MIC and/or whether the 575 * separate key cache entries are required to 576 * handle both tx+rx MIC keys. 577 */ 578 if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC)) 579 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 580 /* 581 * If the h/w supports storing tx+rx MIC keys 582 * in one cache slot automatically enable use. 583 */ 584 if (ath_hal_hastkipsplit(ah) || 585 !ath_hal_settkipsplit(ah, AH_FALSE)) 586 sc->sc_splitmic = 1; 587 /* 588 * If the h/w can do TKIP MIC together with WME then 589 * we use it; otherwise we force the MIC to be done 590 * in software by the net80211 layer. 591 */ 592 if (ath_hal_haswmetkipmic(ah)) 593 sc->sc_wmetkipmic = 1; 594 } 595 sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR); 596 /* 597 * Check for multicast key search support. 598 */ 599 if (ath_hal_hasmcastkeysearch(sc->sc_ah) && 600 !ath_hal_getmcastkeysearch(sc->sc_ah)) { 601 ath_hal_setmcastkeysearch(sc->sc_ah, 1); 602 } 603 sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah); 604 /* 605 * Mark key cache slots associated with global keys 606 * as in use. If we knew TKIP was not to be used we 607 * could leave the +32, +64, and +32+64 slots free. 
608 */ 609 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 610 setbit(sc->sc_keymap, i); 611 setbit(sc->sc_keymap, i+64); 612 if (sc->sc_splitmic) { 613 setbit(sc->sc_keymap, i+32); 614 setbit(sc->sc_keymap, i+32+64); 615 } 616 } 617 /* 618 * TPC support can be done either with a global cap or 619 * per-packet support. The latter is not available on 620 * all parts. We're a bit pedantic here as all parts 621 * support a global cap. 622 */ 623 if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah)) 624 ic->ic_caps |= IEEE80211_C_TXPMGT; 625 626 /* 627 * Mark WME capability only if we have sufficient 628 * hardware queues to do proper priority scheduling. 629 */ 630 if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK]) 631 ic->ic_caps |= IEEE80211_C_WME; 632 /* 633 * Check for misc other capabilities. 634 */ 635 if (ath_hal_hasbursting(ah)) 636 ic->ic_caps |= IEEE80211_C_BURST; 637 sc->sc_hasbmask = ath_hal_hasbssidmask(ah); 638 sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah); 639 sc->sc_hastsfadd = ath_hal_hastsfadjust(ah); 640 sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah); 641 sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah); 642 if (ath_hal_hasfastframes(ah)) 643 ic->ic_caps |= IEEE80211_C_FF; 644 wmodes = ath_hal_getwirelessmodes(ah); 645 if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO)) 646 ic->ic_caps |= IEEE80211_C_TURBOP; 647#ifdef IEEE80211_SUPPORT_TDMA 648 if (ath_hal_macversion(ah) > 0x78) { 649 ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */ 650 ic->ic_tdma_update = ath_tdma_update; 651 } 652#endif 653 654 /* 655 * TODO: enforce that at least this many frames are available 656 * in the txbuf list before allowing data frames (raw or 657 * otherwise) to be transmitted. 658 */ 659 sc->sc_txq_data_minfree = 10; 660 /* 661 * Leave this as default to maintain legacy behaviour. 662 * Shortening the cabq/mcastq may end up causing some 663 * undesirable behaviour. 
664 */ 665 sc->sc_txq_mcastq_maxdepth = ath_txbuf; 666 667 /* 668 * Allow the TX and RX chainmasks to be overridden by 669 * environment variables and/or device.hints. 670 * 671 * This must be done early - before the hardware is 672 * calibrated or before the 802.11n stream calculation 673 * is done. 674 */ 675 if (resource_int_value(device_get_name(sc->sc_dev), 676 device_get_unit(sc->sc_dev), "rx_chainmask", 677 &rx_chainmask) == 0) { 678 device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n", 679 rx_chainmask); 680 (void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask); 681 } 682 if (resource_int_value(device_get_name(sc->sc_dev), 683 device_get_unit(sc->sc_dev), "tx_chainmask", 684 &tx_chainmask) == 0) { 685 device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n", 686 tx_chainmask); 687 (void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask); 688 } 689 690 /* 691 * The if_ath 11n support is completely not ready for normal use. 692 * Enabling this option will likely break everything and everything. 693 * Don't think of doing that unless you know what you're doing. 694 */ 695 696#ifdef ATH_ENABLE_11N 697 /* 698 * Query HT capabilities 699 */ 700 if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK && 701 (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) { 702 int rxs, txs; 703 704 device_printf(sc->sc_dev, "[HT] enabling HT modes\n"); 705 ic->ic_htcaps = IEEE80211_HTC_HT /* HT operation */ 706 | IEEE80211_HTC_AMPDU /* A-MPDU tx/rx */ 707 | IEEE80211_HTC_AMSDU /* A-MSDU tx/rx */ 708 | IEEE80211_HTCAP_MAXAMSDU_3839 709 /* max A-MSDU length */ 710 | IEEE80211_HTCAP_SMPS_OFF; /* SM power save off */ 711 ; 712 713 /* 714 * Enable short-GI for HT20 only if the hardware 715 * advertises support. 716 * Notably, anything earlier than the AR9287 doesn't. 
717 */ 718 if ((ath_hal_getcapability(ah, 719 HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) && 720 (wmodes & HAL_MODE_HT20)) { 721 device_printf(sc->sc_dev, 722 "[HT] enabling short-GI in 20MHz mode\n"); 723 ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20; 724 } 725 726 if (wmodes & HAL_MODE_HT40) 727 ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40 728 | IEEE80211_HTCAP_SHORTGI40; 729 730 /* 731 * TX/RX streams need to be taken into account when 732 * negotiating which MCS rates it'll receive and 733 * what MCS rates are available for TX. 734 */ 735 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs); 736 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs); 737 738 ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask); 739 ath_hal_gettxchainmask(ah, &sc->sc_txchainmask); 740 741 ic->ic_txstream = txs; 742 ic->ic_rxstream = rxs; 743 744 (void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1, 745 &sc->sc_rts_aggr_limit); 746 if (sc->sc_rts_aggr_limit != (64 * 1024)) 747 device_printf(sc->sc_dev, 748 "[HT] RTS aggregates limited to %d KiB\n", 749 sc->sc_rts_aggr_limit / 1024); 750 751 device_printf(sc->sc_dev, 752 "[HT] %d RX streams; %d TX streams\n", rxs, txs); 753 } 754#endif 755 756 /* 757 * Check if the hardware requires PCI register serialisation. 758 * Some of the Owl based MACs require this. 759 */ 760 if (mp_ncpus > 1 && 761 ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR, 762 0, NULL) == HAL_OK) { 763 sc->sc_ah->ah_config.ah_serialise_reg_war = 1; 764 device_printf(sc->sc_dev, 765 "Enabling register serialisation\n"); 766 } 767 768 /* 769 * Indicate we need the 802.11 header padded to a 770 * 32-bit boundary for 4-address and QoS frames. 771 */ 772 ic->ic_flags |= IEEE80211_F_DATAPAD; 773 774 /* 775 * Query the hal about antenna support. 776 */ 777 sc->sc_defant = ath_hal_getdefantenna(ah); 778 779 /* 780 * Not all chips have the VEOL support we want to 781 * use with IBSS beacons; check here for it. 
782 */ 783 sc->sc_hasveol = ath_hal_hasveol(ah); 784 785 /* get mac address from hardware */ 786 ath_hal_getmac(ah, macaddr); 787 if (sc->sc_hasbmask) 788 ath_hal_getbssidmask(ah, sc->sc_hwbssidmask); 789 790 /* NB: used to size node table key mapping array */ 791 ic->ic_max_keyix = sc->sc_keymax; 792 /* call MI attach routine. */ 793 ieee80211_ifattach(ic, macaddr); 794 ic->ic_setregdomain = ath_setregdomain; 795 ic->ic_getradiocaps = ath_getradiocaps; 796 sc->sc_opmode = HAL_M_STA; 797 798 /* override default methods */ 799 ic->ic_newassoc = ath_newassoc; 800 ic->ic_updateslot = ath_updateslot; 801 ic->ic_wme.wme_update = ath_wme_update; 802 ic->ic_vap_create = ath_vap_create; 803 ic->ic_vap_delete = ath_vap_delete; 804 ic->ic_raw_xmit = ath_raw_xmit; 805 ic->ic_update_mcast = ath_update_mcast; 806 ic->ic_update_promisc = ath_update_promisc; 807 ic->ic_node_alloc = ath_node_alloc; 808 sc->sc_node_free = ic->ic_node_free; 809 ic->ic_node_free = ath_node_free; 810 sc->sc_node_cleanup = ic->ic_node_cleanup; 811 ic->ic_node_cleanup = ath_node_cleanup; 812 ic->ic_node_getsignal = ath_node_getsignal; 813 ic->ic_scan_start = ath_scan_start; 814 ic->ic_scan_end = ath_scan_end; 815 ic->ic_set_channel = ath_set_channel; 816#ifdef ATH_ENABLE_11N 817 /* 802.11n specific - but just override anyway */ 818 sc->sc_addba_request = ic->ic_addba_request; 819 sc->sc_addba_response = ic->ic_addba_response; 820 sc->sc_addba_stop = ic->ic_addba_stop; 821 sc->sc_bar_response = ic->ic_bar_response; 822 sc->sc_addba_response_timeout = ic->ic_addba_response_timeout; 823 824 ic->ic_addba_request = ath_addba_request; 825 ic->ic_addba_response = ath_addba_response; 826 ic->ic_addba_response_timeout = ath_addba_response_timeout; 827 ic->ic_addba_stop = ath_addba_stop; 828 ic->ic_bar_response = ath_bar_response; 829 830 ic->ic_update_chw = ath_update_chw; 831#endif /* ATH_ENABLE_11N */ 832 833 ieee80211_radiotap_attach(ic, 834 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 835 
ATH_TX_RADIOTAP_PRESENT, 836 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 837 ATH_RX_RADIOTAP_PRESENT); 838 839 /* 840 * Setup dynamic sysctl's now that country code and 841 * regdomain are available from the hal. 842 */ 843 ath_sysctlattach(sc); 844 ath_sysctl_stats_attach(sc); 845 ath_sysctl_hal_attach(sc); 846 847 if (bootverbose) 848 ieee80211_announce(ic); 849 ath_announce(sc); 850 return 0; 851bad2: 852 ath_tx_cleanup(sc); 853 ath_desc_free(sc); 854bad: 855 if (ah) 856 ath_hal_detach(ah); 857 if (ifp != NULL) 858 if_free(ifp); 859 sc->sc_invalid = 1; 860 return error; 861} 862 863int 864ath_detach(struct ath_softc *sc) 865{ 866 struct ifnet *ifp = sc->sc_ifp; 867 868 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 869 __func__, ifp->if_flags); 870 871 /* 872 * NB: the order of these is important: 873 * o stop the chip so no more interrupts will fire 874 * o call the 802.11 layer before detaching the hal to 875 * insure callbacks into the driver to delete global 876 * key cache entries can be handled 877 * o free the taskqueue which drains any pending tasks 878 * o reclaim the tx queue data structures after calling 879 * the 802.11 layer as we'll get called back to reclaim 880 * node state and potentially want to use them 881 * o to cleanup the tx queues the hal is called, so detach 882 * it last 883 * Other than that, it's straightforward... 884 */ 885 ath_stop(ifp); 886 ieee80211_ifdetach(ifp->if_l2com); 887 taskqueue_free(sc->sc_tq); 888#ifdef ATH_TX99_DIAG 889 if (sc->sc_tx99 != NULL) 890 sc->sc_tx99->detach(sc->sc_tx99); 891#endif 892 ath_rate_detach(sc->sc_rc); 893 894 ath_dfs_detach(sc); 895 ath_desc_free(sc); 896 ath_tx_cleanup(sc); 897 ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */ 898 if_free(ifp); 899 900 return 0; 901} 902 903/* 904 * MAC address handling for multiple BSS on the same radio. 905 * The first vap uses the MAC address from the EEPROM. 
For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
 */
static void
assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && sc->sc_hasbmask) {
		/* NB: we only do this if h/w supports multiple bssid */
		/* find the first free bssid slot (0..7) */
		for (i = 0; i < 8; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		/* NB: slot 0 keeps the EEPROM address unmodified */
		if (i != 0)
			mac[0] |= (i << 2)|0x2;		/* U/L bit + index in bits 2..4 */
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	/* clear hw mask bits for the first-octet bits that now vary */
	sc->sc_hwbssidmask[0] &= ~mac[0];
	/* NB: slot 0 is reference-counted; see reclaim_address */
	if (i == 0)
		sc->sc_nbssid0++;
}

/*
 * Release the multi-BSSID slot derived from a vap's MAC address
 * (the slot index lives in bits 2..4 of mac[0], see assign_address)
 * and recompute the hardware BSSID mask from the slots still in use.
 * Slot 0 is shared by all vap's using the unmodified EEPROM address
 * and is only released when its reference count drops to zero.
 */
static void
reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	int i = mac[0] >> 2;
	uint8_t mask;

	if (i != 0 || --sc->sc_nbssid0 == 0) {
		sc->sc_bssidmask &= ~(1<<i);
		/* recalculate bssid mask from remaining addresses */
		mask = 0xff;
		for (i = 1; i < 8; i++)
			if (sc->sc_bssidmask & (1<<i))
				mask &= ~((i<<2)|0x2);
		sc->sc_hwbssidmask[0] |= mask;
	}
}

/*
 * Assign a beacon xmit slot.  We try to space out
 * assignments so when beacons are staggered the
 * traffic coming out of the cab q has maximal time
 * to go out before the next beacon is scheduled.
 */
static int
assign_bslot(struct ath_softc *sc)
{
	u_int slot, free;

	free = 0;	/* fallback if no doubly-free slot is found */
	for (slot = 0; slot < ATH_BCBUF; slot++)
		if (sc->sc_bslot[slot] == NULL) {
			/* prefer a slot whose neighbours are also free */
			if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
			    sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
				return slot;
			free = slot;
			/* NB: keep looking for a double slot */
		}
	return free;
}

/*
 * Create and set up a new vap.  Validates the requested opmode
 * against the vap's already configured, assigns a (possibly unique)
 * MAC address and beacon slot, installs driver overrides for the
 * net80211 vap methods and finally attaches the vap to net80211.
 * Returns the new vap, or NULL on failure.
 */
#if __FreeBSD_version > 1000003
static struct ieee80211vap *
ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
#else
static struct ieee80211vap *
ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    int opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
#endif
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp;
	struct ieee80211vap *vap;
	uint8_t mac[IEEE80211_ADDR_LEN];
	int needbeacon, error;
	enum ieee80211_opmode ic_opmode;

	/* NB: M_WAITOK|M_ZERO cannot fail */
	avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
	    M_80211_VAP, M_WAITOK | M_ZERO);
	needbeacon = 0;
	IEEE80211_ADDR_COPY(mac, mac0);

	ATH_LOCK(sc);
	ic_opmode = opmode;		/* default to opmode of new vap */
	switch (opmode) {
	case IEEE80211_M_STA:
		if (sc->sc_nstavaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev, "only 1 sta vap supported\n");
			goto bad;
		}
		if (sc->sc_nvaps) {
			/*
			 * With multiple vaps we must fall back
			 * to s/w beacon miss handling.
			 */
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		if (flags & IEEE80211_CLONE_NOBEACONS) {
			/*
			 * Station mode w/o beacons are implemented w/ AP mode.
			 */
			ic_opmode = IEEE80211_M_HOSTAP;
		}
		break;
	case IEEE80211_M_IBSS:
		if (sc->sc_nvaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev,
			    "only 1 ibss vap supported\n");
			goto bad;
		}
		needbeacon = 1;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (flags & IEEE80211_CLONE_TDMA) {
			if (sc->sc_nvaps != 0) {
				device_printf(sc->sc_dev,
				    "only 1 tdma vap supported\n");
				goto bad;
			}
			needbeacon = 1;
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		/* fall thru... */
#endif
	case IEEE80211_M_MONITOR:
		if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
			/*
			 * Adopt existing mode.  Adding a monitor or ahdemo
			 * vap to an existing configuration is of dubious
			 * value but should be ok.
			 */
			/* XXX not right for monitor mode */
			ic_opmode = ic->ic_opmode;
		}
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		needbeacon = 1;
		break;
	case IEEE80211_M_WDS:
		if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
			device_printf(sc->sc_dev,
			    "wds not supported in sta mode\n");
			goto bad;
		}
		/*
		 * Silently remove any request for a unique
		 * bssid; WDS vap's always share the local
		 * mac address.
		 */
		flags &= ~IEEE80211_CLONE_BSSID;
		if (sc->sc_nvaps == 0)
			ic_opmode = IEEE80211_M_HOSTAP;
		else
			ic_opmode = ic->ic_opmode;
		break;
	default:
		device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
		goto bad;
	}
	/*
	 * Check that a beacon buffer is available; the code below assumes it.
	 * NB(review): bitwise '&' — works here since both operands are 0/1,
	 * but '&&' would be clearer.
	 */
	if (needbeacon & TAILQ_EMPTY(&sc->sc_bbuf)) {
		device_printf(sc->sc_dev, "no beacon buffer available\n");
		goto bad;
	}

	/* STA, AHDEMO? */
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
	}

	vap = &avp->av_vap;
	/* XXX can't hold mutex across if_alloc */
	ATH_UNLOCK(sc);
	error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
	    bssid, mac);
	ATH_LOCK(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: error %d creating vap\n",
		    __func__, error);
		goto bad2;
	}

	/* h/w crypto support */
	vap->iv_key_alloc = ath_key_alloc;
	vap->iv_key_delete = ath_key_delete;
	vap->iv_key_set = ath_key_set;
	vap->iv_key_update_begin = ath_key_update_begin;
	vap->iv_key_update_end = ath_key_update_end;

	/* override various methods; save originals for chaining */
	avp->av_recv_mgmt = vap->iv_recv_mgmt;
	vap->iv_recv_mgmt = ath_recv_mgmt;
	vap->iv_reset = ath_reset_vap;
	vap->iv_update_beacon = ath_beacon_update;
	avp->av_newstate = vap->iv_newstate;
	vap->iv_newstate = ath_newstate;
	avp->av_bmiss = vap->iv_bmiss;
	vap->iv_bmiss = ath_bmiss_vap;

	/* Set default parameters */

	/*
	 * Anything earlier than some AR9300 series MACs don't
	 * support a smaller MPDU density.
	 */
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
	/*
	 * All NICs can handle the maximum size, however
	 * AR5416 based MACs can only TX aggregates w/ RTS
	 * protection when the total aggregate size is <= 8k.
	 * However, for now that's enforced by the TX path.
	 */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;

	avp->av_bslot = -1;	/* no beacon slot assigned yet */
	if (needbeacon) {
		/*
		 * Allocate beacon state and setup the q for buffered
		 * multicast frames.  We know a beacon buffer is
		 * available because we checked above.
		 */
		avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
		TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
		if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
			/*
			 * Assign the vap to a beacon xmit slot.  As above
			 * this cannot fail to find a free one.
			 */
			avp->av_bslot = assign_bslot(sc);
			KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
			    ("beacon slot %u not empty", avp->av_bslot));
			sc->sc_bslot[avp->av_bslot] = vap;
			sc->sc_nbcnvaps++;
		}
		if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
			/*
			 * Multple vaps are to transmit beacons and we
			 * have h/w support for TSF adjusting; enable
			 * use of staggered beacons.
			 */
			sc->sc_stagbeacons = 1;
		}
		ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
	}

	ic->ic_opmode = ic_opmode;
	/* NB: WDS vap's don't count against the vap totals */
	if (opmode != IEEE80211_M_WDS) {
		sc->sc_nvaps++;
		if (opmode == IEEE80211_M_STA)
			sc->sc_nstavaps++;
		if (opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps++;
	}
	/* map the 802.11 opmode onto the hal operating mode */
	switch (ic_opmode) {
	case IEEE80211_M_IBSS:
		sc->sc_opmode = HAL_M_IBSS;
		break;
	case IEEE80211_M_STA:
		sc->sc_opmode = HAL_M_STA;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (vap->iv_caps & IEEE80211_C_TDMA) {
			sc->sc_tdma = 1;
			/* NB: disable tsf adjust */
			sc->sc_stagbeacons = 0;
		}
		/*
		 * NB: adhoc demo mode is a pseudo mode; to the hal it's
		 * just ap mode.
		 */
		/* fall thru... */
#endif
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		sc->sc_opmode = HAL_M_HOSTAP;
		break;
	case IEEE80211_M_MONITOR:
		sc->sc_opmode = HAL_M_MONITOR;
		break;
	default:
		/* XXX should not happen */
		break;
	}
	if (sc->sc_hastsfadd) {
		/*
		 * Configure whether or not TSF adjust should be done.
		 */
		ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
	}
	if (flags & IEEE80211_CLONE_NOBEACONS) {
		/*
		 * Enable s/w beacon miss handling.
		 */
		sc->sc_swbmiss = 1;
	}
	ATH_UNLOCK(sc);

	/* complete setup */
	ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
	return vap;
bad2:
	reclaim_address(sc, mac);
	ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
bad:
	free(avp, M_80211_VAP);
	ATH_UNLOCK(sc);
	return NULL;
}

/*
 * Reclaim a vap: quiesce the hardware, detach the vap from net80211,
 * release its beacon state and MAC address, update the vap
 * accounting and restart rx/beacons if the interface is still
 * marked running.
 */
static void
ath_vap_delete(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp = ATH_VAP(vap);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Quiesce the hardware while we remove the vap.  In
		 * particular we need to reclaim all references to
		 * the vap state by any frames pending on the tx queues.
		 */
		ath_hal_intrset(ah, 0);		/* disable interrupts */
		ath_draintxq(sc, ATH_RESET_DEFAULT);	/* stop hw xmit side */
		/* XXX Do all frames from all vaps/nodes need draining here? */
		ath_stoprecv(sc, 1);		/* stop recv side */
	}

	ieee80211_vap_detach(vap);

	/*
	 * XXX Danger Will Robinson! Danger!
	 *
	 * Because ieee80211_vap_detach() can queue a frame (the station
	 * diassociate message?) after we've drained the TXQ and
	 * flushed the software TXQ, we will end up with a frame queued
	 * to a node whose vap is about to be freed.
	 *
	 * To work around this, flush the hardware/software again.
	 * This may be racy - the ath task may be running and the packet
	 * may be being scheduled between sw->hw txq. Tsk.
	 *
	 * TODO: figure out why a new node gets allocated somewhere around
	 * here (after the ath_tx_swq() call; and after an ath_stop_locked()
	 * call!)
	 */

	ath_draintxq(sc, ATH_RESET_DEFAULT);

	ATH_LOCK(sc);
	/*
	 * Reclaim beacon state.  Note this must be done before
	 * the vap instance is reclaimed as we may have a reference
	 * to it in the buffer for the beacon frame.
	 */
	if (avp->av_bcbuf != NULL) {
		if (avp->av_bslot != -1) {
			sc->sc_bslot[avp->av_bslot] = NULL;
			sc->sc_nbcnvaps--;
		}
		ath_beacon_return(sc, avp->av_bcbuf);
		avp->av_bcbuf = NULL;
		if (sc->sc_nbcnvaps == 0) {
			sc->sc_stagbeacons = 0;
			if (sc->sc_hastsfadd)
				ath_hal_settsfadjust(sc->sc_ah, 0);
		}
		/*
		 * Reclaim any pending mcast frames for the vap.
		 */
		ath_tx_draintxq(sc, &avp->av_mcastq);
		ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);
	}
	/*
	 * Update bookkeeping.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		sc->sc_nstavaps--;
		/* last sta vap gone: no more s/w beacon miss handling */
		if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
			sc->sc_swbmiss = 0;
	} else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	    vap->iv_opmode == IEEE80211_M_MBSS) {
		reclaim_address(sc, vap->iv_myaddr);
		ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
		if (vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps--;
	}
	if (vap->iv_opmode != IEEE80211_M_WDS)
		sc->sc_nvaps--;
#ifdef IEEE80211_SUPPORT_TDMA
	/* TDMA operation ceases when the last vap is destroyed */
	if (sc->sc_tdma && sc->sc_nvaps == 0) {
		sc->sc_tdma = 0;
		sc->sc_swbmiss = 0;
	}
#endif
	free(avp, M_80211_VAP);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Restart rx+tx machines if still running (RUNNING will
		 * be reset if we just destroyed the last vap).
		 */
		if (ath_startrecv(sc) != 0)
			if_printf(ifp, "%s: unable to restart recv logic\n",
			    __func__);
		if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma)
				ath_tdma_config(sc, NULL);
			else
#endif
				ath_beacon_config(sc, NULL);
		}
		ath_hal_intrset(ah, sc->sc_imask);
	}
	ATH_UNLOCK(sc);
}

/*
 * Suspend handling: remember whether the interface was up so
 * ath_resume() can restore it, then bring the device down
 * (sta mode) or suspend all vap's via net80211.
 */
void
ath_suspend(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;
	if (ic->ic_opmode == IEEE80211_M_STA)
		ath_stop(ifp);
	else
		ieee80211_suspend_all(ic);
	/*
	 * NB: don't worry about putting the chip in low power
	 * mode; pci will power off our socket on suspend and
	 * CardBus detaches the device.
	 */
}

/*
 * Reset the key cache since some parts do not reset the
 * contents on resume.  First we clear all entries, then
 * re-load keys that the 802.11 layer assumes are setup
 * in h/w.
 */
static void
ath_reset_keycache(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	int i;

	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);
	ieee80211_crypto_reload_keys(ic);
}

/*
 * Resume handling: reset the chip (we were powered down during
 * suspend), reload the key cache, restore DFS/LED state and bring
 * the interface back up if it was running before suspend.
 */
void
ath_resume(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * Must reset the chip before we reload the
	 * keycache as we were powered down on suspend.
	 */
	ath_hal_reset(ah, sc->sc_opmode,
	    sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
	    AH_FALSE, &status);
	ath_reset_keycache(sc);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/* Restore the LED configuration */
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);

	if (sc->sc_resume_up) {
		if (ic->ic_opmode == IEEE80211_M_STA) {
			ath_init(sc);
			ath_hal_setledstate(ah, HAL_LED_RUN);
			/*
			 * Program the beacon registers using the last rx'd
			 * beacon frame and enable sync on the next beacon
			 * we see.  This should handle the case where we
			 * wakeup and find the same AP and also the case where
			 * we wakeup and need to roam.  For the latter we
			 * should get bmiss events that trigger a roam.
			 */
			ath_beacon_config(sc, NULL);
			sc->sc_syncbeacon = 1;
		} else
			ieee80211_resume_all(ic);
	}

	/* XXX beacons ? */
}

/*
 * Shutdown hook: just stop the interface; the system is rebooting.
 */
void
ath_shutdown(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	ath_stop(ifp);
	/* NB: no point powering down chip as we're about to reboot */
}

/*
 * Interrupt handler.  Most of the actual processing is deferred.
 */
void
ath_intr(void *arg)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	HAL_INT status = 0;
	uint32_t txqs;

	/*
	 * If we're inside a reset path, just print a warning and
	 * clear the ISR. The reset routine will finish it for us.
	 */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt) {
		HAL_INT status;	/* NB: deliberately shadows outer 'status' */
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		DPRINTF(sc, ATH_DEBUG_ANY,
		    "%s: in reset, ignoring: status=0x%x\n",
		    __func__, status);
		ATH_PCU_UNLOCK(sc);
		return;
	}

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	if (!ath_hal_intrpend(ah)) {		/* shared irq, not for us */
		ATH_PCU_UNLOCK(sc);
		return;
	}

	if ((ifp->if_flags & IFF_UP) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		HAL_INT status;	/* NB: deliberately shadows outer 'status' */

		DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
			__func__, ifp->if_flags);
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		ATH_PCU_UNLOCK(sc);
		return;
	}

	/*
	 * Figure out the reason(s) for the interrupt.  Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to insure we only process bits we requested.
	 */
	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
	CTR1(ATH_KTR_INTR, "ath_intr: mask=0x%.8x", status);
#ifdef	ATH_KTR_INTR_DEBUG
	CTR5(ATH_KTR_INTR,
	    "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
	    ah->ah_intrstate[0],
	    ah->ah_intrstate[1],
	    ah->ah_intrstate[2],
	    ah->ah_intrstate[3],
	    ah->ah_intrstate[6]);
#endif
	status &= sc->sc_imask;			/* discard unasked for bits */

	/* Short-circuit un-handled interrupts */
	if (status == 0x0) {
		ATH_PCU_UNLOCK(sc);
		return;
	}

	/*
	 * Take a note that we're inside the interrupt handler, so
	 * the reset routines know to wait.
	 */
	sc->sc_intr_cnt++;
	ATH_PCU_UNLOCK(sc);

	/*
	 * Handle the interrupt. We won't run concurrent with the reset
	 * or channel change routines as they'll wait for sc_intr_cnt
	 * to be 0 before continuing.
	 */
	if (status & HAL_INT_FATAL) {
		sc->sc_stats.ast_hardware++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		ath_fatal_proc(sc, 0);
	} else {
		if (status & HAL_INT_SWBA) {
			/*
			 * Software beacon alert--time to send a beacon.
			 * Handle beacon transmission directly; deferring
			 * this is too slow to meet timing constraints
			 * under load.
			 */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma) {
				if (sc->sc_tdmaswba == 0) {
					struct ieee80211com *ic = ifp->if_l2com;
					struct ieee80211vap *vap =
					    TAILQ_FIRST(&ic->ic_vaps);
					ath_tdma_beacon_send(sc, vap);
					sc->sc_tdmaswba =
					    vap->iv_tdma->tdma_bintval;
				} else
					sc->sc_tdmaswba--;
			} else
#endif
			{
				ath_beacon_proc(sc, 0);
#ifdef IEEE80211_SUPPORT_SUPERG
				/*
				 * Schedule the rx taskq in case there's no
				 * traffic so any frames held on the staging
				 * queue are aged and potentially flushed.
				 */
				taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
#endif
			}
		}
		if (status & HAL_INT_RXEOL) {
			int imask;
			CTR0(ATH_KTR_ERR, "ath_intr: RXEOL");
			ATH_PCU_LOCK(sc);
			/*
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			sc->sc_stats.ast_rxeol++;
			/*
			 * Disable RXEOL/RXORN - prevent an interrupt
			 * storm until the PCU logic can be reset.
			 * In case the interface is reset some other
			 * way before "sc_kickpcu" is called, don't
			 * modify sc_imask - that way if it is reset
			 * by a call to ath_reset() somehow, the
			 * interrupt mask will be correctly reprogrammed.
			 */
			imask = sc->sc_imask;
			imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
			ath_hal_intrset(ah, imask);
			/*
			 * Only blank sc_rxlink if we've not yet kicked
			 * the PCU.
			 *
			 * This isn't entirely correct - the correct solution
			 * would be to have a PCU lock and engage that for
			 * the duration of the PCU fiddling; which would include
			 * running the RX process. Otherwise we could end up
			 * messing up the RX descriptor chain and making the
			 * RX desc list much shorter.
			 */
			if (! sc->sc_kickpcu)
				sc->sc_rxlink = NULL;
			sc->sc_kickpcu = 1;
			/*
			 * Enqueue an RX proc, to handled whatever
			 * is in the RX queue.
			 * This will then kick the PCU.
			 */
			taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
			ATH_PCU_UNLOCK(sc);
		}
		if (status & HAL_INT_TXURN) {
			sc->sc_stats.ast_txurn++;
			/* bump tx trigger level */
			ath_hal_updatetxtriglevel(ah, AH_TRUE);
		}
		if (status & HAL_INT_RX) {
			sc->sc_stats.ast_rx_intr++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
		}
		if (status & HAL_INT_TX) {
			sc->sc_stats.ast_tx_intr++;
			/*
			 * Grab all the currently set bits in the HAL txq bitmap
			 * and blank them. This is the only place we should be
			 * doing this.
			 */
			ATH_PCU_LOCK(sc);
			txqs = 0xffffffff;
			ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
			sc->sc_txq_active |= txqs;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
			ATH_PCU_UNLOCK(sc);
		}
		if (status & HAL_INT_BMISS) {
			sc->sc_stats.ast_bmiss++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
		}
		if (status & HAL_INT_GTT)
			sc->sc_stats.ast_tx_timeout++;
		if (status & HAL_INT_CST)
			sc->sc_stats.ast_tx_cst++;
		if (status & HAL_INT_MIB) {
			sc->sc_stats.ast_mib++;
			ATH_PCU_LOCK(sc);
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to fire.
			 */
			ath_hal_intrset(ah, 0);
			/*
			 * Let the hal handle the event.  We assume it will
			 * clear whatever condition caused the interrupt.
			 */
			ath_hal_mibevent(ah, &sc->sc_halstats);
			/*
			 * Don't reset the interrupt if we've just
			 * kicked the PCU, or we may get a nested
			 * RXEOL before the rxproc has had a chance
			 * to run.
			 */
			if (sc->sc_kickpcu == 0)
				ath_hal_intrset(ah, sc->sc_imask);
			ATH_PCU_UNLOCK(sc);
		}
		if (status & HAL_INT_RXORN) {
			/* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
			CTR0(ATH_KTR_ERR, "ath_intr: RXORN");
			sc->sc_stats.ast_rxorn++;
		}
	}
	ATH_PCU_LOCK(sc);
	sc->sc_intr_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Deferred processing of a fatal hardware error: dump whatever
 * diagnostic state the hal can provide and reset the chip.
 */
static void
ath_fatal_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t *state;
	u_int32_t len;
	void *sp;

	if_printf(ifp, "hardware error; resetting\n");
	/*
	 * Fatal errors are unrecoverable.  Typically these
	 * are caused by DMA errors.  Collect h/w state from
	 * the hal so we can diagnose what's going on.
	 */
	if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
		KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
		state = sp;
		if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
		    state[0], state[1] , state[2], state[3],
		    state[4], state[5]);
	}
	ath_reset(ifp, ATH_RESET_NOLOSS);
}

/*
 * Per-vap beacon miss handler.
 */
static void
ath_bmiss_vap(struct ieee80211vap *vap)
{
	/*
	 * Workaround phantom bmiss interrupts by sanity-checking
	 * the time of our last rx'd frame.  If it is within the
	 * beacon miss interval then ignore the interrupt.  If it's
	 * truly a bmiss we'll get another interrupt soon and that'll
	 * be dispatched up for processing.  Note this applies only
	 * for h/w beacon miss events.
	 */
	if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
		struct ifnet *ifp = vap->iv_ic->ic_ifp;
		struct ath_softc *sc = ifp->if_softc;
		u_int64_t lastrx = sc->sc_lastrx;
		u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
		/* XXX should take a locked ref to iv_bss */
		u_int bmisstimeout =
		    vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
		    __func__, (unsigned long long) tsf,
		    (unsigned long long)(tsf - lastrx),
		    (unsigned long long) lastrx, bmisstimeout);

		if (tsf - lastrx <= bmisstimeout) {
			sc->sc_stats.ast_bmiss_phantom++;
			return;
		}
	}
	/* chain to the saved net80211 bmiss handler */
	ATH_VAP(vap)->av_bmiss(vap);
}

/*
 * Query the hal for hang state via the diagnostic interface.
 * Returns 1 and sets *hangs on success, else 0.
 */
static int
ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
{
	uint32_t rsize;
	void *sp;

	if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize))
		return 0;
	KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
	*hangs = *(uint32_t *)sp;
	return 1;
}

/*
 * Deferred beacon-miss handler: if the hardware reports a hang,
 * reset; otherwise hand the beacon miss up to net80211.
 */
static void
ath_bmiss_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t hangs;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);

	if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
		if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs);
		ath_reset(ifp, ATH_RESET_NOLOSS);
	} else
		ieee80211_beacon_miss(ifp->if_l2com);
}

/*
 * Handle TKIP MIC setup to deal hardware that doesn't do MIC
 * calcs together with WME.  If necessary disable the crypto
 * hardware and mark the 802.11 state so keys will be setup
 * with the MIC work done in software.
 */
static void
ath_settkipmic(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
		if (ic->ic_flags & IEEE80211_F_WME) {
			/* WME + h/w can't combine MIC w/ WME: do MIC in s/w */
			ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
			ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
		} else {
			ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		}
	}
}

/*
 * Bring the interface up: reset the hardware into a known state,
 * (re)program the current channel, restart the receive engine and
 * enable interrupts.  Takes/releases the ATH lock itself (if_init
 * style entry point; arg is the softc).
 */
static void
ath_init(void *arg)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	ATH_LOCK(sc);
	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop_locked(ifp);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ath_settkipmic(sc);
	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) {
		if_printf(ifp, "unable to reset hardware; hal status %u\n",
			status);
		ATH_UNLOCK(sc);
		return;
	}
	ath_chan_change(sc, ic->ic_curchan);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/*
	 * Likewise this is set during reset so update
	 * state cached in the driver.
	 */
	sc->sc_diversity = ath_hal_getdiversity(ah);
	/* reset the calibration/ANI bookkeeping */
	sc->sc_lastlongcal = 0;
	sc->sc_resetcal = 1;
	sc->sc_lastcalreset = 0;
	sc->sc_lastani = 0;
	sc->sc_lastshortcal = 0;
	sc->sc_doresetcal = AH_FALSE;
	/*
	 * Beacon timers were cleared here; give ath_newstate()
	 * a hint that the beacon timers should be poked when
	 * things transition to the RUN state.
	 */
	sc->sc_beacons = 0;

	/*
	 * Initial aggregation settings.
	 */
	sc->sc_hwq_limit = ATH_AGGR_MIN_QDEPTH;
	sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW;
	sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH;

	/*
	 * Setup the hardware after reset: the key cache
	 * is filled as needed and the receive engine is
	 * set going.  Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		ATH_UNLOCK(sc);
		return;
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = HAL_INT_RX | HAL_INT_TX
		  | HAL_INT_RXEOL | HAL_INT_RXORN
		  | HAL_INT_FATAL | HAL_INT_GLOBAL;
	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
		sc->sc_imask |= HAL_INT_MIB;

	/* Enable global TX timeout and carrier sense timeout if available */
	if (ath_hal_gtxto_supported(ah))
		sc->sc_imask |= HAL_INT_GTT;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n",
		__func__, sc->sc_imask);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
	ath_hal_intrset(ah, sc->sc_imask);

	ATH_UNLOCK(sc);

#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->start(sc->sc_tx99);
	else
#endif
	ieee80211_start_all(ic);		/* start all vap's */
}

/*
 * Stop the interface; the caller must hold the ATH lock.
 */
static void
ath_stop_locked(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	ATH_LOCK_ASSERT(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Shutdown the hardware and driver:
		 *    reset 802.11 state machine
		 *    turn off timers
		 *    disable interrupts
		 *    turn off the radio
		 *    clear transmit machinery
		 *    clear receive machinery
		 *    drain and release tx queues
		 *    reclaim beacon resources
		 *    power down hardware
		 *
		 * Note that some of this work is not possible if the
		 * hardware is gone (invalid).
		 */
#ifdef ATH_TX99_DIAG
		if (sc->sc_tx99 != NULL)
			sc->sc_tx99->stop(sc->sc_tx99);
#endif
		callout_stop(&sc->sc_wd_ch);
		sc->sc_wd_timer = 0;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		if (!sc->sc_invalid) {
			if (sc->sc_softled) {
				callout_stop(&sc->sc_ledtimer);
				ath_hal_gpioset(ah, sc->sc_ledpin,
					!sc->sc_ledon);
				sc->sc_blinking = 0;
			}
			ath_hal_intrset(ah, 0);
		}
		ath_draintxq(sc, ATH_RESET_DEFAULT);
		if (!sc->sc_invalid) {
			ath_stoprecv(sc, 1);
			ath_hal_phydisable(ah);
		} else
			sc->sc_rxlink = NULL;
		ath_beacon_free(sc);	/* XXX not needed */
	}
}

/*
 * Wait (up to MAX_TXRX_ITERATIONS ticks) for all pending tx/rx/intr
 * work to drain.  Called with the PCU lock held and the ATH lock
 * not held; the caller must already have blocked new work (e.g. by
 * bumping sc_inreset_cnt) or the counters may never reach zero.
 */
#define	MAX_TXRX_ITERATIONS	1000
static void
ath_txrx_stop_locked(struct ath_softc *sc)
{
	int i = MAX_TXRX_ITERATIONS;

	ATH_UNLOCK_ASSERT(sc);
	ATH_PCU_LOCK_ASSERT(sc);

	/*
	 * Sleep until all the pending operations have completed.
	 *
	 * The caller must ensure that reset has been incremented
	 * or the pending operations may continue being queued.
	 */
	while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
	    sc->sc_txstart_cnt || sc->sc_intr_cnt) {
		if (i <= 0)
			break;
		/* NB: drops/retakes the PCU lock for one tick */
		msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 1);
		i--;
	}

	if (i <= 0)
		device_printf(sc->sc_dev,
		    "%s: didn't finish after %d iterations\n",
		    __func__, MAX_TXRX_ITERATIONS);
}
#undef	MAX_TXRX_ITERATIONS

#if 0
/* Unlocked wrapper around ath_txrx_stop_locked(); currently unused. */
static void
ath_txrx_stop(struct ath_softc *sc)
{
	ATH_UNLOCK_ASSERT(sc);
	ATH_PCU_UNLOCK_ASSERT(sc);

	ATH_PCU_LOCK(sc);
	ath_txrx_stop_locked(sc);
	ATH_PCU_UNLOCK(sc);
}
#endif

/*
 * Resume deferred tx/rx processing by unblocking the driver
 * taskqueue.
 */
static void
ath_txrx_start(struct ath_softc *sc)
{

	taskqueue_unblock(sc->sc_tq);
}

/*
 * Grab the reset lock, and wait around until noone else
 * is trying to do anything with it.
 *
 * This is totally horrible but we can't hold this lock for
 * long enough to do TX/RX or we end up with net80211/ip stack
 * LORs and eventual deadlock.
 *
 * "dowait" signals whether to spin, waiting for the reset
 * lock count to reach 0. This should (for now) only be used
 * during the reset path, as the rest of the code may not
 * be locking-reentrant enough to behave correctly.
 *
 * Another, cleaner way should be found to serialise all of
 * these operations.
 */
#define	MAX_RESET_ITERATIONS	10
/*
 * Returns 1 if the "lock" (sc_inreset_cnt) was obtained exclusively,
 * 0 on a recursive/concurrent reset.  The refcount is incremented in
 * either case, so the caller must always decrement it afterwards.
 */
static int
ath_reset_grablock(struct ath_softc *sc, int dowait)
{
	int w = 0;
	int i = MAX_RESET_ITERATIONS;

	ATH_PCU_LOCK_ASSERT(sc);
	do {
		if (sc->sc_inreset_cnt == 0) {
			w = 1;		/* got it exclusively */
			break;
		}
		if (dowait == 0) {
			w = 0;		/* busy and not asked to wait */
			break;
		}
		/* Drop the PCU lock while sleeping so the holder can finish */
		ATH_PCU_UNLOCK(sc);
		pause("ath_reset_grablock", 1);
		i--;
		ATH_PCU_LOCK(sc);
	} while (i > 0);

	/*
	 * We always increment the refcounter, regardless
	 * of whether we succeeded to get it in an exclusive
	 * way.
	 */
	sc->sc_inreset_cnt++;

	if (i <= 0)
		device_printf(sc->sc_dev,
		    "%s: didn't finish after %d iterations\n",
		    __func__, MAX_RESET_ITERATIONS);

	if (w == 0)
		device_printf(sc->sc_dev,
		    "%s: warning, recursive reset path!\n",
		    __func__);

	return w;
}
#undef MAX_RESET_ITERATIONS

/*
 * XXX TODO: write ath_reset_releaselock
 */

/*
 * Stop the interface; wrapper that takes ATH_LOCK around
 * ath_stop_locked().
 */
static void
ath_stop(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;

	ATH_LOCK(sc);
	ath_stop_locked(ifp);
	ATH_UNLOCK(sc);
}

/*
 * Reset the hardware w/o losing operational state. This is
 * basically a more efficient way of doing ath_stop, ath_init,
 * followed by state transitions to the current 802.11
 * operational state.
 * Used to recover from various errors and
 * to reset or reload hardware state.
 */
int
ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;
	int i;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);

	/* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
	ATH_PCU_UNLOCK_ASSERT(sc);
	ATH_UNLOCK_ASSERT(sc);

	/* Try to stop any further TX/RX from occurring */
	taskqueue_block(sc->sc_tq);

	ATH_PCU_LOCK(sc);
	ath_hal_intrset(ah, 0);		/* disable interrupts */
	ath_txrx_stop_locked(sc);	/* Ensure TX/RX is stopped */
	if (ath_reset_grablock(sc, 1) == 0) {
		device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
		    __func__);
	}
	ATH_PCU_UNLOCK(sc);

	/*
	 * Should now wait for pending TX/RX to complete
	 * and block future ones from occurring. This needs to be
	 * done before the TX queue is drained.
	 */
	ath_draintxq(sc, reset_type);	/* stop xmit side */

	/*
	 * Regardless of whether we're doing a no-loss flush or
	 * not, stop the PCU and handle what's in the RX queue.
	 * That way frames aren't dropped which shouldn't be.
	 */
	ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
	ath_rx_proc(sc, 0);

	ath_settkipmic(sc);		/* configure TKIP MIC handling */
	/* NB: indicate channel change so we do a full reset */
	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
		if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
			__func__, status);
	sc->sc_diversity = ath_hal_getdiversity(ah);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	if (ath_startrecv(sc) != 0)	/* restart recv */
		if_printf(ifp, "%s: unable to start recv logic\n", __func__);
	/*
	 * We may be doing a reset in response to an ioctl
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_chan_change(sc, ic->ic_curchan);
	if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
		if (sc->sc_tdma)
			ath_tdma_config(sc, NULL);
		else
#endif
			ath_beacon_config(sc, NULL);
	}

	/*
	 * Release the reset lock and re-enable interrupts here.
	 * If an interrupt was being processed in ath_intr(),
	 * it would disable interrupts at this point. So we have
	 * to atomically enable interrupts and decrement the
	 * reset counter - this way ath_intr() doesn't end up
	 * disabling interrupts without a corresponding enable
	 * in the rest or channel change path.
	 */
	ATH_PCU_LOCK(sc);
	sc->sc_inreset_cnt--;
	/* XXX only do this if sc_inreset_cnt == 0? */
	ath_hal_intrset(ah, sc->sc_imask);
	ATH_PCU_UNLOCK(sc);

	/*
	 * TX and RX can be started here. If it were started with
	 * sc_inreset_cnt > 0, the TX and RX path would abort.
	 * Thus if this is a nested call through the reset or
	 * channel change code, TX completion will occur but
	 * RX completion and ath_start / ath_tx_start will not
	 * run.
	 */

	/* Restart TX/RX as needed */
	ath_txrx_start(sc);

	/* XXX Restart TX completion and pending TX */
	if (reset_type == ATH_RESET_NOLOSS) {
		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				ATH_TXQ_LOCK(&sc->sc_txq[i]);
				ath_txq_restart_dma(sc, &sc->sc_txq[i]);
				ath_txq_sched(sc, &sc->sc_txq[i]);
				ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
			}
		}
	}

	/*
	 * This may have been set during an ath_start() call which
	 * set this once it detected a concurrent TX was going on.
	 * So, clear it.
	 */
	IF_LOCK(&ifp->if_snd);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	IF_UNLOCK(&ifp->if_snd);

	/* Handle any frames in the TX queue */
	/*
	 * XXX should this be done by the caller, rather than
	 * ath_reset() ?
	 */
	ath_start(ifp);			/* restart xmit */
	return 0;
}

/*
 * Per-vap reset callback from net80211; handles the cases that can
 * be done without a full chip reset (currently only TX power limit
 * changes), otherwise falls through to a full ath_reset().
 */
static int
ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	switch (cmd) {
	case IEEE80211_IOC_TXPOWER:
		/*
		 * If per-packet TPC is enabled, then we have nothing
		 * to do; otherwise we need to force the global limit.
		 * All this can happen directly; no need to reset.
		 */
		if (!ath_hal_gettpc(ah))
			ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
		return 0;
	}
	/* XXX? Full or NOLOSS? */
	return ath_reset(ifp, ATH_RESET_FULL);
}

/*
 * Fetch a TX buffer from the free list with ATH_TXBUF_LOCK held.
 * Returns NULL (and bumps a statistic) when the list is empty or
 * the head buffer is still busy in the hardware.
 */
struct ath_buf *
_ath_getbuf_locked(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ATH_TXBUF_LOCK_ASSERT(sc);

	bf = TAILQ_FIRST(&sc->sc_txbuf);
	if (bf == NULL) {
		sc->sc_stats.ast_tx_getnobuf++;
	} else {
		if (bf->bf_flags & ATH_BUF_BUSY) {
			sc->sc_stats.ast_tx_getbusybuf++;
			bf = NULL;
		}
	}

	if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0)
		TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
	else
		bf = NULL;

	if (bf == NULL) {
		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
		    TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
			"out of xmit buffers" : "xmit buffer busy");
		return NULL;
	}

	/* Valid bf here; clear some basic fields */
	bf->bf_next = NULL;	/* XXX just to be sure */
	bf->bf_last = NULL;	/* XXX again, just to be sure */
	bf->bf_comp = NULL;	/* XXX again, just to be sure */
	bzero(&bf->bf_state, sizeof(bf->bf_state));

	return bf;
}

/*
 * When retrying a software frame, buffers marked ATH_BUF_BUSY
 * can't be thrown back on the queue as they could still be
 * in use by the hardware.
 *
 * This duplicates the buffer, or returns NULL.
 *
 * The descriptor is also copied but the link pointers and
 * the DMA segments aren't copied; this frame should thus
 * be again passed through the descriptor setup/chain routines
 * so the link is correct.
 *
 * The caller must free the buffer using ath_freebuf().
 *
 * XXX TODO: this call shouldn't fail as it'll cause packet loss
 * XXX in the TX pathway when retries are needed.
 * XXX Figure out how to keep some buffers free, or factor the
 * XXX number of busy buffers into the xmit path (ath_start())
 * XXX so we don't over-commit.
 */
struct ath_buf *
ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_getbuf(sc);
	if (tbf == NULL)
		return NULL;	/* XXX failure? Why? */

	/* Copy basics */
	tbf->bf_next = NULL;
	tbf->bf_nseg = bf->bf_nseg;
	/* Clone must not inherit the busy marker */
	tbf->bf_flags = bf->bf_flags & ~ATH_BUF_BUSY;
	tbf->bf_status = bf->bf_status;
	tbf->bf_m = bf->bf_m;
	tbf->bf_node = bf->bf_node;
	/* will be setup by the chain/setup function */
	tbf->bf_lastds = NULL;
	/* for now, last == self */
	tbf->bf_last = tbf;
	tbf->bf_comp = bf->bf_comp;

	/* NOTE: DMA segments will be setup by the setup/chain functions */

	/* The caller has to re-init the descriptor + links */

	/* Copy state */
	memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state));

	return tbf;
}

/*
 * Fetch a TX buffer; on failure mark the interface OACTIVE so the
 * upper layer stops handing us frames until buffers free up.
 */
struct ath_buf *
ath_getbuf(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
	bf = _ath_getbuf_locked(sc);
	ATH_TXBUF_UNLOCK(sc);
	if (bf == NULL) {
		struct ifnet *ifp = sc->sc_ifp;

		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
		sc->sc_stats.ast_tx_qstop++;
		IF_LOCK(&ifp->if_snd);
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		IF_UNLOCK(&ifp->if_snd);
	}
	return bf;
}

/*
 * if_start method: drain the interface send queue, handing each
 * frame (and any 802.11 fragments chained via m_nextpkt) to
 * ath_tx_start().  Bails out early if a reset is in progress;
 * sc_txstart_cnt brackets the loop so the reset path can wait
 * for us in ath_txrx_stop_locked().
 */
static void
ath_start(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct ath_buf *bf;
	struct mbuf *m, *next;
	ath_bufhead frags;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;

	/* XXX is it ok to hold the ATH_LOCK here? */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev,
		    "%s: sc_inreset_cnt > 0; bailing\n", __func__);
		ATH_PCU_UNLOCK(sc);
		IF_LOCK(&ifp->if_snd);
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		IF_UNLOCK(&ifp->if_snd);
		return;
	}
	sc->sc_txstart_cnt++;
	ATH_PCU_UNLOCK(sc);

	for (;;) {
		/*
		 * Grab a TX buffer and associated resources.
		 */
		bf = ath_getbuf(sc);
		if (bf == NULL)
			break;

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* Nothing queued; return the unused buffer */
			ATH_TXBUF_LOCK(sc);
			TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
			ATH_TXBUF_UNLOCK(sc);
			break;
		}
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		/*
		 * Check for fragmentation. If this frame
		 * has been broken up verify we have enough
		 * buffers to send all the fragments so all
		 * go out or none...
		 */
		TAILQ_INIT(&frags);
		if ((m->m_flags & M_FRAG) &&
		    !ath_txfrag_setup(sc, &frags, m, ni)) {
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: out of txfrag buffers\n", __func__);
			sc->sc_stats.ast_tx_nofrag++;
			ifp->if_oerrors++;
			ath_freetx(m);
			goto bad;
		}
		ifp->if_opackets++;
	nextfrag:
		/*
		 * Pass the frame to the h/w for transmission.
		 * Fragmented frames have each frag chained together
		 * with m_nextpkt. We know there are sufficient ath_buf's
		 * to send all the frags because of work done by
		 * ath_txfrag_setup. We leave m_nextpkt set while
		 * calling ath_tx_start so it can use it to extend the
		 * tx duration to cover the subsequent frag and
		 * so it can reclaim all the mbufs in case of an error;
		 * ath_tx_start clears m_nextpkt once it commits to
		 * handing the frame to the hardware.
		 */
		next = m->m_nextpkt;
		if (ath_tx_start(sc, ni, bf, m)) {
	bad:
			ifp->if_oerrors++;
	reclaim:
			bf->bf_m = NULL;
			bf->bf_node = NULL;
			ATH_TXBUF_LOCK(sc);
			TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
			ath_txfrag_cleanup(sc, &frags, ni);
			ATH_TXBUF_UNLOCK(sc);
			if (ni != NULL)
				ieee80211_free_node(ni);
			continue;
		}
		if (next != NULL) {
			/*
			 * Beware of state changing between frags.
			 * XXX check sta power-save state?
			 */
			if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
				DPRINTF(sc, ATH_DEBUG_XMIT,
				    "%s: flush fragmented packet, state %s\n",
				    __func__,
				    ieee80211_state_name[ni->ni_vap->iv_state]);
				ath_freetx(next);
				goto reclaim;
			}
			m = next;
			bf = TAILQ_FIRST(&frags);
			KASSERT(bf != NULL, ("no buf for txfrag"));
			TAILQ_REMOVE(&frags, bf, bf_list);
			goto nextfrag;
		}

		/* Arm the watchdog now that a frame is in flight */
		sc->sc_wd_timer = 5;
	}

	ATH_PCU_LOCK(sc);
	sc->sc_txstart_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * ifmedia change callback.
 */
static int
ath_media_change(struct ifnet *ifp)
{
	int error = ieee80211_media_change(ifp);
	/* NB: only the fixed rate can change and that doesn't need a reset */
	return (error == ENETRESET ? 0 : error);
}

/*
 * Block/unblock tx+rx processing while a key change is done.
 * We assume the caller serializes key management operations
 * so we only need to worry about synchronization with other
 * uses that originate in the driver.
 */
static void
ath_key_update_begin(struct ieee80211vap *vap)
{
	struct ifnet *ifp = vap->iv_ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	taskqueue_block(sc->sc_tq);
	IF_LOCK(&ifp->if_snd);		/* NB: doesn't block mgmt frames */
}

static void
ath_key_update_end(struct ieee80211vap *vap)
{
	struct ifnet *ifp = vap->iv_ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	IF_UNLOCK(&ifp->if_snd);
	taskqueue_unblock(sc->sc_tq);
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o accept PHY error frames when hardware doesn't have MIB support
 *   to count and we need them for ANI (sta mode only until recently)
 *   and we are not scanning (ANI is disabled)
 *   NB: older hal's add rx filter bits out of sight and we need to
 *	 blindly preserve them
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, mesh, or monitor modes
 * o enable promiscuous mode
 *   - when in monitor mode
 *   - if interface marked PROMISC (assumes bridge setting is filtered)
 * o accept beacons:
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when scanning
 *   - when doing s/w beacon miss (e.g. for ap+sta)
 *   - when operating in ap mode in 11g to detect overlapping bss that
 *     require protection
 *   - when operating in mesh mode to detect neighbors
 * o accept control frames:
 *   - when in monitor mode
 * XXX HT protection for 11n
 */
static u_int32_t
ath_calcrxfilter(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	u_int32_t rfilt;

	rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
	if (!sc->sc_needmib && !sc->sc_scanning)
		rfilt |= HAL_RX_FILTER_PHYERR;
	if (ic->ic_opmode != IEEE80211_M_STA)
		rfilt |= HAL_RX_FILTER_PROBEREQ;
	/* XXX ic->ic_monvaps != 0? */
	if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC))
		rfilt |= HAL_RX_FILTER_PROM;
	if (ic->ic_opmode == IEEE80211_M_STA ||
	    ic->ic_opmode == IEEE80211_M_IBSS ||
	    sc->sc_swbmiss || sc->sc_scanning)
		rfilt |= HAL_RX_FILTER_BEACON;
	/*
	 * NB: We don't recalculate the rx filter when
	 * ic_protmode changes; otherwise we could do
	 * this only when ic_protmode != NONE.
	 */
	if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
	    IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
		rfilt |= HAL_RX_FILTER_BEACON;

	/*
	 * Enable hardware PS-POLL RX only for hostap mode;
	 * STA mode sends PS-POLL frames but never
	 * receives them.
	 */
	if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PSPOLL,
	    0, NULL) == HAL_OK &&
	    ic->ic_opmode == IEEE80211_M_HOSTAP)
		rfilt |= HAL_RX_FILTER_PSPOLL;

	if (sc->sc_nmeshvaps) {
		rfilt |= HAL_RX_FILTER_BEACON;
		if (sc->sc_hasbmatch)
			rfilt |= HAL_RX_FILTER_BSSID;
		else
			rfilt |= HAL_RX_FILTER_PROM;
	}
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		rfilt |= HAL_RX_FILTER_CONTROL;

	/*
	 * Enable RX of compressed BAR frames only when doing
	 * 802.11n. Required for A-MPDU.
	 */
	if (IEEE80211_IS_CHAN_HT(ic->ic_curchan))
		rfilt |= HAL_RX_FILTER_COMPBAR;

	/*
	 * Enable radar PHY errors if requested by the
	 * DFS module.
	 */
	if (sc->sc_dodfs)
		rfilt |= HAL_RX_FILTER_PHYRADAR;

	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n",
	    __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags);
	return rfilt;
}

/*
 * Recompute and push the RX filter when the interface's promiscuity
 * changes.
 */
static void
ath_update_promisc(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	u_int32_t rfilt;

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath_hal_setrxfilter(sc->sc_ah, rfilt);

	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
}

/*
 * Recompute and install the hardware's 64-bit multicast hash filter
 * from the interface's multicast address list.
 */
static void
ath_update_mcast(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	u_int32_t mfilt[2];

	/* calculate and install multicast filter */
	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		struct ifmultiaddr *ifma;
		/*
		 * Merge multicast addresses to form the hardware filter.
		 */
		mfilt[0] = mfilt[1] = 0;
		if_maddr_rlock(ifp);	/* XXX need some fiddling to remove? */
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			caddr_t dl;
			u_int32_t val;
			u_int8_t pos;

			/* calculate XOR of eight 6bit values */
			dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
			val = LE_READ_4(dl + 0);
			pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			val = LE_READ_4(dl + 3);
			pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			pos &= 0x3f;
			mfilt[pos / 32] |= (1 << (pos % 32));
		}
		if_maddr_runlock(ifp);
	} else
		mfilt[0] = mfilt[1] = ~0;	/* ALLMULTI: accept everything */
	ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
	DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
	    __func__, mfilt[0], mfilt[1]);
}

/*
 * Program the hardware's RX filter, operating mode, MAC address
 * and multicast filter to match the current interface state.
 */
static void
ath_mode_init(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t rfilt;

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath_hal_setrxfilter(ah, rfilt);

	/* configure operational mode */
	ath_hal_setopmode(ah);

	/* handle any link-level address change */
	ath_hal_setmac(ah, IF_LLADDR(ifp));

	/* calculate and install multicast filter */
	ath_update_mcast(ifp);
}

/*
 * Set the slot time based on the current setting.
 */
static void
ath_setslottime(struct ath_softc *sc)
{
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	u_int usec;

	if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
		usec = 13;
	else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
		usec = 21;
	else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
		/* honor short/long slot time only in 11g */
		/* XXX shouldn't honor on pure g or turbo g channel */
		if (ic->ic_flags & IEEE80211_F_SHSLOT)
			usec = HAL_SLOT_TIME_9;
		else
			usec = HAL_SLOT_TIME_20;
	} else
		usec = HAL_SLOT_TIME_9;

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);

	ath_hal_setslottime(ah, usec);
	sc->sc_updateslot = OK;		/* no beacon-deferred update pending */
}

/*
 * Callback from the 802.11 layer to update the
 * slot time based on the current setting.
 */
static void
ath_updateslot(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;

	/*
	 * When not coordinating the BSS, change the hardware
	 * immediately. For other operation we defer the change
	 * until beacon updates have propagated to the stations.
	 */
	if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
	    ic->ic_opmode == IEEE80211_M_MBSS)
		sc->sc_updateslot = UPDATE;	/* picked up in ath_beacon_proc */
	else
		ath_setslottime(sc);
}

/*
 * Setup a h/w transmit queue for beacons.
 */
static int
ath_beaconq_setup(struct ath_hal *ah)
{
	HAL_TXQ_INFO qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
	/* NB: for dynamic turbo, don't enable any other interrupts */
	qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE;
	return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
}

/*
 * Setup the transmit queue parameters for the beacon queue.
 * Returns 1 on success, 0 if the HAL rejected the parameters.
 */
static int
ath_beaconq_config(struct ath_softc *sc)
{
#define	ATH_EXPONENT_TO_VALUE(v)	((1<<(v))-1)
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;

	ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
	if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
	    ic->ic_opmode == IEEE80211_M_MBSS) {
		/*
		 * Always burst out beacon and CAB traffic.
		 */
		qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
		qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT;
		qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT;
	} else {
		struct wmeParams *wmep =
			&ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
		/*
		 * Adhoc mode; important thing is to use 2x cwmin.
		 */
		qi.tqi_aifs = wmep->wmep_aifsn;
		qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
		qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
	}

	if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
		device_printf(sc->sc_dev, "unable to update parameters for "
			"beacon hardware queue!\n");
		return 0;
	} else {
		ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
		return 1;
	}
#undef ATH_EXPONENT_TO_VALUE
}

/*
 * Allocate and setup an initial beacon frame.
 */
static int
ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_buf *bf;
	struct mbuf *m;
	int error;

	bf = avp->av_bcbuf;
	DPRINTF(sc, ATH_DEBUG_NODE, "%s: bf_m=%p, bf_node=%p\n",
	    __func__, bf->bf_m, bf->bf_node);
	/* Release any previously-attached mbuf and node reference */
	if (bf->bf_m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}
	if (bf->bf_node != NULL) {
		ieee80211_free_node(bf->bf_node);
		bf->bf_node = NULL;
	}

	/*
	 * NB: the beacon data buffer must be 32-bit aligned;
	 * we assume the mbuf routines will return us something
	 * with this alignment (perhaps should assert).
	 */
	m = ieee80211_beacon_alloc(ni, &avp->av_boff);
	if (m == NULL) {
		device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__);
		sc->sc_stats.ast_be_nombuf++;
		return ENOMEM;
	}
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n",
		    __func__, error);
		m_freem(m);
		return error;
	}

	/*
	 * Calculate a TSF adjustment factor required for staggered
	 * beacons.  Note that we assume the format of the beacon
	 * frame leaves the tstamp field immediately following the
	 * header.
	 */
	if (sc->sc_stagbeacons && avp->av_bslot > 0) {
		uint64_t tsfadjust;
		struct ieee80211_frame *wh;

		/*
		 * The beacon interval is in TU's; the TSF is in usecs.
		 * We figure out how many TU's to add to align the timestamp
		 * then convert to TSF units and handle byte swapping before
		 * inserting it in the frame.  The hardware will then add this
		 * each time a beacon frame is sent.  Note that we align vap's
		 * 1..N and leave vap 0 untouched.  This means vap 0 has a
		 * timestamp in one beacon interval while the others get a
		 * timestamp aligned to the next interval.
		 */
		tsfadjust = ni->ni_intval *
		    (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF;
		tsfadjust = htole64(tsfadjust << 10);	/* TU -> TSF */

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: %s beacons bslot %d intval %u tsfadjust %llu\n",
		    __func__, sc->sc_stagbeacons ? "stagger" : "burst",
		    avp->av_bslot, ni->ni_intval,
		    (long long unsigned) le64toh(tsfadjust));

		/* tstamp field sits right after the 802.11 header */
		wh = mtod(m, struct ieee80211_frame *);
		memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust));
	}
	bf->bf_m = m;
	bf->bf_node = ieee80211_ref_node(ni);

	return 0;
}

/*
 * Setup the beacon frame for transmit.
 */
static void
ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
{
#define	USE_SHPREAMBLE(_ic) \
	(((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
		== IEEE80211_F_SHPREAMBLE)
	struct ieee80211_node *ni = bf->bf_node;
	struct ieee80211com *ic = ni->ni_ic;
	struct mbuf *m = bf->bf_m;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	int flags, antenna;
	const HAL_RATE_TABLE *rt;
	u_int8_t rix, rate;

	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
		__func__, m, m->m_len);

	/* setup descriptors */
	ds = bf->bf_desc;
	bf->bf_last = bf;
	bf->bf_lastds = ds;

	flags = HAL_TXDESC_NOACK;
	if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
		ds->ds_link = bf->bf_daddr;	/* self-linked */
		flags |= HAL_TXDESC_VEOL;
		/*
		 * Let hardware handle antenna switching.
		 */
		antenna = sc->sc_txantenna;
	} else {
		ds->ds_link = 0;
		/*
		 * Switch antenna every 4 beacons.
		 * XXX assumes two antennae
		 */
		if (sc->sc_txantenna != 0)
			antenna = sc->sc_txantenna;
		else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0)
			antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1);
		else
			antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
	}

	KASSERT(bf->bf_nseg == 1,
		("multi-segment beacon frame; nseg %u", bf->bf_nseg));
	ds->ds_data = bf->bf_segs[0].ds_addr;
	/*
	 * Calculate rate code.
	 * XXX everything at min xmit rate
	 */
	rix = 0;
	rt = sc->sc_currates;
	rate = rt->info[rix].rateCode;
	if (USE_SHPREAMBLE(ic))
		rate |= rt->info[rix].shortPreamble;
	ath_hal_setuptxdesc(ah, ds
		, m->m_len + IEEE80211_CRC_LEN	/* frame length */
		, sizeof(struct ieee80211_frame)/* header length */
		, HAL_PKT_TYPE_BEACON		/* Atheros packet type */
		, ni->ni_txpower		/* txpower XXX */
		, rate, 1			/* series 0 rate/tries */
		, HAL_TXKEYIX_INVALID		/* no encryption */
		, antenna			/* antenna mode */
		, flags				/* no ack, veol for beacons */
		, 0				/* rts/cts rate */
		, 0				/* rts/cts duration */
	);
	/* NB: beacon's BufLen must be a multiple of 4 bytes */
	ath_hal_filltxdesc(ah, ds
		, roundup(m->m_len, 4)		/* buffer length */
		, AH_TRUE			/* first segment */
		, AH_TRUE			/* last segment */
		, ds				/* first descriptor */
	);
#if 0
	ath_desc_swap(ds);
#endif
#undef USE_SHPREAMBLE
}

/*
 * net80211 callback: mark a beacon-offsets item dirty so the next
 * ath_beacon_generate() call regenerates that part of the frame.
 */
static void
ath_beacon_update(struct ieee80211vap *vap, int item)
{
	struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff;

	setbit(bo->bo_flags, item);
}

/*
 * Append the contents of src to dst; both queues
 * are assumed to be locked.
 */
static void
ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
{

	ATH_TXQ_LOCK_ASSERT(dst);
	ATH_TXQ_LOCK_ASSERT(src);

	TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list);
	dst->axq_link = src->axq_link;
	src->axq_link = NULL;
	dst->axq_depth += src->axq_depth;
	dst->axq_aggr_depth += src->axq_aggr_depth;
	src->axq_depth = 0;
	src->axq_aggr_depth = 0;
}

/*
 * Transmit a beacon frame at SWBA.  Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
 */
static void
ath_beacon_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211vap *vap;
	struct ath_buf *bf;
	int slot, otherant;
	uint32_t bfaddr;

	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n",
		__func__, pending);
	/*
	 * Check if the previous beacon has gone out.  If
	 * not don't try to post another, skip this period
	 * and wait for the next.  Missed beacons indicate
	 * a problem and should not occur.  If we miss too
	 * many consecutive beacons reset the device.
	 */
	if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
		sc->sc_bmisscount++;
		sc->sc_stats.ast_be_missed++;
		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: missed %u consecutive beacons\n",
			__func__, sc->sc_bmisscount);
		if (sc->sc_bmisscount >= ath_bstuck_threshold)
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
		return;
	}
	if (sc->sc_bmisscount != 0) {
		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: resume beacon xmit after %u misses\n",
			__func__, sc->sc_bmisscount);
		sc->sc_bmisscount = 0;
	}

	if (sc->sc_stagbeacons) {		/* staggered beacons */
		struct ieee80211com *ic = sc->sc_ifp->if_l2com;
		uint32_t tsftu;

		tsftu = ath_hal_gettsf32(ah) >> 10;
		/* XXX lintval */
		slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval;
		vap = sc->sc_bslot[(slot+1) % ATH_BCBUF];
		bfaddr = 0;
		if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) {
			bf = ath_beacon_generate(sc, vap);
			if (bf != NULL)
				bfaddr = bf->bf_daddr;
		}
	} else {				/* burst'd beacons */
		uint32_t *bflink = &bfaddr;

		/* Chain all vaps' beacons via the descriptor link fields */
		for (slot = 0; slot < ATH_BCBUF; slot++) {
			vap = sc->sc_bslot[slot];
			if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) {
				bf = ath_beacon_generate(sc, vap);
				if (bf != NULL) {
					*bflink = bf->bf_daddr;
					bflink = &bf->bf_desc->ds_link;
				}
			}
		}
		*bflink = 0;			/* terminate list */
	}

	/*
	 * Handle slot time change when a non-ERP station joins/leaves
	 * an 11g network.  The 802.11 layer notifies us via callback,
	 * we mark updateslot, then wait one beacon before effecting
	 * the change.  This gives associated stations at least one
	 * beacon interval to note the state change.
	 */
	/* XXX locking */
	if (sc->sc_updateslot == UPDATE) {
		sc->sc_updateslot = COMMIT;	/* commit next beacon */
		sc->sc_slotupdate = slot;
	} else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
		ath_setslottime(sc);		/* commit change to h/w */

	/*
	 * Check recent per-antenna transmit statistics and flip
	 * the default antenna if noticeably more frames went out
	 * on the non-default antenna.
	 * XXX assumes 2 antennae
	 */
	if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) {
		otherant = sc->sc_defant & 1 ? 2 : 1;
		if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
			ath_setdefantenna(sc, otherant);
		sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
	}

	if (bfaddr != 0) {
		/*
		 * Stop any current dma and put the new frame on the queue.
		 * This should never fail since we check above that no frames
		 * are still pending on the queue.
		 */
		if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: beacon queue %u did not stop?\n",
				__func__, sc->sc_bhalq);
		}
		/* NB: cabq traffic should already be queued and primed */
		ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr);
		ath_hal_txstart(ah, sc->sc_bhalq);

		sc->sc_stats.ast_be_xmit++;
	}
}

/*
 * Regenerate a vap's beacon frame contents, remap it if its size
 * changed, and push any buffered multicast frames onto the CAB
 * queue at DTIM.  Returns the ready-to-transmit beacon buffer, or
 * NULL if the DMA remap failed.
 */
static struct ath_buf *
ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_txq *cabq = sc->sc_cabq;
	struct ath_buf *bf;
	struct mbuf *m;
	int nmcastq, error;

	KASSERT(vap->iv_state >= IEEE80211_S_RUN,
	    ("not running, state %d", vap->iv_state));
	KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));

	/*
	 * Update dynamic beacon contents.  If this returns
	 * non-zero then we need to remap the memory because
	 * the beacon frame changed size (probably because
	 * of the TIM bitmap).
	 */
	bf = avp->av_bcbuf;
	m = bf->bf_m;
	/* XXX lock mcastq? */
	nmcastq = avp->av_mcastq.axq_depth;

	if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) {
		/* XXX too conservative? */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			if_printf(vap->iv_ifp,
			    "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
			    __func__, error);
			return NULL;
		}
	}
	/* bo_tim[4] & 1: multicast-pending bit in the TIM element */
	if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) {
		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: cabq did not drain, mcastq %u cabq %u\n",
		    __func__, nmcastq, cabq->axq_depth);
		sc->sc_stats.ast_cabq_busy++;
		if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) {
			/*
			 * CABQ traffic from a previous vap is still pending.
			 * We must drain the q before this beacon frame goes
			 * out as otherwise this vap's stations will get cab
			 * frames from a different vap.
			 * XXX could be slow causing us to miss DBA
			 */
			ath_tx_draintxq(sc, cabq);
		}
	}
	ath_beacon_setup(sc, bf);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);

	/*
	 * Enable the CAB queue before the beacon queue to
	 * ensure cab frames are triggered by this beacon.
	 */
	if (avp->av_boff.bo_tim[4] & 1) {
		struct ath_hal *ah = sc->sc_ah;

		/* NB: only at DTIM */
		ATH_TXQ_LOCK(cabq);
		ATH_TXQ_LOCK(&avp->av_mcastq);
		if (nmcastq) {
			struct ath_buf *bfm;

			/*
			 * Move frames from the s/w mcast q to the h/w cab q.
			 * XXX MORE_DATA bit
			 */
			bfm = TAILQ_FIRST(&avp->av_mcastq.axq_q);
			if (cabq->axq_link != NULL) {
				*cabq->axq_link = bfm->bf_daddr;
			} else
				ath_hal_puttxbuf(ah, cabq->axq_qnum,
					bfm->bf_daddr);
			ath_txqmove(cabq, &avp->av_mcastq);

			sc->sc_stats.ast_cabq_xmit += nmcastq;
		}
		/* NB: gated by beacon so safe to start here */
		if (! TAILQ_EMPTY(&(cabq->axq_q)))
			ath_hal_txstart(ah, cabq->axq_qnum);
		ATH_TXQ_UNLOCK(&avp->av_mcastq);
		ATH_TXQ_UNLOCK(cabq);
	}
	return bf;
}

/*
 * Kick off adhoc/IBSS beaconing: refresh the beacon contents and
 * hand the buffer to the hardware beacon queue.  The caller must
 * already have stopped beacon queue DMA.
 */
static void
ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;
	struct mbuf *m;
	int error;

	KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));

	/*
	 * Update dynamic beacon contents.  If this returns
	 * non-zero then we need to remap the memory because
	 * the beacon frame changed size (probably because
	 * of the TIM bitmap).
	 */
	bf = avp->av_bcbuf;
	m = bf->bf_m;
	if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) {
		/* XXX too conservative? */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			if_printf(vap->iv_ifp,
			    "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
			    __func__, error);
			return;
		}
	}
	ath_beacon_setup(sc, bf);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);

	/* NB: caller is known to have already stopped tx dma */
	ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
	ath_hal_txstart(ah, sc->sc_bhalq);
}

/*
 * Reset the hardware, with no loss.
 *
 * This can't be used for a general case reset.
 */
static void
ath_reset_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;

#if 0
	if_printf(ifp, "%s: resetting\n", __func__);
#endif
	/* Taskqueue-context chip reset that preserves queued frames. */
	ath_reset(ifp, ATH_RESET_NOLOSS);
}

/*
 * Reset the hardware after detecting beacons have stopped.
 */
static void
ath_bstuck_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t hangs = 0;

	/* Log a baseband hang code if the HAL can identify one. */
	if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0)
		if_printf(ifp, "bb hang detected (0x%x)\n", hangs);

	if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
		sc->sc_bmisscount);
	sc->sc_stats.ast_bstuck++;
	/*
	 * This assumes that there's no simultaneous channel mode change
	 * occuring.
	 */
	ath_reset(ifp, ATH_RESET_NOLOSS);
}

/*
 * Reclaim beacon resources and return buffer to the pool.
 */
static void
ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf)
{

	DPRINTF(sc, ATH_DEBUG_NODE, "%s: free bf=%p, bf_m=%p, bf_node=%p\n",
	    __func__, bf, bf->bf_m, bf->bf_node);
	if (bf->bf_m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}
	if (bf->bf_node != NULL) {
		/* Release the node reference held by the beacon frame. */
		ieee80211_free_node(bf->bf_node);
		bf->bf_node = NULL;
	}
	TAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list);
}

/*
 * Reclaim beacon resources.
 *
 * NB: unlike ath_beacon_return() this walks the whole beacon buffer
 * list in place; the ath_buf entries themselves stay on sc_bbuf.
 */
static void
ath_beacon_free(struct ath_softc *sc)
{
	struct ath_buf *bf;

	TAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
		DPRINTF(sc, ATH_DEBUG_NODE,
		    "%s: free bf=%p, bf_m=%p, bf_node=%p\n",
		        __func__, bf, bf->bf_m, bf->bf_node);
		if (bf->bf_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
			m_freem(bf->bf_m);
			bf->bf_m = NULL;
		}
		if (bf->bf_node != NULL) {
			ieee80211_free_node(bf->bf_node);
			bf->bf_node = NULL;
		}
	}
}

/*
 * Configure the beacon and sleep timers.
 *
 * When operating as an AP this resets the TSF and sets
 * up the hardware to notify us when we need to issue beacons.
 *
 * When operating in station mode this sets up the beacon
 * timers according to the timestamp of the last received
 * beacon and the current TSF, configures PCF and DTIM
 * handling, programs the sleep registers so the hardware
 * will wakeup in time to receive beacons, and configures
 * the beacon miss handling so we'll receive a BMISS
 * interrupt when we stop seeing beacons from the AP
 * we've associated with.
3319 */ 3320static void 3321ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap) 3322{ 3323#define TSF_TO_TU(_h,_l) \ 3324 ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10)) 3325#define FUDGE 2 3326 struct ath_hal *ah = sc->sc_ah; 3327 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 3328 struct ieee80211_node *ni; 3329 u_int32_t nexttbtt, intval, tsftu; 3330 u_int64_t tsf; 3331 3332 if (vap == NULL) 3333 vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */ 3334 ni = ieee80211_ref_node(vap->iv_bss); 3335 3336 /* extract tstamp from last beacon and convert to TU */ 3337 nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4), 3338 LE_READ_4(ni->ni_tstamp.data)); 3339 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 3340 ic->ic_opmode == IEEE80211_M_MBSS) { 3341 /* 3342 * For multi-bss ap/mesh support beacons are either staggered 3343 * evenly over N slots or burst together. For the former 3344 * arrange for the SWBA to be delivered for each slot. 3345 * Slots that are not occupied will generate nothing. 3346 */ 3347 /* NB: the beacon interval is kept internally in TU's */ 3348 intval = ni->ni_intval & HAL_BEACON_PERIOD; 3349 if (sc->sc_stagbeacons) 3350 intval /= ATH_BCBUF; 3351 } else { 3352 /* NB: the beacon interval is kept internally in TU's */ 3353 intval = ni->ni_intval & HAL_BEACON_PERIOD; 3354 } 3355 if (nexttbtt == 0) /* e.g. for ap mode */ 3356 nexttbtt = intval; 3357 else if (intval) /* NB: can be 0 for monitor mode */ 3358 nexttbtt = roundup(nexttbtt, intval); 3359 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n", 3360 __func__, nexttbtt, intval, ni->ni_intval); 3361 if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) { 3362 HAL_BEACON_STATE bs; 3363 int dtimperiod, dtimcount; 3364 int cfpperiod, cfpcount; 3365 3366 /* 3367 * Setup dtim and cfp parameters according to 3368 * last beacon we received (which may be none). 
3369 */ 3370 dtimperiod = ni->ni_dtim_period; 3371 if (dtimperiod <= 0) /* NB: 0 if not known */ 3372 dtimperiod = 1; 3373 dtimcount = ni->ni_dtim_count; 3374 if (dtimcount >= dtimperiod) /* NB: sanity check */ 3375 dtimcount = 0; /* XXX? */ 3376 cfpperiod = 1; /* NB: no PCF support yet */ 3377 cfpcount = 0; 3378 /* 3379 * Pull nexttbtt forward to reflect the current 3380 * TSF and calculate dtim+cfp state for the result. 3381 */ 3382 tsf = ath_hal_gettsf64(ah); 3383 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE; 3384 do { 3385 nexttbtt += intval; 3386 if (--dtimcount < 0) { 3387 dtimcount = dtimperiod - 1; 3388 if (--cfpcount < 0) 3389 cfpcount = cfpperiod - 1; 3390 } 3391 } while (nexttbtt < tsftu); 3392 memset(&bs, 0, sizeof(bs)); 3393 bs.bs_intval = intval; 3394 bs.bs_nexttbtt = nexttbtt; 3395 bs.bs_dtimperiod = dtimperiod*intval; 3396 bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval; 3397 bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod; 3398 bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod; 3399 bs.bs_cfpmaxduration = 0; 3400#if 0 3401 /* 3402 * The 802.11 layer records the offset to the DTIM 3403 * bitmap while receiving beacons; use it here to 3404 * enable h/w detection of our AID being marked in 3405 * the bitmap vector (to indicate frames for us are 3406 * pending at the AP). 3407 * XXX do DTIM handling in s/w to WAR old h/w bugs 3408 * XXX enable based on h/w rev for newer chips 3409 */ 3410 bs.bs_timoffset = ni->ni_timoff; 3411#endif 3412 /* 3413 * Calculate the number of consecutive beacons to miss 3414 * before taking a BMISS interrupt. 3415 * Note that we clamp the result to at most 10 beacons. 3416 */ 3417 bs.bs_bmissthreshold = vap->iv_bmissthreshold; 3418 if (bs.bs_bmissthreshold > 10) 3419 bs.bs_bmissthreshold = 10; 3420 else if (bs.bs_bmissthreshold <= 0) 3421 bs.bs_bmissthreshold = 1; 3422 3423 /* 3424 * Calculate sleep duration. The configuration is 3425 * given in ms. We insure a multiple of the beacon 3426 * period is used. 
Also, if the sleep duration is 3427 * greater than the DTIM period then it makes senses 3428 * to make it a multiple of that. 3429 * 3430 * XXX fixed at 100ms 3431 */ 3432 bs.bs_sleepduration = 3433 roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval); 3434 if (bs.bs_sleepduration > bs.bs_dtimperiod) 3435 bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod); 3436 3437 DPRINTF(sc, ATH_DEBUG_BEACON, 3438 "%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n" 3439 , __func__ 3440 , tsf, tsftu 3441 , bs.bs_intval 3442 , bs.bs_nexttbtt 3443 , bs.bs_dtimperiod 3444 , bs.bs_nextdtim 3445 , bs.bs_bmissthreshold 3446 , bs.bs_sleepduration 3447 , bs.bs_cfpperiod 3448 , bs.bs_cfpmaxduration 3449 , bs.bs_cfpnext 3450 , bs.bs_timoffset 3451 ); 3452 ath_hal_intrset(ah, 0); 3453 ath_hal_beacontimers(ah, &bs); 3454 sc->sc_imask |= HAL_INT_BMISS; 3455 ath_hal_intrset(ah, sc->sc_imask); 3456 } else { 3457 ath_hal_intrset(ah, 0); 3458 if (nexttbtt == intval) 3459 intval |= HAL_BEACON_RESET_TSF; 3460 if (ic->ic_opmode == IEEE80211_M_IBSS) { 3461 /* 3462 * In IBSS mode enable the beacon timers but only 3463 * enable SWBA interrupts if we need to manually 3464 * prepare beacon frames. Otherwise we use a 3465 * self-linked tx descriptor and let the hardware 3466 * deal with things. 3467 */ 3468 intval |= HAL_BEACON_ENA; 3469 if (!sc->sc_hasveol) 3470 sc->sc_imask |= HAL_INT_SWBA; 3471 if ((intval & HAL_BEACON_RESET_TSF) == 0) { 3472 /* 3473 * Pull nexttbtt forward to reflect 3474 * the current TSF. 3475 */ 3476 tsf = ath_hal_gettsf64(ah); 3477 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE; 3478 do { 3479 nexttbtt += intval; 3480 } while (nexttbtt < tsftu); 3481 } 3482 ath_beaconq_config(sc); 3483 } else if (ic->ic_opmode == IEEE80211_M_HOSTAP || 3484 ic->ic_opmode == IEEE80211_M_MBSS) { 3485 /* 3486 * In AP/mesh mode we enable the beacon timers 3487 * and SWBA interrupts to prepare beacon frames. 
3488 */ 3489 intval |= HAL_BEACON_ENA; 3490 sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */ 3491 ath_beaconq_config(sc); 3492 } 3493 ath_hal_beaconinit(ah, nexttbtt, intval); 3494 sc->sc_bmisscount = 0; 3495 ath_hal_intrset(ah, sc->sc_imask); 3496 /* 3497 * When using a self-linked beacon descriptor in 3498 * ibss mode load it once here. 3499 */ 3500 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) 3501 ath_beacon_start_adhoc(sc, vap); 3502 } 3503 sc->sc_syncbeacon = 0; 3504 ieee80211_free_node(ni); 3505#undef FUDGE 3506#undef TSF_TO_TU 3507} 3508 3509static void 3510ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 3511{ 3512 bus_addr_t *paddr = (bus_addr_t*) arg; 3513 KASSERT(error == 0, ("error %u on bus_dma callback", error)); 3514 *paddr = segs->ds_addr; 3515} 3516 3517static int 3518ath_descdma_setup(struct ath_softc *sc, 3519 struct ath_descdma *dd, ath_bufhead *head, 3520 const char *name, int nbuf, int ndesc) 3521{ 3522#define DS2PHYS(_dd, _ds) \ 3523 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 3524#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \ 3525 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0) 3526 struct ifnet *ifp = sc->sc_ifp; 3527 uint8_t *ds; 3528 struct ath_buf *bf; 3529 int i, bsize, error; 3530 int desc_len; 3531 3532 desc_len = sizeof(struct ath_desc); 3533 3534 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n", 3535 __func__, name, nbuf, ndesc); 3536 3537 dd->dd_name = name; 3538 dd->dd_desc_len = desc_len * nbuf * ndesc; 3539 3540 /* 3541 * Merlin work-around: 3542 * Descriptors that cross the 4KB boundary can't be used. 3543 * Assume one skipped descriptor per 4KB page. 3544 */ 3545 if (! ath_hal_split4ktrans(sc->sc_ah)) { 3546 int numdescpage = 4096 / (desc_len * ndesc); 3547 dd->dd_desc_len = (nbuf / numdescpage + 1) * 4096; 3548 } 3549 3550 /* 3551 * Setup DMA descriptor area. 
3552 */ 3553 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 3554 PAGE_SIZE, 0, /* alignment, bounds */ 3555 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 3556 BUS_SPACE_MAXADDR, /* highaddr */ 3557 NULL, NULL, /* filter, filterarg */ 3558 dd->dd_desc_len, /* maxsize */ 3559 1, /* nsegments */ 3560 dd->dd_desc_len, /* maxsegsize */ 3561 BUS_DMA_ALLOCNOW, /* flags */ 3562 NULL, /* lockfunc */ 3563 NULL, /* lockarg */ 3564 &dd->dd_dmat); 3565 if (error != 0) { 3566 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); 3567 return error; 3568 } 3569 3570 /* allocate descriptors */ 3571 error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap); 3572 if (error != 0) { 3573 if_printf(ifp, "unable to create dmamap for %s descriptors, " 3574 "error %u\n", dd->dd_name, error); 3575 goto fail0; 3576 } 3577 3578 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, 3579 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, 3580 &dd->dd_dmamap); 3581 if (error != 0) { 3582 if_printf(ifp, "unable to alloc memory for %u %s descriptors, " 3583 "error %u\n", nbuf * ndesc, dd->dd_name, error); 3584 goto fail1; 3585 } 3586 3587 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, 3588 dd->dd_desc, dd->dd_desc_len, 3589 ath_load_cb, &dd->dd_desc_paddr, 3590 BUS_DMA_NOWAIT); 3591 if (error != 0) { 3592 if_printf(ifp, "unable to map %s descriptors, error %u\n", 3593 dd->dd_name, error); 3594 goto fail2; 3595 } 3596 3597 ds = (uint8_t *) dd->dd_desc; 3598 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", 3599 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, 3600 (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); 3601 3602 /* allocate rx buffers */ 3603 bsize = sizeof(struct ath_buf) * nbuf; 3604 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 3605 if (bf == NULL) { 3606 if_printf(ifp, "malloc of %s buffers failed, size %u\n", 3607 dd->dd_name, bsize); 3608 goto fail3; 3609 } 3610 dd->dd_bufptr = bf; 3611 3612 TAILQ_INIT(head); 
3613 for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * desc_len)) { 3614 bf->bf_desc = (struct ath_desc *) ds; 3615 bf->bf_daddr = DS2PHYS(dd, ds); 3616 if (! ath_hal_split4ktrans(sc->sc_ah)) { 3617 /* 3618 * Merlin WAR: Skip descriptor addresses which 3619 * cause 4KB boundary crossing along any point 3620 * in the descriptor. 3621 */ 3622 if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr, 3623 desc_len * ndesc)) { 3624 /* Start at the next page */ 3625 ds += 0x1000 - (bf->bf_daddr & 0xFFF); 3626 bf->bf_desc = (struct ath_desc *) ds; 3627 bf->bf_daddr = DS2PHYS(dd, ds); 3628 } 3629 } 3630 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 3631 &bf->bf_dmamap); 3632 if (error != 0) { 3633 if_printf(ifp, "unable to create dmamap for %s " 3634 "buffer %u, error %u\n", dd->dd_name, i, error); 3635 ath_descdma_cleanup(sc, dd, head); 3636 return error; 3637 } 3638 bf->bf_lastds = bf->bf_desc; /* Just an initial value */ 3639 TAILQ_INSERT_TAIL(head, bf, bf_list); 3640 } 3641 return 0; 3642fail3: 3643 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 3644fail2: 3645 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 3646fail1: 3647 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 3648fail0: 3649 bus_dma_tag_destroy(dd->dd_dmat); 3650 memset(dd, 0, sizeof(*dd)); 3651 return error; 3652#undef DS2PHYS 3653#undef ATH_DESC_4KB_BOUND_CHECK 3654} 3655 3656static void 3657ath_descdma_cleanup(struct ath_softc *sc, 3658 struct ath_descdma *dd, ath_bufhead *head) 3659{ 3660 struct ath_buf *bf; 3661 struct ieee80211_node *ni; 3662 3663 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 3664 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 3665 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 3666 bus_dma_tag_destroy(dd->dd_dmat); 3667 3668 TAILQ_FOREACH(bf, head, bf_list) { 3669 if (bf->bf_m) { 3670 m_freem(bf->bf_m); 3671 bf->bf_m = NULL; 3672 } 3673 if (bf->bf_dmamap != NULL) { 3674 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 3675 bf->bf_dmamap = NULL; 3676 } 3677 ni = 
		    bf->bf_node;
		bf->bf_node = NULL;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
	}

	TAILQ_INIT(head);
	free(dd->dd_bufptr, M_ATHDEV);
	memset(dd, 0, sizeof(*dd));
}

/*
 * Allocate the rx, tx, and beacon descriptor areas; on any failure
 * the areas already set up are torn down before returning the error.
 */
static int
ath_desc_alloc(struct ath_softc *sc)
{
	int error;

	error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
			"rx", ath_rxbuf, 1);
	if (error != 0)
		return error;

	error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
			"tx", ath_txbuf, ATH_TXDESC);
	if (error != 0) {
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
		return error;
	}

	error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
			"beacon", ATH_BCBUF, 1);
	if (error != 0) {
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
		return error;
	}
	return 0;
}

/*
 * Free descriptor state allocated by ath_desc_alloc(); a zero
 * dd_desc_len marks an area that was never (or is no longer) set up.
 */
static void
ath_desc_free(struct ath_softc *sc)
{

	if (sc->sc_bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
	if (sc->sc_txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
}

/*
 * net80211 node allocation hook: allocate an ath_node with the
 * rate-control module's per-node state appended, and initialize
 * the driver-private pieces (rate control, per-node mutex, TIDs).
 */
static struct ieee80211_node *
ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
	struct ath_node *an;

	an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
	if (an == NULL) {
		/* XXX stat+msg */
		return NULL;
	}
	ath_rate_node_init(sc, an);

	/* Setup the mutex - there's no associd yet so set the name to NULL */
	snprintf(an->an_name, sizeof(an->an_name), "%s: node %p",
	    device_get_nameunit(sc->sc_dev), an);
	mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF);

	/* XXX setup ath_tid */
	ath_tx_tid_init(sc, an);

	DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
	return &an->an_node;
}

/*
 * net80211 node cleanup hook: flush driver TX state for the node,
 * release rate-control state, then chain to the stack's cleanup.
 */
static void
ath_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	/* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */
	ath_tx_node_flush(sc, ATH_NODE(ni));
	ath_rate_node_cleanup(sc, ATH_NODE(ni));
	sc->sc_node_cleanup(ni);
}

/*
 * net80211 node free hook: destroy the per-node mutex then chain to
 * the stack's free routine.
 */
static void
ath_node_free(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);
	mtx_destroy(&ATH_NODE(ni)->an_mtx);
	sc->sc_node_free(ni);
}

/*
 * Report signal/noise for a node: rssi from net80211, noise floor
 * from the HAL for the node's channel (or a nominal -95 dBm).
 */
static void
ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	*rssi = ic->ic_node_getrssi(ni);
	if (ni->ni_chan != IEEE80211_CHAN_ANYC)
		*noise = ath_hal_getchannoise(ah, ni->ni_chan);
	else
		*noise = -95;		/* nominally correct */
}

/*
 * (Re)initialize an rx buffer: attach an mbuf cluster if needed,
 * set up its DMA mapping and rx descriptor, and append it to the
 * hardware rx descriptor chain.  Returns 0 or an errno.
 */
static int
ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	int error;
	struct mbuf *m;
	struct ath_desc *ds;

	m = bf->bf_m;
	if (m == NULL) {
		/*
		 * NB: by assigning a page to the rx dma buffer we
		 * implicitly satisfy the Atheros requirement that
		 * this buffer be cache-line-aligned and sized to be
		 * multiple of the cache line size.  Not doing this
		 * causes weird stuff to happen (for the 5210 at least).
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: no mbuf/cluster\n", __func__);
			sc->sc_stats.ast_rx_nombuf++;
			return ENOMEM;
		}
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
					     bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
			    __func__, error);
			sc->sc_stats.ast_rx_busdma++;
			m_freem(m);
			return error;
		}
		KASSERT(bf->bf_nseg == 1,
			("multi-segment packet; nseg %u", bf->bf_nseg));
		bf->bf_m = m;
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To insure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This insures the hardware always has
	 * someplace to write a new frame.
	 */
	/*
	 * 11N: we can no longer afford to self link the last descriptor.
	 * MAC acknowledges BA status as long as it copies frames to host
	 * buffer (or rx fifo).  This can incorrectly acknowledge packets
	 * to a sender if last desc is self-linked.
	 */
	ds = bf->bf_desc;
	if (sc->sc_rxslink)
		ds->ds_link = bf->bf_daddr;	/* link to self */
	else
		ds->ds_link = 0;		/* terminate the list */
	ds->ds_data = bf->bf_segs[0].ds_addr;
	ath_hal_setuprxdesc(ah, ds
		, m->m_len		/* buffer size */
		, 0
	);

	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
	return 0;
}

/*
 * Extend 15-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the specified TSF.
 */
static __inline u_int64_t
ath_extend_tsf15(u_int32_t rstamp, u_int64_t tsf)
{
	/* If the TSF's low bits wrapped past rstamp, step back one epoch. */
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;

	return ((tsf &~ 0x7fff) | rstamp);
}

/*
 * Extend 32-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the specified TSF.
 */
static __inline u_int64_t
ath_extend_tsf32(u_int32_t rstamp, u_int64_t tsf)
{
	u_int32_t tsf_low = tsf & 0xffffffff;
	u_int64_t tsf64 = (tsf & ~0xffffffffULL) | rstamp;

	/*
	 * Adjust by one 2^32 epoch when rstamp and the TSF's low word
	 * are on opposite sides of a 32-bit wrap (threshold 0x10000000).
	 */
	if (rstamp > tsf_low && (rstamp - tsf_low > 0x10000000))
		tsf64 -= 0x100000000ULL;

	if (rstamp < tsf_low && (tsf_low - rstamp > 0x10000000))
		tsf64 += 0x100000000ULL;

	return tsf64;
}

/*
 * Extend the TSF from the RX descriptor to a full 64 bit TSF.
 * Earlier hardware versions only wrote the low 15 bits of the
 * TSF into the RX descriptor; later versions (AR5416 and up)
 * include the 32 bit TSF value.
 */
static __inline u_int64_t
ath_extend_tsf(struct ath_softc *sc, u_int32_t rstamp, u_int64_t tsf)
{
	if (sc->sc_rxtsf32)
		return ath_extend_tsf32(rstamp, tsf);
	else
		return ath_extend_tsf15(rstamp, tsf);
}

/*
 * Intercept management frames to collect beacon rssi data
 * and to do ibss merges.
 */
static void
ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
	int subtype, int rssi, int nf)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;

	/*
	 * Call up first so subsequent work can use information
	 * potentially stored in the node (e.g. for ibss merge).
	 */
	ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, nf);
	switch (subtype) {
	case IEEE80211_FC0_SUBTYPE_BEACON:
		/* update rssi statistics for use by the hal */
		/* XXX unlocked check against vap->iv_bss? */
		ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
		if (sc->sc_syncbeacon &&
		    ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) {
			/*
			 * Resync beacon timers using the tsf of the beacon
			 * frame we just received.
			 */
			ath_beacon_config(sc, vap);
		}
		/* fall thru... */
	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
		if (vap->iv_opmode == IEEE80211_M_IBSS &&
		    vap->iv_state == IEEE80211_S_RUN) {
			uint32_t rstamp = sc->sc_lastrs->rs_tstamp;
			uint64_t tsf = ath_extend_tsf(sc, rstamp,
				ath_hal_gettsf64(sc->sc_ah));
			/*
			 * Handle ibss merge as needed; check the tsf on the
			 * frame before attempting the merge.  The 802.11 spec
			 * says the station should change it's bssid to match
			 * the oldest station with the same ssid, where oldest
			 * is determined by the tsf.  Note that hardware
			 * reconfiguration happens through callback to
			 * ath_newstate as the state machine will go from
			 * RUN -> RUN when this happens.
			 */
			if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
				DPRINTF(sc, ATH_DEBUG_STATE,
				    "ibss merge, rstamp %u tsf %ju "
				    "tstamp %ju\n", rstamp, (uintmax_t)tsf,
				    (uintmax_t)ni->ni_tstamp.tsf);
				(void) ieee80211_ibss_merge(ni);
			}
		}
		break;
	}
}

/*
 * Set the default antenna.
 */
static void
ath_setdefantenna(struct ath_softc *sc, u_int antenna)
{
	struct ath_hal *ah = sc->sc_ah;

	/* XXX block beacon interrupts */
	ath_hal_setdefantenna(ah, antenna);
	if (sc->sc_defant != antenna)
		sc->sc_stats.ast_ant_defswitch++;
	sc->sc_defant = antenna;
	sc->sc_rxotherant = 0;
}

/*
 * Fill in the radiotap rx header (sc_rx_th) for a received frame:
 * rate, HT channel flags, extended 64-bit TSF, noise and signal.
 */
static void
ath_rx_tap(struct ifnet *ifp, struct mbuf *m,
	const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
{
#define	CHAN_HT20	htole32(IEEE80211_CHAN_HT20)
#define	CHAN_HT40U	htole32(IEEE80211_CHAN_HT40U)
#define	CHAN_HT40D	htole32(IEEE80211_CHAN_HT40D)
#define	CHAN_HT		(CHAN_HT20|CHAN_HT40U|CHAN_HT40D)
	struct ath_softc *sc = ifp->if_softc;
	const HAL_RATE_TABLE *rt;
	uint8_t rix;

	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
	rix = rt->rateCodeToIndex[rs->rs_rate];
	sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
	sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
#ifdef AH_SUPPORT_AR5416
	sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT;
	if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) {	/* HT rate */
		struct ieee80211com *ic = ifp->if_l2com;

		if ((rs->rs_flags & HAL_RX_2040) == 0)
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
		else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan))
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
		else
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
		if ((rs->rs_flags & HAL_RX_GI) == 0)
			sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI;
	}
#endif
	sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(sc, rs->rs_tstamp, tsf));
	if (rs->rs_status & HAL_RXERR_CRC)
		sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
	/* XXX propagate other error flags from descriptor */
	sc->sc_rx_th.wr_antnoise = nf;
	sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi;
	sc->sc_rx_th.wr_antenna = rs->rs_antenna;
#undef CHAN_HT
#undef CHAN_HT20
#undef CHAN_HT40U
#undef CHAN_HT40D
}

/*
 * Report a Michael MIC failure up to net80211 for the node that
 * (apparently) sent the offending frame.
 */
static void
ath_handle_micerror(struct ieee80211com *ic,
	struct ieee80211_frame *wh, int keyix)
{
	struct ieee80211_node *ni;

	/* XXX recheck MIC to deal w/ chips that lie */
	/* XXX discard MIC errors on !data frames */
	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
	if (ni != NULL) {
		ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix);
		ieee80211_free_node(ni);
	}
}

/*
 * Only run the RX proc if it's not already running.
 * Since this may get run as part of the reset/flush path,
 * the task can't clash with an existing, running tasklet.
 */
static void
ath_rx_tasklet(void *arg, int npending)
{
	struct ath_softc *sc = arg;

	CTR1(ATH_KTR_INTR, "ath_rx_proc: pending=%d", npending);
	DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		/* A reset is in progress; don't touch the rx path. */
		device_printf(sc->sc_dev,
		    "%s: sc_inreset_cnt > 0; skipping\n", __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	ATH_PCU_UNLOCK(sc);
	ath_rx_proc(sc, 1);
}

static void
ath_rx_proc(struct ath_softc *sc, int resched)
{
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_buf *bf;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct ath_rx_status *rs;
	struct mbuf *m;
	struct ieee80211_node *ni;
	int len, type, ngood;
	HAL_STATUS status;
	int16_t nf;
	u_int64_t tsf, rstamp;
	int npkts = 0;

	/* XXX we must not hold the ATH_LOCK here */
	ATH_UNLOCK_ASSERT(sc);
	ATH_PCU_UNLOCK_ASSERT(sc);

	/* Mark the rx path busy so a concurrent reset will wait for us. */
	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
ATH_PCU_UNLOCK(sc); 4108 4109 DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: called\n", __func__); 4110 ngood = 0; 4111 nf = ath_hal_getchannoise(ah, sc->sc_curchan); 4112 sc->sc_stats.ast_rx_noise = nf; 4113 tsf = ath_hal_gettsf64(ah); 4114 do { 4115 bf = TAILQ_FIRST(&sc->sc_rxbuf); 4116 if (sc->sc_rxslink && bf == NULL) { /* NB: shouldn't happen */ 4117 if_printf(ifp, "%s: no buffer!\n", __func__); 4118 break; 4119 } else if (bf == NULL) { 4120 /* 4121 * End of List: 4122 * this can happen for non-self-linked RX chains 4123 */ 4124 sc->sc_stats.ast_rx_hitqueueend++; 4125 break; 4126 } 4127 m = bf->bf_m; 4128 if (m == NULL) { /* NB: shouldn't happen */ 4129 /* 4130 * If mbuf allocation failed previously there 4131 * will be no mbuf; try again to re-populate it. 4132 */ 4133 /* XXX make debug msg */ 4134 if_printf(ifp, "%s: no mbuf!\n", __func__); 4135 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); 4136 goto rx_next; 4137 } 4138 ds = bf->bf_desc; 4139 if (ds->ds_link == bf->bf_daddr) { 4140 /* NB: never process the self-linked entry at the end */ 4141 sc->sc_stats.ast_rx_hitqueueend++; 4142 break; 4143 } 4144 /* XXX sync descriptor memory */ 4145 /* 4146 * Must provide the virtual address of the current 4147 * descriptor, the physical address, and the virtual 4148 * address of the next descriptor in the h/w chain. 4149 * This allows the HAL to look ahead to see if the 4150 * hardware is done with a descriptor by checking the 4151 * done bit in the following descriptor and the address 4152 * of the current descriptor the DMA engine is working 4153 * on. All this is necessary because of our use of 4154 * a self-linked list to avoid rx overruns. 
4155 */ 4156 rs = &bf->bf_status.ds_rxstat; 4157 status = ath_hal_rxprocdesc(ah, ds, 4158 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs); 4159#ifdef ATH_DEBUG 4160 if (sc->sc_debug & ATH_DEBUG_RECV_DESC) 4161 ath_printrxbuf(sc, bf, 0, status == HAL_OK); 4162#endif 4163 if (status == HAL_EINPROGRESS) 4164 break; 4165 4166 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); 4167 npkts++; 4168 4169 /* 4170 * Calculate the correct 64 bit TSF given 4171 * the TSF64 register value and rs_tstamp. 4172 */ 4173 rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf); 4174 4175 /* These aren't specifically errors */ 4176#ifdef AH_SUPPORT_AR5416 4177 if (rs->rs_flags & HAL_RX_GI) 4178 sc->sc_stats.ast_rx_halfgi++; 4179 if (rs->rs_flags & HAL_RX_2040) 4180 sc->sc_stats.ast_rx_2040++; 4181 if (rs->rs_flags & HAL_RX_DELIM_CRC_PRE) 4182 sc->sc_stats.ast_rx_pre_crc_err++; 4183 if (rs->rs_flags & HAL_RX_DELIM_CRC_POST) 4184 sc->sc_stats.ast_rx_post_crc_err++; 4185 if (rs->rs_flags & HAL_RX_DECRYPT_BUSY) 4186 sc->sc_stats.ast_rx_decrypt_busy_err++; 4187 if (rs->rs_flags & HAL_RX_HI_RX_CHAIN) 4188 sc->sc_stats.ast_rx_hi_rx_chain++; 4189#endif /* AH_SUPPORT_AR5416 */ 4190 4191 if (rs->rs_status != 0) { 4192 if (rs->rs_status & HAL_RXERR_CRC) 4193 sc->sc_stats.ast_rx_crcerr++; 4194 if (rs->rs_status & HAL_RXERR_FIFO) 4195 sc->sc_stats.ast_rx_fifoerr++; 4196 if (rs->rs_status & HAL_RXERR_PHY) { 4197 sc->sc_stats.ast_rx_phyerr++; 4198 /* Process DFS radar events */ 4199 if ((rs->rs_phyerr == HAL_PHYERR_RADAR) || 4200 (rs->rs_phyerr == HAL_PHYERR_FALSE_RADAR_EXT)) { 4201 /* Since we're touching the frame data, sync it */ 4202 bus_dmamap_sync(sc->sc_dmat, 4203 bf->bf_dmamap, 4204 BUS_DMASYNC_POSTREAD); 4205 /* Now pass it to the radar processing code */ 4206 ath_dfs_process_phy_err(sc, mtod(m, char *), rstamp, rs); 4207 } 4208 4209 /* Be suitably paranoid about receiving phy errors out of the stats array bounds */ 4210 if (rs->rs_phyerr < 64) 4211 sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++; 4212 goto rx_error; 
/* NB: don't count in ierrors */ 4213 } 4214 if (rs->rs_status & HAL_RXERR_DECRYPT) { 4215 /* 4216 * Decrypt error. If the error occurred 4217 * because there was no hardware key, then 4218 * let the frame through so the upper layers 4219 * can process it. This is necessary for 5210 4220 * parts which have no way to setup a ``clear'' 4221 * key cache entry. 4222 * 4223 * XXX do key cache faulting 4224 */ 4225 if (rs->rs_keyix == HAL_RXKEYIX_INVALID) 4226 goto rx_accept; 4227 sc->sc_stats.ast_rx_badcrypt++; 4228 } 4229 if (rs->rs_status & HAL_RXERR_MIC) { 4230 sc->sc_stats.ast_rx_badmic++; 4231 /* 4232 * Do minimal work required to hand off 4233 * the 802.11 header for notification. 4234 */ 4235 /* XXX frag's and qos frames */ 4236 len = rs->rs_datalen; 4237 if (len >= sizeof (struct ieee80211_frame)) { 4238 bus_dmamap_sync(sc->sc_dmat, 4239 bf->bf_dmamap, 4240 BUS_DMASYNC_POSTREAD); 4241 ath_handle_micerror(ic, 4242 mtod(m, struct ieee80211_frame *), 4243 sc->sc_splitmic ? 4244 rs->rs_keyix-32 : rs->rs_keyix); 4245 } 4246 } 4247 ifp->if_ierrors++; 4248rx_error: 4249 /* 4250 * Cleanup any pending partial frame. 4251 */ 4252 if (sc->sc_rxpending != NULL) { 4253 m_freem(sc->sc_rxpending); 4254 sc->sc_rxpending = NULL; 4255 } 4256 /* 4257 * When a tap is present pass error frames 4258 * that have been requested. By default we 4259 * pass decrypt+mic errors but others may be 4260 * interesting (e.g. crc). 4261 */ 4262 if (ieee80211_radiotap_active(ic) && 4263 (rs->rs_status & sc->sc_monpass)) { 4264 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 4265 BUS_DMASYNC_POSTREAD); 4266 /* NB: bpf needs the mbuf length setup */ 4267 len = rs->rs_datalen; 4268 m->m_pkthdr.len = m->m_len = len; 4269 bf->bf_m = NULL; 4270 ath_rx_tap(ifp, m, rs, rstamp, nf); 4271 ieee80211_radiotap_rx_all(ic, m); 4272 m_freem(m); 4273 } 4274 /* XXX pass MIC errors up for s/w reclaculation */ 4275 goto rx_next; 4276 } 4277rx_accept: 4278 /* 4279 * Sync and unmap the frame. 
At this point we're 4280 * committed to passing the mbuf somewhere so clear 4281 * bf_m; this means a new mbuf must be allocated 4282 * when the rx descriptor is setup again to receive 4283 * another frame. 4284 */ 4285 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 4286 BUS_DMASYNC_POSTREAD); 4287 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 4288 bf->bf_m = NULL; 4289 4290 len = rs->rs_datalen; 4291 m->m_len = len; 4292 4293 if (rs->rs_more) { 4294 /* 4295 * Frame spans multiple descriptors; save 4296 * it for the next completed descriptor, it 4297 * will be used to construct a jumbogram. 4298 */ 4299 if (sc->sc_rxpending != NULL) { 4300 /* NB: max frame size is currently 2 clusters */ 4301 sc->sc_stats.ast_rx_toobig++; 4302 m_freem(sc->sc_rxpending); 4303 } 4304 m->m_pkthdr.rcvif = ifp; 4305 m->m_pkthdr.len = len; 4306 sc->sc_rxpending = m; 4307 goto rx_next; 4308 } else if (sc->sc_rxpending != NULL) { 4309 /* 4310 * This is the second part of a jumbogram, 4311 * chain it to the first mbuf, adjust the 4312 * frame length, and clear the rxpending state. 4313 */ 4314 sc->sc_rxpending->m_next = m; 4315 sc->sc_rxpending->m_pkthdr.len += len; 4316 m = sc->sc_rxpending; 4317 sc->sc_rxpending = NULL; 4318 } else { 4319 /* 4320 * Normal single-descriptor receive; setup 4321 * the rcvif and packet length. 4322 */ 4323 m->m_pkthdr.rcvif = ifp; 4324 m->m_pkthdr.len = len; 4325 } 4326 4327 /* 4328 * Validate rs->rs_antenna. 4329 * 4330 * Some users w/ AR9285 NICs have reported crashes 4331 * here because rs_antenna field is bogusly large. 4332 * Let's enforce the maximum antenna limit of 8 4333 * (and it shouldn't be hard coded, but that's a 4334 * separate problem) and if there's an issue, print 4335 * out an error and adjust rs_antenna to something 4336 * sensible. 4337 * 4338 * This code should be removed once the actual 4339 * root cause of the issue has been identified. 
4340 * For example, it may be that the rs_antenna 4341 * field is only valid for the lsat frame of 4342 * an aggregate and it just happens that it is 4343 * "mostly" right. (This is a general statement - 4344 * the majority of the statistics are only valid 4345 * for the last frame in an aggregate. 4346 */ 4347 if (rs->rs_antenna > 7) { 4348 device_printf(sc->sc_dev, "%s: rs_antenna > 7 (%d)\n", 4349 __func__, rs->rs_antenna); 4350#ifdef ATH_DEBUG 4351 ath_printrxbuf(sc, bf, 0, status == HAL_OK); 4352#endif /* ATH_DEBUG */ 4353 rs->rs_antenna = 0; /* XXX better than nothing */ 4354 } 4355 4356 ifp->if_ipackets++; 4357 sc->sc_stats.ast_ant_rx[rs->rs_antenna]++; 4358 4359 /* 4360 * Populate the rx status block. When there are bpf 4361 * listeners we do the additional work to provide 4362 * complete status. Otherwise we fill in only the 4363 * material required by ieee80211_input. Note that 4364 * noise setting is filled in above. 4365 */ 4366 if (ieee80211_radiotap_active(ic)) 4367 ath_rx_tap(ifp, m, rs, rstamp, nf); 4368 4369 /* 4370 * From this point on we assume the frame is at least 4371 * as large as ieee80211_frame_min; verify that. 4372 */ 4373 if (len < IEEE80211_MIN_LEN) { 4374 if (!ieee80211_radiotap_active(ic)) { 4375 DPRINTF(sc, ATH_DEBUG_RECV, 4376 "%s: short packet %d\n", __func__, len); 4377 sc->sc_stats.ast_rx_tooshort++; 4378 } else { 4379 /* NB: in particular this captures ack's */ 4380 ieee80211_radiotap_rx_all(ic, m); 4381 } 4382 m_freem(m); 4383 goto rx_next; 4384 } 4385 4386 if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) { 4387 const HAL_RATE_TABLE *rt = sc->sc_currates; 4388 uint8_t rix = rt->rateCodeToIndex[rs->rs_rate]; 4389 4390 ieee80211_dump_pkt(ic, mtod(m, caddr_t), len, 4391 sc->sc_hwmap[rix].ieeerate, rs->rs_rssi); 4392 } 4393 4394 m_adj(m, -IEEE80211_CRC_LEN); 4395 4396 /* 4397 * Locate the node for sender, track state, and then 4398 * pass the (referenced) node up to the 802.11 layer 4399 * for its use. 
4400 */ 4401 ni = ieee80211_find_rxnode_withkey(ic, 4402 mtod(m, const struct ieee80211_frame_min *), 4403 rs->rs_keyix == HAL_RXKEYIX_INVALID ? 4404 IEEE80211_KEYIX_NONE : rs->rs_keyix); 4405 sc->sc_lastrs = rs; 4406 4407#ifdef AH_SUPPORT_AR5416 4408 if (rs->rs_isaggr) 4409 sc->sc_stats.ast_rx_agg++; 4410#endif /* AH_SUPPORT_AR5416 */ 4411 4412 if (ni != NULL) { 4413 /* 4414 * Only punt packets for ampdu reorder processing for 4415 * 11n nodes; net80211 enforces that M_AMPDU is only 4416 * set for 11n nodes. 4417 */ 4418 if (ni->ni_flags & IEEE80211_NODE_HT) 4419 m->m_flags |= M_AMPDU; 4420 4421 /* 4422 * Sending station is known, dispatch directly. 4423 */ 4424 type = ieee80211_input(ni, m, rs->rs_rssi, nf); 4425 ieee80211_free_node(ni); 4426 /* 4427 * Arrange to update the last rx timestamp only for 4428 * frames from our ap when operating in station mode. 4429 * This assumes the rx key is always setup when 4430 * associated. 4431 */ 4432 if (ic->ic_opmode == IEEE80211_M_STA && 4433 rs->rs_keyix != HAL_RXKEYIX_INVALID) 4434 ngood++; 4435 } else { 4436 type = ieee80211_input_all(ic, m, rs->rs_rssi, nf); 4437 } 4438 /* 4439 * Track rx rssi and do any rx antenna management. 4440 */ 4441 ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi); 4442 if (sc->sc_diversity) { 4443 /* 4444 * When using fast diversity, change the default rx 4445 * antenna if diversity chooses the other antenna 3 4446 * times in a row. 4447 */ 4448 if (sc->sc_defant != rs->rs_antenna) { 4449 if (++sc->sc_rxotherant >= 3) 4450 ath_setdefantenna(sc, rs->rs_antenna); 4451 } else 4452 sc->sc_rxotherant = 0; 4453 } 4454 4455 /* Newer school diversity - kite specific for now */ 4456 /* XXX perhaps migrate the normal diversity code to this? */ 4457 if ((ah)->ah_rxAntCombDiversity) 4458 (*(ah)->ah_rxAntCombDiversity)(ah, rs, ticks, hz); 4459 4460 if (sc->sc_softled) { 4461 /* 4462 * Blink for any data frame. Otherwise do a 4463 * heartbeat-style blink when idle. 
The latter 4464 * is mainly for station mode where we depend on 4465 * periodic beacon frames to trigger the poll event. 4466 */ 4467 if (type == IEEE80211_FC0_TYPE_DATA) { 4468 const HAL_RATE_TABLE *rt = sc->sc_currates; 4469 ath_led_event(sc, 4470 rt->rateCodeToIndex[rs->rs_rate]); 4471 } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle) 4472 ath_led_event(sc, 0); 4473 } 4474rx_next: 4475 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 4476 } while (ath_rxbuf_init(sc, bf) == 0); 4477 4478 /* rx signal state monitoring */ 4479 ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan); 4480 if (ngood) 4481 sc->sc_lastrx = tsf; 4482 4483 CTR2(ATH_KTR_INTR, "ath_rx_proc: npkts=%d, ngood=%d", npkts, ngood); 4484 /* Queue DFS tasklet if needed */ 4485 if (resched && ath_dfs_tasklet_needed(sc, sc->sc_curchan)) 4486 taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask); 4487 4488 /* 4489 * Now that all the RX frames were handled that 4490 * need to be handled, kick the PCU if there's 4491 * been an RXEOL condition. 4492 */ 4493 ATH_PCU_LOCK(sc); 4494 if (resched && sc->sc_kickpcu) { 4495 CTR0(ATH_KTR_ERR, "ath_rx_proc: kickpcu"); 4496 device_printf(sc->sc_dev, "%s: kickpcu; handled %d packets\n", 4497 __func__, npkts); 4498 4499 /* XXX rxslink? */ 4500 /* 4501 * XXX can we hold the PCU lock here? 4502 * Are there any net80211 buffer calls involved? 4503 */ 4504 bf = TAILQ_FIRST(&sc->sc_rxbuf); 4505 ath_hal_putrxbuf(ah, bf->bf_daddr); 4506 ath_hal_rxena(ah); /* enable recv descriptors */ 4507 ath_mode_init(sc); /* set filters, etc. */ 4508 ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */ 4509 4510 ath_hal_intrset(ah, sc->sc_imask); 4511 sc->sc_kickpcu = 0; 4512 } 4513 ATH_PCU_UNLOCK(sc); 4514 4515 /* XXX check this inside of IF_LOCK? 
*/ 4516 if (resched && (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) { 4517#ifdef IEEE80211_SUPPORT_SUPERG 4518 ieee80211_ff_age_all(ic, 100); 4519#endif 4520 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 4521 ath_start(ifp); 4522 } 4523#undef PA2DESC 4524 4525 ATH_PCU_LOCK(sc); 4526 sc->sc_rxproc_cnt--; 4527 ATH_PCU_UNLOCK(sc); 4528} 4529 4530static void 4531ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) 4532{ 4533 txq->axq_qnum = qnum; 4534 txq->axq_ac = 0; 4535 txq->axq_depth = 0; 4536 txq->axq_aggr_depth = 0; 4537 txq->axq_intrcnt = 0; 4538 txq->axq_link = NULL; 4539 txq->axq_softc = sc; 4540 TAILQ_INIT(&txq->axq_q); 4541 TAILQ_INIT(&txq->axq_tidq); 4542 ATH_TXQ_LOCK_INIT(sc, txq); 4543} 4544 4545/* 4546 * Setup a h/w transmit queue. 4547 */ 4548static struct ath_txq * 4549ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 4550{ 4551#define N(a) (sizeof(a)/sizeof(a[0])) 4552 struct ath_hal *ah = sc->sc_ah; 4553 HAL_TXQ_INFO qi; 4554 int qnum; 4555 4556 memset(&qi, 0, sizeof(qi)); 4557 qi.tqi_subtype = subtype; 4558 qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 4559 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 4560 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 4561 /* 4562 * Enable interrupts only for EOL and DESC conditions. 4563 * We mark tx descriptors to receive a DESC interrupt 4564 * when a tx queue gets deep; otherwise waiting for the 4565 * EOL to reap descriptors. Note that this is done to 4566 * reduce interrupt load and this only defers reaping 4567 * descriptors, never transmitting frames. Aside from 4568 * reducing interrupts this also permits more concurrency. 4569 * The only potential downside is if the tx queue backs 4570 * up in which case the top half of the kernel may backup 4571 * due to a lack of tx descriptors. 
4572 */ 4573 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE; 4574 qnum = ath_hal_setuptxqueue(ah, qtype, &qi); 4575 if (qnum == -1) { 4576 /* 4577 * NB: don't print a message, this happens 4578 * normally on parts with too few tx queues 4579 */ 4580 return NULL; 4581 } 4582 if (qnum >= N(sc->sc_txq)) { 4583 device_printf(sc->sc_dev, 4584 "hal qnum %u out of range, max %zu!\n", 4585 qnum, N(sc->sc_txq)); 4586 ath_hal_releasetxqueue(ah, qnum); 4587 return NULL; 4588 } 4589 if (!ATH_TXQ_SETUP(sc, qnum)) { 4590 ath_txq_init(sc, &sc->sc_txq[qnum], qnum); 4591 sc->sc_txqsetup |= 1<<qnum; 4592 } 4593 return &sc->sc_txq[qnum]; 4594#undef N 4595} 4596 4597/* 4598 * Setup a hardware data transmit queue for the specified 4599 * access control. The hal may not support all requested 4600 * queues in which case it will return a reference to a 4601 * previously setup queue. We record the mapping from ac's 4602 * to h/w queues for use by ath_tx_start and also track 4603 * the set of h/w queues being used to optimize work in the 4604 * transmit interrupt handler and related routines. 4605 */ 4606static int 4607ath_tx_setup(struct ath_softc *sc, int ac, int haltype) 4608{ 4609#define N(a) (sizeof(a)/sizeof(a[0])) 4610 struct ath_txq *txq; 4611 4612 if (ac >= N(sc->sc_ac2q)) { 4613 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", 4614 ac, N(sc->sc_ac2q)); 4615 return 0; 4616 } 4617 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); 4618 if (txq != NULL) { 4619 txq->axq_ac = ac; 4620 sc->sc_ac2q[ac] = txq; 4621 return 1; 4622 } else 4623 return 0; 4624#undef N 4625} 4626 4627/* 4628 * Update WME parameters for a transmit queue. 
 */
static int
ath_txq_update(struct ath_softc *sc, int ac)
{
#define	ATH_EXPONENT_TO_VALUE(v)	((1<<v)-1)
#define	ATH_TXOP_TO_US(v)		(v<<5)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_txq *txq = sc->sc_ac2q[ac];
	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;

	/* Start from the queue's current settings, then override */
	ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
#ifdef IEEE80211_SUPPORT_TDMA
	if (sc->sc_tdma) {
		/*
		 * AIFS is zero so there's no pre-transmit wait.  The
		 * burst time defines the slot duration and is configured
		 * through net80211.  The QCU is setup to not do post-xmit
		 * back off, lockout all lower-priority QCU's, and fire
		 * off the DMA beacon alert timer which is setup based
		 * on the slot configuration.
		 */
		qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
			      | HAL_TXQ_TXERRINT_ENABLE
			      | HAL_TXQ_TXURNINT_ENABLE
			      | HAL_TXQ_TXEOLINT_ENABLE
			      | HAL_TXQ_DBA_GATED
			      | HAL_TXQ_BACKOFF_DISABLE
			      | HAL_TXQ_ARB_LOCKOUT_GLOBAL
			      ;
		qi.tqi_aifs = 0;
		/* XXX +dbaprep? */
		qi.tqi_readyTime = sc->sc_tdmaslotlen;
		qi.tqi_burstTime = qi.tqi_readyTime;
	} else {
#endif
		/*
		 * XXX shouldn't this just use the default flags
		 * used in the previous queue setup?
		 */
		qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
			      | HAL_TXQ_TXERRINT_ENABLE
			      | HAL_TXQ_TXDESCINT_ENABLE
			      | HAL_TXQ_TXURNINT_ENABLE
			      | HAL_TXQ_TXEOLINT_ENABLE
			      ;
		qi.tqi_aifs = wmep->wmep_aifsn;
		qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
		qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
		qi.tqi_readyTime = 0;
		qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
#ifdef IEEE80211_SUPPORT_TDMA
	}
#endif

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n",
	    __func__, txq->axq_qnum, qi.tqi_qflags,
	    qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime);

	if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
		if_printf(ifp, "unable to update hardware queue "
			"parameters for %s traffic!\n",
			ieee80211_wme_acnames[ac]);
		return 0;
	} else {
		ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
		return 1;
	}
#undef ATH_TXOP_TO_US
#undef ATH_EXPONENT_TO_VALUE
}

/*
 * Callback from the 802.11 layer to update WME parameters.
 * Returns 0 on success, EIO if any queue failed to update.
 */
static int
ath_wme_update(struct ieee80211com *ic)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	return !ath_txq_update(sc, WME_AC_BE) ||
	    !ath_txq_update(sc, WME_AC_BK) ||
	    !ath_txq_update(sc, WME_AC_VI) ||
	    !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
}

/*
 * Reclaim resources for a setup queue.
 */
static void
ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{

	ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	ATH_TXQ_LOCK_DESTROY(txq);
	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
}

/*
 * Reclaim all tx queue resources.
4732 */ 4733static void 4734ath_tx_cleanup(struct ath_softc *sc) 4735{ 4736 int i; 4737 4738 ATH_TXBUF_LOCK_DESTROY(sc); 4739 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 4740 if (ATH_TXQ_SETUP(sc, i)) 4741 ath_tx_cleanupq(sc, &sc->sc_txq[i]); 4742} 4743 4744/* 4745 * Return h/w rate index for an IEEE rate (w/o basic rate bit) 4746 * using the current rates in sc_rixmap. 4747 */ 4748int 4749ath_tx_findrix(const struct ath_softc *sc, uint8_t rate) 4750{ 4751 int rix = sc->sc_rixmap[rate]; 4752 /* NB: return lowest rix for invalid rate */ 4753 return (rix == 0xff ? 0 : rix); 4754} 4755 4756static void 4757ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts, 4758 struct ath_buf *bf) 4759{ 4760 struct ieee80211_node *ni = bf->bf_node; 4761 struct ifnet *ifp = sc->sc_ifp; 4762 struct ieee80211com *ic = ifp->if_l2com; 4763 int sr, lr, pri; 4764 4765 if (ts->ts_status == 0) { 4766 u_int8_t txant = ts->ts_antenna; 4767 sc->sc_stats.ast_ant_tx[txant]++; 4768 sc->sc_ant_tx[txant]++; 4769 if (ts->ts_finaltsi != 0) 4770 sc->sc_stats.ast_tx_altrate++; 4771 pri = M_WME_GETAC(bf->bf_m); 4772 if (pri >= WME_AC_VO) 4773 ic->ic_wme.wme_hipri_traffic++; 4774 if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) 4775 ni->ni_inact = ni->ni_inact_reload; 4776 } else { 4777 if (ts->ts_status & HAL_TXERR_XRETRY) 4778 sc->sc_stats.ast_tx_xretries++; 4779 if (ts->ts_status & HAL_TXERR_FIFO) 4780 sc->sc_stats.ast_tx_fifoerr++; 4781 if (ts->ts_status & HAL_TXERR_FILT) 4782 sc->sc_stats.ast_tx_filtered++; 4783 if (ts->ts_status & HAL_TXERR_XTXOP) 4784 sc->sc_stats.ast_tx_xtxop++; 4785 if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED) 4786 sc->sc_stats.ast_tx_timerexpired++; 4787 4788 if (ts->ts_status & HAL_TX_DATA_UNDERRUN) 4789 sc->sc_stats.ast_tx_data_underrun++; 4790 if (ts->ts_status & HAL_TX_DELIM_UNDERRUN) 4791 sc->sc_stats.ast_tx_delim_underrun++; 4792 4793 if (bf->bf_m->m_flags & M_FF) 4794 sc->sc_stats.ast_ff_txerr++; 4795 } 4796 /* XXX when is this valid? 
*/ 4797 if (ts->ts_status & HAL_TX_DESC_CFG_ERR) 4798 sc->sc_stats.ast_tx_desccfgerr++; 4799 4800 sr = ts->ts_shortretry; 4801 lr = ts->ts_longretry; 4802 sc->sc_stats.ast_tx_shortretry += sr; 4803 sc->sc_stats.ast_tx_longretry += lr; 4804 4805} 4806 4807/* 4808 * The default completion. If fail is 1, this means 4809 * "please don't retry the frame, and just return -1 status 4810 * to the net80211 stack. 4811 */ 4812void 4813ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 4814{ 4815 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 4816 int st; 4817 4818 if (fail == 1) 4819 st = -1; 4820 else 4821 st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ? 4822 ts->ts_status : HAL_TXERR_XRETRY; 4823 4824 if (bf->bf_state.bfs_dobaw) 4825 device_printf(sc->sc_dev, 4826 "%s: bf %p: seqno %d: dobaw should've been cleared!\n", 4827 __func__, 4828 bf, 4829 SEQNO(bf->bf_state.bfs_seqno)); 4830 if (bf->bf_next != NULL) 4831 device_printf(sc->sc_dev, 4832 "%s: bf %p: seqno %d: bf_next not NULL!\n", 4833 __func__, 4834 bf, 4835 SEQNO(bf->bf_state.bfs_seqno)); 4836 4837 /* 4838 * Do any tx complete callback. Note this must 4839 * be done before releasing the node reference. 4840 * This will free the mbuf, release the net80211 4841 * node and recycle the ath_buf. 4842 */ 4843 ath_tx_freebuf(sc, bf, st); 4844} 4845 4846/* 4847 * Update rate control with the given completion status. 
4848 */ 4849void 4850ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 4851 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen, 4852 int nframes, int nbad) 4853{ 4854 struct ath_node *an; 4855 4856 /* Only for unicast frames */ 4857 if (ni == NULL) 4858 return; 4859 4860 an = ATH_NODE(ni); 4861 4862 if ((ts->ts_status & HAL_TXERR_FILT) == 0) { 4863 ATH_NODE_LOCK(an); 4864 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad); 4865 ATH_NODE_UNLOCK(an); 4866 } 4867} 4868 4869/* 4870 * Update the busy status of the last frame on the free list. 4871 * When doing TDMA, the busy flag tracks whether the hardware 4872 * currently points to this buffer or not, and thus gated DMA 4873 * may restart by re-reading the last descriptor in this 4874 * buffer. 4875 * 4876 * This should be called in the completion function once one 4877 * of the buffers has been used. 4878 */ 4879static void 4880ath_tx_update_busy(struct ath_softc *sc) 4881{ 4882 struct ath_buf *last; 4883 4884 /* 4885 * Since the last frame may still be marked 4886 * as ATH_BUF_BUSY, unmark it here before 4887 * finishing the frame processing. 4888 * Since we've completed a frame (aggregate 4889 * or otherwise), the hardware has moved on 4890 * and is no longer referencing the previous 4891 * descriptor. 4892 */ 4893 ATH_TXBUF_LOCK_ASSERT(sc); 4894 last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s); 4895 if (last != NULL) 4896 last->bf_flags &= ~ATH_BUF_BUSY; 4897} 4898 4899 4900/* 4901 * Process completed xmit descriptors from the specified queue. 4902 * Kick the packet scheduler if needed. This can occur from this 4903 * particular task. 
4904 */ 4905static int 4906ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) 4907{ 4908 struct ath_hal *ah = sc->sc_ah; 4909 struct ath_buf *bf; 4910 struct ath_desc *ds; 4911 struct ath_tx_status *ts; 4912 struct ieee80211_node *ni; 4913 struct ath_node *an; 4914 int nacked; 4915 HAL_STATUS status; 4916 4917 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", 4918 __func__, txq->axq_qnum, 4919 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 4920 txq->axq_link); 4921 nacked = 0; 4922 for (;;) { 4923 ATH_TXQ_LOCK(txq); 4924 txq->axq_intrcnt = 0; /* reset periodic desc intr count */ 4925 bf = TAILQ_FIRST(&txq->axq_q); 4926 if (bf == NULL) { 4927 ATH_TXQ_UNLOCK(txq); 4928 break; 4929 } 4930 ds = bf->bf_lastds; /* XXX must be setup correctly! */ 4931 ts = &bf->bf_status.ds_txstat; 4932 status = ath_hal_txprocdesc(ah, ds, ts); 4933#ifdef ATH_DEBUG 4934 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) 4935 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 4936 status == HAL_OK); 4937 else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0)) { 4938 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 4939 status == HAL_OK); 4940 } 4941#endif 4942 if (status == HAL_EINPROGRESS) { 4943 ATH_TXQ_UNLOCK(txq); 4944 break; 4945 } 4946 ATH_TXQ_REMOVE(txq, bf, bf_list); 4947#ifdef IEEE80211_SUPPORT_TDMA 4948 if (txq->axq_depth > 0) { 4949 /* 4950 * More frames follow. Mark the buffer busy 4951 * so it's not re-used while the hardware may 4952 * still re-read the link field in the descriptor. 4953 * 4954 * Use the last buffer in an aggregate as that 4955 * is where the hardware may be - intermediate 4956 * descriptors won't be "busy". 
4957 */ 4958 bf->bf_last->bf_flags |= ATH_BUF_BUSY; 4959 } else 4960#else 4961 if (txq->axq_depth == 0) 4962#endif 4963 txq->axq_link = NULL; 4964 if (bf->bf_state.bfs_aggr) 4965 txq->axq_aggr_depth--; 4966 4967 ni = bf->bf_node; 4968 /* 4969 * If unicast frame was ack'd update RSSI, 4970 * including the last rx time used to 4971 * workaround phantom bmiss interrupts. 4972 */ 4973 if (ni != NULL && ts->ts_status == 0 && 4974 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) { 4975 nacked++; 4976 sc->sc_stats.ast_tx_rssi = ts->ts_rssi; 4977 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, 4978 ts->ts_rssi); 4979 } 4980 ATH_TXQ_UNLOCK(txq); 4981 4982 /* If unicast frame, update general statistics */ 4983 if (ni != NULL) { 4984 an = ATH_NODE(ni); 4985 /* update statistics */ 4986 ath_tx_update_stats(sc, ts, bf); 4987 } 4988 4989 /* 4990 * Call the completion handler. 4991 * The completion handler is responsible for 4992 * calling the rate control code. 4993 * 4994 * Frames with no completion handler get the 4995 * rate control code called here. 4996 */ 4997 if (bf->bf_comp == NULL) { 4998 if ((ts->ts_status & HAL_TXERR_FILT) == 0 && 4999 (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) { 5000 /* 5001 * XXX assume this isn't an aggregate 5002 * frame. 5003 */ 5004 ath_tx_update_ratectrl(sc, ni, 5005 bf->bf_state.bfs_rc, ts, 5006 bf->bf_state.bfs_pktlen, 1, 5007 (ts->ts_status == 0 ? 0 : 1)); 5008 } 5009 ath_tx_default_comp(sc, bf, 0); 5010 } else 5011 bf->bf_comp(sc, bf, 0); 5012 } 5013#ifdef IEEE80211_SUPPORT_SUPERG 5014 /* 5015 * Flush fast-frame staging queue when traffic slows. 
5016 */ 5017 if (txq->axq_depth <= 1) 5018 ieee80211_ff_flush(ic, txq->axq_ac); 5019#endif 5020 5021 /* Kick the TXQ scheduler */ 5022 if (dosched) { 5023 ATH_TXQ_LOCK(txq); 5024 ath_txq_sched(sc, txq); 5025 ATH_TXQ_UNLOCK(txq); 5026 } 5027 5028 return nacked; 5029} 5030 5031#define TXQACTIVE(t, q) ( (t) & (1 << (q))) 5032 5033/* 5034 * Deferred processing of transmit interrupt; special-cased 5035 * for a single hardware transmit queue (e.g. 5210 and 5211). 5036 */ 5037static void 5038ath_tx_proc_q0(void *arg, int npending) 5039{ 5040 struct ath_softc *sc = arg; 5041 struct ifnet *ifp = sc->sc_ifp; 5042 uint32_t txqs; 5043 5044 ATH_PCU_LOCK(sc); 5045 sc->sc_txproc_cnt++; 5046 txqs = sc->sc_txq_active; 5047 sc->sc_txq_active &= ~txqs; 5048 ATH_PCU_UNLOCK(sc); 5049 5050 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1)) 5051 /* XXX why is lastrx updated in tx code? */ 5052 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 5053 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 5054 ath_tx_processq(sc, sc->sc_cabq, 1); 5055 IF_LOCK(&ifp->if_snd); 5056 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5057 IF_UNLOCK(&ifp->if_snd); 5058 sc->sc_wd_timer = 0; 5059 5060 if (sc->sc_softled) 5061 ath_led_event(sc, sc->sc_txrix); 5062 5063 ATH_PCU_LOCK(sc); 5064 sc->sc_txproc_cnt--; 5065 ATH_PCU_UNLOCK(sc); 5066 5067 ath_start(ifp); 5068} 5069 5070/* 5071 * Deferred processing of transmit interrupt; special-cased 5072 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). 5073 */ 5074static void 5075ath_tx_proc_q0123(void *arg, int npending) 5076{ 5077 struct ath_softc *sc = arg; 5078 struct ifnet *ifp = sc->sc_ifp; 5079 int nacked; 5080 uint32_t txqs; 5081 5082 ATH_PCU_LOCK(sc); 5083 sc->sc_txproc_cnt++; 5084 txqs = sc->sc_txq_active; 5085 sc->sc_txq_active &= ~txqs; 5086 ATH_PCU_UNLOCK(sc); 5087 5088 /* 5089 * Process each active queue. 
5090 */ 5091 nacked = 0; 5092 if (TXQACTIVE(txqs, 0)) 5093 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1); 5094 if (TXQACTIVE(txqs, 1)) 5095 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1); 5096 if (TXQACTIVE(txqs, 2)) 5097 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1); 5098 if (TXQACTIVE(txqs, 3)) 5099 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1); 5100 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 5101 ath_tx_processq(sc, sc->sc_cabq, 1); 5102 if (nacked) 5103 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 5104 5105 IF_LOCK(&ifp->if_snd); 5106 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5107 IF_UNLOCK(&ifp->if_snd); 5108 sc->sc_wd_timer = 0; 5109 5110 if (sc->sc_softled) 5111 ath_led_event(sc, sc->sc_txrix); 5112 5113 ATH_PCU_LOCK(sc); 5114 sc->sc_txproc_cnt--; 5115 ATH_PCU_UNLOCK(sc); 5116 5117 ath_start(ifp); 5118} 5119 5120/* 5121 * Deferred processing of transmit interrupt. 5122 */ 5123static void 5124ath_tx_proc(void *arg, int npending) 5125{ 5126 struct ath_softc *sc = arg; 5127 struct ifnet *ifp = sc->sc_ifp; 5128 int i, nacked; 5129 uint32_t txqs; 5130 5131 ATH_PCU_LOCK(sc); 5132 sc->sc_txproc_cnt++; 5133 txqs = sc->sc_txq_active; 5134 sc->sc_txq_active &= ~txqs; 5135 ATH_PCU_UNLOCK(sc); 5136 5137 /* 5138 * Process each active queue. 5139 */ 5140 nacked = 0; 5141 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 5142 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i)) 5143 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1); 5144 if (nacked) 5145 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 5146 5147 /* XXX check this inside of IF_LOCK? */ 5148 IF_LOCK(&ifp->if_snd); 5149 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5150 IF_UNLOCK(&ifp->if_snd); 5151 sc->sc_wd_timer = 0; 5152 5153 if (sc->sc_softled) 5154 ath_led_event(sc, sc->sc_txrix); 5155 5156 ATH_PCU_LOCK(sc); 5157 sc->sc_txproc_cnt--; 5158 ATH_PCU_UNLOCK(sc); 5159 5160 ath_start(ifp); 5161} 5162#undef TXQACTIVE 5163 5164/* 5165 * Deferred processing of TXQ rescheduling. 
 */
static void
ath_txq_sched_tasklet(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	int i;

	/* XXX is skipping ok? */
	ATH_PCU_LOCK(sc);
#if 0
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev,
		    "%s: sc_inreset_cnt > 0; skipping\n", __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
#endif
	sc->sc_txproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	/* Run the software TX scheduler on every configured queue */
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			ATH_TXQ_LOCK(&sc->sc_txq[i]);
			ath_txq_sched(sc, &sc->sc_txq[i]);
			ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
		}
	}

	ATH_PCU_LOCK(sc);
	sc->sc_txproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Return a buffer to the pool and update the 'busy' flag on the
 * previous 'tail' entry.
 *
 * This _must_ only be called when the buffer is involved in a completed
 * TX.  The logic is that if it was part of an active TX, the previous
 * buffer on the list is now not involved in a halted TX DMA queue, waiting
 * for restart (eg for TDMA.)
 *
 * The caller must free the mbuf and recycle the node reference.
 */
void
ath_freebuf(struct ath_softc *sc, struct ath_buf *bf)
{
	bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE);

	/* Callers must have already detached the node and mbuf */
	KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__));
	KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__));

	ATH_TXBUF_LOCK(sc);
	ath_tx_update_busy(sc);
	TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
	ATH_TXBUF_UNLOCK(sc);
}

/*
 * This is currently used by ath_tx_draintxq() and
 * ath_tx_tid_free_pkts().
 *
 * It recycles a single ath_buf.
 */
void
ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status)
{
    struct ieee80211_node *ni = bf->bf_node;
    struct mbuf *m0 = bf->bf_m;

    /* Detach node/mbuf first; ath_freebuf() asserts both are NULL */
    bf->bf_node = NULL;
    bf->bf_m = NULL;

    /* Free the buffer, it's not needed any longer */
    ath_freebuf(sc, bf);

    if (ni != NULL) {
        /*
         * Do any callback and reclaim the node reference.
         */
        if (m0->m_flags & M_TXCB)
            ieee80211_process_callback(ni, m0, status);
        ieee80211_free_node(ni);
    }
    m_freem(m0);

    /*
     * XXX the buffer used to be freed -after-, but the DMA map was
     * freed where ath_freebuf() now is. I've no idea what this
     * will do.
     */
}

/*
 * Flush every frame off the given hardware TX queue, completing
 * each via its completion handler (or the default one).
 * Assumes TX DMA has already been stopped.
 */
void
ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
#ifdef ATH_DEBUG
    struct ath_hal *ah = sc->sc_ah;
#endif
    struct ath_buf *bf;
    u_int ix;

    /*
     * NB: this assumes output has been stopped and
     *     we do not need to block ath_tx_proc
     */
    /* Clear the busy flag on the list tail; DMA is halted */
    ATH_TXBUF_LOCK(sc);
    bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
    if (bf != NULL)
        bf->bf_flags &= ~ATH_BUF_BUSY;
    ATH_TXBUF_UNLOCK(sc);

    for (ix = 0;; ix++) {
        ATH_TXQ_LOCK(txq);
        bf = TAILQ_FIRST(&txq->axq_q);
        if (bf == NULL) {
            txq->axq_link = NULL;
            ATH_TXQ_UNLOCK(txq);
            break;
        }
        ATH_TXQ_REMOVE(txq, bf, bf_list);
        if (bf->bf_state.bfs_aggr)
            txq->axq_aggr_depth--;
#ifdef ATH_DEBUG
        if (sc->sc_debug & ATH_DEBUG_RESET) {
            struct ieee80211com *ic = sc->sc_ifp->if_l2com;

            ath_printtxbuf(sc, bf, txq->axq_qnum, ix,
                ath_hal_txprocdesc(ah, bf->bf_lastds,
                    &bf->bf_status.ds_txstat) == HAL_OK);
            ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *),
                bf->bf_m->m_len, 0, -1);
        }
#endif /* ATH_DEBUG */
        /*
         * Since we're now doing magic in the completion
         * functions, we -must- call it for aggregation
         * destinations or BAW tracking will get upset.
         */
        /*
         * Clear ATH_BUF_BUSY; the completion handler
         * will free the buffer.
         */
        ATH_TXQ_UNLOCK(txq);
        bf->bf_flags &= ~ATH_BUF_BUSY;
        if (bf->bf_comp)
            bf->bf_comp(sc, bf, 1);
        else
            ath_tx_default_comp(sc, bf, 1);
    }

    /*
     * Drain software queued frames which are on
     * active TIDs.
     */
    ath_tx_txq_drain(sc, txq);
}

/*
 * Stop TX DMA on a single hardware queue.
 */
static void
ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
    struct ath_hal *ah = sc->sc_ah;

    DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
        __func__, txq->axq_qnum,
        (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
        txq->axq_link);
    (void) ath_hal_stoptxdma(ah, txq->axq_qnum);
}

/*
 * Stop TX DMA on the beacon queue and all configured data queues.
 * Returns 0 if the device is marked invalid (hardware untouched),
 * 1 otherwise.
 */
static int
ath_stoptxdma(struct ath_softc *sc)
{
    struct ath_hal *ah = sc->sc_ah;
    int i;

    /* XXX return value */
    if (sc->sc_invalid)
        return 0;

    /*
     * NOTE(review): this test is redundant — the early return above
     * guarantees !sc->sc_invalid here.
     */
    if (!sc->sc_invalid) {
        /* don't touch the hardware if marked invalid */
        DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
            __func__, sc->sc_bhalq,
            (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
            NULL);
        (void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
        for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
            if (ATH_TXQ_SETUP(sc, i))
                ath_tx_stopdma(sc, &sc->sc_txq[i]);
    }

    return 1;
}

/*
 * Drain the transmit queues and reclaim resources.
 */
static void
ath_draintxq(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
{
#ifdef ATH_DEBUG
    struct ath_hal *ah = sc->sc_ah;
#endif
    struct ifnet *ifp = sc->sc_ifp;
    int i;

    (void) ath_stoptxdma(sc);

    for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
        /*
         * XXX TODO: should we just handle the completed TX frames
         * here, whether or not the reset is a full one or not?
         */
        if (ATH_TXQ_SETUP(sc, i)) {
            /* NOLOSS reset: complete frames normally; else discard */
            if (reset_type == ATH_RESET_NOLOSS)
                ath_tx_processq(sc, &sc->sc_txq[i], 0);
            else
                ath_tx_draintxq(sc, &sc->sc_txq[i]);
        }
    }
#ifdef ATH_DEBUG
    if (sc->sc_debug & ATH_DEBUG_RESET) {
        struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf);
        if (bf != NULL && bf->bf_m != NULL) {
            ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
                ath_hal_txprocdesc(ah, bf->bf_lastds,
                    &bf->bf_status.ds_txstat) == HAL_OK);
            ieee80211_dump_pkt(ifp->if_l2com,
                mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
                0, -1);
        }
    }
#endif /* ATH_DEBUG */
    IF_LOCK(&ifp->if_snd);
    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
    IF_UNLOCK(&ifp->if_snd);
    sc->sc_wd_timer = 0;
}

/*
 * Disable the receive h/w in preparation for a reset.
 */
static void
ath_stoprecv(struct ath_softc *sc, int dodelay)
{
#define PA2DESC(_sc, _pa) \
    ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
        ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
    struct ath_hal *ah = sc->sc_ah;

    ath_hal_stoppcurecv(ah);    /* disable PCU */
    ath_hal_setrxfilter(ah, 0); /* clear recv filter */
    ath_hal_stopdmarecv(ah);    /* disable DMA engine */
    if (dodelay)
        DELAY(3000);        /* 3ms is long enough for 1 frame */
#ifdef ATH_DEBUG
    if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
        struct ath_buf *bf;
        u_int ix;

        device_printf(sc->sc_dev,
            "%s: rx queue %p, link %p\n",
            __func__,
            (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah),
            sc->sc_rxlink);
        ix = 0;
        TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
            struct ath_desc *ds = bf->bf_desc;
            struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
            HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
                bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
            if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
                ath_printrxbuf(sc, bf, ix, status == HAL_OK);
            ix++;
        }
    }
#endif
    /* Drop any partially-assembled (jumbo) frame */
    if (sc->sc_rxpending != NULL) {
        m_freem(sc->sc_rxpending);
        sc->sc_rxpending = NULL;
    }
    sc->sc_rxlink = NULL;       /* just in case */
#undef PA2DESC
}

/*
 * Enable the receive h/w following a reset.
 */
static int
ath_startrecv(struct ath_softc *sc)
{
    struct ath_hal *ah = sc->sc_ah;
    struct ath_buf *bf;

    sc->sc_rxlink = NULL;
    sc->sc_rxpending = NULL;
    TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
        int error = ath_rxbuf_init(sc, bf);
        if (error != 0) {
            DPRINTF(sc, ATH_DEBUG_RECV,
                "%s: ath_rxbuf_init failed %d\n",
                __func__, error);
            return error;
        }
    }

    /* NOTE(review): assumes sc_rxbuf is non-empty — bf unchecked here */
    bf = TAILQ_FIRST(&sc->sc_rxbuf);
    ath_hal_putrxbuf(ah, bf->bf_daddr);
    ath_hal_rxena(ah);      /* enable recv descriptors */
    ath_mode_init(sc);      /* set filters, etc. */
    ath_hal_startpcurecv(ah);   /* re-enable PCU/DMA engine */
    return 0;
}

/*
 * Update internal state after a channel change.
 */
static void
ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
{
    enum ieee80211_phymode mode;

    /*
     * Change channels and update the h/w rate map
     * if we're switching; e.g. 11a to 11b/g.
     */
    mode = ieee80211_chan2mode(chan);
    if (mode != sc->sc_curmode)
        ath_setcurmode(sc, mode);
    sc->sc_curchan = chan;
}

/*
 * Set/change channels. If the channel is really being changed,
 * it's done by resetting the chip. To accomplish this we must
 * first cleanup any pending DMA, then restart stuff after a la
 * ath_init.
 */
static int
ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
{
    struct ifnet *ifp = sc->sc_ifp;
    struct ieee80211com *ic = ifp->if_l2com;
    struct ath_hal *ah = sc->sc_ah;
    int ret = 0;

    /* Treat this as an interface reset */
    /* Caller must NOT hold the PCU or softc lock — we take them below */
    ATH_PCU_UNLOCK_ASSERT(sc);
    ATH_UNLOCK_ASSERT(sc);

    /* (Try to) stop TX/RX from occuring */
    taskqueue_block(sc->sc_tq);

    /*
     * Grab the reset "lock" (sc_inreset_cnt); the matching decrement
     * is at the 'finish' label below.
     */
    ATH_PCU_LOCK(sc);
    ath_hal_intrset(ah, 0);     /* Stop new RX/TX completion */
    ath_txrx_stop_locked(sc);   /* Stop pending RX/TX completion */
    if (ath_reset_grablock(sc, 1) == 0) {
        device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
            __func__);
    }
    ATH_PCU_UNLOCK(sc);

    DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
        __func__, ieee80211_chan2ieee(ic, chan),
        chan->ic_freq, chan->ic_flags);
    if (chan != sc->sc_curchan) {
        HAL_STATUS status;
        /*
         * To switch channels clear any pending DMA operations;
         * wait long enough for the RX fifo to drain, reset the
         * hardware at the new frequency, and then re-enable
         * the relevant bits of the h/w.
         */
#if 0
        ath_hal_intrset(ah, 0); /* disable interrupts */
#endif
        ath_stoprecv(sc, 1);    /* turn off frame recv */
        /*
         * First, handle completed TX/RX frames.
         */
        ath_rx_proc(sc, 0);
        ath_draintxq(sc, ATH_RESET_NOLOSS);
        /*
         * Next, flush the non-scheduled frames.
         */
        ath_draintxq(sc, ATH_RESET_FULL);   /* clear pending tx frames */

        if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) {
            if_printf(ifp, "%s: unable to reset "
                "channel %u (%u MHz, flags 0x%x), hal status %u\n",
                __func__, ieee80211_chan2ieee(ic, chan),
                chan->ic_freq, chan->ic_flags, status);
            ret = EIO;
            goto finish;
        }
        sc->sc_diversity = ath_hal_getdiversity(ah);

        /* Let DFS at it in case it's a DFS channel */
        ath_dfs_radar_enable(sc, chan);

        /*
         * Re-enable rx framework.
         */
        if (ath_startrecv(sc) != 0) {
            if_printf(ifp, "%s: unable to restart recv logic\n",
                __func__);
            ret = EIO;
            goto finish;
        }

        /*
         * Change channels and update the h/w rate map
         * if we're switching; e.g. 11a to 11b/g.
         */
        ath_chan_change(sc, chan);

        /*
         * Reset clears the beacon timers; reset them
         * here if needed.
         */
        if (sc->sc_beacons) {       /* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
            if (sc->sc_tdma)
                ath_tdma_config(sc, NULL);
            else
#endif
            ath_beacon_config(sc, NULL);
        }

        /*
         * Re-enable interrupts.
         */
#if 0
        ath_hal_intrset(ah, sc->sc_imask);
#endif
    }

finish:
    /* Release the reset "lock" and re-enable interrupts */
    ATH_PCU_LOCK(sc);
    sc->sc_inreset_cnt--;
    /* XXX only do this if sc_inreset_cnt == 0? */
    ath_hal_intrset(ah, sc->sc_imask);
    ATH_PCU_UNLOCK(sc);

    IF_LOCK(&ifp->if_snd);
    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
    IF_UNLOCK(&ifp->if_snd);
    ath_txrx_start(sc);
    /* XXX ath_start? */

    return ret;
}

/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
5624 */ 5625static void 5626ath_calibrate(void *arg) 5627{ 5628 struct ath_softc *sc = arg; 5629 struct ath_hal *ah = sc->sc_ah; 5630 struct ifnet *ifp = sc->sc_ifp; 5631 struct ieee80211com *ic = ifp->if_l2com; 5632 HAL_BOOL longCal, isCalDone; 5633 HAL_BOOL aniCal, shortCal = AH_FALSE; 5634 int nextcal; 5635 5636 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */ 5637 goto restart; 5638 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz); 5639 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000); 5640 if (sc->sc_doresetcal) 5641 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000); 5642 5643 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal); 5644 if (aniCal) { 5645 sc->sc_stats.ast_ani_cal++; 5646 sc->sc_lastani = ticks; 5647 ath_hal_ani_poll(ah, sc->sc_curchan); 5648 } 5649 5650 if (longCal) { 5651 sc->sc_stats.ast_per_cal++; 5652 sc->sc_lastlongcal = ticks; 5653 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 5654 /* 5655 * Rfgain is out of bounds, reset the chip 5656 * to load new gain values. 5657 */ 5658 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 5659 "%s: rfgain change\n", __func__); 5660 sc->sc_stats.ast_per_rfgain++; 5661 sc->sc_resetcal = 0; 5662 sc->sc_doresetcal = AH_TRUE; 5663 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); 5664 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 5665 return; 5666 } 5667 /* 5668 * If this long cal is after an idle period, then 5669 * reset the data collection state so we start fresh. 
5670 */ 5671 if (sc->sc_resetcal) { 5672 (void) ath_hal_calreset(ah, sc->sc_curchan); 5673 sc->sc_lastcalreset = ticks; 5674 sc->sc_lastshortcal = ticks; 5675 sc->sc_resetcal = 0; 5676 sc->sc_doresetcal = AH_TRUE; 5677 } 5678 } 5679 5680 /* Only call if we're doing a short/long cal, not for ANI calibration */ 5681 if (shortCal || longCal) { 5682 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) { 5683 if (longCal) { 5684 /* 5685 * Calibrate noise floor data again in case of change. 5686 */ 5687 ath_hal_process_noisefloor(ah); 5688 } 5689 } else { 5690 DPRINTF(sc, ATH_DEBUG_ANY, 5691 "%s: calibration of channel %u failed\n", 5692 __func__, sc->sc_curchan->ic_freq); 5693 sc->sc_stats.ast_per_calfail++; 5694 } 5695 if (shortCal) 5696 sc->sc_lastshortcal = ticks; 5697 } 5698 if (!isCalDone) { 5699restart: 5700 /* 5701 * Use a shorter interval to potentially collect multiple 5702 * data samples required to complete calibration. Once 5703 * we're told the work is done we drop back to a longer 5704 * interval between requests. We're more aggressive doing 5705 * work when operating as an AP to improve operation right 5706 * after startup. 
5707 */ 5708 sc->sc_lastshortcal = ticks; 5709 nextcal = ath_shortcalinterval*hz/1000; 5710 if (sc->sc_opmode != HAL_M_HOSTAP) 5711 nextcal *= 10; 5712 sc->sc_doresetcal = AH_TRUE; 5713 } else { 5714 /* nextcal should be the shortest time for next event */ 5715 nextcal = ath_longcalinterval*hz; 5716 if (sc->sc_lastcalreset == 0) 5717 sc->sc_lastcalreset = sc->sc_lastlongcal; 5718 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz) 5719 sc->sc_resetcal = 1; /* setup reset next trip */ 5720 sc->sc_doresetcal = AH_FALSE; 5721 } 5722 /* ANI calibration may occur more often than short/long/resetcal */ 5723 if (ath_anicalinterval > 0) 5724 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000); 5725 5726 if (nextcal != 0) { 5727 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n", 5728 __func__, nextcal, isCalDone ? "" : "!"); 5729 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc); 5730 } else { 5731 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", 5732 __func__); 5733 /* NB: don't rearm timer */ 5734 } 5735} 5736 5737static void 5738ath_scan_start(struct ieee80211com *ic) 5739{ 5740 struct ifnet *ifp = ic->ic_ifp; 5741 struct ath_softc *sc = ifp->if_softc; 5742 struct ath_hal *ah = sc->sc_ah; 5743 u_int32_t rfilt; 5744 5745 /* XXX calibration timer? 
*/ 5746 5747 ATH_LOCK(sc); 5748 sc->sc_scanning = 1; 5749 sc->sc_syncbeacon = 0; 5750 rfilt = ath_calcrxfilter(sc); 5751 ATH_UNLOCK(sc); 5752 5753 ATH_PCU_LOCK(sc); 5754 ath_hal_setrxfilter(ah, rfilt); 5755 ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0); 5756 ATH_PCU_UNLOCK(sc); 5757 5758 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", 5759 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr)); 5760} 5761 5762static void 5763ath_scan_end(struct ieee80211com *ic) 5764{ 5765 struct ifnet *ifp = ic->ic_ifp; 5766 struct ath_softc *sc = ifp->if_softc; 5767 struct ath_hal *ah = sc->sc_ah; 5768 u_int32_t rfilt; 5769 5770 ATH_LOCK(sc); 5771 sc->sc_scanning = 0; 5772 rfilt = ath_calcrxfilter(sc); 5773 ATH_UNLOCK(sc); 5774 5775 ATH_PCU_LOCK(sc); 5776 ath_hal_setrxfilter(ah, rfilt); 5777 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 5778 5779 ath_hal_process_noisefloor(ah); 5780 ATH_PCU_UNLOCK(sc); 5781 5782 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 5783 __func__, rfilt, ether_sprintf(sc->sc_curbssid), 5784 sc->sc_curaid); 5785} 5786 5787#ifdef ATH_ENABLE_11N 5788/* 5789 * For now, just do a channel change. 5790 * 5791 * Later, we'll go through the hard slog of suspending tx/rx, changing rate 5792 * control state and resetting the hardware without dropping frames out 5793 * of the queue. 5794 * 5795 * The unfortunate trouble here is making absolutely sure that the 5796 * channel width change has propagated enough so the hardware 5797 * absolutely isn't handed bogus frames for it's current operating 5798 * mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and 5799 * does occur in parallel, we need to make certain we've blocked 5800 * any further ongoing TX (and RX, that can cause raw TX) 5801 * before we do this. 
 */
static void
ath_update_chw(struct ieee80211com *ic)
{
    struct ifnet *ifp = ic->ic_ifp;
    struct ath_softc *sc = ifp->if_softc;

    DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__);
    ath_set_channel(ic);
}
#endif /* ATH_ENABLE_11N */

/*
 * net80211 channel-set hook: reset onto the current channel and,
 * when returning to the bss channel, arm beacon-timer resync.
 */
static void
ath_set_channel(struct ieee80211com *ic)
{
    struct ifnet *ifp = ic->ic_ifp;
    struct ath_softc *sc = ifp->if_softc;

    (void) ath_chan_set(sc, ic->ic_curchan);
    /*
     * If we are returning to our bss channel then mark state
     * so the next recv'd beacon's tsf will be used to sync the
     * beacon timers. Note that since we only hear beacons in
     * sta/ibss mode this has no effect in other operating modes.
     */
    ATH_LOCK(sc);
    if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
        sc->sc_syncbeacon = 1;
    ATH_UNLOCK(sc);
}

/*
 * Walk the vap list and check if there any vap's in RUN state.
 */
static int
ath_isanyrunningvaps(struct ieee80211vap *this)
{
    struct ieee80211com *ic = this->iv_ic;
    struct ieee80211vap *vap;

    IEEE80211_LOCK_ASSERT(ic);

    /* 'this' vap itself is deliberately excluded from the check */
    TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
        if (vap != this && vap->iv_state >= IEEE80211_S_RUN)
            return 1;
    }
    return 0;
}

/*
 * 802.11 state-machine transition handler.  Called with the com lock
 * held; chains to net80211's handler (av_newstate) and then performs
 * the driver-side work for the new state (beacons, LEDs, keycache,
 * calibration timer, taskqueue gating).
 */
static int
ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
    struct ieee80211com *ic = vap->iv_ic;
    struct ath_softc *sc = ic->ic_ifp->if_softc;
    struct ath_vap *avp = ATH_VAP(vap);
    struct ath_hal *ah = sc->sc_ah;
    struct ieee80211_node *ni = NULL;
    int i, error, stamode;
    u_int32_t rfilt;
    int csa_run_transition = 0;
    /* LED state per target 802.11 state; indexed by nstate */
    static const HAL_LED_STATE leds[] = {
        HAL_LED_INIT,   /* IEEE80211_S_INIT */
        HAL_LED_SCAN,   /* IEEE80211_S_SCAN */
        HAL_LED_AUTH,   /* IEEE80211_S_AUTH */
        HAL_LED_ASSOC,  /* IEEE80211_S_ASSOC */
        HAL_LED_RUN,    /* IEEE80211_S_CAC */
        HAL_LED_RUN,    /* IEEE80211_S_RUN */
        HAL_LED_RUN,    /* IEEE80211_S_CSA */
        HAL_LED_RUN,    /* IEEE80211_S_SLEEP */
    };

    DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
        ieee80211_state_name[vap->iv_state],
        ieee80211_state_name[nstate]);

    /*
     * net80211 _should_ have the comlock asserted at this point.
     * There are some comments around the calls to vap->iv_newstate
     * which indicate that it (newstate) may end up dropping the
     * lock. This and the subsequent lock assert check after newstate
     * are an attempt to catch these and figure out how/why.
     */
    IEEE80211_LOCK_ASSERT(ic);

    if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN)
        csa_run_transition = 1;

    callout_drain(&sc->sc_cal_ch);
    ath_hal_setledstate(ah, leds[nstate]);  /* set LED */

    if (nstate == IEEE80211_S_SCAN) {
        /*
         * Scanning: turn off beacon miss and don't beacon.
         * Mark beacon state so when we reach RUN state we'll
         * [re]setup beacons. Unblock the task q thread so
         * deferred interrupt processing is done.
         */
        ath_hal_intrset(ah,
            sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
        sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
        sc->sc_beacons = 0;
        taskqueue_unblock(sc->sc_tq);
    }

    ni = ieee80211_ref_node(vap->iv_bss);
    rfilt = ath_calcrxfilter(sc);
    stamode = (vap->iv_opmode == IEEE80211_M_STA ||
        vap->iv_opmode == IEEE80211_M_AHDEMO ||
        vap->iv_opmode == IEEE80211_M_IBSS);
    if (stamode && nstate == IEEE80211_S_RUN) {
        sc->sc_curaid = ni->ni_associd;
        IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
        ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
    }
    DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
        __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
    ath_hal_setrxfilter(ah, rfilt);

    /* XXX is this to restore keycache on resume? */
    if (vap->iv_opmode != IEEE80211_M_STA &&
        (vap->iv_flags & IEEE80211_F_PRIVACY)) {
        for (i = 0; i < IEEE80211_WEP_NKID; i++)
            if (ath_hal_keyisvalid(ah, i))
                ath_hal_keysetmac(ah, i, ni->ni_bssid);
    }

    /*
     * Invoke the parent method to do net80211 work.
     */
    error = avp->av_newstate(vap, nstate, arg);
    if (error != 0)
        goto bad;

    /*
     * See above: ensure av_newstate() doesn't drop the lock
     * on us.
     */
    IEEE80211_LOCK_ASSERT(ic);

    if (nstate == IEEE80211_S_RUN) {
        /* NB: collect bss node again, it may have changed */
        ieee80211_free_node(ni);
        ni = ieee80211_ref_node(vap->iv_bss);

        DPRINTF(sc, ATH_DEBUG_STATE,
            "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
            "capinfo 0x%04x chan %d\n", __func__,
            vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid),
            ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));

        switch (vap->iv_opmode) {
#ifdef IEEE80211_SUPPORT_TDMA
        case IEEE80211_M_AHDEMO:
            if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
                break;
            /* fall thru... */
#endif
        case IEEE80211_M_HOSTAP:
        case IEEE80211_M_IBSS:
        case IEEE80211_M_MBSS:
            /*
             * Allocate and setup the beacon frame.
             *
             * Stop any previous beacon DMA. This may be
             * necessary, for example, when an ibss merge
             * causes reconfiguration; there will be a state
             * transition from RUN->RUN that means we may
             * be called with beacon transmission active.
             */
            ath_hal_stoptxdma(ah, sc->sc_bhalq);

            error = ath_beacon_alloc(sc, ni);
            if (error != 0)
                goto bad;
            /*
             * If joining an adhoc network defer beacon timer
             * configuration to the next beacon frame so we
             * have a current TSF to use. Otherwise we're
             * starting an ibss/bss so there's no need to delay;
             * if this is the first vap moving to RUN state, then
             * beacon state needs to be [re]configured.
             */
            if (vap->iv_opmode == IEEE80211_M_IBSS &&
                ni->ni_tstamp.tsf != 0) {
                sc->sc_syncbeacon = 1;
            } else if (!sc->sc_beacons) {
#ifdef IEEE80211_SUPPORT_TDMA
                if (vap->iv_caps & IEEE80211_C_TDMA)
                    ath_tdma_config(sc, vap);
                else
#endif
                    ath_beacon_config(sc, vap);
                sc->sc_beacons = 1;
            }
            break;
        case IEEE80211_M_STA:
            /*
             * Defer beacon timer configuration to the next
             * beacon frame so we have a current TSF to use
             * (any TSF collected when scanning is likely old).
             * However if it's due to a CSA -> RUN transition,
             * force a beacon update so we pick up a lack of
             * beacons from an AP in CAC and thus force a
             * scan.
             */
            sc->sc_syncbeacon = 1;
            if (csa_run_transition)
                ath_beacon_config(sc, vap);
            break;
        case IEEE80211_M_MONITOR:
            /*
             * Monitor mode vaps have only INIT->RUN and RUN->RUN
             * transitions so we must re-enable interrupts here to
             * handle the case of a single monitor mode vap.
             */
            ath_hal_intrset(ah, sc->sc_imask);
            break;
        case IEEE80211_M_WDS:
            break;
        default:
            break;
        }
        /*
         * Let the hal process statistics collected during a
         * scan so it can provide calibrated noise floor data.
         */
        ath_hal_process_noisefloor(ah);
        /*
         * Reset rssi stats; maybe not the best place...
         */
        sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
        sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
        sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
        /*
         * Finally, start any timers and the task q thread
         * (in case we didn't go through SCAN state).
         */
        if (ath_longcalinterval != 0) {
            /* start periodic recalibration timer */
            callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
        } else {
            DPRINTF(sc, ATH_DEBUG_CALIBRATE,
                "%s: calibration disabled\n", __func__);
        }
        taskqueue_unblock(sc->sc_tq);
    } else if (nstate == IEEE80211_S_INIT) {
        /*
         * If there are no vaps left in RUN state then
         * shutdown host/driver operation:
         * o disable interrupts
         * o disable the task queue thread
         * o mark beacon processing as stopped
         */
        if (!ath_isanyrunningvaps(vap)) {
            sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
            /* disable interrupts */
            ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
            taskqueue_block(sc->sc_tq);
            sc->sc_beacons = 0;
        }
#ifdef IEEE80211_SUPPORT_TDMA
        ath_hal_setcca(ah, AH_TRUE);
#endif
    }
bad:
    /* Drop the bss node ref taken above (both success and error paths) */
    ieee80211_free_node(ni);
    return error;
}

/*
 * Allocate a key cache slot to the station so we can
 * setup a mapping from key index to node. The key cache
 * slot is needed for managing antenna state and for
 * compression when stations do not use crypto. We do
 * it uniliaterally here; if crypto is employed this slot
 * will be reassigned.
 */
static void
ath_setup_stationkey(struct ieee80211_node *ni)
{
    struct ieee80211vap *vap = ni->ni_vap;
    struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
    ieee80211_keyix keyix, rxkeyix;

    /* XXX should take a locked ref to vap->iv_bss */
    if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
        /*
         * Key cache is full; we'll fall back to doing
         * the more expensive lookup in software. Note
         * this also means no h/w compression.
         */
        /* XXX msg+statistic */
    } else {
        /* XXX locking? */
        ni->ni_ucastkey.wk_keyix = keyix;
        ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
        /* NB: must mark device key to get called back on delete */
        ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
        IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
        /* NB: this will create a pass-thru key entry */
        ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss);
    }
}

/*
 * Setup driver-specific state for a newly associated node.
 * Note that we're called also on a re-associate, the isnew
 * param tells us if this is the first time or not.
 */
static void
ath_newassoc(struct ieee80211_node *ni, int isnew)
{
    struct ath_node *an = ATH_NODE(ni);
    struct ieee80211vap *vap = ni->ni_vap;
    struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
    const struct ieee80211_txparam *tp = ni->ni_txparms;

    /* Cache rate indices for multicast/management frames */
    an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
    an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);

    ath_rate_newassoc(sc, an, isnew);
    /* First association, no crypto: hand the node a clear-key slot */
    if (isnew &&
        (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
        ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
        ath_setup_stationkey(ni);
}

/*
 * net80211 regdomain-set hook: push the proposed channel list and
 * regulatory parameters down to the HAL for validation.
 */
static int
ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
    int nchans, struct ieee80211_channel chans[])
{
    struct ath_softc *sc = ic->ic_ifp->if_softc;
    struct ath_hal *ah = sc->sc_ah;
    HAL_STATUS status;

    DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
        "%s: rd %u cc %u location %c%s\n",
        __func__, reg->regdomain, reg->country, reg->location,
        reg->ecm ?
        " ecm" : "");

    status = ath_hal_set_channels(ah, chans, nchans,
        reg->country, reg->regdomain);
    if (status != HAL_OK) {
        DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
            __func__, status);
        return EINVAL;      /* XXX */
    }

    return 0;
}

/*
 * net80211 radiocaps hook: report every channel the hardware can
 * support (debug SKU, default country) regardless of the current
 * regulatory configuration.
 */
static void
ath_getradiocaps(struct ieee80211com *ic,
    int maxchans, int *nchans, struct ieee80211_channel chans[])
{
    struct ath_softc *sc = ic->ic_ifp->if_softc;
    struct ath_hal *ah = sc->sc_ah;

    DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
        __func__, SKU_DEBUG, CTRY_DEFAULT);

    /* XXX check return */
    (void) ath_hal_getchannels(ah, chans, maxchans, nchans,
        HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);

}

/*
 * Populate ic_channels/ic_regdomain from the EEPROM at attach time.
 * Returns 0 on success or EINVAL if the hal cannot build the list.
 */
static int
ath_getchannels(struct ath_softc *sc)
{
    struct ifnet *ifp = sc->sc_ifp;
    struct ieee80211com *ic = ifp->if_l2com;
    struct ath_hal *ah = sc->sc_ah;
    HAL_STATUS status;

    /*
     * Collect channel set based on EEPROM contents.
     */
    status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
        &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
    if (status != HAL_OK) {
        if_printf(ifp, "%s: unable to collect channel list from hal, "
            "status %d\n", __func__, status);
        return EINVAL;
    }
    (void) ath_hal_getregdomain(ah, &sc->sc_eerd);
    ath_hal_getcountrycode(ah, &sc->sc_eecc);   /* NB: cannot fail */
    /* XXX map Atheros sku's to net80211 SKU's */
    /* XXX net80211 types too small */
    ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
    ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
    ic->ic_regdomain.isocc[0] = ' ';    /* XXX don't know */
    ic->ic_regdomain.isocc[1] = ' ';

    ic->ic_regdomain.ecm = 1;
    ic->ic_regdomain.location = 'I';

    DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
        "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
        __func__, sc->sc_eerd, sc->sc_eecc,
        ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
        ic->ic_regdomain.location, ic->ic_regdomain.ecm ?
        " ecm" : "");
    return 0;
}

/*
 * Fetch the HAL rate table for the given phy mode and cache it in
 * sc_rates[mode].  Returns non-zero on success, 0 for an unknown
 * mode or a mode the hal has no table for.
 */
static int
ath_rate_setup(struct ath_softc *sc, u_int mode)
{
    struct ath_hal *ah = sc->sc_ah;
    const HAL_RATE_TABLE *rt;

    switch (mode) {
    case IEEE80211_MODE_11A:
        rt = ath_hal_getratetable(ah, HAL_MODE_11A);
        break;
    case IEEE80211_MODE_HALF:
        rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
        break;
    case IEEE80211_MODE_QUARTER:
        rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
        break;
    case IEEE80211_MODE_11B:
        rt = ath_hal_getratetable(ah, HAL_MODE_11B);
        break;
    case IEEE80211_MODE_11G:
        rt = ath_hal_getratetable(ah, HAL_MODE_11G);
        break;
    case IEEE80211_MODE_TURBO_A:
        rt = ath_hal_getratetable(ah, HAL_MODE_108A);
        break;
    case IEEE80211_MODE_TURBO_G:
        rt = ath_hal_getratetable(ah, HAL_MODE_108G);
        break;
    case IEEE80211_MODE_STURBO_A:
        rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
        break;
    case IEEE80211_MODE_11NA:
        rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
        break;
    case IEEE80211_MODE_11NG:
        rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
        break;
    default:
        DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
            __func__, mode);
        return 0;
    }
    sc->sc_rates[mode] = rt;
    return (rt != NULL);
}

/*
 * Switch the current phy mode: rebuild the rate-index map, the
 * hw rate -> radiotap/LED blink tables, and the protection rate.
 */
static void
ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
{
#define N(a)    (sizeof(a)/sizeof(a[0]))
    /* NB: on/off times from the Atheros NDIS driver, w/ permission */
    static const struct {
        u_int       rate;       /* tx/rx 802.11 rate */
        u_int16_t   timeOn;     /* LED on time (ms) */
        u_int16_t   timeOff;    /* LED off time (ms) */
    } blinkrates[] = {
        { 108,  40,  10 },
        {  96,  44,  11 },
        {  72,  50,  13 },
        {  48,  57,  14 },
        {  36,  67,  16 },
        {  24,  80,  20 },
        {  22, 100,  25 },
        {  18, 133,  34 },
        {  12, 160,  40 },
        {  10, 200,  50 },
        {   6, 240,  58 },
6274 { 4, 267, 66 }, 6275 { 2, 400, 100 }, 6276 { 0, 500, 130 }, 6277 /* XXX half/quarter rates */ 6278 }; 6279 const HAL_RATE_TABLE *rt; 6280 int i, j; 6281 6282 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); 6283 rt = sc->sc_rates[mode]; 6284 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); 6285 for (i = 0; i < rt->rateCount; i++) { 6286 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 6287 if (rt->info[i].phy != IEEE80211_T_HT) 6288 sc->sc_rixmap[ieeerate] = i; 6289 else 6290 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i; 6291 } 6292 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); 6293 for (i = 0; i < N(sc->sc_hwmap); i++) { 6294 if (i >= rt->rateCount) { 6295 sc->sc_hwmap[i].ledon = (500 * hz) / 1000; 6296 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000; 6297 continue; 6298 } 6299 sc->sc_hwmap[i].ieeerate = 6300 rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 6301 if (rt->info[i].phy == IEEE80211_T_HT) 6302 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS; 6303 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; 6304 if (rt->info[i].shortPreamble || 6305 rt->info[i].phy == IEEE80211_T_OFDM) 6306 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE; 6307 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags; 6308 for (j = 0; j < N(blinkrates)-1; j++) 6309 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate) 6310 break; 6311 /* NB: this uses the last entry if the rate isn't found */ 6312 /* XXX beware of overlow */ 6313 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000; 6314 sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000; 6315 } 6316 sc->sc_currates = rt; 6317 sc->sc_curmode = mode; 6318 /* 6319 * All protection frames are transmited at 2Mb/s for 6320 * 11g, otherwise at 1Mb/s. 
6321 */ 6322 if (mode == IEEE80211_MODE_11G) 6323 sc->sc_protrix = ath_tx_findrix(sc, 2*2); 6324 else 6325 sc->sc_protrix = ath_tx_findrix(sc, 2*1); 6326 /* NB: caller is responsible for resetting rate control state */ 6327#undef N 6328} 6329 6330static void 6331ath_watchdog(void *arg) 6332{ 6333 struct ath_softc *sc = arg; 6334 int do_reset = 0; 6335 6336 if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) { 6337 struct ifnet *ifp = sc->sc_ifp; 6338 uint32_t hangs; 6339 6340 if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) && 6341 hangs != 0) { 6342 if_printf(ifp, "%s hang detected (0x%x)\n", 6343 hangs & 0xff ? "bb" : "mac", hangs); 6344 } else 6345 if_printf(ifp, "device timeout\n"); 6346 do_reset = 1; 6347 ifp->if_oerrors++; 6348 sc->sc_stats.ast_watchdog++; 6349 } 6350 6351 /* 6352 * We can't hold the lock across the ath_reset() call. 6353 * 6354 * And since this routine can't hold a lock and sleep, 6355 * do the reset deferred. 6356 */ 6357 if (do_reset) { 6358 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); 6359 } 6360 6361 callout_schedule(&sc->sc_wd_ch, hz); 6362} 6363 6364#ifdef ATH_DIAGAPI 6365/* 6366 * Diagnostic interface to the HAL. This is used by various 6367 * tools to do things like retrieve register contents for 6368 * debugging. The mechanism is intentionally opaque so that 6369 * it can change frequently w/o concern for compatiblity. 6370 */ 6371static int 6372ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad) 6373{ 6374 struct ath_hal *ah = sc->sc_ah; 6375 u_int id = ad->ad_id & ATH_DIAG_ID; 6376 void *indata = NULL; 6377 void *outdata = NULL; 6378 u_int32_t insize = ad->ad_in_size; 6379 u_int32_t outsize = ad->ad_out_size; 6380 int error = 0; 6381 6382 if (ad->ad_id & ATH_DIAG_IN) { 6383 /* 6384 * Copy in data. 
6385 */ 6386 indata = malloc(insize, M_TEMP, M_NOWAIT); 6387 if (indata == NULL) { 6388 error = ENOMEM; 6389 goto bad; 6390 } 6391 error = copyin(ad->ad_in_data, indata, insize); 6392 if (error) 6393 goto bad; 6394 } 6395 if (ad->ad_id & ATH_DIAG_DYN) { 6396 /* 6397 * Allocate a buffer for the results (otherwise the HAL 6398 * returns a pointer to a buffer where we can read the 6399 * results). Note that we depend on the HAL leaving this 6400 * pointer for us to use below in reclaiming the buffer; 6401 * may want to be more defensive. 6402 */ 6403 outdata = malloc(outsize, M_TEMP, M_NOWAIT); 6404 if (outdata == NULL) { 6405 error = ENOMEM; 6406 goto bad; 6407 } 6408 } 6409 if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) { 6410 if (outsize < ad->ad_out_size) 6411 ad->ad_out_size = outsize; 6412 if (outdata != NULL) 6413 error = copyout(outdata, ad->ad_out_data, 6414 ad->ad_out_size); 6415 } else { 6416 error = EINVAL; 6417 } 6418bad: 6419 if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL) 6420 free(indata, M_TEMP); 6421 if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL) 6422 free(outdata, M_TEMP); 6423 return error; 6424} 6425#endif /* ATH_DIAGAPI */ 6426 6427static int 6428ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 6429{ 6430#define IS_RUNNING(ifp) \ 6431 ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) 6432 struct ath_softc *sc = ifp->if_softc; 6433 struct ieee80211com *ic = ifp->if_l2com; 6434 struct ifreq *ifr = (struct ifreq *)data; 6435 const HAL_RATE_TABLE *rt; 6436 int error = 0; 6437 6438 switch (cmd) { 6439 case SIOCSIFFLAGS: 6440 ATH_LOCK(sc); 6441 if (IS_RUNNING(ifp)) { 6442 /* 6443 * To avoid rescanning another access point, 6444 * do not call ath_init() here. Instead, 6445 * only reflect promisc mode settings. 6446 */ 6447 ath_mode_init(sc); 6448 } else if (ifp->if_flags & IFF_UP) { 6449 /* 6450 * Beware of being called during attach/detach 6451 * to reset promiscuous mode. 
In that case we 6452 * will still be marked UP but not RUNNING. 6453 * However trying to re-init the interface 6454 * is the wrong thing to do as we've already 6455 * torn down much of our state. There's 6456 * probably a better way to deal with this. 6457 */ 6458 if (!sc->sc_invalid) 6459 ath_init(sc); /* XXX lose error */ 6460 } else { 6461 ath_stop_locked(ifp); 6462#ifdef notyet 6463 /* XXX must wakeup in places like ath_vap_delete */ 6464 if (!sc->sc_invalid) 6465 ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP); 6466#endif 6467 } 6468 ATH_UNLOCK(sc); 6469 break; 6470 case SIOCGIFMEDIA: 6471 case SIOCSIFMEDIA: 6472 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 6473 break; 6474 case SIOCGATHSTATS: 6475 /* NB: embed these numbers to get a consistent view */ 6476 sc->sc_stats.ast_tx_packets = ifp->if_opackets; 6477 sc->sc_stats.ast_rx_packets = ifp->if_ipackets; 6478 sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi); 6479 sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi); 6480#ifdef IEEE80211_SUPPORT_TDMA 6481 sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap); 6482 sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam); 6483#endif 6484 rt = sc->sc_currates; 6485 sc->sc_stats.ast_tx_rate = 6486 rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC; 6487 if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT) 6488 sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS; 6489 return copyout(&sc->sc_stats, 6490 ifr->ifr_data, sizeof (sc->sc_stats)); 6491 case SIOCZATHSTATS: 6492 error = priv_check(curthread, PRIV_DRIVER); 6493 if (error == 0) 6494 memset(&sc->sc_stats, 0, sizeof(sc->sc_stats)); 6495 break; 6496#ifdef ATH_DIAGAPI 6497 case SIOCGATHDIAG: 6498 error = ath_ioctl_diag(sc, (struct ath_diag *) ifr); 6499 break; 6500 case SIOCGATHPHYERR: 6501 error = ath_ioctl_phyerr(sc,(struct ath_diag*) ifr); 6502 break; 6503#endif 6504 case SIOCGIFADDR: 6505 error = ether_ioctl(ifp, cmd, data); 6506 break; 6507 default: 6508 error = 
EINVAL; 6509 break; 6510 } 6511 return error; 6512#undef IS_RUNNING 6513} 6514 6515/* 6516 * Announce various information on device/driver attach. 6517 */ 6518static void 6519ath_announce(struct ath_softc *sc) 6520{ 6521 struct ifnet *ifp = sc->sc_ifp; 6522 struct ath_hal *ah = sc->sc_ah; 6523 6524 if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n", 6525 ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev, 6526 ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf); 6527 if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n", 6528 ah->ah_analog2GhzRev, ah->ah_analog5GhzRev); 6529 if (bootverbose) { 6530 int i; 6531 for (i = 0; i <= WME_AC_VO; i++) { 6532 struct ath_txq *txq = sc->sc_ac2q[i]; 6533 if_printf(ifp, "Use hw queue %u for %s traffic\n", 6534 txq->axq_qnum, ieee80211_wme_acnames[i]); 6535 } 6536 if_printf(ifp, "Use hw queue %u for CAB traffic\n", 6537 sc->sc_cabq->axq_qnum); 6538 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq); 6539 } 6540 if (ath_rxbuf != ATH_RXBUF) 6541 if_printf(ifp, "using %u rx buffers\n", ath_rxbuf); 6542 if (ath_txbuf != ATH_TXBUF) 6543 if_printf(ifp, "using %u tx buffers\n", ath_txbuf); 6544 if (sc->sc_mcastkey && bootverbose) 6545 if_printf(ifp, "using multicast key search\n"); 6546} 6547 6548#ifdef IEEE80211_SUPPORT_TDMA 6549static void 6550ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, u_int32_t bintval) 6551{ 6552 struct ath_hal *ah = sc->sc_ah; 6553 HAL_BEACON_TIMERS bt; 6554 6555 bt.bt_intval = bintval | HAL_BEACON_ENA; 6556 bt.bt_nexttbtt = nexttbtt; 6557 bt.bt_nextdba = (nexttbtt<<3) - sc->sc_tdmadbaprep; 6558 bt.bt_nextswba = (nexttbtt<<3) - sc->sc_tdmaswbaprep; 6559 bt.bt_nextatim = nexttbtt+1; 6560 /* Enables TBTT, DBA, SWBA timers by default */ 6561 bt.bt_flags = 0; 6562 ath_hal_beaconsettimers(ah, &bt); 6563} 6564 6565/* 6566 * Calculate the beacon interval. This is periodic in the 6567 * superframe for the bss. 
We assume each station is configured 6568 * identically wrt transmit rate so the guard time we calculate 6569 * above will be the same on all stations. Note we need to 6570 * factor in the xmit time because the hardware will schedule 6571 * a frame for transmit if the start of the frame is within 6572 * the burst time. When we get hardware that properly kills 6573 * frames in the PCU we can reduce/eliminate the guard time. 6574 * 6575 * Roundup to 1024 is so we have 1 TU buffer in the guard time 6576 * to deal with the granularity of the nexttbtt timer. 11n MAC's 6577 * with 1us timer granularity should allow us to reduce/eliminate 6578 * this. 6579 */ 6580static void 6581ath_tdma_bintvalsetup(struct ath_softc *sc, 6582 const struct ieee80211_tdma_state *tdma) 6583{ 6584 /* copy from vap state (XXX check all vaps have same value?) */ 6585 sc->sc_tdmaslotlen = tdma->tdma_slotlen; 6586 6587 sc->sc_tdmabintval = roundup((sc->sc_tdmaslotlen+sc->sc_tdmaguard) * 6588 tdma->tdma_slotcnt, 1024); 6589 sc->sc_tdmabintval >>= 10; /* TSF -> TU */ 6590 if (sc->sc_tdmabintval & 1) 6591 sc->sc_tdmabintval++; 6592 6593 if (tdma->tdma_slot == 0) { 6594 /* 6595 * Only slot 0 beacons; other slots respond. 6596 */ 6597 sc->sc_imask |= HAL_INT_SWBA; 6598 sc->sc_tdmaswba = 0; /* beacon immediately */ 6599 } else { 6600 /* XXX all vaps must be slot 0 or slot !0 */ 6601 sc->sc_imask &= ~HAL_INT_SWBA; 6602 } 6603} 6604 6605/* 6606 * Max 802.11 overhead. This assumes no 4-address frames and 6607 * the encapsulation done by ieee80211_encap (llc). We also 6608 * include potential crypto overhead. 6609 */ 6610#define IEEE80211_MAXOVERHEAD \ 6611 (sizeof(struct ieee80211_qosframe) \ 6612 + sizeof(struct llc) \ 6613 + IEEE80211_ADDR_LEN \ 6614 + IEEE80211_WEP_IVLEN \ 6615 + IEEE80211_WEP_KIDLEN \ 6616 + IEEE80211_WEP_CRCLEN \ 6617 + IEEE80211_WEP_MICLEN \ 6618 + IEEE80211_CRC_LEN) 6619 6620/* 6621 * Setup initially for tdma operation. 
Start the beacon 6622 * timers and enable SWBA if we are slot 0. Otherwise 6623 * we wait for slot 0 to arrive so we can sync up before 6624 * starting to transmit. 6625 */ 6626static void 6627ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap) 6628{ 6629 struct ath_hal *ah = sc->sc_ah; 6630 struct ifnet *ifp = sc->sc_ifp; 6631 struct ieee80211com *ic = ifp->if_l2com; 6632 const struct ieee80211_txparam *tp; 6633 const struct ieee80211_tdma_state *tdma = NULL; 6634 int rix; 6635 6636 if (vap == NULL) { 6637 vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */ 6638 if (vap == NULL) { 6639 if_printf(ifp, "%s: no vaps?\n", __func__); 6640 return; 6641 } 6642 } 6643 /* XXX should take a locked ref to iv_bss */ 6644 tp = vap->iv_bss->ni_txparms; 6645 /* 6646 * Calculate the guard time for each slot. This is the 6647 * time to send a maximal-size frame according to the 6648 * fixed/lowest transmit rate. Note that the interface 6649 * mtu does not include the 802.11 overhead so we must 6650 * tack that on (ath_hal_computetxtime includes the 6651 * preamble and plcp in it's calculation). 
6652 */ 6653 tdma = vap->iv_tdma; 6654 if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 6655 rix = ath_tx_findrix(sc, tp->ucastrate); 6656 else 6657 rix = ath_tx_findrix(sc, tp->mcastrate); 6658 /* XXX short preamble assumed */ 6659 sc->sc_tdmaguard = ath_hal_computetxtime(ah, sc->sc_currates, 6660 ifp->if_mtu + IEEE80211_MAXOVERHEAD, rix, AH_TRUE); 6661 6662 ath_hal_intrset(ah, 0); 6663 6664 ath_beaconq_config(sc); /* setup h/w beacon q */ 6665 if (sc->sc_setcca) 6666 ath_hal_setcca(ah, AH_FALSE); /* disable CCA */ 6667 ath_tdma_bintvalsetup(sc, tdma); /* calculate beacon interval */ 6668 ath_tdma_settimers(sc, sc->sc_tdmabintval, 6669 sc->sc_tdmabintval | HAL_BEACON_RESET_TSF); 6670 sc->sc_syncbeacon = 0; 6671 6672 sc->sc_avgtsfdeltap = TDMA_DUMMY_MARKER; 6673 sc->sc_avgtsfdeltam = TDMA_DUMMY_MARKER; 6674 6675 ath_hal_intrset(ah, sc->sc_imask); 6676 6677 DPRINTF(sc, ATH_DEBUG_TDMA, "%s: slot %u len %uus cnt %u " 6678 "bsched %u guard %uus bintval %u TU dba prep %u\n", __func__, 6679 tdma->tdma_slot, tdma->tdma_slotlen, tdma->tdma_slotcnt, 6680 tdma->tdma_bintval, sc->sc_tdmaguard, sc->sc_tdmabintval, 6681 sc->sc_tdmadbaprep); 6682} 6683 6684/* 6685 * Update tdma operation. Called from the 802.11 layer 6686 * when a beacon is received from the TDMA station operating 6687 * in the slot immediately preceding us in the bss. Use 6688 * the rx timestamp for the beacon frame to update our 6689 * beacon timers so we follow their schedule. Note that 6690 * by using the rx timestamp we implicitly include the 6691 * propagation delay in our schedule. 
6692 */ 6693static void 6694ath_tdma_update(struct ieee80211_node *ni, 6695 const struct ieee80211_tdma_param *tdma, int changed) 6696{ 6697#define TSF_TO_TU(_h,_l) \ 6698 ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10)) 6699#define TU_TO_TSF(_tu) (((u_int64_t)(_tu)) << 10) 6700 struct ieee80211vap *vap = ni->ni_vap; 6701 struct ieee80211com *ic = ni->ni_ic; 6702 struct ath_softc *sc = ic->ic_ifp->if_softc; 6703 struct ath_hal *ah = sc->sc_ah; 6704 const HAL_RATE_TABLE *rt = sc->sc_currates; 6705 u_int64_t tsf, rstamp, nextslot, nexttbtt; 6706 u_int32_t txtime, nextslottu; 6707 int32_t tudelta, tsfdelta; 6708 const struct ath_rx_status *rs; 6709 int rix; 6710 6711 sc->sc_stats.ast_tdma_update++; 6712 6713 /* 6714 * Check for and adopt configuration changes. 6715 */ 6716 if (changed != 0) { 6717 const struct ieee80211_tdma_state *ts = vap->iv_tdma; 6718 6719 ath_tdma_bintvalsetup(sc, ts); 6720 if (changed & TDMA_UPDATE_SLOTLEN) 6721 ath_wme_update(ic); 6722 6723 DPRINTF(sc, ATH_DEBUG_TDMA, 6724 "%s: adopt slot %u slotcnt %u slotlen %u us " 6725 "bintval %u TU\n", __func__, 6726 ts->tdma_slot, ts->tdma_slotcnt, ts->tdma_slotlen, 6727 sc->sc_tdmabintval); 6728 6729 /* XXX right? */ 6730 ath_hal_intrset(ah, sc->sc_imask); 6731 /* NB: beacon timers programmed below */ 6732 } 6733 6734 /* extend rx timestamp to 64 bits */ 6735 rs = sc->sc_lastrs; 6736 tsf = ath_hal_gettsf64(ah); 6737 rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf); 6738 /* 6739 * The rx timestamp is set by the hardware on completing 6740 * reception (at the point where the rx descriptor is DMA'd 6741 * to the host). To find the start of our next slot we 6742 * must adjust this time by the time required to send 6743 * the packet just received. 
6744 */ 6745 rix = rt->rateCodeToIndex[rs->rs_rate]; 6746 txtime = ath_hal_computetxtime(ah, rt, rs->rs_datalen, rix, 6747 rt->info[rix].shortPreamble); 6748 /* NB: << 9 is to cvt to TU and /2 */ 6749 nextslot = (rstamp - txtime) + (sc->sc_tdmabintval << 9); 6750 nextslottu = TSF_TO_TU(nextslot>>32, nextslot) & HAL_BEACON_PERIOD; 6751 6752 /* 6753 * Retrieve the hardware NextTBTT in usecs 6754 * and calculate the difference between what the 6755 * other station thinks and what we have programmed. This 6756 * lets us figure how to adjust our timers to match. The 6757 * adjustments are done by pulling the TSF forward and possibly 6758 * rewriting the beacon timers. 6759 */ 6760 nexttbtt = ath_hal_getnexttbtt(ah); 6761 tsfdelta = (int32_t)((nextslot % TU_TO_TSF(HAL_BEACON_PERIOD + 1)) - nexttbtt); 6762 6763 DPRINTF(sc, ATH_DEBUG_TDMA_TIMER, 6764 "tsfdelta %d avg +%d/-%d\n", tsfdelta, 6765 TDMA_AVG(sc->sc_avgtsfdeltap), TDMA_AVG(sc->sc_avgtsfdeltam)); 6766 6767 if (tsfdelta < 0) { 6768 TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0); 6769 TDMA_SAMPLE(sc->sc_avgtsfdeltam, -tsfdelta); 6770 tsfdelta = -tsfdelta % 1024; 6771 nextslottu++; 6772 } else if (tsfdelta > 0) { 6773 TDMA_SAMPLE(sc->sc_avgtsfdeltap, tsfdelta); 6774 TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0); 6775 tsfdelta = 1024 - (tsfdelta % 1024); 6776 nextslottu++; 6777 } else { 6778 TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0); 6779 TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0); 6780 } 6781 tudelta = nextslottu - TSF_TO_TU(nexttbtt >> 32, nexttbtt); 6782 6783 /* 6784 * Copy sender's timetstamp into tdma ie so they can 6785 * calculate roundtrip time. We submit a beacon frame 6786 * below after any timer adjustment. The frame goes out 6787 * at the next TBTT so the sender can calculate the 6788 * roundtrip by inspecting the tdma ie in our beacon frame. 6789 * 6790 * NB: This tstamp is subtlely preserved when 6791 * IEEE80211_BEACON_TDMA is marked (e.g. when the 6792 * slot position changes) because ieee80211_add_tdma 6793 * skips over the data. 
6794 */ 6795 memcpy(ATH_VAP(vap)->av_boff.bo_tdma + 6796 __offsetof(struct ieee80211_tdma_param, tdma_tstamp), 6797 &ni->ni_tstamp.data, 8); 6798#if 0 6799 DPRINTF(sc, ATH_DEBUG_TDMA_TIMER, 6800 "tsf %llu nextslot %llu (%d, %d) nextslottu %u nexttbtt %llu (%d)\n", 6801 (unsigned long long) tsf, (unsigned long long) nextslot, 6802 (int)(nextslot - tsf), tsfdelta, nextslottu, nexttbtt, tudelta); 6803#endif 6804 /* 6805 * Adjust the beacon timers only when pulling them forward 6806 * or when going back by less than the beacon interval. 6807 * Negative jumps larger than the beacon interval seem to 6808 * cause the timers to stop and generally cause instability. 6809 * This basically filters out jumps due to missed beacons. 6810 */ 6811 if (tudelta != 0 && (tudelta > 0 || -tudelta < sc->sc_tdmabintval)) { 6812 ath_tdma_settimers(sc, nextslottu, sc->sc_tdmabintval); 6813 sc->sc_stats.ast_tdma_timers++; 6814 } 6815 if (tsfdelta > 0) { 6816 ath_hal_adjusttsf(ah, tsfdelta); 6817 sc->sc_stats.ast_tdma_tsf++; 6818 } 6819 ath_tdma_beacon_send(sc, vap); /* prepare response */ 6820#undef TU_TO_TSF 6821#undef TSF_TO_TU 6822} 6823 6824/* 6825 * Transmit a beacon frame at SWBA. Dynamic updates 6826 * to the frame contents are done as needed. 6827 */ 6828static void 6829ath_tdma_beacon_send(struct ath_softc *sc, struct ieee80211vap *vap) 6830{ 6831 struct ath_hal *ah = sc->sc_ah; 6832 struct ath_buf *bf; 6833 int otherant; 6834 6835 /* 6836 * Check if the previous beacon has gone out. If 6837 * not don't try to post another, skip this period 6838 * and wait for the next. Missed beacons indicate 6839 * a problem and should not occur. If we miss too 6840 * many consecutive beacons reset the device. 
6841 */ 6842 if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) { 6843 sc->sc_bmisscount++; 6844 DPRINTF(sc, ATH_DEBUG_BEACON, 6845 "%s: missed %u consecutive beacons\n", 6846 __func__, sc->sc_bmisscount); 6847 if (sc->sc_bmisscount >= ath_bstuck_threshold) 6848 taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask); 6849 return; 6850 } 6851 if (sc->sc_bmisscount != 0) { 6852 DPRINTF(sc, ATH_DEBUG_BEACON, 6853 "%s: resume beacon xmit after %u misses\n", 6854 __func__, sc->sc_bmisscount); 6855 sc->sc_bmisscount = 0; 6856 } 6857 6858 /* 6859 * Check recent per-antenna transmit statistics and flip 6860 * the default antenna if noticeably more frames went out 6861 * on the non-default antenna. 6862 * XXX assumes 2 anntenae 6863 */ 6864 if (!sc->sc_diversity) { 6865 otherant = sc->sc_defant & 1 ? 2 : 1; 6866 if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2) 6867 ath_setdefantenna(sc, otherant); 6868 sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0; 6869 } 6870 6871 bf = ath_beacon_generate(sc, vap); 6872 if (bf != NULL) { 6873 /* 6874 * Stop any current dma and put the new frame on the queue. 6875 * This should never fail since we check above that no frames 6876 * are still pending on the queue. 6877 */ 6878 if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) { 6879 DPRINTF(sc, ATH_DEBUG_ANY, 6880 "%s: beacon queue %u did not stop?\n", 6881 __func__, sc->sc_bhalq); 6882 /* NB: the HAL still stops DMA, so proceed */ 6883 } 6884 ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr); 6885 ath_hal_txstart(ah, sc->sc_bhalq); 6886 6887 sc->sc_stats.ast_be_xmit++; /* XXX per-vap? */ 6888 6889 /* 6890 * Record local TSF for our last send for use 6891 * in arbitrating slot collisions. 
6892 */ 6893 /* XXX should take a locked ref to iv_bss */ 6894 vap->iv_bss->ni_tstamp.tsf = ath_hal_gettsf64(ah); 6895 } 6896} 6897#endif /* IEEE80211_SUPPORT_TDMA */ 6898 6899static void 6900ath_dfs_tasklet(void *p, int npending) 6901{ 6902 struct ath_softc *sc = (struct ath_softc *) p; 6903 struct ifnet *ifp = sc->sc_ifp; 6904 struct ieee80211com *ic = ifp->if_l2com; 6905 6906 /* 6907 * If previous processing has found a radar event, 6908 * signal this to the net80211 layer to begin DFS 6909 * processing. 6910 */ 6911 if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) { 6912 /* DFS event found, initiate channel change */ 6913 /* 6914 * XXX doesn't currently tell us whether the event 6915 * XXX was found in the primary or extension 6916 * XXX channel! 6917 */ 6918 IEEE80211_LOCK(ic); 6919 ieee80211_dfs_notify_radar(ic, sc->sc_curchan); 6920 IEEE80211_UNLOCK(ic); 6921 } 6922} 6923 6924MODULE_VERSION(if_ath, 1); 6925MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */ 6926#if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ) 6927MODULE_DEPEND(if_ath, alq, 1, 1, 1); 6928#endif 6929