/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>	/* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#define	ATH_KTR_INTR	KTR_SPARE4
#define	ATH_KTR_ERR	KTR_SPARE3

/*
 * ATH_BCBUF determines the number of vap's that can transmit
 * beacons and also (currently) the number of vap's that can
 * have unique mac addresses/bssid.  When staggering beacons
 * 4 is probably a good max as otherwise the beacons become
 * very closely spaced and there is limited time for cab q traffic
 * to go out.  You can burst beacons instead but that is not good
 * for stations in power save and at some point you really want
 * another radio (and channel).
 *
 * The limit on the number of mac addresses is tied to our use of
 * the U/L bit and tracking addresses in a byte; it would be
 * worthwhile to allow more for applications like proxy sta.
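 *
 * For illustration (a sketch of the scheme implemented by
 * assign_address() below, not normative): with an EEPROM MAC of
 * 00:03:7f:xx:xx:xx, vap 0 keeps that address, while vap index
 * i (1..7) sets the U/L bit and encodes i via
 * mac[0] |= (i << 2) | 0x2, so vap 1 becomes 06:03:7f:xx:xx:xx,
 * vap 2 becomes 0a:03:7f:xx:xx:xx, and so on.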
 */
CTASSERT(ATH_BCBUF <= 8);

static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_vap_delete(struct ieee80211vap *);
static void	ath_init(void *);
static void	ath_stop_locked(struct ifnet *);
static void	ath_stop(struct ifnet *);
static int	ath_reset_vap(struct ieee80211vap *, u_long);
static int	ath_media_change(struct ifnet *);
static void	ath_watchdog(void *);
static int	ath_ioctl(struct ifnet *, u_long, caddr_t);
static void	ath_fatal_proc(void *, int);
static void	ath_bmiss_vap(struct ieee80211vap *);
static void	ath_bmiss_proc(void *, int);
static void	ath_key_update_begin(struct ieee80211vap *);
static void	ath_key_update_end(struct ieee80211vap *);
static void	ath_update_mcast(struct ifnet *);
static void	ath_update_promisc(struct ifnet *);
static void	ath_updateslot(struct ifnet *);
static void	ath_bstuck_proc(void *, int);
static void	ath_reset_proc(void *, int);
static void	ath_descdma_cleanup(struct ath_softc *sc,
			struct ath_descdma *, ath_bufhead *);
static int	ath_desc_alloc(struct ath_softc *);
static void	ath_desc_free(struct ath_softc *);
static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
			const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_node_cleanup(struct ieee80211_node *);
static void	ath_node_free(struct ieee80211_node *);
static void	ath_node_getsignal(const struct ieee80211_node *,
			int8_t *, int8_t *);
static void	ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
static struct ath_txq *ath_txq_setup(struct ath_softc *, int qtype,
			int subtype);
static int	ath_tx_setup(struct ath_softc *, int, int);
static void	ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
static void	ath_tx_cleanup(struct ath_softc *);
static void	ath_tx_proc_q0(void *, int);
static void	ath_tx_proc_q0123(void *, int);
static void	ath_tx_proc(void *, int);
static void	ath_txq_sched_tasklet(void *, int);
static int	ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
static void	ath_draintxq(struct ath_softc *, ATH_RESET_TYPE reset_type);
static void	ath_chan_change(struct ath_softc *,
			struct ieee80211_channel *);
static void	ath_scan_start(struct ieee80211com *);
static void	ath_scan_end(struct ieee80211com *);
static void	ath_set_channel(struct ieee80211com *);
#ifdef	ATH_ENABLE_11N
static void	ath_update_chw(struct ieee80211com *);
#endif	/* ATH_ENABLE_11N */
static void	ath_calibrate(void *);
static int	ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	ath_setup_stationkey(struct ieee80211_node *);
static void	ath_newassoc(struct ieee80211_node *, int);
static int	ath_setregdomain(struct ieee80211com *,
			struct ieee80211_regdomain *, int,
			struct ieee80211_channel []);
static void	ath_getradiocaps(struct ieee80211com *, int, int *,
			struct ieee80211_channel []);
static int	ath_getchannels(struct ath_softc *);

static int	ath_rate_setup(struct ath_softc *, u_int mode);
static void	ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);

static void	ath_announce(struct ath_softc *);

static void	ath_dfs_tasklet(void *, int);

#ifdef IEEE80211_SUPPORT_TDMA
#include <dev/ath/if_ath_tdma.h>
#endif

#if 0
#define	TDMA_EP_MULTIPLIER	(1<<10)	/* pow2 to optimize out * and / */
#define	TDMA_LPF_LEN		6
#define	TDMA_DUMMY_MARKER	0x127
#define	TDMA_EP_MUL(x, mul)	((x) * (mul))
#define	TDMA_IN(x)		(TDMA_EP_MUL((x), TDMA_EP_MULTIPLIER))
#define	TDMA_LPF(x, y, len) \
    ((x != TDMA_DUMMY_MARKER) ? (((x) * ((len)-1) + (y)) / (len)) : (y))
#define	TDMA_SAMPLE(x, y) do {					\
	x = TDMA_LPF((x), TDMA_IN(y), TDMA_LPF_LEN);		\
} while (0)
#define	TDMA_EP_RND(x,mul) \
	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
#define	TDMA_AVG(x)		TDMA_EP_RND(x, TDMA_EP_MULTIPLIER)
#endif /* IEEE80211_SUPPORT_TDMA */
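
/*
 * A worked example of the (currently disabled) fixed-point filter
 * above, for illustration only: samples are scaled up by
 * TDMA_EP_MULTIPLIER (1024), so TDMA_SAMPLE(x, 3) folds TDMA_IN(3) =
 * 3072 into the running average as x = (x*5 + 3072)/6, and
 * TDMA_AVG(x) rounds back to sample units, e.g.
 * TDMA_AVG(3584) = (3584 + 1023)/1024 = 4 since 3584 % 1024 = 512
 * is >= 1024/2.
 */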

SYSCTL_DECL(_hw_ath);

/* XXX validate sysctl values */
static	int ath_longcalinterval = 30;		/* long cals every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
	    0, "long chip calibration interval (secs)");
static	int ath_shortcalinterval = 100;		/* short cals every 100 ms */
SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
	    0, "short chip calibration interval (msecs)");
static	int ath_resetcalinterval = 20*60;	/* reset cal state 20 mins */
SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
	    0, "reset chip calibration results (secs)");
static	int ath_anicalinterval = 100;		/* ANI calibration - 100 msec */
SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
	    0, "ANI calibration (msecs)");

static	int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
static	int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
static	int ath_txbuf_mgmt = ATH_MGMT_TXBUF;	/* # mgmt tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RW, &ath_txbuf_mgmt,
	    0, "tx (mgmt) buffers allocated");
TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt);

int ath_bstuck_threshold = 4;		/* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
	    0, "max missed beacon xmits before chip reset");

MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

#define	HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define	HAL_MODE_HT40 \
	(HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
	HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)

int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct ath_hal *ah = NULL;
	HAL_STATUS status;
	int error = 0, i;
	u_int wmodes;
	uint8_t macaddr[IEEE80211_ADDR_LEN];
	int rx_chainmask, tx_chainmask;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto bad;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
	    sc->sc_eepromdata, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
			status);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */
#ifdef	ATH_DEBUG
	sc->sc_debug = ath_debug;
#endif

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.  If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	if (sc->sc_keymax > ATH_KEYMAX) {
		if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
			ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);

	/*
	 * Collect the default channel list.
	 */
	error = ath_getchannels(sc);
	if (error != 0)
		goto bad;

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_11NA);
	ath_rate_setup(sc, IEEE80211_MODE_11NG);
	ath_rate_setup(sc, IEEE80211_MODE_HALF);
	ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	error = ath_desc_alloc(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate descriptors: %d\n", error);
		goto bad;
	}
	callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0);

	ATH_TXBUF_LOCK_INIT(sc);

	sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, ath_rx_tasklet, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask, 0, ath_bstuck_proc, sc);
	TASK_INIT(&sc->sc_resettask, 0, ath_reset_proc, sc);
	TASK_INIT(&sc->sc_txqtask, 0, ath_txq_sched_tasklet, sc);
	TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 *
	 * XXX PS-Poll
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == (u_int) -1) {
		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		if_printf(ifp, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}

	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 */
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
	case 0x01:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		break;
	case 0x0f:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		break;
	default:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
		break;
	}

	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the antenna state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}

	/* Attach DFS module */
	if (! ath_dfs_attach(sc)) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach DFS\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Start DFS processing tasklet */
	TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);

	/* Configure LED state */
	sc->sc_blinking = 0;
	sc->sc_ledstate = 1;
	sc->sc_ledon = 0;			/* low true */
	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7sec */
	callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);

	/*
	 * Don't setup hardware-based blinking.
	 *
	 * Although some NICs may have this configured in the
	 * default reset register values, the user may wish
	 * to alter which pins have which function.
	 *
	 * The reference driver attaches the MAC network LED to GPIO1 and
	 * the MAC power LED to GPIO2.  However, the DWA-552 cardbus
	 * NIC has these reversed.
	 */
	sc->sc_hardled = (1 == 0);
	sc->sc_led_net_pin = -1;
	sc->sc_led_pwr_pin = -1;
	/*
	 * Auto-enable soft led processing for IBM cards and for
	 * 5211 minipci cards.  Users can also manually enable/disable
	 * support with a sysctl.
	 */
	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ath_start;
	ifp->if_ioctl = ath_ioctl;
	ifp->if_init = ath_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode */
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
		| IEEE80211_C_WDS		/* 4-address traffic works */
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
#ifndef	ATH_ENABLE_11N
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
#endif
		| IEEE80211_C_TXFRAG		/* handle tx frags */
#ifdef	ATH_ENABLE_DFS
		| IEEE80211_C_DFS		/* Enable radar detection */
#endif
		;
	/*
	 * Query the hal to figure out h/w crypto support.
	 */
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		/*
		 * If the h/w supports storing tx+rx MIC keys
		 * in one cache slot automatically enable use.
		 */
		if (ath_hal_hastkipsplit(ah) ||
		    !ath_hal_settkipsplit(ah, AH_FALSE))
			sc->sc_splitmic = 1;
		/*
		 * If the h/w can do TKIP MIC together with WME then
		 * we use it; otherwise we force the MIC to be done
		 * in software by the net80211 layer.
		 */
		if (ath_hal_haswmetkipmic(ah))
			sc->sc_wmetkipmic = 1;
	}
	sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
	/*
	 * Check for multicast key search support.
	 */
	if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
	    !ath_hal_getmcastkeysearch(sc->sc_ah)) {
		ath_hal_setmcastkeysearch(sc->sc_ah, 1);
	}
	sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(sc->sc_keymap, i);
		setbit(sc->sc_keymap, i+64);
		if (sc->sc_splitmic) {
			setbit(sc->sc_keymap, i+32);
			setbit(sc->sc_keymap, i+32+64);
		}
	}
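	/*
	 * For illustration (not normative): with IEEE80211_WEP_NKID = 4
	 * global keys and split-MIC hardware, key i (0..3) consumes
	 * slot i (key), slot i+64 (MIC), and slots i+32 and i+32+64,
	 * so slots 0-3, 32-35, 64-67 and 96-99 are reserved before any
	 * per-node keys are allocated.
	 */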
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for misc other capabilities.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;
	sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
	sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
	sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
	sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
	sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
	if (ath_hal_hasfastframes(ah))
		ic->ic_caps |= IEEE80211_C_FF;
	wmodes = ath_hal_getwirelessmodes(ah);
	if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
		ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
	if (ath_hal_macversion(ah) > 0x78) {
		ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
		ic->ic_tdma_update = ath_tdma_update;
	}
#endif

	/*
	 * TODO: enforce that at least this many frames are available
	 * in the txbuf list before allowing data frames (raw or
	 * otherwise) to be transmitted.
	 */
	sc->sc_txq_data_minfree = 10;
	/*
	 * Leave this as default to maintain legacy behaviour.
	 * Shortening the cabq/mcastq may end up causing some
	 * undesirable behaviour.
	 */
	sc->sc_txq_mcastq_maxdepth = ath_txbuf;

	/*
	 * Allow the TX and RX chainmasks to be overridden by
	 * environment variables and/or device.hints.
	 *
	 * This must be done early - before the hardware is
	 * calibrated or before the 802.11n stream calculation
	 * is done.
	 */
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "rx_chainmask",
	    &rx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n",
		    rx_chainmask);
		(void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask);
	}
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "tx_chainmask",
	    &tx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n",
		    tx_chainmask);
		(void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask);
	}

#ifdef	ATH_ENABLE_11N
	/*
	 * Query HT capabilities
	 */
	if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
	    (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
		int rxs, txs;

		device_printf(sc->sc_dev, "[HT] enabling HT modes\n");
		ic->ic_htcaps = IEEE80211_HTC_HT	/* HT operation */
			    | IEEE80211_HTC_AMPDU	/* A-MPDU tx/rx */
			    | IEEE80211_HTC_AMSDU	/* A-MSDU tx/rx */
			    | IEEE80211_HTCAP_MAXAMSDU_3839
							/* max A-MSDU length */
			    | IEEE80211_HTCAP_SMPS_OFF;	/* SM power save off */

		/*
		 * Enable short-GI for HT20 only if the hardware
		 * advertises support.
		 * Notably, anything earlier than the AR9287 doesn't.
		 */
		if ((ath_hal_getcapability(ah,
		    HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
		    (wmodes & HAL_MODE_HT20)) {
			device_printf(sc->sc_dev,
			    "[HT] enabling short-GI in 20MHz mode\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
		}

		if (wmodes & HAL_MODE_HT40)
			ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
			    | IEEE80211_HTCAP_SHORTGI40;

		/*
		 * TX/RX streams need to be taken into account when
		 * negotiating which MCS rates it'll receive and
		 * what MCS rates are available for TX.
		 */
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs);
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs);

		ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
		ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);

		ic->ic_txstream = txs;
		ic->ic_rxstream = rxs;

		(void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1,
		    &sc->sc_rts_aggr_limit);
		if (sc->sc_rts_aggr_limit != (64 * 1024))
			device_printf(sc->sc_dev,
			    "[HT] RTS aggregates limited to %d KiB\n",
			    sc->sc_rts_aggr_limit / 1024);

		device_printf(sc->sc_dev,
		    "[HT] %d RX streams; %d TX streams\n", rxs, txs);
	}
#endif

	/*
	 * Check if the hardware requires PCI register serialisation.
	 * Some of the Owl based MACs require this.
	 */
	if (mp_ncpus > 1 &&
	    ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
	     0, NULL) == HAL_OK) {
		sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
		device_printf(sc->sc_dev,
		    "Enabling register serialisation\n");
	}

	/*
	 * Indicate we need the 802.11 header padded to a
	 * 32-bit boundary for 4-address and QoS frames.
	 */
	ic->ic_flags |= IEEE80211_F_DATAPAD;

	/*
	 * Query the hal about antenna support.
	 */
	sc->sc_defant = ath_hal_getdefantenna(ah);

	/*
	 * Not all chips have the VEOL support we want to
	 * use with IBSS beacons; check here for it.
	 */
	sc->sc_hasveol = ath_hal_hasveol(ah);

	/* get mac address from hardware */
	ath_hal_getmac(ah, macaddr);
	if (sc->sc_hasbmask)
		ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);

	/* NB: used to size node table key mapping array */
	ic->ic_max_keyix = sc->sc_keymax;
	/* call MI attach routine. */
	ieee80211_ifattach(ic, macaddr);
	ic->ic_setregdomain = ath_setregdomain;
	ic->ic_getradiocaps = ath_getradiocaps;
	sc->sc_opmode = HAL_M_STA;

	/* override default methods */
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	ic->ic_vap_create = ath_vap_create;
	ic->ic_vap_delete = ath_vap_delete;
	ic->ic_raw_xmit = ath_raw_xmit;
	ic->ic_update_mcast = ath_update_mcast;
	ic->ic_update_promisc = ath_update_promisc;
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = ath_node_cleanup;
	ic->ic_node_getsignal = ath_node_getsignal;
	ic->ic_scan_start = ath_scan_start;
	ic->ic_scan_end = ath_scan_end;
	ic->ic_set_channel = ath_set_channel;
#ifdef	ATH_ENABLE_11N
	/* 802.11n specific - but just override anyway */
	sc->sc_addba_request = ic->ic_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	sc->sc_bar_response = ic->ic_bar_response;
	sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;

	ic->ic_addba_request = ath_addba_request;
	ic->ic_addba_response = ath_addba_response;
	ic->ic_addba_response_timeout = ath_addba_response_timeout;
	ic->ic_addba_stop = ath_addba_stop;
	ic->ic_bar_response = ath_bar_response;

	ic->ic_update_chw = ath_update_chw;
#endif	/* ATH_ENABLE_11N */

	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);
	ath_sysctl_stats_attach(sc);
	ath_sysctl_hal_attach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	ath_announce(sc);
	return 0;
bad2:
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
bad:
	if (ah)
		ath_hal_detach(ah);
	if (ifp != NULL)
		if_free(ifp);
	sc->sc_invalid = 1;
	return error;
}

int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * NB: the order of these is important:
	 * o stop the chip so no more interrupts will fire
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o free the taskqueue which drains any pending tasks
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ath_stop(ifp);
	ieee80211_ifdetach(ifp->if_l2com);
	taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ath_rate_detach(sc->sc_rc);

	ath_dfs_detach(sc);
	ath_desc_free(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */
	if_free(ifp);

	return 0;
}

/*
 * MAC address handling for multiple BSS on the same radio.
 * The first vap uses the MAC address from the EEPROM.  For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
 */
static void
assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && sc->sc_hasbmask) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 8; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		if (i != 0)
			mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	sc->sc_hwbssidmask[0] &= ~mac[0];
	if (i == 0)
		sc->sc_nbssid0++;
}

static void
reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	int i = mac[0] >> 2;
	uint8_t mask;

	if (i != 0 || --sc->sc_nbssid0 == 0) {
		sc->sc_bssidmask &= ~(1<<i);
		/* recalculate bssid mask from remaining addresses */
		mask = 0xff;
		for (i = 1; i < 8; i++)
			if (sc->sc_bssidmask & (1<<i))
				mask &= ~((i<<2)|0x2);
		sc->sc_hwbssidmask[0] |= mask;
	}
}

/*
 * Assign a beacon xmit slot.  We try to space out
 * assignments so when beacons are staggered the
 * traffic coming out of the cab q has maximal time
 * to go out before the next beacon is scheduled.
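 *
 * For example (illustrative only): with ATH_BCBUF = 4 and slot 0
 * already taken, the loop below prefers a slot whose neighbours
 * (mod 4) are both empty - slot 2 - over slots 1 and 3, which sit
 * adjacent to the busy slot.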
 */
static int
assign_bslot(struct ath_softc *sc)
{
	u_int slot, free;

	free = 0;
	for (slot = 0; slot < ATH_BCBUF; slot++)
		if (sc->sc_bslot[slot] == NULL) {
			if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
			    sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
				return slot;
			free = slot;
			/* NB: keep looking for a double slot */
		}
	return free;
}

static struct ieee80211vap *
ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp;
	struct ieee80211vap *vap;
	uint8_t mac[IEEE80211_ADDR_LEN];
	int needbeacon, error;
	enum ieee80211_opmode ic_opmode;

	avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
	    M_80211_VAP, M_WAITOK | M_ZERO);
	needbeacon = 0;
	IEEE80211_ADDR_COPY(mac, mac0);

	ATH_LOCK(sc);
	ic_opmode = opmode;		/* default to opmode of new vap */
	switch (opmode) {
	case IEEE80211_M_STA:
		if (sc->sc_nstavaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev, "only 1 sta vap supported\n");
			goto bad;
		}
		if (sc->sc_nvaps) {
			/*
			 * With multiple vaps we must fall back
			 * to s/w beacon miss handling.
			 */
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		if (flags & IEEE80211_CLONE_NOBEACONS) {
			/*
			 * Station mode w/o beacons is implemented w/ AP mode.
			 */
			ic_opmode = IEEE80211_M_HOSTAP;
		}
		break;
	case IEEE80211_M_IBSS:
		if (sc->sc_nvaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev,
			    "only 1 ibss vap supported\n");
			goto bad;
		}
		needbeacon = 1;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (flags & IEEE80211_CLONE_TDMA) {
			if (sc->sc_nvaps != 0) {
				device_printf(sc->sc_dev,
				    "only 1 tdma vap supported\n");
				goto bad;
			}
			needbeacon = 1;
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		/* fall thru... */
#endif
	case IEEE80211_M_MONITOR:
		if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
			/*
			 * Adopt existing mode.  Adding a monitor or ahdemo
			 * vap to an existing configuration is of dubious
			 * value but should be ok.
			 */
			/* XXX not right for monitor mode */
			ic_opmode = ic->ic_opmode;
		}
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		needbeacon = 1;
		break;
	case IEEE80211_M_WDS:
		if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
			device_printf(sc->sc_dev,
			    "wds not supported in sta mode\n");
			goto bad;
		}
		/*
		 * Silently remove any request for a unique
		 * bssid; WDS vap's always share the local
		 * mac address.
		 */
		flags &= ~IEEE80211_CLONE_BSSID;
		if (sc->sc_nvaps == 0)
			ic_opmode = IEEE80211_M_HOSTAP;
		else
			ic_opmode = ic->ic_opmode;
		break;
	default:
		device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
		goto bad;
	}
	/*
	 * Check that a beacon buffer is available; the code below assumes it.
	 */
	if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
		device_printf(sc->sc_dev, "no beacon buffer available\n");
		goto bad;
	}

	/* STA, AHDEMO? */
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
	}

	vap = &avp->av_vap;
	/* XXX can't hold mutex across if_alloc */
	ATH_UNLOCK(sc);
	error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
	    bssid, mac);
	ATH_LOCK(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: error %d creating vap\n",
		    __func__, error);
		goto bad2;
	}

	/* h/w crypto support */
	vap->iv_key_alloc = ath_key_alloc;
	vap->iv_key_delete = ath_key_delete;
	vap->iv_key_set = ath_key_set;
	vap->iv_key_update_begin = ath_key_update_begin;
	vap->iv_key_update_end = ath_key_update_end;

	/* override various methods */
	avp->av_recv_mgmt = vap->iv_recv_mgmt;
	vap->iv_recv_mgmt = ath_recv_mgmt;
	vap->iv_reset = ath_reset_vap;
	vap->iv_update_beacon = ath_beacon_update;
	avp->av_newstate = vap->iv_newstate;
	vap->iv_newstate = ath_newstate;
	avp->av_bmiss = vap->iv_bmiss;
	vap->iv_bmiss = ath_bmiss_vap;

	/* Set default parameters */

	/*
	 * Anything earlier than some of the AR9300 series MACs
	 * doesn't support a smaller MPDU density.
	 */
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
	/*
	 * All NICs can handle the maximum size, however
	 * AR5416 based MACs can only TX aggregates w/ RTS
	 * protection when the total aggregate size is <= 8k.
	 * However, for now that's enforced by the TX path.
	 */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;

	avp->av_bslot = -1;
	if (needbeacon) {
		/*
		 * Allocate beacon state and setup the q for buffered
		 * multicast frames.  We know a beacon buffer is
		 * available because we checked above.
		 */
		avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
		TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
		if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
			/*
			 * Assign the vap to a beacon xmit slot.  As above
			 * this cannot fail to find a free one.
			 */
			avp->av_bslot = assign_bslot(sc);
			KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
			    ("beacon slot %u not empty", avp->av_bslot));
			sc->sc_bslot[avp->av_bslot] = vap;
			sc->sc_nbcnvaps++;
		}
		if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
			/*
			 * Multiple vaps are to transmit beacons and we
			 * have h/w support for TSF adjusting; enable
			 * use of staggered beacons.
			 */
			sc->sc_stagbeacons = 1;
		}
		ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
	}

	ic->ic_opmode = ic_opmode;
	if (opmode != IEEE80211_M_WDS) {
		sc->sc_nvaps++;
		if (opmode == IEEE80211_M_STA)
			sc->sc_nstavaps++;
		if (opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps++;
	}
	switch (ic_opmode) {
	case IEEE80211_M_IBSS:
		sc->sc_opmode = HAL_M_IBSS;
		break;
	case IEEE80211_M_STA:
		sc->sc_opmode = HAL_M_STA;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (vap->iv_caps & IEEE80211_C_TDMA) {
			sc->sc_tdma = 1;
			/* NB: disable tsf adjust */
			sc->sc_stagbeacons = 0;
		}
		/*
		 * NB: adhoc demo mode is a pseudo mode; to the hal it's
		 * just ap mode.
		 */
		/* fall thru... */
#endif
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		sc->sc_opmode = HAL_M_HOSTAP;
		break;
	case IEEE80211_M_MONITOR:
		sc->sc_opmode = HAL_M_MONITOR;
		break;
	default:
		/* XXX should not happen */
		break;
	}
	if (sc->sc_hastsfadd) {
		/*
		 * Configure whether or not TSF adjust should be done.
		 */
		ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
	}
	if (flags & IEEE80211_CLONE_NOBEACONS) {
		/*
		 * Enable s/w beacon miss handling.
		 */
		sc->sc_swbmiss = 1;
	}
	ATH_UNLOCK(sc);

	/* complete setup */
	ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
	return vap;
bad2:
	reclaim_address(sc, mac);
	ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
bad:
	free(avp, M_80211_VAP);
	ATH_UNLOCK(sc);
	return NULL;
}

static void
ath_vap_delete(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp = ATH_VAP(vap);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Quiesce the hardware while we remove the vap.  In
		 * particular we need to reclaim all references to
		 * the vap state by any frames pending on the tx queues.
		 */
		ath_hal_intrset(ah, 0);		/* disable interrupts */
		ath_draintxq(sc, ATH_RESET_DEFAULT);	/* stop hw xmit side */
		/* XXX Do all frames from all vaps/nodes need draining here? */
		ath_stoprecv(sc, 1);		/* stop recv side */
	}

	ieee80211_vap_detach(vap);

	/*
	 * XXX Danger Will Robinson! Danger!
	 *
	 * Because ieee80211_vap_detach() can queue a frame (the station
	 * disassociate message?) after we've drained the TXQ and
	 * flushed the software TXQ, we will end up with a frame queued
	 * to a node whose vap is about to be freed.
	 *
	 * To work around this, flush the hardware/software again.
	 * This may be racy - the ath task may be running and the packet
	 * may be being scheduled between sw->hw txq. Tsk.
	 *
	 * TODO: figure out why a new node gets allocated somewhere around
	 * here (after the ath_tx_swq() call; and after an ath_stop_locked()
	 * call!)
	 */

	ath_draintxq(sc, ATH_RESET_DEFAULT);

	ATH_LOCK(sc);
	/*
	 * Reclaim beacon state.  Note this must be done before
	 * the vap instance is reclaimed as we may have a reference
	 * to it in the buffer for the beacon frame.
	 */
	if (avp->av_bcbuf != NULL) {
		if (avp->av_bslot != -1) {
			sc->sc_bslot[avp->av_bslot] = NULL;
			sc->sc_nbcnvaps--;
		}
		ath_beacon_return(sc, avp->av_bcbuf);
		avp->av_bcbuf = NULL;
		if (sc->sc_nbcnvaps == 0) {
			sc->sc_stagbeacons = 0;
			if (sc->sc_hastsfadd)
				ath_hal_settsfadjust(sc->sc_ah, 0);
		}
		/*
		 * Reclaim any pending mcast frames for the vap.
		 */
		ath_tx_draintxq(sc, &avp->av_mcastq);
		ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);
	}
	/*
	 * Update bookkeeping.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		sc->sc_nstavaps--;
		if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
			sc->sc_swbmiss = 0;
	} else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	    vap->iv_opmode == IEEE80211_M_MBSS) {
		reclaim_address(sc, vap->iv_myaddr);
		ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
		if (vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps--;
	}
	if (vap->iv_opmode != IEEE80211_M_WDS)
		sc->sc_nvaps--;
#ifdef IEEE80211_SUPPORT_TDMA
	/* TDMA operation ceases when the last vap is destroyed */
	if (sc->sc_tdma && sc->sc_nvaps == 0) {
		sc->sc_tdma = 0;
		sc->sc_swbmiss = 0;
	}
#endif
	free(avp, M_80211_VAP);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Restart rx+tx machines if still running (RUNNING will
		 * be reset if we just destroyed the last vap).
		 */
		if (ath_startrecv(sc) != 0)
			if_printf(ifp, "%s: unable to restart recv logic\n",
			    __func__);
		if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma)
				ath_tdma_config(sc, NULL);
			else
#endif
				ath_beacon_config(sc, NULL);
		}
		ath_hal_intrset(ah, sc->sc_imask);
	}
	ATH_UNLOCK(sc);
}

void
ath_suspend(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;

	ieee80211_suspend_all(ic);
	/*
	 * NB: don't worry about putting the chip in low power
	 * mode; pci will power off our socket on suspend and
	 * CardBus detaches the device.
	 */

	/*
	 * XXX ensure none of the taskqueues are running
	 * XXX ensure sc_invalid is 1
	 * XXX ensure the calibration callout is disabled
	 */

	/* Disable the PCIe PHY, complete with workarounds */
	ath_hal_enablepcie(sc->sc_ah, 1, 1);
}

/*
 * Reset the key cache since some parts do not reset the
 * contents on resume.  First we clear all entries, then
 * re-load keys that the 802.11 layer assumes are setup
 * in h/w.
 */
static void
ath_reset_keycache(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	int i;

	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);
	ieee80211_crypto_reload_keys(ic);
}

void
ath_resume(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/* Re-enable PCIe, re-enable the PCIe bus */
	ath_hal_enablepcie(ah, 0, 0);

	/*
	 * Must reset the chip before we reload the
	 * keycache as we were powered down on suspend.
	 */
	ath_hal_reset(ah, sc->sc_opmode,
	    sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
	    AH_FALSE, &status);
	ath_reset_keycache(sc);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/* Restore the LED configuration */
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);

	if (sc->sc_resume_up)
		ieee80211_resume_all(ic);

	/* XXX beacons ? */
}

void
ath_shutdown(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	ath_stop(ifp);
	/* NB: no point powering down chip as we're about to reboot */
}

/*
 * Interrupt handler.  Most of the actual processing is deferred.
 */
void
ath_intr(void *arg)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	HAL_INT status = 0;
	uint32_t txqs;

	/*
	 * If we're inside a reset path, just print a warning and
	 * clear the ISR.  The reset routine will finish it for us.
	 */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt) {
		HAL_INT status;
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		DPRINTF(sc, ATH_DEBUG_ANY,
		    "%s: in reset, ignoring: status=0x%x\n",
		    __func__, status);
		ATH_PCU_UNLOCK(sc);
		return;
	}

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	if (!ath_hal_intrpend(ah)) {		/* shared irq, not for us */
		ATH_PCU_UNLOCK(sc);
		return;
	}

	if ((ifp->if_flags & IFF_UP) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		HAL_INT status;

		DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
			__func__, ifp->if_flags);
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		ATH_PCU_UNLOCK(sc);
		return;
	}

	/*
	 * Figure out the reason(s) for the interrupt.  Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to insure we only process bits we requested.
	 */
	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
	CTR1(ATH_KTR_INTR, "ath_intr: mask=0x%.8x", status);
#ifdef	ATH_KTR_INTR_DEBUG
	CTR5(ATH_KTR_INTR,
	    "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
	    ah->ah_intrstate[0],
	    ah->ah_intrstate[1],
	    ah->ah_intrstate[2],
	    ah->ah_intrstate[3],
	    ah->ah_intrstate[6]);
#endif

	/* Squirrel away SYNC interrupt debugging */
	if (ah->ah_syncstate != 0) {
		int i;
		for (i = 0; i < 32; i++)
			if (ah->ah_syncstate & (1 << i))
				sc->sc_intr_stats.sync_intr[i]++;
	}

	status &= sc->sc_imask;			/* discard unasked for bits */

	/* Short-circuit un-handled interrupts */
	if (status == 0x0) {
		ATH_PCU_UNLOCK(sc);
		return;
	}

	/*
	 * Take a note that we're inside the interrupt handler, so
	 * the reset routines know to wait.
	 */
	sc->sc_intr_cnt++;
	ATH_PCU_UNLOCK(sc);

	/*
	 * Handle the interrupt.  We won't run concurrent with the reset
	 * or channel change routines as they'll wait for sc_intr_cnt
	 * to be 0 before continuing.
	 */
	if (status & HAL_INT_FATAL) {
		sc->sc_stats.ast_hardware++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
	} else {
		if (status & HAL_INT_SWBA) {
			/*
			 * Software beacon alert--time to send a beacon.
			 * Handle beacon transmission directly; deferring
			 * this is too slow to meet timing constraints
			 * under load.
			 */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma) {
				if (sc->sc_tdmaswba == 0) {
					struct ieee80211com *ic = ifp->if_l2com;
					struct ieee80211vap *vap =
					    TAILQ_FIRST(&ic->ic_vaps);
					ath_tdma_beacon_send(sc, vap);
					sc->sc_tdmaswba =
					    vap->iv_tdma->tdma_bintval;
				} else
					sc->sc_tdmaswba--;
			} else
#endif
			{
				ath_beacon_proc(sc, 0);
#ifdef IEEE80211_SUPPORT_SUPERG
				/*
				 * Schedule the rx taskq in case there's no
				 * traffic so any frames held on the staging
				 * queue are aged and potentially flushed.
				 */
				taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
#endif
			}
		}
		if (status & HAL_INT_RXEOL) {
			int imask;
			CTR0(ATH_KTR_ERR, "ath_intr: RXEOL");
			ATH_PCU_LOCK(sc);
			/*
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			sc->sc_stats.ast_rxeol++;
			/*
			 * Disable RXEOL/RXORN - prevent an interrupt
			 * storm until the PCU logic can be reset.
			 * In case the interface is reset some other
			 * way before "sc_kickpcu" is called, don't
			 * modify sc_imask - that way if it is reset
			 * by a call to ath_reset() somehow, the
			 * interrupt mask will be correctly reprogrammed.
			 */
			imask = sc->sc_imask;
			imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
			ath_hal_intrset(ah, imask);
			/*
			 * Only blank sc_rxlink if we've not yet kicked
			 * the PCU.
			 *
			 * This isn't entirely correct - the correct solution
			 * would be to have a PCU lock and engage that for
			 * the duration of the PCU fiddling; which would include
			 * running the RX process. Otherwise we could end up
			 * messing up the RX descriptor chain and making the
			 * RX desc list much shorter.
			 */
			if (! sc->sc_kickpcu)
				sc->sc_rxlink = NULL;
			sc->sc_kickpcu = 1;
			/*
			 * Enqueue an RX proc to handle whatever
			 * is in the RX queue.
			 * This will then kick the PCU.
			 */
			taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
			ATH_PCU_UNLOCK(sc);
		}
		if (status & HAL_INT_TXURN) {
			sc->sc_stats.ast_txurn++;
			/* bump tx trigger level */
			ath_hal_updatetxtriglevel(ah, AH_TRUE);
		}
		if (status & HAL_INT_RX) {
			sc->sc_stats.ast_rx_intr++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
		}
		if (status & HAL_INT_TX) {
			sc->sc_stats.ast_tx_intr++;
			/*
			 * Grab all the currently set bits in the HAL txq
			 * bitmap and blank them.  This is the only place
			 * we should be doing this.
			 */
			ATH_PCU_LOCK(sc);
			txqs = 0xffffffff;
			ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
			sc->sc_txq_active |= txqs;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
			ATH_PCU_UNLOCK(sc);
		}
		if (status & HAL_INT_BMISS) {
			sc->sc_stats.ast_bmiss++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
		}
		if (status & HAL_INT_GTT)
			sc->sc_stats.ast_tx_timeout++;
		if (status & HAL_INT_CST)
			sc->sc_stats.ast_tx_cst++;
		if (status & HAL_INT_MIB) {
			sc->sc_stats.ast_mib++;
			ATH_PCU_LOCK(sc);
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to fire.
			 */
			ath_hal_intrset(ah, 0);
			/*
			 * Let the hal handle the event.  We assume it will
			 * clear whatever condition caused the interrupt.
			 */
			ath_hal_mibevent(ah, &sc->sc_halstats);
			/*
			 * Don't reset the interrupt if we've just
			 * kicked the PCU, or we may get a nested
			 * RXEOL before the rxproc has had a chance
			 * to run.
			 */
			if (sc->sc_kickpcu == 0)
				ath_hal_intrset(ah, sc->sc_imask);
			ATH_PCU_UNLOCK(sc);
		}
		if (status & HAL_INT_RXORN) {
			/* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
			CTR0(ATH_KTR_ERR, "ath_intr: RXORN");
			sc->sc_stats.ast_rxorn++;
		}
	}
	ATH_PCU_LOCK(sc);
	sc->sc_intr_cnt--;
	ATH_PCU_UNLOCK(sc);
}

static void
ath_fatal_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t *state;
	u_int32_t len;
	void *sp;

	if_printf(ifp, "hardware error; resetting\n");
	/*
	 * Fatal errors are unrecoverable.  Typically these
	 * are caused by DMA errors.  Collect h/w state from
	 * the hal so we can diagnose what's going on.
	 */
	if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
		KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
		state = sp;
		if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
		    state[0], state[1], state[2], state[3],
		    state[4], state[5]);
	}
	ath_reset(ifp, ATH_RESET_NOLOSS);
}

static void
ath_bmiss_vap(struct ieee80211vap *vap)
{
	/*
	 * Workaround phantom bmiss interrupts by sanity-checking
	 * the time of our last rx'd frame.  If it is within the
	 * beacon miss interval then ignore the interrupt.  If it's
	 * truly a bmiss we'll get another interrupt soon and that'll
	 * be dispatched up for processing.  Note this applies only
	 * for h/w beacon miss events.
	 */
	if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
		struct ifnet *ifp = vap->iv_ic->ic_ifp;
		struct ath_softc *sc = ifp->if_softc;
		u_int64_t lastrx = sc->sc_lastrx;
		u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
		/* XXX should take a locked ref to iv_bss */
		u_int bmisstimeout =
			vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
		    __func__, (unsigned long long) tsf,
		    (unsigned long long)(tsf - lastrx),
		    (unsigned long long) lastrx, bmisstimeout);

		if (tsf - lastrx <= bmisstimeout) {
			sc->sc_stats.ast_bmiss_phantom++;
			return;
		}
	}
	ATH_VAP(vap)->av_bmiss(vap);
}

static int
ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
{
	uint32_t rsize;
	void *sp;

	if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS,
	    &mask, sizeof(mask), &sp, &rsize))
		return 0;
	KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
	*hangs = *(uint32_t *)sp;
	return 1;
}

static void
ath_bmiss_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t hangs;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);

	if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
		if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs);
		ath_reset(ifp, ATH_RESET_NOLOSS);
	} else
		ieee80211_beacon_miss(ifp->if_l2com);
}

/*
 * Handle TKIP MIC setup to deal with hardware that doesn't do MIC
 * calcs together with WME.  If necessary disable the crypto
 * hardware and mark the 802.11 state so keys will be setup
 * with the MIC work done in software.
 */
static void
ath_settkipmic(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
		if (ic->ic_flags & IEEE80211_F_WME) {
			ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
			ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
		} else {
			ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		}
	}
}

static void
ath_init(void *arg)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	ATH_LOCK(sc);
	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop_locked(ifp);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ath_settkipmic(sc);
	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE,
	    &status)) {
		if_printf(ifp, "unable to reset hardware; hal status %u\n",
			status);
		ATH_UNLOCK(sc);
		return;
	}
	ath_chan_change(sc, ic->ic_curchan);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/*
	 * Likewise this is set during reset so update
	 * state cached in the driver.
	 */
	sc->sc_diversity = ath_hal_getdiversity(ah);
	sc->sc_lastlongcal = 0;
	sc->sc_resetcal = 1;
	sc->sc_lastcalreset = 0;
	sc->sc_lastani = 0;
	sc->sc_lastshortcal = 0;
	sc->sc_doresetcal = AH_FALSE;
	/*
	 * Beacon timers were cleared here; give ath_newstate()
	 * a hint that the beacon timers should be poked when
	 * things transition to the RUN state.
	 */
	sc->sc_beacons = 0;

	/*
	 * Initial aggregation settings.
	 */
	sc->sc_hwq_limit = ATH_AGGR_MIN_QDEPTH;
	sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW;
	sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH;

	/*
	 * Setup the hardware after reset: the key cache
	 * is filled as needed and the receive engine is
	 * set going.  Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		ATH_UNLOCK(sc);
		return;
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = HAL_INT_RX | HAL_INT_TX
		  | HAL_INT_RXEOL | HAL_INT_RXORN
		  | HAL_INT_FATAL | HAL_INT_GLOBAL;
	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
		sc->sc_imask |= HAL_INT_MIB;

	/* Enable global TX timeout and carrier sense timeout if available */
	if (ath_hal_gtxto_supported(ah))
		sc->sc_imask |= HAL_INT_GTT;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n",
		__func__, sc->sc_imask);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
	ath_hal_intrset(ah, sc->sc_imask);

	ATH_UNLOCK(sc);

#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->start(sc->sc_tx99);
	else
#endif
	ieee80211_start_all(ic);		/* start all vap's */
}

static void
ath_stop_locked(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	ATH_LOCK_ASSERT(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Shutdown the hardware and driver:
		 *    reset 802.11 state machine
		 *    turn off timers
		 *    disable interrupts
		 *    turn off the radio
		 *    clear transmit machinery
		 *    clear receive machinery
		 *    drain and release tx queues
		 *    reclaim beacon resources
		 *    power down hardware
		 *
		 * Note that some of this work is not possible if the
		 * hardware is gone (invalid).
		 */
1905 */ 1906#ifdef ATH_TX99_DIAG 1907 if (sc->sc_tx99 != NULL) 1908 sc->sc_tx99->stop(sc->sc_tx99); 1909#endif 1910 callout_stop(&sc->sc_wd_ch); 1911 sc->sc_wd_timer = 0; 1912 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1913 if (!sc->sc_invalid) { 1914 if (sc->sc_softled) { 1915 callout_stop(&sc->sc_ledtimer); 1916 ath_hal_gpioset(ah, sc->sc_ledpin, 1917 !sc->sc_ledon); 1918 sc->sc_blinking = 0; 1919 } 1920 ath_hal_intrset(ah, 0); 1921 } 1922 ath_draintxq(sc, ATH_RESET_DEFAULT); 1923 if (!sc->sc_invalid) { 1924 ath_stoprecv(sc, 1); 1925 ath_hal_phydisable(ah); 1926 } else 1927 sc->sc_rxlink = NULL; 1928 ath_beacon_free(sc); /* XXX not needed */ 1929 } 1930} 1931 1932#define MAX_TXRX_ITERATIONS 1000 1933static void 1934ath_txrx_stop_locked(struct ath_softc *sc) 1935{ 1936 int i = MAX_TXRX_ITERATIONS; 1937 1938 ATH_UNLOCK_ASSERT(sc); 1939 ATH_PCU_LOCK_ASSERT(sc); 1940 1941 /* 1942 * Sleep until all the pending operations have completed. 1943 * 1944 * The caller must ensure that sc_inreset_cnt has been incremented 1945 * or the pending operations may continue being queued. 1946 */ 1947 while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt || 1948 sc->sc_txstart_cnt || sc->sc_intr_cnt) { 1949 if (i <= 0) 1950 break; 1951 msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 1); 1952 i--; 1953 } 1954 1955 if (i <= 0) 1956 device_printf(sc->sc_dev, 1957 "%s: didn't finish after %d iterations\n", 1958 __func__, MAX_TXRX_ITERATIONS); 1959} 1960#undef MAX_TXRX_ITERATIONS 1961 1962#if 0 1963static void 1964ath_txrx_stop(struct ath_softc *sc) 1965{ 1966 ATH_UNLOCK_ASSERT(sc); 1967 ATH_PCU_UNLOCK_ASSERT(sc); 1968 1969 ATH_PCU_LOCK(sc); 1970 ath_txrx_stop_locked(sc); 1971 ATH_PCU_UNLOCK(sc); 1972} 1973#endif 1974 1975static void 1976ath_txrx_start(struct ath_softc *sc) 1977{ 1978 1979 taskqueue_unblock(sc->sc_tq); 1980} 1981 1982/* 1983 * Grab the reset lock, and wait around until no one else 1984 * is trying to do anything with it. 1985 * 1986 * This is totally horrible but we can't hold this lock for 1987 * long enough to do TX/RX or we end up with net80211/ip stack 1988 * LORs and eventual deadlock. 1989 * 1990 * "dowait" signals whether to spin, waiting for the reset 1991 * lock count to reach 0. This should (for now) only be used 1992 * during the reset path, as the rest of the code may not 1993 * be locking-reentrant enough to behave correctly. 1994 * 1995 * Another, cleaner way should be found to serialise all of 1996 * these operations. 1997 */ 1998#define MAX_RESET_ITERATIONS 10 1999static int 2000ath_reset_grablock(struct ath_softc *sc, int dowait) 2001{ 2002 int w = 0; 2003 int i = MAX_RESET_ITERATIONS; 2004 2005 ATH_PCU_LOCK_ASSERT(sc); 2006 do { 2007 if (sc->sc_inreset_cnt == 0) { 2008 w = 1; 2009 break; 2010 } 2011 if (dowait == 0) { 2012 w = 0; 2013 break; 2014 } 2015 ATH_PCU_UNLOCK(sc); 2016 pause("ath_reset_grablock", 1); 2017 i--; 2018 ATH_PCU_LOCK(sc); 2019 } while (i > 0); 2020 2021 /* 2022 * We always increment the refcounter, regardless 2023 * of whether we succeeded in getting it in an exclusive 2024 * way.
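 * The matching decrement of sc_inreset_cnt is done by the caller, under ATH_PCU_LOCK, once its reset or channel change has finished; see ath_reset() and ath_chan_set().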
2025 */ 2026 sc->sc_inreset_cnt++; 2027 2028 if (i <= 0) 2029 device_printf(sc->sc_dev, 2030 "%s: didn't finish after %d iterations\n", 2031 __func__, MAX_RESET_ITERATIONS); 2032 2033 if (w == 0) 2034 device_printf(sc->sc_dev, 2035 "%s: warning, recursive reset path!\n", 2036 __func__); 2037 2038 return w; 2039} 2040#undef MAX_RESET_ITERATIONS 2041 2042/* 2043 * XXX TODO: write ath_reset_releaselock 2044 */ 2045 2046static void 2047ath_stop(struct ifnet *ifp) 2048{ 2049 struct ath_softc *sc = ifp->if_softc; 2050 2051 ATH_LOCK(sc); 2052 ath_stop_locked(ifp); 2053 ATH_UNLOCK(sc); 2054} 2055 2056/* 2057 * Reset the hardware w/o losing operational state. This is 2058 * basically a more efficient way of doing ath_stop, ath_init, 2059 * followed by state transitions to the current 802.11 2060 * operational state. Used to recover from various errors and 2061 * to reset or reload hardware state. 2062 */ 2063int 2064ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type) 2065{ 2066 struct ath_softc *sc = ifp->if_softc; 2067 struct ieee80211com *ic = ifp->if_l2com; 2068 struct ath_hal *ah = sc->sc_ah; 2069 HAL_STATUS status; 2070 int i; 2071 2072 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); 2073 2074 /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */ 2075 ATH_PCU_UNLOCK_ASSERT(sc); 2076 ATH_UNLOCK_ASSERT(sc); 2077 2078 /* Try to stop any further TX/RX from occurring */ 2079 taskqueue_block(sc->sc_tq); 2080 2081 ATH_PCU_LOCK(sc); 2082 ath_hal_intrset(ah, 0); /* disable interrupts */ 2083 ath_txrx_stop_locked(sc); /* Ensure TX/RX is stopped */ 2084 if (ath_reset_grablock(sc, 1) == 0) { 2085 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", 2086 __func__); 2087 } 2088 ATH_PCU_UNLOCK(sc); 2089 2090 /* 2091 * We should now wait for pending TX/RX to complete 2092 * and block future ones from occurring. This needs to be 2093 * done before the TX queue is drained. 2094 */ 2095 ath_draintxq(sc, reset_type); /* stop xmit side */ 2096 2097 /* 2098 * Regardless of whether we're doing a no-loss flush or 2099 * not, stop the PCU and handle what's in the RX queue. 2100 * That way frames which shouldn't be dropped aren't. 2101 */ 2102 ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS)); 2103 ath_rx_proc(sc, 0); 2104 2105 ath_settkipmic(sc); /* configure TKIP MIC handling */ 2106 /* NB: indicate channel change so we do a full reset */ 2107 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status)) 2108 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n", 2109 __func__, status); 2110 sc->sc_diversity = ath_hal_getdiversity(ah); 2111 2112 /* Let DFS at it in case it's a DFS channel */ 2113 ath_dfs_radar_enable(sc, ic->ic_curchan); 2114 2115 if (ath_startrecv(sc) != 0) /* restart recv */ 2116 if_printf(ifp, "%s: unable to start recv logic\n", __func__); 2117 /* 2118 * We may be doing a reset in response to an ioctl 2119 * that changes the channel so update any state that 2120 * might change as a result. 2121 */ 2122 ath_chan_change(sc, ic->ic_curchan); 2123 if (sc->sc_beacons) { /* restart beacons */ 2124#ifdef IEEE80211_SUPPORT_TDMA 2125 if (sc->sc_tdma) 2126 ath_tdma_config(sc, NULL); 2127 else 2128#endif 2129 ath_beacon_config(sc, NULL); 2130 } 2131 2132 /* 2133 * Release the reset lock and re-enable interrupts here. 2134 * If an interrupt was being processed in ath_intr(), 2135 * it would disable interrupts at this point.
So we have 2136 * to atomically enable interrupts and decrement the 2137 * reset counter - this way ath_intr() doesn't end up 2138 * disabling interrupts without a corresponding enable 2139 * in the reset or channel change path. 2140 */ 2141 ATH_PCU_LOCK(sc); 2142 sc->sc_inreset_cnt--; 2143 /* XXX only do this if sc_inreset_cnt == 0? */ 2144 ath_hal_intrset(ah, sc->sc_imask); 2145 ATH_PCU_UNLOCK(sc); 2146 2147 /* 2148 * TX and RX can be started here. If it were started with 2149 * sc_inreset_cnt > 0, the TX and RX path would abort. 2150 * Thus if this is a nested call through the reset or 2151 * channel change code, TX completion will occur but 2152 * RX completion and ath_start / ath_tx_start will not 2153 * run. 2154 */ 2155 2156 /* Restart TX/RX as needed */ 2157 ath_txrx_start(sc); 2158 2159 /* XXX Restart TX completion and pending TX */ 2160 if (reset_type == ATH_RESET_NOLOSS) { 2161 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 2162 if (ATH_TXQ_SETUP(sc, i)) { 2163 ATH_TXQ_LOCK(&sc->sc_txq[i]); 2164 ath_txq_restart_dma(sc, &sc->sc_txq[i]); 2165 ath_txq_sched(sc, &sc->sc_txq[i]); 2166 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 2167 } 2168 } 2169 } 2170 2171 /* 2172 * This may have been set during an ath_start() call which 2173 * set this once it detected a concurrent TX was going on. 2174 * So, clear it. 2175 */ 2176 IF_LOCK(&ifp->if_snd); 2177 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2178 IF_UNLOCK(&ifp->if_snd); 2179 2180 /* Handle any frames in the TX queue */ 2181 /* 2182 * XXX should this be done by the caller, rather than 2183 * ath_reset() ? 2184 */ 2185 ath_start(ifp); /* restart xmit */ 2186 return 0; 2187} 2188 2189static int 2190ath_reset_vap(struct ieee80211vap *vap, u_long cmd) 2191{ 2192 struct ieee80211com *ic = vap->iv_ic; 2193 struct ifnet *ifp = ic->ic_ifp; 2194 struct ath_softc *sc = ifp->if_softc; 2195 struct ath_hal *ah = sc->sc_ah; 2196 2197 switch (cmd) { 2198 case IEEE80211_IOC_TXPOWER: 2199 /* 2200 * If per-packet TPC is enabled, then we have nothing 2201 * to do; otherwise we need to force the global limit. 2202 * All this can happen directly; no need to reset. 2203 */ 2204 if (!ath_hal_gettpc(ah)) 2205 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit); 2206 return 0; 2207 } 2208 /* XXX? Full or NOLOSS? */ 2209 return ath_reset(ifp, ATH_RESET_FULL); 2210} 2211 2212struct ath_buf * 2213_ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype) 2214{ 2215 struct ath_buf *bf; 2216 2217 ATH_TXBUF_LOCK_ASSERT(sc); 2218 2219 if (btype == ATH_BUFTYPE_MGMT) 2220 bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt); 2221 else 2222 bf = TAILQ_FIRST(&sc->sc_txbuf); 2223 2224 if (bf == NULL) { 2225 sc->sc_stats.ast_tx_getnobuf++; 2226 } else { 2227 if (bf->bf_flags & ATH_BUF_BUSY) { 2228 sc->sc_stats.ast_tx_getbusybuf++; 2229 bf = NULL; 2230 } 2231 } 2232 2233 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) { 2234 if (btype == ATH_BUFTYPE_MGMT) 2235 TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list); 2236 else { 2237 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list); 2238 sc->sc_txbuf_cnt--; 2239 2240 /* 2241 * This shouldn't happen; however, just to be 2242 * safe, print a warning and fudge the txbuf 2243 * count. 2244 */ 2245 if (sc->sc_txbuf_cnt < 0) { 2246 device_printf(sc->sc_dev, 2247 "%s: sc_txbuf_cnt < 0?\n", 2248 __func__); 2249 sc->sc_txbuf_cnt = 0; 2250 } 2251 } 2252 } else 2253 bf = NULL; 2254 2255 if (bf == NULL) { 2256 /* XXX should check which list, mgmt or otherwise */ 2257 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__, 2258 TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
2259 "out of xmit buffers" : "xmit buffer busy"); 2260 return NULL; 2261 } 2262 2263 /* XXX TODO: should do this at buffer list initialisation */ 2264 /* XXX (then, ensure the buffer has the right flag set) */ 2265 if (btype == ATH_BUFTYPE_MGMT) 2266 bf->bf_flags |= ATH_BUF_MGMT; 2267 else 2268 bf->bf_flags &= (~ATH_BUF_MGMT); 2269 2270 /* Valid bf here; clear some basic fields */ 2271 bf->bf_next = NULL; /* XXX just to be sure */ 2272 bf->bf_last = NULL; /* XXX again, just to be sure */ 2273 bf->bf_comp = NULL; /* XXX again, just to be sure */ 2274 bzero(&bf->bf_state, sizeof(bf->bf_state)); 2275 2276 return bf; 2277} 2278 2279/* 2280 * When retrying a software frame, buffers marked ATH_BUF_BUSY 2281 * can't be thrown back on the queue as they could still be 2282 * in use by the hardware. 2283 * 2284 * This duplicates the buffer, or returns NULL. 2285 * 2286 * The descriptor is also copied but the link pointers and 2287 * the DMA segments aren't copied; this frame should thus 2288 * be again passed through the descriptor setup/chain routines 2289 * so the link is correct. 2290 * 2291 * The caller must free the buffer using ath_freebuf(). 2292 * 2293 * XXX TODO: this call shouldn't fail as it'll cause packet loss 2294 * XXX in the TX pathway when retries are needed. 2295 * XXX Figure out how to keep some buffers free, or factor the 2296 * XXX number of busy buffers into the xmit path (ath_start()) 2297 * XXX so we don't over-commit. 2298 */ 2299struct ath_buf * 2300ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf) 2301{ 2302 struct ath_buf *tbf; 2303 2304 tbf = ath_getbuf(sc, 2305 (bf->bf_flags & ATH_BUF_MGMT) ? 2306 ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL); 2307 if (tbf == NULL) 2308 return NULL; /* XXX failure? Why? */ 2309 2310 /* Copy basics */ 2311 tbf->bf_next = NULL; 2312 tbf->bf_nseg = bf->bf_nseg; 2313 tbf->bf_flags = bf->bf_flags & ~ATH_BUF_BUSY; 2314 tbf->bf_status = bf->bf_status; 2315 tbf->bf_m = bf->bf_m; 2316 tbf->bf_node = bf->bf_node; 2317 /* will be setup by the chain/setup function */ 2318 tbf->bf_lastds = NULL; 2319 /* for now, last == self */ 2320 tbf->bf_last = tbf; 2321 tbf->bf_comp = bf->bf_comp; 2322 2323 /* NOTE: DMA segments will be setup by the setup/chain functions */ 2324 2325 /* The caller has to re-init the descriptor + links */ 2326 2327 /* Copy state */ 2328 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state)); 2329 2330 return tbf; 2331} 2332 2333struct ath_buf * 2334ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype) 2335{ 2336 struct ath_buf *bf; 2337 2338 ATH_TXBUF_LOCK(sc); 2339 bf = _ath_getbuf_locked(sc, btype); 2340 /* 2341 * If a mgmt buffer was requested but we're out of those, 2342 * try requesting a normal one. 2343 */ 2344 if (bf == NULL && btype == ATH_BUFTYPE_MGMT) 2345 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); 2346 ATH_TXBUF_UNLOCK(sc); 2347 if (bf == NULL) { 2348 struct ifnet *ifp = sc->sc_ifp; 2349 2350 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__); 2351 sc->sc_stats.ast_tx_qstop++; 2352 IF_LOCK(&ifp->if_snd); 2353 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2354 IF_UNLOCK(&ifp->if_snd); 2355 } 2356 return bf; 2357} 2358 2359void 2360ath_start(struct ifnet *ifp) 2361{ 2362 struct ath_softc *sc = ifp->if_softc; 2363 struct ieee80211_node *ni; 2364 struct ath_buf *bf; 2365 struct mbuf *m, *next; 2366 ath_bufhead frags; 2367 2368 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) 2369 return; 2370 2371 /* XXX is it ok to hold the ATH_LOCK here? 
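 * XXX note: sc_txstart_cnt is bumped under ATH_PCU_LOCK below, so a concurrent reset will wait in ath_txrx_stop_locked() until this TX pass has dropped its reference.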
*/ 2372 ATH_PCU_LOCK(sc); 2373 if (sc->sc_inreset_cnt > 0) { 2374 device_printf(sc->sc_dev, 2375 "%s: sc_inreset_cnt > 0; bailing\n", __func__); 2376 ATH_PCU_UNLOCK(sc); 2377 IF_LOCK(&ifp->if_snd); 2378 sc->sc_stats.ast_tx_qstop++; 2379 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2380 IF_UNLOCK(&ifp->if_snd); 2381 return; 2382 } 2383 sc->sc_txstart_cnt++; 2384 ATH_PCU_UNLOCK(sc); 2385 2386 for (;;) { 2387 ATH_TXBUF_LOCK(sc); 2388 if (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree) { 2389 /* XXX increment counter? */ 2390 ATH_TXBUF_UNLOCK(sc); 2391 IF_LOCK(&ifp->if_snd); 2392 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2393 IF_UNLOCK(&ifp->if_snd); 2394 break; 2395 } 2396 ATH_TXBUF_UNLOCK(sc); 2397 2398 /* 2399 * Grab a TX buffer and associated resources. 2400 */ 2401 bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL); 2402 if (bf == NULL) 2403 break; 2404 2405 IFQ_DEQUEUE(&ifp->if_snd, m); 2406 if (m == NULL) { 2407 ATH_TXBUF_LOCK(sc); 2408 ath_returnbuf_head(sc, bf); 2409 ATH_TXBUF_UNLOCK(sc); 2410 break; 2411 } 2412 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; 2413 /* 2414 * Check for fragmentation. If this frame 2415 * has been broken up verify we have enough 2416 * buffers to send all the fragments so all 2417 * go out or none... 2418 */ 2419 TAILQ_INIT(&frags); 2420 if ((m->m_flags & M_FRAG) && 2421 !ath_txfrag_setup(sc, &frags, m, ni)) { 2422 DPRINTF(sc, ATH_DEBUG_XMIT, 2423 "%s: out of txfrag buffers\n", __func__); 2424 sc->sc_stats.ast_tx_nofrag++; 2425 ifp->if_oerrors++; 2426 ath_freetx(m); 2427 goto bad; 2428 } 2429 ifp->if_opackets++; 2430 nextfrag: 2431 /* 2432 * Pass the frame to the h/w for transmission. 2433 * Fragmented frames have each frag chained together 2434 * with m_nextpkt. We know there are sufficient ath_buf's 2435 * to send all the frags because of work done by 2436 * ath_txfrag_setup. We leave m_nextpkt set while 2437 * calling ath_tx_start so it can use it to extend 2438 * the tx duration to cover the subsequent frag and 2439 * so it can reclaim all the mbufs in case of an error; 2440 * ath_tx_start clears m_nextpkt once it commits to 2441 * handing the frame to the hardware. 2442 */ 2443 next = m->m_nextpkt; 2444 if (ath_tx_start(sc, ni, bf, m)) { 2445 bad: 2446 ifp->if_oerrors++; 2447 reclaim: 2448 bf->bf_m = NULL; 2449 bf->bf_node = NULL; 2450 ATH_TXBUF_LOCK(sc); 2451 ath_returnbuf_head(sc, bf); 2452 ath_txfrag_cleanup(sc, &frags, ni); 2453 ATH_TXBUF_UNLOCK(sc); 2454 if (ni != NULL) 2455 ieee80211_free_node(ni); 2456 continue; 2457 } 2458 if (next != NULL) { 2459 /* 2460 * Beware of state changing between frags. 2461 * XXX check sta power-save state? 2462 */ 2463 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { 2464 DPRINTF(sc, ATH_DEBUG_XMIT, 2465 "%s: flush fragmented packet, state %s\n", 2466 __func__, 2467 ieee80211_state_name[ni->ni_vap->iv_state]); 2468 ath_freetx(next); 2469 goto reclaim; 2470 } 2471 m = next; 2472 bf = TAILQ_FIRST(&frags); 2473 KASSERT(bf != NULL, ("no buf for txfrag")); 2474 TAILQ_REMOVE(&frags, bf, bf_list); 2475 goto nextfrag; 2476 } 2477 2478 sc->sc_wd_timer = 5; 2479 } 2480 2481 ATH_PCU_LOCK(sc); 2482 sc->sc_txstart_cnt--; 2483 ATH_PCU_UNLOCK(sc); 2484} 2485 2486static int 2487ath_media_change(struct ifnet *ifp) 2488{ 2489 int error = ieee80211_media_change(ifp); 2490 /* NB: only the fixed rate can change and that doesn't need a reset */ 2491 return (error == ENETRESET ? 0 : error); 2492} 2493 2494/* 2495 * Block/unblock tx+rx processing while a key change is done.
2496 * We assume the caller serializes key management operations 2497 * so we only need to worry about synchronization with other 2498 * uses that originate in the driver. 2499 */ 2500static void 2501ath_key_update_begin(struct ieee80211vap *vap) 2502{ 2503 struct ifnet *ifp = vap->iv_ic->ic_ifp; 2504 struct ath_softc *sc = ifp->if_softc; 2505 2506 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2507 taskqueue_block(sc->sc_tq); 2508 IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */ 2509} 2510 2511static void 2512ath_key_update_end(struct ieee80211vap *vap) 2513{ 2514 struct ifnet *ifp = vap->iv_ic->ic_ifp; 2515 struct ath_softc *sc = ifp->if_softc; 2516 2517 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2518 IF_UNLOCK(&ifp->if_snd); 2519 taskqueue_unblock(sc->sc_tq); 2520} 2521 2522static void 2523ath_update_promisc(struct ifnet *ifp) 2524{ 2525 struct ath_softc *sc = ifp->if_softc; 2526 u_int32_t rfilt; 2527 2528 /* configure rx filter */ 2529 rfilt = ath_calcrxfilter(sc); 2530 ath_hal_setrxfilter(sc->sc_ah, rfilt); 2531 2532 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); 2533} 2534 2535static void 2536ath_update_mcast(struct ifnet *ifp) 2537{ 2538 struct ath_softc *sc = ifp->if_softc; 2539 u_int32_t mfilt[2]; 2540 2541 /* calculate and install multicast filter */ 2542 if ((ifp->if_flags & IFF_ALLMULTI) == 0) { 2543 struct ifmultiaddr *ifma; 2544 /* 2545 * Merge multicast addresses to form the hardware filter. 2546 */ 2547 mfilt[0] = mfilt[1] = 0; 2548 if_maddr_rlock(ifp); /* XXX need some fiddling to remove? */ 2549 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2550 caddr_t dl; 2551 u_int32_t val; 2552 u_int8_t pos; 2553 2554 /* calculate XOR of eight 6bit values */ 2555 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 2556 val = LE_READ_4(dl + 0); 2557 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2558 val = LE_READ_4(dl + 3); 2559 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2560 pos &= 0x3f; 2561 mfilt[pos / 32] |= (1 << (pos % 32)); 2562 } 2563 if_maddr_runlock(ifp); 2564 } else 2565 mfilt[0] = mfilt[1] = ~0; 2566 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]); 2567 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n", 2568 __func__, mfilt[0], mfilt[1]); 2569} 2570 2571void 2572ath_mode_init(struct ath_softc *sc) 2573{ 2574 struct ifnet *ifp = sc->sc_ifp; 2575 struct ath_hal *ah = sc->sc_ah; 2576 u_int32_t rfilt; 2577 2578 /* configure rx filter */ 2579 rfilt = ath_calcrxfilter(sc); 2580 ath_hal_setrxfilter(ah, rfilt); 2581 2582 /* configure operational mode */ 2583 ath_hal_setopmode(ah); 2584 2585 /* handle any link-level address change */ 2586 ath_hal_setmac(ah, IF_LLADDR(ifp)); 2587 2588 /* calculate and install multicast filter */ 2589 ath_update_mcast(ifp); 2590} 2591 2592/* 2593 * Set the slot time based on the current setting. 
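 * Half- and quarter-rate channels use stretched slot times (13 and 21 usec); on 11g channels the short-slot flag selects 9 vs 20 usec, and everything else defaults to the short 9 usec slot.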
2594 */ 2595void 2596ath_setslottime(struct ath_softc *sc) 2597{ 2598 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 2599 struct ath_hal *ah = sc->sc_ah; 2600 u_int usec; 2601 2602 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan)) 2603 usec = 13; 2604 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan)) 2605 usec = 21; 2606 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { 2607 /* honor short/long slot time only in 11g */ 2608 /* XXX shouldn't honor on pure g or turbo g channel */ 2609 if (ic->ic_flags & IEEE80211_F_SHSLOT) 2610 usec = HAL_SLOT_TIME_9; 2611 else 2612 usec = HAL_SLOT_TIME_20; 2613 } else 2614 usec = HAL_SLOT_TIME_9; 2615 2616 DPRINTF(sc, ATH_DEBUG_RESET, 2617 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n", 2618 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, 2619 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec); 2620 2621 ath_hal_setslottime(ah, usec); 2622 sc->sc_updateslot = OK; 2623} 2624 2625/* 2626 * Callback from the 802.11 layer to update the 2627 * slot time based on the current setting. 2628 */ 2629static void 2630ath_updateslot(struct ifnet *ifp) 2631{ 2632 struct ath_softc *sc = ifp->if_softc; 2633 struct ieee80211com *ic = ifp->if_l2com; 2634 2635 /* 2636 * When not coordinating the BSS, change the hardware 2637 * immediately. For other operation we defer the change 2638 * until beacon updates have propagated to the stations. 2639 */ 2640 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 2641 ic->ic_opmode == IEEE80211_M_MBSS) 2642 sc->sc_updateslot = UPDATE; 2643 else 2644 ath_setslottime(sc); 2645} 2646 2647/* 2648 * Append the contents of src to dst; both queues 2649 * are assumed to be locked. 2650 */ 2651void 2652ath_txqmove(struct ath_txq *dst, struct ath_txq *src) 2653{ 2654 2655 ATH_TXQ_LOCK_ASSERT(dst); 2656 ATH_TXQ_LOCK_ASSERT(src); 2657 2658 TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list); 2659 dst->axq_link = src->axq_link; 2660 src->axq_link = NULL; 2661 dst->axq_depth += src->axq_depth; 2662 dst->axq_aggr_depth += src->axq_aggr_depth; 2663 src->axq_depth = 0; 2664 src->axq_aggr_depth = 0; 2665} 2666 2667/* 2668 * Reset the hardware, with no loss. 2669 * 2670 * This can't be used for a general case reset. 2671 */ 2672static void 2673ath_reset_proc(void *arg, int pending) 2674{ 2675 struct ath_softc *sc = arg; 2676 struct ifnet *ifp = sc->sc_ifp; 2677 2678#if 0 2679 if_printf(ifp, "%s: resetting\n", __func__); 2680#endif 2681 ath_reset(ifp, ATH_RESET_NOLOSS); 2682} 2683 2684/* 2685 * Reset the hardware after detecting beacons have stopped. 2686 */ 2687static void 2688ath_bstuck_proc(void *arg, int pending) 2689{ 2690 struct ath_softc *sc = arg; 2691 struct ifnet *ifp = sc->sc_ifp; 2692 uint32_t hangs = 0; 2693 2694 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) 2695 if_printf(ifp, "bb hang detected (0x%x)\n", hangs); 2696 2697 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n", 2698 sc->sc_bmisscount); 2699 sc->sc_stats.ast_bstuck++; 2700 /* 2701 * This assumes that there's no simultaneous channel mode change 2702 * occuring. 
2703 */ 2704 ath_reset(ifp, ATH_RESET_NOLOSS); 2705} 2706 2707static void 2708ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 2709{ 2710 bus_addr_t *paddr = (bus_addr_t*) arg; 2711 KASSERT(error == 0, ("error %u on bus_dma callback", error)); 2712 *paddr = segs->ds_addr; 2713} 2714 2715static int 2716ath_descdma_setup(struct ath_softc *sc, 2717 struct ath_descdma *dd, ath_bufhead *head, 2718 const char *name, int nbuf, int ndesc) 2719{ 2720#define DS2PHYS(_dd, _ds) \ 2721 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 2722#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \ 2723 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0) 2724 struct ifnet *ifp = sc->sc_ifp; 2725 uint8_t *ds; 2726 struct ath_buf *bf; 2727 int i, bsize, error; 2728 int desc_len; 2729 2730 desc_len = sizeof(struct ath_desc); 2731 2732 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n", 2733 __func__, name, nbuf, ndesc); 2734 2735 dd->dd_name = name; 2736 dd->dd_desc_len = desc_len * nbuf * ndesc; 2737 2738 /* 2739 * Merlin work-around: 2740 * Descriptors that cross the 4KB boundary can't be used. 2741 * Assume one skipped descriptor per 4KB page. 2742 */ 2743 if (! ath_hal_split4ktrans(sc->sc_ah)) { 2744 int numdescpage = 4096 / (desc_len * ndesc); 2745 dd->dd_desc_len = (nbuf / numdescpage + 1) * 4096; 2746 } 2747 2748 /* 2749 * Setup DMA descriptor area. 2750 */ 2751 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 2752 PAGE_SIZE, 0, /* alignment, bounds */ 2753 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2754 BUS_SPACE_MAXADDR, /* highaddr */ 2755 NULL, NULL, /* filter, filterarg */ 2756 dd->dd_desc_len, /* maxsize */ 2757 1, /* nsegments */ 2758 dd->dd_desc_len, /* maxsegsize */ 2759 BUS_DMA_ALLOCNOW, /* flags */ 2760 NULL, /* lockfunc */ 2761 NULL, /* lockarg */ 2762 &dd->dd_dmat); 2763 if (error != 0) { 2764 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); 2765 return error; 2766 } 2767 2768 /* allocate descriptors */ 2769 error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap); 2770 if (error != 0) { 2771 if_printf(ifp, "unable to create dmamap for %s descriptors, " 2772 "error %u\n", dd->dd_name, error); 2773 goto fail0; 2774 } 2775 2776 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, 2777 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, 2778 &dd->dd_dmamap); 2779 if (error != 0) { 2780 if_printf(ifp, "unable to alloc memory for %u %s descriptors, " 2781 "error %u\n", nbuf * ndesc, dd->dd_name, error); 2782 goto fail1; 2783 } 2784 2785 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, 2786 dd->dd_desc, dd->dd_desc_len, 2787 ath_load_cb, &dd->dd_desc_paddr, 2788 BUS_DMA_NOWAIT); 2789 if (error != 0) { 2790 if_printf(ifp, "unable to map %s descriptors, error %u\n", 2791 dd->dd_name, error); 2792 goto fail2; 2793 } 2794 2795 ds = (uint8_t *) dd->dd_desc; 2796 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", 2797 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, 2798 (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); 2799 2800 /* allocate rx buffers */ 2801 bsize = sizeof(struct ath_buf) * nbuf; 2802 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 2803 if (bf == NULL) { 2804 if_printf(ifp, "malloc of %s buffers failed, size %u\n", 2805 dd->dd_name, bsize); 2806 goto fail3; 2807 } 2808 dd->dd_bufptr = bf; 2809 2810 TAILQ_INIT(head); 2811 for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * desc_len)) { 2812 bf->bf_desc = (struct ath_desc *) ds; 2813 bf->bf_daddr = 
DS2PHYS(dd, ds); 2814 if (! ath_hal_split4ktrans(sc->sc_ah)) { 2815 /* 2816 * Merlin WAR: Skip descriptor addresses which 2817 * cause 4KB boundary crossing along any point 2818 * in the descriptor. 2819 */ 2820 if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr, 2821 desc_len * ndesc)) { 2822 /* Start at the next page */ 2823 ds += 0x1000 - (bf->bf_daddr & 0xFFF); 2824 bf->bf_desc = (struct ath_desc *) ds; 2825 bf->bf_daddr = DS2PHYS(dd, ds); 2826 } 2827 } 2828 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 2829 &bf->bf_dmamap); 2830 if (error != 0) { 2831 if_printf(ifp, "unable to create dmamap for %s " 2832 "buffer %u, error %u\n", dd->dd_name, i, error); 2833 ath_descdma_cleanup(sc, dd, head); 2834 return error; 2835 } 2836 bf->bf_lastds = bf->bf_desc; /* Just an initial value */ 2837 TAILQ_INSERT_TAIL(head, bf, bf_list); 2838 } 2839 return 0; 2840fail3: 2841 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 2842fail2: 2843 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 2844fail1: 2845 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 2846fail0: 2847 bus_dma_tag_destroy(dd->dd_dmat); 2848 memset(dd, 0, sizeof(*dd)); 2849 return error; 2850#undef DS2PHYS 2851#undef ATH_DESC_4KB_BOUND_CHECK 2852} 2853 2854static void 2855ath_descdma_cleanup(struct ath_softc *sc, 2856 struct ath_descdma *dd, ath_bufhead *head) 2857{ 2858 struct ath_buf *bf; 2859 struct ieee80211_node *ni; 2860 2861 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 2862 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 2863 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 2864 bus_dma_tag_destroy(dd->dd_dmat); 2865 2866 TAILQ_FOREACH(bf, head, bf_list) { 2867 if (bf->bf_m) { 2868 m_freem(bf->bf_m); 2869 bf->bf_m = NULL; 2870 } 2871 if (bf->bf_dmamap != NULL) { 2872 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 2873 bf->bf_dmamap = NULL; 2874 } 2875 ni = bf->bf_node; 2876 bf->bf_node = NULL; 2877 if (ni != NULL) { 2878 /* 2879 * Reclaim node reference. 2880 */ 2881 ieee80211_free_node(ni); 2882 } 2883 } 2884 2885 TAILQ_INIT(head); 2886 free(dd->dd_bufptr, M_ATHDEV); 2887 memset(dd, 0, sizeof(*dd)); 2888} 2889 2890static int 2891ath_desc_alloc(struct ath_softc *sc) 2892{ 2893 int error; 2894 2895 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, 2896 "rx", ath_rxbuf, 1); 2897 if (error != 0) 2898 return error; 2899 2900 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 2901 "tx", ath_txbuf, ATH_TXDESC); 2902 if (error != 0) { 2903 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 2904 return error; 2905 } 2906 sc->sc_txbuf_cnt = ath_txbuf; 2907 2908 error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt, 2909 "tx_mgmt", ath_txbuf_mgmt, ATH_TXDESC); 2910 if (error != 0) { 2911 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 2912 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 2913 return error; 2914 } 2915 2916 /* 2917 * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the 2918 * flag doesn't have to be set in ath_getbuf_locked(). 
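 * (Four descriptor ranges are allocated here - rx, tx, tx_mgmt and beacon - and each failure path below unwinds whatever was set up before it.)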
2919 */ 2920 2921 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, 2922 "beacon", ATH_BCBUF, 1); 2923 if (error != 0) { 2924 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 2925 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 2926 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 2927 &sc->sc_txbuf_mgmt); 2928 return error; 2929 } 2930 return 0; 2931} 2932 2933static void 2934ath_desc_free(struct ath_softc *sc) 2935{ 2936 2937 if (sc->sc_bdma.dd_desc_len != 0) 2938 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); 2939 if (sc->sc_txdma.dd_desc_len != 0) 2940 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 2941 if (sc->sc_rxdma.dd_desc_len != 0) 2942 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 2943 if (sc->sc_txdma_mgmt.dd_desc_len != 0) 2944 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 2945 &sc->sc_txbuf_mgmt); 2946} 2947 2948static struct ieee80211_node * 2949ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 2950{ 2951 struct ieee80211com *ic = vap->iv_ic; 2952 struct ath_softc *sc = ic->ic_ifp->if_softc; 2953 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; 2954 struct ath_node *an; 2955 2956 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); 2957 if (an == NULL) { 2958 /* XXX stat+msg */ 2959 return NULL; 2960 } 2961 ath_rate_node_init(sc, an); 2962 2963 /* Setup the mutex - there's no associd yet so set the name to NULL */ 2964 snprintf(an->an_name, sizeof(an->an_name), "%s: node %p", 2965 device_get_nameunit(sc->sc_dev), an); 2966 mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF); 2967 2968 /* XXX setup ath_tid */ 2969 ath_tx_tid_init(sc, an); 2970 2971 DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an); 2972 return &an->an_node; 2973} 2974 2975static void 2976ath_node_cleanup(struct ieee80211_node *ni) 2977{ 2978 struct ieee80211com *ic = ni->ni_ic; 2979 struct ath_softc *sc = ic->ic_ifp->if_softc; 2980 2981 /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */ 2982 ath_tx_node_flush(sc, ATH_NODE(ni)); 2983 ath_rate_node_cleanup(sc, ATH_NODE(ni)); 2984 sc->sc_node_cleanup(ni); 2985} 2986 2987static void 2988ath_node_free(struct ieee80211_node *ni) 2989{ 2990 struct ieee80211com *ic = ni->ni_ic; 2991 struct ath_softc *sc = ic->ic_ifp->if_softc; 2992 2993 DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni); 2994 mtx_destroy(&ATH_NODE(ni)->an_mtx); 2995 sc->sc_node_free(ni); 2996} 2997 2998static void 2999ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) 3000{ 3001 struct ieee80211com *ic = ni->ni_ic; 3002 struct ath_softc *sc = ic->ic_ifp->if_softc; 3003 struct ath_hal *ah = sc->sc_ah; 3004 3005 *rssi = ic->ic_node_getrssi(ni); 3006 if (ni->ni_chan != IEEE80211_CHAN_ANYC) 3007 *noise = ath_hal_getchannoise(ah, ni->ni_chan); 3008 else 3009 *noise = -95; /* nominally correct */ 3010} 3011 3012/* 3013 * Set the default antenna. 
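 * Besides programming the HAL, note the switch in the statistics and clear sc_rxotherant, the counter the RX path uses to decide when to flip the default antenna.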
3014 */ 3015void 3016ath_setdefantenna(struct ath_softc *sc, u_int antenna) 3017{ 3018 struct ath_hal *ah = sc->sc_ah; 3019 3020 /* XXX block beacon interrupts */ 3021 ath_hal_setdefantenna(ah, antenna); 3022 if (sc->sc_defant != antenna) 3023 sc->sc_stats.ast_ant_defswitch++; 3024 sc->sc_defant = antenna; 3025 sc->sc_rxotherant = 0; 3026} 3027 3028static void 3029ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) 3030{ 3031 txq->axq_qnum = qnum; 3032 txq->axq_ac = 0; 3033 txq->axq_depth = 0; 3034 txq->axq_aggr_depth = 0; 3035 txq->axq_intrcnt = 0; 3036 txq->axq_link = NULL; 3037 txq->axq_softc = sc; 3038 TAILQ_INIT(&txq->axq_q); 3039 TAILQ_INIT(&txq->axq_tidq); 3040 ATH_TXQ_LOCK_INIT(sc, txq); 3041} 3042 3043/* 3044 * Setup a h/w transmit queue. 3045 */ 3046static struct ath_txq * 3047ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 3048{ 3049#define N(a) (sizeof(a)/sizeof(a[0])) 3050 struct ath_hal *ah = sc->sc_ah; 3051 HAL_TXQ_INFO qi; 3052 int qnum; 3053 3054 memset(&qi, 0, sizeof(qi)); 3055 qi.tqi_subtype = subtype; 3056 qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 3057 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 3058 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 3059 /* 3060 * Enable interrupts only for EOL and DESC conditions. 3061 * We mark tx descriptors to receive a DESC interrupt 3062 * when a tx queue gets deep; otherwise waiting for the 3063 * EOL to reap descriptors. Note that this is done to 3064 * reduce interrupt load and this only defers reaping 3065 * descriptors, never transmitting frames. Aside from 3066 * reducing interrupts this also permits more concurrency. 3067 * The only potential downside is if the tx queue backs 3068 * up in which case the top half of the kernel may backup 3069 * due to a lack of tx descriptors. 3070 */ 3071 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE; 3072 qnum = ath_hal_setuptxqueue(ah, qtype, &qi); 3073 if (qnum == -1) { 3074 /* 3075 * NB: don't print a message, this happens 3076 * normally on parts with too few tx queues 3077 */ 3078 return NULL; 3079 } 3080 if (qnum >= N(sc->sc_txq)) { 3081 device_printf(sc->sc_dev, 3082 "hal qnum %u out of range, max %zu!\n", 3083 qnum, N(sc->sc_txq)); 3084 ath_hal_releasetxqueue(ah, qnum); 3085 return NULL; 3086 } 3087 if (!ATH_TXQ_SETUP(sc, qnum)) { 3088 ath_txq_init(sc, &sc->sc_txq[qnum], qnum); 3089 sc->sc_txqsetup |= 1<<qnum; 3090 } 3091 return &sc->sc_txq[qnum]; 3092#undef N 3093} 3094 3095/* 3096 * Setup a hardware data transmit queue for the specified 3097 * access control. The hal may not support all requested 3098 * queues in which case it will return a reference to a 3099 * previously setup queue. We record the mapping from ac's 3100 * to h/w queues for use by ath_tx_start and also track 3101 * the set of h/w queues being used to optimize work in the 3102 * transmit interrupt handler and related routines. 3103 */ 3104static int 3105ath_tx_setup(struct ath_softc *sc, int ac, int haltype) 3106{ 3107#define N(a) (sizeof(a)/sizeof(a[0])) 3108 struct ath_txq *txq; 3109 3110 if (ac >= N(sc->sc_ac2q)) { 3111 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", 3112 ac, N(sc->sc_ac2q)); 3113 return 0; 3114 } 3115 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); 3116 if (txq != NULL) { 3117 txq->axq_ac = ac; 3118 sc->sc_ac2q[ac] = txq; 3119 return 1; 3120 } else 3121 return 0; 3122#undef N 3123} 3124 3125/* 3126 * Update WME parameters for a transmit queue. 
3127 */ 3128static int 3129ath_txq_update(struct ath_softc *sc, int ac) 3130{ 3131#define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) 3132#define ATH_TXOP_TO_US(v) (v<<5) 3133 struct ifnet *ifp = sc->sc_ifp; 3134 struct ieee80211com *ic = ifp->if_l2com; 3135 struct ath_txq *txq = sc->sc_ac2q[ac]; 3136 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 3137 struct ath_hal *ah = sc->sc_ah; 3138 HAL_TXQ_INFO qi; 3139 3140 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); 3141#ifdef IEEE80211_SUPPORT_TDMA 3142 if (sc->sc_tdma) { 3143 /* 3144 * AIFS is zero so there's no pre-transmit wait. The 3145 * burst time defines the slot duration and is configured 3146 * through net80211. The QCU is setup to not do post-xmit 3147 * back off, lockout all lower-priority QCU's, and fire 3148 * off the DMA beacon alert timer which is setup based 3149 * on the slot configuration. 3150 */ 3151 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 3152 | HAL_TXQ_TXERRINT_ENABLE 3153 | HAL_TXQ_TXURNINT_ENABLE 3154 | HAL_TXQ_TXEOLINT_ENABLE 3155 | HAL_TXQ_DBA_GATED 3156 | HAL_TXQ_BACKOFF_DISABLE 3157 | HAL_TXQ_ARB_LOCKOUT_GLOBAL 3158 ; 3159 qi.tqi_aifs = 0; 3160 /* XXX +dbaprep? */ 3161 qi.tqi_readyTime = sc->sc_tdmaslotlen; 3162 qi.tqi_burstTime = qi.tqi_readyTime; 3163 } else { 3164#endif 3165 /* 3166 * XXX shouldn't this just use the default flags 3167 * used in the previous queue setup? 3168 */ 3169 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 3170 | HAL_TXQ_TXERRINT_ENABLE 3171 | HAL_TXQ_TXDESCINT_ENABLE 3172 | HAL_TXQ_TXURNINT_ENABLE 3173 | HAL_TXQ_TXEOLINT_ENABLE 3174 ; 3175 qi.tqi_aifs = wmep->wmep_aifsn; 3176 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 3177 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 3178 qi.tqi_readyTime = 0; 3179 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit); 3180#ifdef IEEE80211_SUPPORT_TDMA 3181 } 3182#endif 3183 3184 DPRINTF(sc, ATH_DEBUG_RESET, 3185 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n", 3186 __func__, txq->axq_qnum, qi.tqi_qflags, 3187 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime); 3188 3189 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { 3190 if_printf(ifp, "unable to update hardware queue " 3191 "parameters for %s traffic!\n", 3192 ieee80211_wme_acnames[ac]); 3193 return 0; 3194 } else { 3195 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ 3196 return 1; 3197 } 3198#undef ATH_TXOP_TO_US 3199#undef ATH_EXPONENT_TO_VALUE 3200} 3201 3202/* 3203 * Callback from the 802.11 layer to update WME parameters. 3204 */ 3205int 3206ath_wme_update(struct ieee80211com *ic) 3207{ 3208 struct ath_softc *sc = ic->ic_ifp->if_softc; 3209 3210 return !ath_txq_update(sc, WME_AC_BE) || 3211 !ath_txq_update(sc, WME_AC_BK) || 3212 !ath_txq_update(sc, WME_AC_VI) || 3213 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; 3214} 3215 3216/* 3217 * Reclaim resources for a setup queue. 3218 */ 3219static void 3220ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 3221{ 3222 3223 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); 3224 ATH_TXQ_LOCK_DESTROY(txq); 3225 sc->sc_txqsetup &= ~(1<<txq->axq_qnum); 3226} 3227 3228/* 3229 * Reclaim all tx queue resources. 3230 */ 3231static void 3232ath_tx_cleanup(struct ath_softc *sc) 3233{ 3234 int i; 3235 3236 ATH_TXBUF_LOCK_DESTROY(sc); 3237 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 3238 if (ATH_TXQ_SETUP(sc, i)) 3239 ath_tx_cleanupq(sc, &sc->sc_txq[i]); 3240} 3241 3242/* 3243 * Return h/w rate index for an IEEE rate (w/o basic rate bit) 3244 * using the current rates in sc_rixmap. 
3245 */ 3246int 3247ath_tx_findrix(const struct ath_softc *sc, uint8_t rate) 3248{ 3249 int rix = sc->sc_rixmap[rate]; 3250 /* NB: return lowest rix for invalid rate */ 3251 return (rix == 0xff ? 0 : rix); 3252} 3253 3254static void 3255ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts, 3256 struct ath_buf *bf) 3257{ 3258 struct ieee80211_node *ni = bf->bf_node; 3259 struct ifnet *ifp = sc->sc_ifp; 3260 struct ieee80211com *ic = ifp->if_l2com; 3261 int sr, lr, pri; 3262 3263 if (ts->ts_status == 0) { 3264 u_int8_t txant = ts->ts_antenna; 3265 sc->sc_stats.ast_ant_tx[txant]++; 3266 sc->sc_ant_tx[txant]++; 3267 if (ts->ts_finaltsi != 0) 3268 sc->sc_stats.ast_tx_altrate++; 3269 pri = M_WME_GETAC(bf->bf_m); 3270 if (pri >= WME_AC_VO) 3271 ic->ic_wme.wme_hipri_traffic++; 3272 if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) 3273 ni->ni_inact = ni->ni_inact_reload; 3274 } else { 3275 if (ts->ts_status & HAL_TXERR_XRETRY) 3276 sc->sc_stats.ast_tx_xretries++; 3277 if (ts->ts_status & HAL_TXERR_FIFO) 3278 sc->sc_stats.ast_tx_fifoerr++; 3279 if (ts->ts_status & HAL_TXERR_FILT) 3280 sc->sc_stats.ast_tx_filtered++; 3281 if (ts->ts_status & HAL_TXERR_XTXOP) 3282 sc->sc_stats.ast_tx_xtxop++; 3283 if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED) 3284 sc->sc_stats.ast_tx_timerexpired++; 3285 3286 if (ts->ts_status & HAL_TX_DATA_UNDERRUN) 3287 sc->sc_stats.ast_tx_data_underrun++; 3288 if (ts->ts_status & HAL_TX_DELIM_UNDERRUN) 3289 sc->sc_stats.ast_tx_delim_underrun++; 3290 3291 if (bf->bf_m->m_flags & M_FF) 3292 sc->sc_stats.ast_ff_txerr++; 3293 } 3294 /* XXX when is this valid? */ 3295 if (ts->ts_status & HAL_TX_DESC_CFG_ERR) 3296 sc->sc_stats.ast_tx_desccfgerr++; 3297 3298 sr = ts->ts_shortretry; 3299 lr = ts->ts_longretry; 3300 sc->sc_stats.ast_tx_shortretry += sr; 3301 sc->sc_stats.ast_tx_longretry += lr; 3302 3303} 3304 3305/* 3306 * The default completion. If fail is 1, this means 3307 * "please don't retry the frame, and just return -1 status 3308 * to the net80211 stack. 3309 */ 3310void 3311ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 3312{ 3313 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 3314 int st; 3315 3316 if (fail == 1) 3317 st = -1; 3318 else 3319 st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ? 3320 ts->ts_status : HAL_TXERR_XRETRY; 3321 3322 if (bf->bf_state.bfs_dobaw) 3323 device_printf(sc->sc_dev, 3324 "%s: bf %p: seqno %d: dobaw should've been cleared!\n", 3325 __func__, 3326 bf, 3327 SEQNO(bf->bf_state.bfs_seqno)); 3328 if (bf->bf_next != NULL) 3329 device_printf(sc->sc_dev, 3330 "%s: bf %p: seqno %d: bf_next not NULL!\n", 3331 __func__, 3332 bf, 3333 SEQNO(bf->bf_state.bfs_seqno)); 3334 3335 /* 3336 * Do any tx complete callback. Note this must 3337 * be done before releasing the node reference. 3338 * This will free the mbuf, release the net80211 3339 * node and recycle the ath_buf. 3340 */ 3341 ath_tx_freebuf(sc, bf, st); 3342} 3343 3344/* 3345 * Update rate control with the given completion status. 
3346 */ 3347void 3348ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 3349 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen, 3350 int nframes, int nbad) 3351{ 3352 struct ath_node *an; 3353 3354 /* Only for unicast frames */ 3355 if (ni == NULL) 3356 return; 3357 3358 an = ATH_NODE(ni); 3359 3360 if ((ts->ts_status & HAL_TXERR_FILT) == 0) { 3361 ATH_NODE_LOCK(an); 3362 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad); 3363 ATH_NODE_UNLOCK(an); 3364 } 3365} 3366 3367/* 3368 * Update the busy status of the last frame on the free list. 3369 * When doing TDMA, the busy flag tracks whether the hardware 3370 * currently points to this buffer or not, and thus gated DMA 3371 * may restart by re-reading the last descriptor in this 3372 * buffer. 3373 * 3374 * This should be called in the completion function once one 3375 * of the buffers has been used. 3376 */ 3377static void 3378ath_tx_update_busy(struct ath_softc *sc) 3379{ 3380 struct ath_buf *last; 3381 3382 /* 3383 * Since the last frame may still be marked 3384 * as ATH_BUF_BUSY, unmark it here before 3385 * finishing the frame processing. 3386 * Since we've completed a frame (aggregate 3387 * or otherwise), the hardware has moved on 3388 * and is no longer referencing the previous 3389 * descriptor. 3390 */ 3391 ATH_TXBUF_LOCK_ASSERT(sc); 3392 last = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s); 3393 if (last != NULL) 3394 last->bf_flags &= ~ATH_BUF_BUSY; 3395 last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s); 3396 if (last != NULL) 3397 last->bf_flags &= ~ATH_BUF_BUSY; 3398} 3399 3400/* 3401 * Process completed xmit descriptors from the specified queue. 3402 * Kick the packet scheduler if needed. This can occur from this 3403 * particular task. 3404 */ 3405static int 3406ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) 3407{ 3408 struct ath_hal *ah = sc->sc_ah; 3409 struct ath_buf *bf; 3410 struct ath_desc *ds; 3411 struct ath_tx_status *ts; 3412 struct ieee80211_node *ni; 3413 struct ath_node *an; 3414#ifdef IEEE80211_SUPPORT_SUPERG 3415 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 3416#endif /* IEEE80211_SUPPORT_SUPERG */ 3417 int nacked; 3418 HAL_STATUS status; 3419 3420 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", 3421 __func__, txq->axq_qnum, 3422 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 3423 txq->axq_link); 3424 nacked = 0; 3425 for (;;) { 3426 ATH_TXQ_LOCK(txq); 3427 txq->axq_intrcnt = 0; /* reset periodic desc intr count */ 3428 bf = TAILQ_FIRST(&txq->axq_q); 3429 if (bf == NULL) { 3430 ATH_TXQ_UNLOCK(txq); 3431 break; 3432 } 3433 ds = bf->bf_lastds; /* XXX must be setup correctly! */ 3434 ts = &bf->bf_status.ds_txstat; 3435 status = ath_hal_txprocdesc(ah, ds, ts); 3436#ifdef ATH_DEBUG 3437 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) 3438 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 3439 status == HAL_OK); 3440 else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0)) { 3441 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 3442 status == HAL_OK); 3443 } 3444#endif 3445 if (status == HAL_EINPROGRESS) { 3446 ATH_TXQ_UNLOCK(txq); 3447 break; 3448 } 3449 ATH_TXQ_REMOVE(txq, bf, bf_list); 3450#ifdef IEEE80211_SUPPORT_TDMA 3451 if (txq->axq_depth > 0) { 3452 /* 3453 * More frames follow. Mark the buffer busy 3454 * so it's not re-used while the hardware may 3455 * still re-read the link field in the descriptor. 
3456 * 3457 * Use the last buffer in an aggregate as that 3458 * is where the hardware may be - intermediate 3459 * descriptors won't be "busy". 3460 */ 3461 bf->bf_last->bf_flags |= ATH_BUF_BUSY; 3462 } else 3463#else 3464 if (txq->axq_depth == 0) 3465#endif 3466 txq->axq_link = NULL; 3467 if (bf->bf_state.bfs_aggr) 3468 txq->axq_aggr_depth--; 3469 3470 ni = bf->bf_node; 3471 /* 3472 * If unicast frame was ack'd update RSSI, 3473 * including the last rx time used to 3474 * workaround phantom bmiss interrupts. 3475 */ 3476 if (ni != NULL && ts->ts_status == 0 && 3477 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) { 3478 nacked++; 3479 sc->sc_stats.ast_tx_rssi = ts->ts_rssi; 3480 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, 3481 ts->ts_rssi); 3482 } 3483 ATH_TXQ_UNLOCK(txq); 3484 3485 /* If unicast frame, update general statistics */ 3486 if (ni != NULL) { 3487 an = ATH_NODE(ni); 3488 /* update statistics */ 3489 ath_tx_update_stats(sc, ts, bf); 3490 } 3491 3492 /* 3493 * Call the completion handler. 3494 * The completion handler is responsible for 3495 * calling the rate control code. 3496 * 3497 * Frames with no completion handler get the 3498 * rate control code called here. 3499 */ 3500 if (bf->bf_comp == NULL) { 3501 if ((ts->ts_status & HAL_TXERR_FILT) == 0 && 3502 (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) { 3503 /* 3504 * XXX assume this isn't an aggregate 3505 * frame. 3506 */ 3507 ath_tx_update_ratectrl(sc, ni, 3508 bf->bf_state.bfs_rc, ts, 3509 bf->bf_state.bfs_pktlen, 1, 3510 (ts->ts_status == 0 ? 0 : 1)); 3511 } 3512 ath_tx_default_comp(sc, bf, 0); 3513 } else 3514 bf->bf_comp(sc, bf, 0); 3515 } 3516#ifdef IEEE80211_SUPPORT_SUPERG 3517 /* 3518 * Flush fast-frame staging queue when traffic slows. 3519 */ 3520 if (txq->axq_depth <= 1) 3521 ieee80211_ff_flush(ic, txq->axq_ac); 3522#endif 3523 3524 /* Kick the TXQ scheduler */ 3525 if (dosched) { 3526 ATH_TXQ_LOCK(txq); 3527 ath_txq_sched(sc, txq); 3528 ATH_TXQ_UNLOCK(txq); 3529 } 3530 3531 return nacked; 3532} 3533 3534#define TXQACTIVE(t, q) ( (t) & (1 << (q))) 3535 3536/* 3537 * Deferred processing of transmit interrupt; special-cased 3538 * for a single hardware transmit queue (e.g. 5210 and 5211). 3539 */ 3540static void 3541ath_tx_proc_q0(void *arg, int npending) 3542{ 3543 struct ath_softc *sc = arg; 3544 struct ifnet *ifp = sc->sc_ifp; 3545 uint32_t txqs; 3546 3547 ATH_PCU_LOCK(sc); 3548 sc->sc_txproc_cnt++; 3549 txqs = sc->sc_txq_active; 3550 sc->sc_txq_active &= ~txqs; 3551 ATH_PCU_UNLOCK(sc); 3552 3553 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1)) 3554 /* XXX why is lastrx updated in tx code? */ 3555 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 3556 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 3557 ath_tx_processq(sc, sc->sc_cabq, 1); 3558 IF_LOCK(&ifp->if_snd); 3559 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3560 IF_UNLOCK(&ifp->if_snd); 3561 sc->sc_wd_timer = 0; 3562 3563 if (sc->sc_softled) 3564 ath_led_event(sc, sc->sc_txrix); 3565 3566 ATH_PCU_LOCK(sc); 3567 sc->sc_txproc_cnt--; 3568 ATH_PCU_UNLOCK(sc); 3569 3570 ath_tx_kick(sc); 3571} 3572 3573/* 3574 * Deferred processing of transmit interrupt; special-cased 3575 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). 
3576 */ 3577static void 3578ath_tx_proc_q0123(void *arg, int npending) 3579{ 3580 struct ath_softc *sc = arg; 3581 struct ifnet *ifp = sc->sc_ifp; 3582 int nacked; 3583 uint32_t txqs; 3584 3585 ATH_PCU_LOCK(sc); 3586 sc->sc_txproc_cnt++; 3587 txqs = sc->sc_txq_active; 3588 sc->sc_txq_active &= ~txqs; 3589 ATH_PCU_UNLOCK(sc); 3590 3591 /* 3592 * Process each active queue. 3593 */ 3594 nacked = 0; 3595 if (TXQACTIVE(txqs, 0)) 3596 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1); 3597 if (TXQACTIVE(txqs, 1)) 3598 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1); 3599 if (TXQACTIVE(txqs, 2)) 3600 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1); 3601 if (TXQACTIVE(txqs, 3)) 3602 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1); 3603 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 3604 ath_tx_processq(sc, sc->sc_cabq, 1); 3605 if (nacked) 3606 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 3607 3608 IF_LOCK(&ifp->if_snd); 3609 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3610 IF_UNLOCK(&ifp->if_snd); 3611 sc->sc_wd_timer = 0; 3612 3613 if (sc->sc_softled) 3614 ath_led_event(sc, sc->sc_txrix); 3615 3616 ATH_PCU_LOCK(sc); 3617 sc->sc_txproc_cnt--; 3618 ATH_PCU_UNLOCK(sc); 3619 3620 ath_tx_kick(sc); 3621} 3622 3623/* 3624 * Deferred processing of transmit interrupt. 3625 */ 3626static void 3627ath_tx_proc(void *arg, int npending) 3628{ 3629 struct ath_softc *sc = arg; 3630 struct ifnet *ifp = sc->sc_ifp; 3631 int i, nacked; 3632 uint32_t txqs; 3633 3634 ATH_PCU_LOCK(sc); 3635 sc->sc_txproc_cnt++; 3636 txqs = sc->sc_txq_active; 3637 sc->sc_txq_active &= ~txqs; 3638 ATH_PCU_UNLOCK(sc); 3639 3640 /* 3641 * Process each active queue. 3642 */ 3643 nacked = 0; 3644 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 3645 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i)) 3646 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1); 3647 if (nacked) 3648 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 3649 3650 /* XXX check this inside of IF_LOCK? */ 3651 IF_LOCK(&ifp->if_snd); 3652 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3653 IF_UNLOCK(&ifp->if_snd); 3654 sc->sc_wd_timer = 0; 3655 3656 if (sc->sc_softled) 3657 ath_led_event(sc, sc->sc_txrix); 3658 3659 ATH_PCU_LOCK(sc); 3660 sc->sc_txproc_cnt--; 3661 ATH_PCU_UNLOCK(sc); 3662 3663 ath_tx_kick(sc); 3664} 3665#undef TXQACTIVE 3666 3667/* 3668 * Deferred processing of TXQ rescheduling. 3669 */ 3670static void 3671ath_txq_sched_tasklet(void *arg, int npending) 3672{ 3673 struct ath_softc *sc = arg; 3674 int i; 3675 3676 /* XXX is skipping ok? 
*/ 3677 ATH_PCU_LOCK(sc); 3678#if 0 3679 if (sc->sc_inreset_cnt > 0) { 3680 device_printf(sc->sc_dev, 3681 "%s: sc_inreset_cnt > 0; skipping\n", __func__); 3682 ATH_PCU_UNLOCK(sc); 3683 return; 3684 } 3685#endif 3686 sc->sc_txproc_cnt++; 3687 ATH_PCU_UNLOCK(sc); 3688 3689 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 3690 if (ATH_TXQ_SETUP(sc, i)) { 3691 ATH_TXQ_LOCK(&sc->sc_txq[i]); 3692 ath_txq_sched(sc, &sc->sc_txq[i]); 3693 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 3694 } 3695 } 3696 3697 ATH_PCU_LOCK(sc); 3698 sc->sc_txproc_cnt--; 3699 ATH_PCU_UNLOCK(sc); 3700} 3701 3702void 3703ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf) 3704{ 3705 3706 ATH_TXBUF_LOCK_ASSERT(sc); 3707 3708 if (bf->bf_flags & ATH_BUF_MGMT) 3709 TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list); 3710 else { 3711 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 3712 sc->sc_txbuf_cnt++; 3713 if (sc->sc_txbuf_cnt > ath_txbuf) { 3714 device_printf(sc->sc_dev, 3715 "%s: sc_txbuf_cnt > %d?\n", 3716 __func__, 3717 ath_txbuf); 3718 sc->sc_txbuf_cnt = ath_txbuf; 3719 } 3720 } 3721} 3722 3723void 3724ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf) 3725{ 3726 3727 ATH_TXBUF_LOCK_ASSERT(sc); 3728 3729 if (bf->bf_flags & ATH_BUF_MGMT) 3730 TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list); 3731 else { 3732 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 3733 sc->sc_txbuf_cnt++; 3734 if (sc->sc_txbuf_cnt > ATH_TXBUF) { 3735 device_printf(sc->sc_dev, 3736 "%s: sc_txbuf_cnt > %d?\n", 3737 __func__, 3738 ATH_TXBUF); 3739 sc->sc_txbuf_cnt = ATH_TXBUF; 3740 } 3741 } 3742} 3743 3744/* 3745 * Return a buffer to the pool and update the 'busy' flag on the 3746 * previous 'tail' entry. 3747 * 3748 * This _must_ only be called when the buffer is involved in a completed 3749 * TX. The logic is that if it was part of an active TX, the previous 3750 * buffer on the list is now not involved in a halted TX DMA queue, waiting 3751 * for restart (eg for TDMA.) 3752 * 3753 * The caller must free the mbuf and recycle the node reference. 3754 */ 3755void 3756ath_freebuf(struct ath_softc *sc, struct ath_buf *bf) 3757{ 3758 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3759 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); 3760 3761 KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__)); 3762 KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__)); 3763 3764 ATH_TXBUF_LOCK(sc); 3765 ath_tx_update_busy(sc); 3766 ath_returnbuf_tail(sc, bf); 3767 ATH_TXBUF_UNLOCK(sc); 3768} 3769 3770/* 3771 * This is currently used by ath_tx_draintxq() and 3772 * ath_tx_tid_free_pkts(). 3773 * 3774 * It recycles a single ath_buf. 3775 */ 3776void 3777ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status) 3778{ 3779 struct ieee80211_node *ni = bf->bf_node; 3780 struct mbuf *m0 = bf->bf_m; 3781 3782 bf->bf_node = NULL; 3783 bf->bf_m = NULL; 3784 3785 /* Free the buffer, it's not needed any longer */ 3786 ath_freebuf(sc, bf); 3787 3788 if (ni != NULL) { 3789 /* 3790 * Do any callback and reclaim the node reference. 3791 */ 3792 if (m0->m_flags & M_TXCB) 3793 ieee80211_process_callback(ni, m0, status); 3794 ieee80211_free_node(ni); 3795 } 3796 m_freem(m0); 3797 3798 /* 3799 * XXX the buffer used to be freed -after-, but the DMA map was 3800 * freed where ath_freebuf() now is. I've no idea what this 3801 * will do. 
3802 */ 3803} 3804 3805void 3806ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) 3807{ 3808#ifdef ATH_DEBUG 3809 struct ath_hal *ah = sc->sc_ah; 3810#endif 3811 struct ath_buf *bf; 3812 u_int ix; 3813 3814 /* 3815 * NB: this assumes output has been stopped and 3816 * we do not need to block ath_tx_proc 3817 */ 3818 ATH_TXBUF_LOCK(sc); 3819 bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s); 3820 if (bf != NULL) 3821 bf->bf_flags &= ~ATH_BUF_BUSY; 3822 bf = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s); 3823 if (bf != NULL) 3824 bf->bf_flags &= ~ATH_BUF_BUSY; 3825 ATH_TXBUF_UNLOCK(sc); 3826 3827 for (ix = 0;; ix++) { 3828 ATH_TXQ_LOCK(txq); 3829 bf = TAILQ_FIRST(&txq->axq_q); 3830 if (bf == NULL) { 3831 txq->axq_link = NULL; 3832 ATH_TXQ_UNLOCK(txq); 3833 break; 3834 } 3835 ATH_TXQ_REMOVE(txq, bf, bf_list); 3836 if (bf->bf_state.bfs_aggr) 3837 txq->axq_aggr_depth--; 3838#ifdef ATH_DEBUG 3839 if (sc->sc_debug & ATH_DEBUG_RESET) { 3840 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 3841 3842 ath_printtxbuf(sc, bf, txq->axq_qnum, ix, 3843 ath_hal_txprocdesc(ah, bf->bf_lastds, 3844 &bf->bf_status.ds_txstat) == HAL_OK); 3845 ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *), 3846 bf->bf_m->m_len, 0, -1); 3847 } 3848#endif /* ATH_DEBUG */ 3849 /* 3850 * Since we're now doing magic in the completion 3851 * functions, we -must- call it for aggregation 3852 * destinations or BAW tracking will get upset. 3853 */ 3854 /* 3855 * Clear ATH_BUF_BUSY; the completion handler 3856 * will free the buffer. 3857 */ 3858 ATH_TXQ_UNLOCK(txq); 3859 bf->bf_flags &= ~ATH_BUF_BUSY; 3860 if (bf->bf_comp) 3861 bf->bf_comp(sc, bf, 1); 3862 else 3863 ath_tx_default_comp(sc, bf, 1); 3864 } 3865 3866 /* 3867 * Drain software queued frames which are on 3868 * active TIDs. 3869 */ 3870 ath_tx_txq_drain(sc, txq); 3871} 3872 3873static void 3874ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) 3875{ 3876 struct ath_hal *ah = sc->sc_ah; 3877 3878 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 3879 __func__, txq->axq_qnum, 3880 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), 3881 txq->axq_link); 3882 (void) ath_hal_stoptxdma(ah, txq->axq_qnum); 3883} 3884 3885static int 3886ath_stoptxdma(struct ath_softc *sc) 3887{ 3888 struct ath_hal *ah = sc->sc_ah; 3889 int i; 3890 3891 /* XXX return value */ 3892 if (sc->sc_invalid) 3893 return 0; 3894 3895 if (!sc->sc_invalid) { 3896 /* don't touch the hardware if marked invalid */ 3897 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 3898 __func__, sc->sc_bhalq, 3899 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq), 3900 NULL); 3901 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); 3902 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 3903 if (ATH_TXQ_SETUP(sc, i)) 3904 ath_tx_stopdma(sc, &sc->sc_txq[i]); 3905 } 3906 3907 return 1; 3908} 3909 3910/* 3911 * Drain the transmit queues and reclaim resources. 3912 */ 3913static void 3914ath_draintxq(struct ath_softc *sc, ATH_RESET_TYPE reset_type) 3915{ 3916#ifdef ATH_DEBUG 3917 struct ath_hal *ah = sc->sc_ah; 3918#endif 3919 struct ifnet *ifp = sc->sc_ifp; 3920 int i; 3921 3922 (void) ath_stoptxdma(sc); 3923 3924 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 3925 /* 3926 * XXX TODO: should we just handle the completed TX frames 3927 * here, whether or not the reset is a full one or not? 
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
		/*
		 * XXX TODO: should we just handle the completed TX frames
		 * here, whether or not the reset is a full one or not?
		 */
		if (ATH_TXQ_SETUP(sc, i)) {
			if (reset_type == ATH_RESET_NOLOSS)
				ath_tx_processq(sc, &sc->sc_txq[i], 0);
			else
				ath_tx_draintxq(sc, &sc->sc_txq[i]);
		}
	}
#ifdef ATH_DEBUG
	if (sc->sc_debug & ATH_DEBUG_RESET) {
		struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf);
		if (bf != NULL && bf->bf_m != NULL) {
			ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
			    ath_hal_txprocdesc(ah, bf->bf_lastds,
			    &bf->bf_status.ds_txstat) == HAL_OK);
			ieee80211_dump_pkt(ifp->if_l2com,
			    mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
			    0, -1);
		}
	}
#endif /* ATH_DEBUG */
	IF_LOCK(&ifp->if_snd);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	IF_UNLOCK(&ifp->if_snd);
	sc->sc_wd_timer = 0;
}

/*
 * Update internal state after a channel change.
 */
static void
ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
{
	enum ieee80211_phymode mode;

	/*
	 * Change channels and update the h/w rate map
	 * if we're switching; e.g. 11a to 11b/g.
	 */
	mode = ieee80211_chan2mode(chan);
	if (mode != sc->sc_curmode)
		ath_setcurmode(sc, mode);
	sc->sc_curchan = chan;
}

/*
 * Set/change channels.  If the channel is really being changed,
 * it's done by resetting the chip.  To accomplish this we must
 * first clean up any pending DMA, then restart things, a la
 * ath_init.
 */
static int
ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	int ret = 0;

	/* Treat this as an interface reset */
	ATH_PCU_UNLOCK_ASSERT(sc);
	ATH_UNLOCK_ASSERT(sc);

	/* (Try to) stop TX/RX from occurring */
	taskqueue_block(sc->sc_tq);

	ATH_PCU_LOCK(sc);
	ath_hal_intrset(ah, 0);		/* Stop new RX/TX completion */
	ath_txrx_stop_locked(sc);	/* Stop pending RX/TX completion */
	if (ath_reset_grablock(sc, 1) == 0) {
		device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
		    __func__);
	}
	ATH_PCU_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
	    __func__, ieee80211_chan2ieee(ic, chan),
	    chan->ic_freq, chan->ic_flags);
	if (chan != sc->sc_curchan) {
		HAL_STATUS status;
		/*
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
#if 0
		ath_hal_intrset(ah, 0);		/* disable interrupts */
#endif
		ath_stoprecv(sc, 1);		/* turn off frame recv */
		/*
		 * First, handle completed TX/RX frames.
		 */
		ath_rx_proc(sc, 0);
		ath_draintxq(sc, ATH_RESET_NOLOSS);
		/*
		 * Next, flush the non-scheduled frames.
		 */
		ath_draintxq(sc, ATH_RESET_FULL);	/* clear pending tx frames */

		if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) {
			if_printf(ifp, "%s: unable to reset "
			    "channel %u (%u MHz, flags 0x%x), hal status %u\n",
			    __func__, ieee80211_chan2ieee(ic, chan),
			    chan->ic_freq, chan->ic_flags, status);
			ret = EIO;
			goto finish;
		}
		sc->sc_diversity = ath_hal_getdiversity(ah);

		/* Let DFS at it in case it's a DFS channel */
		ath_dfs_radar_enable(sc, chan);

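		/*
		 * NB: per-channel state (RX diversity, DFS parameters)
		 * is re-established above after ath_hal_reset() rather
		 * than assumed to survive the reset, much like the
		 * beacon timers below.
		 */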
		/*
		 * Re-enable rx framework.
		 */
		if (ath_startrecv(sc) != 0) {
			if_printf(ifp, "%s: unable to restart recv logic\n",
			    __func__);
			ret = EIO;
			goto finish;
		}

		/*
		 * Change channels and update the h/w rate map
		 * if we're switching; e.g. 11a to 11b/g.
		 */
		ath_chan_change(sc, chan);

		/*
		 * Reset clears the beacon timers; reset them
		 * here if needed.
		 */
		if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma)
				ath_tdma_config(sc, NULL);
			else
#endif
				ath_beacon_config(sc, NULL);
		}

		/*
		 * Re-enable interrupts.
		 */
#if 0
		ath_hal_intrset(ah, sc->sc_imask);
#endif
	}

finish:
	ATH_PCU_LOCK(sc);
	sc->sc_inreset_cnt--;
	/* XXX only do this if sc_inreset_cnt == 0? */
	ath_hal_intrset(ah, sc->sc_imask);
	ATH_PCU_UNLOCK(sc);

	IF_LOCK(&ifp->if_snd);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	IF_UNLOCK(&ifp->if_snd);
	ath_txrx_start(sc);
	/* XXX ath_start? */

	return ret;
}

/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
 */
static void
ath_calibrate(void *arg)
{
	struct ath_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	HAL_BOOL longCal, isCalDone;
	HAL_BOOL aniCal, shortCal = AH_FALSE;
	int nextcal;

	if (ic->ic_flags & IEEE80211_F_SCAN)	/* defer, off channel */
		goto restart;
	longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz);
	aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000);
	if (sc->sc_doresetcal)
		shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000);

	DPRINTF(sc, ATH_DEBUG_CALIBRATE,
	    "%s: shortCal=%d; longCal=%d; aniCal=%d\n",
	    __func__, shortCal, longCal, aniCal);
	if (aniCal) {
		sc->sc_stats.ast_ani_cal++;
		sc->sc_lastani = ticks;
		ath_hal_ani_poll(ah, sc->sc_curchan);
	}

	if (longCal) {
		sc->sc_stats.ast_per_cal++;
		sc->sc_lastlongcal = ticks;
		if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
			/*
			 * Rfgain is out of bounds, reset the chip
			 * to load new gain values.
			 */
			DPRINTF(sc, ATH_DEBUG_CALIBRATE,
			    "%s: rfgain change\n", __func__);
			sc->sc_stats.ast_per_rfgain++;
			sc->sc_resetcal = 0;
			sc->sc_doresetcal = AH_TRUE;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
			callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
			return;
		}
		/*
		 * If this long cal is after an idle period, then
		 * reset the data collection state so we start fresh.
		 */
		if (sc->sc_resetcal) {
			(void) ath_hal_calreset(ah, sc->sc_curchan);
			sc->sc_lastcalreset = ticks;
			sc->sc_lastshortcal = ticks;
			sc->sc_resetcal = 0;
			sc->sc_doresetcal = AH_TRUE;
		}
	}

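	/*
	 * Three calibration flavours are interleaved here: ANI (noise
	 * immunity) polling via ath_hal_ani_poll(), short calibration
	 * (quick periodic samples) and long calibration (the full
	 * periodic/noise floor work).  Only the short/long variants go
	 * through ath_hal_calibrateN() below.
	 */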
	/* Only call if we're doing a short/long cal, not for ANI calibration */
	if (shortCal || longCal) {
		if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) {
			if (longCal) {
				/*
				 * Calibrate noise floor data again in case of change.
				 */
				ath_hal_process_noisefloor(ah);
			}
		} else {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: calibration of channel %u failed\n",
			    __func__, sc->sc_curchan->ic_freq);
			sc->sc_stats.ast_per_calfail++;
		}
		if (shortCal)
			sc->sc_lastshortcal = ticks;
	}
	if (!isCalDone) {
restart:
		/*
		 * Use a shorter interval to potentially collect multiple
		 * data samples required to complete calibration.  Once
		 * we're told the work is done we drop back to a longer
		 * interval between requests.  We're more aggressive doing
		 * work when operating as an AP to improve operation right
		 * after startup.
		 */
		sc->sc_lastshortcal = ticks;
		nextcal = ath_shortcalinterval*hz/1000;
		if (sc->sc_opmode != HAL_M_HOSTAP)
			nextcal *= 10;
		sc->sc_doresetcal = AH_TRUE;
	} else {
		/* nextcal should be the shortest time for next event */
		nextcal = ath_longcalinterval*hz;
		if (sc->sc_lastcalreset == 0)
			sc->sc_lastcalreset = sc->sc_lastlongcal;
		else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz)
			sc->sc_resetcal = 1;	/* setup reset next trip */
		sc->sc_doresetcal = AH_FALSE;
	}
	/* ANI calibration may occur more often than short/long/resetcal */
	if (ath_anicalinterval > 0)
		nextcal = MIN(nextcal, ath_anicalinterval*hz/1000);

	if (nextcal != 0) {
		DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n",
		    __func__, nextcal, isCalDone ? "" : "!");
		callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc);
	} else {
		DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n",
		    __func__);
		/* NB: don't rearm timer */
	}
}

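/*
 * Worked example of the interval arithmetic above (values are only
 * illustrative; the actual intervals are tunable): with hz = 1000 and
 * a 100ms short calibration interval, ath_shortcalinterval*hz/1000 =
 * 100 ticks, while a 30 second long calibration interval gives
 * ath_longcalinterval*hz = 30000 ticks.  While !isCalDone the callout
 * is re-armed at the short interval (x10 when not in HOSTAP mode), and
 * ANI, if enabled, can clamp nextcal lower still via the MIN() above.
 */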
static void
ath_scan_start(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t rfilt;

	/* XXX calibration timer? */

	ATH_LOCK(sc);
	sc->sc_scanning = 1;
	sc->sc_syncbeacon = 0;
	rfilt = ath_calcrxfilter(sc);
	ATH_UNLOCK(sc);

	ATH_PCU_LOCK(sc);
	ath_hal_setrxfilter(ah, rfilt);
	ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0);
	ATH_PCU_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
	    __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr));
}

static void
ath_scan_end(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t rfilt;

	ATH_LOCK(sc);
	sc->sc_scanning = 0;
	rfilt = ath_calcrxfilter(sc);
	ATH_UNLOCK(sc);

	ATH_PCU_LOCK(sc);
	ath_hal_setrxfilter(ah, rfilt);
	ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);

	ath_hal_process_noisefloor(ah);
	ATH_PCU_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
	    __func__, rfilt, ether_sprintf(sc->sc_curbssid),
	    sc->sc_curaid);
}

#ifdef ATH_ENABLE_11N
/*
 * For now, just do a channel change.
 *
 * Later, we'll go through the hard slog of suspending tx/rx, changing rate
 * control state and resetting the hardware without dropping frames out
 * of the queue.
 *
 * The unfortunate trouble here is making absolutely sure that the
 * channel width change has propagated enough so the hardware
 * absolutely isn't handed bogus frames for its current operating
 * mode.  (E.g., 40MHz frames in 20MHz mode.)  Since TX and RX can and
 * do occur in parallel, we need to make certain we've blocked
 * any further ongoing TX (and RX, which can cause raw TX)
 * before we do this.
 */
static void
ath_update_chw(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__);
	ath_set_channel(ic);
}
#endif /* ATH_ENABLE_11N */

static void
ath_set_channel(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	(void) ath_chan_set(sc, ic->ic_curchan);
	/*
	 * If we are returning to our bss channel then mark state
	 * so the next recv'd beacon's tsf will be used to sync the
	 * beacon timers.  Note that since we only hear beacons in
	 * sta/ibss mode this has no effect in other operating modes.
	 */
	ATH_LOCK(sc);
	if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
		sc->sc_syncbeacon = 1;
	ATH_UNLOCK(sc);
}

/*
 * Walk the vap list and check if there are any vaps in the RUN state.
 */
static int
ath_isanyrunningvaps(struct ieee80211vap *this)
{
	struct ieee80211com *ic = this->iv_ic;
	struct ieee80211vap *vap;

	IEEE80211_LOCK_ASSERT(ic);

	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
		if (vap != this && vap->iv_state >= IEEE80211_S_RUN)
			return 1;
	}
	return 0;
}

static int
ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_node *ni = NULL;
	int i, error, stamode;
	u_int32_t rfilt;
	int csa_run_transition = 0;
	static const HAL_LED_STATE leds[] = {
		HAL_LED_INIT,	/* IEEE80211_S_INIT */
		HAL_LED_SCAN,	/* IEEE80211_S_SCAN */
		HAL_LED_AUTH,	/* IEEE80211_S_AUTH */
		HAL_LED_ASSOC,	/* IEEE80211_S_ASSOC */
		HAL_LED_RUN,	/* IEEE80211_S_CAC */
		HAL_LED_RUN,	/* IEEE80211_S_RUN */
		HAL_LED_RUN,	/* IEEE80211_S_CSA */
		HAL_LED_RUN,	/* IEEE80211_S_SLEEP */
	};

	DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);

	/*
	 * net80211 _should_ have the comlock asserted at this point.
	 * There are some comments around the calls to vap->iv_newstate
	 * which indicate that it (newstate) may end up dropping the
	 * lock.  This and the subsequent lock assert check after newstate
	 * are an attempt to catch these and figure out how/why.
	 */
	IEEE80211_LOCK_ASSERT(ic);

	if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN)
		csa_run_transition = 1;

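	/*
	 * NB: a CSA -> RUN transition means a channel switch has just
	 * completed; this is consumed in the IEEE80211_M_STA case below
	 * to force a beacon (re)config so a beaconless AP still in CAC
	 * is noticed promptly.
	 */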
	callout_drain(&sc->sc_cal_ch);
	ath_hal_setledstate(ah, leds[nstate]);	/* set LED */

	if (nstate == IEEE80211_S_SCAN) {
		/*
		 * Scanning: turn off beacon miss and don't beacon.
		 * Mark beacon state so when we reach RUN state we'll
		 * [re]setup beacons.  Unblock the task q thread so
		 * deferred interrupt processing is done.
		 */
		ath_hal_intrset(ah,
		    sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
		sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
		sc->sc_beacons = 0;
		taskqueue_unblock(sc->sc_tq);
	}

	ni = ieee80211_ref_node(vap->iv_bss);
	rfilt = ath_calcrxfilter(sc);
	stamode = (vap->iv_opmode == IEEE80211_M_STA ||
	    vap->iv_opmode == IEEE80211_M_AHDEMO ||
	    vap->iv_opmode == IEEE80211_M_IBSS);
	if (stamode && nstate == IEEE80211_S_RUN) {
		sc->sc_curaid = ni->ni_associd;
		IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
		ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
	}
	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
	    __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
	ath_hal_setrxfilter(ah, rfilt);

	/* XXX is this to restore keycache on resume? */
	if (vap->iv_opmode != IEEE80211_M_STA &&
	    (vap->iv_flags & IEEE80211_F_PRIVACY)) {
		for (i = 0; i < IEEE80211_WEP_NKID; i++)
			if (ath_hal_keyisvalid(ah, i))
				ath_hal_keysetmac(ah, i, ni->ni_bssid);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = avp->av_newstate(vap, nstate, arg);
	if (error != 0)
		goto bad;

	/*
	 * See above: ensure av_newstate() doesn't drop the lock
	 * on us.
	 */
	IEEE80211_LOCK_ASSERT(ic);

	if (nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ieee80211_free_node(ni);
		ni = ieee80211_ref_node(vap->iv_bss);

		DPRINTF(sc, ATH_DEBUG_STATE,
		    "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n", __func__,
		    vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid),
		    ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));

		switch (vap->iv_opmode) {
#ifdef IEEE80211_SUPPORT_TDMA
		case IEEE80211_M_AHDEMO:
			if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
				break;
			/* fall thru... */
#endif
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_IBSS:
		case IEEE80211_M_MBSS:
			/*
			 * Allocate and setup the beacon frame.
			 *
			 * Stop any previous beacon DMA.  This may be
			 * necessary, for example, when an ibss merge
			 * causes reconfiguration; there will be a state
			 * transition from RUN->RUN that means we may
			 * be called with beacon transmission active.
			 */
			ath_hal_stoptxdma(ah, sc->sc_bhalq);

			error = ath_beacon_alloc(sc, ni);
			if (error != 0)
				goto bad;
			/*
			 * If joining an adhoc network defer beacon timer
			 * configuration to the next beacon frame so we
			 * have a current TSF to use.  Otherwise we're
			 * starting an ibss/bss so there's no need to delay;
			 * if this is the first vap moving to RUN state, then
			 * beacon state needs to be [re]configured.
			 */
			if (vap->iv_opmode == IEEE80211_M_IBSS &&
			    ni->ni_tstamp.tsf != 0) {
				sc->sc_syncbeacon = 1;
			} else if (!sc->sc_beacons) {
#ifdef IEEE80211_SUPPORT_TDMA
				if (vap->iv_caps & IEEE80211_C_TDMA)
					ath_tdma_config(sc, vap);
				else
#endif
					ath_beacon_config(sc, vap);
				sc->sc_beacons = 1;
			}
			break;
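		/*
		 * NB: setting sc_syncbeacon defers (re)programming of
		 * the beacon timers until the next received beacon
		 * supplies a fresh TSF; compare ath_set_channel()
		 * above, which uses the same trick when returning to
		 * the bss channel.
		 */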
		case IEEE80211_M_STA:
			/*
			 * Defer beacon timer configuration to the next
			 * beacon frame so we have a current TSF to use
			 * (any TSF collected when scanning is likely old).
			 * However if it's due to a CSA -> RUN transition,
			 * force a beacon update so we pick up a lack of
			 * beacons from an AP in CAC and thus force a
			 * scan.
			 */
			sc->sc_syncbeacon = 1;
			if (csa_run_transition)
				ath_beacon_config(sc, vap);
			break;
		case IEEE80211_M_MONITOR:
			/*
			 * Monitor mode vaps have only INIT->RUN and RUN->RUN
			 * transitions so we must re-enable interrupts here to
			 * handle the case of a single monitor mode vap.
			 */
			ath_hal_intrset(ah, sc->sc_imask);
			break;
		case IEEE80211_M_WDS:
			break;
		default:
			break;
		}
		/*
		 * Let the hal process statistics collected during a
		 * scan so it can provide calibrated noise floor data.
		 */
		ath_hal_process_noisefloor(ah);
		/*
		 * Reset rssi stats; maybe not the best place...
		 */
		sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
		/*
		 * Finally, start any timers and the task q thread
		 * (in case we didn't go through SCAN state).
		 */
		if (ath_longcalinterval != 0) {
			/* start periodic recalibration timer */
			callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
		} else {
			DPRINTF(sc, ATH_DEBUG_CALIBRATE,
			    "%s: calibration disabled\n", __func__);
		}
		taskqueue_unblock(sc->sc_tq);
	} else if (nstate == IEEE80211_S_INIT) {
		/*
		 * If there are no vaps left in RUN state then
		 * shutdown host/driver operation:
		 * o disable interrupts
		 * o disable the task queue thread
		 * o mark beacon processing as stopped
		 */
		if (!ath_isanyrunningvaps(vap)) {
			sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
			/* disable interrupts */
			ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
			taskqueue_block(sc->sc_tq);
			sc->sc_beacons = 0;
		}
#ifdef IEEE80211_SUPPORT_TDMA
		ath_hal_setcca(ah, AH_TRUE);
#endif
	}
bad:
	ieee80211_free_node(ni);
	return error;
}

/*
 * Allocate a key cache slot to the station so we can
 * setup a mapping from key index to node.  The key cache
 * slot is needed for managing antenna state and for
 * compression when stations do not use crypto.  We do
 * it unilaterally here; if crypto is employed this slot
 * will be reassigned.
 */
static void
ath_setup_stationkey(struct ieee80211_node *ni)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	ieee80211_keyix keyix, rxkeyix;

	/* XXX should take a locked ref to vap->iv_bss */
	if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
		/*
		 * Key cache is full; we'll fall back to doing
		 * the more expensive lookup in software.  Note
		 * this also means no h/w compression.
		 */
		/* XXX msg+statistic */
	} else {
		/* XXX locking? */
		ni->ni_ucastkey.wk_keyix = keyix;
		ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
		/* NB: must mark device key to get called back on delete */
		ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
		IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
		/* NB: this will create a pass-thru key entry */
		ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss);
	}
}

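/*
 * NB: ath_setup_stationkey() is only invoked from ath_newassoc() below,
 * and only when the vap is not using crypto (IEEE80211_F_PRIVACY clear)
 * on hardware with the clear-key search capability (sc_hasclrkey).
 */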
/*
 * Setup driver-specific state for a newly associated node.
 * Note that we're also called on re-associate; the isnew
 * param tells us if this is the first time or not.
 */
static void
ath_newassoc(struct ieee80211_node *ni, int isnew)
{
	struct ath_node *an = ATH_NODE(ni);
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	const struct ieee80211_txparam *tp = ni->ni_txparms;

	an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
	an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);

	ath_rate_newassoc(sc, an, isnew);
	if (isnew &&
	    (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
	    ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
		ath_setup_stationkey(ni);
}

static int
ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
	int nchans, struct ieee80211_channel chans[])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
	    "%s: rd %u cc %u location %c%s\n",
	    __func__, reg->regdomain, reg->country, reg->location,
	    reg->ecm ? " ecm" : "");

	status = ath_hal_set_channels(ah, chans, nchans,
	    reg->country, reg->regdomain);
	if (status != HAL_OK) {
		DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
		    __func__, status);
		return EINVAL;		/* XXX */
	}

	return 0;
}

static void
ath_getradiocaps(struct ieee80211com *ic,
	int maxchans, int *nchans, struct ieee80211_channel chans[])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
	    __func__, SKU_DEBUG, CTRY_DEFAULT);

	/* XXX check return */
	(void) ath_hal_getchannels(ah, chans, maxchans, nchans,
	    HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
}

static int
ath_getchannels(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	/*
	 * Collect channel set based on EEPROM contents.
	 */
	status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
	    &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
	if (status != HAL_OK) {
		if_printf(ifp, "%s: unable to collect channel list from hal, "
		    "status %d\n", __func__, status);
		return EINVAL;
	}
	(void) ath_hal_getregdomain(ah, &sc->sc_eerd);
	ath_hal_getcountrycode(ah, &sc->sc_eecc);	/* NB: cannot fail */
	/* XXX map Atheros sku's to net80211 SKU's */
	/* XXX net80211 types too small */
	ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
	ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
	ic->ic_regdomain.isocc[0] = ' ';	/* XXX don't know */
	ic->ic_regdomain.isocc[1] = ' ';

	ic->ic_regdomain.ecm = 1;
	ic->ic_regdomain.location = 'I';

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
	    "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
	    __func__, sc->sc_eerd, sc->sc_eecc,
	    ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
	    ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : "");
	return 0;
}

" ecm" : ""); 4677 return 0; 4678} 4679 4680static int 4681ath_rate_setup(struct ath_softc *sc, u_int mode) 4682{ 4683 struct ath_hal *ah = sc->sc_ah; 4684 const HAL_RATE_TABLE *rt; 4685 4686 switch (mode) { 4687 case IEEE80211_MODE_11A: 4688 rt = ath_hal_getratetable(ah, HAL_MODE_11A); 4689 break; 4690 case IEEE80211_MODE_HALF: 4691 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE); 4692 break; 4693 case IEEE80211_MODE_QUARTER: 4694 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE); 4695 break; 4696 case IEEE80211_MODE_11B: 4697 rt = ath_hal_getratetable(ah, HAL_MODE_11B); 4698 break; 4699 case IEEE80211_MODE_11G: 4700 rt = ath_hal_getratetable(ah, HAL_MODE_11G); 4701 break; 4702 case IEEE80211_MODE_TURBO_A: 4703 rt = ath_hal_getratetable(ah, HAL_MODE_108A); 4704 break; 4705 case IEEE80211_MODE_TURBO_G: 4706 rt = ath_hal_getratetable(ah, HAL_MODE_108G); 4707 break; 4708 case IEEE80211_MODE_STURBO_A: 4709 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); 4710 break; 4711 case IEEE80211_MODE_11NA: 4712 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20); 4713 break; 4714 case IEEE80211_MODE_11NG: 4715 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20); 4716 break; 4717 default: 4718 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", 4719 __func__, mode); 4720 return 0; 4721 } 4722 sc->sc_rates[mode] = rt; 4723 return (rt != NULL); 4724} 4725 4726static void 4727ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 4728{ 4729#define N(a) (sizeof(a)/sizeof(a[0])) 4730 /* NB: on/off times from the Atheros NDIS driver, w/ permission */ 4731 static const struct { 4732 u_int rate; /* tx/rx 802.11 rate */ 4733 u_int16_t timeOn; /* LED on time (ms) */ 4734 u_int16_t timeOff; /* LED off time (ms) */ 4735 } blinkrates[] = { 4736 { 108, 40, 10 }, 4737 { 96, 44, 11 }, 4738 { 72, 50, 13 }, 4739 { 48, 57, 14 }, 4740 { 36, 67, 16 }, 4741 { 24, 80, 20 }, 4742 { 22, 100, 25 }, 4743 { 18, 133, 34 }, 4744 { 12, 160, 40 }, 4745 { 10, 200, 50 }, 4746 { 6, 240, 58 }, 4747 { 4, 267, 66 }, 4748 { 2, 400, 100 }, 4749 { 0, 500, 130 }, 4750 /* XXX half/quarter rates */ 4751 }; 4752 const HAL_RATE_TABLE *rt; 4753 int i, j; 4754 4755 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); 4756 rt = sc->sc_rates[mode]; 4757 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); 4758 for (i = 0; i < rt->rateCount; i++) { 4759 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 4760 if (rt->info[i].phy != IEEE80211_T_HT) 4761 sc->sc_rixmap[ieeerate] = i; 4762 else 4763 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i; 4764 } 4765 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); 4766 for (i = 0; i < N(sc->sc_hwmap); i++) { 4767 if (i >= rt->rateCount) { 4768 sc->sc_hwmap[i].ledon = (500 * hz) / 1000; 4769 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000; 4770 continue; 4771 } 4772 sc->sc_hwmap[i].ieeerate = 4773 rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 4774 if (rt->info[i].phy == IEEE80211_T_HT) 4775 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS; 4776 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; 4777 if (rt->info[i].shortPreamble || 4778 rt->info[i].phy == IEEE80211_T_OFDM) 4779 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE; 4780 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags; 4781 for (j = 0; j < N(blinkrates)-1; j++) 4782 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate) 4783 break; 4784 /* NB: this uses the last entry if the rate isn't found */ 4785 /* XXX beware of overlow */ 4786 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000; 4787 
	sc->sc_currates = rt;
	sc->sc_curmode = mode;
	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 */
	if (mode == IEEE80211_MODE_11G)
		sc->sc_protrix = ath_tx_findrix(sc, 2*2);
	else
		sc->sc_protrix = ath_tx_findrix(sc, 2*1);
	/* NB: caller is responsible for resetting rate control state */
#undef N
}

static void
ath_watchdog(void *arg)
{
	struct ath_softc *sc = arg;
	int do_reset = 0;

	if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
		struct ifnet *ifp = sc->sc_ifp;
		uint32_t hangs;

		if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
		    hangs != 0) {
			if_printf(ifp, "%s hang detected (0x%x)\n",
			    hangs & 0xff ? "bb" : "mac", hangs);
		} else
			if_printf(ifp, "device timeout\n");
		do_reset = 1;
		ifp->if_oerrors++;
		sc->sc_stats.ast_watchdog++;
	}

	/*
	 * We can't hold the lock across the ath_reset() call.
	 *
	 * And since this routine can't hold a lock and sleep,
	 * do the reset deferred.
	 */
	if (do_reset) {
		taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
	}

	callout_schedule(&sc->sc_wd_ch, hz);
}

#ifdef ATH_DIAGAPI
/*
 * Diagnostic interface to the HAL.  This is used by various
 * tools to do things like retrieve register contents for
 * debugging.  The mechanism is intentionally opaque so that
 * it can change frequently w/o concern for compatibility.
 */
static int
ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
{
	struct ath_hal *ah = sc->sc_ah;
	u_int id = ad->ad_id & ATH_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = ad->ad_in_size;
	u_int32_t outsize = ad->ad_out_size;
	int error = 0;

	if (ad->ad_id & ATH_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(ad->ad_in_data, indata, insize);
		if (error)
			goto bad;
	}
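	/*
	 * NB: ad_id encodes both the diagnostic code (ATH_DIAG_ID mask)
	 * and the transfer style: ATH_DIAG_IN means a request buffer is
	 * copied in from userland, ATH_DIAG_DYN means the response
	 * buffer is allocated here rather than provided by the HAL.
	 */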
	if (ad->ad_id & ATH_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
		if (outsize < ad->ad_out_size)
			ad->ad_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, ad->ad_out_data,
			    ad->ad_out_size);
	} else {
		error = EINVAL;
	}
bad:
	if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
#endif /* ATH_DIAGAPI */

static int
ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	const HAL_RATE_TABLE *rt;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		ATH_LOCK(sc);
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call ath_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			ath_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid)
				ath_init(sc);	/* XXX lose error */
		} else {
			ath_stop_locked(ifp);
#ifdef notyet
			/* XXX must wakeup in places like ath_vap_delete */
			if (!sc->sc_invalid)
				ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
#endif
		}
		ATH_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGATHSTATS:
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.ast_tx_packets = ifp->if_opackets;
		sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
		sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
		sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
#ifdef IEEE80211_SUPPORT_TDMA
		sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap);
		sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam);
#endif
		rt = sc->sc_currates;
		sc->sc_stats.ast_tx_rate =
		    rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
		if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT)
			sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS;
		return copyout(&sc->sc_stats,
		    ifr->ifr_data, sizeof (sc->sc_stats));
	case SIOCGATHAGSTATS:
		return copyout(&sc->sc_aggr_stats,
		    ifr->ifr_data, sizeof (sc->sc_aggr_stats));
	case SIOCZATHSTATS:
		error = priv_check(curthread, PRIV_DRIVER);
		if (error == 0) {
			memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
			memset(&sc->sc_aggr_stats, 0,
			    sizeof(sc->sc_aggr_stats));
			memset(&sc->sc_intr_stats, 0,
			    sizeof(sc->sc_intr_stats));
		}
		break;
#ifdef ATH_DIAGAPI
	case SIOCGATHDIAG:
		error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
		break;
	case SIOCGATHPHYERR:
		error = ath_ioctl_phyerr(sc, (struct ath_diag *) ifr);
		break;
#endif
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}

/*
 * Announce various information on device/driver attach.
 */
static void
ath_announce(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;

	if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n",
	    ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
	    ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
	if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n",
	    ah->ah_analog2GhzRev, ah->ah_analog5GhzRev);
	if (bootverbose) {
		int i;
		for (i = 0; i <= WME_AC_VO; i++) {
			struct ath_txq *txq = sc->sc_ac2q[i];
			if_printf(ifp, "Use hw queue %u for %s traffic\n",
			    txq->axq_qnum, ieee80211_wme_acnames[i]);
		}
		if_printf(ifp, "Use hw queue %u for CAB traffic\n",
		    sc->sc_cabq->axq_qnum);
		if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
	}
	if (ath_rxbuf != ATH_RXBUF)
		if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
	if (ath_txbuf != ATH_TXBUF)
		if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
	if (sc->sc_mcastkey && bootverbose)
		if_printf(ifp, "using multicast key search\n");
}

static void
ath_dfs_tasklet(void *p, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) p;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	/*
	 * If previous processing has found a radar event,
	 * signal this to the net80211 layer to begin DFS
	 * processing.
	 */
	if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
		/* DFS event found, initiate channel change */
		/*
		 * XXX doesn't currently tell us whether the event
		 * XXX was found in the primary or extension
		 * XXX channel!
		 */
		IEEE80211_LOCK(ic);
		ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
		IEEE80211_UNLOCK(ic);
	}
}

MODULE_VERSION(if_ath, 1);
MODULE_DEPEND(if_ath, wlan, 1, 1, 1);		/* 802.11 media layer */
#if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ)
MODULE_DEPEND(if_ath, alq, 1, 1, 1);
#endif