/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 283537 2015-05-25 18:50:26Z glebius $");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>	/* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_rx_edma.h>
#include <dev/ath/if_ath_tx_edma.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_ath_btcoex.h>
#include <dev/ath/if_ath_spectral.h>
#include <dev/ath/if_ath_lna_div.h>
#include <dev/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * Only enable this if you're working on PS-POLL support.
 */
#define	ATH_SW_PSQ

/*
 * ATH_BCBUF determines the number of vap's that can transmit
 * beacons and also (currently) the number of vap's that can
 * have unique mac addresses/bssid.  When staggering beacons
 * 4 is probably a good max as otherwise the beacons become
 * very closely spaced and there is limited time for cab q traffic
 * to go out.  You can burst beacons instead but that is not good
 * for stations in power save and at some point you really want
 * another radio (and channel).
 *
 * The limit on the number of mac addresses is tied to our use of
 * the U/L bit and tracking addresses in a byte; it would be
 * worthwhile to allow more for applications like proxy sta.
 */
CTASSERT(ATH_BCBUF <= 8);

static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_vap_delete(struct ieee80211vap *);
static void	ath_init(void *);
static void	ath_stop_locked(struct ifnet *);
static void	ath_stop(struct ifnet *);
static int	ath_reset_vap(struct ieee80211vap *, u_long);
static int	ath_transmit(struct ifnet *ifp, struct mbuf *m);
static void	ath_qflush(struct ifnet *ifp);
static int	ath_media_change(struct ifnet *);
static void	ath_watchdog(void *);
static int	ath_ioctl(struct ifnet *, u_long, caddr_t);
static void	ath_fatal_proc(void *, int);
static void	ath_bmiss_vap(struct ieee80211vap *);
static void	ath_bmiss_proc(void *, int);
static void	ath_key_update_begin(struct ieee80211vap *);
static void	ath_key_update_end(struct ieee80211vap *);
static void	ath_update_mcast_hw(struct ath_softc *);
static void	ath_update_mcast(struct ifnet *);
static void	ath_update_promisc(struct ifnet *);
static void	ath_updateslot(struct ifnet *);
static void	ath_bstuck_proc(void *, int);
static void	ath_reset_proc(void *, int);
static int	ath_desc_alloc(struct ath_softc *);
static void	ath_desc_free(struct ath_softc *);
static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
			const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_node_cleanup(struct ieee80211_node *);
static void	ath_node_free(struct ieee80211_node *);
static void	ath_node_getsignal(const struct ieee80211_node *,
			int8_t *, int8_t *);
static void	ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
static struct ath_txq *ath_txq_setup(struct ath_softc *, int qtype, int subtype);
static int	ath_tx_setup(struct ath_softc *, int, int);
static void	ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
static void	ath_tx_cleanup(struct ath_softc *);
static int	ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq,
		    int dosched);
static void	ath_tx_proc_q0(void *, int);
static void	ath_tx_proc_q0123(void *, int);
static void	ath_tx_proc(void *, int);
static void	ath_txq_sched_tasklet(void *, int);
static int	ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
static void	ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
static void	ath_scan_start(struct ieee80211com *);
static void	ath_scan_end(struct ieee80211com *);
static void	ath_set_channel(struct ieee80211com *);
#ifdef	ATH_ENABLE_11N
static void	ath_update_chw(struct ieee80211com *);
#endif	/* ATH_ENABLE_11N */
static void	ath_calibrate(void *);
static int	ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	ath_setup_stationkey(struct ieee80211_node *);
static void	ath_newassoc(struct ieee80211_node *, int);
static int	ath_setregdomain(struct ieee80211com *,
		    struct ieee80211_regdomain *, int,
		    struct ieee80211_channel []);
static void	ath_getradiocaps(struct ieee80211com *, int, int *,
		    struct ieee80211_channel []);
static int	ath_getchannels(struct ath_softc *);

static int	ath_rate_setup(struct ath_softc *, u_int mode);
static void	ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);

static void	ath_announce(struct ath_softc *);

static void	ath_dfs_tasklet(void *, int);
static void	ath_node_powersave(struct ieee80211_node *, int);
static int	ath_node_set_tim(struct ieee80211_node *, int);
static void	ath_node_recv_pspoll(struct ieee80211_node *, struct mbuf *);

#ifdef IEEE80211_SUPPORT_TDMA
#include <dev/ath/if_ath_tdma.h>
#endif

SYSCTL_DECL(_hw_ath);

/* XXX validate sysctl values */
static int ath_longcalinterval = 30;		/* long cals every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
	    0, "long chip calibration interval (secs)");
static int ath_shortcalinterval = 100;		/* short cals every 100 ms */
SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
	    0, "short chip calibration interval (msecs)");
static int ath_resetcalinterval = 20*60;	/* reset cal state 20 mins */
SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
	    0, "reset chip calibration results (secs)");
static int ath_anicalinterval = 100;		/* ANI calibration - 100 msec */
SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
	    0, "ANI calibration (msecs)");

int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &ath_rxbuf,
	    0, "rx buffers allocated");
int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RWTUN, &ath_txbuf,
	    0, "tx buffers allocated");
int ath_txbuf_mgmt = ATH_MGMT_TXBUF;	/* # mgmt tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RWTUN, &ath_txbuf_mgmt,
	    0, "tx (mgmt) buffers allocated");

int ath_bstuck_threshold = 4;		/* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
	    0, "max missed beacon xmits before chip reset");

MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

void
ath_legacy_attach_comp_func(struct ath_softc *sc)
{

	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 */
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
	case 0x01:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		break;
	case 0x0f:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		break;
	default:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
		break;
	}
}
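
/*
 * NB: sc_txqsetup is a bitmask of the hardware TX queues that were
 * configured, so (for example) a single data queue leaves 0x01 once
 * the CAB queue bit is masked off, and the four WME data queues
 * (q0-q3) leave 0x0f; those two layouts get the specialised
 * completion handlers above, everything else uses the generic one.
 */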
314 */ 315 if (sc->sc_cur_powerstate == HAL_PM_AWAKE && 316 sc->sc_target_selfgen_state != HAL_PM_AWAKE) { 317 ath_hal_setselfgenpower(sc->sc_ah, 318 sc->sc_target_selfgen_state); 319 } 320 } 321} 322 323/* 324 * Set the current self-generated frames state. 325 * 326 * This is separate from the target power mode. The chip may be 327 * awake but the desired state is "sleep", so frames sent to the 328 * destination has PWRMGT=1 in the 802.11 header. The NIC also 329 * needs to know to set PWRMGT=1 in self-generated frames. 330 */ 331void 332_ath_power_set_selfgen(struct ath_softc *sc, int power_state, const char *file, int line) 333{ 334 335 ATH_LOCK_ASSERT(sc); 336 337 DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n", 338 __func__, 339 file, 340 line, 341 power_state, 342 sc->sc_target_selfgen_state); 343 344 sc->sc_target_selfgen_state = power_state; 345 346 /* 347 * If the NIC is force-awake, then set the power state. 348 * Network-state and full-sleep will already transition it to 349 * mark self-gen frames as sleeping - and we can't 350 * guarantee the NIC is awake to program the self-gen frame 351 * setting anyway. 352 */ 353 if (sc->sc_cur_powerstate == HAL_PM_AWAKE) { 354 ath_hal_setselfgenpower(sc->sc_ah, power_state); 355 } 356} 357 358/* 359 * Set the hardware power mode and take a reference. 360 * 361 * This doesn't update the target power mode in the driver; 362 * it just updates the hardware power state. 363 * 364 * XXX it should only ever force the hardware awake; it should 365 * never be called to set it asleep. 366 */ 367void 368_ath_power_set_power_state(struct ath_softc *sc, int power_state, const char *file, int line) 369{ 370 ATH_LOCK_ASSERT(sc); 371 372 DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n", 373 __func__, 374 file, 375 line, 376 power_state, 377 sc->sc_powersave_refcnt); 378 379 sc->sc_powersave_refcnt++; 380 381 if (power_state != sc->sc_cur_powerstate) { 382 ath_hal_setpower(sc->sc_ah, power_state); 383 sc->sc_cur_powerstate = power_state; 384 385 /* 386 * Adjust the self-gen powerstate if appropriate. 387 */ 388 if (sc->sc_cur_powerstate == HAL_PM_AWAKE && 389 sc->sc_target_selfgen_state != HAL_PM_AWAKE) { 390 ath_hal_setselfgenpower(sc->sc_ah, 391 sc->sc_target_selfgen_state); 392 } 393 394 } 395} 396 397/* 398 * Restore the power save mode to what it once was. 399 * 400 * This will decrement the reference counter and once it hits 401 * zero, it'll restore the powersave state. 402 */ 403void 404_ath_power_restore_power_state(struct ath_softc *sc, const char *file, int line) 405{ 406 407 ATH_LOCK_ASSERT(sc); 408 409 DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) refcnt=%d, target state=%d\n", 410 __func__, 411 file, 412 line, 413 sc->sc_powersave_refcnt, 414 sc->sc_target_powerstate); 415 416 if (sc->sc_powersave_refcnt == 0) 417 device_printf(sc->sc_dev, "%s: refcnt=0?\n", __func__); 418 else 419 sc->sc_powersave_refcnt--; 420 421 if (sc->sc_powersave_refcnt == 0 && 422 sc->sc_target_powerstate != sc->sc_cur_powerstate) { 423 sc->sc_cur_powerstate = sc->sc_target_powerstate; 424 ath_hal_setpower(sc->sc_ah, sc->sc_target_powerstate); 425 } 426 427 /* 428 * Adjust the self-gen powerstate if appropriate. 429 */ 430 if (sc->sc_cur_powerstate == HAL_PM_AWAKE && 431 sc->sc_target_selfgen_state != HAL_PM_AWAKE) { 432 ath_hal_setselfgenpower(sc->sc_ah, 433 sc->sc_target_selfgen_state); 434 } 435 436} 437 438/* 439 * Configure the initial HAL configuration values based on bus 440 * specific parameters. 
441 * 442 * Some PCI IDs and other information may need tweaking. 443 * 444 * XXX TODO: ath9k and the Atheros HAL only program comm2g_switch_enable 445 * if BT antenna diversity isn't enabled. 446 * 447 * So, let's also figure out how to enable BT diversity for AR9485. 448 */ 449static void 450ath_setup_hal_config(struct ath_softc *sc, HAL_OPS_CONFIG *ah_config) 451{ 452 /* XXX TODO: only for PCI devices? */ 453 454 if (sc->sc_pci_devinfo & (ATH_PCI_CUS198 | ATH_PCI_CUS230)) { 455 ah_config->ath_hal_ext_lna_ctl_gpio = 0x200; /* bit 9 */ 456 ah_config->ath_hal_ext_atten_margin_cfg = AH_TRUE; 457 ah_config->ath_hal_min_gainidx = AH_TRUE; 458 ah_config->ath_hal_ant_ctrl_comm2g_switch_enable = 0x000bbb88; 459 /* XXX low_rssi_thresh */ 460 /* XXX fast_div_bias */ 461 device_printf(sc->sc_dev, "configuring for %s\n", 462 (sc->sc_pci_devinfo & ATH_PCI_CUS198) ? 463 "CUS198" : "CUS230"); 464 } 465 466 if (sc->sc_pci_devinfo & ATH_PCI_CUS217) 467 device_printf(sc->sc_dev, "CUS217 card detected\n"); 468 469 if (sc->sc_pci_devinfo & ATH_PCI_CUS252) 470 device_printf(sc->sc_dev, "CUS252 card detected\n"); 471 472 if (sc->sc_pci_devinfo & ATH_PCI_AR9565_1ANT) 473 device_printf(sc->sc_dev, "WB335 1-ANT card detected\n"); 474 475 if (sc->sc_pci_devinfo & ATH_PCI_AR9565_2ANT) 476 device_printf(sc->sc_dev, "WB335 2-ANT card detected\n"); 477 478 if (sc->sc_pci_devinfo & ATH_PCI_KILLER) 479 device_printf(sc->sc_dev, "Killer Wireless card detected\n"); 480 481#if 0 482 /* 483 * Some WB335 cards do not support antenna diversity. Since 484 * we use a hardcoded value for AR9565 instead of using the 485 * EEPROM/OTP data, remove the combining feature from 486 * the HW capabilities bitmap. 487 */ 488 if (sc->sc_pci_devinfo & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) { 489 if (!(sc->sc_pci_devinfo & ATH9K_PCI_BT_ANT_DIV)) 490 pCap->hw_caps &= ~ATH9K_HW_CAP_ANT_DIV_COMB; 491 } 492 493 if (sc->sc_pci_devinfo & ATH9K_PCI_BT_ANT_DIV) { 494 pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV; 495 device_printf(sc->sc_dev, "Set BT/WLAN RX diversity capability\n"); 496 } 497#endif 498 499 if (sc->sc_pci_devinfo & ATH_PCI_D3_L1_WAR) { 500 ah_config->ath_hal_pcie_waen = 0x0040473b; 501 device_printf(sc->sc_dev, "Enable WAR for ASPM D3/L1\n"); 502 } 503 504#if 0 505 if (sc->sc_pci_devinfo & ATH9K_PCI_NO_PLL_PWRSAVE) { 506 ah->config.no_pll_pwrsave = true; 507 device_printf(sc->sc_dev, "Disable PLL PowerSave\n"); 508 } 509#endif 510 511} 512 513/* 514 * Attempt to fetch the MAC address from the kernel environment. 515 * 516 * Returns 0, macaddr in macaddr if successful; -1 otherwise. 517 */ 518static int 519ath_fetch_mac_kenv(struct ath_softc *sc, uint8_t *macaddr) 520{ 521 char devid_str[32]; 522 int local_mac = 0; 523 char *local_macstr; 524 525 /* 526 * Fetch from the kenv rather than using hints. 527 * 528 * Hints would be nice but the transition to dynamic 529 * hints/kenv doesn't happen early enough for this 530 * to work reliably (eg on anything embedded.) 
531 */ 532 snprintf(devid_str, 32, "hint.%s.%d.macaddr", 533 device_get_name(sc->sc_dev), 534 device_get_unit(sc->sc_dev)); 535 536 if ((local_macstr = kern_getenv(devid_str)) != NULL) { 537 uint32_t tmpmac[ETHER_ADDR_LEN]; 538 int count; 539 int i; 540 541 /* Have a MAC address; should use it */ 542 device_printf(sc->sc_dev, 543 "Overriding MAC address from environment: '%s'\n", 544 local_macstr); 545 546 /* Extract out the MAC address */ 547 count = sscanf(local_macstr, "%x%*c%x%*c%x%*c%x%*c%x%*c%x", 548 &tmpmac[0], &tmpmac[1], 549 &tmpmac[2], &tmpmac[3], 550 &tmpmac[4], &tmpmac[5]); 551 if (count == 6) { 552 /* Valid! */ 553 local_mac = 1; 554 for (i = 0; i < ETHER_ADDR_LEN; i++) 555 macaddr[i] = tmpmac[i]; 556 } 557 /* Done! */ 558 freeenv(local_macstr); 559 local_macstr = NULL; 560 } 561 562 if (local_mac) 563 return (0); 564 return (-1); 565} 566 567#define HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20) 568#define HAL_MODE_HT40 \ 569 (HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \ 570 HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS) 571int 572ath_attach(u_int16_t devid, struct ath_softc *sc) 573{ 574 struct ifnet *ifp; 575 struct ieee80211com *ic; 576 struct ath_hal *ah = NULL; 577 HAL_STATUS status; 578 int error = 0, i; 579 u_int wmodes; 580 uint8_t macaddr[IEEE80211_ADDR_LEN]; 581 int rx_chainmask, tx_chainmask; 582 HAL_OPS_CONFIG ah_config; 583 584 DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid); 585 586 CURVNET_SET(vnet0); 587 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 588 if (ifp == NULL) { 589 device_printf(sc->sc_dev, "can not if_alloc()\n"); 590 error = ENOSPC; 591 CURVNET_RESTORE(); 592 goto bad; 593 } 594 ic = ifp->if_l2com; 595 ic->ic_softc = sc; 596 ic->ic_name = device_get_nameunit(sc->sc_dev); 597 598 /* set these up early for if_printf use */ 599 if_initname(ifp, device_get_name(sc->sc_dev), 600 device_get_unit(sc->sc_dev)); 601 CURVNET_RESTORE(); 602 603 /* 604 * Configure the initial configuration data. 605 * 606 * This is stuff that may be needed early during attach 607 * rather than done via configuration calls later. 608 */ 609 bzero(&ah_config, sizeof(ah_config)); 610 ath_setup_hal_config(sc, &ah_config); 611 612 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, 613 sc->sc_eepromdata, &ah_config, &status); 614 if (ah == NULL) { 615 if_printf(ifp, "unable to attach hardware; HAL status %u\n", 616 status); 617 error = ENXIO; 618 goto bad; 619 } 620 sc->sc_ah = ah; 621 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */ 622#ifdef ATH_DEBUG 623 sc->sc_debug = ath_debug; 624#endif 625 626 /* 627 * Setup the DMA/EDMA functions based on the current 628 * hardware support. 629 * 630 * This is required before the descriptors are allocated. 631 */ 632 if (ath_hal_hasedma(sc->sc_ah)) { 633 sc->sc_isedma = 1; 634 ath_recv_setup_edma(sc); 635 ath_xmit_setup_edma(sc); 636 } else { 637 ath_recv_setup_legacy(sc); 638 ath_xmit_setup_legacy(sc); 639 } 640 641 if (ath_hal_hasmybeacon(sc->sc_ah)) { 642 sc->sc_do_mybeacon = 1; 643 } 644 645 /* 646 * Check if the MAC has multi-rate retry support. 647 * We do this by trying to setup a fake extended 648 * descriptor. MAC's that don't have support will 649 * return false w/o doing anything. MAC's that do 650 * support it will return true w/o doing anything. 651 */ 652 sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0); 653 654 /* 655 * Check if the device has hardware counters for PHY 656 * errors. If so we need to enable the MIB interrupt 657 * so we can act on stat triggers. 
658 */ 659 if (ath_hal_hwphycounters(ah)) 660 sc->sc_needmib = 1; 661 662 /* 663 * Get the hardware key cache size. 664 */ 665 sc->sc_keymax = ath_hal_keycachesize(ah); 666 if (sc->sc_keymax > ATH_KEYMAX) { 667 if_printf(ifp, "Warning, using only %u of %u key cache slots\n", 668 ATH_KEYMAX, sc->sc_keymax); 669 sc->sc_keymax = ATH_KEYMAX; 670 } 671 /* 672 * Reset the key cache since some parts do not 673 * reset the contents on initial power up. 674 */ 675 for (i = 0; i < sc->sc_keymax; i++) 676 ath_hal_keyreset(ah, i); 677 678 /* 679 * Collect the default channel list. 680 */ 681 error = ath_getchannels(sc); 682 if (error != 0) 683 goto bad; 684 685 /* 686 * Setup rate tables for all potential media types. 687 */ 688 ath_rate_setup(sc, IEEE80211_MODE_11A); 689 ath_rate_setup(sc, IEEE80211_MODE_11B); 690 ath_rate_setup(sc, IEEE80211_MODE_11G); 691 ath_rate_setup(sc, IEEE80211_MODE_TURBO_A); 692 ath_rate_setup(sc, IEEE80211_MODE_TURBO_G); 693 ath_rate_setup(sc, IEEE80211_MODE_STURBO_A); 694 ath_rate_setup(sc, IEEE80211_MODE_11NA); 695 ath_rate_setup(sc, IEEE80211_MODE_11NG); 696 ath_rate_setup(sc, IEEE80211_MODE_HALF); 697 ath_rate_setup(sc, IEEE80211_MODE_QUARTER); 698 699 /* NB: setup here so ath_rate_update is happy */ 700 ath_setcurmode(sc, IEEE80211_MODE_11A); 701 702 /* 703 * Allocate TX descriptors and populate the lists. 704 */ 705 error = ath_desc_alloc(sc); 706 if (error != 0) { 707 if_printf(ifp, "failed to allocate TX descriptors: %d\n", 708 error); 709 goto bad; 710 } 711 error = ath_txdma_setup(sc); 712 if (error != 0) { 713 if_printf(ifp, "failed to allocate TX descriptors: %d\n", 714 error); 715 goto bad; 716 } 717 718 /* 719 * Allocate RX descriptors and populate the lists. 720 */ 721 error = ath_rxdma_setup(sc); 722 if (error != 0) { 723 if_printf(ifp, "failed to allocate RX descriptors: %d\n", 724 error); 725 goto bad; 726 } 727 728 callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0); 729 callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0); 730 731 ATH_TXBUF_LOCK_INIT(sc); 732 733 sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT, 734 taskqueue_thread_enqueue, &sc->sc_tq); 735 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, 736 "%s taskq", ifp->if_xname); 737 738 TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc); 739 TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc); 740 TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc); 741 TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc); 742 TASK_INIT(&sc->sc_txqtask, 0, ath_txq_sched_tasklet, sc); 743 TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc); 744 745 /* 746 * Allocate hardware transmit queues: one queue for 747 * beacon frames and one data queue for each QoS 748 * priority. Note that the hal handles resetting 749 * these queues at the needed time. 
750 * 751 * XXX PS-Poll 752 */ 753 sc->sc_bhalq = ath_beaconq_setup(sc); 754 if (sc->sc_bhalq == (u_int) -1) { 755 if_printf(ifp, "unable to setup a beacon xmit queue!\n"); 756 error = EIO; 757 goto bad2; 758 } 759 sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0); 760 if (sc->sc_cabq == NULL) { 761 if_printf(ifp, "unable to setup CAB xmit queue!\n"); 762 error = EIO; 763 goto bad2; 764 } 765 /* NB: insure BK queue is the lowest priority h/w queue */ 766 if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) { 767 if_printf(ifp, "unable to setup xmit queue for %s traffic!\n", 768 ieee80211_wme_acnames[WME_AC_BK]); 769 error = EIO; 770 goto bad2; 771 } 772 if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) || 773 !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) || 774 !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) { 775 /* 776 * Not enough hardware tx queues to properly do WME; 777 * just punt and assign them all to the same h/w queue. 778 * We could do a better job of this if, for example, 779 * we allocate queues when we switch from station to 780 * AP mode. 781 */ 782 if (sc->sc_ac2q[WME_AC_VI] != NULL) 783 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); 784 if (sc->sc_ac2q[WME_AC_BE] != NULL) 785 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); 786 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; 787 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; 788 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; 789 } 790 791 /* 792 * Attach the TX completion function. 793 * 794 * The non-EDMA chips may have some special case optimisations; 795 * this method gives everyone a chance to attach cleanly. 796 */ 797 sc->sc_tx.xmit_attach_comp_func(sc); 798 799 /* 800 * Setup rate control. Some rate control modules 801 * call back to change the anntena state so expose 802 * the necessary entry points. 803 * XXX maybe belongs in struct ath_ratectrl? 804 */ 805 sc->sc_setdefantenna = ath_setdefantenna; 806 sc->sc_rc = ath_rate_attach(sc); 807 if (sc->sc_rc == NULL) { 808 error = EIO; 809 goto bad2; 810 } 811 812 /* Attach DFS module */ 813 if (! ath_dfs_attach(sc)) { 814 device_printf(sc->sc_dev, 815 "%s: unable to attach DFS\n", __func__); 816 error = EIO; 817 goto bad2; 818 } 819 820 /* Attach spectral module */ 821 if (ath_spectral_attach(sc) < 0) { 822 device_printf(sc->sc_dev, 823 "%s: unable to attach spectral\n", __func__); 824 error = EIO; 825 goto bad2; 826 } 827 828 /* Attach bluetooth coexistence module */ 829 if (ath_btcoex_attach(sc) < 0) { 830 device_printf(sc->sc_dev, 831 "%s: unable to attach bluetooth coexistence\n", __func__); 832 error = EIO; 833 goto bad2; 834 } 835 836 /* Attach LNA diversity module */ 837 if (ath_lna_div_attach(sc) < 0) { 838 device_printf(sc->sc_dev, 839 "%s: unable to attach LNA diversity\n", __func__); 840 error = EIO; 841 goto bad2; 842 } 843 844 /* Start DFS processing tasklet */ 845 TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc); 846 847 /* Configure LED state */ 848 sc->sc_blinking = 0; 849 sc->sc_ledstate = 1; 850 sc->sc_ledon = 0; /* low true */ 851 sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */ 852 callout_init(&sc->sc_ledtimer, 1); 853 854 /* 855 * Don't setup hardware-based blinking. 856 * 857 * Although some NICs may have this configured in the 858 * default reset register values, the user may wish 859 * to alter which pins have which function. 860 * 861 * The reference driver attaches the MAC network LED to GPIO1 and 862 * the MAC power LED to GPIO2. However, the DWA-552 cardbus 863 * NIC has these reversed. 
864 */ 865 sc->sc_hardled = (1 == 0); 866 sc->sc_led_net_pin = -1; 867 sc->sc_led_pwr_pin = -1; 868 /* 869 * Auto-enable soft led processing for IBM cards and for 870 * 5211 minipci cards. Users can also manually enable/disable 871 * support with a sysctl. 872 */ 873 sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID); 874 ath_led_config(sc); 875 ath_hal_setledstate(ah, HAL_LED_INIT); 876 877 ifp->if_softc = sc; 878 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; 879 ifp->if_transmit = ath_transmit; 880 ifp->if_qflush = ath_qflush; 881 ifp->if_ioctl = ath_ioctl; 882 ifp->if_init = ath_init; 883 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 884 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 885 IFQ_SET_READY(&ifp->if_snd); 886 887 ic->ic_ifp = ifp; 888 /* XXX not right but it's not used anywhere important */ 889 ic->ic_phytype = IEEE80211_T_OFDM; 890 ic->ic_opmode = IEEE80211_M_STA; 891 ic->ic_caps = 892 IEEE80211_C_STA /* station mode */ 893 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ 894 | IEEE80211_C_HOSTAP /* hostap mode */ 895 | IEEE80211_C_MONITOR /* monitor mode */ 896 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 897 | IEEE80211_C_WDS /* 4-address traffic works */ 898 | IEEE80211_C_MBSS /* mesh point link mode */ 899 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 900 | IEEE80211_C_SHSLOT /* short slot time supported */ 901 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ 902#ifndef ATH_ENABLE_11N 903 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 904#endif 905 | IEEE80211_C_TXFRAG /* handle tx frags */ 906#ifdef ATH_ENABLE_DFS 907 | IEEE80211_C_DFS /* Enable radar detection */ 908#endif 909 | IEEE80211_C_PMGT /* Station side power mgmt */ 910 | IEEE80211_C_SWSLEEP 911 ; 912 /* 913 * Query the hal to figure out h/w crypto support. 914 */ 915 if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP)) 916 ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP; 917 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB)) 918 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB; 919 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM)) 920 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM; 921 if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP)) 922 ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP; 923 if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) { 924 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP; 925 /* 926 * Check if h/w does the MIC and/or whether the 927 * separate key cache entries are required to 928 * handle both tx+rx MIC keys. 929 */ 930 if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC)) 931 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 932 /* 933 * If the h/w supports storing tx+rx MIC keys 934 * in one cache slot automatically enable use. 935 */ 936 if (ath_hal_hastkipsplit(ah) || 937 !ath_hal_settkipsplit(ah, AH_FALSE)) 938 sc->sc_splitmic = 1; 939 /* 940 * If the h/w can do TKIP MIC together with WME then 941 * we use it; otherwise we force the MIC to be done 942 * in software by the net80211 layer. 943 */ 944 if (ath_hal_haswmetkipmic(ah)) 945 sc->sc_wmetkipmic = 1; 946 } 947 sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR); 948 /* 949 * Check for multicast key search support. 950 */ 951 if (ath_hal_hasmcastkeysearch(sc->sc_ah) && 952 !ath_hal_getmcastkeysearch(sc->sc_ah)) { 953 ath_hal_setmcastkeysearch(sc->sc_ah, 1); 954 } 955 sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah); 956 /* 957 * Mark key cache slots associated with global keys 958 * as in use. If we knew TKIP was not to be used we 959 * could leave the +32, +64, and +32+64 slots free. 
960 */ 961 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 962 setbit(sc->sc_keymap, i); 963 setbit(sc->sc_keymap, i+64); 964 if (sc->sc_splitmic) { 965 setbit(sc->sc_keymap, i+32); 966 setbit(sc->sc_keymap, i+32+64); 967 } 968 } 969 /* 970 * TPC support can be done either with a global cap or 971 * per-packet support. The latter is not available on 972 * all parts. We're a bit pedantic here as all parts 973 * support a global cap. 974 */ 975 if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah)) 976 ic->ic_caps |= IEEE80211_C_TXPMGT; 977 978 /* 979 * Mark WME capability only if we have sufficient 980 * hardware queues to do proper priority scheduling. 981 */ 982 if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK]) 983 ic->ic_caps |= IEEE80211_C_WME; 984 /* 985 * Check for misc other capabilities. 986 */ 987 if (ath_hal_hasbursting(ah)) 988 ic->ic_caps |= IEEE80211_C_BURST; 989 sc->sc_hasbmask = ath_hal_hasbssidmask(ah); 990 sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah); 991 sc->sc_hastsfadd = ath_hal_hastsfadjust(ah); 992 sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah); 993 sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah); 994 sc->sc_hasenforcetxop = ath_hal_hasenforcetxop(ah); 995 sc->sc_rx_lnamixer = ath_hal_hasrxlnamixer(ah); 996 sc->sc_hasdivcomb = ath_hal_hasdivantcomb(ah); 997 998 if (ath_hal_hasfastframes(ah)) 999 ic->ic_caps |= IEEE80211_C_FF; 1000 wmodes = ath_hal_getwirelessmodes(ah); 1001 if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO)) 1002 ic->ic_caps |= IEEE80211_C_TURBOP; 1003#ifdef IEEE80211_SUPPORT_TDMA 1004 if (ath_hal_macversion(ah) > 0x78) { 1005 ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */ 1006 ic->ic_tdma_update = ath_tdma_update; 1007 } 1008#endif 1009 1010 /* 1011 * TODO: enforce that at least this many frames are available 1012 * in the txbuf list before allowing data frames (raw or 1013 * otherwise) to be transmitted. 1014 */ 1015 sc->sc_txq_data_minfree = 10; 1016 /* 1017 * Leave this as default to maintain legacy behaviour. 1018 * Shortening the cabq/mcastq may end up causing some 1019 * undesirable behaviour. 1020 */ 1021 sc->sc_txq_mcastq_maxdepth = ath_txbuf; 1022 1023 /* 1024 * How deep can the node software TX queue get whilst it's asleep. 1025 */ 1026 sc->sc_txq_node_psq_maxdepth = 16; 1027 1028 /* 1029 * Default the maximum queue depth for a given node 1030 * to 1/4'th the TX buffers, or 64, whichever 1031 * is larger. 1032 */ 1033 sc->sc_txq_node_maxdepth = MAX(64, ath_txbuf / 4); 1034 1035 /* Enable CABQ by default */ 1036 sc->sc_cabq_enable = 1; 1037 1038 /* 1039 * Allow the TX and RX chainmasks to be overridden by 1040 * environment variables and/or device.hints. 1041 * 1042 * This must be done early - before the hardware is 1043 * calibrated or before the 802.11n stream calculation 1044 * is done. 1045 */ 1046 if (resource_int_value(device_get_name(sc->sc_dev), 1047 device_get_unit(sc->sc_dev), "rx_chainmask", 1048 &rx_chainmask) == 0) { 1049 device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n", 1050 rx_chainmask); 1051 (void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask); 1052 } 1053 if (resource_int_value(device_get_name(sc->sc_dev), 1054 device_get_unit(sc->sc_dev), "tx_chainmask", 1055 &tx_chainmask) == 0) { 1056 device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n", 1057 tx_chainmask); 1058 (void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask); 1059 } 1060 1061 /* 1062 * Query the TX/RX chainmask configuration. 1063 * 1064 * This is only relevant for 11n devices. 
1065 */ 1066 ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask); 1067 ath_hal_gettxchainmask(ah, &sc->sc_txchainmask); 1068 1069 /* 1070 * Disable MRR with protected frames by default. 1071 * Only 802.11n series NICs can handle this. 1072 */ 1073 sc->sc_mrrprot = 0; /* XXX should be a capability */ 1074 1075 /* 1076 * Query the enterprise mode information the HAL. 1077 */ 1078 if (ath_hal_getcapability(ah, HAL_CAP_ENTERPRISE_MODE, 0, 1079 &sc->sc_ent_cfg) == HAL_OK) 1080 sc->sc_use_ent = 1; 1081 1082#ifdef ATH_ENABLE_11N 1083 /* 1084 * Query HT capabilities 1085 */ 1086 if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK && 1087 (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) { 1088 uint32_t rxs, txs; 1089 1090 device_printf(sc->sc_dev, "[HT] enabling HT modes\n"); 1091 1092 sc->sc_mrrprot = 1; /* XXX should be a capability */ 1093 1094 ic->ic_htcaps = IEEE80211_HTC_HT /* HT operation */ 1095 | IEEE80211_HTC_AMPDU /* A-MPDU tx/rx */ 1096 | IEEE80211_HTC_AMSDU /* A-MSDU tx/rx */ 1097 | IEEE80211_HTCAP_MAXAMSDU_3839 1098 /* max A-MSDU length */ 1099 | IEEE80211_HTCAP_SMPS_OFF; /* SM power save off */ 1100 ; 1101 1102 /* 1103 * Enable short-GI for HT20 only if the hardware 1104 * advertises support. 1105 * Notably, anything earlier than the AR9287 doesn't. 1106 */ 1107 if ((ath_hal_getcapability(ah, 1108 HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) && 1109 (wmodes & HAL_MODE_HT20)) { 1110 device_printf(sc->sc_dev, 1111 "[HT] enabling short-GI in 20MHz mode\n"); 1112 ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20; 1113 } 1114 1115 if (wmodes & HAL_MODE_HT40) 1116 ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40 1117 | IEEE80211_HTCAP_SHORTGI40; 1118 1119 /* 1120 * TX/RX streams need to be taken into account when 1121 * negotiating which MCS rates it'll receive and 1122 * what MCS rates are available for TX. 1123 */ 1124 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs); 1125 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs); 1126 ic->ic_txstream = txs; 1127 ic->ic_rxstream = rxs; 1128 1129 /* 1130 * Setup TX and RX STBC based on what the HAL allows and 1131 * the currently configured chainmask set. 1132 * Ie - don't enable STBC TX if only one chain is enabled. 1133 * STBC RX is fine on a single RX chain; it just won't 1134 * provide any real benefit. 1135 */ 1136 if (ath_hal_getcapability(ah, HAL_CAP_RX_STBC, 0, 1137 NULL) == HAL_OK) { 1138 sc->sc_rx_stbc = 1; 1139 device_printf(sc->sc_dev, 1140 "[HT] 1 stream STBC receive enabled\n"); 1141 ic->ic_htcaps |= IEEE80211_HTCAP_RXSTBC_1STREAM; 1142 } 1143 if (txs > 1 && ath_hal_getcapability(ah, HAL_CAP_TX_STBC, 0, 1144 NULL) == HAL_OK) { 1145 sc->sc_tx_stbc = 1; 1146 device_printf(sc->sc_dev, 1147 "[HT] 1 stream STBC transmit enabled\n"); 1148 ic->ic_htcaps |= IEEE80211_HTCAP_TXSTBC; 1149 } 1150 1151 (void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1, 1152 &sc->sc_rts_aggr_limit); 1153 if (sc->sc_rts_aggr_limit != (64 * 1024)) 1154 device_printf(sc->sc_dev, 1155 "[HT] RTS aggregates limited to %d KiB\n", 1156 sc->sc_rts_aggr_limit / 1024); 1157 1158 device_printf(sc->sc_dev, 1159 "[HT] %d RX streams; %d TX streams\n", rxs, txs); 1160 } 1161#endif 1162 1163 /* 1164 * Initial aggregation settings. 
1165 */ 1166 sc->sc_hwq_limit_aggr = ATH_AGGR_MIN_QDEPTH; 1167 sc->sc_hwq_limit_nonaggr = ATH_NONAGGR_MIN_QDEPTH; 1168 sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW; 1169 sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH; 1170 sc->sc_aggr_limit = ATH_AGGR_MAXSIZE; 1171 sc->sc_delim_min_pad = 0; 1172 1173 /* 1174 * Check if the hardware requires PCI register serialisation. 1175 * Some of the Owl based MACs require this. 1176 */ 1177 if (mp_ncpus > 1 && 1178 ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR, 1179 0, NULL) == HAL_OK) { 1180 sc->sc_ah->ah_config.ah_serialise_reg_war = 1; 1181 device_printf(sc->sc_dev, 1182 "Enabling register serialisation\n"); 1183 } 1184 1185 /* 1186 * Initialise the deferred completed RX buffer list. 1187 */ 1188 TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]); 1189 TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]); 1190 1191 /* 1192 * Indicate we need the 802.11 header padded to a 1193 * 32-bit boundary for 4-address and QoS frames. 1194 */ 1195 ic->ic_flags |= IEEE80211_F_DATAPAD; 1196 1197 /* 1198 * Query the hal about antenna support. 1199 */ 1200 sc->sc_defant = ath_hal_getdefantenna(ah); 1201 1202 /* 1203 * Not all chips have the VEOL support we want to 1204 * use with IBSS beacons; check here for it. 1205 */ 1206 sc->sc_hasveol = ath_hal_hasveol(ah); 1207 1208 /* get mac address from kenv first, then hardware */ 1209 if (ath_fetch_mac_kenv(sc, macaddr) == 0) { 1210 /* Tell the HAL now about the new MAC */ 1211 ath_hal_setmac(ah, macaddr); 1212 } else { 1213 ath_hal_getmac(ah, macaddr); 1214 } 1215 1216 if (sc->sc_hasbmask) 1217 ath_hal_getbssidmask(ah, sc->sc_hwbssidmask); 1218 1219 /* NB: used to size node table key mapping array */ 1220 ic->ic_max_keyix = sc->sc_keymax; 1221 /* call MI attach routine. */ 1222 ieee80211_ifattach(ic, macaddr); 1223 ic->ic_setregdomain = ath_setregdomain; 1224 ic->ic_getradiocaps = ath_getradiocaps; 1225 sc->sc_opmode = HAL_M_STA; 1226 1227 /* override default methods */ 1228 ic->ic_newassoc = ath_newassoc; 1229 ic->ic_updateslot = ath_updateslot; 1230 ic->ic_wme.wme_update = ath_wme_update; 1231 ic->ic_vap_create = ath_vap_create; 1232 ic->ic_vap_delete = ath_vap_delete; 1233 ic->ic_raw_xmit = ath_raw_xmit; 1234 ic->ic_update_mcast = ath_update_mcast; 1235 ic->ic_update_promisc = ath_update_promisc; 1236 ic->ic_node_alloc = ath_node_alloc; 1237 sc->sc_node_free = ic->ic_node_free; 1238 ic->ic_node_free = ath_node_free; 1239 sc->sc_node_cleanup = ic->ic_node_cleanup; 1240 ic->ic_node_cleanup = ath_node_cleanup; 1241 ic->ic_node_getsignal = ath_node_getsignal; 1242 ic->ic_scan_start = ath_scan_start; 1243 ic->ic_scan_end = ath_scan_end; 1244 ic->ic_set_channel = ath_set_channel; 1245#ifdef ATH_ENABLE_11N 1246 /* 802.11n specific - but just override anyway */ 1247 sc->sc_addba_request = ic->ic_addba_request; 1248 sc->sc_addba_response = ic->ic_addba_response; 1249 sc->sc_addba_stop = ic->ic_addba_stop; 1250 sc->sc_bar_response = ic->ic_bar_response; 1251 sc->sc_addba_response_timeout = ic->ic_addba_response_timeout; 1252 1253 ic->ic_addba_request = ath_addba_request; 1254 ic->ic_addba_response = ath_addba_response; 1255 ic->ic_addba_response_timeout = ath_addba_response_timeout; 1256 ic->ic_addba_stop = ath_addba_stop; 1257 ic->ic_bar_response = ath_bar_response; 1258 1259 ic->ic_update_chw = ath_update_chw; 1260#endif /* ATH_ENABLE_11N */ 1261 1262#ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT 1263 /* 1264 * There's one vendor bitmap entry in the RX radiotap 1265 * header; make sure that's taken into account. 
1266 */ 1267 ieee80211_radiotap_attachv(ic, 1268 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0, 1269 ATH_TX_RADIOTAP_PRESENT, 1270 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1, 1271 ATH_RX_RADIOTAP_PRESENT); 1272#else 1273 /* 1274 * No vendor bitmap/extensions are present. 1275 */ 1276 ieee80211_radiotap_attach(ic, 1277 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 1278 ATH_TX_RADIOTAP_PRESENT, 1279 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1280 ATH_RX_RADIOTAP_PRESENT); 1281#endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */ 1282 1283 /* 1284 * Setup the ALQ logging if required 1285 */ 1286#ifdef ATH_DEBUG_ALQ 1287 if_ath_alq_init(&sc->sc_alq, device_get_nameunit(sc->sc_dev)); 1288 if_ath_alq_setcfg(&sc->sc_alq, 1289 sc->sc_ah->ah_macVersion, 1290 sc->sc_ah->ah_macRev, 1291 sc->sc_ah->ah_phyRev, 1292 sc->sc_ah->ah_magic); 1293#endif 1294 1295 /* 1296 * Setup dynamic sysctl's now that country code and 1297 * regdomain are available from the hal. 1298 */ 1299 ath_sysctlattach(sc); 1300 ath_sysctl_stats_attach(sc); 1301 ath_sysctl_hal_attach(sc); 1302 1303 if (bootverbose) 1304 ieee80211_announce(ic); 1305 ath_announce(sc); 1306 1307 /* 1308 * Put it to sleep for now. 1309 */ 1310 ATH_LOCK(sc); 1311 ath_power_setpower(sc, HAL_PM_FULL_SLEEP); 1312 ATH_UNLOCK(sc); 1313 1314 return 0; 1315bad2: 1316 ath_tx_cleanup(sc); 1317 ath_desc_free(sc); 1318 ath_txdma_teardown(sc); 1319 ath_rxdma_teardown(sc); 1320bad: 1321 if (ah) 1322 ath_hal_detach(ah); 1323 1324 /* 1325 * To work around scoping issues with CURVNET_SET/CURVNET_RESTORE.. 1326 */ 1327 if (ifp != NULL && ifp->if_vnet) { 1328 CURVNET_SET(ifp->if_vnet); 1329 if_free(ifp); 1330 CURVNET_RESTORE(); 1331 } else if (ifp != NULL) 1332 if_free(ifp); 1333 sc->sc_invalid = 1; 1334 return error; 1335} 1336 1337int 1338ath_detach(struct ath_softc *sc) 1339{ 1340 struct ifnet *ifp = sc->sc_ifp; 1341 1342 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1343 __func__, ifp->if_flags); 1344 1345 /* 1346 * NB: the order of these is important: 1347 * o stop the chip so no more interrupts will fire 1348 * o call the 802.11 layer before detaching the hal to 1349 * insure callbacks into the driver to delete global 1350 * key cache entries can be handled 1351 * o free the taskqueue which drains any pending tasks 1352 * o reclaim the tx queue data structures after calling 1353 * the 802.11 layer as we'll get called back to reclaim 1354 * node state and potentially want to use them 1355 * o to cleanup the tx queues the hal is called, so detach 1356 * it last 1357 * Other than that, it's straightforward... 1358 */ 1359 1360 /* 1361 * XXX Wake the hardware up first. ath_stop() will still 1362 * wake it up first, but I'd rather do it here just to 1363 * ensure it's awake. 1364 */ 1365 ATH_LOCK(sc); 1366 ath_power_set_power_state(sc, HAL_PM_AWAKE); 1367 ath_power_setpower(sc, HAL_PM_AWAKE); 1368 ATH_UNLOCK(sc); 1369 1370 /* 1371 * Stop things cleanly. 
1372 */ 1373 ath_stop(ifp); 1374 1375 ieee80211_ifdetach(ifp->if_l2com); 1376 taskqueue_free(sc->sc_tq); 1377#ifdef ATH_TX99_DIAG 1378 if (sc->sc_tx99 != NULL) 1379 sc->sc_tx99->detach(sc->sc_tx99); 1380#endif 1381 ath_rate_detach(sc->sc_rc); 1382#ifdef ATH_DEBUG_ALQ 1383 if_ath_alq_tidyup(&sc->sc_alq); 1384#endif 1385 ath_lna_div_detach(sc); 1386 ath_btcoex_detach(sc); 1387 ath_spectral_detach(sc); 1388 ath_dfs_detach(sc); 1389 ath_desc_free(sc); 1390 ath_txdma_teardown(sc); 1391 ath_rxdma_teardown(sc); 1392 ath_tx_cleanup(sc); 1393 ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */ 1394 1395 CURVNET_SET(ifp->if_vnet); 1396 if_free(ifp); 1397 CURVNET_RESTORE(); 1398 1399 return 0; 1400} 1401 1402/* 1403 * MAC address handling for multiple BSS on the same radio. 1404 * The first vap uses the MAC address from the EEPROM. For 1405 * subsequent vap's we set the U/L bit (bit 1) in the MAC 1406 * address and use the next six bits as an index. 1407 */ 1408static void 1409assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone) 1410{ 1411 int i; 1412 1413 if (clone && sc->sc_hasbmask) { 1414 /* NB: we only do this if h/w supports multiple bssid */ 1415 for (i = 0; i < 8; i++) 1416 if ((sc->sc_bssidmask & (1<<i)) == 0) 1417 break; 1418 if (i != 0) 1419 mac[0] |= (i << 2)|0x2; 1420 } else 1421 i = 0; 1422 sc->sc_bssidmask |= 1<<i; 1423 sc->sc_hwbssidmask[0] &= ~mac[0]; 1424 if (i == 0) 1425 sc->sc_nbssid0++; 1426} 1427 1428static void 1429reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN]) 1430{ 1431 int i = mac[0] >> 2; 1432 uint8_t mask; 1433 1434 if (i != 0 || --sc->sc_nbssid0 == 0) { 1435 sc->sc_bssidmask &= ~(1<<i); 1436 /* recalculate bssid mask from remaining addresses */ 1437 mask = 0xff; 1438 for (i = 1; i < 8; i++) 1439 if (sc->sc_bssidmask & (1<<i)) 1440 mask &= ~((i<<2)|0x2); 1441 sc->sc_hwbssidmask[0] |= mask; 1442 } 1443} 1444 1445/* 1446 * Assign a beacon xmit slot. We try to space out 1447 * assignments so when beacons are staggered the 1448 * traffic coming out of the cab q has maximal time 1449 * to go out before the next beacon is scheduled. 
1450 */ 1451static int 1452assign_bslot(struct ath_softc *sc) 1453{ 1454 u_int slot, free; 1455 1456 free = 0; 1457 for (slot = 0; slot < ATH_BCBUF; slot++) 1458 if (sc->sc_bslot[slot] == NULL) { 1459 if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL && 1460 sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL) 1461 return slot; 1462 free = slot; 1463 /* NB: keep looking for a double slot */ 1464 } 1465 return free; 1466} 1467 1468static struct ieee80211vap * 1469ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 1470 enum ieee80211_opmode opmode, int flags, 1471 const uint8_t bssid[IEEE80211_ADDR_LEN], 1472 const uint8_t mac0[IEEE80211_ADDR_LEN]) 1473{ 1474 struct ath_softc *sc = ic->ic_ifp->if_softc; 1475 struct ath_vap *avp; 1476 struct ieee80211vap *vap; 1477 uint8_t mac[IEEE80211_ADDR_LEN]; 1478 int needbeacon, error; 1479 enum ieee80211_opmode ic_opmode; 1480 1481 avp = (struct ath_vap *) malloc(sizeof(struct ath_vap), 1482 M_80211_VAP, M_WAITOK | M_ZERO); 1483 needbeacon = 0; 1484 IEEE80211_ADDR_COPY(mac, mac0); 1485 1486 ATH_LOCK(sc); 1487 ic_opmode = opmode; /* default to opmode of new vap */ 1488 switch (opmode) { 1489 case IEEE80211_M_STA: 1490 if (sc->sc_nstavaps != 0) { /* XXX only 1 for now */ 1491 device_printf(sc->sc_dev, "only 1 sta vap supported\n"); 1492 goto bad; 1493 } 1494 if (sc->sc_nvaps) { 1495 /* 1496 * With multiple vaps we must fall back 1497 * to s/w beacon miss handling. 1498 */ 1499 flags |= IEEE80211_CLONE_NOBEACONS; 1500 } 1501 if (flags & IEEE80211_CLONE_NOBEACONS) { 1502 /* 1503 * Station mode w/o beacons are implemented w/ AP mode. 1504 */ 1505 ic_opmode = IEEE80211_M_HOSTAP; 1506 } 1507 break; 1508 case IEEE80211_M_IBSS: 1509 if (sc->sc_nvaps != 0) { /* XXX only 1 for now */ 1510 device_printf(sc->sc_dev, 1511 "only 1 ibss vap supported\n"); 1512 goto bad; 1513 } 1514 needbeacon = 1; 1515 break; 1516 case IEEE80211_M_AHDEMO: 1517#ifdef IEEE80211_SUPPORT_TDMA 1518 if (flags & IEEE80211_CLONE_TDMA) { 1519 if (sc->sc_nvaps != 0) { 1520 device_printf(sc->sc_dev, 1521 "only 1 tdma vap supported\n"); 1522 goto bad; 1523 } 1524 needbeacon = 1; 1525 flags |= IEEE80211_CLONE_NOBEACONS; 1526 } 1527 /* fall thru... */ 1528#endif 1529 case IEEE80211_M_MONITOR: 1530 if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) { 1531 /* 1532 * Adopt existing mode. Adding a monitor or ahdemo 1533 * vap to an existing configuration is of dubious 1534 * value but should be ok. 1535 */ 1536 /* XXX not right for monitor mode */ 1537 ic_opmode = ic->ic_opmode; 1538 } 1539 break; 1540 case IEEE80211_M_HOSTAP: 1541 case IEEE80211_M_MBSS: 1542 needbeacon = 1; 1543 break; 1544 case IEEE80211_M_WDS: 1545 if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) { 1546 device_printf(sc->sc_dev, 1547 "wds not supported in sta mode\n"); 1548 goto bad; 1549 } 1550 /* 1551 * Silently remove any request for a unique 1552 * bssid; WDS vap's always share the local 1553 * mac address. 1554 */ 1555 flags &= ~IEEE80211_CLONE_BSSID; 1556 if (sc->sc_nvaps == 0) 1557 ic_opmode = IEEE80211_M_HOSTAP; 1558 else 1559 ic_opmode = ic->ic_opmode; 1560 break; 1561 default: 1562 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); 1563 goto bad; 1564 } 1565 /* 1566 * Check that a beacon buffer is available; the code below assumes it. 1567 */ 1568 if (needbeacon & TAILQ_EMPTY(&sc->sc_bbuf)) { 1569 device_printf(sc->sc_dev, "no beacon buffer available\n"); 1570 goto bad; 1571 } 1572 1573 /* STA, AHDEMO? 

static struct ieee80211vap *
ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp;
	struct ieee80211vap *vap;
	uint8_t mac[IEEE80211_ADDR_LEN];
	int needbeacon, error;
	enum ieee80211_opmode ic_opmode;

	avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
	    M_80211_VAP, M_WAITOK | M_ZERO);
	needbeacon = 0;
	IEEE80211_ADDR_COPY(mac, mac0);

	ATH_LOCK(sc);
	ic_opmode = opmode;		/* default to opmode of new vap */
	switch (opmode) {
	case IEEE80211_M_STA:
		if (sc->sc_nstavaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev, "only 1 sta vap supported\n");
			goto bad;
		}
		if (sc->sc_nvaps) {
			/*
			 * With multiple vaps we must fall back
			 * to s/w beacon miss handling.
			 */
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		if (flags & IEEE80211_CLONE_NOBEACONS) {
			/*
			 * Station mode w/o beacons are implemented w/ AP mode.
			 */
			ic_opmode = IEEE80211_M_HOSTAP;
		}
		break;
	case IEEE80211_M_IBSS:
		if (sc->sc_nvaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev,
			    "only 1 ibss vap supported\n");
			goto bad;
		}
		needbeacon = 1;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (flags & IEEE80211_CLONE_TDMA) {
			if (sc->sc_nvaps != 0) {
				device_printf(sc->sc_dev,
				    "only 1 tdma vap supported\n");
				goto bad;
			}
			needbeacon = 1;
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		/* fall thru... */
#endif
	case IEEE80211_M_MONITOR:
		if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
			/*
			 * Adopt existing mode.  Adding a monitor or ahdemo
			 * vap to an existing configuration is of dubious
			 * value but should be ok.
			 */
			/* XXX not right for monitor mode */
			ic_opmode = ic->ic_opmode;
		}
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		needbeacon = 1;
		break;
	case IEEE80211_M_WDS:
		if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
			device_printf(sc->sc_dev,
			    "wds not supported in sta mode\n");
			goto bad;
		}
		/*
		 * Silently remove any request for a unique
		 * bssid; WDS vap's always share the local
		 * mac address.
		 */
		flags &= ~IEEE80211_CLONE_BSSID;
		if (sc->sc_nvaps == 0)
			ic_opmode = IEEE80211_M_HOSTAP;
		else
			ic_opmode = ic->ic_opmode;
		break;
	default:
		device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
		goto bad;
	}
	/*
	 * Check that a beacon buffer is available; the code below assumes it.
	 */
	if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
		device_printf(sc->sc_dev, "no beacon buffer available\n");
		goto bad;
	}

	/* STA, AHDEMO? */
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
	}

	vap = &avp->av_vap;
	/* XXX can't hold mutex across if_alloc */
	ATH_UNLOCK(sc);
	error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
	    bssid, mac);
	ATH_LOCK(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: error %d creating vap\n",
		    __func__, error);
		goto bad2;
	}

	/* h/w crypto support */
	vap->iv_key_alloc = ath_key_alloc;
	vap->iv_key_delete = ath_key_delete;
	vap->iv_key_set = ath_key_set;
	vap->iv_key_update_begin = ath_key_update_begin;
	vap->iv_key_update_end = ath_key_update_end;

	/* override various methods */
	avp->av_recv_mgmt = vap->iv_recv_mgmt;
	vap->iv_recv_mgmt = ath_recv_mgmt;
	vap->iv_reset = ath_reset_vap;
	vap->iv_update_beacon = ath_beacon_update;
	avp->av_newstate = vap->iv_newstate;
	vap->iv_newstate = ath_newstate;
	avp->av_bmiss = vap->iv_bmiss;
	vap->iv_bmiss = ath_bmiss_vap;

	avp->av_node_ps = vap->iv_node_ps;
	vap->iv_node_ps = ath_node_powersave;

	avp->av_set_tim = vap->iv_set_tim;
	vap->iv_set_tim = ath_node_set_tim;

	avp->av_recv_pspoll = vap->iv_recv_pspoll;
	vap->iv_recv_pspoll = ath_node_recv_pspoll;

	/* Set default parameters */

	/*
	 * Anything earlier than some AR9300 series MACs don't
	 * support a smaller MPDU density.
	 */
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
	/*
	 * All NICs can handle the maximum size, however
	 * AR5416 based MACs can only TX aggregates w/ RTS
	 * protection when the total aggregate size is <= 8k.
	 * However, for now that's enforced by the TX path.
	 */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;

	avp->av_bslot = -1;
	if (needbeacon) {
		/*
		 * Allocate beacon state and setup the q for buffered
		 * multicast frames.  We know a beacon buffer is
		 * available because we checked above.
		 */
		avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
		TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
		if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
			/*
			 * Assign the vap to a beacon xmit slot.  As above
			 * this cannot fail to find a free one.
			 */
			avp->av_bslot = assign_bslot(sc);
			KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
			    ("beacon slot %u not empty", avp->av_bslot));
			sc->sc_bslot[avp->av_bslot] = vap;
			sc->sc_nbcnvaps++;
		}
		if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
			/*
			 * Multiple vaps are to transmit beacons and we
			 * have h/w support for TSF adjusting; enable
			 * use of staggered beacons.
			 */
1657 */ 1658 sc->sc_stagbeacons = 1; 1659 } 1660 ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ); 1661 } 1662 1663 ic->ic_opmode = ic_opmode; 1664 if (opmode != IEEE80211_M_WDS) { 1665 sc->sc_nvaps++; 1666 if (opmode == IEEE80211_M_STA) 1667 sc->sc_nstavaps++; 1668 if (opmode == IEEE80211_M_MBSS) 1669 sc->sc_nmeshvaps++; 1670 } 1671 switch (ic_opmode) { 1672 case IEEE80211_M_IBSS: 1673 sc->sc_opmode = HAL_M_IBSS; 1674 break; 1675 case IEEE80211_M_STA: 1676 sc->sc_opmode = HAL_M_STA; 1677 break; 1678 case IEEE80211_M_AHDEMO: 1679#ifdef IEEE80211_SUPPORT_TDMA 1680 if (vap->iv_caps & IEEE80211_C_TDMA) { 1681 sc->sc_tdma = 1; 1682 /* NB: disable tsf adjust */ 1683 sc->sc_stagbeacons = 0; 1684 } 1685 /* 1686 * NB: adhoc demo mode is a pseudo mode; to the hal it's 1687 * just ap mode. 1688 */ 1689 /* fall thru... */ 1690#endif 1691 case IEEE80211_M_HOSTAP: 1692 case IEEE80211_M_MBSS: 1693 sc->sc_opmode = HAL_M_HOSTAP; 1694 break; 1695 case IEEE80211_M_MONITOR: 1696 sc->sc_opmode = HAL_M_MONITOR; 1697 break; 1698 default: 1699 /* XXX should not happen */ 1700 break; 1701 } 1702 if (sc->sc_hastsfadd) { 1703 /* 1704 * Configure whether or not TSF adjust should be done. 1705 */ 1706 ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons); 1707 } 1708 if (flags & IEEE80211_CLONE_NOBEACONS) { 1709 /* 1710 * Enable s/w beacon miss handling. 1711 */ 1712 sc->sc_swbmiss = 1; 1713 } 1714 ATH_UNLOCK(sc); 1715 1716 /* complete setup */ 1717 ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status); 1718 return vap; 1719bad2: 1720 reclaim_address(sc, mac); 1721 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); 1722bad: 1723 free(avp, M_80211_VAP); 1724 ATH_UNLOCK(sc); 1725 return NULL; 1726} 1727 1728static void 1729ath_vap_delete(struct ieee80211vap *vap) 1730{ 1731 struct ieee80211com *ic = vap->iv_ic; 1732 struct ifnet *ifp = ic->ic_ifp; 1733 struct ath_softc *sc = ifp->if_softc; 1734 struct ath_hal *ah = sc->sc_ah; 1735 struct ath_vap *avp = ATH_VAP(vap); 1736 1737 ATH_LOCK(sc); 1738 ath_power_set_power_state(sc, HAL_PM_AWAKE); 1739 ATH_UNLOCK(sc); 1740 1741 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); 1742 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1743 /* 1744 * Quiesce the hardware while we remove the vap. In 1745 * particular we need to reclaim all references to 1746 * the vap state by any frames pending on the tx queues. 1747 */ 1748 ath_hal_intrset(ah, 0); /* disable interrupts */ 1749 /* XXX Do all frames from all vaps/nodes need draining here? */ 1750 ath_stoprecv(sc, 1); /* stop recv side */ 1751 ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */ 1752 } 1753 1754 /* .. leave the hardware awake for now. */ 1755 1756 ieee80211_vap_detach(vap); 1757 1758 /* 1759 * XXX Danger Will Robinson! Danger! 1760 * 1761 * Because ieee80211_vap_detach() can queue a frame (the station 1762 * diassociate message?) after we've drained the TXQ and 1763 * flushed the software TXQ, we will end up with a frame queued 1764 * to a node whose vap is about to be freed. 1765 * 1766 * To work around this, flush the hardware/software again. 1767 * This may be racy - the ath task may be running and the packet 1768 * may be being scheduled between sw->hw txq. Tsk. 1769 * 1770 * TODO: figure out why a new node gets allocated somewhere around 1771 * here (after the ath_tx_swq() call; and after an ath_stop_locked() 1772 * call!) 1773 */ 1774 1775 ath_draintxq(sc, ATH_RESET_DEFAULT); 1776 1777 ATH_LOCK(sc); 1778 /* 1779 * Reclaim beacon state. 
	/*
	 * Reclaim beacon state.  Note this must be done before
	 * the vap instance is reclaimed as we may have a reference
	 * to it in the buffer for the beacon frame.
	 */
	if (avp->av_bcbuf != NULL) {
		if (avp->av_bslot != -1) {
			sc->sc_bslot[avp->av_bslot] = NULL;
			sc->sc_nbcnvaps--;
		}
		ath_beacon_return(sc, avp->av_bcbuf);
		avp->av_bcbuf = NULL;
		if (sc->sc_nbcnvaps == 0) {
			sc->sc_stagbeacons = 0;
			if (sc->sc_hastsfadd)
				ath_hal_settsfadjust(sc->sc_ah, 0);
		}
		/*
		 * Reclaim any pending mcast frames for the vap.
		 */
		ath_tx_draintxq(sc, &avp->av_mcastq);
	}
	/*
	 * Update bookkeeping.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		sc->sc_nstavaps--;
		if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
			sc->sc_swbmiss = 0;
	} else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	    vap->iv_opmode == IEEE80211_M_MBSS) {
		reclaim_address(sc, vap->iv_myaddr);
		ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
		if (vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps--;
	}
	if (vap->iv_opmode != IEEE80211_M_WDS)
		sc->sc_nvaps--;
#ifdef IEEE80211_SUPPORT_TDMA
	/* TDMA operation ceases when the last vap is destroyed */
	if (sc->sc_tdma && sc->sc_nvaps == 0) {
		sc->sc_tdma = 0;
		sc->sc_swbmiss = 0;
	}
#endif
	free(avp, M_80211_VAP);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Restart rx+tx machines if still running (RUNNING will
		 * be reset if we just destroyed the last vap).
		 */
		if (ath_startrecv(sc) != 0)
			if_printf(ifp, "%s: unable to restart recv logic\n",
			    __func__);
		if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma)
				ath_tdma_config(sc, NULL);
			else
#endif
				ath_beacon_config(sc, NULL);
		}
		ath_hal_intrset(ah, sc->sc_imask);
	}

	/* OK, let the hardware sleep. */
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);
}

void
ath_suspend(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;

	ieee80211_suspend_all(ic);
	/*
	 * NB: don't worry about putting the chip in low power
	 * mode; pci will power off our socket on suspend and
	 * CardBus detaches the device.
	 *
	 * XXX TODO: well, that's great, except for non-cardbus
	 * devices!
	 */

	/*
	 * XXX This doesn't wait until all pending taskqueue
	 * items and parallel transmit/receive/other threads
	 * have finished!
	 */
	ath_hal_intrset(sc->sc_ah, 0);
	taskqueue_block(sc->sc_tq);

	ATH_LOCK(sc);
	callout_stop(&sc->sc_cal_ch);
	ATH_UNLOCK(sc);

	/*
	 * XXX ensure sc_invalid is 1
	 */

	/* Disable the PCIe PHY, complete with workarounds */
	ath_hal_enablepcie(sc->sc_ah, 1, 1);
}
1895 */ 1896static void 1897ath_reset_keycache(struct ath_softc *sc) 1898{ 1899 struct ifnet *ifp = sc->sc_ifp; 1900 struct ieee80211com *ic = ifp->if_l2com; 1901 struct ath_hal *ah = sc->sc_ah; 1902 int i; 1903 1904 ATH_LOCK(sc); 1905 ath_power_set_power_state(sc, HAL_PM_AWAKE); 1906 for (i = 0; i < sc->sc_keymax; i++) 1907 ath_hal_keyreset(ah, i); 1908 ath_power_restore_power_state(sc); 1909 ATH_UNLOCK(sc); 1910 ieee80211_crypto_reload_keys(ic); 1911} 1912 1913/* 1914 * Fetch the current chainmask configuration based on the current 1915 * operating channel and options. 1916 */ 1917static void 1918ath_update_chainmasks(struct ath_softc *sc, struct ieee80211_channel *chan) 1919{ 1920 1921 /* 1922 * Set TX chainmask to the currently configured chainmask; 1923 * the TX chainmask depends upon the current operating mode. 1924 */ 1925 sc->sc_cur_rxchainmask = sc->sc_rxchainmask; 1926 if (IEEE80211_IS_CHAN_HT(chan)) { 1927 sc->sc_cur_txchainmask = sc->sc_txchainmask; 1928 } else { 1929 sc->sc_cur_txchainmask = 1; 1930 } 1931 1932 DPRINTF(sc, ATH_DEBUG_RESET, 1933 "%s: TX chainmask is now 0x%x, RX is now 0x%x\n", 1934 __func__, 1935 sc->sc_cur_txchainmask, 1936 sc->sc_cur_rxchainmask); 1937} 1938 1939void 1940ath_resume(struct ath_softc *sc) 1941{ 1942 struct ifnet *ifp = sc->sc_ifp; 1943 struct ieee80211com *ic = ifp->if_l2com; 1944 struct ath_hal *ah = sc->sc_ah; 1945 HAL_STATUS status; 1946 1947 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1948 __func__, ifp->if_flags); 1949 1950 /* Re-enable PCIe, re-enable the PCIe bus */ 1951 ath_hal_enablepcie(ah, 0, 0); 1952 1953 /* 1954 * Must reset the chip before we reload the 1955 * keycache as we were powered down on suspend. 1956 */ 1957 ath_update_chainmasks(sc, 1958 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan); 1959 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, 1960 sc->sc_cur_rxchainmask); 1961 1962 /* Ensure we set the current power state to on */ 1963 ATH_LOCK(sc); 1964 ath_power_setselfgen(sc, HAL_PM_AWAKE); 1965 ath_power_set_power_state(sc, HAL_PM_AWAKE); 1966 ath_power_setpower(sc, HAL_PM_AWAKE); 1967 ATH_UNLOCK(sc); 1968 1969 ath_hal_reset(ah, sc->sc_opmode, 1970 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan, 1971 AH_FALSE, &status); 1972 ath_reset_keycache(sc); 1973 1974 ATH_RX_LOCK(sc); 1975 sc->sc_rx_stopped = 1; 1976 sc->sc_rx_resetted = 1; 1977 ATH_RX_UNLOCK(sc); 1978 1979 /* Let DFS at it in case it's a DFS channel */ 1980 ath_dfs_radar_enable(sc, ic->ic_curchan); 1981 1982 /* Let spectral at it in case spectral is enabled */ 1983 ath_spectral_enable(sc, ic->ic_curchan); 1984 1985 /* 1986 * Let bluetooth coexistence at it in case it's needed for this channel 1987 */ 1988 ath_btcoex_enable(sc, ic->ic_curchan); 1989 1990 /* 1991 * If we're doing TDMA, enforce the TXOP limitation for chips that 1992 * support it. 1993 */ 1994 if (sc->sc_hasenforcetxop && sc->sc_tdma) 1995 ath_hal_setenforcetxop(sc->sc_ah, 1); 1996 else 1997 ath_hal_setenforcetxop(sc->sc_ah, 0); 1998 1999 /* Restore the LED configuration */ 2000 ath_led_config(sc); 2001 ath_hal_setledstate(ah, HAL_LED_INIT); 2002 2003 if (sc->sc_resume_up) 2004 ieee80211_resume_all(ic); 2005 2006 ATH_LOCK(sc); 2007 ath_power_restore_power_state(sc); 2008 ATH_UNLOCK(sc); 2009 2010 /* XXX beacons ?
*/ 2011} 2012 2013void 2014ath_shutdown(struct ath_softc *sc) 2015{ 2016 struct ifnet *ifp = sc->sc_ifp; 2017 2018 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 2019 __func__, ifp->if_flags); 2020 2021 ath_stop(ifp); 2022 /* NB: no point powering down chip as we're about to reboot */ 2023} 2024 2025/* 2026 * Interrupt handler. Most of the actual processing is deferred. 2027 */ 2028void 2029ath_intr(void *arg) 2030{ 2031 struct ath_softc *sc = arg; 2032 struct ifnet *ifp = sc->sc_ifp; 2033 struct ath_hal *ah = sc->sc_ah; 2034 HAL_INT status = 0; 2035 uint32_t txqs; 2036 2037 /* 2038 * If we're inside a reset path, just print a warning and 2039 * clear the ISR. The reset routine will finish it for us. 2040 */ 2041 ATH_PCU_LOCK(sc); 2042 if (sc->sc_inreset_cnt) { 2043 HAL_INT status; 2044 ath_hal_getisr(ah, &status); /* clear ISR */ 2045 ath_hal_intrset(ah, 0); /* disable further intr's */ 2046 DPRINTF(sc, ATH_DEBUG_ANY, 2047 "%s: in reset, ignoring: status=0x%x\n", 2048 __func__, status); 2049 ATH_PCU_UNLOCK(sc); 2050 return; 2051 } 2052 2053 if (sc->sc_invalid) { 2054 /* 2055 * The hardware is not ready/present, don't touch anything. 2056 * Note this can happen early on if the IRQ is shared. 2057 */ 2058 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__); 2059 ATH_PCU_UNLOCK(sc); 2060 return; 2061 } 2062 if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */ 2063 ATH_PCU_UNLOCK(sc); 2064 return; 2065 } 2066 2067 ATH_LOCK(sc); 2068 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2069 ATH_UNLOCK(sc); 2070 2071 if ((ifp->if_flags & IFF_UP) == 0 || 2072 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2073 HAL_INT status; 2074 2075 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 2076 __func__, ifp->if_flags); 2077 ath_hal_getisr(ah, &status); /* clear ISR */ 2078 ath_hal_intrset(ah, 0); /* disable further intr's */ 2079 ATH_PCU_UNLOCK(sc); 2080 2081 ATH_LOCK(sc); 2082 ath_power_restore_power_state(sc); 2083 ATH_UNLOCK(sc); 2084 return; 2085 } 2086 2087 /* 2088 * Figure out the reason(s) for the interrupt. Note 2089 * that the hal returns a pseudo-ISR that may include 2090 * bits we haven't explicitly enabled so we mask the 2091 * value to ensure we only process bits we requested. 2092 */ 2093 ath_hal_getisr(ah, &status); /* NB: clears ISR too */ 2094 DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status); 2095 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, "ath_intr: mask=0x%.8x", status); 2096#ifdef ATH_DEBUG_ALQ 2097 if_ath_alq_post_intr(&sc->sc_alq, status, ah->ah_intrstate, 2098 ah->ah_syncstate); 2099#endif /* ATH_DEBUG_ALQ */ 2100#ifdef ATH_KTR_INTR_DEBUG 2101 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 5, 2102 "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x", 2103 ah->ah_intrstate[0], 2104 ah->ah_intrstate[1], 2105 ah->ah_intrstate[2], 2106 ah->ah_intrstate[3], 2107 ah->ah_intrstate[6]); 2108#endif 2109 2110 /* Squirrel away SYNC interrupt debugging */ 2111 if (ah->ah_syncstate != 0) { 2112 int i; 2113 for (i = 0; i < 32; i++) 2114 if (ah->ah_syncstate & (1 << i)) 2115 sc->sc_intr_stats.sync_intr[i]++; 2116 } 2117 2118 status &= sc->sc_imask; /* discard unasked for bits */ 2119 2120 /* Short-circuit un-handled interrupts */ 2121 if (status == 0x0) { 2122 ATH_PCU_UNLOCK(sc); 2123 2124 ATH_LOCK(sc); 2125 ath_power_restore_power_state(sc); 2126 ATH_UNLOCK(sc); 2127 2128 return; 2129 } 2130 2131 /* 2132 * Take a note that we're inside the interrupt handler, so 2133 * the reset routines know to wait.
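 *
 * In outline (a sketch only; the real wait loop is in
 * ath_txrx_stop_locked() below):
 *
 *	interrupt path:			reset path:
 *	ATH_PCU_LOCK(sc);		ATH_PCU_LOCK(sc);
 *	sc->sc_intr_cnt++;		sc->sc_inreset_cnt++;
 *	ATH_PCU_UNLOCK(sc);		while (sc->sc_intr_cnt != 0)
 *	.. service interrupt ..		    msleep(sc, &sc->sc_pcu_mtx, ..);
 *	ATH_PCU_LOCK(sc);
 *	sc->sc_intr_cnt--;
 *	ATH_PCU_UNLOCK(sc);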
2134 */ 2135 sc->sc_intr_cnt++; 2136 ATH_PCU_UNLOCK(sc); 2137 2138 /* 2139 * Handle the interrupt. We won't run concurrent with the reset 2140 * or channel change routines as they'll wait for sc_intr_cnt 2141 * to be 0 before continuing. 2142 */ 2143 if (status & HAL_INT_FATAL) { 2144 sc->sc_stats.ast_hardware++; 2145 ath_hal_intrset(ah, 0); /* disable intr's until reset */ 2146 taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask); 2147 } else { 2148 if (status & HAL_INT_SWBA) { 2149 /* 2150 * Software beacon alert--time to send a beacon. 2151 * Handle beacon transmission directly; deferring 2152 * this is too slow to meet timing constraints 2153 * under load. 2154 */ 2155#ifdef IEEE80211_SUPPORT_TDMA 2156 if (sc->sc_tdma) { 2157 if (sc->sc_tdmaswba == 0) { 2158 struct ieee80211com *ic = ifp->if_l2com; 2159 struct ieee80211vap *vap = 2160 TAILQ_FIRST(&ic->ic_vaps); 2161 ath_tdma_beacon_send(sc, vap); 2162 sc->sc_tdmaswba = 2163 vap->iv_tdma->tdma_bintval; 2164 } else 2165 sc->sc_tdmaswba--; 2166 } else 2167#endif 2168 { 2169 ath_beacon_proc(sc, 0); 2170#ifdef IEEE80211_SUPPORT_SUPERG 2171 /* 2172 * Schedule the rx taskq in case there's no 2173 * traffic so any frames held on the staging 2174 * queue are aged and potentially flushed. 2175 */ 2176 sc->sc_rx.recv_sched(sc, 1); 2177#endif 2178 } 2179 } 2180 if (status & HAL_INT_RXEOL) { 2181 int imask; 2182 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXEOL"); 2183 if (! sc->sc_isedma) { 2184 ATH_PCU_LOCK(sc); 2185 /* 2186 * NB: the hardware should re-read the link when 2187 * RXE bit is written, but it doesn't work at 2188 * least on older hardware revs. 2189 */ 2190 sc->sc_stats.ast_rxeol++; 2191 /* 2192 * Disable RXEOL/RXORN - prevent an interrupt 2193 * storm until the PCU logic can be reset. 2194 * In case the interface is reset some other 2195 * way before "sc_kickpcu" is called, don't 2196 * modify sc_imask - that way if it is reset 2197 * by a call to ath_reset() somehow, the 2198 * interrupt mask will be correctly reprogrammed. 2199 */ 2200 imask = sc->sc_imask; 2201 imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN); 2202 ath_hal_intrset(ah, imask); 2203 /* 2204 * Only blank sc_rxlink if we've not yet kicked 2205 * the PCU. 2206 * 2207 * This isn't entirely correct - the correct solution 2208 * would be to have a PCU lock and engage that for 2209 * the duration of the PCU fiddling; which would include 2210 * running the RX process. Otherwise we could end up 2211 * messing up the RX descriptor chain and making the 2212 * RX desc list much shorter. 2213 */ 2214 if (! sc->sc_kickpcu) 2215 sc->sc_rxlink = NULL; 2216 sc->sc_kickpcu = 1; 2217 ATH_PCU_UNLOCK(sc); 2218 } 2219 /* 2220 * Enqueue an RX proc to handle whatever 2221 * is in the RX queue. 2222 * This will then kick the PCU if required. 2223 */ 2224 sc->sc_rx.recv_sched(sc, 1); 2225 } 2226 if (status & HAL_INT_TXURN) { 2227 sc->sc_stats.ast_txurn++; 2228 /* bump tx trigger level */ 2229 ath_hal_updatetxtriglevel(ah, AH_TRUE); 2230 } 2231 /* 2232 * Handle both the legacy and RX EDMA interrupt bits. 2233 * Note that HAL_INT_RXLP is also HAL_INT_RXDESC. 2234 */ 2235 if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) { 2236 sc->sc_stats.ast_rx_intr++; 2237 sc->sc_rx.recv_sched(sc, 1); 2238 } 2239 if (status & HAL_INT_TX) { 2240 sc->sc_stats.ast_tx_intr++; 2241 /* 2242 * Grab all the currently set bits in the HAL txq bitmap 2243 * and blank them. This is the only place we should be 2244 * doing this. 2245 */ 2246 if (! 
sc->sc_isedma) { 2247 ATH_PCU_LOCK(sc); 2248 txqs = 0xffffffff; 2249 ath_hal_gettxintrtxqs(sc->sc_ah, &txqs); 2250 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 3, 2251 "ath_intr: TX; txqs=0x%08x, txq_active was 0x%08x, now 0x%08x", 2252 txqs, 2253 sc->sc_txq_active, 2254 sc->sc_txq_active | txqs); 2255 sc->sc_txq_active |= txqs; 2256 ATH_PCU_UNLOCK(sc); 2257 } 2258 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask); 2259 } 2260 if (status & HAL_INT_BMISS) { 2261 sc->sc_stats.ast_bmiss++; 2262 taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask); 2263 } 2264 if (status & HAL_INT_GTT) 2265 sc->sc_stats.ast_tx_timeout++; 2266 if (status & HAL_INT_CST) 2267 sc->sc_stats.ast_tx_cst++; 2268 if (status & HAL_INT_MIB) { 2269 sc->sc_stats.ast_mib++; 2270 ATH_PCU_LOCK(sc); 2271 /* 2272 * Disable interrupts until we service the MIB 2273 * interrupt; otherwise it will continue to fire. 2274 */ 2275 ath_hal_intrset(ah, 0); 2276 /* 2277 * Let the hal handle the event. We assume it will 2278 * clear whatever condition caused the interrupt. 2279 */ 2280 ath_hal_mibevent(ah, &sc->sc_halstats); 2281 /* 2282 * Don't reset the interrupt if we've just 2283 * kicked the PCU, or we may get a nested 2284 * RXEOL before the rxproc has had a chance 2285 * to run. 2286 */ 2287 if (sc->sc_kickpcu == 0) 2288 ath_hal_intrset(ah, sc->sc_imask); 2289 ATH_PCU_UNLOCK(sc); 2290 } 2291 if (status & HAL_INT_RXORN) { 2292 /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */ 2293 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXORN"); 2294 sc->sc_stats.ast_rxorn++; 2295 } 2296 if (status & HAL_INT_TSFOOR) { 2297 device_printf(sc->sc_dev, "%s: TSFOOR\n", __func__); 2298 sc->sc_syncbeacon = 1; 2299 } 2300 } 2301 ATH_PCU_LOCK(sc); 2302 sc->sc_intr_cnt--; 2303 ATH_PCU_UNLOCK(sc); 2304 2305 ATH_LOCK(sc); 2306 ath_power_restore_power_state(sc); 2307 ATH_UNLOCK(sc); 2308} 2309 2310static void 2311ath_fatal_proc(void *arg, int pending) 2312{ 2313 struct ath_softc *sc = arg; 2314 struct ifnet *ifp = sc->sc_ifp; 2315 u_int32_t *state; 2316 u_int32_t len; 2317 void *sp; 2318 2319 if_printf(ifp, "hardware error; resetting\n"); 2320 /* 2321 * Fatal errors are unrecoverable. Typically these 2322 * are caused by DMA errors. Collect h/w state from 2323 * the hal so we can diagnose what's going on. 2324 */ 2325 if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) { 2326 KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len)); 2327 state = sp; 2328 if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n", 2329 state[0], state[1] , state[2], state[3], 2330 state[4], state[5]); 2331 } 2332 ath_reset(ifp, ATH_RESET_NOLOSS); 2333} 2334 2335static void 2336ath_bmiss_vap(struct ieee80211vap *vap) 2337{ 2338 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 2339 2340 /* 2341 * Workaround phantom bmiss interrupts by sanity-checking 2342 * the time of our last rx'd frame. If it is within the 2343 * beacon miss interval then ignore the interrupt. If it's 2344 * truly a bmiss we'll get another interrupt soon and that'll 2345 * be dispatched up for processing. Note this applies only 2346 * for h/w beacon miss events. 2347 */ 2348 2349 /* 2350 * XXX TODO: Just read the TSF during the interrupt path; 2351 * that way we don't have to wake up again just to read it 2352 * again. 
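 *
 * As a worked example of the check below: with iv_bmissthreshold = 10
 * and a beacon interval of 100 TU, bmisstimeout is 10 * 100 * 1024 =
 * 1024000 usec (1 TU == 1024 usec), so a frame rx'd within roughly
 * 1.02 seconds of the bmiss interrupt marks it as phantom.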
2353 */ 2354 ATH_LOCK(sc); 2355 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2356 ATH_UNLOCK(sc); 2357 2358 if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) { 2359 struct ifnet *ifp = vap->iv_ic->ic_ifp; 2360 struct ath_softc *sc = ifp->if_softc; 2361 u_int64_t lastrx = sc->sc_lastrx; 2362 u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah); 2363 /* XXX should take a locked ref to iv_bss */ 2364 u_int bmisstimeout = 2365 vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024; 2366 2367 DPRINTF(sc, ATH_DEBUG_BEACON, 2368 "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n", 2369 __func__, (unsigned long long) tsf, 2370 (unsigned long long)(tsf - lastrx), 2371 (unsigned long long) lastrx, bmisstimeout); 2372 2373 if (tsf - lastrx <= bmisstimeout) { 2374 sc->sc_stats.ast_bmiss_phantom++; 2375 2376 ATH_LOCK(sc); 2377 ath_power_restore_power_state(sc); 2378 ATH_UNLOCK(sc); 2379 2380 return; 2381 } 2382 } 2383 2384 /* 2385 * There's no need to keep the hardware awake during the call 2386 * to av_bmiss(). 2387 */ 2388 ATH_LOCK(sc); 2389 ath_power_restore_power_state(sc); 2390 ATH_UNLOCK(sc); 2391 2392 /* 2393 * Attempt to force a beacon resync. 2394 */ 2395 sc->sc_syncbeacon = 1; 2396 2397 ATH_VAP(vap)->av_bmiss(vap); 2398} 2399 2400/* XXX this needs a force wakeup! */ 2401int 2402ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs) 2403{ 2404 uint32_t rsize; 2405 void *sp; 2406 2407 if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize)) 2408 return 0; 2409 KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize)); 2410 *hangs = *(uint32_t *)sp; 2411 return 1; 2412} 2413 2414static void 2415ath_bmiss_proc(void *arg, int pending) 2416{ 2417 struct ath_softc *sc = arg; 2418 struct ifnet *ifp = sc->sc_ifp; 2419 uint32_t hangs; 2420 2421 DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending); 2422 2423 ATH_LOCK(sc); 2424 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2425 ATH_UNLOCK(sc); 2426 2427 ath_beacon_miss(sc); 2428 2429 /* 2430 * Do a reset upon any beacon miss event. 2431 * 2432 * It may be a non-recognised RX clear hang which needs a reset 2433 * to clear. 2434 */ 2435 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) { 2436 ath_reset(ifp, ATH_RESET_NOLOSS); 2437 if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs); 2438 } else { 2439 ath_reset(ifp, ATH_RESET_NOLOSS); 2440 ieee80211_beacon_miss(ifp->if_l2com); 2441 } 2442 2443 /* Force a beacon resync, in case they've drifted */ 2444 sc->sc_syncbeacon = 1; 2445 2446 ATH_LOCK(sc); 2447 ath_power_restore_power_state(sc); 2448 ATH_UNLOCK(sc); 2449} 2450 2451/* 2452 * Handle TKIP MIC setup to deal with hardware that doesn't do MIC 2453 * calcs together with WME. If necessary disable the crypto 2454 * hardware and mark the 802.11 state so keys will be setup 2455 * with the MIC work done in software.
2456 */ 2457static void 2458ath_settkipmic(struct ath_softc *sc) 2459{ 2460 struct ifnet *ifp = sc->sc_ifp; 2461 struct ieee80211com *ic = ifp->if_l2com; 2462 2463 if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) { 2464 if (ic->ic_flags & IEEE80211_F_WME) { 2465 ath_hal_settkipmic(sc->sc_ah, AH_FALSE); 2466 ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC; 2467 } else { 2468 ath_hal_settkipmic(sc->sc_ah, AH_TRUE); 2469 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 2470 } 2471 } 2472} 2473 2474static void 2475ath_init(void *arg) 2476{ 2477 struct ath_softc *sc = (struct ath_softc *) arg; 2478 struct ifnet *ifp = sc->sc_ifp; 2479 struct ieee80211com *ic = ifp->if_l2com; 2480 struct ath_hal *ah = sc->sc_ah; 2481 HAL_STATUS status; 2482 2483 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 2484 __func__, ifp->if_flags); 2485 2486 ATH_LOCK(sc); 2487 /* 2488 * Force the sleep state awake. 2489 */ 2490 ath_power_setselfgen(sc, HAL_PM_AWAKE); 2491 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2492 ath_power_setpower(sc, HAL_PM_AWAKE); 2493 2494 /* 2495 * Stop anything previously setup. This is safe 2496 * whether this is the first time through or not. 2497 */ 2498 ath_stop_locked(ifp); 2499 2500 /* 2501 * The basic interface to setting the hardware in a good 2502 * state is ``reset''. On return the hardware is known to 2503 * be powered up and with interrupts disabled. This must 2504 * be followed by initialization of the appropriate bits 2505 * and then setup of the interrupt mask. 2506 */ 2507 ath_settkipmic(sc); 2508 ath_update_chainmasks(sc, ic->ic_curchan); 2509 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, 2510 sc->sc_cur_rxchainmask); 2511 2512 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) { 2513 if_printf(ifp, "unable to reset hardware; hal status %u\n", 2514 status); 2515 ATH_UNLOCK(sc); 2516 return; 2517 } 2518 2519 ATH_RX_LOCK(sc); 2520 sc->sc_rx_stopped = 1; 2521 sc->sc_rx_resetted = 1; 2522 ATH_RX_UNLOCK(sc); 2523 2524 ath_chan_change(sc, ic->ic_curchan); 2525 2526 /* Let DFS at it in case it's a DFS channel */ 2527 ath_dfs_radar_enable(sc, ic->ic_curchan); 2528 2529 /* Let spectral at it in case spectral is enabled */ 2530 ath_spectral_enable(sc, ic->ic_curchan); 2531 2532 /* 2533 * Let bluetooth coexistence at it in case it's needed for this channel 2534 */ 2535 ath_btcoex_enable(sc, ic->ic_curchan); 2536 2537 /* 2538 * If we're doing TDMA, enforce the TXOP limitation for chips that 2539 * support it. 2540 */ 2541 if (sc->sc_hasenforcetxop && sc->sc_tdma) 2542 ath_hal_setenforcetxop(sc->sc_ah, 1); 2543 else 2544 ath_hal_setenforcetxop(sc->sc_ah, 0); 2545 2546 /* 2547 * Likewise this is set during reset so update 2548 * state cached in the driver. 2549 */ 2550 sc->sc_diversity = ath_hal_getdiversity(ah); 2551 sc->sc_lastlongcal = ticks; 2552 sc->sc_resetcal = 1; 2553 sc->sc_lastcalreset = 0; 2554 sc->sc_lastani = ticks; 2555 sc->sc_lastshortcal = ticks; 2556 sc->sc_doresetcal = AH_FALSE; 2557 /* 2558 * Beacon timers were cleared here; give ath_newstate() 2559 * a hint that the beacon timers should be poked when 2560 * things transition to the RUN state. 2561 */ 2562 sc->sc_beacons = 0; 2563 2564 /* 2565 * Setup the hardware after reset: the key cache 2566 * is filled as needed and the receive engine is 2567 * set going. Frame transmit is handled entirely 2568 * in the frame output path; there's nothing to do 2569 * here except setup the interrupt mask.
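 *
 * For example (from the code below), a legacy (non-EDMA) chip in
 * STA mode ends up with an imask of HAL_INT_RX | HAL_INT_TX |
 * HAL_INT_RXORN | HAL_INT_TXURN | HAL_INT_FATAL | HAL_INT_GLOBAL |
 * HAL_INT_RXEOL | HAL_INT_TSFOOR, plus HAL_INT_MIB and HAL_INT_GTT
 * where the hardware supports them.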
2570 */ 2571 if (ath_startrecv(sc) != 0) { 2572 if_printf(ifp, "unable to start recv logic\n"); 2573 ath_power_restore_power_state(sc); 2574 ATH_UNLOCK(sc); 2575 return; 2576 } 2577 2578 /* 2579 * Enable interrupts. 2580 */ 2581 sc->sc_imask = HAL_INT_RX | HAL_INT_TX 2582 | HAL_INT_RXORN | HAL_INT_TXURN 2583 | HAL_INT_FATAL | HAL_INT_GLOBAL; 2584 2585 /* 2586 * Enable RX EDMA bits. Note these overlap with 2587 * HAL_INT_RX and HAL_INT_RXDESC respectively. 2588 */ 2589 if (sc->sc_isedma) 2590 sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP); 2591 2592 /* 2593 * If we're an EDMA NIC, we don't care about RXEOL. 2594 * Writing a new descriptor in will simply restart 2595 * RX DMA. 2596 */ 2597 if (! sc->sc_isedma) 2598 sc->sc_imask |= HAL_INT_RXEOL; 2599 2600 /* 2601 * Enable MIB interrupts when there are hardware phy counters. 2602 * Note we only do this (at the moment) for station mode. 2603 */ 2604 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) 2605 sc->sc_imask |= HAL_INT_MIB; 2606 2607 /* 2608 * XXX add capability for this. 2609 * 2610 * If we're in STA mode (and maybe IBSS?) then register for 2611 * TSFOOR interrupts. 2612 */ 2613 if (ic->ic_opmode == IEEE80211_M_STA) 2614 sc->sc_imask |= HAL_INT_TSFOOR; 2615 2616 /* Enable global TX timeout and carrier sense timeout if available */ 2617 if (ath_hal_gtxto_supported(ah)) 2618 sc->sc_imask |= HAL_INT_GTT; 2619 2620 DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n", 2621 __func__, sc->sc_imask); 2622 2623 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2624 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc); 2625 ath_hal_intrset(ah, sc->sc_imask); 2626 2627 ath_power_restore_power_state(sc); 2628 ATH_UNLOCK(sc); 2629 2630#ifdef ATH_TX99_DIAG 2631 if (sc->sc_tx99 != NULL) 2632 sc->sc_tx99->start(sc->sc_tx99); 2633 else 2634#endif 2635 ieee80211_start_all(ic); /* start all vap's */ 2636} 2637 2638static void 2639ath_stop_locked(struct ifnet *ifp) 2640{ 2641 struct ath_softc *sc = ifp->if_softc; 2642 struct ath_hal *ah = sc->sc_ah; 2643 2644 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n", 2645 __func__, sc->sc_invalid, ifp->if_flags); 2646 2647 ATH_LOCK_ASSERT(sc); 2648 2649 /* 2650 * Wake the hardware up before fiddling with it. 2651 */ 2652 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2653 2654 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2655 /* 2656 * Shutdown the hardware and driver: 2657 * reset 802.11 state machine 2658 * turn off timers 2659 * disable interrupts 2660 * turn off the radio 2661 * clear transmit machinery 2662 * clear receive machinery 2663 * drain and release tx queues 2664 * reclaim beacon resources 2665 * power down hardware 2666 * 2667 * Note that some of this work is not possible if the 2668 * hardware is gone (invalid). 
2669 */ 2670#ifdef ATH_TX99_DIAG 2671 if (sc->sc_tx99 != NULL) 2672 sc->sc_tx99->stop(sc->sc_tx99); 2673#endif 2674 callout_stop(&sc->sc_wd_ch); 2675 sc->sc_wd_timer = 0; 2676 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2677 if (!sc->sc_invalid) { 2678 if (sc->sc_softled) { 2679 callout_stop(&sc->sc_ledtimer); 2680 ath_hal_gpioset(ah, sc->sc_ledpin, 2681 !sc->sc_ledon); 2682 sc->sc_blinking = 0; 2683 } 2684 ath_hal_intrset(ah, 0); 2685 } 2686 /* XXX we should stop RX regardless of whether it's valid */ 2687 if (!sc->sc_invalid) { 2688 ath_stoprecv(sc, 1); 2689 ath_hal_phydisable(ah); 2690 } else 2691 sc->sc_rxlink = NULL; 2692 ath_draintxq(sc, ATH_RESET_DEFAULT); 2693 ath_beacon_free(sc); /* XXX not needed */ 2694 } 2695 2696 /* And now, restore the current power state */ 2697 ath_power_restore_power_state(sc); 2698} 2699 2700/* 2701 * Wait until all pending TX/RX has completed. 2702 * 2703 * This waits until all existing transmit, receive and interrupts 2704 * have completed. It's assumed that the caller has first 2705 * grabbed the reset lock so it doesn't try to do overlapping 2706 * chip resets. 2707 */ 2708#define MAX_TXRX_ITERATIONS 100 2709static void 2710ath_txrx_stop_locked(struct ath_softc *sc) 2711{ 2712 int i = MAX_TXRX_ITERATIONS; 2713 2714 ATH_UNLOCK_ASSERT(sc); 2715 ATH_PCU_LOCK_ASSERT(sc); 2716 2717 /* 2718 * Sleep until all the pending operations have completed. 2719 * 2720 * The caller must ensure that reset has been incremented 2721 * or the pending operations may continue being queued. 2722 */ 2723 while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt || 2724 sc->sc_txstart_cnt || sc->sc_intr_cnt) { 2725 if (i <= 0) 2726 break; 2727 msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 2728 msecs_to_ticks(10)); 2729 i--; 2730 } 2731 2732 if (i <= 0) 2733 device_printf(sc->sc_dev, 2734 "%s: didn't finish after %d iterations\n", 2735 __func__, MAX_TXRX_ITERATIONS); 2736} 2737#undef MAX_TXRX_ITERATIONS 2738 2739#if 0 2740static void 2741ath_txrx_stop(struct ath_softc *sc) 2742{ 2743 ATH_UNLOCK_ASSERT(sc); 2744 ATH_PCU_UNLOCK_ASSERT(sc); 2745 2746 ATH_PCU_LOCK(sc); 2747 ath_txrx_stop_locked(sc); 2748 ATH_PCU_UNLOCK(sc); 2749} 2750#endif 2751 2752static void 2753ath_txrx_start(struct ath_softc *sc) 2754{ 2755 2756 taskqueue_unblock(sc->sc_tq); 2757} 2758 2759/* 2760 * Grab the reset lock, and wait around until no one else 2761 * is trying to do anything with it. 2762 * 2763 * This is totally horrible but we can't hold this lock for 2764 * long enough to do TX/RX or we end up with net80211/ip stack 2765 * LORs and eventual deadlock. 2766 * 2767 * "dowait" signals whether to spin, waiting for the reset 2768 * lock count to reach 0. This should (for now) only be used 2769 * during the reset path, as the rest of the code may not 2770 * be locking-reentrant enough to behave correctly. 2771 * 2772 * Another, cleaner way should be found to serialise all of 2773 * these operations. 2774 */ 2775#define MAX_RESET_ITERATIONS 25 2776static int 2777ath_reset_grablock(struct ath_softc *sc, int dowait) 2778{ 2779 int w = 0; 2780 int i = MAX_RESET_ITERATIONS; 2781 2782 ATH_PCU_LOCK_ASSERT(sc); 2783 do { 2784 if (sc->sc_inreset_cnt == 0) { 2785 w = 1; 2786 break; 2787 } 2788 if (dowait == 0) { 2789 w = 0; 2790 break; 2791 } 2792 ATH_PCU_UNLOCK(sc); 2793 /* 2794 * 1 tick is likely not enough time for long calibrations 2795 * to complete. So we should wait quite a while.
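 *
 * With the 100ms pause below and MAX_RESET_ITERATIONS (25) passes
 * through the loop, the worst case is roughly a 2.5 second wait
 * before we give up and print the "didn't finish" warning.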
2796 */ 2797 pause("ath_reset_grablock", msecs_to_ticks(100)); 2798 i--; 2799 ATH_PCU_LOCK(sc); 2800 } while (i > 0); 2801 2802 /* 2803 * We always increment the refcounter, regardless 2804 * of whether we succeeded in getting it in an exclusive 2805 * way. 2806 */ 2807 sc->sc_inreset_cnt++; 2808 2809 if (i <= 0) 2810 device_printf(sc->sc_dev, 2811 "%s: didn't finish after %d iterations\n", 2812 __func__, MAX_RESET_ITERATIONS); 2813 2814 if (w == 0) 2815 device_printf(sc->sc_dev, 2816 "%s: warning, recursive reset path!\n", 2817 __func__); 2818 2819 return w; 2820} 2821#undef MAX_RESET_ITERATIONS 2822 2823/* 2824 * XXX TODO: write ath_reset_releaselock 2825 */ 2826 2827static void 2828ath_stop(struct ifnet *ifp) 2829{ 2830 struct ath_softc *sc = ifp->if_softc; 2831 2832 ATH_LOCK(sc); 2833 ath_stop_locked(ifp); 2834 ATH_UNLOCK(sc); 2835} 2836 2837/* 2838 * Reset the hardware w/o losing operational state. This is 2839 * basically a more efficient way of doing ath_stop, ath_init, 2840 * followed by state transitions to the current 802.11 2841 * operational state. Used to recover from various errors and 2842 * to reset or reload hardware state. 2843 */ 2844int 2845ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type) 2846{ 2847 struct ath_softc *sc = ifp->if_softc; 2848 struct ieee80211com *ic = ifp->if_l2com; 2849 struct ath_hal *ah = sc->sc_ah; 2850 HAL_STATUS status; 2851 int i; 2852 2853 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); 2854 2855 /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */ 2856 ATH_PCU_UNLOCK_ASSERT(sc); 2857 ATH_UNLOCK_ASSERT(sc); 2858 2859 /* Try to stop any further TX/RX from occurring */ 2860 taskqueue_block(sc->sc_tq); 2861 2862 /* 2863 * Wake the hardware up. 2864 */ 2865 ATH_LOCK(sc); 2866 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2867 ATH_UNLOCK(sc); 2868 2869 ATH_PCU_LOCK(sc); 2870 2871 /* 2872 * Grab the reset lock before TX/RX is stopped. 2873 * 2874 * This is needed to ensure that when the TX/RX actually does finish, 2875 * no further TX/RX/reset runs in parallel with this. 2876 */ 2877 if (ath_reset_grablock(sc, 1) == 0) { 2878 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", 2879 __func__); 2880 } 2881 2882 /* disable interrupts */ 2883 ath_hal_intrset(ah, 0); 2884 2885 /* 2886 * Now, ensure that any in progress TX/RX completes before we 2887 * continue. 2888 */ 2889 ath_txrx_stop_locked(sc); 2890 2891 ATH_PCU_UNLOCK(sc); 2892 2893 /* 2894 * Regardless of whether we're doing a no-loss flush or 2895 * not, stop the PCU and handle what's in the RX queue. 2896 * That way frames which shouldn't be dropped aren't. 2897 */ 2898 ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS)); 2899 ath_rx_flush(sc); 2900 2901 /* 2902 * Should now wait for pending TX/RX to complete 2903 * and block future ones from occurring. This needs to be 2904 * done before the TX queue is drained.
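 *
 * In outline, the ordering in ath_reset() is thus:
 *
 *	ath_reset_grablock()	serialise against other resets
 *	ath_hal_intrset(ah, 0)	no new interrupts
 *	ath_txrx_stop_locked()	wait out pending TX/RX/interrupt work
 *	ath_stoprecv()		stop the PCU ..
 *	ath_rx_flush()		.. and handle what's in the RX queue
 *	ath_draintxq()		stop the xmit side (below)
 *	ath_hal_reset(), ath_startrecv(), etc
 *	sc_inreset_cnt-- and ath_hal_intrset(ah, sc->sc_imask)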
2905 */ 2906 ath_draintxq(sc, reset_type); /* stop xmit side */ 2907 2908 ath_settkipmic(sc); /* configure TKIP MIC handling */ 2909 /* NB: indicate channel change so we do a full reset */ 2910 ath_update_chainmasks(sc, ic->ic_curchan); 2911 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, 2912 sc->sc_cur_rxchainmask); 2913 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status)) 2914 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n", 2915 __func__, status); 2916 sc->sc_diversity = ath_hal_getdiversity(ah); 2917 2918 ATH_RX_LOCK(sc); 2919 sc->sc_rx_stopped = 1; 2920 sc->sc_rx_resetted = 1; 2921 ATH_RX_UNLOCK(sc); 2922 2923 /* Let DFS at it in case it's a DFS channel */ 2924 ath_dfs_radar_enable(sc, ic->ic_curchan); 2925 2926 /* Let spectral at it in case spectral is enabled */ 2927 ath_spectral_enable(sc, ic->ic_curchan); 2928 2929 /* 2930 * Let bluetooth coexistence at it in case it's needed for this channel 2931 */ 2932 ath_btcoex_enable(sc, ic->ic_curchan); 2933 2934 /* 2935 * If we're doing TDMA, enforce the TXOP limitation for chips that 2936 * support it. 2937 */ 2938 if (sc->sc_hasenforcetxop && sc->sc_tdma) 2939 ath_hal_setenforcetxop(sc->sc_ah, 1); 2940 else 2941 ath_hal_setenforcetxop(sc->sc_ah, 0); 2942 2943 if (ath_startrecv(sc) != 0) /* restart recv */ 2944 if_printf(ifp, "%s: unable to start recv logic\n", __func__); 2945 /* 2946 * We may be doing a reset in response to an ioctl 2947 * that changes the channel so update any state that 2948 * might change as a result. 2949 */ 2950 ath_chan_change(sc, ic->ic_curchan); 2951 if (sc->sc_beacons) { /* restart beacons */ 2952#ifdef IEEE80211_SUPPORT_TDMA 2953 if (sc->sc_tdma) 2954 ath_tdma_config(sc, NULL); 2955 else 2956#endif 2957 ath_beacon_config(sc, NULL); 2958 } 2959 2960 /* 2961 * Release the reset lock and re-enable interrupts here. 2962 * If an interrupt was being processed in ath_intr(), 2963 * it would disable interrupts at this point. So we have 2964 * to atomically enable interrupts and decrement the 2965 * reset counter - this way ath_intr() doesn't end up 2966 * disabling interrupts without a corresponding enable 2967 * in the reset or channel change path. 2968 * 2969 * Grab the TX reference in case we need to transmit. 2970 * That way a parallel transmit doesn't occur. 2971 */ 2972 ATH_PCU_LOCK(sc); 2973 sc->sc_inreset_cnt--; 2974 sc->sc_txstart_cnt++; 2975 /* XXX only do this if sc_inreset_cnt == 0? */ 2976 ath_hal_intrset(ah, sc->sc_imask); 2977 ATH_PCU_UNLOCK(sc); 2978 2979 /* 2980 * TX and RX can be started here. If it were started with 2981 * sc_inreset_cnt > 0, the TX and RX path would abort. 2982 * Thus if this is a nested call through the reset or 2983 * channel change code, TX completion will occur but 2984 * RX completion and ath_start / ath_tx_start will not 2985 * run. 2986 */ 2987 2988 /* Restart TX/RX as needed */ 2989 ath_txrx_start(sc); 2990 2991 /* XXX TODO: we need to hold the tx refcount here! */ 2992 2993 /* Restart TX completion and pending TX */ 2994 if (reset_type == ATH_RESET_NOLOSS) { 2995 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 2996 if (ATH_TXQ_SETUP(sc, i)) { 2997 ATH_TXQ_LOCK(&sc->sc_txq[i]); 2998 ath_txq_restart_dma(sc, &sc->sc_txq[i]); 2999 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 3000 3001 ATH_TX_LOCK(sc); 3002 ath_txq_sched(sc, &sc->sc_txq[i]); 3003 ATH_TX_UNLOCK(sc); 3004 } 3005 } 3006 } 3007 3008 /* 3009 * This may have been set during an ath_start() call which 3010 * set this once it detected a concurrent TX was going on. 3011 * So, clear it.
3012 */ 3013 IF_LOCK(&ifp->if_snd); 3014 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3015 IF_UNLOCK(&ifp->if_snd); 3016 3017 ATH_LOCK(sc); 3018 ath_power_restore_power_state(sc); 3019 ATH_UNLOCK(sc); 3020 3021 ATH_PCU_LOCK(sc); 3022 sc->sc_txstart_cnt--; 3023 ATH_PCU_UNLOCK(sc); 3024 3025 /* Handle any frames in the TX queue */ 3026 /* 3027 * XXX should this be done by the caller, rather than 3028 * ath_reset() ? 3029 */ 3030 ath_tx_kick(sc); /* restart xmit */ 3031 return 0; 3032} 3033 3034static int 3035ath_reset_vap(struct ieee80211vap *vap, u_long cmd) 3036{ 3037 struct ieee80211com *ic = vap->iv_ic; 3038 struct ifnet *ifp = ic->ic_ifp; 3039 struct ath_softc *sc = ifp->if_softc; 3040 struct ath_hal *ah = sc->sc_ah; 3041 3042 switch (cmd) { 3043 case IEEE80211_IOC_TXPOWER: 3044 /* 3045 * If per-packet TPC is enabled, then we have nothing 3046 * to do; otherwise we need to force the global limit. 3047 * All this can happen directly; no need to reset. 3048 */ 3049 if (!ath_hal_gettpc(ah)) 3050 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit); 3051 return 0; 3052 } 3053 /* XXX? Full or NOLOSS? */ 3054 return ath_reset(ifp, ATH_RESET_FULL); 3055} 3056 3057struct ath_buf * 3058_ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype) 3059{ 3060 struct ath_buf *bf; 3061 3062 ATH_TXBUF_LOCK_ASSERT(sc); 3063 3064 if (btype == ATH_BUFTYPE_MGMT) 3065 bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt); 3066 else 3067 bf = TAILQ_FIRST(&sc->sc_txbuf); 3068 3069 if (bf == NULL) { 3070 sc->sc_stats.ast_tx_getnobuf++; 3071 } else { 3072 if (bf->bf_flags & ATH_BUF_BUSY) { 3073 sc->sc_stats.ast_tx_getbusybuf++; 3074 bf = NULL; 3075 } 3076 } 3077 3078 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) { 3079 if (btype == ATH_BUFTYPE_MGMT) 3080 TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list); 3081 else { 3082 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list); 3083 sc->sc_txbuf_cnt--; 3084 3085 /* 3086 * This shouldn't happen; however just to be 3087 * safe print a warning and fudge the txbuf 3088 * count. 3089 */ 3090 if (sc->sc_txbuf_cnt < 0) { 3091 device_printf(sc->sc_dev, 3092 "%s: sc_txbuf_cnt < 0?\n", 3093 __func__); 3094 sc->sc_txbuf_cnt = 0; 3095 } 3096 } 3097 } else 3098 bf = NULL; 3099 3100 if (bf == NULL) { 3101 /* XXX should check which list, mgmt or otherwise */ 3102 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__, 3103 TAILQ_FIRST(&sc->sc_txbuf) == NULL ? 3104 "out of xmit buffers" : "xmit buffer busy"); 3105 return NULL; 3106 } 3107 3108 /* XXX TODO: should do this at buffer list initialisation */ 3109 /* XXX (then, ensure the buffer has the right flag set) */ 3110 bf->bf_flags = 0; 3111 if (btype == ATH_BUFTYPE_MGMT) 3112 bf->bf_flags |= ATH_BUF_MGMT; 3113 else 3114 bf->bf_flags &= (~ATH_BUF_MGMT); 3115 3116 /* Valid bf here; clear some basic fields */ 3117 bf->bf_next = NULL; /* XXX just to be sure */ 3118 bf->bf_last = NULL; /* XXX again, just to be sure */ 3119 bf->bf_comp = NULL; /* XXX again, just to be sure */ 3120 bzero(&bf->bf_state, sizeof(bf->bf_state)); 3121 3122 /* 3123 * Track the descriptor ID only if doing EDMA 3124 */ 3125 if (sc->sc_isedma) { 3126 bf->bf_descid = sc->sc_txbuf_descid; 3127 sc->sc_txbuf_descid++; 3128 } 3129 3130 return bf; 3131} 3132 3133/* 3134 * When retrying a software frame, buffers marked ATH_BUF_BUSY 3135 * can't be thrown back on the queue as they could still be 3136 * in use by the hardware. 3137 * 3138 * This duplicates the buffer, or returns NULL.
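 *
 * In outline a caller retrying a busy frame does (a sketch; error
 * handling elided):
 *
 *	tbf = ath_buf_clone(sc, bf);
 *	if (tbf != NULL) {
 *		ath_freebuf(sc, bf);
 *		bf = tbf;
 *	}
 *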
3139 * 3140 * The descriptor is also copied but the link pointers and 3141 * the DMA segments aren't copied; this frame should thus 3142 * be again passed through the descriptor setup/chain routines 3143 * so the link is correct. 3144 * 3145 * The caller must free the buffer using ath_freebuf(). 3146 */ 3147struct ath_buf * 3148ath_buf_clone(struct ath_softc *sc, struct ath_buf *bf) 3149{ 3150 struct ath_buf *tbf; 3151 3152 tbf = ath_getbuf(sc, 3153 (bf->bf_flags & ATH_BUF_MGMT) ? 3154 ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL); 3155 if (tbf == NULL) 3156 return NULL; /* XXX failure? Why? */ 3157 3158 /* Copy basics */ 3159 tbf->bf_next = NULL; 3160 tbf->bf_nseg = bf->bf_nseg; 3161 tbf->bf_flags = bf->bf_flags & ATH_BUF_FLAGS_CLONE; 3162 tbf->bf_status = bf->bf_status; 3163 tbf->bf_m = bf->bf_m; 3164 tbf->bf_node = bf->bf_node; 3165 KASSERT((bf->bf_node != NULL), ("%s: bf_node=NULL!", __func__)); 3166 /* will be setup by the chain/setup function */ 3167 tbf->bf_lastds = NULL; 3168 /* for now, last == self */ 3169 tbf->bf_last = tbf; 3170 tbf->bf_comp = bf->bf_comp; 3171 3172 /* NOTE: DMA segments will be setup by the setup/chain functions */ 3173 3174 /* The caller has to re-init the descriptor + links */ 3175 3176 /* 3177 * Free the DMA mapping here, before we NULL the mbuf. 3178 * We must only call bus_dmamap_unload() once per mbuf chain 3179 * or behaviour is undefined. 3180 */ 3181 if (bf->bf_m != NULL) { 3182 /* 3183 * XXX is this POSTWRITE call required? 3184 */ 3185 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 3186 BUS_DMASYNC_POSTWRITE); 3187 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3188 } 3189 3190 bf->bf_m = NULL; 3191 bf->bf_node = NULL; 3192 3193 /* Copy state */ 3194 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state)); 3195 3196 return tbf; 3197} 3198 3199struct ath_buf * 3200ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype) 3201{ 3202 struct ath_buf *bf; 3203 3204 ATH_TXBUF_LOCK(sc); 3205 bf = _ath_getbuf_locked(sc, btype); 3206 /* 3207 * If a mgmt buffer was requested but we're out of those, 3208 * try requesting a normal one. 3209 */ 3210 if (bf == NULL && btype == ATH_BUFTYPE_MGMT) 3211 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); 3212 ATH_TXBUF_UNLOCK(sc); 3213 if (bf == NULL) { 3214 struct ifnet *ifp = sc->sc_ifp; 3215 3216 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__); 3217 sc->sc_stats.ast_tx_qstop++; 3218 IF_LOCK(&ifp->if_snd); 3219 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 3220 IF_UNLOCK(&ifp->if_snd); 3221 } 3222 return bf; 3223} 3224 3225static void 3226ath_qflush(struct ifnet *ifp) 3227{ 3228 3229 /* XXX TODO */ 3230} 3231 3232/* 3233 * Transmit a single frame. 3234 * 3235 * net80211 will free the node reference if the transmit 3236 * fails, so don't free the node reference here. 3237 */ 3238static int 3239ath_transmit(struct ifnet *ifp, struct mbuf *m) 3240{ 3241 struct ieee80211com *ic = ifp->if_l2com; 3242 struct ath_softc *sc = ic->ic_ifp->if_softc; 3243 struct ieee80211_node *ni; 3244 struct mbuf *next; 3245 struct ath_buf *bf; 3246 ath_bufhead frags; 3247 int retval = 0; 3248 3249 /* 3250 * Tell the reset path that we're currently transmitting. 
3251 */ 3252 ATH_PCU_LOCK(sc); 3253 if (sc->sc_inreset_cnt > 0) { 3254 DPRINTF(sc, ATH_DEBUG_XMIT, 3255 "%s: sc_inreset_cnt > 0; bailing\n", __func__); 3256 ATH_PCU_UNLOCK(sc); 3257 IF_LOCK(&ifp->if_snd); 3258 sc->sc_stats.ast_tx_qstop++; 3259 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 3260 IF_UNLOCK(&ifp->if_snd); 3261 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: OACTIVE, finish"); 3262 return (ENOBUFS); /* XXX should be EINVAL or? */ 3263 } 3264 sc->sc_txstart_cnt++; 3265 ATH_PCU_UNLOCK(sc); 3266 3267 /* Wake the hardware up already */ 3268 ATH_LOCK(sc); 3269 ath_power_set_power_state(sc, HAL_PM_AWAKE); 3270 ATH_UNLOCK(sc); 3271 3272 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: start"); 3273 /* 3274 * Grab the TX lock - it's ok to do this here; we haven't 3275 * yet started transmitting. 3276 */ 3277 ATH_TX_LOCK(sc); 3278 3279 /* 3280 * Node reference, if there's one. 3281 */ 3282 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; 3283 3284 /* 3285 * Enforce how deep a node queue can get. 3286 * 3287 * XXX it would be nicer if we kept an mbuf queue per 3288 * node and only whacked them into ath_bufs when we 3289 * are ready to schedule some traffic from them. 3290 * .. that may come later. 3291 * 3292 * XXX we should also track the per-node hardware queue 3293 * depth so it is easy to limit the _SUM_ of the swq and 3294 * hwq frames. Since we only schedule two HWQ frames 3295 * at a time, this should be OK for now. 3296 */ 3297 if ((!(m->m_flags & M_EAPOL)) && 3298 (ATH_NODE(ni)->an_swq_depth > sc->sc_txq_node_maxdepth)) { 3299 sc->sc_stats.ast_tx_nodeq_overflow++; 3300 m_freem(m); 3301 m = NULL; 3302 retval = ENOBUFS; 3303 goto finish; 3304 } 3305 3306 /* 3307 * Check how many TX buffers are available. 3308 * 3309 * If this is for non-EAPOL traffic, just leave some 3310 * space free in order for buffer cloning and raw 3311 * frame transmission to occur. 3312 * 3313 * If it's for EAPOL traffic, ignore this for now. 3314 * Management traffic will be sent via the raw transmit 3315 * method which bypasses this check. 3316 * 3317 * This is needed to ensure that EAPOL frames during 3318 * (re) keying have a chance to go out. 3319 * 3320 * See kern/138379 for more information. 3321 */ 3322 if ((!(m->m_flags & M_EAPOL)) && 3323 (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree)) { 3324 sc->sc_stats.ast_tx_nobuf++; 3325 m_freem(m); 3326 m = NULL; 3327 retval = ENOBUFS; 3328 goto finish; 3329 } 3330 3331 /* 3332 * Grab a TX buffer and associated resources. 3333 * 3334 * If it's an EAPOL frame, allocate a MGMT ath_buf. 3335 * That way even with temporary buffer exhaustion due to 3336 * the data path doesn't leave us without the ability 3337 * to transmit management frames. 3338 * 3339 * Otherwise allocate a normal buffer. 3340 */ 3341 if (m->m_flags & M_EAPOL) 3342 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); 3343 else 3344 bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL); 3345 3346 if (bf == NULL) { 3347 /* 3348 * If we failed to allocate a buffer, fail. 3349 * 3350 * We shouldn't fail normally, due to the check 3351 * above. 3352 */ 3353 sc->sc_stats.ast_tx_nobuf++; 3354 IF_LOCK(&ifp->if_snd); 3355 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 3356 IF_UNLOCK(&ifp->if_snd); 3357 m_freem(m); 3358 m = NULL; 3359 retval = ENOBUFS; 3360 goto finish; 3361 } 3362 3363 /* 3364 * At this point we have a buffer; so we need to free it 3365 * if we hit any error conditions. 3366 */ 3367 3368 /* 3369 * Check for fragmentation. 
If this frame 3370 * has been broken up verify we have enough 3371 * buffers to send all the fragments so all 3372 * go out or none... 3373 */ 3374 TAILQ_INIT(&frags); 3375 if ((m->m_flags & M_FRAG) && 3376 !ath_txfrag_setup(sc, &frags, m, ni)) { 3377 DPRINTF(sc, ATH_DEBUG_XMIT, 3378 "%s: out of txfrag buffers\n", __func__); 3379 sc->sc_stats.ast_tx_nofrag++; 3380 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 3381 ath_freetx(m); 3382 goto bad; 3383 } 3384 3385 /* 3386 * At this point if we have any TX fragments, then we will 3387 * have bumped the node reference once for each of those. 3388 */ 3389 3390 /* 3391 * XXX Is there anything actually _enforcing_ that the 3392 * fragments are being transmitted in one hit, rather than 3393 * being interleaved with other transmissions on that 3394 * hardware queue? 3395 * 3396 * The ATH TX output lock is the only thing serialising this 3397 * right now. 3398 */ 3399 3400 /* 3401 * Calculate the "next fragment" length field in ath_buf 3402 * in order to let the transmit path know enough about 3403 * what to next write to the hardware. 3404 */ 3405 if (m->m_flags & M_FRAG) { 3406 struct ath_buf *fbf = bf; 3407 struct ath_buf *n_fbf = NULL; 3408 struct mbuf *fm = m->m_nextpkt; 3409 3410 /* 3411 * We need to walk the list of fragments and set 3412 * the next size to the following buffer. 3413 * However, the first buffer isn't in the frag 3414 * list, so we have to do some gymnastics here. 3415 */ 3416 TAILQ_FOREACH(n_fbf, &frags, bf_list) { 3417 fbf->bf_nextfraglen = fm->m_pkthdr.len; 3418 fbf = n_fbf; 3419 fm = fm->m_nextpkt; 3420 } 3421 } 3422 3423 /* 3424 * Bump the ifp output counter. 3425 * 3426 * XXX should use atomics? 3427 */ 3428 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 3429nextfrag: 3430 /* 3431 * Pass the frame to the h/w for transmission. 3432 * Fragmented frames have each frag chained together 3433 * with m_nextpkt. We know there are sufficient ath_buf's 3434 * to send all the frags because of work done by 3435 * ath_txfrag_setup. We leave m_nextpkt set while 3436 * calling ath_tx_start so it can use it to extend 3437 * the tx duration to cover the subsequent frag and 3438 * so it can reclaim all the mbufs in case of an error; 3439 * ath_tx_start clears m_nextpkt once it commits to 3440 * handing the frame to the hardware. 3441 * 3442 * Note: if this fails, then the mbufs are freed but 3443 * not the node reference. 3444 */ 3445 next = m->m_nextpkt; 3446 if (ath_tx_start(sc, ni, bf, m)) { 3447bad: 3448 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 3449reclaim: 3450 bf->bf_m = NULL; 3451 bf->bf_node = NULL; 3452 ATH_TXBUF_LOCK(sc); 3453 ath_returnbuf_head(sc, bf); 3454 /* 3455 * Free the rest of the node references and 3456 * buffers for the fragment list. 3457 */ 3458 ath_txfrag_cleanup(sc, &frags, ni); 3459 ATH_TXBUF_UNLOCK(sc); 3460 retval = ENOBUFS; 3461 goto finish; 3462 } 3463 3464 /* 3465 * Check here if the node is in power save state. 3466 */ 3467 ath_tx_update_tim(sc, ni, 1); 3468 3469 if (next != NULL) { 3470 /* 3471 * Beware of state changing between frags. 3472 * XXX check sta power-save state?
3473 */ 3474 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { 3475 DPRINTF(sc, ATH_DEBUG_XMIT, 3476 "%s: flush fragmented packet, state %s\n", 3477 __func__, 3478 ieee80211_state_name[ni->ni_vap->iv_state]); 3479 /* XXX dmamap */ 3480 ath_freetx(next); 3481 goto reclaim; 3482 } 3483 m = next; 3484 bf = TAILQ_FIRST(&frags); 3485 KASSERT(bf != NULL, ("no buf for txfrag")); 3486 TAILQ_REMOVE(&frags, bf, bf_list); 3487 goto nextfrag; 3488 } 3489 3490 /* 3491 * Bump watchdog timer. 3492 */ 3493 sc->sc_wd_timer = 5; 3494 3495finish: 3496 ATH_TX_UNLOCK(sc); 3497 3498 /* 3499 * Finished transmitting! 3500 */ 3501 ATH_PCU_LOCK(sc); 3502 sc->sc_txstart_cnt--; 3503 ATH_PCU_UNLOCK(sc); 3504 3505 /* Sleep the hardware if required */ 3506 ATH_LOCK(sc); 3507 ath_power_restore_power_state(sc); 3508 ATH_UNLOCK(sc); 3509 3510 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: finished"); 3511 3512 return (retval); 3513} 3514 3515static int 3516ath_media_change(struct ifnet *ifp) 3517{ 3518 int error = ieee80211_media_change(ifp); 3519 /* NB: only the fixed rate can change and that doesn't need a reset */ 3520 return (error == ENETRESET ? 0 : error); 3521} 3522 3523/* 3524 * Block/unblock tx+rx processing while a key change is done. 3525 * We assume the caller serializes key management operations 3526 * so we only need to worry about synchronization with other 3527 * uses that originate in the driver. 3528 */ 3529static void 3530ath_key_update_begin(struct ieee80211vap *vap) 3531{ 3532 struct ifnet *ifp = vap->iv_ic->ic_ifp; 3533 struct ath_softc *sc = ifp->if_softc; 3534 3535 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 3536 taskqueue_block(sc->sc_tq); 3537} 3538 3539static void 3540ath_key_update_end(struct ieee80211vap *vap) 3541{ 3542 struct ifnet *ifp = vap->iv_ic->ic_ifp; 3543 struct ath_softc *sc = ifp->if_softc; 3544 3545 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 3546 taskqueue_unblock(sc->sc_tq); 3547} 3548 3549static void 3550ath_update_promisc(struct ifnet *ifp) 3551{ 3552 struct ath_softc *sc = ifp->if_softc; 3553 u_int32_t rfilt; 3554 3555 /* configure rx filter */ 3556 ATH_LOCK(sc); 3557 ath_power_set_power_state(sc, HAL_PM_AWAKE); 3558 rfilt = ath_calcrxfilter(sc); 3559 ath_hal_setrxfilter(sc->sc_ah, rfilt); 3560 ath_power_restore_power_state(sc); 3561 ATH_UNLOCK(sc); 3562 3563 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); 3564} 3565 3566/* 3567 * Driver-internal mcast update call. 3568 * 3569 * Assumes the hardware is already awake. 3570 */ 3571static void 3572ath_update_mcast_hw(struct ath_softc *sc) 3573{ 3574 struct ifnet *ifp = sc->sc_ifp; 3575 u_int32_t mfilt[2]; 3576 3577 /* calculate and install multicast filter */ 3578 if ((ifp->if_flags & IFF_ALLMULTI) == 0) { 3579 struct ifmultiaddr *ifma; 3580 /* 3581 * Merge multicast addresses to form the hardware filter. 3582 */ 3583 mfilt[0] = mfilt[1] = 0; 3584 if_maddr_rlock(ifp); /* XXX need some fiddling to remove? 
*/ 3585 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3586 caddr_t dl; 3587 u_int32_t val; 3588 u_int8_t pos; 3589 3590 /* calculate XOR of eight 6-bit values */ 3591 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 3592 val = LE_READ_4(dl + 0); 3593 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 3594 val = LE_READ_4(dl + 3); 3595 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 3596 pos &= 0x3f; 3597 mfilt[pos / 32] |= (1 << (pos % 32)); 3598 } 3599 if_maddr_runlock(ifp); 3600 } else 3601 mfilt[0] = mfilt[1] = ~0; 3602 3603 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]); 3604 3605 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n", 3606 __func__, mfilt[0], mfilt[1]); 3607} 3608 3609/* 3610 * Called from the net80211 layer - force the hardware 3611 * awake before operating. 3612 */ 3613static void 3614ath_update_mcast(struct ifnet *ifp) 3615{ 3616 struct ath_softc *sc = ifp->if_softc; 3617 3618 ATH_LOCK(sc); 3619 ath_power_set_power_state(sc, HAL_PM_AWAKE); 3620 ATH_UNLOCK(sc); 3621 3622 ath_update_mcast_hw(sc); 3623 3624 ATH_LOCK(sc); 3625 ath_power_restore_power_state(sc); 3626 ATH_UNLOCK(sc); 3627} 3628 3629void 3630ath_mode_init(struct ath_softc *sc) 3631{ 3632 struct ifnet *ifp = sc->sc_ifp; 3633 struct ath_hal *ah = sc->sc_ah; 3634 u_int32_t rfilt; 3635 3636 /* configure rx filter */ 3637 rfilt = ath_calcrxfilter(sc); 3638 ath_hal_setrxfilter(ah, rfilt); 3639 3640 /* configure operational mode */ 3641 ath_hal_setopmode(ah); 3642 3643 DPRINTF(sc, ATH_DEBUG_STATE | ATH_DEBUG_MODE, 3644 "%s: ah=%p, ifp=%p, if_addr=%p\n", 3645 __func__, 3646 ah, 3647 ifp, 3648 (ifp == NULL) ? NULL : ifp->if_addr); 3649 3650 /* handle any link-level address change */ 3651 ath_hal_setmac(ah, IF_LLADDR(ifp)); 3652 3653 /* calculate and install multicast filter */ 3654 ath_update_mcast_hw(sc); 3655} 3656 3657/* 3658 * Set the slot time based on the current setting. 3659 */ 3660void 3661ath_setslottime(struct ath_softc *sc) 3662{ 3663 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 3664 struct ath_hal *ah = sc->sc_ah; 3665 u_int usec; 3666 3667 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan)) 3668 usec = 13; 3669 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan)) 3670 usec = 21; 3671 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { 3672 /* honor short/long slot time only in 11g */ 3673 /* XXX shouldn't honor on pure g or turbo g channel */ 3674 if (ic->ic_flags & IEEE80211_F_SHSLOT) 3675 usec = HAL_SLOT_TIME_9; 3676 else 3677 usec = HAL_SLOT_TIME_20; 3678 } else 3679 usec = HAL_SLOT_TIME_9; 3680 3681 DPRINTF(sc, ATH_DEBUG_RESET, 3682 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n", 3683 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, 3684 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec); 3685 3686 /* Wake up the hardware first before updating the slot time */ 3687 ATH_LOCK(sc); 3688 ath_power_set_power_state(sc, HAL_PM_AWAKE); 3689 ath_hal_setslottime(ah, usec); 3690 ath_power_restore_power_state(sc); 3691 sc->sc_updateslot = OK; 3692 ATH_UNLOCK(sc); 3693} 3694 3695/* 3696 * Callback from the 802.11 layer to update the 3697 * slot time based on the current setting. 3698 */ 3699static void 3700ath_updateslot(struct ifnet *ifp) 3701{ 3702 struct ath_softc *sc = ifp->if_softc; 3703 struct ieee80211com *ic = ifp->if_l2com; 3704 3705 /* 3706 * When not coordinating the BSS, change the hardware 3707 * immediately. For other operations we defer the change 3708 * until beacon updates have propagated to the stations.
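 *
 * Concretely: hostap/mesh set sc_updateslot = UPDATE below and let
 * the beacon path apply the change later; everything else calls
 * ath_setslottime() directly (9us short / 20us long slot on 11g,
 * 13us on half-rate and 21us on quarter-rate channels, per the
 * routine above).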
3709 * 3710 * XXX sc_updateslot isn't changed behind a lock? 3711 */ 3712 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 3713 ic->ic_opmode == IEEE80211_M_MBSS) 3714 sc->sc_updateslot = UPDATE; 3715 else 3716 ath_setslottime(sc); 3717} 3718 3719/* 3720 * Append the contents of src to dst; both queues 3721 * are assumed to be locked. 3722 */ 3723void 3724ath_txqmove(struct ath_txq *dst, struct ath_txq *src) 3725{ 3726 3727 ATH_TXQ_LOCK_ASSERT(src); 3728 ATH_TXQ_LOCK_ASSERT(dst); 3729 3730 TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list); 3731 dst->axq_link = src->axq_link; 3732 src->axq_link = NULL; 3733 dst->axq_depth += src->axq_depth; 3734 dst->axq_aggr_depth += src->axq_aggr_depth; 3735 src->axq_depth = 0; 3736 src->axq_aggr_depth = 0; 3737} 3738 3739/* 3740 * Reset the hardware, with no loss. 3741 * 3742 * This can't be used for a general case reset. 3743 */ 3744static void 3745ath_reset_proc(void *arg, int pending) 3746{ 3747 struct ath_softc *sc = arg; 3748 struct ifnet *ifp = sc->sc_ifp; 3749 3750#if 0 3751 if_printf(ifp, "%s: resetting\n", __func__); 3752#endif 3753 ath_reset(ifp, ATH_RESET_NOLOSS); 3754} 3755 3756/* 3757 * Reset the hardware after detecting beacons have stopped. 3758 */ 3759static void 3760ath_bstuck_proc(void *arg, int pending) 3761{ 3762 struct ath_softc *sc = arg; 3763 struct ifnet *ifp = sc->sc_ifp; 3764 uint32_t hangs = 0; 3765 3766 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) 3767 if_printf(ifp, "bb hang detected (0x%x)\n", hangs); 3768 3769#ifdef ATH_DEBUG_ALQ 3770 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_STUCK_BEACON)) 3771 if_ath_alq_post(&sc->sc_alq, ATH_ALQ_STUCK_BEACON, 0, NULL); 3772#endif 3773 3774 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n", 3775 sc->sc_bmisscount); 3776 sc->sc_stats.ast_bstuck++; 3777 /* 3778 * This assumes that there's no simultaneous channel mode change 3779 * occuring. 3780 */ 3781 ath_reset(ifp, ATH_RESET_NOLOSS); 3782} 3783 3784static void 3785ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 3786{ 3787 bus_addr_t *paddr = (bus_addr_t*) arg; 3788 KASSERT(error == 0, ("error %u on bus_dma callback", error)); 3789 *paddr = segs->ds_addr; 3790} 3791 3792/* 3793 * Allocate the descriptors and appropriate DMA tag/setup. 3794 * 3795 * For some situations (eg EDMA TX completion), there isn't a requirement 3796 * for the ath_buf entries to be allocated. 3797 */ 3798int 3799ath_descdma_alloc_desc(struct ath_softc *sc, 3800 struct ath_descdma *dd, ath_bufhead *head, 3801 const char *name, int ds_size, int ndesc) 3802{ 3803#define DS2PHYS(_dd, _ds) \ 3804 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 3805#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \ 3806 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0) 3807 struct ifnet *ifp = sc->sc_ifp; 3808 int error; 3809 3810 dd->dd_descsize = ds_size; 3811 3812 DPRINTF(sc, ATH_DEBUG_RESET, 3813 "%s: %s DMA: %u desc, %d bytes per descriptor\n", 3814 __func__, name, ndesc, dd->dd_descsize); 3815 3816 dd->dd_name = name; 3817 dd->dd_desc_len = dd->dd_descsize * ndesc; 3818 3819 /* 3820 * Merlin work-around: 3821 * Descriptors that cross the 4KB boundary can't be used. 3822 * Assume one skipped descriptor per 4KB page. 3823 */ 3824 if (! ath_hal_split4ktrans(sc->sc_ah)) { 3825 int numpages = dd->dd_desc_len / 4096; 3826 dd->dd_desc_len += ds_size * numpages; 3827 } 3828 3829 /* 3830 * Setup DMA descriptor area. 
3831 * 3832 * BUS_DMA_ALLOCNOW is not used; we never use bounce 3833 * buffers for the descriptors themselves. 3834 */ 3835 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 3836 PAGE_SIZE, 0, /* alignment, bounds */ 3837 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 3838 BUS_SPACE_MAXADDR, /* highaddr */ 3839 NULL, NULL, /* filter, filterarg */ 3840 dd->dd_desc_len, /* maxsize */ 3841 1, /* nsegments */ 3842 dd->dd_desc_len, /* maxsegsize */ 3843 0, /* flags */ 3844 NULL, /* lockfunc */ 3845 NULL, /* lockarg */ 3846 &dd->dd_dmat); 3847 if (error != 0) { 3848 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); 3849 return error; 3850 } 3851 3852 /* allocate descriptors */ 3853 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, 3854 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, 3855 &dd->dd_dmamap); 3856 if (error != 0) { 3857 if_printf(ifp, "unable to alloc memory for %u %s descriptors, " 3858 "error %u\n", ndesc, dd->dd_name, error); 3859 goto fail1; 3860 } 3861 3862 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, 3863 dd->dd_desc, dd->dd_desc_len, 3864 ath_load_cb, &dd->dd_desc_paddr, 3865 BUS_DMA_NOWAIT); 3866 if (error != 0) { 3867 if_printf(ifp, "unable to map %s descriptors, error %u\n", 3868 dd->dd_name, error); 3869 goto fail2; 3870 } 3871 3872 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", 3873 __func__, dd->dd_name, (uint8_t *) dd->dd_desc, 3874 (u_long) dd->dd_desc_len, (caddr_t) dd->dd_desc_paddr, 3875 /*XXX*/ (u_long) dd->dd_desc_len); 3876 3877 return (0); 3878 3879fail2: 3880 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 3881fail1: 3882 bus_dma_tag_destroy(dd->dd_dmat); 3883 memset(dd, 0, sizeof(*dd)); 3884 return error; 3885#undef DS2PHYS 3886#undef ATH_DESC_4KB_BOUND_CHECK 3887} 3888 3889int 3890ath_descdma_setup(struct ath_softc *sc, 3891 struct ath_descdma *dd, ath_bufhead *head, 3892 const char *name, int ds_size, int nbuf, int ndesc) 3893{ 3894#define DS2PHYS(_dd, _ds) \ 3895 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 3896#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \ 3897 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0) 3898 struct ifnet *ifp = sc->sc_ifp; 3899 uint8_t *ds; 3900 struct ath_buf *bf; 3901 int i, bsize, error; 3902 3903 /* Allocate descriptors */ 3904 error = ath_descdma_alloc_desc(sc, dd, head, name, ds_size, 3905 nbuf * ndesc); 3906 3907 /* Assume any errors during allocation were dealt with */ 3908 if (error != 0) { 3909 return (error); 3910 } 3911 3912 ds = (uint8_t *) dd->dd_desc; 3913 3914 /* allocate rx buffers */ 3915 bsize = sizeof(struct ath_buf) * nbuf; 3916 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 3917 if (bf == NULL) { 3918 if_printf(ifp, "malloc of %s buffers failed, size %u\n", 3919 dd->dd_name, bsize); 3920 goto fail3; 3921 } 3922 dd->dd_bufptr = bf; 3923 3924 TAILQ_INIT(head); 3925 for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * dd->dd_descsize)) { 3926 bf->bf_desc = (struct ath_desc *) ds; 3927 bf->bf_daddr = DS2PHYS(dd, ds); 3928 if (! ath_hal_split4ktrans(sc->sc_ah)) { 3929 /* 3930 * Merlin WAR: Skip descriptor addresses which 3931 * cause 4KB boundary crossing along any point 3932 * in the descriptor. 
3933 */
3934 if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr,
3935 dd->dd_descsize)) {
3936 /* Start at the next page */
3937 ds += 0x1000 - (bf->bf_daddr & 0xFFF);
3938 bf->bf_desc = (struct ath_desc *) ds;
3939 bf->bf_daddr = DS2PHYS(dd, ds);
3940 }
3941 }
3942 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
3943 &bf->bf_dmamap);
3944 if (error != 0) {
3945 if_printf(ifp, "unable to create dmamap for %s "
3946 "buffer %u, error %u\n", dd->dd_name, i, error);
3947 ath_descdma_cleanup(sc, dd, head);
3948 return error;
3949 }
3950 bf->bf_lastds = bf->bf_desc; /* Just an initial value */
3951 TAILQ_INSERT_TAIL(head, bf, bf_list);
3952 }
3953
3954 /*
3955 * XXX TODO: ensure that ds doesn't overflow the descriptor
3956 * allocation; otherwise memory past the allocation will be
3957 * corrupted and the machine will crash.
3958 */
3959 return 0;
3960 /* XXX this should likely just call ath_descdma_cleanup() */
3961fail3:
3962 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3963 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3964 bus_dma_tag_destroy(dd->dd_dmat);
3965 memset(dd, 0, sizeof(*dd));
3966 return error;
3967#undef DS2PHYS
3968#undef ATH_DESC_4KB_BOUND_CHECK
3969}
3970
3971/*
3972 * Allocate ath_buf entries but no descriptor contents.
3973 *
3974 * This is for RX EDMA where the descriptors are the header part of
3975 * the RX buffer.
3976 */
3977int
3978ath_descdma_setup_rx_edma(struct ath_softc *sc,
3979 struct ath_descdma *dd, ath_bufhead *head,
3980 const char *name, int nbuf, int rx_status_len)
3981{
3982 struct ifnet *ifp = sc->sc_ifp;
3983 struct ath_buf *bf;
3984 int i, bsize, error;
3985
3986 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers\n",
3987 __func__, name, nbuf);
3988
3989 dd->dd_name = name;
3990 /*
3991 * This is mostly for show. We're not allocating any actual
3992 * descriptors here, as with EDMA RX the descriptor is part
3993 * of the RX buffer.
3994 *
3995 * However, dd_desc_len is used by ath_desc_free() to determine
3996 * whether we have already freed this DMA mapping.
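 * (A dd_desc_len of zero therefore means "never allocated
 * or already cleaned up"; ath_desc_free() below keys off
 * exactly that.)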
3997 */
3998 dd->dd_desc_len = rx_status_len * nbuf;
3999 dd->dd_descsize = rx_status_len;
4000
4001 /* allocate rx buffers */
4002 bsize = sizeof(struct ath_buf) * nbuf;
4003 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
4004 if (bf == NULL) {
4005 if_printf(ifp, "malloc of %s buffers failed, size %u\n",
4006 dd->dd_name, bsize);
4007 error = ENOMEM;
4008 goto fail3;
4009 }
4010 dd->dd_bufptr = bf;
4011
4012 TAILQ_INIT(head);
4013 for (i = 0; i < nbuf; i++, bf++) {
4014 bf->bf_desc = NULL;
4015 bf->bf_daddr = 0;
4016 bf->bf_lastds = NULL; /* Just an initial value */
4017
4018 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
4019 &bf->bf_dmamap);
4020 if (error != 0) {
4021 if_printf(ifp, "unable to create dmamap for %s "
4022 "buffer %u, error %u\n", dd->dd_name, i, error);
4023 ath_descdma_cleanup(sc, dd, head);
4024 return error;
4025 }
4026 TAILQ_INSERT_TAIL(head, bf, bf_list);
4027 }
4028 return 0;
4029fail3:
4030 memset(dd, 0, sizeof(*dd));
4031 return error;
4032}
4033
4034void
4035ath_descdma_cleanup(struct ath_softc *sc,
4036 struct ath_descdma *dd, ath_bufhead *head)
4037{
4038 struct ath_buf *bf;
4039 struct ieee80211_node *ni;
4040 int do_warning = 0;
4041
4042 if (dd->dd_dmamap != 0) {
4043 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
4044 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
4045 bus_dma_tag_destroy(dd->dd_dmat);
4046 }
4047
4048 if (head != NULL) {
4049 TAILQ_FOREACH(bf, head, bf_list) {
4050 if (bf->bf_m) {
4051 /*
4052 * XXX warn if there are buffers here.
4053 * XXX they should have been freed by
4054 * the owner!
4055 */
4056
4057 if (do_warning == 0) {
4058 do_warning = 1;
4059 device_printf(sc->sc_dev,
4060 "%s: %s: mbuf should've been"
4061 " unmapped/freed!\n",
4062 __func__,
4063 dd->dd_name);
4064 }
4065 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
4066 BUS_DMASYNC_POSTREAD);
4067 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
4068 m_freem(bf->bf_m);
4069 bf->bf_m = NULL;
4070 }
4071 if (bf->bf_dmamap != NULL) {
4072 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
4073 bf->bf_dmamap = NULL;
4074 }
4075 ni = bf->bf_node;
4076 bf->bf_node = NULL;
4077 if (ni != NULL) {
4078 /*
4079 * Reclaim node reference.
4080 */
4081 ieee80211_free_node(ni);
4082 }
4083 }
4084 }
4085
4086 if (head != NULL)
4087 TAILQ_INIT(head);
4088
4089 if (dd->dd_bufptr != NULL)
4090 free(dd->dd_bufptr, M_ATHDEV);
4091 memset(dd, 0, sizeof(*dd));
4092}
4093
4094static int
4095ath_desc_alloc(struct ath_softc *sc)
4096{
4097 int error;
4098
4099 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
4100 "tx", sc->sc_tx_desclen, ath_txbuf, ATH_MAX_SCATTER);
4101 if (error != 0) {
4102 return error;
4103 }
4104 sc->sc_txbuf_cnt = ath_txbuf;
4105
4106 error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt,
4107 "tx_mgmt", sc->sc_tx_desclen, ath_txbuf_mgmt,
4108 ATH_TXDESC);
4109 if (error != 0) {
4110 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
4111 return error;
4112 }
4113
4114 /*
4115 * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the
4116 * flag doesn't have to be set in ath_getbuf_locked().
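 * (ath_returnbuf_head()/ath_returnbuf_tail() further down
 * already use the ATH_BUF_MGMT flag to decide which free
 * list a buffer goes back to.)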
4117 */
4118
4119 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
4120 "beacon", sc->sc_tx_desclen, ATH_BCBUF, 1);
4121 if (error != 0) {
4122 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
4123 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
4124 &sc->sc_txbuf_mgmt);
4125 return error;
4126 }
4127 return 0;
4128}
4129
4130static void
4131ath_desc_free(struct ath_softc *sc)
4132{
4133
4134 if (sc->sc_bdma.dd_desc_len != 0)
4135 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
4136 if (sc->sc_txdma.dd_desc_len != 0)
4137 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
4138 if (sc->sc_txdma_mgmt.dd_desc_len != 0)
4139 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
4140 &sc->sc_txbuf_mgmt);
4141}
4142
4143static struct ieee80211_node *
4144ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4145{
4146 struct ieee80211com *ic = vap->iv_ic;
4147 struct ath_softc *sc = ic->ic_ifp->if_softc;
4148 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
4149 struct ath_node *an;
4150
4151 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
4152 if (an == NULL) {
4153 /* XXX stat+msg */
4154 return NULL;
4155 }
4156 ath_rate_node_init(sc, an);
4157
4158 /* Set up the mutex; there's no associd yet, so name it after the node pointer */
4159 snprintf(an->an_name, sizeof(an->an_name), "%s: node %p",
4160 device_get_nameunit(sc->sc_dev), an);
4161 mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF);
4162
4163 /* XXX setup ath_tid */
4164 ath_tx_tid_init(sc, an);
4165
4166 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, mac, ":", an);
4167 return &an->an_node;
4168}
4169
4170static void
4171ath_node_cleanup(struct ieee80211_node *ni)
4172{
4173 struct ieee80211com *ic = ni->ni_ic;
4174 struct ath_softc *sc = ic->ic_ifp->if_softc;
4175
4176 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__,
4177 ni->ni_macaddr, ":", ATH_NODE(ni));
4178
4179 /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */
4180 ath_tx_node_flush(sc, ATH_NODE(ni));
4181 ath_rate_node_cleanup(sc, ATH_NODE(ni));
4182 sc->sc_node_cleanup(ni);
4183}
4184
4185static void
4186ath_node_free(struct ieee80211_node *ni)
4187{
4188 struct ieee80211com *ic = ni->ni_ic;
4189 struct ath_softc *sc = ic->ic_ifp->if_softc;
4190
4191 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__,
4192 ni->ni_macaddr, ":", ATH_NODE(ni));
4193 mtx_destroy(&ATH_NODE(ni)->an_mtx);
4194 sc->sc_node_free(ni);
4195}
4196
4197static void
4198ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
4199{
4200 struct ieee80211com *ic = ni->ni_ic;
4201 struct ath_softc *sc = ic->ic_ifp->if_softc;
4202 struct ath_hal *ah = sc->sc_ah;
4203
4204 *rssi = ic->ic_node_getrssi(ni);
4205 if (ni->ni_chan != IEEE80211_CHAN_ANYC)
4206 *noise = ath_hal_getchannoise(ah, ni->ni_chan);
4207 else
4208 *noise = -95; /* nominally correct */
4209}
4210
4211/*
4212 * Set the default antenna.
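 *
 * (This is typically driven from the RX path: sc_rxotherant
 * counts frames received on the non-default antenna and,
 * once enough accumulate, the RX code calls here to switch
 * the default over; hence the sc_rxotherant reset below.
 * That's a sketch of the heuristic, not chapter and verse.)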
4213 */
4214void
4215ath_setdefantenna(struct ath_softc *sc, u_int antenna)
4216{
4217 struct ath_hal *ah = sc->sc_ah;
4218
4219 /* XXX block beacon interrupts */
4220 ath_hal_setdefantenna(ah, antenna);
4221 if (sc->sc_defant != antenna)
4222 sc->sc_stats.ast_ant_defswitch++;
4223 sc->sc_defant = antenna;
4224 sc->sc_rxotherant = 0;
4225}
4226
4227static void
4228ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
4229{
4230 txq->axq_qnum = qnum;
4231 txq->axq_ac = 0;
4232 txq->axq_depth = 0;
4233 txq->axq_aggr_depth = 0;
4234 txq->axq_intrcnt = 0;
4235 txq->axq_link = NULL;
4236 txq->axq_softc = sc;
4237 TAILQ_INIT(&txq->axq_q);
4238 TAILQ_INIT(&txq->axq_tidq);
4239 TAILQ_INIT(&txq->fifo.axq_q);
4240 ATH_TXQ_LOCK_INIT(sc, txq);
4241}
4242
4243/*
4244 * Setup a h/w transmit queue.
4245 */
4246static struct ath_txq *
4247ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
4248{
4249#define N(a) (sizeof(a)/sizeof(a[0]))
4250 struct ath_hal *ah = sc->sc_ah;
4251 HAL_TXQ_INFO qi;
4252 int qnum;
4253
4254 memset(&qi, 0, sizeof(qi));
4255 qi.tqi_subtype = subtype;
4256 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
4257 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
4258 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
4259 /*
4260 * Enable interrupts only for EOL and DESC conditions.
4261 * We mark tx descriptors to receive a DESC interrupt
4262 * when a tx queue gets deep; otherwise we wait for the
4263 * EOL to reap descriptors. Note that this is done to
4264 * reduce interrupt load and this only defers reaping
4265 * descriptors, never transmitting frames. Aside from
4266 * reducing interrupts this also permits more concurrency.
4267 * The only potential downside is if the tx queue backs
4268 * up in which case the top half of the kernel may back
4269 * up due to a lack of tx descriptors.
4270 */
4271 if (sc->sc_isedma)
4272 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE |
4273 HAL_TXQ_TXOKINT_ENABLE;
4274 else
4275 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE |
4276 HAL_TXQ_TXDESCINT_ENABLE;
4277
4278 qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
4279 if (qnum == -1) {
4280 /*
4281 * NB: don't print a message, this happens
4282 * normally on parts with too few tx queues
4283 */
4284 return NULL;
4285 }
4286 if (qnum >= N(sc->sc_txq)) {
4287 device_printf(sc->sc_dev,
4288 "hal qnum %u out of range, max %zu!\n",
4289 qnum, N(sc->sc_txq));
4290 ath_hal_releasetxqueue(ah, qnum);
4291 return NULL;
4292 }
4293 if (!ATH_TXQ_SETUP(sc, qnum)) {
4294 ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
4295 sc->sc_txqsetup |= 1<<qnum;
4296 }
4297 return &sc->sc_txq[qnum];
4298#undef N
4299}
4300
4301/*
4302 * Set up a hardware data transmit queue for the specified
4303 * access category. The hal may not support all requested
4304 * queues in which case it will return a reference to a
4305 * previously setup queue. We record the mapping from ACs
4306 * to h/w queues for use by ath_tx_start and also track
4307 * the set of h/w queues being used to optimize work in the
4308 * transmit interrupt handler and related routines.
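 *
 * (Typical attach-time usage is roughly:
 *
 *	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK) ||
 *	    !ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
 *	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
 *	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO))
 *		... fail the attach ...
 *
 * so on parts with too few queues several ACs may end up
 * sharing one h/w queue. A sketch; see ath_attach() for
 * the real sequence.)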
4309 */
4310static int
4311ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
4312{
4313#define N(a) (sizeof(a)/sizeof(a[0]))
4314 struct ath_txq *txq;
4315
4316 if (ac >= N(sc->sc_ac2q)) {
4317 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
4318 ac, N(sc->sc_ac2q));
4319 return 0;
4320 }
4321 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
4322 if (txq != NULL) {
4323 txq->axq_ac = ac;
4324 sc->sc_ac2q[ac] = txq;
4325 return 1;
4326 } else
4327 return 0;
4328#undef N
4329}
4330
4331/*
4332 * Update WME parameters for a transmit queue. (For example, a
 * wmep_logcwmin of 4 becomes a cwmin of (1<<4)-1 = 15 via
 * ATH_EXPONENT_TO_VALUE, and a wmep_txopLimit of 94 32us units
 * becomes 94<<5 = 3008us via ATH_TXOP_TO_US.)
4333 */
4334static int
4335ath_txq_update(struct ath_softc *sc, int ac)
4336{
4337#define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1)
4338#define ATH_TXOP_TO_US(v) (v<<5)
4339 struct ifnet *ifp = sc->sc_ifp;
4340 struct ieee80211com *ic = ifp->if_l2com;
4341 struct ath_txq *txq = sc->sc_ac2q[ac];
4342 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
4343 struct ath_hal *ah = sc->sc_ah;
4344 HAL_TXQ_INFO qi;
4345
4346 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
4347#ifdef IEEE80211_SUPPORT_TDMA
4348 if (sc->sc_tdma) {
4349 /*
4350 * AIFS is zero so there's no pre-transmit wait. The
4351 * burst time defines the slot duration and is configured
4352 * through net80211. The QCU is set up to not do post-xmit
4353 * back-off, to lock out all lower-priority QCUs, and to
4354 * fire the DMA beacon alert timer, which is set up based
4355 * on the slot configuration.
4356 */
4357 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
4358 | HAL_TXQ_TXERRINT_ENABLE
4359 | HAL_TXQ_TXURNINT_ENABLE
4360 | HAL_TXQ_TXEOLINT_ENABLE
4361 | HAL_TXQ_DBA_GATED
4362 | HAL_TXQ_BACKOFF_DISABLE
4363 | HAL_TXQ_ARB_LOCKOUT_GLOBAL
4364 ;
4365 qi.tqi_aifs = 0;
4366 /* XXX +dbaprep? */
4367 qi.tqi_readyTime = sc->sc_tdmaslotlen;
4368 qi.tqi_burstTime = qi.tqi_readyTime;
4369 } else {
4370#endif
4371 /*
4372 * XXX shouldn't this just use the default flags
4373 * used in the previous queue setup?
4374 */
4375 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
4376 | HAL_TXQ_TXERRINT_ENABLE
4377 | HAL_TXQ_TXDESCINT_ENABLE
4378 | HAL_TXQ_TXURNINT_ENABLE
4379 | HAL_TXQ_TXEOLINT_ENABLE
4380 ;
4381 qi.tqi_aifs = wmep->wmep_aifsn;
4382 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
4383 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
4384 qi.tqi_readyTime = 0;
4385 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
4386#ifdef IEEE80211_SUPPORT_TDMA
4387 }
4388#endif
4389
4390 DPRINTF(sc, ATH_DEBUG_RESET,
4391 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n",
4392 __func__, txq->axq_qnum, qi.tqi_qflags,
4393 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime);
4394
4395 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
4396 if_printf(ifp, "unable to update hardware queue "
4397 "parameters for %s traffic!\n",
4398 ieee80211_wme_acnames[ac]);
4399 return 0;
4400 } else {
4401 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
4402 return 1;
4403 }
4404#undef ATH_TXOP_TO_US
4405#undef ATH_EXPONENT_TO_VALUE
4406}
4407
4408/*
4409 * Callback from the 802.11 layer to update WME parameters.
4410 */
4411int
4412ath_wme_update(struct ieee80211com *ic)
4413{
4414 struct ath_softc *sc = ic->ic_ifp->if_softc;
4415
4416 return !ath_txq_update(sc, WME_AC_BE) ||
4417 !ath_txq_update(sc, WME_AC_BK) ||
4418 !ath_txq_update(sc, WME_AC_VI) ||
4419 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
4420}
4421
4422/*
4423 * Reclaim resources for a setup queue.
4424 */
4425static void
4426ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
4427{
4428
4429 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
4430 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
4431 ATH_TXQ_LOCK_DESTROY(txq);
4432}
4433
4434/*
4435 * Reclaim all tx queue resources.
4436 */
4437static void
4438ath_tx_cleanup(struct ath_softc *sc)
4439{
4440 int i;
4441
4442 ATH_TXBUF_LOCK_DESTROY(sc);
4443 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4444 if (ATH_TXQ_SETUP(sc, i))
4445 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
4446}
4447
4448/*
4449 * Return h/w rate index for an IEEE rate (w/o basic rate bit)
4450 * using the current rates in sc_rixmap.
4451 */
4452int
4453ath_tx_findrix(const struct ath_softc *sc, uint8_t rate)
4454{
4455 int rix = sc->sc_rixmap[rate];
4456 /* NB: return lowest rix for invalid rate */
4457 return (rix == 0xff ? 0 : rix);
4458}
4459
4460static void
4461ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts,
4462 struct ath_buf *bf)
4463{
4464 struct ieee80211_node *ni = bf->bf_node;
4465 struct ifnet *ifp = sc->sc_ifp;
4466 struct ieee80211com *ic = ifp->if_l2com;
4467 int sr, lr, pri;
4468
4469 if (ts->ts_status == 0) {
4470 u_int8_t txant = ts->ts_antenna;
4471 sc->sc_stats.ast_ant_tx[txant]++;
4472 sc->sc_ant_tx[txant]++;
4473 if (ts->ts_finaltsi != 0)
4474 sc->sc_stats.ast_tx_altrate++;
4475 pri = M_WME_GETAC(bf->bf_m);
4476 if (pri >= WME_AC_VO)
4477 ic->ic_wme.wme_hipri_traffic++;
4478 if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)
4479 ni->ni_inact = ni->ni_inact_reload;
4480 } else {
4481 if (ts->ts_status & HAL_TXERR_XRETRY)
4482 sc->sc_stats.ast_tx_xretries++;
4483 if (ts->ts_status & HAL_TXERR_FIFO)
4484 sc->sc_stats.ast_tx_fifoerr++;
4485 if (ts->ts_status & HAL_TXERR_FILT)
4486 sc->sc_stats.ast_tx_filtered++;
4487 if (ts->ts_status & HAL_TXERR_XTXOP)
4488 sc->sc_stats.ast_tx_xtxop++;
4489 if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
4490 sc->sc_stats.ast_tx_timerexpired++;
4491
4492 if (bf->bf_m->m_flags & M_FF)
4493 sc->sc_stats.ast_ff_txerr++;
4494 }
4495 /* XXX when is this valid? */
4496 if (ts->ts_flags & HAL_TX_DESC_CFG_ERR)
4497 sc->sc_stats.ast_tx_desccfgerr++;
4498 /*
4499 * This can be valid for successful frame transmission!
4500 * If there's a TX FIFO underrun during aggregate transmission,
4501 * the MAC will pad the rest of the aggregate with delimiters.
4502 * If a BA is returned, the frame is marked as "OK" and it's up
4503 * to the TX completion code to notice which frames weren't
4504 * successfully transmitted.
4505 */
4506 if (ts->ts_flags & HAL_TX_DATA_UNDERRUN)
4507 sc->sc_stats.ast_tx_data_underrun++;
4508 if (ts->ts_flags & HAL_TX_DELIM_UNDERRUN)
4509 sc->sc_stats.ast_tx_delim_underrun++;
4510
4511 sr = ts->ts_shortretry;
4512 lr = ts->ts_longretry;
4513 sc->sc_stats.ast_tx_shortretry += sr;
4514 sc->sc_stats.ast_tx_longretry += lr;
4515
4516}
4517
4518/*
4519 * The default completion. If fail is 1, this means
4520 * "please don't retry the frame, and just return -1 status
4521 * to the net80211 stack".
4522 */
4523void
4524ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4525{
4526 struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4527 int st;
4528
4529 if (fail == 1)
4530 st = -1;
4531 else
4532 st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ?
4533 ts->ts_status : HAL_TXERR_XRETRY;
4534
4535#if 0
4536 if (bf->bf_state.bfs_dobaw)
4537 device_printf(sc->sc_dev,
4538 "%s: bf %p: seqno %d: dobaw should've been cleared!\n",
4539 __func__,
4540 bf,
4541 SEQNO(bf->bf_state.bfs_seqno));
4542#endif
4543 if (bf->bf_next != NULL)
4544 device_printf(sc->sc_dev,
4545 "%s: bf %p: seqno %d: bf_next not NULL!\n",
4546 __func__,
4547 bf,
4548 SEQNO(bf->bf_state.bfs_seqno));
4549
4550 /*
4551 * Check if the node software queue is empty; if so
4552 * then clear the TIM.
4553 *
4554 * This needs to be done before the buffer is freed as
4555 * otherwise the node reference will have been released
4556 * and the node may not actually exist any longer.
4557 *
4558 * XXX I don't like this belonging here, but it's cleaner
4559 * to do it here right now than all the other places
4560 * where ath_tx_default_comp() is called.
4561 *
4562 * XXX TODO: during drain, ensure that the callback is
4563 * being called so we get a chance to update the TIM.
4564 */
4565 if (bf->bf_node) {
4566 ATH_TX_LOCK(sc);
4567 ath_tx_update_tim(sc, bf->bf_node, 0);
4568 ATH_TX_UNLOCK(sc);
4569 }
4570
4571 /*
4572 * Do any tx complete callback. Note this must
4573 * be done before releasing the node reference.
4574 * This will free the mbuf, release the net80211
4575 * node and recycle the ath_buf.
4576 */
4577 ath_tx_freebuf(sc, bf, st);
4578}
4579
4580/*
4581 * Update rate control with the given completion status.
4582 */
4583void
4584ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
4585 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen,
4586 int nframes, int nbad)
4587{
4588 struct ath_node *an;
4589
4590 /* Only for unicast frames */
4591 if (ni == NULL)
4592 return;
4593
4594 an = ATH_NODE(ni);
4595 ATH_NODE_UNLOCK_ASSERT(an);
4596
4597 if ((ts->ts_status & HAL_TXERR_FILT) == 0) {
4598 ATH_NODE_LOCK(an);
4599 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad);
4600 ATH_NODE_UNLOCK(an);
4601 }
4602}
4603
4604/*
4605 * Process the completion of the given buffer.
4606 *
4607 * This calls the rate control update and then the buffer completion.
4608 * This will either free the buffer or requeue it. In any case, the
4609 * bf pointer should be treated as invalid after this function is called.
4610 */
4611void
4612ath_tx_process_buf_completion(struct ath_softc *sc, struct ath_txq *txq,
4613 struct ath_tx_status *ts, struct ath_buf *bf)
4614{
4615 struct ieee80211_node *ni = bf->bf_node;
4616
4617 ATH_TX_UNLOCK_ASSERT(sc);
4618 ATH_TXQ_UNLOCK_ASSERT(txq);
4619
4620 /* If unicast frame, update general statistics */
4621 if (ni != NULL) {
4622 /* update statistics */
4623 ath_tx_update_stats(sc, ts, bf);
4624 }
4625
4626 /*
4627 * Call the completion handler.
4628 * The completion handler is responsible for
4629 * calling the rate control code.
4630 *
4631 * Frames with no completion handler get the
4632 * rate control code called here.
4633 */
4634 if (bf->bf_comp == NULL) {
4635 if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
4636 (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) {
4637 /*
4638 * XXX assume this isn't an aggregate
4639 * frame.
4640 */
4641 ath_tx_update_ratectrl(sc, ni,
4642 bf->bf_state.bfs_rc, ts,
4643 bf->bf_state.bfs_pktlen, 1,
4644 (ts->ts_status == 0 ? 0 : 1));
4645 }
4646 ath_tx_default_comp(sc, bf, 0);
4647 } else
4648 bf->bf_comp(sc, bf, 0);
4649}
4650
4651
4652
4653/*
4654 * Process completed xmit descriptors from the specified queue.
4655 * Kick the packet scheduler if needed.
This can occur from this
4656 * particular task.
4657 */
4658static int
4659ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
4660{
4661 struct ath_hal *ah = sc->sc_ah;
4662 struct ath_buf *bf;
4663 struct ath_desc *ds;
4664 struct ath_tx_status *ts;
4665 struct ieee80211_node *ni;
4666#ifdef IEEE80211_SUPPORT_SUPERG
4667 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
4668#endif /* IEEE80211_SUPPORT_SUPERG */
4669 int nacked;
4670 HAL_STATUS status;
4671
4672 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
4673 __func__, txq->axq_qnum,
4674 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
4675 txq->axq_link);
4676
4677 ATH_KTR(sc, ATH_KTR_TXCOMP, 4,
4678 "ath_tx_processq: txq=%u head %p link %p depth %p",
4679 txq->axq_qnum,
4680 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
4681 txq->axq_link,
4682 txq->axq_depth);
4683
4684 nacked = 0;
4685 for (;;) {
4686 ATH_TXQ_LOCK(txq);
4687 txq->axq_intrcnt = 0; /* reset periodic desc intr count */
4688 bf = TAILQ_FIRST(&txq->axq_q);
4689 if (bf == NULL) {
4690 ATH_TXQ_UNLOCK(txq);
4691 break;
4692 }
4693 ds = bf->bf_lastds; /* XXX must be setup correctly! */
4694 ts = &bf->bf_status.ds_txstat;
4695
4696 status = ath_hal_txprocdesc(ah, ds, ts);
4697#ifdef ATH_DEBUG
4698 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
4699 ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
4700 status == HAL_OK);
4701 else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0))
4702 ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
4703 status == HAL_OK);
4704#endif
4705#ifdef ATH_DEBUG_ALQ
4706 if (if_ath_alq_checkdebug(&sc->sc_alq,
4707 ATH_ALQ_EDMA_TXSTATUS)) {
4708 if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS,
4709 sc->sc_tx_statuslen,
4710 (char *) ds);
4711 }
4712#endif
4713
4714 if (status == HAL_EINPROGRESS) {
4715 ATH_KTR(sc, ATH_KTR_TXCOMP, 3,
4716 "ath_tx_processq: txq=%u, bf=%p ds=%p, HAL_EINPROGRESS",
4717 txq->axq_qnum, bf, ds);
4718 ATH_TXQ_UNLOCK(txq);
4719 break;
4720 }
4721 ATH_TXQ_REMOVE(txq, bf, bf_list);
4722
4723 /*
4724 * Sanity check.
4725 */
4726 if (txq->axq_qnum != bf->bf_state.bfs_tx_queue) {
4727 device_printf(sc->sc_dev,
4728 "%s: TXQ=%d: bf=%p, bfs_tx_queue=%d\n",
4729 __func__,
4730 txq->axq_qnum,
4731 bf,
4732 bf->bf_state.bfs_tx_queue);
4733 }
4734 if (txq->axq_qnum != bf->bf_last->bf_state.bfs_tx_queue) {
4735 device_printf(sc->sc_dev,
4736 "%s: TXQ=%d: bf_last=%p, bfs_tx_queue=%d\n",
4737 __func__,
4738 txq->axq_qnum,
4739 bf->bf_last,
4740 bf->bf_last->bf_state.bfs_tx_queue);
4741 }
4742
4743#if 0
4744 if (txq->axq_depth > 0) {
4745 /*
4746 * More frames follow. Mark the buffer busy
4747 * so it's not re-used while the hardware may
4748 * still re-read the link field in the descriptor.
4749 *
4750 * Use the last buffer in an aggregate as that
4751 * is where the hardware may be - intermediate
4752 * descriptors won't be "busy".
4753 */
4754 bf->bf_last->bf_flags |= ATH_BUF_BUSY;
4755 } else
4756 txq->axq_link = NULL;
4757#else
4758 bf->bf_last->bf_flags |= ATH_BUF_BUSY;
4759#endif
4760 if (bf->bf_state.bfs_aggr)
4761 txq->axq_aggr_depth--;
4762
4763 ni = bf->bf_node;
4764
4765 ATH_KTR(sc, ATH_KTR_TXCOMP, 5,
4766 "ath_tx_processq: txq=%u, bf=%p, ds=%p, ni=%p, ts_status=0x%08x",
4767 txq->axq_qnum, bf, ds, ni, ts->ts_status);
4768 /*
4769 * If unicast frame was ack'd update RSSI,
4770 * including the last rx time used to
4771 * work around phantom bmiss interrupts.
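 * (The nacked count accumulated below is what lets the
 * callers refresh sc_lastrx as part of that workaround.)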
4772 */
4773 if (ni != NULL && ts->ts_status == 0 &&
4774 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) {
4775 nacked++;
4776 sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
4777 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
4778 ts->ts_rssi);
4779 }
4780 ATH_TXQ_UNLOCK(txq);
4781
4782 /*
4783 * Update statistics and call completion
4784 */
4785 ath_tx_process_buf_completion(sc, txq, ts, bf);
4786
4787 /* XXX at this point, bf and ni may be totally invalid */
4788 }
4789#ifdef IEEE80211_SUPPORT_SUPERG
4790 /*
4791 * Flush fast-frame staging queue when traffic slows.
4792 */
4793 if (txq->axq_depth <= 1)
4794 ieee80211_ff_flush(ic, txq->axq_ac);
4795#endif
4796
4797 /* Kick the software TXQ scheduler */
4798 if (dosched) {
4799 ATH_TX_LOCK(sc);
4800 ath_txq_sched(sc, txq);
4801 ATH_TX_UNLOCK(sc);
4802 }
4803
4804 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
4805 "ath_tx_processq: txq=%u: done",
4806 txq->axq_qnum);
4807
4808 return nacked;
4809}
4810
4811#define TXQACTIVE(t, q) ( (t) & (1 << (q)))
4812
4813/*
4814 * Deferred processing of transmit interrupt; special-cased
4815 * for a single hardware transmit queue (e.g. 5210 and 5211).
4816 */
4817static void
4818ath_tx_proc_q0(void *arg, int npending)
4819{
4820 struct ath_softc *sc = arg;
4821 struct ifnet *ifp = sc->sc_ifp;
4822 uint32_t txqs;
4823
4824 ATH_PCU_LOCK(sc);
4825 sc->sc_txproc_cnt++;
4826 txqs = sc->sc_txq_active;
4827 sc->sc_txq_active &= ~txqs;
4828 ATH_PCU_UNLOCK(sc);
4829
4830 ATH_LOCK(sc);
4831 ath_power_set_power_state(sc, HAL_PM_AWAKE);
4832 ATH_UNLOCK(sc);
4833
4834 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
4835 "ath_tx_proc_q0: txqs=0x%08x", txqs);
4836
4837 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1))
4838 /* XXX why is lastrx updated in tx code? */
4839 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4840 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
4841 ath_tx_processq(sc, sc->sc_cabq, 1);
4842 IF_LOCK(&ifp->if_snd);
4843 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4844 IF_UNLOCK(&ifp->if_snd);
4845 sc->sc_wd_timer = 0;
4846
4847 if (sc->sc_softled)
4848 ath_led_event(sc, sc->sc_txrix);
4849
4850 ATH_PCU_LOCK(sc);
4851 sc->sc_txproc_cnt--;
4852 ATH_PCU_UNLOCK(sc);
4853
4854 ATH_LOCK(sc);
4855 ath_power_restore_power_state(sc);
4856 ATH_UNLOCK(sc);
4857
4858 ath_tx_kick(sc);
4859}
4860
4861/*
4862 * Deferred processing of transmit interrupt; special-cased
4863 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
4864 */
4865static void
4866ath_tx_proc_q0123(void *arg, int npending)
4867{
4868 struct ath_softc *sc = arg;
4869 struct ifnet *ifp = sc->sc_ifp;
4870 int nacked;
4871 uint32_t txqs;
4872
4873 ATH_PCU_LOCK(sc);
4874 sc->sc_txproc_cnt++;
4875 txqs = sc->sc_txq_active;
4876 sc->sc_txq_active &= ~txqs;
4877 ATH_PCU_UNLOCK(sc);
4878
4879 ATH_LOCK(sc);
4880 ath_power_set_power_state(sc, HAL_PM_AWAKE);
4881 ATH_UNLOCK(sc);
4882
4883 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
4884 "ath_tx_proc_q0123: txqs=0x%08x", txqs);
4885
4886 /*
4887 * Process each active queue.
4888 */
4889 nacked = 0;
4890 if (TXQACTIVE(txqs, 0))
4891 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1);
4892 if (TXQACTIVE(txqs, 1))
4893 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1);
4894 if (TXQACTIVE(txqs, 2))
4895 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1);
4896 if (TXQACTIVE(txqs, 3))
4897 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1);
4898 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
4899 ath_tx_processq(sc, sc->sc_cabq, 1);
4900 if (nacked)
4901 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4902
4903 IF_LOCK(&ifp->if_snd);
4904 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4905 IF_UNLOCK(&ifp->if_snd);
4906 sc->sc_wd_timer = 0;
4907
4908 if (sc->sc_softled)
4909 ath_led_event(sc, sc->sc_txrix);
4910
4911 ATH_PCU_LOCK(sc);
4912 sc->sc_txproc_cnt--;
4913 ATH_PCU_UNLOCK(sc);
4914
4915 ATH_LOCK(sc);
4916 ath_power_restore_power_state(sc);
4917 ATH_UNLOCK(sc);
4918
4919 ath_tx_kick(sc);
4920}
4921
4922/*
4923 * Deferred processing of transmit interrupt.
4924 */
4925static void
4926ath_tx_proc(void *arg, int npending)
4927{
4928 struct ath_softc *sc = arg;
4929 struct ifnet *ifp = sc->sc_ifp;
4930 int i, nacked;
4931 uint32_t txqs;
4932
4933 ATH_PCU_LOCK(sc);
4934 sc->sc_txproc_cnt++;
4935 txqs = sc->sc_txq_active;
4936 sc->sc_txq_active &= ~txqs;
4937 ATH_PCU_UNLOCK(sc);
4938
4939 ATH_LOCK(sc);
4940 ath_power_set_power_state(sc, HAL_PM_AWAKE);
4941 ATH_UNLOCK(sc);
4942
4943 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc: txqs=0x%08x", txqs);
4944
4945 /*
4946 * Process each active queue.
4947 */
4948 nacked = 0;
4949 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4950 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i))
4951 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1);
4952 if (nacked)
4953 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4954
4955 /* XXX check this inside of IF_LOCK? */
4956 IF_LOCK(&ifp->if_snd);
4957 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4958 IF_UNLOCK(&ifp->if_snd);
4959 sc->sc_wd_timer = 0;
4960
4961 if (sc->sc_softled)
4962 ath_led_event(sc, sc->sc_txrix);
4963
4964 ATH_PCU_LOCK(sc);
4965 sc->sc_txproc_cnt--;
4966 ATH_PCU_UNLOCK(sc);
4967
4968 ATH_LOCK(sc);
4969 ath_power_restore_power_state(sc);
4970 ATH_UNLOCK(sc);
4971
4972 ath_tx_kick(sc);
4973}
4974#undef TXQACTIVE
4975
4976/*
4977 * Deferred processing of TXQ rescheduling.
4978 */
4979static void
4980ath_txq_sched_tasklet(void *arg, int npending)
4981{
4982 struct ath_softc *sc = arg;
4983 int i;
4984
4985 /* XXX is skipping ok?
*/
4986 ATH_PCU_LOCK(sc);
4987#if 0
4988 if (sc->sc_inreset_cnt > 0) {
4989 device_printf(sc->sc_dev,
4990 "%s: sc_inreset_cnt > 0; skipping\n", __func__);
4991 ATH_PCU_UNLOCK(sc);
4992 return;
4993 }
4994#endif
4995 sc->sc_txproc_cnt++;
4996 ATH_PCU_UNLOCK(sc);
4997
4998 ATH_LOCK(sc);
4999 ath_power_set_power_state(sc, HAL_PM_AWAKE);
5000 ATH_UNLOCK(sc);
5001
5002 ATH_TX_LOCK(sc);
5003 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
5004 if (ATH_TXQ_SETUP(sc, i)) {
5005 ath_txq_sched(sc, &sc->sc_txq[i]);
5006 }
5007 }
5008 ATH_TX_UNLOCK(sc);
5009
5010 ATH_LOCK(sc);
5011 ath_power_restore_power_state(sc);
5012 ATH_UNLOCK(sc);
5013
5014 ATH_PCU_LOCK(sc);
5015 sc->sc_txproc_cnt--;
5016 ATH_PCU_UNLOCK(sc);
5017}
5018
5019void
5020ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf)
5021{
5022
5023 ATH_TXBUF_LOCK_ASSERT(sc);
5024
5025 if (bf->bf_flags & ATH_BUF_MGMT)
5026 TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list);
5027 else {
5028 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
5029 sc->sc_txbuf_cnt++;
5030 if (sc->sc_txbuf_cnt > ath_txbuf) {
5031 device_printf(sc->sc_dev,
5032 "%s: sc_txbuf_cnt > %d?\n",
5033 __func__,
5034 ath_txbuf);
5035 sc->sc_txbuf_cnt = ath_txbuf;
5036 }
5037 }
5038}
5039
5040void
5041ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf)
5042{
5043
5044 ATH_TXBUF_LOCK_ASSERT(sc);
5045
5046 if (bf->bf_flags & ATH_BUF_MGMT)
5047 TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list);
5048 else {
5049 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
5050 sc->sc_txbuf_cnt++;
5051 if (sc->sc_txbuf_cnt > ATH_TXBUF) {
5052 device_printf(sc->sc_dev,
5053 "%s: sc_txbuf_cnt > %d?\n",
5054 __func__,
5055 ATH_TXBUF);
5056 sc->sc_txbuf_cnt = ATH_TXBUF;
5057 }
5058 }
5059}
5060
5061/*
5062 * Free the holding buffer if it exists
5063 */
5064void
5065ath_txq_freeholdingbuf(struct ath_softc *sc, struct ath_txq *txq)
5066{
5067 ATH_TXBUF_UNLOCK_ASSERT(sc);
5068 ATH_TXQ_LOCK_ASSERT(txq);
5069
5070 if (txq->axq_holdingbf == NULL)
5071 return;
5072
5073 txq->axq_holdingbf->bf_flags &= ~ATH_BUF_BUSY;
5074
5075 ATH_TXBUF_LOCK(sc);
5076 ath_returnbuf_tail(sc, txq->axq_holdingbf);
5077 ATH_TXBUF_UNLOCK(sc);
5078
5079 txq->axq_holdingbf = NULL;
5080}
5081
5082/*
5083 * Add this buffer to the holding queue, freeing the previous
5084 * one if it exists.
5085 */
5086static void
5087ath_txq_addholdingbuf(struct ath_softc *sc, struct ath_buf *bf)
5088{
5089 struct ath_txq *txq;
5090
5091 txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue];
5092
5093 ATH_TXBUF_UNLOCK_ASSERT(sc);
5094 ATH_TXQ_LOCK_ASSERT(txq);
5095
5096 /* XXX assert ATH_BUF_BUSY is set */
5097
5098 /* XXX assert the tx queue is under the max number */
5099 if (bf->bf_state.bfs_tx_queue >= HAL_NUM_TX_QUEUES) {
5100 device_printf(sc->sc_dev, "%s: bf=%p: invalid tx queue (%d)\n",
5101 __func__,
5102 bf,
5103 bf->bf_state.bfs_tx_queue);
5104 bf->bf_flags &= ~ATH_BUF_BUSY;
5105 ath_returnbuf_tail(sc, bf);
5106 return;
5107 }
5108 ath_txq_freeholdingbuf(sc, txq);
5109 txq->axq_holdingbf = bf;
5110}
5111
5112/*
5113 * Return a buffer to the pool and update the 'busy' flag on the
5114 * previous 'tail' entry.
5115 *
5116 * This _must_ only be called when the buffer is involved in a completed
5117 * TX. The logic is that if it was part of an active TX, the previous
5118 * buffer on the list is now not involved in a halted TX DMA queue, waiting
5119 * for restart (eg for TDMA.)
5120 *
5121 * The caller must free the mbuf and recycle the node reference.
5122 *
5123 * XXX This method of handling busy / holding buffers is insanely stupid.
5124 * It requires bf_state.bfs_tx_queue to be correctly assigned. It would
5125 * be much nicer if buffers in the processq() methods would instead be
5126 * always completed there (pushed onto a txq or ath_bufhead) so we knew
5127 * exactly what hardware queue they came from in the first place.
5128 */
5129void
5130ath_freebuf(struct ath_softc *sc, struct ath_buf *bf)
5131{
5132 struct ath_txq *txq;
5133
5134 txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue];
5135
5136 KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__));
5137 KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__));
5138
5139 /*
5140 * If this buffer is busy, push it onto the holding queue.
5141 */
5142 if (bf->bf_flags & ATH_BUF_BUSY) {
5143 ATH_TXQ_LOCK(txq);
5144 ath_txq_addholdingbuf(sc, bf);
5145 ATH_TXQ_UNLOCK(txq);
5146 return;
5147 }
5148
5149 /*
5150 * Not a busy buffer, so free normally
5151 */
5152 ATH_TXBUF_LOCK(sc);
5153 ath_returnbuf_tail(sc, bf);
5154 ATH_TXBUF_UNLOCK(sc);
5155}
5156
5157/*
5158 * This is currently used by ath_tx_draintxq() and
5159 * ath_tx_tid_free_pkts().
5160 *
5161 * It recycles a single ath_buf.
5162 */
5163void
5164ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status)
5165{
5166 struct ieee80211_node *ni = bf->bf_node;
5167 struct mbuf *m0 = bf->bf_m;
5168
5169 /*
5170 * Make sure that we only sync/unload if there's an mbuf.
5171 * If not (eg we cloned a buffer), the unload will have already
5172 * occurred.
5173 */
5174 if (bf->bf_m != NULL) {
5175 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
5176 BUS_DMASYNC_POSTWRITE);
5177 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
5178 }
5179
5180 bf->bf_node = NULL;
5181 bf->bf_m = NULL;
5182
5183 /* Free the buffer, it's not needed any longer */
5184 ath_freebuf(sc, bf);
5185
5186 /* Pass the buffer back to net80211 - completing it */
5187 ieee80211_tx_complete(ni, m0, status);
5188}
5189
5190static struct ath_buf *
5191ath_tx_draintxq_get_one(struct ath_softc *sc, struct ath_txq *txq)
5192{
5193 struct ath_buf *bf;
5194
5195 ATH_TXQ_LOCK_ASSERT(txq);
5196
5197 /*
5198 * Drain the FIFO queue first, then if it's
5199 * empty, move to the normal frame queue.
5200 */
5201 bf = TAILQ_FIRST(&txq->fifo.axq_q);
5202 if (bf != NULL) {
5203 /*
5204 * Is it the last buffer in this set?
5205 * Decrement the FIFO counter.
5206 */
5207 if (bf->bf_flags & ATH_BUF_FIFOEND) {
5208 if (txq->axq_fifo_depth == 0) {
5209 device_printf(sc->sc_dev,
5210 "%s: Q%d: fifo_depth=0, fifo.axq_depth=%d?\n",
5211 __func__,
5212 txq->axq_qnum,
5213 txq->fifo.axq_depth);
5214 } else
5215 txq->axq_fifo_depth--;
5216 }
5217 ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list);
5218 return (bf);
5219 }
5220
5221 /*
5222 * Debugging!
5223 */
5224 if (txq->axq_fifo_depth != 0 || txq->fifo.axq_depth != 0) {
5225 device_printf(sc->sc_dev,
5226 "%s: Q%d: fifo_depth=%d, fifo.axq_depth=%d\n",
5227 __func__,
5228 txq->axq_qnum,
5229 txq->axq_fifo_depth,
5230 txq->fifo.axq_depth);
5231 }
5232
5233 /*
5234 * Now drain the pending queue.
5235 */
5236 bf = TAILQ_FIRST(&txq->axq_q);
5237 if (bf == NULL) {
5238 txq->axq_link = NULL;
5239 return (NULL);
5240 }
5241 ATH_TXQ_REMOVE(txq, bf, bf_list);
5242 return (bf);
5243}
5244
5245void
5246ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
5247{
5248#ifdef ATH_DEBUG
5249 struct ath_hal *ah = sc->sc_ah;
5250#endif
5251 struct ath_buf *bf;
5252 u_int ix;
5253
5254 /*
5255 * NB: this assumes output has been stopped and
5256 * we do not need to block ath_tx_proc
5257 */
5258 for (ix = 0;; ix++) {
5259 ATH_TXQ_LOCK(txq);
5260 bf = ath_tx_draintxq_get_one(sc, txq);
5261 if (bf == NULL) {
5262 ATH_TXQ_UNLOCK(txq);
5263 break;
5264 }
5265 if (bf->bf_state.bfs_aggr)
5266 txq->axq_aggr_depth--;
5267#ifdef ATH_DEBUG
5268 if (sc->sc_debug & ATH_DEBUG_RESET) {
5269 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
5270 int status = 0;
5271
5272 /*
5273 * EDMA operation has a TX completion FIFO
5274 * separate from the TX descriptor, so this
5275 * method of checking the "completion" status
5276 * is wrong.
5277 */
5278 if (! sc->sc_isedma) {
5279 status = (ath_hal_txprocdesc(ah,
5280 bf->bf_lastds,
5281 &bf->bf_status.ds_txstat) == HAL_OK);
5282 }
5283 ath_printtxbuf(sc, bf, txq->axq_qnum, ix, status);
5284 ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *),
5285 bf->bf_m->m_len, 0, -1);
5286 }
5287#endif /* ATH_DEBUG */
5288 /*
5289 * Since we're now doing magic in the completion
5290 * functions, we -must- call it for aggregation
5291 * destinations or BAW tracking will get upset.
5292 */
5293 /*
5294 * Clear ATH_BUF_BUSY; the completion handler
5295 * will free the buffer.
5296 */
5297 ATH_TXQ_UNLOCK(txq);
5298 bf->bf_flags &= ~ATH_BUF_BUSY;
5299 if (bf->bf_comp)
5300 bf->bf_comp(sc, bf, 1);
5301 else
5302 ath_tx_default_comp(sc, bf, 1);
5303 }
5304
5305 /*
5306 * Free the holding buffer if it exists
5307 */
5308 ATH_TXQ_LOCK(txq);
5309 ath_txq_freeholdingbuf(sc, txq);
5310 ATH_TXQ_UNLOCK(txq);
5311
5312 /*
5313 * Drain software queued frames which are on
5314 * active TIDs.
5315 */
5316 ath_tx_txq_drain(sc, txq);
5317}
5318
5319static void
5320ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
5321{
5322 struct ath_hal *ah = sc->sc_ah;
5323
5324 ATH_TXQ_LOCK_ASSERT(txq);
5325
5326 DPRINTF(sc, ATH_DEBUG_RESET,
5327 "%s: tx queue [%u] %p, active=%d, hwpending=%d, flags 0x%08x, "
5328 "link %p, holdingbf=%p\n",
5329 __func__,
5330 txq->axq_qnum,
5331 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
5332 (int) (!! ath_hal_txqenabled(ah, txq->axq_qnum)),
5333 (int) ath_hal_numtxpending(ah, txq->axq_qnum),
5334 txq->axq_flags,
5335 txq->axq_link,
5336 txq->axq_holdingbf);
5337
5338 (void) ath_hal_stoptxdma(ah, txq->axq_qnum);
5339 /* We've stopped TX DMA, so mark this as stopped.
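 * (ATH_TXQ_PUTRUNNING appears to track whether a TX
 * descriptor pointer has been pushed to this hardware
 * queue; clearing it forces the next queue start to
 * re-program TXDP. That reading is inferred from the
 * surrounding code, not chapter and verse.)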
*/
5340 txq->axq_flags &= ~ATH_TXQ_PUTRUNNING;
5341
5342#ifdef ATH_DEBUG
5343 if ((sc->sc_debug & ATH_DEBUG_RESET)
5344 && (txq->axq_holdingbf != NULL)) {
5345 ath_printtxbuf(sc, txq->axq_holdingbf, txq->axq_qnum, 0, 0);
5346 }
5347#endif
5348}
5349
5350int
5351ath_stoptxdma(struct ath_softc *sc)
5352{
5353 struct ath_hal *ah = sc->sc_ah;
5354 int i;
5355
5356 /* XXX return value */
5357 if (sc->sc_invalid)
5358 return 0;
5359
5360 if (!sc->sc_invalid) {
5361 /* don't touch the hardware if marked invalid */
5362 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
5363 __func__, sc->sc_bhalq,
5364 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
5365 NULL);
5366
5367 /* stop the beacon queue */
5368 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
5369
5370 /* Stop the data queues */
5371 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
5372 if (ATH_TXQ_SETUP(sc, i)) {
5373 ATH_TXQ_LOCK(&sc->sc_txq[i]);
5374 ath_tx_stopdma(sc, &sc->sc_txq[i]);
5375 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
5376 }
5377 }
5378 }
5379
5380 return 1;
5381}
5382
5383#ifdef ATH_DEBUG
5384void
5385ath_tx_dump(struct ath_softc *sc, struct ath_txq *txq)
5386{
5387 struct ath_hal *ah = sc->sc_ah;
5388 struct ath_buf *bf;
5389 int i = 0;
5390
5391 if (! (sc->sc_debug & ATH_DEBUG_RESET))
5392 return;
5393
5394 device_printf(sc->sc_dev, "%s: Q%d: begin\n",
5395 __func__, txq->axq_qnum);
5396 TAILQ_FOREACH(bf, &txq->axq_q, bf_list) {
5397 ath_printtxbuf(sc, bf, txq->axq_qnum, i,
5398 ath_hal_txprocdesc(ah, bf->bf_lastds,
5399 &bf->bf_status.ds_txstat) == HAL_OK);
5400 i++;
5401 }
5402 device_printf(sc->sc_dev, "%s: Q%d: end\n",
5403 __func__, txq->axq_qnum);
5404}
5405#endif /* ATH_DEBUG */
5406
5407/*
5408 * Drain the transmit queues and reclaim resources.
5409 */
5410void
5411ath_legacy_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
5412{
5413 struct ath_hal *ah = sc->sc_ah;
5414 struct ifnet *ifp = sc->sc_ifp;
5415 int i;
5416 struct ath_buf *bf_last;
5417
5418 (void) ath_stoptxdma(sc);
5419
5420 /*
5421 * Dump the queue contents
5422 */
5423 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
5424 /*
5425 * XXX TODO: should we just handle the completed TX frames
5426 * here, whether or not the reset is a full one?
5427 */
5428 if (ATH_TXQ_SETUP(sc, i)) {
5429#ifdef ATH_DEBUG
5430 if (sc->sc_debug & ATH_DEBUG_RESET)
5431 ath_tx_dump(sc, &sc->sc_txq[i]);
5432#endif /* ATH_DEBUG */
5433 if (reset_type == ATH_RESET_NOLOSS) {
5434 ath_tx_processq(sc, &sc->sc_txq[i], 0);
5435 ATH_TXQ_LOCK(&sc->sc_txq[i]);
5436 /*
5437 * Free the holding buffer; DMA is now
5438 * stopped.
5439 */
5440 ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]);
5441 /*
5442 * Setup the link pointer to be the
5443 * _last_ buffer/descriptor in the list.
5444 * If there's nothing in the list, set it
5445 * to NULL.
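 * (That way frames queued after this no-loss reset are
 * chained onto whatever is still pending, rather than
 * being stranded behind a stale axq_link.)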
5446 */
5447 bf_last = ATH_TXQ_LAST(&sc->sc_txq[i],
5448 axq_q_s);
5449 if (bf_last != NULL) {
5450 ath_hal_gettxdesclinkptr(ah,
5451 bf_last->bf_lastds,
5452 &sc->sc_txq[i].axq_link);
5453 } else {
5454 sc->sc_txq[i].axq_link = NULL;
5455 }
5456 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
5457 } else
5458 ath_tx_draintxq(sc, &sc->sc_txq[i]);
5459 }
5460 }
5461#ifdef ATH_DEBUG
5462 if (sc->sc_debug & ATH_DEBUG_RESET) {
5463 struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf);
5464 if (bf != NULL && bf->bf_m != NULL) {
5465 ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
5466 ath_hal_txprocdesc(ah, bf->bf_lastds,
5467 &bf->bf_status.ds_txstat) == HAL_OK);
5468 ieee80211_dump_pkt(ifp->if_l2com,
5469 mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
5470 0, -1);
5471 }
5472 }
5473#endif /* ATH_DEBUG */
5474 IF_LOCK(&ifp->if_snd);
5475 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5476 IF_UNLOCK(&ifp->if_snd);
5477 sc->sc_wd_timer = 0;
5478}
5479
5480/*
5481 * Update internal state after a channel change.
5482 */
5483static void
5484ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
5485{
5486 enum ieee80211_phymode mode;
5487
5488 /*
5489 * Change channels and update the h/w rate map
5490 * if we're switching; e.g. 11a to 11b/g.
5491 */
5492 mode = ieee80211_chan2mode(chan);
5493 if (mode != sc->sc_curmode)
5494 ath_setcurmode(sc, mode);
5495 sc->sc_curchan = chan;
5496}
5497
5498/*
5499 * Set/change channels. If the channel is really being changed,
5500 * it's done by resetting the chip. To accomplish this we must
5501 * first clean up any pending DMA, then restart things a la
5502 * ath_init.
5503 */
5504static int
5505ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
5506{
5507 struct ifnet *ifp = sc->sc_ifp;
5508 struct ieee80211com *ic = ifp->if_l2com;
5509 struct ath_hal *ah = sc->sc_ah;
5510 int ret = 0;
5511
5512 /* Treat this as an interface reset */
5513 ATH_PCU_UNLOCK_ASSERT(sc);
5514 ATH_UNLOCK_ASSERT(sc);
5515
5516 /* (Try to) stop TX/RX from occurring */
5517 taskqueue_block(sc->sc_tq);
5518
5519 ATH_PCU_LOCK(sc);
5520
5521 /* Disable interrupts */
5522 ath_hal_intrset(ah, 0);
5523
5524 /* Stop new RX/TX/interrupt completion */
5525 if (ath_reset_grablock(sc, 1) == 0) {
5526 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
5527 __func__);
5528 }
5529
5530 /* Stop pending RX/TX completion */
5531 ath_txrx_stop_locked(sc);
5532
5533 ATH_PCU_UNLOCK(sc);
5534
5535 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
5536 __func__, ieee80211_chan2ieee(ic, chan),
5537 chan->ic_freq, chan->ic_flags);
5538 if (chan != sc->sc_curchan) {
5539 HAL_STATUS status;
5540 /*
5541 * To switch channels clear any pending DMA operations;
5542 * wait long enough for the RX fifo to drain, reset the
5543 * hardware at the new frequency, and then re-enable
5544 * the relevant bits of the h/w.
5545 */
5546#if 0
5547 ath_hal_intrset(ah, 0); /* disable interrupts */
5548#endif
5549 ath_stoprecv(sc, 1); /* turn off frame recv */
5550 /*
5551 * First, handle completed TX/RX frames.
5552 */
5553 ath_rx_flush(sc);
5554 ath_draintxq(sc, ATH_RESET_NOLOSS);
5555 /*
5556 * Next, flush the non-scheduled frames.
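 * (The ATH_RESET_NOLOSS pass above reaped what the
 * hardware had already completed; this ATH_RESET_FULL
 * pass discards whatever is still pending, which is fine
 * since we're leaving the channel anyway.)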
5557 */
5558 ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */
5559
5560 ath_update_chainmasks(sc, chan);
5561 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
5562 sc->sc_cur_rxchainmask);
5563 if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) {
5564 if_printf(ifp, "%s: unable to reset "
5565 "channel %u (%u MHz, flags 0x%x), hal status %u\n",
5566 __func__, ieee80211_chan2ieee(ic, chan),
5567 chan->ic_freq, chan->ic_flags, status);
5568 ret = EIO;
5569 goto finish;
5570 }
5571 sc->sc_diversity = ath_hal_getdiversity(ah);
5572
5573 ATH_RX_LOCK(sc);
5574 sc->sc_rx_stopped = 1;
5575 sc->sc_rx_resetted = 1;
5576 ATH_RX_UNLOCK(sc);
5577
5578 /* Let DFS at it in case it's a DFS channel */
5579 ath_dfs_radar_enable(sc, chan);
5580
5581 /* Let spectral at it in case spectral is enabled */
5582 ath_spectral_enable(sc, chan);
5583
5584 /*
5585 * Let bluetooth coexistence at it in case it's needed for
5586 * this channel
5587 */
5588 ath_btcoex_enable(sc, ic->ic_curchan);
5589
5590 /*
5591 * If we're doing TDMA, enforce the TXOP limitation for chips
5592 * that support it.
5593 */
5594 if (sc->sc_hasenforcetxop && sc->sc_tdma)
5595 ath_hal_setenforcetxop(sc->sc_ah, 1);
5596 else
5597 ath_hal_setenforcetxop(sc->sc_ah, 0);
5598
5599 /*
5600 * Re-enable rx framework.
5601 */
5602 if (ath_startrecv(sc) != 0) {
5603 if_printf(ifp, "%s: unable to restart recv logic\n",
5604 __func__);
5605 ret = EIO;
5606 goto finish;
5607 }
5608
5609 /*
5610 * Change channels and update the h/w rate map
5611 * if we're switching; e.g. 11a to 11b/g.
5612 */
5613 ath_chan_change(sc, chan);
5614
5615 /*
5616 * Reset clears the beacon timers; reset them
5617 * here if needed.
5618 */
5619 if (sc->sc_beacons) { /* restart beacons */
5620#ifdef IEEE80211_SUPPORT_TDMA
5621 if (sc->sc_tdma)
5622 ath_tdma_config(sc, NULL);
5623 else
5624#endif
5625 ath_beacon_config(sc, NULL);
5626 }
5627
5628 /*
5629 * Re-enable interrupts.
5630 */
5631#if 0
5632 ath_hal_intrset(ah, sc->sc_imask);
5633#endif
5634 }
5635
5636finish:
5637 ATH_PCU_LOCK(sc);
5638 sc->sc_inreset_cnt--;
5639 /* XXX only do this if sc_inreset_cnt == 0? */
5640 ath_hal_intrset(ah, sc->sc_imask);
5641 ATH_PCU_UNLOCK(sc);
5642
5643 IF_LOCK(&ifp->if_snd);
5644 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5645 IF_UNLOCK(&ifp->if_snd);
5646 ath_txrx_start(sc);
5647 /* XXX ath_start? */
5648
5649 return ret;
5650}
5651
5652/*
5653 * Periodically recalibrate the PHY to account
5654 * for temperature/environment changes.
5655 */
5656static void
5657ath_calibrate(void *arg)
5658{
5659 struct ath_softc *sc = arg;
5660 struct ath_hal *ah = sc->sc_ah;
5661 struct ifnet *ifp = sc->sc_ifp;
5662 struct ieee80211com *ic = ifp->if_l2com;
5663 HAL_BOOL longCal, isCalDone = AH_TRUE;
5664 HAL_BOOL aniCal, shortCal = AH_FALSE;
5665 int nextcal;
5666
5667 ATH_LOCK_ASSERT(sc);
5668
5669 /*
5670 * Force the hardware awake for ANI work.
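 *
 * (The longCal/shortCal/aniCal decisions below key off the
 * sysctl-tunable ath_*calinterval values; the long cal
 * interval is in seconds while the short and ANI intervals
 * are in milliseconds, hence the *hz versus *hz/1000
 * scaling.)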
5671 */
5672 ath_power_set_power_state(sc, HAL_PM_AWAKE);
5673
5674 /* Skip trying to do this if we're in reset */
5675 if (sc->sc_inreset_cnt)
5676 goto restart;
5677
5678 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */
5679 goto restart;
5680 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz);
5681 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000);
5682 if (sc->sc_doresetcal)
5683 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000);
5684
5685 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal);
5686 if (aniCal) {
5687 sc->sc_stats.ast_ani_cal++;
5688 sc->sc_lastani = ticks;
5689 ath_hal_ani_poll(ah, sc->sc_curchan);
5690 }
5691
5692 if (longCal) {
5693 sc->sc_stats.ast_per_cal++;
5694 sc->sc_lastlongcal = ticks;
5695 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
5696 /*
5697 * Rfgain is out of bounds, reset the chip
5698 * to load new gain values.
5699 */
5700 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
5701 "%s: rfgain change\n", __func__);
5702 sc->sc_stats.ast_per_rfgain++;
5703 sc->sc_resetcal = 0;
5704 sc->sc_doresetcal = AH_TRUE;
5705 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
5706 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
5707 ath_power_restore_power_state(sc);
5708 return;
5709 }
5710 /*
5711 * If this long cal is after an idle period, then
5712 * reset the data collection state so we start fresh.
5713 */
5714 if (sc->sc_resetcal) {
5715 (void) ath_hal_calreset(ah, sc->sc_curchan);
5716 sc->sc_lastcalreset = ticks;
5717 sc->sc_lastshortcal = ticks;
5718 sc->sc_resetcal = 0;
5719 sc->sc_doresetcal = AH_TRUE;
5720 }
5721 }
5722
5723 /* Only call if we're doing a short/long cal, not for ANI calibration */
5724 if (shortCal || longCal) {
5725 isCalDone = AH_FALSE;
5726 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) {
5727 if (longCal) {
5728 /*
5729 * Calibrate noise floor data again in case of change.
5730 */
5731 ath_hal_process_noisefloor(ah);
5732 }
5733 } else {
5734 DPRINTF(sc, ATH_DEBUG_ANY,
5735 "%s: calibration of channel %u failed\n",
5736 __func__, sc->sc_curchan->ic_freq);
5737 sc->sc_stats.ast_per_calfail++;
5738 }
5739 if (shortCal)
5740 sc->sc_lastshortcal = ticks;
5741 }
5742 if (!isCalDone) {
5743restart:
5744 /*
5745 * Use a shorter interval to potentially collect multiple
5746 * data samples required to complete calibration. Once
5747 * we're told the work is done we drop back to a longer
5748 * interval between requests. We're more aggressive doing
5749 * work when operating as an AP to improve operation right
5750 * after startup.
5751 */
5752 sc->sc_lastshortcal = ticks;
5753 nextcal = ath_shortcalinterval*hz/1000;
5754 if (sc->sc_opmode != HAL_M_HOSTAP)
5755 nextcal *= 10;
5756 sc->sc_doresetcal = AH_TRUE;
5757 } else {
5758 /* nextcal should be the shortest time for next event */
5759 nextcal = ath_longcalinterval*hz;
5760 if (sc->sc_lastcalreset == 0)
5761 sc->sc_lastcalreset = sc->sc_lastlongcal;
5762 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz)
5763 sc->sc_resetcal = 1; /* setup reset next trip */
5764 sc->sc_doresetcal = AH_FALSE;
5765 }
5766 /* ANI calibration may occur more often than short/long/resetcal */
5767 if (ath_anicalinterval > 0)
5768 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000);
5769
5770 if (nextcal != 0) {
5771 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n",
5772 __func__, nextcal, isCalDone ?
"" : "!"); 5773 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc); 5774 } else { 5775 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", 5776 __func__); 5777 /* NB: don't rearm timer */ 5778 } 5779 /* 5780 * Restore power state now that we're done. 5781 */ 5782 ath_power_restore_power_state(sc); 5783} 5784 5785static void 5786ath_scan_start(struct ieee80211com *ic) 5787{ 5788 struct ifnet *ifp = ic->ic_ifp; 5789 struct ath_softc *sc = ifp->if_softc; 5790 struct ath_hal *ah = sc->sc_ah; 5791 u_int32_t rfilt; 5792 5793 /* XXX calibration timer? */ 5794 5795 ATH_LOCK(sc); 5796 sc->sc_scanning = 1; 5797 sc->sc_syncbeacon = 0; 5798 rfilt = ath_calcrxfilter(sc); 5799 ATH_UNLOCK(sc); 5800 5801 ATH_PCU_LOCK(sc); 5802 ath_hal_setrxfilter(ah, rfilt); 5803 ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0); 5804 ATH_PCU_UNLOCK(sc); 5805 5806 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", 5807 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr)); 5808} 5809 5810static void 5811ath_scan_end(struct ieee80211com *ic) 5812{ 5813 struct ifnet *ifp = ic->ic_ifp; 5814 struct ath_softc *sc = ifp->if_softc; 5815 struct ath_hal *ah = sc->sc_ah; 5816 u_int32_t rfilt; 5817 5818 ATH_LOCK(sc); 5819 sc->sc_scanning = 0; 5820 rfilt = ath_calcrxfilter(sc); 5821 ATH_UNLOCK(sc); 5822 5823 ATH_PCU_LOCK(sc); 5824 ath_hal_setrxfilter(ah, rfilt); 5825 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 5826 5827 ath_hal_process_noisefloor(ah); 5828 ATH_PCU_UNLOCK(sc); 5829 5830 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 5831 __func__, rfilt, ether_sprintf(sc->sc_curbssid), 5832 sc->sc_curaid); 5833} 5834 5835#ifdef ATH_ENABLE_11N 5836/* 5837 * For now, just do a channel change. 5838 * 5839 * Later, we'll go through the hard slog of suspending tx/rx, changing rate 5840 * control state and resetting the hardware without dropping frames out 5841 * of the queue. 5842 * 5843 * The unfortunate trouble here is making absolutely sure that the 5844 * channel width change has propagated enough so the hardware 5845 * absolutely isn't handed bogus frames for it's current operating 5846 * mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and 5847 * does occur in parallel, we need to make certain we've blocked 5848 * any further ongoing TX (and RX, that can cause raw TX) 5849 * before we do this. 5850 */ 5851static void 5852ath_update_chw(struct ieee80211com *ic) 5853{ 5854 struct ifnet *ifp = ic->ic_ifp; 5855 struct ath_softc *sc = ifp->if_softc; 5856 5857 DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__); 5858 ath_set_channel(ic); 5859} 5860#endif /* ATH_ENABLE_11N */ 5861 5862static void 5863ath_set_channel(struct ieee80211com *ic) 5864{ 5865 struct ifnet *ifp = ic->ic_ifp; 5866 struct ath_softc *sc = ifp->if_softc; 5867 5868 ATH_LOCK(sc); 5869 ath_power_set_power_state(sc, HAL_PM_AWAKE); 5870 ATH_UNLOCK(sc); 5871 5872 (void) ath_chan_set(sc, ic->ic_curchan); 5873 /* 5874 * If we are returning to our bss channel then mark state 5875 * so the next recv'd beacon's tsf will be used to sync the 5876 * beacon timers. Note that since we only hear beacons in 5877 * sta/ibss mode this has no effect in other operating modes. 5878 */ 5879 ATH_LOCK(sc); 5880 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan) 5881 sc->sc_syncbeacon = 1; 5882 ath_power_restore_power_state(sc); 5883 ATH_UNLOCK(sc); 5884} 5885 5886/* 5887 * Walk the vap list and check if there any vap's in RUN state. 
5888 */
5889static int
5890ath_isanyrunningvaps(struct ieee80211vap *this)
5891{
5892 struct ieee80211com *ic = this->iv_ic;
5893 struct ieee80211vap *vap;
5894
5895 IEEE80211_LOCK_ASSERT(ic);
5896
5897 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
5898 if (vap != this && vap->iv_state >= IEEE80211_S_RUN)
5899 return 1;
5900 }
5901 return 0;
5902}
5903
5904static int
5905ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
5906{
5907 struct ieee80211com *ic = vap->iv_ic;
5908 struct ath_softc *sc = ic->ic_ifp->if_softc;
5909 struct ath_vap *avp = ATH_VAP(vap);
5910 struct ath_hal *ah = sc->sc_ah;
5911 struct ieee80211_node *ni = NULL;
5912 int i, error, stamode;
5913 u_int32_t rfilt;
5914 int csa_run_transition = 0;
5915 enum ieee80211_state ostate = vap->iv_state;
5916
5917 static const HAL_LED_STATE leds[] = {
5918 HAL_LED_INIT, /* IEEE80211_S_INIT */
5919 HAL_LED_SCAN, /* IEEE80211_S_SCAN */
5920 HAL_LED_AUTH, /* IEEE80211_S_AUTH */
5921 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */
5922 HAL_LED_RUN, /* IEEE80211_S_CAC */
5923 HAL_LED_RUN, /* IEEE80211_S_RUN */
5924 HAL_LED_RUN, /* IEEE80211_S_CSA */
5925 HAL_LED_RUN, /* IEEE80211_S_SLEEP */
5926 };
5927
5928 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
5929 ieee80211_state_name[ostate],
5930 ieee80211_state_name[nstate]);
5931
5932 /*
5933 * net80211 _should_ have the comlock asserted at this point.
5934 * There are some comments around the calls to vap->iv_newstate
5935 * which indicate that it (newstate) may end up dropping the
5936 * lock. This and the subsequent lock assert check after newstate
5937 * are an attempt to catch these and figure out how/why.
5938 */
5939 IEEE80211_LOCK_ASSERT(ic);
5940
5941 /* Before we touch the hardware - wake it up */
5942 ATH_LOCK(sc);
5943 /*
5944 * If the NIC is in anything other than SLEEP state,
5945 * we need to ensure that self-generated frames are
5946 * set for PWRMGT=0. Otherwise we may end up with
5947 * strange situations.
5948 *
5949 * XXX TODO: is this actually the case? :-)
5950 */
5951 if (nstate != IEEE80211_S_SLEEP)
5952 ath_power_setselfgen(sc, HAL_PM_AWAKE);
5953
5954 /*
5955 * Now, wake the thing up.
5956 */
5957 ath_power_set_power_state(sc, HAL_PM_AWAKE);
5958
5959 /*
5960 * And stop the calibration callout whilst we have
5961 * ATH_LOCK held.
5962 */
5963 callout_stop(&sc->sc_cal_ch);
5964 ATH_UNLOCK(sc);
5965
5966 if (ostate == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN)
5967 csa_run_transition = 1;
5968
5969 ath_hal_setledstate(ah, leds[nstate]); /* set LED */
5970
5971 if (nstate == IEEE80211_S_SCAN) {
5972 /*
5973 * Scanning: turn off beacon miss and don't beacon.
5974 * Mark beacon state so when we reach RUN state we'll
5975 * [re]setup beacons. Unblock the task q thread so
5976 * deferred interrupt processing is done.
5977 */
5978
5979 /* Ensure we stay awake during scan */
5980 ATH_LOCK(sc);
5981 ath_power_setselfgen(sc, HAL_PM_AWAKE);
5982 ath_power_setpower(sc, HAL_PM_AWAKE);
5983 ATH_UNLOCK(sc);
5984
5985 ath_hal_intrset(ah,
5986 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
5987 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
5988 sc->sc_beacons = 0;
5989 taskqueue_unblock(sc->sc_tq);
5990 }
5991
5992 ni = ieee80211_ref_node(vap->iv_bss);
5993 rfilt = ath_calcrxfilter(sc);
5994 stamode = (vap->iv_opmode == IEEE80211_M_STA ||
5995 vap->iv_opmode == IEEE80211_M_AHDEMO ||
5996 vap->iv_opmode == IEEE80211_M_IBSS);
5997
5998 /*
5999 * XXX Don't need to do this (and others) if we've transitioned
6000 * from SLEEP->RUN.
6001 */ 6002 if (stamode && nstate == IEEE80211_S_RUN) { 6003 sc->sc_curaid = ni->ni_associd; 6004 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid); 6005 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 6006 } 6007 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 6008 __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid); 6009 ath_hal_setrxfilter(ah, rfilt); 6010 6011 /* XXX is this to restore keycache on resume? */ 6012 if (vap->iv_opmode != IEEE80211_M_STA && 6013 (vap->iv_flags & IEEE80211_F_PRIVACY)) { 6014 for (i = 0; i < IEEE80211_WEP_NKID; i++) 6015 if (ath_hal_keyisvalid(ah, i)) 6016 ath_hal_keysetmac(ah, i, ni->ni_bssid); 6017 } 6018 6019 /* 6020 * Invoke the parent method to do net80211 work. 6021 */ 6022 error = avp->av_newstate(vap, nstate, arg); 6023 if (error != 0) 6024 goto bad; 6025 6026 /* 6027 * See above: ensure av_newstate() doesn't drop the lock 6028 * on us. 6029 */ 6030 IEEE80211_LOCK_ASSERT(ic); 6031 6032 if (nstate == IEEE80211_S_RUN) { 6033 /* NB: collect bss node again, it may have changed */ 6034 ieee80211_free_node(ni); 6035 ni = ieee80211_ref_node(vap->iv_bss); 6036 6037 DPRINTF(sc, ATH_DEBUG_STATE, 6038 "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s " 6039 "capinfo 0x%04x chan %d\n", __func__, 6040 vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), 6041 ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); 6042 6043 switch (vap->iv_opmode) { 6044#ifdef IEEE80211_SUPPORT_TDMA 6045 case IEEE80211_M_AHDEMO: 6046 if ((vap->iv_caps & IEEE80211_C_TDMA) == 0) 6047 break; 6048 /* fall thru... */ 6049#endif 6050 case IEEE80211_M_HOSTAP: 6051 case IEEE80211_M_IBSS: 6052 case IEEE80211_M_MBSS: 6053 /* 6054 * Allocate and setup the beacon frame. 6055 * 6056 * Stop any previous beacon DMA. This may be 6057 * necessary, for example, when an ibss merge 6058 * causes reconfiguration; there will be a state 6059 * transition from RUN->RUN that means we may 6060 * be called with beacon transmission active. 6061 */ 6062 ath_hal_stoptxdma(ah, sc->sc_bhalq); 6063 6064 error = ath_beacon_alloc(sc, ni); 6065 if (error != 0) 6066 goto bad; 6067 /* 6068 * If joining an adhoc network, defer beacon timer 6069 * configuration to the next beacon frame so we 6070 * have a current TSF to use. Otherwise we're 6071 * starting an ibss/bss so there's no need to delay; 6072 * if this is the first vap moving to RUN state, then 6073 * beacon state needs to be [re]configured. 6074 */ 6075 if (vap->iv_opmode == IEEE80211_M_IBSS && 6076 ni->ni_tstamp.tsf != 0) { 6077 sc->sc_syncbeacon = 1; 6078 } else if (!sc->sc_beacons) { 6079#ifdef IEEE80211_SUPPORT_TDMA 6080 if (vap->iv_caps & IEEE80211_C_TDMA) 6081 ath_tdma_config(sc, vap); 6082 else 6083#endif 6084 ath_beacon_config(sc, vap); 6085 sc->sc_beacons = 1; 6086 } 6087 break; 6088 case IEEE80211_M_STA: 6089 /* 6090 * Defer beacon timer configuration to the next 6091 * beacon frame so we have a current TSF to use 6092 * (any TSF collected when scanning is likely old). 6093 * However if it's due to a CSA -> RUN transition, 6094 * force a beacon update so we pick up a lack of 6095 * beacons from an AP in CAC and thus force a 6096 * scan. 6097 * 6098 * And, there are also corner cases here where 6099 * after a scan, the AP may have disappeared. 6100 * In that case, we may not receive an actual 6101 * beacon to update the beacon timer and thus we 6102 * won't get notified of the missing beacons.
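 *
 * So, also make sure the beacon timers get reprogrammed when
 * we (re)enter RUN from a state other than RUN or SLEEP; see
 * the sc_beacons handling below and PR kern/175227.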
6103 */ 6104 if (ostate != IEEE80211_S_RUN && 6105 ostate != IEEE80211_S_SLEEP) { 6106 DPRINTF(sc, ATH_DEBUG_BEACON, 6107 "%s: STA; syncbeacon=1\n", __func__); 6108 sc->sc_syncbeacon = 1; 6109 6110 if (csa_run_transition) 6111 ath_beacon_config(sc, vap); 6112 6113 /* 6114 * PR: kern/175227 6115 * 6116 * Reconfigure beacons during reset; as otherwise 6117 * we won't get the beacon timers reprogrammed 6118 * after a reset and thus we won't pick up a 6119 * beacon miss interrupt. 6120 * 6121 * Hopefully we'll see a beacon before the BMISS 6122 * timer fires (too often), leading to a STA 6123 * disassociation. 6124 */ 6125 sc->sc_beacons = 1; 6126 } 6127 break; 6128 case IEEE80211_M_MONITOR: 6129 /* 6130 * Monitor mode vaps have only INIT->RUN and RUN->RUN 6131 * transitions so we must re-enable interrupts here to 6132 * handle the case of a single monitor mode vap. 6133 */ 6134 ath_hal_intrset(ah, sc->sc_imask); 6135 break; 6136 case IEEE80211_M_WDS: 6137 break; 6138 default: 6139 break; 6140 } 6141 /* 6142 * Let the hal process statistics collected during a 6143 * scan so it can provide calibrated noise floor data. 6144 */ 6145 ath_hal_process_noisefloor(ah); 6146 /* 6147 * Reset rssi stats; maybe not the best place... 6148 */ 6149 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; 6150 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; 6151 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; 6152 6153 /* 6154 * Force awake for RUN mode. 6155 */ 6156 ATH_LOCK(sc); 6157 ath_power_setselfgen(sc, HAL_PM_AWAKE); 6158 ath_power_setpower(sc, HAL_PM_AWAKE); 6159 6160 /* 6161 * Finally, start any timers and the task q thread 6162 * (in case we didn't go through SCAN state). 6163 */ 6164 if (ath_longcalinterval != 0) { 6165 /* start periodic recalibration timer */ 6166 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 6167 } else { 6168 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 6169 "%s: calibration disabled\n", __func__); 6170 } 6171 ATH_UNLOCK(sc); 6172 6173 taskqueue_unblock(sc->sc_tq); 6174 } else if (nstate == IEEE80211_S_INIT) { 6175 /* 6176 * If there are no vaps left in RUN state then 6177 * shutdown host/driver operation: 6178 * o disable interrupts 6179 * o disable the task queue thread 6180 * o mark beacon processing as stopped 6181 */ 6182 if (!ath_isanyrunningvaps(vap)) { 6183 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 6184 /* disable interrupts */ 6185 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL); 6186 taskqueue_block(sc->sc_tq); 6187 sc->sc_beacons = 0; 6188 } 6189#ifdef IEEE80211_SUPPORT_TDMA 6190 ath_hal_setcca(ah, AH_TRUE); 6191#endif 6192 } else if (nstate == IEEE80211_S_SLEEP) { 6193 /* We're going to sleep, so transition appropriately */ 6194 /* For now, only do this if we're a single STA vap */ 6195 if (sc->sc_nvaps == 1 && 6196 vap->iv_opmode == IEEE80211_M_STA) { 6197 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: syncbeacon=%d\n", __func__, sc->sc_syncbeacon); 6198 ATH_LOCK(sc); 6199 /* 6200 * Always at least set the self-generated 6201 * frame config to set PWRMGT=1. 6202 */ 6203 ath_power_setselfgen(sc, HAL_PM_NETWORK_SLEEP); 6204 6205 /* 6206 * If we're not syncing beacons, transition 6207 * to NETWORK_SLEEP. 6208 * 6209 * We stay awake if syncbeacon > 0 in case 6210 * we need to listen for some beacons otherwise 6211 * our beacon timer config may be wrong. 
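 *
 * Ie, stay fully awake until a beacon has been received and
 * the timers resynced; only then is it safe to drop into
 * network sleep.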
6212 */ 6213 if (sc->sc_syncbeacon == 0) { 6214 ath_power_setpower(sc, HAL_PM_NETWORK_SLEEP); 6215 } 6216 ATH_UNLOCK(sc); 6217 } 6218 } 6219bad: 6220 ieee80211_free_node(ni); 6221 6222 /* 6223 * Restore the power state - either to what it was, or 6224 * to network_sleep if it's alright. 6225 */ 6226 ATH_LOCK(sc); 6227 ath_power_restore_power_state(sc); 6228 ATH_UNLOCK(sc); 6229 return error; 6230} 6231 6232/* 6233 * Allocate a key cache slot to the station so we can 6234 * setup a mapping from key index to node. The key cache 6235 * slot is needed for managing antenna state and for 6236 * compression when stations do not use crypto. We do 6237 * it unilaterally here; if crypto is employed this slot 6238 * will be reassigned. 6239 */ 6240static void 6241ath_setup_stationkey(struct ieee80211_node *ni) 6242{ 6243 struct ieee80211vap *vap = ni->ni_vap; 6244 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 6245 ieee80211_keyix keyix, rxkeyix; 6246 6247 /* XXX should take a locked ref to vap->iv_bss */ 6248 if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) { 6249 /* 6250 * Key cache is full; we'll fall back to doing 6251 * the more expensive lookup in software. Note 6252 * this also means no h/w compression. 6253 */ 6254 /* XXX msg+statistic */ 6255 } else { 6256 /* XXX locking? */ 6257 ni->ni_ucastkey.wk_keyix = keyix; 6258 ni->ni_ucastkey.wk_rxkeyix = rxkeyix; 6259 /* NB: must mark device key to get called back on delete */ 6260 ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY; 6261 IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr); 6262 /* NB: this will create a pass-thru key entry */ 6263 ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss); 6264 } 6265} 6266 6267/* 6268 * Setup driver-specific state for a newly associated node. 6269 * Note that we're also called on a re-associate; the isnew 6270 * param tells us if this is the first time or not. 6271 */ 6272static void 6273ath_newassoc(struct ieee80211_node *ni, int isnew) 6274{ 6275 struct ath_node *an = ATH_NODE(ni); 6276 struct ieee80211vap *vap = ni->ni_vap; 6277 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 6278 const struct ieee80211_txparam *tp = ni->ni_txparms; 6279 6280 an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate); 6281 an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate); 6282 6283 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: reassoc; isnew=%d, is_powersave=%d\n", 6284 __func__, 6285 ni->ni_macaddr, 6286 ":", 6287 isnew, 6288 an->an_is_powersave); 6289 6290 ATH_NODE_LOCK(an); 6291 ath_rate_newassoc(sc, an, isnew); 6292 ATH_NODE_UNLOCK(an); 6293 6294 if (isnew && 6295 (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey && 6296 ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE) 6297 ath_setup_stationkey(ni); 6298 6299 /* 6300 * If we're reassociating, make sure that any paused queues 6301 * get unpaused. 6302 * 6303 * Now, we may have frames in the hardware queue for this node. 6304 * So if we are reassociating and there are frames in the queue, 6305 * we need to go through the cleanup path to ensure that they're 6306 * marked as non-aggregate. 6307 */ 6308 if (!
isnew) { 6309 DPRINTF(sc, ATH_DEBUG_NODE, 6310 "%s: %6D: reassoc; is_powersave=%d\n", 6311 __func__, 6312 ni->ni_macaddr, 6313 ":", 6314 an->an_is_powersave); 6315 6316 /* XXX for now, we can't hold the lock across assoc */ 6317 ath_tx_node_reassoc(sc, an); 6318 6319 /* XXX for now, we can't hold the lock across wakeup */ 6320 if (an->an_is_powersave) 6321 ath_tx_node_wakeup(sc, an); 6322 } 6323} 6324 6325static int 6326ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg, 6327 int nchans, struct ieee80211_channel chans[]) 6328{ 6329 struct ath_softc *sc = ic->ic_ifp->if_softc; 6330 struct ath_hal *ah = sc->sc_ah; 6331 HAL_STATUS status; 6332 6333 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 6334 "%s: rd %u cc %u location %c%s\n", 6335 __func__, reg->regdomain, reg->country, reg->location, 6336 reg->ecm ? " ecm" : ""); 6337 6338 status = ath_hal_set_channels(ah, chans, nchans, 6339 reg->country, reg->regdomain); 6340 if (status != HAL_OK) { 6341 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n", 6342 __func__, status); 6343 return EINVAL; /* XXX */ 6344 } 6345 6346 return 0; 6347} 6348 6349static void 6350ath_getradiocaps(struct ieee80211com *ic, 6351 int maxchans, int *nchans, struct ieee80211_channel chans[]) 6352{ 6353 struct ath_softc *sc = ic->ic_ifp->if_softc; 6354 struct ath_hal *ah = sc->sc_ah; 6355 6356 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n", 6357 __func__, SKU_DEBUG, CTRY_DEFAULT); 6358 6359 /* XXX check return */ 6360 (void) ath_hal_getchannels(ah, chans, maxchans, nchans, 6361 HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE); 6362 6363} 6364 6365static int 6366ath_getchannels(struct ath_softc *sc) 6367{ 6368 struct ifnet *ifp = sc->sc_ifp; 6369 struct ieee80211com *ic = ifp->if_l2com; 6370 struct ath_hal *ah = sc->sc_ah; 6371 HAL_STATUS status; 6372 6373 /* 6374 * Collect channel set based on EEPROM contents. 6375 */ 6376 status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX, 6377 &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE); 6378 if (status != HAL_OK) { 6379 if_printf(ifp, "%s: unable to collect channel list from hal, " 6380 "status %d\n", __func__, status); 6381 return EINVAL; 6382 } 6383 (void) ath_hal_getregdomain(ah, &sc->sc_eerd); 6384 ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */ 6385 /* XXX map Atheros sku's to net80211 SKU's */ 6386 /* XXX net80211 types too small */ 6387 ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd; 6388 ic->ic_regdomain.country = (uint16_t) sc->sc_eecc; 6389 ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */ 6390 ic->ic_regdomain.isocc[1] = ' '; 6391 6392 ic->ic_regdomain.ecm = 1; 6393 ic->ic_regdomain.location = 'I'; 6394 6395 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 6396 "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n", 6397 __func__, sc->sc_eerd, sc->sc_eecc, 6398 ic->ic_regdomain.regdomain, ic->ic_regdomain.country, 6399 ic->ic_regdomain.location, ic->ic_regdomain.ecm ? 
" ecm" : ""); 6400 return 0; 6401} 6402 6403static int 6404ath_rate_setup(struct ath_softc *sc, u_int mode) 6405{ 6406 struct ath_hal *ah = sc->sc_ah; 6407 const HAL_RATE_TABLE *rt; 6408 6409 switch (mode) { 6410 case IEEE80211_MODE_11A: 6411 rt = ath_hal_getratetable(ah, HAL_MODE_11A); 6412 break; 6413 case IEEE80211_MODE_HALF: 6414 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE); 6415 break; 6416 case IEEE80211_MODE_QUARTER: 6417 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE); 6418 break; 6419 case IEEE80211_MODE_11B: 6420 rt = ath_hal_getratetable(ah, HAL_MODE_11B); 6421 break; 6422 case IEEE80211_MODE_11G: 6423 rt = ath_hal_getratetable(ah, HAL_MODE_11G); 6424 break; 6425 case IEEE80211_MODE_TURBO_A: 6426 rt = ath_hal_getratetable(ah, HAL_MODE_108A); 6427 break; 6428 case IEEE80211_MODE_TURBO_G: 6429 rt = ath_hal_getratetable(ah, HAL_MODE_108G); 6430 break; 6431 case IEEE80211_MODE_STURBO_A: 6432 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); 6433 break; 6434 case IEEE80211_MODE_11NA: 6435 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20); 6436 break; 6437 case IEEE80211_MODE_11NG: 6438 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20); 6439 break; 6440 default: 6441 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", 6442 __func__, mode); 6443 return 0; 6444 } 6445 sc->sc_rates[mode] = rt; 6446 return (rt != NULL); 6447} 6448 6449static void 6450ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 6451{ 6452#define N(a) (sizeof(a)/sizeof(a[0])) 6453 /* NB: on/off times from the Atheros NDIS driver, w/ permission */ 6454 static const struct { 6455 u_int rate; /* tx/rx 802.11 rate */ 6456 u_int16_t timeOn; /* LED on time (ms) */ 6457 u_int16_t timeOff; /* LED off time (ms) */ 6458 } blinkrates[] = { 6459 { 108, 40, 10 }, 6460 { 96, 44, 11 }, 6461 { 72, 50, 13 }, 6462 { 48, 57, 14 }, 6463 { 36, 67, 16 }, 6464 { 24, 80, 20 }, 6465 { 22, 100, 25 }, 6466 { 18, 133, 34 }, 6467 { 12, 160, 40 }, 6468 { 10, 200, 50 }, 6469 { 6, 240, 58 }, 6470 { 4, 267, 66 }, 6471 { 2, 400, 100 }, 6472 { 0, 500, 130 }, 6473 /* XXX half/quarter rates */ 6474 }; 6475 const HAL_RATE_TABLE *rt; 6476 int i, j; 6477 6478 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); 6479 rt = sc->sc_rates[mode]; 6480 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); 6481 for (i = 0; i < rt->rateCount; i++) { 6482 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 6483 if (rt->info[i].phy != IEEE80211_T_HT) 6484 sc->sc_rixmap[ieeerate] = i; 6485 else 6486 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i; 6487 } 6488 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); 6489 for (i = 0; i < N(sc->sc_hwmap); i++) { 6490 if (i >= rt->rateCount) { 6491 sc->sc_hwmap[i].ledon = (500 * hz) / 1000; 6492 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000; 6493 continue; 6494 } 6495 sc->sc_hwmap[i].ieeerate = 6496 rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 6497 if (rt->info[i].phy == IEEE80211_T_HT) 6498 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS; 6499 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; 6500 if (rt->info[i].shortPreamble || 6501 rt->info[i].phy == IEEE80211_T_OFDM) 6502 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE; 6503 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags; 6504 for (j = 0; j < N(blinkrates)-1; j++) 6505 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate) 6506 break; 6507 /* NB: this uses the last entry if the rate isn't found */ 6508 /* XXX beware of overlow */ 6509 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000; 6510 
6510 sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000; 6511 } 6512 sc->sc_currates = rt; 6513 sc->sc_curmode = mode; 6514 /* 6515 * All protection frames are transmitted at 2Mb/s for 6516 * 11g, otherwise at 1Mb/s. 6517 */ 6518 if (mode == IEEE80211_MODE_11G) 6519 sc->sc_protrix = ath_tx_findrix(sc, 2*2); 6520 else 6521 sc->sc_protrix = ath_tx_findrix(sc, 2*1); 6522 /* NB: caller is responsible for resetting rate control state */ 6523#undef N 6524} 6525 6526static void 6527ath_watchdog(void *arg) 6528{ 6529 struct ath_softc *sc = arg; 6530 int do_reset = 0; 6531 6532 ATH_LOCK_ASSERT(sc); 6533 6534 if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) { 6535 struct ifnet *ifp = sc->sc_ifp; 6536 uint32_t hangs; 6537 6538 ath_power_set_power_state(sc, HAL_PM_AWAKE); 6539 6540 if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) && 6541 hangs != 0) { 6542 if_printf(ifp, "%s hang detected (0x%x)\n", 6543 hangs & 0xff ? "bb" : "mac", hangs); 6544 } else 6545 if_printf(ifp, "device timeout\n"); 6546 do_reset = 1; 6547 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 6548 sc->sc_stats.ast_watchdog++; 6549 6550 ath_power_restore_power_state(sc); 6551 } 6552 6553 /* 6554 * We can't hold the lock across the ath_reset() call. 6555 * 6556 * And since this routine can't hold a lock and sleep, 6557 * defer the reset. 6558 */ 6559 if (do_reset) { 6560 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); 6561 } 6562 6563 callout_schedule(&sc->sc_wd_ch, hz); 6564} 6565 6566/* 6567 * Fetch the rate control statistics for the given node. 6568 */ 6569static int 6570ath_ioctl_ratestats(struct ath_softc *sc, struct ath_rateioctl *rs) 6571{ 6572 struct ath_node *an; 6573 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 6574 struct ieee80211_node *ni; 6575 int error = 0; 6576 6577 /* Perform a lookup on the given node */ 6578 ni = ieee80211_find_node(&ic->ic_sta, rs->is_u.macaddr); 6579 if (ni == NULL) { 6580 error = EINVAL; 6581 goto bad; 6582 } 6583 6584 /* Lock the ath_node */ 6585 an = ATH_NODE(ni); 6586 ATH_NODE_LOCK(an); 6587 6588 /* Fetch the rate control stats for this node */ 6589 error = ath_rate_fetch_node_stats(sc, an, rs); 6590 6591 /* No matter what happens here, just drop through */ 6592 6593 /* Unlock the ath_node */ 6594 ATH_NODE_UNLOCK(an); 6595 6596 /* Unref the node */ 6597 ieee80211_node_decref(ni); 6598 6599bad: 6600 return (error); 6601} 6602 6603#ifdef ATH_DIAGAPI 6604/* 6605 * Diagnostic interface to the HAL. This is used by various 6606 * tools to do things like retrieve register contents for 6607 * debugging. The mechanism is intentionally opaque so that 6608 * it can change frequently w/o concern for compatibility. 6609 */ 6610static int 6611ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad) 6612{ 6613 struct ath_hal *ah = sc->sc_ah; 6614 u_int id = ad->ad_id & ATH_DIAG_ID; 6615 void *indata = NULL; 6616 void *outdata = NULL; 6617 u_int32_t insize = ad->ad_in_size; 6618 u_int32_t outsize = ad->ad_out_size; 6619 int error = 0; 6620 6621 if (ad->ad_id & ATH_DIAG_IN) { 6622 /* 6623 * Copy in data. 6624 */ 6625 indata = malloc(insize, M_TEMP, M_NOWAIT); 6626 if (indata == NULL) { 6627 error = ENOMEM; 6628 goto bad; 6629 } 6630 error = copyin(ad->ad_in_data, indata, insize); 6631 if (error) 6632 goto bad; 6633 } 6634 if (ad->ad_id & ATH_DIAG_DYN) { 6635 /* 6636 * Allocate a buffer for the results (otherwise the HAL 6637 * returns a pointer to a buffer where we can read the 6638 * results).
Note that we depend on the HAL leaving this 6639 * pointer for us to use below in reclaiming the buffer; 6640 * may want to be more defensive. 6641 */ 6642 outdata = malloc(outsize, M_TEMP, M_NOWAIT); 6643 if (outdata == NULL) { 6644 error = ENOMEM; 6645 goto bad; 6646 } 6647 } 6648 6649 6650 ATH_LOCK(sc); 6651 if (id != HAL_DIAG_REGS) 6652 ath_power_set_power_state(sc, HAL_PM_AWAKE); 6653 ATH_UNLOCK(sc); 6654 6655 if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) { 6656 if (outsize < ad->ad_out_size) 6657 ad->ad_out_size = outsize; 6658 if (outdata != NULL) 6659 error = copyout(outdata, ad->ad_out_data, 6660 ad->ad_out_size); 6661 } else { 6662 error = EINVAL; 6663 } 6664 6665 ATH_LOCK(sc); 6666 if (id != HAL_DIAG_REGS) 6667 ath_power_restore_power_state(sc); 6668 ATH_UNLOCK(sc); 6669 6670bad: 6671 if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL) 6672 free(indata, M_TEMP); 6673 if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL) 6674 free(outdata, M_TEMP); 6675 return error; 6676} 6677#endif /* ATH_DIAGAPI */ 6678 6679static int 6680ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 6681{ 6682#define IS_RUNNING(ifp) \ 6683 ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) 6684 struct ath_softc *sc = ifp->if_softc; 6685 struct ieee80211com *ic = ifp->if_l2com; 6686 struct ifreq *ifr = (struct ifreq *)data; 6687 const HAL_RATE_TABLE *rt; 6688 int error = 0; 6689 6690 switch (cmd) { 6691 case SIOCSIFFLAGS: 6692 if (IS_RUNNING(ifp)) { 6693 /* 6694 * To avoid rescanning another access point, 6695 * do not call ath_init() here. Instead, 6696 * only reflect promisc mode settings. 6697 */ 6698 ATH_LOCK(sc); 6699 ath_power_set_power_state(sc, HAL_PM_AWAKE); 6700 ath_mode_init(sc); 6701 ath_power_restore_power_state(sc); 6702 ATH_UNLOCK(sc); 6703 } else if (ifp->if_flags & IFF_UP) { 6704 /* 6705 * Beware of being called during attach/detach 6706 * to reset promiscuous mode. In that case we 6707 * will still be marked UP but not RUNNING. 6708 * However trying to re-init the interface 6709 * is the wrong thing to do as we've already 6710 * torn down much of our state. There's 6711 * probably a better way to deal with this. 
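 * Eg, we could track attach/detach state explicitly; for now
 * the sc_invalid check below just skips the re-init while the
 * driver is partially torn down.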
6712 */ 6713 if (!sc->sc_invalid) 6714 ath_init(sc); /* XXX lose error */ 6715 } else { 6716 ATH_LOCK(sc); 6717 ath_stop_locked(ifp); 6718 if (!sc->sc_invalid) 6719 ath_power_setpower(sc, HAL_PM_FULL_SLEEP); 6720 ATH_UNLOCK(sc); 6721 } 6722 break; 6723 case SIOCGIFMEDIA: 6724 case SIOCSIFMEDIA: 6725 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 6726 break; 6727 case SIOCGATHSTATS: 6728 /* NB: embed these numbers to get a consistent view */ 6729 sc->sc_stats.ast_tx_packets = ifp->if_get_counter(ifp, 6730 IFCOUNTER_OPACKETS); 6731 sc->sc_stats.ast_rx_packets = ifp->if_get_counter(ifp, 6732 IFCOUNTER_IPACKETS); 6733 sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi); 6734 sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi); 6735#ifdef IEEE80211_SUPPORT_TDMA 6736 sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap); 6737 sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam); 6738#endif 6739 rt = sc->sc_currates; 6740 sc->sc_stats.ast_tx_rate = 6741 rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC; 6742 if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT) 6743 sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS; 6744 return copyout(&sc->sc_stats, 6745 ifr->ifr_data, sizeof (sc->sc_stats)); 6746 case SIOCGATHAGSTATS: 6747 return copyout(&sc->sc_aggr_stats, 6748 ifr->ifr_data, sizeof (sc->sc_aggr_stats)); 6749 case SIOCZATHSTATS: 6750 error = priv_check(curthread, PRIV_DRIVER); 6751 if (error == 0) { 6752 memset(&sc->sc_stats, 0, sizeof(sc->sc_stats)); 6753 memset(&sc->sc_aggr_stats, 0, 6754 sizeof(sc->sc_aggr_stats)); 6755 memset(&sc->sc_intr_stats, 0, 6756 sizeof(sc->sc_intr_stats)); 6757 } 6758 break; 6759#ifdef ATH_DIAGAPI 6760 case SIOCGATHDIAG: 6761 error = ath_ioctl_diag(sc, (struct ath_diag *) ifr); 6762 break; 6763 case SIOCGATHPHYERR: 6764 error = ath_ioctl_phyerr(sc,(struct ath_diag*) ifr); 6765 break; 6766#endif 6767 case SIOCGATHSPECTRAL: 6768 error = ath_ioctl_spectral(sc,(struct ath_diag*) ifr); 6769 break; 6770 case SIOCGATHNODERATESTATS: 6771 error = ath_ioctl_ratestats(sc, (struct ath_rateioctl *) ifr); 6772 break; 6773 case SIOCGIFADDR: 6774 error = ether_ioctl(ifp, cmd, data); 6775 break; 6776 default: 6777 error = EINVAL; 6778 break; 6779 } 6780 return error; 6781#undef IS_RUNNING 6782} 6783 6784/* 6785 * Announce various information on device/driver attach. 
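 * Eg, the MAC/PHY revisions, the 2GHz/5GHz radio revisions and,
 * when booting verbose, the per-WME-AC hardware queue assignments.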
6786 */ 6787static void 6788ath_announce(struct ath_softc *sc) 6789{ 6790 struct ifnet *ifp = sc->sc_ifp; 6791 struct ath_hal *ah = sc->sc_ah; 6792 6793 if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n", 6794 ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev, 6795 ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf); 6796 if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n", 6797 ah->ah_analog2GhzRev, ah->ah_analog5GhzRev); 6798 if (bootverbose) { 6799 int i; 6800 for (i = 0; i <= WME_AC_VO; i++) { 6801 struct ath_txq *txq = sc->sc_ac2q[i]; 6802 if_printf(ifp, "Use hw queue %u for %s traffic\n", 6803 txq->axq_qnum, ieee80211_wme_acnames[i]); 6804 } 6805 if_printf(ifp, "Use hw queue %u for CAB traffic\n", 6806 sc->sc_cabq->axq_qnum); 6807 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq); 6808 } 6809 if (ath_rxbuf != ATH_RXBUF) 6810 if_printf(ifp, "using %u rx buffers\n", ath_rxbuf); 6811 if (ath_txbuf != ATH_TXBUF) 6812 if_printf(ifp, "using %u tx buffers\n", ath_txbuf); 6813 if (sc->sc_mcastkey && bootverbose) 6814 if_printf(ifp, "using multicast key search\n"); 6815} 6816 6817static void 6818ath_dfs_tasklet(void *p, int npending) 6819{ 6820 struct ath_softc *sc = (struct ath_softc *) p; 6821 struct ifnet *ifp = sc->sc_ifp; 6822 struct ieee80211com *ic = ifp->if_l2com; 6823 6824 /* 6825 * If previous processing has found a radar event, 6826 * signal this to the net80211 layer to begin DFS 6827 * processing. 6828 */ 6829 if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) { 6830 /* DFS event found, initiate channel change */ 6831 /* 6832 * XXX doesn't currently tell us whether the event 6833 * XXX was found in the primary or extension 6834 * XXX channel! 6835 */ 6836 IEEE80211_LOCK(ic); 6837 ieee80211_dfs_notify_radar(ic, sc->sc_curchan); 6838 IEEE80211_UNLOCK(ic); 6839 } 6840} 6841 6842/* 6843 * Enable/disable power save. This must be called with 6844 * no TX driver locks currently held, so it should only 6845 * be called from the RX path (which doesn't hold any 6846 * TX driver locks). 6847 */ 6848static void 6849ath_node_powersave(struct ieee80211_node *ni, int enable) 6850{ 6851#ifdef ATH_SW_PSQ 6852 struct ath_node *an = ATH_NODE(ni); 6853 struct ieee80211com *ic = ni->ni_ic; 6854 struct ath_softc *sc = ic->ic_ifp->if_softc; 6855 struct ath_vap *avp = ATH_VAP(ni->ni_vap); 6856 6857 /* XXX and no TXQ locks should be held here */ 6858 6859 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: enable=%d\n", 6860 __func__, 6861 ni->ni_macaddr, 6862 ":", 6863 !! enable); 6864 6865 /* Suspend or resume software queue handling */ 6866 if (enable) 6867 ath_tx_node_sleep(sc, an); 6868 else 6869 ath_tx_node_wakeup(sc, an); 6870 6871 /* Update net80211 state */ 6872 avp->av_node_ps(ni, enable); 6873#else 6874 struct ath_vap *avp = ATH_VAP(ni->ni_vap); 6875 6876 /* Update net80211 state */ 6877 avp->av_node_ps(ni, enable); 6878#endif /* ATH_SW_PSQ */ 6879} 6880 6881/* 6882 * Notification from net80211 that the powersave queue state has 6883 * changed. 6884 * 6885 * Since the software queue also may have some frames: 6886 * 6887 * + If the node software queue has frames and the TID state 6888 * is 0, we set the TIM; 6889 * + if the node and the stack are both empty, we clear the TIM bit; 6890 * + if the stack tries to set the bit, always set it; 6891 * + if the stack tries to clear the bit, only clear it if the 6892 * software queue in question is also cleared.
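 *
 * Eg, roughly, the intended behaviour:
 *
 *	stack psq	driver swq	TIM bit
 *	set		any		set
 *	clear		non-empty	left as-is
 *	clear		empty		cleared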
6893 * 6894 * TODO: this is called during node teardown, so let's ensure this 6895 * is all correctly handled and that the TIM bit is cleared. 6896 * It may be that the node flush is called _AFTER_ the net80211 6897 * stack clears the TIM. 6898 * 6899 * Here is the racy part. Since it's possible >1 concurrent, 6900 * overlapping TXes will appear complete with a TX completion in 6901 * another thread, it's possible that the concurrent TIM calls will 6902 * clash. We can't hold the node lock here because setting the 6903 * TIM grabs the net80211 comlock and this may cause a LOR. 6904 * The solution is either to totally serialise _everything_ at 6905 * this point (ie, all TX, completion and any reset/flush go into 6906 * one taskqueue) or to create a new "ath TIM lock" that just 6907 * wraps the driver state change and this call to avp->av_set_tim(). 6908 * 6909 * The same race exists in the net80211 power save queue handling 6910 * as well. Since multiple transmitting threads may queue frames 6911 * into the driver, as well as ps-poll and the driver transmitting 6912 * frames (and thus clearing the psq), it's quite possible that 6913 * a packet entering the PSQ and a ps-poll being handled will 6914 * race, causing the TIM to be cleared and not re-set. 6915 */ 6916static int 6917ath_node_set_tim(struct ieee80211_node *ni, int enable) 6918{ 6919#ifdef ATH_SW_PSQ 6920 struct ieee80211com *ic = ni->ni_ic; 6921 struct ath_softc *sc = ic->ic_ifp->if_softc; 6922 struct ath_node *an = ATH_NODE(ni); 6923 struct ath_vap *avp = ATH_VAP(ni->ni_vap); 6924 int changed = 0; 6925 6926 ATH_TX_LOCK(sc); 6927 an->an_stack_psq = enable; 6928 6929 /* 6930 * This will get called for all operating modes, 6931 * even if avp->av_set_tim is unset. 6932 * It's currently set for hostap/ibss modes; but 6933 * the same infrastructure is used for both STA 6934 * and AP/IBSS node power save. 6935 */ 6936 if (avp->av_set_tim == NULL) { 6937 ATH_TX_UNLOCK(sc); 6938 return (0); 6939 } 6940 6941 /* 6942 * If setting the bit, always set it here. 6943 * If clearing the bit, only clear it if the 6944 * software queue is also empty. 6945 * 6946 * If the node has left power save, just clear the TIM 6947 * bit regardless of the state of the power save queue. 6948 * 6949 * XXX TODO: although atomics are used, it's quite possible 6950 * that a race will occur between this and setting/clearing 6951 * in another thread. TX completion will always occur in 6952 * one thread, however setting/clearing the TIM bit can come 6953 * from a variety of different process contexts! 6954 */ 6955 if (enable && an->an_tim_set == 1) { 6956 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 6957 "%s: %6D: enable=%d, tim_set=1, ignoring\n", 6958 __func__, 6959 ni->ni_macaddr, 6960 ":", 6961 enable); 6962 ATH_TX_UNLOCK(sc); 6963 } else if (enable) { 6964 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 6965 "%s: %6D: enable=%d, enabling TIM\n", 6966 __func__, 6967 ni->ni_macaddr, 6968 ":", 6969 enable); 6970 an->an_tim_set = 1; 6971 ATH_TX_UNLOCK(sc); 6972 changed = avp->av_set_tim(ni, enable); 6973 } else if (an->an_swq_depth == 0) { 6974 /* disable */ 6975 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 6976 "%s: %6D: enable=%d, an_swq_depth == 0, disabling\n", 6977 __func__, 6978 ni->ni_macaddr, 6979 ":", 6980 enable); 6981 an->an_tim_set = 0; 6982 ATH_TX_UNLOCK(sc); 6983 changed = avp->av_set_tim(ni, enable); 6984 } else if (!
an->an_is_powersave) { 6985 /* 6986 * disable regardless; the node isn't in powersave now 6987 */ 6988 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 6989 "%s: %6D: enable=%d, an_pwrsave=0, disabling\n", 6990 __func__, 6991 ni->ni_macaddr, 6992 ":", 6993 enable); 6994 an->an_tim_set = 0; 6995 ATH_TX_UNLOCK(sc); 6996 changed = avp->av_set_tim(ni, enable); 6997 } else { 6998 /* 6999 * psq disable, node is currently in powersave, node 7000 * software queue isn't empty, so don't clear the TIM bit 7001 * for now. 7002 */ 7003 ATH_TX_UNLOCK(sc); 7004 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 7005 "%s: %6D: enable=%d, an_swq_depth > 0, ignoring\n", 7006 __func__, 7007 ni->ni_macaddr, 7008 ":", 7009 enable); 7010 changed = 0; 7011 } 7012 7013 return (changed); 7014#else 7015 struct ath_vap *avp = ATH_VAP(ni->ni_vap); 7016 7017 /* 7018 * Some operating modes don't set av_set_tim(), so don't 7019 * update it here. 7020 */ 7021 if (avp->av_set_tim == NULL) 7022 return (0); 7023 7024 return (avp->av_set_tim(ni, enable)); 7025#endif /* ATH_SW_PSQ */ 7026} 7027 7028/* 7029 * Set or update the TIM from the software queue. 7030 * 7031 * Check the software queue depth before attempting to 7032 * acquire any locks; that avoids needlessly taking the lock. 7033 * Then, re-check afterwards to ensure nothing has changed in 7034 * the meantime. 7035 * 7036 * set: This is designed to be called from the TX path, after 7037 * a frame has been queued; to see if the swq > 0. 7038 * 7039 * clear: This is designed to be called from the buffer completion point 7040 * (right now it's ath_tx_default_comp()) where the state of 7041 * a software queue has changed. 7042 * 7043 * It makes sense to place it at buffer free / completion rather 7044 * than after each software queue operation, as there's no real 7045 * point in churning the TIM bit while the last frames in the 7046 * software queue are transmitted. If they fail and we retry them, 7047 * we'd just be setting the TIM bit again anyway. 7048 */ 7049void 7050ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni, 7051 int enable) 7052{ 7053#ifdef ATH_SW_PSQ 7054 struct ath_node *an; 7055 struct ath_vap *avp; 7056 7057 /* Don't do this for broadcast/etc frames */ 7058 if (ni == NULL) 7059 return; 7060 7061 an = ATH_NODE(ni); 7062 avp = ATH_VAP(ni->ni_vap); 7063 7064 /* 7065 * And for operating modes without the TIM handler set, let's 7066 * just skip those. 7067 */ 7068 if (avp->av_set_tim == NULL) 7069 return; 7070 7071 ATH_TX_LOCK_ASSERT(sc); 7072 7073 if (enable) { 7074 if (an->an_is_powersave && 7075 an->an_tim_set == 0 && 7076 an->an_swq_depth != 0) { 7077 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 7078 "%s: %6D: swq_depth>0, tim_set=0, set!\n", 7079 __func__, 7080 ni->ni_macaddr, 7081 ":"); 7082 an->an_tim_set = 1; 7083 (void) avp->av_set_tim(ni, 1); 7084 } 7085 } else { 7086 /* 7087 * Don't bother grabbing the lock unless the queue is empty. 7088 */ 7089 if (an->an_swq_depth != 0) 7090 return; 7091 7092 if (an->an_is_powersave && 7093 an->an_stack_psq == 0 && 7094 an->an_tim_set == 1 && 7095 an->an_swq_depth == 0) { 7096 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 7097 "%s: %6D: swq_depth=0, tim_set=1, psq_set=0," 7098 " clear!\n", 7099 __func__, 7100 ni->ni_macaddr, 7101 ":"); 7102 an->an_tim_set = 0; 7103 (void) avp->av_set_tim(ni, 0); 7104 } 7105 } 7106#else 7107 return; 7108#endif /* ATH_SW_PSQ */ 7109} 7110 7111/* 7112 * Received a ps-poll frame from net80211.
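 * (Ie, a station in power save is polling us to release a
 * buffered frame.)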
7113 * 7114 * Here we get a chance to serve out a software-queued frame ourselves 7115 * before we punt it to net80211 to transmit us one itself - either 7116 * because there's traffic in the net80211 psq, or a NULL frame to 7117 * indicate there's nothing else. 7118 */ 7119static void 7120ath_node_recv_pspoll(struct ieee80211_node *ni, struct mbuf *m) 7121{ 7122#ifdef ATH_SW_PSQ 7123 struct ath_node *an; 7124 struct ath_vap *avp; 7125 struct ieee80211com *ic = ni->ni_ic; 7126 struct ath_softc *sc = ic->ic_ifp->if_softc; 7127 int tid; 7128 7129 /* Just paranoia */ 7130 if (ni == NULL) 7131 return; 7132 7133 /* 7134 * Unassociated (temporary node) station. 7135 */ 7136 if (ni->ni_associd == 0) 7137 return; 7138 7139 /* 7140 * We do have an active node, so let's begin looking into it. 7141 */ 7142 an = ATH_NODE(ni); 7143 avp = ATH_VAP(ni->ni_vap); 7144 7145 /* 7146 * For now, we just call the original ps-poll method. 7147 * Once we're ready to flip this on: 7148 * 7149 * + Set leak to 1, as no matter what we're going to have 7150 * to send a frame; 7151 * + Check the software queue and if there's something in it, 7152 * schedule the highest TID that has traffic from this node. 7153 * Then make sure we schedule the software scheduler to 7154 * run so it picks up said frame. 7155 * 7156 * That way whatever happens, we'll at least send _a_ frame 7157 * to the given node. 7158 * 7159 * Again, yes, it's crappy QoS if the node has multiple 7160 * TIDs worth of traffic - but let's get it working first 7161 * before we optimise it. 7162 * 7163 * Also yes, there's definitely latency here - we're not 7164 * direct dispatching to the hardware in this path (and 7165 * we're likely being called from the packet receive path, 7166 * so going back into TX may be a little hairy!) but again 7167 * I'd like to get this working first before optimising 7168 * turn-around time. 7169 */ 7170 7171 ATH_TX_LOCK(sc); 7172 7173 /* 7174 * Legacy - we're called and the node isn't asleep. 7175 * Immediately punt. 7176 */ 7177 if (! an->an_is_powersave) { 7178 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 7179 "%s: %6D: not in powersave?\n", 7180 __func__, 7181 ni->ni_macaddr, 7182 ":"); 7183 ATH_TX_UNLOCK(sc); 7184 avp->av_recv_pspoll(ni, m); 7185 return; 7186 } 7187 7188 /* 7189 * We're in powersave. 7190 * 7191 * Leak a frame. 7192 */ 7193 an->an_leak_count = 1; 7194 7195 /* 7196 * Now, if there are no frames for the node, just punt to 7197 * recv_pspoll. 7198 * 7199 * Don't bother checking if the TIM bit is set, we really 7200 * only care if there are any frames here! 7201 */ 7202 if (an->an_swq_depth == 0) { 7203 ATH_TX_UNLOCK(sc); 7204 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 7205 "%s: %6D: SWQ empty; punting to net80211\n", 7206 __func__, 7207 ni->ni_macaddr, 7208 ":"); 7209 avp->av_recv_pspoll(ni, m); 7210 return; 7211 } 7212 7213 /* 7214 * Ok, let's schedule the highest TID that has traffic 7215 * and then schedule something. 7216 */ 7217 for (tid = IEEE80211_TID_SIZE - 1; tid >= 0; tid--) { 7218 struct ath_tid *atid = &an->an_tid[tid]; 7219 /* 7220 * No frames? Skip. 7221 */ 7222 if (atid->axq_depth == 0) 7223 continue; 7224 ath_tx_tid_sched(sc, atid); 7225 /* 7226 * XXX we could do a direct call to the TXQ 7227 * scheduler code here to optimise latency 7228 * at the expense of a REALLY deep callstack.
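 * For now we just poke the TX taskqueue below and accept
 * the extra scheduling latency.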
7229 */ 7230 ATH_TX_UNLOCK(sc); 7231 taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask); 7232 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 7233 "%s: %6D: leaking frame to TID %d\n", 7234 __func__, 7235 ni->ni_macaddr, 7236 ":", 7237 tid); 7238 return; 7239 } 7240 7241 ATH_TX_UNLOCK(sc); 7242 7243 /* 7244 * XXX nothing in the TIDs at this point? Eek. 7245 */ 7246 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 7247 "%s: %6D: TIDs empty, but ath_node showed traffic?!\n", 7248 __func__, 7249 ni->ni_macaddr, 7250 ":"); 7251 avp->av_recv_pspoll(ni, m); 7252#else 7253 avp->av_recv_pspoll(ni, m); 7254#endif /* ATH_SW_PSQ */ 7255} 7256 7257MODULE_VERSION(if_ath, 1); 7258MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */ 7259#if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ) || defined(ATH_DEBUG_ALQ) 7260MODULE_DEPEND(if_ath, alq, 1, 1, 1); 7261#endif 7262