/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/ath/if_ath.c 330446 2018-03-05 06:59:30Z eadler $");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
48 */ 49#include "opt_ah.h" 50#include "opt_wlan.h" 51 52#include <sys/param.h> 53#include <sys/systm.h> 54#include <sys/sysctl.h> 55#include <sys/mbuf.h> 56#include <sys/malloc.h> 57#include <sys/lock.h> 58#include <sys/mutex.h> 59#include <sys/kernel.h> 60#include <sys/socket.h> 61#include <sys/sockio.h> 62#include <sys/errno.h> 63#include <sys/callout.h> 64#include <sys/bus.h> 65#include <sys/endian.h> 66#include <sys/kthread.h> 67#include <sys/taskqueue.h> 68#include <sys/priv.h> 69#include <sys/module.h> 70#include <sys/ktr.h> 71#include <sys/smp.h> /* for mp_ncpus */ 72 73#include <machine/bus.h> 74 75#include <net/if.h> 76#include <net/if_var.h> 77#include <net/if_dl.h> 78#include <net/if_media.h> 79#include <net/if_types.h> 80#include <net/if_arp.h> 81#include <net/ethernet.h> 82#include <net/if_llc.h> 83 84#include <net80211/ieee80211_var.h> 85#include <net80211/ieee80211_regdomain.h> 86#ifdef IEEE80211_SUPPORT_SUPERG 87#include <net80211/ieee80211_superg.h> 88#endif 89#ifdef IEEE80211_SUPPORT_TDMA 90#include <net80211/ieee80211_tdma.h> 91#endif 92 93#include <net/bpf.h> 94 95#ifdef INET 96#include <netinet/in.h> 97#include <netinet/if_ether.h> 98#endif 99 100#include <dev/ath/if_athvar.h> 101#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */ 102#include <dev/ath/ath_hal/ah_diagcodes.h> 103 104#include <dev/ath/if_ath_debug.h> 105#include <dev/ath/if_ath_misc.h> 106#include <dev/ath/if_ath_tsf.h> 107#include <dev/ath/if_ath_tx.h> 108#include <dev/ath/if_ath_sysctl.h> 109#include <dev/ath/if_ath_led.h> 110#include <dev/ath/if_ath_keycache.h> 111#include <dev/ath/if_ath_rx.h> 112#include <dev/ath/if_ath_rx_edma.h> 113#include <dev/ath/if_ath_tx_edma.h> 114#include <dev/ath/if_ath_beacon.h> 115#include <dev/ath/if_ath_btcoex.h> 116#include <dev/ath/if_ath_btcoex_mci.h> 117#include <dev/ath/if_ath_spectral.h> 118#include <dev/ath/if_ath_lna_div.h> 119#include <dev/ath/if_athdfs.h> 120#include <dev/ath/if_ath_ioctl.h> 121#include <dev/ath/if_ath_descdma.h> 122 123#ifdef ATH_TX99_DIAG 124#include <dev/ath/ath_tx99/ath_tx99.h> 125#endif 126 127#ifdef ATH_DEBUG_ALQ 128#include <dev/ath/if_ath_alq.h> 129#endif 130 131/* 132 * Only enable this if you're working on PS-POLL support. 133 */ 134#define ATH_SW_PSQ 135 136/* 137 * ATH_BCBUF determines the number of vap's that can transmit 138 * beacons and also (currently) the number of vap's that can 139 * have unique mac addresses/bssid. When staggering beacons 140 * 4 is probably a good max as otherwise the beacons become 141 * very closely spaced and there is limited time for cab q traffic 142 * to go out. You can burst beacons instead but that is not good 143 * for stations in power save and at some point you really want 144 * another radio (and channel). 145 * 146 * The limit on the number of mac addresses is tied to our use of 147 * the U/L bit and tracking addresses in a byte; it would be 148 * worthwhile to allow more for applications like proxy sta. 
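 *
 * (The index is folded into the first address octet together with
 * the U/L bit - see assign_address() further down - which is why a
 * single byte bounds how many distinct addresses we can hand out.)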
149 */ 150CTASSERT(ATH_BCBUF <= 8); 151 152static struct ieee80211vap *ath_vap_create(struct ieee80211com *, 153 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 154 const uint8_t [IEEE80211_ADDR_LEN], 155 const uint8_t [IEEE80211_ADDR_LEN]); 156static void ath_vap_delete(struct ieee80211vap *); 157static int ath_init(struct ath_softc *); 158static void ath_stop(struct ath_softc *); 159static int ath_reset_vap(struct ieee80211vap *, u_long); 160static int ath_transmit(struct ieee80211com *, struct mbuf *); 161static int ath_media_change(struct ifnet *); 162static void ath_watchdog(void *); 163static void ath_parent(struct ieee80211com *); 164static void ath_fatal_proc(void *, int); 165static void ath_bmiss_vap(struct ieee80211vap *); 166static void ath_bmiss_proc(void *, int); 167static void ath_key_update_begin(struct ieee80211vap *); 168static void ath_key_update_end(struct ieee80211vap *); 169static void ath_update_mcast_hw(struct ath_softc *); 170static void ath_update_mcast(struct ieee80211com *); 171static void ath_update_promisc(struct ieee80211com *); 172static void ath_updateslot(struct ieee80211com *); 173static void ath_bstuck_proc(void *, int); 174static void ath_reset_proc(void *, int); 175static int ath_desc_alloc(struct ath_softc *); 176static void ath_desc_free(struct ath_softc *); 177static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *, 178 const uint8_t [IEEE80211_ADDR_LEN]); 179static void ath_node_cleanup(struct ieee80211_node *); 180static void ath_node_free(struct ieee80211_node *); 181static void ath_node_getsignal(const struct ieee80211_node *, 182 int8_t *, int8_t *); 183static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int); 184static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype); 185static int ath_tx_setup(struct ath_softc *, int, int); 186static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *); 187static void ath_tx_cleanup(struct ath_softc *); 188static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, 189 int dosched); 190static void ath_tx_proc_q0(void *, int); 191static void ath_tx_proc_q0123(void *, int); 192static void ath_tx_proc(void *, int); 193static void ath_txq_sched_tasklet(void *, int); 194static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); 195static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *); 196static void ath_scan_start(struct ieee80211com *); 197static void ath_scan_end(struct ieee80211com *); 198static void ath_set_channel(struct ieee80211com *); 199#ifdef ATH_ENABLE_11N 200static void ath_update_chw(struct ieee80211com *); 201#endif /* ATH_ENABLE_11N */ 202static void ath_calibrate(void *); 203static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int); 204static void ath_setup_stationkey(struct ieee80211_node *); 205static void ath_newassoc(struct ieee80211_node *, int); 206static int ath_setregdomain(struct ieee80211com *, 207 struct ieee80211_regdomain *, int, 208 struct ieee80211_channel []); 209static void ath_getradiocaps(struct ieee80211com *, int, int *, 210 struct ieee80211_channel []); 211static int ath_getchannels(struct ath_softc *); 212 213static int ath_rate_setup(struct ath_softc *, u_int mode); 214static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); 215 216static void ath_announce(struct ath_softc *); 217 218static void ath_dfs_tasklet(void *, int); 219static void ath_node_powersave(struct ieee80211_node *, int); 220static int ath_node_set_tim(struct 
ieee80211_node *, int); 221static void ath_node_recv_pspoll(struct ieee80211_node *, struct mbuf *); 222 223#ifdef IEEE80211_SUPPORT_TDMA 224#include <dev/ath/if_ath_tdma.h> 225#endif 226 227SYSCTL_DECL(_hw_ath); 228 229/* XXX validate sysctl values */ 230static int ath_longcalinterval = 30; /* long cals every 30 secs */ 231SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval, 232 0, "long chip calibration interval (secs)"); 233static int ath_shortcalinterval = 100; /* short cals every 100 ms */ 234SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval, 235 0, "short chip calibration interval (msecs)"); 236static int ath_resetcalinterval = 20*60; /* reset cal state 20 mins */ 237SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval, 238 0, "reset chip calibration results (secs)"); 239static int ath_anicalinterval = 100; /* ANI calibration - 100 msec */ 240SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval, 241 0, "ANI calibration (msecs)"); 242 243int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */ 244SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &ath_rxbuf, 245 0, "rx buffers allocated"); 246int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */ 247SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RWTUN, &ath_txbuf, 248 0, "tx buffers allocated"); 249int ath_txbuf_mgmt = ATH_MGMT_TXBUF; /* # mgmt tx buffers to allocate */ 250SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RWTUN, &ath_txbuf_mgmt, 251 0, "tx (mgmt) buffers allocated"); 252 253int ath_bstuck_threshold = 4; /* max missed beacons */ 254SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold, 255 0, "max missed beacon xmits before chip reset"); 256 257MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers"); 258 259void 260ath_legacy_attach_comp_func(struct ath_softc *sc) 261{ 262 263 /* 264 * Special case certain configurations. Note the 265 * CAB queue is handled by these specially so don't 266 * include them when checking the txq setup mask. 267 */ 268 switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) { 269 case 0x01: 270 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc); 271 break; 272 case 0x0f: 273 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc); 274 break; 275 default: 276 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc); 277 break; 278 } 279} 280 281/* 282 * Set the target power mode. 283 * 284 * If this is called during a point in time where 285 * the hardware is being programmed elsewhere, it will 286 * simply store it away and update it when all current 287 * uses of the hardware are completed. 288 */ 289void 290_ath_power_setpower(struct ath_softc *sc, int power_state, const char *file, int line) 291{ 292 ATH_LOCK_ASSERT(sc); 293 294 sc->sc_target_powerstate = power_state; 295 296 DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n", 297 __func__, 298 file, 299 line, 300 power_state, 301 sc->sc_powersave_refcnt); 302 303 if (sc->sc_powersave_refcnt == 0 && 304 power_state != sc->sc_cur_powerstate) { 305 sc->sc_cur_powerstate = power_state; 306 ath_hal_setpower(sc->sc_ah, power_state); 307 308 /* 309 * If the NIC is force-awake, then set the 310 * self-gen frame state appropriately. 311 * 312 * If the nic is in network sleep or full-sleep, 313 * we let the above call leave the self-gen 314 * state as "sleep". 
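 *
 * (The self-gen state is re-applied just below, but only when the
 * chip has just been brought to HAL_PM_AWAKE and a different
 * self-gen target is pending; see _ath_power_set_selfgen() for the
 * rest of that handling.)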
315 */ 316 if (sc->sc_cur_powerstate == HAL_PM_AWAKE && 317 sc->sc_target_selfgen_state != HAL_PM_AWAKE) { 318 ath_hal_setselfgenpower(sc->sc_ah, 319 sc->sc_target_selfgen_state); 320 } 321 } 322} 323 324/* 325 * Set the current self-generated frames state. 326 * 327 * This is separate from the target power mode. The chip may be 328 * awake but the desired state is "sleep", so frames sent to the 329 * destination has PWRMGT=1 in the 802.11 header. The NIC also 330 * needs to know to set PWRMGT=1 in self-generated frames. 331 */ 332void 333_ath_power_set_selfgen(struct ath_softc *sc, int power_state, const char *file, int line) 334{ 335 336 ATH_LOCK_ASSERT(sc); 337 338 DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n", 339 __func__, 340 file, 341 line, 342 power_state, 343 sc->sc_target_selfgen_state); 344 345 sc->sc_target_selfgen_state = power_state; 346 347 /* 348 * If the NIC is force-awake, then set the power state. 349 * Network-state and full-sleep will already transition it to 350 * mark self-gen frames as sleeping - and we can't 351 * guarantee the NIC is awake to program the self-gen frame 352 * setting anyway. 353 */ 354 if (sc->sc_cur_powerstate == HAL_PM_AWAKE) { 355 ath_hal_setselfgenpower(sc->sc_ah, power_state); 356 } 357} 358 359/* 360 * Set the hardware power mode and take a reference. 361 * 362 * This doesn't update the target power mode in the driver; 363 * it just updates the hardware power state. 364 * 365 * XXX it should only ever force the hardware awake; it should 366 * never be called to set it asleep. 367 */ 368void 369_ath_power_set_power_state(struct ath_softc *sc, int power_state, const char *file, int line) 370{ 371 ATH_LOCK_ASSERT(sc); 372 373 DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n", 374 __func__, 375 file, 376 line, 377 power_state, 378 sc->sc_powersave_refcnt); 379 380 sc->sc_powersave_refcnt++; 381 382 if (power_state != sc->sc_cur_powerstate) { 383 ath_hal_setpower(sc->sc_ah, power_state); 384 sc->sc_cur_powerstate = power_state; 385 386 /* 387 * Adjust the self-gen powerstate if appropriate. 388 */ 389 if (sc->sc_cur_powerstate == HAL_PM_AWAKE && 390 sc->sc_target_selfgen_state != HAL_PM_AWAKE) { 391 ath_hal_setselfgenpower(sc->sc_ah, 392 sc->sc_target_selfgen_state); 393 } 394 395 } 396} 397 398/* 399 * Restore the power save mode to what it once was. 400 * 401 * This will decrement the reference counter and once it hits 402 * zero, it'll restore the powersave state. 403 */ 404void 405_ath_power_restore_power_state(struct ath_softc *sc, const char *file, int line) 406{ 407 408 ATH_LOCK_ASSERT(sc); 409 410 DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) refcnt=%d, target state=%d\n", 411 __func__, 412 file, 413 line, 414 sc->sc_powersave_refcnt, 415 sc->sc_target_powerstate); 416 417 if (sc->sc_powersave_refcnt == 0) 418 device_printf(sc->sc_dev, "%s: refcnt=0?\n", __func__); 419 else 420 sc->sc_powersave_refcnt--; 421 422 if (sc->sc_powersave_refcnt == 0 && 423 sc->sc_target_powerstate != sc->sc_cur_powerstate) { 424 sc->sc_cur_powerstate = sc->sc_target_powerstate; 425 ath_hal_setpower(sc->sc_ah, sc->sc_target_powerstate); 426 } 427 428 /* 429 * Adjust the self-gen powerstate if appropriate. 430 */ 431 if (sc->sc_cur_powerstate == HAL_PM_AWAKE && 432 sc->sc_target_selfgen_state != HAL_PM_AWAKE) { 433 ath_hal_setselfgenpower(sc->sc_ah, 434 sc->sc_target_selfgen_state); 435 } 436 437} 438 439/* 440 * Configure the initial HAL configuration values based on bus 441 * specific parameters. 
442 * 443 * Some PCI IDs and other information may need tweaking. 444 * 445 * XXX TODO: ath9k and the Atheros HAL only program comm2g_switch_enable 446 * if BT antenna diversity isn't enabled. 447 * 448 * So, let's also figure out how to enable BT diversity for AR9485. 449 */ 450static void 451ath_setup_hal_config(struct ath_softc *sc, HAL_OPS_CONFIG *ah_config) 452{ 453 /* XXX TODO: only for PCI devices? */ 454 455 if (sc->sc_pci_devinfo & (ATH_PCI_CUS198 | ATH_PCI_CUS230)) { 456 ah_config->ath_hal_ext_lna_ctl_gpio = 0x200; /* bit 9 */ 457 ah_config->ath_hal_ext_atten_margin_cfg = AH_TRUE; 458 ah_config->ath_hal_min_gainidx = AH_TRUE; 459 ah_config->ath_hal_ant_ctrl_comm2g_switch_enable = 0x000bbb88; 460 /* XXX low_rssi_thresh */ 461 /* XXX fast_div_bias */ 462 device_printf(sc->sc_dev, "configuring for %s\n", 463 (sc->sc_pci_devinfo & ATH_PCI_CUS198) ? 464 "CUS198" : "CUS230"); 465 } 466 467 if (sc->sc_pci_devinfo & ATH_PCI_CUS217) 468 device_printf(sc->sc_dev, "CUS217 card detected\n"); 469 470 if (sc->sc_pci_devinfo & ATH_PCI_CUS252) 471 device_printf(sc->sc_dev, "CUS252 card detected\n"); 472 473 if (sc->sc_pci_devinfo & ATH_PCI_AR9565_1ANT) 474 device_printf(sc->sc_dev, "WB335 1-ANT card detected\n"); 475 476 if (sc->sc_pci_devinfo & ATH_PCI_AR9565_2ANT) 477 device_printf(sc->sc_dev, "WB335 2-ANT card detected\n"); 478 479 if (sc->sc_pci_devinfo & ATH_PCI_BT_ANT_DIV) 480 device_printf(sc->sc_dev, 481 "Bluetooth Antenna Diversity card detected\n"); 482 483 if (sc->sc_pci_devinfo & ATH_PCI_KILLER) 484 device_printf(sc->sc_dev, "Killer Wireless card detected\n"); 485 486#if 0 487 /* 488 * Some WB335 cards do not support antenna diversity. Since 489 * we use a hardcoded value for AR9565 instead of using the 490 * EEPROM/OTP data, remove the combining feature from 491 * the HW capabilities bitmap. 492 */ 493 if (sc->sc_pci_devinfo & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) { 494 if (!(sc->sc_pci_devinfo & ATH9K_PCI_BT_ANT_DIV)) 495 pCap->hw_caps &= ~ATH9K_HW_CAP_ANT_DIV_COMB; 496 } 497 498 if (sc->sc_pci_devinfo & ATH9K_PCI_BT_ANT_DIV) { 499 pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV; 500 device_printf(sc->sc_dev, "Set BT/WLAN RX diversity capability\n"); 501 } 502#endif 503 504 if (sc->sc_pci_devinfo & ATH_PCI_D3_L1_WAR) { 505 ah_config->ath_hal_pcie_waen = 0x0040473b; 506 device_printf(sc->sc_dev, "Enable WAR for ASPM D3/L1\n"); 507 } 508 509#if 0 510 if (sc->sc_pci_devinfo & ATH9K_PCI_NO_PLL_PWRSAVE) { 511 ah->config.no_pll_pwrsave = true; 512 device_printf(sc->sc_dev, "Disable PLL PowerSave\n"); 513 } 514#endif 515 516} 517 518/* 519 * Attempt to fetch the MAC address from the kernel environment. 520 * 521 * Returns 0, macaddr in macaddr if successful; -1 otherwise. 522 */ 523static int 524ath_fetch_mac_kenv(struct ath_softc *sc, uint8_t *macaddr) 525{ 526 char devid_str[32]; 527 int local_mac = 0; 528 char *local_macstr; 529 530 /* 531 * Fetch from the kenv rather than using hints. 532 * 533 * Hints would be nice but the transition to dynamic 534 * hints/kenv doesn't happen early enough for this 535 * to work reliably (eg on anything embedded.) 
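 *
 * (For example, an entry of the form
 * hint.ath.0.macaddr="00:11:22:33:44:55" in loader.conf ends up in
 * the kenv and is picked up here; the address and unit shown are
 * only an illustration.)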
536 */ 537 snprintf(devid_str, 32, "hint.%s.%d.macaddr", 538 device_get_name(sc->sc_dev), 539 device_get_unit(sc->sc_dev)); 540 541 if ((local_macstr = kern_getenv(devid_str)) != NULL) { 542 uint32_t tmpmac[ETHER_ADDR_LEN]; 543 int count; 544 int i; 545 546 /* Have a MAC address; should use it */ 547 device_printf(sc->sc_dev, 548 "Overriding MAC address from environment: '%s'\n", 549 local_macstr); 550 551 /* Extract out the MAC address */ 552 count = sscanf(local_macstr, "%x%*c%x%*c%x%*c%x%*c%x%*c%x", 553 &tmpmac[0], &tmpmac[1], 554 &tmpmac[2], &tmpmac[3], 555 &tmpmac[4], &tmpmac[5]); 556 if (count == 6) { 557 /* Valid! */ 558 local_mac = 1; 559 for (i = 0; i < ETHER_ADDR_LEN; i++) 560 macaddr[i] = tmpmac[i]; 561 } 562 /* Done! */ 563 freeenv(local_macstr); 564 local_macstr = NULL; 565 } 566 567 if (local_mac) 568 return (0); 569 return (-1); 570} 571 572#define HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20) 573#define HAL_MODE_HT40 \ 574 (HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \ 575 HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS) 576int 577ath_attach(u_int16_t devid, struct ath_softc *sc) 578{ 579 struct ieee80211com *ic = &sc->sc_ic; 580 struct ath_hal *ah = NULL; 581 HAL_STATUS status; 582 int error = 0, i; 583 u_int wmodes; 584 int rx_chainmask, tx_chainmask; 585 HAL_OPS_CONFIG ah_config; 586 587 DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid); 588 589 ic->ic_softc = sc; 590 ic->ic_name = device_get_nameunit(sc->sc_dev); 591 592 /* 593 * Configure the initial configuration data. 594 * 595 * This is stuff that may be needed early during attach 596 * rather than done via configuration calls later. 597 */ 598 bzero(&ah_config, sizeof(ah_config)); 599 ath_setup_hal_config(sc, &ah_config); 600 601 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, 602 sc->sc_eepromdata, &ah_config, &status); 603 if (ah == NULL) { 604 device_printf(sc->sc_dev, 605 "unable to attach hardware; HAL status %u\n", status); 606 error = ENXIO; 607 goto bad; 608 } 609 sc->sc_ah = ah; 610 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */ 611#ifdef ATH_DEBUG 612 sc->sc_debug = ath_debug; 613#endif 614 615 /* 616 * Setup the DMA/EDMA functions based on the current 617 * hardware support. 618 * 619 * This is required before the descriptors are allocated. 620 */ 621 if (ath_hal_hasedma(sc->sc_ah)) { 622 sc->sc_isedma = 1; 623 ath_recv_setup_edma(sc); 624 ath_xmit_setup_edma(sc); 625 } else { 626 ath_recv_setup_legacy(sc); 627 ath_xmit_setup_legacy(sc); 628 } 629 630 if (ath_hal_hasmybeacon(sc->sc_ah)) { 631 sc->sc_do_mybeacon = 1; 632 } 633 634 /* 635 * Check if the MAC has multi-rate retry support. 636 * We do this by trying to setup a fake extended 637 * descriptor. MAC's that don't have support will 638 * return false w/o doing anything. MAC's that do 639 * support it will return true w/o doing anything. 640 */ 641 sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0); 642 643 /* 644 * Check if the device has hardware counters for PHY 645 * errors. If so we need to enable the MIB interrupt 646 * so we can act on stat triggers. 647 */ 648 if (ath_hal_hwphycounters(ah)) 649 sc->sc_needmib = 1; 650 651 /* 652 * Get the hardware key cache size. 
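 * (Whatever the HAL reports is clamped to ATH_KEYMAX just below;
 * any additional hardware slots are simply left unused.)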
653 */ 654 sc->sc_keymax = ath_hal_keycachesize(ah); 655 if (sc->sc_keymax > ATH_KEYMAX) { 656 device_printf(sc->sc_dev, 657 "Warning, using only %u of %u key cache slots\n", 658 ATH_KEYMAX, sc->sc_keymax); 659 sc->sc_keymax = ATH_KEYMAX; 660 } 661 /* 662 * Reset the key cache since some parts do not 663 * reset the contents on initial power up. 664 */ 665 for (i = 0; i < sc->sc_keymax; i++) 666 ath_hal_keyreset(ah, i); 667 668 /* 669 * Collect the default channel list. 670 */ 671 error = ath_getchannels(sc); 672 if (error != 0) 673 goto bad; 674 675 /* 676 * Setup rate tables for all potential media types. 677 */ 678 ath_rate_setup(sc, IEEE80211_MODE_11A); 679 ath_rate_setup(sc, IEEE80211_MODE_11B); 680 ath_rate_setup(sc, IEEE80211_MODE_11G); 681 ath_rate_setup(sc, IEEE80211_MODE_TURBO_A); 682 ath_rate_setup(sc, IEEE80211_MODE_TURBO_G); 683 ath_rate_setup(sc, IEEE80211_MODE_STURBO_A); 684 ath_rate_setup(sc, IEEE80211_MODE_11NA); 685 ath_rate_setup(sc, IEEE80211_MODE_11NG); 686 ath_rate_setup(sc, IEEE80211_MODE_HALF); 687 ath_rate_setup(sc, IEEE80211_MODE_QUARTER); 688 689 /* NB: setup here so ath_rate_update is happy */ 690 ath_setcurmode(sc, IEEE80211_MODE_11A); 691 692 /* 693 * Allocate TX descriptors and populate the lists. 694 */ 695 error = ath_desc_alloc(sc); 696 if (error != 0) { 697 device_printf(sc->sc_dev, 698 "failed to allocate TX descriptors: %d\n", error); 699 goto bad; 700 } 701 error = ath_txdma_setup(sc); 702 if (error != 0) { 703 device_printf(sc->sc_dev, 704 "failed to allocate TX descriptors: %d\n", error); 705 goto bad; 706 } 707 708 /* 709 * Allocate RX descriptors and populate the lists. 710 */ 711 error = ath_rxdma_setup(sc); 712 if (error != 0) { 713 device_printf(sc->sc_dev, 714 "failed to allocate RX descriptors: %d\n", error); 715 goto bad; 716 } 717 718 callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0); 719 callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0); 720 721 ATH_TXBUF_LOCK_INIT(sc); 722 723 sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT, 724 taskqueue_thread_enqueue, &sc->sc_tq); 725 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", 726 device_get_nameunit(sc->sc_dev)); 727 728 TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc); 729 TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc); 730 TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc); 731 TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc); 732 TASK_INIT(&sc->sc_txqtask, 0, ath_txq_sched_tasklet, sc); 733 TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc); 734 735 /* 736 * Allocate hardware transmit queues: one queue for 737 * beacon frames and one data queue for each QoS 738 * priority. Note that the hal handles resetting 739 * these queues at the needed time. 
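 *
 * (One hardware queue is requested per WME access category
 * (BK/BE/VI/VO) in addition to the CAB and beacon queues; if the
 * MAC cannot provide all four AC queues, the code below folds
 * BE/VI/VO back onto the BK queue.)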
740 * 741 * XXX PS-Poll 742 */ 743 sc->sc_bhalq = ath_beaconq_setup(sc); 744 if (sc->sc_bhalq == (u_int) -1) { 745 device_printf(sc->sc_dev, 746 "unable to setup a beacon xmit queue!\n"); 747 error = EIO; 748 goto bad2; 749 } 750 sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0); 751 if (sc->sc_cabq == NULL) { 752 device_printf(sc->sc_dev, "unable to setup CAB xmit queue!\n"); 753 error = EIO; 754 goto bad2; 755 } 756 /* NB: insure BK queue is the lowest priority h/w queue */ 757 if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) { 758 device_printf(sc->sc_dev, 759 "unable to setup xmit queue for %s traffic!\n", 760 ieee80211_wme_acnames[WME_AC_BK]); 761 error = EIO; 762 goto bad2; 763 } 764 if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) || 765 !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) || 766 !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) { 767 /* 768 * Not enough hardware tx queues to properly do WME; 769 * just punt and assign them all to the same h/w queue. 770 * We could do a better job of this if, for example, 771 * we allocate queues when we switch from station to 772 * AP mode. 773 */ 774 if (sc->sc_ac2q[WME_AC_VI] != NULL) 775 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); 776 if (sc->sc_ac2q[WME_AC_BE] != NULL) 777 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); 778 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; 779 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; 780 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; 781 } 782 783 /* 784 * Attach the TX completion function. 785 * 786 * The non-EDMA chips may have some special case optimisations; 787 * this method gives everyone a chance to attach cleanly. 788 */ 789 sc->sc_tx.xmit_attach_comp_func(sc); 790 791 /* 792 * Setup rate control. Some rate control modules 793 * call back to change the anntena state so expose 794 * the necessary entry points. 795 * XXX maybe belongs in struct ath_ratectrl? 796 */ 797 sc->sc_setdefantenna = ath_setdefantenna; 798 sc->sc_rc = ath_rate_attach(sc); 799 if (sc->sc_rc == NULL) { 800 error = EIO; 801 goto bad2; 802 } 803 804 /* Attach DFS module */ 805 if (! ath_dfs_attach(sc)) { 806 device_printf(sc->sc_dev, 807 "%s: unable to attach DFS\n", __func__); 808 error = EIO; 809 goto bad2; 810 } 811 812 /* Attach spectral module */ 813 if (ath_spectral_attach(sc) < 0) { 814 device_printf(sc->sc_dev, 815 "%s: unable to attach spectral\n", __func__); 816 error = EIO; 817 goto bad2; 818 } 819 820 /* Attach bluetooth coexistence module */ 821 if (ath_btcoex_attach(sc) < 0) { 822 device_printf(sc->sc_dev, 823 "%s: unable to attach bluetooth coexistence\n", __func__); 824 error = EIO; 825 goto bad2; 826 } 827 828 /* Attach LNA diversity module */ 829 if (ath_lna_div_attach(sc) < 0) { 830 device_printf(sc->sc_dev, 831 "%s: unable to attach LNA diversity\n", __func__); 832 error = EIO; 833 goto bad2; 834 } 835 836 /* Start DFS processing tasklet */ 837 TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc); 838 839 /* Configure LED state */ 840 sc->sc_blinking = 0; 841 sc->sc_ledstate = 1; 842 sc->sc_ledon = 0; /* low true */ 843 sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */ 844 callout_init(&sc->sc_ledtimer, 1); 845 846 /* 847 * Don't setup hardware-based blinking. 848 * 849 * Although some NICs may have this configured in the 850 * default reset register values, the user may wish 851 * to alter which pins have which function. 852 * 853 * The reference driver attaches the MAC network LED to GPIO1 and 854 * the MAC power LED to GPIO2. However, the DWA-552 cardbus 855 * NIC has these reversed. 
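 *
 * (Hence hardware-driven blinking is left disabled by default
 * below and the net/power LED pins stay at -1 until explicitly
 * configured.)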
856 */ 857 sc->sc_hardled = (1 == 0); 858 sc->sc_led_net_pin = -1; 859 sc->sc_led_pwr_pin = -1; 860 /* 861 * Auto-enable soft led processing for IBM cards and for 862 * 5211 minipci cards. Users can also manually enable/disable 863 * support with a sysctl. 864 */ 865 sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID); 866 ath_led_config(sc); 867 ath_hal_setledstate(ah, HAL_LED_INIT); 868 869 /* XXX not right but it's not used anywhere important */ 870 ic->ic_phytype = IEEE80211_T_OFDM; 871 ic->ic_opmode = IEEE80211_M_STA; 872 ic->ic_caps = 873 IEEE80211_C_STA /* station mode */ 874 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ 875 | IEEE80211_C_HOSTAP /* hostap mode */ 876 | IEEE80211_C_MONITOR /* monitor mode */ 877 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 878 | IEEE80211_C_WDS /* 4-address traffic works */ 879 | IEEE80211_C_MBSS /* mesh point link mode */ 880 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 881 | IEEE80211_C_SHSLOT /* short slot time supported */ 882 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ 883#ifndef ATH_ENABLE_11N 884 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 885#endif 886 | IEEE80211_C_TXFRAG /* handle tx frags */ 887#ifdef ATH_ENABLE_DFS 888 | IEEE80211_C_DFS /* Enable radar detection */ 889#endif 890 | IEEE80211_C_PMGT /* Station side power mgmt */ 891 | IEEE80211_C_SWSLEEP 892 ; 893 /* 894 * Query the hal to figure out h/w crypto support. 895 */ 896 if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP)) 897 ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP; 898 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB)) 899 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB; 900 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM)) 901 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM; 902 if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP)) 903 ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP; 904 if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) { 905 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP; 906 /* 907 * Check if h/w does the MIC and/or whether the 908 * separate key cache entries are required to 909 * handle both tx+rx MIC keys. 910 */ 911 if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC)) 912 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 913 /* 914 * If the h/w supports storing tx+rx MIC keys 915 * in one cache slot automatically enable use. 916 */ 917 if (ath_hal_hastkipsplit(ah) || 918 !ath_hal_settkipsplit(ah, AH_FALSE)) 919 sc->sc_splitmic = 1; 920 /* 921 * If the h/w can do TKIP MIC together with WME then 922 * we use it; otherwise we force the MIC to be done 923 * in software by the net80211 layer. 924 */ 925 if (ath_hal_haswmetkipmic(ah)) 926 sc->sc_wmetkipmic = 1; 927 } 928 sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR); 929 /* 930 * Check for multicast key search support. 931 */ 932 if (ath_hal_hasmcastkeysearch(sc->sc_ah) && 933 !ath_hal_getmcastkeysearch(sc->sc_ah)) { 934 ath_hal_setmcastkeysearch(sc->sc_ah, 1); 935 } 936 sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah); 937 /* 938 * Mark key cache slots associated with global keys 939 * as in use. If we knew TKIP was not to be used we 940 * could leave the +32, +64, and +32+64 slots free. 941 */ 942 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 943 setbit(sc->sc_keymap, i); 944 setbit(sc->sc_keymap, i+64); 945 if (sc->sc_splitmic) { 946 setbit(sc->sc_keymap, i+32); 947 setbit(sc->sc_keymap, i+32+64); 948 } 949 } 950 /* 951 * TPC support can be done either with a global cap or 952 * per-packet support. The latter is not available on 953 * all parts. 
We're a bit pedantic here as all parts
 * support a global cap.
 */
	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for misc other capabilities.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;
	sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
	sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
	sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
	sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
	sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
	sc->sc_hasenforcetxop = ath_hal_hasenforcetxop(ah);
	sc->sc_rx_lnamixer = ath_hal_hasrxlnamixer(ah);
	sc->sc_hasdivcomb = ath_hal_hasdivantcomb(ah);

	if (ath_hal_hasfastframes(ah))
		ic->ic_caps |= IEEE80211_C_FF;
	wmodes = ath_hal_getwirelessmodes(ah);
	if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
		ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
	if (ath_hal_macversion(ah) > 0x78) {
		ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
		ic->ic_tdma_update = ath_tdma_update;
	}
#endif

	/*
	 * TODO: enforce that at least this many frames are available
	 * in the txbuf list before allowing data frames (raw or
	 * otherwise) to be transmitted.
	 */
	sc->sc_txq_data_minfree = 10;
	/*
	 * Leave this as default to maintain legacy behaviour.
	 * Shortening the cabq/mcastq may end up causing some
	 * undesirable behaviour.
	 */
	sc->sc_txq_mcastq_maxdepth = ath_txbuf;

	/*
	 * How deep can the node software TX queue get whilst it's asleep.
	 */
	sc->sc_txq_node_psq_maxdepth = 16;

	/*
	 * Default the maximum node queue depth to 1/4 of the TX buffers,
	 * or 64, whichever is larger.
	 */
	sc->sc_txq_node_maxdepth = MAX(64, ath_txbuf / 4);

	/* Enable CABQ by default */
	sc->sc_cabq_enable = 1;

	/*
	 * Allow the TX and RX chainmasks to be overridden by
	 * environment variables and/or device.hints.
	 *
	 * This must be done early - before the hardware is
	 * calibrated or before the 802.11n stream calculation
	 * is done.
	 */
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "rx_chainmask",
	    &rx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n",
		    rx_chainmask);
		(void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask);
	}
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "tx_chainmask",
	    &tx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n",
		    tx_chainmask);
		(void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask);
	}

	/*
	 * Query the TX/RX chainmask configuration.
	 *
	 * This is only relevant for 11n devices.
	 */
	ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
	ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);

	/*
	 * Disable MRR with protected frames by default.
	 * Only 802.11n series NICs can handle this.
	 */
	sc->sc_mrrprot = 0;	/* XXX should be a capability */

	/*
	 * Query the enterprise mode information from the HAL.
1057 */ 1058 if (ath_hal_getcapability(ah, HAL_CAP_ENTERPRISE_MODE, 0, 1059 &sc->sc_ent_cfg) == HAL_OK) 1060 sc->sc_use_ent = 1; 1061 1062#ifdef ATH_ENABLE_11N 1063 /* 1064 * Query HT capabilities 1065 */ 1066 if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK && 1067 (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) { 1068 uint32_t rxs, txs; 1069 uint32_t ldpc; 1070 1071 device_printf(sc->sc_dev, "[HT] enabling HT modes\n"); 1072 1073 sc->sc_mrrprot = 1; /* XXX should be a capability */ 1074 1075 ic->ic_htcaps = IEEE80211_HTC_HT /* HT operation */ 1076 | IEEE80211_HTC_AMPDU /* A-MPDU tx/rx */ 1077 | IEEE80211_HTC_AMSDU /* A-MSDU tx/rx */ 1078 | IEEE80211_HTCAP_MAXAMSDU_3839 1079 /* max A-MSDU length */ 1080 | IEEE80211_HTCAP_SMPS_OFF; /* SM power save off */ 1081 1082 /* 1083 * Enable short-GI for HT20 only if the hardware 1084 * advertises support. 1085 * Notably, anything earlier than the AR9287 doesn't. 1086 */ 1087 if ((ath_hal_getcapability(ah, 1088 HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) && 1089 (wmodes & HAL_MODE_HT20)) { 1090 device_printf(sc->sc_dev, 1091 "[HT] enabling short-GI in 20MHz mode\n"); 1092 ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20; 1093 } 1094 1095 if (wmodes & HAL_MODE_HT40) 1096 ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40 1097 | IEEE80211_HTCAP_SHORTGI40; 1098 1099 /* 1100 * TX/RX streams need to be taken into account when 1101 * negotiating which MCS rates it'll receive and 1102 * what MCS rates are available for TX. 1103 */ 1104 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs); 1105 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs); 1106 ic->ic_txstream = txs; 1107 ic->ic_rxstream = rxs; 1108 1109 /* 1110 * Setup TX and RX STBC based on what the HAL allows and 1111 * the currently configured chainmask set. 1112 * Ie - don't enable STBC TX if only one chain is enabled. 1113 * STBC RX is fine on a single RX chain; it just won't 1114 * provide any real benefit. 1115 */ 1116 if (ath_hal_getcapability(ah, HAL_CAP_RX_STBC, 0, 1117 NULL) == HAL_OK) { 1118 sc->sc_rx_stbc = 1; 1119 device_printf(sc->sc_dev, 1120 "[HT] 1 stream STBC receive enabled\n"); 1121 ic->ic_htcaps |= IEEE80211_HTCAP_RXSTBC_1STREAM; 1122 } 1123 if (txs > 1 && ath_hal_getcapability(ah, HAL_CAP_TX_STBC, 0, 1124 NULL) == HAL_OK) { 1125 sc->sc_tx_stbc = 1; 1126 device_printf(sc->sc_dev, 1127 "[HT] 1 stream STBC transmit enabled\n"); 1128 ic->ic_htcaps |= IEEE80211_HTCAP_TXSTBC; 1129 } 1130 1131 (void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1, 1132 &sc->sc_rts_aggr_limit); 1133 if (sc->sc_rts_aggr_limit != (64 * 1024)) 1134 device_printf(sc->sc_dev, 1135 "[HT] RTS aggregates limited to %d KiB\n", 1136 sc->sc_rts_aggr_limit / 1024); 1137 1138 /* 1139 * LDPC 1140 */ 1141 if ((ath_hal_getcapability(ah, HAL_CAP_LDPC, 0, &ldpc)) 1142 == HAL_OK && (ldpc == 1)) { 1143 sc->sc_has_ldpc = 1; 1144 device_printf(sc->sc_dev, 1145 "[HT] LDPC transmit/receive enabled\n"); 1146 ic->ic_htcaps |= IEEE80211_HTCAP_LDPC; 1147 } 1148 1149 1150 device_printf(sc->sc_dev, 1151 "[HT] %d RX streams; %d TX streams\n", rxs, txs); 1152 } 1153#endif 1154 1155 /* 1156 * Initial aggregation settings. 1157 */ 1158 sc->sc_hwq_limit_aggr = ATH_AGGR_MIN_QDEPTH; 1159 sc->sc_hwq_limit_nonaggr = ATH_NONAGGR_MIN_QDEPTH; 1160 sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW; 1161 sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH; 1162 sc->sc_aggr_limit = ATH_AGGR_MAXSIZE; 1163 sc->sc_delim_min_pad = 0; 1164 1165 /* 1166 * Check if the hardware requires PCI register serialisation. 1167 * Some of the Owl based MACs require this. 
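 * (Note the work-around is only enabled on SMP systems; see the
 * mp_ncpus check below.)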
1168 */ 1169 if (mp_ncpus > 1 && 1170 ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR, 1171 0, NULL) == HAL_OK) { 1172 sc->sc_ah->ah_config.ah_serialise_reg_war = 1; 1173 device_printf(sc->sc_dev, 1174 "Enabling register serialisation\n"); 1175 } 1176 1177 /* 1178 * Initialise the deferred completed RX buffer list. 1179 */ 1180 TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]); 1181 TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]); 1182 1183 /* 1184 * Indicate we need the 802.11 header padded to a 1185 * 32-bit boundary for 4-address and QoS frames. 1186 */ 1187 ic->ic_flags |= IEEE80211_F_DATAPAD; 1188 1189 /* 1190 * Query the hal about antenna support. 1191 */ 1192 sc->sc_defant = ath_hal_getdefantenna(ah); 1193 1194 /* 1195 * Not all chips have the VEOL support we want to 1196 * use with IBSS beacons; check here for it. 1197 */ 1198 sc->sc_hasveol = ath_hal_hasveol(ah); 1199 1200 /* get mac address from kenv first, then hardware */ 1201 if (ath_fetch_mac_kenv(sc, ic->ic_macaddr) == 0) { 1202 /* Tell the HAL now about the new MAC */ 1203 ath_hal_setmac(ah, ic->ic_macaddr); 1204 } else { 1205 ath_hal_getmac(ah, ic->ic_macaddr); 1206 } 1207 1208 if (sc->sc_hasbmask) 1209 ath_hal_getbssidmask(ah, sc->sc_hwbssidmask); 1210 1211 /* NB: used to size node table key mapping array */ 1212 ic->ic_max_keyix = sc->sc_keymax; 1213 /* call MI attach routine. */ 1214 ieee80211_ifattach(ic); 1215 ic->ic_setregdomain = ath_setregdomain; 1216 ic->ic_getradiocaps = ath_getradiocaps; 1217 sc->sc_opmode = HAL_M_STA; 1218 1219 /* override default methods */ 1220 ic->ic_ioctl = ath_ioctl; 1221 ic->ic_parent = ath_parent; 1222 ic->ic_transmit = ath_transmit; 1223 ic->ic_newassoc = ath_newassoc; 1224 ic->ic_updateslot = ath_updateslot; 1225 ic->ic_wme.wme_update = ath_wme_update; 1226 ic->ic_vap_create = ath_vap_create; 1227 ic->ic_vap_delete = ath_vap_delete; 1228 ic->ic_raw_xmit = ath_raw_xmit; 1229 ic->ic_update_mcast = ath_update_mcast; 1230 ic->ic_update_promisc = ath_update_promisc; 1231 ic->ic_node_alloc = ath_node_alloc; 1232 sc->sc_node_free = ic->ic_node_free; 1233 ic->ic_node_free = ath_node_free; 1234 sc->sc_node_cleanup = ic->ic_node_cleanup; 1235 ic->ic_node_cleanup = ath_node_cleanup; 1236 ic->ic_node_getsignal = ath_node_getsignal; 1237 ic->ic_scan_start = ath_scan_start; 1238 ic->ic_scan_end = ath_scan_end; 1239 ic->ic_set_channel = ath_set_channel; 1240#ifdef ATH_ENABLE_11N 1241 /* 802.11n specific - but just override anyway */ 1242 sc->sc_addba_request = ic->ic_addba_request; 1243 sc->sc_addba_response = ic->ic_addba_response; 1244 sc->sc_addba_stop = ic->ic_addba_stop; 1245 sc->sc_bar_response = ic->ic_bar_response; 1246 sc->sc_addba_response_timeout = ic->ic_addba_response_timeout; 1247 1248 ic->ic_addba_request = ath_addba_request; 1249 ic->ic_addba_response = ath_addba_response; 1250 ic->ic_addba_response_timeout = ath_addba_response_timeout; 1251 ic->ic_addba_stop = ath_addba_stop; 1252 ic->ic_bar_response = ath_bar_response; 1253 1254 ic->ic_update_chw = ath_update_chw; 1255#endif /* ATH_ENABLE_11N */ 1256 1257#ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT 1258 /* 1259 * There's one vendor bitmap entry in the RX radiotap 1260 * header; make sure that's taken into account. 1261 */ 1262 ieee80211_radiotap_attachv(ic, 1263 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0, 1264 ATH_TX_RADIOTAP_PRESENT, 1265 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1, 1266 ATH_RX_RADIOTAP_PRESENT); 1267#else 1268 /* 1269 * No vendor bitmap/extensions are present. 
1270 */ 1271 ieee80211_radiotap_attach(ic, 1272 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 1273 ATH_TX_RADIOTAP_PRESENT, 1274 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1275 ATH_RX_RADIOTAP_PRESENT); 1276#endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */ 1277 1278 /* 1279 * Setup the ALQ logging if required 1280 */ 1281#ifdef ATH_DEBUG_ALQ 1282 if_ath_alq_init(&sc->sc_alq, device_get_nameunit(sc->sc_dev)); 1283 if_ath_alq_setcfg(&sc->sc_alq, 1284 sc->sc_ah->ah_macVersion, 1285 sc->sc_ah->ah_macRev, 1286 sc->sc_ah->ah_phyRev, 1287 sc->sc_ah->ah_magic); 1288#endif 1289 1290 /* 1291 * Setup dynamic sysctl's now that country code and 1292 * regdomain are available from the hal. 1293 */ 1294 ath_sysctlattach(sc); 1295 ath_sysctl_stats_attach(sc); 1296 ath_sysctl_hal_attach(sc); 1297 1298 if (bootverbose) 1299 ieee80211_announce(ic); 1300 ath_announce(sc); 1301 1302 /* 1303 * Put it to sleep for now. 1304 */ 1305 ATH_LOCK(sc); 1306 ath_power_setpower(sc, HAL_PM_FULL_SLEEP); 1307 ATH_UNLOCK(sc); 1308 1309 return 0; 1310bad2: 1311 ath_tx_cleanup(sc); 1312 ath_desc_free(sc); 1313 ath_txdma_teardown(sc); 1314 ath_rxdma_teardown(sc); 1315bad: 1316 if (ah) 1317 ath_hal_detach(ah); 1318 sc->sc_invalid = 1; 1319 return error; 1320} 1321 1322int 1323ath_detach(struct ath_softc *sc) 1324{ 1325 1326 /* 1327 * NB: the order of these is important: 1328 * o stop the chip so no more interrupts will fire 1329 * o call the 802.11 layer before detaching the hal to 1330 * insure callbacks into the driver to delete global 1331 * key cache entries can be handled 1332 * o free the taskqueue which drains any pending tasks 1333 * o reclaim the tx queue data structures after calling 1334 * the 802.11 layer as we'll get called back to reclaim 1335 * node state and potentially want to use them 1336 * o to cleanup the tx queues the hal is called, so detach 1337 * it last 1338 * Other than that, it's straightforward... 1339 */ 1340 1341 /* 1342 * XXX Wake the hardware up first. ath_stop() will still 1343 * wake it up first, but I'd rather do it here just to 1344 * ensure it's awake. 1345 */ 1346 ATH_LOCK(sc); 1347 ath_power_set_power_state(sc, HAL_PM_AWAKE); 1348 ath_power_setpower(sc, HAL_PM_AWAKE); 1349 1350 /* 1351 * Stop things cleanly. 1352 */ 1353 ath_stop(sc); 1354 ATH_UNLOCK(sc); 1355 1356 ieee80211_ifdetach(&sc->sc_ic); 1357 taskqueue_free(sc->sc_tq); 1358#ifdef ATH_TX99_DIAG 1359 if (sc->sc_tx99 != NULL) 1360 sc->sc_tx99->detach(sc->sc_tx99); 1361#endif 1362 ath_rate_detach(sc->sc_rc); 1363#ifdef ATH_DEBUG_ALQ 1364 if_ath_alq_tidyup(&sc->sc_alq); 1365#endif 1366 ath_lna_div_detach(sc); 1367 ath_btcoex_detach(sc); 1368 ath_spectral_detach(sc); 1369 ath_dfs_detach(sc); 1370 ath_desc_free(sc); 1371 ath_txdma_teardown(sc); 1372 ath_rxdma_teardown(sc); 1373 ath_tx_cleanup(sc); 1374 ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */ 1375 1376 return 0; 1377} 1378 1379/* 1380 * MAC address handling for multiple BSS on the same radio. 1381 * The first vap uses the MAC address from the EEPROM. For 1382 * subsequent vap's we set the U/L bit (bit 1) in the MAC 1383 * address and use the next six bits as an index. 
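 * For example, with index 1 a first octet of 0x00 becomes 0x06
 * (0x02 for the U/L bit plus 1 << 2); see assign_address() below.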
1384 */ 1385static void 1386assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone) 1387{ 1388 int i; 1389 1390 if (clone && sc->sc_hasbmask) { 1391 /* NB: we only do this if h/w supports multiple bssid */ 1392 for (i = 0; i < 8; i++) 1393 if ((sc->sc_bssidmask & (1<<i)) == 0) 1394 break; 1395 if (i != 0) 1396 mac[0] |= (i << 2)|0x2; 1397 } else 1398 i = 0; 1399 sc->sc_bssidmask |= 1<<i; 1400 sc->sc_hwbssidmask[0] &= ~mac[0]; 1401 if (i == 0) 1402 sc->sc_nbssid0++; 1403} 1404 1405static void 1406reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN]) 1407{ 1408 int i = mac[0] >> 2; 1409 uint8_t mask; 1410 1411 if (i != 0 || --sc->sc_nbssid0 == 0) { 1412 sc->sc_bssidmask &= ~(1<<i); 1413 /* recalculate bssid mask from remaining addresses */ 1414 mask = 0xff; 1415 for (i = 1; i < 8; i++) 1416 if (sc->sc_bssidmask & (1<<i)) 1417 mask &= ~((i<<2)|0x2); 1418 sc->sc_hwbssidmask[0] |= mask; 1419 } 1420} 1421 1422/* 1423 * Assign a beacon xmit slot. We try to space out 1424 * assignments so when beacons are staggered the 1425 * traffic coming out of the cab q has maximal time 1426 * to go out before the next beacon is scheduled. 1427 */ 1428static int 1429assign_bslot(struct ath_softc *sc) 1430{ 1431 u_int slot, free; 1432 1433 free = 0; 1434 for (slot = 0; slot < ATH_BCBUF; slot++) 1435 if (sc->sc_bslot[slot] == NULL) { 1436 if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL && 1437 sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL) 1438 return slot; 1439 free = slot; 1440 /* NB: keep looking for a double slot */ 1441 } 1442 return free; 1443} 1444 1445static struct ieee80211vap * 1446ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 1447 enum ieee80211_opmode opmode, int flags, 1448 const uint8_t bssid[IEEE80211_ADDR_LEN], 1449 const uint8_t mac0[IEEE80211_ADDR_LEN]) 1450{ 1451 struct ath_softc *sc = ic->ic_softc; 1452 struct ath_vap *avp; 1453 struct ieee80211vap *vap; 1454 uint8_t mac[IEEE80211_ADDR_LEN]; 1455 int needbeacon, error; 1456 enum ieee80211_opmode ic_opmode; 1457 1458 avp = malloc(sizeof(struct ath_vap), M_80211_VAP, M_WAITOK | M_ZERO); 1459 needbeacon = 0; 1460 IEEE80211_ADDR_COPY(mac, mac0); 1461 1462 ATH_LOCK(sc); 1463 ic_opmode = opmode; /* default to opmode of new vap */ 1464 switch (opmode) { 1465 case IEEE80211_M_STA: 1466 if (sc->sc_nstavaps != 0) { /* XXX only 1 for now */ 1467 device_printf(sc->sc_dev, "only 1 sta vap supported\n"); 1468 goto bad; 1469 } 1470 if (sc->sc_nvaps) { 1471 /* 1472 * With multiple vaps we must fall back 1473 * to s/w beacon miss handling. 1474 */ 1475 flags |= IEEE80211_CLONE_NOBEACONS; 1476 } 1477 if (flags & IEEE80211_CLONE_NOBEACONS) { 1478 /* 1479 * Station mode w/o beacons are implemented w/ AP mode. 1480 */ 1481 ic_opmode = IEEE80211_M_HOSTAP; 1482 } 1483 break; 1484 case IEEE80211_M_IBSS: 1485 if (sc->sc_nvaps != 0) { /* XXX only 1 for now */ 1486 device_printf(sc->sc_dev, 1487 "only 1 ibss vap supported\n"); 1488 goto bad; 1489 } 1490 needbeacon = 1; 1491 break; 1492 case IEEE80211_M_AHDEMO: 1493#ifdef IEEE80211_SUPPORT_TDMA 1494 if (flags & IEEE80211_CLONE_TDMA) { 1495 if (sc->sc_nvaps != 0) { 1496 device_printf(sc->sc_dev, 1497 "only 1 tdma vap supported\n"); 1498 goto bad; 1499 } 1500 needbeacon = 1; 1501 flags |= IEEE80211_CLONE_NOBEACONS; 1502 } 1503 /* fall thru... */ 1504#endif 1505 case IEEE80211_M_MONITOR: 1506 if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) { 1507 /* 1508 * Adopt existing mode. 
Adding a monitor or ahdemo 1509 * vap to an existing configuration is of dubious 1510 * value but should be ok. 1511 */ 1512 /* XXX not right for monitor mode */ 1513 ic_opmode = ic->ic_opmode; 1514 } 1515 break; 1516 case IEEE80211_M_HOSTAP: 1517 case IEEE80211_M_MBSS: 1518 needbeacon = 1; 1519 break; 1520 case IEEE80211_M_WDS: 1521 if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) { 1522 device_printf(sc->sc_dev, 1523 "wds not supported in sta mode\n"); 1524 goto bad; 1525 } 1526 /* 1527 * Silently remove any request for a unique 1528 * bssid; WDS vap's always share the local 1529 * mac address. 1530 */ 1531 flags &= ~IEEE80211_CLONE_BSSID; 1532 if (sc->sc_nvaps == 0) 1533 ic_opmode = IEEE80211_M_HOSTAP; 1534 else 1535 ic_opmode = ic->ic_opmode; 1536 break; 1537 default: 1538 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); 1539 goto bad; 1540 } 1541 /* 1542 * Check that a beacon buffer is available; the code below assumes it. 1543 */ 1544 if (needbeacon & TAILQ_EMPTY(&sc->sc_bbuf)) { 1545 device_printf(sc->sc_dev, "no beacon buffer available\n"); 1546 goto bad; 1547 } 1548 1549 /* STA, AHDEMO? */ 1550 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) { 1551 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID); 1552 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); 1553 } 1554 1555 vap = &avp->av_vap; 1556 /* XXX can't hold mutex across if_alloc */ 1557 ATH_UNLOCK(sc); 1558 error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); 1559 ATH_LOCK(sc); 1560 if (error != 0) { 1561 device_printf(sc->sc_dev, "%s: error %d creating vap\n", 1562 __func__, error); 1563 goto bad2; 1564 } 1565 1566 /* h/w crypto support */ 1567 vap->iv_key_alloc = ath_key_alloc; 1568 vap->iv_key_delete = ath_key_delete; 1569 vap->iv_key_set = ath_key_set; 1570 vap->iv_key_update_begin = ath_key_update_begin; 1571 vap->iv_key_update_end = ath_key_update_end; 1572 1573 /* override various methods */ 1574 avp->av_recv_mgmt = vap->iv_recv_mgmt; 1575 vap->iv_recv_mgmt = ath_recv_mgmt; 1576 vap->iv_reset = ath_reset_vap; 1577 vap->iv_update_beacon = ath_beacon_update; 1578 avp->av_newstate = vap->iv_newstate; 1579 vap->iv_newstate = ath_newstate; 1580 avp->av_bmiss = vap->iv_bmiss; 1581 vap->iv_bmiss = ath_bmiss_vap; 1582 1583 avp->av_node_ps = vap->iv_node_ps; 1584 vap->iv_node_ps = ath_node_powersave; 1585 1586 avp->av_set_tim = vap->iv_set_tim; 1587 vap->iv_set_tim = ath_node_set_tim; 1588 1589 avp->av_recv_pspoll = vap->iv_recv_pspoll; 1590 vap->iv_recv_pspoll = ath_node_recv_pspoll; 1591 1592 /* Set default parameters */ 1593 1594 /* 1595 * Anything earlier than some AR9300 series MACs don't 1596 * support a smaller MPDU density. 1597 */ 1598 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8; 1599 /* 1600 * All NICs can handle the maximum size, however 1601 * AR5416 based MACs can only TX aggregates w/ RTS 1602 * protection when the total aggregate size is <= 8k. 1603 * However, for now that's enforced by the TX path. 1604 */ 1605 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K; 1606 1607 avp->av_bslot = -1; 1608 if (needbeacon) { 1609 /* 1610 * Allocate beacon state and setup the q for buffered 1611 * multicast frames. We know a beacon buffer is 1612 * available because we checked above. 1613 */ 1614 avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf); 1615 TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list); 1616 if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) { 1617 /* 1618 * Assign the vap to a beacon xmit slot. 
As above 1619 * this cannot fail to find a free one. 1620 */ 1621 avp->av_bslot = assign_bslot(sc); 1622 KASSERT(sc->sc_bslot[avp->av_bslot] == NULL, 1623 ("beacon slot %u not empty", avp->av_bslot)); 1624 sc->sc_bslot[avp->av_bslot] = vap; 1625 sc->sc_nbcnvaps++; 1626 } 1627 if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) { 1628 /* 1629 * Multple vaps are to transmit beacons and we 1630 * have h/w support for TSF adjusting; enable 1631 * use of staggered beacons. 1632 */ 1633 sc->sc_stagbeacons = 1; 1634 } 1635 ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ); 1636 } 1637 1638 ic->ic_opmode = ic_opmode; 1639 if (opmode != IEEE80211_M_WDS) { 1640 sc->sc_nvaps++; 1641 if (opmode == IEEE80211_M_STA) 1642 sc->sc_nstavaps++; 1643 if (opmode == IEEE80211_M_MBSS) 1644 sc->sc_nmeshvaps++; 1645 } 1646 switch (ic_opmode) { 1647 case IEEE80211_M_IBSS: 1648 sc->sc_opmode = HAL_M_IBSS; 1649 break; 1650 case IEEE80211_M_STA: 1651 sc->sc_opmode = HAL_M_STA; 1652 break; 1653 case IEEE80211_M_AHDEMO: 1654#ifdef IEEE80211_SUPPORT_TDMA 1655 if (vap->iv_caps & IEEE80211_C_TDMA) { 1656 sc->sc_tdma = 1; 1657 /* NB: disable tsf adjust */ 1658 sc->sc_stagbeacons = 0; 1659 } 1660 /* 1661 * NB: adhoc demo mode is a pseudo mode; to the hal it's 1662 * just ap mode. 1663 */ 1664 /* fall thru... */ 1665#endif 1666 case IEEE80211_M_HOSTAP: 1667 case IEEE80211_M_MBSS: 1668 sc->sc_opmode = HAL_M_HOSTAP; 1669 break; 1670 case IEEE80211_M_MONITOR: 1671 sc->sc_opmode = HAL_M_MONITOR; 1672 break; 1673 default: 1674 /* XXX should not happen */ 1675 break; 1676 } 1677 if (sc->sc_hastsfadd) { 1678 /* 1679 * Configure whether or not TSF adjust should be done. 1680 */ 1681 ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons); 1682 } 1683 if (flags & IEEE80211_CLONE_NOBEACONS) { 1684 /* 1685 * Enable s/w beacon miss handling. 1686 */ 1687 sc->sc_swbmiss = 1; 1688 } 1689 ATH_UNLOCK(sc); 1690 1691 /* complete setup */ 1692 ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status, 1693 mac); 1694 return vap; 1695bad2: 1696 reclaim_address(sc, mac); 1697 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); 1698bad: 1699 free(avp, M_80211_VAP); 1700 ATH_UNLOCK(sc); 1701 return NULL; 1702} 1703 1704static void 1705ath_vap_delete(struct ieee80211vap *vap) 1706{ 1707 struct ieee80211com *ic = vap->iv_ic; 1708 struct ath_softc *sc = ic->ic_softc; 1709 struct ath_hal *ah = sc->sc_ah; 1710 struct ath_vap *avp = ATH_VAP(vap); 1711 1712 ATH_LOCK(sc); 1713 ath_power_set_power_state(sc, HAL_PM_AWAKE); 1714 ATH_UNLOCK(sc); 1715 1716 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); 1717 if (sc->sc_running) { 1718 /* 1719 * Quiesce the hardware while we remove the vap. In 1720 * particular we need to reclaim all references to 1721 * the vap state by any frames pending on the tx queues. 1722 */ 1723 ath_hal_intrset(ah, 0); /* disable interrupts */ 1724 /* XXX Do all frames from all vaps/nodes need draining here? */ 1725 ath_stoprecv(sc, 1); /* stop recv side */ 1726 ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */ 1727 } 1728 1729 /* .. leave the hardware awake for now. */ 1730 1731 ieee80211_vap_detach(vap); 1732 1733 /* 1734 * XXX Danger Will Robinson! Danger! 1735 * 1736 * Because ieee80211_vap_detach() can queue a frame (the station 1737 * diassociate message?) after we've drained the TXQ and 1738 * flushed the software TXQ, we will end up with a frame queued 1739 * to a node whose vap is about to be freed. 1740 * 1741 * To work around this, flush the hardware/software again. 
1742 * This may be racy - the ath task may be running and the packet 1743 * may be being scheduled between sw->hw txq. Tsk. 1744 * 1745 * TODO: figure out why a new node gets allocated somewhere around 1746 * here (after the ath_tx_swq() call; and after an ath_stop() 1747 * call!) 1748 */ 1749 1750 ath_draintxq(sc, ATH_RESET_DEFAULT); 1751 1752 ATH_LOCK(sc); 1753 /* 1754 * Reclaim beacon state. Note this must be done before 1755 * the vap instance is reclaimed as we may have a reference 1756 * to it in the buffer for the beacon frame. 1757 */ 1758 if (avp->av_bcbuf != NULL) { 1759 if (avp->av_bslot != -1) { 1760 sc->sc_bslot[avp->av_bslot] = NULL; 1761 sc->sc_nbcnvaps--; 1762 } 1763 ath_beacon_return(sc, avp->av_bcbuf); 1764 avp->av_bcbuf = NULL; 1765 if (sc->sc_nbcnvaps == 0) { 1766 sc->sc_stagbeacons = 0; 1767 if (sc->sc_hastsfadd) 1768 ath_hal_settsfadjust(sc->sc_ah, 0); 1769 } 1770 /* 1771 * Reclaim any pending mcast frames for the vap. 1772 */ 1773 ath_tx_draintxq(sc, &avp->av_mcastq); 1774 } 1775 /* 1776 * Update bookkeeping. 1777 */ 1778 if (vap->iv_opmode == IEEE80211_M_STA) { 1779 sc->sc_nstavaps--; 1780 if (sc->sc_nstavaps == 0 && sc->sc_swbmiss) 1781 sc->sc_swbmiss = 0; 1782 } else if (vap->iv_opmode == IEEE80211_M_HOSTAP || 1783 vap->iv_opmode == IEEE80211_M_MBSS) { 1784 reclaim_address(sc, vap->iv_myaddr); 1785 ath_hal_setbssidmask(ah, sc->sc_hwbssidmask); 1786 if (vap->iv_opmode == IEEE80211_M_MBSS) 1787 sc->sc_nmeshvaps--; 1788 } 1789 if (vap->iv_opmode != IEEE80211_M_WDS) 1790 sc->sc_nvaps--; 1791#ifdef IEEE80211_SUPPORT_TDMA 1792 /* TDMA operation ceases when the last vap is destroyed */ 1793 if (sc->sc_tdma && sc->sc_nvaps == 0) { 1794 sc->sc_tdma = 0; 1795 sc->sc_swbmiss = 0; 1796 } 1797#endif 1798 free(avp, M_80211_VAP); 1799 1800 if (sc->sc_running) { 1801 /* 1802 * Restart rx+tx machines if still running (RUNNING will 1803 * be reset if we just destroyed the last vap). 1804 */ 1805 if (ath_startrecv(sc) != 0) 1806 device_printf(sc->sc_dev, 1807 "%s: unable to restart recv logic\n", __func__); 1808 if (sc->sc_beacons) { /* restart beacons */ 1809#ifdef IEEE80211_SUPPORT_TDMA 1810 if (sc->sc_tdma) 1811 ath_tdma_config(sc, NULL); 1812 else 1813#endif 1814 ath_beacon_config(sc, NULL); 1815 } 1816 ath_hal_intrset(ah, sc->sc_imask); 1817 } 1818 1819 /* Ok, let the hardware asleep. */ 1820 ath_power_restore_power_state(sc); 1821 ATH_UNLOCK(sc); 1822} 1823 1824void 1825ath_suspend(struct ath_softc *sc) 1826{ 1827 struct ieee80211com *ic = &sc->sc_ic; 1828 1829 sc->sc_resume_up = ic->ic_nrunning != 0; 1830 1831 ieee80211_suspend_all(ic); 1832 /* 1833 * NB: don't worry about putting the chip in low power 1834 * mode; pci will power off our socket on suspend and 1835 * CardBus detaches the device. 1836 * 1837 * XXX TODO: well, that's great, except for non-cardbus 1838 * devices! 1839 */ 1840 1841 /* 1842 * XXX This doesn't wait until all pending taskqueue 1843 * items and parallel transmit/receive/other threads 1844 * are running! 1845 */ 1846 ath_hal_intrset(sc->sc_ah, 0); 1847 taskqueue_block(sc->sc_tq); 1848 1849 ATH_LOCK(sc); 1850 callout_stop(&sc->sc_cal_ch); 1851 ATH_UNLOCK(sc); 1852 1853 /* 1854 * XXX ensure sc_invalid is 1 1855 */ 1856 1857 /* Disable the PCIe PHY, complete with workarounds */ 1858 ath_hal_enablepcie(sc->sc_ah, 1, 1); 1859} 1860 1861/* 1862 * Reset the key cache since some parts do not reset the 1863 * contents on resume. First we clear all entries, then 1864 * re-load keys that the 802.11 layer assumes are setup 1865 * in h/w. 
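 * (The re-load is driven by ieee80211_crypto_reload_keys(), which
 * asks net80211 to push each vap's keys back into the hardware via
 * the iv_key_set method, i.e. ath_key_set() here.)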
1866 */ 1867static void 1868ath_reset_keycache(struct ath_softc *sc) 1869{ 1870 struct ieee80211com *ic = &sc->sc_ic; 1871 struct ath_hal *ah = sc->sc_ah; 1872 int i; 1873 1874 ATH_LOCK(sc); 1875 ath_power_set_power_state(sc, HAL_PM_AWAKE); 1876 for (i = 0; i < sc->sc_keymax; i++) 1877 ath_hal_keyreset(ah, i); 1878 ath_power_restore_power_state(sc); 1879 ATH_UNLOCK(sc); 1880 ieee80211_crypto_reload_keys(ic); 1881} 1882 1883/* 1884 * Fetch the current chainmask configuration based on the current 1885 * operating channel and options. 1886 */ 1887static void 1888ath_update_chainmasks(struct ath_softc *sc, struct ieee80211_channel *chan) 1889{ 1890 1891 /* 1892 * Set TX chainmask to the currently configured chainmask; 1893 * the TX chainmask depends upon the current operating mode. 1894 */ 1895 sc->sc_cur_rxchainmask = sc->sc_rxchainmask; 1896 if (IEEE80211_IS_CHAN_HT(chan)) { 1897 sc->sc_cur_txchainmask = sc->sc_txchainmask; 1898 } else { 1899 sc->sc_cur_txchainmask = 1; 1900 } 1901 1902 DPRINTF(sc, ATH_DEBUG_RESET, 1903 "%s: TX chainmask is now 0x%x, RX is now 0x%x\n", 1904 __func__, 1905 sc->sc_cur_txchainmask, 1906 sc->sc_cur_rxchainmask); 1907} 1908 1909void 1910ath_resume(struct ath_softc *sc) 1911{ 1912 struct ieee80211com *ic = &sc->sc_ic; 1913 struct ath_hal *ah = sc->sc_ah; 1914 HAL_STATUS status; 1915 1916 ath_hal_enablepcie(ah, 0, 0); 1917 1918 /* 1919 * Must reset the chip before we reload the 1920 * keycache as we were powered down on suspend. 1921 */ 1922 ath_update_chainmasks(sc, 1923 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan); 1924 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, 1925 sc->sc_cur_rxchainmask); 1926 1927 /* Ensure we set the current power state to on */ 1928 ATH_LOCK(sc); 1929 ath_power_setselfgen(sc, HAL_PM_AWAKE); 1930 ath_power_set_power_state(sc, HAL_PM_AWAKE); 1931 ath_power_setpower(sc, HAL_PM_AWAKE); 1932 ATH_UNLOCK(sc); 1933 1934 ath_hal_reset(ah, sc->sc_opmode, 1935 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan, 1936 AH_FALSE, HAL_RESET_NORMAL, &status); 1937 ath_reset_keycache(sc); 1938 1939 ATH_RX_LOCK(sc); 1940 sc->sc_rx_stopped = 1; 1941 sc->sc_rx_resetted = 1; 1942 ATH_RX_UNLOCK(sc); 1943 1944 /* Let DFS at it in case it's a DFS channel */ 1945 ath_dfs_radar_enable(sc, ic->ic_curchan); 1946 1947 /* Let spectral at in case spectral is enabled */ 1948 ath_spectral_enable(sc, ic->ic_curchan); 1949 1950 /* 1951 * Let bluetooth coexistence at in case it's needed for this channel 1952 */ 1953 ath_btcoex_enable(sc, ic->ic_curchan); 1954 1955 /* 1956 * If we're doing TDMA, enforce the TXOP limitation for chips that 1957 * support it. 1958 */ 1959 if (sc->sc_hasenforcetxop && sc->sc_tdma) 1960 ath_hal_setenforcetxop(sc->sc_ah, 1); 1961 else 1962 ath_hal_setenforcetxop(sc->sc_ah, 0); 1963 1964 /* Restore the LED configuration */ 1965 ath_led_config(sc); 1966 ath_hal_setledstate(ah, HAL_LED_INIT); 1967 1968 if (sc->sc_resume_up) 1969 ieee80211_resume_all(ic); 1970 1971 ATH_LOCK(sc); 1972 ath_power_restore_power_state(sc); 1973 ATH_UNLOCK(sc); 1974 1975 /* XXX beacons ? */ 1976} 1977 1978void 1979ath_shutdown(struct ath_softc *sc) 1980{ 1981 1982 ATH_LOCK(sc); 1983 ath_stop(sc); 1984 ATH_UNLOCK(sc); 1985 /* NB: no point powering down chip as we're about to reboot */ 1986} 1987 1988/* 1989 * Interrupt handler. Most of the actual processing is deferred. 
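 *
 * The heavy lifting (TX completion, beacon miss and fatal errors) is
 * handed to the sc_tq taskqueue; only time-critical work such as SWBA
 * beacon transmission is done directly in interrupt context.  For
 * reference, a sketch of how a bus front-end typically hooks this
 * handler up; the psc field names here are assumptions for
 * illustration:
 */
#if 0
	/* In the bus attach routine, after allocating the IRQ resource: */
	if (bus_setup_intr(dev, psc->sc_irq,
	    INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ath_intr, sc, &psc->sc_ih) != 0) {
		device_printf(dev, "could not establish interrupt\n");
		/* ... unwind the attach ... */
	}
#endif
/*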
1990 */ 1991 void 1992 ath_intr(void *arg) 1993 { 1994 struct ath_softc *sc = arg; 1995 struct ath_hal *ah = sc->sc_ah; 1996 HAL_INT status = 0; 1997 uint32_t txqs; 1998 1999 /* 2000 * If we're inside a reset path, just print a warning and 2001 * clear the ISR. The reset routine will finish it for us. 2002 */ 2003 ATH_PCU_LOCK(sc); 2004 if (sc->sc_inreset_cnt) { 2005 HAL_INT status; 2006 ath_hal_getisr(ah, &status); /* clear ISR */ 2007 ath_hal_intrset(ah, 0); /* disable further intr's */ 2008 DPRINTF(sc, ATH_DEBUG_ANY, 2009 "%s: in reset, ignoring: status=0x%x\n", 2010 __func__, status); 2011 ATH_PCU_UNLOCK(sc); 2012 return; 2013 } 2014 2015 if (sc->sc_invalid) { 2016 /* 2017 * The hardware is not ready/present, don't touch anything. 2018 * Note this can happen early on if the IRQ is shared. 2019 */ 2020 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__); 2021 ATH_PCU_UNLOCK(sc); 2022 return; 2023 } 2024 if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */ 2025 ATH_PCU_UNLOCK(sc); 2026 return; 2027 } 2028 2029 ATH_LOCK(sc); 2030 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2031 ATH_UNLOCK(sc); 2032 2033 if (sc->sc_ic.ic_nrunning == 0 && sc->sc_running == 0) { 2034 HAL_INT status; 2035 2036 DPRINTF(sc, ATH_DEBUG_ANY, "%s: ic_nrunning %d sc_running %d\n", 2037 __func__, sc->sc_ic.ic_nrunning, sc->sc_running); 2038 ath_hal_getisr(ah, &status); /* clear ISR */ 2039 ath_hal_intrset(ah, 0); /* disable further intr's */ 2040 ATH_PCU_UNLOCK(sc); 2041 2042 ATH_LOCK(sc); 2043 ath_power_restore_power_state(sc); 2044 ATH_UNLOCK(sc); 2045 return; 2046 } 2047 2048 /* 2049 * Figure out the reason(s) for the interrupt. Note 2050 * that the hal returns a pseudo-ISR that may include 2051 * bits we haven't explicitly enabled so we mask the 2052 * value to ensure we only process bits we requested. 2053 */ 2054 ath_hal_getisr(ah, &status); /* NB: clears ISR too */ 2055 DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status); 2056 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, "ath_intr: mask=0x%.8x", status); 2057#ifdef ATH_DEBUG_ALQ 2058 if_ath_alq_post_intr(&sc->sc_alq, status, ah->ah_intrstate, 2059 ah->ah_syncstate); 2060#endif /* ATH_DEBUG_ALQ */ 2061#ifdef ATH_KTR_INTR_DEBUG 2062 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 5, 2063 "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x", 2064 ah->ah_intrstate[0], 2065 ah->ah_intrstate[1], 2066 ah->ah_intrstate[2], 2067 ah->ah_intrstate[3], 2068 ah->ah_intrstate[6]); 2069#endif 2070 2071 /* Squirrel away SYNC interrupt debugging */ 2072 if (ah->ah_syncstate != 0) { 2073 int i; 2074 for (i = 0; i < 32; i++) 2075 if (ah->ah_syncstate & (1 << i)) 2076 sc->sc_intr_stats.sync_intr[i]++; 2077 } 2078 2079 status &= sc->sc_imask; /* discard unasked for bits */ 2080 2081 /* Short-circuit un-handled interrupts */ 2082 if (status == 0x0) { 2083 ATH_PCU_UNLOCK(sc); 2084 2085 ATH_LOCK(sc); 2086 ath_power_restore_power_state(sc); 2087 ATH_UNLOCK(sc); 2088 2089 return; 2090 } 2091 2092 /* 2093 * Take a note that we're inside the interrupt handler, so 2094 * the reset routines know to wait. 2095 */ 2096 sc->sc_intr_cnt++; 2097 ATH_PCU_UNLOCK(sc); 2098 2099 /* 2100 * Handle the interrupt. We won't run concurrent with the reset 2101 * or channel change routines as they'll wait for sc_intr_cnt 2102 * to be 0 before continuing.
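 *
 * The handshake, in short: this function bumps sc_intr_cnt under
 * ATH_PCU_LOCK and drops it again on the way out, while the reset
 * path bumps sc_inreset_cnt (via ath_reset_grablock()) and then
 * sleeps in ath_txrx_stop_locked() until the in-flight counters
 * reach zero.  Condensed from the code in this file:
 */
#if 0
	/* Interrupt side (this function): */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt) {		/* reset in progress; bail */
		ATH_PCU_UNLOCK(sc);
		return;
	}
	sc->sc_intr_cnt++;
	ATH_PCU_UNLOCK(sc);
	/* ... service the interrupt ... */
	ATH_PCU_LOCK(sc);
	sc->sc_intr_cnt--;
	ATH_PCU_UNLOCK(sc);

	/* Reset side (see ath_reset() below): */
	ATH_PCU_LOCK(sc);
	ath_reset_grablock(sc, 1);		/* sc_inreset_cnt++ */
	ath_hal_intrset(ah, 0);			/* no new interrupts */
	ath_txrx_stop_locked(sc);		/* wait for sc_intr_cnt etc. */
	ATH_PCU_UNLOCK(sc);
#endif
/*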
2103 */ 2104 if (status & HAL_INT_FATAL) { 2105 sc->sc_stats.ast_hardware++; 2106 ath_hal_intrset(ah, 0); /* disable intr's until reset */ 2107 taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask); 2108 } else { 2109 if (status & HAL_INT_SWBA) { 2110 /* 2111 * Software beacon alert--time to send a beacon. 2112 * Handle beacon transmission directly; deferring 2113 * this is too slow to meet timing constraints 2114 * under load. 2115 */ 2116#ifdef IEEE80211_SUPPORT_TDMA 2117 if (sc->sc_tdma) { 2118 if (sc->sc_tdmaswba == 0) { 2119 struct ieee80211com *ic = &sc->sc_ic; 2120 struct ieee80211vap *vap = 2121 TAILQ_FIRST(&ic->ic_vaps); 2122 ath_tdma_beacon_send(sc, vap); 2123 sc->sc_tdmaswba = 2124 vap->iv_tdma->tdma_bintval; 2125 } else 2126 sc->sc_tdmaswba--; 2127 } else 2128#endif 2129 { 2130 ath_beacon_proc(sc, 0); 2131#ifdef IEEE80211_SUPPORT_SUPERG 2132 /* 2133 * Schedule the rx taskq in case there's no 2134 * traffic so any frames held on the staging 2135 * queue are aged and potentially flushed. 2136 */ 2137 sc->sc_rx.recv_sched(sc, 1); 2138#endif 2139 } 2140 } 2141 if (status & HAL_INT_RXEOL) { 2142 int imask; 2143 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXEOL"); 2144 if (! sc->sc_isedma) { 2145 ATH_PCU_LOCK(sc); 2146 /* 2147 * NB: the hardware should re-read the link when 2148 * RXE bit is written, but it doesn't work at 2149 * least on older hardware revs. 2150 */ 2151 sc->sc_stats.ast_rxeol++; 2152 /* 2153 * Disable RXEOL/RXORN - prevent an interrupt 2154 * storm until the PCU logic can be reset. 2155 * In case the interface is reset some other 2156 * way before "sc_kickpcu" is called, don't 2157 * modify sc_imask - that way if it is reset 2158 * by a call to ath_reset() somehow, the 2159 * interrupt mask will be correctly reprogrammed. 2160 */ 2161 imask = sc->sc_imask; 2162 imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN); 2163 ath_hal_intrset(ah, imask); 2164 /* 2165 * Only blank sc_rxlink if we've not yet kicked 2166 * the PCU. 2167 * 2168 * This isn't entirely correct - the correct solution 2169 * would be to have a PCU lock and engage that for 2170 * the duration of the PCU fiddling; which would include 2171 * running the RX process. Otherwise we could end up 2172 * messing up the RX descriptor chain and making the 2173 * RX desc list much shorter. 2174 */ 2175 if (! sc->sc_kickpcu) 2176 sc->sc_rxlink = NULL; 2177 sc->sc_kickpcu = 1; 2178 ATH_PCU_UNLOCK(sc); 2179 } 2180 /* 2181 * Enqueue an RX proc to handle whatever 2182 * is in the RX queue. 2183 * This will then kick the PCU if required. 2184 */ 2185 sc->sc_rx.recv_sched(sc, 1); 2186 } 2187 if (status & HAL_INT_TXURN) { 2188 sc->sc_stats.ast_txurn++; 2189 /* bump tx trigger level */ 2190 ath_hal_updatetxtriglevel(ah, AH_TRUE); 2191 } 2192 /* 2193 * Handle both the legacy and RX EDMA interrupt bits. 2194 * Note that HAL_INT_RXLP is also HAL_INT_RXDESC. 2195 */ 2196 if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) { 2197 sc->sc_stats.ast_rx_intr++; 2198 sc->sc_rx.recv_sched(sc, 1); 2199 } 2200 if (status & HAL_INT_TX) { 2201 sc->sc_stats.ast_tx_intr++; 2202 /* 2203 * Grab all the currently set bits in the HAL txq bitmap 2204 * and blank them. This is the only place we should be 2205 * doing this. 2206 */ 2207 if (! 
sc->sc_isedma) { 2208 ATH_PCU_LOCK(sc); 2209 txqs = 0xffffffff; 2210 ath_hal_gettxintrtxqs(sc->sc_ah, &txqs); 2211 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 3, 2212 "ath_intr: TX; txqs=0x%08x, txq_active was 0x%08x, now 0x%08x", 2213 txqs, 2214 sc->sc_txq_active, 2215 sc->sc_txq_active | txqs); 2216 sc->sc_txq_active |= txqs; 2217 ATH_PCU_UNLOCK(sc); 2218 } 2219 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask); 2220 } 2221 if (status & HAL_INT_BMISS) { 2222 sc->sc_stats.ast_bmiss++; 2223 taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask); 2224 } 2225 if (status & HAL_INT_GTT) 2226 sc->sc_stats.ast_tx_timeout++; 2227 if (status & HAL_INT_CST) 2228 sc->sc_stats.ast_tx_cst++; 2229 if (status & HAL_INT_MIB) { 2230 sc->sc_stats.ast_mib++; 2231 ATH_PCU_LOCK(sc); 2232 /* 2233 * Disable interrupts until we service the MIB 2234 * interrupt; otherwise it will continue to fire. 2235 */ 2236 ath_hal_intrset(ah, 0); 2237 /* 2238 * Let the hal handle the event. We assume it will 2239 * clear whatever condition caused the interrupt. 2240 */ 2241 ath_hal_mibevent(ah, &sc->sc_halstats); 2242 /* 2243 * Don't reset the interrupt if we've just 2244 * kicked the PCU, or we may get a nested 2245 * RXEOL before the rxproc has had a chance 2246 * to run. 2247 */ 2248 if (sc->sc_kickpcu == 0) 2249 ath_hal_intrset(ah, sc->sc_imask); 2250 ATH_PCU_UNLOCK(sc); 2251 } 2252 if (status & HAL_INT_RXORN) { 2253 /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */ 2254 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXORN"); 2255 sc->sc_stats.ast_rxorn++; 2256 } 2257 if (status & HAL_INT_TSFOOR) { 2258 device_printf(sc->sc_dev, "%s: TSFOOR\n", __func__); 2259 sc->sc_syncbeacon = 1; 2260 } 2261 if (status & HAL_INT_MCI) { 2262 ath_btcoex_mci_intr(sc); 2263 } 2264 } 2265 ATH_PCU_LOCK(sc); 2266 sc->sc_intr_cnt--; 2267 ATH_PCU_UNLOCK(sc); 2268 2269 ATH_LOCK(sc); 2270 ath_power_restore_power_state(sc); 2271 ATH_UNLOCK(sc); 2272} 2273 2274static void 2275ath_fatal_proc(void *arg, int pending) 2276{ 2277 struct ath_softc *sc = arg; 2278 u_int32_t *state; 2279 u_int32_t len; 2280 void *sp; 2281 2282 if (sc->sc_invalid) 2283 return; 2284 2285 device_printf(sc->sc_dev, "hardware error; resetting\n"); 2286 /* 2287 * Fatal errors are unrecoverable. Typically these 2288 * are caused by DMA errors. Collect h/w state from 2289 * the hal so we can diagnose what's going on. 2290 */ 2291 if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) { 2292 KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len)); 2293 state = sp; 2294 device_printf(sc->sc_dev, 2295 "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n", state[0], 2296 state[1] , state[2], state[3], state[4], state[5]); 2297 } 2298 ath_reset(sc, ATH_RESET_NOLOSS); 2299} 2300 2301static void 2302ath_bmiss_vap(struct ieee80211vap *vap) 2303{ 2304 struct ath_softc *sc = vap->iv_ic->ic_softc; 2305 2306 /* 2307 * Workaround phantom bmiss interrupts by sanity-checking 2308 * the time of our last rx'd frame. If it is within the 2309 * beacon miss interval then ignore the interrupt. If it's 2310 * truly a bmiss we'll get another interrupt soon and that'll 2311 * be dispatched up for processing. Note this applies only 2312 * for h/w beacon miss events. 2313 */ 2314 2315 /* 2316 * XXX TODO: Just read the TSF during the interrupt path; 2317 * that way we don't have to wake up again just to read it 2318 * again. 
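 *
 * The phantom-bmiss check below works in TSF microseconds: the
 * timeout is iv_bmissthreshold * ni_intval TU, converted to
 * microseconds by the factor of 1024 (1 TU = 1024us).  As a worked
 * example (numbers illustrative only), a threshold of 7 missed
 * beacons with a 100 TU beacon interval gives
 * 7 * 100 * 1024 = 716800us, roughly 0.7 seconds; if a frame was
 * received more recently than that, the bmiss is treated as phantom
 * and ignored.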
2319 */ 2320 ATH_LOCK(sc); 2321 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2322 ATH_UNLOCK(sc); 2323 2324 if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) { 2325 u_int64_t lastrx = sc->sc_lastrx; 2326 u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah); 2327 /* XXX should take a locked ref to iv_bss */ 2328 u_int bmisstimeout = 2329 vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024; 2330 2331 DPRINTF(sc, ATH_DEBUG_BEACON, 2332 "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n", 2333 __func__, (unsigned long long) tsf, 2334 (unsigned long long)(tsf - lastrx), 2335 (unsigned long long) lastrx, bmisstimeout); 2336 2337 if (tsf - lastrx <= bmisstimeout) { 2338 sc->sc_stats.ast_bmiss_phantom++; 2339 2340 ATH_LOCK(sc); 2341 ath_power_restore_power_state(sc); 2342 ATH_UNLOCK(sc); 2343 2344 return; 2345 } 2346 } 2347 2348 /* 2349 * There's no need to keep the hardware awake during the call 2350 * to av_bmiss(). 2351 */ 2352 ATH_LOCK(sc); 2353 ath_power_restore_power_state(sc); 2354 ATH_UNLOCK(sc); 2355 2356 /* 2357 * Attempt to force a beacon resync. 2358 */ 2359 sc->sc_syncbeacon = 1; 2360 2361 ATH_VAP(vap)->av_bmiss(vap); 2362} 2363 2364/* XXX this needs a force wakeup! */ 2365int 2366ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs) 2367{ 2368 uint32_t rsize; 2369 void *sp; 2370 2371 if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize)) 2372 return 0; 2373 KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize)); 2374 *hangs = *(uint32_t *)sp; 2375 return 1; 2376} 2377 2378static void 2379ath_bmiss_proc(void *arg, int pending) 2380{ 2381 struct ath_softc *sc = arg; 2382 uint32_t hangs; 2383 2384 DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending); 2385 2386 ATH_LOCK(sc); 2387 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2388 ATH_UNLOCK(sc); 2389 2390 ath_beacon_miss(sc); 2391 2392 /* 2393 * Do a reset upon any becaon miss event. 2394 * 2395 * It may be a non-recognised RX clear hang which needs a reset 2396 * to clear. 2397 */ 2398 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) { 2399 ath_reset(sc, ATH_RESET_NOLOSS); 2400 device_printf(sc->sc_dev, 2401 "bb hang detected (0x%x), resetting\n", hangs); 2402 } else { 2403 ath_reset(sc, ATH_RESET_NOLOSS); 2404 ieee80211_beacon_miss(&sc->sc_ic); 2405 } 2406 2407 /* Force a beacon resync, in case they've drifted */ 2408 sc->sc_syncbeacon = 1; 2409 2410 ATH_LOCK(sc); 2411 ath_power_restore_power_state(sc); 2412 ATH_UNLOCK(sc); 2413} 2414 2415/* 2416 * Handle TKIP MIC setup to deal hardware that doesn't do MIC 2417 * calcs together with WME. If necessary disable the crypto 2418 * hardware and mark the 802.11 state so keys will be setup 2419 * with the MIC work done in software. 2420 */ 2421static void 2422ath_settkipmic(struct ath_softc *sc) 2423{ 2424 struct ieee80211com *ic = &sc->sc_ic; 2425 2426 if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) { 2427 if (ic->ic_flags & IEEE80211_F_WME) { 2428 ath_hal_settkipmic(sc->sc_ah, AH_FALSE); 2429 ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC; 2430 } else { 2431 ath_hal_settkipmic(sc->sc_ah, AH_TRUE); 2432 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 2433 } 2434 } 2435} 2436 2437static int 2438ath_init(struct ath_softc *sc) 2439{ 2440 struct ieee80211com *ic = &sc->sc_ic; 2441 struct ath_hal *ah = sc->sc_ah; 2442 HAL_STATUS status; 2443 2444 ATH_LOCK_ASSERT(sc); 2445 2446 /* 2447 * Force the sleep state awake. 
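 *
 * The pattern used elsewhere in this file is a paired push/pop: force
 * the chip awake before touching registers, do the work, then restore
 * whatever power state was previously in force.  Condensed:
 */
#if 0
	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	/* ... poke hardware registers ... */

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);
#endif
/*
 * Here (and in ath_resume()) the self-generated and target power
 * states are additionally forced to HAL_PM_AWAKE outright, since the
 * chip is being brought up from scratch.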
2448 */ 2449 ath_power_setselfgen(sc, HAL_PM_AWAKE); 2450 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2451 ath_power_setpower(sc, HAL_PM_AWAKE); 2452 2453 /* 2454 * Stop anything previously setup. This is safe 2455 * whether this is the first time through or not. 2456 */ 2457 ath_stop(sc); 2458 2459 /* 2460 * The basic interface to setting the hardware in a good 2461 * state is ``reset''. On return the hardware is known to 2462 * be powered up and with interrupts disabled. This must 2463 * be followed by initialization of the appropriate bits 2464 * and then setup of the interrupt mask. 2465 */ 2466 ath_settkipmic(sc); 2467 ath_update_chainmasks(sc, ic->ic_curchan); 2468 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, 2469 sc->sc_cur_rxchainmask); 2470 2471 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, 2472 HAL_RESET_NORMAL, &status)) { 2473 device_printf(sc->sc_dev, 2474 "unable to reset hardware; hal status %u\n", status); 2475 return (ENODEV); 2476 } 2477 2478 ATH_RX_LOCK(sc); 2479 sc->sc_rx_stopped = 1; 2480 sc->sc_rx_resetted = 1; 2481 ATH_RX_UNLOCK(sc); 2482 2483 ath_chan_change(sc, ic->ic_curchan); 2484 2485 /* Let DFS at it in case it's a DFS channel */ 2486 ath_dfs_radar_enable(sc, ic->ic_curchan); 2487 2488 /* Let spectral at in case spectral is enabled */ 2489 ath_spectral_enable(sc, ic->ic_curchan); 2490 2491 /* 2492 * Let bluetooth coexistence at in case it's needed for this channel 2493 */ 2494 ath_btcoex_enable(sc, ic->ic_curchan); 2495 2496 /* 2497 * If we're doing TDMA, enforce the TXOP limitation for chips that 2498 * support it. 2499 */ 2500 if (sc->sc_hasenforcetxop && sc->sc_tdma) 2501 ath_hal_setenforcetxop(sc->sc_ah, 1); 2502 else 2503 ath_hal_setenforcetxop(sc->sc_ah, 0); 2504 2505 /* 2506 * Likewise this is set during reset so update 2507 * state cached in the driver. 2508 */ 2509 sc->sc_diversity = ath_hal_getdiversity(ah); 2510 sc->sc_lastlongcal = ticks; 2511 sc->sc_resetcal = 1; 2512 sc->sc_lastcalreset = 0; 2513 sc->sc_lastani = ticks; 2514 sc->sc_lastshortcal = ticks; 2515 sc->sc_doresetcal = AH_FALSE; 2516 /* 2517 * Beacon timers were cleared here; give ath_newstate() 2518 * a hint that the beacon timers should be poked when 2519 * things transition to the RUN state. 2520 */ 2521 sc->sc_beacons = 0; 2522 2523 /* 2524 * Setup the hardware after reset: the key cache 2525 * is filled as needed and the receive engine is 2526 * set going. Frame transmit is handled entirely 2527 * in the frame output path; there's nothing to do 2528 * here except setup the interrupt mask. 2529 */ 2530 if (ath_startrecv(sc) != 0) { 2531 device_printf(sc->sc_dev, "unable to start recv logic\n"); 2532 ath_power_restore_power_state(sc); 2533 return (ENODEV); 2534 } 2535 2536 /* 2537 * Enable interrupts. 2538 */ 2539 sc->sc_imask = HAL_INT_RX | HAL_INT_TX 2540 | HAL_INT_RXORN | HAL_INT_TXURN 2541 | HAL_INT_FATAL | HAL_INT_GLOBAL; 2542 2543 /* 2544 * Enable RX EDMA bits. Note these overlap with 2545 * HAL_INT_RX and HAL_INT_RXDESC respectively. 2546 */ 2547 if (sc->sc_isedma) 2548 sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP); 2549 2550 /* 2551 * If we're an EDMA NIC, we don't care about RXEOL. 2552 * Writing a new descriptor in will simply restart 2553 * RX DMA. 2554 */ 2555 if (! sc->sc_isedma) 2556 sc->sc_imask |= HAL_INT_RXEOL; 2557 2558 /* 2559 * Enable MCI interrupt for MCI devices. 2560 */ 2561 if (sc->sc_btcoex_mci) 2562 sc->sc_imask |= HAL_INT_MCI; 2563 2564 /* 2565 * Enable MIB interrupts when there are hardware phy counters. 
2566 * Note we only do this (at the moment) for station mode. 2567 */ 2568 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) 2569 sc->sc_imask |= HAL_INT_MIB; 2570 2571 /* 2572 * XXX add capability for this. 2573 * 2574 * If we're in STA mode (and maybe IBSS?) then register for 2575 * TSFOOR interrupts. 2576 */ 2577 if (ic->ic_opmode == IEEE80211_M_STA) 2578 sc->sc_imask |= HAL_INT_TSFOOR; 2579 2580 /* Enable global TX timeout and carrier sense timeout if available */ 2581 if (ath_hal_gtxto_supported(ah)) 2582 sc->sc_imask |= HAL_INT_GTT; 2583 2584 DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n", 2585 __func__, sc->sc_imask); 2586 2587 sc->sc_running = 1; 2588 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc); 2589 ath_hal_intrset(ah, sc->sc_imask); 2590 2591 ath_power_restore_power_state(sc); 2592 2593 return (0); 2594} 2595 2596static void 2597ath_stop(struct ath_softc *sc) 2598{ 2599 struct ath_hal *ah = sc->sc_ah; 2600 2601 ATH_LOCK_ASSERT(sc); 2602 2603 /* 2604 * Wake the hardware up before fiddling with it. 2605 */ 2606 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2607 2608 if (sc->sc_running) { 2609 /* 2610 * Shutdown the hardware and driver: 2611 * reset 802.11 state machine 2612 * turn off timers 2613 * disable interrupts 2614 * turn off the radio 2615 * clear transmit machinery 2616 * clear receive machinery 2617 * drain and release tx queues 2618 * reclaim beacon resources 2619 * power down hardware 2620 * 2621 * Note that some of this work is not possible if the 2622 * hardware is gone (invalid). 2623 */ 2624#ifdef ATH_TX99_DIAG 2625 if (sc->sc_tx99 != NULL) 2626 sc->sc_tx99->stop(sc->sc_tx99); 2627#endif 2628 callout_stop(&sc->sc_wd_ch); 2629 sc->sc_wd_timer = 0; 2630 sc->sc_running = 0; 2631 if (!sc->sc_invalid) { 2632 if (sc->sc_softled) { 2633 callout_stop(&sc->sc_ledtimer); 2634 ath_hal_gpioset(ah, sc->sc_ledpin, 2635 !sc->sc_ledon); 2636 sc->sc_blinking = 0; 2637 } 2638 ath_hal_intrset(ah, 0); 2639 } 2640 /* XXX we should stop RX regardless of whether it's valid */ 2641 if (!sc->sc_invalid) { 2642 ath_stoprecv(sc, 1); 2643 ath_hal_phydisable(ah); 2644 } else 2645 sc->sc_rxlink = NULL; 2646 ath_draintxq(sc, ATH_RESET_DEFAULT); 2647 ath_beacon_free(sc); /* XXX not needed */ 2648 } 2649 2650 /* And now, restore the current power state */ 2651 ath_power_restore_power_state(sc); 2652} 2653 2654/* 2655 * Wait until all pending TX/RX has completed. 2656 * 2657 * This waits until all existing transmit, receive and interrupts 2658 * have completed. It's assumed that the caller has first 2659 * grabbed the reset lock so it doesn't try to do overlapping 2660 * chip resets. 2661 */ 2662#define MAX_TXRX_ITERATIONS 100 2663static void 2664ath_txrx_stop_locked(struct ath_softc *sc) 2665{ 2666 int i = MAX_TXRX_ITERATIONS; 2667 2668 ATH_UNLOCK_ASSERT(sc); 2669 ATH_PCU_LOCK_ASSERT(sc); 2670 2671 /* 2672 * Sleep until all the pending operations have completed. 2673 * 2674 * The caller must ensure that reset has been incremented 2675 * or the pending operations may continue being queued. 
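 *
 * "Reset has been incremented" refers to sc_inreset_cnt; the expected
 * calling sequence (see ath_reset() below) is roughly:
 */
#if 0
	ATH_PCU_LOCK(sc);
	ath_reset_grablock(sc, 1);	/* bumps sc_inreset_cnt */
	ath_hal_intrset(ah, 0);		/* block new interrupts */
	ath_txrx_stop_locked(sc);	/* now wait for the counters */
	ATH_PCU_UNLOCK(sc);
#endif
/*
 * With sc_inreset_cnt held no new TX/RX/interrupt work can start, so
 * the counters below can only fall.  The loop gives up after
 * MAX_TXRX_ITERATIONS (100) sleeps of 10ms each, i.e. roughly one
 * second.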
2676 */ 2677 while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt || 2678 sc->sc_txstart_cnt || sc->sc_intr_cnt) { 2679 if (i <= 0) 2680 break; 2681 msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 2682 msecs_to_ticks(10)); 2683 i--; 2684 } 2685 2686 if (i <= 0) 2687 device_printf(sc->sc_dev, 2688 "%s: didn't finish after %d iterations\n", 2689 __func__, MAX_TXRX_ITERATIONS); 2690} 2691#undef MAX_TXRX_ITERATIONS 2692 2693#if 0 2694static void 2695ath_txrx_stop(struct ath_softc *sc) 2696{ 2697 ATH_UNLOCK_ASSERT(sc); 2698 ATH_PCU_UNLOCK_ASSERT(sc); 2699 2700 ATH_PCU_LOCK(sc); 2701 ath_txrx_stop_locked(sc); 2702 ATH_PCU_UNLOCK(sc); 2703} 2704#endif 2705 2706static void 2707ath_txrx_start(struct ath_softc *sc) 2708{ 2709 2710 taskqueue_unblock(sc->sc_tq); 2711} 2712 2713/* 2714 * Grab the reset lock, and wait around until no one else 2715 * is trying to do anything with it. 2716 * 2717 * This is totally horrible but we can't hold this lock for 2718 * long enough to do TX/RX or we end up with net80211/ip stack 2719 * LORs and eventual deadlock. 2720 * 2721 * "dowait" signals whether to spin, waiting for the reset 2722 * lock count to reach 0. This should (for now) only be used 2723 * during the reset path, as the rest of the code may not 2724 * be locking-reentrant enough to behave correctly. 2725 * 2726 * Another, cleaner way should be found to serialise all of 2727 * these operations. 2728 */ 2729#define MAX_RESET_ITERATIONS 25 2730static int 2731ath_reset_grablock(struct ath_softc *sc, int dowait) 2732{ 2733 int w = 0; 2734 int i = MAX_RESET_ITERATIONS; 2735 2736 ATH_PCU_LOCK_ASSERT(sc); 2737 do { 2738 if (sc->sc_inreset_cnt == 0) { 2739 w = 1; 2740 break; 2741 } 2742 if (dowait == 0) { 2743 w = 0; 2744 break; 2745 } 2746 ATH_PCU_UNLOCK(sc); 2747 /* 2748 * 1 tick is likely not enough time for long calibrations 2749 * to complete. So we should wait quite a while. 2750 */ 2751 pause("ath_reset_grablock", msecs_to_ticks(100)); 2752 i--; 2753 ATH_PCU_LOCK(sc); 2754 } while (i > 0); 2755 2756 /* 2757 * We always increment the refcounter, regardless 2758 * of whether we succeeded to get it in an exclusive 2759 * way. 2760 */ 2761 sc->sc_inreset_cnt++; 2762 2763 if (i <= 0) 2764 device_printf(sc->sc_dev, 2765 "%s: didn't finish after %d iterations\n", 2766 __func__, MAX_RESET_ITERATIONS); 2767 2768 if (w == 0) 2769 device_printf(sc->sc_dev, 2770 "%s: warning, recursive reset path!\n", 2771 __func__); 2772 2773 return w; 2774} 2775#undef MAX_RESET_ITERATIONS 2776 2777/* 2778 * Reset the hardware w/o losing operational state. This is 2779 * basically a more efficient way of doing ath_stop, ath_init, 2780 * followed by state transitions to the current 802.11 2781 * operational state. Used to recover from various errors and 2782 * to reset or reload hardware state. 2783 */ 2784int 2785ath_reset(struct ath_softc *sc, ATH_RESET_TYPE reset_type) 2786{ 2787 struct ieee80211com *ic = &sc->sc_ic; 2788 struct ath_hal *ah = sc->sc_ah; 2789 HAL_STATUS status; 2790 int i; 2791 2792 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); 2793 2794 /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */ 2795 ATH_PCU_UNLOCK_ASSERT(sc); 2796 ATH_UNLOCK_ASSERT(sc); 2797 2798 /* Try to (stop any further TX/RX from occurring */ 2799 taskqueue_block(sc->sc_tq); 2800 2801 /* 2802 * Wake the hardware up. 2803 */ 2804 ATH_LOCK(sc); 2805 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2806 ATH_UNLOCK(sc); 2807 2808 ATH_PCU_LOCK(sc); 2809 2810 /* 2811 * Grab the reset lock before TX/RX is stopped. 
2812 * 2813 * This is needed to ensure that when the TX/RX actually does finish, 2814 * no further TX/RX/reset runs in parallel with this. 2815 */ 2816 if (ath_reset_grablock(sc, 1) == 0) { 2817 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", 2818 __func__); 2819 } 2820 2821 /* disable interrupts */ 2822 ath_hal_intrset(ah, 0); 2823 2824 /* 2825 * Now, ensure that any in progress TX/RX completes before we 2826 * continue. 2827 */ 2828 ath_txrx_stop_locked(sc); 2829 2830 ATH_PCU_UNLOCK(sc); 2831 2832 /* 2833 * Regardless of whether we're doing a no-loss flush or 2834 * not, stop the PCU and handle what's in the RX queue. 2835 * That way frames aren't dropped which shouldn't be. 2836 */ 2837 ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS)); 2838 ath_rx_flush(sc); 2839 2840 /* 2841 * Should now wait for pending TX/RX to complete 2842 * and block future ones from occurring. This needs to be 2843 * done before the TX queue is drained. 2844 */ 2845 ath_draintxq(sc, reset_type); /* stop xmit side */ 2846 2847 ath_settkipmic(sc); /* configure TKIP MIC handling */ 2848 /* NB: indicate channel change so we do a full reset */ 2849 ath_update_chainmasks(sc, ic->ic_curchan); 2850 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, 2851 sc->sc_cur_rxchainmask); 2852 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, 2853 HAL_RESET_NORMAL, &status)) 2854 device_printf(sc->sc_dev, 2855 "%s: unable to reset hardware; hal status %u\n", 2856 __func__, status); 2857 sc->sc_diversity = ath_hal_getdiversity(ah); 2858 2859 ATH_RX_LOCK(sc); 2860 sc->sc_rx_stopped = 1; 2861 sc->sc_rx_resetted = 1; 2862 ATH_RX_UNLOCK(sc); 2863 2864 /* Let DFS at it in case it's a DFS channel */ 2865 ath_dfs_radar_enable(sc, ic->ic_curchan); 2866 2867 /* Let spectral at in case spectral is enabled */ 2868 ath_spectral_enable(sc, ic->ic_curchan); 2869 2870 /* 2871 * Let bluetooth coexistence at in case it's needed for this channel 2872 */ 2873 ath_btcoex_enable(sc, ic->ic_curchan); 2874 2875 /* 2876 * If we're doing TDMA, enforce the TXOP limitation for chips that 2877 * support it. 2878 */ 2879 if (sc->sc_hasenforcetxop && sc->sc_tdma) 2880 ath_hal_setenforcetxop(sc->sc_ah, 1); 2881 else 2882 ath_hal_setenforcetxop(sc->sc_ah, 0); 2883 2884 if (ath_startrecv(sc) != 0) /* restart recv */ 2885 device_printf(sc->sc_dev, 2886 "%s: unable to start recv logic\n", __func__); 2887 /* 2888 * We may be doing a reset in response to an ioctl 2889 * that changes the channel so update any state that 2890 * might change as a result. 2891 */ 2892 ath_chan_change(sc, ic->ic_curchan); 2893 if (sc->sc_beacons) { /* restart beacons */ 2894#ifdef IEEE80211_SUPPORT_TDMA 2895 if (sc->sc_tdma) 2896 ath_tdma_config(sc, NULL); 2897 else 2898#endif 2899 ath_beacon_config(sc, NULL); 2900 } 2901 2902 /* 2903 * Release the reset lock and re-enable interrupts here. 2904 * If an interrupt was being processed in ath_intr(), 2905 * it would disable interrupts at this point. So we have 2906 * to atomically enable interrupts and decrement the 2907 * reset counter - this way ath_intr() doesn't end up 2908 * disabling interrupts without a corresponding enable 2909 * in the rest or channel change path. 2910 * 2911 * Grab the TX reference in case we need to transmit. 2912 * That way a parallel transmit doesn't. 2913 */ 2914 ATH_PCU_LOCK(sc); 2915 sc->sc_inreset_cnt--; 2916 sc->sc_txstart_cnt++; 2917 /* XXX only do this if sc_inreset_cnt == 0? 
*/ 2918 ath_hal_intrset(ah, sc->sc_imask); 2919 ATH_PCU_UNLOCK(sc); 2920 2921 /* 2922 * TX and RX can be started here. If it were started with 2923 * sc_inreset_cnt > 0, the TX and RX path would abort. 2924 * Thus if this is a nested call through the reset or 2925 * channel change code, TX completion will occur but 2926 * RX completion and ath_start / ath_tx_start will not 2927 * run. 2928 */ 2929 2930 /* Restart TX/RX as needed */ 2931 ath_txrx_start(sc); 2932 2933 /* XXX TODO: we need to hold the tx refcount here! */ 2934 2935 /* Restart TX completion and pending TX */ 2936 if (reset_type == ATH_RESET_NOLOSS) { 2937 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 2938 if (ATH_TXQ_SETUP(sc, i)) { 2939 ATH_TXQ_LOCK(&sc->sc_txq[i]); 2940 ath_txq_restart_dma(sc, &sc->sc_txq[i]); 2941 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 2942 2943 ATH_TX_LOCK(sc); 2944 ath_txq_sched(sc, &sc->sc_txq[i]); 2945 ATH_TX_UNLOCK(sc); 2946 } 2947 } 2948 } 2949 2950 ATH_LOCK(sc); 2951 ath_power_restore_power_state(sc); 2952 ATH_UNLOCK(sc); 2953 2954 ATH_PCU_LOCK(sc); 2955 sc->sc_txstart_cnt--; 2956 ATH_PCU_UNLOCK(sc); 2957 2958 /* Handle any frames in the TX queue */ 2959 /* 2960 * XXX should this be done by the caller, rather than 2961 * ath_reset() ? 2962 */ 2963 ath_tx_kick(sc); /* restart xmit */ 2964 return 0; 2965} 2966 2967static int 2968ath_reset_vap(struct ieee80211vap *vap, u_long cmd) 2969{ 2970 struct ieee80211com *ic = vap->iv_ic; 2971 struct ath_softc *sc = ic->ic_softc; 2972 struct ath_hal *ah = sc->sc_ah; 2973 2974 switch (cmd) { 2975 case IEEE80211_IOC_TXPOWER: 2976 /* 2977 * If per-packet TPC is enabled, then we have nothing 2978 * to do; otherwise we need to force the global limit. 2979 * All this can happen directly; no need to reset. 2980 */ 2981 if (!ath_hal_gettpc(ah)) 2982 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit); 2983 return 0; 2984 } 2985 /* XXX? Full or NOLOSS? */ 2986 return ath_reset(sc, ATH_RESET_FULL); 2987} 2988 2989struct ath_buf * 2990_ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype) 2991{ 2992 struct ath_buf *bf; 2993 2994 ATH_TXBUF_LOCK_ASSERT(sc); 2995 2996 if (btype == ATH_BUFTYPE_MGMT) 2997 bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt); 2998 else 2999 bf = TAILQ_FIRST(&sc->sc_txbuf); 3000 3001 if (bf == NULL) { 3002 sc->sc_stats.ast_tx_getnobuf++; 3003 } else { 3004 if (bf->bf_flags & ATH_BUF_BUSY) { 3005 sc->sc_stats.ast_tx_getbusybuf++; 3006 bf = NULL; 3007 } 3008 } 3009 3010 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) { 3011 if (btype == ATH_BUFTYPE_MGMT) 3012 TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list); 3013 else { 3014 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list); 3015 sc->sc_txbuf_cnt--; 3016 3017 /* 3018 * This shuldn't happen; however just to be 3019 * safe print a warning and fudge the txbuf 3020 * count. 3021 */ 3022 if (sc->sc_txbuf_cnt < 0) { 3023 device_printf(sc->sc_dev, 3024 "%s: sc_txbuf_cnt < 0?\n", 3025 __func__); 3026 sc->sc_txbuf_cnt = 0; 3027 } 3028 } 3029 } else 3030 bf = NULL; 3031 3032 if (bf == NULL) { 3033 /* XXX should check which list, mgmt or otherwise */ 3034 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__, 3035 TAILQ_FIRST(&sc->sc_txbuf) == NULL ? 
3036 "out of xmit buffers" : "xmit buffer busy"); 3037 return NULL; 3038 } 3039 3040 /* XXX TODO: should do this at buffer list initialisation */ 3041 /* XXX (then, ensure the buffer has the right flag set) */ 3042 bf->bf_flags = 0; 3043 if (btype == ATH_BUFTYPE_MGMT) 3044 bf->bf_flags |= ATH_BUF_MGMT; 3045 else 3046 bf->bf_flags &= (~ATH_BUF_MGMT); 3047 3048 /* Valid bf here; clear some basic fields */ 3049 bf->bf_next = NULL; /* XXX just to be sure */ 3050 bf->bf_last = NULL; /* XXX again, just to be sure */ 3051 bf->bf_comp = NULL; /* XXX again, just to be sure */ 3052 bzero(&bf->bf_state, sizeof(bf->bf_state)); 3053 3054 /* 3055 * Track the descriptor ID only if doing EDMA 3056 */ 3057 if (sc->sc_isedma) { 3058 bf->bf_descid = sc->sc_txbuf_descid; 3059 sc->sc_txbuf_descid++; 3060 } 3061 3062 return bf; 3063} 3064 3065/* 3066 * When retrying a software frame, buffers marked ATH_BUF_BUSY 3067 * can't be thrown back on the queue as they could still be 3068 * in use by the hardware. 3069 * 3070 * This duplicates the buffer, or returns NULL. 3071 * 3072 * The descriptor is also copied but the link pointers and 3073 * the DMA segments aren't copied; this frame should thus 3074 * be again passed through the descriptor setup/chain routines 3075 * so the link is correct. 3076 * 3077 * The caller must free the buffer using ath_freebuf(). 3078 */ 3079struct ath_buf * 3080ath_buf_clone(struct ath_softc *sc, struct ath_buf *bf) 3081{ 3082 struct ath_buf *tbf; 3083 3084 tbf = ath_getbuf(sc, 3085 (bf->bf_flags & ATH_BUF_MGMT) ? 3086 ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL); 3087 if (tbf == NULL) 3088 return NULL; /* XXX failure? Why? */ 3089 3090 /* Copy basics */ 3091 tbf->bf_next = NULL; 3092 tbf->bf_nseg = bf->bf_nseg; 3093 tbf->bf_flags = bf->bf_flags & ATH_BUF_FLAGS_CLONE; 3094 tbf->bf_status = bf->bf_status; 3095 tbf->bf_m = bf->bf_m; 3096 tbf->bf_node = bf->bf_node; 3097 KASSERT((bf->bf_node != NULL), ("%s: bf_node=NULL!", __func__)); 3098 /* will be setup by the chain/setup function */ 3099 tbf->bf_lastds = NULL; 3100 /* for now, last == self */ 3101 tbf->bf_last = tbf; 3102 tbf->bf_comp = bf->bf_comp; 3103 3104 /* NOTE: DMA segments will be setup by the setup/chain functions */ 3105 3106 /* The caller has to re-init the descriptor + links */ 3107 3108 /* 3109 * Free the DMA mapping here, before we NULL the mbuf. 3110 * We must only call bus_dmamap_unload() once per mbuf chain 3111 * or behaviour is undefined. 3112 */ 3113 if (bf->bf_m != NULL) { 3114 /* 3115 * XXX is this POSTWRITE call required? 3116 */ 3117 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 3118 BUS_DMASYNC_POSTWRITE); 3119 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3120 } 3121 3122 bf->bf_m = NULL; 3123 bf->bf_node = NULL; 3124 3125 /* Copy state */ 3126 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state)); 3127 3128 return tbf; 3129} 3130 3131struct ath_buf * 3132ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype) 3133{ 3134 struct ath_buf *bf; 3135 3136 ATH_TXBUF_LOCK(sc); 3137 bf = _ath_getbuf_locked(sc, btype); 3138 /* 3139 * If a mgmt buffer was requested but we're out of those, 3140 * try requesting a normal one. 3141 */ 3142 if (bf == NULL && btype == ATH_BUFTYPE_MGMT) 3143 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); 3144 ATH_TXBUF_UNLOCK(sc); 3145 if (bf == NULL) { 3146 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__); 3147 sc->sc_stats.ast_tx_qstop++; 3148 } 3149 return bf; 3150} 3151 3152/* 3153 * Transmit a single frame. 
3154 * 3155 * net80211 will free the node reference if the transmit 3156 * fails, so don't free the node reference here. 3157 */ 3158static int 3159ath_transmit(struct ieee80211com *ic, struct mbuf *m) 3160{ 3161 struct ath_softc *sc = ic->ic_softc; 3162 struct ieee80211_node *ni; 3163 struct mbuf *next; 3164 struct ath_buf *bf; 3165 ath_bufhead frags; 3166 int retval = 0; 3167 3168 /* 3169 * Tell the reset path that we're currently transmitting. 3170 */ 3171 ATH_PCU_LOCK(sc); 3172 if (sc->sc_inreset_cnt > 0) { 3173 DPRINTF(sc, ATH_DEBUG_XMIT, 3174 "%s: sc_inreset_cnt > 0; bailing\n", __func__); 3175 ATH_PCU_UNLOCK(sc); 3176 sc->sc_stats.ast_tx_qstop++; 3177 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: OACTIVE, finish"); 3178 return (ENOBUFS); /* XXX should be EINVAL or? */ 3179 } 3180 sc->sc_txstart_cnt++; 3181 ATH_PCU_UNLOCK(sc); 3182 3183 /* Wake the hardware up already */ 3184 ATH_LOCK(sc); 3185 ath_power_set_power_state(sc, HAL_PM_AWAKE); 3186 ATH_UNLOCK(sc); 3187 3188 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: start"); 3189 /* 3190 * Grab the TX lock - it's ok to do this here; we haven't 3191 * yet started transmitting. 3192 */ 3193 ATH_TX_LOCK(sc); 3194 3195 /* 3196 * Node reference, if there's one. 3197 */ 3198 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; 3199 3200 /* 3201 * Enforce how deep a node queue can get. 3202 * 3203 * XXX it would be nicer if we kept an mbuf queue per 3204 * node and only whacked them into ath_bufs when we 3205 * are ready to schedule some traffic from them. 3206 * .. that may come later. 3207 * 3208 * XXX we should also track the per-node hardware queue 3209 * depth so it is easy to limit the _SUM_ of the swq and 3210 * hwq frames. Since we only schedule two HWQ frames 3211 * at a time, this should be OK for now. 3212 */ 3213 if ((!(m->m_flags & M_EAPOL)) && 3214 (ATH_NODE(ni)->an_swq_depth > sc->sc_txq_node_maxdepth)) { 3215 sc->sc_stats.ast_tx_nodeq_overflow++; 3216 retval = ENOBUFS; 3217 goto finish; 3218 } 3219 3220 /* 3221 * Check how many TX buffers are available. 3222 * 3223 * If this is for non-EAPOL traffic, just leave some 3224 * space free in order for buffer cloning and raw 3225 * frame transmission to occur. 3226 * 3227 * If it's for EAPOL traffic, ignore this for now. 3228 * Management traffic will be sent via the raw transmit 3229 * method which bypasses this check. 3230 * 3231 * This is needed to ensure that EAPOL frames during 3232 * (re) keying have a chance to go out. 3233 * 3234 * See kern/138379 for more information. 3235 */ 3236 if ((!(m->m_flags & M_EAPOL)) && 3237 (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree)) { 3238 sc->sc_stats.ast_tx_nobuf++; 3239 retval = ENOBUFS; 3240 goto finish; 3241 } 3242 3243 /* 3244 * Grab a TX buffer and associated resources. 3245 * 3246 * If it's an EAPOL frame, allocate a MGMT ath_buf. 3247 * That way even with temporary buffer exhaustion due to 3248 * the data path doesn't leave us without the ability 3249 * to transmit management frames. 3250 * 3251 * Otherwise allocate a normal buffer. 3252 */ 3253 if (m->m_flags & M_EAPOL) 3254 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); 3255 else 3256 bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL); 3257 3258 if (bf == NULL) { 3259 /* 3260 * If we failed to allocate a buffer, fail. 3261 * 3262 * We shouldn't fail normally, due to the check 3263 * above. 3264 */ 3265 sc->sc_stats.ast_tx_nobuf++; 3266 retval = ENOBUFS; 3267 goto finish; 3268 } 3269 3270 /* 3271 * At this point we have a buffer; so we need to free it 3272 * if we hit any error conditions. 
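 *
 * To recap the admission rules applied above (numbers purely
 * illustrative): if, say, sc_txq_data_minfree were 32, then once
 * sc_txbuf_cnt drops to 32 every non-EAPOL frame is refused with
 * ENOBUFS, leaving that headroom for EAPOL and raw/management
 * transmission; EAPOL frames skip both the node-depth and the
 * free-buffer checks and are allocated from the MGMT buffer pool,
 * so (re)keying can complete even under TX buffer exhaustion.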
3273 */ 3274 3275 /* 3276 * Check for fragmentation. If this frame 3277 * has been broken up verify we have enough 3278 * buffers to send all the fragments so all 3279 * go out or none... 3280 */ 3281 TAILQ_INIT(&frags); 3282 if ((m->m_flags & M_FRAG) && 3283 !ath_txfrag_setup(sc, &frags, m, ni)) { 3284 DPRINTF(sc, ATH_DEBUG_XMIT, 3285 "%s: out of txfrag buffers\n", __func__); 3286 sc->sc_stats.ast_tx_nofrag++; 3287 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); 3288 /* 3289 * XXXGL: is mbuf valid after ath_txfrag_setup? If yes, 3290 * we shouldn't free it but return back. 3291 */ 3292 ieee80211_free_mbuf(m); 3293 m = NULL; 3294 goto bad; 3295 } 3296 3297 /* 3298 * At this point if we have any TX fragments, then we will 3299 * have bumped the node reference once for each of those. 3300 */ 3301 3302 /* 3303 * XXX Is there anything actually _enforcing_ that the 3304 * fragments are being transmitted in one hit, rather than 3305 * being interleaved with other transmissions on that 3306 * hardware queue? 3307 * 3308 * The ATH TX output lock is the only thing serialising this 3309 * right now. 3310 */ 3311 3312 /* 3313 * Calculate the "next fragment" length field in ath_buf 3314 * in order to let the transmit path know enough about 3315 * what to next write to the hardware. 3316 */ 3317 if (m->m_flags & M_FRAG) { 3318 struct ath_buf *fbf = bf; 3319 struct ath_buf *n_fbf = NULL; 3320 struct mbuf *fm = m->m_nextpkt; 3321 3322 /* 3323 * We need to walk the list of fragments and set 3324 * the next size to the following buffer. 3325 * However, the first buffer isn't in the frag 3326 * list, so we have to do some gymnastics here. 3327 */ 3328 TAILQ_FOREACH(n_fbf, &frags, bf_list) { 3329 fbf->bf_nextfraglen = fm->m_pkthdr.len; 3330 fbf = n_fbf; 3331 fm = fm->m_nextpkt; 3332 } 3333 } 3334 3335nextfrag: 3336 /* 3337 * Pass the frame to the h/w for transmission. 3338 * Fragmented frames have each frag chained together 3339 * with m_nextpkt. We know there are sufficient ath_buf's 3340 * to send all the frags because of work done by 3341 * ath_txfrag_setup. We leave m_nextpkt set while 3342 * calling ath_tx_start so it can use it to extend the 3343 * the tx duration to cover the subsequent frag and 3344 * so it can reclaim all the mbufs in case of an error; 3345 * ath_tx_start clears m_nextpkt once it commits to 3346 * handing the frame to the hardware. 3347 * 3348 * Note: if this fails, then the mbufs are freed but 3349 * not the node reference. 3350 * 3351 * So, we now have to free the node reference ourselves here 3352 * and return OK up to the stack. 3353 */ 3354 next = m->m_nextpkt; 3355 if (ath_tx_start(sc, ni, bf, m)) { 3356bad: 3357 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); 3358reclaim: 3359 bf->bf_m = NULL; 3360 bf->bf_node = NULL; 3361 ATH_TXBUF_LOCK(sc); 3362 ath_returnbuf_head(sc, bf); 3363 /* 3364 * Free the rest of the node references and 3365 * buffers for the fragment list. 3366 */ 3367 ath_txfrag_cleanup(sc, &frags, ni); 3368 ATH_TXBUF_UNLOCK(sc); 3369 3370 /* 3371 * XXX: And free the node/return OK; ath_tx_start() may have 3372 * modified the buffer. We currently have no way to 3373 * signify that the mbuf was freed but there was an error. 3374 */ 3375 ieee80211_free_node(ni); 3376 retval = 0; 3377 goto finish; 3378 } 3379 3380 /* 3381 * Check here if the node is in power save state. 3382 */ 3383 ath_tx_update_tim(sc, ni, 1); 3384 3385 if (next != NULL) { 3386 /* 3387 * Beware of state changing between frags. 3388 * XXX check sta power-save state? 
3389 */ 3390 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { 3391 DPRINTF(sc, ATH_DEBUG_XMIT, 3392 "%s: flush fragmented packet, state %s\n", 3393 __func__, 3394 ieee80211_state_name[ni->ni_vap->iv_state]); 3395 /* XXX dmamap */ 3396 ieee80211_free_mbuf(next); 3397 goto reclaim; 3398 } 3399 m = next; 3400 bf = TAILQ_FIRST(&frags); 3401 KASSERT(bf != NULL, ("no buf for txfrag")); 3402 TAILQ_REMOVE(&frags, bf, bf_list); 3403 goto nextfrag; 3404 } 3405 3406 /* 3407 * Bump watchdog timer. 3408 */ 3409 sc->sc_wd_timer = 5; 3410 3411finish: 3412 ATH_TX_UNLOCK(sc); 3413 3414 /* 3415 * Finished transmitting! 3416 */ 3417 ATH_PCU_LOCK(sc); 3418 sc->sc_txstart_cnt--; 3419 ATH_PCU_UNLOCK(sc); 3420 3421 /* Sleep the hardware if required */ 3422 ATH_LOCK(sc); 3423 ath_power_restore_power_state(sc); 3424 ATH_UNLOCK(sc); 3425 3426 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: finished"); 3427 3428 return (retval); 3429} 3430 3431static int 3432ath_media_change(struct ifnet *ifp) 3433{ 3434 int error = ieee80211_media_change(ifp); 3435 /* NB: only the fixed rate can change and that doesn't need a reset */ 3436 return (error == ENETRESET ? 0 : error); 3437} 3438 3439/* 3440 * Block/unblock tx+rx processing while a key change is done. 3441 * We assume the caller serializes key management operations 3442 * so we only need to worry about synchronization with other 3443 * uses that originate in the driver. 3444 */ 3445static void 3446ath_key_update_begin(struct ieee80211vap *vap) 3447{ 3448 struct ath_softc *sc = vap->iv_ic->ic_softc; 3449 3450 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 3451 taskqueue_block(sc->sc_tq); 3452} 3453 3454static void 3455ath_key_update_end(struct ieee80211vap *vap) 3456{ 3457 struct ath_softc *sc = vap->iv_ic->ic_softc; 3458 3459 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 3460 taskqueue_unblock(sc->sc_tq); 3461} 3462 3463static void 3464ath_update_promisc(struct ieee80211com *ic) 3465{ 3466 struct ath_softc *sc = ic->ic_softc; 3467 u_int32_t rfilt; 3468 3469 /* configure rx filter */ 3470 ATH_LOCK(sc); 3471 ath_power_set_power_state(sc, HAL_PM_AWAKE); 3472 rfilt = ath_calcrxfilter(sc); 3473 ath_hal_setrxfilter(sc->sc_ah, rfilt); 3474 ath_power_restore_power_state(sc); 3475 ATH_UNLOCK(sc); 3476 3477 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); 3478} 3479 3480/* 3481 * Driver-internal mcast update call. 3482 * 3483 * Assumes the hardware is already awake. 3484 */ 3485static void 3486ath_update_mcast_hw(struct ath_softc *sc) 3487{ 3488 struct ieee80211com *ic = &sc->sc_ic; 3489 u_int32_t mfilt[2]; 3490 3491 /* calculate and install multicast filter */ 3492 if (ic->ic_allmulti == 0) { 3493 struct ieee80211vap *vap; 3494 struct ifnet *ifp; 3495 struct ifmultiaddr *ifma; 3496 3497 /* 3498 * Merge multicast addresses to form the hardware filter. 
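 *
 * The hash below folds each 6-byte group address down to a 6-bit
 * bucket in the 64-bit hardware filter.  Pulled out as a standalone
 * helper purely for clarity (a sketch; no such helper exists in the
 * driver):
 */
#if 0
	static uint8_t
	ath_mcast_hash(const uint8_t *addr)
	{
		uint32_t val;
		uint8_t pos;

		/* XOR 6-bit slices of two overlapping LE 32-bit words */
		val = le32dec(addr + 0);
		pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
		val = le32dec(addr + 3);
		pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
		return (pos & 0x3f);	/* bucket 0..63 */
	}
#endif
/*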
3499 */ 3500 mfilt[0] = mfilt[1] = 0; 3501 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { 3502 ifp = vap->iv_ifp; 3503 if_maddr_rlock(ifp); 3504 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3505 caddr_t dl; 3506 uint32_t val; 3507 uint8_t pos; 3508 3509 /* calculate XOR of eight 6bit values */ 3510 dl = LLADDR((struct sockaddr_dl *) 3511 ifma->ifma_addr); 3512 val = le32dec(dl + 0); 3513 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ 3514 val; 3515 val = le32dec(dl + 3); 3516 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ 3517 val; 3518 pos &= 0x3f; 3519 mfilt[pos / 32] |= (1 << (pos % 32)); 3520 } 3521 if_maddr_runlock(ifp); 3522 } 3523 } else 3524 mfilt[0] = mfilt[1] = ~0; 3525 3526 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]); 3527 3528 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n", 3529 __func__, mfilt[0], mfilt[1]); 3530} 3531 3532/* 3533 * Called from the net80211 layer - force the hardware 3534 * awake before operating. 3535 */ 3536static void 3537ath_update_mcast(struct ieee80211com *ic) 3538{ 3539 struct ath_softc *sc = ic->ic_softc; 3540 3541 ATH_LOCK(sc); 3542 ath_power_set_power_state(sc, HAL_PM_AWAKE); 3543 ATH_UNLOCK(sc); 3544 3545 ath_update_mcast_hw(sc); 3546 3547 ATH_LOCK(sc); 3548 ath_power_restore_power_state(sc); 3549 ATH_UNLOCK(sc); 3550} 3551 3552void 3553ath_mode_init(struct ath_softc *sc) 3554{ 3555 struct ieee80211com *ic = &sc->sc_ic; 3556 struct ath_hal *ah = sc->sc_ah; 3557 u_int32_t rfilt; 3558 3559 /* configure rx filter */ 3560 rfilt = ath_calcrxfilter(sc); 3561 ath_hal_setrxfilter(ah, rfilt); 3562 3563 /* configure operational mode */ 3564 ath_hal_setopmode(ah); 3565 3566 /* handle any link-level address change */ 3567 ath_hal_setmac(ah, ic->ic_macaddr); 3568 3569 /* calculate and install multicast filter */ 3570 ath_update_mcast_hw(sc); 3571} 3572 3573/* 3574 * Set the slot time based on the current setting. 3575 */ 3576void 3577ath_setslottime(struct ath_softc *sc) 3578{ 3579 struct ieee80211com *ic = &sc->sc_ic; 3580 struct ath_hal *ah = sc->sc_ah; 3581 u_int usec; 3582 3583 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan)) 3584 usec = 13; 3585 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan)) 3586 usec = 21; 3587 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { 3588 /* honor short/long slot time only in 11g */ 3589 /* XXX shouldn't honor on pure g or turbo g channel */ 3590 if (ic->ic_flags & IEEE80211_F_SHSLOT) 3591 usec = HAL_SLOT_TIME_9; 3592 else 3593 usec = HAL_SLOT_TIME_20; 3594 } else 3595 usec = HAL_SLOT_TIME_9; 3596 3597 DPRINTF(sc, ATH_DEBUG_RESET, 3598 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n", 3599 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, 3600 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec); 3601 3602 /* Wake up the hardware first before updating the slot time */ 3603 ATH_LOCK(sc); 3604 ath_power_set_power_state(sc, HAL_PM_AWAKE); 3605 ath_hal_setslottime(ah, usec); 3606 ath_power_restore_power_state(sc); 3607 sc->sc_updateslot = OK; 3608 ATH_UNLOCK(sc); 3609} 3610 3611/* 3612 * Callback from the 802.11 layer to update the 3613 * slot time based on the current setting. 3614 */ 3615static void 3616ath_updateslot(struct ieee80211com *ic) 3617{ 3618 struct ath_softc *sc = ic->ic_softc; 3619 3620 /* 3621 * When not coordinating the BSS, change the hardware 3622 * immediately. For other operation we defer the change 3623 * until beacon updates have propagated to the stations. 3624 * 3625 * XXX sc_updateslot isn't changed behind a lock? 
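 *
 * Setting sc_updateslot = UPDATE below is a request to the beacon
 * transmit path to apply the change (by calling ath_setslottime(),
 * which sets the flag back to OK) only once beacons carrying the new
 * slot-time setting have gone out, so that associated stations switch
 * at roughly the same time as the AP.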
3626 */ 3627 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 3628 ic->ic_opmode == IEEE80211_M_MBSS) 3629 sc->sc_updateslot = UPDATE; 3630 else 3631 ath_setslottime(sc); 3632} 3633 3634/* 3635 * Append the contents of src to dst; both queues 3636 * are assumed to be locked. 3637 */ 3638void 3639ath_txqmove(struct ath_txq *dst, struct ath_txq *src) 3640{ 3641 3642 ATH_TXQ_LOCK_ASSERT(src); 3643 ATH_TXQ_LOCK_ASSERT(dst); 3644 3645 TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list); 3646 dst->axq_link = src->axq_link; 3647 src->axq_link = NULL; 3648 dst->axq_depth += src->axq_depth; 3649 dst->axq_aggr_depth += src->axq_aggr_depth; 3650 src->axq_depth = 0; 3651 src->axq_aggr_depth = 0; 3652} 3653 3654/* 3655 * Reset the hardware, with no loss. 3656 * 3657 * This can't be used for a general case reset. 3658 */ 3659static void 3660ath_reset_proc(void *arg, int pending) 3661{ 3662 struct ath_softc *sc = arg; 3663 3664#if 0 3665 device_printf(sc->sc_dev, "%s: resetting\n", __func__); 3666#endif 3667 ath_reset(sc, ATH_RESET_NOLOSS); 3668} 3669 3670/* 3671 * Reset the hardware after detecting beacons have stopped. 3672 */ 3673static void 3674ath_bstuck_proc(void *arg, int pending) 3675{ 3676 struct ath_softc *sc = arg; 3677 uint32_t hangs = 0; 3678 3679 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) 3680 device_printf(sc->sc_dev, "bb hang detected (0x%x)\n", hangs); 3681 3682#ifdef ATH_DEBUG_ALQ 3683 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_STUCK_BEACON)) 3684 if_ath_alq_post(&sc->sc_alq, ATH_ALQ_STUCK_BEACON, 0, NULL); 3685#endif 3686 3687 device_printf(sc->sc_dev, "stuck beacon; resetting (bmiss count %u)\n", 3688 sc->sc_bmisscount); 3689 sc->sc_stats.ast_bstuck++; 3690 /* 3691 * This assumes that there's no simultaneous channel mode change 3692 * occurring. 3693 */ 3694 ath_reset(sc, ATH_RESET_NOLOSS); 3695} 3696 3697static int 3698ath_desc_alloc(struct ath_softc *sc) 3699{ 3700 int error; 3701 3702 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 3703 "tx", sc->sc_tx_desclen, ath_txbuf, ATH_MAX_SCATTER); 3704 if (error != 0) { 3705 return error; 3706 } 3707 sc->sc_txbuf_cnt = ath_txbuf; 3708 3709 error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt, 3710 "tx_mgmt", sc->sc_tx_desclen, ath_txbuf_mgmt, 3711 ATH_TXDESC); 3712 if (error != 0) { 3713 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3714 return error; 3715 } 3716 3717 /* 3718 * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the 3719 * flag doesn't have to be set in ath_getbuf_locked(). 
3720 */ 3721 3722 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, 3723 "beacon", sc->sc_tx_desclen, ATH_BCBUF, 1); 3724 if (error != 0) { 3725 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3726 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 3727 &sc->sc_txbuf_mgmt); 3728 return error; 3729 } 3730 return 0; 3731} 3732 3733static void 3734ath_desc_free(struct ath_softc *sc) 3735{ 3736 3737 if (sc->sc_bdma.dd_desc_len != 0) 3738 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); 3739 if (sc->sc_txdma.dd_desc_len != 0) 3740 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3741 if (sc->sc_txdma_mgmt.dd_desc_len != 0) 3742 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 3743 &sc->sc_txbuf_mgmt); 3744} 3745 3746static struct ieee80211_node * 3747ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 3748{ 3749 struct ieee80211com *ic = vap->iv_ic; 3750 struct ath_softc *sc = ic->ic_softc; 3751 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; 3752 struct ath_node *an; 3753 3754 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); 3755 if (an == NULL) { 3756 /* XXX stat+msg */ 3757 return NULL; 3758 } 3759 ath_rate_node_init(sc, an); 3760 3761 /* Setup the mutex - there's no associd yet so set the name to NULL */ 3762 snprintf(an->an_name, sizeof(an->an_name), "%s: node %p", 3763 device_get_nameunit(sc->sc_dev), an); 3764 mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF); 3765 3766 /* XXX setup ath_tid */ 3767 ath_tx_tid_init(sc, an); 3768 3769 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, mac, ":", an); 3770 return &an->an_node; 3771} 3772 3773static void 3774ath_node_cleanup(struct ieee80211_node *ni) 3775{ 3776 struct ieee80211com *ic = ni->ni_ic; 3777 struct ath_softc *sc = ic->ic_softc; 3778 3779 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, 3780 ni->ni_macaddr, ":", ATH_NODE(ni)); 3781 3782 /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */ 3783 ath_tx_node_flush(sc, ATH_NODE(ni)); 3784 ath_rate_node_cleanup(sc, ATH_NODE(ni)); 3785 sc->sc_node_cleanup(ni); 3786} 3787 3788static void 3789ath_node_free(struct ieee80211_node *ni) 3790{ 3791 struct ieee80211com *ic = ni->ni_ic; 3792 struct ath_softc *sc = ic->ic_softc; 3793 3794 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, 3795 ni->ni_macaddr, ":", ATH_NODE(ni)); 3796 mtx_destroy(&ATH_NODE(ni)->an_mtx); 3797 sc->sc_node_free(ni); 3798} 3799 3800static void 3801ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) 3802{ 3803 struct ieee80211com *ic = ni->ni_ic; 3804 struct ath_softc *sc = ic->ic_softc; 3805 struct ath_hal *ah = sc->sc_ah; 3806 3807 *rssi = ic->ic_node_getrssi(ni); 3808 if (ni->ni_chan != IEEE80211_CHAN_ANYC) 3809 *noise = ath_hal_getchannoise(ah, ni->ni_chan); 3810 else 3811 *noise = -95; /* nominally correct */ 3812} 3813 3814/* 3815 * Set the default antenna. 
3816 */ 3817void 3818ath_setdefantenna(struct ath_softc *sc, u_int antenna) 3819{ 3820 struct ath_hal *ah = sc->sc_ah; 3821 3822 /* XXX block beacon interrupts */ 3823 ath_hal_setdefantenna(ah, antenna); 3824 if (sc->sc_defant != antenna) 3825 sc->sc_stats.ast_ant_defswitch++; 3826 sc->sc_defant = antenna; 3827 sc->sc_rxotherant = 0; 3828} 3829 3830static void 3831ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) 3832{ 3833 txq->axq_qnum = qnum; 3834 txq->axq_ac = 0; 3835 txq->axq_depth = 0; 3836 txq->axq_aggr_depth = 0; 3837 txq->axq_intrcnt = 0; 3838 txq->axq_link = NULL; 3839 txq->axq_softc = sc; 3840 TAILQ_INIT(&txq->axq_q); 3841 TAILQ_INIT(&txq->axq_tidq); 3842 TAILQ_INIT(&txq->fifo.axq_q); 3843 ATH_TXQ_LOCK_INIT(sc, txq); 3844} 3845 3846/* 3847 * Setup a h/w transmit queue. 3848 */ 3849static struct ath_txq * 3850ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 3851{ 3852 struct ath_hal *ah = sc->sc_ah; 3853 HAL_TXQ_INFO qi; 3854 int qnum; 3855 3856 memset(&qi, 0, sizeof(qi)); 3857 qi.tqi_subtype = subtype; 3858 qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 3859 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 3860 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 3861 /* 3862 * Enable interrupts only for EOL and DESC conditions. 3863 * We mark tx descriptors to receive a DESC interrupt 3864 * when a tx queue gets deep; otherwise waiting for the 3865 * EOL to reap descriptors. Note that this is done to 3866 * reduce interrupt load and this only defers reaping 3867 * descriptors, never transmitting frames. Aside from 3868 * reducing interrupts this also permits more concurrency. 3869 * The only potential downside is if the tx queue backs 3870 * up in which case the top half of the kernel may backup 3871 * due to a lack of tx descriptors. 3872 */ 3873 if (sc->sc_isedma) 3874 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | 3875 HAL_TXQ_TXOKINT_ENABLE; 3876 else 3877 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | 3878 HAL_TXQ_TXDESCINT_ENABLE; 3879 3880 qnum = ath_hal_setuptxqueue(ah, qtype, &qi); 3881 if (qnum == -1) { 3882 /* 3883 * NB: don't print a message, this happens 3884 * normally on parts with too few tx queues 3885 */ 3886 return NULL; 3887 } 3888 if (qnum >= nitems(sc->sc_txq)) { 3889 device_printf(sc->sc_dev, 3890 "hal qnum %u out of range, max %zu!\n", 3891 qnum, nitems(sc->sc_txq)); 3892 ath_hal_releasetxqueue(ah, qnum); 3893 return NULL; 3894 } 3895 if (!ATH_TXQ_SETUP(sc, qnum)) { 3896 ath_txq_init(sc, &sc->sc_txq[qnum], qnum); 3897 sc->sc_txqsetup |= 1<<qnum; 3898 } 3899 return &sc->sc_txq[qnum]; 3900} 3901 3902/* 3903 * Setup a hardware data transmit queue for the specified 3904 * access control. The hal may not support all requested 3905 * queues in which case it will return a reference to a 3906 * previously setup queue. We record the mapping from ac's 3907 * to h/w queues for use by ath_tx_start and also track 3908 * the set of h/w queues being used to optimize work in the 3909 * transmit interrupt handler and related routines. 3910 */ 3911static int 3912ath_tx_setup(struct ath_softc *sc, int ac, int haltype) 3913{ 3914 struct ath_txq *txq; 3915 3916 if (ac >= nitems(sc->sc_ac2q)) { 3917 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", 3918 ac, nitems(sc->sc_ac2q)); 3919 return 0; 3920 } 3921 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); 3922 if (txq != NULL) { 3923 txq->axq_ac = ac; 3924 sc->sc_ac2q[ac] = txq; 3925 return 1; 3926 } else 3927 return 0; 3928} 3929 3930/* 3931 * Update WME parameters for a transmit queue. 
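 *
 * (For reference, the attach path maps the four WME access categories
 * onto hardware queues with ath_tx_setup() above; a sketch of that
 * call pattern, with the failure handling elided:)
 */
#if 0
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK) ||
	    !ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/* not enough usable h/w queues; fail the attach */
	}
#endif
/*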
3932 */ 3933static int 3934ath_txq_update(struct ath_softc *sc, int ac) 3935{ 3936#define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) 3937 struct ieee80211com *ic = &sc->sc_ic; 3938 struct ath_txq *txq = sc->sc_ac2q[ac]; 3939 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 3940 struct ath_hal *ah = sc->sc_ah; 3941 HAL_TXQ_INFO qi; 3942 3943 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); 3944#ifdef IEEE80211_SUPPORT_TDMA 3945 if (sc->sc_tdma) { 3946 /* 3947 * AIFS is zero so there's no pre-transmit wait. The 3948 * burst time defines the slot duration and is configured 3949 * through net80211. The QCU is setup to not do post-xmit 3950 * back off, lockout all lower-priority QCU's, and fire 3951 * off the DMA beacon alert timer which is setup based 3952 * on the slot configuration. 3953 */ 3954 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 3955 | HAL_TXQ_TXERRINT_ENABLE 3956 | HAL_TXQ_TXURNINT_ENABLE 3957 | HAL_TXQ_TXEOLINT_ENABLE 3958 | HAL_TXQ_DBA_GATED 3959 | HAL_TXQ_BACKOFF_DISABLE 3960 | HAL_TXQ_ARB_LOCKOUT_GLOBAL 3961 ; 3962 qi.tqi_aifs = 0; 3963 /* XXX +dbaprep? */ 3964 qi.tqi_readyTime = sc->sc_tdmaslotlen; 3965 qi.tqi_burstTime = qi.tqi_readyTime; 3966 } else { 3967#endif 3968 /* 3969 * XXX shouldn't this just use the default flags 3970 * used in the previous queue setup? 3971 */ 3972 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 3973 | HAL_TXQ_TXERRINT_ENABLE 3974 | HAL_TXQ_TXDESCINT_ENABLE 3975 | HAL_TXQ_TXURNINT_ENABLE 3976 | HAL_TXQ_TXEOLINT_ENABLE 3977 ; 3978 qi.tqi_aifs = wmep->wmep_aifsn; 3979 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 3980 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 3981 qi.tqi_readyTime = 0; 3982 qi.tqi_burstTime = IEEE80211_TXOP_TO_US(wmep->wmep_txopLimit); 3983#ifdef IEEE80211_SUPPORT_TDMA 3984 } 3985#endif 3986 3987 DPRINTF(sc, ATH_DEBUG_RESET, 3988 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n", 3989 __func__, txq->axq_qnum, qi.tqi_qflags, 3990 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime); 3991 3992 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { 3993 device_printf(sc->sc_dev, "unable to update hardware queue " 3994 "parameters for %s traffic!\n", ieee80211_wme_acnames[ac]); 3995 return 0; 3996 } else { 3997 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ 3998 return 1; 3999 } 4000#undef ATH_EXPONENT_TO_VALUE 4001} 4002 4003/* 4004 * Callback from the 802.11 layer to update WME parameters. 4005 */ 4006int 4007ath_wme_update(struct ieee80211com *ic) 4008{ 4009 struct ath_softc *sc = ic->ic_softc; 4010 4011 return !ath_txq_update(sc, WME_AC_BE) || 4012 !ath_txq_update(sc, WME_AC_BK) || 4013 !ath_txq_update(sc, WME_AC_VI) || 4014 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; 4015} 4016 4017/* 4018 * Reclaim resources for a setup queue. 4019 */ 4020static void 4021ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 4022{ 4023 4024 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); 4025 sc->sc_txqsetup &= ~(1<<txq->axq_qnum); 4026 ATH_TXQ_LOCK_DESTROY(txq); 4027} 4028 4029/* 4030 * Reclaim all tx queue resources. 4031 */ 4032static void 4033ath_tx_cleanup(struct ath_softc *sc) 4034{ 4035 int i; 4036 4037 ATH_TXBUF_LOCK_DESTROY(sc); 4038 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 4039 if (ATH_TXQ_SETUP(sc, i)) 4040 ath_tx_cleanupq(sc, &sc->sc_txq[i]); 4041} 4042 4043/* 4044 * Return h/w rate index for an IEEE rate (w/o basic rate bit) 4045 * using the current rates in sc_rixmap. 
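 *
 * For example, 54 Mb/s has IEEE rate code 108 (the rate in 500 kb/s
 * units), so sc_rixmap[108] yields the h/w rate index for 54M in the
 * currently selected rate table.  Entries ath_setcurmode() left at
 * 0xff (rates not present in the current table) deliberately map to
 * index 0, i.e. the lowest rate.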
4046 */
4047int
4048ath_tx_findrix(const struct ath_softc *sc, uint8_t rate)
4049{
4050	int rix = sc->sc_rixmap[rate];
4051	/* NB: return lowest rix for invalid rate */
4052	return (rix == 0xff ? 0 : rix);
4053}
4054
4055static void
4056ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts,
4057    struct ath_buf *bf)
4058{
4059	struct ieee80211_node *ni = bf->bf_node;
4060	struct ieee80211com *ic = &sc->sc_ic;
4061	int sr, lr, pri;
4062
4063	if (ts->ts_status == 0) {
4064		u_int8_t txant = ts->ts_antenna;
4065		sc->sc_stats.ast_ant_tx[txant]++;
4066		sc->sc_ant_tx[txant]++;
4067		if (ts->ts_finaltsi != 0)
4068			sc->sc_stats.ast_tx_altrate++;
4069		pri = M_WME_GETAC(bf->bf_m);
4070		if (pri >= WME_AC_VO)
4071			ic->ic_wme.wme_hipri_traffic++;
4072		if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)
4073			ni->ni_inact = ni->ni_inact_reload;
4074	} else {
4075		if (ts->ts_status & HAL_TXERR_XRETRY)
4076			sc->sc_stats.ast_tx_xretries++;
4077		if (ts->ts_status & HAL_TXERR_FIFO)
4078			sc->sc_stats.ast_tx_fifoerr++;
4079		if (ts->ts_status & HAL_TXERR_FILT)
4080			sc->sc_stats.ast_tx_filtered++;
4081		if (ts->ts_status & HAL_TXERR_XTXOP)
4082			sc->sc_stats.ast_tx_xtxop++;
4083		if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
4084			sc->sc_stats.ast_tx_timerexpired++;
4085
4086		if (bf->bf_m->m_flags & M_FF)
4087			sc->sc_stats.ast_ff_txerr++;
4088	}
4089	/* XXX when is this valid? */
4090	if (ts->ts_flags & HAL_TX_DESC_CFG_ERR)
4091		sc->sc_stats.ast_tx_desccfgerr++;
4092	/*
4093	 * This can be valid for successful frame transmission!
4094	 * If there's a TX FIFO underrun during aggregate transmission,
4095	 * the MAC will pad the rest of the aggregate with delimiters.
4096	 * If a BA is returned, the frame is marked as "OK" and it's up
4097	 * to the TX completion code to notice which frames weren't
4098	 * successfully transmitted.
4099	 */
4100	if (ts->ts_flags & HAL_TX_DATA_UNDERRUN)
4101		sc->sc_stats.ast_tx_data_underrun++;
4102	if (ts->ts_flags & HAL_TX_DELIM_UNDERRUN)
4103		sc->sc_stats.ast_tx_delim_underrun++;
4104
4105	sr = ts->ts_shortretry;
4106	lr = ts->ts_longretry;
4107	sc->sc_stats.ast_tx_shortretry += sr;
4108	sc->sc_stats.ast_tx_longretry += lr;
4109
4110}
4111
4112/*
4113 * The default completion. If fail is 1, this means
4114 * "please don't retry the frame, and just return -1 status
4115 * to the net80211 stack".
4116 */
4117void
4118ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4119{
4120	struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4121	int st;
4122
4123	if (fail == 1)
4124		st = -1;
4125	else
4126		st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ?
4127		    ts->ts_status : HAL_TXERR_XRETRY;
4128
4129#if 0
4130	if (bf->bf_state.bfs_dobaw)
4131		device_printf(sc->sc_dev,
4132		    "%s: bf %p: seqno %d: dobaw should've been cleared!\n",
4133		    __func__,
4134		    bf,
4135		    SEQNO(bf->bf_state.bfs_seqno));
4136#endif
4137	if (bf->bf_next != NULL)
4138		device_printf(sc->sc_dev,
4139		    "%s: bf %p: seqno %d: bf_next not NULL!\n",
4140		    __func__,
4141		    bf,
4142		    SEQNO(bf->bf_state.bfs_seqno));
4143
4144	/*
4145	 * Check if the node software queue is empty; if so
4146	 * then clear the TIM.
4147	 *
4148	 * This needs to be done before the buffer is freed as
4149	 * otherwise the node reference will have been released
4150	 * and the node may not actually exist any longer.
4151	 *
4152	 * XXX I don't like this belonging here, but it's cleaner
4153	 * to do it here right now than in all the other places
4154	 * where ath_tx_default_comp() is called.
4155 * 4156 * XXX TODO: during drain, ensure that the callback is 4157 * being called so we get a chance to update the TIM. 4158 */ 4159 if (bf->bf_node) { 4160 ATH_TX_LOCK(sc); 4161 ath_tx_update_tim(sc, bf->bf_node, 0); 4162 ATH_TX_UNLOCK(sc); 4163 } 4164 4165 /* 4166 * Do any tx complete callback. Note this must 4167 * be done before releasing the node reference. 4168 * This will free the mbuf, release the net80211 4169 * node and recycle the ath_buf. 4170 */ 4171 ath_tx_freebuf(sc, bf, st); 4172} 4173 4174/* 4175 * Update rate control with the given completion status. 4176 */ 4177void 4178ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 4179 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen, 4180 int nframes, int nbad) 4181{ 4182 struct ath_node *an; 4183 4184 /* Only for unicast frames */ 4185 if (ni == NULL) 4186 return; 4187 4188 an = ATH_NODE(ni); 4189 ATH_NODE_UNLOCK_ASSERT(an); 4190 4191 if ((ts->ts_status & HAL_TXERR_FILT) == 0) { 4192 ATH_NODE_LOCK(an); 4193 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad); 4194 ATH_NODE_UNLOCK(an); 4195 } 4196} 4197 4198/* 4199 * Process the completion of the given buffer. 4200 * 4201 * This calls the rate control update and then the buffer completion. 4202 * This will either free the buffer or requeue it. In any case, the 4203 * bf pointer should be treated as invalid after this function is called. 4204 */ 4205void 4206ath_tx_process_buf_completion(struct ath_softc *sc, struct ath_txq *txq, 4207 struct ath_tx_status *ts, struct ath_buf *bf) 4208{ 4209 struct ieee80211_node *ni = bf->bf_node; 4210 4211 ATH_TX_UNLOCK_ASSERT(sc); 4212 ATH_TXQ_UNLOCK_ASSERT(txq); 4213 4214 /* If unicast frame, update general statistics */ 4215 if (ni != NULL) { 4216 /* update statistics */ 4217 ath_tx_update_stats(sc, ts, bf); 4218 } 4219 4220 /* 4221 * Call the completion handler. 4222 * The completion handler is responsible for 4223 * calling the rate control code. 4224 * 4225 * Frames with no completion handler get the 4226 * rate control code called here. 4227 */ 4228 if (bf->bf_comp == NULL) { 4229 if ((ts->ts_status & HAL_TXERR_FILT) == 0 && 4230 (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) { 4231 /* 4232 * XXX assume this isn't an aggregate 4233 * frame. 4234 */ 4235 ath_tx_update_ratectrl(sc, ni, 4236 bf->bf_state.bfs_rc, ts, 4237 bf->bf_state.bfs_pktlen, 1, 4238 (ts->ts_status == 0 ? 0 : 1)); 4239 } 4240 ath_tx_default_comp(sc, bf, 0); 4241 } else 4242 bf->bf_comp(sc, bf, 0); 4243} 4244 4245 4246 4247/* 4248 * Process completed xmit descriptors from the specified queue. 4249 * Kick the packet scheduler if needed. This can occur from this 4250 * particular task. 
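 *
 * Returns the number of frames that completed successfully with an
 * ACK expected; the ath_tx_proc*() handlers below use a non-zero
 * return to refresh sc_lastrx as part of the phantom beacon-miss
 * workaround.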
4251 */ 4252static int 4253ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) 4254{ 4255 struct ath_hal *ah = sc->sc_ah; 4256 struct ath_buf *bf; 4257 struct ath_desc *ds; 4258 struct ath_tx_status *ts; 4259 struct ieee80211_node *ni; 4260#ifdef IEEE80211_SUPPORT_SUPERG 4261 struct ieee80211com *ic = &sc->sc_ic; 4262#endif /* IEEE80211_SUPPORT_SUPERG */ 4263 int nacked; 4264 HAL_STATUS status; 4265 4266 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", 4267 __func__, txq->axq_qnum, 4268 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 4269 txq->axq_link); 4270 4271 ATH_KTR(sc, ATH_KTR_TXCOMP, 4, 4272 "ath_tx_processq: txq=%u head %p link %p depth %p", 4273 txq->axq_qnum, 4274 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 4275 txq->axq_link, 4276 txq->axq_depth); 4277 4278 nacked = 0; 4279 for (;;) { 4280 ATH_TXQ_LOCK(txq); 4281 txq->axq_intrcnt = 0; /* reset periodic desc intr count */ 4282 bf = TAILQ_FIRST(&txq->axq_q); 4283 if (bf == NULL) { 4284 ATH_TXQ_UNLOCK(txq); 4285 break; 4286 } 4287 ds = bf->bf_lastds; /* XXX must be setup correctly! */ 4288 ts = &bf->bf_status.ds_txstat; 4289 4290 status = ath_hal_txprocdesc(ah, ds, ts); 4291#ifdef ATH_DEBUG 4292 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) 4293 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 4294 status == HAL_OK); 4295 else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0)) 4296 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 4297 status == HAL_OK); 4298#endif 4299#ifdef ATH_DEBUG_ALQ 4300 if (if_ath_alq_checkdebug(&sc->sc_alq, 4301 ATH_ALQ_EDMA_TXSTATUS)) { 4302 if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS, 4303 sc->sc_tx_statuslen, 4304 (char *) ds); 4305 } 4306#endif 4307 4308 if (status == HAL_EINPROGRESS) { 4309 ATH_KTR(sc, ATH_KTR_TXCOMP, 3, 4310 "ath_tx_processq: txq=%u, bf=%p ds=%p, HAL_EINPROGRESS", 4311 txq->axq_qnum, bf, ds); 4312 ATH_TXQ_UNLOCK(txq); 4313 break; 4314 } 4315 ATH_TXQ_REMOVE(txq, bf, bf_list); 4316 4317 /* 4318 * Sanity check. 4319 */ 4320 if (txq->axq_qnum != bf->bf_state.bfs_tx_queue) { 4321 device_printf(sc->sc_dev, 4322 "%s: TXQ=%d: bf=%p, bfs_tx_queue=%d\n", 4323 __func__, 4324 txq->axq_qnum, 4325 bf, 4326 bf->bf_state.bfs_tx_queue); 4327 } 4328 if (txq->axq_qnum != bf->bf_last->bf_state.bfs_tx_queue) { 4329 device_printf(sc->sc_dev, 4330 "%s: TXQ=%d: bf_last=%p, bfs_tx_queue=%d\n", 4331 __func__, 4332 txq->axq_qnum, 4333 bf->bf_last, 4334 bf->bf_last->bf_state.bfs_tx_queue); 4335 } 4336 4337#if 0 4338 if (txq->axq_depth > 0) { 4339 /* 4340 * More frames follow. Mark the buffer busy 4341 * so it's not re-used while the hardware may 4342 * still re-read the link field in the descriptor. 4343 * 4344 * Use the last buffer in an aggregate as that 4345 * is where the hardware may be - intermediate 4346 * descriptors won't be "busy". 4347 */ 4348 bf->bf_last->bf_flags |= ATH_BUF_BUSY; 4349 } else 4350 txq->axq_link = NULL; 4351#else 4352 bf->bf_last->bf_flags |= ATH_BUF_BUSY; 4353#endif 4354 if (bf->bf_state.bfs_aggr) 4355 txq->axq_aggr_depth--; 4356 4357 ni = bf->bf_node; 4358 4359 ATH_KTR(sc, ATH_KTR_TXCOMP, 5, 4360 "ath_tx_processq: txq=%u, bf=%p, ds=%p, ni=%p, ts_status=0x%08x", 4361 txq->axq_qnum, bf, ds, ni, ts->ts_status); 4362 /* 4363 * If unicast frame was ack'd update RSSI, 4364 * including the last rx time used to 4365 * workaround phantom bmiss interrupts. 
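		 *
		 * (ATH_RSSI_LPF() is roughly an exponential moving average
		 * over the last handful of samples - see if_athvar.h for
		 * the real definition; this description is only a sketch.)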
4366 */ 4367 if (ni != NULL && ts->ts_status == 0 && 4368 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) { 4369 nacked++; 4370 sc->sc_stats.ast_tx_rssi = ts->ts_rssi; 4371 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, 4372 ts->ts_rssi); 4373 } 4374 ATH_TXQ_UNLOCK(txq); 4375 4376 /* 4377 * Update statistics and call completion 4378 */ 4379 ath_tx_process_buf_completion(sc, txq, ts, bf); 4380 4381 /* XXX at this point, bf and ni may be totally invalid */ 4382 } 4383#ifdef IEEE80211_SUPPORT_SUPERG 4384 /* 4385 * Flush fast-frame staging queue when traffic slows. 4386 */ 4387 if (txq->axq_depth <= 1) 4388 ieee80211_ff_flush(ic, txq->axq_ac); 4389#endif 4390 4391 /* Kick the software TXQ scheduler */ 4392 if (dosched) { 4393 ATH_TX_LOCK(sc); 4394 ath_txq_sched(sc, txq); 4395 ATH_TX_UNLOCK(sc); 4396 } 4397 4398 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, 4399 "ath_tx_processq: txq=%u: done", 4400 txq->axq_qnum); 4401 4402 return nacked; 4403} 4404 4405#define TXQACTIVE(t, q) ( (t) & (1 << (q))) 4406 4407/* 4408 * Deferred processing of transmit interrupt; special-cased 4409 * for a single hardware transmit queue (e.g. 5210 and 5211). 4410 */ 4411static void 4412ath_tx_proc_q0(void *arg, int npending) 4413{ 4414 struct ath_softc *sc = arg; 4415 uint32_t txqs; 4416 4417 ATH_PCU_LOCK(sc); 4418 sc->sc_txproc_cnt++; 4419 txqs = sc->sc_txq_active; 4420 sc->sc_txq_active &= ~txqs; 4421 ATH_PCU_UNLOCK(sc); 4422 4423 ATH_LOCK(sc); 4424 ath_power_set_power_state(sc, HAL_PM_AWAKE); 4425 ATH_UNLOCK(sc); 4426 4427 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, 4428 "ath_tx_proc_q0: txqs=0x%08x", txqs); 4429 4430 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1)) 4431 /* XXX why is lastrx updated in tx code? */ 4432 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 4433 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 4434 ath_tx_processq(sc, sc->sc_cabq, 1); 4435 sc->sc_wd_timer = 0; 4436 4437 if (sc->sc_softled) 4438 ath_led_event(sc, sc->sc_txrix); 4439 4440 ATH_PCU_LOCK(sc); 4441 sc->sc_txproc_cnt--; 4442 ATH_PCU_UNLOCK(sc); 4443 4444 ATH_LOCK(sc); 4445 ath_power_restore_power_state(sc); 4446 ATH_UNLOCK(sc); 4447 4448 ath_tx_kick(sc); 4449} 4450 4451/* 4452 * Deferred processing of transmit interrupt; special-cased 4453 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). 4454 */ 4455static void 4456ath_tx_proc_q0123(void *arg, int npending) 4457{ 4458 struct ath_softc *sc = arg; 4459 int nacked; 4460 uint32_t txqs; 4461 4462 ATH_PCU_LOCK(sc); 4463 sc->sc_txproc_cnt++; 4464 txqs = sc->sc_txq_active; 4465 sc->sc_txq_active &= ~txqs; 4466 ATH_PCU_UNLOCK(sc); 4467 4468 ATH_LOCK(sc); 4469 ath_power_set_power_state(sc, HAL_PM_AWAKE); 4470 ATH_UNLOCK(sc); 4471 4472 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, 4473 "ath_tx_proc_q0123: txqs=0x%08x", txqs); 4474 4475 /* 4476 * Process each active queue. 
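	 *
	 * txqs is the snapshot of sc_txq_active latched above and
	 * TXQACTIVE() simply tests one bit of it; e.g. txqs == 0x9 means
	 * queues 0 and 3 posted completions, so only those two are
	 * walked here.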
4477 */ 4478 nacked = 0; 4479 if (TXQACTIVE(txqs, 0)) 4480 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1); 4481 if (TXQACTIVE(txqs, 1)) 4482 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1); 4483 if (TXQACTIVE(txqs, 2)) 4484 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1); 4485 if (TXQACTIVE(txqs, 3)) 4486 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1); 4487 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 4488 ath_tx_processq(sc, sc->sc_cabq, 1); 4489 if (nacked) 4490 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 4491 4492 sc->sc_wd_timer = 0; 4493 4494 if (sc->sc_softled) 4495 ath_led_event(sc, sc->sc_txrix); 4496 4497 ATH_PCU_LOCK(sc); 4498 sc->sc_txproc_cnt--; 4499 ATH_PCU_UNLOCK(sc); 4500 4501 ATH_LOCK(sc); 4502 ath_power_restore_power_state(sc); 4503 ATH_UNLOCK(sc); 4504 4505 ath_tx_kick(sc); 4506} 4507 4508/* 4509 * Deferred processing of transmit interrupt. 4510 */ 4511static void 4512ath_tx_proc(void *arg, int npending) 4513{ 4514 struct ath_softc *sc = arg; 4515 int i, nacked; 4516 uint32_t txqs; 4517 4518 ATH_PCU_LOCK(sc); 4519 sc->sc_txproc_cnt++; 4520 txqs = sc->sc_txq_active; 4521 sc->sc_txq_active &= ~txqs; 4522 ATH_PCU_UNLOCK(sc); 4523 4524 ATH_LOCK(sc); 4525 ath_power_set_power_state(sc, HAL_PM_AWAKE); 4526 ATH_UNLOCK(sc); 4527 4528 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc: txqs=0x%08x", txqs); 4529 4530 /* 4531 * Process each active queue. 4532 */ 4533 nacked = 0; 4534 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 4535 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i)) 4536 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1); 4537 if (nacked) 4538 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 4539 4540 sc->sc_wd_timer = 0; 4541 4542 if (sc->sc_softled) 4543 ath_led_event(sc, sc->sc_txrix); 4544 4545 ATH_PCU_LOCK(sc); 4546 sc->sc_txproc_cnt--; 4547 ATH_PCU_UNLOCK(sc); 4548 4549 ATH_LOCK(sc); 4550 ath_power_restore_power_state(sc); 4551 ATH_UNLOCK(sc); 4552 4553 ath_tx_kick(sc); 4554} 4555#undef TXQACTIVE 4556 4557/* 4558 * Deferred processing of TXQ rescheduling. 4559 */ 4560static void 4561ath_txq_sched_tasklet(void *arg, int npending) 4562{ 4563 struct ath_softc *sc = arg; 4564 int i; 4565 4566 /* XXX is skipping ok? 
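	 * (i.e. if a reset is pending (sc_inreset_cnt > 0) the disabled
	 * block below would bail out early; the open question is whether
	 * running the software scheduler concurrently with a reset is
	 * harmful or merely wasted work.)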
*/ 4567 ATH_PCU_LOCK(sc); 4568#if 0 4569 if (sc->sc_inreset_cnt > 0) { 4570 device_printf(sc->sc_dev, 4571 "%s: sc_inreset_cnt > 0; skipping\n", __func__); 4572 ATH_PCU_UNLOCK(sc); 4573 return; 4574 } 4575#endif 4576 sc->sc_txproc_cnt++; 4577 ATH_PCU_UNLOCK(sc); 4578 4579 ATH_LOCK(sc); 4580 ath_power_set_power_state(sc, HAL_PM_AWAKE); 4581 ATH_UNLOCK(sc); 4582 4583 ATH_TX_LOCK(sc); 4584 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 4585 if (ATH_TXQ_SETUP(sc, i)) { 4586 ath_txq_sched(sc, &sc->sc_txq[i]); 4587 } 4588 } 4589 ATH_TX_UNLOCK(sc); 4590 4591 ATH_LOCK(sc); 4592 ath_power_restore_power_state(sc); 4593 ATH_UNLOCK(sc); 4594 4595 ATH_PCU_LOCK(sc); 4596 sc->sc_txproc_cnt--; 4597 ATH_PCU_UNLOCK(sc); 4598} 4599 4600void 4601ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf) 4602{ 4603 4604 ATH_TXBUF_LOCK_ASSERT(sc); 4605 4606 if (bf->bf_flags & ATH_BUF_MGMT) 4607 TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list); 4608 else { 4609 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 4610 sc->sc_txbuf_cnt++; 4611 if (sc->sc_txbuf_cnt > ath_txbuf) { 4612 device_printf(sc->sc_dev, 4613 "%s: sc_txbuf_cnt > %d?\n", 4614 __func__, 4615 ath_txbuf); 4616 sc->sc_txbuf_cnt = ath_txbuf; 4617 } 4618 } 4619} 4620 4621void 4622ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf) 4623{ 4624 4625 ATH_TXBUF_LOCK_ASSERT(sc); 4626 4627 if (bf->bf_flags & ATH_BUF_MGMT) 4628 TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list); 4629 else { 4630 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 4631 sc->sc_txbuf_cnt++; 4632 if (sc->sc_txbuf_cnt > ATH_TXBUF) { 4633 device_printf(sc->sc_dev, 4634 "%s: sc_txbuf_cnt > %d?\n", 4635 __func__, 4636 ATH_TXBUF); 4637 sc->sc_txbuf_cnt = ATH_TXBUF; 4638 } 4639 } 4640} 4641 4642/* 4643 * Free the holding buffer if it exists 4644 */ 4645void 4646ath_txq_freeholdingbuf(struct ath_softc *sc, struct ath_txq *txq) 4647{ 4648 ATH_TXBUF_UNLOCK_ASSERT(sc); 4649 ATH_TXQ_LOCK_ASSERT(txq); 4650 4651 if (txq->axq_holdingbf == NULL) 4652 return; 4653 4654 txq->axq_holdingbf->bf_flags &= ~ATH_BUF_BUSY; 4655 4656 ATH_TXBUF_LOCK(sc); 4657 ath_returnbuf_tail(sc, txq->axq_holdingbf); 4658 ATH_TXBUF_UNLOCK(sc); 4659 4660 txq->axq_holdingbf = NULL; 4661} 4662 4663/* 4664 * Add this buffer to the holding queue, freeing the previous 4665 * one if it exists. 4666 */ 4667static void 4668ath_txq_addholdingbuf(struct ath_softc *sc, struct ath_buf *bf) 4669{ 4670 struct ath_txq *txq; 4671 4672 txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue]; 4673 4674 ATH_TXBUF_UNLOCK_ASSERT(sc); 4675 ATH_TXQ_LOCK_ASSERT(txq); 4676 4677 /* XXX assert ATH_BUF_BUSY is set */ 4678 4679 /* XXX assert the tx queue is under the max number */ 4680 if (bf->bf_state.bfs_tx_queue > HAL_NUM_TX_QUEUES) { 4681 device_printf(sc->sc_dev, "%s: bf=%p: invalid tx queue (%d)\n", 4682 __func__, 4683 bf, 4684 bf->bf_state.bfs_tx_queue); 4685 bf->bf_flags &= ~ATH_BUF_BUSY; 4686 ath_returnbuf_tail(sc, bf); 4687 return; 4688 } 4689 ath_txq_freeholdingbuf(sc, txq); 4690 txq->axq_holdingbf = bf; 4691} 4692 4693/* 4694 * Return a buffer to the pool and update the 'busy' flag on the 4695 * previous 'tail' entry. 4696 * 4697 * This _must_ only be called when the buffer is involved in a completed 4698 * TX. The logic is that if it was part of an active TX, the previous 4699 * buffer on the list is now not involved in a halted TX DMA queue, waiting 4700 * for restart (eg for TDMA.) 4701 * 4702 * The caller must free the mbuf and recycle the node reference. 4703 * 4704 * XXX This method of handling busy / holding buffers is insanely stupid. 
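 * (For the record, the mechanism is: the most recently completed buffer
 * from a queue is parked in txq->axq_holdingbf instead of being put
 * straight back on the free list, because the hardware may still
 * re-read its link field; it is only recycled once a later completion
 * replaces it.  See ath_txq_addholdingbuf() / ath_txq_freeholdingbuf()
 * above.)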
4705 * It requires bf_state.bfs_tx_queue to be correctly assigned. It would 4706 * be much nicer if buffers in the processq() methods would instead be 4707 * always completed there (pushed onto a txq or ath_bufhead) so we knew 4708 * exactly what hardware queue they came from in the first place. 4709 */ 4710void 4711ath_freebuf(struct ath_softc *sc, struct ath_buf *bf) 4712{ 4713 struct ath_txq *txq; 4714 4715 txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue]; 4716 4717 KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__)); 4718 KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__)); 4719 4720 /* 4721 * If this buffer is busy, push it onto the holding queue. 4722 */ 4723 if (bf->bf_flags & ATH_BUF_BUSY) { 4724 ATH_TXQ_LOCK(txq); 4725 ath_txq_addholdingbuf(sc, bf); 4726 ATH_TXQ_UNLOCK(txq); 4727 return; 4728 } 4729 4730 /* 4731 * Not a busy buffer, so free normally 4732 */ 4733 ATH_TXBUF_LOCK(sc); 4734 ath_returnbuf_tail(sc, bf); 4735 ATH_TXBUF_UNLOCK(sc); 4736} 4737 4738/* 4739 * This is currently used by ath_tx_draintxq() and 4740 * ath_tx_tid_free_pkts(). 4741 * 4742 * It recycles a single ath_buf. 4743 */ 4744void 4745ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status) 4746{ 4747 struct ieee80211_node *ni = bf->bf_node; 4748 struct mbuf *m0 = bf->bf_m; 4749 4750 /* 4751 * Make sure that we only sync/unload if there's an mbuf. 4752 * If not (eg we cloned a buffer), the unload will have already 4753 * occurred. 4754 */ 4755 if (bf->bf_m != NULL) { 4756 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 4757 BUS_DMASYNC_POSTWRITE); 4758 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 4759 } 4760 4761 bf->bf_node = NULL; 4762 bf->bf_m = NULL; 4763 4764 /* Free the buffer, it's not needed any longer */ 4765 ath_freebuf(sc, bf); 4766 4767 /* Pass the buffer back to net80211 - completing it */ 4768 ieee80211_tx_complete(ni, m0, status); 4769} 4770 4771static struct ath_buf * 4772ath_tx_draintxq_get_one(struct ath_softc *sc, struct ath_txq *txq) 4773{ 4774 struct ath_buf *bf; 4775 4776 ATH_TXQ_LOCK_ASSERT(txq); 4777 4778 /* 4779 * Drain the FIFO queue first, then if it's 4780 * empty, move to the normal frame queue. 4781 */ 4782 bf = TAILQ_FIRST(&txq->fifo.axq_q); 4783 if (bf != NULL) { 4784 /* 4785 * Is it the last buffer in this set? 4786 * Decrement the FIFO counter. 4787 */ 4788 if (bf->bf_flags & ATH_BUF_FIFOEND) { 4789 if (txq->axq_fifo_depth == 0) { 4790 device_printf(sc->sc_dev, 4791 "%s: Q%d: fifo_depth=0, fifo.axq_depth=%d?\n", 4792 __func__, 4793 txq->axq_qnum, 4794 txq->fifo.axq_depth); 4795 } else 4796 txq->axq_fifo_depth--; 4797 } 4798 ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list); 4799 return (bf); 4800 } 4801 4802 /* 4803 * Debugging! 4804 */ 4805 if (txq->axq_fifo_depth != 0 || txq->fifo.axq_depth != 0) { 4806 device_printf(sc->sc_dev, 4807 "%s: Q%d: fifo_depth=%d, fifo.axq_depth=%d\n", 4808 __func__, 4809 txq->axq_qnum, 4810 txq->axq_fifo_depth, 4811 txq->fifo.axq_depth); 4812 } 4813 4814 /* 4815 * Now drain the pending queue. 
4816 */ 4817 bf = TAILQ_FIRST(&txq->axq_q); 4818 if (bf == NULL) { 4819 txq->axq_link = NULL; 4820 return (NULL); 4821 } 4822 ATH_TXQ_REMOVE(txq, bf, bf_list); 4823 return (bf); 4824} 4825 4826void 4827ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) 4828{ 4829#ifdef ATH_DEBUG 4830 struct ath_hal *ah = sc->sc_ah; 4831#endif 4832 struct ath_buf *bf; 4833 u_int ix; 4834 4835 /* 4836 * NB: this assumes output has been stopped and 4837 * we do not need to block ath_tx_proc 4838 */ 4839 for (ix = 0;; ix++) { 4840 ATH_TXQ_LOCK(txq); 4841 bf = ath_tx_draintxq_get_one(sc, txq); 4842 if (bf == NULL) { 4843 ATH_TXQ_UNLOCK(txq); 4844 break; 4845 } 4846 if (bf->bf_state.bfs_aggr) 4847 txq->axq_aggr_depth--; 4848#ifdef ATH_DEBUG 4849 if (sc->sc_debug & ATH_DEBUG_RESET) { 4850 struct ieee80211com *ic = &sc->sc_ic; 4851 int status = 0; 4852 4853 /* 4854 * EDMA operation has a TX completion FIFO 4855 * separate from the TX descriptor, so this 4856 * method of checking the "completion" status 4857 * is wrong. 4858 */ 4859 if (! sc->sc_isedma) { 4860 status = (ath_hal_txprocdesc(ah, 4861 bf->bf_lastds, 4862 &bf->bf_status.ds_txstat) == HAL_OK); 4863 } 4864 ath_printtxbuf(sc, bf, txq->axq_qnum, ix, status); 4865 ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *), 4866 bf->bf_m->m_len, 0, -1); 4867 } 4868#endif /* ATH_DEBUG */ 4869 /* 4870 * Since we're now doing magic in the completion 4871 * functions, we -must- call it for aggregation 4872 * destinations or BAW tracking will get upset. 4873 */ 4874 /* 4875 * Clear ATH_BUF_BUSY; the completion handler 4876 * will free the buffer. 4877 */ 4878 ATH_TXQ_UNLOCK(txq); 4879 bf->bf_flags &= ~ATH_BUF_BUSY; 4880 if (bf->bf_comp) 4881 bf->bf_comp(sc, bf, 1); 4882 else 4883 ath_tx_default_comp(sc, bf, 1); 4884 } 4885 4886 /* 4887 * Free the holding buffer if it exists 4888 */ 4889 ATH_TXQ_LOCK(txq); 4890 ath_txq_freeholdingbuf(sc, txq); 4891 ATH_TXQ_UNLOCK(txq); 4892 4893 /* 4894 * Drain software queued frames which are on 4895 * active TIDs. 4896 */ 4897 ath_tx_txq_drain(sc, txq); 4898} 4899 4900static void 4901ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) 4902{ 4903 struct ath_hal *ah = sc->sc_ah; 4904 4905 ATH_TXQ_LOCK_ASSERT(txq); 4906 4907 DPRINTF(sc, ATH_DEBUG_RESET, 4908 "%s: tx queue [%u] %p, active=%d, hwpending=%d, flags 0x%08x, " 4909 "link %p, holdingbf=%p\n", 4910 __func__, 4911 txq->axq_qnum, 4912 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), 4913 (int) (!! ath_hal_txqenabled(ah, txq->axq_qnum)), 4914 (int) ath_hal_numtxpending(ah, txq->axq_qnum), 4915 txq->axq_flags, 4916 txq->axq_link, 4917 txq->axq_holdingbf); 4918 4919 (void) ath_hal_stoptxdma(ah, txq->axq_qnum); 4920 /* We've stopped TX DMA, so mark this as stopped. 
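	 * (ATH_TXQ_PUTRUNNING roughly means "a buffer has been handed to
	 * the hardware and DMA is live"; clearing it here is what forces
	 * the next handoff to re-prime the queue rather than just chain
	 * onto a stopped one.  This reading is a sketch - the flag's
	 * authoritative use is in the TX handoff path.)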
*/ 4921 txq->axq_flags &= ~ATH_TXQ_PUTRUNNING; 4922 4923#ifdef ATH_DEBUG 4924 if ((sc->sc_debug & ATH_DEBUG_RESET) 4925 && (txq->axq_holdingbf != NULL)) { 4926 ath_printtxbuf(sc, txq->axq_holdingbf, txq->axq_qnum, 0, 0); 4927 } 4928#endif 4929} 4930 4931int 4932ath_stoptxdma(struct ath_softc *sc) 4933{ 4934 struct ath_hal *ah = sc->sc_ah; 4935 int i; 4936 4937 /* XXX return value */ 4938 if (sc->sc_invalid) 4939 return 0; 4940 4941 if (!sc->sc_invalid) { 4942 /* don't touch the hardware if marked invalid */ 4943 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 4944 __func__, sc->sc_bhalq, 4945 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq), 4946 NULL); 4947 4948 /* stop the beacon queue */ 4949 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); 4950 4951 /* Stop the data queues */ 4952 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 4953 if (ATH_TXQ_SETUP(sc, i)) { 4954 ATH_TXQ_LOCK(&sc->sc_txq[i]); 4955 ath_tx_stopdma(sc, &sc->sc_txq[i]); 4956 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 4957 } 4958 } 4959 } 4960 4961 return 1; 4962} 4963 4964#ifdef ATH_DEBUG 4965void 4966ath_tx_dump(struct ath_softc *sc, struct ath_txq *txq) 4967{ 4968 struct ath_hal *ah = sc->sc_ah; 4969 struct ath_buf *bf; 4970 int i = 0; 4971 4972 if (! (sc->sc_debug & ATH_DEBUG_RESET)) 4973 return; 4974 4975 device_printf(sc->sc_dev, "%s: Q%d: begin\n", 4976 __func__, txq->axq_qnum); 4977 TAILQ_FOREACH(bf, &txq->axq_q, bf_list) { 4978 ath_printtxbuf(sc, bf, txq->axq_qnum, i, 4979 ath_hal_txprocdesc(ah, bf->bf_lastds, 4980 &bf->bf_status.ds_txstat) == HAL_OK); 4981 i++; 4982 } 4983 device_printf(sc->sc_dev, "%s: Q%d: end\n", 4984 __func__, txq->axq_qnum); 4985} 4986#endif /* ATH_DEBUG */ 4987 4988/* 4989 * Drain the transmit queues and reclaim resources. 4990 */ 4991void 4992ath_legacy_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type) 4993{ 4994 struct ath_hal *ah = sc->sc_ah; 4995 struct ath_buf *bf_last; 4996 int i; 4997 4998 (void) ath_stoptxdma(sc); 4999 5000 /* 5001 * Dump the queue contents 5002 */ 5003 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 5004 /* 5005 * XXX TODO: should we just handle the completed TX frames 5006 * here, whether or not the reset is a full one or not? 5007 */ 5008 if (ATH_TXQ_SETUP(sc, i)) { 5009#ifdef ATH_DEBUG 5010 if (sc->sc_debug & ATH_DEBUG_RESET) 5011 ath_tx_dump(sc, &sc->sc_txq[i]); 5012#endif /* ATH_DEBUG */ 5013 if (reset_type == ATH_RESET_NOLOSS) { 5014 ath_tx_processq(sc, &sc->sc_txq[i], 0); 5015 ATH_TXQ_LOCK(&sc->sc_txq[i]); 5016 /* 5017 * Free the holding buffer; DMA is now 5018 * stopped. 5019 */ 5020 ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]); 5021 /* 5022 * Setup the link pointer to be the 5023 * _last_ buffer/descriptor in the list. 5024 * If there's nothing in the list, set it 5025 * to NULL. 
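				 *
				 * (The intent: after a no-loss reset the
				 * queued frames are kept, so axq_link must
				 * again point at the link field of the last
				 * surviving descriptor; the next frame queued
				 * is then chained behind it instead of
				 * clobbering the queue.  With an empty queue
				 * there is nothing to chain to, hence NULL.)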
5026 */ 5027 bf_last = ATH_TXQ_LAST(&sc->sc_txq[i], 5028 axq_q_s); 5029 if (bf_last != NULL) { 5030 ath_hal_gettxdesclinkptr(ah, 5031 bf_last->bf_lastds, 5032 &sc->sc_txq[i].axq_link); 5033 } else { 5034 sc->sc_txq[i].axq_link = NULL; 5035 } 5036 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 5037 } else 5038 ath_tx_draintxq(sc, &sc->sc_txq[i]); 5039 } 5040 } 5041#ifdef ATH_DEBUG 5042 if (sc->sc_debug & ATH_DEBUG_RESET) { 5043 struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf); 5044 if (bf != NULL && bf->bf_m != NULL) { 5045 ath_printtxbuf(sc, bf, sc->sc_bhalq, 0, 5046 ath_hal_txprocdesc(ah, bf->bf_lastds, 5047 &bf->bf_status.ds_txstat) == HAL_OK); 5048 ieee80211_dump_pkt(&sc->sc_ic, 5049 mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len, 5050 0, -1); 5051 } 5052 } 5053#endif /* ATH_DEBUG */ 5054 sc->sc_wd_timer = 0; 5055} 5056 5057/* 5058 * Update internal state after a channel change. 5059 */ 5060static void 5061ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) 5062{ 5063 enum ieee80211_phymode mode; 5064 5065 /* 5066 * Change channels and update the h/w rate map 5067 * if we're switching; e.g. 11a to 11b/g. 5068 */ 5069 mode = ieee80211_chan2mode(chan); 5070 if (mode != sc->sc_curmode) 5071 ath_setcurmode(sc, mode); 5072 sc->sc_curchan = chan; 5073} 5074 5075/* 5076 * Set/change channels. If the channel is really being changed, 5077 * it's done by resetting the chip. To accomplish this we must 5078 * first cleanup any pending DMA, then restart stuff after a la 5079 * ath_init. 5080 */ 5081static int 5082ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 5083{ 5084 struct ieee80211com *ic = &sc->sc_ic; 5085 struct ath_hal *ah = sc->sc_ah; 5086 int ret = 0; 5087 5088 /* Treat this as an interface reset */ 5089 ATH_PCU_UNLOCK_ASSERT(sc); 5090 ATH_UNLOCK_ASSERT(sc); 5091 5092 /* (Try to) stop TX/RX from occurring */ 5093 taskqueue_block(sc->sc_tq); 5094 5095 ATH_PCU_LOCK(sc); 5096 5097 /* Disable interrupts */ 5098 ath_hal_intrset(ah, 0); 5099 5100 /* Stop new RX/TX/interrupt completion */ 5101 if (ath_reset_grablock(sc, 1) == 0) { 5102 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", 5103 __func__); 5104 } 5105 5106 /* Stop pending RX/TX completion */ 5107 ath_txrx_stop_locked(sc); 5108 5109 ATH_PCU_UNLOCK(sc); 5110 5111 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n", 5112 __func__, ieee80211_chan2ieee(ic, chan), 5113 chan->ic_freq, chan->ic_flags); 5114 if (chan != sc->sc_curchan) { 5115 HAL_STATUS status; 5116 /* 5117 * To switch channels clear any pending DMA operations; 5118 * wait long enough for the RX fifo to drain, reset the 5119 * hardware at the new frequency, and then re-enable 5120 * the relevant bits of the h/w. 5121 */ 5122#if 0 5123 ath_hal_intrset(ah, 0); /* disable interrupts */ 5124#endif 5125 ath_stoprecv(sc, 1); /* turn off frame recv */ 5126 /* 5127 * First, handle completed TX/RX frames. 5128 */ 5129 ath_rx_flush(sc); 5130 ath_draintxq(sc, ATH_RESET_NOLOSS); 5131 /* 5132 * Next, flush the non-scheduled frames. 
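		 *
		 * (Roughly: the ATH_RESET_NOLOSS pass above only reaped
		 * frames the hardware had already completed; this
		 * ATH_RESET_FULL pass throws away whatever is still
		 * pending, since the channel it was queued for is going
		 * away.)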
5133		 */
5134		ath_draintxq(sc, ATH_RESET_FULL);	/* clear pending tx frames */
5135
5136		ath_update_chainmasks(sc, chan);
5137		ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
5138		    sc->sc_cur_rxchainmask);
5139		if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE,
5140		    HAL_RESET_NORMAL, &status)) {
5141			device_printf(sc->sc_dev, "%s: unable to reset "
5142			    "channel %u (%u MHz, flags 0x%x), hal status %u\n",
5143			    __func__, ieee80211_chan2ieee(ic, chan),
5144			    chan->ic_freq, chan->ic_flags, status);
5145			ret = EIO;
5146			goto finish;
5147		}
5148		sc->sc_diversity = ath_hal_getdiversity(ah);
5149
5150		ATH_RX_LOCK(sc);
5151		sc->sc_rx_stopped = 1;
5152		sc->sc_rx_resetted = 1;
5153		ATH_RX_UNLOCK(sc);
5154
5155		/* Let DFS at it in case it's a DFS channel */
5156		ath_dfs_radar_enable(sc, chan);
5157
5158		/* Let spectral at it in case spectral is enabled */
5159		ath_spectral_enable(sc, chan);
5160
5161		/*
5162		 * Let bluetooth coexistence at it in case it's needed for this
5163		 * channel
5164		 */
5165		ath_btcoex_enable(sc, ic->ic_curchan);
5166
5167		/*
5168		 * If we're doing TDMA, enforce the TXOP limitation for chips
5169		 * that support it.
5170		 */
5171		if (sc->sc_hasenforcetxop && sc->sc_tdma)
5172			ath_hal_setenforcetxop(sc->sc_ah, 1);
5173		else
5174			ath_hal_setenforcetxop(sc->sc_ah, 0);
5175
5176		/*
5177		 * Re-enable rx framework.
5178		 */
5179		if (ath_startrecv(sc) != 0) {
5180			device_printf(sc->sc_dev,
5181			    "%s: unable to restart recv logic\n", __func__);
5182			ret = EIO;
5183			goto finish;
5184		}
5185
5186		/*
5187		 * Change channels and update the h/w rate map
5188		 * if we're switching; e.g. 11a to 11b/g.
5189		 */
5190		ath_chan_change(sc, chan);
5191
5192		/*
5193		 * Reset clears the beacon timers; reset them
5194		 * here if needed.
5195		 */
5196		if (sc->sc_beacons) {		/* restart beacons */
5197#ifdef IEEE80211_SUPPORT_TDMA
5198			if (sc->sc_tdma)
5199				ath_tdma_config(sc, NULL);
5200			else
5201#endif
5202				ath_beacon_config(sc, NULL);
5203		}
5204
5205		/*
5206		 * Re-enable interrupts.
5207		 */
5208#if 0
5209		ath_hal_intrset(ah, sc->sc_imask);
5210#endif
5211	}
5212
5213finish:
5214	ATH_PCU_LOCK(sc);
5215	sc->sc_inreset_cnt--;
5216	/* XXX only do this if sc_inreset_cnt == 0? */
5217	ath_hal_intrset(ah, sc->sc_imask);
5218	ATH_PCU_UNLOCK(sc);
5219
5220	ath_txrx_start(sc);
5221	/* XXX ath_start? */
5222
5223	return ret;
5224}
5225
5226/*
5227 * Periodically recalibrate the PHY to account
5228 * for temperature/environment changes.
5229 */
5230static void
5231ath_calibrate(void *arg)
5232{
5233	struct ath_softc *sc = arg;
5234	struct ath_hal *ah = sc->sc_ah;
5235	struct ieee80211com *ic = &sc->sc_ic;
5236	HAL_BOOL longCal, isCalDone = AH_TRUE;
5237	HAL_BOOL aniCal, shortCal = AH_FALSE;
5238	int nextcal;
5239
5240	ATH_LOCK_ASSERT(sc);
5241
5242	/*
5243	 * Force the hardware awake for ANI work.
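	 *
	 * (ANI polling and the periodic calibration below poke baseband
	 * registers directly, which can't be done while the chip is in
	 * network sleep, so bump the power state here and restore it on
	 * every exit path.)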
5244 */ 5245 ath_power_set_power_state(sc, HAL_PM_AWAKE); 5246 5247 /* Skip trying to do this if we're in reset */ 5248 if (sc->sc_inreset_cnt) 5249 goto restart; 5250 5251 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */ 5252 goto restart; 5253 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz); 5254 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000); 5255 if (sc->sc_doresetcal) 5256 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000); 5257 5258 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal); 5259 if (aniCal) { 5260 sc->sc_stats.ast_ani_cal++; 5261 sc->sc_lastani = ticks; 5262 ath_hal_ani_poll(ah, sc->sc_curchan); 5263 } 5264 5265 if (longCal) { 5266 sc->sc_stats.ast_per_cal++; 5267 sc->sc_lastlongcal = ticks; 5268 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 5269 /* 5270 * Rfgain is out of bounds, reset the chip 5271 * to load new gain values. 5272 */ 5273 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 5274 "%s: rfgain change\n", __func__); 5275 sc->sc_stats.ast_per_rfgain++; 5276 sc->sc_resetcal = 0; 5277 sc->sc_doresetcal = AH_TRUE; 5278 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); 5279 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 5280 ath_power_restore_power_state(sc); 5281 return; 5282 } 5283 /* 5284 * If this long cal is after an idle period, then 5285 * reset the data collection state so we start fresh. 5286 */ 5287 if (sc->sc_resetcal) { 5288 (void) ath_hal_calreset(ah, sc->sc_curchan); 5289 sc->sc_lastcalreset = ticks; 5290 sc->sc_lastshortcal = ticks; 5291 sc->sc_resetcal = 0; 5292 sc->sc_doresetcal = AH_TRUE; 5293 } 5294 } 5295 5296 /* Only call if we're doing a short/long cal, not for ANI calibration */ 5297 if (shortCal || longCal) { 5298 isCalDone = AH_FALSE; 5299 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) { 5300 if (longCal) { 5301 /* 5302 * Calibrate noise floor data again in case of change. 5303 */ 5304 ath_hal_process_noisefloor(ah); 5305 } 5306 } else { 5307 DPRINTF(sc, ATH_DEBUG_ANY, 5308 "%s: calibration of channel %u failed\n", 5309 __func__, sc->sc_curchan->ic_freq); 5310 sc->sc_stats.ast_per_calfail++; 5311 } 5312 if (shortCal) 5313 sc->sc_lastshortcal = ticks; 5314 } 5315 if (!isCalDone) { 5316restart: 5317 /* 5318 * Use a shorter interval to potentially collect multiple 5319 * data samples required to complete calibration. Once 5320 * we're told the work is done we drop back to a longer 5321 * interval between requests. We're more aggressive doing 5322 * work when operating as an AP to improve operation right 5323 * after startup. 5324 */ 5325 sc->sc_lastshortcal = ticks; 5326 nextcal = ath_shortcalinterval*hz/1000; 5327 if (sc->sc_opmode != HAL_M_HOSTAP) 5328 nextcal *= 10; 5329 sc->sc_doresetcal = AH_TRUE; 5330 } else { 5331 /* nextcal should be the shortest time for next event */ 5332 nextcal = ath_longcalinterval*hz; 5333 if (sc->sc_lastcalreset == 0) 5334 sc->sc_lastcalreset = sc->sc_lastlongcal; 5335 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz) 5336 sc->sc_resetcal = 1; /* setup reset next trip */ 5337 sc->sc_doresetcal = AH_FALSE; 5338 } 5339 /* ANI calibration may occur more often than short/long/resetcal */ 5340 if (ath_anicalinterval > 0) 5341 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000); 5342 5343 if (nextcal != 0) { 5344 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n", 5345 __func__, nextcal, isCalDone ? 
"" : "!"); 5346 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc); 5347 } else { 5348 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", 5349 __func__); 5350 /* NB: don't rearm timer */ 5351 } 5352 /* 5353 * Restore power state now that we're done. 5354 */ 5355 ath_power_restore_power_state(sc); 5356} 5357 5358static void 5359ath_scan_start(struct ieee80211com *ic) 5360{ 5361 struct ath_softc *sc = ic->ic_softc; 5362 struct ath_hal *ah = sc->sc_ah; 5363 u_int32_t rfilt; 5364 5365 /* XXX calibration timer? */ 5366 /* XXXGL: is constant ieee80211broadcastaddr a correct choice? */ 5367 5368 ATH_LOCK(sc); 5369 sc->sc_scanning = 1; 5370 sc->sc_syncbeacon = 0; 5371 rfilt = ath_calcrxfilter(sc); 5372 ATH_UNLOCK(sc); 5373 5374 ATH_PCU_LOCK(sc); 5375 ath_hal_setrxfilter(ah, rfilt); 5376 ath_hal_setassocid(ah, ieee80211broadcastaddr, 0); 5377 ATH_PCU_UNLOCK(sc); 5378 5379 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", 5380 __func__, rfilt, ether_sprintf(ieee80211broadcastaddr)); 5381} 5382 5383static void 5384ath_scan_end(struct ieee80211com *ic) 5385{ 5386 struct ath_softc *sc = ic->ic_softc; 5387 struct ath_hal *ah = sc->sc_ah; 5388 u_int32_t rfilt; 5389 5390 ATH_LOCK(sc); 5391 sc->sc_scanning = 0; 5392 rfilt = ath_calcrxfilter(sc); 5393 ATH_UNLOCK(sc); 5394 5395 ATH_PCU_LOCK(sc); 5396 ath_hal_setrxfilter(ah, rfilt); 5397 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 5398 5399 ath_hal_process_noisefloor(ah); 5400 ATH_PCU_UNLOCK(sc); 5401 5402 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 5403 __func__, rfilt, ether_sprintf(sc->sc_curbssid), 5404 sc->sc_curaid); 5405} 5406 5407#ifdef ATH_ENABLE_11N 5408/* 5409 * For now, just do a channel change. 5410 * 5411 * Later, we'll go through the hard slog of suspending tx/rx, changing rate 5412 * control state and resetting the hardware without dropping frames out 5413 * of the queue. 5414 * 5415 * The unfortunate trouble here is making absolutely sure that the 5416 * channel width change has propagated enough so the hardware 5417 * absolutely isn't handed bogus frames for it's current operating 5418 * mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and 5419 * does occur in parallel, we need to make certain we've blocked 5420 * any further ongoing TX (and RX, that can cause raw TX) 5421 * before we do this. 5422 */ 5423static void 5424ath_update_chw(struct ieee80211com *ic) 5425{ 5426 struct ath_softc *sc = ic->ic_softc; 5427 5428 DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__); 5429 ath_set_channel(ic); 5430} 5431#endif /* ATH_ENABLE_11N */ 5432 5433static void 5434ath_set_channel(struct ieee80211com *ic) 5435{ 5436 struct ath_softc *sc = ic->ic_softc; 5437 5438 ATH_LOCK(sc); 5439 ath_power_set_power_state(sc, HAL_PM_AWAKE); 5440 ATH_UNLOCK(sc); 5441 5442 (void) ath_chan_set(sc, ic->ic_curchan); 5443 /* 5444 * If we are returning to our bss channel then mark state 5445 * so the next recv'd beacon's tsf will be used to sync the 5446 * beacon timers. Note that since we only hear beacons in 5447 * sta/ibss mode this has no effect in other operating modes. 5448 */ 5449 ATH_LOCK(sc); 5450 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan) 5451 sc->sc_syncbeacon = 1; 5452 ath_power_restore_power_state(sc); 5453 ATH_UNLOCK(sc); 5454} 5455 5456/* 5457 * Walk the vap list and check if there any vap's in RUN state. 
5458 */ 5459static int 5460ath_isanyrunningvaps(struct ieee80211vap *this) 5461{ 5462 struct ieee80211com *ic = this->iv_ic; 5463 struct ieee80211vap *vap; 5464 5465 IEEE80211_LOCK_ASSERT(ic); 5466 5467 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { 5468 if (vap != this && vap->iv_state >= IEEE80211_S_RUN) 5469 return 1; 5470 } 5471 return 0; 5472} 5473 5474static int 5475ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 5476{ 5477 struct ieee80211com *ic = vap->iv_ic; 5478 struct ath_softc *sc = ic->ic_softc; 5479 struct ath_vap *avp = ATH_VAP(vap); 5480 struct ath_hal *ah = sc->sc_ah; 5481 struct ieee80211_node *ni = NULL; 5482 int i, error, stamode; 5483 u_int32_t rfilt; 5484 int csa_run_transition = 0; 5485 enum ieee80211_state ostate = vap->iv_state; 5486 5487 static const HAL_LED_STATE leds[] = { 5488 HAL_LED_INIT, /* IEEE80211_S_INIT */ 5489 HAL_LED_SCAN, /* IEEE80211_S_SCAN */ 5490 HAL_LED_AUTH, /* IEEE80211_S_AUTH */ 5491 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ 5492 HAL_LED_RUN, /* IEEE80211_S_CAC */ 5493 HAL_LED_RUN, /* IEEE80211_S_RUN */ 5494 HAL_LED_RUN, /* IEEE80211_S_CSA */ 5495 HAL_LED_RUN, /* IEEE80211_S_SLEEP */ 5496 }; 5497 5498 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, 5499 ieee80211_state_name[ostate], 5500 ieee80211_state_name[nstate]); 5501 5502 /* 5503 * net80211 _should_ have the comlock asserted at this point. 5504 * There are some comments around the calls to vap->iv_newstate 5505 * which indicate that it (newstate) may end up dropping the 5506 * lock. This and the subsequent lock assert check after newstate 5507 * are an attempt to catch these and figure out how/why. 5508 */ 5509 IEEE80211_LOCK_ASSERT(ic); 5510 5511 /* Before we touch the hardware - wake it up */ 5512 ATH_LOCK(sc); 5513 /* 5514 * If the NIC is in anything other than SLEEP state, 5515 * we need to ensure that self-generated frames are 5516 * set for PWRMGT=0. Otherwise we may end up with 5517 * strange situations. 5518 * 5519 * XXX TODO: is this actually the case? :-) 5520 */ 5521 if (nstate != IEEE80211_S_SLEEP) 5522 ath_power_setselfgen(sc, HAL_PM_AWAKE); 5523 5524 /* 5525 * Now, wake the thing up. 5526 */ 5527 ath_power_set_power_state(sc, HAL_PM_AWAKE); 5528 5529 /* 5530 * And stop the calibration callout whilst we have 5531 * ATH_LOCK held. 5532 */ 5533 callout_stop(&sc->sc_cal_ch); 5534 ATH_UNLOCK(sc); 5535 5536 if (ostate == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN) 5537 csa_run_transition = 1; 5538 5539 ath_hal_setledstate(ah, leds[nstate]); /* set LED */ 5540 5541 if (nstate == IEEE80211_S_SCAN) { 5542 /* 5543 * Scanning: turn off beacon miss and don't beacon. 5544 * Mark beacon state so when we reach RUN state we'll 5545 * [re]setup beacons. Unblock the task q thread so 5546 * deferred interrupt processing is done. 5547 */ 5548 5549 /* Ensure we stay awake during scan */ 5550 ATH_LOCK(sc); 5551 ath_power_setselfgen(sc, HAL_PM_AWAKE); 5552 ath_power_setpower(sc, HAL_PM_AWAKE); 5553 ATH_UNLOCK(sc); 5554 5555 ath_hal_intrset(ah, 5556 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); 5557 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 5558 sc->sc_beacons = 0; 5559 taskqueue_unblock(sc->sc_tq); 5560 } 5561 5562 ni = ieee80211_ref_node(vap->iv_bss); 5563 rfilt = ath_calcrxfilter(sc); 5564 stamode = (vap->iv_opmode == IEEE80211_M_STA || 5565 vap->iv_opmode == IEEE80211_M_AHDEMO || 5566 vap->iv_opmode == IEEE80211_M_IBSS); 5567 5568 /* 5569 * XXX Dont need to do this (and others) if we've transitioned 5570 * from SLEEP->RUN. 
5571 */ 5572 if (stamode && nstate == IEEE80211_S_RUN) { 5573 sc->sc_curaid = ni->ni_associd; 5574 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid); 5575 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 5576 } 5577 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 5578 __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid); 5579 ath_hal_setrxfilter(ah, rfilt); 5580 5581 /* XXX is this to restore keycache on resume? */ 5582 if (vap->iv_opmode != IEEE80211_M_STA && 5583 (vap->iv_flags & IEEE80211_F_PRIVACY)) { 5584 for (i = 0; i < IEEE80211_WEP_NKID; i++) 5585 if (ath_hal_keyisvalid(ah, i)) 5586 ath_hal_keysetmac(ah, i, ni->ni_bssid); 5587 } 5588 5589 /* 5590 * Invoke the parent method to do net80211 work. 5591 */ 5592 error = avp->av_newstate(vap, nstate, arg); 5593 if (error != 0) 5594 goto bad; 5595 5596 /* 5597 * See above: ensure av_newstate() doesn't drop the lock 5598 * on us. 5599 */ 5600 IEEE80211_LOCK_ASSERT(ic); 5601 5602 if (nstate == IEEE80211_S_RUN) { 5603 /* NB: collect bss node again, it may have changed */ 5604 ieee80211_free_node(ni); 5605 ni = ieee80211_ref_node(vap->iv_bss); 5606 5607 DPRINTF(sc, ATH_DEBUG_STATE, 5608 "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s " 5609 "capinfo 0x%04x chan %d\n", __func__, 5610 vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), 5611 ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); 5612 5613 switch (vap->iv_opmode) { 5614#ifdef IEEE80211_SUPPORT_TDMA 5615 case IEEE80211_M_AHDEMO: 5616 if ((vap->iv_caps & IEEE80211_C_TDMA) == 0) 5617 break; 5618 /* fall thru... */ 5619#endif 5620 case IEEE80211_M_HOSTAP: 5621 case IEEE80211_M_IBSS: 5622 case IEEE80211_M_MBSS: 5623 /* 5624 * Allocate and setup the beacon frame. 5625 * 5626 * Stop any previous beacon DMA. This may be 5627 * necessary, for example, when an ibss merge 5628 * causes reconfiguration; there will be a state 5629 * transition from RUN->RUN that means we may 5630 * be called with beacon transmission active. 5631 */ 5632 ath_hal_stoptxdma(ah, sc->sc_bhalq); 5633 5634 error = ath_beacon_alloc(sc, ni); 5635 if (error != 0) 5636 goto bad; 5637 /* 5638 * If joining an adhoc network defer beacon timer 5639 * configuration to the next beacon frame so we 5640 * have a current TSF to use. Otherwise we're 5641 * starting an ibss/bss so there's no need to delay; 5642 * if this is the first vap moving to RUN state, then 5643 * beacon state needs to be [re]configured. 5644 */ 5645 if (vap->iv_opmode == IEEE80211_M_IBSS && 5646 ni->ni_tstamp.tsf != 0) { 5647 sc->sc_syncbeacon = 1; 5648 } else if (!sc->sc_beacons) { 5649#ifdef IEEE80211_SUPPORT_TDMA 5650 if (vap->iv_caps & IEEE80211_C_TDMA) 5651 ath_tdma_config(sc, vap); 5652 else 5653#endif 5654 ath_beacon_config(sc, vap); 5655 sc->sc_beacons = 1; 5656 } 5657 break; 5658 case IEEE80211_M_STA: 5659 /* 5660 * Defer beacon timer configuration to the next 5661 * beacon frame so we have a current TSF to use 5662 * (any TSF collected when scanning is likely old). 5663 * However if it's due to a CSA -> RUN transition, 5664 * force a beacon update so we pick up a lack of 5665 * beacons from an AP in CAC and thus force a 5666 * scan. 5667 * 5668 * And, there's also corner cases here where 5669 * after a scan, the AP may have disappeared. 5670 * In that case, we may not receive an actual 5671 * beacon to update the beacon timer and thus we 5672 * won't get notified of the missing beacons. 
5673 */ 5674 if (ostate != IEEE80211_S_RUN && 5675 ostate != IEEE80211_S_SLEEP) { 5676 DPRINTF(sc, ATH_DEBUG_BEACON, 5677 "%s: STA; syncbeacon=1\n", __func__); 5678 sc->sc_syncbeacon = 1; 5679 5680 if (csa_run_transition) 5681 ath_beacon_config(sc, vap); 5682 5683 /* 5684 * PR: kern/175227 5685 * 5686 * Reconfigure beacons during reset; as otherwise 5687 * we won't get the beacon timers reprogrammed 5688 * after a reset and thus we won't pick up a 5689 * beacon miss interrupt. 5690 * 5691 * Hopefully we'll see a beacon before the BMISS 5692 * timer fires (too often), leading to a STA 5693 * disassociation. 5694 */ 5695 sc->sc_beacons = 1; 5696 } 5697 break; 5698 case IEEE80211_M_MONITOR: 5699 /* 5700 * Monitor mode vaps have only INIT->RUN and RUN->RUN 5701 * transitions so we must re-enable interrupts here to 5702 * handle the case of a single monitor mode vap. 5703 */ 5704 ath_hal_intrset(ah, sc->sc_imask); 5705 break; 5706 case IEEE80211_M_WDS: 5707 break; 5708 default: 5709 break; 5710 } 5711 /* 5712 * Let the hal process statistics collected during a 5713 * scan so it can provide calibrated noise floor data. 5714 */ 5715 ath_hal_process_noisefloor(ah); 5716 /* 5717 * Reset rssi stats; maybe not the best place... 5718 */ 5719 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; 5720 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; 5721 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; 5722 5723 /* 5724 * Force awake for RUN mode. 5725 */ 5726 ATH_LOCK(sc); 5727 ath_power_setselfgen(sc, HAL_PM_AWAKE); 5728 ath_power_setpower(sc, HAL_PM_AWAKE); 5729 5730 /* 5731 * Finally, start any timers and the task q thread 5732 * (in case we didn't go through SCAN state). 5733 */ 5734 if (ath_longcalinterval != 0) { 5735 /* start periodic recalibration timer */ 5736 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 5737 } else { 5738 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 5739 "%s: calibration disabled\n", __func__); 5740 } 5741 ATH_UNLOCK(sc); 5742 5743 taskqueue_unblock(sc->sc_tq); 5744 } else if (nstate == IEEE80211_S_INIT) { 5745 /* 5746 * If there are no vaps left in RUN state then 5747 * shutdown host/driver operation: 5748 * o disable interrupts 5749 * o disable the task queue thread 5750 * o mark beacon processing as stopped 5751 */ 5752 if (!ath_isanyrunningvaps(vap)) { 5753 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 5754 /* disable interrupts */ 5755 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL); 5756 taskqueue_block(sc->sc_tq); 5757 sc->sc_beacons = 0; 5758 } 5759#ifdef IEEE80211_SUPPORT_TDMA 5760 ath_hal_setcca(ah, AH_TRUE); 5761#endif 5762 } else if (nstate == IEEE80211_S_SLEEP) { 5763 /* We're going to sleep, so transition appropriately */ 5764 /* For now, only do this if we're a single STA vap */ 5765 if (sc->sc_nvaps == 1 && 5766 vap->iv_opmode == IEEE80211_M_STA) { 5767 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: syncbeacon=%d\n", __func__, sc->sc_syncbeacon); 5768 ATH_LOCK(sc); 5769 /* 5770 * Always at least set the self-generated 5771 * frame config to set PWRMGT=1. 5772 */ 5773 ath_power_setselfgen(sc, HAL_PM_NETWORK_SLEEP); 5774 5775 /* 5776 * If we're not syncing beacons, transition 5777 * to NETWORK_SLEEP. 5778 * 5779 * We stay awake if syncbeacon > 0 in case 5780 * we need to listen for some beacons otherwise 5781 * our beacon timer config may be wrong. 
5782			 */
5783			if (sc->sc_syncbeacon == 0) {
5784				ath_power_setpower(sc, HAL_PM_NETWORK_SLEEP);
5785			}
5786			ATH_UNLOCK(sc);
5787		}
5788	}
5789bad:
5790	ieee80211_free_node(ni);
5791
5792	/*
5793	 * Restore the power state - either to what it was, or
5794	 * to network_sleep if it's alright.
5795	 */
5796	ATH_LOCK(sc);
5797	ath_power_restore_power_state(sc);
5798	ATH_UNLOCK(sc);
5799	return error;
5800}
5801
5802/*
5803 * Allocate a key cache slot to the station so we can
5804 * setup a mapping from key index to node. The key cache
5805 * slot is needed for managing antenna state and for
5806 * compression when stations do not use crypto. We do
5807 * it unilaterally here; if crypto is employed this slot
5808 * will be reassigned.
5809 */
5810static void
5811ath_setup_stationkey(struct ieee80211_node *ni)
5812{
5813	struct ieee80211vap *vap = ni->ni_vap;
5814	struct ath_softc *sc = vap->iv_ic->ic_softc;
5815	ieee80211_keyix keyix, rxkeyix;
5816
5817	/* XXX should take a locked ref to vap->iv_bss */
5818	if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
5819		/*
5820		 * Key cache is full; we'll fall back to doing
5821		 * the more expensive lookup in software. Note
5822		 * this also means no h/w compression.
5823		 */
5824		/* XXX msg+statistic */
5825	} else {
5826		/* XXX locking? */
5827		ni->ni_ucastkey.wk_keyix = keyix;
5828		ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
5829		/* NB: must mark device key to get called back on delete */
5830		ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
5831		IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
5832		/* NB: this will create a pass-thru key entry */
5833		ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss);
5834	}
5835}
5836
5837/*
5838 * Setup driver-specific state for a newly associated node.
5839 * Note that we're called also on a re-associate; the isnew
5840 * param tells us if this is the first time or not.
5841 */
5842static void
5843ath_newassoc(struct ieee80211_node *ni, int isnew)
5844{
5845	struct ath_node *an = ATH_NODE(ni);
5846	struct ieee80211vap *vap = ni->ni_vap;
5847	struct ath_softc *sc = vap->iv_ic->ic_softc;
5848	const struct ieee80211_txparam *tp = ni->ni_txparms;
5849
5850	an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
5851	an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);
5852
5853	DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: reassoc; isnew=%d, is_powersave=%d\n",
5854	    __func__,
5855	    ni->ni_macaddr,
5856	    ":",
5857	    isnew,
5858	    an->an_is_powersave);
5859
5860	ATH_NODE_LOCK(an);
5861	ath_rate_newassoc(sc, an, isnew);
5862	ATH_NODE_UNLOCK(an);
5863
5864	if (isnew &&
5865	    (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
5866	    ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
5867		ath_setup_stationkey(ni);
5868
5869	/*
5870	 * If we're reassociating, make sure that any paused queues
5871	 * get unpaused.
5872	 *
5873	 * Now, we may have frames in the hardware queue for this node.
5874	 * So if we are reassociating and there are frames in the queue,
5875	 * we need to go through the cleanup path to ensure that they're
5876	 * marked as non-aggregate.
5877	 */
5878	if (!
isnew) { 5879 DPRINTF(sc, ATH_DEBUG_NODE, 5880 "%s: %6D: reassoc; is_powersave=%d\n", 5881 __func__, 5882 ni->ni_macaddr, 5883 ":", 5884 an->an_is_powersave); 5885 5886 /* XXX for now, we can't hold the lock across assoc */ 5887 ath_tx_node_reassoc(sc, an); 5888 5889 /* XXX for now, we can't hold the lock across wakeup */ 5890 if (an->an_is_powersave) 5891 ath_tx_node_wakeup(sc, an); 5892 } 5893} 5894 5895static int 5896ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg, 5897 int nchans, struct ieee80211_channel chans[]) 5898{ 5899 struct ath_softc *sc = ic->ic_softc; 5900 struct ath_hal *ah = sc->sc_ah; 5901 HAL_STATUS status; 5902 5903 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 5904 "%s: rd %u cc %u location %c%s\n", 5905 __func__, reg->regdomain, reg->country, reg->location, 5906 reg->ecm ? " ecm" : ""); 5907 5908 status = ath_hal_set_channels(ah, chans, nchans, 5909 reg->country, reg->regdomain); 5910 if (status != HAL_OK) { 5911 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n", 5912 __func__, status); 5913 return EINVAL; /* XXX */ 5914 } 5915 5916 return 0; 5917} 5918 5919static void 5920ath_getradiocaps(struct ieee80211com *ic, 5921 int maxchans, int *nchans, struct ieee80211_channel chans[]) 5922{ 5923 struct ath_softc *sc = ic->ic_softc; 5924 struct ath_hal *ah = sc->sc_ah; 5925 5926 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n", 5927 __func__, SKU_DEBUG, CTRY_DEFAULT); 5928 5929 /* XXX check return */ 5930 (void) ath_hal_getchannels(ah, chans, maxchans, nchans, 5931 HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE); 5932 5933} 5934 5935static int 5936ath_getchannels(struct ath_softc *sc) 5937{ 5938 struct ieee80211com *ic = &sc->sc_ic; 5939 struct ath_hal *ah = sc->sc_ah; 5940 HAL_STATUS status; 5941 5942 /* 5943 * Collect channel set based on EEPROM contents. 5944 */ 5945 status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX, 5946 &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE); 5947 if (status != HAL_OK) { 5948 device_printf(sc->sc_dev, 5949 "%s: unable to collect channel list from hal, status %d\n", 5950 __func__, status); 5951 return EINVAL; 5952 } 5953 (void) ath_hal_getregdomain(ah, &sc->sc_eerd); 5954 ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */ 5955 /* XXX map Atheros sku's to net80211 SKU's */ 5956 /* XXX net80211 types too small */ 5957 ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd; 5958 ic->ic_regdomain.country = (uint16_t) sc->sc_eecc; 5959 ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */ 5960 ic->ic_regdomain.isocc[1] = ' '; 5961 5962 ic->ic_regdomain.ecm = 1; 5963 ic->ic_regdomain.location = 'I'; 5964 5965 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 5966 "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n", 5967 __func__, sc->sc_eerd, sc->sc_eecc, 5968 ic->ic_regdomain.regdomain, ic->ic_regdomain.country, 5969 ic->ic_regdomain.location, ic->ic_regdomain.ecm ? 
" ecm" : ""); 5970 return 0; 5971} 5972 5973static int 5974ath_rate_setup(struct ath_softc *sc, u_int mode) 5975{ 5976 struct ath_hal *ah = sc->sc_ah; 5977 const HAL_RATE_TABLE *rt; 5978 5979 switch (mode) { 5980 case IEEE80211_MODE_11A: 5981 rt = ath_hal_getratetable(ah, HAL_MODE_11A); 5982 break; 5983 case IEEE80211_MODE_HALF: 5984 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE); 5985 break; 5986 case IEEE80211_MODE_QUARTER: 5987 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE); 5988 break; 5989 case IEEE80211_MODE_11B: 5990 rt = ath_hal_getratetable(ah, HAL_MODE_11B); 5991 break; 5992 case IEEE80211_MODE_11G: 5993 rt = ath_hal_getratetable(ah, HAL_MODE_11G); 5994 break; 5995 case IEEE80211_MODE_TURBO_A: 5996 rt = ath_hal_getratetable(ah, HAL_MODE_108A); 5997 break; 5998 case IEEE80211_MODE_TURBO_G: 5999 rt = ath_hal_getratetable(ah, HAL_MODE_108G); 6000 break; 6001 case IEEE80211_MODE_STURBO_A: 6002 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); 6003 break; 6004 case IEEE80211_MODE_11NA: 6005 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20); 6006 break; 6007 case IEEE80211_MODE_11NG: 6008 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20); 6009 break; 6010 default: 6011 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", 6012 __func__, mode); 6013 return 0; 6014 } 6015 sc->sc_rates[mode] = rt; 6016 return (rt != NULL); 6017} 6018 6019static void 6020ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 6021{ 6022 /* NB: on/off times from the Atheros NDIS driver, w/ permission */ 6023 static const struct { 6024 u_int rate; /* tx/rx 802.11 rate */ 6025 u_int16_t timeOn; /* LED on time (ms) */ 6026 u_int16_t timeOff; /* LED off time (ms) */ 6027 } blinkrates[] = { 6028 { 108, 40, 10 }, 6029 { 96, 44, 11 }, 6030 { 72, 50, 13 }, 6031 { 48, 57, 14 }, 6032 { 36, 67, 16 }, 6033 { 24, 80, 20 }, 6034 { 22, 100, 25 }, 6035 { 18, 133, 34 }, 6036 { 12, 160, 40 }, 6037 { 10, 200, 50 }, 6038 { 6, 240, 58 }, 6039 { 4, 267, 66 }, 6040 { 2, 400, 100 }, 6041 { 0, 500, 130 }, 6042 /* XXX half/quarter rates */ 6043 }; 6044 const HAL_RATE_TABLE *rt; 6045 int i, j; 6046 6047 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); 6048 rt = sc->sc_rates[mode]; 6049 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); 6050 for (i = 0; i < rt->rateCount; i++) { 6051 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 6052 if (rt->info[i].phy != IEEE80211_T_HT) 6053 sc->sc_rixmap[ieeerate] = i; 6054 else 6055 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i; 6056 } 6057 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); 6058 for (i = 0; i < nitems(sc->sc_hwmap); i++) { 6059 if (i >= rt->rateCount) { 6060 sc->sc_hwmap[i].ledon = (500 * hz) / 1000; 6061 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000; 6062 continue; 6063 } 6064 sc->sc_hwmap[i].ieeerate = 6065 rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 6066 if (rt->info[i].phy == IEEE80211_T_HT) 6067 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS; 6068 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; 6069 if (rt->info[i].shortPreamble || 6070 rt->info[i].phy == IEEE80211_T_OFDM) 6071 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE; 6072 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags; 6073 for (j = 0; j < nitems(blinkrates)-1; j++) 6074 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate) 6075 break; 6076 /* NB: this uses the last entry if the rate isn't found */ 6077 /* XXX beware of overlow */ 6078 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000; 6079 sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * 
	}
	sc->sc_currates = rt;
	sc->sc_curmode = mode;
	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 */
	if (mode == IEEE80211_MODE_11G)
		sc->sc_protrix = ath_tx_findrix(sc, 2*2);
	else
		sc->sc_protrix = ath_tx_findrix(sc, 2*1);
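	/*
	 * NB: the values handed to ath_tx_findrix() are IEEE rate codes
	 * in 500kb/s units, so 2*2 selects 2Mb/s and 2*1 selects 1Mb/s.
	 */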
	/* NB: caller is responsible for resetting rate control state */
}

static void
ath_watchdog(void *arg)
{
	struct ath_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	int do_reset = 0;

	ATH_LOCK_ASSERT(sc);

	if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
		uint32_t hangs;

		ath_power_set_power_state(sc, HAL_PM_AWAKE);

		if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
		    hangs != 0) {
			device_printf(sc->sc_dev, "%s hang detected (0x%x)\n",
			    hangs & 0xff ? "bb" : "mac", hangs);
		} else
			device_printf(sc->sc_dev, "device timeout\n");
		do_reset = 1;
		counter_u64_add(ic->ic_oerrors, 1);
		sc->sc_stats.ast_watchdog++;

		ath_power_restore_power_state(sc);
	}

	/*
	 * We can't hold the lock across the ath_reset() call.
	 *
	 * And since this routine can't hold a lock and sleep,
	 * do the reset deferred.
	 */
	if (do_reset) {
		taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
	}

	callout_schedule(&sc->sc_wd_ch, hz);
}

static void
ath_parent(struct ieee80211com *ic)
{
	struct ath_softc *sc = ic->ic_softc;
	int error = EDOOFUS;

	ATH_LOCK(sc);
	if (ic->ic_nrunning > 0) {
		/*
		 * To avoid rescanning another access point,
		 * do not call ath_init() here.  Instead,
		 * only reflect promisc mode settings.
		 */
		if (sc->sc_running) {
			ath_power_set_power_state(sc, HAL_PM_AWAKE);
			ath_mode_init(sc);
			ath_power_restore_power_state(sc);
		} else if (!sc->sc_invalid) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			error = ath_init(sc);
		}
	} else {
		ath_stop(sc);
		if (!sc->sc_invalid)
			ath_power_setpower(sc, HAL_PM_FULL_SLEEP);
	}
	ATH_UNLOCK(sc);

	if (error == 0) {
#ifdef ATH_TX99_DIAG
		if (sc->sc_tx99 != NULL)
			sc->sc_tx99->start(sc->sc_tx99);
		else
#endif
		ieee80211_start_all(ic);
	}
}

/*
 * Announce various information on device/driver attach.
 */
static void
ath_announce(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	device_printf(sc->sc_dev, "%s mac %d.%d RF%s phy %d.%d\n",
		ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
		ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
	device_printf(sc->sc_dev, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n",
		ah->ah_analog2GhzRev, ah->ah_analog5GhzRev);
	if (bootverbose) {
		int i;
		for (i = 0; i <= WME_AC_VO; i++) {
			struct ath_txq *txq = sc->sc_ac2q[i];
			device_printf(sc->sc_dev,
			    "Use hw queue %u for %s traffic\n",
			    txq->axq_qnum, ieee80211_wme_acnames[i]);
		}
		device_printf(sc->sc_dev, "Use hw queue %u for CAB traffic\n",
		    sc->sc_cabq->axq_qnum);
		device_printf(sc->sc_dev, "Use hw queue %u for beacons\n",
		    sc->sc_bhalq);
	}
	if (ath_rxbuf != ATH_RXBUF)
		device_printf(sc->sc_dev, "using %u rx buffers\n", ath_rxbuf);
	if (ath_txbuf != ATH_TXBUF)
		device_printf(sc->sc_dev, "using %u tx buffers\n", ath_txbuf);
	if (sc->sc_mcastkey && bootverbose)
		device_printf(sc->sc_dev, "using multicast key search\n");
}

static void
ath_dfs_tasklet(void *p, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) p;
	struct ieee80211com *ic = &sc->sc_ic;

	/*
	 * If previous processing has found a radar event,
	 * signal this to the net80211 layer to begin DFS
	 * processing.
	 */
	if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
		/* DFS event found, initiate channel change */

		/*
		 * XXX doesn't currently tell us whether the event
		 * XXX was found in the primary or extension
		 * XXX channel!
		 */
		IEEE80211_LOCK(ic);
		ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
		IEEE80211_UNLOCK(ic);
	}
}

/*
 * Enable/disable power save.  This must be called with
 * no TX driver locks currently held, so it should only
 * be called from the RX path (which doesn't hold any
 * TX driver locks.)
 */
static void
ath_node_powersave(struct ieee80211_node *ni, int enable)
{
#ifdef	ATH_SW_PSQ
	struct ath_node *an = ATH_NODE(ni);
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_softc;
	struct ath_vap *avp = ATH_VAP(ni->ni_vap);

	/* XXX and no TXQ locks should be held here */

	DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: enable=%d\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    !! enable);

	/* Suspend or resume software queue handling */
	if (enable)
		ath_tx_node_sleep(sc, an);
	else
		ath_tx_node_wakeup(sc, an);

	/* Update net80211 state */
	avp->av_node_ps(ni, enable);
#else
	struct ath_vap *avp = ATH_VAP(ni->ni_vap);

	/* Update net80211 state */
	avp->av_node_ps(ni, enable);
#endif	/* ATH_SW_PSQ */
}

/*
 * Notification from net80211 that the powersave queue state has
 * changed.
 *
 * Since the software queue also may have some frames:
 *
 * + if the node software queue has frames and the TID state
 *   is 0, we set the TIM;
 * + if the node and the stack are both empty, we clear the TIM bit;
 * + if the stack tries to set the bit, always set it;
 * + if the stack tries to clear the bit, only clear it if the
 *   software queue in question is also cleared.
 *
 * TODO: this is called during node teardown; so let's ensure this
 * is all correctly handled and that the TIM bit is cleared.
 * It may be that the node flush is called _AFTER_ the net80211
 * stack clears the TIM.
 *
 * Here is the racy part.  Since it's possible >1 concurrent,
 * overlapping TXes will appear complete with a TX completion in
 * another thread, it's possible that the concurrent TIM calls will
 * clash.  We can't hold the node lock here because setting the
 * TIM grabs the net80211 comlock and this may cause a LOR.
 * The solution is either to totally serialise _everything_ at
 * this point (ie, all TX, completion and any reset/flush go into
 * one taskqueue) or a new "ath TIM lock" needs to be created that
 * just wraps the driver state change and this call to avp->av_set_tim().
 *
 * The same race exists in the net80211 power save queue handling
 * as well.  Since multiple transmitting threads may queue frames
 * into the driver, as well as ps-poll and the driver transmitting
 * frames (and thus clearing the psq), it's quite possible that
 * a packet entering the PSQ and a ps-poll being handled will
 * race, causing the TIM to be cleared and not re-set.
 */
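/*
 * Summary of how ath_node_set_tim() below treats the request:
 *
 *	enable	an_tim_set	an_swq_depth	result
 *	1	1		any		already set; no change
 *	1	0		any		set the TIM bit
 *	0	any		0		clear the TIM bit
 *	0	any		>0		clear only if the node has
 *						left power save, otherwise
 *						leave the TIM bit alone
 */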
static int
ath_node_set_tim(struct ieee80211_node *ni, int enable)
{
#ifdef	ATH_SW_PSQ
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_softc;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_vap *avp = ATH_VAP(ni->ni_vap);
	int changed = 0;

	ATH_TX_LOCK(sc);
	an->an_stack_psq = enable;

	/*
	 * This will get called for all operating modes,
	 * even if avp->av_set_tim is unset.
	 * It's currently set for hostap/ibss modes; but
	 * the same infrastructure is used for both STA
	 * and AP/IBSS node power save.
	 */
	if (avp->av_set_tim == NULL) {
		ATH_TX_UNLOCK(sc);
		return (0);
	}

	/*
	 * If setting the bit, always set it here.
	 * If clearing the bit, only clear it if the
	 * software queue is also empty.
	 *
	 * If the node has left power save, just clear the TIM
	 * bit regardless of the state of the power save queue.
	 *
	 * XXX TODO: although atomics are used, it's quite possible
	 * that a race will occur between this and setting/clearing
	 * in another thread.  TX completion will occur always in
	 * one thread, however setting/clearing the TIM bit can come
	 * from a variety of different process contexts!
	 */
	if (enable && an->an_tim_set == 1) {
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: enable=%d, tim_set=1, ignoring\n",
		    __func__,
		    ni->ni_macaddr,
		    ":",
		    enable);
		ATH_TX_UNLOCK(sc);
	} else if (enable) {
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: enable=%d, enabling TIM\n",
		    __func__,
		    ni->ni_macaddr,
		    ":",
		    enable);
		an->an_tim_set = 1;
		ATH_TX_UNLOCK(sc);
		changed = avp->av_set_tim(ni, enable);
	} else if (an->an_swq_depth == 0) {
		/* disable */
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: enable=%d, an_swq_depth == 0, disabling\n",
		    __func__,
		    ni->ni_macaddr,
		    ":",
		    enable);
		an->an_tim_set = 0;
		ATH_TX_UNLOCK(sc);
		changed = avp->av_set_tim(ni, enable);
	} else if (! an->an_is_powersave) {
		/*
		 * disable regardless; the node isn't in powersave now
		 */
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: enable=%d, an_pwrsave=0, disabling\n",
		    __func__,
		    ni->ni_macaddr,
		    ":",
		    enable);
		an->an_tim_set = 0;
		ATH_TX_UNLOCK(sc);
		changed = avp->av_set_tim(ni, enable);
	} else {
		/*
		 * psq disable, node is currently in powersave, node
		 * software queue isn't empty, so don't clear the TIM bit
		 * for now.
		 */
		ATH_TX_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: enable=%d, an_swq_depth > 0, ignoring\n",
		    __func__,
		    ni->ni_macaddr,
		    ":",
		    enable);
		changed = 0;
	}

	return (changed);
#else
	struct ath_vap *avp = ATH_VAP(ni->ni_vap);

	/*
	 * Some operating modes don't set av_set_tim(), so don't
	 * update it here.
	 */
	if (avp->av_set_tim == NULL)
		return (0);

	return (avp->av_set_tim(ni, enable));
#endif /* ATH_SW_PSQ */
}

/*
 * Set or update the TIM from the software queue.
 *
 * Check the software queue depth before attempting to take any
 * locks; that avoids acquiring them when there is nothing to do.
 * Then re-check afterwards to ensure nothing has changed in the
 * meantime.
 *
 * set:   This is designed to be called from the TX path, after
 *        a frame has been queued, to see if the swq depth is > 0.
 *
 * clear: This is designed to be called from the buffer completion point
 *        (right now it's ath_tx_default_comp()) where the state of
 *        a software queue has changed.
 *
 * It makes sense to place it at buffer free / completion rather
 * than after each software queue operation, as there's no real
 * point in churning the TIM bit as the last frames in the software
 * queue are transmitted.  If they fail and we retry them, we'd
 * just be setting the TIM bit again anyway.
 */
void
ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni,
     int enable)
{
#ifdef	ATH_SW_PSQ
	struct ath_node *an;
	struct ath_vap *avp;

	/* Don't do this for broadcast/etc frames */
	if (ni == NULL)
		return;

	an = ATH_NODE(ni);
	avp = ATH_VAP(ni->ni_vap);

	/*
	 * And for operating modes without the TIM handler set, let's
	 * just skip those.
	 */
	if (avp->av_set_tim == NULL)
		return;

	ATH_TX_LOCK_ASSERT(sc);

	if (enable) {
		if (an->an_is_powersave &&
		    an->an_tim_set == 0 &&
		    an->an_swq_depth != 0) {
			DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
			    "%s: %6D: swq_depth>0, tim_set=0, set!\n",
			    __func__,
			    ni->ni_macaddr,
			    ":");
			an->an_tim_set = 1;
			(void) avp->av_set_tim(ni, 1);
		}
	} else {
		/*
		 * Don't bother grabbing the lock unless the queue is empty.
		 */
		if (an->an_swq_depth != 0)
			return;

		if (an->an_is_powersave &&
		    an->an_stack_psq == 0 &&
		    an->an_tim_set == 1 &&
		    an->an_swq_depth == 0) {
			DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
			    "%s: %6D: swq_depth=0, tim_set=1, psq_set=0,"
			    " clear!\n",
			    __func__,
			    ni->ni_macaddr,
			    ":");
			an->an_tim_set = 0;
			(void) avp->av_set_tim(ni, 0);
		}
	}
#else
	return;
#endif	/* ATH_SW_PSQ */
}
/*
 * Received a ps-poll frame from net80211.
 *
 * Here we get a chance to serve out a software-queued frame ourselves
 * before we punt to net80211, which will transmit one itself - either
 * because there's traffic in the net80211 psq, or a null frame to
 * indicate there's nothing else.
 */
static void
ath_node_recv_pspoll(struct ieee80211_node *ni, struct mbuf *m)
{
#ifdef	ATH_SW_PSQ
	struct ath_node *an;
	struct ath_vap *avp;
	struct ieee80211com *ic;
	struct ath_softc *sc;
	int tid;

	/* Just paranoia */
	if (ni == NULL)
		return;

	/*
	 * Unassociated (temporary node) station.
	 */
	if (ni->ni_associd == 0)
		return;

	/*
	 * We do have an active node, so let's begin looking into it.
	 */
	ic = ni->ni_ic;
	sc = ic->ic_softc;
	an = ATH_NODE(ni);
	avp = ATH_VAP(ni->ni_vap);

	/*
	 * For now, we just call the original ps-poll method.
	 * Once we're ready to flip this on:
	 *
	 * + Set leak to 1, as no matter what we're going to have
	 *   to send a frame;
	 * + Check the software queue and if there's something in it,
	 *   schedule the highest TID that has traffic from this node.
	 *   Then make sure we schedule the software scheduler to
	 *   run so it picks up said frame.
	 *
	 * That way whatever happens, we'll at least send _a_ frame
	 * to the given node.
	 *
	 * Again, yes, it's crappy QoS if the node has multiple
	 * TIDs worth of traffic - but let's get it working first
	 * before we optimise it.
	 *
	 * Also yes, there's definitely latency here - we're not
	 * direct dispatching to the hardware in this path (and
	 * we're likely being called from the packet receive path,
	 * so going back into TX may be a little hairy!) but again
	 * I'd like to get this working first before optimising
	 * turn-around time.
	 */

	ATH_TX_LOCK(sc);

	/*
	 * Legacy - we're called and the node isn't asleep.
	 * Immediately punt.
	 */
	if (! an->an_is_powersave) {
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: not in powersave?\n",
		    __func__,
		    ni->ni_macaddr,
		    ":");
		ATH_TX_UNLOCK(sc);
		avp->av_recv_pspoll(ni, m);
		return;
	}

	/*
	 * We're in powersave.
	 *
	 * Leak a frame.
	 */
	an->an_leak_count = 1;
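	/*
	 * NB: the leak count allows the TX path to release one queued
	 * frame to this node even while it remains marked as being in
	 * power save, so the ps-poll is answered with real data when
	 * any is queued.
	 */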
6625 */ 6626 ATH_TX_UNLOCK(sc); 6627 taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask); 6628 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 6629 "%s: %6D: leaking frame to TID %d\n", 6630 __func__, 6631 ni->ni_macaddr, 6632 ":", 6633 tid); 6634 return; 6635 } 6636 6637 ATH_TX_UNLOCK(sc); 6638 6639 /* 6640 * XXX nothing in the TIDs at this point? Eek. 6641 */ 6642 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 6643 "%s: %6D: TIDs empty, but ath_node showed traffic?!\n", 6644 __func__, 6645 ni->ni_macaddr, 6646 ":"); 6647 avp->av_recv_pspoll(ni, m); 6648#else 6649 avp->av_recv_pspoll(ni, m); 6650#endif /* ATH_SW_PSQ */ 6651} 6652 6653MODULE_VERSION(if_ath, 1); 6654MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */ 6655#if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ) || defined(ATH_DEBUG_ALQ) 6656MODULE_DEPEND(if_ath, alq, 1, 1, 1); 6657#endif 6658