if_ath.c revision 242258
1/*- 2 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 13 * redistribution must be conditioned upon including a substantially 14 * similar Disclaimer requirement for further binary redistribution. 15 * 16 * NO WARRANTY 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 27 * THE POSSIBILITY OF SUCH DAMAGES. 28 */ 29 30#include <sys/cdefs.h> 31__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 242258 2012-10-28 18:46:06Z adrian $"); 32 33/* 34 * Driver for the Atheros Wireless LAN controller. 35 * 36 * This software is derived from work of Atsushi Onoe; his contribution 37 * is greatly appreciated. 38 */ 39 40#include "opt_inet.h" 41#include "opt_ath.h" 42/* 43 * This is needed for register operations which are performed 44 * by the driver - eg, calls to ath_hal_gettsf32(). 45 * 46 * It's also required for any AH_DEBUG checks in here, eg the 47 * module dependencies. 
48 */ 49#include "opt_ah.h" 50#include "opt_wlan.h" 51 52#include <sys/param.h> 53#include <sys/systm.h> 54#include <sys/sysctl.h> 55#include <sys/mbuf.h> 56#include <sys/malloc.h> 57#include <sys/lock.h> 58#include <sys/mutex.h> 59#include <sys/kernel.h> 60#include <sys/socket.h> 61#include <sys/sockio.h> 62#include <sys/errno.h> 63#include <sys/callout.h> 64#include <sys/bus.h> 65#include <sys/endian.h> 66#include <sys/kthread.h> 67#include <sys/taskqueue.h> 68#include <sys/priv.h> 69#include <sys/module.h> 70#include <sys/ktr.h> 71#include <sys/smp.h> /* for mp_ncpus */ 72 73#include <machine/bus.h> 74 75#include <net/if.h> 76#include <net/if_dl.h> 77#include <net/if_media.h> 78#include <net/if_types.h> 79#include <net/if_arp.h> 80#include <net/ethernet.h> 81#include <net/if_llc.h> 82 83#include <net80211/ieee80211_var.h> 84#include <net80211/ieee80211_regdomain.h> 85#ifdef IEEE80211_SUPPORT_SUPERG 86#include <net80211/ieee80211_superg.h> 87#endif 88#ifdef IEEE80211_SUPPORT_TDMA 89#include <net80211/ieee80211_tdma.h> 90#endif 91 92#include <net/bpf.h> 93 94#ifdef INET 95#include <netinet/in.h> 96#include <netinet/if_ether.h> 97#endif 98 99#include <dev/ath/if_athvar.h> 100#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */ 101#include <dev/ath/ath_hal/ah_diagcodes.h> 102 103#include <dev/ath/if_ath_debug.h> 104#include <dev/ath/if_ath_misc.h> 105#include <dev/ath/if_ath_tsf.h> 106#include <dev/ath/if_ath_tx.h> 107#include <dev/ath/if_ath_sysctl.h> 108#include <dev/ath/if_ath_led.h> 109#include <dev/ath/if_ath_keycache.h> 110#include <dev/ath/if_ath_rx.h> 111#include <dev/ath/if_ath_rx_edma.h> 112#include <dev/ath/if_ath_tx_edma.h> 113#include <dev/ath/if_ath_beacon.h> 114#include <dev/ath/if_athdfs.h> 115 116#ifdef ATH_TX99_DIAG 117#include <dev/ath/ath_tx99/ath_tx99.h> 118#endif 119 120/* 121 * ATH_BCBUF determines the number of vap's that can transmit 122 * beacons and also (currently) the number of vap's that can 123 * have unique mac addresses/bssid. When staggering beacons 124 * 4 is probably a good max as otherwise the beacons become 125 * very closely spaced and there is limited time for cab q traffic 126 * to go out. You can burst beacons instead but that is not good 127 * for stations in power save and at some point you really want 128 * another radio (and channel). 129 * 130 * The limit on the number of mac addresses is tied to our use of 131 * the U/L bit and tracking addresses in a byte; it would be 132 * worthwhile to allow more for applications like proxy sta. 
133 */ 134CTASSERT(ATH_BCBUF <= 8); 135 136static struct ieee80211vap *ath_vap_create(struct ieee80211com *, 137 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 138 const uint8_t [IEEE80211_ADDR_LEN], 139 const uint8_t [IEEE80211_ADDR_LEN]); 140static void ath_vap_delete(struct ieee80211vap *); 141static void ath_init(void *); 142static void ath_stop_locked(struct ifnet *); 143static void ath_stop(struct ifnet *); 144static int ath_reset_vap(struct ieee80211vap *, u_long); 145static void ath_start_queue(struct ifnet *ifp); 146static int ath_media_change(struct ifnet *); 147static void ath_watchdog(void *); 148static int ath_ioctl(struct ifnet *, u_long, caddr_t); 149static void ath_fatal_proc(void *, int); 150static void ath_bmiss_vap(struct ieee80211vap *); 151static void ath_bmiss_proc(void *, int); 152static void ath_key_update_begin(struct ieee80211vap *); 153static void ath_key_update_end(struct ieee80211vap *); 154static void ath_update_mcast(struct ifnet *); 155static void ath_update_promisc(struct ifnet *); 156static void ath_updateslot(struct ifnet *); 157static void ath_bstuck_proc(void *, int); 158static void ath_reset_proc(void *, int); 159static int ath_desc_alloc(struct ath_softc *); 160static void ath_desc_free(struct ath_softc *); 161static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *, 162 const uint8_t [IEEE80211_ADDR_LEN]); 163static void ath_node_cleanup(struct ieee80211_node *); 164static void ath_node_free(struct ieee80211_node *); 165static void ath_node_getsignal(const struct ieee80211_node *, 166 int8_t *, int8_t *); 167static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int); 168static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype); 169static int ath_tx_setup(struct ath_softc *, int, int); 170static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *); 171static void ath_tx_cleanup(struct ath_softc *); 172static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, 173 int dosched); 174static void ath_tx_proc_q0(void *, int); 175static void ath_tx_proc_q0123(void *, int); 176static void ath_tx_proc(void *, int); 177static void ath_txq_sched_tasklet(void *, int); 178static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); 179static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *); 180static void ath_scan_start(struct ieee80211com *); 181static void ath_scan_end(struct ieee80211com *); 182static void ath_set_channel(struct ieee80211com *); 183#ifdef ATH_ENABLE_11N 184static void ath_update_chw(struct ieee80211com *); 185#endif /* ATH_ENABLE_11N */ 186static void ath_calibrate(void *); 187static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int); 188static void ath_setup_stationkey(struct ieee80211_node *); 189static void ath_newassoc(struct ieee80211_node *, int); 190static int ath_setregdomain(struct ieee80211com *, 191 struct ieee80211_regdomain *, int, 192 struct ieee80211_channel []); 193static void ath_getradiocaps(struct ieee80211com *, int, int *, 194 struct ieee80211_channel []); 195static int ath_getchannels(struct ath_softc *); 196 197static int ath_rate_setup(struct ath_softc *, u_int mode); 198static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); 199 200static void ath_announce(struct ath_softc *); 201 202static void ath_dfs_tasklet(void *, int); 203static void ath_node_powersave(struct ieee80211_node *, int); 204 205#ifdef IEEE80211_SUPPORT_TDMA 206#include <dev/ath/if_ath_tdma.h> 207#endif 208 
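/*
 * Illustrative sketch only (not part of the driver source): the ATH_BCBUF
 * comment above describes deriving per-vap BSSIDs by setting the U/L bit
 * and folding a small slot index into the first octet of the EEPROM MAC
 * address.  assign_address() later in this file implements this; the
 * helper below is a hypothetical, standalone rendering of that bit math,
 * shown for reference only and therefore kept under #if 0.
 */
#if 0
#include <stdint.h>
#include <string.h>

static void
example_derive_bssid(const uint8_t base[6], unsigned int slot,
    uint8_t bssid[6])
{
	/* Slot 0 keeps the EEPROM MAC address unchanged. */
	memcpy(bssid, base, 6);
	if (slot != 0)
		bssid[0] |= (slot << 2) | 0x2;	/* U/L bit + slot index */
}
#endif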
209SYSCTL_DECL(_hw_ath); 210 211/* XXX validate sysctl values */ 212static int ath_longcalinterval = 30; /* long cals every 30 secs */ 213SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval, 214 0, "long chip calibration interval (secs)"); 215static int ath_shortcalinterval = 100; /* short cals every 100 ms */ 216SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval, 217 0, "short chip calibration interval (msecs)"); 218static int ath_resetcalinterval = 20*60; /* reset cal state 20 mins */ 219SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval, 220 0, "reset chip calibration results (secs)"); 221static int ath_anicalinterval = 100; /* ANI calibration - 100 msec */ 222SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval, 223 0, "ANI calibration (msecs)"); 224 225int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */ 226SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf, 227 0, "rx buffers allocated"); 228TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf); 229int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */ 230SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf, 231 0, "tx buffers allocated"); 232TUNABLE_INT("hw.ath.txbuf", &ath_txbuf); 233int ath_txbuf_mgmt = ATH_MGMT_TXBUF; /* # mgmt tx buffers to allocate */ 234SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RW, &ath_txbuf_mgmt, 235 0, "tx (mgmt) buffers allocated"); 236TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt); 237 238int ath_bstuck_threshold = 4; /* max missed beacons */ 239SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold, 240 0, "max missed beacon xmits before chip reset"); 241 242MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers"); 243 244void 245ath_legacy_attach_comp_func(struct ath_softc *sc) 246{ 247 248 /* 249 * Special case certain configurations. Note the 250 * CAB queue is handled by these specially so don't 251 * include them when checking the txq setup mask. 
252 */ 253 switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) { 254 case 0x01: 255 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc); 256 break; 257 case 0x0f: 258 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc); 259 break; 260 default: 261 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc); 262 break; 263 } 264} 265 266#define HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20) 267#define HAL_MODE_HT40 \ 268 (HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \ 269 HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS) 270int 271ath_attach(u_int16_t devid, struct ath_softc *sc) 272{ 273 struct ifnet *ifp; 274 struct ieee80211com *ic; 275 struct ath_hal *ah = NULL; 276 HAL_STATUS status; 277 int error = 0, i; 278 u_int wmodes; 279 uint8_t macaddr[IEEE80211_ADDR_LEN]; 280 int rx_chainmask, tx_chainmask; 281 282 DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid); 283 284 CURVNET_SET(vnet0); 285 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 286 if (ifp == NULL) { 287 device_printf(sc->sc_dev, "can not if_alloc()\n"); 288 error = ENOSPC; 289 goto bad; 290 } 291 ic = ifp->if_l2com; 292 293 /* set these up early for if_printf use */ 294 if_initname(ifp, device_get_name(sc->sc_dev), 295 device_get_unit(sc->sc_dev)); 296 CURVNET_RESTORE(); 297 298 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, 299 sc->sc_eepromdata, &status); 300 if (ah == NULL) { 301 if_printf(ifp, "unable to attach hardware; HAL status %u\n", 302 status); 303 error = ENXIO; 304 goto bad; 305 } 306 sc->sc_ah = ah; 307 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */ 308#ifdef ATH_DEBUG 309 sc->sc_debug = ath_debug; 310#endif 311 312 /* 313 * Setup the DMA/EDMA functions based on the current 314 * hardware support. 315 * 316 * This is required before the descriptors are allocated. 317 */ 318 if (ath_hal_hasedma(sc->sc_ah)) { 319 sc->sc_isedma = 1; 320 ath_recv_setup_edma(sc); 321 ath_xmit_setup_edma(sc); 322 } else { 323 ath_recv_setup_legacy(sc); 324 ath_xmit_setup_legacy(sc); 325 } 326 327 /* 328 * Check if the MAC has multi-rate retry support. 329 * We do this by trying to setup a fake extended 330 * descriptor. MAC's that don't have support will 331 * return false w/o doing anything. MAC's that do 332 * support it will return true w/o doing anything. 333 */ 334 sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0); 335 336 /* 337 * Check if the device has hardware counters for PHY 338 * errors. If so we need to enable the MIB interrupt 339 * so we can act on stat triggers. 340 */ 341 if (ath_hal_hwphycounters(ah)) 342 sc->sc_needmib = 1; 343 344 /* 345 * Get the hardware key cache size. 346 */ 347 sc->sc_keymax = ath_hal_keycachesize(ah); 348 if (sc->sc_keymax > ATH_KEYMAX) { 349 if_printf(ifp, "Warning, using only %u of %u key cache slots\n", 350 ATH_KEYMAX, sc->sc_keymax); 351 sc->sc_keymax = ATH_KEYMAX; 352 } 353 /* 354 * Reset the key cache since some parts do not 355 * reset the contents on initial power up. 356 */ 357 for (i = 0; i < sc->sc_keymax; i++) 358 ath_hal_keyreset(ah, i); 359 360 /* 361 * Collect the default channel list. 362 */ 363 error = ath_getchannels(sc); 364 if (error != 0) 365 goto bad; 366 367 /* 368 * Setup rate tables for all potential media types. 
369 */ 370 ath_rate_setup(sc, IEEE80211_MODE_11A); 371 ath_rate_setup(sc, IEEE80211_MODE_11B); 372 ath_rate_setup(sc, IEEE80211_MODE_11G); 373 ath_rate_setup(sc, IEEE80211_MODE_TURBO_A); 374 ath_rate_setup(sc, IEEE80211_MODE_TURBO_G); 375 ath_rate_setup(sc, IEEE80211_MODE_STURBO_A); 376 ath_rate_setup(sc, IEEE80211_MODE_11NA); 377 ath_rate_setup(sc, IEEE80211_MODE_11NG); 378 ath_rate_setup(sc, IEEE80211_MODE_HALF); 379 ath_rate_setup(sc, IEEE80211_MODE_QUARTER); 380 381 /* NB: setup here so ath_rate_update is happy */ 382 ath_setcurmode(sc, IEEE80211_MODE_11A); 383 384 /* 385 * Allocate TX descriptors and populate the lists. 386 */ 387 error = ath_desc_alloc(sc); 388 if (error != 0) { 389 if_printf(ifp, "failed to allocate TX descriptors: %d\n", 390 error); 391 goto bad; 392 } 393 error = ath_txdma_setup(sc); 394 if (error != 0) { 395 if_printf(ifp, "failed to allocate TX descriptors: %d\n", 396 error); 397 goto bad; 398 } 399 400 /* 401 * Allocate RX descriptors and populate the lists. 402 */ 403 error = ath_rxdma_setup(sc); 404 if (error != 0) { 405 if_printf(ifp, "failed to allocate RX descriptors: %d\n", 406 error); 407 goto bad; 408 } 409 410 callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0); 411 callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0); 412 413 ATH_TXBUF_LOCK_INIT(sc); 414 415 sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT, 416 taskqueue_thread_enqueue, &sc->sc_tq); 417 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, 418 "%s taskq", ifp->if_xname); 419 420 TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc); 421 TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc); 422 TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc); 423 TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc); 424 TASK_INIT(&sc->sc_txqtask,0, ath_txq_sched_tasklet, sc); 425 TASK_INIT(&sc->sc_fataltask,0, ath_fatal_proc, sc); 426 TASK_INIT(&sc->sc_txsndtask, 0, ath_start_task, sc); 427 428 /* 429 * Allocate hardware transmit queues: one queue for 430 * beacon frames and one data queue for each QoS 431 * priority. Note that the hal handles resetting 432 * these queues at the needed time. 433 * 434 * XXX PS-Poll 435 */ 436 sc->sc_bhalq = ath_beaconq_setup(sc); 437 if (sc->sc_bhalq == (u_int) -1) { 438 if_printf(ifp, "unable to setup a beacon xmit queue!\n"); 439 error = EIO; 440 goto bad2; 441 } 442 sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0); 443 if (sc->sc_cabq == NULL) { 444 if_printf(ifp, "unable to setup CAB xmit queue!\n"); 445 error = EIO; 446 goto bad2; 447 } 448 /* NB: insure BK queue is the lowest priority h/w queue */ 449 if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) { 450 if_printf(ifp, "unable to setup xmit queue for %s traffic!\n", 451 ieee80211_wme_acnames[WME_AC_BK]); 452 error = EIO; 453 goto bad2; 454 } 455 if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) || 456 !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) || 457 !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) { 458 /* 459 * Not enough hardware tx queues to properly do WME; 460 * just punt and assign them all to the same h/w queue. 461 * We could do a better job of this if, for example, 462 * we allocate queues when we switch from station to 463 * AP mode. 
464 */ 465 if (sc->sc_ac2q[WME_AC_VI] != NULL) 466 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); 467 if (sc->sc_ac2q[WME_AC_BE] != NULL) 468 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); 469 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; 470 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; 471 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; 472 } 473 474 /* 475 * Attach the TX completion function. 476 * 477 * The non-EDMA chips may have some special case optimisations; 478 * this method gives everyone a chance to attach cleanly. 479 */ 480 sc->sc_tx.xmit_attach_comp_func(sc); 481 482 /* 483 * Setup rate control. Some rate control modules 484 * call back to change the anntena state so expose 485 * the necessary entry points. 486 * XXX maybe belongs in struct ath_ratectrl? 487 */ 488 sc->sc_setdefantenna = ath_setdefantenna; 489 sc->sc_rc = ath_rate_attach(sc); 490 if (sc->sc_rc == NULL) { 491 error = EIO; 492 goto bad2; 493 } 494 495 /* Attach DFS module */ 496 if (! ath_dfs_attach(sc)) { 497 device_printf(sc->sc_dev, 498 "%s: unable to attach DFS\n", __func__); 499 error = EIO; 500 goto bad2; 501 } 502 503 /* Start DFS processing tasklet */ 504 TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc); 505 506 /* Configure LED state */ 507 sc->sc_blinking = 0; 508 sc->sc_ledstate = 1; 509 sc->sc_ledon = 0; /* low true */ 510 sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */ 511 callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE); 512 513 /* 514 * Don't setup hardware-based blinking. 515 * 516 * Although some NICs may have this configured in the 517 * default reset register values, the user may wish 518 * to alter which pins have which function. 519 * 520 * The reference driver attaches the MAC network LED to GPIO1 and 521 * the MAC power LED to GPIO2. However, the DWA-552 cardbus 522 * NIC has these reversed. 523 */ 524 sc->sc_hardled = (1 == 0); 525 sc->sc_led_net_pin = -1; 526 sc->sc_led_pwr_pin = -1; 527 /* 528 * Auto-enable soft led processing for IBM cards and for 529 * 5211 minipci cards. Users can also manually enable/disable 530 * support with a sysctl. 531 */ 532 sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID); 533 ath_led_config(sc); 534 ath_hal_setledstate(ah, HAL_LED_INIT); 535 536 ifp->if_softc = sc; 537 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; 538 ifp->if_start = ath_start_queue; 539 ifp->if_ioctl = ath_ioctl; 540 ifp->if_init = ath_init; 541 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 542 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 543 IFQ_SET_READY(&ifp->if_snd); 544 545 ic->ic_ifp = ifp; 546 /* XXX not right but it's not used anywhere important */ 547 ic->ic_phytype = IEEE80211_T_OFDM; 548 ic->ic_opmode = IEEE80211_M_STA; 549 ic->ic_caps = 550 IEEE80211_C_STA /* station mode */ 551 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ 552 | IEEE80211_C_HOSTAP /* hostap mode */ 553 | IEEE80211_C_MONITOR /* monitor mode */ 554 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 555 | IEEE80211_C_WDS /* 4-address traffic works */ 556 | IEEE80211_C_MBSS /* mesh point link mode */ 557 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 558 | IEEE80211_C_SHSLOT /* short slot time supported */ 559 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ 560#ifndef ATH_ENABLE_11N 561 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 562#endif 563 | IEEE80211_C_TXFRAG /* handle tx frags */ 564#ifdef ATH_ENABLE_DFS 565 | IEEE80211_C_DFS /* Enable radar detection */ 566#endif 567 ; 568 /* 569 * Query the hal to figure out h/w crypto support. 
570 */ 571 if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP)) 572 ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP; 573 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB)) 574 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB; 575 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM)) 576 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM; 577 if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP)) 578 ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP; 579 if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) { 580 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP; 581 /* 582 * Check if h/w does the MIC and/or whether the 583 * separate key cache entries are required to 584 * handle both tx+rx MIC keys. 585 */ 586 if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC)) 587 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 588 /* 589 * If the h/w supports storing tx+rx MIC keys 590 * in one cache slot automatically enable use. 591 */ 592 if (ath_hal_hastkipsplit(ah) || 593 !ath_hal_settkipsplit(ah, AH_FALSE)) 594 sc->sc_splitmic = 1; 595 /* 596 * If the h/w can do TKIP MIC together with WME then 597 * we use it; otherwise we force the MIC to be done 598 * in software by the net80211 layer. 599 */ 600 if (ath_hal_haswmetkipmic(ah)) 601 sc->sc_wmetkipmic = 1; 602 } 603 sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR); 604 /* 605 * Check for multicast key search support. 606 */ 607 if (ath_hal_hasmcastkeysearch(sc->sc_ah) && 608 !ath_hal_getmcastkeysearch(sc->sc_ah)) { 609 ath_hal_setmcastkeysearch(sc->sc_ah, 1); 610 } 611 sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah); 612 /* 613 * Mark key cache slots associated with global keys 614 * as in use. If we knew TKIP was not to be used we 615 * could leave the +32, +64, and +32+64 slots free. 616 */ 617 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 618 setbit(sc->sc_keymap, i); 619 setbit(sc->sc_keymap, i+64); 620 if (sc->sc_splitmic) { 621 setbit(sc->sc_keymap, i+32); 622 setbit(sc->sc_keymap, i+32+64); 623 } 624 } 625 /* 626 * TPC support can be done either with a global cap or 627 * per-packet support. The latter is not available on 628 * all parts. We're a bit pedantic here as all parts 629 * support a global cap. 630 */ 631 if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah)) 632 ic->ic_caps |= IEEE80211_C_TXPMGT; 633 634 /* 635 * Mark WME capability only if we have sufficient 636 * hardware queues to do proper priority scheduling. 637 */ 638 if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK]) 639 ic->ic_caps |= IEEE80211_C_WME; 640 /* 641 * Check for misc other capabilities. 642 */ 643 if (ath_hal_hasbursting(ah)) 644 ic->ic_caps |= IEEE80211_C_BURST; 645 sc->sc_hasbmask = ath_hal_hasbssidmask(ah); 646 sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah); 647 sc->sc_hastsfadd = ath_hal_hastsfadjust(ah); 648 sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah); 649 sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah); 650 if (ath_hal_hasfastframes(ah)) 651 ic->ic_caps |= IEEE80211_C_FF; 652 wmodes = ath_hal_getwirelessmodes(ah); 653 if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO)) 654 ic->ic_caps |= IEEE80211_C_TURBOP; 655#ifdef IEEE80211_SUPPORT_TDMA 656 if (ath_hal_macversion(ah) > 0x78) { 657 ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */ 658 ic->ic_tdma_update = ath_tdma_update; 659 } 660#endif 661 662 /* 663 * TODO: enforce that at least this many frames are available 664 * in the txbuf list before allowing data frames (raw or 665 * otherwise) to be transmitted. 666 */ 667 sc->sc_txq_data_minfree = 10; 668 /* 669 * Leave this as default to maintain legacy behaviour. 
670 * Shortening the cabq/mcastq may end up causing some 671 * undesirable behaviour. 672 */ 673 sc->sc_txq_mcastq_maxdepth = ath_txbuf; 674 675 /* 676 * Allow the TX and RX chainmasks to be overridden by 677 * environment variables and/or device.hints. 678 * 679 * This must be done early - before the hardware is 680 * calibrated or before the 802.11n stream calculation 681 * is done. 682 */ 683 if (resource_int_value(device_get_name(sc->sc_dev), 684 device_get_unit(sc->sc_dev), "rx_chainmask", 685 &rx_chainmask) == 0) { 686 device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n", 687 rx_chainmask); 688 (void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask); 689 } 690 if (resource_int_value(device_get_name(sc->sc_dev), 691 device_get_unit(sc->sc_dev), "tx_chainmask", 692 &tx_chainmask) == 0) { 693 device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n", 694 tx_chainmask); 695 (void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask); 696 } 697 698 /* 699 * Disable MRR with protected frames by default. 700 * Only 802.11n series NICs can handle this. 701 */ 702 sc->sc_mrrprot = 0; /* XXX should be a capability */ 703 704#ifdef ATH_ENABLE_11N 705 /* 706 * Query HT capabilities 707 */ 708 if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK && 709 (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) { 710 int rxs, txs; 711 712 device_printf(sc->sc_dev, "[HT] enabling HT modes\n"); 713 714 sc->sc_mrrprot = 1; /* XXX should be a capability */ 715 716 ic->ic_htcaps = IEEE80211_HTC_HT /* HT operation */ 717 | IEEE80211_HTC_AMPDU /* A-MPDU tx/rx */ 718 | IEEE80211_HTC_AMSDU /* A-MSDU tx/rx */ 719 | IEEE80211_HTCAP_MAXAMSDU_3839 720 /* max A-MSDU length */ 721 | IEEE80211_HTCAP_SMPS_OFF; /* SM power save off */ 722 ; 723 724 /* 725 * Enable short-GI for HT20 only if the hardware 726 * advertises support. 727 * Notably, anything earlier than the AR9287 doesn't. 728 */ 729 if ((ath_hal_getcapability(ah, 730 HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) && 731 (wmodes & HAL_MODE_HT20)) { 732 device_printf(sc->sc_dev, 733 "[HT] enabling short-GI in 20MHz mode\n"); 734 ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20; 735 } 736 737 if (wmodes & HAL_MODE_HT40) 738 ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40 739 | IEEE80211_HTCAP_SHORTGI40; 740 741 /* 742 * TX/RX streams need to be taken into account when 743 * negotiating which MCS rates it'll receive and 744 * what MCS rates are available for TX. 745 */ 746 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs); 747 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs); 748 749 ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask); 750 ath_hal_gettxchainmask(ah, &sc->sc_txchainmask); 751 752 ic->ic_txstream = txs; 753 ic->ic_rxstream = rxs; 754 755 (void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1, 756 &sc->sc_rts_aggr_limit); 757 if (sc->sc_rts_aggr_limit != (64 * 1024)) 758 device_printf(sc->sc_dev, 759 "[HT] RTS aggregates limited to %d KiB\n", 760 sc->sc_rts_aggr_limit / 1024); 761 762 device_printf(sc->sc_dev, 763 "[HT] %d RX streams; %d TX streams\n", rxs, txs); 764 } 765#endif 766 767 /* 768 * Initial aggregation settings. 769 */ 770 sc->sc_hwq_limit = ATH_AGGR_MIN_QDEPTH; 771 sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW; 772 sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH; 773 774 /* 775 * Check if the hardware requires PCI register serialisation. 776 * Some of the Owl based MACs require this. 
777 */ 778 if (mp_ncpus > 1 && 779 ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR, 780 0, NULL) == HAL_OK) { 781 sc->sc_ah->ah_config.ah_serialise_reg_war = 1; 782 device_printf(sc->sc_dev, 783 "Enabling register serialisation\n"); 784 } 785 786 /* 787 * Indicate we need the 802.11 header padded to a 788 * 32-bit boundary for 4-address and QoS frames. 789 */ 790 ic->ic_flags |= IEEE80211_F_DATAPAD; 791 792 /* 793 * Query the hal about antenna support. 794 */ 795 sc->sc_defant = ath_hal_getdefantenna(ah); 796 797 /* 798 * Not all chips have the VEOL support we want to 799 * use with IBSS beacons; check here for it. 800 */ 801 sc->sc_hasveol = ath_hal_hasveol(ah); 802 803 /* get mac address from hardware */ 804 ath_hal_getmac(ah, macaddr); 805 if (sc->sc_hasbmask) 806 ath_hal_getbssidmask(ah, sc->sc_hwbssidmask); 807 808 /* NB: used to size node table key mapping array */ 809 ic->ic_max_keyix = sc->sc_keymax; 810 /* call MI attach routine. */ 811 ieee80211_ifattach(ic, macaddr); 812 ic->ic_setregdomain = ath_setregdomain; 813 ic->ic_getradiocaps = ath_getradiocaps; 814 sc->sc_opmode = HAL_M_STA; 815 816 /* override default methods */ 817 ic->ic_newassoc = ath_newassoc; 818 ic->ic_updateslot = ath_updateslot; 819 ic->ic_wme.wme_update = ath_wme_update; 820 ic->ic_vap_create = ath_vap_create; 821 ic->ic_vap_delete = ath_vap_delete; 822 ic->ic_raw_xmit = ath_raw_xmit; 823 ic->ic_update_mcast = ath_update_mcast; 824 ic->ic_update_promisc = ath_update_promisc; 825 ic->ic_node_alloc = ath_node_alloc; 826 sc->sc_node_free = ic->ic_node_free; 827 ic->ic_node_free = ath_node_free; 828 sc->sc_node_cleanup = ic->ic_node_cleanup; 829 ic->ic_node_cleanup = ath_node_cleanup; 830 ic->ic_node_getsignal = ath_node_getsignal; 831 ic->ic_scan_start = ath_scan_start; 832 ic->ic_scan_end = ath_scan_end; 833 ic->ic_set_channel = ath_set_channel; 834#ifdef ATH_ENABLE_11N 835 /* 802.11n specific - but just override anyway */ 836 sc->sc_addba_request = ic->ic_addba_request; 837 sc->sc_addba_response = ic->ic_addba_response; 838 sc->sc_addba_stop = ic->ic_addba_stop; 839 sc->sc_bar_response = ic->ic_bar_response; 840 sc->sc_addba_response_timeout = ic->ic_addba_response_timeout; 841 842 ic->ic_addba_request = ath_addba_request; 843 ic->ic_addba_response = ath_addba_response; 844 ic->ic_addba_response_timeout = ath_addba_response_timeout; 845 ic->ic_addba_stop = ath_addba_stop; 846 ic->ic_bar_response = ath_bar_response; 847 848 ic->ic_update_chw = ath_update_chw; 849#endif /* ATH_ENABLE_11N */ 850 851#ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT 852 /* 853 * There's one vendor bitmap entry in the RX radiotap 854 * header; make sure that's taken into account. 855 */ 856 ieee80211_radiotap_attachv(ic, 857 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0, 858 ATH_TX_RADIOTAP_PRESENT, 859 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1, 860 ATH_RX_RADIOTAP_PRESENT); 861#else 862 /* 863 * No vendor bitmap/extensions are present. 864 */ 865 ieee80211_radiotap_attach(ic, 866 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 867 ATH_TX_RADIOTAP_PRESENT, 868 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 869 ATH_RX_RADIOTAP_PRESENT); 870#endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */ 871 872 /* 873 * Setup dynamic sysctl's now that country code and 874 * regdomain are available from the hal. 
875 */ 876 ath_sysctlattach(sc); 877 ath_sysctl_stats_attach(sc); 878 ath_sysctl_hal_attach(sc); 879 880 if (bootverbose) 881 ieee80211_announce(ic); 882 ath_announce(sc); 883 return 0; 884bad2: 885 ath_tx_cleanup(sc); 886 ath_desc_free(sc); 887 ath_txdma_teardown(sc); 888 ath_rxdma_teardown(sc); 889bad: 890 if (ah) 891 ath_hal_detach(ah); 892 if (ifp != NULL) { 893 CURVNET_SET(ifp->if_vnet); 894 if_free(ifp); 895 CURVNET_RESTORE(); 896 } 897 sc->sc_invalid = 1; 898 return error; 899} 900 901int 902ath_detach(struct ath_softc *sc) 903{ 904 struct ifnet *ifp = sc->sc_ifp; 905 906 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 907 __func__, ifp->if_flags); 908 909 /* 910 * NB: the order of these is important: 911 * o stop the chip so no more interrupts will fire 912 * o call the 802.11 layer before detaching the hal to 913 * insure callbacks into the driver to delete global 914 * key cache entries can be handled 915 * o free the taskqueue which drains any pending tasks 916 * o reclaim the tx queue data structures after calling 917 * the 802.11 layer as we'll get called back to reclaim 918 * node state and potentially want to use them 919 * o to cleanup the tx queues the hal is called, so detach 920 * it last 921 * Other than that, it's straightforward... 922 */ 923 ath_stop(ifp); 924 ieee80211_ifdetach(ifp->if_l2com); 925 taskqueue_free(sc->sc_tq); 926#ifdef ATH_TX99_DIAG 927 if (sc->sc_tx99 != NULL) 928 sc->sc_tx99->detach(sc->sc_tx99); 929#endif 930 ath_rate_detach(sc->sc_rc); 931 932 ath_dfs_detach(sc); 933 ath_desc_free(sc); 934 ath_txdma_teardown(sc); 935 ath_rxdma_teardown(sc); 936 ath_tx_cleanup(sc); 937 ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */ 938 939 CURVNET_SET(ifp->if_vnet); 940 if_free(ifp); 941 CURVNET_RESTORE(); 942 943 return 0; 944} 945 946/* 947 * MAC address handling for multiple BSS on the same radio. 948 * The first vap uses the MAC address from the EEPROM. For 949 * subsequent vap's we set the U/L bit (bit 1) in the MAC 950 * address and use the next six bits as an index. 951 */ 952static void 953assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone) 954{ 955 int i; 956 957 if (clone && sc->sc_hasbmask) { 958 /* NB: we only do this if h/w supports multiple bssid */ 959 for (i = 0; i < 8; i++) 960 if ((sc->sc_bssidmask & (1<<i)) == 0) 961 break; 962 if (i != 0) 963 mac[0] |= (i << 2)|0x2; 964 } else 965 i = 0; 966 sc->sc_bssidmask |= 1<<i; 967 sc->sc_hwbssidmask[0] &= ~mac[0]; 968 if (i == 0) 969 sc->sc_nbssid0++; 970} 971 972static void 973reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN]) 974{ 975 int i = mac[0] >> 2; 976 uint8_t mask; 977 978 if (i != 0 || --sc->sc_nbssid0 == 0) { 979 sc->sc_bssidmask &= ~(1<<i); 980 /* recalculate bssid mask from remaining addresses */ 981 mask = 0xff; 982 for (i = 1; i < 8; i++) 983 if (sc->sc_bssidmask & (1<<i)) 984 mask &= ~((i<<2)|0x2); 985 sc->sc_hwbssidmask[0] |= mask; 986 } 987} 988 989/* 990 * Assign a beacon xmit slot. We try to space out 991 * assignments so when beacons are staggered the 992 * traffic coming out of the cab q has maximal time 993 * to go out before the next beacon is scheduled. 
994 */ 995static int 996assign_bslot(struct ath_softc *sc) 997{ 998 u_int slot, free; 999 1000 free = 0; 1001 for (slot = 0; slot < ATH_BCBUF; slot++) 1002 if (sc->sc_bslot[slot] == NULL) { 1003 if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL && 1004 sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL) 1005 return slot; 1006 free = slot; 1007 /* NB: keep looking for a double slot */ 1008 } 1009 return free; 1010} 1011 1012static struct ieee80211vap * 1013ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 1014 enum ieee80211_opmode opmode, int flags, 1015 const uint8_t bssid[IEEE80211_ADDR_LEN], 1016 const uint8_t mac0[IEEE80211_ADDR_LEN]) 1017{ 1018 struct ath_softc *sc = ic->ic_ifp->if_softc; 1019 struct ath_vap *avp; 1020 struct ieee80211vap *vap; 1021 uint8_t mac[IEEE80211_ADDR_LEN]; 1022 int needbeacon, error; 1023 enum ieee80211_opmode ic_opmode; 1024 1025 avp = (struct ath_vap *) malloc(sizeof(struct ath_vap), 1026 M_80211_VAP, M_WAITOK | M_ZERO); 1027 needbeacon = 0; 1028 IEEE80211_ADDR_COPY(mac, mac0); 1029 1030 ATH_LOCK(sc); 1031 ic_opmode = opmode; /* default to opmode of new vap */ 1032 switch (opmode) { 1033 case IEEE80211_M_STA: 1034 if (sc->sc_nstavaps != 0) { /* XXX only 1 for now */ 1035 device_printf(sc->sc_dev, "only 1 sta vap supported\n"); 1036 goto bad; 1037 } 1038 if (sc->sc_nvaps) { 1039 /* 1040 * With multiple vaps we must fall back 1041 * to s/w beacon miss handling. 1042 */ 1043 flags |= IEEE80211_CLONE_NOBEACONS; 1044 } 1045 if (flags & IEEE80211_CLONE_NOBEACONS) { 1046 /* 1047 * Station mode w/o beacons are implemented w/ AP mode. 1048 */ 1049 ic_opmode = IEEE80211_M_HOSTAP; 1050 } 1051 break; 1052 case IEEE80211_M_IBSS: 1053 if (sc->sc_nvaps != 0) { /* XXX only 1 for now */ 1054 device_printf(sc->sc_dev, 1055 "only 1 ibss vap supported\n"); 1056 goto bad; 1057 } 1058 needbeacon = 1; 1059 break; 1060 case IEEE80211_M_AHDEMO: 1061#ifdef IEEE80211_SUPPORT_TDMA 1062 if (flags & IEEE80211_CLONE_TDMA) { 1063 if (sc->sc_nvaps != 0) { 1064 device_printf(sc->sc_dev, 1065 "only 1 tdma vap supported\n"); 1066 goto bad; 1067 } 1068 needbeacon = 1; 1069 flags |= IEEE80211_CLONE_NOBEACONS; 1070 } 1071 /* fall thru... */ 1072#endif 1073 case IEEE80211_M_MONITOR: 1074 if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) { 1075 /* 1076 * Adopt existing mode. Adding a monitor or ahdemo 1077 * vap to an existing configuration is of dubious 1078 * value but should be ok. 1079 */ 1080 /* XXX not right for monitor mode */ 1081 ic_opmode = ic->ic_opmode; 1082 } 1083 break; 1084 case IEEE80211_M_HOSTAP: 1085 case IEEE80211_M_MBSS: 1086 needbeacon = 1; 1087 break; 1088 case IEEE80211_M_WDS: 1089 if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) { 1090 device_printf(sc->sc_dev, 1091 "wds not supported in sta mode\n"); 1092 goto bad; 1093 } 1094 /* 1095 * Silently remove any request for a unique 1096 * bssid; WDS vap's always share the local 1097 * mac address. 1098 */ 1099 flags &= ~IEEE80211_CLONE_BSSID; 1100 if (sc->sc_nvaps == 0) 1101 ic_opmode = IEEE80211_M_HOSTAP; 1102 else 1103 ic_opmode = ic->ic_opmode; 1104 break; 1105 default: 1106 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); 1107 goto bad; 1108 } 1109 /* 1110 * Check that a beacon buffer is available; the code below assumes it. 1111 */ 1112 if (needbeacon & TAILQ_EMPTY(&sc->sc_bbuf)) { 1113 device_printf(sc->sc_dev, "no beacon buffer available\n"); 1114 goto bad; 1115 } 1116 1117 /* STA, AHDEMO? 
*/ 1118 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) { 1119 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID); 1120 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); 1121 } 1122 1123 vap = &avp->av_vap; 1124 /* XXX can't hold mutex across if_alloc */ 1125 ATH_UNLOCK(sc); 1126 error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, 1127 bssid, mac); 1128 ATH_LOCK(sc); 1129 if (error != 0) { 1130 device_printf(sc->sc_dev, "%s: error %d creating vap\n", 1131 __func__, error); 1132 goto bad2; 1133 } 1134 1135 /* h/w crypto support */ 1136 vap->iv_key_alloc = ath_key_alloc; 1137 vap->iv_key_delete = ath_key_delete; 1138 vap->iv_key_set = ath_key_set; 1139 vap->iv_key_update_begin = ath_key_update_begin; 1140 vap->iv_key_update_end = ath_key_update_end; 1141 1142 /* override various methods */ 1143 avp->av_recv_mgmt = vap->iv_recv_mgmt; 1144 vap->iv_recv_mgmt = ath_recv_mgmt; 1145 vap->iv_reset = ath_reset_vap; 1146 vap->iv_update_beacon = ath_beacon_update; 1147 avp->av_newstate = vap->iv_newstate; 1148 vap->iv_newstate = ath_newstate; 1149 avp->av_bmiss = vap->iv_bmiss; 1150 vap->iv_bmiss = ath_bmiss_vap; 1151 1152 avp->av_node_ps = vap->iv_node_ps; 1153 vap->iv_node_ps = ath_node_powersave; 1154 1155 /* Set default parameters */ 1156 1157 /* 1158 * Anything earlier than some AR9300 series MACs don't 1159 * support a smaller MPDU density. 1160 */ 1161 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8; 1162 /* 1163 * All NICs can handle the maximum size, however 1164 * AR5416 based MACs can only TX aggregates w/ RTS 1165 * protection when the total aggregate size is <= 8k. 1166 * However, for now that's enforced by the TX path. 1167 */ 1168 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K; 1169 1170 avp->av_bslot = -1; 1171 if (needbeacon) { 1172 /* 1173 * Allocate beacon state and setup the q for buffered 1174 * multicast frames. We know a beacon buffer is 1175 * available because we checked above. 1176 */ 1177 avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf); 1178 TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list); 1179 if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) { 1180 /* 1181 * Assign the vap to a beacon xmit slot. As above 1182 * this cannot fail to find a free one. 1183 */ 1184 avp->av_bslot = assign_bslot(sc); 1185 KASSERT(sc->sc_bslot[avp->av_bslot] == NULL, 1186 ("beacon slot %u not empty", avp->av_bslot)); 1187 sc->sc_bslot[avp->av_bslot] = vap; 1188 sc->sc_nbcnvaps++; 1189 } 1190 if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) { 1191 /* 1192 * Multple vaps are to transmit beacons and we 1193 * have h/w support for TSF adjusting; enable 1194 * use of staggered beacons. 1195 */ 1196 sc->sc_stagbeacons = 1; 1197 } 1198 ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ); 1199 } 1200 1201 ic->ic_opmode = ic_opmode; 1202 if (opmode != IEEE80211_M_WDS) { 1203 sc->sc_nvaps++; 1204 if (opmode == IEEE80211_M_STA) 1205 sc->sc_nstavaps++; 1206 if (opmode == IEEE80211_M_MBSS) 1207 sc->sc_nmeshvaps++; 1208 } 1209 switch (ic_opmode) { 1210 case IEEE80211_M_IBSS: 1211 sc->sc_opmode = HAL_M_IBSS; 1212 break; 1213 case IEEE80211_M_STA: 1214 sc->sc_opmode = HAL_M_STA; 1215 break; 1216 case IEEE80211_M_AHDEMO: 1217#ifdef IEEE80211_SUPPORT_TDMA 1218 if (vap->iv_caps & IEEE80211_C_TDMA) { 1219 sc->sc_tdma = 1; 1220 /* NB: disable tsf adjust */ 1221 sc->sc_stagbeacons = 0; 1222 } 1223 /* 1224 * NB: adhoc demo mode is a pseudo mode; to the hal it's 1225 * just ap mode. 1226 */ 1227 /* fall thru... 
*/ 1228#endif 1229 case IEEE80211_M_HOSTAP: 1230 case IEEE80211_M_MBSS: 1231 sc->sc_opmode = HAL_M_HOSTAP; 1232 break; 1233 case IEEE80211_M_MONITOR: 1234 sc->sc_opmode = HAL_M_MONITOR; 1235 break; 1236 default: 1237 /* XXX should not happen */ 1238 break; 1239 } 1240 if (sc->sc_hastsfadd) { 1241 /* 1242 * Configure whether or not TSF adjust should be done. 1243 */ 1244 ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons); 1245 } 1246 if (flags & IEEE80211_CLONE_NOBEACONS) { 1247 /* 1248 * Enable s/w beacon miss handling. 1249 */ 1250 sc->sc_swbmiss = 1; 1251 } 1252 ATH_UNLOCK(sc); 1253 1254 /* complete setup */ 1255 ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status); 1256 return vap; 1257bad2: 1258 reclaim_address(sc, mac); 1259 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); 1260bad: 1261 free(avp, M_80211_VAP); 1262 ATH_UNLOCK(sc); 1263 return NULL; 1264} 1265 1266static void 1267ath_vap_delete(struct ieee80211vap *vap) 1268{ 1269 struct ieee80211com *ic = vap->iv_ic; 1270 struct ifnet *ifp = ic->ic_ifp; 1271 struct ath_softc *sc = ifp->if_softc; 1272 struct ath_hal *ah = sc->sc_ah; 1273 struct ath_vap *avp = ATH_VAP(vap); 1274 1275 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); 1276 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1277 /* 1278 * Quiesce the hardware while we remove the vap. In 1279 * particular we need to reclaim all references to 1280 * the vap state by any frames pending on the tx queues. 1281 */ 1282 ath_hal_intrset(ah, 0); /* disable interrupts */ 1283 ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */ 1284 /* XXX Do all frames from all vaps/nodes need draining here? */ 1285 ath_stoprecv(sc, 1); /* stop recv side */ 1286 } 1287 1288 ieee80211_vap_detach(vap); 1289 1290 /* 1291 * XXX Danger Will Robinson! Danger! 1292 * 1293 * Because ieee80211_vap_detach() can queue a frame (the station 1294 * diassociate message?) after we've drained the TXQ and 1295 * flushed the software TXQ, we will end up with a frame queued 1296 * to a node whose vap is about to be freed. 1297 * 1298 * To work around this, flush the hardware/software again. 1299 * This may be racy - the ath task may be running and the packet 1300 * may be being scheduled between sw->hw txq. Tsk. 1301 * 1302 * TODO: figure out why a new node gets allocated somewhere around 1303 * here (after the ath_tx_swq() call; and after an ath_stop_locked() 1304 * call!) 1305 */ 1306 1307 ath_draintxq(sc, ATH_RESET_DEFAULT); 1308 1309 ATH_LOCK(sc); 1310 /* 1311 * Reclaim beacon state. Note this must be done before 1312 * the vap instance is reclaimed as we may have a reference 1313 * to it in the buffer for the beacon frame. 1314 */ 1315 if (avp->av_bcbuf != NULL) { 1316 if (avp->av_bslot != -1) { 1317 sc->sc_bslot[avp->av_bslot] = NULL; 1318 sc->sc_nbcnvaps--; 1319 } 1320 ath_beacon_return(sc, avp->av_bcbuf); 1321 avp->av_bcbuf = NULL; 1322 if (sc->sc_nbcnvaps == 0) { 1323 sc->sc_stagbeacons = 0; 1324 if (sc->sc_hastsfadd) 1325 ath_hal_settsfadjust(sc->sc_ah, 0); 1326 } 1327 /* 1328 * Reclaim any pending mcast frames for the vap. 1329 */ 1330 ath_tx_draintxq(sc, &avp->av_mcastq); 1331 ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq); 1332 } 1333 /* 1334 * Update bookkeeping. 
1335 */ 1336 if (vap->iv_opmode == IEEE80211_M_STA) { 1337 sc->sc_nstavaps--; 1338 if (sc->sc_nstavaps == 0 && sc->sc_swbmiss) 1339 sc->sc_swbmiss = 0; 1340 } else if (vap->iv_opmode == IEEE80211_M_HOSTAP || 1341 vap->iv_opmode == IEEE80211_M_MBSS) { 1342 reclaim_address(sc, vap->iv_myaddr); 1343 ath_hal_setbssidmask(ah, sc->sc_hwbssidmask); 1344 if (vap->iv_opmode == IEEE80211_M_MBSS) 1345 sc->sc_nmeshvaps--; 1346 } 1347 if (vap->iv_opmode != IEEE80211_M_WDS) 1348 sc->sc_nvaps--; 1349#ifdef IEEE80211_SUPPORT_TDMA 1350 /* TDMA operation ceases when the last vap is destroyed */ 1351 if (sc->sc_tdma && sc->sc_nvaps == 0) { 1352 sc->sc_tdma = 0; 1353 sc->sc_swbmiss = 0; 1354 } 1355#endif 1356 free(avp, M_80211_VAP); 1357 1358 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1359 /* 1360 * Restart rx+tx machines if still running (RUNNING will 1361 * be reset if we just destroyed the last vap). 1362 */ 1363 if (ath_startrecv(sc) != 0) 1364 if_printf(ifp, "%s: unable to restart recv logic\n", 1365 __func__); 1366 if (sc->sc_beacons) { /* restart beacons */ 1367#ifdef IEEE80211_SUPPORT_TDMA 1368 if (sc->sc_tdma) 1369 ath_tdma_config(sc, NULL); 1370 else 1371#endif 1372 ath_beacon_config(sc, NULL); 1373 } 1374 ath_hal_intrset(ah, sc->sc_imask); 1375 } 1376 ATH_UNLOCK(sc); 1377} 1378 1379void 1380ath_suspend(struct ath_softc *sc) 1381{ 1382 struct ifnet *ifp = sc->sc_ifp; 1383 struct ieee80211com *ic = ifp->if_l2com; 1384 1385 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1386 __func__, ifp->if_flags); 1387 1388 sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0; 1389 1390 ieee80211_suspend_all(ic); 1391 /* 1392 * NB: don't worry about putting the chip in low power 1393 * mode; pci will power off our socket on suspend and 1394 * CardBus detaches the device. 1395 */ 1396 1397 /* 1398 * XXX ensure none of the taskqueues are running 1399 * XXX ensure sc_invalid is 1 1400 * XXX ensure the calibration callout is disabled 1401 */ 1402 1403 /* Disable the PCIe PHY, complete with workarounds */ 1404 ath_hal_enablepcie(sc->sc_ah, 1, 1); 1405} 1406 1407/* 1408 * Reset the key cache since some parts do not reset the 1409 * contents on resume. First we clear all entries, then 1410 * re-load keys that the 802.11 layer assumes are setup 1411 * in h/w. 1412 */ 1413static void 1414ath_reset_keycache(struct ath_softc *sc) 1415{ 1416 struct ifnet *ifp = sc->sc_ifp; 1417 struct ieee80211com *ic = ifp->if_l2com; 1418 struct ath_hal *ah = sc->sc_ah; 1419 int i; 1420 1421 for (i = 0; i < sc->sc_keymax; i++) 1422 ath_hal_keyreset(ah, i); 1423 ieee80211_crypto_reload_keys(ic); 1424} 1425 1426void 1427ath_resume(struct ath_softc *sc) 1428{ 1429 struct ifnet *ifp = sc->sc_ifp; 1430 struct ieee80211com *ic = ifp->if_l2com; 1431 struct ath_hal *ah = sc->sc_ah; 1432 HAL_STATUS status; 1433 1434 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1435 __func__, ifp->if_flags); 1436 1437 /* Re-enable PCIe, re-enable the PCIe bus */ 1438 ath_hal_enablepcie(ah, 0, 0); 1439 1440 /* 1441 * Must reset the chip before we reload the 1442 * keycache as we were powered down on suspend. 1443 */ 1444 ath_hal_reset(ah, sc->sc_opmode, 1445 sc->sc_curchan != NULL ? 
sc->sc_curchan : ic->ic_curchan, 1446 AH_FALSE, &status); 1447 ath_reset_keycache(sc); 1448 1449 /* Let DFS at it in case it's a DFS channel */ 1450 ath_dfs_radar_enable(sc, ic->ic_curchan); 1451 1452 /* Restore the LED configuration */ 1453 ath_led_config(sc); 1454 ath_hal_setledstate(ah, HAL_LED_INIT); 1455 1456 if (sc->sc_resume_up) 1457 ieee80211_resume_all(ic); 1458 1459 /* XXX beacons ? */ 1460} 1461 1462void 1463ath_shutdown(struct ath_softc *sc) 1464{ 1465 struct ifnet *ifp = sc->sc_ifp; 1466 1467 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1468 __func__, ifp->if_flags); 1469 1470 ath_stop(ifp); 1471 /* NB: no point powering down chip as we're about to reboot */ 1472} 1473 1474/* 1475 * Interrupt handler. Most of the actual processing is deferred. 1476 */ 1477void 1478ath_intr(void *arg) 1479{ 1480 struct ath_softc *sc = arg; 1481 struct ifnet *ifp = sc->sc_ifp; 1482 struct ath_hal *ah = sc->sc_ah; 1483 HAL_INT status = 0; 1484 uint32_t txqs; 1485 1486 /* 1487 * If we're inside a reset path, just print a warning and 1488 * clear the ISR. The reset routine will finish it for us. 1489 */ 1490 ATH_PCU_LOCK(sc); 1491 if (sc->sc_inreset_cnt) { 1492 HAL_INT status; 1493 ath_hal_getisr(ah, &status); /* clear ISR */ 1494 ath_hal_intrset(ah, 0); /* disable further intr's */ 1495 DPRINTF(sc, ATH_DEBUG_ANY, 1496 "%s: in reset, ignoring: status=0x%x\n", 1497 __func__, status); 1498 ATH_PCU_UNLOCK(sc); 1499 return; 1500 } 1501 1502 if (sc->sc_invalid) { 1503 /* 1504 * The hardware is not ready/present, don't touch anything. 1505 * Note this can happen early on if the IRQ is shared. 1506 */ 1507 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__); 1508 ATH_PCU_UNLOCK(sc); 1509 return; 1510 } 1511 if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */ 1512 ATH_PCU_UNLOCK(sc); 1513 return; 1514 } 1515 1516 if ((ifp->if_flags & IFF_UP) == 0 || 1517 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1518 HAL_INT status; 1519 1520 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 1521 __func__, ifp->if_flags); 1522 ath_hal_getisr(ah, &status); /* clear ISR */ 1523 ath_hal_intrset(ah, 0); /* disable further intr's */ 1524 ATH_PCU_UNLOCK(sc); 1525 return; 1526 } 1527 1528 /* 1529 * Figure out the reason(s) for the interrupt. Note 1530 * that the hal returns a pseudo-ISR that may include 1531 * bits we haven't explicitly enabled so we mask the 1532 * value to insure we only process bits we requested. 1533 */ 1534 ath_hal_getisr(ah, &status); /* NB: clears ISR too */ 1535 DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status); 1536 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, "ath_intr: mask=0x%.8x", status); 1537#ifdef ATH_KTR_INTR_DEBUG 1538 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 5, 1539 "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x", 1540 ah->ah_intrstate[0], 1541 ah->ah_intrstate[1], 1542 ah->ah_intrstate[2], 1543 ah->ah_intrstate[3], 1544 ah->ah_intrstate[6]); 1545#endif 1546 1547 /* Squirrel away SYNC interrupt debugging */ 1548 if (ah->ah_syncstate != 0) { 1549 int i; 1550 for (i = 0; i < 32; i++) 1551 if (ah->ah_syncstate & (i << i)) 1552 sc->sc_intr_stats.sync_intr[i]++; 1553 } 1554 1555 status &= sc->sc_imask; /* discard unasked for bits */ 1556 1557 /* Short-circuit un-handled interrupts */ 1558 if (status == 0x0) { 1559 ATH_PCU_UNLOCK(sc); 1560 return; 1561 } 1562 1563 /* 1564 * Take a note that we're inside the interrupt handler, so 1565 * the reset routines know to wait. 
1566 */ 1567 sc->sc_intr_cnt++; 1568 ATH_PCU_UNLOCK(sc); 1569 1570 /* 1571 * Handle the interrupt. We won't run concurrent with the reset 1572 * or channel change routines as they'll wait for sc_intr_cnt 1573 * to be 0 before continuing. 1574 */ 1575 if (status & HAL_INT_FATAL) { 1576 sc->sc_stats.ast_hardware++; 1577 ath_hal_intrset(ah, 0); /* disable intr's until reset */ 1578 taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask); 1579 } else { 1580 if (status & HAL_INT_SWBA) { 1581 /* 1582 * Software beacon alert--time to send a beacon. 1583 * Handle beacon transmission directly; deferring 1584 * this is too slow to meet timing constraints 1585 * under load. 1586 */ 1587#ifdef IEEE80211_SUPPORT_TDMA 1588 if (sc->sc_tdma) { 1589 if (sc->sc_tdmaswba == 0) { 1590 struct ieee80211com *ic = ifp->if_l2com; 1591 struct ieee80211vap *vap = 1592 TAILQ_FIRST(&ic->ic_vaps); 1593 ath_tdma_beacon_send(sc, vap); 1594 sc->sc_tdmaswba = 1595 vap->iv_tdma->tdma_bintval; 1596 } else 1597 sc->sc_tdmaswba--; 1598 } else 1599#endif 1600 { 1601 ath_beacon_proc(sc, 0); 1602#ifdef IEEE80211_SUPPORT_SUPERG 1603 /* 1604 * Schedule the rx taskq in case there's no 1605 * traffic so any frames held on the staging 1606 * queue are aged and potentially flushed. 1607 */ 1608 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 1609#endif 1610 } 1611 } 1612 if (status & HAL_INT_RXEOL) { 1613 int imask; 1614 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXEOL"); 1615 ATH_PCU_LOCK(sc); 1616 /* 1617 * NB: the hardware should re-read the link when 1618 * RXE bit is written, but it doesn't work at 1619 * least on older hardware revs. 1620 */ 1621 sc->sc_stats.ast_rxeol++; 1622 /* 1623 * Disable RXEOL/RXORN - prevent an interrupt 1624 * storm until the PCU logic can be reset. 1625 * In case the interface is reset some other 1626 * way before "sc_kickpcu" is called, don't 1627 * modify sc_imask - that way if it is reset 1628 * by a call to ath_reset() somehow, the 1629 * interrupt mask will be correctly reprogrammed. 1630 */ 1631 imask = sc->sc_imask; 1632 imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN); 1633 ath_hal_intrset(ah, imask); 1634 /* 1635 * Only blank sc_rxlink if we've not yet kicked 1636 * the PCU. 1637 * 1638 * This isn't entirely correct - the correct solution 1639 * would be to have a PCU lock and engage that for 1640 * the duration of the PCU fiddling; which would include 1641 * running the RX process. Otherwise we could end up 1642 * messing up the RX descriptor chain and making the 1643 * RX desc list much shorter. 1644 */ 1645 if (! sc->sc_kickpcu) 1646 sc->sc_rxlink = NULL; 1647 sc->sc_kickpcu = 1; 1648 /* 1649 * Enqueue an RX proc, to handled whatever 1650 * is in the RX queue. 1651 * This will then kick the PCU. 1652 */ 1653 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 1654 ATH_PCU_UNLOCK(sc); 1655 } 1656 if (status & HAL_INT_TXURN) { 1657 sc->sc_stats.ast_txurn++; 1658 /* bump tx trigger level */ 1659 ath_hal_updatetxtriglevel(ah, AH_TRUE); 1660 } 1661 /* 1662 * Handle both the legacy and RX EDMA interrupt bits. 1663 * Note that HAL_INT_RXLP is also HAL_INT_RXDESC. 1664 */ 1665 if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) { 1666 sc->sc_stats.ast_rx_intr++; 1667 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 1668 } 1669 if (status & HAL_INT_TX) { 1670 sc->sc_stats.ast_tx_intr++; 1671 /* 1672 * Grab all the currently set bits in the HAL txq bitmap 1673 * and blank them. This is the only place we should be 1674 * doing this. 1675 */ 1676 if (! 
sc->sc_isedma) { 1677 ATH_PCU_LOCK(sc); 1678 txqs = 0xffffffff; 1679 ath_hal_gettxintrtxqs(sc->sc_ah, &txqs); 1680 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 3, 1681 "ath_intr: TX; txqs=0x%08x, txq_active was 0x%08x, now 0x%08x", 1682 txqs, 1683 sc->sc_txq_active, 1684 sc->sc_txq_active | txqs); 1685 sc->sc_txq_active |= txqs; 1686 ATH_PCU_UNLOCK(sc); 1687 } 1688 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask); 1689 } 1690 if (status & HAL_INT_BMISS) { 1691 sc->sc_stats.ast_bmiss++; 1692 taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask); 1693 } 1694 if (status & HAL_INT_GTT) 1695 sc->sc_stats.ast_tx_timeout++; 1696 if (status & HAL_INT_CST) 1697 sc->sc_stats.ast_tx_cst++; 1698 if (status & HAL_INT_MIB) { 1699 sc->sc_stats.ast_mib++; 1700 ATH_PCU_LOCK(sc); 1701 /* 1702 * Disable interrupts until we service the MIB 1703 * interrupt; otherwise it will continue to fire. 1704 */ 1705 ath_hal_intrset(ah, 0); 1706 /* 1707 * Let the hal handle the event. We assume it will 1708 * clear whatever condition caused the interrupt. 1709 */ 1710 ath_hal_mibevent(ah, &sc->sc_halstats); 1711 /* 1712 * Don't reset the interrupt if we've just 1713 * kicked the PCU, or we may get a nested 1714 * RXEOL before the rxproc has had a chance 1715 * to run. 1716 */ 1717 if (sc->sc_kickpcu == 0) 1718 ath_hal_intrset(ah, sc->sc_imask); 1719 ATH_PCU_UNLOCK(sc); 1720 } 1721 if (status & HAL_INT_RXORN) { 1722 /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */ 1723 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXORN"); 1724 sc->sc_stats.ast_rxorn++; 1725 } 1726 } 1727 ATH_PCU_LOCK(sc); 1728 sc->sc_intr_cnt--; 1729 ATH_PCU_UNLOCK(sc); 1730} 1731 1732static void 1733ath_fatal_proc(void *arg, int pending) 1734{ 1735 struct ath_softc *sc = arg; 1736 struct ifnet *ifp = sc->sc_ifp; 1737 u_int32_t *state; 1738 u_int32_t len; 1739 void *sp; 1740 1741 if_printf(ifp, "hardware error; resetting\n"); 1742 /* 1743 * Fatal errors are unrecoverable. Typically these 1744 * are caused by DMA errors. Collect h/w state from 1745 * the hal so we can diagnose what's going on. 1746 */ 1747 if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) { 1748 KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len)); 1749 state = sp; 1750 if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n", 1751 state[0], state[1] , state[2], state[3], 1752 state[4], state[5]); 1753 } 1754 ath_reset(ifp, ATH_RESET_NOLOSS); 1755} 1756 1757static void 1758ath_bmiss_vap(struct ieee80211vap *vap) 1759{ 1760 /* 1761 * Workaround phantom bmiss interrupts by sanity-checking 1762 * the time of our last rx'd frame. If it is within the 1763 * beacon miss interval then ignore the interrupt. If it's 1764 * truly a bmiss we'll get another interrupt soon and that'll 1765 * be dispatched up for processing. Note this applies only 1766 * for h/w beacon miss events. 
1767 */ 1768 if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) { 1769 struct ifnet *ifp = vap->iv_ic->ic_ifp; 1770 struct ath_softc *sc = ifp->if_softc; 1771 u_int64_t lastrx = sc->sc_lastrx; 1772 u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah); 1773 /* XXX should take a locked ref to iv_bss */ 1774 u_int bmisstimeout = 1775 vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024; 1776 1777 DPRINTF(sc, ATH_DEBUG_BEACON, 1778 "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n", 1779 __func__, (unsigned long long) tsf, 1780 (unsigned long long)(tsf - lastrx), 1781 (unsigned long long) lastrx, bmisstimeout); 1782 1783 if (tsf - lastrx <= bmisstimeout) { 1784 sc->sc_stats.ast_bmiss_phantom++; 1785 return; 1786 } 1787 } 1788 ATH_VAP(vap)->av_bmiss(vap); 1789} 1790 1791static int 1792ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs) 1793{ 1794 uint32_t rsize; 1795 void *sp; 1796 1797 if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize)) 1798 return 0; 1799 KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize)); 1800 *hangs = *(uint32_t *)sp; 1801 return 1; 1802} 1803 1804static void 1805ath_bmiss_proc(void *arg, int pending) 1806{ 1807 struct ath_softc *sc = arg; 1808 struct ifnet *ifp = sc->sc_ifp; 1809 uint32_t hangs; 1810 1811 DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending); 1812 1813 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) { 1814 if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs); 1815 ath_reset(ifp, ATH_RESET_NOLOSS); 1816 } else 1817 ieee80211_beacon_miss(ifp->if_l2com); 1818} 1819 1820/* 1821 * Handle TKIP MIC setup to deal hardware that doesn't do MIC 1822 * calcs together with WME. If necessary disable the crypto 1823 * hardware and mark the 802.11 state so keys will be setup 1824 * with the MIC work done in software. 1825 */ 1826static void 1827ath_settkipmic(struct ath_softc *sc) 1828{ 1829 struct ifnet *ifp = sc->sc_ifp; 1830 struct ieee80211com *ic = ifp->if_l2com; 1831 1832 if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) { 1833 if (ic->ic_flags & IEEE80211_F_WME) { 1834 ath_hal_settkipmic(sc->sc_ah, AH_FALSE); 1835 ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC; 1836 } else { 1837 ath_hal_settkipmic(sc->sc_ah, AH_TRUE); 1838 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 1839 } 1840 } 1841} 1842 1843static void 1844ath_init(void *arg) 1845{ 1846 struct ath_softc *sc = (struct ath_softc *) arg; 1847 struct ifnet *ifp = sc->sc_ifp; 1848 struct ieee80211com *ic = ifp->if_l2com; 1849 struct ath_hal *ah = sc->sc_ah; 1850 HAL_STATUS status; 1851 1852 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 1853 __func__, ifp->if_flags); 1854 1855 ATH_LOCK(sc); 1856 /* 1857 * Stop anything previously setup. This is safe 1858 * whether this is the first time through or not. 1859 */ 1860 ath_stop_locked(ifp); 1861 1862 /* 1863 * The basic interface to setting the hardware in a good 1864 * state is ``reset''. On return the hardware is known to 1865 * be powered up and with interrupts disabled. This must 1866 * be followed by initialization of the appropriate bits 1867 * and then setup of the interrupt mask. 
1868 */ 1869 ath_settkipmic(sc); 1870 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) { 1871 if_printf(ifp, "unable to reset hardware; hal status %u\n", 1872 status); 1873 ATH_UNLOCK(sc); 1874 return; 1875 } 1876 ath_chan_change(sc, ic->ic_curchan); 1877 1878 /* Let DFS at it in case it's a DFS channel */ 1879 ath_dfs_radar_enable(sc, ic->ic_curchan); 1880 1881 /* 1882 * Likewise this is set during reset so update 1883 * state cached in the driver. 1884 */ 1885 sc->sc_diversity = ath_hal_getdiversity(ah); 1886 sc->sc_lastlongcal = 0; 1887 sc->sc_resetcal = 1; 1888 sc->sc_lastcalreset = 0; 1889 sc->sc_lastani = 0; 1890 sc->sc_lastshortcal = 0; 1891 sc->sc_doresetcal = AH_FALSE; 1892 /* 1893 * Beacon timers were cleared here; give ath_newstate() 1894 * a hint that the beacon timers should be poked when 1895 * things transition to the RUN state. 1896 */ 1897 sc->sc_beacons = 0; 1898 1899 /* 1900 * Setup the hardware after reset: the key cache 1901 * is filled as needed and the receive engine is 1902 * set going. Frame transmit is handled entirely 1903 * in the frame output path; there's nothing to do 1904 * here except setup the interrupt mask. 1905 */ 1906 if (ath_startrecv(sc) != 0) { 1907 if_printf(ifp, "unable to start recv logic\n"); 1908 ATH_UNLOCK(sc); 1909 return; 1910 } 1911 1912 /* 1913 * Enable interrupts. 1914 */ 1915 sc->sc_imask = HAL_INT_RX | HAL_INT_TX 1916 | HAL_INT_RXEOL | HAL_INT_RXORN 1917 | HAL_INT_FATAL | HAL_INT_GLOBAL; 1918 1919 /* 1920 * Enable RX EDMA bits. Note these overlap with 1921 * HAL_INT_RX and HAL_INT_RXDESC respectively. 1922 */ 1923 if (sc->sc_isedma) 1924 sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP); 1925 1926 /* 1927 * Enable MIB interrupts when there are hardware phy counters. 1928 * Note we only do this (at the moment) for station mode. 1929 */ 1930 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) 1931 sc->sc_imask |= HAL_INT_MIB; 1932 1933 /* Enable global TX timeout and carrier sense timeout if available */ 1934 if (ath_hal_gtxto_supported(ah)) 1935 sc->sc_imask |= HAL_INT_GTT; 1936 1937 DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n", 1938 __func__, sc->sc_imask); 1939 1940 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1941 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc); 1942 ath_hal_intrset(ah, sc->sc_imask); 1943 1944 ATH_UNLOCK(sc); 1945 1946#ifdef ATH_TX99_DIAG 1947 if (sc->sc_tx99 != NULL) 1948 sc->sc_tx99->start(sc->sc_tx99); 1949 else 1950#endif 1951 ieee80211_start_all(ic); /* start all vap's */ 1952} 1953 1954static void 1955ath_stop_locked(struct ifnet *ifp) 1956{ 1957 struct ath_softc *sc = ifp->if_softc; 1958 struct ath_hal *ah = sc->sc_ah; 1959 1960 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n", 1961 __func__, sc->sc_invalid, ifp->if_flags); 1962 1963 ATH_LOCK_ASSERT(sc); 1964 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1965 /* 1966 * Shutdown the hardware and driver: 1967 * reset 802.11 state machine 1968 * turn off timers 1969 * disable interrupts 1970 * turn off the radio 1971 * clear transmit machinery 1972 * clear receive machinery 1973 * drain and release tx queues 1974 * reclaim beacon resources 1975 * power down hardware 1976 * 1977 * Note that some of this work is not possible if the 1978 * hardware is gone (invalid). 
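 *
 * Concretely, the register-touching steps below (LED GPIO,
 * ath_hal_intrset(), ath_stoprecv(), ath_hal_phydisable()) are
 * only done while sc_invalid is clear; when the hardware has
 * gone away only the software state (callout, queues, rxlink)
 * is torn down.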
1979 */
1980#ifdef ATH_TX99_DIAG
1981 if (sc->sc_tx99 != NULL)
1982 sc->sc_tx99->stop(sc->sc_tx99);
1983#endif
1984 callout_stop(&sc->sc_wd_ch);
1985 sc->sc_wd_timer = 0;
1986 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1987 if (!sc->sc_invalid) {
1988 if (sc->sc_softled) {
1989 callout_stop(&sc->sc_ledtimer);
1990 ath_hal_gpioset(ah, sc->sc_ledpin,
1991 !sc->sc_ledon);
1992 sc->sc_blinking = 0;
1993 }
1994 ath_hal_intrset(ah, 0);
1995 }
1996 ath_draintxq(sc, ATH_RESET_DEFAULT);
1997 if (!sc->sc_invalid) {
1998 ath_stoprecv(sc, 1);
1999 ath_hal_phydisable(ah);
2000 } else
2001 sc->sc_rxlink = NULL;
2002 ath_beacon_free(sc); /* XXX not needed */
2003 }
2004}
2005
2006#define MAX_TXRX_ITERATIONS 1000
2007static void
2008ath_txrx_stop_locked(struct ath_softc *sc)
2009{
2010 int i = MAX_TXRX_ITERATIONS;
2011
2012 ATH_UNLOCK_ASSERT(sc);
2013 ATH_PCU_LOCK_ASSERT(sc);
2014
2015 /*
2016 * Sleep until all the pending operations have completed.
2017 *
2018 * The caller must ensure that reset has been incremented
2019 * or the pending operations may continue being queued.
2020 */
2021 while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
2022 sc->sc_txstart_cnt || sc->sc_intr_cnt) {
2023 if (i <= 0)
2024 break;
2025 msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 1);
2026 i--;
2027 }
2028
2029 if (i <= 0)
2030 device_printf(sc->sc_dev,
2031 "%s: didn't finish after %d iterations\n",
2032 __func__, MAX_TXRX_ITERATIONS);
2033}
2034#undef MAX_TXRX_ITERATIONS
2035
2036#if 0
2037static void
2038ath_txrx_stop(struct ath_softc *sc)
2039{
2040 ATH_UNLOCK_ASSERT(sc);
2041 ATH_PCU_UNLOCK_ASSERT(sc);
2042
2043 ATH_PCU_LOCK(sc);
2044 ath_txrx_stop_locked(sc);
2045 ATH_PCU_UNLOCK(sc);
2046}
2047#endif
2048
2049static void
2050ath_txrx_start(struct ath_softc *sc)
2051{
2052
2053 taskqueue_unblock(sc->sc_tq);
2054}
2055
2056/*
2057 * Grab the reset lock, and wait around until no one else
2058 * is trying to do anything with it.
2059 *
2060 * This is totally horrible but we can't hold this lock for
2061 * long enough to do TX/RX or we end up with net80211/ip stack
2062 * LORs and eventual deadlock.
2063 *
2064 * "dowait" signals whether to spin, waiting for the reset
2065 * lock count to reach 0. This should (for now) only be used
2066 * during the reset path, as the rest of the code may not
2067 * be locking-reentrant enough to behave correctly.
2068 *
2069 * Another, cleaner way should be found to serialise all of
2070 * these operations.
2071 */
2072#define MAX_RESET_ITERATIONS 10
2073static int
2074ath_reset_grablock(struct ath_softc *sc, int dowait)
2075{
2076 int w = 0;
2077 int i = MAX_RESET_ITERATIONS;
2078
2079 ATH_PCU_LOCK_ASSERT(sc);
2080 do {
2081 if (sc->sc_inreset_cnt == 0) {
2082 w = 1;
2083 break;
2084 }
2085 if (dowait == 0) {
2086 w = 0;
2087 break;
2088 }
2089 ATH_PCU_UNLOCK(sc);
2090 pause("ath_reset_grablock", 1);
2091 i--;
2092 ATH_PCU_LOCK(sc);
2093 } while (i > 0);
2094
2095 /*
2096 * We always increment the refcounter, regardless
2097 * of whether we succeeded in getting it in an exclusive
2098 * way.
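 *
 * Because of that, every caller - whether or not it got the
 * exclusive (w == 1) result - is expected to later take
 * ATH_PCU_LOCK and decrement sc_inreset_cnt again, as
 * ath_reset() does once its work is complete.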
2099 */
2100 sc->sc_inreset_cnt++;
2101
2102 if (i <= 0)
2103 device_printf(sc->sc_dev,
2104 "%s: didn't finish after %d iterations\n",
2105 __func__, MAX_RESET_ITERATIONS);
2106
2107 if (w == 0)
2108 device_printf(sc->sc_dev,
2109 "%s: warning, recursive reset path!\n",
2110 __func__);
2111
2112 return w;
2113}
2114#undef MAX_RESET_ITERATIONS
2115
2116/*
2117 * XXX TODO: write ath_reset_releaselock
2118 */
2119
2120static void
2121ath_stop(struct ifnet *ifp)
2122{
2123 struct ath_softc *sc = ifp->if_softc;
2124
2125 ATH_LOCK(sc);
2126 ath_stop_locked(ifp);
2127 ATH_UNLOCK(sc);
2128}
2129
2130/*
2131 * Reset the hardware w/o losing operational state. This is
2132 * basically a more efficient way of doing ath_stop, ath_init,
2133 * followed by state transitions to the current 802.11
2134 * operational state. Used to recover from various errors and
2135 * to reset or reload hardware state.
2136 */
2137int
2138ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
2139{
2140 struct ath_softc *sc = ifp->if_softc;
2141 struct ieee80211com *ic = ifp->if_l2com;
2142 struct ath_hal *ah = sc->sc_ah;
2143 HAL_STATUS status;
2144 int i;
2145
2146 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
2147
2148 /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
2149 ATH_PCU_UNLOCK_ASSERT(sc);
2150 ATH_UNLOCK_ASSERT(sc);
2151
2152 /* Try to stop any further TX/RX from occurring */
2153 taskqueue_block(sc->sc_tq);
2154
2155 ATH_PCU_LOCK(sc);
2156 ath_hal_intrset(ah, 0); /* disable interrupts */
2157 ath_txrx_stop_locked(sc); /* Ensure TX/RX is stopped */
2158 if (ath_reset_grablock(sc, 1) == 0) {
2159 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
2160 __func__);
2161 }
2162 ATH_PCU_UNLOCK(sc);
2163
2164 /*
2165 * Should now wait for pending TX/RX to complete
2166 * and block future ones from occurring. This needs to be
2167 * done before the TX queue is drained.
2168 */
2169 ath_draintxq(sc, reset_type); /* stop xmit side */
2170
2171 /*
2172 * Regardless of whether we're doing a no-loss flush or
2173 * not, stop the PCU and handle what's in the RX queue.
2174 * That way frames aren't dropped which shouldn't be.
2175 */
2176 ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
2177 ath_rx_flush(sc);
2178
2179 ath_settkipmic(sc); /* configure TKIP MIC handling */
2180 /* NB: indicate channel change so we do a full reset */
2181 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
2182 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
2183 __func__, status);
2184 sc->sc_diversity = ath_hal_getdiversity(ah);
2185
2186 /* Let DFS at it in case it's a DFS channel */
2187 ath_dfs_radar_enable(sc, ic->ic_curchan);
2188
2189 if (ath_startrecv(sc) != 0) /* restart recv */
2190 if_printf(ifp, "%s: unable to start recv logic\n", __func__);
2191 /*
2192 * We may be doing a reset in response to an ioctl
2193 * that changes the channel so update any state that
2194 * might change as a result.
2195 */
2196 ath_chan_change(sc, ic->ic_curchan);
2197 if (sc->sc_beacons) { /* restart beacons */
2198#ifdef IEEE80211_SUPPORT_TDMA
2199 if (sc->sc_tdma)
2200 ath_tdma_config(sc, NULL);
2201 else
2202#endif
2203 ath_beacon_config(sc, NULL);
2204 }
2205
2206 /*
2207 * Release the reset lock and re-enable interrupts here.
2208 * If an interrupt was being processed in ath_intr(),
2209 * it would disable interrupts at this point.
So we have
2210 * to atomically enable interrupts and decrement the
2211 * reset counter - this way ath_intr() doesn't end up
2212 * disabling interrupts without a corresponding enable
2213 * in the reset or channel change path.
2214 */
2215 ATH_PCU_LOCK(sc);
2216 sc->sc_inreset_cnt--;
2217 /* XXX only do this if sc_inreset_cnt == 0? */
2218 ath_hal_intrset(ah, sc->sc_imask);
2219 ATH_PCU_UNLOCK(sc);
2220
2221 /*
2222 * TX and RX can be started here. If it were started with
2223 * sc_inreset_cnt > 0, the TX and RX path would abort.
2224 * Thus if this is a nested call through the reset or
2225 * channel change code, TX completion will occur but
2226 * RX completion and ath_start / ath_tx_start will not
2227 * run.
2228 */
2229
2230 /* Restart TX/RX as needed */
2231 ath_txrx_start(sc);
2232
2233 /* XXX Restart TX completion and pending TX */
2234 if (reset_type == ATH_RESET_NOLOSS) {
2235 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
2236 if (ATH_TXQ_SETUP(sc, i)) {
2237 ATH_TXQ_LOCK(&sc->sc_txq[i]);
2238 ath_txq_restart_dma(sc, &sc->sc_txq[i]);
2239 ath_txq_sched(sc, &sc->sc_txq[i]);
2240 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
2241 }
2242 }
2243 }
2244
2245 /*
2246 * This may have been set during an ath_start() call which
2247 * set this once it detected a concurrent TX was going on.
2248 * So, clear it.
2249 */
2250 IF_LOCK(&ifp->if_snd);
2251 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2252 IF_UNLOCK(&ifp->if_snd);
2253
2254 /* Handle any frames in the TX queue */
2255 /*
2256 * XXX should this be done by the caller, rather than
2257 * ath_reset() ?
2258 */
2259 ath_tx_kick(sc); /* restart xmit */
2260 return 0;
2261}
2262
2263static int
2264ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
2265{
2266 struct ieee80211com *ic = vap->iv_ic;
2267 struct ifnet *ifp = ic->ic_ifp;
2268 struct ath_softc *sc = ifp->if_softc;
2269 struct ath_hal *ah = sc->sc_ah;
2270
2271 switch (cmd) {
2272 case IEEE80211_IOC_TXPOWER:
2273 /*
2274 * If per-packet TPC is enabled, then we have nothing
2275 * to do; otherwise we need to force the global limit.
2276 * All this can happen directly; no need to reset.
2277 */
2278 if (!ath_hal_gettpc(ah))
2279 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
2280 return 0;
2281 }
2282 /* XXX? Full or NOLOSS? */
2283 return ath_reset(ifp, ATH_RESET_FULL);
2284}
2285
2286struct ath_buf *
2287_ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype)
2288{
2289 struct ath_buf *bf;
2290
2291 ATH_TXBUF_LOCK_ASSERT(sc);
2292
2293 if (btype == ATH_BUFTYPE_MGMT)
2294 bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt);
2295 else
2296 bf = TAILQ_FIRST(&sc->sc_txbuf);
2297
2298 if (bf == NULL) {
2299 sc->sc_stats.ast_tx_getnobuf++;
2300 } else {
2301 if (bf->bf_flags & ATH_BUF_BUSY) {
2302 sc->sc_stats.ast_tx_getbusybuf++;
2303 bf = NULL;
2304 }
2305 }
2306
2307 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) {
2308 if (btype == ATH_BUFTYPE_MGMT)
2309 TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list);
2310 else {
2311 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
2312 sc->sc_txbuf_cnt--;
2313
2314 /*
2315 * This shouldn't happen; however just to be
2316 * safe print a warning and fudge the txbuf
2317 * count.
2318 */
2319 if (sc->sc_txbuf_cnt < 0) {
2320 device_printf(sc->sc_dev,
2321 "%s: sc_txbuf_cnt < 0?\n",
2322 __func__);
2323 sc->sc_txbuf_cnt = 0;
2324 }
2325 }
2326 } else
2327 bf = NULL;
2328
2329 if (bf == NULL) {
2330 /* XXX should check which list, mgmt or otherwise */
2331 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
2332 TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
2333 "out of xmit buffers" : "xmit buffer busy"); 2334 return NULL; 2335 } 2336 2337 /* XXX TODO: should do this at buffer list initialisation */ 2338 /* XXX (then, ensure the buffer has the right flag set) */ 2339 if (btype == ATH_BUFTYPE_MGMT) 2340 bf->bf_flags |= ATH_BUF_MGMT; 2341 else 2342 bf->bf_flags &= (~ATH_BUF_MGMT); 2343 2344 /* Valid bf here; clear some basic fields */ 2345 bf->bf_next = NULL; /* XXX just to be sure */ 2346 bf->bf_last = NULL; /* XXX again, just to be sure */ 2347 bf->bf_comp = NULL; /* XXX again, just to be sure */ 2348 bzero(&bf->bf_state, sizeof(bf->bf_state)); 2349 2350 /* 2351 * Track the descriptor ID only if doing EDMA 2352 */ 2353 if (sc->sc_isedma) { 2354 bf->bf_descid = sc->sc_txbuf_descid; 2355 sc->sc_txbuf_descid++; 2356 } 2357 2358 return bf; 2359} 2360 2361/* 2362 * When retrying a software frame, buffers marked ATH_BUF_BUSY 2363 * can't be thrown back on the queue as they could still be 2364 * in use by the hardware. 2365 * 2366 * This duplicates the buffer, or returns NULL. 2367 * 2368 * The descriptor is also copied but the link pointers and 2369 * the DMA segments aren't copied; this frame should thus 2370 * be again passed through the descriptor setup/chain routines 2371 * so the link is correct. 2372 * 2373 * The caller must free the buffer using ath_freebuf(). 2374 * 2375 * XXX TODO: this call shouldn't fail as it'll cause packet loss 2376 * XXX in the TX pathway when retries are needed. 2377 * XXX Figure out how to keep some buffers free, or factor the 2378 * XXX number of busy buffers into the xmit path (ath_start()) 2379 * XXX so we don't over-commit. 2380 */ 2381struct ath_buf * 2382ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf) 2383{ 2384 struct ath_buf *tbf; 2385 2386 tbf = ath_getbuf(sc, 2387 (bf->bf_flags & ATH_BUF_MGMT) ? 2388 ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL); 2389 if (tbf == NULL) 2390 return NULL; /* XXX failure? Why? */ 2391 2392 /* Copy basics */ 2393 tbf->bf_next = NULL; 2394 tbf->bf_nseg = bf->bf_nseg; 2395 tbf->bf_flags = bf->bf_flags & ~ATH_BUF_BUSY; 2396 tbf->bf_status = bf->bf_status; 2397 tbf->bf_m = bf->bf_m; 2398 tbf->bf_node = bf->bf_node; 2399 /* will be setup by the chain/setup function */ 2400 tbf->bf_lastds = NULL; 2401 /* for now, last == self */ 2402 tbf->bf_last = tbf; 2403 tbf->bf_comp = bf->bf_comp; 2404 2405 /* NOTE: DMA segments will be setup by the setup/chain functions */ 2406 2407 /* The caller has to re-init the descriptor + links */ 2408 2409 /* Copy state */ 2410 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state)); 2411 2412 return tbf; 2413} 2414 2415struct ath_buf * 2416ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype) 2417{ 2418 struct ath_buf *bf; 2419 2420 ATH_TXBUF_LOCK(sc); 2421 bf = _ath_getbuf_locked(sc, btype); 2422 /* 2423 * If a mgmt buffer was requested but we're out of those, 2424 * try requesting a normal one. 
2425 */ 2426 if (bf == NULL && btype == ATH_BUFTYPE_MGMT) 2427 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); 2428 ATH_TXBUF_UNLOCK(sc); 2429 if (bf == NULL) { 2430 struct ifnet *ifp = sc->sc_ifp; 2431 2432 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__); 2433 sc->sc_stats.ast_tx_qstop++; 2434 IF_LOCK(&ifp->if_snd); 2435 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2436 IF_UNLOCK(&ifp->if_snd); 2437 } 2438 return bf; 2439} 2440 2441static void 2442ath_start_queue(struct ifnet *ifp) 2443{ 2444 struct ath_softc *sc = ifp->if_softc; 2445 2446 ath_tx_kick(sc); 2447} 2448 2449void 2450ath_start_task(void *arg, int npending) 2451{ 2452 struct ath_softc *sc = (struct ath_softc *) arg; 2453 struct ifnet *ifp = sc->sc_ifp; 2454 2455 /* XXX is it ok to hold the ATH_LOCK here? */ 2456 ATH_PCU_LOCK(sc); 2457 if (sc->sc_inreset_cnt > 0) { 2458 device_printf(sc->sc_dev, 2459 "%s: sc_inreset_cnt > 0; bailing\n", __func__); 2460 ATH_PCU_UNLOCK(sc); 2461 IF_LOCK(&ifp->if_snd); 2462 sc->sc_stats.ast_tx_qstop++; 2463 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2464 IF_UNLOCK(&ifp->if_snd); 2465 return; 2466 } 2467 sc->sc_txstart_cnt++; 2468 ATH_PCU_UNLOCK(sc); 2469 2470 ath_start(sc->sc_ifp); 2471 2472 ATH_PCU_LOCK(sc); 2473 sc->sc_txstart_cnt--; 2474 ATH_PCU_UNLOCK(sc); 2475} 2476 2477void 2478ath_start(struct ifnet *ifp) 2479{ 2480 struct ath_softc *sc = ifp->if_softc; 2481 struct ieee80211_node *ni; 2482 struct ath_buf *bf; 2483 struct mbuf *m, *next; 2484 ath_bufhead frags; 2485 2486 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) 2487 return; 2488 2489 for (;;) { 2490 ATH_TXBUF_LOCK(sc); 2491 if (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree) { 2492 /* XXX increment counter? */ 2493 ATH_TXBUF_UNLOCK(sc); 2494 IF_LOCK(&ifp->if_snd); 2495 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2496 IF_UNLOCK(&ifp->if_snd); 2497 break; 2498 } 2499 ATH_TXBUF_UNLOCK(sc); 2500 2501 /* 2502 * Grab a TX buffer and associated resources. 2503 */ 2504 bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL); 2505 if (bf == NULL) 2506 break; 2507 2508 IFQ_DEQUEUE(&ifp->if_snd, m); 2509 if (m == NULL) { 2510 ATH_TXBUF_LOCK(sc); 2511 ath_returnbuf_head(sc, bf); 2512 ATH_TXBUF_UNLOCK(sc); 2513 break; 2514 } 2515 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; 2516 /* 2517 * Check for fragmentation. If this frame 2518 * has been broken up verify we have enough 2519 * buffers to send all the fragments so all 2520 * go out or none... 2521 */ 2522 TAILQ_INIT(&frags); 2523 if ((m->m_flags & M_FRAG) && 2524 !ath_txfrag_setup(sc, &frags, m, ni)) { 2525 DPRINTF(sc, ATH_DEBUG_XMIT, 2526 "%s: out of txfrag buffers\n", __func__); 2527 sc->sc_stats.ast_tx_nofrag++; 2528 ifp->if_oerrors++; 2529 ath_freetx(m); 2530 goto bad; 2531 } 2532 ifp->if_opackets++; 2533 nextfrag: 2534 /* 2535 * Pass the frame to the h/w for transmission. 2536 * Fragmented frames have each frag chained together 2537 * with m_nextpkt. We know there are sufficient ath_buf's 2538 * to send all the frags because of work done by 2539 * ath_txfrag_setup. We leave m_nextpkt set while 2540 * calling ath_tx_start so it can use it to extend the 2541 * the tx duration to cover the subsequent frag and 2542 * so it can reclaim all the mbufs in case of an error; 2543 * ath_tx_start clears m_nextpkt once it commits to 2544 * handing the frame to the hardware. 
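 *
 * Sketch of the loop below (illustrative only): the first
 * fragment uses the ath_buf obtained above, and each later
 * fragment takes the next pre-allocated buffer off "frags":
 *
 *	next = m->m_nextpkt;
 *	if (ath_tx_start(sc, ni, bf, m)) { ...reclaim... }
 *	m = next;
 *	bf = TAILQ_FIRST(&frags);
 *	TAILQ_REMOVE(&frags, bf, bf_list);
 *	(repeat until the m_nextpkt chain is exhausted)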
2545 */ 2546 next = m->m_nextpkt; 2547 if (ath_tx_start(sc, ni, bf, m)) { 2548 bad: 2549 ifp->if_oerrors++; 2550 reclaim: 2551 bf->bf_m = NULL; 2552 bf->bf_node = NULL; 2553 ATH_TXBUF_LOCK(sc); 2554 ath_returnbuf_head(sc, bf); 2555 ath_txfrag_cleanup(sc, &frags, ni); 2556 ATH_TXBUF_UNLOCK(sc); 2557 if (ni != NULL) 2558 ieee80211_free_node(ni); 2559 continue; 2560 } 2561 if (next != NULL) { 2562 /* 2563 * Beware of state changing between frags. 2564 * XXX check sta power-save state? 2565 */ 2566 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { 2567 DPRINTF(sc, ATH_DEBUG_XMIT, 2568 "%s: flush fragmented packet, state %s\n", 2569 __func__, 2570 ieee80211_state_name[ni->ni_vap->iv_state]); 2571 ath_freetx(next); 2572 goto reclaim; 2573 } 2574 m = next; 2575 bf = TAILQ_FIRST(&frags); 2576 KASSERT(bf != NULL, ("no buf for txfrag")); 2577 TAILQ_REMOVE(&frags, bf, bf_list); 2578 goto nextfrag; 2579 } 2580 2581 sc->sc_wd_timer = 5; 2582 } 2583} 2584 2585static int 2586ath_media_change(struct ifnet *ifp) 2587{ 2588 int error = ieee80211_media_change(ifp); 2589 /* NB: only the fixed rate can change and that doesn't need a reset */ 2590 return (error == ENETRESET ? 0 : error); 2591} 2592 2593/* 2594 * Block/unblock tx+rx processing while a key change is done. 2595 * We assume the caller serializes key management operations 2596 * so we only need to worry about synchronization with other 2597 * uses that originate in the driver. 2598 */ 2599static void 2600ath_key_update_begin(struct ieee80211vap *vap) 2601{ 2602 struct ifnet *ifp = vap->iv_ic->ic_ifp; 2603 struct ath_softc *sc = ifp->if_softc; 2604 2605 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2606 taskqueue_block(sc->sc_tq); 2607 IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */ 2608} 2609 2610static void 2611ath_key_update_end(struct ieee80211vap *vap) 2612{ 2613 struct ifnet *ifp = vap->iv_ic->ic_ifp; 2614 struct ath_softc *sc = ifp->if_softc; 2615 2616 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2617 IF_UNLOCK(&ifp->if_snd); 2618 taskqueue_unblock(sc->sc_tq); 2619} 2620 2621static void 2622ath_update_promisc(struct ifnet *ifp) 2623{ 2624 struct ath_softc *sc = ifp->if_softc; 2625 u_int32_t rfilt; 2626 2627 /* configure rx filter */ 2628 rfilt = ath_calcrxfilter(sc); 2629 ath_hal_setrxfilter(sc->sc_ah, rfilt); 2630 2631 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); 2632} 2633 2634static void 2635ath_update_mcast(struct ifnet *ifp) 2636{ 2637 struct ath_softc *sc = ifp->if_softc; 2638 u_int32_t mfilt[2]; 2639 2640 /* calculate and install multicast filter */ 2641 if ((ifp->if_flags & IFF_ALLMULTI) == 0) { 2642 struct ifmultiaddr *ifma; 2643 /* 2644 * Merge multicast addresses to form the hardware filter. 2645 */ 2646 mfilt[0] = mfilt[1] = 0; 2647 if_maddr_rlock(ifp); /* XXX need some fiddling to remove? 
*/ 2648 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2649 caddr_t dl; 2650 u_int32_t val; 2651 u_int8_t pos; 2652 2653 /* calculate XOR of eight 6bit values */ 2654 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 2655 val = LE_READ_4(dl + 0); 2656 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2657 val = LE_READ_4(dl + 3); 2658 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2659 pos &= 0x3f; 2660 mfilt[pos / 32] |= (1 << (pos % 32)); 2661 } 2662 if_maddr_runlock(ifp); 2663 } else 2664 mfilt[0] = mfilt[1] = ~0; 2665 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]); 2666 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n", 2667 __func__, mfilt[0], mfilt[1]); 2668} 2669 2670void 2671ath_mode_init(struct ath_softc *sc) 2672{ 2673 struct ifnet *ifp = sc->sc_ifp; 2674 struct ath_hal *ah = sc->sc_ah; 2675 u_int32_t rfilt; 2676 2677 /* configure rx filter */ 2678 rfilt = ath_calcrxfilter(sc); 2679 ath_hal_setrxfilter(ah, rfilt); 2680 2681 /* configure operational mode */ 2682 ath_hal_setopmode(ah); 2683 2684 DPRINTF(sc, ATH_DEBUG_STATE | ATH_DEBUG_MODE, 2685 "%s: ah=%p, ifp=%p, if_addr=%p\n", 2686 __func__, 2687 ah, 2688 ifp, 2689 (ifp == NULL) ? NULL : ifp->if_addr); 2690 2691 /* handle any link-level address change */ 2692 ath_hal_setmac(ah, IF_LLADDR(ifp)); 2693 2694 /* calculate and install multicast filter */ 2695 ath_update_mcast(ifp); 2696} 2697 2698/* 2699 * Set the slot time based on the current setting. 2700 */ 2701void 2702ath_setslottime(struct ath_softc *sc) 2703{ 2704 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 2705 struct ath_hal *ah = sc->sc_ah; 2706 u_int usec; 2707 2708 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan)) 2709 usec = 13; 2710 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan)) 2711 usec = 21; 2712 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { 2713 /* honor short/long slot time only in 11g */ 2714 /* XXX shouldn't honor on pure g or turbo g channel */ 2715 if (ic->ic_flags & IEEE80211_F_SHSLOT) 2716 usec = HAL_SLOT_TIME_9; 2717 else 2718 usec = HAL_SLOT_TIME_20; 2719 } else 2720 usec = HAL_SLOT_TIME_9; 2721 2722 DPRINTF(sc, ATH_DEBUG_RESET, 2723 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n", 2724 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, 2725 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec); 2726 2727 ath_hal_setslottime(ah, usec); 2728 sc->sc_updateslot = OK; 2729} 2730 2731/* 2732 * Callback from the 802.11 layer to update the 2733 * slot time based on the current setting. 2734 */ 2735static void 2736ath_updateslot(struct ifnet *ifp) 2737{ 2738 struct ath_softc *sc = ifp->if_softc; 2739 struct ieee80211com *ic = ifp->if_l2com; 2740 2741 /* 2742 * When not coordinating the BSS, change the hardware 2743 * immediately. For other operation we defer the change 2744 * until beacon updates have propagated to the stations. 2745 */ 2746 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 2747 ic->ic_opmode == IEEE80211_M_MBSS) 2748 sc->sc_updateslot = UPDATE; 2749 else 2750 ath_setslottime(sc); 2751} 2752 2753/* 2754 * Append the contents of src to dst; both queues 2755 * are assumed to be locked. 
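 *
 * A typical caller therefore looks something like (illustrative):
 *
 *	ATH_TXQ_LOCK(dst);
 *	ATH_TXQ_LOCK(src);
 *	ath_txqmove(dst, src);
 *	ATH_TXQ_UNLOCK(src);
 *	ATH_TXQ_UNLOCK(dst);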
2756 */ 2757void 2758ath_txqmove(struct ath_txq *dst, struct ath_txq *src) 2759{ 2760 2761 ATH_TXQ_LOCK_ASSERT(dst); 2762 ATH_TXQ_LOCK_ASSERT(src); 2763 2764 TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list); 2765 dst->axq_link = src->axq_link; 2766 src->axq_link = NULL; 2767 dst->axq_depth += src->axq_depth; 2768 dst->axq_aggr_depth += src->axq_aggr_depth; 2769 src->axq_depth = 0; 2770 src->axq_aggr_depth = 0; 2771} 2772 2773/* 2774 * Reset the hardware, with no loss. 2775 * 2776 * This can't be used for a general case reset. 2777 */ 2778static void 2779ath_reset_proc(void *arg, int pending) 2780{ 2781 struct ath_softc *sc = arg; 2782 struct ifnet *ifp = sc->sc_ifp; 2783 2784#if 0 2785 if_printf(ifp, "%s: resetting\n", __func__); 2786#endif 2787 ath_reset(ifp, ATH_RESET_NOLOSS); 2788} 2789 2790/* 2791 * Reset the hardware after detecting beacons have stopped. 2792 */ 2793static void 2794ath_bstuck_proc(void *arg, int pending) 2795{ 2796 struct ath_softc *sc = arg; 2797 struct ifnet *ifp = sc->sc_ifp; 2798 uint32_t hangs = 0; 2799 2800 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) 2801 if_printf(ifp, "bb hang detected (0x%x)\n", hangs); 2802 2803 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n", 2804 sc->sc_bmisscount); 2805 sc->sc_stats.ast_bstuck++; 2806 /* 2807 * This assumes that there's no simultaneous channel mode change 2808 * occuring. 2809 */ 2810 ath_reset(ifp, ATH_RESET_NOLOSS); 2811} 2812 2813static void 2814ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 2815{ 2816 bus_addr_t *paddr = (bus_addr_t*) arg; 2817 KASSERT(error == 0, ("error %u on bus_dma callback", error)); 2818 *paddr = segs->ds_addr; 2819} 2820 2821/* 2822 * Allocate the descriptors and appropriate DMA tag/setup. 2823 * 2824 * For some situations (eg EDMA TX completion), there isn't a requirement 2825 * for the ath_buf entries to be allocated. 2826 */ 2827int 2828ath_descdma_alloc_desc(struct ath_softc *sc, 2829 struct ath_descdma *dd, ath_bufhead *head, 2830 const char *name, int ds_size, int ndesc) 2831{ 2832#define DS2PHYS(_dd, _ds) \ 2833 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 2834#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \ 2835 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0) 2836 struct ifnet *ifp = sc->sc_ifp; 2837 int error; 2838 2839 dd->dd_descsize = ds_size; 2840 2841 DPRINTF(sc, ATH_DEBUG_RESET, 2842 "%s: %s DMA: %u desc, %d bytes per descriptor\n", 2843 __func__, name, ndesc, dd->dd_descsize); 2844 2845 dd->dd_name = name; 2846 dd->dd_desc_len = dd->dd_descsize * ndesc; 2847 2848 /* 2849 * Merlin work-around: 2850 * Descriptors that cross the 4KB boundary can't be used. 2851 * Assume one skipped descriptor per 4KB page. 2852 */ 2853 if (! ath_hal_split4ktrans(sc->sc_ah)) { 2854 int numpages = dd->dd_desc_len / 4096; 2855 dd->dd_desc_len += ds_size * numpages; 2856 } 2857 2858 /* 2859 * Setup DMA descriptor area. 
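 *
 * The length used here already includes the Merlin slack computed
 * above; e.g. (sizes illustrative) 512 descriptors of 24 bytes is
 * 12288 bytes = 3 pages, so 3 * 24 = 72 bytes of padding are added
 * to leave room for one skipped descriptor per page.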
2860 */ 2861 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 2862 PAGE_SIZE, 0, /* alignment, bounds */ 2863 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2864 BUS_SPACE_MAXADDR, /* highaddr */ 2865 NULL, NULL, /* filter, filterarg */ 2866 dd->dd_desc_len, /* maxsize */ 2867 1, /* nsegments */ 2868 dd->dd_desc_len, /* maxsegsize */ 2869 BUS_DMA_ALLOCNOW, /* flags */ 2870 NULL, /* lockfunc */ 2871 NULL, /* lockarg */ 2872 &dd->dd_dmat); 2873 if (error != 0) { 2874 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); 2875 return error; 2876 } 2877 2878 /* allocate descriptors */ 2879 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, 2880 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, 2881 &dd->dd_dmamap); 2882 if (error != 0) { 2883 if_printf(ifp, "unable to alloc memory for %u %s descriptors, " 2884 "error %u\n", ndesc, dd->dd_name, error); 2885 goto fail1; 2886 } 2887 2888 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, 2889 dd->dd_desc, dd->dd_desc_len, 2890 ath_load_cb, &dd->dd_desc_paddr, 2891 BUS_DMA_NOWAIT); 2892 if (error != 0) { 2893 if_printf(ifp, "unable to map %s descriptors, error %u\n", 2894 dd->dd_name, error); 2895 goto fail2; 2896 } 2897 2898 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", 2899 __func__, dd->dd_name, (uint8_t *) dd->dd_desc, 2900 (u_long) dd->dd_desc_len, (caddr_t) dd->dd_desc_paddr, 2901 /*XXX*/ (u_long) dd->dd_desc_len); 2902 2903 return (0); 2904 2905fail2: 2906 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 2907fail1: 2908 bus_dma_tag_destroy(dd->dd_dmat); 2909 memset(dd, 0, sizeof(*dd)); 2910 return error; 2911#undef DS2PHYS 2912#undef ATH_DESC_4KB_BOUND_CHECK 2913} 2914 2915int 2916ath_descdma_setup(struct ath_softc *sc, 2917 struct ath_descdma *dd, ath_bufhead *head, 2918 const char *name, int ds_size, int nbuf, int ndesc) 2919{ 2920#define DS2PHYS(_dd, _ds) \ 2921 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 2922#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \ 2923 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0) 2924 struct ifnet *ifp = sc->sc_ifp; 2925 uint8_t *ds; 2926 struct ath_buf *bf; 2927 int i, bsize, error; 2928 2929 /* Allocate descriptors */ 2930 error = ath_descdma_alloc_desc(sc, dd, head, name, ds_size, 2931 nbuf * ndesc); 2932 2933 /* Assume any errors during allocation were dealt with */ 2934 if (error != 0) { 2935 return (error); 2936 } 2937 2938 ds = (uint8_t *) dd->dd_desc; 2939 2940 /* allocate rx buffers */ 2941 bsize = sizeof(struct ath_buf) * nbuf; 2942 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 2943 if (bf == NULL) { 2944 if_printf(ifp, "malloc of %s buffers failed, size %u\n", 2945 dd->dd_name, bsize); 2946 goto fail3; 2947 } 2948 dd->dd_bufptr = bf; 2949 2950 TAILQ_INIT(head); 2951 for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * dd->dd_descsize)) { 2952 bf->bf_desc = (struct ath_desc *) ds; 2953 bf->bf_daddr = DS2PHYS(dd, ds); 2954 if (! ath_hal_split4ktrans(sc->sc_ah)) { 2955 /* 2956 * Merlin WAR: Skip descriptor addresses which 2957 * cause 4KB boundary crossing along any point 2958 * in the descriptor. 
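 *
 * For example (sizes illustrative): with a 0x80 byte descriptor
 * and a candidate address whose page offset is 0xFC0, the check
 * 0xFC0 > (0x1000 - 0x80) is true, so the code below bumps ds
 * forward by 0x1000 - 0xFC0 = 0x40 bytes to start the descriptor
 * on the next 4KB page.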
2959 */ 2960 if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr, 2961 dd->dd_descsize)) { 2962 /* Start at the next page */ 2963 ds += 0x1000 - (bf->bf_daddr & 0xFFF); 2964 bf->bf_desc = (struct ath_desc *) ds; 2965 bf->bf_daddr = DS2PHYS(dd, ds); 2966 } 2967 } 2968 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 2969 &bf->bf_dmamap); 2970 if (error != 0) { 2971 if_printf(ifp, "unable to create dmamap for %s " 2972 "buffer %u, error %u\n", dd->dd_name, i, error); 2973 ath_descdma_cleanup(sc, dd, head); 2974 return error; 2975 } 2976 bf->bf_lastds = bf->bf_desc; /* Just an initial value */ 2977 TAILQ_INSERT_TAIL(head, bf, bf_list); 2978 } 2979 2980 /* 2981 * XXX TODO: ensure that ds doesn't overflow the descriptor 2982 * allocation otherwise weird stuff will occur and crash your 2983 * machine. 2984 */ 2985 return 0; 2986 /* XXX this should likely just call ath_descdma_cleanup() */ 2987fail3: 2988 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 2989 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 2990 bus_dma_tag_destroy(dd->dd_dmat); 2991 memset(dd, 0, sizeof(*dd)); 2992 return error; 2993#undef DS2PHYS 2994#undef ATH_DESC_4KB_BOUND_CHECK 2995} 2996 2997/* 2998 * Allocate ath_buf entries but no descriptor contents. 2999 * 3000 * This is for RX EDMA where the descriptors are the header part of 3001 * the RX buffer. 3002 */ 3003int 3004ath_descdma_setup_rx_edma(struct ath_softc *sc, 3005 struct ath_descdma *dd, ath_bufhead *head, 3006 const char *name, int nbuf, int rx_status_len) 3007{ 3008 struct ifnet *ifp = sc->sc_ifp; 3009 struct ath_buf *bf; 3010 int i, bsize, error; 3011 3012 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers\n", 3013 __func__, name, nbuf); 3014 3015 dd->dd_name = name; 3016 /* 3017 * This is (mostly) purely for show. We're not allocating any actual 3018 * descriptors here as EDMA RX has the descriptor be part 3019 * of the RX buffer. 3020 * 3021 * However, dd_desc_len is used by ath_descdma_free() to determine 3022 * whether we have already freed this DMA mapping. 
3023 */ 3024 dd->dd_desc_len = rx_status_len * nbuf; 3025 dd->dd_descsize = rx_status_len; 3026 3027 /* allocate rx buffers */ 3028 bsize = sizeof(struct ath_buf) * nbuf; 3029 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 3030 if (bf == NULL) { 3031 if_printf(ifp, "malloc of %s buffers failed, size %u\n", 3032 dd->dd_name, bsize); 3033 error = ENOMEM; 3034 goto fail3; 3035 } 3036 dd->dd_bufptr = bf; 3037 3038 TAILQ_INIT(head); 3039 for (i = 0; i < nbuf; i++, bf++) { 3040 bf->bf_desc = NULL; 3041 bf->bf_daddr = 0; 3042 bf->bf_lastds = NULL; /* Just an initial value */ 3043 3044 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 3045 &bf->bf_dmamap); 3046 if (error != 0) { 3047 if_printf(ifp, "unable to create dmamap for %s " 3048 "buffer %u, error %u\n", dd->dd_name, i, error); 3049 ath_descdma_cleanup(sc, dd, head); 3050 return error; 3051 } 3052 TAILQ_INSERT_TAIL(head, bf, bf_list); 3053 } 3054 return 0; 3055fail3: 3056 memset(dd, 0, sizeof(*dd)); 3057 return error; 3058} 3059 3060void 3061ath_descdma_cleanup(struct ath_softc *sc, 3062 struct ath_descdma *dd, ath_bufhead *head) 3063{ 3064 struct ath_buf *bf; 3065 struct ieee80211_node *ni; 3066 3067 if (dd->dd_dmamap != 0) { 3068 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 3069 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 3070 bus_dma_tag_destroy(dd->dd_dmat); 3071 } 3072 3073 if (head != NULL) { 3074 TAILQ_FOREACH(bf, head, bf_list) { 3075 if (bf->bf_m) { 3076 m_freem(bf->bf_m); 3077 bf->bf_m = NULL; 3078 } 3079 if (bf->bf_dmamap != NULL) { 3080 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 3081 bf->bf_dmamap = NULL; 3082 } 3083 ni = bf->bf_node; 3084 bf->bf_node = NULL; 3085 if (ni != NULL) { 3086 /* 3087 * Reclaim node reference. 3088 */ 3089 ieee80211_free_node(ni); 3090 } 3091 } 3092 } 3093 3094 if (head != NULL) 3095 TAILQ_INIT(head); 3096 3097 if (dd->dd_bufptr != NULL) 3098 free(dd->dd_bufptr, M_ATHDEV); 3099 memset(dd, 0, sizeof(*dd)); 3100} 3101 3102static int 3103ath_desc_alloc(struct ath_softc *sc) 3104{ 3105 int error; 3106 3107 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 3108 "tx", sc->sc_tx_desclen, ath_txbuf, ATH_TXDESC); 3109 if (error != 0) { 3110 return error; 3111 } 3112 sc->sc_txbuf_cnt = ath_txbuf; 3113 3114 error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt, 3115 "tx_mgmt", sc->sc_tx_desclen, ath_txbuf_mgmt, 3116 ATH_TXDESC); 3117 if (error != 0) { 3118 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3119 return error; 3120 } 3121 3122 /* 3123 * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the 3124 * flag doesn't have to be set in ath_getbuf_locked(). 
3125 */ 3126 3127 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, 3128 "beacon", sc->sc_tx_desclen, ATH_BCBUF, 1); 3129 if (error != 0) { 3130 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3131 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 3132 &sc->sc_txbuf_mgmt); 3133 return error; 3134 } 3135 return 0; 3136} 3137 3138static void 3139ath_desc_free(struct ath_softc *sc) 3140{ 3141 3142 if (sc->sc_bdma.dd_desc_len != 0) 3143 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); 3144 if (sc->sc_txdma.dd_desc_len != 0) 3145 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3146 if (sc->sc_txdma_mgmt.dd_desc_len != 0) 3147 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 3148 &sc->sc_txbuf_mgmt); 3149} 3150 3151static struct ieee80211_node * 3152ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 3153{ 3154 struct ieee80211com *ic = vap->iv_ic; 3155 struct ath_softc *sc = ic->ic_ifp->if_softc; 3156 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; 3157 struct ath_node *an; 3158 3159 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); 3160 if (an == NULL) { 3161 /* XXX stat+msg */ 3162 return NULL; 3163 } 3164 ath_rate_node_init(sc, an); 3165 3166 /* Setup the mutex - there's no associd yet so set the name to NULL */ 3167 snprintf(an->an_name, sizeof(an->an_name), "%s: node %p", 3168 device_get_nameunit(sc->sc_dev), an); 3169 mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF); 3170 3171 /* XXX setup ath_tid */ 3172 ath_tx_tid_init(sc, an); 3173 3174 DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an); 3175 return &an->an_node; 3176} 3177 3178static void 3179ath_node_cleanup(struct ieee80211_node *ni) 3180{ 3181 struct ieee80211com *ic = ni->ni_ic; 3182 struct ath_softc *sc = ic->ic_ifp->if_softc; 3183 3184 /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */ 3185 ath_tx_node_flush(sc, ATH_NODE(ni)); 3186 ath_rate_node_cleanup(sc, ATH_NODE(ni)); 3187 sc->sc_node_cleanup(ni); 3188} 3189 3190static void 3191ath_node_free(struct ieee80211_node *ni) 3192{ 3193 struct ieee80211com *ic = ni->ni_ic; 3194 struct ath_softc *sc = ic->ic_ifp->if_softc; 3195 3196 DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni); 3197 mtx_destroy(&ATH_NODE(ni)->an_mtx); 3198 sc->sc_node_free(ni); 3199} 3200 3201static void 3202ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) 3203{ 3204 struct ieee80211com *ic = ni->ni_ic; 3205 struct ath_softc *sc = ic->ic_ifp->if_softc; 3206 struct ath_hal *ah = sc->sc_ah; 3207 3208 *rssi = ic->ic_node_getrssi(ni); 3209 if (ni->ni_chan != IEEE80211_CHAN_ANYC) 3210 *noise = ath_hal_getchannoise(ah, ni->ni_chan); 3211 else 3212 *noise = -95; /* nominally correct */ 3213} 3214 3215/* 3216 * Set the default antenna. 3217 */ 3218void 3219ath_setdefantenna(struct ath_softc *sc, u_int antenna) 3220{ 3221 struct ath_hal *ah = sc->sc_ah; 3222 3223 /* XXX block beacon interrupts */ 3224 ath_hal_setdefantenna(ah, antenna); 3225 if (sc->sc_defant != antenna) 3226 sc->sc_stats.ast_ant_defswitch++; 3227 sc->sc_defant = antenna; 3228 sc->sc_rxotherant = 0; 3229} 3230 3231static void 3232ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) 3233{ 3234 txq->axq_qnum = qnum; 3235 txq->axq_ac = 0; 3236 txq->axq_depth = 0; 3237 txq->axq_aggr_depth = 0; 3238 txq->axq_intrcnt = 0; 3239 txq->axq_link = NULL; 3240 txq->axq_softc = sc; 3241 TAILQ_INIT(&txq->axq_q); 3242 TAILQ_INIT(&txq->axq_tidq); 3243 ATH_TXQ_LOCK_INIT(sc, txq); 3244} 3245 3246/* 3247 * Setup a h/w transmit queue. 
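 *
 * The qtype selects the hardware queue class (e.g.
 * HAL_TX_QUEUE_DATA, as used by ath_tx_setup() below) and the
 * subtype further qualifies it - for data queues this is
 * (illustratively) the WME access category.  A NULL return
 * usually just means the hal has no more queues of that type
 * to hand out; see the NB below.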
3248 */ 3249static struct ath_txq * 3250ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 3251{ 3252#define N(a) (sizeof(a)/sizeof(a[0])) 3253 struct ath_hal *ah = sc->sc_ah; 3254 HAL_TXQ_INFO qi; 3255 int qnum; 3256 3257 memset(&qi, 0, sizeof(qi)); 3258 qi.tqi_subtype = subtype; 3259 qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 3260 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 3261 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 3262 /* 3263 * Enable interrupts only for EOL and DESC conditions. 3264 * We mark tx descriptors to receive a DESC interrupt 3265 * when a tx queue gets deep; otherwise waiting for the 3266 * EOL to reap descriptors. Note that this is done to 3267 * reduce interrupt load and this only defers reaping 3268 * descriptors, never transmitting frames. Aside from 3269 * reducing interrupts this also permits more concurrency. 3270 * The only potential downside is if the tx queue backs 3271 * up in which case the top half of the kernel may backup 3272 * due to a lack of tx descriptors. 3273 */ 3274 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE; 3275 qnum = ath_hal_setuptxqueue(ah, qtype, &qi); 3276 if (qnum == -1) { 3277 /* 3278 * NB: don't print a message, this happens 3279 * normally on parts with too few tx queues 3280 */ 3281 return NULL; 3282 } 3283 if (qnum >= N(sc->sc_txq)) { 3284 device_printf(sc->sc_dev, 3285 "hal qnum %u out of range, max %zu!\n", 3286 qnum, N(sc->sc_txq)); 3287 ath_hal_releasetxqueue(ah, qnum); 3288 return NULL; 3289 } 3290 if (!ATH_TXQ_SETUP(sc, qnum)) { 3291 ath_txq_init(sc, &sc->sc_txq[qnum], qnum); 3292 sc->sc_txqsetup |= 1<<qnum; 3293 } 3294 return &sc->sc_txq[qnum]; 3295#undef N 3296} 3297 3298/* 3299 * Setup a hardware data transmit queue for the specified 3300 * access control. The hal may not support all requested 3301 * queues in which case it will return a reference to a 3302 * previously setup queue. We record the mapping from ac's 3303 * to h/w queues for use by ath_tx_start and also track 3304 * the set of h/w queues being used to optimize work in the 3305 * transmit interrupt handler and related routines. 3306 */ 3307static int 3308ath_tx_setup(struct ath_softc *sc, int ac, int haltype) 3309{ 3310#define N(a) (sizeof(a)/sizeof(a[0])) 3311 struct ath_txq *txq; 3312 3313 if (ac >= N(sc->sc_ac2q)) { 3314 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", 3315 ac, N(sc->sc_ac2q)); 3316 return 0; 3317 } 3318 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); 3319 if (txq != NULL) { 3320 txq->axq_ac = ac; 3321 sc->sc_ac2q[ac] = txq; 3322 return 1; 3323 } else 3324 return 0; 3325#undef N 3326} 3327 3328/* 3329 * Update WME parameters for a transmit queue. 3330 */ 3331static int 3332ath_txq_update(struct ath_softc *sc, int ac) 3333{ 3334#define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) 3335#define ATH_TXOP_TO_US(v) (v<<5) 3336 struct ifnet *ifp = sc->sc_ifp; 3337 struct ieee80211com *ic = ifp->if_l2com; 3338 struct ath_txq *txq = sc->sc_ac2q[ac]; 3339 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 3340 struct ath_hal *ah = sc->sc_ah; 3341 HAL_TXQ_INFO qi; 3342 3343 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); 3344#ifdef IEEE80211_SUPPORT_TDMA 3345 if (sc->sc_tdma) { 3346 /* 3347 * AIFS is zero so there's no pre-transmit wait. The 3348 * burst time defines the slot duration and is configured 3349 * through net80211. The QCU is setup to not do post-xmit 3350 * back off, lockout all lower-priority QCU's, and fire 3351 * off the DMA beacon alert timer which is setup based 3352 * on the slot configuration. 
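 *
 * In the TDMA case below both tqi_readyTime and tqi_burstTime are
 * simply set to sc_tdmaslotlen, i.e. the whole slot (as configured
 * via net80211) is available for bursting.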
3353 */ 3354 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 3355 | HAL_TXQ_TXERRINT_ENABLE 3356 | HAL_TXQ_TXURNINT_ENABLE 3357 | HAL_TXQ_TXEOLINT_ENABLE 3358 | HAL_TXQ_DBA_GATED 3359 | HAL_TXQ_BACKOFF_DISABLE 3360 | HAL_TXQ_ARB_LOCKOUT_GLOBAL 3361 ; 3362 qi.tqi_aifs = 0; 3363 /* XXX +dbaprep? */ 3364 qi.tqi_readyTime = sc->sc_tdmaslotlen; 3365 qi.tqi_burstTime = qi.tqi_readyTime; 3366 } else { 3367#endif 3368 /* 3369 * XXX shouldn't this just use the default flags 3370 * used in the previous queue setup? 3371 */ 3372 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 3373 | HAL_TXQ_TXERRINT_ENABLE 3374 | HAL_TXQ_TXDESCINT_ENABLE 3375 | HAL_TXQ_TXURNINT_ENABLE 3376 | HAL_TXQ_TXEOLINT_ENABLE 3377 ; 3378 qi.tqi_aifs = wmep->wmep_aifsn; 3379 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 3380 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 3381 qi.tqi_readyTime = 0; 3382 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit); 3383#ifdef IEEE80211_SUPPORT_TDMA 3384 } 3385#endif 3386 3387 DPRINTF(sc, ATH_DEBUG_RESET, 3388 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n", 3389 __func__, txq->axq_qnum, qi.tqi_qflags, 3390 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime); 3391 3392 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { 3393 if_printf(ifp, "unable to update hardware queue " 3394 "parameters for %s traffic!\n", 3395 ieee80211_wme_acnames[ac]); 3396 return 0; 3397 } else { 3398 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ 3399 return 1; 3400 } 3401#undef ATH_TXOP_TO_US 3402#undef ATH_EXPONENT_TO_VALUE 3403} 3404 3405/* 3406 * Callback from the 802.11 layer to update WME parameters. 3407 */ 3408int 3409ath_wme_update(struct ieee80211com *ic) 3410{ 3411 struct ath_softc *sc = ic->ic_ifp->if_softc; 3412 3413 return !ath_txq_update(sc, WME_AC_BE) || 3414 !ath_txq_update(sc, WME_AC_BK) || 3415 !ath_txq_update(sc, WME_AC_VI) || 3416 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; 3417} 3418 3419/* 3420 * Reclaim resources for a setup queue. 3421 */ 3422static void 3423ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 3424{ 3425 3426 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); 3427 ATH_TXQ_LOCK_DESTROY(txq); 3428 sc->sc_txqsetup &= ~(1<<txq->axq_qnum); 3429} 3430 3431/* 3432 * Reclaim all tx queue resources. 3433 */ 3434static void 3435ath_tx_cleanup(struct ath_softc *sc) 3436{ 3437 int i; 3438 3439 ATH_TXBUF_LOCK_DESTROY(sc); 3440 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 3441 if (ATH_TXQ_SETUP(sc, i)) 3442 ath_tx_cleanupq(sc, &sc->sc_txq[i]); 3443} 3444 3445/* 3446 * Return h/w rate index for an IEEE rate (w/o basic rate bit) 3447 * using the current rates in sc_rixmap. 3448 */ 3449int 3450ath_tx_findrix(const struct ath_softc *sc, uint8_t rate) 3451{ 3452 int rix = sc->sc_rixmap[rate]; 3453 /* NB: return lowest rix for invalid rate */ 3454 return (rix == 0xff ? 
0 : rix); 3455} 3456 3457static void 3458ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts, 3459 struct ath_buf *bf) 3460{ 3461 struct ieee80211_node *ni = bf->bf_node; 3462 struct ifnet *ifp = sc->sc_ifp; 3463 struct ieee80211com *ic = ifp->if_l2com; 3464 int sr, lr, pri; 3465 3466 if (ts->ts_status == 0) { 3467 u_int8_t txant = ts->ts_antenna; 3468 sc->sc_stats.ast_ant_tx[txant]++; 3469 sc->sc_ant_tx[txant]++; 3470 if (ts->ts_finaltsi != 0) 3471 sc->sc_stats.ast_tx_altrate++; 3472 pri = M_WME_GETAC(bf->bf_m); 3473 if (pri >= WME_AC_VO) 3474 ic->ic_wme.wme_hipri_traffic++; 3475 if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) 3476 ni->ni_inact = ni->ni_inact_reload; 3477 } else { 3478 if (ts->ts_status & HAL_TXERR_XRETRY) 3479 sc->sc_stats.ast_tx_xretries++; 3480 if (ts->ts_status & HAL_TXERR_FIFO) 3481 sc->sc_stats.ast_tx_fifoerr++; 3482 if (ts->ts_status & HAL_TXERR_FILT) 3483 sc->sc_stats.ast_tx_filtered++; 3484 if (ts->ts_status & HAL_TXERR_XTXOP) 3485 sc->sc_stats.ast_tx_xtxop++; 3486 if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED) 3487 sc->sc_stats.ast_tx_timerexpired++; 3488 3489 if (ts->ts_status & HAL_TX_DATA_UNDERRUN) 3490 sc->sc_stats.ast_tx_data_underrun++; 3491 if (ts->ts_status & HAL_TX_DELIM_UNDERRUN) 3492 sc->sc_stats.ast_tx_delim_underrun++; 3493 3494 if (bf->bf_m->m_flags & M_FF) 3495 sc->sc_stats.ast_ff_txerr++; 3496 } 3497 /* XXX when is this valid? */ 3498 if (ts->ts_status & HAL_TX_DESC_CFG_ERR) 3499 sc->sc_stats.ast_tx_desccfgerr++; 3500 3501 sr = ts->ts_shortretry; 3502 lr = ts->ts_longretry; 3503 sc->sc_stats.ast_tx_shortretry += sr; 3504 sc->sc_stats.ast_tx_longretry += lr; 3505 3506} 3507 3508/* 3509 * The default completion. If fail is 1, this means 3510 * "please don't retry the frame, and just return -1 status 3511 * to the net80211 stack. 3512 */ 3513void 3514ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 3515{ 3516 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 3517 int st; 3518 3519 if (fail == 1) 3520 st = -1; 3521 else 3522 st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ? 3523 ts->ts_status : HAL_TXERR_XRETRY; 3524 3525 if (bf->bf_state.bfs_dobaw) 3526 device_printf(sc->sc_dev, 3527 "%s: bf %p: seqno %d: dobaw should've been cleared!\n", 3528 __func__, 3529 bf, 3530 SEQNO(bf->bf_state.bfs_seqno)); 3531 if (bf->bf_next != NULL) 3532 device_printf(sc->sc_dev, 3533 "%s: bf %p: seqno %d: bf_next not NULL!\n", 3534 __func__, 3535 bf, 3536 SEQNO(bf->bf_state.bfs_seqno)); 3537 3538 /* 3539 * Do any tx complete callback. Note this must 3540 * be done before releasing the node reference. 3541 * This will free the mbuf, release the net80211 3542 * node and recycle the ath_buf. 3543 */ 3544 ath_tx_freebuf(sc, bf, st); 3545} 3546 3547/* 3548 * Update rate control with the given completion status. 3549 */ 3550void 3551ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 3552 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen, 3553 int nframes, int nbad) 3554{ 3555 struct ath_node *an; 3556 3557 /* Only for unicast frames */ 3558 if (ni == NULL) 3559 return; 3560 3561 an = ATH_NODE(ni); 3562 3563 if ((ts->ts_status & HAL_TXERR_FILT) == 0) { 3564 ATH_NODE_LOCK(an); 3565 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad); 3566 ATH_NODE_UNLOCK(an); 3567 } 3568} 3569 3570/* 3571 * Update the busy status of the last frame on the free list. 
3572 * When doing TDMA, the busy flag tracks whether the hardware 3573 * currently points to this buffer or not, and thus gated DMA 3574 * may restart by re-reading the last descriptor in this 3575 * buffer. 3576 * 3577 * This should be called in the completion function once one 3578 * of the buffers has been used. 3579 */ 3580static void 3581ath_tx_update_busy(struct ath_softc *sc) 3582{ 3583 struct ath_buf *last; 3584 3585 /* 3586 * Since the last frame may still be marked 3587 * as ATH_BUF_BUSY, unmark it here before 3588 * finishing the frame processing. 3589 * Since we've completed a frame (aggregate 3590 * or otherwise), the hardware has moved on 3591 * and is no longer referencing the previous 3592 * descriptor. 3593 */ 3594 ATH_TXBUF_LOCK_ASSERT(sc); 3595 last = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s); 3596 if (last != NULL) 3597 last->bf_flags &= ~ATH_BUF_BUSY; 3598 last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s); 3599 if (last != NULL) 3600 last->bf_flags &= ~ATH_BUF_BUSY; 3601} 3602 3603/* 3604 * Process the completion of the given buffer. 3605 * 3606 * This calls the rate control update and then the buffer completion. 3607 * This will either free the buffer or requeue it. In any case, the 3608 * bf pointer should be treated as invalid after this function is called. 3609 */ 3610void 3611ath_tx_process_buf_completion(struct ath_softc *sc, struct ath_txq *txq, 3612 struct ath_tx_status *ts, struct ath_buf *bf) 3613{ 3614 struct ieee80211_node *ni = bf->bf_node; 3615 struct ath_node *an = NULL; 3616 3617 ATH_TXQ_UNLOCK_ASSERT(txq); 3618 3619 /* If unicast frame, update general statistics */ 3620 if (ni != NULL) { 3621 an = ATH_NODE(ni); 3622 /* update statistics */ 3623 ath_tx_update_stats(sc, ts, bf); 3624 } 3625 3626 /* 3627 * Call the completion handler. 3628 * The completion handler is responsible for 3629 * calling the rate control code. 3630 * 3631 * Frames with no completion handler get the 3632 * rate control code called here. 3633 */ 3634 if (bf->bf_comp == NULL) { 3635 if ((ts->ts_status & HAL_TXERR_FILT) == 0 && 3636 (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) { 3637 /* 3638 * XXX assume this isn't an aggregate 3639 * frame. 3640 */ 3641 ath_tx_update_ratectrl(sc, ni, 3642 bf->bf_state.bfs_rc, ts, 3643 bf->bf_state.bfs_pktlen, 1, 3644 (ts->ts_status == 0 ? 0 : 1)); 3645 } 3646 ath_tx_default_comp(sc, bf, 0); 3647 } else 3648 bf->bf_comp(sc, bf, 0); 3649} 3650 3651 3652 3653/* 3654 * Process completed xmit descriptors from the specified queue. 3655 * Kick the packet scheduler if needed. This can occur from this 3656 * particular task. 
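 *
 * The return value is the number of ack'd unicast frames seen;
 * the ath_tx_proc*() callers use a non-zero count as the cue to
 * refresh sc_lastrx from the TSF, which feeds the phantom
 * beacon-miss workaround in ath_bmiss_vap().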
3657 */ 3658static int 3659ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) 3660{ 3661 struct ath_hal *ah = sc->sc_ah; 3662 struct ath_buf *bf; 3663 struct ath_desc *ds; 3664 struct ath_tx_status *ts; 3665 struct ieee80211_node *ni; 3666#ifdef IEEE80211_SUPPORT_SUPERG 3667 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 3668#endif /* IEEE80211_SUPPORT_SUPERG */ 3669 int nacked; 3670 HAL_STATUS status; 3671 3672 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", 3673 __func__, txq->axq_qnum, 3674 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 3675 txq->axq_link); 3676 3677 ATH_KTR(sc, ATH_KTR_TXCOMP, 4, 3678 "ath_tx_processq: txq=%u head %p link %p depth %p", 3679 txq->axq_qnum, 3680 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 3681 txq->axq_link, 3682 txq->axq_depth); 3683 3684 nacked = 0; 3685 for (;;) { 3686 ATH_TXQ_LOCK(txq); 3687 txq->axq_intrcnt = 0; /* reset periodic desc intr count */ 3688 bf = TAILQ_FIRST(&txq->axq_q); 3689 if (bf == NULL) { 3690 ATH_TXQ_UNLOCK(txq); 3691 break; 3692 } 3693 ds = bf->bf_lastds; /* XXX must be setup correctly! */ 3694 ts = &bf->bf_status.ds_txstat; 3695 3696 status = ath_hal_txprocdesc(ah, ds, ts); 3697#ifdef ATH_DEBUG 3698 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) 3699 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 3700 status == HAL_OK); 3701 else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0)) 3702 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 3703 status == HAL_OK); 3704#endif 3705 3706 if (status == HAL_EINPROGRESS) { 3707 ATH_KTR(sc, ATH_KTR_TXCOMP, 3, 3708 "ath_tx_processq: txq=%u, bf=%p ds=%p, HAL_EINPROGRESS", 3709 txq->axq_qnum, bf, ds); 3710 ATH_TXQ_UNLOCK(txq); 3711 break; 3712 } 3713 ATH_TXQ_REMOVE(txq, bf, bf_list); 3714#ifdef IEEE80211_SUPPORT_TDMA 3715 if (txq->axq_depth > 0) { 3716 /* 3717 * More frames follow. Mark the buffer busy 3718 * so it's not re-used while the hardware may 3719 * still re-read the link field in the descriptor. 3720 * 3721 * Use the last buffer in an aggregate as that 3722 * is where the hardware may be - intermediate 3723 * descriptors won't be "busy". 3724 */ 3725 bf->bf_last->bf_flags |= ATH_BUF_BUSY; 3726 } else 3727#else 3728 if (txq->axq_depth == 0) 3729#endif 3730 txq->axq_link = NULL; 3731 if (bf->bf_state.bfs_aggr) 3732 txq->axq_aggr_depth--; 3733 3734 ni = bf->bf_node; 3735 3736 ATH_KTR(sc, ATH_KTR_TXCOMP, 5, 3737 "ath_tx_processq: txq=%u, bf=%p, ds=%p, ni=%p, ts_status=0x%08x", 3738 txq->axq_qnum, bf, ds, ni, ts->ts_status); 3739 /* 3740 * If unicast frame was ack'd update RSSI, 3741 * including the last rx time used to 3742 * workaround phantom bmiss interrupts. 3743 */ 3744 if (ni != NULL && ts->ts_status == 0 && 3745 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) { 3746 nacked++; 3747 sc->sc_stats.ast_tx_rssi = ts->ts_rssi; 3748 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, 3749 ts->ts_rssi); 3750 } 3751 ATH_TXQ_UNLOCK(txq); 3752 3753 /* 3754 * Update statistics and call completion 3755 */ 3756 ath_tx_process_buf_completion(sc, txq, ts, bf); 3757 } 3758#ifdef IEEE80211_SUPPORT_SUPERG 3759 /* 3760 * Flush fast-frame staging queue when traffic slows. 
3761 */ 3762 if (txq->axq_depth <= 1) 3763 ieee80211_ff_flush(ic, txq->axq_ac); 3764#endif 3765 3766 /* Kick the TXQ scheduler */ 3767 if (dosched) { 3768 ATH_TXQ_LOCK(txq); 3769 ath_txq_sched(sc, txq); 3770 ATH_TXQ_UNLOCK(txq); 3771 } 3772 3773 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, 3774 "ath_tx_processq: txq=%u: done", 3775 txq->axq_qnum); 3776 3777 return nacked; 3778} 3779 3780#define TXQACTIVE(t, q) ( (t) & (1 << (q))) 3781 3782/* 3783 * Deferred processing of transmit interrupt; special-cased 3784 * for a single hardware transmit queue (e.g. 5210 and 5211). 3785 */ 3786static void 3787ath_tx_proc_q0(void *arg, int npending) 3788{ 3789 struct ath_softc *sc = arg; 3790 struct ifnet *ifp = sc->sc_ifp; 3791 uint32_t txqs; 3792 3793 ATH_PCU_LOCK(sc); 3794 sc->sc_txproc_cnt++; 3795 txqs = sc->sc_txq_active; 3796 sc->sc_txq_active &= ~txqs; 3797 ATH_PCU_UNLOCK(sc); 3798 3799 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, 3800 "ath_tx_proc_q0: txqs=0x%08x", txqs); 3801 3802 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1)) 3803 /* XXX why is lastrx updated in tx code? */ 3804 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 3805 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 3806 ath_tx_processq(sc, sc->sc_cabq, 1); 3807 IF_LOCK(&ifp->if_snd); 3808 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3809 IF_UNLOCK(&ifp->if_snd); 3810 sc->sc_wd_timer = 0; 3811 3812 if (sc->sc_softled) 3813 ath_led_event(sc, sc->sc_txrix); 3814 3815 ATH_PCU_LOCK(sc); 3816 sc->sc_txproc_cnt--; 3817 ATH_PCU_UNLOCK(sc); 3818 3819 ath_tx_kick(sc); 3820} 3821 3822/* 3823 * Deferred processing of transmit interrupt; special-cased 3824 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). 3825 */ 3826static void 3827ath_tx_proc_q0123(void *arg, int npending) 3828{ 3829 struct ath_softc *sc = arg; 3830 struct ifnet *ifp = sc->sc_ifp; 3831 int nacked; 3832 uint32_t txqs; 3833 3834 ATH_PCU_LOCK(sc); 3835 sc->sc_txproc_cnt++; 3836 txqs = sc->sc_txq_active; 3837 sc->sc_txq_active &= ~txqs; 3838 ATH_PCU_UNLOCK(sc); 3839 3840 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, 3841 "ath_tx_proc_q0123: txqs=0x%08x", txqs); 3842 3843 /* 3844 * Process each active queue. 3845 */ 3846 nacked = 0; 3847 if (TXQACTIVE(txqs, 0)) 3848 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1); 3849 if (TXQACTIVE(txqs, 1)) 3850 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1); 3851 if (TXQACTIVE(txqs, 2)) 3852 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1); 3853 if (TXQACTIVE(txqs, 3)) 3854 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1); 3855 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 3856 ath_tx_processq(sc, sc->sc_cabq, 1); 3857 if (nacked) 3858 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 3859 3860 IF_LOCK(&ifp->if_snd); 3861 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3862 IF_UNLOCK(&ifp->if_snd); 3863 sc->sc_wd_timer = 0; 3864 3865 if (sc->sc_softled) 3866 ath_led_event(sc, sc->sc_txrix); 3867 3868 ATH_PCU_LOCK(sc); 3869 sc->sc_txproc_cnt--; 3870 ATH_PCU_UNLOCK(sc); 3871 3872 ath_tx_kick(sc); 3873} 3874 3875/* 3876 * Deferred processing of transmit interrupt. 3877 */ 3878static void 3879ath_tx_proc(void *arg, int npending) 3880{ 3881 struct ath_softc *sc = arg; 3882 struct ifnet *ifp = sc->sc_ifp; 3883 int i, nacked; 3884 uint32_t txqs; 3885 3886 ATH_PCU_LOCK(sc); 3887 sc->sc_txproc_cnt++; 3888 txqs = sc->sc_txq_active; 3889 sc->sc_txq_active &= ~txqs; 3890 ATH_PCU_UNLOCK(sc); 3891 3892 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc: txqs=0x%08x", txqs); 3893 3894 /* 3895 * Process each active queue. 
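 *
 * "Active" here means the queue's bit was set in sc_txq_active by
 * ath_intr() (via ath_hal_gettxintrtxqs()); the snapshot taken
 * above under ATH_PCU_LOCK also cleared those bits, so the same
 * completion indication isn't processed twice.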
3896 */ 3897 nacked = 0; 3898 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 3899 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i)) 3900 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1); 3901 if (nacked) 3902 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 3903 3904 /* XXX check this inside of IF_LOCK? */ 3905 IF_LOCK(&ifp->if_snd); 3906 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3907 IF_UNLOCK(&ifp->if_snd); 3908 sc->sc_wd_timer = 0; 3909 3910 if (sc->sc_softled) 3911 ath_led_event(sc, sc->sc_txrix); 3912 3913 ATH_PCU_LOCK(sc); 3914 sc->sc_txproc_cnt--; 3915 ATH_PCU_UNLOCK(sc); 3916 3917 ath_tx_kick(sc); 3918} 3919#undef TXQACTIVE 3920 3921/* 3922 * Deferred processing of TXQ rescheduling. 3923 */ 3924static void 3925ath_txq_sched_tasklet(void *arg, int npending) 3926{ 3927 struct ath_softc *sc = arg; 3928 int i; 3929 3930 /* XXX is skipping ok? */ 3931 ATH_PCU_LOCK(sc); 3932#if 0 3933 if (sc->sc_inreset_cnt > 0) { 3934 device_printf(sc->sc_dev, 3935 "%s: sc_inreset_cnt > 0; skipping\n", __func__); 3936 ATH_PCU_UNLOCK(sc); 3937 return; 3938 } 3939#endif 3940 sc->sc_txproc_cnt++; 3941 ATH_PCU_UNLOCK(sc); 3942 3943 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 3944 if (ATH_TXQ_SETUP(sc, i)) { 3945 ATH_TXQ_LOCK(&sc->sc_txq[i]); 3946 ath_txq_sched(sc, &sc->sc_txq[i]); 3947 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 3948 } 3949 } 3950 3951 ATH_PCU_LOCK(sc); 3952 sc->sc_txproc_cnt--; 3953 ATH_PCU_UNLOCK(sc); 3954} 3955 3956void 3957ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf) 3958{ 3959 3960 ATH_TXBUF_LOCK_ASSERT(sc); 3961 3962 if (bf->bf_flags & ATH_BUF_MGMT) 3963 TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list); 3964 else { 3965 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 3966 sc->sc_txbuf_cnt++; 3967 if (sc->sc_txbuf_cnt > ath_txbuf) { 3968 device_printf(sc->sc_dev, 3969 "%s: sc_txbuf_cnt > %d?\n", 3970 __func__, 3971 ath_txbuf); 3972 sc->sc_txbuf_cnt = ath_txbuf; 3973 } 3974 } 3975} 3976 3977void 3978ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf) 3979{ 3980 3981 ATH_TXBUF_LOCK_ASSERT(sc); 3982 3983 if (bf->bf_flags & ATH_BUF_MGMT) 3984 TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list); 3985 else { 3986 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 3987 sc->sc_txbuf_cnt++; 3988 if (sc->sc_txbuf_cnt > ATH_TXBUF) { 3989 device_printf(sc->sc_dev, 3990 "%s: sc_txbuf_cnt > %d?\n", 3991 __func__, 3992 ATH_TXBUF); 3993 sc->sc_txbuf_cnt = ATH_TXBUF; 3994 } 3995 } 3996} 3997 3998/* 3999 * Return a buffer to the pool and update the 'busy' flag on the 4000 * previous 'tail' entry. 4001 * 4002 * This _must_ only be called when the buffer is involved in a completed 4003 * TX. The logic is that if it was part of an active TX, the previous 4004 * buffer on the list is now not involved in a halted TX DMA queue, waiting 4005 * for restart (eg for TDMA.) 4006 * 4007 * The caller must free the mbuf and recycle the node reference. 4008 */ 4009void 4010ath_freebuf(struct ath_softc *sc, struct ath_buf *bf) 4011{ 4012 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 4013 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); 4014 4015 KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__)); 4016 KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__)); 4017 4018 ATH_TXBUF_LOCK(sc); 4019 ath_tx_update_busy(sc); 4020 ath_returnbuf_tail(sc, bf); 4021 ATH_TXBUF_UNLOCK(sc); 4022} 4023 4024/* 4025 * This is currently used by ath_tx_draintxq() and 4026 * ath_tx_tid_free_pkts(). 4027 * 4028 * It recycles a single ath_buf. 
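 *
 * Recycling means: the node and mbuf pointers are detached from the
 * ath_buf, the DMA map is unloaded and the buffer is returned to the
 * free list via ath_freebuf(); any M_TXCB completion callback is then
 * run, the node reference is released and the mbuf is freed.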
4029 */ 4030void 4031ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status) 4032{ 4033 struct ieee80211_node *ni = bf->bf_node; 4034 struct mbuf *m0 = bf->bf_m; 4035 4036 bf->bf_node = NULL; 4037 bf->bf_m = NULL; 4038 4039 /* Free the buffer, it's not needed any longer */ 4040 ath_freebuf(sc, bf); 4041 4042 if (ni != NULL) { 4043 /* 4044 * Do any callback and reclaim the node reference. 4045 */ 4046 if (m0->m_flags & M_TXCB) 4047 ieee80211_process_callback(ni, m0, status); 4048 ieee80211_free_node(ni); 4049 } 4050 m_freem(m0); 4051 4052 /* 4053 * XXX the buffer used to be freed -after-, but the DMA map was 4054 * freed where ath_freebuf() now is. I've no idea what this 4055 * will do. 4056 */ 4057} 4058 4059void 4060ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) 4061{ 4062#ifdef ATH_DEBUG 4063 struct ath_hal *ah = sc->sc_ah; 4064#endif 4065 struct ath_buf *bf; 4066 u_int ix; 4067 4068 /* 4069 * NB: this assumes output has been stopped and 4070 * we do not need to block ath_tx_proc 4071 */ 4072 ATH_TXBUF_LOCK(sc); 4073 bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s); 4074 if (bf != NULL) 4075 bf->bf_flags &= ~ATH_BUF_BUSY; 4076 bf = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s); 4077 if (bf != NULL) 4078 bf->bf_flags &= ~ATH_BUF_BUSY; 4079 ATH_TXBUF_UNLOCK(sc); 4080 4081 for (ix = 0;; ix++) { 4082 ATH_TXQ_LOCK(txq); 4083 bf = TAILQ_FIRST(&txq->axq_q); 4084 if (bf == NULL) { 4085 txq->axq_link = NULL; 4086 /* 4087 * There's currently no flag that indicates 4088 * a buffer is on the FIFO. So until that 4089 * occurs, just clear the FIFO counter here. 4090 * 4091 * Yes, this means that if something in parallel 4092 * is pushing things onto this TXQ and pushing 4093 * _that_ into the hardware, things will get 4094 * very fruity very quickly. 4095 */ 4096 txq->axq_fifo_depth = 0; 4097 ATH_TXQ_UNLOCK(txq); 4098 break; 4099 } 4100 ATH_TXQ_REMOVE(txq, bf, bf_list); 4101 if (bf->bf_state.bfs_aggr) 4102 txq->axq_aggr_depth--; 4103#ifdef ATH_DEBUG 4104 if (sc->sc_debug & ATH_DEBUG_RESET) { 4105 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 4106 int status = 0; 4107 4108 /* 4109 * EDMA operation has a TX completion FIFO 4110 * separate from the TX descriptor, so this 4111 * method of checking the "completion" status 4112 * is wrong. 4113 */ 4114 if (! sc->sc_isedma) { 4115 status = (ath_hal_txprocdesc(ah, 4116 bf->bf_lastds, 4117 &bf->bf_status.ds_txstat) == HAL_OK); 4118 } 4119 ath_printtxbuf(sc, bf, txq->axq_qnum, ix, status); 4120 ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *), 4121 bf->bf_m->m_len, 0, -1); 4122 } 4123#endif /* ATH_DEBUG */ 4124 /* 4125 * Since we're now doing magic in the completion 4126 * functions, we -must- call it for aggregation 4127 * destinations or BAW tracking will get upset. 4128 */ 4129 /* 4130 * Clear ATH_BUF_BUSY; the completion handler 4131 * will free the buffer. 4132 */ 4133 ATH_TXQ_UNLOCK(txq); 4134 bf->bf_flags &= ~ATH_BUF_BUSY; 4135 if (bf->bf_comp) 4136 bf->bf_comp(sc, bf, 1); 4137 else 4138 ath_tx_default_comp(sc, bf, 1); 4139 } 4140 4141 /* 4142 * Drain software queued frames which are on 4143 * active TIDs. 
4144 */ 4145 ath_tx_txq_drain(sc, txq); 4146} 4147 4148static void 4149ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) 4150{ 4151 struct ath_hal *ah = sc->sc_ah; 4152 4153 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 4154 __func__, txq->axq_qnum, 4155 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), 4156 txq->axq_link); 4157 (void) ath_hal_stoptxdma(ah, txq->axq_qnum); 4158} 4159 4160int 4161ath_stoptxdma(struct ath_softc *sc) 4162{ 4163 struct ath_hal *ah = sc->sc_ah; 4164 int i; 4165 4166 /* XXX return value */ 4167 if (sc->sc_invalid) 4168 return 0; 4169 4170 if (!sc->sc_invalid) { 4171 /* don't touch the hardware if marked invalid */ 4172 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 4173 __func__, sc->sc_bhalq, 4174 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq), 4175 NULL); 4176 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); 4177 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 4178 if (ATH_TXQ_SETUP(sc, i)) 4179 ath_tx_stopdma(sc, &sc->sc_txq[i]); 4180 } 4181 4182 return 1; 4183} 4184 4185/* 4186 * Drain the transmit queues and reclaim resources. 4187 */ 4188void 4189ath_legacy_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type) 4190{ 4191#ifdef ATH_DEBUG 4192 struct ath_hal *ah = sc->sc_ah; 4193#endif 4194 struct ifnet *ifp = sc->sc_ifp; 4195 int i; 4196 4197 (void) ath_stoptxdma(sc); 4198 4199 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 4200 /* 4201 * XXX TODO: should we just handle the completed TX frames 4202 * here, whether or not the reset is a full one or not? 4203 */ 4204 if (ATH_TXQ_SETUP(sc, i)) { 4205 if (reset_type == ATH_RESET_NOLOSS) 4206 ath_tx_processq(sc, &sc->sc_txq[i], 0); 4207 else 4208 ath_tx_draintxq(sc, &sc->sc_txq[i]); 4209 } 4210 } 4211#ifdef ATH_DEBUG 4212 if (sc->sc_debug & ATH_DEBUG_RESET) { 4213 struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf); 4214 if (bf != NULL && bf->bf_m != NULL) { 4215 ath_printtxbuf(sc, bf, sc->sc_bhalq, 0, 4216 ath_hal_txprocdesc(ah, bf->bf_lastds, 4217 &bf->bf_status.ds_txstat) == HAL_OK); 4218 ieee80211_dump_pkt(ifp->if_l2com, 4219 mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len, 4220 0, -1); 4221 } 4222 } 4223#endif /* ATH_DEBUG */ 4224 IF_LOCK(&ifp->if_snd); 4225 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 4226 IF_UNLOCK(&ifp->if_snd); 4227 sc->sc_wd_timer = 0; 4228} 4229 4230/* 4231 * Update internal state after a channel change. 4232 */ 4233static void 4234ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) 4235{ 4236 enum ieee80211_phymode mode; 4237 4238 /* 4239 * Change channels and update the h/w rate map 4240 * if we're switching; e.g. 11a to 11b/g. 4241 */ 4242 mode = ieee80211_chan2mode(chan); 4243 if (mode != sc->sc_curmode) 4244 ath_setcurmode(sc, mode); 4245 sc->sc_curchan = chan; 4246} 4247 4248/* 4249 * Set/change channels. If the channel is really being changed, 4250 * it's done by resetting the chip. To accomplish this we must 4251 * first cleanup any pending DMA, then restart stuff after a la 4252 * ath_init. 
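 *
 * Concretely, the sequence below is: block the taskqueue and mask
 * interrupts, stop pending TX/RX completion work, and then (if the
 * channel really differs from sc_curchan) stop receive, drain the RX
 * and TX queues, reset the chip via ath_hal_reset() on the new
 * channel, restart receive, update the rate tables via
 * ath_chan_change() and reprogram the beacon timers before interrupts
 * and TX/RX are re-enabled.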
4253 */ 4254static int 4255ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 4256{ 4257 struct ifnet *ifp = sc->sc_ifp; 4258 struct ieee80211com *ic = ifp->if_l2com; 4259 struct ath_hal *ah = sc->sc_ah; 4260 int ret = 0; 4261 4262 /* Treat this as an interface reset */ 4263 ATH_PCU_UNLOCK_ASSERT(sc); 4264 ATH_UNLOCK_ASSERT(sc); 4265 4266 /* (Try to) stop TX/RX from occurring */ 4267 taskqueue_block(sc->sc_tq); 4268 4269 ATH_PCU_LOCK(sc); 4270 ath_hal_intrset(ah, 0); /* Stop new RX/TX completion */ 4271 ath_txrx_stop_locked(sc); /* Stop pending RX/TX completion */ 4272 if (ath_reset_grablock(sc, 1) == 0) { 4273 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", 4274 __func__); 4275 } 4276 ATH_PCU_UNLOCK(sc); 4277 4278 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n", 4279 __func__, ieee80211_chan2ieee(ic, chan), 4280 chan->ic_freq, chan->ic_flags); 4281 if (chan != sc->sc_curchan) { 4282 HAL_STATUS status; 4283 /* 4284 * To switch channels clear any pending DMA operations; 4285 * wait long enough for the RX fifo to drain, reset the 4286 * hardware at the new frequency, and then re-enable 4287 * the relevant bits of the h/w. 4288 */ 4289#if 0 4290 ath_hal_intrset(ah, 0); /* disable interrupts */ 4291#endif 4292 ath_stoprecv(sc, 1); /* turn off frame recv */ 4293 /* 4294 * First, handle completed TX/RX frames. 4295 */ 4296 ath_rx_flush(sc); 4297 ath_draintxq(sc, ATH_RESET_NOLOSS); 4298 /* 4299 * Next, flush the non-scheduled frames. 4300 */ 4301 ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */ 4302 4303 if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) { 4304 if_printf(ifp, "%s: unable to reset " 4305 "channel %u (%u MHz, flags 0x%x), hal status %u\n", 4306 __func__, ieee80211_chan2ieee(ic, chan), 4307 chan->ic_freq, chan->ic_flags, status); 4308 ret = EIO; 4309 goto finish; 4310 } 4311 sc->sc_diversity = ath_hal_getdiversity(ah); 4312 4313 /* Let DFS at it in case it's a DFS channel */ 4314 ath_dfs_radar_enable(sc, chan); 4315 4316 /* 4317 * Re-enable rx framework. 4318 */ 4319 if (ath_startrecv(sc) != 0) { 4320 if_printf(ifp, "%s: unable to restart recv logic\n", 4321 __func__); 4322 ret = EIO; 4323 goto finish; 4324 } 4325 4326 /* 4327 * Change channels and update the h/w rate map 4328 * if we're switching; e.g. 11a to 11b/g. 4329 */ 4330 ath_chan_change(sc, chan); 4331 4332 /* 4333 * Reset clears the beacon timers; reset them 4334 * here if needed. 4335 */ 4336 if (sc->sc_beacons) { /* restart beacons */ 4337#ifdef IEEE80211_SUPPORT_TDMA 4338 if (sc->sc_tdma) 4339 ath_tdma_config(sc, NULL); 4340 else 4341#endif 4342 ath_beacon_config(sc, NULL); 4343 } 4344 4345 /* 4346 * Re-enable interrupts. 4347 */ 4348#if 0 4349 ath_hal_intrset(ah, sc->sc_imask); 4350#endif 4351 } 4352 4353finish: 4354 ATH_PCU_LOCK(sc); 4355 sc->sc_inreset_cnt--; 4356 /* XXX only do this if sc_inreset_cnt == 0? */ 4357 ath_hal_intrset(ah, sc->sc_imask); 4358 ATH_PCU_UNLOCK(sc); 4359 4360 IF_LOCK(&ifp->if_snd); 4361 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 4362 IF_UNLOCK(&ifp->if_snd); 4363 ath_txrx_start(sc); 4364 /* XXX ath_start? */ 4365 4366 return ret; 4367} 4368 4369/* 4370 * Periodically recalibrate the PHY to account 4371 * for temperature/environment changes.
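 *
 * Three kinds of work are driven from here: ANI polling (every
 * ath_anicalinterval ms), short calibration (every ath_shortcalinterval
 * ms while sc_doresetcal is set) and long calibration (every
 * ath_longcalinterval seconds). The routine defers the actual work
 * while a scan is in progress and re-arms itself via callout_reset()
 * on sc_cal_ch with the shortest interval that is due next (or not at
 * all if calibration is disabled).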
4372 */ 4373static void 4374ath_calibrate(void *arg) 4375{ 4376 struct ath_softc *sc = arg; 4377 struct ath_hal *ah = sc->sc_ah; 4378 struct ifnet *ifp = sc->sc_ifp; 4379 struct ieee80211com *ic = ifp->if_l2com; 4380 HAL_BOOL longCal, isCalDone = AH_TRUE; 4381 HAL_BOOL aniCal, shortCal = AH_FALSE; 4382 int nextcal; 4383 4384 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */ 4385 goto restart; 4386 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz); 4387 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000); 4388 if (sc->sc_doresetcal) 4389 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000); 4390 4391 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal); 4392 if (aniCal) { 4393 sc->sc_stats.ast_ani_cal++; 4394 sc->sc_lastani = ticks; 4395 ath_hal_ani_poll(ah, sc->sc_curchan); 4396 } 4397 4398 if (longCal) { 4399 sc->sc_stats.ast_per_cal++; 4400 sc->sc_lastlongcal = ticks; 4401 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 4402 /* 4403 * Rfgain is out of bounds, reset the chip 4404 * to load new gain values. 4405 */ 4406 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 4407 "%s: rfgain change\n", __func__); 4408 sc->sc_stats.ast_per_rfgain++; 4409 sc->sc_resetcal = 0; 4410 sc->sc_doresetcal = AH_TRUE; 4411 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); 4412 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 4413 return; 4414 } 4415 /* 4416 * If this long cal is after an idle period, then 4417 * reset the data collection state so we start fresh. 4418 */ 4419 if (sc->sc_resetcal) { 4420 (void) ath_hal_calreset(ah, sc->sc_curchan); 4421 sc->sc_lastcalreset = ticks; 4422 sc->sc_lastshortcal = ticks; 4423 sc->sc_resetcal = 0; 4424 sc->sc_doresetcal = AH_TRUE; 4425 } 4426 } 4427 4428 /* Only call if we're doing a short/long cal, not for ANI calibration */ 4429 if (shortCal || longCal) { 4430 isCalDone = AH_FALSE; 4431 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) { 4432 if (longCal) { 4433 /* 4434 * Calibrate noise floor data again in case of change. 4435 */ 4436 ath_hal_process_noisefloor(ah); 4437 } 4438 } else { 4439 DPRINTF(sc, ATH_DEBUG_ANY, 4440 "%s: calibration of channel %u failed\n", 4441 __func__, sc->sc_curchan->ic_freq); 4442 sc->sc_stats.ast_per_calfail++; 4443 } 4444 if (shortCal) 4445 sc->sc_lastshortcal = ticks; 4446 } 4447 if (!isCalDone) { 4448restart: 4449 /* 4450 * Use a shorter interval to potentially collect multiple 4451 * data samples required to complete calibration. Once 4452 * we're told the work is done we drop back to a longer 4453 * interval between requests. We're more aggressive doing 4454 * work when operating as an AP to improve operation right 4455 * after startup. 
4456 */ 4457 sc->sc_lastshortcal = ticks; 4458 nextcal = ath_shortcalinterval*hz/1000; 4459 if (sc->sc_opmode != HAL_M_HOSTAP) 4460 nextcal *= 10; 4461 sc->sc_doresetcal = AH_TRUE; 4462 } else { 4463 /* nextcal should be the shortest time for next event */ 4464 nextcal = ath_longcalinterval*hz; 4465 if (sc->sc_lastcalreset == 0) 4466 sc->sc_lastcalreset = sc->sc_lastlongcal; 4467 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz) 4468 sc->sc_resetcal = 1; /* setup reset next trip */ 4469 sc->sc_doresetcal = AH_FALSE; 4470 } 4471 /* ANI calibration may occur more often than short/long/resetcal */ 4472 if (ath_anicalinterval > 0) 4473 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000); 4474 4475 if (nextcal != 0) { 4476 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n", 4477 __func__, nextcal, isCalDone ? "" : "!"); 4478 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc); 4479 } else { 4480 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", 4481 __func__); 4482 /* NB: don't rearm timer */ 4483 } 4484} 4485 4486static void 4487ath_scan_start(struct ieee80211com *ic) 4488{ 4489 struct ifnet *ifp = ic->ic_ifp; 4490 struct ath_softc *sc = ifp->if_softc; 4491 struct ath_hal *ah = sc->sc_ah; 4492 u_int32_t rfilt; 4493 4494 /* XXX calibration timer? */ 4495 4496 ATH_LOCK(sc); 4497 sc->sc_scanning = 1; 4498 sc->sc_syncbeacon = 0; 4499 rfilt = ath_calcrxfilter(sc); 4500 ATH_UNLOCK(sc); 4501 4502 ATH_PCU_LOCK(sc); 4503 ath_hal_setrxfilter(ah, rfilt); 4504 ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0); 4505 ATH_PCU_UNLOCK(sc); 4506 4507 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", 4508 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr)); 4509} 4510 4511static void 4512ath_scan_end(struct ieee80211com *ic) 4513{ 4514 struct ifnet *ifp = ic->ic_ifp; 4515 struct ath_softc *sc = ifp->if_softc; 4516 struct ath_hal *ah = sc->sc_ah; 4517 u_int32_t rfilt; 4518 4519 ATH_LOCK(sc); 4520 sc->sc_scanning = 0; 4521 rfilt = ath_calcrxfilter(sc); 4522 ATH_UNLOCK(sc); 4523 4524 ATH_PCU_LOCK(sc); 4525 ath_hal_setrxfilter(ah, rfilt); 4526 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 4527 4528 ath_hal_process_noisefloor(ah); 4529 ATH_PCU_UNLOCK(sc); 4530 4531 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 4532 __func__, rfilt, ether_sprintf(sc->sc_curbssid), 4533 sc->sc_curaid); 4534} 4535 4536#ifdef ATH_ENABLE_11N 4537/* 4538 * For now, just do a channel change. 4539 * 4540 * Later, we'll go through the hard slog of suspending tx/rx, changing rate 4541 * control state and resetting the hardware without dropping frames out 4542 * of the queue. 4543 * 4544 * The unfortunate trouble here is making absolutely sure that the 4545 * channel width change has propagated enough so the hardware 4546 * absolutely isn't handed bogus frames for its current operating 4547 * mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and 4548 * do occur in parallel, we need to make certain we've blocked 4549 * any further ongoing TX (and RX, that can cause raw TX) 4550 * before we do this.
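 *
 * For now "just do a channel change" means calling ath_set_channel(),
 * which funnels into ath_chan_set() on ic_curchan; the in-place
 * suspend/resume described above is left as future work.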
4551 */ 4552static void 4553ath_update_chw(struct ieee80211com *ic) 4554{ 4555 struct ifnet *ifp = ic->ic_ifp; 4556 struct ath_softc *sc = ifp->if_softc; 4557 4558 DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__); 4559 ath_set_channel(ic); 4560} 4561#endif /* ATH_ENABLE_11N */ 4562 4563static void 4564ath_set_channel(struct ieee80211com *ic) 4565{ 4566 struct ifnet *ifp = ic->ic_ifp; 4567 struct ath_softc *sc = ifp->if_softc; 4568 4569 (void) ath_chan_set(sc, ic->ic_curchan); 4570 /* 4571 * If we are returning to our bss channel then mark state 4572 * so the next recv'd beacon's tsf will be used to sync the 4573 * beacon timers. Note that since we only hear beacons in 4574 * sta/ibss mode this has no effect in other operating modes. 4575 */ 4576 ATH_LOCK(sc); 4577 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan) 4578 sc->sc_syncbeacon = 1; 4579 ATH_UNLOCK(sc); 4580} 4581 4582/* 4583 * Walk the vap list and check if there any vap's in RUN state. 4584 */ 4585static int 4586ath_isanyrunningvaps(struct ieee80211vap *this) 4587{ 4588 struct ieee80211com *ic = this->iv_ic; 4589 struct ieee80211vap *vap; 4590 4591 IEEE80211_LOCK_ASSERT(ic); 4592 4593 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { 4594 if (vap != this && vap->iv_state >= IEEE80211_S_RUN) 4595 return 1; 4596 } 4597 return 0; 4598} 4599 4600static int 4601ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 4602{ 4603 struct ieee80211com *ic = vap->iv_ic; 4604 struct ath_softc *sc = ic->ic_ifp->if_softc; 4605 struct ath_vap *avp = ATH_VAP(vap); 4606 struct ath_hal *ah = sc->sc_ah; 4607 struct ieee80211_node *ni = NULL; 4608 int i, error, stamode; 4609 u_int32_t rfilt; 4610 int csa_run_transition = 0; 4611 static const HAL_LED_STATE leds[] = { 4612 HAL_LED_INIT, /* IEEE80211_S_INIT */ 4613 HAL_LED_SCAN, /* IEEE80211_S_SCAN */ 4614 HAL_LED_AUTH, /* IEEE80211_S_AUTH */ 4615 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ 4616 HAL_LED_RUN, /* IEEE80211_S_CAC */ 4617 HAL_LED_RUN, /* IEEE80211_S_RUN */ 4618 HAL_LED_RUN, /* IEEE80211_S_CSA */ 4619 HAL_LED_RUN, /* IEEE80211_S_SLEEP */ 4620 }; 4621 4622 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, 4623 ieee80211_state_name[vap->iv_state], 4624 ieee80211_state_name[nstate]); 4625 4626 /* 4627 * net80211 _should_ have the comlock asserted at this point. 4628 * There are some comments around the calls to vap->iv_newstate 4629 * which indicate that it (newstate) may end up dropping the 4630 * lock. This and the subsequent lock assert check after newstate 4631 * are an attempt to catch these and figure out how/why. 4632 */ 4633 IEEE80211_LOCK_ASSERT(ic); 4634 4635 if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN) 4636 csa_run_transition = 1; 4637 4638 callout_drain(&sc->sc_cal_ch); 4639 ath_hal_setledstate(ah, leds[nstate]); /* set LED */ 4640 4641 if (nstate == IEEE80211_S_SCAN) { 4642 /* 4643 * Scanning: turn off beacon miss and don't beacon. 4644 * Mark beacon state so when we reach RUN state we'll 4645 * [re]setup beacons. Unblock the task q thread so 4646 * deferred interrupt processing is done. 
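 *
 * Concretely: HAL_INT_SWBA and HAL_INT_BMISS are masked out of
 * sc_imask and the hardware interrupt mask, and sc_beacons is cleared
 * so the RUN-state handling below knows to reconfigure beaconing.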
4647 */ 4648 ath_hal_intrset(ah, 4649 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); 4650 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 4651 sc->sc_beacons = 0; 4652 taskqueue_unblock(sc->sc_tq); 4653 } 4654 4655 ni = ieee80211_ref_node(vap->iv_bss); 4656 rfilt = ath_calcrxfilter(sc); 4657 stamode = (vap->iv_opmode == IEEE80211_M_STA || 4658 vap->iv_opmode == IEEE80211_M_AHDEMO || 4659 vap->iv_opmode == IEEE80211_M_IBSS); 4660 if (stamode && nstate == IEEE80211_S_RUN) { 4661 sc->sc_curaid = ni->ni_associd; 4662 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid); 4663 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 4664 } 4665 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 4666 __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid); 4667 ath_hal_setrxfilter(ah, rfilt); 4668 4669 /* XXX is this to restore keycache on resume? */ 4670 if (vap->iv_opmode != IEEE80211_M_STA && 4671 (vap->iv_flags & IEEE80211_F_PRIVACY)) { 4672 for (i = 0; i < IEEE80211_WEP_NKID; i++) 4673 if (ath_hal_keyisvalid(ah, i)) 4674 ath_hal_keysetmac(ah, i, ni->ni_bssid); 4675 } 4676 4677 /* 4678 * Invoke the parent method to do net80211 work. 4679 */ 4680 error = avp->av_newstate(vap, nstate, arg); 4681 if (error != 0) 4682 goto bad; 4683 4684 /* 4685 * See above: ensure av_newstate() doesn't drop the lock 4686 * on us. 4687 */ 4688 IEEE80211_LOCK_ASSERT(ic); 4689 4690 if (nstate == IEEE80211_S_RUN) { 4691 /* NB: collect bss node again, it may have changed */ 4692 ieee80211_free_node(ni); 4693 ni = ieee80211_ref_node(vap->iv_bss); 4694 4695 DPRINTF(sc, ATH_DEBUG_STATE, 4696 "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s " 4697 "capinfo 0x%04x chan %d\n", __func__, 4698 vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), 4699 ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); 4700 4701 switch (vap->iv_opmode) { 4702#ifdef IEEE80211_SUPPORT_TDMA 4703 case IEEE80211_M_AHDEMO: 4704 if ((vap->iv_caps & IEEE80211_C_TDMA) == 0) 4705 break; 4706 /* fall thru... */ 4707#endif 4708 case IEEE80211_M_HOSTAP: 4709 case IEEE80211_M_IBSS: 4710 case IEEE80211_M_MBSS: 4711 /* 4712 * Allocate and setup the beacon frame. 4713 * 4714 * Stop any previous beacon DMA. This may be 4715 * necessary, for example, when an ibss merge 4716 * causes reconfiguration; there will be a state 4717 * transition from RUN->RUN that means we may 4718 * be called with beacon transmission active. 4719 */ 4720 ath_hal_stoptxdma(ah, sc->sc_bhalq); 4721 4722 error = ath_beacon_alloc(sc, ni); 4723 if (error != 0) 4724 goto bad; 4725 /* 4726 * If joining an adhoc network defer beacon timer 4727 * configuration to the next beacon frame so we 4728 * have a current TSF to use. Otherwise we're 4729 * starting an ibss/bss so there's no need to delay; 4730 * if this is the first vap moving to RUN state, then 4731 * beacon state needs to be [re]configured. 4732 */ 4733 if (vap->iv_opmode == IEEE80211_M_IBSS && 4734 ni->ni_tstamp.tsf != 0) { 4735 sc->sc_syncbeacon = 1; 4736 } else if (!sc->sc_beacons) { 4737#ifdef IEEE80211_SUPPORT_TDMA 4738 if (vap->iv_caps & IEEE80211_C_TDMA) 4739 ath_tdma_config(sc, vap); 4740 else 4741#endif 4742 ath_beacon_config(sc, vap); 4743 sc->sc_beacons = 1; 4744 } 4745 break; 4746 case IEEE80211_M_STA: 4747 /* 4748 * Defer beacon timer configuration to the next 4749 * beacon frame so we have a current TSF to use 4750 * (any TSF collected when scanning is likely old). 
4751 * However if it's due to a CSA -> RUN transition, 4752 * force a beacon update so we pick up a lack of 4753 * beacons from an AP in CAC and thus force a 4754 * scan. 4755 */ 4756 sc->sc_syncbeacon = 1; 4757 if (csa_run_transition) 4758 ath_beacon_config(sc, vap); 4759 break; 4760 case IEEE80211_M_MONITOR: 4761 /* 4762 * Monitor mode vaps have only INIT->RUN and RUN->RUN 4763 * transitions so we must re-enable interrupts here to 4764 * handle the case of a single monitor mode vap. 4765 */ 4766 ath_hal_intrset(ah, sc->sc_imask); 4767 break; 4768 case IEEE80211_M_WDS: 4769 break; 4770 default: 4771 break; 4772 } 4773 /* 4774 * Let the hal process statistics collected during a 4775 * scan so it can provide calibrated noise floor data. 4776 */ 4777 ath_hal_process_noisefloor(ah); 4778 /* 4779 * Reset rssi stats; maybe not the best place... 4780 */ 4781 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; 4782 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; 4783 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; 4784 /* 4785 * Finally, start any timers and the task q thread 4786 * (in case we didn't go through SCAN state). 4787 */ 4788 if (ath_longcalinterval != 0) { 4789 /* start periodic recalibration timer */ 4790 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 4791 } else { 4792 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 4793 "%s: calibration disabled\n", __func__); 4794 } 4795 taskqueue_unblock(sc->sc_tq); 4796 } else if (nstate == IEEE80211_S_INIT) { 4797 /* 4798 * If there are no vaps left in RUN state then 4799 * shutdown host/driver operation: 4800 * o disable interrupts 4801 * o disable the task queue thread 4802 * o mark beacon processing as stopped 4803 */ 4804 if (!ath_isanyrunningvaps(vap)) { 4805 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 4806 /* disable interrupts */ 4807 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL); 4808 taskqueue_block(sc->sc_tq); 4809 sc->sc_beacons = 0; 4810 } 4811#ifdef IEEE80211_SUPPORT_TDMA 4812 ath_hal_setcca(ah, AH_TRUE); 4813#endif 4814 } 4815bad: 4816 ieee80211_free_node(ni); 4817 return error; 4818} 4819 4820/* 4821 * Allocate a key cache slot to the station so we can 4822 * setup a mapping from key index to node. The key cache 4823 * slot is needed for managing antenna state and for 4824 * compression when stations do not use crypto. We do 4825 * it unilaterally here; if crypto is employed this slot 4826 * will be reassigned. 4827 */ 4828static void 4829ath_setup_stationkey(struct ieee80211_node *ni) 4830{ 4831 struct ieee80211vap *vap = ni->ni_vap; 4832 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 4833 ieee80211_keyix keyix, rxkeyix; 4834 4835 /* XXX should take a locked ref to vap->iv_bss */ 4836 if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) { 4837 /* 4838 * Key cache is full; we'll fall back to doing 4839 * the more expensive lookup in software. Note 4840 * this also means no h/w compression. 4841 */ 4842 /* XXX msg+statistic */ 4843 } else { 4844 /* XXX locking? */ 4845 ni->ni_ucastkey.wk_keyix = keyix; 4846 ni->ni_ucastkey.wk_rxkeyix = rxkeyix; 4847 /* NB: must mark device key to get called back on delete */ 4848 ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY; 4849 IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr); 4850 /* NB: this will create a pass-thru key entry */ 4851 ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss); 4852 } 4853} 4854 4855/* 4856 * Setup driver-specific state for a newly associated node.
4857 * Note that we're called also on a re-associate, the isnew 4858 * param tells us if this is the first time or not. 4859 */ 4860static void 4861ath_newassoc(struct ieee80211_node *ni, int isnew) 4862{ 4863 struct ath_node *an = ATH_NODE(ni); 4864 struct ieee80211vap *vap = ni->ni_vap; 4865 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 4866 const struct ieee80211_txparam *tp = ni->ni_txparms; 4867 4868 an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate); 4869 an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate); 4870 4871 ath_rate_newassoc(sc, an, isnew); 4872 if (isnew && 4873 (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey && 4874 ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE) 4875 ath_setup_stationkey(ni); 4876} 4877 4878static int 4879ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg, 4880 int nchans, struct ieee80211_channel chans[]) 4881{ 4882 struct ath_softc *sc = ic->ic_ifp->if_softc; 4883 struct ath_hal *ah = sc->sc_ah; 4884 HAL_STATUS status; 4885 4886 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 4887 "%s: rd %u cc %u location %c%s\n", 4888 __func__, reg->regdomain, reg->country, reg->location, 4889 reg->ecm ? " ecm" : ""); 4890 4891 status = ath_hal_set_channels(ah, chans, nchans, 4892 reg->country, reg->regdomain); 4893 if (status != HAL_OK) { 4894 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n", 4895 __func__, status); 4896 return EINVAL; /* XXX */ 4897 } 4898 4899 return 0; 4900} 4901 4902static void 4903ath_getradiocaps(struct ieee80211com *ic, 4904 int maxchans, int *nchans, struct ieee80211_channel chans[]) 4905{ 4906 struct ath_softc *sc = ic->ic_ifp->if_softc; 4907 struct ath_hal *ah = sc->sc_ah; 4908 4909 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n", 4910 __func__, SKU_DEBUG, CTRY_DEFAULT); 4911 4912 /* XXX check return */ 4913 (void) ath_hal_getchannels(ah, chans, maxchans, nchans, 4914 HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE); 4915 4916} 4917 4918static int 4919ath_getchannels(struct ath_softc *sc) 4920{ 4921 struct ifnet *ifp = sc->sc_ifp; 4922 struct ieee80211com *ic = ifp->if_l2com; 4923 struct ath_hal *ah = sc->sc_ah; 4924 HAL_STATUS status; 4925 4926 /* 4927 * Collect channel set based on EEPROM contents. 4928 */ 4929 status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX, 4930 &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE); 4931 if (status != HAL_OK) { 4932 if_printf(ifp, "%s: unable to collect channel list from hal, " 4933 "status %d\n", __func__, status); 4934 return EINVAL; 4935 } 4936 (void) ath_hal_getregdomain(ah, &sc->sc_eerd); 4937 ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */ 4938 /* XXX map Atheros sku's to net80211 SKU's */ 4939 /* XXX net80211 types too small */ 4940 ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd; 4941 ic->ic_regdomain.country = (uint16_t) sc->sc_eecc; 4942 ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */ 4943 ic->ic_regdomain.isocc[1] = ' '; 4944 4945 ic->ic_regdomain.ecm = 1; 4946 ic->ic_regdomain.location = 'I'; 4947 4948 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 4949 "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n", 4950 __func__, sc->sc_eerd, sc->sc_eecc, 4951 ic->ic_regdomain.regdomain, ic->ic_regdomain.country, 4952 ic->ic_regdomain.location, ic->ic_regdomain.ecm ? 
" ecm" : ""); 4953 return 0; 4954} 4955 4956static int 4957ath_rate_setup(struct ath_softc *sc, u_int mode) 4958{ 4959 struct ath_hal *ah = sc->sc_ah; 4960 const HAL_RATE_TABLE *rt; 4961 4962 switch (mode) { 4963 case IEEE80211_MODE_11A: 4964 rt = ath_hal_getratetable(ah, HAL_MODE_11A); 4965 break; 4966 case IEEE80211_MODE_HALF: 4967 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE); 4968 break; 4969 case IEEE80211_MODE_QUARTER: 4970 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE); 4971 break; 4972 case IEEE80211_MODE_11B: 4973 rt = ath_hal_getratetable(ah, HAL_MODE_11B); 4974 break; 4975 case IEEE80211_MODE_11G: 4976 rt = ath_hal_getratetable(ah, HAL_MODE_11G); 4977 break; 4978 case IEEE80211_MODE_TURBO_A: 4979 rt = ath_hal_getratetable(ah, HAL_MODE_108A); 4980 break; 4981 case IEEE80211_MODE_TURBO_G: 4982 rt = ath_hal_getratetable(ah, HAL_MODE_108G); 4983 break; 4984 case IEEE80211_MODE_STURBO_A: 4985 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); 4986 break; 4987 case IEEE80211_MODE_11NA: 4988 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20); 4989 break; 4990 case IEEE80211_MODE_11NG: 4991 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20); 4992 break; 4993 default: 4994 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", 4995 __func__, mode); 4996 return 0; 4997 } 4998 sc->sc_rates[mode] = rt; 4999 return (rt != NULL); 5000} 5001 5002static void 5003ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 5004{ 5005#define N(a) (sizeof(a)/sizeof(a[0])) 5006 /* NB: on/off times from the Atheros NDIS driver, w/ permission */ 5007 static const struct { 5008 u_int rate; /* tx/rx 802.11 rate */ 5009 u_int16_t timeOn; /* LED on time (ms) */ 5010 u_int16_t timeOff; /* LED off time (ms) */ 5011 } blinkrates[] = { 5012 { 108, 40, 10 }, 5013 { 96, 44, 11 }, 5014 { 72, 50, 13 }, 5015 { 48, 57, 14 }, 5016 { 36, 67, 16 }, 5017 { 24, 80, 20 }, 5018 { 22, 100, 25 }, 5019 { 18, 133, 34 }, 5020 { 12, 160, 40 }, 5021 { 10, 200, 50 }, 5022 { 6, 240, 58 }, 5023 { 4, 267, 66 }, 5024 { 2, 400, 100 }, 5025 { 0, 500, 130 }, 5026 /* XXX half/quarter rates */ 5027 }; 5028 const HAL_RATE_TABLE *rt; 5029 int i, j; 5030 5031 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); 5032 rt = sc->sc_rates[mode]; 5033 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); 5034 for (i = 0; i < rt->rateCount; i++) { 5035 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 5036 if (rt->info[i].phy != IEEE80211_T_HT) 5037 sc->sc_rixmap[ieeerate] = i; 5038 else 5039 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i; 5040 } 5041 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); 5042 for (i = 0; i < N(sc->sc_hwmap); i++) { 5043 if (i >= rt->rateCount) { 5044 sc->sc_hwmap[i].ledon = (500 * hz) / 1000; 5045 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000; 5046 continue; 5047 } 5048 sc->sc_hwmap[i].ieeerate = 5049 rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 5050 if (rt->info[i].phy == IEEE80211_T_HT) 5051 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS; 5052 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; 5053 if (rt->info[i].shortPreamble || 5054 rt->info[i].phy == IEEE80211_T_OFDM) 5055 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE; 5056 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags; 5057 for (j = 0; j < N(blinkrates)-1; j++) 5058 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate) 5059 break; 5060 /* NB: this uses the last entry if the rate isn't found */ 5061 /* XXX beware of overlow */ 5062 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000; 5063 
sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000; 5064 } 5065 sc->sc_currates = rt; 5066 sc->sc_curmode = mode; 5067 /* 5068 * All protection frames are transmitted at 2Mb/s for 5069 * 11g, otherwise at 1Mb/s. 5070 */ 5071 if (mode == IEEE80211_MODE_11G) 5072 sc->sc_protrix = ath_tx_findrix(sc, 2*2); 5073 else 5074 sc->sc_protrix = ath_tx_findrix(sc, 2*1); 5075 /* NB: caller is responsible for resetting rate control state */ 5076#undef N 5077} 5078 5079static void 5080ath_watchdog(void *arg) 5081{ 5082 struct ath_softc *sc = arg; 5083 int do_reset = 0; 5084 5085 if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) { 5086 struct ifnet *ifp = sc->sc_ifp; 5087 uint32_t hangs; 5088 5089 if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) && 5090 hangs != 0) { 5091 if_printf(ifp, "%s hang detected (0x%x)\n", 5092 hangs & 0xff ? "bb" : "mac", hangs); 5093 } else 5094 if_printf(ifp, "device timeout\n"); 5095 do_reset = 1; 5096 ifp->if_oerrors++; 5097 sc->sc_stats.ast_watchdog++; 5098 } 5099 5100 /* 5101 * We can't hold the lock across the ath_reset() call. 5102 * 5103 * And since this routine can't hold a lock and sleep, 5104 * do the reset deferred. 5105 */ 5106 if (do_reset) { 5107 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); 5108 } 5109 5110 callout_schedule(&sc->sc_wd_ch, hz); 5111} 5112 5113/* 5114 * Fetch the rate control statistics for the given node. 5115 */ 5116static int 5117ath_ioctl_ratestats(struct ath_softc *sc, struct ath_rateioctl *rs) 5118{ 5119 struct ath_node *an; 5120 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 5121 struct ieee80211_node *ni; 5122 int error = 0; 5123 5124 /* Perform a lookup on the given node */ 5125 ni = ieee80211_find_node(&ic->ic_sta, rs->is_u.macaddr); 5126 if (ni == NULL) { 5127 error = EINVAL; 5128 goto bad; 5129 } 5130 5131 /* Lock the ath_node */ 5132 an = ATH_NODE(ni); 5133 ATH_NODE_LOCK(an); 5134 5135 /* Fetch the rate control stats for this node */ 5136 error = ath_rate_fetch_node_stats(sc, an, rs); 5137 5138 /* No matter what happens here, just drop through */ 5139 5140 /* Unlock the ath_node */ 5141 ATH_NODE_UNLOCK(an); 5142 5143 /* Unref the node */ 5144 ieee80211_node_decref(ni); 5145 5146bad: 5147 return (error); 5148} 5149 5150#ifdef ATH_DIAGAPI 5151/* 5152 * Diagnostic interface to the HAL. This is used by various 5153 * tools to do things like retrieve register contents for 5154 * debugging. The mechanism is intentionally opaque so that 5155 * it can change frequently w/o concern for compatibility. 5156 */ 5157static int 5158ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad) 5159{ 5160 struct ath_hal *ah = sc->sc_ah; 5161 u_int id = ad->ad_id & ATH_DIAG_ID; 5162 void *indata = NULL; 5163 void *outdata = NULL; 5164 u_int32_t insize = ad->ad_in_size; 5165 u_int32_t outsize = ad->ad_out_size; 5166 int error = 0; 5167 5168 if (ad->ad_id & ATH_DIAG_IN) { 5169 /* 5170 * Copy in data. 5171 */ 5172 indata = malloc(insize, M_TEMP, M_NOWAIT); 5173 if (indata == NULL) { 5174 error = ENOMEM; 5175 goto bad; 5176 } 5177 error = copyin(ad->ad_in_data, indata, insize); 5178 if (error) 5179 goto bad; 5180 } 5181 if (ad->ad_id & ATH_DIAG_DYN) { 5182 /* 5183 * Allocate a buffer for the results (otherwise the HAL 5184 * returns a pointer to a buffer where we can read the 5185 * results). Note that we depend on the HAL leaving this 5186 * pointer for us to use below in reclaiming the buffer; 5187 * may want to be more defensive.
5188 */ 5189 outdata = malloc(outsize, M_TEMP, M_NOWAIT); 5190 if (outdata == NULL) { 5191 error = ENOMEM; 5192 goto bad; 5193 } 5194 } 5195 if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) { 5196 if (outsize < ad->ad_out_size) 5197 ad->ad_out_size = outsize; 5198 if (outdata != NULL) 5199 error = copyout(outdata, ad->ad_out_data, 5200 ad->ad_out_size); 5201 } else { 5202 error = EINVAL; 5203 } 5204bad: 5205 if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL) 5206 free(indata, M_TEMP); 5207 if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL) 5208 free(outdata, M_TEMP); 5209 return error; 5210} 5211#endif /* ATH_DIAGAPI */ 5212 5213static int 5214ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 5215{ 5216#define IS_RUNNING(ifp) \ 5217 ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) 5218 struct ath_softc *sc = ifp->if_softc; 5219 struct ieee80211com *ic = ifp->if_l2com; 5220 struct ifreq *ifr = (struct ifreq *)data; 5221 const HAL_RATE_TABLE *rt; 5222 int error = 0; 5223 5224 switch (cmd) { 5225 case SIOCSIFFLAGS: 5226 ATH_LOCK(sc); 5227 if (IS_RUNNING(ifp)) { 5228 /* 5229 * To avoid rescanning another access point, 5230 * do not call ath_init() here. Instead, 5231 * only reflect promisc mode settings. 5232 */ 5233 ath_mode_init(sc); 5234 } else if (ifp->if_flags & IFF_UP) { 5235 /* 5236 * Beware of being called during attach/detach 5237 * to reset promiscuous mode. In that case we 5238 * will still be marked UP but not RUNNING. 5239 * However trying to re-init the interface 5240 * is the wrong thing to do as we've already 5241 * torn down much of our state. There's 5242 * probably a better way to deal with this. 5243 */ 5244 if (!sc->sc_invalid) 5245 ath_init(sc); /* XXX lose error */ 5246 } else { 5247 ath_stop_locked(ifp); 5248#ifdef notyet 5249 /* XXX must wakeup in places like ath_vap_delete */ 5250 if (!sc->sc_invalid) 5251 ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP); 5252#endif 5253 } 5254 ATH_UNLOCK(sc); 5255 break; 5256 case SIOCGIFMEDIA: 5257 case SIOCSIFMEDIA: 5258 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 5259 break; 5260 case SIOCGATHSTATS: 5261 /* NB: embed these numbers to get a consistent view */ 5262 sc->sc_stats.ast_tx_packets = ifp->if_opackets; 5263 sc->sc_stats.ast_rx_packets = ifp->if_ipackets; 5264 sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi); 5265 sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi); 5266#ifdef IEEE80211_SUPPORT_TDMA 5267 sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap); 5268 sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam); 5269#endif 5270 rt = sc->sc_currates; 5271 sc->sc_stats.ast_tx_rate = 5272 rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC; 5273 if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT) 5274 sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS; 5275 return copyout(&sc->sc_stats, 5276 ifr->ifr_data, sizeof (sc->sc_stats)); 5277 case SIOCGATHAGSTATS: 5278 return copyout(&sc->sc_aggr_stats, 5279 ifr->ifr_data, sizeof (sc->sc_aggr_stats)); 5280 case SIOCZATHSTATS: 5281 error = priv_check(curthread, PRIV_DRIVER); 5282 if (error == 0) { 5283 memset(&sc->sc_stats, 0, sizeof(sc->sc_stats)); 5284 memset(&sc->sc_aggr_stats, 0, 5285 sizeof(sc->sc_aggr_stats)); 5286 memset(&sc->sc_intr_stats, 0, 5287 sizeof(sc->sc_intr_stats)); 5288 } 5289 break; 5290#ifdef ATH_DIAGAPI 5291 case SIOCGATHDIAG: 5292 error = ath_ioctl_diag(sc, (struct ath_diag *) ifr); 5293 break; 5294 case SIOCGATHPHYERR: 5295 error = ath_ioctl_phyerr(sc,(struct 
ath_diag*) ifr); 5296 break; 5297#endif 5298 case SIOCGATHNODERATESTATS: 5299 error = ath_ioctl_ratestats(sc, (struct ath_rateioctl *) ifr); 5300 break; 5301 case SIOCGIFADDR: 5302 error = ether_ioctl(ifp, cmd, data); 5303 break; 5304 default: 5305 error = EINVAL; 5306 break; 5307 } 5308 return error; 5309#undef IS_RUNNING 5310} 5311 5312/* 5313 * Announce various information on device/driver attach. 5314 */ 5315static void 5316ath_announce(struct ath_softc *sc) 5317{ 5318 struct ifnet *ifp = sc->sc_ifp; 5319 struct ath_hal *ah = sc->sc_ah; 5320 5321 if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n", 5322 ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev, 5323 ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf); 5324 if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n", 5325 ah->ah_analog2GhzRev, ah->ah_analog5GhzRev); 5326 if (bootverbose) { 5327 int i; 5328 for (i = 0; i <= WME_AC_VO; i++) { 5329 struct ath_txq *txq = sc->sc_ac2q[i]; 5330 if_printf(ifp, "Use hw queue %u for %s traffic\n", 5331 txq->axq_qnum, ieee80211_wme_acnames[i]); 5332 } 5333 if_printf(ifp, "Use hw queue %u for CAB traffic\n", 5334 sc->sc_cabq->axq_qnum); 5335 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq); 5336 } 5337 if (ath_rxbuf != ATH_RXBUF) 5338 if_printf(ifp, "using %u rx buffers\n", ath_rxbuf); 5339 if (ath_txbuf != ATH_TXBUF) 5340 if_printf(ifp, "using %u tx buffers\n", ath_txbuf); 5341 if (sc->sc_mcastkey && bootverbose) 5342 if_printf(ifp, "using multicast key search\n"); 5343} 5344 5345static void 5346ath_dfs_tasklet(void *p, int npending) 5347{ 5348 struct ath_softc *sc = (struct ath_softc *) p; 5349 struct ifnet *ifp = sc->sc_ifp; 5350 struct ieee80211com *ic = ifp->if_l2com; 5351 5352 /* 5353 * If previous processing has found a radar event, 5354 * signal this to the net80211 layer to begin DFS 5355 * processing. 5356 */ 5357 if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) { 5358 /* DFS event found, initiate channel change */ 5359 /* 5360 * XXX doesn't currently tell us whether the event 5361 * XXX was found in the primary or extension 5362 * XXX channel! 5363 */ 5364 IEEE80211_LOCK(ic); 5365 ieee80211_dfs_notify_radar(ic, sc->sc_curchan); 5366 IEEE80211_UNLOCK(ic); 5367 } 5368} 5369 5370/* 5371 * Enable/disable power save. This must be called with 5372 * no TX driver locks currently held, so it should only 5373 * be called from the RX path (which doesn't hold any 5374 * TX driver locks.) 5375 */ 5376static void 5377ath_node_powersave(struct ieee80211_node *ni, int enable) 5378{ 5379 struct ath_node *an = ATH_NODE(ni); 5380 struct ieee80211com *ic = ni->ni_ic; 5381 struct ath_softc *sc = ic->ic_ifp->if_softc; 5382 struct ath_vap *avp = ATH_VAP(ni->ni_vap); 5383 5384 ATH_NODE_UNLOCK_ASSERT(an); 5385 /* XXX and no TXQ locks should be held here */ 5386 5387 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: ni=%p, enable=%d\n", 5388 __func__, ni, enable); 5389 5390 /* Suspend or resume software queue handling */ 5391 if (enable) 5392 ath_tx_node_sleep(sc, an); 5393 else 5394 ath_tx_node_wakeup(sc, an); 5395 5396 /* Update net80211 state */ 5397 avp->av_node_ps(ni, enable); 5398} 5399 5400 5401MODULE_VERSION(if_ath, 1); 5402MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */ 5403#if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ) 5404MODULE_DEPEND(if_ath, alq, 1, 1, 1); 5405#endif 5406
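/*
 * Illustrative only, not part of the driver: a minimal userland
 * consumer of the SIOCGATHSTATS ioctl handled in ath_ioctl() above
 * might look roughly like the sketch below. It assumes struct
 * ath_stats and SIOCGATHSTATS are picked up from
 * <dev/ath/if_athioctl.h>, that the interface is named "ath0", and it
 * omits error handling.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <dev/ath/if_athioctl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct ifreq ifr;
 *		struct ath_stats stats;
 *		int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strlcpy(ifr.ifr_name, "ath0", sizeof(ifr.ifr_name));
 *		ifr.ifr_data = (caddr_t) &stats;
 *		if (ioctl(s, SIOCGATHSTATS, &ifr) == 0)
 *			printf("tx %lu rx %lu\n",
 *			    (u_long) stats.ast_tx_packets,
 *			    (u_long) stats.ast_rx_packets);
 *		return (0);
 *	}
 */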