if_ath.c revision 239282
1/*- 2 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 13 * redistribution must be conditioned upon including a substantially 14 * similar Disclaimer requirement for further binary redistribution. 15 * 16 * NO WARRANTY 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 27 * THE POSSIBILITY OF SUCH DAMAGES. 28 */ 29 30#include <sys/cdefs.h> 31__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 239282 2012-08-15 06:48:34Z adrian $"); 32 33/* 34 * Driver for the Atheros Wireless LAN controller. 35 * 36 * This software is derived from work of Atsushi Onoe; his contribution 37 * is greatly appreciated. 38 */ 39 40#include "opt_inet.h" 41#include "opt_ath.h" 42/* 43 * This is needed for register operations which are performed 44 * by the driver - eg, calls to ath_hal_gettsf32(). 45 * 46 * It's also required for any AH_DEBUG checks in here, eg the 47 * module dependencies. 
48 */ 49#include "opt_ah.h" 50#include "opt_wlan.h" 51 52#include <sys/param.h> 53#include <sys/systm.h> 54#include <sys/sysctl.h> 55#include <sys/mbuf.h> 56#include <sys/malloc.h> 57#include <sys/lock.h> 58#include <sys/mutex.h> 59#include <sys/kernel.h> 60#include <sys/socket.h> 61#include <sys/sockio.h> 62#include <sys/errno.h> 63#include <sys/callout.h> 64#include <sys/bus.h> 65#include <sys/endian.h> 66#include <sys/kthread.h> 67#include <sys/taskqueue.h> 68#include <sys/priv.h> 69#include <sys/module.h> 70#include <sys/ktr.h> 71#include <sys/smp.h> /* for mp_ncpus */ 72 73#include <machine/bus.h> 74 75#include <net/if.h> 76#include <net/if_dl.h> 77#include <net/if_media.h> 78#include <net/if_types.h> 79#include <net/if_arp.h> 80#include <net/ethernet.h> 81#include <net/if_llc.h> 82 83#include <net80211/ieee80211_var.h> 84#include <net80211/ieee80211_regdomain.h> 85#ifdef IEEE80211_SUPPORT_SUPERG 86#include <net80211/ieee80211_superg.h> 87#endif 88#ifdef IEEE80211_SUPPORT_TDMA 89#include <net80211/ieee80211_tdma.h> 90#endif 91 92#include <net/bpf.h> 93 94#ifdef INET 95#include <netinet/in.h> 96#include <netinet/if_ether.h> 97#endif 98 99#include <dev/ath/if_athvar.h> 100#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */ 101#include <dev/ath/ath_hal/ah_diagcodes.h> 102 103#include <dev/ath/if_ath_debug.h> 104#include <dev/ath/if_ath_misc.h> 105#include <dev/ath/if_ath_tsf.h> 106#include <dev/ath/if_ath_tx.h> 107#include <dev/ath/if_ath_sysctl.h> 108#include <dev/ath/if_ath_led.h> 109#include <dev/ath/if_ath_keycache.h> 110#include <dev/ath/if_ath_rx.h> 111#include <dev/ath/if_ath_rx_edma.h> 112#include <dev/ath/if_ath_tx_edma.h> 113#include <dev/ath/if_ath_beacon.h> 114#include <dev/ath/if_athdfs.h> 115 116#ifdef ATH_TX99_DIAG 117#include <dev/ath/ath_tx99/ath_tx99.h> 118#endif 119 120/* 121 * ATH_BCBUF determines the number of vap's that can transmit 122 * beacons and also (currently) the number of vap's that can 123 * have unique mac addresses/bssid. When staggering beacons 124 * 4 is probably a good max as otherwise the beacons become 125 * very closely spaced and there is limited time for cab q traffic 126 * to go out. You can burst beacons instead but that is not good 127 * for stations in power save and at some point you really want 128 * another radio (and channel). 129 * 130 * The limit on the number of mac addresses is tied to our use of 131 * the U/L bit and tracking addresses in a byte; it would be 132 * worthwhile to allow more for applications like proxy sta. 
133 */ 134CTASSERT(ATH_BCBUF <= 8); 135 136static struct ieee80211vap *ath_vap_create(struct ieee80211com *, 137 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 138 const uint8_t [IEEE80211_ADDR_LEN], 139 const uint8_t [IEEE80211_ADDR_LEN]); 140static void ath_vap_delete(struct ieee80211vap *); 141static void ath_init(void *); 142static void ath_stop_locked(struct ifnet *); 143static void ath_stop(struct ifnet *); 144static int ath_reset_vap(struct ieee80211vap *, u_long); 145static int ath_media_change(struct ifnet *); 146static void ath_watchdog(void *); 147static int ath_ioctl(struct ifnet *, u_long, caddr_t); 148static void ath_fatal_proc(void *, int); 149static void ath_bmiss_vap(struct ieee80211vap *); 150static void ath_bmiss_proc(void *, int); 151static void ath_key_update_begin(struct ieee80211vap *); 152static void ath_key_update_end(struct ieee80211vap *); 153static void ath_update_mcast(struct ifnet *); 154static void ath_update_promisc(struct ifnet *); 155static void ath_updateslot(struct ifnet *); 156static void ath_bstuck_proc(void *, int); 157static void ath_reset_proc(void *, int); 158static int ath_desc_alloc(struct ath_softc *); 159static void ath_desc_free(struct ath_softc *); 160static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *, 161 const uint8_t [IEEE80211_ADDR_LEN]); 162static void ath_node_cleanup(struct ieee80211_node *); 163static void ath_node_free(struct ieee80211_node *); 164static void ath_node_getsignal(const struct ieee80211_node *, 165 int8_t *, int8_t *); 166static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int); 167static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype); 168static int ath_tx_setup(struct ath_softc *, int, int); 169static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *); 170static void ath_tx_cleanup(struct ath_softc *); 171static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, 172 int dosched); 173static void ath_tx_proc_q0(void *, int); 174static void ath_tx_proc_q0123(void *, int); 175static void ath_tx_proc(void *, int); 176static void ath_txq_sched_tasklet(void *, int); 177static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); 178static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *); 179static void ath_scan_start(struct ieee80211com *); 180static void ath_scan_end(struct ieee80211com *); 181static void ath_set_channel(struct ieee80211com *); 182#ifdef ATH_ENABLE_11N 183static void ath_update_chw(struct ieee80211com *); 184#endif /* ATH_ENABLE_11N */ 185static void ath_calibrate(void *); 186static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int); 187static void ath_setup_stationkey(struct ieee80211_node *); 188static void ath_newassoc(struct ieee80211_node *, int); 189static int ath_setregdomain(struct ieee80211com *, 190 struct ieee80211_regdomain *, int, 191 struct ieee80211_channel []); 192static void ath_getradiocaps(struct ieee80211com *, int, int *, 193 struct ieee80211_channel []); 194static int ath_getchannels(struct ath_softc *); 195 196static int ath_rate_setup(struct ath_softc *, u_int mode); 197static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); 198 199static void ath_announce(struct ath_softc *); 200 201static void ath_dfs_tasklet(void *, int); 202 203#ifdef IEEE80211_SUPPORT_TDMA 204#include <dev/ath/if_ath_tdma.h> 205#endif 206 207#if 0 208#define TDMA_EP_MULTIPLIER (1<<10) /* pow2 to optimize out * and / */ 209#define TDMA_LPF_LEN 6 210#define 
TDMA_DUMMY_MARKER 0x127 211#define TDMA_EP_MUL(x, mul) ((x) * (mul)) 212#define TDMA_IN(x) (TDMA_EP_MUL((x), TDMA_EP_MULTIPLIER)) 213#define TDMA_LPF(x, y, len) \ 214 ((x != TDMA_DUMMY_MARKER) ? (((x) * ((len)-1) + (y)) / (len)) : (y)) 215#define TDMA_SAMPLE(x, y) do { \ 216 x = TDMA_LPF((x), TDMA_IN(y), TDMA_LPF_LEN); \ 217} while (0) 218#define TDMA_EP_RND(x,mul) \ 219 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul)) 220#define TDMA_AVG(x) TDMA_EP_RND(x, TDMA_EP_MULTIPLIER) 221#endif /* IEEE80211_SUPPORT_TDMA */ 222 223SYSCTL_DECL(_hw_ath); 224 225/* XXX validate sysctl values */ 226static int ath_longcalinterval = 30; /* long cals every 30 secs */ 227SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval, 228 0, "long chip calibration interval (secs)"); 229static int ath_shortcalinterval = 100; /* short cals every 100 ms */ 230SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval, 231 0, "short chip calibration interval (msecs)"); 232static int ath_resetcalinterval = 20*60; /* reset cal state 20 mins */ 233SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval, 234 0, "reset chip calibration results (secs)"); 235static int ath_anicalinterval = 100; /* ANI calibration - 100 msec */ 236SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval, 237 0, "ANI calibration (msecs)"); 238 239int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */ 240SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf, 241 0, "rx buffers allocated"); 242TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf); 243int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */ 244SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf, 245 0, "tx buffers allocated"); 246TUNABLE_INT("hw.ath.txbuf", &ath_txbuf); 247int ath_txbuf_mgmt = ATH_MGMT_TXBUF; /* # mgmt tx buffers to allocate */ 248SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RW, &ath_txbuf_mgmt, 249 0, "tx (mgmt) buffers allocated"); 250TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt); 251 252int ath_bstuck_threshold = 4; /* max missed beacons */ 253SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold, 254 0, "max missed beacon xmits before chip reset"); 255 256MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers"); 257 258void 259ath_legacy_attach_comp_func(struct ath_softc *sc) 260{ 261 262 /* 263 * Special case certain configurations. Note the 264 * CAB queue is handled by these specially so don't 265 * include them when checking the txq setup mask. 
266 */ 267 switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) { 268 case 0x01: 269 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc); 270 break; 271 case 0x0f: 272 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc); 273 break; 274 default: 275 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc); 276 break; 277 } 278} 279 280#define HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20) 281#define HAL_MODE_HT40 \ 282 (HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \ 283 HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS) 284int 285ath_attach(u_int16_t devid, struct ath_softc *sc) 286{ 287 struct ifnet *ifp; 288 struct ieee80211com *ic; 289 struct ath_hal *ah = NULL; 290 HAL_STATUS status; 291 int error = 0, i; 292 u_int wmodes; 293 uint8_t macaddr[IEEE80211_ADDR_LEN]; 294 int rx_chainmask, tx_chainmask; 295 296 DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid); 297 298 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 299 if (ifp == NULL) { 300 device_printf(sc->sc_dev, "can not if_alloc()\n"); 301 error = ENOSPC; 302 goto bad; 303 } 304 ic = ifp->if_l2com; 305 306 /* set these up early for if_printf use */ 307 if_initname(ifp, device_get_name(sc->sc_dev), 308 device_get_unit(sc->sc_dev)); 309 310 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, 311 sc->sc_eepromdata, &status); 312 if (ah == NULL) { 313 if_printf(ifp, "unable to attach hardware; HAL status %u\n", 314 status); 315 error = ENXIO; 316 goto bad; 317 } 318 sc->sc_ah = ah; 319 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */ 320#ifdef ATH_DEBUG 321 sc->sc_debug = ath_debug; 322#endif 323 324 /* 325 * Setup the DMA/EDMA functions based on the current 326 * hardware support. 327 * 328 * This is required before the descriptors are allocated. 329 */ 330 if (ath_hal_hasedma(sc->sc_ah)) { 331 sc->sc_isedma = 1; 332 ath_recv_setup_edma(sc); 333 ath_xmit_setup_edma(sc); 334 } else { 335 ath_recv_setup_legacy(sc); 336 ath_xmit_setup_legacy(sc); 337 } 338 339 /* 340 * Check if the MAC has multi-rate retry support. 341 * We do this by trying to setup a fake extended 342 * descriptor. MAC's that don't have support will 343 * return false w/o doing anything. MAC's that do 344 * support it will return true w/o doing anything. 345 */ 346 sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0); 347 348 /* 349 * Check if the device has hardware counters for PHY 350 * errors. If so we need to enable the MIB interrupt 351 * so we can act on stat triggers. 352 */ 353 if (ath_hal_hwphycounters(ah)) 354 sc->sc_needmib = 1; 355 356 /* 357 * Get the hardware key cache size. 358 */ 359 sc->sc_keymax = ath_hal_keycachesize(ah); 360 if (sc->sc_keymax > ATH_KEYMAX) { 361 if_printf(ifp, "Warning, using only %u of %u key cache slots\n", 362 ATH_KEYMAX, sc->sc_keymax); 363 sc->sc_keymax = ATH_KEYMAX; 364 } 365 /* 366 * Reset the key cache since some parts do not 367 * reset the contents on initial power up. 368 */ 369 for (i = 0; i < sc->sc_keymax; i++) 370 ath_hal_keyreset(ah, i); 371 372 /* 373 * Collect the default channel list. 374 */ 375 error = ath_getchannels(sc); 376 if (error != 0) 377 goto bad; 378 379 /* 380 * Setup rate tables for all potential media types. 
381 */ 382 ath_rate_setup(sc, IEEE80211_MODE_11A); 383 ath_rate_setup(sc, IEEE80211_MODE_11B); 384 ath_rate_setup(sc, IEEE80211_MODE_11G); 385 ath_rate_setup(sc, IEEE80211_MODE_TURBO_A); 386 ath_rate_setup(sc, IEEE80211_MODE_TURBO_G); 387 ath_rate_setup(sc, IEEE80211_MODE_STURBO_A); 388 ath_rate_setup(sc, IEEE80211_MODE_11NA); 389 ath_rate_setup(sc, IEEE80211_MODE_11NG); 390 ath_rate_setup(sc, IEEE80211_MODE_HALF); 391 ath_rate_setup(sc, IEEE80211_MODE_QUARTER); 392 393 /* NB: setup here so ath_rate_update is happy */ 394 ath_setcurmode(sc, IEEE80211_MODE_11A); 395 396 /* 397 * Allocate TX descriptors and populate the lists. 398 */ 399 error = ath_desc_alloc(sc); 400 if (error != 0) { 401 if_printf(ifp, "failed to allocate TX descriptors: %d\n", 402 error); 403 goto bad; 404 } 405 error = ath_txdma_setup(sc); 406 if (error != 0) { 407 if_printf(ifp, "failed to allocate TX descriptors: %d\n", 408 error); 409 goto bad; 410 } 411 412 /* 413 * Allocate RX descriptors and populate the lists. 414 */ 415 error = ath_rxdma_setup(sc); 416 if (error != 0) { 417 if_printf(ifp, "failed to allocate RX descriptors: %d\n", 418 error); 419 goto bad; 420 } 421 422 callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0); 423 callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0); 424 425 ATH_TXBUF_LOCK_INIT(sc); 426 427 sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT, 428 taskqueue_thread_enqueue, &sc->sc_tq); 429 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, 430 "%s taskq", ifp->if_xname); 431 432 TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc); 433 TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc); 434 TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc); 435 TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc); 436 TASK_INIT(&sc->sc_txqtask,0, ath_txq_sched_tasklet, sc); 437 TASK_INIT(&sc->sc_fataltask,0, ath_fatal_proc, sc); 438 439 /* 440 * Allocate hardware transmit queues: one queue for 441 * beacon frames and one data queue for each QoS 442 * priority. Note that the hal handles resetting 443 * these queues at the needed time. 444 * 445 * XXX PS-Poll 446 */ 447 sc->sc_bhalq = ath_beaconq_setup(sc); 448 if (sc->sc_bhalq == (u_int) -1) { 449 if_printf(ifp, "unable to setup a beacon xmit queue!\n"); 450 error = EIO; 451 goto bad2; 452 } 453 sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0); 454 if (sc->sc_cabq == NULL) { 455 if_printf(ifp, "unable to setup CAB xmit queue!\n"); 456 error = EIO; 457 goto bad2; 458 } 459 /* NB: insure BK queue is the lowest priority h/w queue */ 460 if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) { 461 if_printf(ifp, "unable to setup xmit queue for %s traffic!\n", 462 ieee80211_wme_acnames[WME_AC_BK]); 463 error = EIO; 464 goto bad2; 465 } 466 if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) || 467 !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) || 468 !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) { 469 /* 470 * Not enough hardware tx queues to properly do WME; 471 * just punt and assign them all to the same h/w queue. 472 * We could do a better job of this if, for example, 473 * we allocate queues when we switch from station to 474 * AP mode. 475 */ 476 if (sc->sc_ac2q[WME_AC_VI] != NULL) 477 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); 478 if (sc->sc_ac2q[WME_AC_BE] != NULL) 479 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); 480 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; 481 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; 482 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; 483 } 484 485 /* 486 * Attach the TX completion function. 
487 * 488 * The non-EDMA chips may have some special case optimisations; 489 * this method gives everyone a chance to attach cleanly. 490 */ 491 sc->sc_tx.xmit_attach_comp_func(sc); 492 493 /* 494 * Setup rate control. Some rate control modules 495 * call back to change the anntena state so expose 496 * the necessary entry points. 497 * XXX maybe belongs in struct ath_ratectrl? 498 */ 499 sc->sc_setdefantenna = ath_setdefantenna; 500 sc->sc_rc = ath_rate_attach(sc); 501 if (sc->sc_rc == NULL) { 502 error = EIO; 503 goto bad2; 504 } 505 506 /* Attach DFS module */ 507 if (! ath_dfs_attach(sc)) { 508 device_printf(sc->sc_dev, 509 "%s: unable to attach DFS\n", __func__); 510 error = EIO; 511 goto bad2; 512 } 513 514 /* Start DFS processing tasklet */ 515 TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc); 516 517 /* Configure LED state */ 518 sc->sc_blinking = 0; 519 sc->sc_ledstate = 1; 520 sc->sc_ledon = 0; /* low true */ 521 sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */ 522 callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE); 523 524 /* 525 * Don't setup hardware-based blinking. 526 * 527 * Although some NICs may have this configured in the 528 * default reset register values, the user may wish 529 * to alter which pins have which function. 530 * 531 * The reference driver attaches the MAC network LED to GPIO1 and 532 * the MAC power LED to GPIO2. However, the DWA-552 cardbus 533 * NIC has these reversed. 534 */ 535 sc->sc_hardled = (1 == 0); 536 sc->sc_led_net_pin = -1; 537 sc->sc_led_pwr_pin = -1; 538 /* 539 * Auto-enable soft led processing for IBM cards and for 540 * 5211 minipci cards. Users can also manually enable/disable 541 * support with a sysctl. 542 */ 543 sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID); 544 ath_led_config(sc); 545 ath_hal_setledstate(ah, HAL_LED_INIT); 546 547 ifp->if_softc = sc; 548 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; 549 ifp->if_start = ath_start; 550 ifp->if_ioctl = ath_ioctl; 551 ifp->if_init = ath_init; 552 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 553 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 554 IFQ_SET_READY(&ifp->if_snd); 555 556 ic->ic_ifp = ifp; 557 /* XXX not right but it's not used anywhere important */ 558 ic->ic_phytype = IEEE80211_T_OFDM; 559 ic->ic_opmode = IEEE80211_M_STA; 560 ic->ic_caps = 561 IEEE80211_C_STA /* station mode */ 562 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ 563 | IEEE80211_C_HOSTAP /* hostap mode */ 564 | IEEE80211_C_MONITOR /* monitor mode */ 565 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 566 | IEEE80211_C_WDS /* 4-address traffic works */ 567 | IEEE80211_C_MBSS /* mesh point link mode */ 568 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 569 | IEEE80211_C_SHSLOT /* short slot time supported */ 570 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ 571#ifndef ATH_ENABLE_11N 572 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 573#endif 574 | IEEE80211_C_TXFRAG /* handle tx frags */ 575#ifdef ATH_ENABLE_DFS 576 | IEEE80211_C_DFS /* Enable radar detection */ 577#endif 578 ; 579 /* 580 * Query the hal to figure out h/w crypto support. 
581 */ 582 if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP)) 583 ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP; 584 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB)) 585 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB; 586 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM)) 587 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM; 588 if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP)) 589 ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP; 590 if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) { 591 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP; 592 /* 593 * Check if h/w does the MIC and/or whether the 594 * separate key cache entries are required to 595 * handle both tx+rx MIC keys. 596 */ 597 if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC)) 598 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 599 /* 600 * If the h/w supports storing tx+rx MIC keys 601 * in one cache slot automatically enable use. 602 */ 603 if (ath_hal_hastkipsplit(ah) || 604 !ath_hal_settkipsplit(ah, AH_FALSE)) 605 sc->sc_splitmic = 1; 606 /* 607 * If the h/w can do TKIP MIC together with WME then 608 * we use it; otherwise we force the MIC to be done 609 * in software by the net80211 layer. 610 */ 611 if (ath_hal_haswmetkipmic(ah)) 612 sc->sc_wmetkipmic = 1; 613 } 614 sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR); 615 /* 616 * Check for multicast key search support. 617 */ 618 if (ath_hal_hasmcastkeysearch(sc->sc_ah) && 619 !ath_hal_getmcastkeysearch(sc->sc_ah)) { 620 ath_hal_setmcastkeysearch(sc->sc_ah, 1); 621 } 622 sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah); 623 /* 624 * Mark key cache slots associated with global keys 625 * as in use. If we knew TKIP was not to be used we 626 * could leave the +32, +64, and +32+64 slots free. 627 */ 628 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 629 setbit(sc->sc_keymap, i); 630 setbit(sc->sc_keymap, i+64); 631 if (sc->sc_splitmic) { 632 setbit(sc->sc_keymap, i+32); 633 setbit(sc->sc_keymap, i+32+64); 634 } 635 } 636 /* 637 * TPC support can be done either with a global cap or 638 * per-packet support. The latter is not available on 639 * all parts. We're a bit pedantic here as all parts 640 * support a global cap. 641 */ 642 if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah)) 643 ic->ic_caps |= IEEE80211_C_TXPMGT; 644 645 /* 646 * Mark WME capability only if we have sufficient 647 * hardware queues to do proper priority scheduling. 648 */ 649 if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK]) 650 ic->ic_caps |= IEEE80211_C_WME; 651 /* 652 * Check for misc other capabilities. 653 */ 654 if (ath_hal_hasbursting(ah)) 655 ic->ic_caps |= IEEE80211_C_BURST; 656 sc->sc_hasbmask = ath_hal_hasbssidmask(ah); 657 sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah); 658 sc->sc_hastsfadd = ath_hal_hastsfadjust(ah); 659 sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah); 660 sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah); 661 if (ath_hal_hasfastframes(ah)) 662 ic->ic_caps |= IEEE80211_C_FF; 663 wmodes = ath_hal_getwirelessmodes(ah); 664 if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO)) 665 ic->ic_caps |= IEEE80211_C_TURBOP; 666#ifdef IEEE80211_SUPPORT_TDMA 667 if (ath_hal_macversion(ah) > 0x78) { 668 ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */ 669 ic->ic_tdma_update = ath_tdma_update; 670 } 671#endif 672 673 /* 674 * TODO: enforce that at least this many frames are available 675 * in the txbuf list before allowing data frames (raw or 676 * otherwise) to be transmitted. 677 */ 678 sc->sc_txq_data_minfree = 10; 679 /* 680 * Leave this as default to maintain legacy behaviour. 
681 * Shortening the cabq/mcastq may end up causing some 682 * undesirable behaviour. 683 */ 684 sc->sc_txq_mcastq_maxdepth = ath_txbuf; 685 686 /* 687 * Allow the TX and RX chainmasks to be overridden by 688 * environment variables and/or device.hints. 689 * 690 * This must be done early - before the hardware is 691 * calibrated or before the 802.11n stream calculation 692 * is done. 693 */ 694 if (resource_int_value(device_get_name(sc->sc_dev), 695 device_get_unit(sc->sc_dev), "rx_chainmask", 696 &rx_chainmask) == 0) { 697 device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n", 698 rx_chainmask); 699 (void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask); 700 } 701 if (resource_int_value(device_get_name(sc->sc_dev), 702 device_get_unit(sc->sc_dev), "tx_chainmask", 703 &tx_chainmask) == 0) { 704 device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n", 705 tx_chainmask); 706 (void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask); 707 } 708 709 /* 710 * Disable MRR with protected frames by default. 711 * Only 802.11n series NICs can handle this. 712 */ 713 sc->sc_mrrprot = 0; /* XXX should be a capability */ 714 715#ifdef ATH_ENABLE_11N 716 /* 717 * Query HT capabilities 718 */ 719 if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK && 720 (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) { 721 int rxs, txs; 722 723 device_printf(sc->sc_dev, "[HT] enabling HT modes\n"); 724 725 sc->sc_mrrprot = 1; /* XXX should be a capability */ 726 727 ic->ic_htcaps = IEEE80211_HTC_HT /* HT operation */ 728 | IEEE80211_HTC_AMPDU /* A-MPDU tx/rx */ 729 | IEEE80211_HTC_AMSDU /* A-MSDU tx/rx */ 730 | IEEE80211_HTCAP_MAXAMSDU_3839 731 /* max A-MSDU length */ 732 | IEEE80211_HTCAP_SMPS_OFF; /* SM power save off */ 733 ; 734 735 /* 736 * Enable short-GI for HT20 only if the hardware 737 * advertises support. 738 * Notably, anything earlier than the AR9287 doesn't. 739 */ 740 if ((ath_hal_getcapability(ah, 741 HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) && 742 (wmodes & HAL_MODE_HT20)) { 743 device_printf(sc->sc_dev, 744 "[HT] enabling short-GI in 20MHz mode\n"); 745 ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20; 746 } 747 748 if (wmodes & HAL_MODE_HT40) 749 ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40 750 | IEEE80211_HTCAP_SHORTGI40; 751 752 /* 753 * TX/RX streams need to be taken into account when 754 * negotiating which MCS rates it'll receive and 755 * what MCS rates are available for TX. 756 */ 757 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs); 758 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs); 759 760 ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask); 761 ath_hal_gettxchainmask(ah, &sc->sc_txchainmask); 762 763 ic->ic_txstream = txs; 764 ic->ic_rxstream = rxs; 765 766 (void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1, 767 &sc->sc_rts_aggr_limit); 768 if (sc->sc_rts_aggr_limit != (64 * 1024)) 769 device_printf(sc->sc_dev, 770 "[HT] RTS aggregates limited to %d KiB\n", 771 sc->sc_rts_aggr_limit / 1024); 772 773 device_printf(sc->sc_dev, 774 "[HT] %d RX streams; %d TX streams\n", rxs, txs); 775 } 776#endif 777 778 /* 779 * Initial aggregation settings. 780 */ 781 sc->sc_hwq_limit = ATH_AGGR_MIN_QDEPTH; 782 sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW; 783 sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH; 784 785 /* 786 * Check if the hardware requires PCI register serialisation. 787 * Some of the Owl based MACs require this. 
788 */ 789 if (mp_ncpus > 1 && 790 ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR, 791 0, NULL) == HAL_OK) { 792 sc->sc_ah->ah_config.ah_serialise_reg_war = 1; 793 device_printf(sc->sc_dev, 794 "Enabling register serialisation\n"); 795 } 796 797 /* 798 * Indicate we need the 802.11 header padded to a 799 * 32-bit boundary for 4-address and QoS frames. 800 */ 801 ic->ic_flags |= IEEE80211_F_DATAPAD; 802 803 /* 804 * Query the hal about antenna support. 805 */ 806 sc->sc_defant = ath_hal_getdefantenna(ah); 807 808 /* 809 * Not all chips have the VEOL support we want to 810 * use with IBSS beacons; check here for it. 811 */ 812 sc->sc_hasveol = ath_hal_hasveol(ah); 813 814 /* get mac address from hardware */ 815 ath_hal_getmac(ah, macaddr); 816 if (sc->sc_hasbmask) 817 ath_hal_getbssidmask(ah, sc->sc_hwbssidmask); 818 819 /* NB: used to size node table key mapping array */ 820 ic->ic_max_keyix = sc->sc_keymax; 821 /* call MI attach routine. */ 822 ieee80211_ifattach(ic, macaddr); 823 ic->ic_setregdomain = ath_setregdomain; 824 ic->ic_getradiocaps = ath_getradiocaps; 825 sc->sc_opmode = HAL_M_STA; 826 827 /* override default methods */ 828 ic->ic_newassoc = ath_newassoc; 829 ic->ic_updateslot = ath_updateslot; 830 ic->ic_wme.wme_update = ath_wme_update; 831 ic->ic_vap_create = ath_vap_create; 832 ic->ic_vap_delete = ath_vap_delete; 833 ic->ic_raw_xmit = ath_raw_xmit; 834 ic->ic_update_mcast = ath_update_mcast; 835 ic->ic_update_promisc = ath_update_promisc; 836 ic->ic_node_alloc = ath_node_alloc; 837 sc->sc_node_free = ic->ic_node_free; 838 ic->ic_node_free = ath_node_free; 839 sc->sc_node_cleanup = ic->ic_node_cleanup; 840 ic->ic_node_cleanup = ath_node_cleanup; 841 ic->ic_node_getsignal = ath_node_getsignal; 842 ic->ic_scan_start = ath_scan_start; 843 ic->ic_scan_end = ath_scan_end; 844 ic->ic_set_channel = ath_set_channel; 845#ifdef ATH_ENABLE_11N 846 /* 802.11n specific - but just override anyway */ 847 sc->sc_addba_request = ic->ic_addba_request; 848 sc->sc_addba_response = ic->ic_addba_response; 849 sc->sc_addba_stop = ic->ic_addba_stop; 850 sc->sc_bar_response = ic->ic_bar_response; 851 sc->sc_addba_response_timeout = ic->ic_addba_response_timeout; 852 853 ic->ic_addba_request = ath_addba_request; 854 ic->ic_addba_response = ath_addba_response; 855 ic->ic_addba_response_timeout = ath_addba_response_timeout; 856 ic->ic_addba_stop = ath_addba_stop; 857 ic->ic_bar_response = ath_bar_response; 858 859 ic->ic_update_chw = ath_update_chw; 860#endif /* ATH_ENABLE_11N */ 861 862#ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT 863 /* 864 * There's one vendor bitmap entry in the RX radiotap 865 * header; make sure that's taken into account. 866 */ 867 ieee80211_radiotap_attachv(ic, 868 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0, 869 ATH_TX_RADIOTAP_PRESENT, 870 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1, 871 ATH_RX_RADIOTAP_PRESENT); 872#else 873 /* 874 * No vendor bitmap/extensions are present. 875 */ 876 ieee80211_radiotap_attach(ic, 877 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 878 ATH_TX_RADIOTAP_PRESENT, 879 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 880 ATH_RX_RADIOTAP_PRESENT); 881#endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */ 882 883 /* 884 * Setup dynamic sysctl's now that country code and 885 * regdomain are available from the hal. 
886 */ 887 ath_sysctlattach(sc); 888 ath_sysctl_stats_attach(sc); 889 ath_sysctl_hal_attach(sc); 890 891 if (bootverbose) 892 ieee80211_announce(ic); 893 ath_announce(sc); 894 return 0; 895bad2: 896 ath_tx_cleanup(sc); 897 ath_desc_free(sc); 898 ath_txdma_teardown(sc); 899 ath_rxdma_teardown(sc); 900bad: 901 if (ah) 902 ath_hal_detach(ah); 903 if (ifp != NULL) 904 if_free(ifp); 905 sc->sc_invalid = 1; 906 return error; 907} 908 909int 910ath_detach(struct ath_softc *sc) 911{ 912 struct ifnet *ifp = sc->sc_ifp; 913 914 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 915 __func__, ifp->if_flags); 916 917 /* 918 * NB: the order of these is important: 919 * o stop the chip so no more interrupts will fire 920 * o call the 802.11 layer before detaching the hal to 921 * insure callbacks into the driver to delete global 922 * key cache entries can be handled 923 * o free the taskqueue which drains any pending tasks 924 * o reclaim the tx queue data structures after calling 925 * the 802.11 layer as we'll get called back to reclaim 926 * node state and potentially want to use them 927 * o to cleanup the tx queues the hal is called, so detach 928 * it last 929 * Other than that, it's straightforward... 930 */ 931 ath_stop(ifp); 932 ieee80211_ifdetach(ifp->if_l2com); 933 taskqueue_free(sc->sc_tq); 934#ifdef ATH_TX99_DIAG 935 if (sc->sc_tx99 != NULL) 936 sc->sc_tx99->detach(sc->sc_tx99); 937#endif 938 ath_rate_detach(sc->sc_rc); 939 940 ath_dfs_detach(sc); 941 ath_desc_free(sc); 942 ath_txdma_teardown(sc); 943 ath_rxdma_teardown(sc); 944 ath_tx_cleanup(sc); 945 ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */ 946 if_free(ifp); 947 948 return 0; 949} 950 951/* 952 * MAC address handling for multiple BSS on the same radio. 953 * The first vap uses the MAC address from the EEPROM. For 954 * subsequent vap's we set the U/L bit (bit 1) in the MAC 955 * address and use the next six bits as an index. 956 */ 957static void 958assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone) 959{ 960 int i; 961 962 if (clone && sc->sc_hasbmask) { 963 /* NB: we only do this if h/w supports multiple bssid */ 964 for (i = 0; i < 8; i++) 965 if ((sc->sc_bssidmask & (1<<i)) == 0) 966 break; 967 if (i != 0) 968 mac[0] |= (i << 2)|0x2; 969 } else 970 i = 0; 971 sc->sc_bssidmask |= 1<<i; 972 sc->sc_hwbssidmask[0] &= ~mac[0]; 973 if (i == 0) 974 sc->sc_nbssid0++; 975} 976 977static void 978reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN]) 979{ 980 int i = mac[0] >> 2; 981 uint8_t mask; 982 983 if (i != 0 || --sc->sc_nbssid0 == 0) { 984 sc->sc_bssidmask &= ~(1<<i); 985 /* recalculate bssid mask from remaining addresses */ 986 mask = 0xff; 987 for (i = 1; i < 8; i++) 988 if (sc->sc_bssidmask & (1<<i)) 989 mask &= ~((i<<2)|0x2); 990 sc->sc_hwbssidmask[0] |= mask; 991 } 992} 993 994/* 995 * Assign a beacon xmit slot. We try to space out 996 * assignments so when beacons are staggered the 997 * traffic coming out of the cab q has maximal time 998 * to go out before the next beacon is scheduled. 
999 */ 1000static int 1001assign_bslot(struct ath_softc *sc) 1002{ 1003 u_int slot, free; 1004 1005 free = 0; 1006 for (slot = 0; slot < ATH_BCBUF; slot++) 1007 if (sc->sc_bslot[slot] == NULL) { 1008 if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL && 1009 sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL) 1010 return slot; 1011 free = slot; 1012 /* NB: keep looking for a double slot */ 1013 } 1014 return free; 1015} 1016 1017static struct ieee80211vap * 1018ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 1019 enum ieee80211_opmode opmode, int flags, 1020 const uint8_t bssid[IEEE80211_ADDR_LEN], 1021 const uint8_t mac0[IEEE80211_ADDR_LEN]) 1022{ 1023 struct ath_softc *sc = ic->ic_ifp->if_softc; 1024 struct ath_vap *avp; 1025 struct ieee80211vap *vap; 1026 uint8_t mac[IEEE80211_ADDR_LEN]; 1027 int needbeacon, error; 1028 enum ieee80211_opmode ic_opmode; 1029 1030 avp = (struct ath_vap *) malloc(sizeof(struct ath_vap), 1031 M_80211_VAP, M_WAITOK | M_ZERO); 1032 needbeacon = 0; 1033 IEEE80211_ADDR_COPY(mac, mac0); 1034 1035 ATH_LOCK(sc); 1036 ic_opmode = opmode; /* default to opmode of new vap */ 1037 switch (opmode) { 1038 case IEEE80211_M_STA: 1039 if (sc->sc_nstavaps != 0) { /* XXX only 1 for now */ 1040 device_printf(sc->sc_dev, "only 1 sta vap supported\n"); 1041 goto bad; 1042 } 1043 if (sc->sc_nvaps) { 1044 /* 1045 * With multiple vaps we must fall back 1046 * to s/w beacon miss handling. 1047 */ 1048 flags |= IEEE80211_CLONE_NOBEACONS; 1049 } 1050 if (flags & IEEE80211_CLONE_NOBEACONS) { 1051 /* 1052 * Station mode w/o beacons are implemented w/ AP mode. 1053 */ 1054 ic_opmode = IEEE80211_M_HOSTAP; 1055 } 1056 break; 1057 case IEEE80211_M_IBSS: 1058 if (sc->sc_nvaps != 0) { /* XXX only 1 for now */ 1059 device_printf(sc->sc_dev, 1060 "only 1 ibss vap supported\n"); 1061 goto bad; 1062 } 1063 needbeacon = 1; 1064 break; 1065 case IEEE80211_M_AHDEMO: 1066#ifdef IEEE80211_SUPPORT_TDMA 1067 if (flags & IEEE80211_CLONE_TDMA) { 1068 if (sc->sc_nvaps != 0) { 1069 device_printf(sc->sc_dev, 1070 "only 1 tdma vap supported\n"); 1071 goto bad; 1072 } 1073 needbeacon = 1; 1074 flags |= IEEE80211_CLONE_NOBEACONS; 1075 } 1076 /* fall thru... */ 1077#endif 1078 case IEEE80211_M_MONITOR: 1079 if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) { 1080 /* 1081 * Adopt existing mode. Adding a monitor or ahdemo 1082 * vap to an existing configuration is of dubious 1083 * value but should be ok. 1084 */ 1085 /* XXX not right for monitor mode */ 1086 ic_opmode = ic->ic_opmode; 1087 } 1088 break; 1089 case IEEE80211_M_HOSTAP: 1090 case IEEE80211_M_MBSS: 1091 needbeacon = 1; 1092 break; 1093 case IEEE80211_M_WDS: 1094 if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) { 1095 device_printf(sc->sc_dev, 1096 "wds not supported in sta mode\n"); 1097 goto bad; 1098 } 1099 /* 1100 * Silently remove any request for a unique 1101 * bssid; WDS vap's always share the local 1102 * mac address. 1103 */ 1104 flags &= ~IEEE80211_CLONE_BSSID; 1105 if (sc->sc_nvaps == 0) 1106 ic_opmode = IEEE80211_M_HOSTAP; 1107 else 1108 ic_opmode = ic->ic_opmode; 1109 break; 1110 default: 1111 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); 1112 goto bad; 1113 } 1114 /* 1115 * Check that a beacon buffer is available; the code below assumes it. 1116 */ 1117 if (needbeacon & TAILQ_EMPTY(&sc->sc_bbuf)) { 1118 device_printf(sc->sc_dev, "no beacon buffer available\n"); 1119 goto bad; 1120 } 1121 1122 /* STA, AHDEMO? 
*/ 1123 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) { 1124 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID); 1125 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); 1126 } 1127 1128 vap = &avp->av_vap; 1129 /* XXX can't hold mutex across if_alloc */ 1130 ATH_UNLOCK(sc); 1131 error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, 1132 bssid, mac); 1133 ATH_LOCK(sc); 1134 if (error != 0) { 1135 device_printf(sc->sc_dev, "%s: error %d creating vap\n", 1136 __func__, error); 1137 goto bad2; 1138 } 1139 1140 /* h/w crypto support */ 1141 vap->iv_key_alloc = ath_key_alloc; 1142 vap->iv_key_delete = ath_key_delete; 1143 vap->iv_key_set = ath_key_set; 1144 vap->iv_key_update_begin = ath_key_update_begin; 1145 vap->iv_key_update_end = ath_key_update_end; 1146 1147 /* override various methods */ 1148 avp->av_recv_mgmt = vap->iv_recv_mgmt; 1149 vap->iv_recv_mgmt = ath_recv_mgmt; 1150 vap->iv_reset = ath_reset_vap; 1151 vap->iv_update_beacon = ath_beacon_update; 1152 avp->av_newstate = vap->iv_newstate; 1153 vap->iv_newstate = ath_newstate; 1154 avp->av_bmiss = vap->iv_bmiss; 1155 vap->iv_bmiss = ath_bmiss_vap; 1156 1157 /* Set default parameters */ 1158 1159 /* 1160 * Anything earlier than some AR9300 series MACs don't 1161 * support a smaller MPDU density. 1162 */ 1163 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8; 1164 /* 1165 * All NICs can handle the maximum size, however 1166 * AR5416 based MACs can only TX aggregates w/ RTS 1167 * protection when the total aggregate size is <= 8k. 1168 * However, for now that's enforced by the TX path. 1169 */ 1170 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K; 1171 1172 avp->av_bslot = -1; 1173 if (needbeacon) { 1174 /* 1175 * Allocate beacon state and setup the q for buffered 1176 * multicast frames. We know a beacon buffer is 1177 * available because we checked above. 1178 */ 1179 avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf); 1180 TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list); 1181 if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) { 1182 /* 1183 * Assign the vap to a beacon xmit slot. As above 1184 * this cannot fail to find a free one. 1185 */ 1186 avp->av_bslot = assign_bslot(sc); 1187 KASSERT(sc->sc_bslot[avp->av_bslot] == NULL, 1188 ("beacon slot %u not empty", avp->av_bslot)); 1189 sc->sc_bslot[avp->av_bslot] = vap; 1190 sc->sc_nbcnvaps++; 1191 } 1192 if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) { 1193 /* 1194 * Multple vaps are to transmit beacons and we 1195 * have h/w support for TSF adjusting; enable 1196 * use of staggered beacons. 1197 */ 1198 sc->sc_stagbeacons = 1; 1199 } 1200 ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ); 1201 } 1202 1203 ic->ic_opmode = ic_opmode; 1204 if (opmode != IEEE80211_M_WDS) { 1205 sc->sc_nvaps++; 1206 if (opmode == IEEE80211_M_STA) 1207 sc->sc_nstavaps++; 1208 if (opmode == IEEE80211_M_MBSS) 1209 sc->sc_nmeshvaps++; 1210 } 1211 switch (ic_opmode) { 1212 case IEEE80211_M_IBSS: 1213 sc->sc_opmode = HAL_M_IBSS; 1214 break; 1215 case IEEE80211_M_STA: 1216 sc->sc_opmode = HAL_M_STA; 1217 break; 1218 case IEEE80211_M_AHDEMO: 1219#ifdef IEEE80211_SUPPORT_TDMA 1220 if (vap->iv_caps & IEEE80211_C_TDMA) { 1221 sc->sc_tdma = 1; 1222 /* NB: disable tsf adjust */ 1223 sc->sc_stagbeacons = 0; 1224 } 1225 /* 1226 * NB: adhoc demo mode is a pseudo mode; to the hal it's 1227 * just ap mode. 1228 */ 1229 /* fall thru... 
*/ 1230#endif 1231 case IEEE80211_M_HOSTAP: 1232 case IEEE80211_M_MBSS: 1233 sc->sc_opmode = HAL_M_HOSTAP; 1234 break; 1235 case IEEE80211_M_MONITOR: 1236 sc->sc_opmode = HAL_M_MONITOR; 1237 break; 1238 default: 1239 /* XXX should not happen */ 1240 break; 1241 } 1242 if (sc->sc_hastsfadd) { 1243 /* 1244 * Configure whether or not TSF adjust should be done. 1245 */ 1246 ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons); 1247 } 1248 if (flags & IEEE80211_CLONE_NOBEACONS) { 1249 /* 1250 * Enable s/w beacon miss handling. 1251 */ 1252 sc->sc_swbmiss = 1; 1253 } 1254 ATH_UNLOCK(sc); 1255 1256 /* complete setup */ 1257 ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status); 1258 return vap; 1259bad2: 1260 reclaim_address(sc, mac); 1261 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); 1262bad: 1263 free(avp, M_80211_VAP); 1264 ATH_UNLOCK(sc); 1265 return NULL; 1266} 1267 1268static void 1269ath_vap_delete(struct ieee80211vap *vap) 1270{ 1271 struct ieee80211com *ic = vap->iv_ic; 1272 struct ifnet *ifp = ic->ic_ifp; 1273 struct ath_softc *sc = ifp->if_softc; 1274 struct ath_hal *ah = sc->sc_ah; 1275 struct ath_vap *avp = ATH_VAP(vap); 1276 1277 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); 1278 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1279 /* 1280 * Quiesce the hardware while we remove the vap. In 1281 * particular we need to reclaim all references to 1282 * the vap state by any frames pending on the tx queues. 1283 */ 1284 ath_hal_intrset(ah, 0); /* disable interrupts */ 1285 ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */ 1286 /* XXX Do all frames from all vaps/nodes need draining here? */ 1287 ath_stoprecv(sc, 1); /* stop recv side */ 1288 } 1289 1290 ieee80211_vap_detach(vap); 1291 1292 /* 1293 * XXX Danger Will Robinson! Danger! 1294 * 1295 * Because ieee80211_vap_detach() can queue a frame (the station 1296 * diassociate message?) after we've drained the TXQ and 1297 * flushed the software TXQ, we will end up with a frame queued 1298 * to a node whose vap is about to be freed. 1299 * 1300 * To work around this, flush the hardware/software again. 1301 * This may be racy - the ath task may be running and the packet 1302 * may be being scheduled between sw->hw txq. Tsk. 1303 * 1304 * TODO: figure out why a new node gets allocated somewhere around 1305 * here (after the ath_tx_swq() call; and after an ath_stop_locked() 1306 * call!) 1307 */ 1308 1309 ath_draintxq(sc, ATH_RESET_DEFAULT); 1310 1311 ATH_LOCK(sc); 1312 /* 1313 * Reclaim beacon state. Note this must be done before 1314 * the vap instance is reclaimed as we may have a reference 1315 * to it in the buffer for the beacon frame. 1316 */ 1317 if (avp->av_bcbuf != NULL) { 1318 if (avp->av_bslot != -1) { 1319 sc->sc_bslot[avp->av_bslot] = NULL; 1320 sc->sc_nbcnvaps--; 1321 } 1322 ath_beacon_return(sc, avp->av_bcbuf); 1323 avp->av_bcbuf = NULL; 1324 if (sc->sc_nbcnvaps == 0) { 1325 sc->sc_stagbeacons = 0; 1326 if (sc->sc_hastsfadd) 1327 ath_hal_settsfadjust(sc->sc_ah, 0); 1328 } 1329 /* 1330 * Reclaim any pending mcast frames for the vap. 1331 */ 1332 ath_tx_draintxq(sc, &avp->av_mcastq); 1333 ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq); 1334 } 1335 /* 1336 * Update bookkeeping. 
1337 */ 1338 if (vap->iv_opmode == IEEE80211_M_STA) { 1339 sc->sc_nstavaps--; 1340 if (sc->sc_nstavaps == 0 && sc->sc_swbmiss) 1341 sc->sc_swbmiss = 0; 1342 } else if (vap->iv_opmode == IEEE80211_M_HOSTAP || 1343 vap->iv_opmode == IEEE80211_M_MBSS) { 1344 reclaim_address(sc, vap->iv_myaddr); 1345 ath_hal_setbssidmask(ah, sc->sc_hwbssidmask); 1346 if (vap->iv_opmode == IEEE80211_M_MBSS) 1347 sc->sc_nmeshvaps--; 1348 } 1349 if (vap->iv_opmode != IEEE80211_M_WDS) 1350 sc->sc_nvaps--; 1351#ifdef IEEE80211_SUPPORT_TDMA 1352 /* TDMA operation ceases when the last vap is destroyed */ 1353 if (sc->sc_tdma && sc->sc_nvaps == 0) { 1354 sc->sc_tdma = 0; 1355 sc->sc_swbmiss = 0; 1356 } 1357#endif 1358 free(avp, M_80211_VAP); 1359 1360 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1361 /* 1362 * Restart rx+tx machines if still running (RUNNING will 1363 * be reset if we just destroyed the last vap). 1364 */ 1365 if (ath_startrecv(sc) != 0) 1366 if_printf(ifp, "%s: unable to restart recv logic\n", 1367 __func__); 1368 if (sc->sc_beacons) { /* restart beacons */ 1369#ifdef IEEE80211_SUPPORT_TDMA 1370 if (sc->sc_tdma) 1371 ath_tdma_config(sc, NULL); 1372 else 1373#endif 1374 ath_beacon_config(sc, NULL); 1375 } 1376 ath_hal_intrset(ah, sc->sc_imask); 1377 } 1378 ATH_UNLOCK(sc); 1379} 1380 1381void 1382ath_suspend(struct ath_softc *sc) 1383{ 1384 struct ifnet *ifp = sc->sc_ifp; 1385 struct ieee80211com *ic = ifp->if_l2com; 1386 1387 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1388 __func__, ifp->if_flags); 1389 1390 sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0; 1391 1392 ieee80211_suspend_all(ic); 1393 /* 1394 * NB: don't worry about putting the chip in low power 1395 * mode; pci will power off our socket on suspend and 1396 * CardBus detaches the device. 1397 */ 1398 1399 /* 1400 * XXX ensure none of the taskqueues are running 1401 * XXX ensure sc_invalid is 1 1402 * XXX ensure the calibration callout is disabled 1403 */ 1404 1405 /* Disable the PCIe PHY, complete with workarounds */ 1406 ath_hal_enablepcie(sc->sc_ah, 1, 1); 1407} 1408 1409/* 1410 * Reset the key cache since some parts do not reset the 1411 * contents on resume. First we clear all entries, then 1412 * re-load keys that the 802.11 layer assumes are setup 1413 * in h/w. 1414 */ 1415static void 1416ath_reset_keycache(struct ath_softc *sc) 1417{ 1418 struct ifnet *ifp = sc->sc_ifp; 1419 struct ieee80211com *ic = ifp->if_l2com; 1420 struct ath_hal *ah = sc->sc_ah; 1421 int i; 1422 1423 for (i = 0; i < sc->sc_keymax; i++) 1424 ath_hal_keyreset(ah, i); 1425 ieee80211_crypto_reload_keys(ic); 1426} 1427 1428void 1429ath_resume(struct ath_softc *sc) 1430{ 1431 struct ifnet *ifp = sc->sc_ifp; 1432 struct ieee80211com *ic = ifp->if_l2com; 1433 struct ath_hal *ah = sc->sc_ah; 1434 HAL_STATUS status; 1435 1436 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1437 __func__, ifp->if_flags); 1438 1439 /* Re-enable PCIe, re-enable the PCIe bus */ 1440 ath_hal_enablepcie(ah, 0, 0); 1441 1442 /* 1443 * Must reset the chip before we reload the 1444 * keycache as we were powered down on suspend. 1445 */ 1446 ath_hal_reset(ah, sc->sc_opmode, 1447 sc->sc_curchan != NULL ? 
sc->sc_curchan : ic->ic_curchan, 1448 AH_FALSE, &status); 1449 ath_reset_keycache(sc); 1450 1451 /* Let DFS at it in case it's a DFS channel */ 1452 ath_dfs_radar_enable(sc, ic->ic_curchan); 1453 1454 /* Restore the LED configuration */ 1455 ath_led_config(sc); 1456 ath_hal_setledstate(ah, HAL_LED_INIT); 1457 1458 if (sc->sc_resume_up) 1459 ieee80211_resume_all(ic); 1460 1461 /* XXX beacons ? */ 1462} 1463 1464void 1465ath_shutdown(struct ath_softc *sc) 1466{ 1467 struct ifnet *ifp = sc->sc_ifp; 1468 1469 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1470 __func__, ifp->if_flags); 1471 1472 ath_stop(ifp); 1473 /* NB: no point powering down chip as we're about to reboot */ 1474} 1475 1476/* 1477 * Interrupt handler. Most of the actual processing is deferred. 1478 */ 1479void 1480ath_intr(void *arg) 1481{ 1482 struct ath_softc *sc = arg; 1483 struct ifnet *ifp = sc->sc_ifp; 1484 struct ath_hal *ah = sc->sc_ah; 1485 HAL_INT status = 0; 1486 uint32_t txqs; 1487 1488 /* 1489 * If we're inside a reset path, just print a warning and 1490 * clear the ISR. The reset routine will finish it for us. 1491 */ 1492 ATH_PCU_LOCK(sc); 1493 if (sc->sc_inreset_cnt) { 1494 HAL_INT status; 1495 ath_hal_getisr(ah, &status); /* clear ISR */ 1496 ath_hal_intrset(ah, 0); /* disable further intr's */ 1497 DPRINTF(sc, ATH_DEBUG_ANY, 1498 "%s: in reset, ignoring: status=0x%x\n", 1499 __func__, status); 1500 ATH_PCU_UNLOCK(sc); 1501 return; 1502 } 1503 1504 if (sc->sc_invalid) { 1505 /* 1506 * The hardware is not ready/present, don't touch anything. 1507 * Note this can happen early on if the IRQ is shared. 1508 */ 1509 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__); 1510 ATH_PCU_UNLOCK(sc); 1511 return; 1512 } 1513 if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */ 1514 ATH_PCU_UNLOCK(sc); 1515 return; 1516 } 1517 1518 if ((ifp->if_flags & IFF_UP) == 0 || 1519 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1520 HAL_INT status; 1521 1522 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 1523 __func__, ifp->if_flags); 1524 ath_hal_getisr(ah, &status); /* clear ISR */ 1525 ath_hal_intrset(ah, 0); /* disable further intr's */ 1526 ATH_PCU_UNLOCK(sc); 1527 return; 1528 } 1529 1530 /* 1531 * Figure out the reason(s) for the interrupt. Note 1532 * that the hal returns a pseudo-ISR that may include 1533 * bits we haven't explicitly enabled so we mask the 1534 * value to insure we only process bits we requested. 1535 */ 1536 ath_hal_getisr(ah, &status); /* NB: clears ISR too */ 1537 DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status); 1538 CTR1(ATH_KTR_INTR, "ath_intr: mask=0x%.8x", status); 1539#ifdef ATH_KTR_INTR_DEBUG 1540 CTR5(ATH_KTR_INTR, 1541 "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x", 1542 ah->ah_intrstate[0], 1543 ah->ah_intrstate[1], 1544 ah->ah_intrstate[2], 1545 ah->ah_intrstate[3], 1546 ah->ah_intrstate[6]); 1547#endif 1548 1549 /* Squirrel away SYNC interrupt debugging */ 1550 if (ah->ah_syncstate != 0) { 1551 int i; 1552 for (i = 0; i < 32; i++) 1553 if (ah->ah_syncstate & (i << i)) 1554 sc->sc_intr_stats.sync_intr[i]++; 1555 } 1556 1557 status &= sc->sc_imask; /* discard unasked for bits */ 1558 1559 /* Short-circuit un-handled interrupts */ 1560 if (status == 0x0) { 1561 ATH_PCU_UNLOCK(sc); 1562 return; 1563 } 1564 1565 /* 1566 * Take a note that we're inside the interrupt handler, so 1567 * the reset routines know to wait. 1568 */ 1569 sc->sc_intr_cnt++; 1570 ATH_PCU_UNLOCK(sc); 1571 1572 /* 1573 * Handle the interrupt. 
We won't run concurrent with the reset 1574 * or channel change routines as they'll wait for sc_intr_cnt 1575 * to be 0 before continuing. 1576 */ 1577 if (status & HAL_INT_FATAL) { 1578 sc->sc_stats.ast_hardware++; 1579 ath_hal_intrset(ah, 0); /* disable intr's until reset */ 1580 taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask); 1581 } else { 1582 if (status & HAL_INT_SWBA) { 1583 /* 1584 * Software beacon alert--time to send a beacon. 1585 * Handle beacon transmission directly; deferring 1586 * this is too slow to meet timing constraints 1587 * under load. 1588 */ 1589#ifdef IEEE80211_SUPPORT_TDMA 1590 if (sc->sc_tdma) { 1591 if (sc->sc_tdmaswba == 0) { 1592 struct ieee80211com *ic = ifp->if_l2com; 1593 struct ieee80211vap *vap = 1594 TAILQ_FIRST(&ic->ic_vaps); 1595 ath_tdma_beacon_send(sc, vap); 1596 sc->sc_tdmaswba = 1597 vap->iv_tdma->tdma_bintval; 1598 } else 1599 sc->sc_tdmaswba--; 1600 } else 1601#endif 1602 { 1603 ath_beacon_proc(sc, 0); 1604#ifdef IEEE80211_SUPPORT_SUPERG 1605 /* 1606 * Schedule the rx taskq in case there's no 1607 * traffic so any frames held on the staging 1608 * queue are aged and potentially flushed. 1609 */ 1610 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 1611#endif 1612 } 1613 } 1614 if (status & HAL_INT_RXEOL) { 1615 int imask; 1616 CTR0(ATH_KTR_ERR, "ath_intr: RXEOL"); 1617 ATH_PCU_LOCK(sc); 1618 /* 1619 * NB: the hardware should re-read the link when 1620 * RXE bit is written, but it doesn't work at 1621 * least on older hardware revs. 1622 */ 1623 sc->sc_stats.ast_rxeol++; 1624 /* 1625 * Disable RXEOL/RXORN - prevent an interrupt 1626 * storm until the PCU logic can be reset. 1627 * In case the interface is reset some other 1628 * way before "sc_kickpcu" is called, don't 1629 * modify sc_imask - that way if it is reset 1630 * by a call to ath_reset() somehow, the 1631 * interrupt mask will be correctly reprogrammed. 1632 */ 1633 imask = sc->sc_imask; 1634 imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN); 1635 ath_hal_intrset(ah, imask); 1636 /* 1637 * Only blank sc_rxlink if we've not yet kicked 1638 * the PCU. 1639 * 1640 * This isn't entirely correct - the correct solution 1641 * would be to have a PCU lock and engage that for 1642 * the duration of the PCU fiddling; which would include 1643 * running the RX process. Otherwise we could end up 1644 * messing up the RX descriptor chain and making the 1645 * RX desc list much shorter. 1646 */ 1647 if (! sc->sc_kickpcu) 1648 sc->sc_rxlink = NULL; 1649 sc->sc_kickpcu = 1; 1650 /* 1651 * Enqueue an RX proc, to handled whatever 1652 * is in the RX queue. 1653 * This will then kick the PCU. 1654 */ 1655 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 1656 ATH_PCU_UNLOCK(sc); 1657 } 1658 if (status & HAL_INT_TXURN) { 1659 sc->sc_stats.ast_txurn++; 1660 /* bump tx trigger level */ 1661 ath_hal_updatetxtriglevel(ah, AH_TRUE); 1662 } 1663 /* 1664 * Handle both the legacy and RX EDMA interrupt bits. 1665 * Note that HAL_INT_RXLP is also HAL_INT_RXDESC. 1666 */ 1667 if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) { 1668 sc->sc_stats.ast_rx_intr++; 1669 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 1670 } 1671 if (status & HAL_INT_TX) { 1672 sc->sc_stats.ast_tx_intr++; 1673 /* 1674 * Grab all the currently set bits in the HAL txq bitmap 1675 * and blank them. This is the only place we should be 1676 * doing this. 1677 */ 1678 if (! 
sc->sc_isedma) { 1679 ATH_PCU_LOCK(sc); 1680 txqs = 0xffffffff; 1681 ath_hal_gettxintrtxqs(sc->sc_ah, &txqs); 1682 sc->sc_txq_active |= txqs; 1683 ATH_PCU_UNLOCK(sc); 1684 } 1685 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask); 1686 } 1687 if (status & HAL_INT_BMISS) { 1688 sc->sc_stats.ast_bmiss++; 1689 taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask); 1690 } 1691 if (status & HAL_INT_GTT) 1692 sc->sc_stats.ast_tx_timeout++; 1693 if (status & HAL_INT_CST) 1694 sc->sc_stats.ast_tx_cst++; 1695 if (status & HAL_INT_MIB) { 1696 sc->sc_stats.ast_mib++; 1697 ATH_PCU_LOCK(sc); 1698 /* 1699 * Disable interrupts until we service the MIB 1700 * interrupt; otherwise it will continue to fire. 1701 */ 1702 ath_hal_intrset(ah, 0); 1703 /* 1704 * Let the hal handle the event. We assume it will 1705 * clear whatever condition caused the interrupt. 1706 */ 1707 ath_hal_mibevent(ah, &sc->sc_halstats); 1708 /* 1709 * Don't reset the interrupt if we've just 1710 * kicked the PCU, or we may get a nested 1711 * RXEOL before the rxproc has had a chance 1712 * to run. 1713 */ 1714 if (sc->sc_kickpcu == 0) 1715 ath_hal_intrset(ah, sc->sc_imask); 1716 ATH_PCU_UNLOCK(sc); 1717 } 1718 if (status & HAL_INT_RXORN) { 1719 /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */ 1720 CTR0(ATH_KTR_ERR, "ath_intr: RXORN"); 1721 sc->sc_stats.ast_rxorn++; 1722 } 1723 } 1724 ATH_PCU_LOCK(sc); 1725 sc->sc_intr_cnt--; 1726 ATH_PCU_UNLOCK(sc); 1727} 1728 1729static void 1730ath_fatal_proc(void *arg, int pending) 1731{ 1732 struct ath_softc *sc = arg; 1733 struct ifnet *ifp = sc->sc_ifp; 1734 u_int32_t *state; 1735 u_int32_t len; 1736 void *sp; 1737 1738 if_printf(ifp, "hardware error; resetting\n"); 1739 /* 1740 * Fatal errors are unrecoverable. Typically these 1741 * are caused by DMA errors. Collect h/w state from 1742 * the hal so we can diagnose what's going on. 1743 */ 1744 if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) { 1745 KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len)); 1746 state = sp; 1747 if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n", 1748 state[0], state[1] , state[2], state[3], 1749 state[4], state[5]); 1750 } 1751 ath_reset(ifp, ATH_RESET_NOLOSS); 1752} 1753 1754static void 1755ath_bmiss_vap(struct ieee80211vap *vap) 1756{ 1757 /* 1758 * Workaround phantom bmiss interrupts by sanity-checking 1759 * the time of our last rx'd frame. If it is within the 1760 * beacon miss interval then ignore the interrupt. If it's 1761 * truly a bmiss we'll get another interrupt soon and that'll 1762 * be dispatched up for processing. Note this applies only 1763 * for h/w beacon miss events. 
1764 */ 1765 if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) { 1766 struct ifnet *ifp = vap->iv_ic->ic_ifp; 1767 struct ath_softc *sc = ifp->if_softc; 1768 u_int64_t lastrx = sc->sc_lastrx; 1769 u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah); 1770 /* XXX should take a locked ref to iv_bss */ 1771 u_int bmisstimeout = 1772 vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024; 1773 1774 DPRINTF(sc, ATH_DEBUG_BEACON, 1775 "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n", 1776 __func__, (unsigned long long) tsf, 1777 (unsigned long long)(tsf - lastrx), 1778 (unsigned long long) lastrx, bmisstimeout); 1779 1780 if (tsf - lastrx <= bmisstimeout) { 1781 sc->sc_stats.ast_bmiss_phantom++; 1782 return; 1783 } 1784 } 1785 ATH_VAP(vap)->av_bmiss(vap); 1786} 1787 1788static int 1789ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs) 1790{ 1791 uint32_t rsize; 1792 void *sp; 1793 1794 if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize)) 1795 return 0; 1796 KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize)); 1797 *hangs = *(uint32_t *)sp; 1798 return 1; 1799} 1800 1801static void 1802ath_bmiss_proc(void *arg, int pending) 1803{ 1804 struct ath_softc *sc = arg; 1805 struct ifnet *ifp = sc->sc_ifp; 1806 uint32_t hangs; 1807 1808 DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending); 1809 1810 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) { 1811 if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs); 1812 ath_reset(ifp, ATH_RESET_NOLOSS); 1813 } else 1814 ieee80211_beacon_miss(ifp->if_l2com); 1815} 1816 1817/* 1818 * Handle TKIP MIC setup to deal hardware that doesn't do MIC 1819 * calcs together with WME. If necessary disable the crypto 1820 * hardware and mark the 802.11 state so keys will be setup 1821 * with the MIC work done in software. 1822 */ 1823static void 1824ath_settkipmic(struct ath_softc *sc) 1825{ 1826 struct ifnet *ifp = sc->sc_ifp; 1827 struct ieee80211com *ic = ifp->if_l2com; 1828 1829 if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) { 1830 if (ic->ic_flags & IEEE80211_F_WME) { 1831 ath_hal_settkipmic(sc->sc_ah, AH_FALSE); 1832 ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC; 1833 } else { 1834 ath_hal_settkipmic(sc->sc_ah, AH_TRUE); 1835 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 1836 } 1837 } 1838} 1839 1840static void 1841ath_init(void *arg) 1842{ 1843 struct ath_softc *sc = (struct ath_softc *) arg; 1844 struct ifnet *ifp = sc->sc_ifp; 1845 struct ieee80211com *ic = ifp->if_l2com; 1846 struct ath_hal *ah = sc->sc_ah; 1847 HAL_STATUS status; 1848 1849 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 1850 __func__, ifp->if_flags); 1851 1852 ATH_LOCK(sc); 1853 /* 1854 * Stop anything previously setup. This is safe 1855 * whether this is the first time through or not. 1856 */ 1857 ath_stop_locked(ifp); 1858 1859 /* 1860 * The basic interface to setting the hardware in a good 1861 * state is ``reset''. On return the hardware is known to 1862 * be powered up and with interrupts disabled. This must 1863 * be followed by initialization of the appropriate bits 1864 * and then setup of the interrupt mask. 
1865 */ 1866 ath_settkipmic(sc); 1867 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) { 1868 if_printf(ifp, "unable to reset hardware; hal status %u\n", 1869 status); 1870 ATH_UNLOCK(sc); 1871 return; 1872 } 1873 ath_chan_change(sc, ic->ic_curchan); 1874 1875 /* Let DFS at it in case it's a DFS channel */ 1876 ath_dfs_radar_enable(sc, ic->ic_curchan); 1877 1878 /* 1879 * Likewise this is set during reset so update 1880 * state cached in the driver. 1881 */ 1882 sc->sc_diversity = ath_hal_getdiversity(ah); 1883 sc->sc_lastlongcal = 0; 1884 sc->sc_resetcal = 1; 1885 sc->sc_lastcalreset = 0; 1886 sc->sc_lastani = 0; 1887 sc->sc_lastshortcal = 0; 1888 sc->sc_doresetcal = AH_FALSE; 1889 /* 1890 * Beacon timers were cleared here; give ath_newstate() 1891 * a hint that the beacon timers should be poked when 1892 * things transition to the RUN state. 1893 */ 1894 sc->sc_beacons = 0; 1895 1896 /* 1897 * Setup the hardware after reset: the key cache 1898 * is filled as needed and the receive engine is 1899 * set going. Frame transmit is handled entirely 1900 * in the frame output path; there's nothing to do 1901 * here except setup the interrupt mask. 1902 */ 1903 if (ath_startrecv(sc) != 0) { 1904 if_printf(ifp, "unable to start recv logic\n"); 1905 ATH_UNLOCK(sc); 1906 return; 1907 } 1908 1909 /* 1910 * Enable interrupts. 1911 */ 1912 sc->sc_imask = HAL_INT_RX | HAL_INT_TX 1913 | HAL_INT_RXEOL | HAL_INT_RXORN 1914 | HAL_INT_FATAL | HAL_INT_GLOBAL; 1915 1916 /* 1917 * Enable RX EDMA bits. Note these overlap with 1918 * HAL_INT_RX and HAL_INT_RXDESC respectively. 1919 */ 1920 if (sc->sc_isedma) 1921 sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP); 1922 1923 /* 1924 * Enable MIB interrupts when there are hardware phy counters. 1925 * Note we only do this (at the moment) for station mode. 1926 */ 1927 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) 1928 sc->sc_imask |= HAL_INT_MIB; 1929 1930 /* Enable global TX timeout and carrier sense timeout if available */ 1931 if (ath_hal_gtxto_supported(ah)) 1932 sc->sc_imask |= HAL_INT_GTT; 1933 1934 DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n", 1935 __func__, sc->sc_imask); 1936 1937 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1938 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc); 1939 ath_hal_intrset(ah, sc->sc_imask); 1940 1941 ATH_UNLOCK(sc); 1942 1943#ifdef ATH_TX99_DIAG 1944 if (sc->sc_tx99 != NULL) 1945 sc->sc_tx99->start(sc->sc_tx99); 1946 else 1947#endif 1948 ieee80211_start_all(ic); /* start all vap's */ 1949} 1950 1951static void 1952ath_stop_locked(struct ifnet *ifp) 1953{ 1954 struct ath_softc *sc = ifp->if_softc; 1955 struct ath_hal *ah = sc->sc_ah; 1956 1957 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n", 1958 __func__, sc->sc_invalid, ifp->if_flags); 1959 1960 ATH_LOCK_ASSERT(sc); 1961 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1962 /* 1963 * Shutdown the hardware and driver: 1964 * reset 802.11 state machine 1965 * turn off timers 1966 * disable interrupts 1967 * turn off the radio 1968 * clear transmit machinery 1969 * clear receive machinery 1970 * drain and release tx queues 1971 * reclaim beacon resources 1972 * power down hardware 1973 * 1974 * Note that some of this work is not possible if the 1975 * hardware is gone (invalid). 
1976 */ 1977#ifdef ATH_TX99_DIAG 1978 if (sc->sc_tx99 != NULL) 1979 sc->sc_tx99->stop(sc->sc_tx99); 1980#endif 1981 callout_stop(&sc->sc_wd_ch); 1982 sc->sc_wd_timer = 0; 1983 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1984 if (!sc->sc_invalid) { 1985 if (sc->sc_softled) { 1986 callout_stop(&sc->sc_ledtimer); 1987 ath_hal_gpioset(ah, sc->sc_ledpin, 1988 !sc->sc_ledon); 1989 sc->sc_blinking = 0; 1990 } 1991 ath_hal_intrset(ah, 0); 1992 } 1993 ath_draintxq(sc, ATH_RESET_DEFAULT); 1994 if (!sc->sc_invalid) { 1995 ath_stoprecv(sc, 1); 1996 ath_hal_phydisable(ah); 1997 } else 1998 sc->sc_rxlink = NULL; 1999 ath_beacon_free(sc); /* XXX not needed */ 2000 } 2001} 2002 2003#define MAX_TXRX_ITERATIONS 1000 2004static void 2005ath_txrx_stop_locked(struct ath_softc *sc) 2006{ 2007 int i = MAX_TXRX_ITERATIONS; 2008 2009 ATH_UNLOCK_ASSERT(sc); 2010 ATH_PCU_LOCK_ASSERT(sc); 2011 2012 /* 2013 * Sleep until all the pending operations have completed. 2014 * 2015 * The caller must ensure that reset has been incremented 2016 * or the pending operations may continue being queued. 2017 */ 2018 while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt || 2019 sc->sc_txstart_cnt || sc->sc_intr_cnt) { 2020 if (i <= 0) 2021 break; 2022 msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 1); 2023 i--; 2024 } 2025 2026 if (i <= 0) 2027 device_printf(sc->sc_dev, 2028 "%s: didn't finish after %d iterations\n", 2029 __func__, MAX_TXRX_ITERATIONS); 2030} 2031#undef MAX_TXRX_ITERATIONS 2032 2033#if 0 2034static void 2035ath_txrx_stop(struct ath_softc *sc) 2036{ 2037 ATH_UNLOCK_ASSERT(sc); 2038 ATH_PCU_UNLOCK_ASSERT(sc); 2039 2040 ATH_PCU_LOCK(sc); 2041 ath_txrx_stop_locked(sc); 2042 ATH_PCU_UNLOCK(sc); 2043} 2044#endif 2045 2046static void 2047ath_txrx_start(struct ath_softc *sc) 2048{ 2049 2050 taskqueue_unblock(sc->sc_tq); 2051} 2052 2053/* 2054 * Grab the reset lock, and wait around until noone else 2055 * is trying to do anything with it. 2056 * 2057 * This is totally horrible but we can't hold this lock for 2058 * long enough to do TX/RX or we end up with net80211/ip stack 2059 * LORs and eventual deadlock. 2060 * 2061 * "dowait" signals whether to spin, waiting for the reset 2062 * lock count to reach 0. This should (for now) only be used 2063 * during the reset path, as the rest of the code may not 2064 * be locking-reentrant enough to behave correctly. 2065 * 2066 * Another, cleaner way should be found to serialise all of 2067 * these operations. 2068 */ 2069#define MAX_RESET_ITERATIONS 10 2070static int 2071ath_reset_grablock(struct ath_softc *sc, int dowait) 2072{ 2073 int w = 0; 2074 int i = MAX_RESET_ITERATIONS; 2075 2076 ATH_PCU_LOCK_ASSERT(sc); 2077 do { 2078 if (sc->sc_inreset_cnt == 0) { 2079 w = 1; 2080 break; 2081 } 2082 if (dowait == 0) { 2083 w = 0; 2084 break; 2085 } 2086 ATH_PCU_UNLOCK(sc); 2087 pause("ath_reset_grablock", 1); 2088 i--; 2089 ATH_PCU_LOCK(sc); 2090 } while (i > 0); 2091 2092 /* 2093 * We always increment the refcounter, regardless 2094 * of whether we succeeded to get it in an exclusive 2095 * way. 
2096 */ 2097 sc->sc_inreset_cnt++; 2098 2099 if (i <= 0) 2100 device_printf(sc->sc_dev, 2101 "%s: didn't finish after %d iterations\n", 2102 __func__, MAX_RESET_ITERATIONS); 2103 2104 if (w == 0) 2105 device_printf(sc->sc_dev, 2106 "%s: warning, recursive reset path!\n", 2107 __func__); 2108 2109 return w; 2110} 2111#undef MAX_RESET_ITERATIONS 2112 2113/* 2114 * XXX TODO: write ath_reset_releaselock 2115 */ 2116 2117static void 2118ath_stop(struct ifnet *ifp) 2119{ 2120 struct ath_softc *sc = ifp->if_softc; 2121 2122 ATH_LOCK(sc); 2123 ath_stop_locked(ifp); 2124 ATH_UNLOCK(sc); 2125} 2126 2127/* 2128 * Reset the hardware w/o losing operational state. This is 2129 * basically a more efficient way of doing ath_stop, ath_init, 2130 * followed by state transitions to the current 802.11 2131 * operational state. Used to recover from various errors and 2132 * to reset or reload hardware state. 2133 */ 2134int 2135ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type) 2136{ 2137 struct ath_softc *sc = ifp->if_softc; 2138 struct ieee80211com *ic = ifp->if_l2com; 2139 struct ath_hal *ah = sc->sc_ah; 2140 HAL_STATUS status; 2141 int i; 2142 2143 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); 2144 2145 /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */ 2146 ATH_PCU_UNLOCK_ASSERT(sc); 2147 ATH_UNLOCK_ASSERT(sc); 2148 2149 /* Try to (stop any further TX/RX from occuring */ 2150 taskqueue_block(sc->sc_tq); 2151 2152 ATH_PCU_LOCK(sc); 2153 ath_hal_intrset(ah, 0); /* disable interrupts */ 2154 ath_txrx_stop_locked(sc); /* Ensure TX/RX is stopped */ 2155 if (ath_reset_grablock(sc, 1) == 0) { 2156 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", 2157 __func__); 2158 } 2159 ATH_PCU_UNLOCK(sc); 2160 2161 /* 2162 * Should now wait for pending TX/RX to complete 2163 * and block future ones from occuring. This needs to be 2164 * done before the TX queue is drained. 2165 */ 2166 ath_draintxq(sc, reset_type); /* stop xmit side */ 2167 2168 /* 2169 * Regardless of whether we're doing a no-loss flush or 2170 * not, stop the PCU and handle what's in the RX queue. 2171 * That way frames aren't dropped which shouldn't be. 2172 */ 2173 ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS)); 2174 ath_rx_flush(sc); 2175 2176 ath_settkipmic(sc); /* configure TKIP MIC handling */ 2177 /* NB: indicate channel change so we do a full reset */ 2178 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status)) 2179 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n", 2180 __func__, status); 2181 sc->sc_diversity = ath_hal_getdiversity(ah); 2182 2183 /* Let DFS at it in case it's a DFS channel */ 2184 ath_dfs_radar_enable(sc, ic->ic_curchan); 2185 2186 if (ath_startrecv(sc) != 0) /* restart recv */ 2187 if_printf(ifp, "%s: unable to start recv logic\n", __func__); 2188 /* 2189 * We may be doing a reset in response to an ioctl 2190 * that changes the channel so update any state that 2191 * might change as a result. 2192 */ 2193 ath_chan_change(sc, ic->ic_curchan); 2194 if (sc->sc_beacons) { /* restart beacons */ 2195#ifdef IEEE80211_SUPPORT_TDMA 2196 if (sc->sc_tdma) 2197 ath_tdma_config(sc, NULL); 2198 else 2199#endif 2200 ath_beacon_config(sc, NULL); 2201 } 2202 2203 /* 2204 * Release the reset lock and re-enable interrupts here. 2205 * If an interrupt was being processed in ath_intr(), 2206 * it would disable interrupts at this point. 
So we have 2207 * to atomically enable interrupts and decrement the 2208 * reset counter - this way ath_intr() doesn't end up 2209 * disabling interrupts without a corresponding enable 2210 * in the rest or channel change path. 2211 */ 2212 ATH_PCU_LOCK(sc); 2213 sc->sc_inreset_cnt--; 2214 /* XXX only do this if sc_inreset_cnt == 0? */ 2215 ath_hal_intrset(ah, sc->sc_imask); 2216 ATH_PCU_UNLOCK(sc); 2217 2218 /* 2219 * TX and RX can be started here. If it were started with 2220 * sc_inreset_cnt > 0, the TX and RX path would abort. 2221 * Thus if this is a nested call through the reset or 2222 * channel change code, TX completion will occur but 2223 * RX completion and ath_start / ath_tx_start will not 2224 * run. 2225 */ 2226 2227 /* Restart TX/RX as needed */ 2228 ath_txrx_start(sc); 2229 2230 /* XXX Restart TX completion and pending TX */ 2231 if (reset_type == ATH_RESET_NOLOSS) { 2232 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 2233 if (ATH_TXQ_SETUP(sc, i)) { 2234 ATH_TXQ_LOCK(&sc->sc_txq[i]); 2235 ath_txq_restart_dma(sc, &sc->sc_txq[i]); 2236 ath_txq_sched(sc, &sc->sc_txq[i]); 2237 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 2238 } 2239 } 2240 } 2241 2242 /* 2243 * This may have been set during an ath_start() call which 2244 * set this once it detected a concurrent TX was going on. 2245 * So, clear it. 2246 */ 2247 IF_LOCK(&ifp->if_snd); 2248 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2249 IF_UNLOCK(&ifp->if_snd); 2250 2251 /* Handle any frames in the TX queue */ 2252 /* 2253 * XXX should this be done by the caller, rather than 2254 * ath_reset() ? 2255 */ 2256 ath_start(ifp); /* restart xmit */ 2257 return 0; 2258} 2259 2260static int 2261ath_reset_vap(struct ieee80211vap *vap, u_long cmd) 2262{ 2263 struct ieee80211com *ic = vap->iv_ic; 2264 struct ifnet *ifp = ic->ic_ifp; 2265 struct ath_softc *sc = ifp->if_softc; 2266 struct ath_hal *ah = sc->sc_ah; 2267 2268 switch (cmd) { 2269 case IEEE80211_IOC_TXPOWER: 2270 /* 2271 * If per-packet TPC is enabled, then we have nothing 2272 * to do; otherwise we need to force the global limit. 2273 * All this can happen directly; no need to reset. 2274 */ 2275 if (!ath_hal_gettpc(ah)) 2276 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit); 2277 return 0; 2278 } 2279 /* XXX? Full or NOLOSS? */ 2280 return ath_reset(ifp, ATH_RESET_FULL); 2281} 2282 2283struct ath_buf * 2284_ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype) 2285{ 2286 struct ath_buf *bf; 2287 2288 ATH_TXBUF_LOCK_ASSERT(sc); 2289 2290 if (btype == ATH_BUFTYPE_MGMT) 2291 bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt); 2292 else 2293 bf = TAILQ_FIRST(&sc->sc_txbuf); 2294 2295 if (bf == NULL) { 2296 sc->sc_stats.ast_tx_getnobuf++; 2297 } else { 2298 if (bf->bf_flags & ATH_BUF_BUSY) { 2299 sc->sc_stats.ast_tx_getbusybuf++; 2300 bf = NULL; 2301 } 2302 } 2303 2304 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) { 2305 if (btype == ATH_BUFTYPE_MGMT) 2306 TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list); 2307 else { 2308 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list); 2309 sc->sc_txbuf_cnt--; 2310 2311 /* 2312 * This shuldn't happen; however just to be 2313 * safe print a warning and fudge the txbuf 2314 * count. 2315 */ 2316 if (sc->sc_txbuf_cnt < 0) { 2317 device_printf(sc->sc_dev, 2318 "%s: sc_txbuf_cnt < 0?\n", 2319 __func__); 2320 sc->sc_txbuf_cnt = 0; 2321 } 2322 } 2323 } else 2324 bf = NULL; 2325 2326 if (bf == NULL) { 2327 /* XXX should check which list, mgmt or otherwise */ 2328 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__, 2329 TAILQ_FIRST(&sc->sc_txbuf) == NULL ? 
2330 "out of xmit buffers" : "xmit buffer busy"); 2331 return NULL; 2332 } 2333 2334 /* XXX TODO: should do this at buffer list initialisation */ 2335 /* XXX (then, ensure the buffer has the right flag set) */ 2336 if (btype == ATH_BUFTYPE_MGMT) 2337 bf->bf_flags |= ATH_BUF_MGMT; 2338 else 2339 bf->bf_flags &= (~ATH_BUF_MGMT); 2340 2341 /* Valid bf here; clear some basic fields */ 2342 bf->bf_next = NULL; /* XXX just to be sure */ 2343 bf->bf_last = NULL; /* XXX again, just to be sure */ 2344 bf->bf_comp = NULL; /* XXX again, just to be sure */ 2345 bzero(&bf->bf_state, sizeof(bf->bf_state)); 2346 2347 /* 2348 * Track the descriptor ID only if doing EDMA 2349 */ 2350 if (sc->sc_isedma) { 2351 bf->bf_descid = sc->sc_txbuf_descid; 2352 sc->sc_txbuf_descid++; 2353 } 2354 2355 return bf; 2356} 2357 2358/* 2359 * When retrying a software frame, buffers marked ATH_BUF_BUSY 2360 * can't be thrown back on the queue as they could still be 2361 * in use by the hardware. 2362 * 2363 * This duplicates the buffer, or returns NULL. 2364 * 2365 * The descriptor is also copied but the link pointers and 2366 * the DMA segments aren't copied; this frame should thus 2367 * be again passed through the descriptor setup/chain routines 2368 * so the link is correct. 2369 * 2370 * The caller must free the buffer using ath_freebuf(). 2371 * 2372 * XXX TODO: this call shouldn't fail as it'll cause packet loss 2373 * XXX in the TX pathway when retries are needed. 2374 * XXX Figure out how to keep some buffers free, or factor the 2375 * XXX number of busy buffers into the xmit path (ath_start()) 2376 * XXX so we don't over-commit. 2377 */ 2378struct ath_buf * 2379ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf) 2380{ 2381 struct ath_buf *tbf; 2382 2383 tbf = ath_getbuf(sc, 2384 (bf->bf_flags & ATH_BUF_MGMT) ? 2385 ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL); 2386 if (tbf == NULL) 2387 return NULL; /* XXX failure? Why? */ 2388 2389 /* Copy basics */ 2390 tbf->bf_next = NULL; 2391 tbf->bf_nseg = bf->bf_nseg; 2392 tbf->bf_flags = bf->bf_flags & ~ATH_BUF_BUSY; 2393 tbf->bf_status = bf->bf_status; 2394 tbf->bf_m = bf->bf_m; 2395 tbf->bf_node = bf->bf_node; 2396 /* will be setup by the chain/setup function */ 2397 tbf->bf_lastds = NULL; 2398 /* for now, last == self */ 2399 tbf->bf_last = tbf; 2400 tbf->bf_comp = bf->bf_comp; 2401 2402 /* NOTE: DMA segments will be setup by the setup/chain functions */ 2403 2404 /* The caller has to re-init the descriptor + links */ 2405 2406 /* Copy state */ 2407 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state)); 2408 2409 return tbf; 2410} 2411 2412struct ath_buf * 2413ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype) 2414{ 2415 struct ath_buf *bf; 2416 2417 ATH_TXBUF_LOCK(sc); 2418 bf = _ath_getbuf_locked(sc, btype); 2419 /* 2420 * If a mgmt buffer was requested but we're out of those, 2421 * try requesting a normal one. 
2422 */ 2423 if (bf == NULL && btype == ATH_BUFTYPE_MGMT) 2424 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); 2425 ATH_TXBUF_UNLOCK(sc); 2426 if (bf == NULL) { 2427 struct ifnet *ifp = sc->sc_ifp; 2428 2429 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__); 2430 sc->sc_stats.ast_tx_qstop++; 2431 IF_LOCK(&ifp->if_snd); 2432 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2433 IF_UNLOCK(&ifp->if_snd); 2434 } 2435 return bf; 2436} 2437 2438void 2439ath_start(struct ifnet *ifp) 2440{ 2441 struct ath_softc *sc = ifp->if_softc; 2442 struct ieee80211_node *ni; 2443 struct ath_buf *bf; 2444 struct mbuf *m, *next; 2445 ath_bufhead frags; 2446 2447 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) 2448 return; 2449 2450 /* XXX is it ok to hold the ATH_LOCK here? */ 2451 ATH_PCU_LOCK(sc); 2452 if (sc->sc_inreset_cnt > 0) { 2453 device_printf(sc->sc_dev, 2454 "%s: sc_inreset_cnt > 0; bailing\n", __func__); 2455 ATH_PCU_UNLOCK(sc); 2456 IF_LOCK(&ifp->if_snd); 2457 sc->sc_stats.ast_tx_qstop++; 2458 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2459 IF_UNLOCK(&ifp->if_snd); 2460 return; 2461 } 2462 sc->sc_txstart_cnt++; 2463 ATH_PCU_UNLOCK(sc); 2464 2465 for (;;) { 2466 ATH_TXBUF_LOCK(sc); 2467 if (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree) { 2468 /* XXX increment counter? */ 2469 ATH_TXBUF_UNLOCK(sc); 2470 IF_LOCK(&ifp->if_snd); 2471 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2472 IF_UNLOCK(&ifp->if_snd); 2473 break; 2474 } 2475 ATH_TXBUF_UNLOCK(sc); 2476 2477 /* 2478 * Grab a TX buffer and associated resources. 2479 */ 2480 bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL); 2481 if (bf == NULL) 2482 break; 2483 2484 IFQ_DEQUEUE(&ifp->if_snd, m); 2485 if (m == NULL) { 2486 ATH_TXBUF_LOCK(sc); 2487 ath_returnbuf_head(sc, bf); 2488 ATH_TXBUF_UNLOCK(sc); 2489 break; 2490 } 2491 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; 2492 /* 2493 * Check for fragmentation. If this frame 2494 * has been broken up verify we have enough 2495 * buffers to send all the fragments so all 2496 * go out or none... 2497 */ 2498 TAILQ_INIT(&frags); 2499 if ((m->m_flags & M_FRAG) && 2500 !ath_txfrag_setup(sc, &frags, m, ni)) { 2501 DPRINTF(sc, ATH_DEBUG_XMIT, 2502 "%s: out of txfrag buffers\n", __func__); 2503 sc->sc_stats.ast_tx_nofrag++; 2504 ifp->if_oerrors++; 2505 ath_freetx(m); 2506 goto bad; 2507 } 2508 ifp->if_opackets++; 2509 nextfrag: 2510 /* 2511 * Pass the frame to the h/w for transmission. 2512 * Fragmented frames have each frag chained together 2513 * with m_nextpkt. We know there are sufficient ath_buf's 2514 * to send all the frags because of work done by 2515 * ath_txfrag_setup. We leave m_nextpkt set while 2516 * calling ath_tx_start so it can use it to extend the 2517 * the tx duration to cover the subsequent frag and 2518 * so it can reclaim all the mbufs in case of an error; 2519 * ath_tx_start clears m_nextpkt once it commits to 2520 * handing the frame to the hardware. 2521 */ 2522 next = m->m_nextpkt; 2523 if (ath_tx_start(sc, ni, bf, m)) { 2524 bad: 2525 ifp->if_oerrors++; 2526 reclaim: 2527 bf->bf_m = NULL; 2528 bf->bf_node = NULL; 2529 ATH_TXBUF_LOCK(sc); 2530 ath_returnbuf_head(sc, bf); 2531 ath_txfrag_cleanup(sc, &frags, ni); 2532 ATH_TXBUF_UNLOCK(sc); 2533 if (ni != NULL) 2534 ieee80211_free_node(ni); 2535 continue; 2536 } 2537 if (next != NULL) { 2538 /* 2539 * Beware of state changing between frags. 2540 * XXX check sta power-save state? 
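 *
 * "State changing" here means the vap dropping out of RUN between
 * fragments (for instance losing its association mid-burst); the
 * check below flushes the remaining fragments rather than sending
 * them in a stale state.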
2541 */ 2542 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { 2543 DPRINTF(sc, ATH_DEBUG_XMIT, 2544 "%s: flush fragmented packet, state %s\n", 2545 __func__, 2546 ieee80211_state_name[ni->ni_vap->iv_state]); 2547 ath_freetx(next); 2548 goto reclaim; 2549 } 2550 m = next; 2551 bf = TAILQ_FIRST(&frags); 2552 KASSERT(bf != NULL, ("no buf for txfrag")); 2553 TAILQ_REMOVE(&frags, bf, bf_list); 2554 goto nextfrag; 2555 } 2556 2557 sc->sc_wd_timer = 5; 2558 } 2559 2560 ATH_PCU_LOCK(sc); 2561 sc->sc_txstart_cnt--; 2562 ATH_PCU_UNLOCK(sc); 2563} 2564 2565static int 2566ath_media_change(struct ifnet *ifp) 2567{ 2568 int error = ieee80211_media_change(ifp); 2569 /* NB: only the fixed rate can change and that doesn't need a reset */ 2570 return (error == ENETRESET ? 0 : error); 2571} 2572 2573/* 2574 * Block/unblock tx+rx processing while a key change is done. 2575 * We assume the caller serializes key management operations 2576 * so we only need to worry about synchronization with other 2577 * uses that originate in the driver. 2578 */ 2579static void 2580ath_key_update_begin(struct ieee80211vap *vap) 2581{ 2582 struct ifnet *ifp = vap->iv_ic->ic_ifp; 2583 struct ath_softc *sc = ifp->if_softc; 2584 2585 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2586 taskqueue_block(sc->sc_tq); 2587 IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */ 2588} 2589 2590static void 2591ath_key_update_end(struct ieee80211vap *vap) 2592{ 2593 struct ifnet *ifp = vap->iv_ic->ic_ifp; 2594 struct ath_softc *sc = ifp->if_softc; 2595 2596 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2597 IF_UNLOCK(&ifp->if_snd); 2598 taskqueue_unblock(sc->sc_tq); 2599} 2600 2601static void 2602ath_update_promisc(struct ifnet *ifp) 2603{ 2604 struct ath_softc *sc = ifp->if_softc; 2605 u_int32_t rfilt; 2606 2607 /* configure rx filter */ 2608 rfilt = ath_calcrxfilter(sc); 2609 ath_hal_setrxfilter(sc->sc_ah, rfilt); 2610 2611 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); 2612} 2613 2614static void 2615ath_update_mcast(struct ifnet *ifp) 2616{ 2617 struct ath_softc *sc = ifp->if_softc; 2618 u_int32_t mfilt[2]; 2619 2620 /* calculate and install multicast filter */ 2621 if ((ifp->if_flags & IFF_ALLMULTI) == 0) { 2622 struct ifmultiaddr *ifma; 2623 /* 2624 * Merge multicast addresses to form the hardware filter. 2625 */ 2626 mfilt[0] = mfilt[1] = 0; 2627 if_maddr_rlock(ifp); /* XXX need some fiddling to remove? 
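 *
 * The loop below folds each multicast address into a 6-bit filter
 * position. Purely as an illustration: a position of 37 sets bit 5
 * of mfilt[1], since 37 / 32 == 1 and 37 % 32 == 5.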
*/ 2628 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2629 caddr_t dl; 2630 u_int32_t val; 2631 u_int8_t pos; 2632 2633 /* calculate XOR of eight 6bit values */ 2634 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 2635 val = LE_READ_4(dl + 0); 2636 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2637 val = LE_READ_4(dl + 3); 2638 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2639 pos &= 0x3f; 2640 mfilt[pos / 32] |= (1 << (pos % 32)); 2641 } 2642 if_maddr_runlock(ifp); 2643 } else 2644 mfilt[0] = mfilt[1] = ~0; 2645 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]); 2646 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n", 2647 __func__, mfilt[0], mfilt[1]); 2648} 2649 2650void 2651ath_mode_init(struct ath_softc *sc) 2652{ 2653 struct ifnet *ifp = sc->sc_ifp; 2654 struct ath_hal *ah = sc->sc_ah; 2655 u_int32_t rfilt; 2656 2657 /* configure rx filter */ 2658 rfilt = ath_calcrxfilter(sc); 2659 ath_hal_setrxfilter(ah, rfilt); 2660 2661 /* configure operational mode */ 2662 ath_hal_setopmode(ah); 2663 2664 DPRINTF(sc, ATH_DEBUG_STATE | ATH_DEBUG_MODE, 2665 "%s: ah=%p, ifp=%p, if_addr=%p\n", 2666 __func__, 2667 ah, 2668 ifp, 2669 (ifp == NULL) ? NULL : ifp->if_addr); 2670 2671 /* handle any link-level address change */ 2672 ath_hal_setmac(ah, IF_LLADDR(ifp)); 2673 2674 /* calculate and install multicast filter */ 2675 ath_update_mcast(ifp); 2676} 2677 2678/* 2679 * Set the slot time based on the current setting. 2680 */ 2681void 2682ath_setslottime(struct ath_softc *sc) 2683{ 2684 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 2685 struct ath_hal *ah = sc->sc_ah; 2686 u_int usec; 2687 2688 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan)) 2689 usec = 13; 2690 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan)) 2691 usec = 21; 2692 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { 2693 /* honor short/long slot time only in 11g */ 2694 /* XXX shouldn't honor on pure g or turbo g channel */ 2695 if (ic->ic_flags & IEEE80211_F_SHSLOT) 2696 usec = HAL_SLOT_TIME_9; 2697 else 2698 usec = HAL_SLOT_TIME_20; 2699 } else 2700 usec = HAL_SLOT_TIME_9; 2701 2702 DPRINTF(sc, ATH_DEBUG_RESET, 2703 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n", 2704 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, 2705 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec); 2706 2707 ath_hal_setslottime(ah, usec); 2708 sc->sc_updateslot = OK; 2709} 2710 2711/* 2712 * Callback from the 802.11 layer to update the 2713 * slot time based on the current setting. 2714 */ 2715static void 2716ath_updateslot(struct ifnet *ifp) 2717{ 2718 struct ath_softc *sc = ifp->if_softc; 2719 struct ieee80211com *ic = ifp->if_l2com; 2720 2721 /* 2722 * When not coordinating the BSS, change the hardware 2723 * immediately. For other operation we defer the change 2724 * until beacon updates have propagated to the stations. 2725 */ 2726 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 2727 ic->ic_opmode == IEEE80211_M_MBSS) 2728 sc->sc_updateslot = UPDATE; 2729 else 2730 ath_setslottime(sc); 2731} 2732 2733/* 2734 * Append the contents of src to dst; both queues 2735 * are assumed to be locked. 
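 *
 * A minimal usage sketch (callers differ; this is not lifted from
 * any one of them):
 *
 *	ATH_TXQ_LOCK(dst);
 *	ATH_TXQ_LOCK(src);
 *	ath_txqmove(dst, src);
 *	ATH_TXQ_UNLOCK(src);
 *	ATH_TXQ_UNLOCK(dst);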
2736 */ 2737void 2738ath_txqmove(struct ath_txq *dst, struct ath_txq *src) 2739{ 2740 2741 ATH_TXQ_LOCK_ASSERT(dst); 2742 ATH_TXQ_LOCK_ASSERT(src); 2743 2744 TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list); 2745 dst->axq_link = src->axq_link; 2746 src->axq_link = NULL; 2747 dst->axq_depth += src->axq_depth; 2748 dst->axq_aggr_depth += src->axq_aggr_depth; 2749 src->axq_depth = 0; 2750 src->axq_aggr_depth = 0; 2751} 2752 2753/* 2754 * Reset the hardware, with no loss. 2755 * 2756 * This can't be used for a general case reset. 2757 */ 2758static void 2759ath_reset_proc(void *arg, int pending) 2760{ 2761 struct ath_softc *sc = arg; 2762 struct ifnet *ifp = sc->sc_ifp; 2763 2764#if 0 2765 if_printf(ifp, "%s: resetting\n", __func__); 2766#endif 2767 ath_reset(ifp, ATH_RESET_NOLOSS); 2768} 2769 2770/* 2771 * Reset the hardware after detecting beacons have stopped. 2772 */ 2773static void 2774ath_bstuck_proc(void *arg, int pending) 2775{ 2776 struct ath_softc *sc = arg; 2777 struct ifnet *ifp = sc->sc_ifp; 2778 uint32_t hangs = 0; 2779 2780 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) 2781 if_printf(ifp, "bb hang detected (0x%x)\n", hangs); 2782 2783 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n", 2784 sc->sc_bmisscount); 2785 sc->sc_stats.ast_bstuck++; 2786 /* 2787 * This assumes that there's no simultaneous channel mode change 2788 * occuring. 2789 */ 2790 ath_reset(ifp, ATH_RESET_NOLOSS); 2791} 2792 2793static void 2794ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 2795{ 2796 bus_addr_t *paddr = (bus_addr_t*) arg; 2797 KASSERT(error == 0, ("error %u on bus_dma callback", error)); 2798 *paddr = segs->ds_addr; 2799} 2800 2801/* 2802 * Allocate the descriptors and appropriate DMA tag/setup. 2803 * 2804 * For some situations (eg EDMA TX completion), there isn't a requirement 2805 * for the ath_buf entries to be allocated. 2806 */ 2807int 2808ath_descdma_alloc_desc(struct ath_softc *sc, 2809 struct ath_descdma *dd, ath_bufhead *head, 2810 const char *name, int ds_size, int ndesc) 2811{ 2812#define DS2PHYS(_dd, _ds) \ 2813 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 2814#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \ 2815 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0) 2816 struct ifnet *ifp = sc->sc_ifp; 2817 int error; 2818 2819 dd->dd_descsize = ds_size; 2820 2821 DPRINTF(sc, ATH_DEBUG_RESET, 2822 "%s: %s DMA: %u desc, %d bytes per descriptor\n", 2823 __func__, name, ndesc, dd->dd_descsize); 2824 2825 dd->dd_name = name; 2826 dd->dd_desc_len = dd->dd_descsize * ndesc; 2827 2828 /* 2829 * Merlin work-around: 2830 * Descriptors that cross the 4KB boundary can't be used. 2831 * Assume one skipped descriptor per 4KB page. 2832 */ 2833 if (! ath_hal_split4ktrans(sc->sc_ah)) { 2834 int numpages = dd->dd_desc_len / 4096; 2835 dd->dd_desc_len += ds_size * numpages; 2836 } 2837 2838 /* 2839 * Setup DMA descriptor area. 
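 *
 * To illustrate the Merlin sizing above with assumed numbers: 512
 * descriptors of 48 bytes each is 24576 bytes, i.e. 6 pages of
 * 4096 bytes, so 6 * 48 = 288 extra bytes are reserved to cover
 * the skipped descriptors.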
2840 */ 2841 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 2842 PAGE_SIZE, 0, /* alignment, bounds */ 2843 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2844 BUS_SPACE_MAXADDR, /* highaddr */ 2845 NULL, NULL, /* filter, filterarg */ 2846 dd->dd_desc_len, /* maxsize */ 2847 1, /* nsegments */ 2848 dd->dd_desc_len, /* maxsegsize */ 2849 BUS_DMA_ALLOCNOW, /* flags */ 2850 NULL, /* lockfunc */ 2851 NULL, /* lockarg */ 2852 &dd->dd_dmat); 2853 if (error != 0) { 2854 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); 2855 return error; 2856 } 2857 2858 /* allocate descriptors */ 2859 error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap); 2860 if (error != 0) { 2861 if_printf(ifp, "unable to create dmamap for %s descriptors, " 2862 "error %u\n", dd->dd_name, error); 2863 goto fail0; 2864 } 2865 2866 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, 2867 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, 2868 &dd->dd_dmamap); 2869 if (error != 0) { 2870 if_printf(ifp, "unable to alloc memory for %u %s descriptors, " 2871 "error %u\n", ndesc, dd->dd_name, error); 2872 goto fail1; 2873 } 2874 2875 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, 2876 dd->dd_desc, dd->dd_desc_len, 2877 ath_load_cb, &dd->dd_desc_paddr, 2878 BUS_DMA_NOWAIT); 2879 if (error != 0) { 2880 if_printf(ifp, "unable to map %s descriptors, error %u\n", 2881 dd->dd_name, error); 2882 goto fail2; 2883 } 2884 2885 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", 2886 __func__, dd->dd_name, (uint8_t *) dd->dd_desc, 2887 (u_long) dd->dd_desc_len, (caddr_t) dd->dd_desc_paddr, 2888 /*XXX*/ (u_long) dd->dd_desc_len); 2889 2890 return (0); 2891 2892fail2: 2893 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 2894fail1: 2895 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 2896fail0: 2897 bus_dma_tag_destroy(dd->dd_dmat); 2898 memset(dd, 0, sizeof(*dd)); 2899 return error; 2900#undef DS2PHYS 2901#undef ATH_DESC_4KB_BOUND_CHECK 2902} 2903 2904int 2905ath_descdma_setup(struct ath_softc *sc, 2906 struct ath_descdma *dd, ath_bufhead *head, 2907 const char *name, int ds_size, int nbuf, int ndesc) 2908{ 2909#define DS2PHYS(_dd, _ds) \ 2910 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 2911#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \ 2912 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0) 2913 struct ifnet *ifp = sc->sc_ifp; 2914 uint8_t *ds; 2915 struct ath_buf *bf; 2916 int i, bsize, error; 2917 2918 /* Allocate descriptors */ 2919 error = ath_descdma_alloc_desc(sc, dd, head, name, ds_size, 2920 nbuf * ndesc); 2921 2922 /* Assume any errors during allocation were dealt with */ 2923 if (error != 0) { 2924 return (error); 2925 } 2926 2927 ds = (uint8_t *) dd->dd_desc; 2928 2929 /* allocate rx buffers */ 2930 bsize = sizeof(struct ath_buf) * nbuf; 2931 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 2932 if (bf == NULL) { 2933 if_printf(ifp, "malloc of %s buffers failed, size %u\n", 2934 dd->dd_name, bsize); 2935 goto fail3; 2936 } 2937 dd->dd_bufptr = bf; 2938 2939 TAILQ_INIT(head); 2940 for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * dd->dd_descsize)) { 2941 bf->bf_desc = (struct ath_desc *) ds; 2942 bf->bf_daddr = DS2PHYS(dd, ds); 2943 if (! ath_hal_split4ktrans(sc->sc_ah)) { 2944 /* 2945 * Merlin WAR: Skip descriptor addresses which 2946 * cause 4KB boundary crossing along any point 2947 * in the descriptor. 
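 *
 * Worked example (addresses assumed): a descriptor landing at
 * offset 0xFF0 within a page, with a 0x30 byte descriptor size,
 * fails the check since 0xFF0 > 0x1000 - 0x30 (= 0xFD0); ds is
 * then advanced by 0x1000 - 0xFF0 = 0x10 bytes so the descriptor
 * starts on the next 4KB page.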
2948 */ 2949 if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr, 2950 dd->dd_descsize)) { 2951 /* Start at the next page */ 2952 ds += 0x1000 - (bf->bf_daddr & 0xFFF); 2953 bf->bf_desc = (struct ath_desc *) ds; 2954 bf->bf_daddr = DS2PHYS(dd, ds); 2955 } 2956 } 2957 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 2958 &bf->bf_dmamap); 2959 if (error != 0) { 2960 if_printf(ifp, "unable to create dmamap for %s " 2961 "buffer %u, error %u\n", dd->dd_name, i, error); 2962 ath_descdma_cleanup(sc, dd, head); 2963 return error; 2964 } 2965 bf->bf_lastds = bf->bf_desc; /* Just an initial value */ 2966 TAILQ_INSERT_TAIL(head, bf, bf_list); 2967 } 2968 2969 /* 2970 * XXX TODO: ensure that ds doesn't overflow the descriptor 2971 * allocation otherwise weird stuff will occur and crash your 2972 * machine. 2973 */ 2974 return 0; 2975 /* XXX this should likely just call ath_descdma_cleanup() */ 2976fail3: 2977 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 2978 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 2979 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 2980 bus_dma_tag_destroy(dd->dd_dmat); 2981 memset(dd, 0, sizeof(*dd)); 2982 return error; 2983#undef DS2PHYS 2984#undef ATH_DESC_4KB_BOUND_CHECK 2985} 2986 2987/* 2988 * Allocate ath_buf entries but no descriptor contents. 2989 * 2990 * This is for RX EDMA where the descriptors are the header part of 2991 * the RX buffer. 2992 */ 2993int 2994ath_descdma_setup_rx_edma(struct ath_softc *sc, 2995 struct ath_descdma *dd, ath_bufhead *head, 2996 const char *name, int nbuf, int rx_status_len) 2997{ 2998 struct ifnet *ifp = sc->sc_ifp; 2999 struct ath_buf *bf; 3000 int i, bsize, error; 3001 3002 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers\n", 3003 __func__, name, nbuf); 3004 3005 dd->dd_name = name; 3006 /* 3007 * This is (mostly) purely for show. We're not allocating any actual 3008 * descriptors here as EDMA RX has the descriptor be part 3009 * of the RX buffer. 3010 * 3011 * However, dd_desc_len is used by ath_descdma_free() to determine 3012 * whether we have already freed this DMA mapping. 
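 *
 * For example (sizes assumed, not chip-specific values): 128 RX
 * buffers with a 16-byte status area give dd_desc_len = 2048,
 * which is non-zero and therefore sufficient for the "already
 * freed" test even though no descriptor memory is allocated here.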
3013 */ 3014 dd->dd_desc_len = rx_status_len * nbuf; 3015 dd->dd_descsize = rx_status_len; 3016 3017 /* allocate rx buffers */ 3018 bsize = sizeof(struct ath_buf) * nbuf; 3019 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 3020 if (bf == NULL) { 3021 if_printf(ifp, "malloc of %s buffers failed, size %u\n", 3022 dd->dd_name, bsize); 3023 error = ENOMEM; 3024 goto fail3; 3025 } 3026 dd->dd_bufptr = bf; 3027 3028 TAILQ_INIT(head); 3029 for (i = 0; i < nbuf; i++, bf++) { 3030 bf->bf_desc = NULL; 3031 bf->bf_daddr = 0; 3032 bf->bf_lastds = NULL; /* Just an initial value */ 3033 3034 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 3035 &bf->bf_dmamap); 3036 if (error != 0) { 3037 if_printf(ifp, "unable to create dmamap for %s " 3038 "buffer %u, error %u\n", dd->dd_name, i, error); 3039 ath_descdma_cleanup(sc, dd, head); 3040 return error; 3041 } 3042 TAILQ_INSERT_TAIL(head, bf, bf_list); 3043 } 3044 return 0; 3045fail3: 3046 memset(dd, 0, sizeof(*dd)); 3047 return error; 3048} 3049 3050void 3051ath_descdma_cleanup(struct ath_softc *sc, 3052 struct ath_descdma *dd, ath_bufhead *head) 3053{ 3054 struct ath_buf *bf; 3055 struct ieee80211_node *ni; 3056 3057 if (dd->dd_dmamap != 0) { 3058 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 3059 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 3060 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 3061 bus_dma_tag_destroy(dd->dd_dmat); 3062 } 3063 3064 if (head != NULL) { 3065 TAILQ_FOREACH(bf, head, bf_list) { 3066 if (bf->bf_m) { 3067 m_freem(bf->bf_m); 3068 bf->bf_m = NULL; 3069 } 3070 if (bf->bf_dmamap != NULL) { 3071 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 3072 bf->bf_dmamap = NULL; 3073 } 3074 ni = bf->bf_node; 3075 bf->bf_node = NULL; 3076 if (ni != NULL) { 3077 /* 3078 * Reclaim node reference. 3079 */ 3080 ieee80211_free_node(ni); 3081 } 3082 } 3083 } 3084 3085 if (head != NULL) 3086 TAILQ_INIT(head); 3087 3088 if (dd->dd_bufptr != NULL) 3089 free(dd->dd_bufptr, M_ATHDEV); 3090 memset(dd, 0, sizeof(*dd)); 3091} 3092 3093static int 3094ath_desc_alloc(struct ath_softc *sc) 3095{ 3096 int error; 3097 3098 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 3099 "tx", sc->sc_tx_desclen, ath_txbuf, ATH_TXDESC); 3100 if (error != 0) { 3101 return error; 3102 } 3103 sc->sc_txbuf_cnt = ath_txbuf; 3104 3105 error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt, 3106 "tx_mgmt", sc->sc_tx_desclen, ath_txbuf_mgmt, 3107 ATH_TXDESC); 3108 if (error != 0) { 3109 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3110 return error; 3111 } 3112 3113 /* 3114 * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the 3115 * flag doesn't have to be set in ath_getbuf_locked(). 
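 *
 * A sketch of what that XXX suggests (not implemented here): after
 * ath_descdma_setup() fills sc_txbuf_mgmt, walk the list once and
 * pre-set the flag, e.g.
 *
 *	TAILQ_FOREACH(bf, &sc->sc_txbuf_mgmt, bf_list)
 *		bf->bf_flags |= ATH_BUF_MGMT;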
3116 */ 3117 3118 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, 3119 "beacon", sc->sc_tx_desclen, ATH_BCBUF, 1); 3120 if (error != 0) { 3121 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3122 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 3123 &sc->sc_txbuf_mgmt); 3124 return error; 3125 } 3126 return 0; 3127} 3128 3129static void 3130ath_desc_free(struct ath_softc *sc) 3131{ 3132 3133 if (sc->sc_bdma.dd_desc_len != 0) 3134 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); 3135 if (sc->sc_txdma.dd_desc_len != 0) 3136 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3137 if (sc->sc_txdma_mgmt.dd_desc_len != 0) 3138 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 3139 &sc->sc_txbuf_mgmt); 3140} 3141 3142static struct ieee80211_node * 3143ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 3144{ 3145 struct ieee80211com *ic = vap->iv_ic; 3146 struct ath_softc *sc = ic->ic_ifp->if_softc; 3147 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; 3148 struct ath_node *an; 3149 3150 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); 3151 if (an == NULL) { 3152 /* XXX stat+msg */ 3153 return NULL; 3154 } 3155 ath_rate_node_init(sc, an); 3156 3157 /* Setup the mutex - there's no associd yet so set the name to NULL */ 3158 snprintf(an->an_name, sizeof(an->an_name), "%s: node %p", 3159 device_get_nameunit(sc->sc_dev), an); 3160 mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF); 3161 3162 /* XXX setup ath_tid */ 3163 ath_tx_tid_init(sc, an); 3164 3165 DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an); 3166 return &an->an_node; 3167} 3168 3169static void 3170ath_node_cleanup(struct ieee80211_node *ni) 3171{ 3172 struct ieee80211com *ic = ni->ni_ic; 3173 struct ath_softc *sc = ic->ic_ifp->if_softc; 3174 3175 /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */ 3176 ath_tx_node_flush(sc, ATH_NODE(ni)); 3177 ath_rate_node_cleanup(sc, ATH_NODE(ni)); 3178 sc->sc_node_cleanup(ni); 3179} 3180 3181static void 3182ath_node_free(struct ieee80211_node *ni) 3183{ 3184 struct ieee80211com *ic = ni->ni_ic; 3185 struct ath_softc *sc = ic->ic_ifp->if_softc; 3186 3187 DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni); 3188 mtx_destroy(&ATH_NODE(ni)->an_mtx); 3189 sc->sc_node_free(ni); 3190} 3191 3192static void 3193ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) 3194{ 3195 struct ieee80211com *ic = ni->ni_ic; 3196 struct ath_softc *sc = ic->ic_ifp->if_softc; 3197 struct ath_hal *ah = sc->sc_ah; 3198 3199 *rssi = ic->ic_node_getrssi(ni); 3200 if (ni->ni_chan != IEEE80211_CHAN_ANYC) 3201 *noise = ath_hal_getchannoise(ah, ni->ni_chan); 3202 else 3203 *noise = -95; /* nominally correct */ 3204} 3205 3206/* 3207 * Set the default antenna. 3208 */ 3209void 3210ath_setdefantenna(struct ath_softc *sc, u_int antenna) 3211{ 3212 struct ath_hal *ah = sc->sc_ah; 3213 3214 /* XXX block beacon interrupts */ 3215 ath_hal_setdefantenna(ah, antenna); 3216 if (sc->sc_defant != antenna) 3217 sc->sc_stats.ast_ant_defswitch++; 3218 sc->sc_defant = antenna; 3219 sc->sc_rxotherant = 0; 3220} 3221 3222static void 3223ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) 3224{ 3225 txq->axq_qnum = qnum; 3226 txq->axq_ac = 0; 3227 txq->axq_depth = 0; 3228 txq->axq_aggr_depth = 0; 3229 txq->axq_intrcnt = 0; 3230 txq->axq_link = NULL; 3231 txq->axq_softc = sc; 3232 TAILQ_INIT(&txq->axq_q); 3233 TAILQ_INIT(&txq->axq_tidq); 3234 ATH_TXQ_LOCK_INIT(sc, txq); 3235} 3236 3237/* 3238 * Setup a h/w transmit queue. 
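 *
 * (ath_tx_setup() below is the usual caller for data queues; it
 * passes HAL_TX_QUEUE_DATA plus a hal subtype for the WME access
 * category and records the resulting queue in sc_ac2q[].)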
3239 */ 3240static struct ath_txq * 3241ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 3242{ 3243#define N(a) (sizeof(a)/sizeof(a[0])) 3244 struct ath_hal *ah = sc->sc_ah; 3245 HAL_TXQ_INFO qi; 3246 int qnum; 3247 3248 memset(&qi, 0, sizeof(qi)); 3249 qi.tqi_subtype = subtype; 3250 qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 3251 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 3252 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 3253 /* 3254 * Enable interrupts only for EOL and DESC conditions. 3255 * We mark tx descriptors to receive a DESC interrupt 3256 * when a tx queue gets deep; otherwise waiting for the 3257 * EOL to reap descriptors. Note that this is done to 3258 * reduce interrupt load and this only defers reaping 3259 * descriptors, never transmitting frames. Aside from 3260 * reducing interrupts this also permits more concurrency. 3261 * The only potential downside is if the tx queue backs 3262 * up in which case the top half of the kernel may backup 3263 * due to a lack of tx descriptors. 3264 */ 3265 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE; 3266 qnum = ath_hal_setuptxqueue(ah, qtype, &qi); 3267 if (qnum == -1) { 3268 /* 3269 * NB: don't print a message, this happens 3270 * normally on parts with too few tx queues 3271 */ 3272 return NULL; 3273 } 3274 if (qnum >= N(sc->sc_txq)) { 3275 device_printf(sc->sc_dev, 3276 "hal qnum %u out of range, max %zu!\n", 3277 qnum, N(sc->sc_txq)); 3278 ath_hal_releasetxqueue(ah, qnum); 3279 return NULL; 3280 } 3281 if (!ATH_TXQ_SETUP(sc, qnum)) { 3282 ath_txq_init(sc, &sc->sc_txq[qnum], qnum); 3283 sc->sc_txqsetup |= 1<<qnum; 3284 } 3285 return &sc->sc_txq[qnum]; 3286#undef N 3287} 3288 3289/* 3290 * Setup a hardware data transmit queue for the specified 3291 * access control. The hal may not support all requested 3292 * queues in which case it will return a reference to a 3293 * previously setup queue. We record the mapping from ac's 3294 * to h/w queues for use by ath_tx_start and also track 3295 * the set of h/w queues being used to optimize work in the 3296 * transmit interrupt handler and related routines. 3297 */ 3298static int 3299ath_tx_setup(struct ath_softc *sc, int ac, int haltype) 3300{ 3301#define N(a) (sizeof(a)/sizeof(a[0])) 3302 struct ath_txq *txq; 3303 3304 if (ac >= N(sc->sc_ac2q)) { 3305 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", 3306 ac, N(sc->sc_ac2q)); 3307 return 0; 3308 } 3309 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); 3310 if (txq != NULL) { 3311 txq->axq_ac = ac; 3312 sc->sc_ac2q[ac] = txq; 3313 return 1; 3314 } else 3315 return 0; 3316#undef N 3317} 3318 3319/* 3320 * Update WME parameters for a transmit queue. 3321 */ 3322static int 3323ath_txq_update(struct ath_softc *sc, int ac) 3324{ 3325#define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) 3326#define ATH_TXOP_TO_US(v) (v<<5) 3327 struct ifnet *ifp = sc->sc_ifp; 3328 struct ieee80211com *ic = ifp->if_l2com; 3329 struct ath_txq *txq = sc->sc_ac2q[ac]; 3330 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 3331 struct ath_hal *ah = sc->sc_ah; 3332 HAL_TXQ_INFO qi; 3333 3334 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); 3335#ifdef IEEE80211_SUPPORT_TDMA 3336 if (sc->sc_tdma) { 3337 /* 3338 * AIFS is zero so there's no pre-transmit wait. The 3339 * burst time defines the slot duration and is configured 3340 * through net80211. The QCU is setup to not do post-xmit 3341 * back off, lockout all lower-priority QCU's, and fire 3342 * off the DMA beacon alert timer which is setup based 3343 * on the slot configuration. 
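 *
 * (In the non-TDMA path further below, the WME parameters are
 * converted with the macros defined at the top of this function;
 * e.g. a logcwmin of 4 becomes a cwmin of (1 << 4) - 1 = 15, and a
 * txopLimit of 94 (in 32 usec units) becomes 94 << 5 = 3008 usec.)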
3344 */ 3345 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 3346 | HAL_TXQ_TXERRINT_ENABLE 3347 | HAL_TXQ_TXURNINT_ENABLE 3348 | HAL_TXQ_TXEOLINT_ENABLE 3349 | HAL_TXQ_DBA_GATED 3350 | HAL_TXQ_BACKOFF_DISABLE 3351 | HAL_TXQ_ARB_LOCKOUT_GLOBAL 3352 ; 3353 qi.tqi_aifs = 0; 3354 /* XXX +dbaprep? */ 3355 qi.tqi_readyTime = sc->sc_tdmaslotlen; 3356 qi.tqi_burstTime = qi.tqi_readyTime; 3357 } else { 3358#endif 3359 /* 3360 * XXX shouldn't this just use the default flags 3361 * used in the previous queue setup? 3362 */ 3363 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 3364 | HAL_TXQ_TXERRINT_ENABLE 3365 | HAL_TXQ_TXDESCINT_ENABLE 3366 | HAL_TXQ_TXURNINT_ENABLE 3367 | HAL_TXQ_TXEOLINT_ENABLE 3368 ; 3369 qi.tqi_aifs = wmep->wmep_aifsn; 3370 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 3371 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 3372 qi.tqi_readyTime = 0; 3373 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit); 3374#ifdef IEEE80211_SUPPORT_TDMA 3375 } 3376#endif 3377 3378 DPRINTF(sc, ATH_DEBUG_RESET, 3379 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n", 3380 __func__, txq->axq_qnum, qi.tqi_qflags, 3381 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime); 3382 3383 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { 3384 if_printf(ifp, "unable to update hardware queue " 3385 "parameters for %s traffic!\n", 3386 ieee80211_wme_acnames[ac]); 3387 return 0; 3388 } else { 3389 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ 3390 return 1; 3391 } 3392#undef ATH_TXOP_TO_US 3393#undef ATH_EXPONENT_TO_VALUE 3394} 3395 3396/* 3397 * Callback from the 802.11 layer to update WME parameters. 3398 */ 3399int 3400ath_wme_update(struct ieee80211com *ic) 3401{ 3402 struct ath_softc *sc = ic->ic_ifp->if_softc; 3403 3404 return !ath_txq_update(sc, WME_AC_BE) || 3405 !ath_txq_update(sc, WME_AC_BK) || 3406 !ath_txq_update(sc, WME_AC_VI) || 3407 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; 3408} 3409 3410/* 3411 * Reclaim resources for a setup queue. 3412 */ 3413static void 3414ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 3415{ 3416 3417 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); 3418 ATH_TXQ_LOCK_DESTROY(txq); 3419 sc->sc_txqsetup &= ~(1<<txq->axq_qnum); 3420} 3421 3422/* 3423 * Reclaim all tx queue resources. 3424 */ 3425static void 3426ath_tx_cleanup(struct ath_softc *sc) 3427{ 3428 int i; 3429 3430 ATH_TXBUF_LOCK_DESTROY(sc); 3431 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 3432 if (ATH_TXQ_SETUP(sc, i)) 3433 ath_tx_cleanupq(sc, &sc->sc_txq[i]); 3434} 3435 3436/* 3437 * Return h/w rate index for an IEEE rate (w/o basic rate bit) 3438 * using the current rates in sc_rixmap. 3439 */ 3440int 3441ath_tx_findrix(const struct ath_softc *sc, uint8_t rate) 3442{ 3443 int rix = sc->sc_rixmap[rate]; 3444 /* NB: return lowest rix for invalid rate */ 3445 return (rix == 0xff ? 
0 : rix); 3446} 3447 3448static void 3449ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts, 3450 struct ath_buf *bf) 3451{ 3452 struct ieee80211_node *ni = bf->bf_node; 3453 struct ifnet *ifp = sc->sc_ifp; 3454 struct ieee80211com *ic = ifp->if_l2com; 3455 int sr, lr, pri; 3456 3457 if (ts->ts_status == 0) { 3458 u_int8_t txant = ts->ts_antenna; 3459 sc->sc_stats.ast_ant_tx[txant]++; 3460 sc->sc_ant_tx[txant]++; 3461 if (ts->ts_finaltsi != 0) 3462 sc->sc_stats.ast_tx_altrate++; 3463 pri = M_WME_GETAC(bf->bf_m); 3464 if (pri >= WME_AC_VO) 3465 ic->ic_wme.wme_hipri_traffic++; 3466 if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) 3467 ni->ni_inact = ni->ni_inact_reload; 3468 } else { 3469 if (ts->ts_status & HAL_TXERR_XRETRY) 3470 sc->sc_stats.ast_tx_xretries++; 3471 if (ts->ts_status & HAL_TXERR_FIFO) 3472 sc->sc_stats.ast_tx_fifoerr++; 3473 if (ts->ts_status & HAL_TXERR_FILT) 3474 sc->sc_stats.ast_tx_filtered++; 3475 if (ts->ts_status & HAL_TXERR_XTXOP) 3476 sc->sc_stats.ast_tx_xtxop++; 3477 if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED) 3478 sc->sc_stats.ast_tx_timerexpired++; 3479 3480 if (ts->ts_status & HAL_TX_DATA_UNDERRUN) 3481 sc->sc_stats.ast_tx_data_underrun++; 3482 if (ts->ts_status & HAL_TX_DELIM_UNDERRUN) 3483 sc->sc_stats.ast_tx_delim_underrun++; 3484 3485 if (bf->bf_m->m_flags & M_FF) 3486 sc->sc_stats.ast_ff_txerr++; 3487 } 3488 /* XXX when is this valid? */ 3489 if (ts->ts_status & HAL_TX_DESC_CFG_ERR) 3490 sc->sc_stats.ast_tx_desccfgerr++; 3491 3492 sr = ts->ts_shortretry; 3493 lr = ts->ts_longretry; 3494 sc->sc_stats.ast_tx_shortretry += sr; 3495 sc->sc_stats.ast_tx_longretry += lr; 3496 3497} 3498 3499/* 3500 * The default completion. If fail is 1, this means 3501 * "please don't retry the frame, and just return -1 status 3502 * to the net80211 stack. 3503 */ 3504void 3505ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 3506{ 3507 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 3508 int st; 3509 3510 if (fail == 1) 3511 st = -1; 3512 else 3513 st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ? 3514 ts->ts_status : HAL_TXERR_XRETRY; 3515 3516 if (bf->bf_state.bfs_dobaw) 3517 device_printf(sc->sc_dev, 3518 "%s: bf %p: seqno %d: dobaw should've been cleared!\n", 3519 __func__, 3520 bf, 3521 SEQNO(bf->bf_state.bfs_seqno)); 3522 if (bf->bf_next != NULL) 3523 device_printf(sc->sc_dev, 3524 "%s: bf %p: seqno %d: bf_next not NULL!\n", 3525 __func__, 3526 bf, 3527 SEQNO(bf->bf_state.bfs_seqno)); 3528 3529 /* 3530 * Do any tx complete callback. Note this must 3531 * be done before releasing the node reference. 3532 * This will free the mbuf, release the net80211 3533 * node and recycle the ath_buf. 3534 */ 3535 ath_tx_freebuf(sc, bf, st); 3536} 3537 3538/* 3539 * Update rate control with the given completion status. 3540 */ 3541void 3542ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 3543 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen, 3544 int nframes, int nbad) 3545{ 3546 struct ath_node *an; 3547 3548 /* Only for unicast frames */ 3549 if (ni == NULL) 3550 return; 3551 3552 an = ATH_NODE(ni); 3553 3554 if ((ts->ts_status & HAL_TXERR_FILT) == 0) { 3555 ATH_NODE_LOCK(an); 3556 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad); 3557 ATH_NODE_UNLOCK(an); 3558 } 3559} 3560 3561/* 3562 * Update the busy status of the last frame on the free list. 
3563 * When doing TDMA, the busy flag tracks whether the hardware 3564 * currently points to this buffer or not, and thus gated DMA 3565 * may restart by re-reading the last descriptor in this 3566 * buffer. 3567 * 3568 * This should be called in the completion function once one 3569 * of the buffers has been used. 3570 */ 3571static void 3572ath_tx_update_busy(struct ath_softc *sc) 3573{ 3574 struct ath_buf *last; 3575 3576 /* 3577 * Since the last frame may still be marked 3578 * as ATH_BUF_BUSY, unmark it here before 3579 * finishing the frame processing. 3580 * Since we've completed a frame (aggregate 3581 * or otherwise), the hardware has moved on 3582 * and is no longer referencing the previous 3583 * descriptor. 3584 */ 3585 ATH_TXBUF_LOCK_ASSERT(sc); 3586 last = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s); 3587 if (last != NULL) 3588 last->bf_flags &= ~ATH_BUF_BUSY; 3589 last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s); 3590 if (last != NULL) 3591 last->bf_flags &= ~ATH_BUF_BUSY; 3592} 3593 3594/* 3595 * Process the completion of the given buffer. 3596 * 3597 * This calls the rate control update and then the buffer completion. 3598 * This will either free the buffer or requeue it. In any case, the 3599 * bf pointer should be treated as invalid after this function is called. 3600 */ 3601void 3602ath_tx_process_buf_completion(struct ath_softc *sc, struct ath_txq *txq, 3603 struct ath_tx_status *ts, struct ath_buf *bf) 3604{ 3605 struct ieee80211_node *ni = bf->bf_node; 3606 struct ath_node *an = NULL; 3607 3608 ATH_TXQ_UNLOCK_ASSERT(txq); 3609 3610 /* If unicast frame, update general statistics */ 3611 if (ni != NULL) { 3612 an = ATH_NODE(ni); 3613 /* update statistics */ 3614 ath_tx_update_stats(sc, ts, bf); 3615 } 3616 3617 /* 3618 * Call the completion handler. 3619 * The completion handler is responsible for 3620 * calling the rate control code. 3621 * 3622 * Frames with no completion handler get the 3623 * rate control code called here. 3624 */ 3625 if (bf->bf_comp == NULL) { 3626 if ((ts->ts_status & HAL_TXERR_FILT) == 0 && 3627 (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) { 3628 /* 3629 * XXX assume this isn't an aggregate 3630 * frame. 3631 */ 3632 ath_tx_update_ratectrl(sc, ni, 3633 bf->bf_state.bfs_rc, ts, 3634 bf->bf_state.bfs_pktlen, 1, 3635 (ts->ts_status == 0 ? 0 : 1)); 3636 } 3637 ath_tx_default_comp(sc, bf, 0); 3638 } else 3639 bf->bf_comp(sc, bf, 0); 3640} 3641 3642 3643 3644/* 3645 * Process completed xmit descriptors from the specified queue. 3646 * Kick the packet scheduler if needed. This can occur from this 3647 * particular task. 3648 */ 3649static int 3650ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) 3651{ 3652 struct ath_hal *ah = sc->sc_ah; 3653 struct ath_buf *bf; 3654 struct ath_desc *ds; 3655 struct ath_tx_status *ts; 3656 struct ieee80211_node *ni; 3657#ifdef IEEE80211_SUPPORT_SUPERG 3658 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 3659#endif /* IEEE80211_SUPPORT_SUPERG */ 3660 int nacked; 3661 HAL_STATUS status; 3662 3663 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", 3664 __func__, txq->axq_qnum, 3665 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 3666 txq->axq_link); 3667 nacked = 0; 3668 for (;;) { 3669 ATH_TXQ_LOCK(txq); 3670 txq->axq_intrcnt = 0; /* reset periodic desc intr count */ 3671 bf = TAILQ_FIRST(&txq->axq_q); 3672 if (bf == NULL) { 3673 ATH_TXQ_UNLOCK(txq); 3674 break; 3675 } 3676 ds = bf->bf_lastds; /* XXX must be setup correctly! 
*/ 3677 ts = &bf->bf_status.ds_txstat; 3678 status = ath_hal_txprocdesc(ah, ds, ts); 3679#ifdef ATH_DEBUG 3680 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) 3681 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 3682 status == HAL_OK); 3683 else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0)) { 3684 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 3685 status == HAL_OK); 3686 } 3687#endif 3688 if (status == HAL_EINPROGRESS) { 3689 ATH_TXQ_UNLOCK(txq); 3690 break; 3691 } 3692 ATH_TXQ_REMOVE(txq, bf, bf_list); 3693#ifdef IEEE80211_SUPPORT_TDMA 3694 if (txq->axq_depth > 0) { 3695 /* 3696 * More frames follow. Mark the buffer busy 3697 * so it's not re-used while the hardware may 3698 * still re-read the link field in the descriptor. 3699 * 3700 * Use the last buffer in an aggregate as that 3701 * is where the hardware may be - intermediate 3702 * descriptors won't be "busy". 3703 */ 3704 bf->bf_last->bf_flags |= ATH_BUF_BUSY; 3705 } else 3706#else 3707 if (txq->axq_depth == 0) 3708#endif 3709 txq->axq_link = NULL; 3710 if (bf->bf_state.bfs_aggr) 3711 txq->axq_aggr_depth--; 3712 3713 ni = bf->bf_node; 3714 /* 3715 * If unicast frame was ack'd update RSSI, 3716 * including the last rx time used to 3717 * workaround phantom bmiss interrupts. 3718 */ 3719 if (ni != NULL && ts->ts_status == 0 && 3720 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) { 3721 nacked++; 3722 sc->sc_stats.ast_tx_rssi = ts->ts_rssi; 3723 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, 3724 ts->ts_rssi); 3725 } 3726 ATH_TXQ_UNLOCK(txq); 3727 3728 /* 3729 * Update statistics and call completion 3730 */ 3731 ath_tx_process_buf_completion(sc, txq, ts, bf); 3732 3733 3734 } 3735#ifdef IEEE80211_SUPPORT_SUPERG 3736 /* 3737 * Flush fast-frame staging queue when traffic slows. 3738 */ 3739 if (txq->axq_depth <= 1) 3740 ieee80211_ff_flush(ic, txq->axq_ac); 3741#endif 3742 3743 /* Kick the TXQ scheduler */ 3744 if (dosched) { 3745 ATH_TXQ_LOCK(txq); 3746 ath_txq_sched(sc, txq); 3747 ATH_TXQ_UNLOCK(txq); 3748 } 3749 3750 return nacked; 3751} 3752 3753#define TXQACTIVE(t, q) ( (t) & (1 << (q))) 3754 3755/* 3756 * Deferred processing of transmit interrupt; special-cased 3757 * for a single hardware transmit queue (e.g. 5210 and 5211). 3758 */ 3759static void 3760ath_tx_proc_q0(void *arg, int npending) 3761{ 3762 struct ath_softc *sc = arg; 3763 struct ifnet *ifp = sc->sc_ifp; 3764 uint32_t txqs; 3765 3766 ATH_PCU_LOCK(sc); 3767 sc->sc_txproc_cnt++; 3768 txqs = sc->sc_txq_active; 3769 sc->sc_txq_active &= ~txqs; 3770 ATH_PCU_UNLOCK(sc); 3771 3772 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1)) 3773 /* XXX why is lastrx updated in tx code? */ 3774 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 3775 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 3776 ath_tx_processq(sc, sc->sc_cabq, 1); 3777 IF_LOCK(&ifp->if_snd); 3778 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3779 IF_UNLOCK(&ifp->if_snd); 3780 sc->sc_wd_timer = 0; 3781 3782 if (sc->sc_softled) 3783 ath_led_event(sc, sc->sc_txrix); 3784 3785 ATH_PCU_LOCK(sc); 3786 sc->sc_txproc_cnt--; 3787 ATH_PCU_UNLOCK(sc); 3788 3789 ath_tx_kick(sc); 3790} 3791 3792/* 3793 * Deferred processing of transmit interrupt; special-cased 3794 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). 
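 *
 * In both of these handlers txqs is a snapshot of sc_txq_active,
 * consumed bit-per-queue via TXQACTIVE(); e.g. a (hypothetical)
 * snapshot of 0x5 means only queues 0 and 2 have completed frames
 * to reap on this pass.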
3795 */ 3796static void 3797ath_tx_proc_q0123(void *arg, int npending) 3798{ 3799 struct ath_softc *sc = arg; 3800 struct ifnet *ifp = sc->sc_ifp; 3801 int nacked; 3802 uint32_t txqs; 3803 3804 ATH_PCU_LOCK(sc); 3805 sc->sc_txproc_cnt++; 3806 txqs = sc->sc_txq_active; 3807 sc->sc_txq_active &= ~txqs; 3808 ATH_PCU_UNLOCK(sc); 3809 3810 /* 3811 * Process each active queue. 3812 */ 3813 nacked = 0; 3814 if (TXQACTIVE(txqs, 0)) 3815 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1); 3816 if (TXQACTIVE(txqs, 1)) 3817 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1); 3818 if (TXQACTIVE(txqs, 2)) 3819 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1); 3820 if (TXQACTIVE(txqs, 3)) 3821 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1); 3822 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 3823 ath_tx_processq(sc, sc->sc_cabq, 1); 3824 if (nacked) 3825 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 3826 3827 IF_LOCK(&ifp->if_snd); 3828 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3829 IF_UNLOCK(&ifp->if_snd); 3830 sc->sc_wd_timer = 0; 3831 3832 if (sc->sc_softled) 3833 ath_led_event(sc, sc->sc_txrix); 3834 3835 ATH_PCU_LOCK(sc); 3836 sc->sc_txproc_cnt--; 3837 ATH_PCU_UNLOCK(sc); 3838 3839 ath_tx_kick(sc); 3840} 3841 3842/* 3843 * Deferred processing of transmit interrupt. 3844 */ 3845static void 3846ath_tx_proc(void *arg, int npending) 3847{ 3848 struct ath_softc *sc = arg; 3849 struct ifnet *ifp = sc->sc_ifp; 3850 int i, nacked; 3851 uint32_t txqs; 3852 3853 ATH_PCU_LOCK(sc); 3854 sc->sc_txproc_cnt++; 3855 txqs = sc->sc_txq_active; 3856 sc->sc_txq_active &= ~txqs; 3857 ATH_PCU_UNLOCK(sc); 3858 3859 /* 3860 * Process each active queue. 3861 */ 3862 nacked = 0; 3863 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 3864 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i)) 3865 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1); 3866 if (nacked) 3867 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 3868 3869 /* XXX check this inside of IF_LOCK? */ 3870 IF_LOCK(&ifp->if_snd); 3871 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3872 IF_UNLOCK(&ifp->if_snd); 3873 sc->sc_wd_timer = 0; 3874 3875 if (sc->sc_softled) 3876 ath_led_event(sc, sc->sc_txrix); 3877 3878 ATH_PCU_LOCK(sc); 3879 sc->sc_txproc_cnt--; 3880 ATH_PCU_UNLOCK(sc); 3881 3882 ath_tx_kick(sc); 3883} 3884#undef TXQACTIVE 3885 3886/* 3887 * Deferred processing of TXQ rescheduling. 3888 */ 3889static void 3890ath_txq_sched_tasklet(void *arg, int npending) 3891{ 3892 struct ath_softc *sc = arg; 3893 int i; 3894 3895 /* XXX is skipping ok? 
*/ 3896 ATH_PCU_LOCK(sc); 3897#if 0 3898 if (sc->sc_inreset_cnt > 0) { 3899 device_printf(sc->sc_dev, 3900 "%s: sc_inreset_cnt > 0; skipping\n", __func__); 3901 ATH_PCU_UNLOCK(sc); 3902 return; 3903 } 3904#endif 3905 sc->sc_txproc_cnt++; 3906 ATH_PCU_UNLOCK(sc); 3907 3908 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 3909 if (ATH_TXQ_SETUP(sc, i)) { 3910 ATH_TXQ_LOCK(&sc->sc_txq[i]); 3911 ath_txq_sched(sc, &sc->sc_txq[i]); 3912 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 3913 } 3914 } 3915 3916 ATH_PCU_LOCK(sc); 3917 sc->sc_txproc_cnt--; 3918 ATH_PCU_UNLOCK(sc); 3919} 3920 3921void 3922ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf) 3923{ 3924 3925 ATH_TXBUF_LOCK_ASSERT(sc); 3926 3927 if (bf->bf_flags & ATH_BUF_MGMT) 3928 TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list); 3929 else { 3930 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 3931 sc->sc_txbuf_cnt++; 3932 if (sc->sc_txbuf_cnt > ath_txbuf) { 3933 device_printf(sc->sc_dev, 3934 "%s: sc_txbuf_cnt > %d?\n", 3935 __func__, 3936 ath_txbuf); 3937 sc->sc_txbuf_cnt = ath_txbuf; 3938 } 3939 } 3940} 3941 3942void 3943ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf) 3944{ 3945 3946 ATH_TXBUF_LOCK_ASSERT(sc); 3947 3948 if (bf->bf_flags & ATH_BUF_MGMT) 3949 TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list); 3950 else { 3951 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 3952 sc->sc_txbuf_cnt++; 3953 if (sc->sc_txbuf_cnt > ATH_TXBUF) { 3954 device_printf(sc->sc_dev, 3955 "%s: sc_txbuf_cnt > %d?\n", 3956 __func__, 3957 ATH_TXBUF); 3958 sc->sc_txbuf_cnt = ATH_TXBUF; 3959 } 3960 } 3961} 3962 3963/* 3964 * Return a buffer to the pool and update the 'busy' flag on the 3965 * previous 'tail' entry. 3966 * 3967 * This _must_ only be called when the buffer is involved in a completed 3968 * TX. The logic is that if it was part of an active TX, the previous 3969 * buffer on the list is now not involved in a halted TX DMA queue, waiting 3970 * for restart (eg for TDMA.) 3971 * 3972 * The caller must free the mbuf and recycle the node reference. 3973 */ 3974void 3975ath_freebuf(struct ath_softc *sc, struct ath_buf *bf) 3976{ 3977 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3978 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); 3979 3980 KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__)); 3981 KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__)); 3982 3983 ATH_TXBUF_LOCK(sc); 3984 ath_tx_update_busy(sc); 3985 ath_returnbuf_tail(sc, bf); 3986 ATH_TXBUF_UNLOCK(sc); 3987} 3988 3989/* 3990 * This is currently used by ath_tx_draintxq() and 3991 * ath_tx_tid_free_pkts(). 3992 * 3993 * It recycles a single ath_buf. 3994 */ 3995void 3996ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status) 3997{ 3998 struct ieee80211_node *ni = bf->bf_node; 3999 struct mbuf *m0 = bf->bf_m; 4000 4001 bf->bf_node = NULL; 4002 bf->bf_m = NULL; 4003 4004 /* Free the buffer, it's not needed any longer */ 4005 ath_freebuf(sc, bf); 4006 4007 if (ni != NULL) { 4008 /* 4009 * Do any callback and reclaim the node reference. 4010 */ 4011 if (m0->m_flags & M_TXCB) 4012 ieee80211_process_callback(ni, m0, status); 4013 ieee80211_free_node(ni); 4014 } 4015 m_freem(m0); 4016 4017 /* 4018 * XXX the buffer used to be freed -after-, but the DMA map was 4019 * freed where ath_freebuf() now is. I've no idea what this 4020 * will do. 
4021 */ 4022} 4023 4024void 4025ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) 4026{ 4027#ifdef ATH_DEBUG 4028 struct ath_hal *ah = sc->sc_ah; 4029#endif 4030 struct ath_buf *bf; 4031 u_int ix; 4032 4033 /* 4034 * NB: this assumes output has been stopped and 4035 * we do not need to block ath_tx_proc 4036 */ 4037 ATH_TXBUF_LOCK(sc); 4038 bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s); 4039 if (bf != NULL) 4040 bf->bf_flags &= ~ATH_BUF_BUSY; 4041 bf = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s); 4042 if (bf != NULL) 4043 bf->bf_flags &= ~ATH_BUF_BUSY; 4044 ATH_TXBUF_UNLOCK(sc); 4045 4046 for (ix = 0;; ix++) { 4047 ATH_TXQ_LOCK(txq); 4048 bf = TAILQ_FIRST(&txq->axq_q); 4049 if (bf == NULL) { 4050 txq->axq_link = NULL; 4051 /* 4052 * There's currently no flag that indicates 4053 * a buffer is on the FIFO. So until that 4054 * occurs, just clear the FIFO counter here. 4055 * 4056 * Yes, this means that if something in parallel 4057 * is pushing things onto this TXQ and pushing 4058 * _that_ into the hardware, things will get 4059 * very fruity very quickly. 4060 */ 4061 txq->axq_fifo_depth = 0; 4062 ATH_TXQ_UNLOCK(txq); 4063 break; 4064 } 4065 ATH_TXQ_REMOVE(txq, bf, bf_list); 4066 if (bf->bf_state.bfs_aggr) 4067 txq->axq_aggr_depth--; 4068#ifdef ATH_DEBUG 4069 if (sc->sc_debug & ATH_DEBUG_RESET) { 4070 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 4071 int status = 0; 4072 4073 /* 4074 * EDMA operation has a TX completion FIFO 4075 * separate from the TX descriptor, so this 4076 * method of checking the "completion" status 4077 * is wrong. 4078 */ 4079 if (! sc->sc_isedma) { 4080 status = (ath_hal_txprocdesc(ah, 4081 bf->bf_lastds, 4082 &bf->bf_status.ds_txstat) == HAL_OK); 4083 } 4084 ath_printtxbuf(sc, bf, txq->axq_qnum, ix, status); 4085 ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *), 4086 bf->bf_m->m_len, 0, -1); 4087 } 4088#endif /* ATH_DEBUG */ 4089 /* 4090 * Since we're now doing magic in the completion 4091 * functions, we -must- call it for aggregation 4092 * destinations or BAW tracking will get upset. 4093 */ 4094 /* 4095 * Clear ATH_BUF_BUSY; the completion handler 4096 * will free the buffer. 4097 */ 4098 ATH_TXQ_UNLOCK(txq); 4099 bf->bf_flags &= ~ATH_BUF_BUSY; 4100 if (bf->bf_comp) 4101 bf->bf_comp(sc, bf, 1); 4102 else 4103 ath_tx_default_comp(sc, bf, 1); 4104 } 4105 4106 /* 4107 * Drain software queued frames which are on 4108 * active TIDs. 4109 */ 4110 ath_tx_txq_drain(sc, txq); 4111} 4112 4113static void 4114ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) 4115{ 4116 struct ath_hal *ah = sc->sc_ah; 4117 4118 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 4119 __func__, txq->axq_qnum, 4120 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), 4121 txq->axq_link); 4122 (void) ath_hal_stoptxdma(ah, txq->axq_qnum); 4123} 4124 4125int 4126ath_stoptxdma(struct ath_softc *sc) 4127{ 4128 struct ath_hal *ah = sc->sc_ah; 4129 int i; 4130 4131 /* XXX return value */ 4132 if (sc->sc_invalid) 4133 return 0; 4134 4135 if (!sc->sc_invalid) { 4136 /* don't touch the hardware if marked invalid */ 4137 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 4138 __func__, sc->sc_bhalq, 4139 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq), 4140 NULL); 4141 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); 4142 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 4143 if (ATH_TXQ_SETUP(sc, i)) 4144 ath_tx_stopdma(sc, &sc->sc_txq[i]); 4145 } 4146 4147 return 1; 4148} 4149 4150/* 4151 * Drain the transmit queues and reclaim resources. 
4152 */ 4153void 4154ath_legacy_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type) 4155{ 4156#ifdef ATH_DEBUG 4157 struct ath_hal *ah = sc->sc_ah; 4158#endif 4159 struct ifnet *ifp = sc->sc_ifp; 4160 int i; 4161 4162 (void) ath_stoptxdma(sc); 4163 4164 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 4165 /* 4166 * XXX TODO: should we just handle the completed TX frames 4167 * here, whether or not the reset is a full one or not? 4168 */ 4169 if (ATH_TXQ_SETUP(sc, i)) { 4170 if (reset_type == ATH_RESET_NOLOSS) 4171 ath_tx_processq(sc, &sc->sc_txq[i], 0); 4172 else 4173 ath_tx_draintxq(sc, &sc->sc_txq[i]); 4174 } 4175 } 4176#ifdef ATH_DEBUG 4177 if (sc->sc_debug & ATH_DEBUG_RESET) { 4178 struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf); 4179 if (bf != NULL && bf->bf_m != NULL) { 4180 ath_printtxbuf(sc, bf, sc->sc_bhalq, 0, 4181 ath_hal_txprocdesc(ah, bf->bf_lastds, 4182 &bf->bf_status.ds_txstat) == HAL_OK); 4183 ieee80211_dump_pkt(ifp->if_l2com, 4184 mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len, 4185 0, -1); 4186 } 4187 } 4188#endif /* ATH_DEBUG */ 4189 IF_LOCK(&ifp->if_snd); 4190 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 4191 IF_UNLOCK(&ifp->if_snd); 4192 sc->sc_wd_timer = 0; 4193} 4194 4195/* 4196 * Update internal state after a channel change. 4197 */ 4198static void 4199ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) 4200{ 4201 enum ieee80211_phymode mode; 4202 4203 /* 4204 * Change channels and update the h/w rate map 4205 * if we're switching; e.g. 11a to 11b/g. 4206 */ 4207 mode = ieee80211_chan2mode(chan); 4208 if (mode != sc->sc_curmode) 4209 ath_setcurmode(sc, mode); 4210 sc->sc_curchan = chan; 4211} 4212 4213/* 4214 * Set/change channels. If the channel is really being changed, 4215 * it's done by resetting the chip. To accomplish this we must 4216 * first cleanup any pending DMA, then restart stuff after a la 4217 * ath_init. 4218 */ 4219static int 4220ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 4221{ 4222 struct ifnet *ifp = sc->sc_ifp; 4223 struct ieee80211com *ic = ifp->if_l2com; 4224 struct ath_hal *ah = sc->sc_ah; 4225 int ret = 0; 4226 4227 /* Treat this as an interface reset */ 4228 ATH_PCU_UNLOCK_ASSERT(sc); 4229 ATH_UNLOCK_ASSERT(sc); 4230 4231 /* (Try to) stop TX/RX from occuring */ 4232 taskqueue_block(sc->sc_tq); 4233 4234 ATH_PCU_LOCK(sc); 4235 ath_hal_intrset(ah, 0); /* Stop new RX/TX completion */ 4236 ath_txrx_stop_locked(sc); /* Stop pending RX/TX completion */ 4237 if (ath_reset_grablock(sc, 1) == 0) { 4238 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", 4239 __func__); 4240 } 4241 ATH_PCU_UNLOCK(sc); 4242 4243 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n", 4244 __func__, ieee80211_chan2ieee(ic, chan), 4245 chan->ic_freq, chan->ic_flags); 4246 if (chan != sc->sc_curchan) { 4247 HAL_STATUS status; 4248 /* 4249 * To switch channels clear any pending DMA operations; 4250 * wait long enough for the RX fifo to drain, reset the 4251 * hardware at the new frequency, and then re-enable 4252 * the relevant bits of the h/w. 4253 */ 4254#if 0 4255 ath_hal_intrset(ah, 0); /* disable interrupts */ 4256#endif 4257 ath_stoprecv(sc, 1); /* turn off frame recv */ 4258 /* 4259 * First, handle completed TX/RX frames. 4260 */ 4261 ath_rx_flush(sc); 4262 ath_draintxq(sc, ATH_RESET_NOLOSS); 4263 /* 4264 * Next, flush the non-scheduled frames. 
4265 */ 4266 ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */ 4267 4268 if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) { 4269 if_printf(ifp, "%s: unable to reset " 4270 "channel %u (%u MHz, flags 0x%x), hal status %u\n", 4271 __func__, ieee80211_chan2ieee(ic, chan), 4272 chan->ic_freq, chan->ic_flags, status); 4273 ret = EIO; 4274 goto finish; 4275 } 4276 sc->sc_diversity = ath_hal_getdiversity(ah); 4277 4278 /* Let DFS at it in case it's a DFS channel */ 4279 ath_dfs_radar_enable(sc, chan); 4280 4281 /* 4282 * Re-enable rx framework. 4283 */ 4284 if (ath_startrecv(sc) != 0) { 4285 if_printf(ifp, "%s: unable to restart recv logic\n", 4286 __func__); 4287 ret = EIO; 4288 goto finish; 4289 } 4290 4291 /* 4292 * Change channels and update the h/w rate map 4293 * if we're switching; e.g. 11a to 11b/g. 4294 */ 4295 ath_chan_change(sc, chan); 4296 4297 /* 4298 * Reset clears the beacon timers; reset them 4299 * here if needed. 4300 */ 4301 if (sc->sc_beacons) { /* restart beacons */ 4302#ifdef IEEE80211_SUPPORT_TDMA 4303 if (sc->sc_tdma) 4304 ath_tdma_config(sc, NULL); 4305 else 4306#endif 4307 ath_beacon_config(sc, NULL); 4308 } 4309 4310 /* 4311 * Re-enable interrupts. 4312 */ 4313#if 0 4314 ath_hal_intrset(ah, sc->sc_imask); 4315#endif 4316 } 4317 4318finish: 4319 ATH_PCU_LOCK(sc); 4320 sc->sc_inreset_cnt--; 4321 /* XXX only do this if sc_inreset_cnt == 0? */ 4322 ath_hal_intrset(ah, sc->sc_imask); 4323 ATH_PCU_UNLOCK(sc); 4324 4325 IF_LOCK(&ifp->if_snd); 4326 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 4327 IF_UNLOCK(&ifp->if_snd); 4328 ath_txrx_start(sc); 4329 /* XXX ath_start? */ 4330 4331 return ret; 4332} 4333 4334/* 4335 * Periodically recalibrate the PHY to account 4336 * for temperature/environment changes. 4337 */ 4338static void 4339ath_calibrate(void *arg) 4340{ 4341 struct ath_softc *sc = arg; 4342 struct ath_hal *ah = sc->sc_ah; 4343 struct ifnet *ifp = sc->sc_ifp; 4344 struct ieee80211com *ic = ifp->if_l2com; 4345 HAL_BOOL longCal, isCalDone; 4346 HAL_BOOL aniCal, shortCal = AH_FALSE; 4347 int nextcal; 4348 4349 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */ 4350 goto restart; 4351 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz); 4352 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000); 4353 if (sc->sc_doresetcal) 4354 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000); 4355 4356 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal); 4357 if (aniCal) { 4358 sc->sc_stats.ast_ani_cal++; 4359 sc->sc_lastani = ticks; 4360 ath_hal_ani_poll(ah, sc->sc_curchan); 4361 } 4362 4363 if (longCal) { 4364 sc->sc_stats.ast_per_cal++; 4365 sc->sc_lastlongcal = ticks; 4366 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 4367 /* 4368 * Rfgain is out of bounds, reset the chip 4369 * to load new gain values. 4370 */ 4371 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 4372 "%s: rfgain change\n", __func__); 4373 sc->sc_stats.ast_per_rfgain++; 4374 sc->sc_resetcal = 0; 4375 sc->sc_doresetcal = AH_TRUE; 4376 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); 4377 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 4378 return; 4379 } 4380 /* 4381 * If this long cal is after an idle period, then 4382 * reset the data collection state so we start fresh. 
4383 */ 4384 if (sc->sc_resetcal) { 4385 (void) ath_hal_calreset(ah, sc->sc_curchan); 4386 sc->sc_lastcalreset = ticks; 4387 sc->sc_lastshortcal = ticks; 4388 sc->sc_resetcal = 0; 4389 sc->sc_doresetcal = AH_TRUE; 4390 } 4391 } 4392 4393 /* Only call if we're doing a short/long cal, not for ANI calibration */ 4394 if (shortCal || longCal) { 4395 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) { 4396 if (longCal) { 4397 /* 4398 * Calibrate noise floor data again in case of change. 4399 */ 4400 ath_hal_process_noisefloor(ah); 4401 } 4402 } else { 4403 DPRINTF(sc, ATH_DEBUG_ANY, 4404 "%s: calibration of channel %u failed\n", 4405 __func__, sc->sc_curchan->ic_freq); 4406 sc->sc_stats.ast_per_calfail++; 4407 } 4408 if (shortCal) 4409 sc->sc_lastshortcal = ticks; 4410 } 4411 if (!isCalDone) { 4412restart: 4413 /* 4414 * Use a shorter interval to potentially collect multiple 4415 * data samples required to complete calibration. Once 4416 * we're told the work is done we drop back to a longer 4417 * interval between requests. We're more aggressive doing 4418 * work when operating as an AP to improve operation right 4419 * after startup. 4420 */ 4421 sc->sc_lastshortcal = ticks; 4422 nextcal = ath_shortcalinterval*hz/1000; 4423 if (sc->sc_opmode != HAL_M_HOSTAP) 4424 nextcal *= 10; 4425 sc->sc_doresetcal = AH_TRUE; 4426 } else { 4427 /* nextcal should be the shortest time for next event */ 4428 nextcal = ath_longcalinterval*hz; 4429 if (sc->sc_lastcalreset == 0) 4430 sc->sc_lastcalreset = sc->sc_lastlongcal; 4431 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz) 4432 sc->sc_resetcal = 1; /* setup reset next trip */ 4433 sc->sc_doresetcal = AH_FALSE; 4434 } 4435 /* ANI calibration may occur more often than short/long/resetcal */ 4436 if (ath_anicalinterval > 0) 4437 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000); 4438 4439 if (nextcal != 0) { 4440 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n", 4441 __func__, nextcal, isCalDone ? "" : "!"); 4442 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc); 4443 } else { 4444 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", 4445 __func__); 4446 /* NB: don't rearm timer */ 4447 } 4448} 4449 4450static void 4451ath_scan_start(struct ieee80211com *ic) 4452{ 4453 struct ifnet *ifp = ic->ic_ifp; 4454 struct ath_softc *sc = ifp->if_softc; 4455 struct ath_hal *ah = sc->sc_ah; 4456 u_int32_t rfilt; 4457 4458 /* XXX calibration timer? 
 */
4459
4460	ATH_LOCK(sc);
4461	sc->sc_scanning = 1;
4462	sc->sc_syncbeacon = 0;
4463	rfilt = ath_calcrxfilter(sc);
4464	ATH_UNLOCK(sc);
4465
4466	ATH_PCU_LOCK(sc);
4467	ath_hal_setrxfilter(ah, rfilt);
4468	ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0);
4469	ATH_PCU_UNLOCK(sc);
4470
4471	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
4472	    __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr));
4473}
4474
4475static void
4476ath_scan_end(struct ieee80211com *ic)
4477{
4478	struct ifnet *ifp = ic->ic_ifp;
4479	struct ath_softc *sc = ifp->if_softc;
4480	struct ath_hal *ah = sc->sc_ah;
4481	u_int32_t rfilt;
4482
4483	ATH_LOCK(sc);
4484	sc->sc_scanning = 0;
4485	rfilt = ath_calcrxfilter(sc);
4486	ATH_UNLOCK(sc);
4487
4488	ATH_PCU_LOCK(sc);
4489	ath_hal_setrxfilter(ah, rfilt);
4490	ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
4491
4492	ath_hal_process_noisefloor(ah);
4493	ATH_PCU_UNLOCK(sc);
4494
4495	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
4496	    __func__, rfilt, ether_sprintf(sc->sc_curbssid),
4497	    sc->sc_curaid);
4498}
4499
4500#ifdef ATH_ENABLE_11N
4501/*
4502 * For now, just do a channel change.
4503 *
4504 * Later, we'll go through the hard slog of suspending tx/rx, changing rate
4505 * control state and resetting the hardware without dropping frames out
4506 * of the queue.
4507 *
4508 * The unfortunate trouble here is making absolutely sure that the
4509 * channel width change has propagated enough so the hardware
4510 * absolutely isn't handed bogus frames for its current operating
4511 * mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and
4512 * do occur in parallel, we need to make certain we've blocked
4513 * any further ongoing TX (and RX, that can cause raw TX)
4514 * before we do this.
4515 */
4516static void
4517ath_update_chw(struct ieee80211com *ic)
4518{
4519	struct ifnet *ifp = ic->ic_ifp;
4520	struct ath_softc *sc = ifp->if_softc;
4521
4522	DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__);
4523	ath_set_channel(ic);
4524}
4525#endif /* ATH_ENABLE_11N */
4526
4527static void
4528ath_set_channel(struct ieee80211com *ic)
4529{
4530	struct ifnet *ifp = ic->ic_ifp;
4531	struct ath_softc *sc = ifp->if_softc;
4532
4533	(void) ath_chan_set(sc, ic->ic_curchan);
4534	/*
4535	 * If we are returning to our bss channel then mark state
4536	 * so the next recv'd beacon's tsf will be used to sync the
4537	 * beacon timers. Note that since we only hear beacons in
4538	 * sta/ibss mode this has no effect in other operating modes.
4539	 */
4540	ATH_LOCK(sc);
4541	if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
4542		sc->sc_syncbeacon = 1;
4543	ATH_UNLOCK(sc);
4544}
4545
4546/*
4547 * Walk the vap list and check if there are any vaps in RUN state.
4548
4548 */ 4549static int 4550ath_isanyrunningvaps(struct ieee80211vap *this) 4551{ 4552 struct ieee80211com *ic = this->iv_ic; 4553 struct ieee80211vap *vap; 4554 4555 IEEE80211_LOCK_ASSERT(ic); 4556 4557 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { 4558 if (vap != this && vap->iv_state >= IEEE80211_S_RUN) 4559 return 1; 4560 } 4561 return 0; 4562} 4563 4564static int 4565ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 4566{ 4567 struct ieee80211com *ic = vap->iv_ic; 4568 struct ath_softc *sc = ic->ic_ifp->if_softc; 4569 struct ath_vap *avp = ATH_VAP(vap); 4570 struct ath_hal *ah = sc->sc_ah; 4571 struct ieee80211_node *ni = NULL; 4572 int i, error, stamode; 4573 u_int32_t rfilt; 4574 int csa_run_transition = 0; 4575 static const HAL_LED_STATE leds[] = { 4576 HAL_LED_INIT, /* IEEE80211_S_INIT */ 4577 HAL_LED_SCAN, /* IEEE80211_S_SCAN */ 4578 HAL_LED_AUTH, /* IEEE80211_S_AUTH */ 4579 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ 4580 HAL_LED_RUN, /* IEEE80211_S_CAC */ 4581 HAL_LED_RUN, /* IEEE80211_S_RUN */ 4582 HAL_LED_RUN, /* IEEE80211_S_CSA */ 4583 HAL_LED_RUN, /* IEEE80211_S_SLEEP */ 4584 }; 4585 4586 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, 4587 ieee80211_state_name[vap->iv_state], 4588 ieee80211_state_name[nstate]); 4589 4590 /* 4591 * net80211 _should_ have the comlock asserted at this point. 4592 * There are some comments around the calls to vap->iv_newstate 4593 * which indicate that it (newstate) may end up dropping the 4594 * lock. This and the subsequent lock assert check after newstate 4595 * are an attempt to catch these and figure out how/why. 4596 */ 4597 IEEE80211_LOCK_ASSERT(ic); 4598 4599 if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN) 4600 csa_run_transition = 1; 4601 4602 callout_drain(&sc->sc_cal_ch); 4603 ath_hal_setledstate(ah, leds[nstate]); /* set LED */ 4604 4605 if (nstate == IEEE80211_S_SCAN) { 4606 /* 4607 * Scanning: turn off beacon miss and don't beacon. 4608 * Mark beacon state so when we reach RUN state we'll 4609 * [re]setup beacons. Unblock the task q thread so 4610 * deferred interrupt processing is done. 4611 */ 4612 ath_hal_intrset(ah, 4613 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); 4614 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 4615 sc->sc_beacons = 0; 4616 taskqueue_unblock(sc->sc_tq); 4617 } 4618 4619 ni = ieee80211_ref_node(vap->iv_bss); 4620 rfilt = ath_calcrxfilter(sc); 4621 stamode = (vap->iv_opmode == IEEE80211_M_STA || 4622 vap->iv_opmode == IEEE80211_M_AHDEMO || 4623 vap->iv_opmode == IEEE80211_M_IBSS); 4624 if (stamode && nstate == IEEE80211_S_RUN) { 4625 sc->sc_curaid = ni->ni_associd; 4626 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid); 4627 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 4628 } 4629 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 4630 __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid); 4631 ath_hal_setrxfilter(ah, rfilt); 4632 4633 /* XXX is this to restore keycache on resume? */ 4634 if (vap->iv_opmode != IEEE80211_M_STA && 4635 (vap->iv_flags & IEEE80211_F_PRIVACY)) { 4636 for (i = 0; i < IEEE80211_WEP_NKID; i++) 4637 if (ath_hal_keyisvalid(ah, i)) 4638 ath_hal_keysetmac(ah, i, ni->ni_bssid); 4639 } 4640 4641 /* 4642 * Invoke the parent method to do net80211 work. 4643 */ 4644 error = avp->av_newstate(vap, nstate, arg); 4645 if (error != 0) 4646 goto bad; 4647 4648 /* 4649 * See above: ensure av_newstate() doesn't drop the lock 4650 * on us. 
4651 */ 4652 IEEE80211_LOCK_ASSERT(ic); 4653 4654 if (nstate == IEEE80211_S_RUN) { 4655 /* NB: collect bss node again, it may have changed */ 4656 ieee80211_free_node(ni); 4657 ni = ieee80211_ref_node(vap->iv_bss); 4658 4659 DPRINTF(sc, ATH_DEBUG_STATE, 4660 "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s " 4661 "capinfo 0x%04x chan %d\n", __func__, 4662 vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), 4663 ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); 4664 4665 switch (vap->iv_opmode) { 4666#ifdef IEEE80211_SUPPORT_TDMA 4667 case IEEE80211_M_AHDEMO: 4668 if ((vap->iv_caps & IEEE80211_C_TDMA) == 0) 4669 break; 4670 /* fall thru... */ 4671#endif 4672 case IEEE80211_M_HOSTAP: 4673 case IEEE80211_M_IBSS: 4674 case IEEE80211_M_MBSS: 4675 /* 4676 * Allocate and setup the beacon frame. 4677 * 4678 * Stop any previous beacon DMA. This may be 4679 * necessary, for example, when an ibss merge 4680 * causes reconfiguration; there will be a state 4681 * transition from RUN->RUN that means we may 4682 * be called with beacon transmission active. 4683 */ 4684 ath_hal_stoptxdma(ah, sc->sc_bhalq); 4685 4686 error = ath_beacon_alloc(sc, ni); 4687 if (error != 0) 4688 goto bad; 4689 /* 4690 * If joining an adhoc network defer beacon timer 4691 * configuration to the next beacon frame so we 4692 * have a current TSF to use. Otherwise we're 4693 * starting an ibss/bss so there's no need to delay; 4694 * if this is the first vap moving to RUN state, then 4695 * beacon state needs to be [re]configured. 4696 */ 4697 if (vap->iv_opmode == IEEE80211_M_IBSS && 4698 ni->ni_tstamp.tsf != 0) { 4699 sc->sc_syncbeacon = 1; 4700 } else if (!sc->sc_beacons) { 4701#ifdef IEEE80211_SUPPORT_TDMA 4702 if (vap->iv_caps & IEEE80211_C_TDMA) 4703 ath_tdma_config(sc, vap); 4704 else 4705#endif 4706 ath_beacon_config(sc, vap); 4707 sc->sc_beacons = 1; 4708 } 4709 break; 4710 case IEEE80211_M_STA: 4711 /* 4712 * Defer beacon timer configuration to the next 4713 * beacon frame so we have a current TSF to use 4714 * (any TSF collected when scanning is likely old). 4715 * However if it's due to a CSA -> RUN transition, 4716 * force a beacon update so we pick up a lack of 4717 * beacons from an AP in CAC and thus force a 4718 * scan. 4719 */ 4720 sc->sc_syncbeacon = 1; 4721 if (csa_run_transition) 4722 ath_beacon_config(sc, vap); 4723 break; 4724 case IEEE80211_M_MONITOR: 4725 /* 4726 * Monitor mode vaps have only INIT->RUN and RUN->RUN 4727 * transitions so we must re-enable interrupts here to 4728 * handle the case of a single monitor mode vap. 4729 */ 4730 ath_hal_intrset(ah, sc->sc_imask); 4731 break; 4732 case IEEE80211_M_WDS: 4733 break; 4734 default: 4735 break; 4736 } 4737 /* 4738 * Let the hal process statistics collected during a 4739 * scan so it can provide calibrated noise floor data. 4740 */ 4741 ath_hal_process_noisefloor(ah); 4742 /* 4743 * Reset rssi stats; maybe not the best place... 4744 */ 4745 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; 4746 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; 4747 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; 4748 /* 4749 * Finally, start any timers and the task q thread 4750 * (in case we didn't go through SCAN state). 
4751 */ 4752 if (ath_longcalinterval != 0) { 4753 /* start periodic recalibration timer */ 4754 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 4755 } else { 4756 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 4757 "%s: calibration disabled\n", __func__); 4758 } 4759 taskqueue_unblock(sc->sc_tq); 4760 } else if (nstate == IEEE80211_S_INIT) { 4761 /* 4762 * If there are no vaps left in RUN state then 4763 * shutdown host/driver operation: 4764 * o disable interrupts 4765 * o disable the task queue thread 4766 * o mark beacon processing as stopped 4767 */ 4768 if (!ath_isanyrunningvaps(vap)) { 4769 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 4770 /* disable interrupts */ 4771 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL); 4772 taskqueue_block(sc->sc_tq); 4773 sc->sc_beacons = 0; 4774 } 4775#ifdef IEEE80211_SUPPORT_TDMA 4776 ath_hal_setcca(ah, AH_TRUE); 4777#endif 4778 } 4779bad: 4780 ieee80211_free_node(ni); 4781 return error; 4782} 4783 4784/* 4785 * Allocate a key cache slot to the station so we can 4786 * setup a mapping from key index to node. The key cache 4787 * slot is needed for managing antenna state and for 4788 * compression when stations do not use crypto. We do 4789 * it uniliaterally here; if crypto is employed this slot 4790 * will be reassigned. 4791 */ 4792static void 4793ath_setup_stationkey(struct ieee80211_node *ni) 4794{ 4795 struct ieee80211vap *vap = ni->ni_vap; 4796 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 4797 ieee80211_keyix keyix, rxkeyix; 4798 4799 /* XXX should take a locked ref to vap->iv_bss */ 4800 if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) { 4801 /* 4802 * Key cache is full; we'll fall back to doing 4803 * the more expensive lookup in software. Note 4804 * this also means no h/w compression. 4805 */ 4806 /* XXX msg+statistic */ 4807 } else { 4808 /* XXX locking? */ 4809 ni->ni_ucastkey.wk_keyix = keyix; 4810 ni->ni_ucastkey.wk_rxkeyix = rxkeyix; 4811 /* NB: must mark device key to get called back on delete */ 4812 ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY; 4813 IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr); 4814 /* NB: this will create a pass-thru key entry */ 4815 ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss); 4816 } 4817} 4818 4819/* 4820 * Setup driver-specific state for a newly associated node. 4821 * Note that we're called also on a re-associate, the isnew 4822 * param tells us if this is the first time or not. 4823 */ 4824static void 4825ath_newassoc(struct ieee80211_node *ni, int isnew) 4826{ 4827 struct ath_node *an = ATH_NODE(ni); 4828 struct ieee80211vap *vap = ni->ni_vap; 4829 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 4830 const struct ieee80211_txparam *tp = ni->ni_txparms; 4831 4832 an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate); 4833 an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate); 4834 4835 ath_rate_newassoc(sc, an, isnew); 4836 if (isnew && 4837 (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey && 4838 ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE) 4839 ath_setup_stationkey(ni); 4840} 4841 4842static int 4843ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg, 4844 int nchans, struct ieee80211_channel chans[]) 4845{ 4846 struct ath_softc *sc = ic->ic_ifp->if_softc; 4847 struct ath_hal *ah = sc->sc_ah; 4848 HAL_STATUS status; 4849 4850 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 4851 "%s: rd %u cc %u location %c%s\n", 4852 __func__, reg->regdomain, reg->country, reg->location, 4853 reg->ecm ? 
" ecm" : ""); 4854 4855 status = ath_hal_set_channels(ah, chans, nchans, 4856 reg->country, reg->regdomain); 4857 if (status != HAL_OK) { 4858 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n", 4859 __func__, status); 4860 return EINVAL; /* XXX */ 4861 } 4862 4863 return 0; 4864} 4865 4866static void 4867ath_getradiocaps(struct ieee80211com *ic, 4868 int maxchans, int *nchans, struct ieee80211_channel chans[]) 4869{ 4870 struct ath_softc *sc = ic->ic_ifp->if_softc; 4871 struct ath_hal *ah = sc->sc_ah; 4872 4873 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n", 4874 __func__, SKU_DEBUG, CTRY_DEFAULT); 4875 4876 /* XXX check return */ 4877 (void) ath_hal_getchannels(ah, chans, maxchans, nchans, 4878 HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE); 4879 4880} 4881 4882static int 4883ath_getchannels(struct ath_softc *sc) 4884{ 4885 struct ifnet *ifp = sc->sc_ifp; 4886 struct ieee80211com *ic = ifp->if_l2com; 4887 struct ath_hal *ah = sc->sc_ah; 4888 HAL_STATUS status; 4889 4890 /* 4891 * Collect channel set based on EEPROM contents. 4892 */ 4893 status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX, 4894 &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE); 4895 if (status != HAL_OK) { 4896 if_printf(ifp, "%s: unable to collect channel list from hal, " 4897 "status %d\n", __func__, status); 4898 return EINVAL; 4899 } 4900 (void) ath_hal_getregdomain(ah, &sc->sc_eerd); 4901 ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */ 4902 /* XXX map Atheros sku's to net80211 SKU's */ 4903 /* XXX net80211 types too small */ 4904 ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd; 4905 ic->ic_regdomain.country = (uint16_t) sc->sc_eecc; 4906 ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */ 4907 ic->ic_regdomain.isocc[1] = ' '; 4908 4909 ic->ic_regdomain.ecm = 1; 4910 ic->ic_regdomain.location = 'I'; 4911 4912 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 4913 "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n", 4914 __func__, sc->sc_eerd, sc->sc_eecc, 4915 ic->ic_regdomain.regdomain, ic->ic_regdomain.country, 4916 ic->ic_regdomain.location, ic->ic_regdomain.ecm ? 
" ecm" : ""); 4917 return 0; 4918} 4919 4920static int 4921ath_rate_setup(struct ath_softc *sc, u_int mode) 4922{ 4923 struct ath_hal *ah = sc->sc_ah; 4924 const HAL_RATE_TABLE *rt; 4925 4926 switch (mode) { 4927 case IEEE80211_MODE_11A: 4928 rt = ath_hal_getratetable(ah, HAL_MODE_11A); 4929 break; 4930 case IEEE80211_MODE_HALF: 4931 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE); 4932 break; 4933 case IEEE80211_MODE_QUARTER: 4934 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE); 4935 break; 4936 case IEEE80211_MODE_11B: 4937 rt = ath_hal_getratetable(ah, HAL_MODE_11B); 4938 break; 4939 case IEEE80211_MODE_11G: 4940 rt = ath_hal_getratetable(ah, HAL_MODE_11G); 4941 break; 4942 case IEEE80211_MODE_TURBO_A: 4943 rt = ath_hal_getratetable(ah, HAL_MODE_108A); 4944 break; 4945 case IEEE80211_MODE_TURBO_G: 4946 rt = ath_hal_getratetable(ah, HAL_MODE_108G); 4947 break; 4948 case IEEE80211_MODE_STURBO_A: 4949 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); 4950 break; 4951 case IEEE80211_MODE_11NA: 4952 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20); 4953 break; 4954 case IEEE80211_MODE_11NG: 4955 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20); 4956 break; 4957 default: 4958 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", 4959 __func__, mode); 4960 return 0; 4961 } 4962 sc->sc_rates[mode] = rt; 4963 return (rt != NULL); 4964} 4965 4966static void 4967ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 4968{ 4969#define N(a) (sizeof(a)/sizeof(a[0])) 4970 /* NB: on/off times from the Atheros NDIS driver, w/ permission */ 4971 static const struct { 4972 u_int rate; /* tx/rx 802.11 rate */ 4973 u_int16_t timeOn; /* LED on time (ms) */ 4974 u_int16_t timeOff; /* LED off time (ms) */ 4975 } blinkrates[] = { 4976 { 108, 40, 10 }, 4977 { 96, 44, 11 }, 4978 { 72, 50, 13 }, 4979 { 48, 57, 14 }, 4980 { 36, 67, 16 }, 4981 { 24, 80, 20 }, 4982 { 22, 100, 25 }, 4983 { 18, 133, 34 }, 4984 { 12, 160, 40 }, 4985 { 10, 200, 50 }, 4986 { 6, 240, 58 }, 4987 { 4, 267, 66 }, 4988 { 2, 400, 100 }, 4989 { 0, 500, 130 }, 4990 /* XXX half/quarter rates */ 4991 }; 4992 const HAL_RATE_TABLE *rt; 4993 int i, j; 4994 4995 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); 4996 rt = sc->sc_rates[mode]; 4997 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); 4998 for (i = 0; i < rt->rateCount; i++) { 4999 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 5000 if (rt->info[i].phy != IEEE80211_T_HT) 5001 sc->sc_rixmap[ieeerate] = i; 5002 else 5003 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i; 5004 } 5005 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); 5006 for (i = 0; i < N(sc->sc_hwmap); i++) { 5007 if (i >= rt->rateCount) { 5008 sc->sc_hwmap[i].ledon = (500 * hz) / 1000; 5009 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000; 5010 continue; 5011 } 5012 sc->sc_hwmap[i].ieeerate = 5013 rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 5014 if (rt->info[i].phy == IEEE80211_T_HT) 5015 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS; 5016 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; 5017 if (rt->info[i].shortPreamble || 5018 rt->info[i].phy == IEEE80211_T_OFDM) 5019 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE; 5020 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags; 5021 for (j = 0; j < N(blinkrates)-1; j++) 5022 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate) 5023 break; 5024 /* NB: this uses the last entry if the rate isn't found */ 5025 /* XXX beware of overlow */ 5026 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000; 5027 
sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000; 5028 } 5029 sc->sc_currates = rt; 5030 sc->sc_curmode = mode; 5031 /* 5032 * All protection frames are transmited at 2Mb/s for 5033 * 11g, otherwise at 1Mb/s. 5034 */ 5035 if (mode == IEEE80211_MODE_11G) 5036 sc->sc_protrix = ath_tx_findrix(sc, 2*2); 5037 else 5038 sc->sc_protrix = ath_tx_findrix(sc, 2*1); 5039 /* NB: caller is responsible for resetting rate control state */ 5040#undef N 5041} 5042 5043static void 5044ath_watchdog(void *arg) 5045{ 5046 struct ath_softc *sc = arg; 5047 int do_reset = 0; 5048 5049 if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) { 5050 struct ifnet *ifp = sc->sc_ifp; 5051 uint32_t hangs; 5052 5053 if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) && 5054 hangs != 0) { 5055 if_printf(ifp, "%s hang detected (0x%x)\n", 5056 hangs & 0xff ? "bb" : "mac", hangs); 5057 } else 5058 if_printf(ifp, "device timeout\n"); 5059 do_reset = 1; 5060 ifp->if_oerrors++; 5061 sc->sc_stats.ast_watchdog++; 5062 } 5063 5064 /* 5065 * We can't hold the lock across the ath_reset() call. 5066 * 5067 * And since this routine can't hold a lock and sleep, 5068 * do the reset deferred. 5069 */ 5070 if (do_reset) { 5071 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); 5072 } 5073 5074 callout_schedule(&sc->sc_wd_ch, hz); 5075} 5076 5077/* 5078 * Fetch the rate control statistics for the given node. 5079 */ 5080static int 5081ath_ioctl_ratestats(struct ath_softc *sc, struct ath_rateioctl *rs) 5082{ 5083 struct ath_node *an; 5084 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 5085 struct ieee80211_node *ni; 5086 int error = 0; 5087 5088 /* Perform a lookup on the given node */ 5089 ni = ieee80211_find_node(&ic->ic_sta, rs->is_u.macaddr); 5090 if (ni == NULL) { 5091 error = EINVAL; 5092 goto bad; 5093 } 5094 5095 /* Lock the ath_node */ 5096 an = ATH_NODE(ni); 5097 ATH_NODE_LOCK(an); 5098 5099 /* Fetch the rate control stats for this node */ 5100 error = ath_rate_fetch_node_stats(sc, an, rs); 5101 5102 /* No matter what happens here, just drop through */ 5103 5104 /* Unlock the ath_node */ 5105 ATH_NODE_UNLOCK(an); 5106 5107 /* Unref the node */ 5108 ieee80211_node_decref(ni); 5109 5110bad: 5111 return (error); 5112} 5113 5114#ifdef ATH_DIAGAPI 5115/* 5116 * Diagnostic interface to the HAL. This is used by various 5117 * tools to do things like retrieve register contents for 5118 * debugging. The mechanism is intentionally opaque so that 5119 * it can change frequently w/o concern for compatiblity. 5120 */ 5121static int 5122ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad) 5123{ 5124 struct ath_hal *ah = sc->sc_ah; 5125 u_int id = ad->ad_id & ATH_DIAG_ID; 5126 void *indata = NULL; 5127 void *outdata = NULL; 5128 u_int32_t insize = ad->ad_in_size; 5129 u_int32_t outsize = ad->ad_out_size; 5130 int error = 0; 5131 5132 if (ad->ad_id & ATH_DIAG_IN) { 5133 /* 5134 * Copy in data. 5135 */ 5136 indata = malloc(insize, M_TEMP, M_NOWAIT); 5137 if (indata == NULL) { 5138 error = ENOMEM; 5139 goto bad; 5140 } 5141 error = copyin(ad->ad_in_data, indata, insize); 5142 if (error) 5143 goto bad; 5144 } 5145 if (ad->ad_id & ATH_DIAG_DYN) { 5146 /* 5147 * Allocate a buffer for the results (otherwise the HAL 5148 * returns a pointer to a buffer where we can read the 5149 * results). Note that we depend on the HAL leaving this 5150 * pointer for us to use below in reclaiming the buffer; 5151 * may want to be more defensive. 
5152 */ 5153 outdata = malloc(outsize, M_TEMP, M_NOWAIT); 5154 if (outdata == NULL) { 5155 error = ENOMEM; 5156 goto bad; 5157 } 5158 } 5159 if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) { 5160 if (outsize < ad->ad_out_size) 5161 ad->ad_out_size = outsize; 5162 if (outdata != NULL) 5163 error = copyout(outdata, ad->ad_out_data, 5164 ad->ad_out_size); 5165 } else { 5166 error = EINVAL; 5167 } 5168bad: 5169 if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL) 5170 free(indata, M_TEMP); 5171 if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL) 5172 free(outdata, M_TEMP); 5173 return error; 5174} 5175#endif /* ATH_DIAGAPI */ 5176 5177static int 5178ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 5179{ 5180#define IS_RUNNING(ifp) \ 5181 ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) 5182 struct ath_softc *sc = ifp->if_softc; 5183 struct ieee80211com *ic = ifp->if_l2com; 5184 struct ifreq *ifr = (struct ifreq *)data; 5185 const HAL_RATE_TABLE *rt; 5186 int error = 0; 5187 5188 switch (cmd) { 5189 case SIOCSIFFLAGS: 5190 ATH_LOCK(sc); 5191 if (IS_RUNNING(ifp)) { 5192 /* 5193 * To avoid rescanning another access point, 5194 * do not call ath_init() here. Instead, 5195 * only reflect promisc mode settings. 5196 */ 5197 ath_mode_init(sc); 5198 } else if (ifp->if_flags & IFF_UP) { 5199 /* 5200 * Beware of being called during attach/detach 5201 * to reset promiscuous mode. In that case we 5202 * will still be marked UP but not RUNNING. 5203 * However trying to re-init the interface 5204 * is the wrong thing to do as we've already 5205 * torn down much of our state. There's 5206 * probably a better way to deal with this. 5207 */ 5208 if (!sc->sc_invalid) 5209 ath_init(sc); /* XXX lose error */ 5210 } else { 5211 ath_stop_locked(ifp); 5212#ifdef notyet 5213 /* XXX must wakeup in places like ath_vap_delete */ 5214 if (!sc->sc_invalid) 5215 ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP); 5216#endif 5217 } 5218 ATH_UNLOCK(sc); 5219 break; 5220 case SIOCGIFMEDIA: 5221 case SIOCSIFMEDIA: 5222 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 5223 break; 5224 case SIOCGATHSTATS: 5225 /* NB: embed these numbers to get a consistent view */ 5226 sc->sc_stats.ast_tx_packets = ifp->if_opackets; 5227 sc->sc_stats.ast_rx_packets = ifp->if_ipackets; 5228 sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi); 5229 sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi); 5230#ifdef IEEE80211_SUPPORT_TDMA 5231 sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap); 5232 sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam); 5233#endif 5234 rt = sc->sc_currates; 5235 sc->sc_stats.ast_tx_rate = 5236 rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC; 5237 if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT) 5238 sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS; 5239 return copyout(&sc->sc_stats, 5240 ifr->ifr_data, sizeof (sc->sc_stats)); 5241 case SIOCGATHAGSTATS: 5242 return copyout(&sc->sc_aggr_stats, 5243 ifr->ifr_data, sizeof (sc->sc_aggr_stats)); 5244 case SIOCZATHSTATS: 5245 error = priv_check(curthread, PRIV_DRIVER); 5246 if (error == 0) { 5247 memset(&sc->sc_stats, 0, sizeof(sc->sc_stats)); 5248 memset(&sc->sc_aggr_stats, 0, 5249 sizeof(sc->sc_aggr_stats)); 5250 memset(&sc->sc_intr_stats, 0, 5251 sizeof(sc->sc_intr_stats)); 5252 } 5253 break; 5254#ifdef ATH_DIAGAPI 5255 case SIOCGATHDIAG: 5256 error = ath_ioctl_diag(sc, (struct ath_diag *) ifr); 5257 break; 5258 case SIOCGATHPHYERR: 5259 error = ath_ioctl_phyerr(sc,(struct 
ath_diag*) ifr); 5260 break; 5261#endif 5262 case SIOCGATHNODERATESTATS: 5263 error = ath_ioctl_ratestats(sc, (struct ath_rateioctl *) ifr); 5264 break; 5265 case SIOCGIFADDR: 5266 error = ether_ioctl(ifp, cmd, data); 5267 break; 5268 default: 5269 error = EINVAL; 5270 break; 5271 } 5272 return error; 5273#undef IS_RUNNING 5274} 5275 5276/* 5277 * Announce various information on device/driver attach. 5278 */ 5279static void 5280ath_announce(struct ath_softc *sc) 5281{ 5282 struct ifnet *ifp = sc->sc_ifp; 5283 struct ath_hal *ah = sc->sc_ah; 5284 5285 if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n", 5286 ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev, 5287 ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf); 5288 if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n", 5289 ah->ah_analog2GhzRev, ah->ah_analog5GhzRev); 5290 if (bootverbose) { 5291 int i; 5292 for (i = 0; i <= WME_AC_VO; i++) { 5293 struct ath_txq *txq = sc->sc_ac2q[i]; 5294 if_printf(ifp, "Use hw queue %u for %s traffic\n", 5295 txq->axq_qnum, ieee80211_wme_acnames[i]); 5296 } 5297 if_printf(ifp, "Use hw queue %u for CAB traffic\n", 5298 sc->sc_cabq->axq_qnum); 5299 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq); 5300 } 5301 if (ath_rxbuf != ATH_RXBUF) 5302 if_printf(ifp, "using %u rx buffers\n", ath_rxbuf); 5303 if (ath_txbuf != ATH_TXBUF) 5304 if_printf(ifp, "using %u tx buffers\n", ath_txbuf); 5305 if (sc->sc_mcastkey && bootverbose) 5306 if_printf(ifp, "using multicast key search\n"); 5307} 5308 5309static void 5310ath_dfs_tasklet(void *p, int npending) 5311{ 5312 struct ath_softc *sc = (struct ath_softc *) p; 5313 struct ifnet *ifp = sc->sc_ifp; 5314 struct ieee80211com *ic = ifp->if_l2com; 5315 5316 /* 5317 * If previous processing has found a radar event, 5318 * signal this to the net80211 layer to begin DFS 5319 * processing. 5320 */ 5321 if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) { 5322 /* DFS event found, initiate channel change */ 5323 /* 5324 * XXX doesn't currently tell us whether the event 5325 * XXX was found in the primary or extension 5326 * XXX channel! 5327 */ 5328 IEEE80211_LOCK(ic); 5329 ieee80211_dfs_notify_radar(ic, sc->sc_curchan); 5330 IEEE80211_UNLOCK(ic); 5331 } 5332} 5333 5334MODULE_VERSION(if_ath, 1); 5335MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */ 5336#if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ) 5337MODULE_DEPEND(if_ath, alq, 1, 1, 1); 5338#endif 5339
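
/*
 * Usage sketch for the SIOCGATHSTATS case in ath_ioctl() above: the driver
 * simply copyout()s sc->sc_stats into the buffer that ifr_data points at,
 * so a userland consumer follows the usual ifreq pattern. This is only a
 * minimal illustrative sketch, not code taken from the driver or its tools;
 * it assumes the SIOCGATHSTATS and struct ath_stats definitions from
 * dev/ath/if_athioctl.h and a hypothetical interface named "ath0".
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *	#include <stdio.h>
 *	#include <dev/ath/if_athioctl.h>
 *
 *	struct ath_stats stats;
 *	struct ifreq ifr;
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "ath0", sizeof(ifr.ifr_name));
 *	ifr.ifr_data = (caddr_t) &stats;	// driver copies sc_stats here
 *	if (s >= 0 && ioctl(s, SIOCGATHSTATS, &ifr) == 0)
 *		printf("tx %u rx %u\n", stats.ast_tx_packets,
 *		    stats.ast_rx_packets);
 */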