if_ath.c revision 238884
/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 238884 2012-07-29 08:52:32Z adrian $");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>	/* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_rx_edma.h>
#include <dev/ath/if_ath_tx_edma.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

/*
 * ATH_BCBUF determines the number of vap's that can transmit
 * beacons and also (currently) the number of vap's that can
 * have unique mac addresses/bssid.  When staggering beacons
 * 4 is probably a good max as otherwise the beacons become
 * very closely spaced and there is limited time for cab q traffic
 * to go out.  You can burst beacons instead but that is not good
 * for stations in power save and at some point you really want
 * another radio (and channel).
 *
 * The limit on the number of mac addresses is tied to our use of
 * the U/L bit and tracking addresses in a byte; it would be
 * worthwhile to allow more for applications like proxy sta.
 */
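
/*
 * Illustrative sketch (not part of the driver logic): assign_address()
 * later in this file derives the address for BSSID slot 'i' by setting
 * the U/L (locally administered) bit and folding the slot index into
 * the first octet of the EEPROM address, roughly:
 *
 *	mac[0] |= (i << 2) | 0x2;
 *
 * which is why addresses are tracked in a single byte and why the
 * number of unique bssids is capped at 8 here.
 */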
CTASSERT(ATH_BCBUF <= 8);

static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_vap_delete(struct ieee80211vap *);
static void	ath_init(void *);
static void	ath_stop_locked(struct ifnet *);
static void	ath_stop(struct ifnet *);
static int	ath_reset_vap(struct ieee80211vap *, u_long);
static int	ath_media_change(struct ifnet *);
static void	ath_watchdog(void *);
static int	ath_ioctl(struct ifnet *, u_long, caddr_t);
static void	ath_fatal_proc(void *, int);
static void	ath_bmiss_vap(struct ieee80211vap *);
static void	ath_bmiss_proc(void *, int);
static void	ath_key_update_begin(struct ieee80211vap *);
static void	ath_key_update_end(struct ieee80211vap *);
static void	ath_update_mcast(struct ifnet *);
static void	ath_update_promisc(struct ifnet *);
static void	ath_updateslot(struct ifnet *);
static void	ath_bstuck_proc(void *, int);
static void	ath_reset_proc(void *, int);
static int	ath_desc_alloc(struct ath_softc *);
static void	ath_desc_free(struct ath_softc *);
static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
			const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_node_cleanup(struct ieee80211_node *);
static void	ath_node_free(struct ieee80211_node *);
static void	ath_node_getsignal(const struct ieee80211_node *,
			int8_t *, int8_t *);
static void	ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
static int	ath_tx_setup(struct ath_softc *, int, int);
static void	ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
static void	ath_tx_cleanup(struct ath_softc *);
static void	ath_tx_proc_q0(void *, int);
static void	ath_tx_proc_q0123(void *, int);
static void	ath_tx_proc(void *, int);
static void	ath_txq_sched_tasklet(void *, int);
static int	ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
static void	ath_draintxq(struct ath_softc *, ATH_RESET_TYPE reset_type);
static void	ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
static void	ath_scan_start(struct ieee80211com *);
static void	ath_scan_end(struct ieee80211com *);
static void	ath_set_channel(struct ieee80211com *);
#ifdef	ATH_ENABLE_11N
static void	ath_update_chw(struct ieee80211com *);
#endif	/* ATH_ENABLE_11N */
static void	ath_calibrate(void *);
static int	ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	ath_setup_stationkey(struct ieee80211_node *);
static void	ath_newassoc(struct ieee80211_node *, int);
static int	ath_setregdomain(struct ieee80211com *,
		    struct ieee80211_regdomain *, int,
		    struct ieee80211_channel []);
static void	ath_getradiocaps(struct ieee80211com *, int, int *,
		    struct ieee80211_channel []);
static int	ath_getchannels(struct ath_softc *);

static int	ath_rate_setup(struct ath_softc *, u_int mode);
static void	ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);

static void	ath_announce(struct ath_softc *);

static void	ath_dfs_tasklet(void *, int);

#ifdef IEEE80211_SUPPORT_TDMA
#include <dev/ath/if_ath_tdma.h>
#endif

#if 0
#define	TDMA_EP_MULTIPLIER	(1<<10) /* pow2 to optimize out * and / */
#define	TDMA_LPF_LEN		6
#define	TDMA_DUMMY_MARKER	0x127
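/*
 * Illustrative note (this duplicated block is compiled out): the EP
 * macros below implement a fixed-point low-pass filter.  Samples are
 * scaled up by TDMA_EP_MULTIPLIER (1024) on the way in via TDMA_IN()
 * and divided back out with rounding via TDMA_AVG(), e.g.
 * TDMA_AVG(TDMA_IN(5)) == TDMA_EP_RND(5 * 1024, 1024) == 5.
 */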
#define	TDMA_EP_MUL(x, mul)	((x) * (mul))
#define	TDMA_IN(x)		(TDMA_EP_MUL((x), TDMA_EP_MULTIPLIER))
#define	TDMA_LPF(x, y, len) \
    ((x != TDMA_DUMMY_MARKER) ? (((x) * ((len)-1) + (y)) / (len)) : (y))
#define	TDMA_SAMPLE(x, y) do {					\
	x = TDMA_LPF((x), TDMA_IN(y), TDMA_LPF_LEN);		\
} while (0)
#define	TDMA_EP_RND(x,mul) \
	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
#define	TDMA_AVG(x)		TDMA_EP_RND(x, TDMA_EP_MULTIPLIER)
#endif	/* IEEE80211_SUPPORT_TDMA */

SYSCTL_DECL(_hw_ath);

/* XXX validate sysctl values */
static	int ath_longcalinterval = 30;		/* long cals every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
	    0, "long chip calibration interval (secs)");
static	int ath_shortcalinterval = 100;		/* short cals every 100 ms */
SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
	    0, "short chip calibration interval (msecs)");
static	int ath_resetcalinterval = 20*60;	/* reset cal state 20 mins */
SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
	    0, "reset chip calibration results (secs)");
static	int ath_anicalinterval = 100;		/* ANI calibration - 100 msec */
SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
	    0, "ANI calibration (msecs)");

int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
int ath_txbuf_mgmt = ATH_MGMT_TXBUF;	/* # mgmt tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RW, &ath_txbuf_mgmt,
	    0, "tx (mgmt) buffers allocated");
TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt);

int ath_bstuck_threshold = 4;		/* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
	    0, "max missed beacon xmits before chip reset");

MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

#define	HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define	HAL_MODE_HT40 \
	(HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
	HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct ath_hal *ah = NULL;
	HAL_STATUS status;
	int error = 0, i;
	u_int wmodes;
	uint8_t macaddr[IEEE80211_ADDR_LEN];
	int rx_chainmask, tx_chainmask;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto bad;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
		sc->sc_eepromdata, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
			status);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */
#ifdef	ATH_DEBUG
	sc->sc_debug = ath_debug;
299#endif 300 301 /* 302 * Setup the DMA/EDMA functions based on the current 303 * hardware support. 304 * 305 * This is required before the descriptors are allocated. 306 */ 307 if (ath_hal_hasedma(sc->sc_ah)) { 308 sc->sc_isedma = 1; 309 ath_recv_setup_edma(sc); 310 ath_xmit_setup_edma(sc); 311 } else { 312 ath_recv_setup_legacy(sc); 313 ath_xmit_setup_legacy(sc); 314 } 315 316 /* 317 * Check if the MAC has multi-rate retry support. 318 * We do this by trying to setup a fake extended 319 * descriptor. MAC's that don't have support will 320 * return false w/o doing anything. MAC's that do 321 * support it will return true w/o doing anything. 322 */ 323 sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0); 324 325 /* 326 * Check if the device has hardware counters for PHY 327 * errors. If so we need to enable the MIB interrupt 328 * so we can act on stat triggers. 329 */ 330 if (ath_hal_hwphycounters(ah)) 331 sc->sc_needmib = 1; 332 333 /* 334 * Get the hardware key cache size. 335 */ 336 sc->sc_keymax = ath_hal_keycachesize(ah); 337 if (sc->sc_keymax > ATH_KEYMAX) { 338 if_printf(ifp, "Warning, using only %u of %u key cache slots\n", 339 ATH_KEYMAX, sc->sc_keymax); 340 sc->sc_keymax = ATH_KEYMAX; 341 } 342 /* 343 * Reset the key cache since some parts do not 344 * reset the contents on initial power up. 345 */ 346 for (i = 0; i < sc->sc_keymax; i++) 347 ath_hal_keyreset(ah, i); 348 349 /* 350 * Collect the default channel list. 351 */ 352 error = ath_getchannels(sc); 353 if (error != 0) 354 goto bad; 355 356 /* 357 * Setup rate tables for all potential media types. 358 */ 359 ath_rate_setup(sc, IEEE80211_MODE_11A); 360 ath_rate_setup(sc, IEEE80211_MODE_11B); 361 ath_rate_setup(sc, IEEE80211_MODE_11G); 362 ath_rate_setup(sc, IEEE80211_MODE_TURBO_A); 363 ath_rate_setup(sc, IEEE80211_MODE_TURBO_G); 364 ath_rate_setup(sc, IEEE80211_MODE_STURBO_A); 365 ath_rate_setup(sc, IEEE80211_MODE_11NA); 366 ath_rate_setup(sc, IEEE80211_MODE_11NG); 367 ath_rate_setup(sc, IEEE80211_MODE_HALF); 368 ath_rate_setup(sc, IEEE80211_MODE_QUARTER); 369 370 /* NB: setup here so ath_rate_update is happy */ 371 ath_setcurmode(sc, IEEE80211_MODE_11A); 372 373 /* 374 * Allocate TX descriptors and populate the lists. 375 */ 376 error = ath_desc_alloc(sc); 377 if (error != 0) { 378 if_printf(ifp, "failed to allocate TX descriptors: %d\n", 379 error); 380 goto bad; 381 } 382 error = ath_txdma_setup(sc); 383 if (error != 0) { 384 if_printf(ifp, "failed to allocate TX descriptors: %d\n", 385 error); 386 goto bad; 387 } 388 389 /* 390 * Allocate RX descriptors and populate the lists. 
391 */ 392 error = ath_rxdma_setup(sc); 393 if (error != 0) { 394 if_printf(ifp, "failed to allocate RX descriptors: %d\n", 395 error); 396 goto bad; 397 } 398 399 callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0); 400 callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0); 401 402 ATH_TXBUF_LOCK_INIT(sc); 403 404 sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT, 405 taskqueue_thread_enqueue, &sc->sc_tq); 406 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, 407 "%s taskq", ifp->if_xname); 408 409 TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc); 410 TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc); 411 TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc); 412 TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc); 413 TASK_INIT(&sc->sc_txqtask,0, ath_txq_sched_tasklet, sc); 414 TASK_INIT(&sc->sc_fataltask,0, ath_fatal_proc, sc); 415 416 /* 417 * Allocate hardware transmit queues: one queue for 418 * beacon frames and one data queue for each QoS 419 * priority. Note that the hal handles resetting 420 * these queues at the needed time. 421 * 422 * XXX PS-Poll 423 */ 424 sc->sc_bhalq = ath_beaconq_setup(ah); 425 if (sc->sc_bhalq == (u_int) -1) { 426 if_printf(ifp, "unable to setup a beacon xmit queue!\n"); 427 error = EIO; 428 goto bad2; 429 } 430 sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0); 431 if (sc->sc_cabq == NULL) { 432 if_printf(ifp, "unable to setup CAB xmit queue!\n"); 433 error = EIO; 434 goto bad2; 435 } 436 /* NB: insure BK queue is the lowest priority h/w queue */ 437 if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) { 438 if_printf(ifp, "unable to setup xmit queue for %s traffic!\n", 439 ieee80211_wme_acnames[WME_AC_BK]); 440 error = EIO; 441 goto bad2; 442 } 443 if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) || 444 !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) || 445 !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) { 446 /* 447 * Not enough hardware tx queues to properly do WME; 448 * just punt and assign them all to the same h/w queue. 449 * We could do a better job of this if, for example, 450 * we allocate queues when we switch from station to 451 * AP mode. 452 */ 453 if (sc->sc_ac2q[WME_AC_VI] != NULL) 454 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); 455 if (sc->sc_ac2q[WME_AC_BE] != NULL) 456 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); 457 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; 458 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; 459 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; 460 } 461 462 /* 463 * Special case certain configurations. Note the 464 * CAB queue is handled by these specially so don't 465 * include them when checking the txq setup mask. 466 */ 467 switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) { 468 case 0x01: 469 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc); 470 break; 471 case 0x0f: 472 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc); 473 break; 474 default: 475 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc); 476 break; 477 } 478 479 /* 480 * Setup rate control. Some rate control modules 481 * call back to change the anntena state so expose 482 * the necessary entry points. 483 * XXX maybe belongs in struct ath_ratectrl? 484 */ 485 sc->sc_setdefantenna = ath_setdefantenna; 486 sc->sc_rc = ath_rate_attach(sc); 487 if (sc->sc_rc == NULL) { 488 error = EIO; 489 goto bad2; 490 } 491 492 /* Attach DFS module */ 493 if (! 
ath_dfs_attach(sc)) { 494 device_printf(sc->sc_dev, 495 "%s: unable to attach DFS\n", __func__); 496 error = EIO; 497 goto bad2; 498 } 499 500 /* Start DFS processing tasklet */ 501 TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc); 502 503 /* Configure LED state */ 504 sc->sc_blinking = 0; 505 sc->sc_ledstate = 1; 506 sc->sc_ledon = 0; /* low true */ 507 sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */ 508 callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE); 509 510 /* 511 * Don't setup hardware-based blinking. 512 * 513 * Although some NICs may have this configured in the 514 * default reset register values, the user may wish 515 * to alter which pins have which function. 516 * 517 * The reference driver attaches the MAC network LED to GPIO1 and 518 * the MAC power LED to GPIO2. However, the DWA-552 cardbus 519 * NIC has these reversed. 520 */ 521 sc->sc_hardled = (1 == 0); 522 sc->sc_led_net_pin = -1; 523 sc->sc_led_pwr_pin = -1; 524 /* 525 * Auto-enable soft led processing for IBM cards and for 526 * 5211 minipci cards. Users can also manually enable/disable 527 * support with a sysctl. 528 */ 529 sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID); 530 ath_led_config(sc); 531 ath_hal_setledstate(ah, HAL_LED_INIT); 532 533 ifp->if_softc = sc; 534 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; 535 ifp->if_start = ath_start; 536 ifp->if_ioctl = ath_ioctl; 537 ifp->if_init = ath_init; 538 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 539 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 540 IFQ_SET_READY(&ifp->if_snd); 541 542 ic->ic_ifp = ifp; 543 /* XXX not right but it's not used anywhere important */ 544 ic->ic_phytype = IEEE80211_T_OFDM; 545 ic->ic_opmode = IEEE80211_M_STA; 546 ic->ic_caps = 547 IEEE80211_C_STA /* station mode */ 548 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ 549 | IEEE80211_C_HOSTAP /* hostap mode */ 550 | IEEE80211_C_MONITOR /* monitor mode */ 551 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 552 | IEEE80211_C_WDS /* 4-address traffic works */ 553 | IEEE80211_C_MBSS /* mesh point link mode */ 554 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 555 | IEEE80211_C_SHSLOT /* short slot time supported */ 556 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ 557#ifndef ATH_ENABLE_11N 558 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 559#endif 560 | IEEE80211_C_TXFRAG /* handle tx frags */ 561#ifdef ATH_ENABLE_DFS 562 | IEEE80211_C_DFS /* Enable radar detection */ 563#endif 564 ; 565 /* 566 * Query the hal to figure out h/w crypto support. 567 */ 568 if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP)) 569 ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP; 570 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB)) 571 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB; 572 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM)) 573 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM; 574 if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP)) 575 ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP; 576 if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) { 577 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP; 578 /* 579 * Check if h/w does the MIC and/or whether the 580 * separate key cache entries are required to 581 * handle both tx+rx MIC keys. 582 */ 583 if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC)) 584 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 585 /* 586 * If the h/w supports storing tx+rx MIC keys 587 * in one cache slot automatically enable use. 
588 */ 589 if (ath_hal_hastkipsplit(ah) || 590 !ath_hal_settkipsplit(ah, AH_FALSE)) 591 sc->sc_splitmic = 1; 592 /* 593 * If the h/w can do TKIP MIC together with WME then 594 * we use it; otherwise we force the MIC to be done 595 * in software by the net80211 layer. 596 */ 597 if (ath_hal_haswmetkipmic(ah)) 598 sc->sc_wmetkipmic = 1; 599 } 600 sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR); 601 /* 602 * Check for multicast key search support. 603 */ 604 if (ath_hal_hasmcastkeysearch(sc->sc_ah) && 605 !ath_hal_getmcastkeysearch(sc->sc_ah)) { 606 ath_hal_setmcastkeysearch(sc->sc_ah, 1); 607 } 608 sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah); 609 /* 610 * Mark key cache slots associated with global keys 611 * as in use. If we knew TKIP was not to be used we 612 * could leave the +32, +64, and +32+64 slots free. 613 */ 614 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 615 setbit(sc->sc_keymap, i); 616 setbit(sc->sc_keymap, i+64); 617 if (sc->sc_splitmic) { 618 setbit(sc->sc_keymap, i+32); 619 setbit(sc->sc_keymap, i+32+64); 620 } 621 } 622 /* 623 * TPC support can be done either with a global cap or 624 * per-packet support. The latter is not available on 625 * all parts. We're a bit pedantic here as all parts 626 * support a global cap. 627 */ 628 if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah)) 629 ic->ic_caps |= IEEE80211_C_TXPMGT; 630 631 /* 632 * Mark WME capability only if we have sufficient 633 * hardware queues to do proper priority scheduling. 634 */ 635 if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK]) 636 ic->ic_caps |= IEEE80211_C_WME; 637 /* 638 * Check for misc other capabilities. 639 */ 640 if (ath_hal_hasbursting(ah)) 641 ic->ic_caps |= IEEE80211_C_BURST; 642 sc->sc_hasbmask = ath_hal_hasbssidmask(ah); 643 sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah); 644 sc->sc_hastsfadd = ath_hal_hastsfadjust(ah); 645 sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah); 646 sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah); 647 if (ath_hal_hasfastframes(ah)) 648 ic->ic_caps |= IEEE80211_C_FF; 649 wmodes = ath_hal_getwirelessmodes(ah); 650 if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO)) 651 ic->ic_caps |= IEEE80211_C_TURBOP; 652#ifdef IEEE80211_SUPPORT_TDMA 653 if (ath_hal_macversion(ah) > 0x78) { 654 ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */ 655 ic->ic_tdma_update = ath_tdma_update; 656 } 657#endif 658 659 /* 660 * TODO: enforce that at least this many frames are available 661 * in the txbuf list before allowing data frames (raw or 662 * otherwise) to be transmitted. 663 */ 664 sc->sc_txq_data_minfree = 10; 665 /* 666 * Leave this as default to maintain legacy behaviour. 667 * Shortening the cabq/mcastq may end up causing some 668 * undesirable behaviour. 669 */ 670 sc->sc_txq_mcastq_maxdepth = ath_txbuf; 671 672 /* 673 * Allow the TX and RX chainmasks to be overridden by 674 * environment variables and/or device.hints. 675 * 676 * This must be done early - before the hardware is 677 * calibrated or before the 802.11n stream calculation 678 * is done. 
679 */ 680 if (resource_int_value(device_get_name(sc->sc_dev), 681 device_get_unit(sc->sc_dev), "rx_chainmask", 682 &rx_chainmask) == 0) { 683 device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n", 684 rx_chainmask); 685 (void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask); 686 } 687 if (resource_int_value(device_get_name(sc->sc_dev), 688 device_get_unit(sc->sc_dev), "tx_chainmask", 689 &tx_chainmask) == 0) { 690 device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n", 691 tx_chainmask); 692 (void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask); 693 } 694 695#ifdef ATH_ENABLE_11N 696 /* 697 * Query HT capabilities 698 */ 699 if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK && 700 (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) { 701 int rxs, txs; 702 703 device_printf(sc->sc_dev, "[HT] enabling HT modes\n"); 704 ic->ic_htcaps = IEEE80211_HTC_HT /* HT operation */ 705 | IEEE80211_HTC_AMPDU /* A-MPDU tx/rx */ 706 | IEEE80211_HTC_AMSDU /* A-MSDU tx/rx */ 707 | IEEE80211_HTCAP_MAXAMSDU_3839 708 /* max A-MSDU length */ 709 | IEEE80211_HTCAP_SMPS_OFF; /* SM power save off */ 710 ; 711 712 /* 713 * Enable short-GI for HT20 only if the hardware 714 * advertises support. 715 * Notably, anything earlier than the AR9287 doesn't. 716 */ 717 if ((ath_hal_getcapability(ah, 718 HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) && 719 (wmodes & HAL_MODE_HT20)) { 720 device_printf(sc->sc_dev, 721 "[HT] enabling short-GI in 20MHz mode\n"); 722 ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20; 723 } 724 725 if (wmodes & HAL_MODE_HT40) 726 ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40 727 | IEEE80211_HTCAP_SHORTGI40; 728 729 /* 730 * TX/RX streams need to be taken into account when 731 * negotiating which MCS rates it'll receive and 732 * what MCS rates are available for TX. 733 */ 734 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs); 735 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs); 736 737 ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask); 738 ath_hal_gettxchainmask(ah, &sc->sc_txchainmask); 739 740 ic->ic_txstream = txs; 741 ic->ic_rxstream = rxs; 742 743 (void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1, 744 &sc->sc_rts_aggr_limit); 745 if (sc->sc_rts_aggr_limit != (64 * 1024)) 746 device_printf(sc->sc_dev, 747 "[HT] RTS aggregates limited to %d KiB\n", 748 sc->sc_rts_aggr_limit / 1024); 749 750 device_printf(sc->sc_dev, 751 "[HT] %d RX streams; %d TX streams\n", rxs, txs); 752 } 753#endif 754 755 /* 756 * Initial aggregation settings. 757 */ 758 sc->sc_hwq_limit = ATH_AGGR_MIN_QDEPTH; 759 sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW; 760 sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH; 761 762 /* 763 * Check if the hardware requires PCI register serialisation. 764 * Some of the Owl based MACs require this. 765 */ 766 if (mp_ncpus > 1 && 767 ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR, 768 0, NULL) == HAL_OK) { 769 sc->sc_ah->ah_config.ah_serialise_reg_war = 1; 770 device_printf(sc->sc_dev, 771 "Enabling register serialisation\n"); 772 } 773 774 /* 775 * Indicate we need the 802.11 header padded to a 776 * 32-bit boundary for 4-address and QoS frames. 777 */ 778 ic->ic_flags |= IEEE80211_F_DATAPAD; 779 780 /* 781 * Query the hal about antenna support. 782 */ 783 sc->sc_defant = ath_hal_getdefantenna(ah); 784 785 /* 786 * Not all chips have the VEOL support we want to 787 * use with IBSS beacons; check here for it. 
788 */ 789 sc->sc_hasveol = ath_hal_hasveol(ah); 790 791 /* get mac address from hardware */ 792 ath_hal_getmac(ah, macaddr); 793 if (sc->sc_hasbmask) 794 ath_hal_getbssidmask(ah, sc->sc_hwbssidmask); 795 796 /* NB: used to size node table key mapping array */ 797 ic->ic_max_keyix = sc->sc_keymax; 798 /* call MI attach routine. */ 799 ieee80211_ifattach(ic, macaddr); 800 ic->ic_setregdomain = ath_setregdomain; 801 ic->ic_getradiocaps = ath_getradiocaps; 802 sc->sc_opmode = HAL_M_STA; 803 804 /* override default methods */ 805 ic->ic_newassoc = ath_newassoc; 806 ic->ic_updateslot = ath_updateslot; 807 ic->ic_wme.wme_update = ath_wme_update; 808 ic->ic_vap_create = ath_vap_create; 809 ic->ic_vap_delete = ath_vap_delete; 810 ic->ic_raw_xmit = ath_raw_xmit; 811 ic->ic_update_mcast = ath_update_mcast; 812 ic->ic_update_promisc = ath_update_promisc; 813 ic->ic_node_alloc = ath_node_alloc; 814 sc->sc_node_free = ic->ic_node_free; 815 ic->ic_node_free = ath_node_free; 816 sc->sc_node_cleanup = ic->ic_node_cleanup; 817 ic->ic_node_cleanup = ath_node_cleanup; 818 ic->ic_node_getsignal = ath_node_getsignal; 819 ic->ic_scan_start = ath_scan_start; 820 ic->ic_scan_end = ath_scan_end; 821 ic->ic_set_channel = ath_set_channel; 822#ifdef ATH_ENABLE_11N 823 /* 802.11n specific - but just override anyway */ 824 sc->sc_addba_request = ic->ic_addba_request; 825 sc->sc_addba_response = ic->ic_addba_response; 826 sc->sc_addba_stop = ic->ic_addba_stop; 827 sc->sc_bar_response = ic->ic_bar_response; 828 sc->sc_addba_response_timeout = ic->ic_addba_response_timeout; 829 830 ic->ic_addba_request = ath_addba_request; 831 ic->ic_addba_response = ath_addba_response; 832 ic->ic_addba_response_timeout = ath_addba_response_timeout; 833 ic->ic_addba_stop = ath_addba_stop; 834 ic->ic_bar_response = ath_bar_response; 835 836 ic->ic_update_chw = ath_update_chw; 837#endif /* ATH_ENABLE_11N */ 838 839#ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT 840 /* 841 * There's one vendor bitmap entry in the RX radiotap 842 * header; make sure that's taken into account. 843 */ 844 ieee80211_radiotap_attachv(ic, 845 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0, 846 ATH_TX_RADIOTAP_PRESENT, 847 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1, 848 ATH_RX_RADIOTAP_PRESENT); 849#else 850 /* 851 * No vendor bitmap/extensions are present. 852 */ 853 ieee80211_radiotap_attach(ic, 854 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 855 ATH_TX_RADIOTAP_PRESENT, 856 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 857 ATH_RX_RADIOTAP_PRESENT); 858#endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */ 859 860 /* 861 * Setup dynamic sysctl's now that country code and 862 * regdomain are available from the hal. 
863 */ 864 ath_sysctlattach(sc); 865 ath_sysctl_stats_attach(sc); 866 ath_sysctl_hal_attach(sc); 867 868 if (bootverbose) 869 ieee80211_announce(ic); 870 ath_announce(sc); 871 return 0; 872bad2: 873 ath_tx_cleanup(sc); 874 ath_desc_free(sc); 875 ath_txdma_teardown(sc); 876 ath_rxdma_teardown(sc); 877bad: 878 if (ah) 879 ath_hal_detach(ah); 880 if (ifp != NULL) 881 if_free(ifp); 882 sc->sc_invalid = 1; 883 return error; 884} 885 886int 887ath_detach(struct ath_softc *sc) 888{ 889 struct ifnet *ifp = sc->sc_ifp; 890 891 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 892 __func__, ifp->if_flags); 893 894 /* 895 * NB: the order of these is important: 896 * o stop the chip so no more interrupts will fire 897 * o call the 802.11 layer before detaching the hal to 898 * insure callbacks into the driver to delete global 899 * key cache entries can be handled 900 * o free the taskqueue which drains any pending tasks 901 * o reclaim the tx queue data structures after calling 902 * the 802.11 layer as we'll get called back to reclaim 903 * node state and potentially want to use them 904 * o to cleanup the tx queues the hal is called, so detach 905 * it last 906 * Other than that, it's straightforward... 907 */ 908 ath_stop(ifp); 909 ieee80211_ifdetach(ifp->if_l2com); 910 taskqueue_free(sc->sc_tq); 911#ifdef ATH_TX99_DIAG 912 if (sc->sc_tx99 != NULL) 913 sc->sc_tx99->detach(sc->sc_tx99); 914#endif 915 ath_rate_detach(sc->sc_rc); 916 917 ath_dfs_detach(sc); 918 ath_desc_free(sc); 919 ath_txdma_teardown(sc); 920 ath_rxdma_teardown(sc); 921 ath_tx_cleanup(sc); 922 ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */ 923 if_free(ifp); 924 925 return 0; 926} 927 928/* 929 * MAC address handling for multiple BSS on the same radio. 930 * The first vap uses the MAC address from the EEPROM. For 931 * subsequent vap's we set the U/L bit (bit 1) in the MAC 932 * address and use the next six bits as an index. 933 */ 934static void 935assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone) 936{ 937 int i; 938 939 if (clone && sc->sc_hasbmask) { 940 /* NB: we only do this if h/w supports multiple bssid */ 941 for (i = 0; i < 8; i++) 942 if ((sc->sc_bssidmask & (1<<i)) == 0) 943 break; 944 if (i != 0) 945 mac[0] |= (i << 2)|0x2; 946 } else 947 i = 0; 948 sc->sc_bssidmask |= 1<<i; 949 sc->sc_hwbssidmask[0] &= ~mac[0]; 950 if (i == 0) 951 sc->sc_nbssid0++; 952} 953 954static void 955reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN]) 956{ 957 int i = mac[0] >> 2; 958 uint8_t mask; 959 960 if (i != 0 || --sc->sc_nbssid0 == 0) { 961 sc->sc_bssidmask &= ~(1<<i); 962 /* recalculate bssid mask from remaining addresses */ 963 mask = 0xff; 964 for (i = 1; i < 8; i++) 965 if (sc->sc_bssidmask & (1<<i)) 966 mask &= ~((i<<2)|0x2); 967 sc->sc_hwbssidmask[0] |= mask; 968 } 969} 970 971/* 972 * Assign a beacon xmit slot. We try to space out 973 * assignments so when beacons are staggered the 974 * traffic coming out of the cab q has maximal time 975 * to go out before the next beacon is scheduled. 
976 */ 977static int 978assign_bslot(struct ath_softc *sc) 979{ 980 u_int slot, free; 981 982 free = 0; 983 for (slot = 0; slot < ATH_BCBUF; slot++) 984 if (sc->sc_bslot[slot] == NULL) { 985 if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL && 986 sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL) 987 return slot; 988 free = slot; 989 /* NB: keep looking for a double slot */ 990 } 991 return free; 992} 993 994static struct ieee80211vap * 995ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 996 enum ieee80211_opmode opmode, int flags, 997 const uint8_t bssid[IEEE80211_ADDR_LEN], 998 const uint8_t mac0[IEEE80211_ADDR_LEN]) 999{ 1000 struct ath_softc *sc = ic->ic_ifp->if_softc; 1001 struct ath_vap *avp; 1002 struct ieee80211vap *vap; 1003 uint8_t mac[IEEE80211_ADDR_LEN]; 1004 int needbeacon, error; 1005 enum ieee80211_opmode ic_opmode; 1006 1007 avp = (struct ath_vap *) malloc(sizeof(struct ath_vap), 1008 M_80211_VAP, M_WAITOK | M_ZERO); 1009 needbeacon = 0; 1010 IEEE80211_ADDR_COPY(mac, mac0); 1011 1012 ATH_LOCK(sc); 1013 ic_opmode = opmode; /* default to opmode of new vap */ 1014 switch (opmode) { 1015 case IEEE80211_M_STA: 1016 if (sc->sc_nstavaps != 0) { /* XXX only 1 for now */ 1017 device_printf(sc->sc_dev, "only 1 sta vap supported\n"); 1018 goto bad; 1019 } 1020 if (sc->sc_nvaps) { 1021 /* 1022 * With multiple vaps we must fall back 1023 * to s/w beacon miss handling. 1024 */ 1025 flags |= IEEE80211_CLONE_NOBEACONS; 1026 } 1027 if (flags & IEEE80211_CLONE_NOBEACONS) { 1028 /* 1029 * Station mode w/o beacons are implemented w/ AP mode. 1030 */ 1031 ic_opmode = IEEE80211_M_HOSTAP; 1032 } 1033 break; 1034 case IEEE80211_M_IBSS: 1035 if (sc->sc_nvaps != 0) { /* XXX only 1 for now */ 1036 device_printf(sc->sc_dev, 1037 "only 1 ibss vap supported\n"); 1038 goto bad; 1039 } 1040 needbeacon = 1; 1041 break; 1042 case IEEE80211_M_AHDEMO: 1043#ifdef IEEE80211_SUPPORT_TDMA 1044 if (flags & IEEE80211_CLONE_TDMA) { 1045 if (sc->sc_nvaps != 0) { 1046 device_printf(sc->sc_dev, 1047 "only 1 tdma vap supported\n"); 1048 goto bad; 1049 } 1050 needbeacon = 1; 1051 flags |= IEEE80211_CLONE_NOBEACONS; 1052 } 1053 /* fall thru... */ 1054#endif 1055 case IEEE80211_M_MONITOR: 1056 if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) { 1057 /* 1058 * Adopt existing mode. Adding a monitor or ahdemo 1059 * vap to an existing configuration is of dubious 1060 * value but should be ok. 1061 */ 1062 /* XXX not right for monitor mode */ 1063 ic_opmode = ic->ic_opmode; 1064 } 1065 break; 1066 case IEEE80211_M_HOSTAP: 1067 case IEEE80211_M_MBSS: 1068 needbeacon = 1; 1069 break; 1070 case IEEE80211_M_WDS: 1071 if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) { 1072 device_printf(sc->sc_dev, 1073 "wds not supported in sta mode\n"); 1074 goto bad; 1075 } 1076 /* 1077 * Silently remove any request for a unique 1078 * bssid; WDS vap's always share the local 1079 * mac address. 1080 */ 1081 flags &= ~IEEE80211_CLONE_BSSID; 1082 if (sc->sc_nvaps == 0) 1083 ic_opmode = IEEE80211_M_HOSTAP; 1084 else 1085 ic_opmode = ic->ic_opmode; 1086 break; 1087 default: 1088 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); 1089 goto bad; 1090 } 1091 /* 1092 * Check that a beacon buffer is available; the code below assumes it. 1093 */ 1094 if (needbeacon & TAILQ_EMPTY(&sc->sc_bbuf)) { 1095 device_printf(sc->sc_dev, "no beacon buffer available\n"); 1096 goto bad; 1097 } 1098 1099 /* STA, AHDEMO? 
*/ 1100 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) { 1101 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID); 1102 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); 1103 } 1104 1105 vap = &avp->av_vap; 1106 /* XXX can't hold mutex across if_alloc */ 1107 ATH_UNLOCK(sc); 1108 error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, 1109 bssid, mac); 1110 ATH_LOCK(sc); 1111 if (error != 0) { 1112 device_printf(sc->sc_dev, "%s: error %d creating vap\n", 1113 __func__, error); 1114 goto bad2; 1115 } 1116 1117 /* h/w crypto support */ 1118 vap->iv_key_alloc = ath_key_alloc; 1119 vap->iv_key_delete = ath_key_delete; 1120 vap->iv_key_set = ath_key_set; 1121 vap->iv_key_update_begin = ath_key_update_begin; 1122 vap->iv_key_update_end = ath_key_update_end; 1123 1124 /* override various methods */ 1125 avp->av_recv_mgmt = vap->iv_recv_mgmt; 1126 vap->iv_recv_mgmt = ath_recv_mgmt; 1127 vap->iv_reset = ath_reset_vap; 1128 vap->iv_update_beacon = ath_beacon_update; 1129 avp->av_newstate = vap->iv_newstate; 1130 vap->iv_newstate = ath_newstate; 1131 avp->av_bmiss = vap->iv_bmiss; 1132 vap->iv_bmiss = ath_bmiss_vap; 1133 1134 /* Set default parameters */ 1135 1136 /* 1137 * Anything earlier than some AR9300 series MACs don't 1138 * support a smaller MPDU density. 1139 */ 1140 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8; 1141 /* 1142 * All NICs can handle the maximum size, however 1143 * AR5416 based MACs can only TX aggregates w/ RTS 1144 * protection when the total aggregate size is <= 8k. 1145 * However, for now that's enforced by the TX path. 1146 */ 1147 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K; 1148 1149 avp->av_bslot = -1; 1150 if (needbeacon) { 1151 /* 1152 * Allocate beacon state and setup the q for buffered 1153 * multicast frames. We know a beacon buffer is 1154 * available because we checked above. 1155 */ 1156 avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf); 1157 TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list); 1158 if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) { 1159 /* 1160 * Assign the vap to a beacon xmit slot. As above 1161 * this cannot fail to find a free one. 1162 */ 1163 avp->av_bslot = assign_bslot(sc); 1164 KASSERT(sc->sc_bslot[avp->av_bslot] == NULL, 1165 ("beacon slot %u not empty", avp->av_bslot)); 1166 sc->sc_bslot[avp->av_bslot] = vap; 1167 sc->sc_nbcnvaps++; 1168 } 1169 if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) { 1170 /* 1171 * Multple vaps are to transmit beacons and we 1172 * have h/w support for TSF adjusting; enable 1173 * use of staggered beacons. 1174 */ 1175 sc->sc_stagbeacons = 1; 1176 } 1177 ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ); 1178 } 1179 1180 ic->ic_opmode = ic_opmode; 1181 if (opmode != IEEE80211_M_WDS) { 1182 sc->sc_nvaps++; 1183 if (opmode == IEEE80211_M_STA) 1184 sc->sc_nstavaps++; 1185 if (opmode == IEEE80211_M_MBSS) 1186 sc->sc_nmeshvaps++; 1187 } 1188 switch (ic_opmode) { 1189 case IEEE80211_M_IBSS: 1190 sc->sc_opmode = HAL_M_IBSS; 1191 break; 1192 case IEEE80211_M_STA: 1193 sc->sc_opmode = HAL_M_STA; 1194 break; 1195 case IEEE80211_M_AHDEMO: 1196#ifdef IEEE80211_SUPPORT_TDMA 1197 if (vap->iv_caps & IEEE80211_C_TDMA) { 1198 sc->sc_tdma = 1; 1199 /* NB: disable tsf adjust */ 1200 sc->sc_stagbeacons = 0; 1201 } 1202 /* 1203 * NB: adhoc demo mode is a pseudo mode; to the hal it's 1204 * just ap mode. 1205 */ 1206 /* fall thru... 
*/ 1207#endif 1208 case IEEE80211_M_HOSTAP: 1209 case IEEE80211_M_MBSS: 1210 sc->sc_opmode = HAL_M_HOSTAP; 1211 break; 1212 case IEEE80211_M_MONITOR: 1213 sc->sc_opmode = HAL_M_MONITOR; 1214 break; 1215 default: 1216 /* XXX should not happen */ 1217 break; 1218 } 1219 if (sc->sc_hastsfadd) { 1220 /* 1221 * Configure whether or not TSF adjust should be done. 1222 */ 1223 ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons); 1224 } 1225 if (flags & IEEE80211_CLONE_NOBEACONS) { 1226 /* 1227 * Enable s/w beacon miss handling. 1228 */ 1229 sc->sc_swbmiss = 1; 1230 } 1231 ATH_UNLOCK(sc); 1232 1233 /* complete setup */ 1234 ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status); 1235 return vap; 1236bad2: 1237 reclaim_address(sc, mac); 1238 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); 1239bad: 1240 free(avp, M_80211_VAP); 1241 ATH_UNLOCK(sc); 1242 return NULL; 1243} 1244 1245static void 1246ath_vap_delete(struct ieee80211vap *vap) 1247{ 1248 struct ieee80211com *ic = vap->iv_ic; 1249 struct ifnet *ifp = ic->ic_ifp; 1250 struct ath_softc *sc = ifp->if_softc; 1251 struct ath_hal *ah = sc->sc_ah; 1252 struct ath_vap *avp = ATH_VAP(vap); 1253 1254 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); 1255 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1256 /* 1257 * Quiesce the hardware while we remove the vap. In 1258 * particular we need to reclaim all references to 1259 * the vap state by any frames pending on the tx queues. 1260 */ 1261 ath_hal_intrset(ah, 0); /* disable interrupts */ 1262 ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */ 1263 /* XXX Do all frames from all vaps/nodes need draining here? */ 1264 ath_stoprecv(sc, 1); /* stop recv side */ 1265 } 1266 1267 ieee80211_vap_detach(vap); 1268 1269 /* 1270 * XXX Danger Will Robinson! Danger! 1271 * 1272 * Because ieee80211_vap_detach() can queue a frame (the station 1273 * diassociate message?) after we've drained the TXQ and 1274 * flushed the software TXQ, we will end up with a frame queued 1275 * to a node whose vap is about to be freed. 1276 * 1277 * To work around this, flush the hardware/software again. 1278 * This may be racy - the ath task may be running and the packet 1279 * may be being scheduled between sw->hw txq. Tsk. 1280 * 1281 * TODO: figure out why a new node gets allocated somewhere around 1282 * here (after the ath_tx_swq() call; and after an ath_stop_locked() 1283 * call!) 1284 */ 1285 1286 ath_draintxq(sc, ATH_RESET_DEFAULT); 1287 1288 ATH_LOCK(sc); 1289 /* 1290 * Reclaim beacon state. Note this must be done before 1291 * the vap instance is reclaimed as we may have a reference 1292 * to it in the buffer for the beacon frame. 1293 */ 1294 if (avp->av_bcbuf != NULL) { 1295 if (avp->av_bslot != -1) { 1296 sc->sc_bslot[avp->av_bslot] = NULL; 1297 sc->sc_nbcnvaps--; 1298 } 1299 ath_beacon_return(sc, avp->av_bcbuf); 1300 avp->av_bcbuf = NULL; 1301 if (sc->sc_nbcnvaps == 0) { 1302 sc->sc_stagbeacons = 0; 1303 if (sc->sc_hastsfadd) 1304 ath_hal_settsfadjust(sc->sc_ah, 0); 1305 } 1306 /* 1307 * Reclaim any pending mcast frames for the vap. 1308 */ 1309 ath_tx_draintxq(sc, &avp->av_mcastq); 1310 ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq); 1311 } 1312 /* 1313 * Update bookkeeping. 
1314 */ 1315 if (vap->iv_opmode == IEEE80211_M_STA) { 1316 sc->sc_nstavaps--; 1317 if (sc->sc_nstavaps == 0 && sc->sc_swbmiss) 1318 sc->sc_swbmiss = 0; 1319 } else if (vap->iv_opmode == IEEE80211_M_HOSTAP || 1320 vap->iv_opmode == IEEE80211_M_MBSS) { 1321 reclaim_address(sc, vap->iv_myaddr); 1322 ath_hal_setbssidmask(ah, sc->sc_hwbssidmask); 1323 if (vap->iv_opmode == IEEE80211_M_MBSS) 1324 sc->sc_nmeshvaps--; 1325 } 1326 if (vap->iv_opmode != IEEE80211_M_WDS) 1327 sc->sc_nvaps--; 1328#ifdef IEEE80211_SUPPORT_TDMA 1329 /* TDMA operation ceases when the last vap is destroyed */ 1330 if (sc->sc_tdma && sc->sc_nvaps == 0) { 1331 sc->sc_tdma = 0; 1332 sc->sc_swbmiss = 0; 1333 } 1334#endif 1335 free(avp, M_80211_VAP); 1336 1337 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1338 /* 1339 * Restart rx+tx machines if still running (RUNNING will 1340 * be reset if we just destroyed the last vap). 1341 */ 1342 if (ath_startrecv(sc) != 0) 1343 if_printf(ifp, "%s: unable to restart recv logic\n", 1344 __func__); 1345 if (sc->sc_beacons) { /* restart beacons */ 1346#ifdef IEEE80211_SUPPORT_TDMA 1347 if (sc->sc_tdma) 1348 ath_tdma_config(sc, NULL); 1349 else 1350#endif 1351 ath_beacon_config(sc, NULL); 1352 } 1353 ath_hal_intrset(ah, sc->sc_imask); 1354 } 1355 ATH_UNLOCK(sc); 1356} 1357 1358void 1359ath_suspend(struct ath_softc *sc) 1360{ 1361 struct ifnet *ifp = sc->sc_ifp; 1362 struct ieee80211com *ic = ifp->if_l2com; 1363 1364 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1365 __func__, ifp->if_flags); 1366 1367 sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0; 1368 1369 ieee80211_suspend_all(ic); 1370 /* 1371 * NB: don't worry about putting the chip in low power 1372 * mode; pci will power off our socket on suspend and 1373 * CardBus detaches the device. 1374 */ 1375 1376 /* 1377 * XXX ensure none of the taskqueues are running 1378 * XXX ensure sc_invalid is 1 1379 * XXX ensure the calibration callout is disabled 1380 */ 1381 1382 /* Disable the PCIe PHY, complete with workarounds */ 1383 ath_hal_enablepcie(sc->sc_ah, 1, 1); 1384} 1385 1386/* 1387 * Reset the key cache since some parts do not reset the 1388 * contents on resume. First we clear all entries, then 1389 * re-load keys that the 802.11 layer assumes are setup 1390 * in h/w. 1391 */ 1392static void 1393ath_reset_keycache(struct ath_softc *sc) 1394{ 1395 struct ifnet *ifp = sc->sc_ifp; 1396 struct ieee80211com *ic = ifp->if_l2com; 1397 struct ath_hal *ah = sc->sc_ah; 1398 int i; 1399 1400 for (i = 0; i < sc->sc_keymax; i++) 1401 ath_hal_keyreset(ah, i); 1402 ieee80211_crypto_reload_keys(ic); 1403} 1404 1405void 1406ath_resume(struct ath_softc *sc) 1407{ 1408 struct ifnet *ifp = sc->sc_ifp; 1409 struct ieee80211com *ic = ifp->if_l2com; 1410 struct ath_hal *ah = sc->sc_ah; 1411 HAL_STATUS status; 1412 1413 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1414 __func__, ifp->if_flags); 1415 1416 /* Re-enable PCIe, re-enable the PCIe bus */ 1417 ath_hal_enablepcie(ah, 0, 0); 1418 1419 /* 1420 * Must reset the chip before we reload the 1421 * keycache as we were powered down on suspend. 1422 */ 1423 ath_hal_reset(ah, sc->sc_opmode, 1424 sc->sc_curchan != NULL ? 
	    sc->sc_curchan : ic->ic_curchan,
	    AH_FALSE, &status);
	ath_reset_keycache(sc);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/* Restore the LED configuration */
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);

	if (sc->sc_resume_up)
		ieee80211_resume_all(ic);

	/* XXX beacons ? */
}

void
ath_shutdown(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	ath_stop(ifp);
	/* NB: no point powering down chip as we're about to reboot */
}

/*
 * Interrupt handler.  Most of the actual processing is deferred.
 */
void
ath_intr(void *arg)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	HAL_INT status = 0;
	uint32_t txqs;

	/*
	 * If we're inside a reset path, just print a warning and
	 * clear the ISR. The reset routine will finish it for us.
	 */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt) {
		HAL_INT status;
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		DPRINTF(sc, ATH_DEBUG_ANY,
		    "%s: in reset, ignoring: status=0x%x\n",
		    __func__, status);
		ATH_PCU_UNLOCK(sc);
		return;
	}

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	if (!ath_hal_intrpend(ah)) {	/* shared irq, not for us */
		ATH_PCU_UNLOCK(sc);
		return;
	}

	if ((ifp->if_flags & IFF_UP) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		HAL_INT status;

		DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
			__func__, ifp->if_flags);
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		ATH_PCU_UNLOCK(sc);
		return;
	}

	/*
	 * Figure out the reason(s) for the interrupt.  Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to insure we only process bits we requested.
	 */
	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
	CTR1(ATH_KTR_INTR, "ath_intr: mask=0x%.8x", status);
#ifdef	ATH_KTR_INTR_DEBUG
	CTR5(ATH_KTR_INTR,
	    "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
	    ah->ah_intrstate[0],
	    ah->ah_intrstate[1],
	    ah->ah_intrstate[2],
	    ah->ah_intrstate[3],
	    ah->ah_intrstate[6]);
#endif

	/* Squirrel away SYNC interrupt debugging */
	if (ah->ah_syncstate != 0) {
		int i;
		for (i = 0; i < 32; i++)
			if (ah->ah_syncstate & (1 << i))
				sc->sc_intr_stats.sync_intr[i]++;
	}

	status &= sc->sc_imask;			/* discard unasked for bits */

	/* Short-circuit un-handled interrupts */
	if (status == 0x0) {
		ATH_PCU_UNLOCK(sc);
		return;
	}

	/*
	 * Take a note that we're inside the interrupt handler, so
	 * the reset routines know to wait.
	 */
	sc->sc_intr_cnt++;
	ATH_PCU_UNLOCK(sc);

	/*
	 * Handle the interrupt.  We won't run concurrent with the reset
	 * or channel change routines as they'll wait for sc_intr_cnt
	 * to be 0 before continuing.
	 */
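	/*
	 * Illustrative sketch (comment only): the reset and channel change
	 * paths drain this reference roughly the way ath_txrx_stop_locked()
	 * later in this file does, e.g.:
	 *
	 *	while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
	 *	    sc->sc_txstart_cnt || sc->sc_intr_cnt)
	 *		msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 1);
	 */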
	if (status & HAL_INT_FATAL) {
		sc->sc_stats.ast_hardware++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
	} else {
		if (status & HAL_INT_SWBA) {
			/*
			 * Software beacon alert--time to send a beacon.
			 * Handle beacon transmission directly; deferring
			 * this is too slow to meet timing constraints
			 * under load.
			 */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma) {
				if (sc->sc_tdmaswba == 0) {
					struct ieee80211com *ic = ifp->if_l2com;
					struct ieee80211vap *vap =
					    TAILQ_FIRST(&ic->ic_vaps);
					ath_tdma_beacon_send(sc, vap);
					sc->sc_tdmaswba =
					    vap->iv_tdma->tdma_bintval;
				} else
					sc->sc_tdmaswba--;
			} else
#endif
			{
				ath_beacon_proc(sc, 0);
#ifdef IEEE80211_SUPPORT_SUPERG
				/*
				 * Schedule the rx taskq in case there's no
				 * traffic so any frames held on the staging
				 * queue are aged and potentially flushed.
				 */
				taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
#endif
			}
		}
		if (status & HAL_INT_RXEOL) {
			int imask;
			CTR0(ATH_KTR_ERR, "ath_intr: RXEOL");
			ATH_PCU_LOCK(sc);
			/*
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			sc->sc_stats.ast_rxeol++;
			/*
			 * Disable RXEOL/RXORN - prevent an interrupt
			 * storm until the PCU logic can be reset.
			 * In case the interface is reset some other
			 * way before "sc_kickpcu" is called, don't
			 * modify sc_imask - that way if it is reset
			 * by a call to ath_reset() somehow, the
			 * interrupt mask will be correctly reprogrammed.
			 */
			imask = sc->sc_imask;
			imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
			ath_hal_intrset(ah, imask);
			/*
			 * Only blank sc_rxlink if we've not yet kicked
			 * the PCU.
			 *
			 * This isn't entirely correct - the correct solution
			 * would be to have a PCU lock and engage that for
			 * the duration of the PCU fiddling; which would include
			 * running the RX process. Otherwise we could end up
			 * messing up the RX descriptor chain and making the
			 * RX desc list much shorter.
			 */
			if (! sc->sc_kickpcu)
				sc->sc_rxlink = NULL;
			sc->sc_kickpcu = 1;
			/*
			 * Enqueue an RX proc to handle whatever
			 * is in the RX queue.
			 * This will then kick the PCU.
			 */
			taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
			ATH_PCU_UNLOCK(sc);
		}
		if (status & HAL_INT_TXURN) {
			sc->sc_stats.ast_txurn++;
			/* bump tx trigger level */
			ath_hal_updatetxtriglevel(ah, AH_TRUE);
		}
		/*
		 * Handle both the legacy and RX EDMA interrupt bits.
		 * Note that HAL_INT_RXLP is also HAL_INT_RXDESC.
		 */
		if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) {
			sc->sc_stats.ast_rx_intr++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
		}
		if (status & HAL_INT_TX) {
			sc->sc_stats.ast_tx_intr++;
			/*
			 * Grab all the currently set bits in the HAL txq bitmap
			 * and blank them. This is the only place we should be
			 * doing this.
			 */
			ATH_PCU_LOCK(sc);
			txqs = 0xffffffff;
			ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
			sc->sc_txq_active |= txqs;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
			ATH_PCU_UNLOCK(sc);
		}
		if (status & HAL_INT_BMISS) {
			sc->sc_stats.ast_bmiss++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
		}
		if (status & HAL_INT_GTT)
			sc->sc_stats.ast_tx_timeout++;
		if (status & HAL_INT_CST)
			sc->sc_stats.ast_tx_cst++;
		if (status & HAL_INT_MIB) {
			sc->sc_stats.ast_mib++;
			ATH_PCU_LOCK(sc);
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to fire.
			 */
			ath_hal_intrset(ah, 0);
			/*
			 * Let the hal handle the event. We assume it will
			 * clear whatever condition caused the interrupt.
			 */
			ath_hal_mibevent(ah, &sc->sc_halstats);
			/*
			 * Don't reset the interrupt if we've just
			 * kicked the PCU, or we may get a nested
			 * RXEOL before the rxproc has had a chance
			 * to run.
			 */
			if (sc->sc_kickpcu == 0)
				ath_hal_intrset(ah, sc->sc_imask);
			ATH_PCU_UNLOCK(sc);
		}
		if (status & HAL_INT_RXORN) {
			/* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
			CTR0(ATH_KTR_ERR, "ath_intr: RXORN");
			sc->sc_stats.ast_rxorn++;
		}
	}
	ATH_PCU_LOCK(sc);
	sc->sc_intr_cnt--;
	ATH_PCU_UNLOCK(sc);
}

static void
ath_fatal_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t *state;
	u_int32_t len;
	void *sp;

	if_printf(ifp, "hardware error; resetting\n");
	/*
	 * Fatal errors are unrecoverable.  Typically these
	 * are caused by DMA errors.  Collect h/w state from
	 * the hal so we can diagnose what's going on.
	 */
	if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
		KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
		state = sp;
		if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
		    state[0], state[1] , state[2], state[3],
		    state[4], state[5]);
	}
	ath_reset(ifp, ATH_RESET_NOLOSS);
}

static void
ath_bmiss_vap(struct ieee80211vap *vap)
{
	/*
	 * Workaround phantom bmiss interrupts by sanity-checking
	 * the time of our last rx'd frame.  If it is within the
	 * beacon miss interval then ignore the interrupt.  If it's
	 * truly a bmiss we'll get another interrupt soon and that'll
	 * be dispatched up for processing.  Note this applies only
	 * for h/w beacon miss events.
	 */
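	/*
	 * Worked example (illustrative values only): with a beacon interval
	 * (ni_intval) of 100 TU and iv_bmissthreshold of 7, bmisstimeout
	 * below is 7 * 100 * 1024 = 716800us, so the interrupt is ignored
	 * if a frame was received within roughly the last 0.72 seconds.
	 */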
	if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
		struct ifnet *ifp = vap->iv_ic->ic_ifp;
		struct ath_softc *sc = ifp->if_softc;
		u_int64_t lastrx = sc->sc_lastrx;
		u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
		/* XXX should take a locked ref to iv_bss */
		u_int bmisstimeout =
			vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
		    __func__, (unsigned long long) tsf,
		    (unsigned long long)(tsf - lastrx),
		    (unsigned long long) lastrx, bmisstimeout);

		if (tsf - lastrx <= bmisstimeout) {
			sc->sc_stats.ast_bmiss_phantom++;
			return;
		}
	}
	ATH_VAP(vap)->av_bmiss(vap);
}

static int
ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
{
	uint32_t rsize;
	void *sp;

	if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize))
		return 0;
	KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
	*hangs = *(uint32_t *)sp;
	return 1;
}

static void
ath_bmiss_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t hangs;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);

	if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
		if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs);
		ath_reset(ifp, ATH_RESET_NOLOSS);
	} else
		ieee80211_beacon_miss(ifp->if_l2com);
}

/*
 * Handle TKIP MIC setup to deal with hardware that doesn't do MIC
 * calcs together with WME.  If necessary disable the crypto
 * hardware and mark the 802.11 state so keys will be setup
 * with the MIC work done in software.
 */
static void
ath_settkipmic(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
		if (ic->ic_flags & IEEE80211_F_WME) {
			ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
			ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
		} else {
			ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		}
	}
}

static void
ath_init(void *arg)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	ATH_LOCK(sc);
	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop_locked(ifp);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
1840 */ 1841 ath_settkipmic(sc); 1842 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) { 1843 if_printf(ifp, "unable to reset hardware; hal status %u\n", 1844 status); 1845 ATH_UNLOCK(sc); 1846 return; 1847 } 1848 ath_chan_change(sc, ic->ic_curchan); 1849 1850 /* Let DFS at it in case it's a DFS channel */ 1851 ath_dfs_radar_enable(sc, ic->ic_curchan); 1852 1853 /* 1854 * Likewise this is set during reset so update 1855 * state cached in the driver. 1856 */ 1857 sc->sc_diversity = ath_hal_getdiversity(ah); 1858 sc->sc_lastlongcal = 0; 1859 sc->sc_resetcal = 1; 1860 sc->sc_lastcalreset = 0; 1861 sc->sc_lastani = 0; 1862 sc->sc_lastshortcal = 0; 1863 sc->sc_doresetcal = AH_FALSE; 1864 /* 1865 * Beacon timers were cleared here; give ath_newstate() 1866 * a hint that the beacon timers should be poked when 1867 * things transition to the RUN state. 1868 */ 1869 sc->sc_beacons = 0; 1870 1871 /* 1872 * Setup the hardware after reset: the key cache 1873 * is filled as needed and the receive engine is 1874 * set going. Frame transmit is handled entirely 1875 * in the frame output path; there's nothing to do 1876 * here except setup the interrupt mask. 1877 */ 1878 if (ath_startrecv(sc) != 0) { 1879 if_printf(ifp, "unable to start recv logic\n"); 1880 ATH_UNLOCK(sc); 1881 return; 1882 } 1883 1884 /* 1885 * Enable interrupts. 1886 */ 1887 sc->sc_imask = HAL_INT_RX | HAL_INT_TX 1888 | HAL_INT_RXEOL | HAL_INT_RXORN 1889 | HAL_INT_FATAL | HAL_INT_GLOBAL; 1890 1891 /* 1892 * Enable RX EDMA bits. Note these overlap with 1893 * HAL_INT_RX and HAL_INT_RXDESC respectively. 1894 */ 1895 if (sc->sc_isedma) 1896 sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP); 1897 1898 /* 1899 * Enable MIB interrupts when there are hardware phy counters. 1900 * Note we only do this (at the moment) for station mode. 1901 */ 1902 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) 1903 sc->sc_imask |= HAL_INT_MIB; 1904 1905 /* Enable global TX timeout and carrier sense timeout if available */ 1906 if (ath_hal_gtxto_supported(ah)) 1907 sc->sc_imask |= HAL_INT_GTT; 1908 1909 DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n", 1910 __func__, sc->sc_imask); 1911 1912 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1913 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc); 1914 ath_hal_intrset(ah, sc->sc_imask); 1915 1916 ATH_UNLOCK(sc); 1917 1918#ifdef ATH_TX99_DIAG 1919 if (sc->sc_tx99 != NULL) 1920 sc->sc_tx99->start(sc->sc_tx99); 1921 else 1922#endif 1923 ieee80211_start_all(ic); /* start all vap's */ 1924} 1925 1926static void 1927ath_stop_locked(struct ifnet *ifp) 1928{ 1929 struct ath_softc *sc = ifp->if_softc; 1930 struct ath_hal *ah = sc->sc_ah; 1931 1932 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n", 1933 __func__, sc->sc_invalid, ifp->if_flags); 1934 1935 ATH_LOCK_ASSERT(sc); 1936 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1937 /* 1938 * Shutdown the hardware and driver: 1939 * reset 802.11 state machine 1940 * turn off timers 1941 * disable interrupts 1942 * turn off the radio 1943 * clear transmit machinery 1944 * clear receive machinery 1945 * drain and release tx queues 1946 * reclaim beacon resources 1947 * power down hardware 1948 * 1949 * Note that some of this work is not possible if the 1950 * hardware is gone (invalid). 
1951 */ 1952#ifdef ATH_TX99_DIAG 1953 if (sc->sc_tx99 != NULL) 1954 sc->sc_tx99->stop(sc->sc_tx99); 1955#endif 1956 callout_stop(&sc->sc_wd_ch); 1957 sc->sc_wd_timer = 0; 1958 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1959 if (!sc->sc_invalid) { 1960 if (sc->sc_softled) { 1961 callout_stop(&sc->sc_ledtimer); 1962 ath_hal_gpioset(ah, sc->sc_ledpin, 1963 !sc->sc_ledon); 1964 sc->sc_blinking = 0; 1965 } 1966 ath_hal_intrset(ah, 0); 1967 } 1968 ath_draintxq(sc, ATH_RESET_DEFAULT); 1969 if (!sc->sc_invalid) { 1970 ath_stoprecv(sc, 1); 1971 ath_hal_phydisable(ah); 1972 } else 1973 sc->sc_rxlink = NULL; 1974 ath_beacon_free(sc); /* XXX not needed */ 1975 } 1976} 1977 1978#define MAX_TXRX_ITERATIONS 1000 1979static void 1980ath_txrx_stop_locked(struct ath_softc *sc) 1981{ 1982 int i = MAX_TXRX_ITERATIONS; 1983 1984 ATH_UNLOCK_ASSERT(sc); 1985 ATH_PCU_LOCK_ASSERT(sc); 1986 1987 /* 1988 * Sleep until all the pending operations have completed. 1989 * 1990 * The caller must ensure that reset has been incremented 1991 * or the pending operations may continue being queued. 1992 */ 1993 while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt || 1994 sc->sc_txstart_cnt || sc->sc_intr_cnt) { 1995 if (i <= 0) 1996 break; 1997 msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 1); 1998 i--; 1999 } 2000 2001 if (i <= 0) 2002 device_printf(sc->sc_dev, 2003 "%s: didn't finish after %d iterations\n", 2004 __func__, MAX_TXRX_ITERATIONS); 2005} 2006#undef MAX_TXRX_ITERATIONS 2007 2008#if 0 2009static void 2010ath_txrx_stop(struct ath_softc *sc) 2011{ 2012 ATH_UNLOCK_ASSERT(sc); 2013 ATH_PCU_UNLOCK_ASSERT(sc); 2014 2015 ATH_PCU_LOCK(sc); 2016 ath_txrx_stop_locked(sc); 2017 ATH_PCU_UNLOCK(sc); 2018} 2019#endif 2020 2021static void 2022ath_txrx_start(struct ath_softc *sc) 2023{ 2024 2025 taskqueue_unblock(sc->sc_tq); 2026} 2027 2028/* 2029 * Grab the reset lock, and wait around until noone else 2030 * is trying to do anything with it. 2031 * 2032 * This is totally horrible but we can't hold this lock for 2033 * long enough to do TX/RX or we end up with net80211/ip stack 2034 * LORs and eventual deadlock. 2035 * 2036 * "dowait" signals whether to spin, waiting for the reset 2037 * lock count to reach 0. This should (for now) only be used 2038 * during the reset path, as the rest of the code may not 2039 * be locking-reentrant enough to behave correctly. 2040 * 2041 * Another, cleaner way should be found to serialise all of 2042 * these operations. 2043 */ 2044#define MAX_RESET_ITERATIONS 10 2045static int 2046ath_reset_grablock(struct ath_softc *sc, int dowait) 2047{ 2048 int w = 0; 2049 int i = MAX_RESET_ITERATIONS; 2050 2051 ATH_PCU_LOCK_ASSERT(sc); 2052 do { 2053 if (sc->sc_inreset_cnt == 0) { 2054 w = 1; 2055 break; 2056 } 2057 if (dowait == 0) { 2058 w = 0; 2059 break; 2060 } 2061 ATH_PCU_UNLOCK(sc); 2062 pause("ath_reset_grablock", 1); 2063 i--; 2064 ATH_PCU_LOCK(sc); 2065 } while (i > 0); 2066 2067 /* 2068 * We always increment the refcounter, regardless 2069 * of whether we succeeded to get it in an exclusive 2070 * way. 
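 *
 * A minimal usage sketch (mirroring what ath_reset() below does):
 *
 *	ATH_PCU_LOCK(sc);
 *	ath_hal_intrset(ah, 0);
 *	ath_txrx_stop_locked(sc);
 *	if (ath_reset_grablock(sc, 1) == 0)
 *		device_printf(sc->sc_dev, "concurrent reset!\n");
 *	ATH_PCU_UNLOCK(sc);
 *	... perform the reset ...
 *	ATH_PCU_LOCK(sc);
 *	sc->sc_inreset_cnt--;
 *	ath_hal_intrset(ah, sc->sc_imask);
 *	ATH_PCU_UNLOCK(sc);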
2071 */ 2072 sc->sc_inreset_cnt++; 2073 2074 if (i <= 0) 2075 device_printf(sc->sc_dev, 2076 "%s: didn't finish after %d iterations\n", 2077 __func__, MAX_RESET_ITERATIONS); 2078 2079 if (w == 0) 2080 device_printf(sc->sc_dev, 2081 "%s: warning, recursive reset path!\n", 2082 __func__); 2083 2084 return w; 2085} 2086#undef MAX_RESET_ITERATIONS 2087 2088/* 2089 * XXX TODO: write ath_reset_releaselock 2090 */ 2091 2092static void 2093ath_stop(struct ifnet *ifp) 2094{ 2095 struct ath_softc *sc = ifp->if_softc; 2096 2097 ATH_LOCK(sc); 2098 ath_stop_locked(ifp); 2099 ATH_UNLOCK(sc); 2100} 2101 2102/* 2103 * Reset the hardware w/o losing operational state. This is 2104 * basically a more efficient way of doing ath_stop, ath_init, 2105 * followed by state transitions to the current 802.11 2106 * operational state. Used to recover from various errors and 2107 * to reset or reload hardware state. 2108 */ 2109int 2110ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type) 2111{ 2112 struct ath_softc *sc = ifp->if_softc; 2113 struct ieee80211com *ic = ifp->if_l2com; 2114 struct ath_hal *ah = sc->sc_ah; 2115 HAL_STATUS status; 2116 int i; 2117 2118 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); 2119 2120 /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */ 2121 ATH_PCU_UNLOCK_ASSERT(sc); 2122 ATH_UNLOCK_ASSERT(sc); 2123 2124 /* Try to stop any further TX/RX from occurring */ 2125 taskqueue_block(sc->sc_tq); 2126 2127 ATH_PCU_LOCK(sc); 2128 ath_hal_intrset(ah, 0); /* disable interrupts */ 2129 ath_txrx_stop_locked(sc); /* Ensure TX/RX is stopped */ 2130 if (ath_reset_grablock(sc, 1) == 0) { 2131 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", 2132 __func__); 2133 } 2134 ATH_PCU_UNLOCK(sc); 2135 2136 /* 2137 * Should now wait for pending TX/RX to complete 2138 * and block future ones from occurring. This needs to be 2139 * done before the TX queue is drained. 2140 */ 2141 ath_draintxq(sc, reset_type); /* stop xmit side */ 2142 2143 /* 2144 * Regardless of whether we're doing a no-loss flush or 2145 * not, stop the PCU and handle what's in the RX queue. 2146 * That way frames aren't dropped which shouldn't be. 2147 */ 2148 ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS)); 2149 ath_rx_flush(sc); 2150 2151 ath_settkipmic(sc); /* configure TKIP MIC handling */ 2152 /* NB: indicate channel change so we do a full reset */ 2153 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status)) 2154 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n", 2155 __func__, status); 2156 sc->sc_diversity = ath_hal_getdiversity(ah); 2157 2158 /* Let DFS at it in case it's a DFS channel */ 2159 ath_dfs_radar_enable(sc, ic->ic_curchan); 2160 2161 if (ath_startrecv(sc) != 0) /* restart recv */ 2162 if_printf(ifp, "%s: unable to start recv logic\n", __func__); 2163 /* 2164 * We may be doing a reset in response to an ioctl 2165 * that changes the channel so update any state that 2166 * might change as a result. 2167 */ 2168 ath_chan_change(sc, ic->ic_curchan); 2169 if (sc->sc_beacons) { /* restart beacons */ 2170#ifdef IEEE80211_SUPPORT_TDMA 2171 if (sc->sc_tdma) 2172 ath_tdma_config(sc, NULL); 2173 else 2174#endif 2175 ath_beacon_config(sc, NULL); 2176 } 2177 2178 /* 2179 * Release the reset lock and re-enable interrupts here. 2180 * If an interrupt was being processed in ath_intr(), 2181 * it would disable interrupts at this point.
So we have 2182 * to atomically enable interrupts and decrement the 2183 * reset counter - this way ath_intr() doesn't end up 2184 * disabling interrupts without a corresponding enable 2185 * in the reset or channel change path. 2186 */ 2187 ATH_PCU_LOCK(sc); 2188 sc->sc_inreset_cnt--; 2189 /* XXX only do this if sc_inreset_cnt == 0? */ 2190 ath_hal_intrset(ah, sc->sc_imask); 2191 ATH_PCU_UNLOCK(sc); 2192 2193 /* 2194 * TX and RX can be started here. If it were started with 2195 * sc_inreset_cnt > 0, the TX and RX path would abort. 2196 * Thus if this is a nested call through the reset or 2197 * channel change code, TX completion will occur but 2198 * RX completion and ath_start / ath_tx_start will not 2199 * run. 2200 */ 2201 2202 /* Restart TX/RX as needed */ 2203 ath_txrx_start(sc); 2204 2205 /* XXX Restart TX completion and pending TX */ 2206 if (reset_type == ATH_RESET_NOLOSS) { 2207 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 2208 if (ATH_TXQ_SETUP(sc, i)) { 2209 ATH_TXQ_LOCK(&sc->sc_txq[i]); 2210 ath_txq_restart_dma(sc, &sc->sc_txq[i]); 2211 ath_txq_sched(sc, &sc->sc_txq[i]); 2212 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 2213 } 2214 } 2215 } 2216 2217 /* 2218 * This may have been set during an ath_start() call which 2219 * set this once it detected a concurrent TX was going on. 2220 * So, clear it. 2221 */ 2222 IF_LOCK(&ifp->if_snd); 2223 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2224 IF_UNLOCK(&ifp->if_snd); 2225 2226 /* Handle any frames in the TX queue */ 2227 /* 2228 * XXX should this be done by the caller, rather than 2229 * ath_reset() ? 2230 */ 2231 ath_start(ifp); /* restart xmit */ 2232 return 0; 2233} 2234 2235static int 2236ath_reset_vap(struct ieee80211vap *vap, u_long cmd) 2237{ 2238 struct ieee80211com *ic = vap->iv_ic; 2239 struct ifnet *ifp = ic->ic_ifp; 2240 struct ath_softc *sc = ifp->if_softc; 2241 struct ath_hal *ah = sc->sc_ah; 2242 2243 switch (cmd) { 2244 case IEEE80211_IOC_TXPOWER: 2245 /* 2246 * If per-packet TPC is enabled, then we have nothing 2247 * to do; otherwise we need to force the global limit. 2248 * All this can happen directly; no need to reset. 2249 */ 2250 if (!ath_hal_gettpc(ah)) 2251 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit); 2252 return 0; 2253 } 2254 /* XXX? Full or NOLOSS? */ 2255 return ath_reset(ifp, ATH_RESET_FULL); 2256} 2257 2258struct ath_buf * 2259_ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype) 2260{ 2261 struct ath_buf *bf; 2262 2263 ATH_TXBUF_LOCK_ASSERT(sc); 2264 2265 if (btype == ATH_BUFTYPE_MGMT) 2266 bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt); 2267 else 2268 bf = TAILQ_FIRST(&sc->sc_txbuf); 2269 2270 if (bf == NULL) { 2271 sc->sc_stats.ast_tx_getnobuf++; 2272 } else { 2273 if (bf->bf_flags & ATH_BUF_BUSY) { 2274 sc->sc_stats.ast_tx_getbusybuf++; 2275 bf = NULL; 2276 } 2277 } 2278 2279 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) { 2280 if (btype == ATH_BUFTYPE_MGMT) 2281 TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list); 2282 else { 2283 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list); 2284 sc->sc_txbuf_cnt--; 2285 2286 /* 2287 * This shouldn't happen; however just to be 2288 * safe print a warning and fudge the txbuf 2289 * count. 2290 */ 2291 if (sc->sc_txbuf_cnt < 0) { 2292 device_printf(sc->sc_dev, 2293 "%s: sc_txbuf_cnt < 0?\n", 2294 __func__); 2295 sc->sc_txbuf_cnt = 0; 2296 } 2297 } 2298 } else 2299 bf = NULL; 2300 2301 if (bf == NULL) { 2302 /* XXX should check which list, mgmt or otherwise */ 2303 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__, 2304 TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
2305 "out of xmit buffers" : "xmit buffer busy"); 2306 return NULL; 2307 } 2308 2309 /* XXX TODO: should do this at buffer list initialisation */ 2310 /* XXX (then, ensure the buffer has the right flag set) */ 2311 if (btype == ATH_BUFTYPE_MGMT) 2312 bf->bf_flags |= ATH_BUF_MGMT; 2313 else 2314 bf->bf_flags &= (~ATH_BUF_MGMT); 2315 2316 /* Valid bf here; clear some basic fields */ 2317 bf->bf_next = NULL; /* XXX just to be sure */ 2318 bf->bf_last = NULL; /* XXX again, just to be sure */ 2319 bf->bf_comp = NULL; /* XXX again, just to be sure */ 2320 bzero(&bf->bf_state, sizeof(bf->bf_state)); 2321 2322 return bf; 2323} 2324 2325/* 2326 * When retrying a software frame, buffers marked ATH_BUF_BUSY 2327 * can't be thrown back on the queue as they could still be 2328 * in use by the hardware. 2329 * 2330 * This duplicates the buffer, or returns NULL. 2331 * 2332 * The descriptor is also copied but the link pointers and 2333 * the DMA segments aren't copied; this frame should thus 2334 * be again passed through the descriptor setup/chain routines 2335 * so the link is correct. 2336 * 2337 * The caller must free the buffer using ath_freebuf(). 2338 * 2339 * XXX TODO: this call shouldn't fail as it'll cause packet loss 2340 * XXX in the TX pathway when retries are needed. 2341 * XXX Figure out how to keep some buffers free, or factor the 2342 * XXX number of busy buffers into the xmit path (ath_start()) 2343 * XXX so we don't over-commit. 2344 */ 2345struct ath_buf * 2346ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf) 2347{ 2348 struct ath_buf *tbf; 2349 2350 tbf = ath_getbuf(sc, 2351 (bf->bf_flags & ATH_BUF_MGMT) ? 2352 ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL); 2353 if (tbf == NULL) 2354 return NULL; /* XXX failure? Why? */ 2355 2356 /* Copy basics */ 2357 tbf->bf_next = NULL; 2358 tbf->bf_nseg = bf->bf_nseg; 2359 tbf->bf_flags = bf->bf_flags & ~ATH_BUF_BUSY; 2360 tbf->bf_status = bf->bf_status; 2361 tbf->bf_m = bf->bf_m; 2362 tbf->bf_node = bf->bf_node; 2363 /* will be setup by the chain/setup function */ 2364 tbf->bf_lastds = NULL; 2365 /* for now, last == self */ 2366 tbf->bf_last = tbf; 2367 tbf->bf_comp = bf->bf_comp; 2368 2369 /* NOTE: DMA segments will be setup by the setup/chain functions */ 2370 2371 /* The caller has to re-init the descriptor + links */ 2372 2373 /* Copy state */ 2374 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state)); 2375 2376 return tbf; 2377} 2378 2379struct ath_buf * 2380ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype) 2381{ 2382 struct ath_buf *bf; 2383 2384 ATH_TXBUF_LOCK(sc); 2385 bf = _ath_getbuf_locked(sc, btype); 2386 /* 2387 * If a mgmt buffer was requested but we're out of those, 2388 * try requesting a normal one. 2389 */ 2390 if (bf == NULL && btype == ATH_BUFTYPE_MGMT) 2391 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); 2392 ATH_TXBUF_UNLOCK(sc); 2393 if (bf == NULL) { 2394 struct ifnet *ifp = sc->sc_ifp; 2395 2396 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__); 2397 sc->sc_stats.ast_tx_qstop++; 2398 IF_LOCK(&ifp->if_snd); 2399 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2400 IF_UNLOCK(&ifp->if_snd); 2401 } 2402 return bf; 2403} 2404 2405void 2406ath_start(struct ifnet *ifp) 2407{ 2408 struct ath_softc *sc = ifp->if_softc; 2409 struct ieee80211_node *ni; 2410 struct ath_buf *bf; 2411 struct mbuf *m, *next; 2412 ath_bufhead frags; 2413 2414 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) 2415 return; 2416 2417 /* XXX is it ok to hold the ATH_LOCK here? 
*/ 2418 ATH_PCU_LOCK(sc); 2419 if (sc->sc_inreset_cnt > 0) { 2420 device_printf(sc->sc_dev, 2421 "%s: sc_inreset_cnt > 0; bailing\n", __func__); 2422 ATH_PCU_UNLOCK(sc); 2423 IF_LOCK(&ifp->if_snd); 2424 sc->sc_stats.ast_tx_qstop++; 2425 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2426 IF_UNLOCK(&ifp->if_snd); 2427 return; 2428 } 2429 sc->sc_txstart_cnt++; 2430 ATH_PCU_UNLOCK(sc); 2431 2432 for (;;) { 2433 ATH_TXBUF_LOCK(sc); 2434 if (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree) { 2435 /* XXX increment counter? */ 2436 ATH_TXBUF_UNLOCK(sc); 2437 IF_LOCK(&ifp->if_snd); 2438 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2439 IF_UNLOCK(&ifp->if_snd); 2440 break; 2441 } 2442 ATH_TXBUF_UNLOCK(sc); 2443 2444 /* 2445 * Grab a TX buffer and associated resources. 2446 */ 2447 bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL); 2448 if (bf == NULL) 2449 break; 2450 2451 IFQ_DEQUEUE(&ifp->if_snd, m); 2452 if (m == NULL) { 2453 ATH_TXBUF_LOCK(sc); 2454 ath_returnbuf_head(sc, bf); 2455 ATH_TXBUF_UNLOCK(sc); 2456 break; 2457 } 2458 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; 2459 /* 2460 * Check for fragmentation. If this frame 2461 * has been broken up verify we have enough 2462 * buffers to send all the fragments so all 2463 * go out or none... 2464 */ 2465 TAILQ_INIT(&frags); 2466 if ((m->m_flags & M_FRAG) && 2467 !ath_txfrag_setup(sc, &frags, m, ni)) { 2468 DPRINTF(sc, ATH_DEBUG_XMIT, 2469 "%s: out of txfrag buffers\n", __func__); 2470 sc->sc_stats.ast_tx_nofrag++; 2471 ifp->if_oerrors++; 2472 ath_freetx(m); 2473 goto bad; 2474 } 2475 ifp->if_opackets++; 2476 nextfrag: 2477 /* 2478 * Pass the frame to the h/w for transmission. 2479 * Fragmented frames have each frag chained together 2480 * with m_nextpkt. We know there are sufficient ath_buf's 2481 * to send all the frags because of work done by 2482 * ath_txfrag_setup. We leave m_nextpkt set while 2483 * calling ath_tx_start so it can use it to extend the 2484 * the tx duration to cover the subsequent frag and 2485 * so it can reclaim all the mbufs in case of an error; 2486 * ath_tx_start clears m_nextpkt once it commits to 2487 * handing the frame to the hardware. 2488 */ 2489 next = m->m_nextpkt; 2490 if (ath_tx_start(sc, ni, bf, m)) { 2491 bad: 2492 ifp->if_oerrors++; 2493 reclaim: 2494 bf->bf_m = NULL; 2495 bf->bf_node = NULL; 2496 ATH_TXBUF_LOCK(sc); 2497 ath_returnbuf_head(sc, bf); 2498 ath_txfrag_cleanup(sc, &frags, ni); 2499 ATH_TXBUF_UNLOCK(sc); 2500 if (ni != NULL) 2501 ieee80211_free_node(ni); 2502 continue; 2503 } 2504 if (next != NULL) { 2505 /* 2506 * Beware of state changing between frags. 2507 * XXX check sta power-save state? 2508 */ 2509 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { 2510 DPRINTF(sc, ATH_DEBUG_XMIT, 2511 "%s: flush fragmented packet, state %s\n", 2512 __func__, 2513 ieee80211_state_name[ni->ni_vap->iv_state]); 2514 ath_freetx(next); 2515 goto reclaim; 2516 } 2517 m = next; 2518 bf = TAILQ_FIRST(&frags); 2519 KASSERT(bf != NULL, ("no buf for txfrag")); 2520 TAILQ_REMOVE(&frags, bf, bf_list); 2521 goto nextfrag; 2522 } 2523 2524 sc->sc_wd_timer = 5; 2525 } 2526 2527 ATH_PCU_LOCK(sc); 2528 sc->sc_txstart_cnt--; 2529 ATH_PCU_UNLOCK(sc); 2530} 2531 2532static int 2533ath_media_change(struct ifnet *ifp) 2534{ 2535 int error = ieee80211_media_change(ifp); 2536 /* NB: only the fixed rate can change and that doesn't need a reset */ 2537 return (error == ENETRESET ? 0 : error); 2538} 2539 2540/* 2541 * Block/unblock tx+rx processing while a key change is done. 
2542 * We assume the caller serializes key management operations 2543 * so we only need to worry about synchronization with other 2544 * uses that originate in the driver. 2545 */ 2546static void 2547ath_key_update_begin(struct ieee80211vap *vap) 2548{ 2549 struct ifnet *ifp = vap->iv_ic->ic_ifp; 2550 struct ath_softc *sc = ifp->if_softc; 2551 2552 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2553 taskqueue_block(sc->sc_tq); 2554 IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */ 2555} 2556 2557static void 2558ath_key_update_end(struct ieee80211vap *vap) 2559{ 2560 struct ifnet *ifp = vap->iv_ic->ic_ifp; 2561 struct ath_softc *sc = ifp->if_softc; 2562 2563 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2564 IF_UNLOCK(&ifp->if_snd); 2565 taskqueue_unblock(sc->sc_tq); 2566} 2567 2568static void 2569ath_update_promisc(struct ifnet *ifp) 2570{ 2571 struct ath_softc *sc = ifp->if_softc; 2572 u_int32_t rfilt; 2573 2574 /* configure rx filter */ 2575 rfilt = ath_calcrxfilter(sc); 2576 ath_hal_setrxfilter(sc->sc_ah, rfilt); 2577 2578 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); 2579} 2580 2581static void 2582ath_update_mcast(struct ifnet *ifp) 2583{ 2584 struct ath_softc *sc = ifp->if_softc; 2585 u_int32_t mfilt[2]; 2586 2587 /* calculate and install multicast filter */ 2588 if ((ifp->if_flags & IFF_ALLMULTI) == 0) { 2589 struct ifmultiaddr *ifma; 2590 /* 2591 * Merge multicast addresses to form the hardware filter. 2592 */ 2593 mfilt[0] = mfilt[1] = 0; 2594 if_maddr_rlock(ifp); /* XXX need some fiddling to remove? */ 2595 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2596 caddr_t dl; 2597 u_int32_t val; 2598 u_int8_t pos; 2599 2600 /* calculate XOR of eight 6bit values */ 2601 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 2602 val = LE_READ_4(dl + 0); 2603 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2604 val = LE_READ_4(dl + 3); 2605 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2606 pos &= 0x3f; 2607 mfilt[pos / 32] |= (1 << (pos % 32)); 2608 } 2609 if_maddr_runlock(ifp); 2610 } else 2611 mfilt[0] = mfilt[1] = ~0; 2612 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]); 2613 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n", 2614 __func__, mfilt[0], mfilt[1]); 2615} 2616 2617void 2618ath_mode_init(struct ath_softc *sc) 2619{ 2620 struct ifnet *ifp = sc->sc_ifp; 2621 struct ath_hal *ah = sc->sc_ah; 2622 u_int32_t rfilt; 2623 2624 /* configure rx filter */ 2625 rfilt = ath_calcrxfilter(sc); 2626 ath_hal_setrxfilter(ah, rfilt); 2627 2628 /* configure operational mode */ 2629 ath_hal_setopmode(ah); 2630 2631 DPRINTF(sc, ATH_DEBUG_STATE | ATH_DEBUG_MODE, 2632 "%s: ah=%p, ifp=%p, if_addr=%p\n", 2633 __func__, 2634 ah, 2635 ifp, 2636 (ifp == NULL) ? NULL : ifp->if_addr); 2637 2638 /* handle any link-level address change */ 2639 ath_hal_setmac(ah, IF_LLADDR(ifp)); 2640 2641 /* calculate and install multicast filter */ 2642 ath_update_mcast(ifp); 2643} 2644 2645/* 2646 * Set the slot time based on the current setting. 
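 * (On a normal 11g channel this means HAL_SLOT_TIME_9 / HAL_SLOT_TIME_20
 * (nominally 9us / 20us) depending on whether short slot
 * (IEEE80211_F_SHSLOT) is negotiated; half- and quarter-rate channels
 * use 13us and 21us -- see the body below.)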
2647 */ 2648void 2649ath_setslottime(struct ath_softc *sc) 2650{ 2651 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 2652 struct ath_hal *ah = sc->sc_ah; 2653 u_int usec; 2654 2655 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan)) 2656 usec = 13; 2657 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan)) 2658 usec = 21; 2659 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { 2660 /* honor short/long slot time only in 11g */ 2661 /* XXX shouldn't honor on pure g or turbo g channel */ 2662 if (ic->ic_flags & IEEE80211_F_SHSLOT) 2663 usec = HAL_SLOT_TIME_9; 2664 else 2665 usec = HAL_SLOT_TIME_20; 2666 } else 2667 usec = HAL_SLOT_TIME_9; 2668 2669 DPRINTF(sc, ATH_DEBUG_RESET, 2670 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n", 2671 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, 2672 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec); 2673 2674 ath_hal_setslottime(ah, usec); 2675 sc->sc_updateslot = OK; 2676} 2677 2678/* 2679 * Callback from the 802.11 layer to update the 2680 * slot time based on the current setting. 2681 */ 2682static void 2683ath_updateslot(struct ifnet *ifp) 2684{ 2685 struct ath_softc *sc = ifp->if_softc; 2686 struct ieee80211com *ic = ifp->if_l2com; 2687 2688 /* 2689 * When not coordinating the BSS, change the hardware 2690 * immediately. For other operation we defer the change 2691 * until beacon updates have propagated to the stations. 2692 */ 2693 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 2694 ic->ic_opmode == IEEE80211_M_MBSS) 2695 sc->sc_updateslot = UPDATE; 2696 else 2697 ath_setslottime(sc); 2698} 2699 2700/* 2701 * Append the contents of src to dst; both queues 2702 * are assumed to be locked. 2703 */ 2704void 2705ath_txqmove(struct ath_txq *dst, struct ath_txq *src) 2706{ 2707 2708 ATH_TXQ_LOCK_ASSERT(dst); 2709 ATH_TXQ_LOCK_ASSERT(src); 2710 2711 TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list); 2712 dst->axq_link = src->axq_link; 2713 src->axq_link = NULL; 2714 dst->axq_depth += src->axq_depth; 2715 dst->axq_aggr_depth += src->axq_aggr_depth; 2716 src->axq_depth = 0; 2717 src->axq_aggr_depth = 0; 2718} 2719 2720/* 2721 * Reset the hardware, with no loss. 2722 * 2723 * This can't be used for a general case reset. 2724 */ 2725static void 2726ath_reset_proc(void *arg, int pending) 2727{ 2728 struct ath_softc *sc = arg; 2729 struct ifnet *ifp = sc->sc_ifp; 2730 2731#if 0 2732 if_printf(ifp, "%s: resetting\n", __func__); 2733#endif 2734 ath_reset(ifp, ATH_RESET_NOLOSS); 2735} 2736 2737/* 2738 * Reset the hardware after detecting beacons have stopped. 2739 */ 2740static void 2741ath_bstuck_proc(void *arg, int pending) 2742{ 2743 struct ath_softc *sc = arg; 2744 struct ifnet *ifp = sc->sc_ifp; 2745 uint32_t hangs = 0; 2746 2747 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) 2748 if_printf(ifp, "bb hang detected (0x%x)\n", hangs); 2749 2750 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n", 2751 sc->sc_bmisscount); 2752 sc->sc_stats.ast_bstuck++; 2753 /* 2754 * This assumes that there's no simultaneous channel mode change 2755 * occuring. 2756 */ 2757 ath_reset(ifp, ATH_RESET_NOLOSS); 2758} 2759 2760static void 2761ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 2762{ 2763 bus_addr_t *paddr = (bus_addr_t*) arg; 2764 KASSERT(error == 0, ("error %u on bus_dma callback", error)); 2765 *paddr = segs->ds_addr; 2766} 2767 2768/* 2769 * Allocate the descriptors and appropriate DMA tag/setup. 
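 * For parts which can't have a descriptor straddle a 4KB page
 * (see the Merlin work-around below) the allocation is padded by
 * one descriptor per 4KB page, so the skipped slots don't shrink
 * the usable pool.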
2770 * 2771 * For some situations (eg EDMA TX completion), there isn't a requirement 2772 * for the ath_buf entries to be allocated. 2773 */ 2774int 2775ath_descdma_alloc_desc(struct ath_softc *sc, 2776 struct ath_descdma *dd, ath_bufhead *head, 2777 const char *name, int ds_size, int ndesc) 2778{ 2779#define DS2PHYS(_dd, _ds) \ 2780 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 2781#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \ 2782 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0) 2783 struct ifnet *ifp = sc->sc_ifp; 2784 int error; 2785 2786 dd->dd_descsize = ds_size; 2787 2788 DPRINTF(sc, ATH_DEBUG_RESET, 2789 "%s: %s DMA: %u desc, %d bytes per descriptor\n", 2790 __func__, name, ndesc, dd->dd_descsize); 2791 2792 dd->dd_name = name; 2793 dd->dd_desc_len = dd->dd_descsize * ndesc; 2794 2795 /* 2796 * Merlin work-around: 2797 * Descriptors that cross the 4KB boundary can't be used. 2798 * Assume one skipped descriptor per 4KB page. 2799 */ 2800 if (! ath_hal_split4ktrans(sc->sc_ah)) { 2801 int numpages = dd->dd_desc_len / 4096; 2802 dd->dd_desc_len += ds_size * numpages; 2803 } 2804 2805 /* 2806 * Setup DMA descriptor area. 2807 */ 2808 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 2809 PAGE_SIZE, 0, /* alignment, bounds */ 2810 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2811 BUS_SPACE_MAXADDR, /* highaddr */ 2812 NULL, NULL, /* filter, filterarg */ 2813 dd->dd_desc_len, /* maxsize */ 2814 1, /* nsegments */ 2815 dd->dd_desc_len, /* maxsegsize */ 2816 BUS_DMA_ALLOCNOW, /* flags */ 2817 NULL, /* lockfunc */ 2818 NULL, /* lockarg */ 2819 &dd->dd_dmat); 2820 if (error != 0) { 2821 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); 2822 return error; 2823 } 2824 2825 /* allocate descriptors */ 2826 error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap); 2827 if (error != 0) { 2828 if_printf(ifp, "unable to create dmamap for %s descriptors, " 2829 "error %u\n", dd->dd_name, error); 2830 goto fail0; 2831 } 2832 2833 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, 2834 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, 2835 &dd->dd_dmamap); 2836 if (error != 0) { 2837 if_printf(ifp, "unable to alloc memory for %u %s descriptors, " 2838 "error %u\n", ndesc, dd->dd_name, error); 2839 goto fail1; 2840 } 2841 2842 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, 2843 dd->dd_desc, dd->dd_desc_len, 2844 ath_load_cb, &dd->dd_desc_paddr, 2845 BUS_DMA_NOWAIT); 2846 if (error != 0) { 2847 if_printf(ifp, "unable to map %s descriptors, error %u\n", 2848 dd->dd_name, error); 2849 goto fail2; 2850 } 2851 2852 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", 2853 __func__, dd->dd_name, (uint8_t *) dd->dd_desc, 2854 (u_long) dd->dd_desc_len, (caddr_t) dd->dd_desc_paddr, 2855 /*XXX*/ (u_long) dd->dd_desc_len); 2856 2857 return (0); 2858 2859fail2: 2860 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 2861fail1: 2862 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 2863fail0: 2864 bus_dma_tag_destroy(dd->dd_dmat); 2865 memset(dd, 0, sizeof(*dd)); 2866 return error; 2867#undef DS2PHYS 2868#undef ATH_DESC_4KB_BOUND_CHECK 2869} 2870 2871int 2872ath_descdma_setup(struct ath_softc *sc, 2873 struct ath_descdma *dd, ath_bufhead *head, 2874 const char *name, int ds_size, int nbuf, int ndesc) 2875{ 2876#define DS2PHYS(_dd, _ds) \ 2877 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 2878#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \ 2879 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) 
? 1 : 0) 2880 struct ifnet *ifp = sc->sc_ifp; 2881 uint8_t *ds; 2882 struct ath_buf *bf; 2883 int i, bsize, error; 2884 2885 /* Allocate descriptors */ 2886 error = ath_descdma_alloc_desc(sc, dd, head, name, ds_size, 2887 nbuf * ndesc); 2888 2889 /* Assume any errors during allocation were dealt with */ 2890 if (error != 0) { 2891 return (error); 2892 } 2893 2894 ds = (uint8_t *) dd->dd_desc; 2895 2896 /* allocate rx buffers */ 2897 bsize = sizeof(struct ath_buf) * nbuf; 2898 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 2899 if (bf == NULL) { 2900 if_printf(ifp, "malloc of %s buffers failed, size %u\n", 2901 dd->dd_name, bsize); 2902 goto fail3; 2903 } 2904 dd->dd_bufptr = bf; 2905 2906 TAILQ_INIT(head); 2907 for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * dd->dd_descsize)) { 2908 bf->bf_desc = (struct ath_desc *) ds; 2909 bf->bf_daddr = DS2PHYS(dd, ds); 2910 if (! ath_hal_split4ktrans(sc->sc_ah)) { 2911 /* 2912 * Merlin WAR: Skip descriptor addresses which 2913 * cause 4KB boundary crossing along any point 2914 * in the descriptor. 2915 */ 2916 if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr, 2917 dd->dd_descsize)) { 2918 /* Start at the next page */ 2919 ds += 0x1000 - (bf->bf_daddr & 0xFFF); 2920 bf->bf_desc = (struct ath_desc *) ds; 2921 bf->bf_daddr = DS2PHYS(dd, ds); 2922 } 2923 } 2924 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 2925 &bf->bf_dmamap); 2926 if (error != 0) { 2927 if_printf(ifp, "unable to create dmamap for %s " 2928 "buffer %u, error %u\n", dd->dd_name, i, error); 2929 ath_descdma_cleanup(sc, dd, head); 2930 return error; 2931 } 2932 bf->bf_lastds = bf->bf_desc; /* Just an initial value */ 2933 TAILQ_INSERT_TAIL(head, bf, bf_list); 2934 } 2935 2936 /* 2937 * XXX TODO: ensure that ds doesn't overflow the descriptor 2938 * allocation otherwise weird stuff will occur and crash your 2939 * machine. 2940 */ 2941 return 0; 2942 /* XXX this should likely just call ath_descdma_cleanup() */ 2943fail3: 2944 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 2945 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 2946 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 2947 bus_dma_tag_destroy(dd->dd_dmat); 2948 memset(dd, 0, sizeof(*dd)); 2949 return error; 2950#undef DS2PHYS 2951#undef ATH_DESC_4KB_BOUND_CHECK 2952} 2953 2954/* 2955 * Allocate ath_buf entries but no descriptor contents. 2956 * 2957 * This is for RX EDMA where the descriptors are the header part of 2958 * the RX buffer. 2959 */ 2960int 2961ath_descdma_setup_rx_edma(struct ath_softc *sc, 2962 struct ath_descdma *dd, ath_bufhead *head, 2963 const char *name, int nbuf, int rx_status_len) 2964{ 2965 struct ifnet *ifp = sc->sc_ifp; 2966 struct ath_buf *bf; 2967 int i, bsize, error; 2968 2969 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers\n", 2970 __func__, name, nbuf); 2971 2972 dd->dd_name = name; 2973 /* 2974 * This is (mostly) purely for show. We're not allocating any actual 2975 * descriptors here as EDMA RX has the descriptor be part 2976 * of the RX buffer. 2977 * 2978 * However, dd_desc_len is used by ath_descdma_free() to determine 2979 * whether we have already freed this DMA mapping. 
2980 */ 2981 dd->dd_desc_len = rx_status_len * nbuf; 2982 dd->dd_descsize = rx_status_len; 2983 2984 /* allocate rx buffers */ 2985 bsize = sizeof(struct ath_buf) * nbuf; 2986 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 2987 if (bf == NULL) { 2988 if_printf(ifp, "malloc of %s buffers failed, size %u\n", 2989 dd->dd_name, bsize); 2990 error = ENOMEM; 2991 goto fail3; 2992 } 2993 dd->dd_bufptr = bf; 2994 2995 TAILQ_INIT(head); 2996 for (i = 0; i < nbuf; i++, bf++) { 2997 bf->bf_desc = NULL; 2998 bf->bf_daddr = 0; 2999 bf->bf_lastds = NULL; /* Just an initial value */ 3000 3001 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 3002 &bf->bf_dmamap); 3003 if (error != 0) { 3004 if_printf(ifp, "unable to create dmamap for %s " 3005 "buffer %u, error %u\n", dd->dd_name, i, error); 3006 ath_descdma_cleanup(sc, dd, head); 3007 return error; 3008 } 3009 TAILQ_INSERT_TAIL(head, bf, bf_list); 3010 } 3011 return 0; 3012fail3: 3013 memset(dd, 0, sizeof(*dd)); 3014 return error; 3015} 3016 3017void 3018ath_descdma_cleanup(struct ath_softc *sc, 3019 struct ath_descdma *dd, ath_bufhead *head) 3020{ 3021 struct ath_buf *bf; 3022 struct ieee80211_node *ni; 3023 3024 if (dd->dd_dmamap != 0) { 3025 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 3026 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 3027 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 3028 bus_dma_tag_destroy(dd->dd_dmat); 3029 } 3030 3031 if (head != NULL) { 3032 TAILQ_FOREACH(bf, head, bf_list) { 3033 if (bf->bf_m) { 3034 m_freem(bf->bf_m); 3035 bf->bf_m = NULL; 3036 } 3037 if (bf->bf_dmamap != NULL) { 3038 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 3039 bf->bf_dmamap = NULL; 3040 } 3041 ni = bf->bf_node; 3042 bf->bf_node = NULL; 3043 if (ni != NULL) { 3044 /* 3045 * Reclaim node reference. 3046 */ 3047 ieee80211_free_node(ni); 3048 } 3049 } 3050 } 3051 3052 if (head != NULL) 3053 TAILQ_INIT(head); 3054 3055 if (dd->dd_bufptr != NULL) 3056 free(dd->dd_bufptr, M_ATHDEV); 3057 memset(dd, 0, sizeof(*dd)); 3058} 3059 3060static int 3061ath_desc_alloc(struct ath_softc *sc) 3062{ 3063 int error; 3064 3065 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 3066 "tx", sc->sc_tx_desclen, ath_txbuf, ATH_TXDESC); 3067 if (error != 0) { 3068 return error; 3069 } 3070 sc->sc_txbuf_cnt = ath_txbuf; 3071 3072 error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt, 3073 "tx_mgmt", sc->sc_tx_desclen, ath_txbuf_mgmt, 3074 ATH_TXDESC); 3075 if (error != 0) { 3076 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3077 return error; 3078 } 3079 3080 /* 3081 * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the 3082 * flag doesn't have to be set in ath_getbuf_locked(). 
3083 */ 3084 3085 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, 3086 "beacon", sc->sc_tx_desclen, ATH_BCBUF, 1); 3087 if (error != 0) { 3088 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3089 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 3090 &sc->sc_txbuf_mgmt); 3091 return error; 3092 } 3093 return 0; 3094} 3095 3096static void 3097ath_desc_free(struct ath_softc *sc) 3098{ 3099 3100 if (sc->sc_bdma.dd_desc_len != 0) 3101 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); 3102 if (sc->sc_txdma.dd_desc_len != 0) 3103 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3104 if (sc->sc_txdma_mgmt.dd_desc_len != 0) 3105 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 3106 &sc->sc_txbuf_mgmt); 3107} 3108 3109static struct ieee80211_node * 3110ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 3111{ 3112 struct ieee80211com *ic = vap->iv_ic; 3113 struct ath_softc *sc = ic->ic_ifp->if_softc; 3114 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; 3115 struct ath_node *an; 3116 3117 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); 3118 if (an == NULL) { 3119 /* XXX stat+msg */ 3120 return NULL; 3121 } 3122 ath_rate_node_init(sc, an); 3123 3124 /* Setup the mutex - there's no associd yet so set the name to NULL */ 3125 snprintf(an->an_name, sizeof(an->an_name), "%s: node %p", 3126 device_get_nameunit(sc->sc_dev), an); 3127 mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF); 3128 3129 /* XXX setup ath_tid */ 3130 ath_tx_tid_init(sc, an); 3131 3132 DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an); 3133 return &an->an_node; 3134} 3135 3136static void 3137ath_node_cleanup(struct ieee80211_node *ni) 3138{ 3139 struct ieee80211com *ic = ni->ni_ic; 3140 struct ath_softc *sc = ic->ic_ifp->if_softc; 3141 3142 /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */ 3143 ath_tx_node_flush(sc, ATH_NODE(ni)); 3144 ath_rate_node_cleanup(sc, ATH_NODE(ni)); 3145 sc->sc_node_cleanup(ni); 3146} 3147 3148static void 3149ath_node_free(struct ieee80211_node *ni) 3150{ 3151 struct ieee80211com *ic = ni->ni_ic; 3152 struct ath_softc *sc = ic->ic_ifp->if_softc; 3153 3154 DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni); 3155 mtx_destroy(&ATH_NODE(ni)->an_mtx); 3156 sc->sc_node_free(ni); 3157} 3158 3159static void 3160ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) 3161{ 3162 struct ieee80211com *ic = ni->ni_ic; 3163 struct ath_softc *sc = ic->ic_ifp->if_softc; 3164 struct ath_hal *ah = sc->sc_ah; 3165 3166 *rssi = ic->ic_node_getrssi(ni); 3167 if (ni->ni_chan != IEEE80211_CHAN_ANYC) 3168 *noise = ath_hal_getchannoise(ah, ni->ni_chan); 3169 else 3170 *noise = -95; /* nominally correct */ 3171} 3172 3173/* 3174 * Set the default antenna. 3175 */ 3176void 3177ath_setdefantenna(struct ath_softc *sc, u_int antenna) 3178{ 3179 struct ath_hal *ah = sc->sc_ah; 3180 3181 /* XXX block beacon interrupts */ 3182 ath_hal_setdefantenna(ah, antenna); 3183 if (sc->sc_defant != antenna) 3184 sc->sc_stats.ast_ant_defswitch++; 3185 sc->sc_defant = antenna; 3186 sc->sc_rxotherant = 0; 3187} 3188 3189static void 3190ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) 3191{ 3192 txq->axq_qnum = qnum; 3193 txq->axq_ac = 0; 3194 txq->axq_depth = 0; 3195 txq->axq_aggr_depth = 0; 3196 txq->axq_intrcnt = 0; 3197 txq->axq_link = NULL; 3198 txq->axq_softc = sc; 3199 TAILQ_INIT(&txq->axq_q); 3200 TAILQ_INIT(&txq->axq_tidq); 3201 ATH_TXQ_LOCK_INIT(sc, txq); 3202} 3203 3204/* 3205 * Setup a h/w transmit queue. 
3206 */ 3207static struct ath_txq * 3208ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 3209{ 3210#define N(a) (sizeof(a)/sizeof(a[0])) 3211 struct ath_hal *ah = sc->sc_ah; 3212 HAL_TXQ_INFO qi; 3213 int qnum; 3214 3215 memset(&qi, 0, sizeof(qi)); 3216 qi.tqi_subtype = subtype; 3217 qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 3218 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 3219 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 3220 /* 3221 * Enable interrupts only for EOL and DESC conditions. 3222 * We mark tx descriptors to receive a DESC interrupt 3223 * when a tx queue gets deep; otherwise waiting for the 3224 * EOL to reap descriptors. Note that this is done to 3225 * reduce interrupt load and this only defers reaping 3226 * descriptors, never transmitting frames. Aside from 3227 * reducing interrupts this also permits more concurrency. 3228 * The only potential downside is if the tx queue backs 3229 * up in which case the top half of the kernel may backup 3230 * due to a lack of tx descriptors. 3231 */ 3232 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE; 3233 qnum = ath_hal_setuptxqueue(ah, qtype, &qi); 3234 if (qnum == -1) { 3235 /* 3236 * NB: don't print a message, this happens 3237 * normally on parts with too few tx queues 3238 */ 3239 return NULL; 3240 } 3241 if (qnum >= N(sc->sc_txq)) { 3242 device_printf(sc->sc_dev, 3243 "hal qnum %u out of range, max %zu!\n", 3244 qnum, N(sc->sc_txq)); 3245 ath_hal_releasetxqueue(ah, qnum); 3246 return NULL; 3247 } 3248 if (!ATH_TXQ_SETUP(sc, qnum)) { 3249 ath_txq_init(sc, &sc->sc_txq[qnum], qnum); 3250 sc->sc_txqsetup |= 1<<qnum; 3251 } 3252 return &sc->sc_txq[qnum]; 3253#undef N 3254} 3255 3256/* 3257 * Setup a hardware data transmit queue for the specified 3258 * access control. The hal may not support all requested 3259 * queues in which case it will return a reference to a 3260 * previously setup queue. We record the mapping from ac's 3261 * to h/w queues for use by ath_tx_start and also track 3262 * the set of h/w queues being used to optimize work in the 3263 * transmit interrupt handler and related routines. 3264 */ 3265static int 3266ath_tx_setup(struct ath_softc *sc, int ac, int haltype) 3267{ 3268#define N(a) (sizeof(a)/sizeof(a[0])) 3269 struct ath_txq *txq; 3270 3271 if (ac >= N(sc->sc_ac2q)) { 3272 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", 3273 ac, N(sc->sc_ac2q)); 3274 return 0; 3275 } 3276 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); 3277 if (txq != NULL) { 3278 txq->axq_ac = ac; 3279 sc->sc_ac2q[ac] = txq; 3280 return 1; 3281 } else 3282 return 0; 3283#undef N 3284} 3285 3286/* 3287 * Update WME parameters for a transmit queue. 3288 */ 3289static int 3290ath_txq_update(struct ath_softc *sc, int ac) 3291{ 3292#define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) 3293#define ATH_TXOP_TO_US(v) (v<<5) 3294 struct ifnet *ifp = sc->sc_ifp; 3295 struct ieee80211com *ic = ifp->if_l2com; 3296 struct ath_txq *txq = sc->sc_ac2q[ac]; 3297 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 3298 struct ath_hal *ah = sc->sc_ah; 3299 HAL_TXQ_INFO qi; 3300 3301 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); 3302#ifdef IEEE80211_SUPPORT_TDMA 3303 if (sc->sc_tdma) { 3304 /* 3305 * AIFS is zero so there's no pre-transmit wait. The 3306 * burst time defines the slot duration and is configured 3307 * through net80211. The QCU is setup to not do post-xmit 3308 * back off, lockout all lower-priority QCU's, and fire 3309 * off the DMA beacon alert timer which is setup based 3310 * on the slot configuration. 
3311 */ 3312 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 3313 | HAL_TXQ_TXERRINT_ENABLE 3314 | HAL_TXQ_TXURNINT_ENABLE 3315 | HAL_TXQ_TXEOLINT_ENABLE 3316 | HAL_TXQ_DBA_GATED 3317 | HAL_TXQ_BACKOFF_DISABLE 3318 | HAL_TXQ_ARB_LOCKOUT_GLOBAL 3319 ; 3320 qi.tqi_aifs = 0; 3321 /* XXX +dbaprep? */ 3322 qi.tqi_readyTime = sc->sc_tdmaslotlen; 3323 qi.tqi_burstTime = qi.tqi_readyTime; 3324 } else { 3325#endif 3326 /* 3327 * XXX shouldn't this just use the default flags 3328 * used in the previous queue setup? 3329 */ 3330 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 3331 | HAL_TXQ_TXERRINT_ENABLE 3332 | HAL_TXQ_TXDESCINT_ENABLE 3333 | HAL_TXQ_TXURNINT_ENABLE 3334 | HAL_TXQ_TXEOLINT_ENABLE 3335 ; 3336 qi.tqi_aifs = wmep->wmep_aifsn; 3337 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 3338 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 3339 qi.tqi_readyTime = 0; 3340 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit); 3341#ifdef IEEE80211_SUPPORT_TDMA 3342 } 3343#endif 3344 3345 DPRINTF(sc, ATH_DEBUG_RESET, 3346 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n", 3347 __func__, txq->axq_qnum, qi.tqi_qflags, 3348 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime); 3349 3350 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { 3351 if_printf(ifp, "unable to update hardware queue " 3352 "parameters for %s traffic!\n", 3353 ieee80211_wme_acnames[ac]); 3354 return 0; 3355 } else { 3356 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ 3357 return 1; 3358 } 3359#undef ATH_TXOP_TO_US 3360#undef ATH_EXPONENT_TO_VALUE 3361} 3362 3363/* 3364 * Callback from the 802.11 layer to update WME parameters. 3365 */ 3366int 3367ath_wme_update(struct ieee80211com *ic) 3368{ 3369 struct ath_softc *sc = ic->ic_ifp->if_softc; 3370 3371 return !ath_txq_update(sc, WME_AC_BE) || 3372 !ath_txq_update(sc, WME_AC_BK) || 3373 !ath_txq_update(sc, WME_AC_VI) || 3374 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; 3375} 3376 3377/* 3378 * Reclaim resources for a setup queue. 3379 */ 3380static void 3381ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 3382{ 3383 3384 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); 3385 ATH_TXQ_LOCK_DESTROY(txq); 3386 sc->sc_txqsetup &= ~(1<<txq->axq_qnum); 3387} 3388 3389/* 3390 * Reclaim all tx queue resources. 3391 */ 3392static void 3393ath_tx_cleanup(struct ath_softc *sc) 3394{ 3395 int i; 3396 3397 ATH_TXBUF_LOCK_DESTROY(sc); 3398 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 3399 if (ATH_TXQ_SETUP(sc, i)) 3400 ath_tx_cleanupq(sc, &sc->sc_txq[i]); 3401} 3402 3403/* 3404 * Return h/w rate index for an IEEE rate (w/o basic rate bit) 3405 * using the current rates in sc_rixmap. 3406 */ 3407int 3408ath_tx_findrix(const struct ath_softc *sc, uint8_t rate) 3409{ 3410 int rix = sc->sc_rixmap[rate]; 3411 /* NB: return lowest rix for invalid rate */ 3412 return (rix == 0xff ? 
0 : rix); 3413} 3414 3415static void 3416ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts, 3417 struct ath_buf *bf) 3418{ 3419 struct ieee80211_node *ni = bf->bf_node; 3420 struct ifnet *ifp = sc->sc_ifp; 3421 struct ieee80211com *ic = ifp->if_l2com; 3422 int sr, lr, pri; 3423 3424 if (ts->ts_status == 0) { 3425 u_int8_t txant = ts->ts_antenna; 3426 sc->sc_stats.ast_ant_tx[txant]++; 3427 sc->sc_ant_tx[txant]++; 3428 if (ts->ts_finaltsi != 0) 3429 sc->sc_stats.ast_tx_altrate++; 3430 pri = M_WME_GETAC(bf->bf_m); 3431 if (pri >= WME_AC_VO) 3432 ic->ic_wme.wme_hipri_traffic++; 3433 if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) 3434 ni->ni_inact = ni->ni_inact_reload; 3435 } else { 3436 if (ts->ts_status & HAL_TXERR_XRETRY) 3437 sc->sc_stats.ast_tx_xretries++; 3438 if (ts->ts_status & HAL_TXERR_FIFO) 3439 sc->sc_stats.ast_tx_fifoerr++; 3440 if (ts->ts_status & HAL_TXERR_FILT) 3441 sc->sc_stats.ast_tx_filtered++; 3442 if (ts->ts_status & HAL_TXERR_XTXOP) 3443 sc->sc_stats.ast_tx_xtxop++; 3444 if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED) 3445 sc->sc_stats.ast_tx_timerexpired++; 3446 3447 if (ts->ts_status & HAL_TX_DATA_UNDERRUN) 3448 sc->sc_stats.ast_tx_data_underrun++; 3449 if (ts->ts_status & HAL_TX_DELIM_UNDERRUN) 3450 sc->sc_stats.ast_tx_delim_underrun++; 3451 3452 if (bf->bf_m->m_flags & M_FF) 3453 sc->sc_stats.ast_ff_txerr++; 3454 } 3455 /* XXX when is this valid? */ 3456 if (ts->ts_status & HAL_TX_DESC_CFG_ERR) 3457 sc->sc_stats.ast_tx_desccfgerr++; 3458 3459 sr = ts->ts_shortretry; 3460 lr = ts->ts_longretry; 3461 sc->sc_stats.ast_tx_shortretry += sr; 3462 sc->sc_stats.ast_tx_longretry += lr; 3463 3464} 3465 3466/* 3467 * The default completion. If fail is 1, this means 3468 * "please don't retry the frame, and just return -1 status 3469 * to the net80211 stack. 3470 */ 3471void 3472ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 3473{ 3474 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 3475 int st; 3476 3477 if (fail == 1) 3478 st = -1; 3479 else 3480 st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ? 3481 ts->ts_status : HAL_TXERR_XRETRY; 3482 3483 if (bf->bf_state.bfs_dobaw) 3484 device_printf(sc->sc_dev, 3485 "%s: bf %p: seqno %d: dobaw should've been cleared!\n", 3486 __func__, 3487 bf, 3488 SEQNO(bf->bf_state.bfs_seqno)); 3489 if (bf->bf_next != NULL) 3490 device_printf(sc->sc_dev, 3491 "%s: bf %p: seqno %d: bf_next not NULL!\n", 3492 __func__, 3493 bf, 3494 SEQNO(bf->bf_state.bfs_seqno)); 3495 3496 /* 3497 * Do any tx complete callback. Note this must 3498 * be done before releasing the node reference. 3499 * This will free the mbuf, release the net80211 3500 * node and recycle the ath_buf. 3501 */ 3502 ath_tx_freebuf(sc, bf, st); 3503} 3504 3505/* 3506 * Update rate control with the given completion status. 3507 */ 3508void 3509ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 3510 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen, 3511 int nframes, int nbad) 3512{ 3513 struct ath_node *an; 3514 3515 /* Only for unicast frames */ 3516 if (ni == NULL) 3517 return; 3518 3519 an = ATH_NODE(ni); 3520 3521 if ((ts->ts_status & HAL_TXERR_FILT) == 0) { 3522 ATH_NODE_LOCK(an); 3523 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad); 3524 ATH_NODE_UNLOCK(an); 3525 } 3526} 3527 3528/* 3529 * Update the busy status of the last frame on the free list. 
3530 * When doing TDMA, the busy flag tracks whether the hardware 3531 * currently points to this buffer or not, and thus gated DMA 3532 * may restart by re-reading the last descriptor in this 3533 * buffer. 3534 * 3535 * This should be called in the completion function once one 3536 * of the buffers has been used. 3537 */ 3538static void 3539ath_tx_update_busy(struct ath_softc *sc) 3540{ 3541 struct ath_buf *last; 3542 3543 /* 3544 * Since the last frame may still be marked 3545 * as ATH_BUF_BUSY, unmark it here before 3546 * finishing the frame processing. 3547 * Since we've completed a frame (aggregate 3548 * or otherwise), the hardware has moved on 3549 * and is no longer referencing the previous 3550 * descriptor. 3551 */ 3552 ATH_TXBUF_LOCK_ASSERT(sc); 3553 last = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s); 3554 if (last != NULL) 3555 last->bf_flags &= ~ATH_BUF_BUSY; 3556 last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s); 3557 if (last != NULL) 3558 last->bf_flags &= ~ATH_BUF_BUSY; 3559} 3560 3561/* 3562 * Process completed xmit descriptors from the specified queue. 3563 * Kick the packet scheduler if needed. This can occur from this 3564 * particular task. 3565 */ 3566static int 3567ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) 3568{ 3569 struct ath_hal *ah = sc->sc_ah; 3570 struct ath_buf *bf; 3571 struct ath_desc *ds; 3572 struct ath_tx_status *ts; 3573 struct ieee80211_node *ni; 3574 struct ath_node *an; 3575#ifdef IEEE80211_SUPPORT_SUPERG 3576 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 3577#endif /* IEEE80211_SUPPORT_SUPERG */ 3578 int nacked; 3579 HAL_STATUS status; 3580 3581 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", 3582 __func__, txq->axq_qnum, 3583 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 3584 txq->axq_link); 3585 nacked = 0; 3586 for (;;) { 3587 ATH_TXQ_LOCK(txq); 3588 txq->axq_intrcnt = 0; /* reset periodic desc intr count */ 3589 bf = TAILQ_FIRST(&txq->axq_q); 3590 if (bf == NULL) { 3591 ATH_TXQ_UNLOCK(txq); 3592 break; 3593 } 3594 ds = bf->bf_lastds; /* XXX must be setup correctly! */ 3595 ts = &bf->bf_status.ds_txstat; 3596 status = ath_hal_txprocdesc(ah, ds, ts); 3597#ifdef ATH_DEBUG 3598 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) 3599 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 3600 status == HAL_OK); 3601 else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0)) { 3602 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 3603 status == HAL_OK); 3604 } 3605#endif 3606 if (status == HAL_EINPROGRESS) { 3607 ATH_TXQ_UNLOCK(txq); 3608 break; 3609 } 3610 ATH_TXQ_REMOVE(txq, bf, bf_list); 3611#ifdef IEEE80211_SUPPORT_TDMA 3612 if (txq->axq_depth > 0) { 3613 /* 3614 * More frames follow. Mark the buffer busy 3615 * so it's not re-used while the hardware may 3616 * still re-read the link field in the descriptor. 3617 * 3618 * Use the last buffer in an aggregate as that 3619 * is where the hardware may be - intermediate 3620 * descriptors won't be "busy". 3621 */ 3622 bf->bf_last->bf_flags |= ATH_BUF_BUSY; 3623 } else 3624#else 3625 if (txq->axq_depth == 0) 3626#endif 3627 txq->axq_link = NULL; 3628 if (bf->bf_state.bfs_aggr) 3629 txq->axq_aggr_depth--; 3630 3631 ni = bf->bf_node; 3632 /* 3633 * If unicast frame was ack'd update RSSI, 3634 * including the last rx time used to 3635 * workaround phantom bmiss interrupts. 
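 * (The nacked count returned from this routine makes the
 * ath_tx_proc*() callers refresh sc_lastrx from the TSF, so a busy
 * TX-only link also holds off the phantom-bmiss check in
 * ath_bmiss_vap().)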
3636 */ 3637 if (ni != NULL && ts->ts_status == 0 && 3638 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) { 3639 nacked++; 3640 sc->sc_stats.ast_tx_rssi = ts->ts_rssi; 3641 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, 3642 ts->ts_rssi); 3643 } 3644 ATH_TXQ_UNLOCK(txq); 3645 3646 /* If unicast frame, update general statistics */ 3647 if (ni != NULL) { 3648 an = ATH_NODE(ni); 3649 /* update statistics */ 3650 ath_tx_update_stats(sc, ts, bf); 3651 } 3652 3653 /* 3654 * Call the completion handler. 3655 * The completion handler is responsible for 3656 * calling the rate control code. 3657 * 3658 * Frames with no completion handler get the 3659 * rate control code called here. 3660 */ 3661 if (bf->bf_comp == NULL) { 3662 if ((ts->ts_status & HAL_TXERR_FILT) == 0 && 3663 (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) { 3664 /* 3665 * XXX assume this isn't an aggregate 3666 * frame. 3667 */ 3668 ath_tx_update_ratectrl(sc, ni, 3669 bf->bf_state.bfs_rc, ts, 3670 bf->bf_state.bfs_pktlen, 1, 3671 (ts->ts_status == 0 ? 0 : 1)); 3672 } 3673 ath_tx_default_comp(sc, bf, 0); 3674 } else 3675 bf->bf_comp(sc, bf, 0); 3676 } 3677#ifdef IEEE80211_SUPPORT_SUPERG 3678 /* 3679 * Flush fast-frame staging queue when traffic slows. 3680 */ 3681 if (txq->axq_depth <= 1) 3682 ieee80211_ff_flush(ic, txq->axq_ac); 3683#endif 3684 3685 /* Kick the TXQ scheduler */ 3686 if (dosched) { 3687 ATH_TXQ_LOCK(txq); 3688 ath_txq_sched(sc, txq); 3689 ATH_TXQ_UNLOCK(txq); 3690 } 3691 3692 return nacked; 3693} 3694 3695#define TXQACTIVE(t, q) ( (t) & (1 << (q))) 3696 3697/* 3698 * Deferred processing of transmit interrupt; special-cased 3699 * for a single hardware transmit queue (e.g. 5210 and 5211). 3700 */ 3701static void 3702ath_tx_proc_q0(void *arg, int npending) 3703{ 3704 struct ath_softc *sc = arg; 3705 struct ifnet *ifp = sc->sc_ifp; 3706 uint32_t txqs; 3707 3708 ATH_PCU_LOCK(sc); 3709 sc->sc_txproc_cnt++; 3710 txqs = sc->sc_txq_active; 3711 sc->sc_txq_active &= ~txqs; 3712 ATH_PCU_UNLOCK(sc); 3713 3714 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1)) 3715 /* XXX why is lastrx updated in tx code? */ 3716 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 3717 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 3718 ath_tx_processq(sc, sc->sc_cabq, 1); 3719 IF_LOCK(&ifp->if_snd); 3720 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3721 IF_UNLOCK(&ifp->if_snd); 3722 sc->sc_wd_timer = 0; 3723 3724 if (sc->sc_softled) 3725 ath_led_event(sc, sc->sc_txrix); 3726 3727 ATH_PCU_LOCK(sc); 3728 sc->sc_txproc_cnt--; 3729 ATH_PCU_UNLOCK(sc); 3730 3731 ath_tx_kick(sc); 3732} 3733 3734/* 3735 * Deferred processing of transmit interrupt; special-cased 3736 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). 3737 */ 3738static void 3739ath_tx_proc_q0123(void *arg, int npending) 3740{ 3741 struct ath_softc *sc = arg; 3742 struct ifnet *ifp = sc->sc_ifp; 3743 int nacked; 3744 uint32_t txqs; 3745 3746 ATH_PCU_LOCK(sc); 3747 sc->sc_txproc_cnt++; 3748 txqs = sc->sc_txq_active; 3749 sc->sc_txq_active &= ~txqs; 3750 ATH_PCU_UNLOCK(sc); 3751 3752 /* 3753 * Process each active queue. 
3754 */ 3755 nacked = 0; 3756 if (TXQACTIVE(txqs, 0)) 3757 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1); 3758 if (TXQACTIVE(txqs, 1)) 3759 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1); 3760 if (TXQACTIVE(txqs, 2)) 3761 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1); 3762 if (TXQACTIVE(txqs, 3)) 3763 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1); 3764 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 3765 ath_tx_processq(sc, sc->sc_cabq, 1); 3766 if (nacked) 3767 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 3768 3769 IF_LOCK(&ifp->if_snd); 3770 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3771 IF_UNLOCK(&ifp->if_snd); 3772 sc->sc_wd_timer = 0; 3773 3774 if (sc->sc_softled) 3775 ath_led_event(sc, sc->sc_txrix); 3776 3777 ATH_PCU_LOCK(sc); 3778 sc->sc_txproc_cnt--; 3779 ATH_PCU_UNLOCK(sc); 3780 3781 ath_tx_kick(sc); 3782} 3783 3784/* 3785 * Deferred processing of transmit interrupt. 3786 */ 3787static void 3788ath_tx_proc(void *arg, int npending) 3789{ 3790 struct ath_softc *sc = arg; 3791 struct ifnet *ifp = sc->sc_ifp; 3792 int i, nacked; 3793 uint32_t txqs; 3794 3795 ATH_PCU_LOCK(sc); 3796 sc->sc_txproc_cnt++; 3797 txqs = sc->sc_txq_active; 3798 sc->sc_txq_active &= ~txqs; 3799 ATH_PCU_UNLOCK(sc); 3800 3801 /* 3802 * Process each active queue. 3803 */ 3804 nacked = 0; 3805 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 3806 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i)) 3807 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1); 3808 if (nacked) 3809 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 3810 3811 /* XXX check this inside of IF_LOCK? */ 3812 IF_LOCK(&ifp->if_snd); 3813 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3814 IF_UNLOCK(&ifp->if_snd); 3815 sc->sc_wd_timer = 0; 3816 3817 if (sc->sc_softled) 3818 ath_led_event(sc, sc->sc_txrix); 3819 3820 ATH_PCU_LOCK(sc); 3821 sc->sc_txproc_cnt--; 3822 ATH_PCU_UNLOCK(sc); 3823 3824 ath_tx_kick(sc); 3825} 3826#undef TXQACTIVE 3827 3828/* 3829 * Deferred processing of TXQ rescheduling. 3830 */ 3831static void 3832ath_txq_sched_tasklet(void *arg, int npending) 3833{ 3834 struct ath_softc *sc = arg; 3835 int i; 3836 3837 /* XXX is skipping ok? 
*/ 3838 ATH_PCU_LOCK(sc); 3839#if 0 3840 if (sc->sc_inreset_cnt > 0) { 3841 device_printf(sc->sc_dev, 3842 "%s: sc_inreset_cnt > 0; skipping\n", __func__); 3843 ATH_PCU_UNLOCK(sc); 3844 return; 3845 } 3846#endif 3847 sc->sc_txproc_cnt++; 3848 ATH_PCU_UNLOCK(sc); 3849 3850 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 3851 if (ATH_TXQ_SETUP(sc, i)) { 3852 ATH_TXQ_LOCK(&sc->sc_txq[i]); 3853 ath_txq_sched(sc, &sc->sc_txq[i]); 3854 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 3855 } 3856 } 3857 3858 ATH_PCU_LOCK(sc); 3859 sc->sc_txproc_cnt--; 3860 ATH_PCU_UNLOCK(sc); 3861} 3862 3863void 3864ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf) 3865{ 3866 3867 ATH_TXBUF_LOCK_ASSERT(sc); 3868 3869 if (bf->bf_flags & ATH_BUF_MGMT) 3870 TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list); 3871 else { 3872 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 3873 sc->sc_txbuf_cnt++; 3874 if (sc->sc_txbuf_cnt > ath_txbuf) { 3875 device_printf(sc->sc_dev, 3876 "%s: sc_txbuf_cnt > %d?\n", 3877 __func__, 3878 ath_txbuf); 3879 sc->sc_txbuf_cnt = ath_txbuf; 3880 } 3881 } 3882} 3883 3884void 3885ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf) 3886{ 3887 3888 ATH_TXBUF_LOCK_ASSERT(sc); 3889 3890 if (bf->bf_flags & ATH_BUF_MGMT) 3891 TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list); 3892 else { 3893 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 3894 sc->sc_txbuf_cnt++; 3895 if (sc->sc_txbuf_cnt > ATH_TXBUF) { 3896 device_printf(sc->sc_dev, 3897 "%s: sc_txbuf_cnt > %d?\n", 3898 __func__, 3899 ATH_TXBUF); 3900 sc->sc_txbuf_cnt = ATH_TXBUF; 3901 } 3902 } 3903} 3904 3905/* 3906 * Return a buffer to the pool and update the 'busy' flag on the 3907 * previous 'tail' entry. 3908 * 3909 * This _must_ only be called when the buffer is involved in a completed 3910 * TX. The logic is that if it was part of an active TX, the previous 3911 * buffer on the list is now not involved in a halted TX DMA queue, waiting 3912 * for restart (eg for TDMA.) 3913 * 3914 * The caller must free the mbuf and recycle the node reference. 3915 */ 3916void 3917ath_freebuf(struct ath_softc *sc, struct ath_buf *bf) 3918{ 3919 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3920 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); 3921 3922 KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__)); 3923 KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__)); 3924 3925 ATH_TXBUF_LOCK(sc); 3926 ath_tx_update_busy(sc); 3927 ath_returnbuf_tail(sc, bf); 3928 ATH_TXBUF_UNLOCK(sc); 3929} 3930 3931/* 3932 * This is currently used by ath_tx_draintxq() and 3933 * ath_tx_tid_free_pkts(). 3934 * 3935 * It recycles a single ath_buf. 3936 */ 3937void 3938ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status) 3939{ 3940 struct ieee80211_node *ni = bf->bf_node; 3941 struct mbuf *m0 = bf->bf_m; 3942 3943 bf->bf_node = NULL; 3944 bf->bf_m = NULL; 3945 3946 /* Free the buffer, it's not needed any longer */ 3947 ath_freebuf(sc, bf); 3948 3949 if (ni != NULL) { 3950 /* 3951 * Do any callback and reclaim the node reference. 3952 */ 3953 if (m0->m_flags & M_TXCB) 3954 ieee80211_process_callback(ni, m0, status); 3955 ieee80211_free_node(ni); 3956 } 3957 m_freem(m0); 3958 3959 /* 3960 * XXX the buffer used to be freed -after-, but the DMA map was 3961 * freed where ath_freebuf() now is. I've no idea what this 3962 * will do. 
3963 */ 3964} 3965 3966void 3967ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) 3968{ 3969#ifdef ATH_DEBUG 3970 struct ath_hal *ah = sc->sc_ah; 3971#endif 3972 struct ath_buf *bf; 3973 u_int ix; 3974 3975 /* 3976 * NB: this assumes output has been stopped and 3977 * we do not need to block ath_tx_proc 3978 */ 3979 ATH_TXBUF_LOCK(sc); 3980 bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s); 3981 if (bf != NULL) 3982 bf->bf_flags &= ~ATH_BUF_BUSY; 3983 bf = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s); 3984 if (bf != NULL) 3985 bf->bf_flags &= ~ATH_BUF_BUSY; 3986 ATH_TXBUF_UNLOCK(sc); 3987 3988 for (ix = 0;; ix++) { 3989 ATH_TXQ_LOCK(txq); 3990 bf = TAILQ_FIRST(&txq->axq_q); 3991 if (bf == NULL) { 3992 txq->axq_link = NULL; 3993 ATH_TXQ_UNLOCK(txq); 3994 break; 3995 } 3996 ATH_TXQ_REMOVE(txq, bf, bf_list); 3997 if (bf->bf_state.bfs_aggr) 3998 txq->axq_aggr_depth--; 3999#ifdef ATH_DEBUG 4000 if (sc->sc_debug & ATH_DEBUG_RESET) { 4001 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 4002 4003 ath_printtxbuf(sc, bf, txq->axq_qnum, ix, 4004 ath_hal_txprocdesc(ah, bf->bf_lastds, 4005 &bf->bf_status.ds_txstat) == HAL_OK); 4006 ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *), 4007 bf->bf_m->m_len, 0, -1); 4008 } 4009#endif /* ATH_DEBUG */ 4010 /* 4011 * Since we're now doing magic in the completion 4012 * functions, we -must- call it for aggregation 4013 * destinations or BAW tracking will get upset. 4014 */ 4015 /* 4016 * Clear ATH_BUF_BUSY; the completion handler 4017 * will free the buffer. 4018 */ 4019 ATH_TXQ_UNLOCK(txq); 4020 bf->bf_flags &= ~ATH_BUF_BUSY; 4021 if (bf->bf_comp) 4022 bf->bf_comp(sc, bf, 1); 4023 else 4024 ath_tx_default_comp(sc, bf, 1); 4025 } 4026 4027 /* 4028 * Drain software queued frames which are on 4029 * active TIDs. 4030 */ 4031 ath_tx_txq_drain(sc, txq); 4032} 4033 4034static void 4035ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) 4036{ 4037 struct ath_hal *ah = sc->sc_ah; 4038 4039 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 4040 __func__, txq->axq_qnum, 4041 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), 4042 txq->axq_link); 4043 (void) ath_hal_stoptxdma(ah, txq->axq_qnum); 4044} 4045 4046static int 4047ath_stoptxdma(struct ath_softc *sc) 4048{ 4049 struct ath_hal *ah = sc->sc_ah; 4050 int i; 4051 4052 /* XXX return value */ 4053 if (sc->sc_invalid) 4054 return 0; 4055 4056 if (!sc->sc_invalid) { 4057 /* don't touch the hardware if marked invalid */ 4058 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 4059 __func__, sc->sc_bhalq, 4060 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq), 4061 NULL); 4062 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); 4063 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 4064 if (ATH_TXQ_SETUP(sc, i)) 4065 ath_tx_stopdma(sc, &sc->sc_txq[i]); 4066 } 4067 4068 return 1; 4069} 4070 4071/* 4072 * Drain the transmit queues and reclaim resources. 4073 */ 4074static void 4075ath_draintxq(struct ath_softc *sc, ATH_RESET_TYPE reset_type) 4076{ 4077#ifdef ATH_DEBUG 4078 struct ath_hal *ah = sc->sc_ah; 4079#endif 4080 struct ifnet *ifp = sc->sc_ifp; 4081 int i; 4082 4083 (void) ath_stoptxdma(sc); 4084 4085 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 4086 /* 4087 * XXX TODO: should we just handle the completed TX frames 4088 * here, whether or not the reset is a full one or not? 
4089 */ 4090 if (ATH_TXQ_SETUP(sc, i)) { 4091 if (reset_type == ATH_RESET_NOLOSS) 4092 ath_tx_processq(sc, &sc->sc_txq[i], 0); 4093 else 4094 ath_tx_draintxq(sc, &sc->sc_txq[i]); 4095 } 4096 } 4097#ifdef ATH_DEBUG 4098 if (sc->sc_debug & ATH_DEBUG_RESET) { 4099 struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf); 4100 if (bf != NULL && bf->bf_m != NULL) { 4101 ath_printtxbuf(sc, bf, sc->sc_bhalq, 0, 4102 ath_hal_txprocdesc(ah, bf->bf_lastds, 4103 &bf->bf_status.ds_txstat) == HAL_OK); 4104 ieee80211_dump_pkt(ifp->if_l2com, 4105 mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len, 4106 0, -1); 4107 } 4108 } 4109#endif /* ATH_DEBUG */ 4110 IF_LOCK(&ifp->if_snd); 4111 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 4112 IF_UNLOCK(&ifp->if_snd); 4113 sc->sc_wd_timer = 0; 4114} 4115 4116/* 4117 * Update internal state after a channel change. 4118 */ 4119static void 4120ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) 4121{ 4122 enum ieee80211_phymode mode; 4123 4124 /* 4125 * Change channels and update the h/w rate map 4126 * if we're switching; e.g. 11a to 11b/g. 4127 */ 4128 mode = ieee80211_chan2mode(chan); 4129 if (mode != sc->sc_curmode) 4130 ath_setcurmode(sc, mode); 4131 sc->sc_curchan = chan; 4132} 4133 4134/* 4135 * Set/change channels. If the channel is really being changed, 4136 * it's done by resetting the chip. To accomplish this we must 4137 * first clean up any pending DMA, then restart stuff after a la 4138 * ath_init. 4139 */ 4140static int 4141ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 4142{ 4143 struct ifnet *ifp = sc->sc_ifp; 4144 struct ieee80211com *ic = ifp->if_l2com; 4145 struct ath_hal *ah = sc->sc_ah; 4146 int ret = 0; 4147 4148 /* Treat this as an interface reset */ 4149 ATH_PCU_UNLOCK_ASSERT(sc); 4150 ATH_UNLOCK_ASSERT(sc); 4151 4152 /* (Try to) stop TX/RX from occurring */ 4153 taskqueue_block(sc->sc_tq); 4154 4155 ATH_PCU_LOCK(sc); 4156 ath_hal_intrset(ah, 0); /* Stop new RX/TX completion */ 4157 ath_txrx_stop_locked(sc); /* Stop pending RX/TX completion */ 4158 if (ath_reset_grablock(sc, 1) == 0) { 4159 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", 4160 __func__); 4161 } 4162 ATH_PCU_UNLOCK(sc); 4163 4164 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n", 4165 __func__, ieee80211_chan2ieee(ic, chan), 4166 chan->ic_freq, chan->ic_flags); 4167 if (chan != sc->sc_curchan) { 4168 HAL_STATUS status; 4169 /* 4170 * To switch channels clear any pending DMA operations; 4171 * wait long enough for the RX fifo to drain, reset the 4172 * hardware at the new frequency, and then re-enable 4173 * the relevant bits of the h/w. 4174 */ 4175#if 0 4176 ath_hal_intrset(ah, 0); /* disable interrupts */ 4177#endif 4178 ath_stoprecv(sc, 1); /* turn off frame recv */ 4179 /* 4180 * First, handle completed TX/RX frames. 4181 */ 4182 ath_rx_flush(sc); 4183 ath_draintxq(sc, ATH_RESET_NOLOSS); 4184 /* 4185 * Next, flush the non-scheduled frames. 4186 */ 4187 ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */ 4188 4189 if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) { 4190 if_printf(ifp, "%s: unable to reset " 4191 "channel %u (%u MHz, flags 0x%x), hal status %u\n", 4192 __func__, ieee80211_chan2ieee(ic, chan), 4193 chan->ic_freq, chan->ic_flags, status); 4194 ret = EIO; 4195 goto finish; 4196 } 4197 sc->sc_diversity = ath_hal_getdiversity(ah); 4198 4199 /* Let DFS at it in case it's a DFS channel */ 4200 ath_dfs_radar_enable(sc, chan); 4201 4202 /* 4203 * Re-enable rx framework. 
4204 */ 4205 if (ath_startrecv(sc) != 0) { 4206 if_printf(ifp, "%s: unable to restart recv logic\n", 4207 __func__); 4208 ret = EIO; 4209 goto finish; 4210 } 4211 4212 /* 4213 * Change channels and update the h/w rate map 4214 * if we're switching; e.g. 11a to 11b/g. 4215 */ 4216 ath_chan_change(sc, chan); 4217 4218 /* 4219 * Reset clears the beacon timers; reset them 4220 * here if needed. 4221 */ 4222 if (sc->sc_beacons) { /* restart beacons */ 4223#ifdef IEEE80211_SUPPORT_TDMA 4224 if (sc->sc_tdma) 4225 ath_tdma_config(sc, NULL); 4226 else 4227#endif 4228 ath_beacon_config(sc, NULL); 4229 } 4230 4231 /* 4232 * Re-enable interrupts. 4233 */ 4234#if 0 4235 ath_hal_intrset(ah, sc->sc_imask); 4236#endif 4237 } 4238 4239finish: 4240 ATH_PCU_LOCK(sc); 4241 sc->sc_inreset_cnt--; 4242 /* XXX only do this if sc_inreset_cnt == 0? */ 4243 ath_hal_intrset(ah, sc->sc_imask); 4244 ATH_PCU_UNLOCK(sc); 4245 4246 IF_LOCK(&ifp->if_snd); 4247 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 4248 IF_UNLOCK(&ifp->if_snd); 4249 ath_txrx_start(sc); 4250 /* XXX ath_start? */ 4251 4252 return ret; 4253} 4254 4255/* 4256 * Periodically recalibrate the PHY to account 4257 * for temperature/environment changes. 4258 */ 4259static void 4260ath_calibrate(void *arg) 4261{ 4262 struct ath_softc *sc = arg; 4263 struct ath_hal *ah = sc->sc_ah; 4264 struct ifnet *ifp = sc->sc_ifp; 4265 struct ieee80211com *ic = ifp->if_l2com; 4266 HAL_BOOL longCal, isCalDone; 4267 HAL_BOOL aniCal, shortCal = AH_FALSE; 4268 int nextcal; 4269 4270 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */ 4271 goto restart; 4272 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz); 4273 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000); 4274 if (sc->sc_doresetcal) 4275 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000); 4276 4277 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal); 4278 if (aniCal) { 4279 sc->sc_stats.ast_ani_cal++; 4280 sc->sc_lastani = ticks; 4281 ath_hal_ani_poll(ah, sc->sc_curchan); 4282 } 4283 4284 if (longCal) { 4285 sc->sc_stats.ast_per_cal++; 4286 sc->sc_lastlongcal = ticks; 4287 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 4288 /* 4289 * Rfgain is out of bounds, reset the chip 4290 * to load new gain values. 4291 */ 4292 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 4293 "%s: rfgain change\n", __func__); 4294 sc->sc_stats.ast_per_rfgain++; 4295 sc->sc_resetcal = 0; 4296 sc->sc_doresetcal = AH_TRUE; 4297 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); 4298 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 4299 return; 4300 } 4301 /* 4302 * If this long cal is after an idle period, then 4303 * reset the data collection state so we start fresh. 4304 */ 4305 if (sc->sc_resetcal) { 4306 (void) ath_hal_calreset(ah, sc->sc_curchan); 4307 sc->sc_lastcalreset = ticks; 4308 sc->sc_lastshortcal = ticks; 4309 sc->sc_resetcal = 0; 4310 sc->sc_doresetcal = AH_TRUE; 4311 } 4312 } 4313 4314 /* Only call if we're doing a short/long cal, not for ANI calibration */ 4315 if (shortCal || longCal) { 4316 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) { 4317 if (longCal) { 4318 /* 4319 * Calibrate noise floor data again in case of change. 
4320 */ 4321 ath_hal_process_noisefloor(ah); 4322 } 4323 } else { 4324 DPRINTF(sc, ATH_DEBUG_ANY, 4325 "%s: calibration of channel %u failed\n", 4326 __func__, sc->sc_curchan->ic_freq); 4327 sc->sc_stats.ast_per_calfail++; 4328 } 4329 if (shortCal) 4330 sc->sc_lastshortcal = ticks; 4331 } 4332 if (!isCalDone) { 4333restart: 4334 /* 4335 * Use a shorter interval to potentially collect multiple 4336 * data samples required to complete calibration. Once 4337 * we're told the work is done we drop back to a longer 4338 * interval between requests. We're more aggressive doing 4339 * work when operating as an AP to improve operation right 4340 * after startup. 4341 */ 4342 sc->sc_lastshortcal = ticks; 4343 nextcal = ath_shortcalinterval*hz/1000; 4344 if (sc->sc_opmode != HAL_M_HOSTAP) 4345 nextcal *= 10; 4346 sc->sc_doresetcal = AH_TRUE; 4347 } else { 4348 /* nextcal should be the shortest time for next event */ 4349 nextcal = ath_longcalinterval*hz; 4350 if (sc->sc_lastcalreset == 0) 4351 sc->sc_lastcalreset = sc->sc_lastlongcal; 4352 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz) 4353 sc->sc_resetcal = 1; /* setup reset next trip */ 4354 sc->sc_doresetcal = AH_FALSE; 4355 } 4356 /* ANI calibration may occur more often than short/long/resetcal */ 4357 if (ath_anicalinterval > 0) 4358 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000); 4359 4360 if (nextcal != 0) { 4361 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n", 4362 __func__, nextcal, isCalDone ? "" : "!"); 4363 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc); 4364 } else { 4365 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", 4366 __func__); 4367 /* NB: don't rearm timer */ 4368 } 4369} 4370 4371static void 4372ath_scan_start(struct ieee80211com *ic) 4373{ 4374 struct ifnet *ifp = ic->ic_ifp; 4375 struct ath_softc *sc = ifp->if_softc; 4376 struct ath_hal *ah = sc->sc_ah; 4377 u_int32_t rfilt; 4378 4379 /* XXX calibration timer? */ 4380 4381 ATH_LOCK(sc); 4382 sc->sc_scanning = 1; 4383 sc->sc_syncbeacon = 0; 4384 rfilt = ath_calcrxfilter(sc); 4385 ATH_UNLOCK(sc); 4386 4387 ATH_PCU_LOCK(sc); 4388 ath_hal_setrxfilter(ah, rfilt); 4389 ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0); 4390 ATH_PCU_UNLOCK(sc); 4391 4392 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", 4393 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr)); 4394} 4395 4396static void 4397ath_scan_end(struct ieee80211com *ic) 4398{ 4399 struct ifnet *ifp = ic->ic_ifp; 4400 struct ath_softc *sc = ifp->if_softc; 4401 struct ath_hal *ah = sc->sc_ah; 4402 u_int32_t rfilt; 4403 4404 ATH_LOCK(sc); 4405 sc->sc_scanning = 0; 4406 rfilt = ath_calcrxfilter(sc); 4407 ATH_UNLOCK(sc); 4408 4409 ATH_PCU_LOCK(sc); 4410 ath_hal_setrxfilter(ah, rfilt); 4411 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 4412 4413 ath_hal_process_noisefloor(ah); 4414 ATH_PCU_UNLOCK(sc); 4415 4416 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 4417 __func__, rfilt, ether_sprintf(sc->sc_curbssid), 4418 sc->sc_curaid); 4419} 4420 4421#ifdef ATH_ENABLE_11N 4422/* 4423 * For now, just do a channel change. 4424 * 4425 * Later, we'll go through the hard slog of suspending tx/rx, changing rate 4426 * control state and resetting the hardware without dropping frames out 4427 * of the queue. 
4428 * 4429 * The unfortunate trouble here is making absolutely sure that the 4430 * channel width change has propagated enough so the hardware 4431 * absolutely isn't handed bogus frames for it's current operating 4432 * mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and 4433 * does occur in parallel, we need to make certain we've blocked 4434 * any further ongoing TX (and RX, that can cause raw TX) 4435 * before we do this. 4436 */ 4437static void 4438ath_update_chw(struct ieee80211com *ic) 4439{ 4440 struct ifnet *ifp = ic->ic_ifp; 4441 struct ath_softc *sc = ifp->if_softc; 4442 4443 DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__); 4444 ath_set_channel(ic); 4445} 4446#endif /* ATH_ENABLE_11N */ 4447 4448static void 4449ath_set_channel(struct ieee80211com *ic) 4450{ 4451 struct ifnet *ifp = ic->ic_ifp; 4452 struct ath_softc *sc = ifp->if_softc; 4453 4454 (void) ath_chan_set(sc, ic->ic_curchan); 4455 /* 4456 * If we are returning to our bss channel then mark state 4457 * so the next recv'd beacon's tsf will be used to sync the 4458 * beacon timers. Note that since we only hear beacons in 4459 * sta/ibss mode this has no effect in other operating modes. 4460 */ 4461 ATH_LOCK(sc); 4462 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan) 4463 sc->sc_syncbeacon = 1; 4464 ATH_UNLOCK(sc); 4465} 4466 4467/* 4468 * Walk the vap list and check if there any vap's in RUN state. 4469 */ 4470static int 4471ath_isanyrunningvaps(struct ieee80211vap *this) 4472{ 4473 struct ieee80211com *ic = this->iv_ic; 4474 struct ieee80211vap *vap; 4475 4476 IEEE80211_LOCK_ASSERT(ic); 4477 4478 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { 4479 if (vap != this && vap->iv_state >= IEEE80211_S_RUN) 4480 return 1; 4481 } 4482 return 0; 4483} 4484 4485static int 4486ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 4487{ 4488 struct ieee80211com *ic = vap->iv_ic; 4489 struct ath_softc *sc = ic->ic_ifp->if_softc; 4490 struct ath_vap *avp = ATH_VAP(vap); 4491 struct ath_hal *ah = sc->sc_ah; 4492 struct ieee80211_node *ni = NULL; 4493 int i, error, stamode; 4494 u_int32_t rfilt; 4495 int csa_run_transition = 0; 4496 static const HAL_LED_STATE leds[] = { 4497 HAL_LED_INIT, /* IEEE80211_S_INIT */ 4498 HAL_LED_SCAN, /* IEEE80211_S_SCAN */ 4499 HAL_LED_AUTH, /* IEEE80211_S_AUTH */ 4500 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ 4501 HAL_LED_RUN, /* IEEE80211_S_CAC */ 4502 HAL_LED_RUN, /* IEEE80211_S_RUN */ 4503 HAL_LED_RUN, /* IEEE80211_S_CSA */ 4504 HAL_LED_RUN, /* IEEE80211_S_SLEEP */ 4505 }; 4506 4507 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, 4508 ieee80211_state_name[vap->iv_state], 4509 ieee80211_state_name[nstate]); 4510 4511 /* 4512 * net80211 _should_ have the comlock asserted at this point. 4513 * There are some comments around the calls to vap->iv_newstate 4514 * which indicate that it (newstate) may end up dropping the 4515 * lock. This and the subsequent lock assert check after newstate 4516 * are an attempt to catch these and figure out how/why. 4517 */ 4518 IEEE80211_LOCK_ASSERT(ic); 4519 4520 if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN) 4521 csa_run_transition = 1; 4522 4523 callout_drain(&sc->sc_cal_ch); 4524 ath_hal_setledstate(ah, leds[nstate]); /* set LED */ 4525 4526 if (nstate == IEEE80211_S_SCAN) { 4527 /* 4528 * Scanning: turn off beacon miss and don't beacon. 4529 * Mark beacon state so when we reach RUN state we'll 4530 * [re]setup beacons. Unblock the task q thread so 4531 * deferred interrupt processing is done. 
4532 */ 4533 ath_hal_intrset(ah, 4534 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); 4535 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 4536 sc->sc_beacons = 0; 4537 taskqueue_unblock(sc->sc_tq); 4538 } 4539 4540 ni = ieee80211_ref_node(vap->iv_bss); 4541 rfilt = ath_calcrxfilter(sc); 4542 stamode = (vap->iv_opmode == IEEE80211_M_STA || 4543 vap->iv_opmode == IEEE80211_M_AHDEMO || 4544 vap->iv_opmode == IEEE80211_M_IBSS); 4545 if (stamode && nstate == IEEE80211_S_RUN) { 4546 sc->sc_curaid = ni->ni_associd; 4547 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid); 4548 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 4549 } 4550 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 4551 __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid); 4552 ath_hal_setrxfilter(ah, rfilt); 4553 4554 /* XXX is this to restore keycache on resume? */ 4555 if (vap->iv_opmode != IEEE80211_M_STA && 4556 (vap->iv_flags & IEEE80211_F_PRIVACY)) { 4557 for (i = 0; i < IEEE80211_WEP_NKID; i++) 4558 if (ath_hal_keyisvalid(ah, i)) 4559 ath_hal_keysetmac(ah, i, ni->ni_bssid); 4560 } 4561 4562 /* 4563 * Invoke the parent method to do net80211 work. 4564 */ 4565 error = avp->av_newstate(vap, nstate, arg); 4566 if (error != 0) 4567 goto bad; 4568 4569 /* 4570 * See above: ensure av_newstate() doesn't drop the lock 4571 * on us. 4572 */ 4573 IEEE80211_LOCK_ASSERT(ic); 4574 4575 if (nstate == IEEE80211_S_RUN) { 4576 /* NB: collect bss node again, it may have changed */ 4577 ieee80211_free_node(ni); 4578 ni = ieee80211_ref_node(vap->iv_bss); 4579 4580 DPRINTF(sc, ATH_DEBUG_STATE, 4581 "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s " 4582 "capinfo 0x%04x chan %d\n", __func__, 4583 vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), 4584 ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); 4585 4586 switch (vap->iv_opmode) { 4587#ifdef IEEE80211_SUPPORT_TDMA 4588 case IEEE80211_M_AHDEMO: 4589 if ((vap->iv_caps & IEEE80211_C_TDMA) == 0) 4590 break; 4591 /* fall thru... */ 4592#endif 4593 case IEEE80211_M_HOSTAP: 4594 case IEEE80211_M_IBSS: 4595 case IEEE80211_M_MBSS: 4596 /* 4597 * Allocate and setup the beacon frame. 4598 * 4599 * Stop any previous beacon DMA. This may be 4600 * necessary, for example, when an ibss merge 4601 * causes reconfiguration; there will be a state 4602 * transition from RUN->RUN that means we may 4603 * be called with beacon transmission active. 4604 */ 4605 ath_hal_stoptxdma(ah, sc->sc_bhalq); 4606 4607 error = ath_beacon_alloc(sc, ni); 4608 if (error != 0) 4609 goto bad; 4610 /* 4611 * If joining an adhoc network defer beacon timer 4612 * configuration to the next beacon frame so we 4613 * have a current TSF to use. Otherwise we're 4614 * starting an ibss/bss so there's no need to delay; 4615 * if this is the first vap moving to RUN state, then 4616 * beacon state needs to be [re]configured. 4617 */ 4618 if (vap->iv_opmode == IEEE80211_M_IBSS && 4619 ni->ni_tstamp.tsf != 0) { 4620 sc->sc_syncbeacon = 1; 4621 } else if (!sc->sc_beacons) { 4622#ifdef IEEE80211_SUPPORT_TDMA 4623 if (vap->iv_caps & IEEE80211_C_TDMA) 4624 ath_tdma_config(sc, vap); 4625 else 4626#endif 4627 ath_beacon_config(sc, vap); 4628 sc->sc_beacons = 1; 4629 } 4630 break; 4631 case IEEE80211_M_STA: 4632 /* 4633 * Defer beacon timer configuration to the next 4634 * beacon frame so we have a current TSF to use 4635 * (any TSF collected when scanning is likely old). 
4636 * However if it's due to a CSA -> RUN transition, 4637 * force a beacon update so we pick up a lack of 4638 * beacons from an AP in CAC and thus force a 4639 * scan. 4640 */ 4641 sc->sc_syncbeacon = 1; 4642 if (csa_run_transition) 4643 ath_beacon_config(sc, vap); 4644 break; 4645 case IEEE80211_M_MONITOR: 4646 /* 4647 * Monitor mode vaps have only INIT->RUN and RUN->RUN 4648 * transitions so we must re-enable interrupts here to 4649 * handle the case of a single monitor mode vap. 4650 */ 4651 ath_hal_intrset(ah, sc->sc_imask); 4652 break; 4653 case IEEE80211_M_WDS: 4654 break; 4655 default: 4656 break; 4657 } 4658 /* 4659 * Let the hal process statistics collected during a 4660 * scan so it can provide calibrated noise floor data. 4661 */ 4662 ath_hal_process_noisefloor(ah); 4663 /* 4664 * Reset rssi stats; maybe not the best place... 4665 */ 4666 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; 4667 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; 4668 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; 4669 /* 4670 * Finally, start any timers and the task q thread 4671 * (in case we didn't go through SCAN state). 4672 */ 4673 if (ath_longcalinterval != 0) { 4674 /* start periodic recalibration timer */ 4675 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 4676 } else { 4677 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 4678 "%s: calibration disabled\n", __func__); 4679 } 4680 taskqueue_unblock(sc->sc_tq); 4681 } else if (nstate == IEEE80211_S_INIT) { 4682 /* 4683 * If there are no vaps left in RUN state then 4684 * shutdown host/driver operation: 4685 * o disable interrupts 4686 * o disable the task queue thread 4687 * o mark beacon processing as stopped 4688 */ 4689 if (!ath_isanyrunningvaps(vap)) { 4690 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 4691 /* disable interrupts */ 4692 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL); 4693 taskqueue_block(sc->sc_tq); 4694 sc->sc_beacons = 0; 4695 } 4696#ifdef IEEE80211_SUPPORT_TDMA 4697 ath_hal_setcca(ah, AH_TRUE); 4698#endif 4699 } 4700bad: 4701 ieee80211_free_node(ni); 4702 return error; 4703} 4704 4705/* 4706 * Allocate a key cache slot to the station so we can 4707 * setup a mapping from key index to node. The key cache 4708 * slot is needed for managing antenna state and for 4709 * compression when stations do not use crypto. We do 4710 * it uniliaterally here; if crypto is employed this slot 4711 * will be reassigned. 4712 */ 4713static void 4714ath_setup_stationkey(struct ieee80211_node *ni) 4715{ 4716 struct ieee80211vap *vap = ni->ni_vap; 4717 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 4718 ieee80211_keyix keyix, rxkeyix; 4719 4720 /* XXX should take a locked ref to vap->iv_bss */ 4721 if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) { 4722 /* 4723 * Key cache is full; we'll fall back to doing 4724 * the more expensive lookup in software. Note 4725 * this also means no h/w compression. 4726 */ 4727 /* XXX msg+statistic */ 4728 } else { 4729 /* XXX locking? */ 4730 ni->ni_ucastkey.wk_keyix = keyix; 4731 ni->ni_ucastkey.wk_rxkeyix = rxkeyix; 4732 /* NB: must mark device key to get called back on delete */ 4733 ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY; 4734 IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr); 4735 /* NB: this will create a pass-thru key entry */ 4736 ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss); 4737 } 4738} 4739 4740/* 4741 * Setup driver-specific state for a newly associated node. 
4742 * Note that we're called also on a re-associate, the isnew 4743 * param tells us if this is the first time or not. 4744 */ 4745static void 4746ath_newassoc(struct ieee80211_node *ni, int isnew) 4747{ 4748 struct ath_node *an = ATH_NODE(ni); 4749 struct ieee80211vap *vap = ni->ni_vap; 4750 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 4751 const struct ieee80211_txparam *tp = ni->ni_txparms; 4752 4753 an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate); 4754 an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate); 4755 4756 ath_rate_newassoc(sc, an, isnew); 4757 if (isnew && 4758 (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey && 4759 ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE) 4760 ath_setup_stationkey(ni); 4761} 4762 4763static int 4764ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg, 4765 int nchans, struct ieee80211_channel chans[]) 4766{ 4767 struct ath_softc *sc = ic->ic_ifp->if_softc; 4768 struct ath_hal *ah = sc->sc_ah; 4769 HAL_STATUS status; 4770 4771 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 4772 "%s: rd %u cc %u location %c%s\n", 4773 __func__, reg->regdomain, reg->country, reg->location, 4774 reg->ecm ? " ecm" : ""); 4775 4776 status = ath_hal_set_channels(ah, chans, nchans, 4777 reg->country, reg->regdomain); 4778 if (status != HAL_OK) { 4779 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n", 4780 __func__, status); 4781 return EINVAL; /* XXX */ 4782 } 4783 4784 return 0; 4785} 4786 4787static void 4788ath_getradiocaps(struct ieee80211com *ic, 4789 int maxchans, int *nchans, struct ieee80211_channel chans[]) 4790{ 4791 struct ath_softc *sc = ic->ic_ifp->if_softc; 4792 struct ath_hal *ah = sc->sc_ah; 4793 4794 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n", 4795 __func__, SKU_DEBUG, CTRY_DEFAULT); 4796 4797 /* XXX check return */ 4798 (void) ath_hal_getchannels(ah, chans, maxchans, nchans, 4799 HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE); 4800 4801} 4802 4803static int 4804ath_getchannels(struct ath_softc *sc) 4805{ 4806 struct ifnet *ifp = sc->sc_ifp; 4807 struct ieee80211com *ic = ifp->if_l2com; 4808 struct ath_hal *ah = sc->sc_ah; 4809 HAL_STATUS status; 4810 4811 /* 4812 * Collect channel set based on EEPROM contents. 4813 */ 4814 status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX, 4815 &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE); 4816 if (status != HAL_OK) { 4817 if_printf(ifp, "%s: unable to collect channel list from hal, " 4818 "status %d\n", __func__, status); 4819 return EINVAL; 4820 } 4821 (void) ath_hal_getregdomain(ah, &sc->sc_eerd); 4822 ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */ 4823 /* XXX map Atheros sku's to net80211 SKU's */ 4824 /* XXX net80211 types too small */ 4825 ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd; 4826 ic->ic_regdomain.country = (uint16_t) sc->sc_eecc; 4827 ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */ 4828 ic->ic_regdomain.isocc[1] = ' '; 4829 4830 ic->ic_regdomain.ecm = 1; 4831 ic->ic_regdomain.location = 'I'; 4832 4833 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 4834 "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n", 4835 __func__, sc->sc_eerd, sc->sc_eecc, 4836 ic->ic_regdomain.regdomain, ic->ic_regdomain.country, 4837 ic->ic_regdomain.location, ic->ic_regdomain.ecm ? 
" ecm" : ""); 4838 return 0; 4839} 4840 4841static int 4842ath_rate_setup(struct ath_softc *sc, u_int mode) 4843{ 4844 struct ath_hal *ah = sc->sc_ah; 4845 const HAL_RATE_TABLE *rt; 4846 4847 switch (mode) { 4848 case IEEE80211_MODE_11A: 4849 rt = ath_hal_getratetable(ah, HAL_MODE_11A); 4850 break; 4851 case IEEE80211_MODE_HALF: 4852 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE); 4853 break; 4854 case IEEE80211_MODE_QUARTER: 4855 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE); 4856 break; 4857 case IEEE80211_MODE_11B: 4858 rt = ath_hal_getratetable(ah, HAL_MODE_11B); 4859 break; 4860 case IEEE80211_MODE_11G: 4861 rt = ath_hal_getratetable(ah, HAL_MODE_11G); 4862 break; 4863 case IEEE80211_MODE_TURBO_A: 4864 rt = ath_hal_getratetable(ah, HAL_MODE_108A); 4865 break; 4866 case IEEE80211_MODE_TURBO_G: 4867 rt = ath_hal_getratetable(ah, HAL_MODE_108G); 4868 break; 4869 case IEEE80211_MODE_STURBO_A: 4870 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); 4871 break; 4872 case IEEE80211_MODE_11NA: 4873 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20); 4874 break; 4875 case IEEE80211_MODE_11NG: 4876 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20); 4877 break; 4878 default: 4879 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", 4880 __func__, mode); 4881 return 0; 4882 } 4883 sc->sc_rates[mode] = rt; 4884 return (rt != NULL); 4885} 4886 4887static void 4888ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 4889{ 4890#define N(a) (sizeof(a)/sizeof(a[0])) 4891 /* NB: on/off times from the Atheros NDIS driver, w/ permission */ 4892 static const struct { 4893 u_int rate; /* tx/rx 802.11 rate */ 4894 u_int16_t timeOn; /* LED on time (ms) */ 4895 u_int16_t timeOff; /* LED off time (ms) */ 4896 } blinkrates[] = { 4897 { 108, 40, 10 }, 4898 { 96, 44, 11 }, 4899 { 72, 50, 13 }, 4900 { 48, 57, 14 }, 4901 { 36, 67, 16 }, 4902 { 24, 80, 20 }, 4903 { 22, 100, 25 }, 4904 { 18, 133, 34 }, 4905 { 12, 160, 40 }, 4906 { 10, 200, 50 }, 4907 { 6, 240, 58 }, 4908 { 4, 267, 66 }, 4909 { 2, 400, 100 }, 4910 { 0, 500, 130 }, 4911 /* XXX half/quarter rates */ 4912 }; 4913 const HAL_RATE_TABLE *rt; 4914 int i, j; 4915 4916 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); 4917 rt = sc->sc_rates[mode]; 4918 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); 4919 for (i = 0; i < rt->rateCount; i++) { 4920 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 4921 if (rt->info[i].phy != IEEE80211_T_HT) 4922 sc->sc_rixmap[ieeerate] = i; 4923 else 4924 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i; 4925 } 4926 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); 4927 for (i = 0; i < N(sc->sc_hwmap); i++) { 4928 if (i >= rt->rateCount) { 4929 sc->sc_hwmap[i].ledon = (500 * hz) / 1000; 4930 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000; 4931 continue; 4932 } 4933 sc->sc_hwmap[i].ieeerate = 4934 rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 4935 if (rt->info[i].phy == IEEE80211_T_HT) 4936 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS; 4937 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; 4938 if (rt->info[i].shortPreamble || 4939 rt->info[i].phy == IEEE80211_T_OFDM) 4940 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE; 4941 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags; 4942 for (j = 0; j < N(blinkrates)-1; j++) 4943 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate) 4944 break; 4945 /* NB: this uses the last entry if the rate isn't found */ 4946 /* XXX beware of overlow */ 4947 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000; 4948 
sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000; 4949 } 4950 sc->sc_currates = rt; 4951 sc->sc_curmode = mode; 4952 /* 4953 * All protection frames are transmitted at 2Mb/s for 4954 * 11g, otherwise at 1Mb/s. 4955 */ 4956 if (mode == IEEE80211_MODE_11G) 4957 sc->sc_protrix = ath_tx_findrix(sc, 2*2); 4958 else 4959 sc->sc_protrix = ath_tx_findrix(sc, 2*1); 4960 /* NB: caller is responsible for resetting rate control state */ 4961#undef N 4962} 4963 4964static void 4965ath_watchdog(void *arg) 4966{ 4967 struct ath_softc *sc = arg; 4968 int do_reset = 0; 4969 4970 if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) { 4971 struct ifnet *ifp = sc->sc_ifp; 4972 uint32_t hangs; 4973 4974 if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) && 4975 hangs != 0) { 4976 if_printf(ifp, "%s hang detected (0x%x)\n", 4977 hangs & 0xff ? "bb" : "mac", hangs); 4978 } else 4979 if_printf(ifp, "device timeout\n"); 4980 do_reset = 1; 4981 ifp->if_oerrors++; 4982 sc->sc_stats.ast_watchdog++; 4983 } 4984 4985 /* 4986 * We can't hold the lock across the ath_reset() call. 4987 * 4988 * And since this routine can't hold a lock and sleep, 4989 * do the reset deferred. 4990 */ 4991 if (do_reset) { 4992 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); 4993 } 4994 4995 callout_schedule(&sc->sc_wd_ch, hz); 4996} 4997 4998/* 4999 * Fetch the rate control statistics for the given node. 5000 */ 5001static int 5002ath_ioctl_ratestats(struct ath_softc *sc, struct ath_rateioctl *rs) 5003{ 5004 struct ath_node *an; 5005 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 5006 struct ieee80211_node *ni; 5007 int error = 0; 5008 5009 /* Perform a lookup on the given node */ 5010 ni = ieee80211_find_node(&ic->ic_sta, rs->is_u.macaddr); 5011 if (ni == NULL) { 5012 error = EINVAL; 5013 goto bad; 5014 } 5015 5016 /* Lock the ath_node */ 5017 an = ATH_NODE(ni); 5018 ATH_NODE_LOCK(an); 5019 5020 /* Fetch the rate control stats for this node */ 5021 error = ath_rate_fetch_node_stats(sc, an, rs); 5022 5023 /* No matter what happens here, just drop through */ 5024 5025 /* Unlock the ath_node */ 5026 ATH_NODE_UNLOCK(an); 5027 5028 /* Unref the node */ 5029 ieee80211_node_decref(ni); 5030 5031bad: 5032 return (error); 5033} 5034 5035#ifdef ATH_DIAGAPI 5036/* 5037 * Diagnostic interface to the HAL. This is used by various 5038 * tools to do things like retrieve register contents for 5039 * debugging. The mechanism is intentionally opaque so that 5040 * it can change frequently w/o concern for compatibility. 5041 */ 5042static int 5043ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad) 5044{ 5045 struct ath_hal *ah = sc->sc_ah; 5046 u_int id = ad->ad_id & ATH_DIAG_ID; 5047 void *indata = NULL; 5048 void *outdata = NULL; 5049 u_int32_t insize = ad->ad_in_size; 5050 u_int32_t outsize = ad->ad_out_size; 5051 int error = 0; 5052 5053 if (ad->ad_id & ATH_DIAG_IN) { 5054 /* 5055 * Copy in data. 5056 */ 5057 indata = malloc(insize, M_TEMP, M_NOWAIT); 5058 if (indata == NULL) { 5059 error = ENOMEM; 5060 goto bad; 5061 } 5062 error = copyin(ad->ad_in_data, indata, insize); 5063 if (error) 5064 goto bad; 5065 } 5066 if (ad->ad_id & ATH_DIAG_DYN) { 5067 /* 5068 * Allocate a buffer for the results (otherwise the HAL 5069 * returns a pointer to a buffer where we can read the 5070 * results). Note that we depend on the HAL leaving this 5071 * pointer for us to use below in reclaiming the buffer; 5072 * may want to be more defensive. 
5073 */ 5074 outdata = malloc(outsize, M_TEMP, M_NOWAIT); 5075 if (outdata == NULL) { 5076 error = ENOMEM; 5077 goto bad; 5078 } 5079 } 5080 if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) { 5081 if (outsize < ad->ad_out_size) 5082 ad->ad_out_size = outsize; 5083 if (outdata != NULL) 5084 error = copyout(outdata, ad->ad_out_data, 5085 ad->ad_out_size); 5086 } else { 5087 error = EINVAL; 5088 } 5089bad: 5090 if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL) 5091 free(indata, M_TEMP); 5092 if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL) 5093 free(outdata, M_TEMP); 5094 return error; 5095} 5096#endif /* ATH_DIAGAPI */ 5097 5098static int 5099ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 5100{ 5101#define IS_RUNNING(ifp) \ 5102 ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) 5103 struct ath_softc *sc = ifp->if_softc; 5104 struct ieee80211com *ic = ifp->if_l2com; 5105 struct ifreq *ifr = (struct ifreq *)data; 5106 const HAL_RATE_TABLE *rt; 5107 int error = 0; 5108 5109 switch (cmd) { 5110 case SIOCSIFFLAGS: 5111 ATH_LOCK(sc); 5112 if (IS_RUNNING(ifp)) { 5113 /* 5114 * To avoid rescanning another access point, 5115 * do not call ath_init() here. Instead, 5116 * only reflect promisc mode settings. 5117 */ 5118 ath_mode_init(sc); 5119 } else if (ifp->if_flags & IFF_UP) { 5120 /* 5121 * Beware of being called during attach/detach 5122 * to reset promiscuous mode. In that case we 5123 * will still be marked UP but not RUNNING. 5124 * However trying to re-init the interface 5125 * is the wrong thing to do as we've already 5126 * torn down much of our state. There's 5127 * probably a better way to deal with this. 5128 */ 5129 if (!sc->sc_invalid) 5130 ath_init(sc); /* XXX lose error */ 5131 } else { 5132 ath_stop_locked(ifp); 5133#ifdef notyet 5134 /* XXX must wakeup in places like ath_vap_delete */ 5135 if (!sc->sc_invalid) 5136 ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP); 5137#endif 5138 } 5139 ATH_UNLOCK(sc); 5140 break; 5141 case SIOCGIFMEDIA: 5142 case SIOCSIFMEDIA: 5143 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 5144 break; 5145 case SIOCGATHSTATS: 5146 /* NB: embed these numbers to get a consistent view */ 5147 sc->sc_stats.ast_tx_packets = ifp->if_opackets; 5148 sc->sc_stats.ast_rx_packets = ifp->if_ipackets; 5149 sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi); 5150 sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi); 5151#ifdef IEEE80211_SUPPORT_TDMA 5152 sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap); 5153 sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam); 5154#endif 5155 rt = sc->sc_currates; 5156 sc->sc_stats.ast_tx_rate = 5157 rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC; 5158 if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT) 5159 sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS; 5160 return copyout(&sc->sc_stats, 5161 ifr->ifr_data, sizeof (sc->sc_stats)); 5162 case SIOCGATHAGSTATS: 5163 return copyout(&sc->sc_aggr_stats, 5164 ifr->ifr_data, sizeof (sc->sc_aggr_stats)); 5165 case SIOCZATHSTATS: 5166 error = priv_check(curthread, PRIV_DRIVER); 5167 if (error == 0) { 5168 memset(&sc->sc_stats, 0, sizeof(sc->sc_stats)); 5169 memset(&sc->sc_aggr_stats, 0, 5170 sizeof(sc->sc_aggr_stats)); 5171 memset(&sc->sc_intr_stats, 0, 5172 sizeof(sc->sc_intr_stats)); 5173 } 5174 break; 5175#ifdef ATH_DIAGAPI 5176 case SIOCGATHDIAG: 5177 error = ath_ioctl_diag(sc, (struct ath_diag *) ifr); 5178 break; 5179 case SIOCGATHPHYERR: 5180 error = ath_ioctl_phyerr(sc,(struct 
ath_diag*) ifr); 5181 break; 5182#endif 5183 case SIOCGATHNODERATESTATS: 5184 error = ath_ioctl_ratestats(sc, (struct ath_rateioctl *) ifr); 5185 break; 5186 case SIOCGIFADDR: 5187 error = ether_ioctl(ifp, cmd, data); 5188 break; 5189 default: 5190 error = EINVAL; 5191 break; 5192 } 5193 return error; 5194#undef IS_RUNNING 5195} 5196 5197/* 5198 * Announce various information on device/driver attach. 5199 */ 5200static void 5201ath_announce(struct ath_softc *sc) 5202{ 5203 struct ifnet *ifp = sc->sc_ifp; 5204 struct ath_hal *ah = sc->sc_ah; 5205 5206 if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n", 5207 ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev, 5208 ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf); 5209 if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n", 5210 ah->ah_analog2GhzRev, ah->ah_analog5GhzRev); 5211 if (bootverbose) { 5212 int i; 5213 for (i = 0; i <= WME_AC_VO; i++) { 5214 struct ath_txq *txq = sc->sc_ac2q[i]; 5215 if_printf(ifp, "Use hw queue %u for %s traffic\n", 5216 txq->axq_qnum, ieee80211_wme_acnames[i]); 5217 } 5218 if_printf(ifp, "Use hw queue %u for CAB traffic\n", 5219 sc->sc_cabq->axq_qnum); 5220 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq); 5221 } 5222 if (ath_rxbuf != ATH_RXBUF) 5223 if_printf(ifp, "using %u rx buffers\n", ath_rxbuf); 5224 if (ath_txbuf != ATH_TXBUF) 5225 if_printf(ifp, "using %u tx buffers\n", ath_txbuf); 5226 if (sc->sc_mcastkey && bootverbose) 5227 if_printf(ifp, "using multicast key search\n"); 5228} 5229 5230static void 5231ath_dfs_tasklet(void *p, int npending) 5232{ 5233 struct ath_softc *sc = (struct ath_softc *) p; 5234 struct ifnet *ifp = sc->sc_ifp; 5235 struct ieee80211com *ic = ifp->if_l2com; 5236 5237 /* 5238 * If previous processing has found a radar event, 5239 * signal this to the net80211 layer to begin DFS 5240 * processing. 5241 */ 5242 if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) { 5243 /* DFS event found, initiate channel change */ 5244 /* 5245 * XXX doesn't currently tell us whether the event 5246 * XXX was found in the primary or extension 5247 * XXX channel! 5248 */ 5249 IEEE80211_LOCK(ic); 5250 ieee80211_dfs_notify_radar(ic, sc->sc_curchan); 5251 IEEE80211_UNLOCK(ic); 5252 } 5253} 5254 5255MODULE_VERSION(if_ath, 1); 5256MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */ 5257#if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ) 5258MODULE_DEPEND(if_ath, alq, 1, 1, 1); 5259#endif 5260
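
/*
 * Illustrative sketch, not driver code: the deferred TX handlers above
 * (ath_tx_proc_q0(), ath_tx_proc_q0123(), ath_tx_proc()) all follow the
 * same pattern -- snapshot the active-queue bitmask under the PCU lock,
 * clear it, and only then walk the set bits outside the lock.  The
 * stand-alone model below shows that pattern in plain C; the names
 * (txq_active, process_queue, NUM_TX_QUEUES) are stand-ins, not the
 * driver's symbols.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define	NUM_TX_QUEUES	10			/* HAL_NUM_TX_QUEUES analogue */
#define	QACTIVE(t, q)	((t) & (1U << (q)))	/* cf. TXQACTIVE() above */

static uint32_t txq_active;			/* stands in for sc->sc_txq_active */

static void
process_queue(int q)
{
	printf("servicing hw queue %d\n", q);
}

static void
tx_proc_model(void)
{
	uint32_t txqs;
	int i;

	/* lock();  -- ATH_PCU_LOCK() in the driver */
	txqs = txq_active;	/* snapshot the queues that need service */
	txq_active &= ~txqs;	/* clear them so new completions re-arm the task */
	/* unlock(); */

	/* Walk the snapshot outside the lock. */
	for (i = 0; i < NUM_TX_QUEUES; i++)
		if (QACTIVE(txqs, i))
			process_queue(i);
}

int
main(void)
{
	txq_active = (1U << 0) | (1U << 3);	/* pretend queues 0 and 3 completed frames */
	tx_proc_model();
	return (0);
}
#endif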
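
/*
 * Illustrative sketch, not driver code: ath_calibrate() above picks the
 * next callout delay from the short/long calibration intervals and then
 * clamps it against the ANI poll interval.  The stand-alone arithmetic
 * below models that selection; HZ and the *_MS/_S constants are
 * stand-ins for the kernel's hz and the ath_*calinterval tunables.
 */
#if 0
#include <stdio.h>

#define	HZ		1000	/* ticks per second (stand-in) */
#define	SHORTCAL_MS	100	/* ath_shortcalinterval analogue, in ms */
#define	LONGCAL_S	30	/* ath_longcalinterval analogue, in seconds */
#define	ANICAL_MS	100	/* ath_anicalinterval analogue, in ms */
#define	MIN(a, b)	((a) < (b) ? (a) : (b))

static int
next_cal_ticks(int cal_done, int is_hostap)
{
	int nextcal;

	if (!cal_done) {
		/* Poll again soon until the HAL reports the cal complete. */
		nextcal = SHORTCAL_MS * HZ / 1000;
		if (!is_hostap)
			nextcal *= 10;	/* be less aggressive when not an AP */
	} else {
		/* Otherwise wait a full long-cal period... */
		nextcal = LONGCAL_S * HZ;
	}
	/* ...but never longer than the ANI poll interval, if ANI is enabled. */
	if (ANICAL_MS > 0)
		nextcal = MIN(nextcal, ANICAL_MS * HZ / 1000);
	return (nextcal);
}

int
main(void)
{
	printf("cal pending, sta mode: %d ticks\n", next_cal_ticks(0, 0));
	printf("cal done, hostap mode: %d ticks\n", next_cal_ticks(1, 1));
	return (0);
}
#endif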
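
/*
 * Illustrative sketch, not driver code: ath_setcurmode() above maps each
 * hardware rate to LED blink times by a linear search of blinkrates[],
 * deliberately landing on the last (catch-all) entry for unknown rates,
 * and converts milliseconds to ticks as (ms * hz) / 1000, which is why
 * ms * hz must stay within int range.  The table below reuses a few
 * entries from the driver's table; HZ is a stand-in for the kernel's hz.
 */
#if 0
#include <stdio.h>

#define	HZ	1000			/* ticks per second (stand-in) */
#define	N(a)	(sizeof(a) / sizeof((a)[0]))

static const struct {
	unsigned int	rate;		/* 802.11 rate, 500kb/s units */
	unsigned short	time_on;	/* LED on time (ms) */
	unsigned short	time_off;	/* LED off time (ms) */
} blinkrates[] = {
	{ 108,  40,  10 },
	{  48,  57,  14 },
	{  12, 160,  40 },
	{   0, 500, 130 },		/* last entry doubles as the default */
};

static void
rate_to_blink(unsigned int rate, int *ledon, int *ledoff)
{
	unsigned int j;

	for (j = 0; j < N(blinkrates) - 1; j++)
		if (blinkrates[j].rate == rate)
			break;
	/* NB: like the driver, an unknown rate lands on the last entry. */
	*ledon = blinkrates[j].time_on * HZ / 1000;
	*ledoff = blinkrates[j].time_off * HZ / 1000;
}

int
main(void)
{
	int on, off;

	rate_to_blink(48, &on, &off);
	printf("rate 48: on %d off %d ticks\n", on, off);
	rate_to_blink(7, &on, &off);	/* not in the table -> catch-all */
	printf("rate  7: on %d off %d ticks\n", on, off);
	return (0);
}
#endif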