/* if_ath.c — FreeBSD SVN revision 231571 */
/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
28214501Srpaulo */ 29214501Srpaulo 30214501Srpaulo#include <sys/cdefs.h> 31214501Srpaulo__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 231571 2012-02-13 00:28:41Z adrian $"); 32214501Srpaulo 33214501Srpaulo/* 34214501Srpaulo * Driver for the Atheros Wireless LAN controller. 35214501Srpaulo * 36214501Srpaulo * This software is derived from work of Atsushi Onoe; his contribution 37252190Srpaulo * is greatly appreciated. 38214501Srpaulo */ 39341618Scy 40341618Scy#include "opt_inet.h" 41341618Scy#include "opt_ath.h" 42214501Srpaulo/* 43341618Scy * This is needed for register operations which are performed 44214501Srpaulo * by the driver - eg, calls to ath_hal_gettsf32(). 45214501Srpaulo */ 46252190Srpaulo#include "opt_ah.h" 47214501Srpaulo#include "opt_wlan.h" 48214501Srpaulo 49214501Srpaulo#include <sys/param.h> 50214501Srpaulo#include <sys/systm.h> 51214501Srpaulo#include <sys/sysctl.h> 52252190Srpaulo#include <sys/mbuf.h> 53214501Srpaulo#include <sys/malloc.h> 54214501Srpaulo#include <sys/lock.h> 55214501Srpaulo#include <sys/mutex.h> 56214501Srpaulo#include <sys/kernel.h> 57214501Srpaulo#include <sys/socket.h> 58214501Srpaulo#include <sys/sockio.h> 59214501Srpaulo#include <sys/errno.h> 60214501Srpaulo#include <sys/callout.h> 61214501Srpaulo#include <sys/bus.h> 62341618Scy#include <sys/endian.h> 63289284Srpaulo#include <sys/kthread.h> 64289284Srpaulo#include <sys/taskqueue.h> 65214501Srpaulo#include <sys/priv.h> 66289284Srpaulo#include <sys/module.h> 67252190Srpaulo#include <sys/ktr.h> 68214501Srpaulo#include <sys/smp.h> /* for mp_ncpus */ 69214501Srpaulo 70214501Srpaulo#include <machine/bus.h> 71214501Srpaulo 72214501Srpaulo#include <net/if.h> 73214501Srpaulo#include <net/if_dl.h> 74214501Srpaulo#include <net/if_media.h> 75214501Srpaulo#include <net/if_types.h> 76214501Srpaulo#include <net/if_arp.h> 77341618Scy#include <net/ethernet.h> 78214501Srpaulo#include <net/if_llc.h> 79214501Srpaulo 80214501Srpaulo#include <net80211/ieee80211_var.h> 81214501Srpaulo#include 
<net80211/ieee80211_regdomain.h> 82252190Srpaulo#ifdef IEEE80211_SUPPORT_SUPERG 83252190Srpaulo#include <net80211/ieee80211_superg.h> 84252190Srpaulo#endif 85252190Srpaulo#ifdef IEEE80211_SUPPORT_TDMA 86214501Srpaulo#include <net80211/ieee80211_tdma.h> 87214501Srpaulo#endif 88252190Srpaulo 89214501Srpaulo#include <net/bpf.h> 90252190Srpaulo 91214501Srpaulo#ifdef INET 92214501Srpaulo#include <netinet/in.h> 93214501Srpaulo#include <netinet/if_ether.h> 94214501Srpaulo#endif 95214501Srpaulo 96214501Srpaulo#include <dev/ath/if_athvar.h> 97214501Srpaulo#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */ 98214501Srpaulo#include <dev/ath/ath_hal/ah_diagcodes.h> 99214501Srpaulo 100214501Srpaulo#include <dev/ath/if_ath_debug.h> 101214501Srpaulo#include <dev/ath/if_ath_misc.h> 102214501Srpaulo#include <dev/ath/if_ath_tx.h> 103214501Srpaulo#include <dev/ath/if_ath_sysctl.h> 104214501Srpaulo#include <dev/ath/if_ath_led.h> 105214501Srpaulo#include <dev/ath/if_ath_keycache.h> 106214501Srpaulo#include <dev/ath/if_athdfs.h> 107214501Srpaulo 108214501Srpaulo#ifdef ATH_TX99_DIAG 109214501Srpaulo#include <dev/ath/ath_tx99/ath_tx99.h> 110214501Srpaulo#endif 111214501Srpaulo 112214501Srpaulo#define ATH_KTR_INTR KTR_SPARE4 113214501Srpaulo#define ATH_KTR_ERR KTR_SPARE3 114214501Srpaulo 115214501Srpaulo/* 116214501Srpaulo * ATH_BCBUF determines the number of vap's that can transmit 117214501Srpaulo * beacons and also (currently) the number of vap's that can 118214501Srpaulo * have unique mac addresses/bssid. When staggering beacons 119214501Srpaulo * 4 is probably a good max as otherwise the beacons become 120214501Srpaulo * very closely spaced and there is limited time for cab q traffic 121214501Srpaulo * to go out. You can burst beacons instead but that is not good 122214501Srpaulo * for stations in power save and at some point you really want 123214501Srpaulo * another radio (and channel). 
124214501Srpaulo * 125214501Srpaulo * The limit on the number of mac addresses is tied to our use of 126214501Srpaulo * the U/L bit and tracking addresses in a byte; it would be 127214501Srpaulo * worthwhile to allow more for applications like proxy sta. 128214501Srpaulo */ 129214501SrpauloCTASSERT(ATH_BCBUF <= 8); 130214501Srpaulo 131214501Srpaulostatic struct ieee80211vap *ath_vap_create(struct ieee80211com *, 132214501Srpaulo const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 133214501Srpaulo const uint8_t [IEEE80211_ADDR_LEN], 134214501Srpaulo const uint8_t [IEEE80211_ADDR_LEN]); 135214501Srpaulostatic void ath_vap_delete(struct ieee80211vap *); 136214501Srpaulostatic void ath_init(void *); 137214501Srpaulostatic void ath_stop_locked(struct ifnet *); 138214501Srpaulostatic void ath_stop(struct ifnet *); 139214501Srpaulostatic void ath_start(struct ifnet *); 140214501Srpaulostatic int ath_reset_vap(struct ieee80211vap *, u_long); 141214501Srpaulostatic int ath_media_change(struct ifnet *); 142214501Srpaulostatic void ath_watchdog(void *); 143214501Srpaulostatic int ath_ioctl(struct ifnet *, u_long, caddr_t); 144214501Srpaulostatic void ath_fatal_proc(void *, int); 145214501Srpaulostatic void ath_bmiss_vap(struct ieee80211vap *); 146214501Srpaulostatic void ath_bmiss_proc(void *, int); 147214501Srpaulostatic void ath_key_update_begin(struct ieee80211vap *); 148214501Srpaulostatic void ath_key_update_end(struct ieee80211vap *); 149214501Srpaulostatic void ath_update_mcast(struct ifnet *); 150214501Srpaulostatic void ath_update_promisc(struct ifnet *); 151214501Srpaulostatic void ath_mode_init(struct ath_softc *); 152214501Srpaulostatic void ath_setslottime(struct ath_softc *); 153214501Srpaulostatic void ath_updateslot(struct ifnet *); 154214501Srpaulostatic int ath_beaconq_setup(struct ath_hal *); 155214501Srpaulostatic int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *); 156214501Srpaulostatic void ath_beacon_update(struct ieee80211vap *, int 
item); 157214501Srpaulostatic void ath_beacon_setup(struct ath_softc *, struct ath_buf *); 158214501Srpaulostatic void ath_beacon_proc(void *, int); 159214501Srpaulostatic struct ath_buf *ath_beacon_generate(struct ath_softc *, 160214501Srpaulo struct ieee80211vap *); 161214501Srpaulostatic void ath_bstuck_proc(void *, int); 162214501Srpaulostatic void ath_beacon_return(struct ath_softc *, struct ath_buf *); 163214501Srpaulostatic void ath_beacon_free(struct ath_softc *); 164214501Srpaulostatic void ath_beacon_config(struct ath_softc *, struct ieee80211vap *); 165214501Srpaulostatic void ath_descdma_cleanup(struct ath_softc *sc, 166214501Srpaulo struct ath_descdma *, ath_bufhead *); 167214501Srpaulostatic int ath_desc_alloc(struct ath_softc *); 168214501Srpaulostatic void ath_desc_free(struct ath_softc *); 169214501Srpaulostatic struct ieee80211_node *ath_node_alloc(struct ieee80211vap *, 170214501Srpaulo const uint8_t [IEEE80211_ADDR_LEN]); 171214501Srpaulostatic void ath_node_cleanup(struct ieee80211_node *); 172214501Srpaulostatic void ath_node_free(struct ieee80211_node *); 173214501Srpaulostatic void ath_node_getsignal(const struct ieee80211_node *, 174214501Srpaulo int8_t *, int8_t *); 175214501Srpaulostatic int ath_rxbuf_init(struct ath_softc *, struct ath_buf *); 176214501Srpaulostatic void ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, 177214501Srpaulo int subtype, int rssi, int nf); 178214501Srpaulostatic void ath_setdefantenna(struct ath_softc *, u_int); 179214501Srpaulostatic void ath_rx_proc(struct ath_softc *sc, int); 180214501Srpaulostatic void ath_rx_tasklet(void *, int); 181214501Srpaulostatic void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int); 182214501Srpaulostatic struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype); 183214501Srpaulostatic int ath_tx_setup(struct ath_softc *, int, int); 184214501Srpaulostatic int ath_wme_update(struct ieee80211com *); 185214501Srpaulostatic void ath_tx_cleanupq(struct 
ath_softc *, struct ath_txq *); 186341618Scystatic void ath_tx_cleanup(struct ath_softc *); 187214501Srpaulostatic void ath_tx_proc_q0(void *, int); 188341618Scystatic void ath_tx_proc_q0123(void *, int); 189341618Scystatic void ath_tx_proc(void *, int); 190341618Scystatic int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); 191341618Scystatic void ath_draintxq(struct ath_softc *, ATH_RESET_TYPE reset_type); 192341618Scystatic void ath_stoprecv(struct ath_softc *, int); 193341618Scystatic int ath_startrecv(struct ath_softc *); 194341618Scystatic void ath_chan_change(struct ath_softc *, struct ieee80211_channel *); 195341618Scystatic void ath_scan_start(struct ieee80211com *); 196341618Scystatic void ath_scan_end(struct ieee80211com *); 197341618Scystatic void ath_set_channel(struct ieee80211com *); 198341618Scystatic void ath_calibrate(void *); 199341618Scystatic int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int); 200341618Scystatic void ath_setup_stationkey(struct ieee80211_node *); 201341618Scystatic void ath_newassoc(struct ieee80211_node *, int); 202341618Scystatic int ath_setregdomain(struct ieee80211com *, 203341618Scy struct ieee80211_regdomain *, int, 204341618Scy struct ieee80211_channel []); 205341618Scystatic void ath_getradiocaps(struct ieee80211com *, int, int *, 206252190Srpaulo struct ieee80211_channel []); 207214501Srpaulostatic int ath_getchannels(struct ath_softc *); 208214501Srpaulo 209214501Srpaulostatic int ath_rate_setup(struct ath_softc *, u_int mode); 210214501Srpaulostatic void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); 211214501Srpaulo 212214501Srpaulostatic void ath_announce(struct ath_softc *); 213214501Srpaulo 214214501Srpaulostatic void ath_dfs_tasklet(void *, int); 215214501Srpaulo 216324714Scy#ifdef IEEE80211_SUPPORT_TDMA 217214501Srpaulostatic void ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, 218214501Srpaulo u_int32_t bintval); 219214501Srpaulostatic void 
ath_tdma_bintvalsetup(struct ath_softc *sc, 220214501Srpaulo const struct ieee80211_tdma_state *tdma); 221214501Srpaulostatic void ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap); 222324714Scystatic void ath_tdma_update(struct ieee80211_node *ni, 223214501Srpaulo const struct ieee80211_tdma_param *tdma, int); 224214501Srpaulostatic void ath_tdma_beacon_send(struct ath_softc *sc, 225214501Srpaulo struct ieee80211vap *vap); 226214501Srpaulo 227214501Srpaulo#define TDMA_EP_MULTIPLIER (1<<10) /* pow2 to optimize out * and / */ 228214501Srpaulo#define TDMA_LPF_LEN 6 229214501Srpaulo#define TDMA_DUMMY_MARKER 0x127 230214501Srpaulo#define TDMA_EP_MUL(x, mul) ((x) * (mul)) 231214501Srpaulo#define TDMA_IN(x) (TDMA_EP_MUL((x), TDMA_EP_MULTIPLIER)) 232214501Srpaulo#define TDMA_LPF(x, y, len) \ 233214501Srpaulo ((x != TDMA_DUMMY_MARKER) ? (((x) * ((len)-1) + (y)) / (len)) : (y)) 234214501Srpaulo#define TDMA_SAMPLE(x, y) do { \ 235214501Srpaulo x = TDMA_LPF((x), TDMA_IN(y), TDMA_LPF_LEN); \ 236214501Srpaulo} while (0) 237214501Srpaulo#define TDMA_EP_RND(x,mul) \ 238214501Srpaulo ((((x)%(mul)) >= ((mul)/2)) ? 
((x) + ((mul) - 1)) / (mul) : (x)/(mul)) 239214501Srpaulo#define TDMA_AVG(x) TDMA_EP_RND(x, TDMA_EP_MULTIPLIER) 240214501Srpaulo#endif /* IEEE80211_SUPPORT_TDMA */ 241214501Srpaulo 242214501SrpauloSYSCTL_DECL(_hw_ath); 243214501Srpaulo 244214501Srpaulo/* XXX validate sysctl values */ 245252190Srpaulostatic int ath_longcalinterval = 30; /* long cals every 30 secs */ 246214501SrpauloSYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval, 247214501Srpaulo 0, "long chip calibration interval (secs)"); 248214501Srpaulostatic int ath_shortcalinterval = 100; /* short cals every 100 ms */ 249324714ScySYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval, 250214501Srpaulo 0, "short chip calibration interval (msecs)"); 251214501Srpaulostatic int ath_resetcalinterval = 20*60; /* reset cal state 20 mins */ 252324714ScySYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval, 253214501Srpaulo 0, "reset chip calibration results (secs)"); 254214501Srpaulostatic int ath_anicalinterval = 100; /* ANI calibration - 100 msec */ 255281681SrpauloSYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval, 256281681Srpaulo 0, "ANI calibration (msecs)"); 257281681Srpaulo 258281681Srpaulostatic int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */ 259214501SrpauloSYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf, 260214501Srpaulo 0, "rx buffers allocated"); 261214501SrpauloTUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf); 262214501Srpaulostatic int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */ 263214501SrpauloSYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf, 264214501Srpaulo 0, "tx buffers allocated"); 265214501SrpauloTUNABLE_INT("hw.ath.txbuf", &ath_txbuf); 266214501Srpaulo 267214501Srpaulostatic int ath_bstuck_threshold = 4; /* max missed beacons */ 268214501SrpauloSYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold, 269214501Srpaulo 0, "max missed beacon xmits before chip reset"); 
270214501Srpaulo 271324714ScyMALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers"); 272214501Srpaulo 273324714Scy#define HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20) 274214501Srpaulo#define HAL_MODE_HT40 \ 275214501Srpaulo (HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \ 276214501Srpaulo HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS) 277214501Srpauloint 278214501Srpauloath_attach(u_int16_t devid, struct ath_softc *sc) 279214501Srpaulo{ 280214501Srpaulo struct ifnet *ifp; 281214501Srpaulo struct ieee80211com *ic; 282252190Srpaulo struct ath_hal *ah = NULL; 283214501Srpaulo HAL_STATUS status; 284214501Srpaulo int error = 0, i; 285214501Srpaulo u_int wmodes; 286214501Srpaulo uint8_t macaddr[IEEE80211_ADDR_LEN]; 287214501Srpaulo int rx_chainmask, tx_chainmask; 288214501Srpaulo 289214501Srpaulo DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid); 290214501Srpaulo 291281681Srpaulo ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 292281681Srpaulo if (ifp == NULL) { 293281681Srpaulo device_printf(sc->sc_dev, "can not if_alloc()\n"); 294281681Srpaulo error = ENOSPC; 295281681Srpaulo goto bad; 296281681Srpaulo } 297281681Srpaulo ic = ifp->if_l2com; 298281681Srpaulo 299281681Srpaulo /* set these up early for if_printf use */ 300281681Srpaulo if_initname(ifp, device_get_name(sc->sc_dev), 301281681Srpaulo device_get_unit(sc->sc_dev)); 302214501Srpaulo 303214501Srpaulo ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, 304214501Srpaulo sc->sc_eepromdata, &status); 305214501Srpaulo if (ah == NULL) { 306214501Srpaulo if_printf(ifp, "unable to attach hardware; HAL status %u\n", 307214501Srpaulo status); 308214501Srpaulo error = ENXIO; 309214501Srpaulo goto bad; 310214501Srpaulo } 311214501Srpaulo sc->sc_ah = ah; 312214501Srpaulo sc->sc_invalid = 0; /* ready to go, enable interrupt handling */ 313214501Srpaulo#ifdef ATH_DEBUG 314214501Srpaulo sc->sc_debug = ath_debug; 315214501Srpaulo#endif 316214501Srpaulo 317214501Srpaulo /* 318214501Srpaulo * 
Check if the MAC has multi-rate retry support. 319214501Srpaulo * We do this by trying to setup a fake extended 320214501Srpaulo * descriptor. MAC's that don't have support will 321281681Srpaulo * return false w/o doing anything. MAC's that do 322214501Srpaulo * support it will return true w/o doing anything. 323214501Srpaulo */ 324214501Srpaulo sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0); 325214501Srpaulo 326214501Srpaulo /* 327214501Srpaulo * Check if the device has hardware counters for PHY 328281681Srpaulo * errors. If so we need to enable the MIB interrupt 329281681Srpaulo * so we can act on stat triggers. 330214501Srpaulo */ 331214501Srpaulo if (ath_hal_hwphycounters(ah)) 332214501Srpaulo sc->sc_needmib = 1; 333214501Srpaulo 334214501Srpaulo /* 335214501Srpaulo * Get the hardware key cache size. 336214501Srpaulo */ 337281681Srpaulo sc->sc_keymax = ath_hal_keycachesize(ah); 338281681Srpaulo if (sc->sc_keymax > ATH_KEYMAX) { 339281681Srpaulo if_printf(ifp, "Warning, using only %u of %u key cache slots\n", 340281681Srpaulo ATH_KEYMAX, sc->sc_keymax); 341281681Srpaulo sc->sc_keymax = ATH_KEYMAX; 342281681Srpaulo } 343281681Srpaulo /* 344281681Srpaulo * Reset the key cache since some parts do not 345214501Srpaulo * reset the contents on initial power up. 346214501Srpaulo */ 347214501Srpaulo for (i = 0; i < sc->sc_keymax; i++) 348214501Srpaulo ath_hal_keyreset(ah, i); 349214501Srpaulo 350214501Srpaulo /* 351281681Srpaulo * Collect the default channel list. 352281681Srpaulo */ 353214501Srpaulo error = ath_getchannels(sc); 354214501Srpaulo if (error != 0) 355214501Srpaulo goto bad; 356214501Srpaulo 357214501Srpaulo /* 358214501Srpaulo * Setup rate tables for all potential media types. 
359214501Srpaulo */ 360281681Srpaulo ath_rate_setup(sc, IEEE80211_MODE_11A); 361281681Srpaulo ath_rate_setup(sc, IEEE80211_MODE_11B); 362281681Srpaulo ath_rate_setup(sc, IEEE80211_MODE_11G); 363214501Srpaulo ath_rate_setup(sc, IEEE80211_MODE_TURBO_A); 364214501Srpaulo ath_rate_setup(sc, IEEE80211_MODE_TURBO_G); 365214501Srpaulo ath_rate_setup(sc, IEEE80211_MODE_STURBO_A); 366214501Srpaulo ath_rate_setup(sc, IEEE80211_MODE_11NA); 367214501Srpaulo ath_rate_setup(sc, IEEE80211_MODE_11NG); 368214501Srpaulo ath_rate_setup(sc, IEEE80211_MODE_HALF); 369252190Srpaulo ath_rate_setup(sc, IEEE80211_MODE_QUARTER); 370252190Srpaulo 371252190Srpaulo /* NB: setup here so ath_rate_update is happy */ 372252190Srpaulo ath_setcurmode(sc, IEEE80211_MODE_11A); 373252190Srpaulo 374252190Srpaulo /* 375252190Srpaulo * Allocate tx+rx descriptors and populate the lists. 376252190Srpaulo */ 377252190Srpaulo error = ath_desc_alloc(sc); 378214501Srpaulo if (error != 0) { 379281681Srpaulo if_printf(ifp, "failed to allocate descriptors: %d\n", error); 380281681Srpaulo goto bad; 381214501Srpaulo } 382214501Srpaulo callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0); 383214501Srpaulo callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0); 384214501Srpaulo 385214501Srpaulo ATH_TXBUF_LOCK_INIT(sc); 386281681Srpaulo 387214501Srpaulo sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT, 388214501Srpaulo taskqueue_thread_enqueue, &sc->sc_tq); 389214501Srpaulo taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, 390214501Srpaulo "%s taskq", ifp->if_xname); 391214501Srpaulo 392214501Srpaulo TASK_INIT(&sc->sc_rxtask, 0, ath_rx_tasklet, sc); 393252190Srpaulo TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc); 394214501Srpaulo TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc); 395252190Srpaulo 396214501Srpaulo /* 397214501Srpaulo * Allocate hardware transmit queues: one queue for 398214501Srpaulo * beacon frames and one data queue for each QoS 399214501Srpaulo * priority. 
Note that the hal handles resetting 400214501Srpaulo * these queues at the needed time. 401214501Srpaulo * 402214501Srpaulo * XXX PS-Poll 403214501Srpaulo */ 404214501Srpaulo sc->sc_bhalq = ath_beaconq_setup(ah); 405252190Srpaulo if (sc->sc_bhalq == (u_int) -1) { 406214501Srpaulo if_printf(ifp, "unable to setup a beacon xmit queue!\n"); 407214501Srpaulo error = EIO; 408281681Srpaulo goto bad2; 409214501Srpaulo } 410214501Srpaulo sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0); 411252190Srpaulo if (sc->sc_cabq == NULL) { 412281681Srpaulo if_printf(ifp, "unable to setup CAB xmit queue!\n"); 413214501Srpaulo error = EIO; 414214501Srpaulo goto bad2; 415214501Srpaulo } 416214501Srpaulo /* NB: insure BK queue is the lowest priority h/w queue */ 417281681Srpaulo if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) { 418214501Srpaulo if_printf(ifp, "unable to setup xmit queue for %s traffic!\n", 419214501Srpaulo ieee80211_wme_acnames[WME_AC_BK]); 420214501Srpaulo error = EIO; 421214501Srpaulo goto bad2; 422214501Srpaulo } 423214501Srpaulo if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) || 424214501Srpaulo !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) || 425252190Srpaulo !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) { 426214501Srpaulo /* 427252190Srpaulo * Not enough hardware tx queues to properly do WME; 428214501Srpaulo * just punt and assign them all to the same h/w queue. 429214501Srpaulo * We could do a better job of this if, for example, 430214501Srpaulo * we allocate queues when we switch from station to 431214501Srpaulo * AP mode. 
432214501Srpaulo */ 433214501Srpaulo if (sc->sc_ac2q[WME_AC_VI] != NULL) 434214501Srpaulo ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); 435252190Srpaulo if (sc->sc_ac2q[WME_AC_BE] != NULL) 436214501Srpaulo ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); 437252190Srpaulo sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; 438214501Srpaulo sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; 439214501Srpaulo sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; 440214501Srpaulo } 441214501Srpaulo 442214501Srpaulo /* 443214501Srpaulo * Special case certain configurations. Note the 444214501Srpaulo * CAB queue is handled by these specially so don't 445214501Srpaulo * include them when checking the txq setup mask. 446214501Srpaulo */ 447214501Srpaulo switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) { 448214501Srpaulo case 0x01: 449324714Scy TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc); 450214501Srpaulo break; 451214501Srpaulo case 0x0f: 452214501Srpaulo TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc); 453214501Srpaulo break; 454214501Srpaulo default: 455214501Srpaulo TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc); 456214501Srpaulo break; 457214501Srpaulo } 458214501Srpaulo 459214501Srpaulo /* 460252190Srpaulo * Setup rate control. Some rate control modules 461289284Srpaulo * call back to change the anntena state so expose 462289284Srpaulo * the necessary entry points. 463214501Srpaulo * XXX maybe belongs in struct ath_ratectrl? 464214501Srpaulo */ 465214501Srpaulo sc->sc_setdefantenna = ath_setdefantenna; 466252190Srpaulo sc->sc_rc = ath_rate_attach(sc); 467214501Srpaulo if (sc->sc_rc == NULL) { 468252190Srpaulo error = EIO; 469214501Srpaulo goto bad2; 470214501Srpaulo } 471214501Srpaulo 472214501Srpaulo /* Attach DFS module */ 473252190Srpaulo if (! 
ath_dfs_attach(sc)) { 474214501Srpaulo device_printf(sc->sc_dev, 475252190Srpaulo "%s: unable to attach DFS\n", __func__); 476214501Srpaulo error = EIO; 477214501Srpaulo goto bad2; 478214501Srpaulo } 479214501Srpaulo 480214501Srpaulo /* Start DFS processing tasklet */ 481214501Srpaulo TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc); 482324714Scy 483214501Srpaulo /* Configure LED state */ 484324714Scy sc->sc_blinking = 0; 485214501Srpaulo sc->sc_ledstate = 1; 486214501Srpaulo sc->sc_ledon = 0; /* low true */ 487214501Srpaulo sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */ 488214501Srpaulo callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE); 489214501Srpaulo 490214501Srpaulo /* 491214501Srpaulo * Don't setup hardware-based blinking. 492214501Srpaulo * 493214501Srpaulo * Although some NICs may have this configured in the 494214501Srpaulo * default reset register values, the user may wish 495214501Srpaulo * to alter which pins have which function. 496214501Srpaulo * 497214501Srpaulo * The reference driver attaches the MAC network LED to GPIO1 and 498214501Srpaulo * the MAC power LED to GPIO2. However, the DWA-552 cardbus 499214501Srpaulo * NIC has these reversed. 500214501Srpaulo */ 501252190Srpaulo sc->sc_hardled = (1 == 0); 502214501Srpaulo sc->sc_led_net_pin = -1; 503214501Srpaulo sc->sc_led_pwr_pin = -1; 504214501Srpaulo /* 505252190Srpaulo * Auto-enable soft led processing for IBM cards and for 506214501Srpaulo * 5211 minipci cards. Users can also manually enable/disable 507214501Srpaulo * support with a sysctl. 
508214501Srpaulo */ 509214501Srpaulo sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID); 510214501Srpaulo ath_led_config(sc); 511252190Srpaulo ath_hal_setledstate(ah, HAL_LED_INIT); 512214501Srpaulo 513214501Srpaulo ifp->if_softc = sc; 514214501Srpaulo ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; 515214501Srpaulo ifp->if_start = ath_start; 516214501Srpaulo ifp->if_ioctl = ath_ioctl; 517214501Srpaulo ifp->if_init = ath_init; 518214501Srpaulo IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 519214501Srpaulo ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 520214501Srpaulo IFQ_SET_READY(&ifp->if_snd); 521214501Srpaulo 522214501Srpaulo ic->ic_ifp = ifp; 523214501Srpaulo /* XXX not right but it's not used anywhere important */ 524214501Srpaulo ic->ic_phytype = IEEE80211_T_OFDM; 525214501Srpaulo ic->ic_opmode = IEEE80211_M_STA; 526281681Srpaulo ic->ic_caps = 527281681Srpaulo IEEE80211_C_STA /* station mode */ 528281681Srpaulo | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ 529281681Srpaulo | IEEE80211_C_HOSTAP /* hostap mode */ 530281681Srpaulo | IEEE80211_C_MONITOR /* monitor mode */ 531281681Srpaulo | IEEE80211_C_AHDEMO /* adhoc demo mode */ 532281681Srpaulo | IEEE80211_C_WDS /* 4-address traffic works */ 533281681Srpaulo | IEEE80211_C_MBSS /* mesh point link mode */ 534281681Srpaulo | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 535281681Srpaulo | IEEE80211_C_SHSLOT /* short slot time supported */ 536281681Srpaulo | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ 537252190Srpaulo | IEEE80211_C_BGSCAN /* capable of bg scanning */ 538252190Srpaulo | IEEE80211_C_TXFRAG /* handle tx frags */ 539252190Srpaulo#ifdef ATH_ENABLE_DFS 540252190Srpaulo | IEEE80211_C_DFS /* Enable radar detection */ 541281681Srpaulo#endif 542281681Srpaulo ; 543281681Srpaulo /* 544281681Srpaulo * Query the hal to figure out h/w crypto support. 
545281681Srpaulo */ 546281681Srpaulo if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP)) 547281681Srpaulo ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP; 548281681Srpaulo if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB)) 549281681Srpaulo ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB; 550281681Srpaulo if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM)) 551281681Srpaulo ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM; 552252190Srpaulo if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP)) 553214501Srpaulo ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP; 554281681Srpaulo if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) { 555281681Srpaulo ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP; 556214501Srpaulo /* 557214501Srpaulo * Check if h/w does the MIC and/or whether the 558214501Srpaulo * separate key cache entries are required to 559214501Srpaulo * handle both tx+rx MIC keys. 560281681Srpaulo */ 561281681Srpaulo if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC)) 562214501Srpaulo ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 563214501Srpaulo /* 564214501Srpaulo * If the h/w supports storing tx+rx MIC keys 565324714Scy * in one cache slot automatically enable use. 566214501Srpaulo */ 567214501Srpaulo if (ath_hal_hastkipsplit(ah) || 568252190Srpaulo !ath_hal_settkipsplit(ah, AH_FALSE)) 569324714Scy sc->sc_splitmic = 1; 570214501Srpaulo /* 571252190Srpaulo * If the h/w can do TKIP MIC together with WME then 572252190Srpaulo * we use it; otherwise we force the MIC to be done 573214501Srpaulo * in software by the net80211 layer. 574324714Scy */ 575214501Srpaulo if (ath_hal_haswmetkipmic(ah)) 576214501Srpaulo sc->sc_wmetkipmic = 1; 577214501Srpaulo } 578324714Scy sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR); 579214501Srpaulo /* 580214501Srpaulo * Check for multicast key search support. 
581214501Srpaulo */ 582214501Srpaulo if (ath_hal_hasmcastkeysearch(sc->sc_ah) && 583214501Srpaulo !ath_hal_getmcastkeysearch(sc->sc_ah)) { 584214501Srpaulo ath_hal_setmcastkeysearch(sc->sc_ah, 1); 585214501Srpaulo } 586214501Srpaulo sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah); 587214501Srpaulo /* 588214501Srpaulo * Mark key cache slots associated with global keys 589214501Srpaulo * as in use. If we knew TKIP was not to be used we 590214501Srpaulo * could leave the +32, +64, and +32+64 slots free. 591214501Srpaulo */ 592214501Srpaulo for (i = 0; i < IEEE80211_WEP_NKID; i++) { 593252190Srpaulo setbit(sc->sc_keymap, i); 594214501Srpaulo setbit(sc->sc_keymap, i+64); 595214501Srpaulo if (sc->sc_splitmic) { 596252190Srpaulo setbit(sc->sc_keymap, i+32); 597214501Srpaulo setbit(sc->sc_keymap, i+32+64); 598214501Srpaulo } 599214501Srpaulo } 600214501Srpaulo /* 601214501Srpaulo * TPC support can be done either with a global cap or 602324714Scy * per-packet support. The latter is not available on 603214501Srpaulo * all parts. We're a bit pedantic here as all parts 604324714Scy * support a global cap. 605214501Srpaulo */ 606324714Scy if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah)) 607214501Srpaulo ic->ic_caps |= IEEE80211_C_TXPMGT; 608214501Srpaulo 609214501Srpaulo /* 610214501Srpaulo * Mark WME capability only if we have sufficient 611214501Srpaulo * hardware queues to do proper priority scheduling. 612214501Srpaulo */ 613214501Srpaulo if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK]) 614214501Srpaulo ic->ic_caps |= IEEE80211_C_WME; 615214501Srpaulo /* 616214501Srpaulo * Check for misc other capabilities. 
617214501Srpaulo */ 618214501Srpaulo if (ath_hal_hasbursting(ah)) 619214501Srpaulo ic->ic_caps |= IEEE80211_C_BURST; 620214501Srpaulo sc->sc_hasbmask = ath_hal_hasbssidmask(ah); 621214501Srpaulo sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah); 622214501Srpaulo sc->sc_hastsfadd = ath_hal_hastsfadjust(ah); 623214501Srpaulo sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah); 624214501Srpaulo sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah); 625214501Srpaulo if (ath_hal_hasfastframes(ah)) 626214501Srpaulo ic->ic_caps |= IEEE80211_C_FF; 627324714Scy wmodes = ath_hal_getwirelessmodes(ah); 628214501Srpaulo if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO)) 629214501Srpaulo ic->ic_caps |= IEEE80211_C_TURBOP; 630214501Srpaulo#ifdef IEEE80211_SUPPORT_TDMA 631214501Srpaulo if (ath_hal_macversion(ah) > 0x78) { 632214501Srpaulo ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */ 633324714Scy ic->ic_tdma_update = ath_tdma_update; 634214501Srpaulo } 635214501Srpaulo#endif 636214501Srpaulo 637214501Srpaulo /* 638214501Srpaulo * Allow the TX and RX chainmasks to be overridden by 639214501Srpaulo * environment variables and/or device.hints. 640252190Srpaulo * 641214501Srpaulo * This must be done early - before the hardware is 642214501Srpaulo * calibrated or before the 802.11n stream calculation 643324714Scy * is done. 
644252190Srpaulo */ 645214501Srpaulo if (resource_int_value(device_get_name(sc->sc_dev), 646214501Srpaulo device_get_unit(sc->sc_dev), "rx_chainmask", 647214501Srpaulo &rx_chainmask) == 0) { 648214501Srpaulo device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n", 649214501Srpaulo rx_chainmask); 650252190Srpaulo (void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask); 651214501Srpaulo } 652214501Srpaulo if (resource_int_value(device_get_name(sc->sc_dev), 653252190Srpaulo device_get_unit(sc->sc_dev), "tx_chainmask", 654214501Srpaulo &tx_chainmask) == 0) { 655214501Srpaulo device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n", 656324714Scy tx_chainmask); 657214501Srpaulo (void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask); 658214501Srpaulo } 659214501Srpaulo 660214501Srpaulo /* 661324714Scy * The if_ath 11n support is completely not ready for normal use. 662324714Scy * Enabling this option will likely break everything and everything. 663214501Srpaulo * Don't think of doing that unless you know what you're doing. 664214501Srpaulo */ 665214501Srpaulo 666214501Srpaulo#ifdef ATH_ENABLE_11N 667324714Scy /* 668324714Scy * Query HT capabilities 669214501Srpaulo */ 670214501Srpaulo if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK && 671214501Srpaulo (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) { 672281681Srpaulo int rxs, txs; 673281681Srpaulo 674214501Srpaulo device_printf(sc->sc_dev, "[HT] enabling HT modes\n"); 675214501Srpaulo ic->ic_htcaps = IEEE80211_HTC_HT /* HT operation */ 676214501Srpaulo | IEEE80211_HTC_AMPDU /* A-MPDU tx/rx */ 677214501Srpaulo | IEEE80211_HTC_AMSDU /* A-MSDU tx/rx */ 678214501Srpaulo | IEEE80211_HTCAP_MAXAMSDU_3839 679214501Srpaulo /* max A-MSDU length */ 680214501Srpaulo | IEEE80211_HTCAP_SMPS_OFF; /* SM power save off */ 681214501Srpaulo ; 682214501Srpaulo 683214501Srpaulo /* 684214501Srpaulo * Enable short-GI for HT20 only if the hardware 685214501Srpaulo * advertises support. 
686252190Srpaulo * Notably, anything earlier than the AR9287 doesn't. 687214501Srpaulo */ 688252190Srpaulo if ((ath_hal_getcapability(ah, 689214501Srpaulo HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) && 690214501Srpaulo (wmodes & HAL_MODE_HT20)) { 691214501Srpaulo device_printf(sc->sc_dev, 692214501Srpaulo "[HT] enabling short-GI in 20MHz mode\n"); 693214501Srpaulo ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20; 694214501Srpaulo } 695324714Scy 696214501Srpaulo if (wmodes & HAL_MODE_HT40) 697214501Srpaulo ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40 698214501Srpaulo | IEEE80211_HTCAP_SHORTGI40; 699214501Srpaulo 700214501Srpaulo /* 701214501Srpaulo * TX/RX streams need to be taken into account when 702214501Srpaulo * negotiating which MCS rates it'll receive and 703214501Srpaulo * what MCS rates are available for TX. 704214501Srpaulo */ 705214501Srpaulo (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs); 706214501Srpaulo (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs); 707214501Srpaulo 708252190Srpaulo ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask); 709214501Srpaulo ath_hal_gettxchainmask(ah, &sc->sc_txchainmask); 710252190Srpaulo 711252190Srpaulo ic->ic_txstream = txs; 712252190Srpaulo ic->ic_rxstream = rxs; 713252190Srpaulo 714252190Srpaulo device_printf(sc->sc_dev, 715252190Srpaulo "[HT] %d RX streams; %d TX streams\n", rxs, txs); 716252190Srpaulo } 717252190Srpaulo#endif 718252190Srpaulo 719341618Scy /* 720341618Scy * Check if the hardware requires PCI register serialisation. 721341618Scy * Some of the Owl based MACs require this. 
722341618Scy */ 723252190Srpaulo if (mp_ncpus > 1 && 724252190Srpaulo ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR, 725341618Scy 0, NULL) == HAL_OK) { 726341618Scy sc->sc_ah->ah_config.ah_serialise_reg_war = 1; 727252190Srpaulo device_printf(sc->sc_dev, 728252190Srpaulo "Enabling register serialisation\n"); 729252190Srpaulo } 730252190Srpaulo 731252190Srpaulo /* 732252190Srpaulo * Indicate we need the 802.11 header padded to a 733252190Srpaulo * 32-bit boundary for 4-address and QoS frames. 734252190Srpaulo */ 735252190Srpaulo ic->ic_flags |= IEEE80211_F_DATAPAD; 736252190Srpaulo 737252190Srpaulo /* 738252190Srpaulo * Query the hal about antenna support. 739252190Srpaulo */ 740252190Srpaulo sc->sc_defant = ath_hal_getdefantenna(ah); 741252190Srpaulo 742252190Srpaulo /* 743252190Srpaulo * Not all chips have the VEOL support we want to 744252190Srpaulo * use with IBSS beacons; check here for it. 745252190Srpaulo */ 746252190Srpaulo sc->sc_hasveol = ath_hal_hasveol(ah); 747252190Srpaulo 748252190Srpaulo /* get mac address from hardware */ 749252190Srpaulo ath_hal_getmac(ah, macaddr); 750252190Srpaulo if (sc->sc_hasbmask) 751252190Srpaulo ath_hal_getbssidmask(ah, sc->sc_hwbssidmask); 752252190Srpaulo 753252190Srpaulo /* NB: used to size node table key mapping array */ 754252190Srpaulo ic->ic_max_keyix = sc->sc_keymax; 755252190Srpaulo /* call MI attach routine. 
*/ 756252190Srpaulo ieee80211_ifattach(ic, macaddr); 757252190Srpaulo ic->ic_setregdomain = ath_setregdomain; 758252190Srpaulo ic->ic_getradiocaps = ath_getradiocaps; 759214501Srpaulo sc->sc_opmode = HAL_M_STA; 760214501Srpaulo 761214501Srpaulo /* override default methods */ 762214501Srpaulo ic->ic_newassoc = ath_newassoc; 763252190Srpaulo ic->ic_updateslot = ath_updateslot; 764214501Srpaulo ic->ic_wme.wme_update = ath_wme_update; 765214501Srpaulo ic->ic_vap_create = ath_vap_create; 766252190Srpaulo ic->ic_vap_delete = ath_vap_delete; 767214501Srpaulo ic->ic_raw_xmit = ath_raw_xmit; 768214501Srpaulo ic->ic_update_mcast = ath_update_mcast; 769214501Srpaulo ic->ic_update_promisc = ath_update_promisc; 770214501Srpaulo ic->ic_node_alloc = ath_node_alloc; 771214501Srpaulo sc->sc_node_free = ic->ic_node_free; 772214501Srpaulo ic->ic_node_free = ath_node_free; 773214501Srpaulo sc->sc_node_cleanup = ic->ic_node_cleanup; 774214501Srpaulo ic->ic_node_cleanup = ath_node_cleanup; 775324714Scy ic->ic_node_getsignal = ath_node_getsignal; 776214501Srpaulo ic->ic_scan_start = ath_scan_start; 777214501Srpaulo ic->ic_scan_end = ath_scan_end; 778214501Srpaulo ic->ic_set_channel = ath_set_channel; 779214501Srpaulo 780214501Srpaulo /* 802.11n specific - but just override anyway */ 781214501Srpaulo sc->sc_addba_request = ic->ic_addba_request; 782214501Srpaulo sc->sc_addba_response = ic->ic_addba_response; 783214501Srpaulo sc->sc_addba_stop = ic->ic_addba_stop; 784214501Srpaulo sc->sc_bar_response = ic->ic_bar_response; 785214501Srpaulo sc->sc_addba_response_timeout = ic->ic_addba_response_timeout; 786214501Srpaulo 787252190Srpaulo ic->ic_addba_request = ath_addba_request; 788214501Srpaulo ic->ic_addba_response = ath_addba_response; 789214501Srpaulo ic->ic_addba_response_timeout = ath_addba_response_timeout; 790252190Srpaulo ic->ic_addba_stop = ath_addba_stop; 791214501Srpaulo ic->ic_bar_response = ath_bar_response; 792214501Srpaulo 793214501Srpaulo ieee80211_radiotap_attach(ic, 
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		ATH_TX_RADIOTAP_PRESENT,
		&sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		ATH_RX_RADIOTAP_PRESENT);

	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);
	ath_sysctl_stats_attach(sc);
	ath_sysctl_hal_attach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	ath_announce(sc);
	return 0;
bad2:
	/* Unwind in reverse order of setup; see the attach body above. */
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
bad:
	if (ah)
		ath_hal_detach(ah);
	if (ifp != NULL)
		if_free(ifp);
	/* Mark the softc invalid so the shared-IRQ handler ignores us. */
	sc->sc_invalid = 1;
	return error;
}

/*
 * Detach the driver from the device: stop the hardware, detach
 * from net80211, and release driver resources.  Returns 0.
 *
 * NOTE(review): assumes attach completed successfully; callers on
 * partial-attach failure paths go through the bad/bad2 labels in
 * ath_attach() instead.
 */
int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * NB: the order of these is important:
	 * o stop the chip so no more interrupts will fire
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o free the taskqueue which drains any pending tasks
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ath_stop(ifp);
	ieee80211_ifdetach(ifp->if_l2com);
	taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ath_rate_detach(sc->sc_rc);

	ath_dfs_detach(sc);
	ath_desc_free(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */
	if_free(ifp);

	return 0;
}

/*
 * MAC address handling for multiple BSS on the same radio.
 * The first vap uses the MAC address from the EEPROM.  For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
 */
static void
assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && sc->sc_hasbmask) {
		/* NB: we only do this if h/w supports multiple bssid */
		/* Find the first free index in the 8-entry bssid mask. */
		for (i = 0; i < 8; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		/*
		 * Index 0 keeps the EEPROM address untouched; any other
		 * index sets the locally-administered (U/L) bit and
		 * encodes the index in bits 2-4 of the first octet.
		 */
		if (i != 0)
			mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	/* Widen the h/w bssid mask so this address still matches. */
	sc->sc_hwbssidmask[0] &= ~mac[0];
	/* Track users of the base (index 0) address separately. */
	if (i == 0)
		sc->sc_nbssid0++;
}

/*
 * Release a MAC address previously handed out by assign_address()
 * and recompute the h/w bssid mask from the addresses still in use.
 */
static void
reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	int i = mac[0] >> 2;	/* recover the index encoded in octet 0 */
	uint8_t mask;

	/* Index 0 is shared; only release it when its last user goes. */
	if (i != 0 || --sc->sc_nbssid0 == 0) {
		sc->sc_bssidmask &= ~(1<<i);
		/* recalculate bssid mask from remaining addresses */
		mask = 0xff;
		for (i = 1; i < 8; i++)
			if (sc->sc_bssidmask & (1<<i))
				mask &= ~((i<<2)|0x2);
		sc->sc_hwbssidmask[0] |= mask;
	}
}

/*
 * Assign a beacon xmit slot.  We try to space out
 * assignments so when beacons are staggered the
 * traffic coming out of the cab q has maximal time
 * to go out before the next beacon is scheduled.
911252190Srpaulo */ 912252190Srpaulostatic int 913252190Srpauloassign_bslot(struct ath_softc *sc) 914281681Srpaulo{ 915252190Srpaulo u_int slot, free; 916252190Srpaulo 917281681Srpaulo free = 0; 918252190Srpaulo for (slot = 0; slot < ATH_BCBUF; slot++) 919252190Srpaulo if (sc->sc_bslot[slot] == NULL) { 920252190Srpaulo if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL && 921252190Srpaulo sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL) 922252190Srpaulo return slot; 923252190Srpaulo free = slot; 924281681Srpaulo /* NB: keep looking for a double slot */ 925252190Srpaulo } 926252190Srpaulo return free; 927252190Srpaulo} 928252190Srpaulo 929252190Srpaulostatic struct ieee80211vap * 930252190Srpauloath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 931281681Srpaulo enum ieee80211_opmode opmode, int flags, 932252190Srpaulo const uint8_t bssid[IEEE80211_ADDR_LEN], 933252190Srpaulo const uint8_t mac0[IEEE80211_ADDR_LEN]) 934341618Scy{ 935341618Scy struct ath_softc *sc = ic->ic_ifp->if_softc; 936341618Scy struct ath_vap *avp; 937341618Scy struct ieee80211vap *vap; 938341618Scy uint8_t mac[IEEE80211_ADDR_LEN]; 939341618Scy int needbeacon, error; 940252190Srpaulo enum ieee80211_opmode ic_opmode; 941252190Srpaulo 942252190Srpaulo avp = (struct ath_vap *) malloc(sizeof(struct ath_vap), 943252190Srpaulo M_80211_VAP, M_WAITOK | M_ZERO); 944252190Srpaulo needbeacon = 0; 945252190Srpaulo IEEE80211_ADDR_COPY(mac, mac0); 946252190Srpaulo 947252190Srpaulo ATH_LOCK(sc); 948252190Srpaulo ic_opmode = opmode; /* default to opmode of new vap */ 949252190Srpaulo switch (opmode) { 950252190Srpaulo case IEEE80211_M_STA: 951252190Srpaulo if (sc->sc_nstavaps != 0) { /* XXX only 1 for now */ 952252190Srpaulo device_printf(sc->sc_dev, "only 1 sta vap supported\n"); 953252190Srpaulo goto bad; 954252190Srpaulo } 955252190Srpaulo if (sc->sc_nvaps) { 956289284Srpaulo /* 957252190Srpaulo * With multiple vaps we must fall back 958252190Srpaulo * to s/w beacon miss handling. 
959252190Srpaulo */ 960214501Srpaulo flags |= IEEE80211_CLONE_NOBEACONS; 961214501Srpaulo } 962214501Srpaulo if (flags & IEEE80211_CLONE_NOBEACONS) { 963214501Srpaulo /* 964252190Srpaulo * Station mode w/o beacons are implemented w/ AP mode. 965252190Srpaulo */ 966252190Srpaulo ic_opmode = IEEE80211_M_HOSTAP; 967252190Srpaulo } 968341618Scy break; 969252190Srpaulo case IEEE80211_M_IBSS: 970252190Srpaulo if (sc->sc_nvaps != 0) { /* XXX only 1 for now */ 971252190Srpaulo device_printf(sc->sc_dev, 972341618Scy "only 1 ibss vap supported\n"); 973252190Srpaulo goto bad; 974252190Srpaulo } 975252190Srpaulo needbeacon = 1; 976252190Srpaulo break; 977252190Srpaulo case IEEE80211_M_AHDEMO: 978252190Srpaulo#ifdef IEEE80211_SUPPORT_TDMA 979252190Srpaulo if (flags & IEEE80211_CLONE_TDMA) { 980252190Srpaulo if (sc->sc_nvaps != 0) { 981252190Srpaulo device_printf(sc->sc_dev, 982252190Srpaulo "only 1 tdma vap supported\n"); 983252190Srpaulo goto bad; 984252190Srpaulo } 985214501Srpaulo needbeacon = 1; 986341618Scy flags |= IEEE80211_CLONE_NOBEACONS; 987214501Srpaulo } 988252190Srpaulo /* fall thru... */ 989214501Srpaulo#endif 990214501Srpaulo case IEEE80211_M_MONITOR: 991341618Scy if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) { 992214501Srpaulo /* 993214501Srpaulo * Adopt existing mode. Adding a monitor or ahdemo 994214501Srpaulo * vap to an existing configuration is of dubious 995252190Srpaulo * value but should be ok. 
996252190Srpaulo */ 997252190Srpaulo /* XXX not right for monitor mode */ 998252190Srpaulo ic_opmode = ic->ic_opmode; 999252190Srpaulo } 1000252190Srpaulo break; 1001252190Srpaulo case IEEE80211_M_HOSTAP: 1002252190Srpaulo case IEEE80211_M_MBSS: 1003214501Srpaulo needbeacon = 1; 1004214501Srpaulo break; 1005252190Srpaulo case IEEE80211_M_WDS: 1006214501Srpaulo if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) { 1007252190Srpaulo device_printf(sc->sc_dev, 1008252190Srpaulo "wds not supported in sta mode\n"); 1009214501Srpaulo goto bad; 1010214501Srpaulo } 1011252190Srpaulo /* 1012252190Srpaulo * Silently remove any request for a unique 1013252190Srpaulo * bssid; WDS vap's always share the local 1014214501Srpaulo * mac address. 1015214501Srpaulo */ 1016214501Srpaulo flags &= ~IEEE80211_CLONE_BSSID; 1017214501Srpaulo if (sc->sc_nvaps == 0) 1018341618Scy ic_opmode = IEEE80211_M_HOSTAP; 1019341618Scy else 1020214501Srpaulo ic_opmode = ic->ic_opmode; 1021252190Srpaulo break; 1022252190Srpaulo default: 1023252190Srpaulo device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); 1024252190Srpaulo goto bad; 1025252190Srpaulo } 1026252190Srpaulo /* 1027252190Srpaulo * Check that a beacon buffer is available; the code below assumes it. 1028252190Srpaulo */ 1029252190Srpaulo if (needbeacon & TAILQ_EMPTY(&sc->sc_bbuf)) { 1030252190Srpaulo device_printf(sc->sc_dev, "no beacon buffer available\n"); 1031252190Srpaulo goto bad; 1032252190Srpaulo } 1033252190Srpaulo 1034214501Srpaulo /* STA, AHDEMO? 
*/ 1035252190Srpaulo if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) { 1036252190Srpaulo assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID); 1037214501Srpaulo ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); 1038214501Srpaulo } 1039214501Srpaulo 1040252190Srpaulo vap = &avp->av_vap; 1041252190Srpaulo /* XXX can't hold mutex across if_alloc */ 1042252190Srpaulo ATH_UNLOCK(sc); 1043214501Srpaulo error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, 1044252190Srpaulo bssid, mac); 1045252190Srpaulo ATH_LOCK(sc); 1046252190Srpaulo if (error != 0) { 1047252190Srpaulo device_printf(sc->sc_dev, "%s: error %d creating vap\n", 1048252190Srpaulo __func__, error); 1049252190Srpaulo goto bad2; 1050252190Srpaulo } 1051252190Srpaulo 1052252190Srpaulo /* h/w crypto support */ 1053252190Srpaulo vap->iv_key_alloc = ath_key_alloc; 1054252190Srpaulo vap->iv_key_delete = ath_key_delete; 1055252190Srpaulo vap->iv_key_set = ath_key_set; 1056252190Srpaulo vap->iv_key_update_begin = ath_key_update_begin; 1057252190Srpaulo vap->iv_key_update_end = ath_key_update_end; 1058252190Srpaulo 1059252190Srpaulo /* override various methods */ 1060214501Srpaulo avp->av_recv_mgmt = vap->iv_recv_mgmt; 1061214501Srpaulo vap->iv_recv_mgmt = ath_recv_mgmt; 1062214501Srpaulo vap->iv_reset = ath_reset_vap; 1063252190Srpaulo vap->iv_update_beacon = ath_beacon_update; 1064214501Srpaulo avp->av_newstate = vap->iv_newstate; 1065214501Srpaulo vap->iv_newstate = ath_newstate; 1066214501Srpaulo avp->av_bmiss = vap->iv_bmiss; 1067214501Srpaulo vap->iv_bmiss = ath_bmiss_vap; 1068214501Srpaulo 1069252190Srpaulo /* Set default parameters */ 1070214501Srpaulo 1071214501Srpaulo /* 1072341618Scy * Anything earlier than some AR9300 series MACs don't 1073252190Srpaulo * support a smaller MPDU density. 
1074341618Scy */ 1075252190Srpaulo vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8; 1076252190Srpaulo /* 1077252190Srpaulo * All NICs can handle the maximum size, however 1078252190Srpaulo * AR5416 based MACs can only TX aggregates w/ RTS 1079252190Srpaulo * protection when the total aggregate size is <= 8k. 1080252190Srpaulo * However, for now that's enforced by the TX path. 1081341618Scy */ 1082252190Srpaulo vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K; 1083341618Scy 1084341618Scy avp->av_bslot = -1; 1085341618Scy if (needbeacon) { 1086341618Scy /* 1087341618Scy * Allocate beacon state and setup the q for buffered 1088341618Scy * multicast frames. We know a beacon buffer is 1089341618Scy * available because we checked above. 1090341618Scy */ 1091341618Scy avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf); 1092341618Scy TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list); 1093341618Scy if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) { 1094341618Scy /* 1095341618Scy * Assign the vap to a beacon xmit slot. As above 1096341618Scy * this cannot fail to find a free one. 1097341618Scy */ 1098341618Scy avp->av_bslot = assign_bslot(sc); 1099341618Scy KASSERT(sc->sc_bslot[avp->av_bslot] == NULL, 1100341618Scy ("beacon slot %u not empty", avp->av_bslot)); 1101341618Scy sc->sc_bslot[avp->av_bslot] = vap; 1102341618Scy sc->sc_nbcnvaps++; 1103341618Scy } 1104341618Scy if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) { 1105341618Scy /* 1106252190Srpaulo * Multple vaps are to transmit beacons and we 1107341618Scy * have h/w support for TSF adjusting; enable 1108341618Scy * use of staggered beacons. 
1109341618Scy */ 1110341618Scy sc->sc_stagbeacons = 1; 1111341618Scy } 1112341618Scy ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ); 1113341618Scy } 1114341618Scy 1115341618Scy ic->ic_opmode = ic_opmode; 1116252190Srpaulo if (opmode != IEEE80211_M_WDS) { 1117252190Srpaulo sc->sc_nvaps++; 1118252190Srpaulo if (opmode == IEEE80211_M_STA) 1119252190Srpaulo sc->sc_nstavaps++; 1120341618Scy if (opmode == IEEE80211_M_MBSS) 1121341618Scy sc->sc_nmeshvaps++; 1122252190Srpaulo } 1123252190Srpaulo switch (ic_opmode) { 1124252190Srpaulo case IEEE80211_M_IBSS: 1125252190Srpaulo sc->sc_opmode = HAL_M_IBSS; 1126252190Srpaulo break; 1127252190Srpaulo case IEEE80211_M_STA: 1128252190Srpaulo sc->sc_opmode = HAL_M_STA; 1129341618Scy break; 1130252190Srpaulo case IEEE80211_M_AHDEMO: 1131252190Srpaulo#ifdef IEEE80211_SUPPORT_TDMA 1132252190Srpaulo if (vap->iv_caps & IEEE80211_C_TDMA) { 1133252190Srpaulo sc->sc_tdma = 1; 1134252190Srpaulo /* NB: disable tsf adjust */ 1135252190Srpaulo sc->sc_stagbeacons = 0; 1136252190Srpaulo } 1137252190Srpaulo /* 1138252190Srpaulo * NB: adhoc demo mode is a pseudo mode; to the hal it's 1139252190Srpaulo * just ap mode. 1140252190Srpaulo */ 1141252190Srpaulo /* fall thru... */ 1142252190Srpaulo#endif 1143252190Srpaulo case IEEE80211_M_HOSTAP: 1144252190Srpaulo case IEEE80211_M_MBSS: 1145252190Srpaulo sc->sc_opmode = HAL_M_HOSTAP; 1146252190Srpaulo break; 1147252190Srpaulo case IEEE80211_M_MONITOR: 1148252190Srpaulo sc->sc_opmode = HAL_M_MONITOR; 1149252190Srpaulo break; 1150252190Srpaulo default: 1151252190Srpaulo /* XXX should not happen */ 1152252190Srpaulo break; 1153252190Srpaulo } 1154252190Srpaulo if (sc->sc_hastsfadd) { 1155252190Srpaulo /* 1156252190Srpaulo * Configure whether or not TSF adjust should be done. 
1157252190Srpaulo */ 1158252190Srpaulo ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons); 1159252190Srpaulo } 1160252190Srpaulo if (flags & IEEE80211_CLONE_NOBEACONS) { 1161252190Srpaulo /* 1162252190Srpaulo * Enable s/w beacon miss handling. 1163252190Srpaulo */ 1164252190Srpaulo sc->sc_swbmiss = 1; 1165252190Srpaulo } 1166252190Srpaulo ATH_UNLOCK(sc); 1167252190Srpaulo 1168252190Srpaulo /* complete setup */ 1169252190Srpaulo ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status); 1170252190Srpaulo return vap; 1171252190Srpaulobad2: 1172341618Scy reclaim_address(sc, mac); 1173252190Srpaulo ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); 1174214501Srpaulobad: 1175252190Srpaulo free(avp, M_80211_VAP); 1176214501Srpaulo ATH_UNLOCK(sc); 1177214501Srpaulo return NULL; 1178214501Srpaulo} 1179214501Srpaulo 1180214501Srpaulostatic void 1181214501Srpauloath_vap_delete(struct ieee80211vap *vap) 1182214501Srpaulo{ 1183214501Srpaulo struct ieee80211com *ic = vap->iv_ic; 1184214501Srpaulo struct ifnet *ifp = ic->ic_ifp; 1185324714Scy struct ath_softc *sc = ifp->if_softc; 1186324714Scy struct ath_hal *ah = sc->sc_ah; 1187214501Srpaulo struct ath_vap *avp = ATH_VAP(vap); 1188214501Srpaulo 1189214501Srpaulo DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); 1190214501Srpaulo if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1191214501Srpaulo /* 1192214501Srpaulo * Quiesce the hardware while we remove the vap. In 1193214501Srpaulo * particular we need to reclaim all references to 1194214501Srpaulo * the vap state by any frames pending on the tx queues. 1195214501Srpaulo */ 1196252190Srpaulo ath_hal_intrset(ah, 0); /* disable interrupts */ 1197214501Srpaulo ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */ 1198252190Srpaulo /* XXX Do all frames from all vaps/nodes need draining here? 
 */
		ath_stoprecv(sc, 1);		/* stop recv side */
	}

	ieee80211_vap_detach(vap);

	/*
	 * XXX Danger Will Robinson! Danger!
	 *
	 * Because ieee80211_vap_detach() can queue a frame (the station
	 * diassociate message?) after we've drained the TXQ and
	 * flushed the software TXQ, we will end up with a frame queued
	 * to a node whose vap is about to be freed.
	 *
	 * To work around this, flush the hardware/software again.
	 * This may be racy - the ath task may be running and the packet
	 * may be being scheduled between sw->hw txq. Tsk.
	 *
	 * TODO: figure out why a new node gets allocated somewhere around
	 * here (after the ath_tx_swq() call; and after an ath_stop_locked()
	 * call!)
	 */

	ath_draintxq(sc, ATH_RESET_DEFAULT);

	ATH_LOCK(sc);
	/*
	 * Reclaim beacon state.  Note this must be done before
	 * the vap instance is reclaimed as we may have a reference
	 * to it in the buffer for the beacon frame.
	 */
	if (avp->av_bcbuf != NULL) {
		if (avp->av_bslot != -1) {
			sc->sc_bslot[avp->av_bslot] = NULL;
			sc->sc_nbcnvaps--;
		}
		ath_beacon_return(sc, avp->av_bcbuf);
		avp->av_bcbuf = NULL;
		/* Last beaconing vap gone: stop staggering/TSF adjust. */
		if (sc->sc_nbcnvaps == 0) {
			sc->sc_stagbeacons = 0;
			if (sc->sc_hastsfadd)
				ath_hal_settsfadjust(sc->sc_ah, 0);
		}
		/*
		 * Reclaim any pending mcast frames for the vap.
		 */
		ath_tx_draintxq(sc, &avp->av_mcastq);
		ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);
	}
	/*
	 * Update bookkeeping.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		sc->sc_nstavaps--;
		/* No sta vaps left: s/w beacon miss is no longer needed. */
		if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
			sc->sc_swbmiss = 0;
	} else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	    vap->iv_opmode == IEEE80211_M_MBSS) {
		/* Return the cloned MAC address and refresh the h/w mask. */
		reclaim_address(sc, vap->iv_myaddr);
		ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
		if (vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps--;
	}
	if (vap->iv_opmode != IEEE80211_M_WDS)
		sc->sc_nvaps--;
#ifdef IEEE80211_SUPPORT_TDMA
	/* TDMA operation ceases when the last vap is destroyed */
	if (sc->sc_tdma && sc->sc_nvaps == 0) {
		sc->sc_tdma = 0;
		sc->sc_swbmiss = 0;
	}
#endif
	free(avp, M_80211_VAP);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Restart rx+tx machines if still running (RUNNING will
		 * be reset if we just destroyed the last vap).
		 */
		if (ath_startrecv(sc) != 0)
			if_printf(ifp, "%s: unable to restart recv logic\n",
			    __func__);
		if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma)
				ath_tdma_config(sc, NULL);
			else
#endif
				ath_beacon_config(sc, NULL);
		}
		ath_hal_intrset(ah, sc->sc_imask);	/* re-enable interrupts */
	}
	ATH_UNLOCK(sc);
}

/*
 * Suspend hook: remember whether the interface was up (so resume can
 * bring it back), then stop the device or suspend all vaps.
 */
void
ath_suspend(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;
	if (ic->ic_opmode == IEEE80211_M_STA)
		ath_stop(ifp);
	else
		ieee80211_suspend_all(ic);
	/*
	 * NB: don't worry about putting the chip in low power
	 * mode; pci will power off our socket on suspend and
	 * CardBus detaches the device.
	 */
}

/*
 * Reset the key cache since some parts do not reset the
 * contents on resume.  First we clear all entries, then
 * re-load keys that the 802.11 layer assumes are setup
 * in h/w.
 */
static void
ath_reset_keycache(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	int i;

	/* Wipe every h/w key slot, then have net80211 re-plumb its keys. */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);
	ieee80211_crypto_reload_keys(ic);
}

/*
 * Resume hook: re-initialize the chip after power loss, reload the
 * key cache, restore DFS/LED state, and restart the interface if it
 * was up when ath_suspend() ran.
 */
void
ath_resume(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * Must reset the chip before we reload the
	 * keycache as we were powered down on suspend.
	 */
	ath_hal_reset(ah, sc->sc_opmode,
	    sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
	    AH_FALSE, &status);
	ath_reset_keycache(sc);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/* Restore the LED configuration */
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);

	if (sc->sc_resume_up) {
		if (ic->ic_opmode == IEEE80211_M_STA) {
			ath_init(sc);
			ath_hal_setledstate(ah, HAL_LED_RUN);
			/*
			 * Program the beacon registers using the last rx'd
			 * beacon frame and enable sync on the next beacon
			 * we see.  This should handle the case where we
			 * wakeup and find the same AP and also the case where
			 * we wakeup and need to roam.  For the latter we
			 * should get bmiss events that trigger a roam.
			 */
			ath_beacon_config(sc, NULL);
			sc->sc_syncbeacon = 1;
		} else
			ieee80211_resume_all(ic);
	}

	/* XXX beacons ? */
}

/*
 * Shutdown hook: quiesce the device before the system reboots.
 */
void
ath_shutdown(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	ath_stop(ifp);
	/* NB: no point powering down chip as we're about to reboot */
}

/*
 * Interrupt handler.  Most of the actual processing is deferred.
 */
void
ath_intr(void *arg)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	HAL_INT status = 0;
	uint32_t txqs;

	/*
	 * If we're inside a reset path, just print a warning and
	 * clear the ISR.  The reset routine will finish it for us.
1408252190Srpaulo */ 1409252190Srpaulo ATH_PCU_LOCK(sc); 1410252190Srpaulo if (sc->sc_inreset_cnt) { 1411252190Srpaulo HAL_INT status; 1412281681Srpaulo ath_hal_getisr(ah, &status); /* clear ISR */ 1413252190Srpaulo ath_hal_intrset(ah, 0); /* disable further intr's */ 1414252190Srpaulo DPRINTF(sc, ATH_DEBUG_ANY, 1415252190Srpaulo "%s: in reset, ignoring: status=0x%x\n", 1416252190Srpaulo __func__, status); 1417252190Srpaulo ATH_PCU_UNLOCK(sc); 1418252190Srpaulo return; 1419252190Srpaulo } 1420252190Srpaulo 1421252190Srpaulo if (sc->sc_invalid) { 1422252190Srpaulo /* 1423252190Srpaulo * The hardware is not ready/present, don't touch anything. 1424252190Srpaulo * Note this can happen early on if the IRQ is shared. 1425252190Srpaulo */ 1426252190Srpaulo DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__); 1427252190Srpaulo ATH_PCU_UNLOCK(sc); 1428252190Srpaulo return; 1429252190Srpaulo } 1430252190Srpaulo if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */ 1431252190Srpaulo ATH_PCU_UNLOCK(sc); 1432252190Srpaulo return; 1433252190Srpaulo } 1434252190Srpaulo 1435252190Srpaulo if ((ifp->if_flags & IFF_UP) == 0 || 1436252190Srpaulo (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1437252190Srpaulo HAL_INT status; 1438252190Srpaulo 1439252190Srpaulo DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 1440252190Srpaulo __func__, ifp->if_flags); 1441252190Srpaulo ath_hal_getisr(ah, &status); /* clear ISR */ 1442252190Srpaulo ath_hal_intrset(ah, 0); /* disable further intr's */ 1443252190Srpaulo ATH_PCU_UNLOCK(sc); 1444252190Srpaulo return; 1445252190Srpaulo } 1446252190Srpaulo 1447252190Srpaulo /* 1448252190Srpaulo * Figure out the reason(s) for the interrupt. Note 1449252190Srpaulo * that the hal returns a pseudo-ISR that may include 1450252190Srpaulo * bits we haven't explicitly enabled so we mask the 1451252190Srpaulo * value to insure we only process bits we requested. 
1452252190Srpaulo */ 1453252190Srpaulo ath_hal_getisr(ah, &status); /* NB: clears ISR too */ 1454252190Srpaulo DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status); 1455252190Srpaulo CTR1(ATH_KTR_INTR, "ath_intr: mask=0x%.8x", status); 1456252190Srpaulo#ifdef ATH_KTR_INTR_DEBUG 1457252190Srpaulo CTR5(ATH_KTR_INTR, 1458341618Scy "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x", 1459341618Scy ah->ah_intrstate[0], 1460341618Scy ah->ah_intrstate[1], 1461341618Scy ah->ah_intrstate[2], 1462341618Scy ah->ah_intrstate[3], 1463252190Srpaulo ah->ah_intrstate[6]); 1464252190Srpaulo#endif 1465252190Srpaulo status &= sc->sc_imask; /* discard unasked for bits */ 1466252190Srpaulo 1467252190Srpaulo /* Short-circuit un-handled interrupts */ 1468252190Srpaulo if (status == 0x0) { 1469252190Srpaulo ATH_PCU_UNLOCK(sc); 1470252190Srpaulo return; 1471214501Srpaulo } 1472252190Srpaulo 1473252190Srpaulo /* 1474252190Srpaulo * Take a note that we're inside the interrupt handler, so 1475252190Srpaulo * the reset routines know to wait. 1476341618Scy */ 1477341618Scy sc->sc_intr_cnt++; 1478341618Scy ATH_PCU_UNLOCK(sc); 1479341618Scy 1480252190Srpaulo /* 1481252190Srpaulo * Handle the interrupt. We won't run concurrent with the reset 1482252190Srpaulo * or channel change routines as they'll wait for sc_intr_cnt 1483252190Srpaulo * to be 0 before continuing. 1484252190Srpaulo */ 1485252190Srpaulo if (status & HAL_INT_FATAL) { 1486252190Srpaulo sc->sc_stats.ast_hardware++; 1487252190Srpaulo ath_hal_intrset(ah, 0); /* disable intr's until reset */ 1488252190Srpaulo ath_fatal_proc(sc, 0); 1489252190Srpaulo } else { 1490252190Srpaulo if (status & HAL_INT_SWBA) { 1491324714Scy /* 1492214501Srpaulo * Software beacon alert--time to send a beacon. 1493214501Srpaulo * Handle beacon transmission directly; deferring 1494214501Srpaulo * this is too slow to meet timing constraints 1495214501Srpaulo * under load. 
1496214501Srpaulo */ 1497214501Srpaulo#ifdef IEEE80211_SUPPORT_TDMA 1498214501Srpaulo if (sc->sc_tdma) { 1499324714Scy if (sc->sc_tdmaswba == 0) { 1500214501Srpaulo struct ieee80211com *ic = ifp->if_l2com; 1501214501Srpaulo struct ieee80211vap *vap = 1502324714Scy TAILQ_FIRST(&ic->ic_vaps); 1503214501Srpaulo ath_tdma_beacon_send(sc, vap); 1504214501Srpaulo sc->sc_tdmaswba = 1505324714Scy vap->iv_tdma->tdma_bintval; 1506214501Srpaulo } else 1507214501Srpaulo sc->sc_tdmaswba--; 1508214501Srpaulo } else 1509214501Srpaulo#endif 1510214501Srpaulo { 1511214501Srpaulo ath_beacon_proc(sc, 0); 1512214501Srpaulo#ifdef IEEE80211_SUPPORT_SUPERG 1513214501Srpaulo /* 1514214501Srpaulo * Schedule the rx taskq in case there's no 1515214501Srpaulo * traffic so any frames held on the staging 1516324714Scy * queue are aged and potentially flushed. 1517324714Scy */ 1518214501Srpaulo taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 1519324714Scy#endif 1520214501Srpaulo } 1521214501Srpaulo } 1522214501Srpaulo if (status & HAL_INT_RXEOL) { 1523214501Srpaulo int imask; 1524214501Srpaulo CTR0(ATH_KTR_ERR, "ath_intr: RXEOL"); 1525214501Srpaulo ATH_PCU_LOCK(sc); 1526214501Srpaulo /* 1527214501Srpaulo * NB: the hardware should re-read the link when 1528214501Srpaulo * RXE bit is written, but it doesn't work at 1529252190Srpaulo * least on older hardware revs. 1530214501Srpaulo */ 1531214501Srpaulo sc->sc_stats.ast_rxeol++; 1532214501Srpaulo /* 1533214501Srpaulo * Disable RXEOL/RXORN - prevent an interrupt 1534214501Srpaulo * storm until the PCU logic can be reset. 1535214501Srpaulo * In case the interface is reset some other 1536214501Srpaulo * way before "sc_kickpcu" is called, don't 1537324714Scy * modify sc_imask - that way if it is reset 1538214501Srpaulo * by a call to ath_reset() somehow, the 1539324714Scy * interrupt mask will be correctly reprogrammed. 
1540214501Srpaulo */ 1541214501Srpaulo imask = sc->sc_imask; 1542324714Scy imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN); 1543214501Srpaulo ath_hal_intrset(ah, imask); 1544252190Srpaulo /* 1545252190Srpaulo * Only blank sc_rxlink if we've not yet kicked 1546252190Srpaulo * the PCU. 1547252190Srpaulo * 1548252190Srpaulo * This isn't entirely correct - the correct solution 1549252190Srpaulo * would be to have a PCU lock and engage that for 1550252190Srpaulo * the duration of the PCU fiddling; which would include 1551252190Srpaulo * running the RX process. Otherwise we could end up 1552252190Srpaulo * messing up the RX descriptor chain and making the 1553324714Scy * RX desc list much shorter. 1554214501Srpaulo */ 1555214501Srpaulo if (! sc->sc_kickpcu) 1556214501Srpaulo sc->sc_rxlink = NULL; 1557214501Srpaulo sc->sc_kickpcu = 1; 1558214501Srpaulo /* 1559214501Srpaulo * Enqueue an RX proc, to handled whatever 1560214501Srpaulo * is in the RX queue. 1561214501Srpaulo * This will then kick the PCU. 1562214501Srpaulo */ 1563252190Srpaulo taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 1564214501Srpaulo ATH_PCU_UNLOCK(sc); 1565214501Srpaulo } 1566252190Srpaulo if (status & HAL_INT_TXURN) { 1567214501Srpaulo sc->sc_stats.ast_txurn++; 1568214501Srpaulo /* bump tx trigger level */ 1569214501Srpaulo ath_hal_updatetxtriglevel(ah, AH_TRUE); 1570214501Srpaulo } 1571214501Srpaulo if (status & HAL_INT_RX) { 1572214501Srpaulo sc->sc_stats.ast_rx_intr++; 1573214501Srpaulo taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 1574214501Srpaulo } 1575214501Srpaulo if (status & HAL_INT_TX) { 1576214501Srpaulo sc->sc_stats.ast_tx_intr++; 1577214501Srpaulo /* 1578214501Srpaulo * Grab all the currently set bits in the HAL txq bitmap 1579252190Srpaulo * and blank them. This is the only place we should be 1580214501Srpaulo * doing this. 
1581214501Srpaulo */ 1582214501Srpaulo ATH_PCU_LOCK(sc); 1583214501Srpaulo txqs = 0xffffffff; 1584214501Srpaulo ath_hal_gettxintrtxqs(sc->sc_ah, &txqs); 1585214501Srpaulo sc->sc_txq_active |= txqs; 1586214501Srpaulo taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask); 1587214501Srpaulo ATH_PCU_UNLOCK(sc); 1588214501Srpaulo } 1589252190Srpaulo if (status & HAL_INT_BMISS) { 1590214501Srpaulo sc->sc_stats.ast_bmiss++; 1591214501Srpaulo taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask); 1592214501Srpaulo } 1593214501Srpaulo if (status & HAL_INT_GTT) 1594214501Srpaulo sc->sc_stats.ast_tx_timeout++; 1595214501Srpaulo if (status & HAL_INT_CST) 1596214501Srpaulo sc->sc_stats.ast_tx_cst++; 1597214501Srpaulo if (status & HAL_INT_MIB) { 1598214501Srpaulo sc->sc_stats.ast_mib++; 1599214501Srpaulo ATH_PCU_LOCK(sc); 1600214501Srpaulo /* 1601214501Srpaulo * Disable interrupts until we service the MIB 1602214501Srpaulo * interrupt; otherwise it will continue to fire. 1603214501Srpaulo */ 1604214501Srpaulo ath_hal_intrset(ah, 0); 1605214501Srpaulo /* 1606214501Srpaulo * Let the hal handle the event. We assume it will 1607324714Scy * clear whatever condition caused the interrupt. 1608214501Srpaulo */ 1609214501Srpaulo ath_hal_mibevent(ah, &sc->sc_halstats); 1610214501Srpaulo /* 1611214501Srpaulo * Don't reset the interrupt if we've just 1612214501Srpaulo * kicked the PCU, or we may get a nested 1613214501Srpaulo * RXEOL before the rxproc has had a chance 1614214501Srpaulo * to run. 
1615214501Srpaulo */ 1616281681Srpaulo if (sc->sc_kickpcu == 0) 1617281681Srpaulo ath_hal_intrset(ah, sc->sc_imask); 1618281681Srpaulo ATH_PCU_UNLOCK(sc); 1619214501Srpaulo } 1620214501Srpaulo if (status & HAL_INT_RXORN) { 1621214501Srpaulo /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */ 1622214501Srpaulo CTR0(ATH_KTR_ERR, "ath_intr: RXORN"); 1623214501Srpaulo sc->sc_stats.ast_rxorn++; 1624214501Srpaulo } 1625214501Srpaulo } 1626214501Srpaulo ATH_PCU_LOCK(sc); 1627214501Srpaulo sc->sc_intr_cnt--; 1628214501Srpaulo ATH_PCU_UNLOCK(sc); 1629214501Srpaulo} 1630252190Srpaulo 1631214501Srpaulostatic void 1632214501Srpauloath_fatal_proc(void *arg, int pending) 1633214501Srpaulo{ 1634214501Srpaulo struct ath_softc *sc = arg; 1635214501Srpaulo struct ifnet *ifp = sc->sc_ifp; 1636252190Srpaulo u_int32_t *state; 1637214501Srpaulo u_int32_t len; 1638214501Srpaulo void *sp; 1639214501Srpaulo 1640252190Srpaulo if_printf(ifp, "hardware error; resetting\n"); 1641214501Srpaulo /* 1642214501Srpaulo * Fatal errors are unrecoverable. Typically these 1643214501Srpaulo * are caused by DMA errors. Collect h/w state from 1644214501Srpaulo * the hal so we can diagnose what's going on. 1645214501Srpaulo */ 1646252190Srpaulo if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) { 1647214501Srpaulo KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len)); 1648214501Srpaulo state = sp; 1649214501Srpaulo if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n", 1650214501Srpaulo state[0], state[1] , state[2], state[3], 1651214501Srpaulo state[4], state[5]); 1652214501Srpaulo } 1653214501Srpaulo ath_reset(ifp, ATH_RESET_NOLOSS); 1654214501Srpaulo} 1655214501Srpaulo 1656214501Srpaulostatic void 1657214501Srpauloath_bmiss_vap(struct ieee80211vap *vap) 1658252190Srpaulo{ 1659252190Srpaulo /* 1660214501Srpaulo * Workaround phantom bmiss interrupts by sanity-checking 1661252190Srpaulo * the time of our last rx'd frame. 
If it is within the 1662214501Srpaulo * beacon miss interval then ignore the interrupt. If it's 1663214501Srpaulo * truly a bmiss we'll get another interrupt soon and that'll 1664214501Srpaulo * be dispatched up for processing. Note this applies only 1665214501Srpaulo * for h/w beacon miss events. 1666214501Srpaulo */ 1667214501Srpaulo if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) { 1668214501Srpaulo struct ifnet *ifp = vap->iv_ic->ic_ifp; 1669214501Srpaulo struct ath_softc *sc = ifp->if_softc; 1670214501Srpaulo u_int64_t lastrx = sc->sc_lastrx; 1671214501Srpaulo u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah); 1672214501Srpaulo /* XXX should take a locked ref to iv_bss */ 1673214501Srpaulo u_int bmisstimeout = 1674214501Srpaulo vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024; 1675214501Srpaulo 1676324714Scy DPRINTF(sc, ATH_DEBUG_BEACON, 1677214501Srpaulo "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n", 1678281681Srpaulo __func__, (unsigned long long) tsf, 1679281681Srpaulo (unsigned long long)(tsf - lastrx), 1680281681Srpaulo (unsigned long long) lastrx, bmisstimeout); 1681214501Srpaulo 1682214501Srpaulo if (tsf - lastrx <= bmisstimeout) { 1683214501Srpaulo sc->sc_stats.ast_bmiss_phantom++; 1684214501Srpaulo return; 1685324714Scy } 1686324714Scy } 1687214501Srpaulo ATH_VAP(vap)->av_bmiss(vap); 1688324714Scy} 1689214501Srpaulo 1690214501Srpaulostatic int 1691214501Srpauloath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs) 1692214501Srpaulo{ 1693214501Srpaulo uint32_t rsize; 1694214501Srpaulo void *sp; 1695324714Scy 1696214501Srpaulo if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize)) 1697214501Srpaulo return 0; 1698214501Srpaulo KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize)); 1699214501Srpaulo *hangs = *(uint32_t *)sp; 1700214501Srpaulo return 1; 1701214501Srpaulo} 1702252190Srpaulo 1703214501Srpaulostatic void 1704214501Srpauloath_bmiss_proc(void *arg, int pending) 1705214501Srpaulo{ 
1706214501Srpaulo struct ath_softc *sc = arg; 1707341618Scy struct ifnet *ifp = sc->sc_ifp; 1708341618Scy uint32_t hangs; 1709341618Scy 1710341618Scy DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending); 1711341618Scy 1712341618Scy if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) { 1713341618Scy if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs); 1714341618Scy ath_reset(ifp, ATH_RESET_NOLOSS); 1715341618Scy } else 1716341618Scy ieee80211_beacon_miss(ifp->if_l2com); 1717341618Scy} 1718341618Scy 1719341618Scy/* 1720341618Scy * Handle TKIP MIC setup to deal hardware that doesn't do MIC 1721341618Scy * calcs together with WME. If necessary disable the crypto 1722341618Scy * hardware and mark the 802.11 state so keys will be setup 1723341618Scy * with the MIC work done in software. 1724341618Scy */ 1725341618Scystatic void 1726341618Scyath_settkipmic(struct ath_softc *sc) 1727341618Scy{ 1728214501Srpaulo struct ifnet *ifp = sc->sc_ifp; 1729252190Srpaulo struct ieee80211com *ic = ifp->if_l2com; 1730214501Srpaulo 1731252190Srpaulo if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) { 1732214501Srpaulo if (ic->ic_flags & IEEE80211_F_WME) { 1733214501Srpaulo ath_hal_settkipmic(sc->sc_ah, AH_FALSE); 1734214501Srpaulo ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC; 1735214501Srpaulo } else { 1736252190Srpaulo ath_hal_settkipmic(sc->sc_ah, AH_TRUE); 1737214501Srpaulo ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 1738281681Srpaulo } 1739281681Srpaulo } 1740214501Srpaulo} 1741214501Srpaulo 1742214501Srpaulostatic void 1743214501Srpauloath_init(void *arg) 1744214501Srpaulo{ 1745214501Srpaulo struct ath_softc *sc = (struct ath_softc *) arg; 1746281681Srpaulo struct ifnet *ifp = sc->sc_ifp; 1747281681Srpaulo struct ieee80211com *ic = ifp->if_l2com; 1748214501Srpaulo struct ath_hal *ah = sc->sc_ah; 1749214501Srpaulo HAL_STATUS status; 1750324714Scy 1751214501Srpaulo DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 
1752324714Scy __func__, ifp->if_flags); 1753214501Srpaulo 1754214501Srpaulo ATH_LOCK(sc); 1755281681Srpaulo /* 1756281681Srpaulo * Stop anything previously setup. This is safe 1757214501Srpaulo * whether this is the first time through or not. 1758214501Srpaulo */ 1759214501Srpaulo ath_stop_locked(ifp); 1760214501Srpaulo 1761214501Srpaulo /* 1762214501Srpaulo * The basic interface to setting the hardware in a good 1763214501Srpaulo * state is ``reset''. On return the hardware is known to 1764214501Srpaulo * be powered up and with interrupts disabled. This must 1765214501Srpaulo * be followed by initialization of the appropriate bits 1766214501Srpaulo * and then setup of the interrupt mask. 1767252190Srpaulo */ 1768214501Srpaulo ath_settkipmic(sc); 1769214501Srpaulo if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) { 1770214501Srpaulo if_printf(ifp, "unable to reset hardware; hal status %u\n", 1771214501Srpaulo status); 1772214501Srpaulo ATH_UNLOCK(sc); 1773214501Srpaulo return; 1774214501Srpaulo } 1775214501Srpaulo ath_chan_change(sc, ic->ic_curchan); 1776214501Srpaulo 1777214501Srpaulo /* Let DFS at it in case it's a DFS channel */ 1778214501Srpaulo ath_dfs_radar_enable(sc, ic->ic_curchan); 1779214501Srpaulo 1780214501Srpaulo /* 1781214501Srpaulo * Likewise this is set during reset so update 1782214501Srpaulo * state cached in the driver. 1783214501Srpaulo */ 1784214501Srpaulo sc->sc_diversity = ath_hal_getdiversity(ah); 1785214501Srpaulo sc->sc_lastlongcal = 0; 1786324714Scy sc->sc_resetcal = 1; 1787214501Srpaulo sc->sc_lastcalreset = 0; 1788214501Srpaulo sc->sc_lastani = 0; 1789214501Srpaulo sc->sc_lastshortcal = 0; 1790214501Srpaulo sc->sc_doresetcal = AH_FALSE; 1791214501Srpaulo /* 1792281681Srpaulo * Beacon timers were cleared here; give ath_newstate() 1793281681Srpaulo * a hint that the beacon timers should be poked when 1794281681Srpaulo * things transition to the RUN state. 
1795214501Srpaulo */ 1796214501Srpaulo sc->sc_beacons = 0; 1797214501Srpaulo 1798214501Srpaulo /* 1799214501Srpaulo * Initial aggregation settings. 1800252190Srpaulo */ 1801214501Srpaulo sc->sc_hwq_limit = ATH_AGGR_MIN_QDEPTH; 1802252190Srpaulo sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW; 1803252190Srpaulo sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH; 1804214501Srpaulo 1805252190Srpaulo /* 1806214501Srpaulo * Setup the hardware after reset: the key cache 1807214501Srpaulo * is filled as needed and the receive engine is 1808341618Scy * set going. Frame transmit is handled entirely 1809341618Scy * in the frame output path; there's nothing to do 1810341618Scy * here except setup the interrupt mask. 1811214501Srpaulo */ 1812214501Srpaulo if (ath_startrecv(sc) != 0) { 1813252190Srpaulo if_printf(ifp, "unable to start recv logic\n"); 1814252190Srpaulo ATH_UNLOCK(sc); 1815252190Srpaulo return; 1816214501Srpaulo } 1817214501Srpaulo 1818214501Srpaulo /* 1819214501Srpaulo * Enable interrupts. 1820214501Srpaulo */ 1821214501Srpaulo sc->sc_imask = HAL_INT_RX | HAL_INT_TX 1822289284Srpaulo | HAL_INT_RXEOL | HAL_INT_RXORN 1823214501Srpaulo | HAL_INT_FATAL | HAL_INT_GLOBAL; 1824214501Srpaulo /* 1825214501Srpaulo * Enable MIB interrupts when there are hardware phy counters. 1826214501Srpaulo * Note we only do this (at the moment) for station mode. 
1827214501Srpaulo */ 1828252190Srpaulo if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) 1829214501Srpaulo sc->sc_imask |= HAL_INT_MIB; 1830252190Srpaulo 1831214501Srpaulo /* Enable global TX timeout and carrier sense timeout if available */ 1832252190Srpaulo if (ath_hal_gtxto_supported(ah)) 1833289284Srpaulo sc->sc_imask |= HAL_INT_GTT; 1834289284Srpaulo 1835324714Scy DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n", 1836289284Srpaulo __func__, sc->sc_imask); 1837289284Srpaulo 1838289284Srpaulo ifp->if_drv_flags |= IFF_DRV_RUNNING; 1839289284Srpaulo callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc); 1840214501Srpaulo ath_hal_intrset(ah, sc->sc_imask); 1841214501Srpaulo 1842214501Srpaulo ATH_UNLOCK(sc); 1843214501Srpaulo 1844214501Srpaulo#ifdef ATH_TX99_DIAG 1845214501Srpaulo if (sc->sc_tx99 != NULL) 1846214501Srpaulo sc->sc_tx99->start(sc->sc_tx99); 1847214501Srpaulo else 1848214501Srpaulo#endif 1849214501Srpaulo ieee80211_start_all(ic); /* start all vap's */ 1850289284Srpaulo} 1851214501Srpaulo 1852214501Srpaulostatic void 1853214501Srpauloath_stop_locked(struct ifnet *ifp) 1854252190Srpaulo{ 1855214501Srpaulo struct ath_softc *sc = ifp->if_softc; 1856252190Srpaulo struct ath_hal *ah = sc->sc_ah; 1857214501Srpaulo 1858214501Srpaulo DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n", 1859324714Scy __func__, sc->sc_invalid, ifp->if_flags); 1860214501Srpaulo 1861214501Srpaulo ATH_LOCK_ASSERT(sc); 1862214501Srpaulo if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1863341618Scy /* 1864214501Srpaulo * Shutdown the hardware and driver: 1865214501Srpaulo * reset 802.11 state machine 1866281681Srpaulo * turn off timers 1867281681Srpaulo * disable interrupts 1868214501Srpaulo * turn off the radio 1869214501Srpaulo * clear transmit machinery 1870214501Srpaulo * clear receive machinery 1871214501Srpaulo * drain and release tx queues 1872214501Srpaulo * reclaim beacon resources 1873214501Srpaulo * power down hardware 1874252190Srpaulo * 1875214501Srpaulo * Note that 
some of this work is not possible if the 1876252190Srpaulo * hardware is gone (invalid). 1877214501Srpaulo */ 1878214501Srpaulo#ifdef ATH_TX99_DIAG 1879214501Srpaulo if (sc->sc_tx99 != NULL) 1880324714Scy sc->sc_tx99->stop(sc->sc_tx99); 1881214501Srpaulo#endif 1882214501Srpaulo callout_stop(&sc->sc_wd_ch); 1883252190Srpaulo sc->sc_wd_timer = 0; 1884252190Srpaulo ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1885214501Srpaulo if (!sc->sc_invalid) { 1886214501Srpaulo if (sc->sc_softled) { 1887281681Srpaulo callout_stop(&sc->sc_ledtimer); 1888281681Srpaulo ath_hal_gpioset(ah, sc->sc_ledpin, 1889214501Srpaulo !sc->sc_ledon); 1890214501Srpaulo sc->sc_blinking = 0; 1891214501Srpaulo } 1892214501Srpaulo ath_hal_intrset(ah, 0); 1893214501Srpaulo } 1894214501Srpaulo ath_draintxq(sc, ATH_RESET_DEFAULT); 1895214501Srpaulo if (!sc->sc_invalid) { 1896214501Srpaulo ath_stoprecv(sc, 1); 1897252190Srpaulo ath_hal_phydisable(ah); 1898214501Srpaulo } else 1899252190Srpaulo sc->sc_rxlink = NULL; 1900214501Srpaulo ath_beacon_free(sc); /* XXX not needed */ 1901214501Srpaulo } 1902214501Srpaulo} 1903214501Srpaulo 1904214501Srpaulo#define MAX_TXRX_ITERATIONS 1000 1905252190Srpaulostatic void 1906214501Srpauloath_txrx_stop(struct ath_softc *sc) 1907252190Srpaulo{ 1908214501Srpaulo int i = MAX_TXRX_ITERATIONS; 1909214501Srpaulo 1910214501Srpaulo ATH_UNLOCK_ASSERT(sc); 1911252190Srpaulo /* Stop any new TX/RX from occuring */ 1912252190Srpaulo taskqueue_block(sc->sc_tq); 1913252190Srpaulo 1914252190Srpaulo ATH_PCU_LOCK(sc); 1915252190Srpaulo /* 1916252190Srpaulo * Sleep until all the pending operations have completed. 1917252190Srpaulo * 1918252190Srpaulo * The caller must ensure that reset has been incremented 1919252190Srpaulo * or the pending operations may continue being queued. 
1920252190Srpaulo */ 1921252190Srpaulo while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt || 1922252190Srpaulo sc->sc_txstart_cnt || sc->sc_intr_cnt) { 1923252190Srpaulo if (i <= 0) 1924252190Srpaulo break; 1925252190Srpaulo msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 1); 1926252190Srpaulo i--; 1927252190Srpaulo } 1928252190Srpaulo ATH_PCU_UNLOCK(sc); 1929252190Srpaulo 1930252190Srpaulo if (i <= 0) 1931252190Srpaulo device_printf(sc->sc_dev, 1932252190Srpaulo "%s: didn't finish after %d iterations\n", 1933252190Srpaulo __func__, MAX_TXRX_ITERATIONS); 1934252190Srpaulo} 1935252190Srpaulo#undef MAX_TXRX_ITERATIONS 1936252190Srpaulo 1937252190Srpaulostatic void 1938252190Srpauloath_txrx_start(struct ath_softc *sc) 1939252190Srpaulo{ 1940252190Srpaulo 1941252190Srpaulo taskqueue_unblock(sc->sc_tq); 1942252190Srpaulo} 1943252190Srpaulo 1944252190Srpaulo/* 1945252190Srpaulo * Grab the reset lock, and wait around until noone else 1946252190Srpaulo * is trying to do anything with it. 1947252190Srpaulo * 1948252190Srpaulo * This is totally horrible but we can't hold this lock for 1949252190Srpaulo * long enough to do TX/RX or we end up with net80211/ip stack 1950252190Srpaulo * LORs and eventual deadlock. 1951252190Srpaulo * 1952252190Srpaulo * "dowait" signals whether to spin, waiting for the reset 1953281681Srpaulo * lock count to reach 0. This should (for now) only be used 1954281681Srpaulo * during the reset path, as the rest of the code may not 1955281681Srpaulo * be locking-reentrant enough to behave correctly. 1956281681Srpaulo * 1957281681Srpaulo * Another, cleaner way should be found to serialise all of 1958281681Srpaulo * these operations. 
1959281681Srpaulo */ 1960281681Srpaulo#define MAX_RESET_ITERATIONS 10 1961281681Srpaulostatic int 1962281681Srpauloath_reset_grablock(struct ath_softc *sc, int dowait) 1963281681Srpaulo{ 1964281681Srpaulo int w = 0; 1965281681Srpaulo int i = MAX_RESET_ITERATIONS; 1966281681Srpaulo 1967281681Srpaulo ATH_PCU_LOCK_ASSERT(sc); 1968281681Srpaulo do { 1969281681Srpaulo if (sc->sc_inreset_cnt == 0) { 1970281681Srpaulo w = 1; 1971281681Srpaulo break; 1972252190Srpaulo } 1973252190Srpaulo if (dowait == 0) { 1974252190Srpaulo w = 0; 1975252190Srpaulo break; 1976341618Scy } 1977252190Srpaulo ATH_PCU_UNLOCK(sc); 1978252190Srpaulo pause("ath_reset_grablock", 1); 1979324714Scy i--; 1980324714Scy ATH_PCU_LOCK(sc); 1981252190Srpaulo } while (i > 0); 1982252190Srpaulo 1983252190Srpaulo /* 1984252190Srpaulo * We always increment the refcounter, regardless 1985252190Srpaulo * of whether we succeeded to get it in an exclusive 1986252190Srpaulo * way. 1987252190Srpaulo */ 1988252190Srpaulo sc->sc_inreset_cnt++; 1989252190Srpaulo 1990252190Srpaulo if (i <= 0) 1991324714Scy device_printf(sc->sc_dev, 1992252190Srpaulo "%s: didn't finish after %d iterations\n", 1993252190Srpaulo __func__, MAX_RESET_ITERATIONS); 1994252190Srpaulo 1995252190Srpaulo if (w == 0) 1996252190Srpaulo device_printf(sc->sc_dev, 1997252190Srpaulo "%s: warning, recursive reset path!\n", 1998252190Srpaulo __func__); 1999252190Srpaulo 2000252190Srpaulo return w; 2001252190Srpaulo} 2002341618Scy#undef MAX_RESET_ITERATIONS 2003252190Srpaulo 2004252190Srpaulo/* 2005289284Srpaulo * XXX TODO: write ath_reset_releaselock 2006289284Srpaulo */ 2007252190Srpaulo 2008252190Srpaulostatic void 2009252190Srpauloath_stop(struct ifnet *ifp) 2010252190Srpaulo{ 2011252190Srpaulo struct ath_softc *sc = ifp->if_softc; 2012252190Srpaulo 2013252190Srpaulo ATH_LOCK(sc); 2014252190Srpaulo ath_stop_locked(ifp); 2015252190Srpaulo ATH_UNLOCK(sc); 2016252190Srpaulo} 2017252190Srpaulo 2018252190Srpaulo/* 2019252190Srpaulo * Reset the hardware w/o 
 * losing operational state.  This is
 * basically a more efficient way of doing ath_stop, ath_init,
 * followed by state transitions to the current 802.11
 * operational state.  Used to recover from various errors and
 * to reset or reload hardware state.
 */
int
ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;
	int i;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);

	/* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
	ATH_PCU_UNLOCK_ASSERT(sc);
	ATH_UNLOCK_ASSERT(sc);

	/* Take the reset refcount and mask interrupts for the duration */
	ATH_PCU_LOCK(sc);
	if (ath_reset_grablock(sc, 1) == 0) {
		device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
		    __func__);
	}
	ath_hal_intrset(ah, 0);		/* disable interrupts */
	ATH_PCU_UNLOCK(sc);

	/*
	 * Should now wait for pending TX/RX to complete
	 * and block future ones from occuring. This needs to be
	 * done before the TX queue is drained.
	 */
	ath_txrx_stop(sc);
	ath_draintxq(sc, reset_type);	/* stop xmit side */

	/*
	 * Regardless of whether we're doing a no-loss flush or
	 * not, stop the PCU and handle what's in the RX queue.
	 * That way frames aren't dropped which shouldn't be.
	 */
	ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
	ath_rx_proc(sc, 0);

	ath_settkipmic(sc);		/* configure TKIP MIC handling */
	/* NB: indicate channel change so we do a full reset */
	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
		if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
			__func__, status);
	sc->sc_diversity = ath_hal_getdiversity(ah);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	if (ath_startrecv(sc) != 0)	/* restart recv */
		if_printf(ifp, "%s: unable to start recv logic\n", __func__);
	/*
	 * We may be doing a reset in response to an ioctl
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_chan_change(sc, ic->ic_curchan);
	if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
		if (sc->sc_tdma)
			ath_tdma_config(sc, NULL);
		else
#endif
			ath_beacon_config(sc, NULL);
	}

	/*
	 * Release the reset lock and re-enable interrupts here.
	 * If an interrupt was being processed in ath_intr(),
	 * it would disable interrupts at this point. So we have
	 * to atomically enable interrupts and decrement the
	 * reset counter - this way ath_intr() doesn't end up
	 * disabling interrupts without a corresponding enable
	 * in the rest or channel change path.
	 */
	ATH_PCU_LOCK(sc);
	sc->sc_inreset_cnt--;
	/* XXX only do this if sc_inreset_cnt == 0? */
	ath_hal_intrset(ah, sc->sc_imask);
	ATH_PCU_UNLOCK(sc);

	/*
	 * TX and RX can be started here. If it were started with
	 * sc_inreset_cnt > 0, the TX and RX path would abort.
	 * Thus if this is a nested call through the reset or
	 * channel change code, TX completion will occur but
	 * RX completion and ath_start / ath_tx_start will not
	 * run.
	 */

	/* Restart TX/RX as needed */
	ath_txrx_start(sc);

	/* XXX Restart TX completion and pending TX */
	if (reset_type == ATH_RESET_NOLOSS) {
		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				ATH_TXQ_LOCK(&sc->sc_txq[i]);
				ath_txq_restart_dma(sc, &sc->sc_txq[i]);
				ath_txq_sched(sc, &sc->sc_txq[i]);
				ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
			}
		}
	}

	/*
	 * This may have been set during an ath_start() call which
	 * set this once it detected a concurrent TX was going on.
	 * So, clear it.
	 */
	/* XXX do this inside of IF_LOCK? */
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Handle any frames in the TX queue */
	/*
	 * XXX should this be done by the caller, rather than
	 * ath_reset() ?
	 */
	ath_start(ifp);			/* restart xmit */
	return 0;
}

/*
 * Per-vap reset hook from net80211.  TX power changes are
 * applied directly; anything else triggers a full chip reset.
 */
static int
ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	switch (cmd) {
	case IEEE80211_IOC_TXPOWER:
		/*
		 * If per-packet TPC is enabled, then we have nothing
		 * to do; otherwise we need to force the global limit.
		 * All this can happen directly; no need to reset.
		 */
		if (!ath_hal_gettpc(ah))
			ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
		return 0;
	}
	/* XXX? Full or NOLOSS? */
	return ath_reset(ifp, ATH_RESET_FULL);
}

/*
 * Pop the first non-busy buffer off the TX free list (with the
 * TXBUF lock held) and clear its basic fields.  Returns NULL if
 * the list is empty or the head buffer is still busy.
 */
struct ath_buf *
_ath_getbuf_locked(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ATH_TXBUF_LOCK_ASSERT(sc);

	bf = TAILQ_FIRST(&sc->sc_txbuf);
	if (bf == NULL) {
		sc->sc_stats.ast_tx_getnobuf++;
	} else {
		if (bf->bf_flags & ATH_BUF_BUSY) {
			sc->sc_stats.ast_tx_getbusybuf++;
			bf = NULL;
		}
	}

	if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0)
		TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
	else
		bf = NULL;

	if (bf == NULL) {
		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
		    TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
			"out of xmit buffers" : "xmit buffer busy");
		return NULL;
	}

	/* Valid bf here; clear some basic fields */
	bf->bf_next = NULL;	/* XXX just to be sure */
	bf->bf_last = NULL;	/* XXX again, just to be sure */
	bf->bf_comp = NULL;	/* XXX again, just to be sure */
	bzero(&bf->bf_state, sizeof(bf->bf_state));

	return bf;
}

/*
 * When retrying a software frame, buffers marked ATH_BUF_BUSY
 * can't be thrown back on the queue as they could still be
 * in use by the hardware.
 *
 * This duplicates the buffer, or returns NULL.
2214252190Srpaulo * 2215252190Srpaulo * The descriptor is also copied but the link pointers and 2216252190Srpaulo * the DMA segments aren't copied; this frame should thus 2217252190Srpaulo * be again passed through the descriptor setup/chain routines 2218252190Srpaulo * so the link is correct. 2219252190Srpaulo * 2220252190Srpaulo * The caller must free the buffer using ath_freebuf(). 2221252190Srpaulo * 2222252190Srpaulo * XXX TODO: this call shouldn't fail as it'll cause packet loss 2223252190Srpaulo * XXX in the TX pathway when retries are needed. 2224252190Srpaulo * XXX Figure out how to keep some buffers free, or factor the 2225252190Srpaulo * XXX number of busy buffers into the xmit path (ath_start()) 2226252190Srpaulo * XXX so we don't over-commit. 2227252190Srpaulo */ 2228252190Srpaulostruct ath_buf * 2229252190Srpauloath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf) 2230252190Srpaulo{ 2231252190Srpaulo struct ath_buf *tbf; 2232252190Srpaulo 2233252190Srpaulo tbf = ath_getbuf(sc); 2234252190Srpaulo if (tbf == NULL) 2235252190Srpaulo return NULL; /* XXX failure? Why? 
*/ 2236252190Srpaulo 2237252190Srpaulo /* Copy basics */ 2238252190Srpaulo tbf->bf_next = NULL; 2239252190Srpaulo tbf->bf_nseg = bf->bf_nseg; 2240252190Srpaulo tbf->bf_txflags = bf->bf_txflags; 2241252190Srpaulo tbf->bf_flags = bf->bf_flags & ~ATH_BUF_BUSY; 2242252190Srpaulo tbf->bf_status = bf->bf_status; 2243252190Srpaulo tbf->bf_m = bf->bf_m; 2244252190Srpaulo tbf->bf_node = bf->bf_node; 2245252190Srpaulo /* will be setup by the chain/setup function */ 2246252190Srpaulo tbf->bf_lastds = NULL; 2247252190Srpaulo /* for now, last == self */ 2248252190Srpaulo tbf->bf_last = tbf; 2249252190Srpaulo tbf->bf_comp = bf->bf_comp; 2250252190Srpaulo 2251252190Srpaulo /* NOTE: DMA segments will be setup by the setup/chain functions */ 2252252190Srpaulo 2253252190Srpaulo /* The caller has to re-init the descriptor + links */ 2254252190Srpaulo 2255252190Srpaulo /* Copy state */ 2256252190Srpaulo memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state)); 2257252190Srpaulo 2258252190Srpaulo return tbf; 2259252190Srpaulo} 2260281681Srpaulo 2261252190Srpaulostruct ath_buf * 2262252190Srpauloath_getbuf(struct ath_softc *sc) 2263214501Srpaulo{ 2264214501Srpaulo struct ath_buf *bf; 2265252190Srpaulo 2266252190Srpaulo ATH_TXBUF_LOCK(sc); 2267252190Srpaulo bf = _ath_getbuf_locked(sc); 2268252190Srpaulo if (bf == NULL) { 2269252190Srpaulo struct ifnet *ifp = sc->sc_ifp; 2270252190Srpaulo 2271252190Srpaulo DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__); 2272252190Srpaulo sc->sc_stats.ast_tx_qstop++; 2273252190Srpaulo /* XXX do this inside of IF_LOCK? 
*/ 2274252190Srpaulo ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2275252190Srpaulo } 2276252190Srpaulo ATH_TXBUF_UNLOCK(sc); 2277252190Srpaulo return bf; 2278252190Srpaulo} 2279252190Srpaulo 2280252190Srpaulostatic void 2281252190Srpauloath_start(struct ifnet *ifp) 2282252190Srpaulo{ 2283252190Srpaulo struct ath_softc *sc = ifp->if_softc; 2284252190Srpaulo struct ieee80211_node *ni; 2285252190Srpaulo struct ath_buf *bf; 2286341618Scy struct mbuf *m, *next; 2287252190Srpaulo ath_bufhead frags; 2288252190Srpaulo 2289252190Srpaulo if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) 2290341618Scy return; 2291289284Srpaulo 2292252190Srpaulo /* XXX is it ok to hold the ATH_LOCK here? */ 2293252190Srpaulo ATH_PCU_LOCK(sc); 2294252190Srpaulo if (sc->sc_inreset_cnt > 0) { 2295252190Srpaulo device_printf(sc->sc_dev, 2296281681Srpaulo "%s: sc_inreset_cnt > 0; bailing\n", __func__); 2297252190Srpaulo /* XXX do this inside of IF_LOCK? */ 2298281681Srpaulo ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2299281681Srpaulo ATH_PCU_UNLOCK(sc); 2300214501Srpaulo return; 2301 } 2302 sc->sc_txstart_cnt++; 2303 ATH_PCU_UNLOCK(sc); 2304 2305 for (;;) { 2306 /* 2307 * Grab a TX buffer and associated resources. 2308 */ 2309 bf = ath_getbuf(sc); 2310 if (bf == NULL) 2311 break; 2312 2313 IFQ_DEQUEUE(&ifp->if_snd, m); 2314 if (m == NULL) { 2315 ATH_TXBUF_LOCK(sc); 2316 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 2317 ATH_TXBUF_UNLOCK(sc); 2318 break; 2319 } 2320 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; 2321 /* 2322 * Check for fragmentation. If this frame 2323 * has been broken up verify we have enough 2324 * buffers to send all the fragments so all 2325 * go out or none... 
2326 */ 2327 TAILQ_INIT(&frags); 2328 if ((m->m_flags & M_FRAG) && 2329 !ath_txfrag_setup(sc, &frags, m, ni)) { 2330 DPRINTF(sc, ATH_DEBUG_XMIT, 2331 "%s: out of txfrag buffers\n", __func__); 2332 sc->sc_stats.ast_tx_nofrag++; 2333 ifp->if_oerrors++; 2334 ath_freetx(m); 2335 goto bad; 2336 } 2337 ifp->if_opackets++; 2338 nextfrag: 2339 /* 2340 * Pass the frame to the h/w for transmission. 2341 * Fragmented frames have each frag chained together 2342 * with m_nextpkt. We know there are sufficient ath_buf's 2343 * to send all the frags because of work done by 2344 * ath_txfrag_setup. We leave m_nextpkt set while 2345 * calling ath_tx_start so it can use it to extend the 2346 * the tx duration to cover the subsequent frag and 2347 * so it can reclaim all the mbufs in case of an error; 2348 * ath_tx_start clears m_nextpkt once it commits to 2349 * handing the frame to the hardware. 2350 */ 2351 next = m->m_nextpkt; 2352 if (ath_tx_start(sc, ni, bf, m)) { 2353 bad: 2354 ifp->if_oerrors++; 2355 reclaim: 2356 bf->bf_m = NULL; 2357 bf->bf_node = NULL; 2358 ATH_TXBUF_LOCK(sc); 2359 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 2360 ath_txfrag_cleanup(sc, &frags, ni); 2361 ATH_TXBUF_UNLOCK(sc); 2362 if (ni != NULL) 2363 ieee80211_free_node(ni); 2364 continue; 2365 } 2366 if (next != NULL) { 2367 /* 2368 * Beware of state changing between frags. 2369 * XXX check sta power-save state? 
2370 */ 2371 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { 2372 DPRINTF(sc, ATH_DEBUG_XMIT, 2373 "%s: flush fragmented packet, state %s\n", 2374 __func__, 2375 ieee80211_state_name[ni->ni_vap->iv_state]); 2376 ath_freetx(next); 2377 goto reclaim; 2378 } 2379 m = next; 2380 bf = TAILQ_FIRST(&frags); 2381 KASSERT(bf != NULL, ("no buf for txfrag")); 2382 TAILQ_REMOVE(&frags, bf, bf_list); 2383 goto nextfrag; 2384 } 2385 2386 sc->sc_wd_timer = 5; 2387 } 2388 2389 ATH_PCU_LOCK(sc); 2390 sc->sc_txstart_cnt--; 2391 ATH_PCU_UNLOCK(sc); 2392} 2393 2394static int 2395ath_media_change(struct ifnet *ifp) 2396{ 2397 int error = ieee80211_media_change(ifp); 2398 /* NB: only the fixed rate can change and that doesn't need a reset */ 2399 return (error == ENETRESET ? 0 : error); 2400} 2401 2402/* 2403 * Block/unblock tx+rx processing while a key change is done. 2404 * We assume the caller serializes key management operations 2405 * so we only need to worry about synchronization with other 2406 * uses that originate in the driver. 
2407 */ 2408static void 2409ath_key_update_begin(struct ieee80211vap *vap) 2410{ 2411 struct ifnet *ifp = vap->iv_ic->ic_ifp; 2412 struct ath_softc *sc = ifp->if_softc; 2413 2414 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2415 taskqueue_block(sc->sc_tq); 2416 IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */ 2417} 2418 2419static void 2420ath_key_update_end(struct ieee80211vap *vap) 2421{ 2422 struct ifnet *ifp = vap->iv_ic->ic_ifp; 2423 struct ath_softc *sc = ifp->if_softc; 2424 2425 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2426 IF_UNLOCK(&ifp->if_snd); 2427 taskqueue_unblock(sc->sc_tq); 2428} 2429 2430/* 2431 * Calculate the receive filter according to the 2432 * operating mode and state: 2433 * 2434 * o always accept unicast, broadcast, and multicast traffic 2435 * o accept PHY error frames when hardware doesn't have MIB support 2436 * to count and we need them for ANI (sta mode only until recently) 2437 * and we are not scanning (ANI is disabled) 2438 * NB: older hal's add rx filter bits out of sight and we need to 2439 * blindly preserve them 2440 * o probe request frames are accepted only when operating in 2441 * hostap, adhoc, mesh, or monitor modes 2442 * o enable promiscuous mode 2443 * - when in monitor mode 2444 * - if interface marked PROMISC (assumes bridge setting is filtered) 2445 * o accept beacons: 2446 * - when operating in station mode for collecting rssi data when 2447 * the station is otherwise quiet, or 2448 * - when operating in adhoc mode so the 802.11 layer creates 2449 * node table entries for peers, 2450 * - when scanning 2451 * - when doing s/w beacon miss (e.g. 
for ap+sta) 2452 * - when operating in ap mode in 11g to detect overlapping bss that 2453 * require protection 2454 * - when operating in mesh mode to detect neighbors 2455 * o accept control frames: 2456 * - when in monitor mode 2457 * XXX HT protection for 11n 2458 */ 2459static u_int32_t 2460ath_calcrxfilter(struct ath_softc *sc) 2461{ 2462 struct ifnet *ifp = sc->sc_ifp; 2463 struct ieee80211com *ic = ifp->if_l2com; 2464 u_int32_t rfilt; 2465 2466 rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST; 2467 if (!sc->sc_needmib && !sc->sc_scanning) 2468 rfilt |= HAL_RX_FILTER_PHYERR; 2469 if (ic->ic_opmode != IEEE80211_M_STA) 2470 rfilt |= HAL_RX_FILTER_PROBEREQ; 2471 /* XXX ic->ic_monvaps != 0? */ 2472 if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC)) 2473 rfilt |= HAL_RX_FILTER_PROM; 2474 if (ic->ic_opmode == IEEE80211_M_STA || 2475 ic->ic_opmode == IEEE80211_M_IBSS || 2476 sc->sc_swbmiss || sc->sc_scanning) 2477 rfilt |= HAL_RX_FILTER_BEACON; 2478 /* 2479 * NB: We don't recalculate the rx filter when 2480 * ic_protmode changes; otherwise we could do 2481 * this only when ic_protmode != NONE. 2482 */ 2483 if (ic->ic_opmode == IEEE80211_M_HOSTAP && 2484 IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) 2485 rfilt |= HAL_RX_FILTER_BEACON; 2486 2487 /* 2488 * Enable hardware PS-POLL RX only for hostap mode; 2489 * STA mode sends PS-POLL frames but never 2490 * receives them. 2491 */ 2492 if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PSPOLL, 2493 0, NULL) == HAL_OK && 2494 ic->ic_opmode == IEEE80211_M_HOSTAP) 2495 rfilt |= HAL_RX_FILTER_PSPOLL; 2496 2497 if (sc->sc_nmeshvaps) { 2498 rfilt |= HAL_RX_FILTER_BEACON; 2499 if (sc->sc_hasbmatch) 2500 rfilt |= HAL_RX_FILTER_BSSID; 2501 else 2502 rfilt |= HAL_RX_FILTER_PROM; 2503 } 2504 if (ic->ic_opmode == IEEE80211_M_MONITOR) 2505 rfilt |= HAL_RX_FILTER_CONTROL; 2506 2507 /* 2508 * Enable RX of compressed BAR frames only when doing 2509 * 802.11n. Required for A-MPDU. 
2510 */ 2511 if (IEEE80211_IS_CHAN_HT(ic->ic_curchan)) 2512 rfilt |= HAL_RX_FILTER_COMPBAR; 2513 2514 /* 2515 * Enable radar PHY errors if requested by the 2516 * DFS module. 2517 */ 2518 if (sc->sc_dodfs) 2519 rfilt |= HAL_RX_FILTER_PHYRADAR; 2520 2521 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n", 2522 __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags); 2523 return rfilt; 2524} 2525 2526static void 2527ath_update_promisc(struct ifnet *ifp) 2528{ 2529 struct ath_softc *sc = ifp->if_softc; 2530 u_int32_t rfilt; 2531 2532 /* configure rx filter */ 2533 rfilt = ath_calcrxfilter(sc); 2534 ath_hal_setrxfilter(sc->sc_ah, rfilt); 2535 2536 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); 2537} 2538 2539static void 2540ath_update_mcast(struct ifnet *ifp) 2541{ 2542 struct ath_softc *sc = ifp->if_softc; 2543 u_int32_t mfilt[2]; 2544 2545 /* calculate and install multicast filter */ 2546 if ((ifp->if_flags & IFF_ALLMULTI) == 0) { 2547 struct ifmultiaddr *ifma; 2548 /* 2549 * Merge multicast addresses to form the hardware filter. 2550 */ 2551 mfilt[0] = mfilt[1] = 0; 2552 if_maddr_rlock(ifp); /* XXX need some fiddling to remove? 
*/ 2553 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2554 caddr_t dl; 2555 u_int32_t val; 2556 u_int8_t pos; 2557 2558 /* calculate XOR of eight 6bit values */ 2559 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 2560 val = LE_READ_4(dl + 0); 2561 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2562 val = LE_READ_4(dl + 3); 2563 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2564 pos &= 0x3f; 2565 mfilt[pos / 32] |= (1 << (pos % 32)); 2566 } 2567 if_maddr_runlock(ifp); 2568 } else 2569 mfilt[0] = mfilt[1] = ~0; 2570 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]); 2571 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n", 2572 __func__, mfilt[0], mfilt[1]); 2573} 2574 2575static void 2576ath_mode_init(struct ath_softc *sc) 2577{ 2578 struct ifnet *ifp = sc->sc_ifp; 2579 struct ath_hal *ah = sc->sc_ah; 2580 u_int32_t rfilt; 2581 2582 /* configure rx filter */ 2583 rfilt = ath_calcrxfilter(sc); 2584 ath_hal_setrxfilter(ah, rfilt); 2585 2586 /* configure operational mode */ 2587 ath_hal_setopmode(ah); 2588 2589 /* handle any link-level address change */ 2590 ath_hal_setmac(ah, IF_LLADDR(ifp)); 2591 2592 /* calculate and install multicast filter */ 2593 ath_update_mcast(ifp); 2594} 2595 2596/* 2597 * Set the slot time based on the current setting. 
2598 */ 2599static void 2600ath_setslottime(struct ath_softc *sc) 2601{ 2602 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 2603 struct ath_hal *ah = sc->sc_ah; 2604 u_int usec; 2605 2606 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan)) 2607 usec = 13; 2608 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan)) 2609 usec = 21; 2610 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { 2611 /* honor short/long slot time only in 11g */ 2612 /* XXX shouldn't honor on pure g or turbo g channel */ 2613 if (ic->ic_flags & IEEE80211_F_SHSLOT) 2614 usec = HAL_SLOT_TIME_9; 2615 else 2616 usec = HAL_SLOT_TIME_20; 2617 } else 2618 usec = HAL_SLOT_TIME_9; 2619 2620 DPRINTF(sc, ATH_DEBUG_RESET, 2621 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n", 2622 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, 2623 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec); 2624 2625 ath_hal_setslottime(ah, usec); 2626 sc->sc_updateslot = OK; 2627} 2628 2629/* 2630 * Callback from the 802.11 layer to update the 2631 * slot time based on the current setting. 2632 */ 2633static void 2634ath_updateslot(struct ifnet *ifp) 2635{ 2636 struct ath_softc *sc = ifp->if_softc; 2637 struct ieee80211com *ic = ifp->if_l2com; 2638 2639 /* 2640 * When not coordinating the BSS, change the hardware 2641 * immediately. For other operation we defer the change 2642 * until beacon updates have propagated to the stations. 2643 */ 2644 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 2645 ic->ic_opmode == IEEE80211_M_MBSS) 2646 sc->sc_updateslot = UPDATE; 2647 else 2648 ath_setslottime(sc); 2649} 2650 2651/* 2652 * Setup a h/w transmit queue for beacons. 
2653 */ 2654static int 2655ath_beaconq_setup(struct ath_hal *ah) 2656{ 2657 HAL_TXQ_INFO qi; 2658 2659 memset(&qi, 0, sizeof(qi)); 2660 qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 2661 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 2662 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 2663 /* NB: for dynamic turbo, don't enable any other interrupts */ 2664 qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE; 2665 return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi); 2666} 2667 2668/* 2669 * Setup the transmit queue parameters for the beacon queue. 2670 */ 2671static int 2672ath_beaconq_config(struct ath_softc *sc) 2673{ 2674#define ATH_EXPONENT_TO_VALUE(v) ((1<<(v))-1) 2675 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 2676 struct ath_hal *ah = sc->sc_ah; 2677 HAL_TXQ_INFO qi; 2678 2679 ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi); 2680 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 2681 ic->ic_opmode == IEEE80211_M_MBSS) { 2682 /* 2683 * Always burst out beacon and CAB traffic. 2684 */ 2685 qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT; 2686 qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT; 2687 qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT; 2688 } else { 2689 struct wmeParams *wmep = 2690 &ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE]; 2691 /* 2692 * Adhoc mode; important thing is to use 2x cwmin. 2693 */ 2694 qi.tqi_aifs = wmep->wmep_aifsn; 2695 qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 2696 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 2697 } 2698 2699 if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) { 2700 device_printf(sc->sc_dev, "unable to update parameters for " 2701 "beacon hardware queue!\n"); 2702 return 0; 2703 } else { 2704 ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */ 2705 return 1; 2706 } 2707#undef ATH_EXPONENT_TO_VALUE 2708} 2709 2710/* 2711 * Allocate and setup an initial beacon frame. 
2712 */ 2713static int 2714ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni) 2715{ 2716 struct ieee80211vap *vap = ni->ni_vap; 2717 struct ath_vap *avp = ATH_VAP(vap); 2718 struct ath_buf *bf; 2719 struct mbuf *m; 2720 int error; 2721 2722 bf = avp->av_bcbuf; 2723 DPRINTF(sc, ATH_DEBUG_NODE, "%s: bf_m=%p, bf_node=%p\n", 2724 __func__, bf->bf_m, bf->bf_node); 2725 if (bf->bf_m != NULL) { 2726 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2727 m_freem(bf->bf_m); 2728 bf->bf_m = NULL; 2729 } 2730 if (bf->bf_node != NULL) { 2731 ieee80211_free_node(bf->bf_node); 2732 bf->bf_node = NULL; 2733 } 2734 2735 /* 2736 * NB: the beacon data buffer must be 32-bit aligned; 2737 * we assume the mbuf routines will return us something 2738 * with this alignment (perhaps should assert). 2739 */ 2740 m = ieee80211_beacon_alloc(ni, &avp->av_boff); 2741 if (m == NULL) { 2742 device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__); 2743 sc->sc_stats.ast_be_nombuf++; 2744 return ENOMEM; 2745 } 2746 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, 2747 bf->bf_segs, &bf->bf_nseg, 2748 BUS_DMA_NOWAIT); 2749 if (error != 0) { 2750 device_printf(sc->sc_dev, 2751 "%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n", 2752 __func__, error); 2753 m_freem(m); 2754 return error; 2755 } 2756 2757 /* 2758 * Calculate a TSF adjustment factor required for staggered 2759 * beacons. Note that we assume the format of the beacon 2760 * frame leaves the tstamp field immediately following the 2761 * header. 2762 */ 2763 if (sc->sc_stagbeacons && avp->av_bslot > 0) { 2764 uint64_t tsfadjust; 2765 struct ieee80211_frame *wh; 2766 2767 /* 2768 * The beacon interval is in TU's; the TSF is in usecs. 2769 * We figure out how many TU's to add to align the timestamp 2770 * then convert to TSF units and handle byte swapping before 2771 * inserting it in the frame. The hardware will then add this 2772 * each time a beacon frame is sent. 
Note that we align vap's 2773 * 1..N and leave vap 0 untouched. This means vap 0 has a 2774 * timestamp in one beacon interval while the others get a 2775 * timstamp aligned to the next interval. 2776 */ 2777 tsfadjust = ni->ni_intval * 2778 (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF; 2779 tsfadjust = htole64(tsfadjust << 10); /* TU -> TSF */ 2780 2781 DPRINTF(sc, ATH_DEBUG_BEACON, 2782 "%s: %s beacons bslot %d intval %u tsfadjust %llu\n", 2783 __func__, sc->sc_stagbeacons ? "stagger" : "burst", 2784 avp->av_bslot, ni->ni_intval, 2785 (long long unsigned) le64toh(tsfadjust)); 2786 2787 wh = mtod(m, struct ieee80211_frame *); 2788 memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust)); 2789 } 2790 bf->bf_m = m; 2791 bf->bf_node = ieee80211_ref_node(ni); 2792 2793 return 0; 2794} 2795 2796/* 2797 * Setup the beacon frame for transmit. 2798 */ 2799static void 2800ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf) 2801{ 2802#define USE_SHPREAMBLE(_ic) \ 2803 (((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\ 2804 == IEEE80211_F_SHPREAMBLE) 2805 struct ieee80211_node *ni = bf->bf_node; 2806 struct ieee80211com *ic = ni->ni_ic; 2807 struct mbuf *m = bf->bf_m; 2808 struct ath_hal *ah = sc->sc_ah; 2809 struct ath_desc *ds; 2810 int flags, antenna; 2811 const HAL_RATE_TABLE *rt; 2812 u_int8_t rix, rate; 2813 2814 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n", 2815 __func__, m, m->m_len); 2816 2817 /* setup descriptors */ 2818 ds = bf->bf_desc; 2819 bf->bf_last = bf; 2820 bf->bf_lastds = ds; 2821 2822 flags = HAL_TXDESC_NOACK; 2823 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) { 2824 ds->ds_link = bf->bf_daddr; /* self-linked */ 2825 flags |= HAL_TXDESC_VEOL; 2826 /* 2827 * Let hardware handle antenna switching. 2828 */ 2829 antenna = sc->sc_txantenna; 2830 } else { 2831 ds->ds_link = 0; 2832 /* 2833 * Switch antenna every 4 beacons. 
2834 * XXX assumes two antenna 2835 */ 2836 if (sc->sc_txantenna != 0) 2837 antenna = sc->sc_txantenna; 2838 else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0) 2839 antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1); 2840 else 2841 antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1); 2842 } 2843 2844 KASSERT(bf->bf_nseg == 1, 2845 ("multi-segment beacon frame; nseg %u", bf->bf_nseg)); 2846 ds->ds_data = bf->bf_segs[0].ds_addr; 2847 /* 2848 * Calculate rate code. 2849 * XXX everything at min xmit rate 2850 */ 2851 rix = 0; 2852 rt = sc->sc_currates; 2853 rate = rt->info[rix].rateCode; 2854 if (USE_SHPREAMBLE(ic)) 2855 rate |= rt->info[rix].shortPreamble; 2856 ath_hal_setuptxdesc(ah, ds 2857 , m->m_len + IEEE80211_CRC_LEN /* frame length */ 2858 , sizeof(struct ieee80211_frame)/* header length */ 2859 , HAL_PKT_TYPE_BEACON /* Atheros packet type */ 2860 , ni->ni_txpower /* txpower XXX */ 2861 , rate, 1 /* series 0 rate/tries */ 2862 , HAL_TXKEYIX_INVALID /* no encryption */ 2863 , antenna /* antenna mode */ 2864 , flags /* no ack, veol for beacons */ 2865 , 0 /* rts/cts rate */ 2866 , 0 /* rts/cts duration */ 2867 ); 2868 /* NB: beacon's BufLen must be a multiple of 4 bytes */ 2869 ath_hal_filltxdesc(ah, ds 2870 , roundup(m->m_len, 4) /* buffer length */ 2871 , AH_TRUE /* first segment */ 2872 , AH_TRUE /* last segment */ 2873 , ds /* first descriptor */ 2874 ); 2875#if 0 2876 ath_desc_swap(ds); 2877#endif 2878#undef USE_SHPREAMBLE 2879} 2880 2881static void 2882ath_beacon_update(struct ieee80211vap *vap, int item) 2883{ 2884 struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff; 2885 2886 setbit(bo->bo_flags, item); 2887} 2888 2889/* 2890 * Append the contents of src to dst; both queues 2891 * are assumed to be locked. 
2892 */ 2893static void 2894ath_txqmove(struct ath_txq *dst, struct ath_txq *src) 2895{ 2896 TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list); 2897 dst->axq_link = src->axq_link; 2898 src->axq_link = NULL; 2899 dst->axq_depth += src->axq_depth; 2900 dst->axq_aggr_depth += src->axq_aggr_depth; 2901 src->axq_depth = 0; 2902 src->axq_aggr_depth = 0; 2903} 2904 2905/* 2906 * Transmit a beacon frame at SWBA. Dynamic updates to the 2907 * frame contents are done as needed and the slot time is 2908 * also adjusted based on current state. 2909 */ 2910static void 2911ath_beacon_proc(void *arg, int pending) 2912{ 2913 struct ath_softc *sc = arg; 2914 struct ath_hal *ah = sc->sc_ah; 2915 struct ieee80211vap *vap; 2916 struct ath_buf *bf; 2917 int slot, otherant; 2918 uint32_t bfaddr; 2919 2920 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n", 2921 __func__, pending); 2922 /* 2923 * Check if the previous beacon has gone out. If 2924 * not don't try to post another, skip this period 2925 * and wait for the next. Missed beacons indicate 2926 * a problem and should not occur. If we miss too 2927 * many consecutive beacons reset the device. 
2928 */ 2929 if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) { 2930 sc->sc_bmisscount++; 2931 sc->sc_stats.ast_be_missed++; 2932 DPRINTF(sc, ATH_DEBUG_BEACON, 2933 "%s: missed %u consecutive beacons\n", 2934 __func__, sc->sc_bmisscount); 2935 if (sc->sc_bmisscount >= ath_bstuck_threshold) 2936 taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask); 2937 return; 2938 } 2939 if (sc->sc_bmisscount != 0) { 2940 DPRINTF(sc, ATH_DEBUG_BEACON, 2941 "%s: resume beacon xmit after %u misses\n", 2942 __func__, sc->sc_bmisscount); 2943 sc->sc_bmisscount = 0; 2944 } 2945 2946 if (sc->sc_stagbeacons) { /* staggered beacons */ 2947 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 2948 uint32_t tsftu; 2949 2950 tsftu = ath_hal_gettsf32(ah) >> 10; 2951 /* XXX lintval */ 2952 slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval; 2953 vap = sc->sc_bslot[(slot+1) % ATH_BCBUF]; 2954 bfaddr = 0; 2955 if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) { 2956 bf = ath_beacon_generate(sc, vap); 2957 if (bf != NULL) 2958 bfaddr = bf->bf_daddr; 2959 } 2960 } else { /* burst'd beacons */ 2961 uint32_t *bflink = &bfaddr; 2962 2963 for (slot = 0; slot < ATH_BCBUF; slot++) { 2964 vap = sc->sc_bslot[slot]; 2965 if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) { 2966 bf = ath_beacon_generate(sc, vap); 2967 if (bf != NULL) { 2968 *bflink = bf->bf_daddr; 2969 bflink = &bf->bf_desc->ds_link; 2970 } 2971 } 2972 } 2973 *bflink = 0; /* terminate list */ 2974 } 2975 2976 /* 2977 * Handle slot time change when a non-ERP station joins/leaves 2978 * an 11g network. The 802.11 layer notifies us via callback, 2979 * we mark updateslot, then wait one beacon before effecting 2980 * the change. This gives associated stations at least one 2981 * beacon interval to note the state change. 
2982 */ 2983 /* XXX locking */ 2984 if (sc->sc_updateslot == UPDATE) { 2985 sc->sc_updateslot = COMMIT; /* commit next beacon */ 2986 sc->sc_slotupdate = slot; 2987 } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot) 2988 ath_setslottime(sc); /* commit change to h/w */ 2989 2990 /* 2991 * Check recent per-antenna transmit statistics and flip 2992 * the default antenna if noticeably more frames went out 2993 * on the non-default antenna. 2994 * XXX assumes 2 anntenae 2995 */ 2996 if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) { 2997 otherant = sc->sc_defant & 1 ? 2 : 1; 2998 if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2) 2999 ath_setdefantenna(sc, otherant); 3000 sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0; 3001 } 3002 3003 if (bfaddr != 0) { 3004 /* 3005 * Stop any current dma and put the new frame on the queue. 3006 * This should never fail since we check above that no frames 3007 * are still pending on the queue. 3008 */ 3009 if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) { 3010 DPRINTF(sc, ATH_DEBUG_ANY, 3011 "%s: beacon queue %u did not stop?\n", 3012 __func__, sc->sc_bhalq); 3013 } 3014 /* NB: cabq traffic should already be queued and primed */ 3015 ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr); 3016 ath_hal_txstart(ah, sc->sc_bhalq); 3017 3018 sc->sc_stats.ast_be_xmit++; 3019 } 3020} 3021 3022static struct ath_buf * 3023ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap) 3024{ 3025 struct ath_vap *avp = ATH_VAP(vap); 3026 struct ath_txq *cabq = sc->sc_cabq; 3027 struct ath_buf *bf; 3028 struct mbuf *m; 3029 int nmcastq, error; 3030 3031 KASSERT(vap->iv_state >= IEEE80211_S_RUN, 3032 ("not running, state %d", vap->iv_state)); 3033 KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer")); 3034 3035 /* 3036 * Update dynamic beacon contents. If this returns 3037 * non-zero then we need to remap the memory because 3038 * the beacon frame changed size (probably because 3039 * of the TIM bitmap). 
3040 */ 3041 bf = avp->av_bcbuf; 3042 m = bf->bf_m; 3043 nmcastq = avp->av_mcastq.axq_depth; 3044 if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) { 3045 /* XXX too conservative? */ 3046 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3047 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, 3048 bf->bf_segs, &bf->bf_nseg, 3049 BUS_DMA_NOWAIT); 3050 if (error != 0) { 3051 if_printf(vap->iv_ifp, 3052 "%s: bus_dmamap_load_mbuf_sg failed, error %u\n", 3053 __func__, error); 3054 return NULL; 3055 } 3056 } 3057 if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) { 3058 DPRINTF(sc, ATH_DEBUG_BEACON, 3059 "%s: cabq did not drain, mcastq %u cabq %u\n", 3060 __func__, nmcastq, cabq->axq_depth); 3061 sc->sc_stats.ast_cabq_busy++; 3062 if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) { 3063 /* 3064 * CABQ traffic from a previous vap is still pending. 3065 * We must drain the q before this beacon frame goes 3066 * out as otherwise this vap's stations will get cab 3067 * frames from a different vap. 3068 * XXX could be slow causing us to miss DBA 3069 */ 3070 ath_tx_draintxq(sc, cabq); 3071 } 3072 } 3073 ath_beacon_setup(sc, bf); 3074 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 3075 3076 /* 3077 * Enable the CAB queue before the beacon queue to 3078 * insure cab frames are triggered by this beacon. 3079 */ 3080 if (avp->av_boff.bo_tim[4] & 1) { 3081 struct ath_hal *ah = sc->sc_ah; 3082 3083 /* NB: only at DTIM */ 3084 ATH_TXQ_LOCK(cabq); 3085 ATH_TXQ_LOCK(&avp->av_mcastq); 3086 if (nmcastq) { 3087 struct ath_buf *bfm; 3088 3089 /* 3090 * Move frames from the s/w mcast q to the h/w cab q. 
3091 * XXX MORE_DATA bit 3092 */ 3093 bfm = TAILQ_FIRST(&avp->av_mcastq.axq_q); 3094 if (cabq->axq_link != NULL) { 3095 *cabq->axq_link = bfm->bf_daddr; 3096 } else 3097 ath_hal_puttxbuf(ah, cabq->axq_qnum, 3098 bfm->bf_daddr); 3099 ath_txqmove(cabq, &avp->av_mcastq); 3100 3101 sc->sc_stats.ast_cabq_xmit += nmcastq; 3102 } 3103 /* NB: gated by beacon so safe to start here */ 3104 if (! TAILQ_EMPTY(&(cabq->axq_q))) 3105 ath_hal_txstart(ah, cabq->axq_qnum); 3106 ATH_TXQ_UNLOCK(&avp->av_mcastq); 3107 ATH_TXQ_UNLOCK(cabq); 3108 } 3109 return bf; 3110} 3111 3112static void 3113ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap) 3114{ 3115 struct ath_vap *avp = ATH_VAP(vap); 3116 struct ath_hal *ah = sc->sc_ah; 3117 struct ath_buf *bf; 3118 struct mbuf *m; 3119 int error; 3120 3121 KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer")); 3122 3123 /* 3124 * Update dynamic beacon contents. If this returns 3125 * non-zero then we need to remap the memory because 3126 * the beacon frame changed size (probably because 3127 * of the TIM bitmap). 3128 */ 3129 bf = avp->av_bcbuf; 3130 m = bf->bf_m; 3131 if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) { 3132 /* XXX too conservative? */ 3133 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3134 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, 3135 bf->bf_segs, &bf->bf_nseg, 3136 BUS_DMA_NOWAIT); 3137 if (error != 0) { 3138 if_printf(vap->iv_ifp, 3139 "%s: bus_dmamap_load_mbuf_sg failed, error %u\n", 3140 __func__, error); 3141 return; 3142 } 3143 } 3144 ath_beacon_setup(sc, bf); 3145 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 3146 3147 /* NB: caller is known to have already stopped tx dma */ 3148 ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr); 3149 ath_hal_txstart(ah, sc->sc_bhalq); 3150} 3151 3152/* 3153 * Reset the hardware after detecting beacons have stopped. 
 */
static void
ath_bstuck_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t hangs = 0;

	/*
	 * Ask the HAL whether a baseband hang was detected; log it
	 * before the reset so the state isn't lost.
	 */
	if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0)
		if_printf(ifp, "bb hang detected (0x%x)\n", hangs);

	if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
	    sc->sc_bmisscount);
	sc->sc_stats.ast_bstuck++;
	/*
	 * This assumes that there's no simultaneous channel mode change
	 * occurring.
	 */
	ath_reset(ifp, ATH_RESET_NOLOSS);
}

/*
 * Reclaim beacon resources and return buffer to the pool.
 */
static void
ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf)
{

	DPRINTF(sc, ATH_DEBUG_NODE, "%s: free bf=%p, bf_m=%p, bf_node=%p\n",
	    __func__, bf, bf->bf_m, bf->bf_node);
	/* Unload the DMA map and free the mbuf, if present. */
	if (bf->bf_m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}
	/* Drop the node reference held by the beacon frame. */
	if (bf->bf_node != NULL) {
		ieee80211_free_node(bf->bf_node);
		bf->bf_node = NULL;
	}
	/* Put the buffer back on the free beacon buffer list. */
	TAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list);
}

/*
 * Reclaim beacon resources.
 *
 * Unlike ath_beacon_return() this walks every buffer on sc_bbuf and
 * releases mbufs, DMA mappings and node references in place; the
 * ath_buf entries themselves remain on the list.
 */
static void
ath_beacon_free(struct ath_softc *sc)
{
	struct ath_buf *bf;

	TAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
		DPRINTF(sc, ATH_DEBUG_NODE,
		    "%s: free bf=%p, bf_m=%p, bf_node=%p\n",
		    __func__, bf, bf->bf_m, bf->bf_node);
		if (bf->bf_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
			m_freem(bf->bf_m);
			bf->bf_m = NULL;
		}
		if (bf->bf_node != NULL) {
			ieee80211_free_node(bf->bf_node);
			bf->bf_node = NULL;
		}
	}
}

/*
 * Configure the beacon and sleep timers.
 *
 * When operating as an AP this resets the TSF and sets
 * up the hardware to notify us when we need to issue beacons.
 *
 * When operating in station mode this sets up the beacon
 * timers according to the timestamp of the last received
 * beacon and the current TSF, configures PCF and DTIM
 * handling, programs the sleep registers so the hardware
 * will wakeup in time to receive beacons, and configures
 * the beacon miss handling so we'll receive a BMISS
 * interrupt when we stop seeing beacons from the AP
 * we've associated with.
 */
static void
ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
{
#define	TSF_TO_TU(_h,_l) \
	((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
#define	FUDGE	2
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	struct ieee80211_node *ni;
	u_int32_t nexttbtt, intval, tsftu;
	u_int64_t tsf;

	/* NB: a NULL vap means "use the first vap" */
	if (vap == NULL)
		vap = TAILQ_FIRST(&ic->ic_vaps);	/* XXX */
	ni = ieee80211_ref_node(vap->iv_bss);

	/* extract tstamp from last beacon and convert to TU */
	nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4),
			     LE_READ_4(ni->ni_tstamp.data));
	if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
	    ic->ic_opmode == IEEE80211_M_MBSS) {
		/*
		 * For multi-bss ap/mesh support beacons are either staggered
		 * evenly over N slots or burst together.  For the former
		 * arrange for the SWBA to be delivered for each slot.
		 * Slots that are not occupied will generate nothing.
		 */
		/* NB: the beacon interval is kept internally in TU's */
		intval = ni->ni_intval & HAL_BEACON_PERIOD;
		if (sc->sc_stagbeacons)
			intval /= ATH_BCBUF;
	} else {
		/* NB: the beacon interval is kept internally in TU's */
		intval = ni->ni_intval & HAL_BEACON_PERIOD;
	}
	if (nexttbtt == 0)		/* e.g. for ap mode */
		nexttbtt = intval;
	else if (intval)		/* NB: can be 0 for monitor mode */
		nexttbtt = roundup(nexttbtt, intval);
	DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
		__func__, nexttbtt, intval, ni->ni_intval);
	if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) {
		HAL_BEACON_STATE bs;
		int dtimperiod, dtimcount;
		int cfpperiod, cfpcount;

		/*
		 * Setup dtim and cfp parameters according to
		 * last beacon we received (which may be none).
		 */
		dtimperiod = ni->ni_dtim_period;
		if (dtimperiod <= 0)		/* NB: 0 if not known */
			dtimperiod = 1;
		dtimcount = ni->ni_dtim_count;
		if (dtimcount >= dtimperiod)	/* NB: sanity check */
			dtimcount = 0;		/* XXX? */
		cfpperiod = 1;			/* NB: no PCF support yet */
		cfpcount = 0;
		/*
		 * Pull nexttbtt forward to reflect the current
		 * TSF and calculate dtim+cfp state for the result.
		 */
		tsf = ath_hal_gettsf64(ah);
		tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
		do {
			nexttbtt += intval;
			if (--dtimcount < 0) {
				dtimcount = dtimperiod - 1;
				if (--cfpcount < 0)
					cfpcount = cfpperiod - 1;
			}
		} while (nexttbtt < tsftu);
		memset(&bs, 0, sizeof(bs));
		bs.bs_intval = intval;
		bs.bs_nexttbtt = nexttbtt;
		bs.bs_dtimperiod = dtimperiod*intval;
		bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
		bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
		bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
		bs.bs_cfpmaxduration = 0;
#if 0
		/*
		 * The 802.11 layer records the offset to the DTIM
		 * bitmap while receiving beacons; use it here to
		 * enable h/w detection of our AID being marked in
		 * the bitmap vector (to indicate frames for us are
		 * pending at the AP).
		 * XXX do DTIM handling in s/w to WAR old h/w bugs
		 * XXX enable based on h/w rev for newer chips
		 */
		bs.bs_timoffset = ni->ni_timoff;
#endif
		/*
		 * Calculate the number of consecutive beacons to miss
		 * before taking a BMISS interrupt.
		 * Note that we clamp the result to at most 10 beacons.
		 */
		bs.bs_bmissthreshold = vap->iv_bmissthreshold;
		if (bs.bs_bmissthreshold > 10)
			bs.bs_bmissthreshold = 10;
		else if (bs.bs_bmissthreshold <= 0)
			bs.bs_bmissthreshold = 1;

		/*
		 * Calculate sleep duration.  The configuration is
		 * given in ms.  We ensure a multiple of the beacon
		 * period is used.  Also, if the sleep duration is
		 * greater than the DTIM period then it makes sense
		 * to make it a multiple of that.
		 *
		 * XXX fixed at 100ms
		 */
		bs.bs_sleepduration =
			roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
		if (bs.bs_sleepduration > bs.bs_dtimperiod)
			bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);

		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
			, __func__
			, tsf, tsftu
			, bs.bs_intval
			, bs.bs_nexttbtt
			, bs.bs_dtimperiod
			, bs.bs_nextdtim
			, bs.bs_bmissthreshold
			, bs.bs_sleepduration
			, bs.bs_cfpperiod
			, bs.bs_cfpmaxduration
			, bs.bs_cfpnext
			, bs.bs_timoffset
		);
		/* Quiesce interrupts while reprogramming the timers. */
		ath_hal_intrset(ah, 0);
		ath_hal_beacontimers(ah, &bs);
		sc->sc_imask |= HAL_INT_BMISS;
		ath_hal_intrset(ah, sc->sc_imask);
	} else {
		ath_hal_intrset(ah, 0);
		if (nexttbtt == intval)
			intval |= HAL_BEACON_RESET_TSF;
		if (ic->ic_opmode == IEEE80211_M_IBSS) {
			/*
			 * In IBSS mode enable the beacon timers but only
			 * enable SWBA interrupts if we need to manually
			 * prepare beacon frames.  Otherwise we use a
			 * self-linked tx descriptor and let the hardware
			 * deal with things.
			 */
			intval |= HAL_BEACON_ENA;
			if (!sc->sc_hasveol)
				sc->sc_imask |= HAL_INT_SWBA;
			if ((intval & HAL_BEACON_RESET_TSF) == 0) {
				/*
				 * Pull nexttbtt forward to reflect
				 * the current TSF.
				 */
				tsf = ath_hal_gettsf64(ah);
				tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
				do {
					nexttbtt += intval;
				} while (nexttbtt < tsftu);
			}
			ath_beaconq_config(sc);
		} else if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
		    ic->ic_opmode == IEEE80211_M_MBSS) {
			/*
			 * In AP/mesh mode we enable the beacon timers
			 * and SWBA interrupts to prepare beacon frames.
			 */
			intval |= HAL_BEACON_ENA;
			sc->sc_imask |= HAL_INT_SWBA;	/* beacon prepare */
			ath_beaconq_config(sc);
		}
		ath_hal_beaconinit(ah, nexttbtt, intval);
		sc->sc_bmisscount = 0;
		ath_hal_intrset(ah, sc->sc_imask);
		/*
		 * When using a self-linked beacon descriptor in
		 * ibss mode load it once here.
		 */
		if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol)
			ath_beacon_start_adhoc(sc, vap);
	}
	sc->sc_syncbeacon = 0;
	ieee80211_free_node(ni);
#undef FUDGE
#undef TSF_TO_TU
}

/*
 * bus_dmamap_load() callback: record the single segment's bus address
 * in the caller-supplied bus_addr_t.
 */
static void
ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	KASSERT(error == 0, ("error %u on bus_dma callback", error));
	*paddr = segs->ds_addr;
}

/*
 * Allocate and map a contiguous region of hardware descriptors and
 * an array of ath_buf entries pointing into it; the buffers are
 * placed on 'head'.  Returns 0 or a bus_dma/ENOMEM errno; on failure
 * everything allocated so far is torn down and *dd is zeroed.
 */
static int
ath_descdma_setup(struct ath_softc *sc,
	struct ath_descdma *dd, ath_bufhead *head,
	const char *name, int nbuf, int ndesc)
{
#define	DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define	ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
	((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
	struct ifnet *ifp = sc->sc_ifp;
	uint8_t *ds;
	struct ath_buf *bf;
	int i, bsize, error;
	int desc_len;

	desc_len = sizeof(struct ath_desc);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n",
	    __func__, name, nbuf, ndesc);

	dd->dd_name = name;
	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Merlin work-around:
	 * Descriptors that cross the 4KB boundary can't be used.
	 * Assume one skipped descriptor per 4KB page.
	 */
	if (! ath_hal_split4ktrans(sc->sc_ah)) {
		int numdescpage = 4096 / (desc_len * ndesc);
		dd->dd_desc_len = (nbuf / numdescpage + 1) * 4096;
	}

	/*
	 * Setup DMA descriptor area.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
		       PAGE_SIZE, 0,		/* alignment, bounds */
		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		       BUS_SPACE_MAXADDR,	/* highaddr */
		       NULL, NULL,		/* filter, filterarg */
		       dd->dd_desc_len,		/* maxsize */
		       1,			/* nsegments */
		       dd->dd_desc_len,		/* maxsegsize */
		       BUS_DMA_ALLOCNOW,	/* flags */
		       NULL,			/* lockfunc */
		       NULL,			/* lockarg */
		       &dd->dd_dmat);
	if (error != 0) {
		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
		return error;
	}

	/* allocate descriptors */
	error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
	if (error != 0) {
		if_printf(ifp, "unable to create dmamap for %s descriptors, "
			"error %u\n", dd->dd_name, error);
		goto fail0;
	}

	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
				 &dd->dd_dmamap);
	if (error != 0) {
		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
			"error %u\n", nbuf * ndesc, dd->dd_name, error);
		goto fail1;
	}

	/* NB: ath_load_cb stores the bus address in dd_desc_paddr */
	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
				dd->dd_desc, dd->dd_desc_len,
				ath_load_cb, &dd->dd_desc_paddr,
				BUS_DMA_NOWAIT);
	if (error != 0) {
		if_printf(ifp, "unable to map %s descriptors, error %u\n",
			dd->dd_name, error);
		goto fail2;
	}

	ds = (uint8_t *) dd->dd_desc;
	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
	    (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);

	/* allocate rx buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
	if (bf == NULL) {
		if_printf(ifp, "malloc of %s buffers failed, size %u\n",
			dd->dd_name, bsize);
		goto fail3;
	}
	dd->dd_bufptr = bf;

	TAILQ_INIT(head);
	for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * desc_len)) {
		bf->bf_desc = (struct ath_desc *) ds;
		bf->bf_daddr = DS2PHYS(dd, ds);
		if (! ath_hal_split4ktrans(sc->sc_ah)) {
			/*
			 * Merlin WAR: Skip descriptor addresses which
			 * cause 4KB boundary crossing along any point
			 * in the descriptor.
			 */
			if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr,
			    desc_len * ndesc)) {
				/* Start at the next page */
				ds += 0x1000 - (bf->bf_daddr & 0xFFF);
				bf->bf_desc = (struct ath_desc *) ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
				&bf->bf_dmamap);
		if (error != 0) {
			if_printf(ifp, "unable to create dmamap for %s "
				"buffer %u, error %u\n", dd->dd_name, i, error);
			/* NB: cleanup also releases the descriptor memory */
			ath_descdma_cleanup(sc, dd, head);
			return error;
		}
		bf->bf_lastds = bf->bf_desc;	/* Just an initial value */
		TAILQ_INSERT_TAIL(head, bf, bf_list);
	}
	return 0;
	/* Unwind in reverse order of acquisition. */
fail3:
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
fail2:
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
fail1:
	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
fail0:
	bus_dma_tag_destroy(dd->dd_dmat);
	memset(dd, 0, sizeof(*dd));
	return error;
#undef DS2PHYS
#undef ATH_DESC_4KB_BOUND_CHECK
}

/*
 * Release everything set up by ath_descdma_setup(): the descriptor
 * memory/tag, each buffer's mbuf, DMA map and node reference, and
 * finally the ath_buf array itself.  *dd is zeroed on return.
 */
static void
ath_descdma_cleanup(struct ath_softc *sc,
	struct ath_descdma *dd, ath_bufhead *head)
{
	struct ath_buf *bf;
	struct ieee80211_node *ni;

	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	TAILQ_FOREACH(bf, head, bf_list) {
		if (bf->bf_m) {
			m_freem(bf->bf_m);
			bf->bf_m = NULL;
		}
		if (bf->bf_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
			bf->bf_dmamap = NULL;
		}
		ni = bf->bf_node;
		bf->bf_node = NULL;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
	}

	TAILQ_INIT(head);
	free(dd->dd_bufptr, M_ATHDEV);
	memset(dd, 0, sizeof(*dd));
}

/*
 * Allocate the rx, tx and beacon descriptor areas.  On partial
 * failure the areas already set up are torn down again.
 */
static int
ath_desc_alloc(struct ath_softc *sc)
{
	int error;

	error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
			"rx", ath_rxbuf, 1);
	if (error != 0)
		return error;

	error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
			"tx", ath_txbuf, ATH_TXDESC);
	if (error != 0) {
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
		return error;
	}

	error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
			"beacon", ATH_BCBUF, 1);
	if (error != 0) {
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
		return error;
	}
	return 0;
}

/*
 * Free all descriptor areas; a zero dd_desc_len marks an area that
 * was never set up (or already cleaned), so it is skipped.
 */
static void
ath_desc_free(struct ath_softc *sc)
{

	if (sc->sc_bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
	if (sc->sc_txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
}

/*
 * net80211 node allocation hook: allocate an ath_node with trailing
 * space for the rate control module's per-node state, and initialise
 * rate control, the per-node mutex and the TX TID state.
 */
static struct ieee80211_node *
ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	/* NB: arc_space is the rate control module's per-node state size */
	const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
	struct ath_node *an;

	an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
	if (an == NULL) {
		/* XXX stat+msg */
		return NULL;
	}
	ath_rate_node_init(sc, an);

	/* Setup the mutex - there's no associd yet so set the name to NULL */
	snprintf(an->an_name, sizeof(an->an_name), "%s: node %p",
	    device_get_nameunit(sc->sc_dev), an);
	mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF);

	/* XXX setup ath_tid */
	ath_tx_tid_init(sc, an);

	DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
	return &an->an_node;
}

/*
 * net80211 node cleanup hook: flush this node's TX state and rate
 * control state, then chain to the saved net80211 cleanup method.
 */
static void
ath_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	/* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */
	ath_tx_node_flush(sc, ATH_NODE(ni));
	ath_rate_node_cleanup(sc, ATH_NODE(ni));
	sc->sc_node_cleanup(ni);
}

/*
 * net80211 node free hook: destroy the per-node mutex and chain to
 * the saved net80211 free method.
 */
static void
ath_node_free(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);
	mtx_destroy(&ATH_NODE(ni)->an_mtx);
	sc->sc_node_free(ni);
}

/*
 * Report per-node rssi and the channel noise floor; falls back to a
 * nominal -95 dBm when no channel is set.
 */
static void
ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	*rssi = ic->ic_node_getrssi(ni);
	if (ni->ni_chan != IEEE80211_CHAN_ANYC)
		*noise = ath_hal_getchannoise(ah, ni->ni_chan);
	else
		*noise = -95;		/* nominally correct */
}

/*
 * (Re)initialise an rx buffer: attach an mbuf cluster if needed,
 * program the rx descriptor and append it to the hardware rx chain.
 * Returns 0 or ENOMEM / a bus_dma errno.
 */
static int
ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	int error;
	struct mbuf *m;
	struct ath_desc *ds;

	m = bf->bf_m;
	if (m == NULL) {
		/*
		 * NB: by assigning a page to the rx dma buffer we
		 * implicitly satisfy the Atheros requirement that
		 * this buffer be cache-line-aligned and sized to be
		 * multiple of the cache line size.  Not doing this
		 * causes weird stuff to happen (for the 5210 at least).
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: no mbuf/cluster\n", __func__);
			sc->sc_stats.ast_rx_nombuf++;
			return ENOMEM;
		}
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
					     bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
			    __func__, error);
			sc->sc_stats.ast_rx_busdma++;
			m_freem(m);
			return error;
		}
		KASSERT(bf->bf_nseg == 1,
			("multi-segment packet; nseg %u", bf->bf_nseg));
		bf->bf_m = m;
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	/*
	 * 11N: we can no longer afford to self link the last descriptor.
	 * MAC acknowledges BA status as long as it copies frames to host
	 * buffer (or rx fifo).  This can incorrectly acknowledge packets
	 * to a sender if last desc is self-linked.
	 */
	ds = bf->bf_desc;
	if (sc->sc_rxslink)
		ds->ds_link = bf->bf_daddr;	/* link to self */
	else
		ds->ds_link = 0;		/* terminate the list */
	ds->ds_data = bf->bf_segs[0].ds_addr;
	ath_hal_setuprxdesc(ah, ds
		, m->m_len		/* buffer size */
		, 0
	);

	/* Chain this descriptor onto the tail of the s/w rx list. */
	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
	return 0;
}

/*
 * Extend 15-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the specified TSF.
 */
static __inline u_int64_t
ath_extend_tsf15(u_int32_t rstamp, u_int64_t tsf)
{
	/* If the TSF's low 15 bits wrapped past rstamp, step back one epoch. */
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;

	return ((tsf &~ 0x7fff) | rstamp);
}

/*
 * Extend 32-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the specified TSF.
 */
static __inline u_int64_t
ath_extend_tsf32(u_int32_t rstamp, u_int64_t tsf)
{
	u_int32_t tsf_low = tsf & 0xffffffff;
	u_int64_t tsf64 = (tsf & ~0xffffffffULL) | rstamp;

	/* Correct for a 32-bit wrap between rstamp and the TSF read. */
	if (rstamp > tsf_low && (rstamp - tsf_low > 0x10000000))
		tsf64 -= 0x100000000ULL;

	if (rstamp < tsf_low && (tsf_low - rstamp > 0x10000000))
		tsf64 += 0x100000000ULL;

	return tsf64;
}

/*
 * Extend the TSF from the RX descriptor to a full 64 bit TSF.
 * Earlier hardware versions only wrote the low 15 bits of the
 * TSF into the RX descriptor; later versions (AR5416 and up)
 * include the 32 bit TSF value.
 */
static __inline u_int64_t
ath_extend_tsf(struct ath_softc *sc, u_int32_t rstamp, u_int64_t tsf)
{
	if (sc->sc_rxtsf32)
		return ath_extend_tsf32(rstamp, tsf);
	else
		return ath_extend_tsf15(rstamp, tsf);
}

/*
 * Intercept management frames to collect beacon rssi data
 * and to do ibss merges.
 */
static void
ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
	int subtype, int rssi, int nf)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;

	/*
	 * Call up first so subsequent work can use information
	 * potentially stored in the node (e.g. for ibss merge).
	 */
	ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, nf);
	switch (subtype) {
	case IEEE80211_FC0_SUBTYPE_BEACON:
		/* update rssi statistics for use by the hal */
		/* XXX unlocked check against vap->iv_bss? */
		ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
		if (sc->sc_syncbeacon &&
		    ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) {
			/*
			 * Resync beacon timers using the tsf of the beacon
			 * frame we just received.
			 */
			ath_beacon_config(sc, vap);
		}
		/* fall thru... */
	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
		if (vap->iv_opmode == IEEE80211_M_IBSS &&
		    vap->iv_state == IEEE80211_S_RUN) {
			/* NB: sc_lastrs is the rx status of the frame just received */
			uint32_t rstamp = sc->sc_lastrs->rs_tstamp;
			uint64_t tsf = ath_extend_tsf(sc, rstamp,
				ath_hal_gettsf64(sc->sc_ah));
			/*
			 * Handle ibss merge as needed; check the tsf on the
			 * frame before attempting the merge.  The 802.11 spec
			 * says the station should change its bssid to match
			 * the oldest station with the same ssid, where oldest
			 * is determined by the tsf.  Note that hardware
			 * reconfiguration happens through callback to
			 * ath_newstate as the state machine will go from
			 * RUN -> RUN when this happens.
			 */
			if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
				DPRINTF(sc, ATH_DEBUG_STATE,
				    "ibss merge, rstamp %u tsf %ju "
				    "tstamp %ju\n", rstamp, (uintmax_t)tsf,
				    (uintmax_t)ni->ni_tstamp.tsf);
				(void) ieee80211_ibss_merge(ni);
			}
		}
		break;
	}
}

/*
 * Set the default antenna.
3899 */ 3900static void 3901ath_setdefantenna(struct ath_softc *sc, u_int antenna) 3902{ 3903 struct ath_hal *ah = sc->sc_ah; 3904 3905 /* XXX block beacon interrupts */ 3906 ath_hal_setdefantenna(ah, antenna); 3907 if (sc->sc_defant != antenna) 3908 sc->sc_stats.ast_ant_defswitch++; 3909 sc->sc_defant = antenna; 3910 sc->sc_rxotherant = 0; 3911} 3912 3913static void 3914ath_rx_tap(struct ifnet *ifp, struct mbuf *m, 3915 const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf) 3916{ 3917#define CHAN_HT20 htole32(IEEE80211_CHAN_HT20) 3918#define CHAN_HT40U htole32(IEEE80211_CHAN_HT40U) 3919#define CHAN_HT40D htole32(IEEE80211_CHAN_HT40D) 3920#define CHAN_HT (CHAN_HT20|CHAN_HT40U|CHAN_HT40D) 3921 struct ath_softc *sc = ifp->if_softc; 3922 const HAL_RATE_TABLE *rt; 3923 uint8_t rix; 3924 3925 rt = sc->sc_currates; 3926 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 3927 rix = rt->rateCodeToIndex[rs->rs_rate]; 3928 sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate; 3929 sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags; 3930#ifdef AH_SUPPORT_AR5416 3931 sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT; 3932 if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) { /* HT rate */ 3933 struct ieee80211com *ic = ifp->if_l2com; 3934 3935 if ((rs->rs_flags & HAL_RX_2040) == 0) 3936 sc->sc_rx_th.wr_chan_flags |= CHAN_HT20; 3937 else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan)) 3938 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U; 3939 else 3940 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D; 3941 if ((rs->rs_flags & HAL_RX_GI) == 0) 3942 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI; 3943 } 3944#endif 3945 sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(sc, rs->rs_tstamp, tsf)); 3946 if (rs->rs_status & HAL_RXERR_CRC) 3947 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS; 3948 /* XXX propagate other error flags from descriptor */ 3949 sc->sc_rx_th.wr_antnoise = nf; 3950 sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi; 3951 sc->sc_rx_th.wr_antenna = rs->rs_antenna; 3952#undef 
CHAN_HT 3953#undef CHAN_HT20 3954#undef CHAN_HT40U 3955#undef CHAN_HT40D 3956} 3957 3958static void 3959ath_handle_micerror(struct ieee80211com *ic, 3960 struct ieee80211_frame *wh, int keyix) 3961{ 3962 struct ieee80211_node *ni; 3963 3964 /* XXX recheck MIC to deal w/ chips that lie */ 3965 /* XXX discard MIC errors on !data frames */ 3966 ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh); 3967 if (ni != NULL) { 3968 ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix); 3969 ieee80211_free_node(ni); 3970 } 3971} 3972 3973/* 3974 * Only run the RX proc if it's not already running. 3975 * Since this may get run as part of the reset/flush path, 3976 * the task can't clash with an existing, running tasklet. 3977 */ 3978static void 3979ath_rx_tasklet(void *arg, int npending) 3980{ 3981 struct ath_softc *sc = arg; 3982 3983 CTR1(ATH_KTR_INTR, "ath_rx_proc: pending=%d", npending); 3984 DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending); 3985 ATH_PCU_LOCK(sc); 3986 if (sc->sc_inreset_cnt > 0) { 3987 device_printf(sc->sc_dev, 3988 "%s: sc_inreset_cnt > 0; skipping\n", __func__); 3989 ATH_PCU_UNLOCK(sc); 3990 return; 3991 } 3992 ATH_PCU_UNLOCK(sc); 3993 ath_rx_proc(sc, 1); 3994} 3995 3996static void 3997ath_rx_proc(struct ath_softc *sc, int resched) 3998{ 3999#define PA2DESC(_sc, _pa) \ 4000 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ 4001 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) 4002 struct ath_buf *bf; 4003 struct ifnet *ifp = sc->sc_ifp; 4004 struct ieee80211com *ic = ifp->if_l2com; 4005 struct ath_hal *ah = sc->sc_ah; 4006 struct ath_desc *ds; 4007 struct ath_rx_status *rs; 4008 struct mbuf *m; 4009 struct ieee80211_node *ni; 4010 int len, type, ngood; 4011 HAL_STATUS status; 4012 int16_t nf; 4013 u_int64_t tsf, rstamp; 4014 int npkts = 0; 4015 4016 /* XXX we must not hold the ATH_LOCK here */ 4017 ATH_UNLOCK_ASSERT(sc); 4018 ATH_PCU_UNLOCK_ASSERT(sc); 4019 4020 ATH_PCU_LOCK(sc); 4021 sc->sc_rxproc_cnt++; 4022 
ATH_PCU_UNLOCK(sc); 4023 4024 DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: called\n", __func__); 4025 ngood = 0; 4026 nf = ath_hal_getchannoise(ah, sc->sc_curchan); 4027 sc->sc_stats.ast_rx_noise = nf; 4028 tsf = ath_hal_gettsf64(ah); 4029 do { 4030 bf = TAILQ_FIRST(&sc->sc_rxbuf); 4031 if (sc->sc_rxslink && bf == NULL) { /* NB: shouldn't happen */ 4032 if_printf(ifp, "%s: no buffer!\n", __func__); 4033 break; 4034 } else if (bf == NULL) { 4035 /* 4036 * End of List: 4037 * this can happen for non-self-linked RX chains 4038 */ 4039 sc->sc_stats.ast_rx_hitqueueend++; 4040 break; 4041 } 4042 m = bf->bf_m; 4043 if (m == NULL) { /* NB: shouldn't happen */ 4044 /* 4045 * If mbuf allocation failed previously there 4046 * will be no mbuf; try again to re-populate it. 4047 */ 4048 /* XXX make debug msg */ 4049 if_printf(ifp, "%s: no mbuf!\n", __func__); 4050 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); 4051 goto rx_next; 4052 } 4053 ds = bf->bf_desc; 4054 if (ds->ds_link == bf->bf_daddr) { 4055 /* NB: never process the self-linked entry at the end */ 4056 sc->sc_stats.ast_rx_hitqueueend++; 4057 break; 4058 } 4059 /* XXX sync descriptor memory */ 4060 /* 4061 * Must provide the virtual address of the current 4062 * descriptor, the physical address, and the virtual 4063 * address of the next descriptor in the h/w chain. 4064 * This allows the HAL to look ahead to see if the 4065 * hardware is done with a descriptor by checking the 4066 * done bit in the following descriptor and the address 4067 * of the current descriptor the DMA engine is working 4068 * on. All this is necessary because of our use of 4069 * a self-linked list to avoid rx overruns. 
4070 */ 4071 rs = &bf->bf_status.ds_rxstat; 4072 status = ath_hal_rxprocdesc(ah, ds, 4073 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs); 4074#ifdef ATH_DEBUG 4075 if (sc->sc_debug & ATH_DEBUG_RECV_DESC) 4076 ath_printrxbuf(sc, bf, 0, status == HAL_OK); 4077#endif 4078 if (status == HAL_EINPROGRESS) 4079 break; 4080 4081 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); 4082 npkts++; 4083 4084 /* 4085 * Calculate the correct 64 bit TSF given 4086 * the TSF64 register value and rs_tstamp. 4087 */ 4088 rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf); 4089 4090 /* These aren't specifically errors */ 4091#ifdef AH_SUPPORT_AR5416 4092 if (rs->rs_flags & HAL_RX_GI) 4093 sc->sc_stats.ast_rx_halfgi++; 4094 if (rs->rs_flags & HAL_RX_2040) 4095 sc->sc_stats.ast_rx_2040++; 4096 if (rs->rs_flags & HAL_RX_DELIM_CRC_PRE) 4097 sc->sc_stats.ast_rx_pre_crc_err++; 4098 if (rs->rs_flags & HAL_RX_DELIM_CRC_POST) 4099 sc->sc_stats.ast_rx_post_crc_err++; 4100 if (rs->rs_flags & HAL_RX_DECRYPT_BUSY) 4101 sc->sc_stats.ast_rx_decrypt_busy_err++; 4102 if (rs->rs_flags & HAL_RX_HI_RX_CHAIN) 4103 sc->sc_stats.ast_rx_hi_rx_chain++; 4104#endif /* AH_SUPPORT_AR5416 */ 4105 4106 if (rs->rs_status != 0) { 4107 if (rs->rs_status & HAL_RXERR_CRC) 4108 sc->sc_stats.ast_rx_crcerr++; 4109 if (rs->rs_status & HAL_RXERR_FIFO) 4110 sc->sc_stats.ast_rx_fifoerr++; 4111 if (rs->rs_status & HAL_RXERR_PHY) { 4112 sc->sc_stats.ast_rx_phyerr++; 4113 /* Process DFS radar events */ 4114 if ((rs->rs_phyerr == HAL_PHYERR_RADAR) || 4115 (rs->rs_phyerr == HAL_PHYERR_FALSE_RADAR_EXT)) { 4116 /* Since we're touching the frame data, sync it */ 4117 bus_dmamap_sync(sc->sc_dmat, 4118 bf->bf_dmamap, 4119 BUS_DMASYNC_POSTREAD); 4120 /* Now pass it to the radar processing code */ 4121 ath_dfs_process_phy_err(sc, mtod(m, char *), rstamp, rs); 4122 } 4123 4124 /* Be suitably paranoid about receiving phy errors out of the stats array bounds */ 4125 if (rs->rs_phyerr < 64) 4126 sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++; 4127 goto rx_error; 
/* NB: don't count in ierrors */ 4128 } 4129 if (rs->rs_status & HAL_RXERR_DECRYPT) { 4130 /* 4131 * Decrypt error. If the error occurred 4132 * because there was no hardware key, then 4133 * let the frame through so the upper layers 4134 * can process it. This is necessary for 5210 4135 * parts which have no way to setup a ``clear'' 4136 * key cache entry. 4137 * 4138 * XXX do key cache faulting 4139 */ 4140 if (rs->rs_keyix == HAL_RXKEYIX_INVALID) 4141 goto rx_accept; 4142 sc->sc_stats.ast_rx_badcrypt++; 4143 } 4144 if (rs->rs_status & HAL_RXERR_MIC) { 4145 sc->sc_stats.ast_rx_badmic++; 4146 /* 4147 * Do minimal work required to hand off 4148 * the 802.11 header for notification. 4149 */ 4150 /* XXX frag's and qos frames */ 4151 len = rs->rs_datalen; 4152 if (len >= sizeof (struct ieee80211_frame)) { 4153 bus_dmamap_sync(sc->sc_dmat, 4154 bf->bf_dmamap, 4155 BUS_DMASYNC_POSTREAD); 4156 ath_handle_micerror(ic, 4157 mtod(m, struct ieee80211_frame *), 4158 sc->sc_splitmic ? 4159 rs->rs_keyix-32 : rs->rs_keyix); 4160 } 4161 } 4162 ifp->if_ierrors++; 4163rx_error: 4164 /* 4165 * Cleanup any pending partial frame. 4166 */ 4167 if (sc->sc_rxpending != NULL) { 4168 m_freem(sc->sc_rxpending); 4169 sc->sc_rxpending = NULL; 4170 } 4171 /* 4172 * When a tap is present pass error frames 4173 * that have been requested. By default we 4174 * pass decrypt+mic errors but others may be 4175 * interesting (e.g. crc). 4176 */ 4177 if (ieee80211_radiotap_active(ic) && 4178 (rs->rs_status & sc->sc_monpass)) { 4179 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 4180 BUS_DMASYNC_POSTREAD); 4181 /* NB: bpf needs the mbuf length setup */ 4182 len = rs->rs_datalen; 4183 m->m_pkthdr.len = m->m_len = len; 4184 bf->bf_m = NULL; 4185 ath_rx_tap(ifp, m, rs, rstamp, nf); 4186 ieee80211_radiotap_rx_all(ic, m); 4187 m_freem(m); 4188 } 4189 /* XXX pass MIC errors up for s/w reclaculation */ 4190 goto rx_next; 4191 } 4192rx_accept: 4193 /* 4194 * Sync and unmap the frame. 
At this point we're 4195 * committed to passing the mbuf somewhere so clear 4196 * bf_m; this means a new mbuf must be allocated 4197 * when the rx descriptor is setup again to receive 4198 * another frame. 4199 */ 4200 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 4201 BUS_DMASYNC_POSTREAD); 4202 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 4203 bf->bf_m = NULL; 4204 4205 len = rs->rs_datalen; 4206 m->m_len = len; 4207 4208 if (rs->rs_more) { 4209 /* 4210 * Frame spans multiple descriptors; save 4211 * it for the next completed descriptor, it 4212 * will be used to construct a jumbogram. 4213 */ 4214 if (sc->sc_rxpending != NULL) { 4215 /* NB: max frame size is currently 2 clusters */ 4216 sc->sc_stats.ast_rx_toobig++; 4217 m_freem(sc->sc_rxpending); 4218 } 4219 m->m_pkthdr.rcvif = ifp; 4220 m->m_pkthdr.len = len; 4221 sc->sc_rxpending = m; 4222 goto rx_next; 4223 } else if (sc->sc_rxpending != NULL) { 4224 /* 4225 * This is the second part of a jumbogram, 4226 * chain it to the first mbuf, adjust the 4227 * frame length, and clear the rxpending state. 4228 */ 4229 sc->sc_rxpending->m_next = m; 4230 sc->sc_rxpending->m_pkthdr.len += len; 4231 m = sc->sc_rxpending; 4232 sc->sc_rxpending = NULL; 4233 } else { 4234 /* 4235 * Normal single-descriptor receive; setup 4236 * the rcvif and packet length. 4237 */ 4238 m->m_pkthdr.rcvif = ifp; 4239 m->m_pkthdr.len = len; 4240 } 4241 4242 /* 4243 * Validate rs->rs_antenna. 4244 * 4245 * Some users w/ AR9285 NICs have reported crashes 4246 * here because rs_antenna field is bogusly large. 4247 * Let's enforce the maximum antenna limit of 8 4248 * (and it shouldn't be hard coded, but that's a 4249 * separate problem) and if there's an issue, print 4250 * out an error and adjust rs_antenna to something 4251 * sensible. 4252 * 4253 * This code should be removed once the actual 4254 * root cause of the issue has been identified. 
4255 * For example, it may be that the rs_antenna 4256 * field is only valid for the lsat frame of 4257 * an aggregate and it just happens that it is 4258 * "mostly" right. (This is a general statement - 4259 * the majority of the statistics are only valid 4260 * for the last frame in an aggregate. 4261 */ 4262 if (rs->rs_antenna > 7) { 4263 device_printf(sc->sc_dev, "%s: rs_antenna > 7 (%d)\n", 4264 __func__, rs->rs_antenna); 4265#ifdef ATH_DEBUG 4266 ath_printrxbuf(sc, bf, 0, status == HAL_OK); 4267#endif /* ATH_DEBUG */ 4268 rs->rs_antenna = 0; /* XXX better than nothing */ 4269 } 4270 4271 ifp->if_ipackets++; 4272 sc->sc_stats.ast_ant_rx[rs->rs_antenna]++; 4273 4274 /* 4275 * Populate the rx status block. When there are bpf 4276 * listeners we do the additional work to provide 4277 * complete status. Otherwise we fill in only the 4278 * material required by ieee80211_input. Note that 4279 * noise setting is filled in above. 4280 */ 4281 if (ieee80211_radiotap_active(ic)) 4282 ath_rx_tap(ifp, m, rs, rstamp, nf); 4283 4284 /* 4285 * From this point on we assume the frame is at least 4286 * as large as ieee80211_frame_min; verify that. 4287 */ 4288 if (len < IEEE80211_MIN_LEN) { 4289 if (!ieee80211_radiotap_active(ic)) { 4290 DPRINTF(sc, ATH_DEBUG_RECV, 4291 "%s: short packet %d\n", __func__, len); 4292 sc->sc_stats.ast_rx_tooshort++; 4293 } else { 4294 /* NB: in particular this captures ack's */ 4295 ieee80211_radiotap_rx_all(ic, m); 4296 } 4297 m_freem(m); 4298 goto rx_next; 4299 } 4300 4301 if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) { 4302 const HAL_RATE_TABLE *rt = sc->sc_currates; 4303 uint8_t rix = rt->rateCodeToIndex[rs->rs_rate]; 4304 4305 ieee80211_dump_pkt(ic, mtod(m, caddr_t), len, 4306 sc->sc_hwmap[rix].ieeerate, rs->rs_rssi); 4307 } 4308 4309 m_adj(m, -IEEE80211_CRC_LEN); 4310 4311 /* 4312 * Locate the node for sender, track state, and then 4313 * pass the (referenced) node up to the 802.11 layer 4314 * for its use. 
4315 */ 4316 ni = ieee80211_find_rxnode_withkey(ic, 4317 mtod(m, const struct ieee80211_frame_min *), 4318 rs->rs_keyix == HAL_RXKEYIX_INVALID ? 4319 IEEE80211_KEYIX_NONE : rs->rs_keyix); 4320 sc->sc_lastrs = rs; 4321 4322#ifdef AH_SUPPORT_AR5416 4323 if (rs->rs_isaggr) 4324 sc->sc_stats.ast_rx_agg++; 4325#endif /* AH_SUPPORT_AR5416 */ 4326 4327 if (ni != NULL) { 4328 /* 4329 * Only punt packets for ampdu reorder processing for 4330 * 11n nodes; net80211 enforces that M_AMPDU is only 4331 * set for 11n nodes. 4332 */ 4333 if (ni->ni_flags & IEEE80211_NODE_HT) 4334 m->m_flags |= M_AMPDU; 4335 4336 /* 4337 * Sending station is known, dispatch directly. 4338 */ 4339 type = ieee80211_input(ni, m, rs->rs_rssi, nf); 4340 ieee80211_free_node(ni); 4341 /* 4342 * Arrange to update the last rx timestamp only for 4343 * frames from our ap when operating in station mode. 4344 * This assumes the rx key is always setup when 4345 * associated. 4346 */ 4347 if (ic->ic_opmode == IEEE80211_M_STA && 4348 rs->rs_keyix != HAL_RXKEYIX_INVALID) 4349 ngood++; 4350 } else { 4351 type = ieee80211_input_all(ic, m, rs->rs_rssi, nf); 4352 } 4353 /* 4354 * Track rx rssi and do any rx antenna management. 4355 */ 4356 ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi); 4357 if (sc->sc_diversity) { 4358 /* 4359 * When using fast diversity, change the default rx 4360 * antenna if diversity chooses the other antenna 3 4361 * times in a row. 4362 */ 4363 if (sc->sc_defant != rs->rs_antenna) { 4364 if (++sc->sc_rxotherant >= 3) 4365 ath_setdefantenna(sc, rs->rs_antenna); 4366 } else 4367 sc->sc_rxotherant = 0; 4368 } 4369 4370 /* Newer school diversity - kite specific for now */ 4371 /* XXX perhaps migrate the normal diversity code to this? */ 4372 if ((ah)->ah_rxAntCombDiversity) 4373 (*(ah)->ah_rxAntCombDiversity)(ah, rs, ticks, hz); 4374 4375 if (sc->sc_softled) { 4376 /* 4377 * Blink for any data frame. Otherwise do a 4378 * heartbeat-style blink when idle. 
The latter 4379 * is mainly for station mode where we depend on 4380 * periodic beacon frames to trigger the poll event. 4381 */ 4382 if (type == IEEE80211_FC0_TYPE_DATA) { 4383 const HAL_RATE_TABLE *rt = sc->sc_currates; 4384 ath_led_event(sc, 4385 rt->rateCodeToIndex[rs->rs_rate]); 4386 } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle) 4387 ath_led_event(sc, 0); 4388 } 4389rx_next: 4390 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 4391 } while (ath_rxbuf_init(sc, bf) == 0); 4392 4393 /* rx signal state monitoring */ 4394 ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan); 4395 if (ngood) 4396 sc->sc_lastrx = tsf; 4397 4398 CTR2(ATH_KTR_INTR, "ath_rx_proc: npkts=%d, ngood=%d", npkts, ngood); 4399 /* Queue DFS tasklet if needed */ 4400 if (resched && ath_dfs_tasklet_needed(sc, sc->sc_curchan)) 4401 taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask); 4402 4403 /* 4404 * Now that all the RX frames were handled that 4405 * need to be handled, kick the PCU if there's 4406 * been an RXEOL condition. 4407 */ 4408 ATH_PCU_LOCK(sc); 4409 if (resched && sc->sc_kickpcu) { 4410 CTR0(ATH_KTR_ERR, "ath_rx_proc: kickpcu"); 4411 device_printf(sc->sc_dev, "%s: kickpcu; handled %d packets\n", 4412 __func__, npkts); 4413 4414 /* XXX rxslink? */ 4415 /* 4416 * XXX can we hold the PCU lock here? 4417 * Are there any net80211 buffer calls involved? 4418 */ 4419 bf = TAILQ_FIRST(&sc->sc_rxbuf); 4420 ath_hal_putrxbuf(ah, bf->bf_daddr); 4421 ath_hal_rxena(ah); /* enable recv descriptors */ 4422 ath_mode_init(sc); /* set filters, etc. */ 4423 ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */ 4424 4425 ath_hal_intrset(ah, sc->sc_imask); 4426 sc->sc_kickpcu = 0; 4427 } 4428 ATH_PCU_UNLOCK(sc); 4429 4430 /* XXX check this inside of IF_LOCK? 
 */
	if (resched && (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
#ifdef IEEE80211_SUPPORT_SUPERG
		ieee80211_ff_age_all(ic, 100);
#endif
		/* Restart transmit if frames are queued and we're not blocked */
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			ath_start(ifp);
	}
#undef PA2DESC

	/* RX processing is complete; drop the in-progress reference count. */
	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Reset the software state for a transmit queue to a known
 * (empty) state and initialise its lock.
 */
static void
ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
{
	txq->axq_qnum = qnum;
	txq->axq_ac = 0;
	txq->axq_depth = 0;
	txq->axq_aggr_depth = 0;
	txq->axq_intrcnt = 0;
	txq->axq_link = NULL;
	txq->axq_softc = sc;
	TAILQ_INIT(&txq->axq_q);
	TAILQ_INIT(&txq->axq_tidq);
	ATH_TXQ_LOCK_INIT(sc, txq);
}

/*
 * Setup a h/w transmit queue.
 *
 * Returns a pointer to the per-queue software state, or NULL if
 * the HAL cannot provide a queue of the requested type/subtype.
 */
static struct ath_txq *
ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;
	int qnum;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE;
	qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= N(sc->sc_txq)) {
		device_printf(sc->sc_dev,
			"hal qnum %u out of range, max %zu!\n",
			qnum, N(sc->sc_txq));
		/* Give the queue back; we can't track it */
		ath_hal_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
		sc->sc_txqsetup |= 1<<qnum;
	}
	return &sc->sc_txq[qnum];
#undef N
}

/*
 * Setup a hardware data transmit queue for the specified
 * access control.  The hal may not support all requested
 * queues in which case it will return a reference to a
 * previously setup queue.  We record the mapping from ac's
 * to h/w queues for use by ath_tx_start and also track
 * the set of h/w queues being used to optimize work in the
 * transmit interrupt handler and related routines.
 */
static int
ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	struct ath_txq *txq;

	if (ac >= N(sc->sc_ac2q)) {
		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
			ac, N(sc->sc_ac2q));
		return 0;
	}
	txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		txq->axq_ac = ac;
		sc->sc_ac2q[ac] = txq;
		return 1;
	} else
		return 0;
#undef N
}

/*
 * Update WME parameters for a transmit queue.
 */
static int
ath_txq_update(struct ath_softc *sc, int ac)
{
#define	ATH_EXPONENT_TO_VALUE(v)	((1<<v)-1)
#define	ATH_TXOP_TO_US(v)		(v<<5)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_txq *txq = sc->sc_ac2q[ac];
	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;

	/* Start from the queue's current properties and adjust */
	ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
#ifdef IEEE80211_SUPPORT_TDMA
	if (sc->sc_tdma) {
		/*
		 * AIFS is zero so there's no pre-transmit wait.  The
		 * burst time defines the slot duration and is configured
		 * through net80211.  The QCU is setup to not do post-xmit
		 * back off, lockout all lower-priority QCU's, and fire
		 * off the DMA beacon alert timer which is setup based
		 * on the slot configuration.
		 */
		qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
			| HAL_TXQ_TXERRINT_ENABLE
			| HAL_TXQ_TXURNINT_ENABLE
			| HAL_TXQ_TXEOLINT_ENABLE
			| HAL_TXQ_DBA_GATED
			| HAL_TXQ_BACKOFF_DISABLE
			| HAL_TXQ_ARB_LOCKOUT_GLOBAL
			;
		qi.tqi_aifs = 0;
		/* XXX +dbaprep? */
		qi.tqi_readyTime = sc->sc_tdmaslotlen;
		qi.tqi_burstTime = qi.tqi_readyTime;
	} else {
#endif
		/*
		 * XXX shouldn't this just use the default flags
		 * used in the previous queue setup?
		 */
		qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
			| HAL_TXQ_TXERRINT_ENABLE
			| HAL_TXQ_TXDESCINT_ENABLE
			| HAL_TXQ_TXURNINT_ENABLE
			| HAL_TXQ_TXEOLINT_ENABLE
			;
		qi.tqi_aifs = wmep->wmep_aifsn;
		qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
		qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
		qi.tqi_readyTime = 0;
		qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
#ifdef IEEE80211_SUPPORT_TDMA
	}
#endif

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n",
	    __func__, txq->axq_qnum, qi.tqi_qflags,
	    qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime);

	if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
		if_printf(ifp, "unable to update hardware queue "
			"parameters for %s traffic!\n",
			ieee80211_wme_acnames[ac]);
		return 0;
	} else {
		ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
		return 1;
	}
#undef ATH_TXOP_TO_US
#undef ATH_EXPONENT_TO_VALUE
}

/*
 * Callback from the 802.11 layer to update WME parameters.
 *
 * Returns 0 on success or EIO if any queue update fails.
 */
static int
ath_wme_update(struct ieee80211com *ic)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	return !ath_txq_update(sc, WME_AC_BE) ||
	    !ath_txq_update(sc, WME_AC_BK) ||
	    !ath_txq_update(sc, WME_AC_VI) ||
	    !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
}

/*
 * Reclaim resources for a setup queue.
 */
static void
ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{

	ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	ATH_TXQ_LOCK_DESTROY(txq);
	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
}

/*
 * Reclaim all tx queue resources.
 */
static void
ath_tx_cleanup(struct ath_softc *sc)
{
	int i;

	ATH_TXBUF_LOCK_DESTROY(sc);
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
}

/*
 * Return h/w rate index for an IEEE rate (w/o basic rate bit)
 * using the current rates in sc_rixmap.
 */
int
ath_tx_findrix(const struct ath_softc *sc, uint8_t rate)
{
	int rix = sc->sc_rixmap[rate];
	/* NB: return lowest rix for invalid rate */
	return (rix == 0xff ? 0 : rix);
}

/*
 * Accumulate driver statistics for a completed TX descriptor,
 * using the HAL-provided TX status in 'ts'.
 */
static void
ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts,
    struct ath_buf *bf)
{
	struct ieee80211_node *ni = bf->bf_node;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	int sr, lr, pri;

	if (ts->ts_status == 0) {
		/* Successful transmit: per-antenna/rate bookkeeping */
		u_int8_t txant = ts->ts_antenna;
		sc->sc_stats.ast_ant_tx[txant]++;
		sc->sc_ant_tx[txant]++;
		if (ts->ts_finaltsi != 0)
			sc->sc_stats.ast_tx_altrate++;
		pri = M_WME_GETAC(bf->bf_m);
		if (pri >= WME_AC_VO)
			ic->ic_wme.wme_hipri_traffic++;
		/* An ACKed frame proves the peer is alive */
		if ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)
			ni->ni_inact = ni->ni_inact_reload;
	} else {
		if (ts->ts_status & HAL_TXERR_XRETRY)
			sc->sc_stats.ast_tx_xretries++;
		if (ts->ts_status & HAL_TXERR_FIFO)
			sc->sc_stats.ast_tx_fifoerr++;
		if (ts->ts_status & HAL_TXERR_FILT)
			sc->sc_stats.ast_tx_filtered++;
		if (ts->ts_status & HAL_TXERR_XTXOP)
			sc->sc_stats.ast_tx_xtxop++;
		if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
			sc->sc_stats.ast_tx_timerexpired++;

		if (ts->ts_status & HAL_TX_DATA_UNDERRUN)
			sc->sc_stats.ast_tx_data_underrun++;
		if (ts->ts_status & HAL_TX_DELIM_UNDERRUN)
			sc->sc_stats.ast_tx_delim_underrun++;

		if (bf->bf_m->m_flags & M_FF)
			sc->sc_stats.ast_ff_txerr++;
	}
	/* XXX when is this valid? */
	if (ts->ts_status & HAL_TX_DESC_CFG_ERR)
		sc->sc_stats.ast_tx_desccfgerr++;

	sr = ts->ts_shortretry;
	lr = ts->ts_longretry;
	sc->sc_stats.ast_tx_shortretry += sr;
	sc->sc_stats.ast_tx_longretry += lr;

}

/*
 * The default completion.  If fail is 1, this means
 * "please don't retry the frame, and just return -1 status
 * to the net80211 stack."
 */
void
ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
{
	struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
	int st;

	if (fail == 1)
		st = -1;
	else
		st = ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0) ?
		    ts->ts_status : HAL_TXERR_XRETRY;

	/* These are supposed to be cleared/unlinked before completion */
	if (bf->bf_state.bfs_dobaw)
		device_printf(sc->sc_dev,
		    "%s: dobaw should've been cleared!\n", __func__);
	if (bf->bf_next != NULL)
		device_printf(sc->sc_dev,
		    "%s: bf_next not NULL!\n", __func__);

	/*
	 * Do any tx complete callback.  Note this must
	 * be done before releasing the node reference.
	 * This will free the mbuf, release the net80211
	 * node and recycle the ath_buf.
	 */
	ath_tx_freebuf(sc, bf, st);
}

/*
 * Update rate control with the given completion status.
 */
void
ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen,
    int nframes, int nbad)
{
	struct ath_node *an;

	/* Only for unicast frames */
	if (ni == NULL)
		return;

	an = ATH_NODE(ni);

	/* Filtered frames carry no useful rate feedback */
	if ((ts->ts_status & HAL_TXERR_FILT) == 0) {
		ATH_NODE_LOCK(an);
		ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad);
		ATH_NODE_UNLOCK(an);
	}
}

/*
 * Update the busy status of the last frame on the free list.
 * When doing TDMA, the busy flag tracks whether the hardware
 * currently points to this buffer or not, and thus gated DMA
 * may restart by re-reading the last descriptor in this
 * buffer.
 *
 * This should be called in the completion function once one
 * of the buffers has been used.
 */
static void
ath_tx_update_busy(struct ath_softc *sc)
{
	struct ath_buf *last;

	/*
	 * Since the last frame may still be marked
	 * as ATH_BUF_BUSY, unmark it here before
	 * finishing the frame processing.
	 * Since we've completed a frame (aggregate
	 * or otherwise), the hardware has moved on
	 * and is no longer referencing the previous
	 * descriptor.
	 */
	ATH_TXBUF_LOCK_ASSERT(sc);
	last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
	if (last != NULL)
		last->bf_flags &= ~ATH_BUF_BUSY;
}


/*
 * Process completed xmit descriptors from the specified queue.
 * Kick the packet scheduler if needed.  This can occur from this
 * particular task.
 *
 * Returns the count of ACKed unicast frames reaped; callers use
 * a non-zero return to refresh the "last rx" timestamp (phantom
 * beacon-miss workaround).
 */
static int
ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;
	struct ath_desc *ds;
	struct ath_tx_status *ts;
	struct ieee80211_node *ni;
	struct ath_node *an;
	int nacked;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
	    __func__, txq->axq_qnum,
	    (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
	    txq->axq_link);
	nacked = 0;
	for (;;) {
		ATH_TXQ_LOCK(txq);
		txq->axq_intrcnt = 0;	/* reset periodic desc intr count */
		bf = TAILQ_FIRST(&txq->axq_q);
		if (bf == NULL) {
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		ds = bf->bf_lastds;	/* XXX must be setup correctly! */
		ts = &bf->bf_status.ds_txstat;
		status = ath_hal_txprocdesc(ah, ds, ts);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
			ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
			    status == HAL_OK);
#endif
		/* Hardware hasn't finished this descriptor yet; stop reaping */
		if (status == HAL_EINPROGRESS) {
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		ATH_TXQ_REMOVE(txq, bf, bf_list);
#ifdef IEEE80211_SUPPORT_TDMA
		if (txq->axq_depth > 0) {
			/*
			 * More frames follow.  Mark the buffer busy
			 * so it's not re-used while the hardware may
			 * still re-read the link field in the descriptor.
			 *
			 * Use the last buffer in an aggregate as that
			 * is where the hardware may be - intermediate
			 * descriptors won't be "busy".
			 */
			bf->bf_last->bf_flags |= ATH_BUF_BUSY;
		} else
#else
		if (txq->axq_depth == 0)
#endif
			txq->axq_link = NULL;
		if (bf->bf_state.bfs_aggr)
			txq->axq_aggr_depth--;

		ni = bf->bf_node;
		/*
		 * If unicast frame was ack'd update RSSI,
		 * including the last rx time used to
		 * workaround phantom bmiss interrupts.
		 */
		if (ni != NULL && ts->ts_status == 0 &&
		    ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)) {
			nacked++;
			sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
			ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
			    ts->ts_rssi);
		}
		/* Drop the queue lock before calling out to completion code */
		ATH_TXQ_UNLOCK(txq);

		/* If unicast frame, update general statistics */
		if (ni != NULL) {
			an = ATH_NODE(ni);
			/* update statistics */
			ath_tx_update_stats(sc, ts, bf);
		}

		/*
		 * Call the completion handler.
		 * The completion handler is responsible for
		 * calling the rate control code.
		 *
		 * Frames with no completion handler get the
		 * rate control code called here.
		 */
		if (bf->bf_comp == NULL) {
			if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
			    (bf->bf_txflags & HAL_TXDESC_NOACK) == 0) {
				/*
				 * XXX assume this isn't an aggregate
				 * frame.
				 */
				ath_tx_update_ratectrl(sc, ni,
				    bf->bf_state.bfs_rc, ts,
				    bf->bf_state.bfs_pktlen, 1,
				    (ts->ts_status == 0 ? 0 : 1));
			}
			ath_tx_default_comp(sc, bf, 0);
		} else
			bf->bf_comp(sc, bf, 0);
	}
#ifdef IEEE80211_SUPPORT_SUPERG
	/*
	 * Flush fast-frame staging queue when traffic slows.
	 *
	 * NOTE(review): 'ic' is not declared anywhere in this
	 * function's visible scope; confirm it is in scope when
	 * IEEE80211_SUPPORT_SUPERG is enabled or this branch will
	 * not compile.
	 */
	if (txq->axq_depth <= 1)
		ieee80211_ff_flush(ic, txq->axq_ac);
#endif

	/* Kick the TXQ scheduler */
	if (dosched) {
		ATH_TXQ_LOCK(txq);
		ath_txq_sched(sc, txq);
		ATH_TXQ_UNLOCK(txq);
	}

	return nacked;
}

/* Test whether queue 'q' is flagged active in bitmask 't' */
#define	TXQACTIVE(t, q)		( (t) & (1 << (q)))

/*
 * Deferred processing of transmit interrupt; special-cased
 * for a single hardware transmit queue (e.g. 5210 and 5211).
 */
static void
ath_tx_proc_q0(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t txqs;

	/* Atomically snapshot and clear the pending-queue bitmask */
	ATH_PCU_LOCK(sc);
	sc->sc_txproc_cnt++;
	txqs = sc->sc_txq_active;
	sc->sc_txq_active &= ~txqs;
	ATH_PCU_UNLOCK(sc);

	if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1))
		/* XXX why is lastrx updated in tx code? */
		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
	if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
		ath_tx_processq(sc, sc->sc_cabq, 1);
	/* XXX check this inside of IF_LOCK? */
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_wd_timer = 0;

	if (sc->sc_softled)
		ath_led_event(sc, sc->sc_txrix);

	ATH_PCU_LOCK(sc);
	sc->sc_txproc_cnt--;
	ATH_PCU_UNLOCK(sc);

	/* Push out anything that queued while we were busy */
	ath_start(ifp);
}

/*
 * Deferred processing of transmit interrupt; special-cased
 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
 */
static void
ath_tx_proc_q0123(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	int nacked;
	uint32_t txqs;

	/* Atomically snapshot and clear the pending-queue bitmask */
	ATH_PCU_LOCK(sc);
	sc->sc_txproc_cnt++;
	txqs = sc->sc_txq_active;
	sc->sc_txq_active &= ~txqs;
	ATH_PCU_UNLOCK(sc);

	/*
	 * Process each active queue.
	 */
	nacked = 0;
	if (TXQACTIVE(txqs, 0))
		nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1);
	if (TXQACTIVE(txqs, 1))
		nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1);
	if (TXQACTIVE(txqs, 2))
		nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1);
	if (TXQACTIVE(txqs, 3))
		nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1);
	if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
		ath_tx_processq(sc, sc->sc_cabq, 1);
	/* ACKed frames imply the link is alive; see phantom bmiss note */
	if (nacked)
		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);

	/* XXX check this inside of IF_LOCK? */
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_wd_timer = 0;

	if (sc->sc_softled)
		ath_led_event(sc, sc->sc_txrix);

	ATH_PCU_LOCK(sc);
	sc->sc_txproc_cnt--;
	ATH_PCU_UNLOCK(sc);

	ath_start(ifp);
}

/*
 * Deferred processing of transmit interrupt.
 */
static void
ath_tx_proc(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	int i, nacked;
	uint32_t txqs;

	/* Atomically snapshot and clear the pending-queue bitmask */
	ATH_PCU_LOCK(sc);
	sc->sc_txproc_cnt++;
	txqs = sc->sc_txq_active;
	sc->sc_txq_active &= ~txqs;
	ATH_PCU_UNLOCK(sc);

	/*
	 * Process each active queue.
	 */
	nacked = 0;
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i))
			nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1);
	if (nacked)
		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);

	/* XXX check this inside of IF_LOCK? */
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_wd_timer = 0;

	if (sc->sc_softled)
		ath_led_event(sc, sc->sc_txrix);

	ATH_PCU_LOCK(sc);
	sc->sc_txproc_cnt--;
	ATH_PCU_UNLOCK(sc);

	ath_start(ifp);
}
#undef	TXQACTIVE

/*
 * Return a buffer to the pool and update the 'busy' flag on the
 * previous 'tail' entry.
 *
 * This _must_ only be called when the buffer is involved in a completed
 * TX.  The logic is that if it was part of an active TX, the previous
 * buffer on the list is now not involved in a halted TX DMA queue, waiting
 * for restart (eg for TDMA.)
 *
 * The caller must free the mbuf and recycle the node reference.
 */
void
ath_freebuf(struct ath_softc *sc, struct ath_buf *bf)
{
	/*
	 * NOTE(review): the map is unloaded before the POSTWRITE
	 * sync; bus_dma(9) convention is sync-then-unload - confirm
	 * this ordering is intentional.
	 */
	bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE);

	/* The caller must have detached the node and mbuf already */
	KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__));
	KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__));

	ATH_TXBUF_LOCK(sc);
	ath_tx_update_busy(sc);
	TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
	ATH_TXBUF_UNLOCK(sc);
}

/*
 * This is currently used by ath_tx_draintxq() and
 * ath_tx_tid_free_pkts().
 *
 * It recycles a single ath_buf.
 */
void
ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status)
{
	struct ieee80211_node *ni = bf->bf_node;
	struct mbuf *m0 = bf->bf_m;

	/* Detach node/mbuf before recycling; ath_freebuf() asserts this */
	bf->bf_node = NULL;
	bf->bf_m = NULL;

	/* Free the buffer, it's not needed any longer */
	ath_freebuf(sc, bf);

	if (ni != NULL) {
		/*
		 * Do any callback and reclaim the node reference.
		 */
		if (m0->m_flags & M_TXCB)
			ieee80211_process_callback(ni, m0, status);
		ieee80211_free_node(ni);
	}
	m_freem(m0);

	/*
	 * XXX the buffer used to be freed -after-, but the DMA map was
	 * freed where ath_freebuf() now is.  I've no idea what this
	 * will do.
	 */
}

/*
 * Drain all frames (hardware-queued and otherwise pending) from
 * the given transmit queue, running each frame's completion
 * handler with fail=1 so state (e.g. BAW tracking) stays sane.
 */
void
ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
#ifdef ATH_DEBUG
	struct ath_hal *ah = sc->sc_ah;
#endif
	struct ath_buf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block ath_tx_proc
	 */
	ATH_TXBUF_LOCK(sc);
	bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
	if (bf != NULL)
		bf->bf_flags &= ~ATH_BUF_BUSY;
	ATH_TXBUF_UNLOCK(sc);

	for (ix = 0;; ix++) {
		ATH_TXQ_LOCK(txq);
		bf = TAILQ_FIRST(&txq->axq_q);
		if (bf == NULL) {
			txq->axq_link = NULL;
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		ATH_TXQ_REMOVE(txq, bf, bf_list);
		if (bf->bf_state.bfs_aggr)
			txq->axq_aggr_depth--;
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RESET) {
			struct ieee80211com *ic = sc->sc_ifp->if_l2com;

			ath_printtxbuf(sc, bf, txq->axq_qnum, ix,
			    ath_hal_txprocdesc(ah, bf->bf_lastds,
			    &bf->bf_status.ds_txstat) == HAL_OK);
			ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *),
			    bf->bf_m->m_len, 0, -1);
		}
#endif /* ATH_DEBUG */
		/*
		 * Since we're now doing magic in the completion
		 * functions, we -must- call it for aggregation
		 * destinations or BAW tracking will get upset.
		 */
		/*
		 * Clear ATH_BUF_BUSY; the completion handler
		 * will free the buffer.
		 */
		ATH_TXQ_UNLOCK(txq);
		bf->bf_flags &= ~ATH_BUF_BUSY;
		if (bf->bf_comp)
			bf->bf_comp(sc, bf, 1);
		else
			ath_tx_default_comp(sc, bf, 1);
	}

	/*
	 * Drain software queued frames which are on
	 * active TIDs.
	 */
	ath_tx_txq_drain(sc, txq);
}

/*
 * Stop DMA on a single hardware transmit queue.
 */
static void
ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
	    __func__, txq->axq_qnum,
	    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
	    txq->axq_link);
	(void) ath_hal_stoptxdma(ah, txq->axq_qnum);
}

/*
 * Stop TX DMA on the beacon queue and all set-up data queues.
 * Returns 0 if the hardware is marked invalid (nothing touched),
 * 1 otherwise.
 */
static int
ath_stoptxdma(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int i;

	/* XXX return value */
	if (sc->sc_invalid)
		return 0;

	/*
	 * NOTE(review): sc_invalid was already tested above, so this
	 * inner check is always true here; harmless but redundant.
	 */
	if (!sc->sc_invalid) {
		/* don't touch the hardware if marked invalid */
		DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
		    __func__, sc->sc_bhalq,
		    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
		    NULL);
		(void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
		for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
			if (ATH_TXQ_SETUP(sc, i))
				ath_tx_stopdma(sc, &sc->sc_txq[i]);
	}

	return 1;
}

/*
 * Drain the transmit queues and reclaim resources.
 */
static void
ath_draintxq(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
{
#ifdef ATH_DEBUG
	struct ath_hal *ah = sc->sc_ah;
#endif
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	(void) ath_stoptxdma(sc);

	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
		/*
		 * XXX TODO: should we just handle the completed TX frames
		 * here, whether or not the reset is a full one or not?
		 */
		if (ATH_TXQ_SETUP(sc, i)) {
			/* NOLOSS resets reap completions; full resets drop frames */
			if (reset_type == ATH_RESET_NOLOSS)
				ath_tx_processq(sc, &sc->sc_txq[i], 0);
			else
				ath_tx_draintxq(sc, &sc->sc_txq[i]);
		}
	}
#ifdef ATH_DEBUG
	if (sc->sc_debug & ATH_DEBUG_RESET) {
		struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf);
		if (bf != NULL && bf->bf_m != NULL) {
			ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
			    ath_hal_txprocdesc(ah, bf->bf_lastds,
			    &bf->bf_status.ds_txstat) == HAL_OK);
			ieee80211_dump_pkt(ifp->if_l2com,
			    mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
			    0, -1);
		}
	}
#endif /* ATH_DEBUG */
	/* XXX check this inside of IF_LOCK? */
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_wd_timer = 0;
}

/*
 * Disable the receive h/w in preparation for a reset.
 */
static void
ath_stoprecv(struct ath_softc *sc, int dodelay)
{
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_hal *ah = sc->sc_ah;

	ath_hal_stoppcurecv(ah);	/* disable PCU */
	ath_hal_setrxfilter(ah, 0);	/* clear recv filter */
	ath_hal_stopdmarecv(ah);	/* disable DMA engine */
	if (dodelay)
		DELAY(3000);	/* 3ms is long enough for 1 frame */
#ifdef ATH_DEBUG
	if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
		struct ath_buf *bf;
		u_int ix;

		printf("%s: rx queue %p, link %p\n", __func__,
		    (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink);
		ix = 0;
		TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			struct ath_desc *ds = bf->bf_desc;
			struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
			HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
			    bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
			if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
				ath_printrxbuf(sc, bf, ix, status == HAL_OK);
			ix++;
		}
	}
#endif
	/* Discard any partially-assembled jumbogram */
	if (sc->sc_rxpending != NULL) {
		m_freem(sc->sc_rxpending);
		sc->sc_rxpending = NULL;
	}
	sc->sc_rxlink = NULL;		/* just in case */
#undef PA2DESC
}

/*
 * Enable the receive h/w following a reset.
 */
static int
ath_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;

	sc->sc_rxlink = NULL;
	sc->sc_rxpending = NULL;
	/* Re-arm every RX descriptor; bail on the first failure */
	TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
		int error = ath_rxbuf_init(sc, bf);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_RECV,
				"%s: ath_rxbuf_init failed %d\n",
				__func__, error);
			return error;
		}
	}

	bf = TAILQ_FIRST(&sc->sc_rxbuf);
	ath_hal_putrxbuf(ah, bf->bf_daddr);
	ath_hal_rxena(ah);		/* enable recv descriptors */
	ath_mode_init(sc);		/* set filters, etc. */
	ath_hal_startpcurecv(ah);	/* re-enable PCU/DMA engine */
	return 0;
}

/*
 * Update internal state after a channel change.
 */
static void
ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
{
	enum ieee80211_phymode mode;

	/*
	 * Change channels and update the h/w rate map
	 * if we're switching; e.g. 11a to 11b/g.
	 */
	mode = ieee80211_chan2mode(chan);
	if (mode != sc->sc_curmode)
		ath_setcurmode(sc, mode);
	sc->sc_curchan = chan;
}

/*
 * Set/change channels.  If the channel is really being changed,
 * it's done by resetting the chip.  To accomplish this we must
 * first cleanup any pending DMA, then restart stuff after a la
 * ath_init.
 */
static int
ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	int ret = 0;
	int dointr = 0;

	/* Treat this as an interface reset */
	ATH_PCU_LOCK(sc);
	/*
	 * NOTE(review): a failed ath_reset_grablock() is only logged;
	 * the channel change proceeds anyway — confirm this "danger"
	 * path is intentional best-effort behavior.
	 */
	if (ath_reset_grablock(sc, 1) == 0) {
		device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
		    __func__);
	}
	/* Interrupts are disabled only if we are actually switching. */
	if (chan != sc->sc_curchan) {
		dointr = 1;
		/* XXX only do this if inreset_cnt is 1? */
		ath_hal_intrset(ah, 0);
	}
	ATH_PCU_UNLOCK(sc);
	ath_txrx_stop(sc);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
	    __func__, ieee80211_chan2ieee(ic, chan),
	    chan->ic_freq, chan->ic_flags);
	if (chan != sc->sc_curchan) {
		HAL_STATUS status;
		/*
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
#if 0
		ath_hal_intrset(ah, 0);		/* disable interrupts */
#endif
		ath_stoprecv(sc, 1);		/* turn off frame recv */
		/*
		 * First, handle completed TX/RX frames.
		 */
		ath_rx_proc(sc, 0);
		ath_draintxq(sc, ATH_RESET_NOLOSS);
		/*
		 * Next, flush the non-scheduled frames.
		 */
		ath_draintxq(sc, ATH_RESET_FULL);	/* clear pending tx frames */

		if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) {
			if_printf(ifp, "%s: unable to reset "
			    "channel %u (%u MHz, flags 0x%x), hal status %u\n",
			    __func__, ieee80211_chan2ieee(ic, chan),
			    chan->ic_freq, chan->ic_flags, status);
			ret = EIO;
			goto finish;
		}
		sc->sc_diversity = ath_hal_getdiversity(ah);

		/* Let DFS at it in case it's a DFS channel */
		ath_dfs_radar_enable(sc, ic->ic_curchan);

		/*
		 * Re-enable rx framework.
		 */
		if (ath_startrecv(sc) != 0) {
			if_printf(ifp, "%s: unable to restart recv logic\n",
			    __func__);
			ret = EIO;
			goto finish;
		}

		/*
		 * Change channels and update the h/w rate map
		 * if we're switching; e.g. 11a to 11b/g.
		 */
		ath_chan_change(sc, chan);

		/*
		 * Reset clears the beacon timers; reset them
		 * here if needed.
		 */
		if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma)
				ath_tdma_config(sc, NULL);
			else
#endif
			ath_beacon_config(sc, NULL);
		}

#if 0
		/*
		 * Re-enable interrupts.
		 */
		ath_hal_intrset(ah, sc->sc_imask);
#endif
	}

finish:
	/*
	 * Undo the reset "grab" taken above and re-enable interrupts
	 * iff we disabled them; then let TX/RX resume.  Reached on
	 * both success and the EIO error paths.
	 */
	ATH_PCU_LOCK(sc);
	sc->sc_inreset_cnt--;
	/* XXX only do this if sc_inreset_cnt == 0? */
	if (dointr)
		ath_hal_intrset(ah, sc->sc_imask);
	ATH_PCU_UNLOCK(sc);

	/* XXX do this inside of IF_LOCK? */
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ath_txrx_start(sc);
	/* XXX ath_start? */

	return ret;
}

/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
5485 */ 5486static void 5487ath_calibrate(void *arg) 5488{ 5489 struct ath_softc *sc = arg; 5490 struct ath_hal *ah = sc->sc_ah; 5491 struct ifnet *ifp = sc->sc_ifp; 5492 struct ieee80211com *ic = ifp->if_l2com; 5493 HAL_BOOL longCal, isCalDone; 5494 HAL_BOOL aniCal, shortCal = AH_FALSE; 5495 int nextcal; 5496 5497 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */ 5498 goto restart; 5499 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz); 5500 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000); 5501 if (sc->sc_doresetcal) 5502 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000); 5503 5504 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal); 5505 if (aniCal) { 5506 sc->sc_stats.ast_ani_cal++; 5507 sc->sc_lastani = ticks; 5508 ath_hal_ani_poll(ah, sc->sc_curchan); 5509 } 5510 5511 if (longCal) { 5512 sc->sc_stats.ast_per_cal++; 5513 sc->sc_lastlongcal = ticks; 5514 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 5515 /* 5516 * Rfgain is out of bounds, reset the chip 5517 * to load new gain values. 5518 */ 5519 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 5520 "%s: rfgain change\n", __func__); 5521 sc->sc_stats.ast_per_rfgain++; 5522 /* 5523 * Drop lock - we can't hold it across the 5524 * ath_reset() call. Instead, we'll drop 5525 * out here, do a reset, then reschedule 5526 * the callout. 5527 */ 5528 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 5529 sc->sc_resetcal = 0; 5530 sc->sc_doresetcal = AH_TRUE; 5531 ATH_UNLOCK(sc); 5532 ath_reset(ifp, ATH_RESET_NOLOSS); 5533 ATH_LOCK(sc); 5534 return; 5535 } 5536 /* 5537 * If this long cal is after an idle period, then 5538 * reset the data collection state so we start fresh. 
5539 */ 5540 if (sc->sc_resetcal) { 5541 (void) ath_hal_calreset(ah, sc->sc_curchan); 5542 sc->sc_lastcalreset = ticks; 5543 sc->sc_lastshortcal = ticks; 5544 sc->sc_resetcal = 0; 5545 sc->sc_doresetcal = AH_TRUE; 5546 } 5547 } 5548 5549 /* Only call if we're doing a short/long cal, not for ANI calibration */ 5550 if (shortCal || longCal) { 5551 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) { 5552 if (longCal) { 5553 /* 5554 * Calibrate noise floor data again in case of change. 5555 */ 5556 ath_hal_process_noisefloor(ah); 5557 } 5558 } else { 5559 DPRINTF(sc, ATH_DEBUG_ANY, 5560 "%s: calibration of channel %u failed\n", 5561 __func__, sc->sc_curchan->ic_freq); 5562 sc->sc_stats.ast_per_calfail++; 5563 } 5564 if (shortCal) 5565 sc->sc_lastshortcal = ticks; 5566 } 5567 if (!isCalDone) { 5568restart: 5569 /* 5570 * Use a shorter interval to potentially collect multiple 5571 * data samples required to complete calibration. Once 5572 * we're told the work is done we drop back to a longer 5573 * interval between requests. We're more aggressive doing 5574 * work when operating as an AP to improve operation right 5575 * after startup. 
5576 */ 5577 sc->sc_lastshortcal = ticks; 5578 nextcal = ath_shortcalinterval*hz/1000; 5579 if (sc->sc_opmode != HAL_M_HOSTAP) 5580 nextcal *= 10; 5581 sc->sc_doresetcal = AH_TRUE; 5582 } else { 5583 /* nextcal should be the shortest time for next event */ 5584 nextcal = ath_longcalinterval*hz; 5585 if (sc->sc_lastcalreset == 0) 5586 sc->sc_lastcalreset = sc->sc_lastlongcal; 5587 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz) 5588 sc->sc_resetcal = 1; /* setup reset next trip */ 5589 sc->sc_doresetcal = AH_FALSE; 5590 } 5591 /* ANI calibration may occur more often than short/long/resetcal */ 5592 if (ath_anicalinterval > 0) 5593 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000); 5594 5595 if (nextcal != 0) { 5596 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n", 5597 __func__, nextcal, isCalDone ? "" : "!"); 5598 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc); 5599 } else { 5600 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", 5601 __func__); 5602 /* NB: don't rearm timer */ 5603 } 5604} 5605 5606static void 5607ath_scan_start(struct ieee80211com *ic) 5608{ 5609 struct ifnet *ifp = ic->ic_ifp; 5610 struct ath_softc *sc = ifp->if_softc; 5611 struct ath_hal *ah = sc->sc_ah; 5612 u_int32_t rfilt; 5613 5614 /* XXX calibration timer? 
*/ 5615 5616 sc->sc_scanning = 1; 5617 sc->sc_syncbeacon = 0; 5618 rfilt = ath_calcrxfilter(sc); 5619 ath_hal_setrxfilter(ah, rfilt); 5620 ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0); 5621 5622 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", 5623 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr)); 5624} 5625 5626static void 5627ath_scan_end(struct ieee80211com *ic) 5628{ 5629 struct ifnet *ifp = ic->ic_ifp; 5630 struct ath_softc *sc = ifp->if_softc; 5631 struct ath_hal *ah = sc->sc_ah; 5632 u_int32_t rfilt; 5633 5634 sc->sc_scanning = 0; 5635 rfilt = ath_calcrxfilter(sc); 5636 ath_hal_setrxfilter(ah, rfilt); 5637 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 5638 5639 ath_hal_process_noisefloor(ah); 5640 5641 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 5642 __func__, rfilt, ether_sprintf(sc->sc_curbssid), 5643 sc->sc_curaid); 5644} 5645 5646static void 5647ath_set_channel(struct ieee80211com *ic) 5648{ 5649 struct ifnet *ifp = ic->ic_ifp; 5650 struct ath_softc *sc = ifp->if_softc; 5651 5652 (void) ath_chan_set(sc, ic->ic_curchan); 5653 /* 5654 * If we are returning to our bss channel then mark state 5655 * so the next recv'd beacon's tsf will be used to sync the 5656 * beacon timers. Note that since we only hear beacons in 5657 * sta/ibss mode this has no effect in other operating modes. 5658 */ 5659 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan) 5660 sc->sc_syncbeacon = 1; 5661} 5662 5663/* 5664 * Walk the vap list and check if there any vap's in RUN state. 
 */
static int
ath_isanyrunningvaps(struct ieee80211vap *this)
{
	struct ieee80211com *ic = this->iv_ic;
	struct ieee80211vap *vap;

	IEEE80211_LOCK_ASSERT(ic);

	/* NB: "RUN state or later" — iv_state >= IEEE80211_S_RUN. */
	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
		if (vap != this && vap->iv_state >= IEEE80211_S_RUN)
			return 1;
	}
	return 0;
}

/*
 * 802.11 state machine transition handler for a vap.
 *
 * Wraps the net80211 newstate method (avp->av_newstate): adjusts the
 * RX filter, interrupt mask, LED state, beacon configuration and
 * calibration timer around the transition.  Returns 0 on success or
 * the parent method's error.
 */
static int
ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_node *ni = NULL;
	int i, error, stamode;
	u_int32_t rfilt;
	int csa_run_transition = 0;
	/* LED state indexed by the target 802.11 state. */
	static const HAL_LED_STATE leds[] = {
	    HAL_LED_INIT,	/* IEEE80211_S_INIT */
	    HAL_LED_SCAN,	/* IEEE80211_S_SCAN */
	    HAL_LED_AUTH,	/* IEEE80211_S_AUTH */
	    HAL_LED_ASSOC, 	/* IEEE80211_S_ASSOC */
	    HAL_LED_RUN, 	/* IEEE80211_S_CAC */
	    HAL_LED_RUN, 	/* IEEE80211_S_RUN */
	    HAL_LED_RUN, 	/* IEEE80211_S_CSA */
	    HAL_LED_RUN, 	/* IEEE80211_S_SLEEP */
	};

	DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
		ieee80211_state_name[vap->iv_state],
		ieee80211_state_name[nstate]);

	/* Remember CSA->RUN so the STA case below can force a beacon update. */
	if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN)
		csa_run_transition = 1;

	callout_drain(&sc->sc_cal_ch);
	ath_hal_setledstate(ah, leds[nstate]);	/* set LED */

	if (nstate == IEEE80211_S_SCAN) {
		/*
		 * Scanning: turn off beacon miss and don't beacon.
		 * Mark beacon state so when we reach RUN state we'll
		 * [re]setup beacons.  Unblock the task q thread so
		 * deferred interrupt processing is done.
		 */
		ath_hal_intrset(ah,
		    sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
		sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
		sc->sc_beacons = 0;
		taskqueue_unblock(sc->sc_tq);
	}

	/* Hold a reference on the bss node while we poke at it. */
	ni = ieee80211_ref_node(vap->iv_bss);
	rfilt = ath_calcrxfilter(sc);
	stamode = (vap->iv_opmode == IEEE80211_M_STA ||
		   vap->iv_opmode == IEEE80211_M_AHDEMO ||
		   vap->iv_opmode == IEEE80211_M_IBSS);
	if (stamode && nstate == IEEE80211_S_RUN) {
		sc->sc_curaid = ni->ni_associd;
		IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
		ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
	}
	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
	   __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
	ath_hal_setrxfilter(ah, rfilt);

	/* XXX is this to restore keycache on resume? */
	if (vap->iv_opmode != IEEE80211_M_STA &&
	    (vap->iv_flags & IEEE80211_F_PRIVACY)) {
		for (i = 0; i < IEEE80211_WEP_NKID; i++)
			if (ath_hal_keyisvalid(ah, i))
				ath_hal_keysetmac(ah, i, ni->ni_bssid);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = avp->av_newstate(vap, nstate, arg);
	if (error != 0)
		goto bad;

	if (nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ieee80211_free_node(ni);
		ni = ieee80211_ref_node(vap->iv_bss);

		DPRINTF(sc, ATH_DEBUG_STATE,
		    "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n", __func__,
		    vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid),
		    ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));

		switch (vap->iv_opmode) {
#ifdef IEEE80211_SUPPORT_TDMA
		case IEEE80211_M_AHDEMO:
			if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
				break;
			/* fall thru... */
#endif
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_IBSS:
		case IEEE80211_M_MBSS:
			/*
			 * Allocate and setup the beacon frame.
			 *
			 * Stop any previous beacon DMA.  This may be
			 * necessary, for example, when an ibss merge
			 * causes reconfiguration; there will be a state
			 * transition from RUN->RUN that means we may
			 * be called with beacon transmission active.
			 */
			ath_hal_stoptxdma(ah, sc->sc_bhalq);

			error = ath_beacon_alloc(sc, ni);
			if (error != 0)
				goto bad;
			/*
			 * If joining an adhoc network defer beacon timer
			 * configuration to the next beacon frame so we
			 * have a current TSF to use.  Otherwise we're
			 * starting an ibss/bss so there's no need to delay;
			 * if this is the first vap moving to RUN state, then
			 * beacon state needs to be [re]configured.
			 */
			if (vap->iv_opmode == IEEE80211_M_IBSS &&
			    ni->ni_tstamp.tsf != 0) {
				sc->sc_syncbeacon = 1;
			} else if (!sc->sc_beacons) {
#ifdef IEEE80211_SUPPORT_TDMA
				if (vap->iv_caps & IEEE80211_C_TDMA)
					ath_tdma_config(sc, vap);
				else
#endif
					ath_beacon_config(sc, vap);
				sc->sc_beacons = 1;
			}
			break;
		case IEEE80211_M_STA:
			/*
			 * Defer beacon timer configuration to the next
			 * beacon frame so we have a current TSF to use
			 * (any TSF collected when scanning is likely old).
			 * However if it's due to a CSA -> RUN transition,
			 * force a beacon update so we pick up a lack of
			 * beacons from an AP in CAC and thus force a
			 * scan.
			 */
			sc->sc_syncbeacon = 1;
			if (csa_run_transition)
				ath_beacon_config(sc, vap);
			break;
		case IEEE80211_M_MONITOR:
			/*
			 * Monitor mode vaps have only INIT->RUN and RUN->RUN
			 * transitions so we must re-enable interrupts here to
			 * handle the case of a single monitor mode vap.
			 */
			ath_hal_intrset(ah, sc->sc_imask);
			break;
		case IEEE80211_M_WDS:
			break;
		default:
			break;
		}
		/*
		 * Let the hal process statistics collected during a
		 * scan so it can provide calibrated noise floor data.
		 */
		ath_hal_process_noisefloor(ah);
		/*
		 * Reset rssi stats; maybe not the best place...
		 */
		sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
		/*
		 * Finally, start any timers and the task q thread
		 * (in case we didn't go through SCAN state).
		 */
		if (ath_longcalinterval != 0) {
			/* start periodic recalibration timer */
			callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
		} else {
			DPRINTF(sc, ATH_DEBUG_CALIBRATE,
			    "%s: calibration disabled\n", __func__);
		}
		taskqueue_unblock(sc->sc_tq);
	} else if (nstate == IEEE80211_S_INIT) {
		/*
		 * If there are no vaps left in RUN state then
		 * shutdown host/driver operation:
		 * o disable interrupts
		 * o disable the task queue thread
		 * o mark beacon processing as stopped
		 */
		if (!ath_isanyrunningvaps(vap)) {
			sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
			/* disable interrupts */
			ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
			taskqueue_block(sc->sc_tq);
			sc->sc_beacons = 0;
		}
#ifdef IEEE80211_SUPPORT_TDMA
		ath_hal_setcca(ah, AH_TRUE);
#endif
	}
bad:
	/* Drop the bss node reference taken above (on all paths). */
	ieee80211_free_node(ni);
	return error;
}

/*
 * Allocate a key cache slot to the station so we can
 * setup a mapping from key index to node.  The key cache
 * slot is needed for managing antenna state and for
 * compression when stations do not use crypto.  We do
 * it unilaterally here; if crypto is employed this slot
 * will be reassigned.
5893 */ 5894static void 5895ath_setup_stationkey(struct ieee80211_node *ni) 5896{ 5897 struct ieee80211vap *vap = ni->ni_vap; 5898 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 5899 ieee80211_keyix keyix, rxkeyix; 5900 5901 /* XXX should take a locked ref to vap->iv_bss */ 5902 if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) { 5903 /* 5904 * Key cache is full; we'll fall back to doing 5905 * the more expensive lookup in software. Note 5906 * this also means no h/w compression. 5907 */ 5908 /* XXX msg+statistic */ 5909 } else { 5910 /* XXX locking? */ 5911 ni->ni_ucastkey.wk_keyix = keyix; 5912 ni->ni_ucastkey.wk_rxkeyix = rxkeyix; 5913 /* NB: must mark device key to get called back on delete */ 5914 ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY; 5915 IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr); 5916 /* NB: this will create a pass-thru key entry */ 5917 ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss); 5918 } 5919} 5920 5921/* 5922 * Setup driver-specific state for a newly associated node. 5923 * Note that we're called also on a re-associate, the isnew 5924 * param tells us if this is the first time or not. 
 */
static void
ath_newassoc(struct ieee80211_node *ni, int isnew)
{
	struct ath_node *an = ATH_NODE(ni);
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	const struct ieee80211_txparam *tp = ni->ni_txparms;

	/* Cache multicast/management rate indices for this node. */
	an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
	an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);

	ath_rate_newassoc(sc, an, isnew);
	/*
	 * First association without crypto: give the station a
	 * clear-text key cache slot (see ath_setup_stationkey()).
	 */
	if (isnew &&
	    (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
	    ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
		ath_setup_stationkey(ni);
}

/*
 * net80211 setregdomain hook: push the new country/regdomain
 * channel list down to the HAL.  Returns 0 on success, EINVAL if
 * the HAL rejects the combination.
 */
static int
ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
	int nchans, struct ieee80211_channel chans[])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
	    "%s: rd %u cc %u location %c%s\n",
	    __func__, reg->regdomain, reg->country, reg->location,
	    reg->ecm ? " ecm" : "");

	status = ath_hal_set_channels(ah, chans, nchans,
	    reg->country, reg->regdomain);
	if (status != HAL_OK) {
		DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
		    __func__, status);
		return EINVAL;		/* XXX */
	}

	return 0;
}

/*
 * net80211 getradiocaps hook: report the full channel set the radio
 * supports using the debug SKU and default country.
 */
static void
ath_getradiocaps(struct ieee80211com *ic,
	int maxchans, int *nchans, struct ieee80211_channel chans[])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
	    __func__, SKU_DEBUG, CTRY_DEFAULT);

	/* XXX check return */
	(void) ath_hal_getchannels(ah, chans, maxchans, nchans,
	    HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);

}

/*
 * Populate ic_channels from the EEPROM and record the EEPROM's
 * regdomain/country code.  Returns 0 on success, EINVAL if the HAL
 * cannot produce a channel list.
 */
static int
ath_getchannels(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	/*
	 * Collect channel set based on EEPROM contents.
	 */
	status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
	    &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
	if (status != HAL_OK) {
		if_printf(ifp, "%s: unable to collect channel list from hal, "
		    "status %d\n", __func__, status);
		return EINVAL;
	}
	(void) ath_hal_getregdomain(ah, &sc->sc_eerd);
	ath_hal_getcountrycode(ah, &sc->sc_eecc);	/* NB: cannot fail */
	/* XXX map Atheros sku's to net80211 SKU's */
	/* XXX net80211 types too small */
	ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
	ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
	ic->ic_regdomain.isocc[0] = ' ';	/* XXX don't know */
	ic->ic_regdomain.isocc[1] = ' ';

	ic->ic_regdomain.ecm = 1;
	ic->ic_regdomain.location = 'I';

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
	    "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
	    __func__, sc->sc_eerd, sc->sc_eecc,
	    ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
	    ic->ic_regdomain.location, ic->ic_regdomain.ecm ?
	    " ecm" : "");
	return 0;
}

/*
 * Fetch and cache the HAL rate table for the given phy mode.
 * Returns non-zero if a table exists for the mode, 0 otherwise.
 */
static int
ath_rate_setup(struct ath_softc *sc, u_int mode)
{
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt;

	switch (mode) {
	case IEEE80211_MODE_11A:
		rt = ath_hal_getratetable(ah, HAL_MODE_11A);
		break;
	case IEEE80211_MODE_HALF:
		rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
		break;
	case IEEE80211_MODE_QUARTER:
		rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
		break;
	case IEEE80211_MODE_11B:
		rt = ath_hal_getratetable(ah, HAL_MODE_11B);
		break;
	case IEEE80211_MODE_11G:
		rt = ath_hal_getratetable(ah, HAL_MODE_11G);
		break;
	case IEEE80211_MODE_TURBO_A:
		rt = ath_hal_getratetable(ah, HAL_MODE_108A);
		break;
	case IEEE80211_MODE_TURBO_G:
		rt = ath_hal_getratetable(ah, HAL_MODE_108G);
		break;
	case IEEE80211_MODE_STURBO_A:
		rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
		break;
	case IEEE80211_MODE_11NA:
		rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
		break;
	case IEEE80211_MODE_11NG:
		rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
		break;
	default:
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
			__func__, mode);
		return 0;
	}
	sc->sc_rates[mode] = rt;
	return (rt != NULL);
}

/*
 * Switch the current phy mode: rebuild the rate index map
 * (sc_rixmap), the hardware rate -> descriptor map (sc_hwmap,
 * including LED blink times and radiotap flags) and the protection
 * rate index.
 */
static void
ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	/* NB: on/off times from the Atheros NDIS driver, w/ permission */
	static const struct {
		u_int		rate;		/* tx/rx 802.11 rate */
		u_int16_t	timeOn;		/* LED on time (ms) */
		u_int16_t	timeOff;	/* LED off time (ms) */
	} blinkrates[] = {
		{ 108,  40,  10 },
		{  96,  44,  11 },
		{  72,  50,  13 },
		{  48,  57,  14 },
		{  36,  67,  16 },
		{  24,  80,  20 },
		{  22, 100,  25 },
		{  18, 133,  34 },
		{  12, 160,  40 },
		{  10, 200,  50 },
		{   6, 240,  58 },
		{   4, 267,  66 },
		{   2, 400, 100 },
		{   0, 500, 130 },
		/* XXX half/quarter rates */
	};
	const HAL_RATE_TABLE *rt;
	int i, j;

	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = sc->sc_rates[mode];
	KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
	for (i = 0; i < rt->rateCount; i++) {
		uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy != IEEE80211_T_HT)
			sc->sc_rixmap[ieeerate] = i;
		else
			sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
	}
	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
	for (i = 0; i < N(sc->sc_hwmap); i++) {
		/* Entries beyond the rate table get the default blink. */
		if (i >= rt->rateCount) {
			sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
			sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
			continue;
		}
		sc->sc_hwmap[i].ieeerate =
			rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy == IEEE80211_T_HT)
			sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
		sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
		if (rt->info[i].shortPreamble ||
		    rt->info[i].phy == IEEE80211_T_OFDM)
			sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
		for (j = 0; j < N(blinkrates)-1; j++)
			if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
				break;
		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
		sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
		sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
	}
	sc->sc_currates = rt;
	sc->sc_curmode = mode;
	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 */
	if (mode == IEEE80211_MODE_11G)
		sc->sc_protrix = ath_tx_findrix(sc, 2*2);
	else
		sc->sc_protrix = ath_tx_findrix(sc, 2*1);
	/* NB: caller is responsible for resetting rate control state */
#undef N
}

/*
 * Per-second watchdog callout: when the TX watchdog timer expires,
 * report a hang (if the HAL can identify one) or a plain device
 * timeout and reset the chip.  Runs with ATH_LOCK held; the lock is
 * dropped around ath_reset().
 */
static void
ath_watchdog(void *arg)
{
	struct ath_softc *sc = arg;
	int do_reset = 0;

	if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
		struct ifnet *ifp = sc->sc_ifp;
		uint32_t hangs;

		if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
		    hangs != 0) {
			if_printf(ifp, "%s hang detected (0x%x)\n",
			    hangs & 0xff ? "bb" : "mac", hangs);
		} else
			if_printf(ifp, "device timeout\n");
		do_reset = 1;
		ifp->if_oerrors++;
		sc->sc_stats.ast_watchdog++;
	}

	/*
	 * We can't hold the lock across the ath_reset() call.
	 */
	if (do_reset) {
		ATH_UNLOCK(sc);
		ath_reset(sc->sc_ifp, ATH_RESET_NOLOSS);
		ATH_LOCK(sc);
	}

	callout_schedule(&sc->sc_wd_ch, hz);
}

#ifdef ATH_DIAGAPI
/*
 * Diagnostic interface to the HAL.  This is used by various
 * tools to do things like retrieve register contents for
 * debugging.  The mechanism is intentionally opaque so that
 * it can change frequently w/o concern for compatibility.
 */
static int
ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
{
	struct ath_hal *ah = sc->sc_ah;
	u_int id = ad->ad_id & ATH_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = ad->ad_in_size;
	u_int32_t outsize = ad->ad_out_size;
	int error = 0;

	if (ad->ad_id & ATH_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(ad->ad_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (ad->ad_id & ATH_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
		if (outsize < ad->ad_out_size)
			ad->ad_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, ad->ad_out_data,
					ad->ad_out_size);
	} else {
		error = EINVAL;
	}
bad:
	/* Free only what we allocated; DYN outdata may have been
	 * replaced by a HAL-owned pointer on success paths above. */
	if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
#endif /* ATH_DIAGAPI */

/*
 * ifnet ioctl handler: interface flags, media, driver statistics
 * and (optionally) the HAL diagnostic interface.
 */
static int
ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	const HAL_RATE_TABLE *rt;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		ATH_LOCK(sc);
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call ath_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			ath_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid)
				ath_init(sc);	/* XXX lose error */
		} else {
			ath_stop_locked(ifp);
#ifdef notyet
			/* XXX must wakeup in places like ath_vap_delete */
			if (!sc->sc_invalid)
				ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
#endif
		}
		ATH_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGATHSTATS:
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.ast_tx_packets = ifp->if_opackets;
		sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
		sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
		sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
#ifdef IEEE80211_SUPPORT_TDMA
		sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap);
		sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam);
#endif
		rt = sc->sc_currates;
		sc->sc_stats.ast_tx_rate =
		    rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
		/*
		 * NOTE(review): phy is tested with '&' rather than '==';
		 * confirm IEEE80211_T_HT is intended to work as a flag
		 * here.
		 */
		if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT)
			sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS;
		return copyout(&sc->sc_stats,
		    ifr->ifr_data, sizeof (sc->sc_stats));
	case SIOCZATHSTATS:
		/* Zeroing statistics is a privileged operation. */
		error = priv_check(curthread, PRIV_DRIVER);
		if (error == 0)
			memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
		break;
#ifdef ATH_DIAGAPI
	case SIOCGATHDIAG:
		error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
		break;
	case SIOCGATHPHYERR:
		error = ath_ioctl_phyerr(sc,(struct ath_diag*) ifr);
		break;
#endif
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}

/*
 * Announce various information on device/driver attach.
 */
static void
ath_announce(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;

	if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n",
		ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
		ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
	if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n",
		ah->ah_analog2GhzRev, ah->ah_analog5GhzRev);
	if (bootverbose) {
		int i;
		for (i = 0; i <= WME_AC_VO; i++) {
			struct ath_txq *txq = sc->sc_ac2q[i];
			if_printf(ifp, "Use hw queue %u for %s traffic\n",
				txq->axq_qnum, ieee80211_wme_acnames[i]);
		}
		if_printf(ifp, "Use hw queue %u for CAB traffic\n",
			sc->sc_cabq->axq_qnum);
		if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
	}
	if (ath_rxbuf != ATH_RXBUF)
		if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
	if (ath_txbuf != ATH_TXBUF)
		if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
	if (sc->sc_mcastkey && bootverbose)
		if_printf(ifp, "using multicast key search\n");
}

#ifdef IEEE80211_SUPPORT_TDMA
/*
 * Program the beacon timers (TBTT/DBA/SWBA/ATIM) for TDMA
 * operation.  nexttbtt and bintval are in TU; DBA/SWBA are in
 * 1/8 TU units, hence the <<3.
 */
static void
ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, u_int32_t bintval)
{
	struct ath_hal *ah = sc->sc_ah;
	HAL_BEACON_TIMERS bt;

	bt.bt_intval = bintval | HAL_BEACON_ENA;
	bt.bt_nexttbtt = nexttbtt;
	bt.bt_nextdba = (nexttbtt<<3) - sc->sc_tdmadbaprep;
	bt.bt_nextswba = (nexttbtt<<3) - sc->sc_tdmaswbaprep;
	bt.bt_nextatim = nexttbtt+1;
	/* Enables TBTT, DBA, SWBA timers by default */
	bt.bt_flags = 0;
	ath_hal_beaconsettimers(ah, &bt);
}

/*
 * Calculate the beacon interval.  This is periodic in the
 * superframe for the bss.
We assume each station is configured 6382 * identically wrt transmit rate so the guard time we calculate 6383 * above will be the same on all stations. Note we need to 6384 * factor in the xmit time because the hardware will schedule 6385 * a frame for transmit if the start of the frame is within 6386 * the burst time. When we get hardware that properly kills 6387 * frames in the PCU we can reduce/eliminate the guard time. 6388 * 6389 * Roundup to 1024 is so we have 1 TU buffer in the guard time 6390 * to deal with the granularity of the nexttbtt timer. 11n MAC's 6391 * with 1us timer granularity should allow us to reduce/eliminate 6392 * this. 6393 */ 6394static void 6395ath_tdma_bintvalsetup(struct ath_softc *sc, 6396 const struct ieee80211_tdma_state *tdma) 6397{ 6398 /* copy from vap state (XXX check all vaps have same value?) */ 6399 sc->sc_tdmaslotlen = tdma->tdma_slotlen; 6400 6401 sc->sc_tdmabintval = roundup((sc->sc_tdmaslotlen+sc->sc_tdmaguard) * 6402 tdma->tdma_slotcnt, 1024); 6403 sc->sc_tdmabintval >>= 10; /* TSF -> TU */ 6404 if (sc->sc_tdmabintval & 1) 6405 sc->sc_tdmabintval++; 6406 6407 if (tdma->tdma_slot == 0) { 6408 /* 6409 * Only slot 0 beacons; other slots respond. 6410 */ 6411 sc->sc_imask |= HAL_INT_SWBA; 6412 sc->sc_tdmaswba = 0; /* beacon immediately */ 6413 } else { 6414 /* XXX all vaps must be slot 0 or slot !0 */ 6415 sc->sc_imask &= ~HAL_INT_SWBA; 6416 } 6417} 6418 6419/* 6420 * Max 802.11 overhead. This assumes no 4-address frames and 6421 * the encapsulation done by ieee80211_encap (llc). We also 6422 * include potential crypto overhead. 6423 */ 6424#define IEEE80211_MAXOVERHEAD \ 6425 (sizeof(struct ieee80211_qosframe) \ 6426 + sizeof(struct llc) \ 6427 + IEEE80211_ADDR_LEN \ 6428 + IEEE80211_WEP_IVLEN \ 6429 + IEEE80211_WEP_KIDLEN \ 6430 + IEEE80211_WEP_CRCLEN \ 6431 + IEEE80211_WEP_MICLEN \ 6432 + IEEE80211_CRC_LEN) 6433 6434/* 6435 * Setup initially for tdma operation. 
Start the beacon 6436 * timers and enable SWBA if we are slot 0. Otherwise 6437 * we wait for slot 0 to arrive so we can sync up before 6438 * starting to transmit. 6439 */ 6440static void 6441ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap) 6442{ 6443 struct ath_hal *ah = sc->sc_ah; 6444 struct ifnet *ifp = sc->sc_ifp; 6445 struct ieee80211com *ic = ifp->if_l2com; 6446 const struct ieee80211_txparam *tp; 6447 const struct ieee80211_tdma_state *tdma = NULL; 6448 int rix; 6449 6450 if (vap == NULL) { 6451 vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */ 6452 if (vap == NULL) { 6453 if_printf(ifp, "%s: no vaps?\n", __func__); 6454 return; 6455 } 6456 } 6457 /* XXX should take a locked ref to iv_bss */ 6458 tp = vap->iv_bss->ni_txparms; 6459 /* 6460 * Calculate the guard time for each slot. This is the 6461 * time to send a maximal-size frame according to the 6462 * fixed/lowest transmit rate. Note that the interface 6463 * mtu does not include the 802.11 overhead so we must 6464 * tack that on (ath_hal_computetxtime includes the 6465 * preamble and plcp in it's calculation). 
6466 */ 6467 tdma = vap->iv_tdma; 6468 if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 6469 rix = ath_tx_findrix(sc, tp->ucastrate); 6470 else 6471 rix = ath_tx_findrix(sc, tp->mcastrate); 6472 /* XXX short preamble assumed */ 6473 sc->sc_tdmaguard = ath_hal_computetxtime(ah, sc->sc_currates, 6474 ifp->if_mtu + IEEE80211_MAXOVERHEAD, rix, AH_TRUE); 6475 6476 ath_hal_intrset(ah, 0); 6477 6478 ath_beaconq_config(sc); /* setup h/w beacon q */ 6479 if (sc->sc_setcca) 6480 ath_hal_setcca(ah, AH_FALSE); /* disable CCA */ 6481 ath_tdma_bintvalsetup(sc, tdma); /* calculate beacon interval */ 6482 ath_tdma_settimers(sc, sc->sc_tdmabintval, 6483 sc->sc_tdmabintval | HAL_BEACON_RESET_TSF); 6484 sc->sc_syncbeacon = 0; 6485 6486 sc->sc_avgtsfdeltap = TDMA_DUMMY_MARKER; 6487 sc->sc_avgtsfdeltam = TDMA_DUMMY_MARKER; 6488 6489 ath_hal_intrset(ah, sc->sc_imask); 6490 6491 DPRINTF(sc, ATH_DEBUG_TDMA, "%s: slot %u len %uus cnt %u " 6492 "bsched %u guard %uus bintval %u TU dba prep %u\n", __func__, 6493 tdma->tdma_slot, tdma->tdma_slotlen, tdma->tdma_slotcnt, 6494 tdma->tdma_bintval, sc->sc_tdmaguard, sc->sc_tdmabintval, 6495 sc->sc_tdmadbaprep); 6496} 6497 6498/* 6499 * Update tdma operation. Called from the 802.11 layer 6500 * when a beacon is received from the TDMA station operating 6501 * in the slot immediately preceding us in the bss. Use 6502 * the rx timestamp for the beacon frame to update our 6503 * beacon timers so we follow their schedule. Note that 6504 * by using the rx timestamp we implicitly include the 6505 * propagation delay in our schedule. 
6506 */ 6507static void 6508ath_tdma_update(struct ieee80211_node *ni, 6509 const struct ieee80211_tdma_param *tdma, int changed) 6510{ 6511#define TSF_TO_TU(_h,_l) \ 6512 ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10)) 6513#define TU_TO_TSF(_tu) (((u_int64_t)(_tu)) << 10) 6514 struct ieee80211vap *vap = ni->ni_vap; 6515 struct ieee80211com *ic = ni->ni_ic; 6516 struct ath_softc *sc = ic->ic_ifp->if_softc; 6517 struct ath_hal *ah = sc->sc_ah; 6518 const HAL_RATE_TABLE *rt = sc->sc_currates; 6519 u_int64_t tsf, rstamp, nextslot, nexttbtt; 6520 u_int32_t txtime, nextslottu; 6521 int32_t tudelta, tsfdelta; 6522 const struct ath_rx_status *rs; 6523 int rix; 6524 6525 sc->sc_stats.ast_tdma_update++; 6526 6527 /* 6528 * Check for and adopt configuration changes. 6529 */ 6530 if (changed != 0) { 6531 const struct ieee80211_tdma_state *ts = vap->iv_tdma; 6532 6533 ath_tdma_bintvalsetup(sc, ts); 6534 if (changed & TDMA_UPDATE_SLOTLEN) 6535 ath_wme_update(ic); 6536 6537 DPRINTF(sc, ATH_DEBUG_TDMA, 6538 "%s: adopt slot %u slotcnt %u slotlen %u us " 6539 "bintval %u TU\n", __func__, 6540 ts->tdma_slot, ts->tdma_slotcnt, ts->tdma_slotlen, 6541 sc->sc_tdmabintval); 6542 6543 /* XXX right? */ 6544 ath_hal_intrset(ah, sc->sc_imask); 6545 /* NB: beacon timers programmed below */ 6546 } 6547 6548 /* extend rx timestamp to 64 bits */ 6549 rs = sc->sc_lastrs; 6550 tsf = ath_hal_gettsf64(ah); 6551 rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf); 6552 /* 6553 * The rx timestamp is set by the hardware on completing 6554 * reception (at the point where the rx descriptor is DMA'd 6555 * to the host). To find the start of our next slot we 6556 * must adjust this time by the time required to send 6557 * the packet just received. 
6558 */ 6559 rix = rt->rateCodeToIndex[rs->rs_rate]; 6560 txtime = ath_hal_computetxtime(ah, rt, rs->rs_datalen, rix, 6561 rt->info[rix].shortPreamble); 6562 /* NB: << 9 is to cvt to TU and /2 */ 6563 nextslot = (rstamp - txtime) + (sc->sc_tdmabintval << 9); 6564 nextslottu = TSF_TO_TU(nextslot>>32, nextslot) & HAL_BEACON_PERIOD; 6565 6566 /* 6567 * Retrieve the hardware NextTBTT in usecs 6568 * and calculate the difference between what the 6569 * other station thinks and what we have programmed. This 6570 * lets us figure how to adjust our timers to match. The 6571 * adjustments are done by pulling the TSF forward and possibly 6572 * rewriting the beacon timers. 6573 */ 6574 nexttbtt = ath_hal_getnexttbtt(ah); 6575 tsfdelta = (int32_t)((nextslot % TU_TO_TSF(HAL_BEACON_PERIOD + 1)) - nexttbtt); 6576 6577 DPRINTF(sc, ATH_DEBUG_TDMA_TIMER, 6578 "tsfdelta %d avg +%d/-%d\n", tsfdelta, 6579 TDMA_AVG(sc->sc_avgtsfdeltap), TDMA_AVG(sc->sc_avgtsfdeltam)); 6580 6581 if (tsfdelta < 0) { 6582 TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0); 6583 TDMA_SAMPLE(sc->sc_avgtsfdeltam, -tsfdelta); 6584 tsfdelta = -tsfdelta % 1024; 6585 nextslottu++; 6586 } else if (tsfdelta > 0) { 6587 TDMA_SAMPLE(sc->sc_avgtsfdeltap, tsfdelta); 6588 TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0); 6589 tsfdelta = 1024 - (tsfdelta % 1024); 6590 nextslottu++; 6591 } else { 6592 TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0); 6593 TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0); 6594 } 6595 tudelta = nextslottu - TSF_TO_TU(nexttbtt >> 32, nexttbtt); 6596 6597 /* 6598 * Copy sender's timetstamp into tdma ie so they can 6599 * calculate roundtrip time. We submit a beacon frame 6600 * below after any timer adjustment. The frame goes out 6601 * at the next TBTT so the sender can calculate the 6602 * roundtrip by inspecting the tdma ie in our beacon frame. 6603 * 6604 * NB: This tstamp is subtlely preserved when 6605 * IEEE80211_BEACON_TDMA is marked (e.g. when the 6606 * slot position changes) because ieee80211_add_tdma 6607 * skips over the data. 
6608 */ 6609 memcpy(ATH_VAP(vap)->av_boff.bo_tdma + 6610 __offsetof(struct ieee80211_tdma_param, tdma_tstamp), 6611 &ni->ni_tstamp.data, 8); 6612#if 0 6613 DPRINTF(sc, ATH_DEBUG_TDMA_TIMER, 6614 "tsf %llu nextslot %llu (%d, %d) nextslottu %u nexttbtt %llu (%d)\n", 6615 (unsigned long long) tsf, (unsigned long long) nextslot, 6616 (int)(nextslot - tsf), tsfdelta, nextslottu, nexttbtt, tudelta); 6617#endif 6618 /* 6619 * Adjust the beacon timers only when pulling them forward 6620 * or when going back by less than the beacon interval. 6621 * Negative jumps larger than the beacon interval seem to 6622 * cause the timers to stop and generally cause instability. 6623 * This basically filters out jumps due to missed beacons. 6624 */ 6625 if (tudelta != 0 && (tudelta > 0 || -tudelta < sc->sc_tdmabintval)) { 6626 ath_tdma_settimers(sc, nextslottu, sc->sc_tdmabintval); 6627 sc->sc_stats.ast_tdma_timers++; 6628 } 6629 if (tsfdelta > 0) { 6630 ath_hal_adjusttsf(ah, tsfdelta); 6631 sc->sc_stats.ast_tdma_tsf++; 6632 } 6633 ath_tdma_beacon_send(sc, vap); /* prepare response */ 6634#undef TU_TO_TSF 6635#undef TSF_TO_TU 6636} 6637 6638/* 6639 * Transmit a beacon frame at SWBA. Dynamic updates 6640 * to the frame contents are done as needed. 6641 */ 6642static void 6643ath_tdma_beacon_send(struct ath_softc *sc, struct ieee80211vap *vap) 6644{ 6645 struct ath_hal *ah = sc->sc_ah; 6646 struct ath_buf *bf; 6647 int otherant; 6648 6649 /* 6650 * Check if the previous beacon has gone out. If 6651 * not don't try to post another, skip this period 6652 * and wait for the next. Missed beacons indicate 6653 * a problem and should not occur. If we miss too 6654 * many consecutive beacons reset the device. 
6655 */ 6656 if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) { 6657 sc->sc_bmisscount++; 6658 DPRINTF(sc, ATH_DEBUG_BEACON, 6659 "%s: missed %u consecutive beacons\n", 6660 __func__, sc->sc_bmisscount); 6661 if (sc->sc_bmisscount >= ath_bstuck_threshold) 6662 taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask); 6663 return; 6664 } 6665 if (sc->sc_bmisscount != 0) { 6666 DPRINTF(sc, ATH_DEBUG_BEACON, 6667 "%s: resume beacon xmit after %u misses\n", 6668 __func__, sc->sc_bmisscount); 6669 sc->sc_bmisscount = 0; 6670 } 6671 6672 /* 6673 * Check recent per-antenna transmit statistics and flip 6674 * the default antenna if noticeably more frames went out 6675 * on the non-default antenna. 6676 * XXX assumes 2 anntenae 6677 */ 6678 if (!sc->sc_diversity) { 6679 otherant = sc->sc_defant & 1 ? 2 : 1; 6680 if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2) 6681 ath_setdefantenna(sc, otherant); 6682 sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0; 6683 } 6684 6685 bf = ath_beacon_generate(sc, vap); 6686 if (bf != NULL) { 6687 /* 6688 * Stop any current dma and put the new frame on the queue. 6689 * This should never fail since we check above that no frames 6690 * are still pending on the queue. 6691 */ 6692 if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) { 6693 DPRINTF(sc, ATH_DEBUG_ANY, 6694 "%s: beacon queue %u did not stop?\n", 6695 __func__, sc->sc_bhalq); 6696 /* NB: the HAL still stops DMA, so proceed */ 6697 } 6698 ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr); 6699 ath_hal_txstart(ah, sc->sc_bhalq); 6700 6701 sc->sc_stats.ast_be_xmit++; /* XXX per-vap? */ 6702 6703 /* 6704 * Record local TSF for our last send for use 6705 * in arbitrating slot collisions. 
6706 */ 6707 /* XXX should take a locked ref to iv_bss */ 6708 vap->iv_bss->ni_tstamp.tsf = ath_hal_gettsf64(ah); 6709 } 6710} 6711#endif /* IEEE80211_SUPPORT_TDMA */ 6712 6713static void 6714ath_dfs_tasklet(void *p, int npending) 6715{ 6716 struct ath_softc *sc = (struct ath_softc *) p; 6717 struct ifnet *ifp = sc->sc_ifp; 6718 struct ieee80211com *ic = ifp->if_l2com; 6719 6720 /* 6721 * If previous processing has found a radar event, 6722 * signal this to the net80211 layer to begin DFS 6723 * processing. 6724 */ 6725 if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) { 6726 /* DFS event found, initiate channel change */ 6727 /* 6728 * XXX doesn't currently tell us whether the event 6729 * XXX was found in the primary or extension 6730 * XXX channel! 6731 */ 6732 IEEE80211_LOCK(ic); 6733 ieee80211_dfs_notify_radar(ic, sc->sc_curchan); 6734 IEEE80211_UNLOCK(ic); 6735 } 6736} 6737 6738MODULE_VERSION(if_ath, 1); 6739MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */ 6740