if_ath.c revision 140428
1/*- 2 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 13 * redistribution must be conditioned upon including a substantially 14 * similar Disclaimer requirement for further binary redistribution. 15 * 3. Neither the names of the above-listed copyright holders nor the names 16 * of any contributors may be used to endorse or promote products derived 17 * from this software without specific prior written permission. 18 * 19 * Alternatively, this software may be distributed under the terms of the 20 * GNU General Public License ("GPL") version 2 as published by the Free 21 * Software Foundation. 22 * 23 * NO WARRANTY 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 26 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 27 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 28 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 29 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 32 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 34 * THE POSSIBILITY OF SUCH DAMAGES. 
35 */ 36 37#include <sys/cdefs.h> 38__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 140428 2005-01-18 18:11:04Z sam $"); 39 40/* 41 * Driver for the Atheros Wireless LAN controller. 42 * 43 * This software is derived from work of Atsushi Onoe; his contribution 44 * is greatly appreciated. 45 */ 46 47#include "opt_inet.h" 48 49#include <sys/param.h> 50#include <sys/systm.h> 51#include <sys/sysctl.h> 52#include <sys/mbuf.h> 53#include <sys/malloc.h> 54#include <sys/lock.h> 55#include <sys/mutex.h> 56#include <sys/kernel.h> 57#include <sys/socket.h> 58#include <sys/sockio.h> 59#include <sys/errno.h> 60#include <sys/callout.h> 61#include <sys/bus.h> 62#include <sys/endian.h> 63 64#include <machine/bus.h> 65 66#include <net/if.h> 67#include <net/if_dl.h> 68#include <net/if_media.h> 69#include <net/if_arp.h> 70#include <net/ethernet.h> 71#include <net/if_llc.h> 72 73#include <net80211/ieee80211_var.h> 74 75#include <net/bpf.h> 76 77#ifdef INET 78#include <netinet/in.h> 79#include <netinet/if_ether.h> 80#endif 81 82#define AR_DEBUG 83#include <dev/ath/if_athvar.h> 84#include <contrib/dev/ath/ah_desc.h> 85#include <contrib/dev/ath/ah_devid.h> /* XXX for softled */ 86 87/* unalligned little endian access */ 88#define LE_READ_2(p) \ 89 ((u_int16_t) \ 90 ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8))) 91#define LE_READ_4(p) \ 92 ((u_int32_t) \ 93 ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8) | \ 94 (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24))) 95 96static void ath_init(void *); 97static void ath_stop_locked(struct ifnet *); 98static void ath_stop(struct ifnet *); 99static void ath_start(struct ifnet *); 100static int ath_reset(struct ifnet *); 101static int ath_media_change(struct ifnet *); 102static void ath_watchdog(struct ifnet *); 103static int ath_ioctl(struct ifnet *, u_long, caddr_t); 104static void ath_fatal_proc(void *, int); 105static void ath_rxorn_proc(void *, int); 106static void ath_bmiss_proc(void *, int); 107static void 
ath_initkeytable(struct ath_softc *); 108static int ath_key_alloc(struct ieee80211com *, 109 const struct ieee80211_key *); 110static int ath_key_delete(struct ieee80211com *, 111 const struct ieee80211_key *); 112static int ath_key_set(struct ieee80211com *, const struct ieee80211_key *, 113 const u_int8_t mac[IEEE80211_ADDR_LEN]); 114static void ath_key_update_begin(struct ieee80211com *); 115static void ath_key_update_end(struct ieee80211com *); 116static void ath_mode_init(struct ath_softc *); 117static void ath_setslottime(struct ath_softc *); 118static void ath_updateslot(struct ifnet *); 119static int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *); 120static void ath_beacon_setup(struct ath_softc *, struct ath_buf *); 121static void ath_beacon_proc(void *, int); 122static void ath_bstuck_proc(void *, int); 123static void ath_beacon_free(struct ath_softc *); 124static void ath_beacon_config(struct ath_softc *); 125static void ath_descdma_cleanup(struct ath_softc *sc, 126 struct ath_descdma *, ath_bufhead *); 127static int ath_desc_alloc(struct ath_softc *); 128static void ath_desc_free(struct ath_softc *); 129static struct ieee80211_node *ath_node_alloc(struct ieee80211_node_table *); 130static void ath_node_free(struct ieee80211_node *); 131static u_int8_t ath_node_getrssi(const struct ieee80211_node *); 132static int ath_rxbuf_init(struct ath_softc *, struct ath_buf *); 133static void ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m, 134 struct ieee80211_node *ni, 135 int subtype, int rssi, u_int32_t rstamp); 136static void ath_setdefantenna(struct ath_softc *, u_int); 137static void ath_rx_proc(void *, int); 138static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype); 139static int ath_tx_setup(struct ath_softc *, int, int); 140static int ath_wme_update(struct ieee80211com *); 141static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *); 142static void ath_tx_cleanup(struct ath_softc *); 143static int 
ath_tx_start(struct ath_softc *, struct ieee80211_node *, 144 struct ath_buf *, struct mbuf *); 145static void ath_tx_proc_q0(void *, int); 146static void ath_tx_proc_q0123(void *, int); 147static void ath_tx_proc(void *, int); 148static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); 149static void ath_draintxq(struct ath_softc *); 150static void ath_stoprecv(struct ath_softc *); 151static int ath_startrecv(struct ath_softc *); 152static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *); 153static void ath_next_scan(void *); 154static void ath_calibrate(void *); 155static int ath_newstate(struct ieee80211com *, enum ieee80211_state, int); 156static void ath_newassoc(struct ieee80211com *, 157 struct ieee80211_node *, int); 158static int ath_getchannels(struct ath_softc *, u_int cc, 159 HAL_BOOL outdoor, HAL_BOOL xchanmode); 160static void ath_update_led(struct ath_softc *); 161static void ath_update_txpow(struct ath_softc *); 162 163static int ath_rate_setup(struct ath_softc *, u_int mode); 164static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); 165 166static void ath_sysctlattach(struct ath_softc *); 167static void ath_bpfattach(struct ath_softc *); 168static void ath_announce(struct ath_softc *); 169 170SYSCTL_DECL(_hw_ath); 171 172/* XXX validate sysctl values */ 173static int ath_dwelltime = 200; /* 5 channels/second */ 174SYSCTL_INT(_hw_ath, OID_AUTO, dwell, CTLFLAG_RW, &ath_dwelltime, 175 0, "channel dwell time (ms) for AP/station scanning"); 176static int ath_calinterval = 30; /* calibrate every 30 secs */ 177SYSCTL_INT(_hw_ath, OID_AUTO, calibrate, CTLFLAG_RW, &ath_calinterval, 178 0, "chip calibration interval (secs)"); 179static int ath_outdoor = AH_TRUE; /* outdoor operation */ 180SYSCTL_INT(_hw_ath, OID_AUTO, outdoor, CTLFLAG_RD, &ath_outdoor, 181 0, "outdoor operation"); 182TUNABLE_INT("hw.ath.outdoor", &ath_outdoor); 183static int ath_xchanmode = AH_TRUE; /* extended channel use */ 
184SYSCTL_INT(_hw_ath, OID_AUTO, xchanmode, CTLFLAG_RD, &ath_xchanmode, 185 0, "extended channel mode"); 186TUNABLE_INT("hw.ath.xchanmode", &ath_xchanmode); 187static int ath_countrycode = CTRY_DEFAULT; /* country code */ 188SYSCTL_INT(_hw_ath, OID_AUTO, countrycode, CTLFLAG_RD, &ath_countrycode, 189 0, "country code"); 190TUNABLE_INT("hw.ath.countrycode", &ath_countrycode); 191static int ath_regdomain = 0; /* regulatory domain */ 192SYSCTL_INT(_hw_ath, OID_AUTO, regdomain, CTLFLAG_RD, &ath_regdomain, 193 0, "regulatory domain"); 194 195#ifdef AR_DEBUG 196static int ath_debug = 0; 197SYSCTL_INT(_hw_ath, OID_AUTO, debug, CTLFLAG_RW, &ath_debug, 198 0, "control debugging printfs"); 199TUNABLE_INT("hw.ath.debug", &ath_debug); 200enum { 201 ATH_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ 202 ATH_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */ 203 ATH_DEBUG_RECV = 0x00000004, /* basic recv operation */ 204 ATH_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */ 205 ATH_DEBUG_RATE = 0x00000010, /* rate control */ 206 ATH_DEBUG_RESET = 0x00000020, /* reset processing */ 207 ATH_DEBUG_MODE = 0x00000040, /* mode init/setup */ 208 ATH_DEBUG_BEACON = 0x00000080, /* beacon handling */ 209 ATH_DEBUG_WATCHDOG = 0x00000100, /* watchdog timeout */ 210 ATH_DEBUG_INTR = 0x00001000, /* ISR */ 211 ATH_DEBUG_TX_PROC = 0x00002000, /* tx ISR proc */ 212 ATH_DEBUG_RX_PROC = 0x00004000, /* rx ISR proc */ 213 ATH_DEBUG_BEACON_PROC = 0x00008000, /* beacon ISR proc */ 214 ATH_DEBUG_CALIBRATE = 0x00010000, /* periodic calibration */ 215 ATH_DEBUG_KEYCACHE = 0x00020000, /* key cache management */ 216 ATH_DEBUG_STATE = 0x00040000, /* 802.11 state transitions */ 217 ATH_DEBUG_NODE = 0x00080000, /* node management */ 218 ATH_DEBUG_FATAL = 0x80000000, /* fatal errors */ 219 ATH_DEBUG_ANY = 0xffffffff 220}; 221#define IFF_DUMPPKTS(sc, m) \ 222 ((sc->sc_debug & (m)) || \ 223 (sc->sc_if.if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2)) 224#define DPRINTF(sc, m, fmt, ...) 
do { \ 225 if (sc->sc_debug & (m)) \ 226 printf(fmt, __VA_ARGS__); \ 227} while (0) 228#define KEYPRINTF(sc, ix, hk, mac) do { \ 229 if (sc->sc_debug & ATH_DEBUG_KEYCACHE) \ 230 ath_keyprint(__func__, ix, hk, mac); \ 231} while (0) 232static void ath_printrxbuf(struct ath_buf *bf, int); 233static void ath_printtxbuf(struct ath_buf *bf, int); 234#else 235#define IFF_DUMPPKTS(sc, m) \ 236 ((sc->sc_if.if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2)) 237#define DPRINTF(m, fmt, ...) 238#define KEYPRINTF(sc, k, ix, mac) 239#endif 240 241MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers"); 242 243int 244ath_attach(u_int16_t devid, struct ath_softc *sc) 245{ 246 struct ifnet *ifp = &sc->sc_if; 247 struct ieee80211com *ic = &sc->sc_ic; 248 struct ath_hal *ah; 249 HAL_STATUS status; 250 int error = 0, i; 251 252 DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid); 253 254 /* set these up early for if_printf use */ 255 if_initname(ifp, device_get_name(sc->sc_dev), 256 device_get_unit(sc->sc_dev)); 257 258 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, &status); 259 if (ah == NULL) { 260 if_printf(ifp, "unable to attach hardware; HAL status %u\n", 261 status); 262 error = ENXIO; 263 goto bad; 264 } 265 if (ah->ah_abi != HAL_ABI_VERSION) { 266 if_printf(ifp, "HAL ABI mismatch detected " 267 "(HAL:0x%x != driver:0x%x)\n", 268 ah->ah_abi, HAL_ABI_VERSION); 269 error = ENXIO; 270 goto bad; 271 } 272 sc->sc_ah = ah; 273 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */ 274 275 /* 276 * Check if the MAC has multi-rate retry support. 277 * We do this by trying to setup a fake extended 278 * descriptor. MAC's that don't have support will 279 * return false w/o doing anything. MAC's that do 280 * support it will return true w/o doing anything. 281 */ 282 sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0); 283 284 /* 285 * Check if the device has hardware counters for PHY 286 * errors. 
If so we need to enable the MIB interrupt 287 * so we can act on stat triggers. 288 */ 289 if (ath_hal_hwphycounters(ah)) 290 sc->sc_needmib = 1; 291 292 /* 293 * Get the hardware key cache size. 294 */ 295 sc->sc_keymax = ath_hal_keycachesize(ah); 296 if (sc->sc_keymax > sizeof(sc->sc_keymap) * NBBY) { 297 if_printf(ifp, 298 "Warning, using only %zu of %u key cache slots\n", 299 sizeof(sc->sc_keymap) * NBBY, sc->sc_keymax); 300 sc->sc_keymax = sizeof(sc->sc_keymap) * NBBY; 301 } 302 /* 303 * Reset the key cache since some parts do not 304 * reset the contents on initial power up. 305 */ 306 for (i = 0; i < sc->sc_keymax; i++) 307 ath_hal_keyreset(ah, i); 308 /* 309 * Mark key cache slots associated with global keys 310 * as in use. If we knew TKIP was not to be used we 311 * could leave the +32, +64, and +32+64 slots free. 312 * XXX only for splitmic. 313 */ 314 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 315 setbit(sc->sc_keymap, i); 316 setbit(sc->sc_keymap, i+32); 317 setbit(sc->sc_keymap, i+64); 318 setbit(sc->sc_keymap, i+32+64); 319 } 320 321 /* 322 * Collect the channel list using the default country 323 * code and including outdoor channels. The 802.11 layer 324 * is resposible for filtering this list based on settings 325 * like the phy mode. 326 */ 327 error = ath_getchannels(sc, ath_countrycode, 328 ath_outdoor, ath_xchanmode); 329 if (error != 0) 330 goto bad; 331 /* 332 * Setup dynamic sysctl's now that country code and 333 * regdomain are available from the hal. 334 */ 335 ath_sysctlattach(sc); 336 337 /* 338 * Setup rate tables for all potential media types. 339 */ 340 ath_rate_setup(sc, IEEE80211_MODE_11A); 341 ath_rate_setup(sc, IEEE80211_MODE_11B); 342 ath_rate_setup(sc, IEEE80211_MODE_11G); 343 ath_rate_setup(sc, IEEE80211_MODE_TURBO_A); 344 ath_rate_setup(sc, IEEE80211_MODE_TURBO_G); 345 /* NB: setup here so ath_rate_update is happy */ 346 ath_setcurmode(sc, IEEE80211_MODE_11A); 347 348 /* 349 * Allocate tx+rx descriptors and populate the lists. 
350 */ 351 error = ath_desc_alloc(sc); 352 if (error != 0) { 353 if_printf(ifp, "failed to allocate descriptors: %d\n", error); 354 goto bad; 355 } 356 callout_init(&sc->sc_scan_ch, debug_mpsafenet ? CALLOUT_MPSAFE : 0); 357 callout_init(&sc->sc_cal_ch, CALLOUT_MPSAFE); 358 359 ATH_TXBUF_LOCK_INIT(sc); 360 361 TASK_INIT(&sc->sc_rxtask, 0, ath_rx_proc, sc); 362 TASK_INIT(&sc->sc_rxorntask, 0, ath_rxorn_proc, sc); 363 TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc); 364 TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc); 365 TASK_INIT(&sc->sc_bstucktask, 0, ath_bstuck_proc, sc); 366 367 /* 368 * Allocate hardware transmit queues: one queue for 369 * beacon frames and one data queue for each QoS 370 * priority. Note that the hal handles reseting 371 * these queues at the needed time. 372 * 373 * XXX PS-Poll 374 */ 375 sc->sc_bhalq = ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, NULL); 376 if (sc->sc_bhalq == (u_int) -1) { 377 if_printf(ifp, "unable to setup a beacon xmit queue!\n"); 378 error = EIO; 379 goto bad2; 380 } 381 sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0); 382 if (sc->sc_cabq == NULL) { 383 if_printf(ifp, "unable to setup CAB xmit queue!\n"); 384 error = EIO; 385 goto bad2; 386 } 387 /* NB: insure BK queue is the lowest priority h/w queue */ 388 if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) { 389 if_printf(ifp, "unable to setup xmit queue for %s traffic!\n", 390 ieee80211_wme_acnames[WME_AC_BK]); 391 error = EIO; 392 goto bad2; 393 } 394 if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) || 395 !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) || 396 !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) { 397 /* 398 * Not enough hardware tx queues to properly do WME; 399 * just punt and assign them all to the same h/w queue. 400 * We could do a better job of this if, for example, 401 * we allocate queues when we switch from station to 402 * AP mode. 
403 */ 404 if (sc->sc_ac2q[WME_AC_VI] != NULL) 405 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); 406 if (sc->sc_ac2q[WME_AC_BE] != NULL) 407 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); 408 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; 409 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; 410 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; 411 } 412 413 /* 414 * Special case certain configurations. Note the 415 * CAB queue is handled by these specially so don't 416 * include them when checking the txq setup mask. 417 */ 418 switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) { 419 case 0x01: 420 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc); 421 break; 422 case 0x0f: 423 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc); 424 break; 425 default: 426 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc); 427 break; 428 } 429 430 /* 431 * Setup rate control. Some rate control modules 432 * call back to change the anntena state so expose 433 * the necessary entry points. 434 * XXX maybe belongs in struct ath_ratectrl? 435 */ 436 sc->sc_setdefantenna = ath_setdefantenna; 437 sc->sc_rc = ath_rate_attach(sc); 438 if (sc->sc_rc == NULL) { 439 error = EIO; 440 goto bad2; 441 } 442 443 sc->sc_ledstate = 1; 444 /* 445 * Auto-enable soft led processing for IBM cards and for 446 * 5211 minipci cards. Users can also manually enable/disable 447 * support with a sysctl. 
448 */ 449 sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID); 450 if (sc->sc_softled) { 451 ath_hal_gpioCfgOutput(ah, sc->sc_ledpin); 452 ath_hal_gpioset(ah, sc->sc_ledpin, 0); 453 } 454 455 ifp->if_softc = sc; 456 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; 457 ifp->if_start = ath_start; 458 ifp->if_watchdog = ath_watchdog; 459 ifp->if_ioctl = ath_ioctl; 460 ifp->if_init = ath_init; 461 IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); 462 ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; 463 IFQ_SET_READY(&ifp->if_snd); 464 465 ic->ic_ifp = ifp; 466 ic->ic_reset = ath_reset; 467 ic->ic_newassoc = ath_newassoc; 468 ic->ic_updateslot = ath_updateslot; 469 ic->ic_wme.wme_update = ath_wme_update; 470 /* XXX not right but it's not used anywhere important */ 471 ic->ic_phytype = IEEE80211_T_OFDM; 472 ic->ic_opmode = IEEE80211_M_STA; 473 ic->ic_caps = 474 IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ 475 | IEEE80211_C_HOSTAP /* hostap mode */ 476 | IEEE80211_C_MONITOR /* monitor mode */ 477 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 478 | IEEE80211_C_SHSLOT /* short slot time supported */ 479 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ 480 ; 481 /* 482 * Query the hal to figure out h/w crypto support. 483 */ 484 if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP)) 485 ic->ic_caps |= IEEE80211_C_WEP; 486 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB)) 487 ic->ic_caps |= IEEE80211_C_AES; 488 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM)) 489 ic->ic_caps |= IEEE80211_C_AES_CCM; 490 if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP)) 491 ic->ic_caps |= IEEE80211_C_CKIP; 492 if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) { 493 ic->ic_caps |= IEEE80211_C_TKIP; 494 /* 495 * Check if h/w does the MIC and/or whether the 496 * separate key cache entries are required to 497 * handle both tx+rx MIC keys. 
498 */ 499 if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC)) 500 ic->ic_caps |= IEEE80211_C_TKIPMIC; 501 if (ath_hal_tkipsplit(ah)) 502 sc->sc_splitmic = 1; 503 } 504 /* 505 * TPC support can be done either with a global cap or 506 * per-packet support. The latter is not available on 507 * all parts. We're a bit pedantic here as all parts 508 * support a global cap. 509 */ 510 sc->sc_hastpc = ath_hal_hastpc(ah); 511 if (sc->sc_hastpc || ath_hal_hastxpowlimit(ah)) 512 ic->ic_caps |= IEEE80211_C_TXPMGT; 513 514 /* 515 * Mark WME capability only if we have sufficient 516 * hardware queues to do proper priority scheduling. 517 */ 518 if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK]) 519 ic->ic_caps |= IEEE80211_C_WME; 520 /* 521 * Check for frame bursting capability. 522 */ 523 if (ath_hal_hasbursting(ah)) 524 ic->ic_caps |= IEEE80211_C_BURST; 525 526 /* 527 * Indicate we need the 802.11 header padded to a 528 * 32-bit boundary for 4-address and QoS frames. 529 */ 530 ic->ic_flags |= IEEE80211_F_DATAPAD; 531 532 /* 533 * Query the hal about antenna support. 534 */ 535 if (ath_hal_hasdiversity(ah)) { 536 sc->sc_hasdiversity = 1; 537 sc->sc_diversity = ath_hal_getdiversity(ah); 538 } 539 sc->sc_defant = ath_hal_getdefantenna(ah); 540 541 /* 542 * Not all chips have the VEOL support we want to 543 * use with IBSS beacons; check here for it. 544 */ 545 sc->sc_hasveol = ath_hal_hasveol(ah); 546 547 /* get mac address from hardware */ 548 ath_hal_getmac(ah, ic->ic_myaddr); 549 550 /* call MI attach routine. 
*/ 551 ieee80211_ifattach(ic); 552 /* override default methods */ 553 ic->ic_node_alloc = ath_node_alloc; 554 sc->sc_node_free = ic->ic_node_free; 555 ic->ic_node_free = ath_node_free; 556 ic->ic_node_getrssi = ath_node_getrssi; 557 sc->sc_recv_mgmt = ic->ic_recv_mgmt; 558 ic->ic_recv_mgmt = ath_recv_mgmt; 559 sc->sc_newstate = ic->ic_newstate; 560 ic->ic_newstate = ath_newstate; 561 ic->ic_crypto.cs_key_alloc = ath_key_alloc; 562 ic->ic_crypto.cs_key_delete = ath_key_delete; 563 ic->ic_crypto.cs_key_set = ath_key_set; 564 ic->ic_crypto.cs_key_update_begin = ath_key_update_begin; 565 ic->ic_crypto.cs_key_update_end = ath_key_update_end; 566 /* complete initialization */ 567 ieee80211_media_init(ic, ath_media_change, ieee80211_media_status); 568 569 ath_bpfattach(sc); 570 571 if (bootverbose) 572 ieee80211_announce(ic); 573 ath_announce(sc); 574 return 0; 575bad2: 576 ath_tx_cleanup(sc); 577 ath_desc_free(sc); 578bad: 579 if (ah) 580 ath_hal_detach(ah); 581 sc->sc_invalid = 1; 582 return error; 583} 584 585int 586ath_detach(struct ath_softc *sc) 587{ 588 struct ifnet *ifp = &sc->sc_if; 589 590 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 591 __func__, ifp->if_flags); 592 593 ath_stop(ifp); 594 bpfdetach(ifp); 595 /* 596 * NB: the order of these is important: 597 * o call the 802.11 layer before detaching the hal to 598 * insure callbacks into the driver to delete global 599 * key cache entries can be handled 600 * o reclaim the tx queue data structures after calling 601 * the 802.11 layer as we'll get called back to reclaim 602 * node state and potentially want to use them 603 * o to cleanup the tx queues the hal is called, so detach 604 * it last 605 * Other than that, it's straightforward... 
606 */ 607 ieee80211_ifdetach(&sc->sc_ic); 608 ath_rate_detach(sc->sc_rc); 609 ath_desc_free(sc); 610 ath_tx_cleanup(sc); 611 ath_hal_detach(sc->sc_ah); 612 613 return 0; 614} 615 616void 617ath_suspend(struct ath_softc *sc) 618{ 619 struct ifnet *ifp = &sc->sc_if; 620 621 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 622 __func__, ifp->if_flags); 623 624 ath_stop(ifp); 625} 626 627void 628ath_resume(struct ath_softc *sc) 629{ 630 struct ifnet *ifp = &sc->sc_if; 631 632 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 633 __func__, ifp->if_flags); 634 635 if (ifp->if_flags & IFF_UP) { 636 ath_init(ifp); 637 if (ifp->if_flags & IFF_RUNNING) 638 ath_start(ifp); 639 } 640} 641 642void 643ath_shutdown(struct ath_softc *sc) 644{ 645 struct ifnet *ifp = &sc->sc_if; 646 647 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 648 __func__, ifp->if_flags); 649 650 ath_stop(ifp); 651} 652 653/* 654 * Interrupt handler. Most of the actual processing is deferred. 655 */ 656void 657ath_intr(void *arg) 658{ 659 struct ath_softc *sc = arg; 660 struct ifnet *ifp = &sc->sc_if; 661 struct ath_hal *ah = sc->sc_ah; 662 HAL_INT status; 663 664 if (sc->sc_invalid) { 665 /* 666 * The hardware is not ready/present, don't touch anything. 667 * Note this can happen early on if the IRQ is shared. 668 */ 669 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__); 670 return; 671 } 672 if (!ath_hal_intrpend(ah)) /* shared irq, not for us */ 673 return; 674 if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) != (IFF_RUNNING|IFF_UP)) { 675 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 676 __func__, ifp->if_flags); 677 ath_hal_getisr(ah, &status); /* clear ISR */ 678 ath_hal_intrset(ah, 0); /* disable further intr's */ 679 return; 680 } 681 /* 682 * Figure out the reason(s) for the interrupt. Note 683 * that the hal returns a pseudo-ISR that may include 684 * bits we haven't explicitly enabled so we mask the 685 * value to insure we only process bits we requested. 
686 */ 687 ath_hal_getisr(ah, &status); /* NB: clears ISR too */ 688 DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status); 689 status &= sc->sc_imask; /* discard unasked for bits */ 690 if (status & HAL_INT_FATAL) { 691 /* 692 * Fatal errors are unrecoverable. Typically 693 * these are caused by DMA errors. Unfortunately 694 * the exact reason is not (presently) returned 695 * by the hal. 696 */ 697 sc->sc_stats.ast_hardware++; 698 ath_hal_intrset(ah, 0); /* disable intr's until reset */ 699 taskqueue_enqueue(taskqueue_swi, &sc->sc_fataltask); 700 } else if (status & HAL_INT_RXORN) { 701 sc->sc_stats.ast_rxorn++; 702 ath_hal_intrset(ah, 0); /* disable intr's until reset */ 703 taskqueue_enqueue(taskqueue_swi, &sc->sc_rxorntask); 704 } else { 705 if (status & HAL_INT_SWBA) { 706 /* 707 * Software beacon alert--time to send a beacon. 708 * Handle beacon transmission directly; deferring 709 * this is too slow to meet timing constraints 710 * under load. 711 */ 712 ath_beacon_proc(sc, 0); 713 } 714 if (status & HAL_INT_RXEOL) { 715 /* 716 * NB: the hardware should re-read the link when 717 * RXE bit is written, but it doesn't work at 718 * least on older hardware revs. 719 */ 720 sc->sc_stats.ast_rxeol++; 721 sc->sc_rxlink = NULL; 722 } 723 if (status & HAL_INT_TXURN) { 724 sc->sc_stats.ast_txurn++; 725 /* bump tx trigger level */ 726 ath_hal_updatetxtriglevel(ah, AH_TRUE); 727 } 728 if (status & HAL_INT_RX) 729 taskqueue_enqueue(taskqueue_swi, &sc->sc_rxtask); 730 if (status & HAL_INT_TX) 731 taskqueue_enqueue(taskqueue_swi, &sc->sc_txtask); 732 if (status & HAL_INT_BMISS) { 733 sc->sc_stats.ast_bmiss++; 734 taskqueue_enqueue(taskqueue_swi, &sc->sc_bmisstask); 735 } 736 if (status & HAL_INT_MIB) { 737 sc->sc_stats.ast_mib++; 738 /* 739 * Disable interrupts until we service the MIB 740 * interrupt; otherwise it will continue to fire. 741 */ 742 ath_hal_intrset(ah, 0); 743 /* 744 * Let the hal handle the event. 
We assume it will 745 * clear whatever condition caused the interrupt. 746 */ 747 ath_hal_mibevent(ah, 748 &ATH_NODE(sc->sc_ic.ic_bss)->an_halstats); 749 ath_hal_intrset(ah, sc->sc_imask); 750 } 751 } 752} 753 754static void 755ath_fatal_proc(void *arg, int pending) 756{ 757 struct ath_softc *sc = arg; 758 struct ifnet *ifp = &sc->sc_if; 759 760 if_printf(ifp, "hardware error; resetting\n"); 761 ath_reset(ifp); 762} 763 764static void 765ath_rxorn_proc(void *arg, int pending) 766{ 767 struct ath_softc *sc = arg; 768 struct ifnet *ifp = &sc->sc_if; 769 770 if_printf(ifp, "rx FIFO overrun; resetting\n"); 771 ath_reset(ifp); 772} 773 774static void 775ath_bmiss_proc(void *arg, int pending) 776{ 777 struct ath_softc *sc = arg; 778 struct ieee80211com *ic = &sc->sc_ic; 779 780 DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending); 781 KASSERT(ic->ic_opmode == IEEE80211_M_STA, 782 ("unexpect operating mode %u", ic->ic_opmode)); 783 if (ic->ic_state == IEEE80211_S_RUN) { 784 /* 785 * Rather than go directly to scan state, try to 786 * reassociate first. If that fails then the state 787 * machine will drop us into scanning after timing 788 * out waiting for a probe response. 
789 */ 790 NET_LOCK_GIANT(); 791 ieee80211_new_state(ic, IEEE80211_S_ASSOC, -1); 792 NET_UNLOCK_GIANT(); 793 } 794} 795 796static u_int 797ath_chan2flags(struct ieee80211com *ic, struct ieee80211_channel *chan) 798{ 799#define N(a) (sizeof(a) / sizeof(a[0])) 800 static const u_int modeflags[] = { 801 0, /* IEEE80211_MODE_AUTO */ 802 CHANNEL_A, /* IEEE80211_MODE_11A */ 803 CHANNEL_B, /* IEEE80211_MODE_11B */ 804 CHANNEL_PUREG, /* IEEE80211_MODE_11G */ 805 0, /* IEEE80211_MODE_FH */ 806 CHANNEL_T, /* IEEE80211_MODE_TURBO_A */ 807 CHANNEL_108G /* IEEE80211_MODE_TURBO_G */ 808 }; 809 enum ieee80211_phymode mode = ieee80211_chan2mode(ic, chan); 810 811 KASSERT(mode < N(modeflags), ("unexpected phy mode %u", mode)); 812 KASSERT(modeflags[mode] != 0, ("mode %u undefined", mode)); 813 return modeflags[mode]; 814#undef N 815} 816 817static void 818ath_init(void *arg) 819{ 820 struct ath_softc *sc = (struct ath_softc *) arg; 821 struct ieee80211com *ic = &sc->sc_ic; 822 struct ifnet *ifp = &sc->sc_if; 823 struct ieee80211_node *ni; 824 struct ath_hal *ah = sc->sc_ah; 825 HAL_STATUS status; 826 827 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 828 __func__, ifp->if_flags); 829 830 ATH_LOCK(sc); 831 /* 832 * Stop anything previously setup. This is safe 833 * whether this is the first time through or not. 834 */ 835 ath_stop_locked(ifp); 836 837 /* 838 * The basic interface to setting the hardware in a good 839 * state is ``reset''. On return the hardware is known to 840 * be powered up and with interrupts disabled. This must 841 * be followed by initialization of the appropriate bits 842 * and then setup of the interrupt mask. 
843 */ 844 sc->sc_curchan.channel = ic->ic_ibss_chan->ic_freq; 845 sc->sc_curchan.channelFlags = ath_chan2flags(ic, ic->ic_ibss_chan); 846 if (!ath_hal_reset(ah, ic->ic_opmode, &sc->sc_curchan, AH_FALSE, &status)) { 847 if_printf(ifp, "unable to reset hardware; hal status %u\n", 848 status); 849 goto done; 850 } 851 852 /* 853 * This is needed only to setup initial state 854 * but it's best done after a reset. 855 */ 856 ath_update_txpow(sc); 857 858 /* 859 * Setup the hardware after reset: the key cache 860 * is filled as needed and the receive engine is 861 * set going. Frame transmit is handled entirely 862 * in the frame output path; there's nothing to do 863 * here except setup the interrupt mask. 864 */ 865 ath_initkeytable(sc); /* XXX still needed? */ 866 if (ath_startrecv(sc) != 0) { 867 if_printf(ifp, "unable to start recv logic\n"); 868 goto done; 869 } 870 871 /* 872 * Enable interrupts. 873 */ 874 sc->sc_imask = HAL_INT_RX | HAL_INT_TX 875 | HAL_INT_RXEOL | HAL_INT_RXORN 876 | HAL_INT_FATAL | HAL_INT_GLOBAL; 877 /* 878 * Enable MIB interrupts when there are hardware phy counters. 879 * Note we only do this (at the moment) for station mode. 880 */ 881 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) 882 sc->sc_imask |= HAL_INT_MIB; 883 ath_hal_intrset(ah, sc->sc_imask); 884 885 ifp->if_flags |= IFF_RUNNING; 886 ic->ic_state = IEEE80211_S_INIT; 887 888 /* 889 * The hardware should be ready to go now so it's safe 890 * to kick the 802.11 state machine as it's likely to 891 * immediately call back to us to send mgmt frames. 
	 */
	ni = ic->ic_bss;
	ni->ni_chan = ic->ic_ibss_chan;
	ath_chan_change(sc, ni->ni_chan);
	if (ic->ic_opmode != IEEE80211_M_MONITOR) {
		if (ic->ic_roaming != IEEE80211_ROAMING_MANUAL)
			ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
	} else
		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
done:
	ATH_UNLOCK(sc);
}

/*
 * Stop the interface; the driver lock (ATH_LOCK) must already
 * be held.  Tears down the running state in a careful order and
 * skips hardware touches when the device has been marked invalid
 * (e.g. after the hardware has gone away).
 */
static void
ath_stop_locked(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	ATH_LOCK_ASSERT(sc);
	if (ifp->if_flags & IFF_RUNNING) {
		/*
		 * Shutdown the hardware and driver:
		 *    reset 802.11 state machine
		 *    turn off timers
		 *    disable interrupts
		 *    turn off the radio
		 *    clear transmit machinery
		 *    clear receive machinery
		 *    drain and release tx queues
		 *    reclaim beacon resources
		 *    power down hardware
		 *
		 * Note that some of this work is not possible if the
		 * hardware is gone (invalid).
		 */
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
		ifp->if_flags &= ~IFF_RUNNING;
		ifp->if_timer = 0;
		if (!sc->sc_invalid) {
			if (sc->sc_softled)
				ath_hal_gpioset(ah, sc->sc_ledpin, 1);
			ath_hal_intrset(ah, 0);
		}
		ath_draintxq(sc);
		if (!sc->sc_invalid) {
			ath_stoprecv(sc);
			ath_hal_phydisable(ah);
		} else
			sc->sc_rxlink = NULL;
		IFQ_DRV_PURGE(&ifp->if_snd);
		ath_beacon_free(sc);
	}
}

/*
 * Locked wrapper around ath_stop_locked that additionally puts
 * the chip into full sleep when the hardware is still present.
 */
static void
ath_stop(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;

	ATH_LOCK(sc);
	ath_stop_locked(ifp);
	if (!sc->sc_invalid) {
		/*
		 * Set the chip in full sleep mode.  Note that we are
		 * careful to do this only when bringing the interface
		 * completely to a stop.  When the chip is in this state
		 * it must be carefully woken up or references to
		 * registers in the PCI clock domain may freeze the bus
		 * (and system).  This varies by chip and is mostly an
		 * issue with newer parts that go to sleep more quickly.
		 */
		ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP, 0);
	}
	ATH_UNLOCK(sc);
}

/*
 * Reset the hardware w/o losing operational state.  This is
 * basically a more efficient way of doing ath_stop, ath_init,
 * followed by state transitions to the current 802.11
 * operational state.  Used to recover from various errors and
 * to reset or reload hardware state.
 */
static int
ath_reset(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_channel *c;
	HAL_STATUS status;

	/*
	 * Convert to a HAL channel description with the flags
	 * constrained to reflect the current operating mode.
	 */
	c = ic->ic_ibss_chan;
	sc->sc_curchan.channel = c->ic_freq;
	sc->sc_curchan.channelFlags = ath_chan2flags(ic, c);

	/* NB: order matters: quiesce tx and rx before resetting the chip */
	ath_hal_intrset(ah, 0);		/* disable interrupts */
	ath_draintxq(sc);		/* stop xmit side */
	ath_stoprecv(sc);		/* stop recv side */
	/* NB: indicate channel change so we do a full reset */
	if (!ath_hal_reset(ah, ic->ic_opmode, &sc->sc_curchan, AH_TRUE, &status))
		if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
			__func__, status);
	ath_update_txpow(sc);		/* update tx power state */
	if (ath_startrecv(sc) != 0)	/* restart recv */
		if_printf(ifp, "%s: unable to start recv logic\n", __func__);
	/*
	 * We may be doing a reset in response to an ioctl
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_chan_change(sc, c);
	if (ic->ic_state == IEEE80211_S_RUN)
		ath_beacon_config(sc);	/* restart beacons */
	ath_hal_intrset(ah, sc->sc_imask);

	ath_start(ifp);			/* restart xmit */
	return 0;
}

/*
 * Transmit start entry point.  Pulls frames off the mgmt queue
 * (priority) or the interface send queue, resolves the destination
 * node, encapsulates, and hands each frame to ath_tx_start until
 * either the queues are empty or we run out of tx buffers.
 */
static void
ath_start(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ath_buf *bf;
	struct mbuf *m;
	struct ieee80211_frame *wh;
	struct ether_header *eh;

	if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid)
		return;
	for (;;) {
		/*
		 * Grab a TX buffer and associated resources.
		 */
		ATH_TXBUF_LOCK(sc);
		bf = STAILQ_FIRST(&sc->sc_txbuf);
		if (bf != NULL)
			STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
		ATH_TXBUF_UNLOCK(sc);
		if (bf == NULL) {
			DPRINTF(sc, ATH_DEBUG_ANY, "%s: out of xmit buffers\n",
				__func__);
			sc->sc_stats.ast_tx_qstop++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		/*
		 * Poll the management queue for frames; they
		 * have priority over normal data frames.
		 */
		IF_DEQUEUE(&ic->ic_mgtq, m);
		if (m == NULL) {
			/*
			 * No data frames go out unless we're associated.
			 */
			if (ic->ic_state != IEEE80211_S_RUN) {
				DPRINTF(sc, ATH_DEBUG_ANY,
					"%s: ignore data packet, state %u\n",
					__func__, ic->ic_state);
				sc->sc_stats.ast_tx_discard++;
				ATH_TXBUF_LOCK(sc);
				STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
				ATH_TXBUF_UNLOCK(sc);
				break;
			}
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);	/* XXX: LOCK */
			if (m == NULL) {
				/* send queue empty; return the tx buffer */
				ATH_TXBUF_LOCK(sc);
				STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
				ATH_TXBUF_UNLOCK(sc);
				break;
			}
			/*
			 * Find the node for the destination so we can do
			 * things like power save and fast frames aggregation.
			 */
			if (m->m_len < sizeof(struct ether_header) &&
			    (m = m_pullup(m, sizeof(struct ether_header))) == NULL) {
				ic->ic_stats.is_tx_nobuf++;	/* XXX */
				ni = NULL;
				goto bad;
			}
			eh = mtod(m, struct ether_header *);
			ni = ieee80211_find_txnode(ic, eh->ether_dhost);
			if (ni == NULL) {
				/* NB: ieee80211_find_txnode does stat+msg */
				goto bad;
			}
			if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) &&
			    (m->m_flags & M_PWR_SAV) == 0) {
				/*
				 * Station in power save mode; pass the frame
				 * to the 802.11 layer and continue.  We'll get
				 * the frame back when the time is right.
				 */
				ieee80211_pwrsave(ic, ni, m);
				goto reclaim;
			}
			/* calculate priority so we can find the tx queue */
			if (ieee80211_classify(ic, m, ni)) {
				DPRINTF(sc, ATH_DEBUG_XMIT,
					"%s: discard, classification failure\n",
					__func__);
				goto bad;
			}
			ifp->if_opackets++;
			BPF_MTAP(ifp, m);
			/*
			 * Encapsulate the packet in prep for transmission.
			 */
			m = ieee80211_encap(ic, m, ni);
			if (m == NULL) {
				DPRINTF(sc, ATH_DEBUG_ANY,
					"%s: encapsulation failure\n",
					__func__);
				sc->sc_stats.ast_tx_encap++;
				goto bad;
			}
		} else {
			/*
			 * Hack!  The referenced node pointer is in the
			 * rcvif field of the packet header.  This is
			 * placed there by ieee80211_mgmt_output because
			 * we need to hold the reference with the frame
			 * and there's no other way (other than packet
			 * tags which we consider too expensive to use)
			 * to pass it along.
			 */
			ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
			m->m_pkthdr.rcvif = NULL;

			wh = mtod(m, struct ieee80211_frame *);
			if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
			    IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
				/* fill time stamp */
				u_int64_t tsf;
				u_int32_t *tstamp;

				tsf = ath_hal_gettsf64(ah);
				/* XXX: adjust 100us delay to xmit */
				tsf += 100;
				tstamp = (u_int32_t *)&wh[1];
				tstamp[0] = htole32(tsf & 0xffffffff);
				tstamp[1] = htole32(tsf >> 32);
			}
			sc->sc_stats.ast_tx_mgmt++;
		}

		/* NB: bad/reclaim labels are targets for the gotos above */
		if (ath_tx_start(sc, ni, bf, m)) {
	bad:
			ifp->if_oerrors++;
	reclaim:
			ATH_TXBUF_LOCK(sc);
			STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
			ATH_TXBUF_UNLOCK(sc);
			if (ni != NULL)
				ieee80211_free_node(ni);
			continue;
		}

		sc->sc_tx_timer = 5;
		ifp->if_timer = 1;
	}
}

/*
 * Media-change callback: let the 802.11 layer process the change
 * and re-init the interface when it asks for a network reset.
 */
static int
ath_media_change(struct ifnet *ifp)
{
#define	IS_UP(ifp) \
	((ifp->if_flags & (IFF_RUNNING|IFF_UP)) == (IFF_RUNNING|IFF_UP))
	int error;

	error = ieee80211_media_change(ifp);
	if (error == ENETRESET) {
		if (IS_UP(ifp))
			ath_init(ifp);		/* XXX lose error */
		error = 0;
	}
	return error;
#undef IS_UP
}

#ifdef AR_DEBUG
/*
 * Debug helper: dump one key cache entry (cipher, key bytes,
 * mac address, and TKIP MIC bytes when present) to the console.
 */
static void
ath_keyprint(const char *tag, u_int ix,
	const HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
{
	static const char *ciphers[] = {
		"WEP",
		"AES-OCB",
		"AES-CCM",
		"CKIP",
		"TKIP",
		"CLR",
	};
	int i, n;

	/* NB: assumes kv_type indexes ciphers[] — debug-only code */
	printf("%s: [%02u] %-7s ", tag, ix, ciphers[hk->kv_type]);
	for (i = 0, n = hk->kv_len; i < n; i++)
		printf("%02x", hk->kv_val[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->kv_type == HAL_CIPHER_TKIP) {
		printf(" mic ");
		for (i = 0; i < sizeof(hk->kv_mic); i++)
			printf("%02x", hk->kv_mic[i]);
	}
	printf("\n");
}
#endif

/*
 *
 * Set a TKIP key into the hardware.  This handles the
 * potential distribution of key state to multiple key
 * cache slots for TKIP.
 */
static int
ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k,
	HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
{
#define	IEEE80211_KEY_XR	(IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV)
	static const u_int8_t zerobssid[IEEE80211_ADDR_LEN];
	struct ath_hal *ah = sc->sc_ah;

	KASSERT(k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP,
		("got a non-TKIP key, cipher %u", k->wk_cipher->ic_cipher));
	KASSERT(sc->sc_splitmic, ("key cache !split"));
	if ((k->wk_flags & IEEE80211_KEY_XR) == IEEE80211_KEY_XR) {
		/*
		 * TX key goes at first index, RX key at +32.
		 * The hal handles the MIC keys at index+64.
		 */
		memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_mic));
		KEYPRINTF(sc, k->wk_keyix, hk, zerobssid);
		if (!ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid))
			return 0;

		memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
		KEYPRINTF(sc, k->wk_keyix+32, hk, mac);
		/* XXX delete tx key on failure? */
		return ath_hal_keyset(ah, k->wk_keyix+32, hk, mac);
	} else if (k->wk_flags & IEEE80211_KEY_XR) {
		/*
		 * TX/RX key goes at first index.
		 * The hal handles the MIC keys at index+64.
		 */
		KASSERT(k->wk_keyix < IEEE80211_WEP_NKID,
			("group key at index %u", k->wk_keyix));
		memcpy(hk->kv_mic, k->wk_flags & IEEE80211_KEY_XMIT ?
			k->wk_txmic : k->wk_rxmic, sizeof(hk->kv_mic));
		KEYPRINTF(sc, k->wk_keyix, hk, zerobssid);
		return ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid);
	}
	/* XXX key w/o xmit/recv; need this for compression? */
	return 0;
#undef IEEE80211_KEY_XR
}

/*
 * Set a net80211 key into the hardware.  This handles the
 * potential distribution of key state to multiple key
 * cache slots for TKIP with hardware MIC support.
 */
static int
ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
	const u_int8_t mac[IEEE80211_ADDR_LEN])
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	/* map net80211 cipher numbers to HAL cipher numbers */
	static const u_int8_t ciphermap[] = {
		HAL_CIPHER_WEP,		/* IEEE80211_CIPHER_WEP */
		HAL_CIPHER_TKIP,	/* IEEE80211_CIPHER_TKIP */
		HAL_CIPHER_AES_OCB,	/* IEEE80211_CIPHER_AES_OCB */
		HAL_CIPHER_AES_CCM,	/* IEEE80211_CIPHER_AES_CCM */
		(u_int8_t) -1,		/* 4 is not allocated */
		HAL_CIPHER_CKIP,	/* IEEE80211_CIPHER_CKIP */
		HAL_CIPHER_CLR,		/* IEEE80211_CIPHER_NONE */
	};
	struct ath_hal *ah = sc->sc_ah;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	HAL_KEYVAL hk;

	memset(&hk, 0, sizeof(hk));
	/*
	 * Software crypto uses a "clear key" so non-crypto
	 * state kept in the key cache are maintained and
	 * so that rx frames have an entry to match.
	 */
	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
		KASSERT(cip->ic_cipher < N(ciphermap),
			("invalid cipher type %u", cip->ic_cipher));
		hk.kv_type = ciphermap[cip->ic_cipher];
		hk.kv_len = k->wk_keylen;
		memcpy(hk.kv_val, k->wk_key, k->wk_keylen);
	} else
		hk.kv_type = HAL_CIPHER_CLR;

	if (hk.kv_type == HAL_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 &&
	    sc->sc_splitmic) {
		return ath_keyset_tkip(sc, k, &hk, mac);
	} else {
		KEYPRINTF(sc, k->wk_keyix, &hk, mac);
		return ath_hal_keyset(ah, k->wk_keyix, &hk, mac);
	}
#undef N
}

/*
 * Fill the hardware key cache with key entries.
 */
static void
ath_initkeytable(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_if;
	struct ath_hal *ah = sc->sc_ah;
	const u_int8_t *bssid;
	int i;

	/* XXX maybe should reset all keys when !PRIVACY */
	if (ic->ic_state == IEEE80211_S_SCAN)
		bssid = ifp->if_broadcastaddr;
	else
		bssid = ic->ic_bss->ni_bssid;
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		struct ieee80211_key *k = &ic->ic_nw_keys[i];

		if (k->wk_keylen == 0) {
			/* unused global key slot; clear the h/w entry */
			ath_hal_keyreset(ah, i);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: reset key %u\n",
				__func__, i);
		} else {
			ath_keyset(sc, k, bssid);
		}
	}
}

/*
 * Allocate tx/rx key slots for TKIP.  We allocate two slots for
 * each key, one for decrypt/encrypt and the other for the MIC.
 */
static u_int16_t
key_alloc_2pair(struct ath_softc *sc)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	u_int i, keyix;

	KASSERT(sc->sc_splitmic, ("key cache !split"));
	/* XXX could optimize */
	/* NB: scan only the first quarter of the map; the +32/+64
	 *     companion slots must come from the remaining space */
	for (i = 0; i < N(sc->sc_keymap)/4; i++) {
		u_int8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots in this byte are free.
			 */
			keyix = i*NBBY;
			while (b & 1) {
		again:
				keyix++;
				b >>= 1;
			}
			/* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
			if (isset(sc->sc_keymap, keyix+32) ||
			    isset(sc->sc_keymap, keyix+64) ||
			    isset(sc->sc_keymap, keyix+32+64)) {
				/* full pair unavailable */
				/* XXX statistic */
				if (keyix == (i+1)*NBBY) {
					/* no slots were appropriate, advance */
					continue;
				}
				goto again;
			}
			/* claim all four slots of the pair */
			setbit(sc->sc_keymap, keyix);
			setbit(sc->sc_keymap, keyix+64);
			setbit(sc->sc_keymap, keyix+32);
			setbit(sc->sc_keymap, keyix+32+64);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
				"%s: key pair %u,%u %u,%u\n",
				__func__, keyix, keyix+64,
				keyix+32, keyix+32+64);
			return keyix;
		}
	}
	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
	return IEEE80211_KEYIX_NONE;
#undef N
}

/*
 * Allocate a single key cache slot.
 */
static u_int16_t
key_alloc_single(struct ath_softc *sc)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	u_int i, keyix;

	/* XXX try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
	for (i = 0; i < N(sc->sc_keymap); i++) {
		u_int8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots are free.
			 */
			keyix = i*NBBY;
			while (b & 1)
				keyix++, b >>= 1;
			setbit(sc->sc_keymap, keyix);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key %u\n",
				__func__, keyix);
			return keyix;
		}
	}
	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of space\n", __func__);
	return IEEE80211_KEYIX_NONE;
#undef N
}

/*
 * Allocate one or more key cache slots for a unicast key.  The
 * key itself is needed only to identify the cipher.  For hardware
 * TKIP with split cipher+MIC keys we allocate two key cache slot
 * pairs so that we can setup separate TX and RX MIC keys.
 * Note that the MIC key for a TKIP key at slot i is assumed by the
 * hardware to be at slot i+64.  This limits TKIP keys to the first
 * 64 entries.
 */
static int
ath_key_alloc(struct ieee80211com *ic, const struct ieee80211_key *k)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	/*
	 * We allocate two pair for TKIP when using the h/w to do
	 * the MIC.  For everything else, including software crypto,
	 * we allocate a single entry.  Note that s/w crypto requires
	 * a pass-through slot on the 5211 and 5212.  The 5210 does
	 * not support pass-through cache entries and we map all
	 * those requests to slot 0.
	 */
	if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
		return key_alloc_single(sc);
	} else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic) {
		return key_alloc_2pair(sc);
	} else {
		return key_alloc_single(sc);
	}
}

/*
 * Delete an entry in the key cache allocated by ath_key_alloc.
 */
static int
ath_key_delete(struct ieee80211com *ic, const struct ieee80211_key *k)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	u_int keyix = k->wk_keyix;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, keyix);

	ath_hal_keyreset(ah, keyix);
	/*
	 * Handle split tx/rx keying required for TKIP with h/w MIC.
	 */
	if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
		ath_hal_keyreset(ah, keyix+32);		/* RX key */
	if (keyix >= IEEE80211_WEP_NKID) {
		/*
		 * Don't touch keymap entries for global keys so
		 * they are never considered for dynamic allocation.
		 */
		clrbit(sc->sc_keymap, keyix);
		if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
		    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 &&
		    sc->sc_splitmic) {
			clrbit(sc->sc_keymap, keyix+64);	/* TX key MIC */
			clrbit(sc->sc_keymap, keyix+32);	/* RX key */
			clrbit(sc->sc_keymap, keyix+32+64);	/* RX key MIC */
		}
	}
	return 1;
}

/*
 * Set the key cache contents for the specified key.  Key cache
 * slot(s) must already have been allocated by ath_key_alloc.
 */
static int
ath_key_set(struct ieee80211com *ic, const struct ieee80211_key *k,
	const u_int8_t mac[IEEE80211_ADDR_LEN])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	return ath_keyset(sc, k, mac);
}

/*
 * Block/unblock tx+rx processing while a key change is done.
 * We assume the caller serializes key management operations
 * so we only need to worry about synchronization with other
 * uses that originate in the driver.
 */
static void
ath_key_update_begin(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
#if 0
	tasklet_disable(&sc->sc_rxtq);
#endif
	IF_LOCK(&ifp->if_snd);		/* NB: doesn't block mgmt frames */
}

/*
 * Companion to ath_key_update_begin: re-enable frame processing
 * after a key change completes.
 */
static void
ath_key_update_end(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	IF_UNLOCK(&ifp->if_snd);
#if 0
	tasklet_enable(&sc->sc_rxtq);
#endif
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when scanning
 */
static u_int32_t
ath_calcrxfilter(struct ath_softc *sc, enum ieee80211_state state)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = &sc->sc_if;
	u_int32_t rfilt;

	rfilt = (ath_hal_getrxfilter(ah) & HAL_RX_FILTER_PHYERR)
	      | HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
	if (ic->ic_opmode != IEEE80211_M_STA)
		rfilt |= HAL_RX_FILTER_PROBEREQ;
	if (ic->ic_opmode != IEEE80211_M_HOSTAP &&
	    (ifp->if_flags & IFF_PROMISC))
		rfilt |= HAL_RX_FILTER_PROM;
	if (ic->ic_opmode == IEEE80211_M_STA ||
	    ic->ic_opmode == IEEE80211_M_IBSS ||
	    state == IEEE80211_S_SCAN)
		rfilt |= HAL_RX_FILTER_BEACON;
	return rfilt;
}

/*
 * Program the rx filter, operating mode, mac address, and
 * multicast hash filter into the hardware.
 */
static void
ath_mode_init(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = &sc->sc_if;
	u_int32_t rfilt, mfilt[2], val;
	u_int8_t pos;
	struct ifmultiaddr *ifma;

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc, ic->ic_state);
	ath_hal_setrxfilter(ah, rfilt);

	/* configure operational mode */
	ath_hal_setopmode(ah);

	/*
	 * Handle any link-level address change.  Note that we only
	 * need to force ic_myaddr; any other addresses are handled
	 * as a byproduct of the ifnet code marking the interface
	 * down then up.
	 *
	 * XXX should get from lladdr instead of arpcom but that's more work
	 */
	IEEE80211_ADDR_COPY(ic->ic_myaddr, IFP2AC(ifp)->ac_enaddr);
	ath_hal_setmac(ah, ic->ic_myaddr);

	/* calculate and install multicast filter */
	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		mfilt[0] = mfilt[1] = 0;
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			caddr_t dl;

			/* calculate XOR of eight 6bit values */
			dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
			val = LE_READ_4(dl + 0);
			pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			val = LE_READ_4(dl + 3);
			pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			pos &= 0x3f;
			mfilt[pos / 32] |= (1 << (pos % 32));
		}
	} else {
		/* accept all multicast */
		mfilt[0] = mfilt[1] = ~0;
	}
	ath_hal_setmcastfilter(ah, mfilt[0], mfilt[1]);
	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, MC filter %08x:%08x\n",
		__func__, rfilt, mfilt[0], mfilt[1]);
}

/*
 * bus_dma callback: record the DMA segment list for an mbuf
 * load into the associated ath_buf.
 */
static void
ath_mbuf_load_cb(void *arg, bus_dma_segment_t *seg, int nseg, bus_size_t mapsize, int error)
{
	struct ath_buf *bf = arg;

	KASSERT(nseg <= ATH_MAX_SCATTER, ("too many DMA segments %u", nseg));
	KASSERT(error == 0, ("error %u on bus_dma callback", error));
	bf->bf_mapsize = mapsize;
	bf->bf_nseg = nseg;
	bcopy(seg, bf->bf_segs, nseg * sizeof (seg[0]));
}

/*
 * Set the slot time based on the current setting.
 */
static void
ath_setslottime(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;

	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		ath_hal_setslottime(ah, HAL_SLOT_TIME_9);
	else
		ath_hal_setslottime(ah, HAL_SLOT_TIME_20);
	sc->sc_updateslot = OK;
}

/*
 * Callback from the 802.11 layer to update the
 * slot time based on the current setting.
 */
static void
ath_updateslot(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	/*
	 * When not coordinating the BSS, change the hardware
	 * immediately.  For other operation we defer the change
	 * until beacon updates have propagated to the stations.
	 */
	if (ic->ic_opmode == IEEE80211_M_HOSTAP)
		sc->sc_updateslot = UPDATE;
	else
		ath_setslottime(sc);
}

/*
 * Allocate and setup an initial beacon frame.
 */
static int
ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_buf *bf;
	struct mbuf *m;
	int error;

	bf = STAILQ_FIRST(&sc->sc_bbuf);
	if (bf == NULL) {
		DPRINTF(sc, ATH_DEBUG_BEACON, "%s: no dma buffers\n", __func__);
		sc->sc_stats.ast_be_nombuf++;	/* XXX */
		return ENOMEM;			/* XXX */
	}
	/* release any previously-mapped beacon frame */
	if (bf->bf_m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
		bf->bf_node = NULL;
	}
	/*
	 * NB: the beacon data buffer must be 32-bit aligned;
	 * we assume the mbuf routines will return us something
	 * with this alignment (perhaps should assert).
	 */
	m = ieee80211_beacon_alloc(ic, ni, &sc->sc_boff);
	if (m == NULL) {
		DPRINTF(sc, ATH_DEBUG_BEACON, "%s: cannot get mbuf\n",
			__func__);
		sc->sc_stats.ast_be_nombuf++;
		return ENOMEM;
	}
	error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m,
				     ath_mbuf_load_cb, bf,
				     BUS_DMA_NOWAIT);
	if (error == 0) {
		bf->bf_m = m;
		bf->bf_node = ni;		/* NB: no held reference */
	} else {
		m_freem(m);
	}
	return error;
}

/*
 * Setup the beacon frame for transmit.
 */
static void
ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
{
#define	USE_SHPREAMBLE(_ic) \
	(((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
		== IEEE80211_F_SHPREAMBLE)
	struct ieee80211_node *ni = bf->bf_node;
	struct ieee80211com *ic = ni->ni_ic;
	struct mbuf *m = bf->bf_m;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_desc *ds;
	int flags, antenna;
	u_int8_t rate;

	DPRINTF(sc, ATH_DEBUG_BEACON, "%s: m %p len %u\n",
		__func__, m, m->m_len);

	/* setup descriptors */
	ds = bf->bf_desc;

	flags = HAL_TXDESC_NOACK;
	if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
		ds->ds_link = bf->bf_daddr;	/* self-linked */
		flags |= HAL_TXDESC_VEOL;
		/*
		 * Let hardware handle antenna switching.
		 */
		antenna = 0;
	} else {
		ds->ds_link = 0;
		/*
		 * Switch antenna every 4 beacons.
		 * XXX assumes two antennae
		 */
		antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
	}

	KASSERT(bf->bf_nseg == 1,
		("multi-segment beacon frame; nseg %u", bf->bf_nseg));
	ds->ds_data = bf->bf_segs[0].ds_addr;
	/*
	 * Calculate rate code.
	 * XXX everything at min xmit rate
	 */
	if (USE_SHPREAMBLE(ic))
		rate = an->an_tx_mgtratesp;
	else
		rate = an->an_tx_mgtrate;
	ath_hal_setuptxdesc(ah, ds
		, m->m_len + IEEE80211_CRC_LEN	/* frame length */
		, sizeof(struct ieee80211_frame)/* header length */
		, HAL_PKT_TYPE_BEACON		/* Atheros packet type */
		, ni->ni_txpower		/* txpower XXX */
		, rate, 1			/* series 0 rate/tries */
		, HAL_TXKEYIX_INVALID		/* no encryption */
		, antenna			/* antenna mode */
		, flags				/* no ack, veol for beacons */
		, 0				/* rts/cts rate */
		, 0				/* rts/cts duration */
	);
	/* NB: beacon's BufLen must be a multiple of 4 bytes */
	ath_hal_filltxdesc(ah, ds
		, roundup(m->m_len, 4)	/* buffer length */
		, AH_TRUE		/* first segment */
		, AH_TRUE		/* last segment */
		, ds			/* first descriptor */
	);
#undef USE_SHPREAMBLE
}

/*
 * Transmit a beacon frame at SWBA.  Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
 */
static void
ath_beacon_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ath_buf *bf = STAILQ_FIRST(&sc->sc_bbuf);
	struct ieee80211_node *ni = bf->bf_node;
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_hal *ah = sc->sc_ah;
	struct mbuf *m;
	int ncabq, error, otherant;

	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n",
		__func__, pending);

	if (ic->ic_opmode == IEEE80211_M_STA ||
	    ic->ic_opmode == IEEE80211_M_MONITOR ||
	    bf == NULL || bf->bf_m == NULL) {
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: ic_flags=%x bf=%p bf_m=%p\n",
			__func__, ic->ic_flags, bf, bf ? bf->bf_m : NULL);
		return;
	}
	/*
	 * Check if the previous beacon has gone out.  If
	 * not, don't try to post another; skip this
	 * period and wait for the next.  Missed beacons
	 * indicate a problem and should not occur.  If we
	 * miss too many consecutive beacons reset the device.
	 */
	if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
		sc->sc_bmisscount++;
		DPRINTF(sc, ATH_DEBUG_BEACON_PROC,
			"%s: missed %u consecutive beacons\n",
			__func__, sc->sc_bmisscount);
		if (sc->sc_bmisscount > 3)		/* NB: 3 is a guess */
			taskqueue_enqueue(taskqueue_swi, &sc->sc_bstucktask);
		return;
	}
	if (sc->sc_bmisscount != 0) {
		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: resume beacon xmit after %u misses\n",
			__func__, sc->sc_bmisscount);
		sc->sc_bmisscount = 0;
	}

	/*
	 * Update dynamic beacon contents.  If this returns
	 * non-zero then we need to remap the memory because
	 * the beacon frame changed size (probably because
	 * of the TIM bitmap).
	 */
	m = bf->bf_m;
	ncabq = ath_hal_numtxpending(ah, sc->sc_cabq->axq_qnum);
	if (ieee80211_beacon_update(ic, bf->bf_node, &sc->sc_boff, m, ncabq)) {
		/* XXX too conservative? */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m,
					     ath_mbuf_load_cb, bf,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			if_printf(ic->ic_ifp,
			    "%s: bus_dmamap_load_mbuf failed, error %u\n",
			    __func__, error);
			return;
		}
	}

	/*
	 * Handle slot time change when a non-ERP station joins/leaves
	 * an 11g network.  The 802.11 layer notifies us via callback,
	 * we mark updateslot, then wait one beacon before effecting
	 * the change.  This gives associated stations at least one
	 * beacon interval to note the state change.
	 */
	/* XXX locking */
	if (sc->sc_updateslot == UPDATE)
		sc->sc_updateslot = COMMIT;	/* commit next beacon */
	else if (sc->sc_updateslot == COMMIT)
		ath_setslottime(sc);		/* commit change to h/w */

	/*
	 * Check recent per-antenna transmit statistics and flip
	 * the default antenna if noticeably more frames went out
	 * on the non-default antenna.
	 * XXX assumes 2 antennae
	 */
	otherant = sc->sc_defant & 1 ? 2 : 1;
	if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
		ath_setdefantenna(sc, otherant);
	sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;

	/*
	 * Construct tx descriptor.
	 */
	ath_beacon_setup(sc, bf);

	/*
	 * Stop any current dma and put the new frame on the queue.
	 * This should never fail since we check above that no frames
	 * are still pending on the queue.
	 */
	if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
		DPRINTF(sc, ATH_DEBUG_ANY,
			"%s: beacon queue %u did not stop?\n",
			__func__, sc->sc_bhalq);
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);

	/*
	 * Enable the CAB queue before the beacon queue to
	 * insure cab frames are triggered by this beacon.
	 */
	if (sc->sc_boff.bo_tim[4] & 1)	/* NB: only at DTIM */
		ath_hal_txstart(ah, sc->sc_cabq->axq_qnum);
	ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
	ath_hal_txstart(ah, sc->sc_bhalq);
	DPRINTF(sc, ATH_DEBUG_BEACON_PROC,
		"%s: TXDP[%u] = %p (%p)\n", __func__,
		sc->sc_bhalq, (caddr_t)bf->bf_daddr, bf->bf_desc);

	sc->sc_stats.ast_be_xmit++;
}

/*
 * Reset the hardware after detecting beacons have stopped.
 */
static void
ath_bstuck_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;

	if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
		sc->sc_bmisscount);
	ath_reset(ifp);
}

/*
 * Reclaim beacon resources.
 */
static void
ath_beacon_free(struct ath_softc *sc)
{
	struct ath_buf *bf;

	STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list)
		if (bf->bf_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
			m_freem(bf->bf_m);
			bf->bf_m = NULL;
			bf->bf_node = NULL;
		}
}

/*
 * Configure the beacon and sleep timers.
 *
 * When operating as an AP this resets the TSF and sets
 * up the hardware to notify us when we need to issue beacons.
 *
 * When operating in station mode this sets up the beacon
 * timers according to the timestamp of the last received
 * beacon and the current TSF, configures PCF and DTIM
 * handling, programs the sleep registers so the hardware
 * will wakeup in time to receive beacons, and configures
 * the beacon miss handling so we'll receive a BMISS
 * interrupt when we stop seeing beacons from the AP
 * we've associated with.
 */
static void
ath_beacon_config(struct ath_softc *sc)
{
#define	MS_TO_TU(x)	(((x) * 1000) / 1024)
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	u_int32_t nexttbtt, intval;

	/* extract tstamp from last beacon and convert to TU */
	nexttbtt = (LE_READ_4(ni->ni_tstamp.data + 4) << 22) |
	    (LE_READ_4(ni->ni_tstamp.data) >> 10);
	intval = MS_TO_TU(ni->ni_intval) & HAL_BEACON_PERIOD;
	if (nexttbtt == 0)		/* e.g.
for ap mode */ 1983 nexttbtt = intval; 1984 else if (intval) /* NB: can be 0 for monitor mode */ 1985 nexttbtt = roundup(nexttbtt, intval); 1986 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n", 1987 __func__, nexttbtt, intval, ni->ni_intval); 1988 if (ic->ic_opmode == IEEE80211_M_STA) { 1989 HAL_BEACON_STATE bs; 1990 u_int32_t bmisstime; 1991 1992 /* NB: no PCF support right now */ 1993 memset(&bs, 0, sizeof(bs)); 1994 bs.bs_intval = intval; 1995 bs.bs_nexttbtt = nexttbtt; 1996 bs.bs_dtimperiod = bs.bs_intval; 1997 bs.bs_nextdtim = nexttbtt; 1998 /* 1999 * The 802.11 layer records the offset to the DTIM 2000 * bitmap while receiving beacons; use it here to 2001 * enable h/w detection of our AID being marked in 2002 * the bitmap vector (to indicate frames for us are 2003 * pending at the AP). 2004 */ 2005 bs.bs_timoffset = ni->ni_timoff; 2006 /* 2007 * Calculate the number of consecutive beacons to miss 2008 * before taking a BMISS interrupt. The configuration 2009 * is specified in ms, so we need to convert that to 2010 * TU's and then calculate based on the beacon interval. 2011 * Note that we clamp the result to at most 10 beacons. 2012 */ 2013 bmisstime = MS_TO_TU(ic->ic_bmisstimeout); 2014 bs.bs_bmissthreshold = howmany(bmisstime, intval); 2015 if (bs.bs_bmissthreshold > 10) 2016 bs.bs_bmissthreshold = 10; 2017 else if (bs.bs_bmissthreshold <= 0) 2018 bs.bs_bmissthreshold = 1; 2019 2020 /* 2021 * Calculate sleep duration. The configuration is 2022 * given in ms. We insure a multiple of the beacon 2023 * period is used. Also, if the sleep duration is 2024 * greater than the DTIM period then it makes senses 2025 * to make it a multiple of that. 
2026 * 2027 * XXX fixed at 100ms 2028 */ 2029 bs.bs_sleepduration = roundup(MS_TO_TU(100), bs.bs_intval); 2030 if (bs.bs_sleepduration > bs.bs_dtimperiod) 2031 bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod); 2032 2033 DPRINTF(sc, ATH_DEBUG_BEACON, 2034 "%s: intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n" 2035 , __func__ 2036 , bs.bs_intval 2037 , bs.bs_nexttbtt 2038 , bs.bs_dtimperiod 2039 , bs.bs_nextdtim 2040 , bs.bs_bmissthreshold 2041 , bs.bs_sleepduration 2042 , bs.bs_cfpperiod 2043 , bs.bs_cfpmaxduration 2044 , bs.bs_cfpnext 2045 , bs.bs_timoffset 2046 ); 2047 ath_hal_intrset(ah, 0); 2048 ath_hal_beacontimers(ah, &bs); 2049 sc->sc_imask |= HAL_INT_BMISS; 2050 ath_hal_intrset(ah, sc->sc_imask); 2051 } else { 2052 ath_hal_intrset(ah, 0); 2053 if (nexttbtt == intval) 2054 intval |= HAL_BEACON_RESET_TSF; 2055 if (ic->ic_opmode == IEEE80211_M_IBSS) { 2056 /* 2057 * In IBSS mode enable the beacon timers but only 2058 * enable SWBA interrupts if we need to manually 2059 * prepare beacon frames. Otherwise we use a 2060 * self-linked tx descriptor and let the hardware 2061 * deal with things. 2062 */ 2063 intval |= HAL_BEACON_ENA; 2064 if (!sc->sc_hasveol) 2065 sc->sc_imask |= HAL_INT_SWBA; 2066 } else if (ic->ic_opmode == IEEE80211_M_HOSTAP) { 2067 /* 2068 * In AP mode we enable the beacon timers and 2069 * SWBA interrupts to prepare beacon frames. 2070 */ 2071 intval |= HAL_BEACON_ENA; 2072 sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */ 2073 } 2074 ath_hal_beaconinit(ah, nexttbtt, intval); 2075 sc->sc_bmisscount = 0; 2076 ath_hal_intrset(ah, sc->sc_imask); 2077 /* 2078 * When using a self-linked beacon descriptor in 2079 * ibss mode load it once here. 
2080 */ 2081 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) 2082 ath_beacon_proc(sc, 0); 2083 } 2084#undef MS_TO_TU 2085} 2086 2087static void 2088ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 2089{ 2090 bus_addr_t *paddr = (bus_addr_t*) arg; 2091 KASSERT(error == 0, ("error %u on bus_dma callback", error)); 2092 *paddr = segs->ds_addr; 2093} 2094 2095static int 2096ath_descdma_setup(struct ath_softc *sc, 2097 struct ath_descdma *dd, ath_bufhead *head, 2098 const char *name, int nbuf, int ndesc) 2099{ 2100#define DS2PHYS(_dd, _ds) \ 2101 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 2102 struct ifnet *ifp = &sc->sc_if; 2103 struct ath_desc *ds; 2104 struct ath_buf *bf; 2105 int i, bsize, error; 2106 2107 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n", 2108 __func__, name, nbuf, ndesc); 2109 2110 dd->dd_name = name; 2111 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc; 2112 2113 /* 2114 * Setup DMA descriptor area. 
2115 */ 2116 error = bus_dma_tag_create(NULL, /* parent */ 2117 PAGE_SIZE, 0, /* alignment, bounds */ 2118 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2119 BUS_SPACE_MAXADDR, /* highaddr */ 2120 NULL, NULL, /* filter, filterarg */ 2121 dd->dd_desc_len, /* maxsize */ 2122 1, /* nsegments */ 2123 BUS_SPACE_MAXADDR, /* maxsegsize */ 2124 BUS_DMA_ALLOCNOW, /* flags */ 2125 NULL, /* lockfunc */ 2126 NULL, /* lockarg */ 2127 &dd->dd_dmat); 2128 if (error != 0) { 2129 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); 2130 return error; 2131 } 2132 2133 /* allocate descriptors */ 2134 error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap); 2135 if (error != 0) { 2136 if_printf(ifp, "unable to create dmamap for %s descriptors, " 2137 "error %u\n", dd->dd_name, error); 2138 goto fail0; 2139 } 2140 2141 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, 2142 BUS_DMA_NOWAIT, &dd->dd_dmamap); 2143 if (error != 0) { 2144 if_printf(ifp, "unable to alloc memory for %u %s descriptors, " 2145 "error %u\n", nbuf * ndesc, dd->dd_name, error); 2146 goto fail1; 2147 } 2148 2149 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, 2150 dd->dd_desc, dd->dd_desc_len, 2151 ath_load_cb, &dd->dd_desc_paddr, 2152 BUS_DMA_NOWAIT); 2153 if (error != 0) { 2154 if_printf(ifp, "unable to map %s descriptors, error %u\n", 2155 dd->dd_name, error); 2156 goto fail2; 2157 } 2158 2159 ds = dd->dd_desc; 2160 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", 2161 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, 2162 (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); 2163 2164 /* allocate rx buffers */ 2165 bsize = sizeof(struct ath_buf) * nbuf; 2166 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 2167 if (bf == NULL) { 2168 if_printf(ifp, "malloc of %s buffers failed, size %u\n", 2169 dd->dd_name, bsize); 2170 goto fail3; 2171 } 2172 dd->dd_bufptr = bf; 2173 2174 STAILQ_INIT(head); 2175 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) { 
2176 bf->bf_desc = ds; 2177 bf->bf_daddr = DS2PHYS(dd, ds); 2178 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 2179 &bf->bf_dmamap); 2180 if (error != 0) { 2181 if_printf(ifp, "unable to create dmamap for %s " 2182 "buffer %u, error %u\n", dd->dd_name, i, error); 2183 ath_descdma_cleanup(sc, dd, head); 2184 return error; 2185 } 2186 STAILQ_INSERT_TAIL(head, bf, bf_list); 2187 } 2188 return 0; 2189fail3: 2190 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 2191fail2: 2192 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 2193fail1: 2194 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 2195fail0: 2196 bus_dma_tag_destroy(dd->dd_dmat); 2197 memset(dd, 0, sizeof(*dd)); 2198 return error; 2199#undef DS2PHYS 2200} 2201 2202static void 2203ath_descdma_cleanup(struct ath_softc *sc, 2204 struct ath_descdma *dd, ath_bufhead *head) 2205{ 2206 struct ath_buf *bf; 2207 struct ieee80211_node *ni; 2208 2209 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 2210 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 2211 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 2212 bus_dma_tag_destroy(dd->dd_dmat); 2213 2214 STAILQ_FOREACH(bf, head, bf_list) { 2215 if (bf->bf_m) { 2216 m_freem(bf->bf_m); 2217 bf->bf_m = NULL; 2218 } 2219 if (bf->bf_dmamap != NULL) { 2220 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 2221 bf->bf_dmamap = NULL; 2222 } 2223 ni = bf->bf_node; 2224 bf->bf_node = NULL; 2225 if (ni != NULL) { 2226 /* 2227 * Reclaim node reference. 
2228 */ 2229 ieee80211_free_node(ni); 2230 } 2231 } 2232 2233 STAILQ_INIT(head); 2234 free(dd->dd_bufptr, M_ATHDEV); 2235 memset(dd, 0, sizeof(*dd)); 2236} 2237 2238static int 2239ath_desc_alloc(struct ath_softc *sc) 2240{ 2241 int error; 2242 2243 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, 2244 "rx", ATH_RXBUF, 1); 2245 if (error != 0) 2246 return error; 2247 2248 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 2249 "tx", ATH_TXBUF, ATH_TXDESC); 2250 if (error != 0) { 2251 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 2252 return error; 2253 } 2254 2255 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, 2256 "beacon", 1, 1); 2257 if (error != 0) { 2258 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 2259 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 2260 return error; 2261 } 2262 return 0; 2263} 2264 2265static void 2266ath_desc_free(struct ath_softc *sc) 2267{ 2268 2269 if (sc->sc_bdma.dd_desc_len != 0) 2270 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); 2271 if (sc->sc_txdma.dd_desc_len != 0) 2272 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 2273 if (sc->sc_rxdma.dd_desc_len != 0) 2274 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 2275} 2276 2277static struct ieee80211_node * 2278ath_node_alloc(struct ieee80211_node_table *nt) 2279{ 2280 struct ieee80211com *ic = nt->nt_ic; 2281 struct ath_softc *sc = ic->ic_ifp->if_softc; 2282 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; 2283 struct ath_node *an; 2284 2285 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); 2286 if (an == NULL) { 2287 /* XXX stat+msg */ 2288 return NULL; 2289 } 2290 an->an_avgrssi = ATH_RSSI_DUMMY_MARKER; 2291 an->an_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; 2292 an->an_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; 2293 an->an_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; 2294 ath_rate_node_init(sc, an); 2295 2296 DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an); 2297 return 
&an->an_node; 2298} 2299 2300static void 2301ath_node_free(struct ieee80211_node *ni) 2302{ 2303 struct ieee80211com *ic = ni->ni_ic; 2304 struct ath_softc *sc = ic->ic_ifp->if_softc; 2305 2306 DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni); 2307 2308 ath_rate_node_cleanup(sc, ATH_NODE(ni)); 2309 sc->sc_node_free(ni); 2310} 2311 2312static u_int8_t 2313ath_node_getrssi(const struct ieee80211_node *ni) 2314{ 2315#define HAL_EP_RND(x, mul) \ 2316 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul)) 2317 u_int32_t avgrssi = ATH_NODE_CONST(ni)->an_avgrssi; 2318 int32_t rssi; 2319 2320 /* 2321 * When only one frame is received there will be no state in 2322 * avgrssi so fallback on the value recorded by the 802.11 layer. 2323 */ 2324 if (avgrssi != ATH_RSSI_DUMMY_MARKER) 2325 rssi = HAL_EP_RND(avgrssi, HAL_RSSI_EP_MULTIPLIER); 2326 else 2327 rssi = ni->ni_rssi; 2328 /* NB: theoretically we shouldn't need this, but be paranoid */ 2329 return rssi < 0 ? 0 : rssi > 127 ? 127 : rssi; 2330#undef HAL_EP_RND 2331} 2332 2333static int 2334ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf) 2335{ 2336 struct ath_hal *ah = sc->sc_ah; 2337 int error; 2338 struct mbuf *m; 2339 struct ath_desc *ds; 2340 2341 m = bf->bf_m; 2342 if (m == NULL) { 2343 /* 2344 * NB: by assigning a page to the rx dma buffer we 2345 * implicitly satisfy the Atheros requirement that 2346 * this buffer be cache-line-aligned and sized to be 2347 * multiple of the cache line size. Not doing this 2348 * causes weird stuff to happen (for the 5210 at least). 
2349 */ 2350 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 2351 if (m == NULL) { 2352 DPRINTF(sc, ATH_DEBUG_ANY, 2353 "%s: no mbuf/cluster\n", __func__); 2354 sc->sc_stats.ast_rx_nombuf++; 2355 return ENOMEM; 2356 } 2357 bf->bf_m = m; 2358 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; 2359 2360 error = bus_dmamap_load_mbuf(sc->sc_dmat, 2361 bf->bf_dmamap, m, 2362 ath_mbuf_load_cb, bf, 2363 BUS_DMA_NOWAIT); 2364 if (error != 0) { 2365 DPRINTF(sc, ATH_DEBUG_ANY, 2366 "%s: bus_dmamap_load_mbuf failed; error %d\n", 2367 __func__, error); 2368 sc->sc_stats.ast_rx_busdma++; 2369 return error; 2370 } 2371 KASSERT(bf->bf_nseg == 1, 2372 ("multi-segment packet; nseg %u", bf->bf_nseg)); 2373 } 2374 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD); 2375 2376 /* 2377 * Setup descriptors. For receive we always terminate 2378 * the descriptor list with a self-linked entry so we'll 2379 * not get overrun under high load (as can happen with a 2380 * 5212 when ANI processing enables PHY error frames). 2381 * 2382 * To insure the last descriptor is self-linked we create 2383 * each descriptor as self-linked and add it to the end. As 2384 * each additional descriptor is added the previous self-linked 2385 * entry is ``fixed'' naturally. This should be safe even 2386 * if DMA is happening. When processing RX interrupts we 2387 * never remove/process the last, self-linked, entry on the 2388 * descriptor list. This insures the hardware always has 2389 * someplace to write a new frame. 2390 */ 2391 ds = bf->bf_desc; 2392 ds->ds_link = bf->bf_daddr; /* link to self */ 2393 ds->ds_data = bf->bf_segs[0].ds_addr; 2394 ath_hal_setuprxdesc(ah, ds 2395 , m->m_len /* buffer size */ 2396 , 0 2397 ); 2398 2399 if (sc->sc_rxlink != NULL) 2400 *sc->sc_rxlink = bf->bf_daddr; 2401 sc->sc_rxlink = &ds->ds_link; 2402 return 0; 2403} 2404 2405/* 2406 * Intercept management frames to collect beacon rssi data 2407 * and to do ibss merges. 
2408 */ 2409static void 2410ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m, 2411 struct ieee80211_node *ni, 2412 int subtype, int rssi, u_int32_t rstamp) 2413{ 2414 struct ath_softc *sc = ic->ic_ifp->if_softc; 2415 2416 /* 2417 * Call up first so subsequent work can use information 2418 * potentially stored in the node (e.g. for ibss merge). 2419 */ 2420 sc->sc_recv_mgmt(ic, m, ni, subtype, rssi, rstamp); 2421 switch (subtype) { 2422 case IEEE80211_FC0_SUBTYPE_BEACON: 2423 /* update rssi statistics for use by the hal */ 2424 ATH_RSSI_LPF(ATH_NODE(ni)->an_halstats.ns_avgbrssi, rssi); 2425 /* fall thru... */ 2426 case IEEE80211_FC0_SUBTYPE_PROBE_RESP: 2427 if (ic->ic_opmode == IEEE80211_M_IBSS && 2428 ic->ic_state == IEEE80211_S_RUN) { 2429 struct ath_hal *ah = sc->sc_ah; 2430 /* XXX extend rstamp */ 2431 u_int64_t tsf = ath_hal_gettsf64(ah); 2432 2433 /* 2434 * Handle ibss merge as needed; check the tsf on the 2435 * frame before attempting the merge. The 802.11 spec 2436 * says the station should change it's bssid to match 2437 * the oldest station with the same ssid, where oldest 2438 * is determined by the tsf. 2439 */ 2440 if (le64toh(ni->ni_tstamp.tsf) >= tsf && 2441 ieee80211_ibss_merge(ic, ni)) 2442 ath_hal_setassocid(ah, ic->ic_bss->ni_bssid, 0); 2443 } 2444 break; 2445 } 2446} 2447 2448/* 2449 * Set the default antenna. 
2450 */ 2451static void 2452ath_setdefantenna(struct ath_softc *sc, u_int antenna) 2453{ 2454 struct ath_hal *ah = sc->sc_ah; 2455 2456 /* XXX block beacon interrupts */ 2457 ath_hal_setdefantenna(ah, antenna); 2458 if (sc->sc_defant != antenna) 2459 sc->sc_stats.ast_ant_defswitch++; 2460 sc->sc_defant = antenna; 2461 sc->sc_rxotherant = 0; 2462} 2463 2464static void 2465ath_rx_proc(void *arg, int npending) 2466{ 2467#define PA2DESC(_sc, _pa) \ 2468 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ 2469 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) 2470 struct ath_softc *sc = arg; 2471 struct ath_buf *bf; 2472 struct ieee80211com *ic = &sc->sc_ic; 2473 struct ifnet *ifp = &sc->sc_if; 2474 struct ath_hal *ah = sc->sc_ah; 2475 struct ath_desc *ds; 2476 struct mbuf *m; 2477 struct ieee80211_node *ni; 2478 struct ath_node *an; 2479 int len; 2480 u_int phyerr; 2481 HAL_STATUS status; 2482 2483 NET_LOCK_GIANT(); /* XXX */ 2484 2485 DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending); 2486 do { 2487 bf = STAILQ_FIRST(&sc->sc_rxbuf); 2488 if (bf == NULL) { /* NB: shouldn't happen */ 2489 if_printf(ifp, "%s: no buffer!\n", __func__); 2490 break; 2491 } 2492 ds = bf->bf_desc; 2493 if (ds->ds_link == bf->bf_daddr) { 2494 /* NB: never process the self-linked entry at the end */ 2495 break; 2496 } 2497 m = bf->bf_m; 2498 if (m == NULL) { /* NB: shouldn't happen */ 2499 if_printf(ifp, "%s: no mbuf!\n", __func__); 2500 continue; 2501 } 2502 /* XXX sync descriptor memory */ 2503 /* 2504 * Must provide the virtual address of the current 2505 * descriptor, the physical address, and the virtual 2506 * address of the next descriptor in the h/w chain. 2507 * This allows the HAL to look ahead to see if the 2508 * hardware is done with a descriptor by checking the 2509 * done bit in the following descriptor and the address 2510 * of the current descriptor the DMA engine is working 2511 * on. 
All this is necessary because of our use of 2512 * a self-linked list to avoid rx overruns. 2513 */ 2514 status = ath_hal_rxprocdesc(ah, ds, 2515 bf->bf_daddr, PA2DESC(sc, ds->ds_link)); 2516#ifdef AR_DEBUG 2517 if (sc->sc_debug & ATH_DEBUG_RECV_DESC) 2518 ath_printrxbuf(bf, status == HAL_OK); 2519#endif 2520 if (status == HAL_EINPROGRESS) 2521 break; 2522 STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list); 2523 if (ds->ds_rxstat.rs_more) { 2524 /* 2525 * Frame spans multiple descriptors; this 2526 * cannot happen yet as we don't support 2527 * jumbograms. If not in monitor mode, 2528 * discard the frame. 2529 */ 2530 if (ic->ic_opmode != IEEE80211_M_MONITOR) { 2531 sc->sc_stats.ast_rx_toobig++; 2532 goto rx_next; 2533 } 2534 /* fall thru for monitor mode handling... */ 2535 } else if (ds->ds_rxstat.rs_status != 0) { 2536 if (ds->ds_rxstat.rs_status & HAL_RXERR_CRC) 2537 sc->sc_stats.ast_rx_crcerr++; 2538 if (ds->ds_rxstat.rs_status & HAL_RXERR_FIFO) 2539 sc->sc_stats.ast_rx_fifoerr++; 2540 if (ds->ds_rxstat.rs_status & HAL_RXERR_PHY) { 2541 sc->sc_stats.ast_rx_phyerr++; 2542 phyerr = ds->ds_rxstat.rs_phyerr & 0x1f; 2543 sc->sc_stats.ast_rx_phy[phyerr]++; 2544 goto rx_next; 2545 } 2546 if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT) { 2547 /* 2548 * Decrypt error. If the error occurred 2549 * because there was no hardware key, then 2550 * let the frame through so the upper layers 2551 * can process it. This is necessary for 5210 2552 * parts which have no way to setup a ``clear'' 2553 * key cache entry. 2554 * 2555 * XXX do key cache faulting 2556 */ 2557 if (ds->ds_rxstat.rs_keyix == HAL_RXKEYIX_INVALID) 2558 goto rx_accept; 2559 sc->sc_stats.ast_rx_badcrypt++; 2560 } 2561 if (ds->ds_rxstat.rs_status & HAL_RXERR_MIC) { 2562 sc->sc_stats.ast_rx_badmic++; 2563 /* 2564 * Do minimal work required to hand off 2565 * the 802.11 header for notifcation. 
2566 */ 2567 /* XXX frag's and qos frames */ 2568 len = ds->ds_rxstat.rs_datalen; 2569 if (len >= sizeof (struct ieee80211_frame)) { 2570 bus_dmamap_sync(sc->sc_dmat, 2571 bf->bf_dmamap, 2572 BUS_DMASYNC_POSTREAD); 2573 ieee80211_notify_michael_failure(ic, 2574 mtod(m, struct ieee80211_frame *), 2575 sc->sc_splitmic ? 2576 ds->ds_rxstat.rs_keyix-32 : 2577 ds->ds_rxstat.rs_keyix 2578 ); 2579 } 2580 } 2581 ifp->if_ierrors++; 2582 /* 2583 * Reject error frames, we normally don't want 2584 * to see them in monitor mode (in monitor mode 2585 * allow through packets that have crypto problems). 2586 */ 2587 if ((ds->ds_rxstat.rs_status &~ 2588 (HAL_RXERR_DECRYPT|HAL_RXERR_MIC)) || 2589 sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR) 2590 goto rx_next; 2591 } 2592rx_accept: 2593 /* 2594 * Sync and unmap the frame. At this point we're 2595 * committed to passing the mbuf somewhere so clear 2596 * bf_m; this means a new sk_buff must be allocated 2597 * when the rx descriptor is setup again to receive 2598 * another frame. 2599 */ 2600 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 2601 BUS_DMASYNC_POSTREAD); 2602 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2603 bf->bf_m = NULL; 2604 2605 m->m_pkthdr.rcvif = ifp; 2606 len = ds->ds_rxstat.rs_datalen; 2607 m->m_pkthdr.len = m->m_len = len; 2608 2609 if (sc->sc_softled) 2610 ath_update_led(sc); 2611 sc->sc_stats.ast_ant_rx[ds->ds_rxstat.rs_antenna]++; 2612 2613 if (sc->sc_drvbpf) { 2614 const void *data; 2615 int hdrsize, hdrspace; 2616 u_int8_t rix; 2617 2618 /* 2619 * Discard anything shorter than an ack or cts. 
2620 */ 2621 if (len < IEEE80211_ACK_LEN) { 2622 DPRINTF(sc, ATH_DEBUG_RECV, 2623 "%s: runt packet %d\n", 2624 __func__, len); 2625 sc->sc_stats.ast_rx_tooshort++; 2626 m_freem(m); 2627 goto rx_next; 2628 } 2629 rix = ds->ds_rxstat.rs_rate; 2630 sc->sc_rx_th.wr_flags = sc->sc_hwflags[rix]; 2631 sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix]; 2632 sc->sc_rx_th.wr_antsignal = ds->ds_rxstat.rs_rssi; 2633 sc->sc_rx_th.wr_antenna = ds->ds_rxstat.rs_antenna; 2634 /* XXX TSF */ 2635 2636 /* 2637 * Gag, deal with hardware padding of headers. This 2638 * only happens for QoS frames. We copy the 802.11 2639 * header out-of-line and supply it separately, then 2640 * adjust the mbuf chain. It would be better if we 2641 * could just flag the packet in the radiotap header 2642 * and have applications DTRT. 2643 */ 2644 if (len > sizeof(struct ieee80211_qosframe)) { 2645 data = mtod(m, const void *); 2646 hdrsize = ieee80211_anyhdrsize(data); 2647 if (hdrsize & 3) { 2648 bcopy(data, &sc->sc_rx_wh, hdrsize); 2649 hdrspace = roundup(hdrsize, 2650 sizeof(u_int32_t)); 2651 m->m_data += hdrspace; 2652 m->m_len -= hdrspace; 2653 bpf_mtap2(sc->sc_drvbpf, &sc->sc_rx, 2654 sc->sc_rx_rt_len + hdrsize, m); 2655 m->m_data -= hdrspace; 2656 m->m_len += hdrspace; 2657 } else 2658 bpf_mtap2(sc->sc_drvbpf, 2659 &sc->sc_rx, sc->sc_rx_rt_len, m); 2660 } else 2661 bpf_mtap2(sc->sc_drvbpf, 2662 &sc->sc_rx, sc->sc_rx_rt_len, m); 2663 } 2664 2665 /* 2666 * From this point on we assume the frame is at least 2667 * as large as ieee80211_frame_min; verify that. 
2668 */ 2669 if (len < IEEE80211_MIN_LEN) { 2670 DPRINTF(sc, ATH_DEBUG_RECV, "%s: short packet %d\n", 2671 __func__, len); 2672 sc->sc_stats.ast_rx_tooshort++; 2673 m_freem(m); 2674 goto rx_next; 2675 } 2676 2677 if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) { 2678 ieee80211_dump_pkt(mtod(m, caddr_t), len, 2679 sc->sc_hwmap[ds->ds_rxstat.rs_rate], 2680 ds->ds_rxstat.rs_rssi); 2681 } 2682 2683 m_adj(m, -IEEE80211_CRC_LEN); 2684 2685 /* 2686 * Locate the node for sender, track state, and then 2687 * pass the (referenced) node up to the 802.11 layer 2688 * for its use. 2689 */ 2690 ni = ieee80211_find_rxnode(ic, 2691 mtod(m, const struct ieee80211_frame_min *)); 2692 2693 /* 2694 * Track rx rssi and do any rx antenna management. 2695 */ 2696 an = ATH_NODE(ni); 2697 ATH_RSSI_LPF(an->an_avgrssi, ds->ds_rxstat.rs_rssi); 2698 if (sc->sc_diversity) { 2699 /* 2700 * When using fast diversity, change the default rx 2701 * antenna if diversity chooses the other antenna 3 2702 * times in a row. 2703 */ 2704 if (sc->sc_defant != ds->ds_rxstat.rs_antenna) { 2705 if (++sc->sc_rxotherant >= 3) 2706 ath_setdefantenna(sc, 2707 ds->ds_rxstat.rs_antenna); 2708 } else 2709 sc->sc_rxotherant = 0; 2710 } 2711 2712 /* 2713 * Send frame up for processing. 2714 */ 2715 ieee80211_input(ic, m, ni, 2716 ds->ds_rxstat.rs_rssi, ds->ds_rxstat.rs_tstamp); 2717 2718 /* 2719 * Reclaim node reference. 2720 */ 2721 ieee80211_free_node(ni); 2722rx_next: 2723 STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 2724 } while (ath_rxbuf_init(sc, bf) == 0); 2725 2726 /* rx signal state monitoring */ 2727 ath_hal_rxmonitor(ah, &ATH_NODE(ic->ic_bss)->an_halstats); 2728 2729 NET_UNLOCK_GIANT(); /* XXX */ 2730#undef PA2DESC 2731} 2732 2733/* 2734 * Setup a h/w transmit queue. 
2735 */ 2736static struct ath_txq * 2737ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 2738{ 2739#define N(a) (sizeof(a)/sizeof(a[0])) 2740 struct ath_hal *ah = sc->sc_ah; 2741 HAL_TXQ_INFO qi; 2742 int qnum; 2743 2744 memset(&qi, 0, sizeof(qi)); 2745 qi.tqi_subtype = subtype; 2746 qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 2747 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 2748 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 2749 /* 2750 * Enable interrupts only for EOL and DESC conditions. 2751 * We mark tx descriptors to receive a DESC interrupt 2752 * when a tx queue gets deep; otherwise waiting for the 2753 * EOL to reap descriptors. Note that this is done to 2754 * reduce interrupt load and this only defers reaping 2755 * descriptors, never transmitting frames. Aside from 2756 * reducing interrupts this also permits more concurrency. 2757 * The only potential downside is if the tx queue backs 2758 * up in which case the top half of the kernel may backup 2759 * due to a lack of tx descriptors. 2760 */ 2761 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | TXQ_FLAG_TXDESCINT_ENABLE; 2762 qnum = ath_hal_setuptxqueue(ah, qtype, &qi); 2763 if (qnum == -1) { 2764 /* 2765 * NB: don't print a message, this happens 2766 * normally on parts with too few tx queues 2767 */ 2768 return NULL; 2769 } 2770 if (qnum >= N(sc->sc_txq)) { 2771 device_printf(sc->sc_dev, 2772 "hal qnum %u out of range, max %zu!\n", 2773 qnum, N(sc->sc_txq)); 2774 ath_hal_releasetxqueue(ah, qnum); 2775 return NULL; 2776 } 2777 if (!ATH_TXQ_SETUP(sc, qnum)) { 2778 struct ath_txq *txq = &sc->sc_txq[qnum]; 2779 2780 txq->axq_qnum = qnum; 2781 txq->axq_depth = 0; 2782 txq->axq_intrcnt = 0; 2783 txq->axq_link = NULL; 2784 STAILQ_INIT(&txq->axq_q); 2785 ATH_TXQ_LOCK_INIT(sc, txq); 2786 sc->sc_txqsetup |= 1<<qnum; 2787 } 2788 return &sc->sc_txq[qnum]; 2789#undef N 2790} 2791 2792/* 2793 * Setup a hardware data transmit queue for the specified 2794 * access control. 
The hal may not support all requested 2795 * queues in which case it will return a reference to a 2796 * previously setup queue. We record the mapping from ac's 2797 * to h/w queues for use by ath_tx_start and also track 2798 * the set of h/w queues being used to optimize work in the 2799 * transmit interrupt handler and related routines. 2800 */ 2801static int 2802ath_tx_setup(struct ath_softc *sc, int ac, int haltype) 2803{ 2804#define N(a) (sizeof(a)/sizeof(a[0])) 2805 struct ath_txq *txq; 2806 2807 if (ac >= N(sc->sc_ac2q)) { 2808 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", 2809 ac, N(sc->sc_ac2q)); 2810 return 0; 2811 } 2812 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); 2813 if (txq != NULL) { 2814 sc->sc_ac2q[ac] = txq; 2815 return 1; 2816 } else 2817 return 0; 2818#undef N 2819} 2820 2821/* 2822 * Update WME parameters for a transmit queue. 2823 */ 2824static int 2825ath_txq_update(struct ath_softc *sc, int ac) 2826{ 2827#define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) 2828#define ATH_TXOP_TO_US(v) (v<<5) 2829 struct ieee80211com *ic = &sc->sc_ic; 2830 struct ath_txq *txq = sc->sc_ac2q[ac]; 2831 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 2832 struct ath_hal *ah = sc->sc_ah; 2833 HAL_TXQ_INFO qi; 2834 2835 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); 2836 qi.tqi_aifs = wmep->wmep_aifsn; 2837 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 2838 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 2839 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit); 2840 2841 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { 2842 device_printf(sc->sc_dev, "unable to update hardware queue " 2843 "parameters for %s traffic!\n", 2844 ieee80211_wme_acnames[ac]); 2845 return 0; 2846 } else { 2847 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ 2848 return 1; 2849 } 2850#undef ATH_TXOP_TO_US 2851#undef ATH_EXPONENT_TO_VALUE 2852} 2853 2854/* 2855 * Callback from the 802.11 layer to update WME 
parameters. 2856 */ 2857static int 2858ath_wme_update(struct ieee80211com *ic) 2859{ 2860 struct ath_softc *sc = ic->ic_ifp->if_softc; 2861 2862 return !ath_txq_update(sc, WME_AC_BE) || 2863 !ath_txq_update(sc, WME_AC_BK) || 2864 !ath_txq_update(sc, WME_AC_VI) || 2865 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; 2866} 2867 2868/* 2869 * Reclaim resources for a setup queue. 2870 */ 2871static void 2872ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 2873{ 2874 2875 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); 2876 ATH_TXQ_LOCK_DESTROY(txq); 2877 sc->sc_txqsetup &= ~(1<<txq->axq_qnum); 2878} 2879 2880/* 2881 * Reclaim all tx queue resources. 2882 */ 2883static void 2884ath_tx_cleanup(struct ath_softc *sc) 2885{ 2886 int i; 2887 2888 ATH_TXBUF_LOCK_DESTROY(sc); 2889 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 2890 if (ATH_TXQ_SETUP(sc, i)) 2891 ath_tx_cleanupq(sc, &sc->sc_txq[i]); 2892} 2893 2894static int 2895ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, 2896 struct mbuf *m0) 2897{ 2898 struct ieee80211com *ic = &sc->sc_ic; 2899 struct ath_hal *ah = sc->sc_ah; 2900 struct ifnet *ifp = &sc->sc_if; 2901 int i, error, iswep, ismcast, keyix, hdrlen, pktlen, try0; 2902 u_int8_t rix, txrate, ctsrate; 2903 u_int8_t cix = 0xff; /* NB: silence compiler */ 2904 struct ath_desc *ds, *ds0; 2905 struct ath_txq *txq; 2906 struct mbuf *m; 2907 struct ieee80211_frame *wh; 2908 u_int subtype, flags, ctsduration; 2909 HAL_PKT_TYPE atype; 2910 const HAL_RATE_TABLE *rt; 2911 HAL_BOOL shortPreamble; 2912 struct ath_node *an; 2913 2914 wh = mtod(m0, struct ieee80211_frame *); 2915 iswep = wh->i_fc[1] & IEEE80211_FC1_WEP; 2916 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2917 hdrlen = ieee80211_anyhdrsize(wh); 2918 /* 2919 * Packet length must not include any 2920 * pad bytes; deduct them here. 
2921 */ 2922 pktlen = m0->m_pkthdr.len - (hdrlen & 3); 2923 2924 if (iswep) { 2925 const struct ieee80211_cipher *cip; 2926 struct ieee80211_key *k; 2927 2928 /* 2929 * Construct the 802.11 header+trailer for an encrypted 2930 * frame. The only reason this can fail is because of an 2931 * unknown or unsupported cipher/key type. 2932 */ 2933 k = ieee80211_crypto_encap(ic, ni, m0); 2934 if (k == NULL) { 2935 /* 2936 * This can happen when the key is yanked after the 2937 * frame was queued. Just discard the frame; the 2938 * 802.11 layer counts failures and provides 2939 * debugging/diagnostics. 2940 */ 2941 return EIO; 2942 } 2943 /* 2944 * Adjust the packet + header lengths for the crypto 2945 * additions and calculate the h/w key index. When 2946 * a s/w mic is done the frame will have had any mic 2947 * added to it prior to entry so skb->len above will 2948 * account for it. Otherwise we need to add it to the 2949 * packet length. 2950 */ 2951 cip = k->wk_cipher; 2952 hdrlen += cip->ic_header; 2953 pktlen += cip->ic_header + cip->ic_trailer; 2954 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0) 2955 pktlen += cip->ic_miclen; 2956 keyix = k->wk_keyix; 2957 2958 /* packet header may have moved, reset our local pointer */ 2959 wh = mtod(m0, struct ieee80211_frame *); 2960 } else 2961 keyix = HAL_TXKEYIX_INVALID; 2962 2963 pktlen += IEEE80211_CRC_LEN; 2964 2965 /* 2966 * Load the DMA map so any coalescing is done. This 2967 * also calculates the number of descriptors we need. 2968 */ 2969 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0, 2970 ath_mbuf_load_cb, bf, 2971 BUS_DMA_NOWAIT); 2972 if (error == EFBIG) { 2973 /* XXX packet requires too many descriptors */ 2974 bf->bf_nseg = ATH_TXDESC+1; 2975 } else if (error != 0) { 2976 sc->sc_stats.ast_tx_busdma++; 2977 m_freem(m0); 2978 return error; 2979 } 2980 /* 2981 * Discard null packets and check for packets that 2982 * require too many TX descriptors. We try to convert 2983 * the latter to a cluster. 
2984 */ 2985 if (bf->bf_nseg > ATH_TXDESC) { /* too many desc's, linearize */ 2986 sc->sc_stats.ast_tx_linear++; 2987 MGETHDR(m, M_DONTWAIT, MT_DATA); 2988 if (m == NULL) { 2989 sc->sc_stats.ast_tx_nombuf++; 2990 m_freem(m0); 2991 return ENOMEM; 2992 } 2993 M_MOVE_PKTHDR(m, m0); 2994 MCLGET(m, M_DONTWAIT); 2995 if ((m->m_flags & M_EXT) == 0) { 2996 sc->sc_stats.ast_tx_nomcl++; 2997 m_freem(m0); 2998 m_free(m); 2999 return ENOMEM; 3000 } 3001 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t)); 3002 m_freem(m0); 3003 m->m_len = m->m_pkthdr.len; 3004 m0 = m; 3005 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0, 3006 ath_mbuf_load_cb, bf, 3007 BUS_DMA_NOWAIT); 3008 if (error != 0) { 3009 sc->sc_stats.ast_tx_busdma++; 3010 m_freem(m0); 3011 return error; 3012 } 3013 KASSERT(bf->bf_nseg == 1, 3014 ("packet not one segment; nseg %u", bf->bf_nseg)); 3015 } else if (bf->bf_nseg == 0) { /* null packet, discard */ 3016 sc->sc_stats.ast_tx_nodata++; 3017 m_freem(m0); 3018 return EIO; 3019 } 3020 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n", __func__, m0, pktlen); 3021 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 3022 bf->bf_m = m0; 3023 bf->bf_node = ni; /* NB: held reference */ 3024 3025 /* setup descriptors */ 3026 ds = bf->bf_desc; 3027 rt = sc->sc_currates; 3028 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 3029 3030 /* 3031 * NB: the 802.11 layer marks whether or not we should 3032 * use short preamble based on the current mode and 3033 * negotiated parameters. 
3034 */ 3035 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 3036 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { 3037 shortPreamble = AH_TRUE; 3038 sc->sc_stats.ast_tx_shortpre++; 3039 } else { 3040 shortPreamble = AH_FALSE; 3041 } 3042 3043 an = ATH_NODE(ni); 3044 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 3045 /* 3046 * Calculate Atheros packet type from IEEE80211 packet header, 3047 * setup for rate calculations, and select h/w transmit queue. 3048 */ 3049 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { 3050 case IEEE80211_FC0_TYPE_MGT: 3051 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3052 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) 3053 atype = HAL_PKT_TYPE_BEACON; 3054 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 3055 atype = HAL_PKT_TYPE_PROBE_RESP; 3056 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) 3057 atype = HAL_PKT_TYPE_ATIM; 3058 else 3059 atype = HAL_PKT_TYPE_NORMAL; /* XXX */ 3060 rix = 0; /* XXX lowest rate */ 3061 try0 = ATH_TXMAXTRY; 3062 if (shortPreamble) 3063 txrate = an->an_tx_mgtratesp; 3064 else 3065 txrate = an->an_tx_mgtrate; 3066 /* NB: force all management frames to highest queue */ 3067 if (ni->ni_flags & IEEE80211_NODE_QOS) { 3068 /* NB: force all management frames to highest queue */ 3069 txq = sc->sc_ac2q[WME_AC_VO]; 3070 } else 3071 txq = sc->sc_ac2q[WME_AC_BE]; 3072 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 3073 break; 3074 case IEEE80211_FC0_TYPE_CTL: 3075 atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */ 3076 rix = 0; /* XXX lowest rate */ 3077 try0 = ATH_TXMAXTRY; 3078 if (shortPreamble) 3079 txrate = an->an_tx_mgtratesp; 3080 else 3081 txrate = an->an_tx_mgtrate; 3082 /* NB: force all ctl frames to highest queue */ 3083 if (ni->ni_flags & IEEE80211_NODE_QOS) { 3084 /* NB: force all ctl frames to highest queue */ 3085 txq = sc->sc_ac2q[WME_AC_VO]; 3086 } else 3087 txq = sc->sc_ac2q[WME_AC_BE]; 3088 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 3089 break; 3090 
case IEEE80211_FC0_TYPE_DATA: 3091 atype = HAL_PKT_TYPE_NORMAL; /* default */ 3092 /* 3093 * Data frames; consult the rate control module. 3094 */ 3095 ath_rate_findrate(sc, an, shortPreamble, pktlen, 3096 &rix, &try0, &txrate); 3097 /* 3098 * Default all non-QoS traffic to the background queue. 3099 */ 3100 if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) { 3101 u_int pri = M_WME_GETAC(m0); 3102 txq = sc->sc_ac2q[pri]; 3103 if (ic->ic_wme.wme_wmeChanParams.cap_wmeParams[pri].wmep_noackPolicy) 3104 flags |= HAL_TXDESC_NOACK; 3105 } else 3106 txq = sc->sc_ac2q[WME_AC_BE]; 3107 break; 3108 default: 3109 if_printf(ifp, "bogus frame type 0x%x (%s)\n", 3110 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); 3111 /* XXX statistic */ 3112 m_freem(m0); 3113 return EIO; 3114 } 3115 3116 /* 3117 * When servicing one or more stations in power-save mode 3118 * multicast frames must be buffered until after the beacon. 3119 * We use the CAB queue for that. 3120 */ 3121 if (ismcast && ic->ic_ps_sta) { 3122 txq = sc->sc_cabq; 3123 /* XXX? more bit in 802.11 frame header */ 3124 } 3125 3126 /* 3127 * Calculate miscellaneous flags. 3128 */ 3129 if (ismcast) { 3130 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ 3131 sc->sc_stats.ast_tx_noack++; 3132 } else if (pktlen > ic->ic_rtsthreshold) { 3133 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ 3134 cix = rt->info[rix].controlRate; 3135 sc->sc_stats.ast_tx_rts++; 3136 } 3137 3138 /* 3139 * If 802.11g protection is enabled, determine whether 3140 * to use RTS/CTS or just CTS. Note that this is only 3141 * done for OFDM unicast frames. 
3142 */ 3143 if ((ic->ic_flags & IEEE80211_F_USEPROT) && 3144 rt->info[rix].phy == IEEE80211_T_OFDM && 3145 (flags & HAL_TXDESC_NOACK) == 0) { 3146 /* XXX fragments must use CCK rates w/ protection */ 3147 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 3148 flags |= HAL_TXDESC_RTSENA; 3149 else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 3150 flags |= HAL_TXDESC_CTSENA; 3151 cix = rt->info[sc->sc_protrix].controlRate; 3152 sc->sc_stats.ast_tx_protect++; 3153 } 3154 3155 /* 3156 * Calculate duration. This logically belongs in the 802.11 3157 * layer but it lacks sufficient information to calculate it. 3158 */ 3159 if ((flags & HAL_TXDESC_NOACK) == 0 && 3160 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { 3161 u_int16_t dur; 3162 /* 3163 * XXX not right with fragmentation. 3164 */ 3165 if (shortPreamble) 3166 dur = rt->info[rix].spAckDuration; 3167 else 3168 dur = rt->info[rix].lpAckDuration; 3169 *(u_int16_t *)wh->i_dur = htole16(dur); 3170 } 3171 3172 /* 3173 * Calculate RTS/CTS rate and duration if needed. 3174 */ 3175 ctsduration = 0; 3176 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) { 3177 /* 3178 * CTS transmit rate is derived from the transmit rate 3179 * by looking in the h/w rate table. We must also factor 3180 * in whether or not a short preamble is to be used. 3181 */ 3182 /* NB: cix is set above where RTS/CTS is enabled */ 3183 KASSERT(cix != 0xff, ("cix not setup")); 3184 ctsrate = rt->info[cix].rateCode; 3185 /* 3186 * Compute the transmit duration based on the frame 3187 * size and the size of an ACK frame. We call into the 3188 * HAL to do the computation since it depends on the 3189 * characteristics of the actual PHY being used. 3190 * 3191 * NB: CTS is assumed the same size as an ACK so we can 3192 * use the precalculated ACK durations. 
3193 */ 3194 if (shortPreamble) { 3195 ctsrate |= rt->info[cix].shortPreamble; 3196 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 3197 ctsduration += rt->info[cix].spAckDuration; 3198 ctsduration += ath_hal_computetxtime(ah, 3199 rt, pktlen, rix, AH_TRUE); 3200 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 3201 ctsduration += rt->info[cix].spAckDuration; 3202 } else { 3203 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 3204 ctsduration += rt->info[cix].lpAckDuration; 3205 ctsduration += ath_hal_computetxtime(ah, 3206 rt, pktlen, rix, AH_FALSE); 3207 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 3208 ctsduration += rt->info[cix].lpAckDuration; 3209 } 3210 /* 3211 * Must disable multi-rate retry when using RTS/CTS. 3212 */ 3213 try0 = ATH_TXMAXTRY; 3214 } else 3215 ctsrate = 0; 3216 3217 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 3218 ieee80211_dump_pkt(mtod(m0, caddr_t), m0->m_len, 3219 sc->sc_hwmap[txrate], -1); 3220 3221 if (ic->ic_rawbpf) 3222 bpf_mtap(ic->ic_rawbpf, m0); 3223 if (sc->sc_drvbpf) { 3224 sc->sc_tx_th.wt_flags = sc->sc_hwflags[txrate]; 3225 if (iswep) 3226 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 3227 sc->sc_tx_th.wt_rate = sc->sc_hwmap[txrate]; 3228 sc->sc_tx_th.wt_txpower = ni->ni_txpower; 3229 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 3230 3231 bpf_mtap2(sc->sc_drvbpf, 3232 &sc->sc_tx_th, sc->sc_tx_th_len, m0); 3233 } 3234 3235 /* 3236 * Determine if a tx interrupt should be generated for 3237 * this descriptor. We take a tx interrupt to reap 3238 * descriptors when the h/w hits an EOL condition or 3239 * when the descriptor is specifically marked to generate 3240 * an interrupt. We periodically mark descriptors in this 3241 * way to insure timely replenishing of the supply needed 3242 * for sending frames. Defering interrupts reduces system 3243 * load and potentially allows more concurrent work to be 3244 * done but if done to aggressively can cause senders to 3245 * backup. 
3246 * 3247 * NB: use >= to deal with sc_txintrperiod changing 3248 * dynamically through sysctl. 3249 */ 3250 if (flags & HAL_TXDESC_INTREQ) { 3251 txq->axq_intrcnt = 0; 3252 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) { 3253 flags |= HAL_TXDESC_INTREQ; 3254 txq->axq_intrcnt = 0; 3255 } 3256 3257 /* 3258 * Formulate first tx descriptor with tx controls. 3259 */ 3260 /* XXX check return value? */ 3261 ath_hal_setuptxdesc(ah, ds 3262 , pktlen /* packet length */ 3263 , hdrlen /* header length */ 3264 , atype /* Atheros packet type */ 3265 , ni->ni_txpower /* txpower */ 3266 , txrate, try0 /* series 0 rate/tries */ 3267 , keyix /* key cache index */ 3268 , sc->sc_txantenna /* antenna mode */ 3269 , flags /* flags */ 3270 , ctsrate /* rts/cts rate */ 3271 , ctsduration /* rts/cts duration */ 3272 ); 3273 /* 3274 * Setup the multi-rate retry state only when we're 3275 * going to use it. This assumes ath_hal_setuptxdesc 3276 * initializes the descriptors (so we don't have to) 3277 * when the hardware supports multi-rate retry and 3278 * we don't use it. 3279 */ 3280 if (try0 != ATH_TXMAXTRY) 3281 ath_rate_setupxtxdesc(sc, an, ds, shortPreamble, rix); 3282 3283 /* 3284 * Fillin the remainder of the descriptor info. 
3285 */ 3286 ds0 = ds; 3287 for (i = 0; i < bf->bf_nseg; i++, ds++) { 3288 ds->ds_data = bf->bf_segs[i].ds_addr; 3289 if (i == bf->bf_nseg - 1) 3290 ds->ds_link = 0; 3291 else 3292 ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1); 3293 ath_hal_filltxdesc(ah, ds 3294 , bf->bf_segs[i].ds_len /* segment length */ 3295 , i == 0 /* first segment */ 3296 , i == bf->bf_nseg - 1 /* last segment */ 3297 , ds0 /* first descriptor */ 3298 ); 3299 DPRINTF(sc, ATH_DEBUG_XMIT, 3300 "%s: %d: %08x %08x %08x %08x %08x %08x\n", 3301 __func__, i, ds->ds_link, ds->ds_data, 3302 ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]); 3303 } 3304#if 0 3305 if ((flags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) && 3306 !ath_hal_updateCTSForBursting(ah, ds 3307 , txq->axq_linkbuf != NULL ? 3308 txq->axq_linkbuf->bf_desc : NULL 3309 , txq->axq_lastdsWithCTS 3310 , txq->axq_gatingds 3311 , IEEE80211_TXOP_TO_US(ic->ic_chanParams.cap_wmeParams[skb->priority].wmep_txopLimit) 3312 , ath_hal_computetxtime(ah, rt, IEEE80211_ACK_LEN, cix, AH_TRUE))) { 3313 ATH_TXQ_LOCK(txq); 3314 txq->axq_lastdsWithCTS = ds; 3315 /* set gating Desc to final desc */ 3316 txq->axq_gatingds = (struct ath_desc *)txq->axq_link; 3317 ATH_TXQ_UNLOCK(txq); 3318 } 3319#endif 3320 /* 3321 * Insert the frame on the outbound list and 3322 * pass it on to the hardware. 
3323 */ 3324 ATH_TXQ_LOCK(txq); 3325 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 3326 if (txq->axq_link == NULL) { 3327 ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 3328 DPRINTF(sc, ATH_DEBUG_XMIT, 3329 "%s: TXDP[%u] = %p (%p) depth %d\n", __func__, 3330 txq->axq_qnum, (caddr_t)bf->bf_daddr, bf->bf_desc, 3331 txq->axq_depth); 3332 } else { 3333 *txq->axq_link = bf->bf_daddr; 3334 DPRINTF(sc, ATH_DEBUG_XMIT, 3335 "%s: link[%u](%p)=%p (%p) depth %d\n", __func__, 3336 txq->axq_qnum, txq->axq_link, 3337 (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth); 3338 } 3339 txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link; 3340 ATH_TXQ_UNLOCK(txq); 3341 3342 if (sc->sc_softled) 3343 ath_update_led(sc); 3344 3345 /* 3346 * The CAB queue is started from the SWBA handler since 3347 * frames only go out on DTIM and to avoid possible races. 3348 */ 3349 if (txq != sc->sc_cabq) 3350 ath_hal_txstart(ah, txq->axq_qnum); 3351 return 0; 3352} 3353 3354/* 3355 * Process completed xmit descriptors from the specified queue. 
3356 */ 3357static void 3358ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) 3359{ 3360 struct ath_hal *ah = sc->sc_ah; 3361 struct ieee80211com *ic = &sc->sc_ic; 3362 struct ath_buf *bf; 3363 struct ath_desc *ds; 3364 struct ieee80211_node *ni; 3365 struct ath_node *an; 3366 int sr, lr, pri; 3367 HAL_STATUS status; 3368 3369 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", 3370 __func__, txq->axq_qnum, 3371 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 3372 txq->axq_link); 3373 for (;;) { 3374 ATH_TXQ_LOCK(txq); 3375 txq->axq_intrcnt = 0; /* reset periodic desc intr count */ 3376 bf = STAILQ_FIRST(&txq->axq_q); 3377 if (bf == NULL) { 3378 txq->axq_link = NULL; 3379 ATH_TXQ_UNLOCK(txq); 3380 break; 3381 } 3382 /* only the last descriptor is needed */ 3383 ds = &bf->bf_desc[bf->bf_nseg - 1]; 3384 status = ath_hal_txprocdesc(ah, ds); 3385#ifdef AR_DEBUG 3386 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) 3387 ath_printtxbuf(bf, status == HAL_OK); 3388#endif 3389 if (status == HAL_EINPROGRESS) { 3390 ATH_TXQ_UNLOCK(txq); 3391 break; 3392 } 3393#if 0 3394 if (bf->bf_desc == txq->axq_lastdsWithCTS) 3395 txq->axq_lastdsWithCTS = NULL; 3396 if (ds == txq->axq_gatingds) 3397 txq->axq_gatingds = NULL; 3398#endif 3399 ATH_TXQ_REMOVE_HEAD(txq, bf_list); 3400 ATH_TXQ_UNLOCK(txq); 3401 3402 ni = bf->bf_node; 3403 if (ni != NULL) { 3404 an = ATH_NODE(ni); 3405 if (ds->ds_txstat.ts_status == 0) { 3406 u_int8_t txant = ds->ds_txstat.ts_antenna; 3407 sc->sc_stats.ast_ant_tx[txant]++; 3408 sc->sc_ant_tx[txant]++; 3409 if (ds->ds_txstat.ts_rate & HAL_TXSTAT_ALTRATE) 3410 sc->sc_stats.ast_tx_altrate++; 3411 sc->sc_stats.ast_tx_rssi = 3412 ds->ds_txstat.ts_rssi; 3413 ATH_RSSI_LPF(an->an_halstats.ns_avgtxrssi, 3414 ds->ds_txstat.ts_rssi); 3415 pri = M_WME_GETAC(bf->bf_m); 3416 if (pri >= WME_AC_VO) 3417 ic->ic_wme.wme_hipri_traffic++; 3418 ni->ni_inact = ni->ni_inact_reload; 3419 } else { 3420 if (ds->ds_txstat.ts_status & HAL_TXERR_XRETRY) 3421 
sc->sc_stats.ast_tx_xretries++; 3422 if (ds->ds_txstat.ts_status & HAL_TXERR_FIFO) 3423 sc->sc_stats.ast_tx_fifoerr++; 3424 if (ds->ds_txstat.ts_status & HAL_TXERR_FILT) 3425 sc->sc_stats.ast_tx_filtered++; 3426 } 3427 sr = ds->ds_txstat.ts_shortretry; 3428 lr = ds->ds_txstat.ts_longretry; 3429 sc->sc_stats.ast_tx_shortretry += sr; 3430 sc->sc_stats.ast_tx_longretry += lr; 3431 /* 3432 * Hand the descriptor to the rate control algorithm. 3433 */ 3434 ath_rate_tx_complete(sc, an, ds); 3435 /* 3436 * Reclaim reference to node. 3437 * 3438 * NB: the node may be reclaimed here if, for example 3439 * this is a DEAUTH message that was sent and the 3440 * node was timed out due to inactivity. 3441 */ 3442 ieee80211_free_node(ni); 3443 } 3444 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 3445 BUS_DMASYNC_POSTWRITE); 3446 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3447 m_freem(bf->bf_m); 3448 bf->bf_m = NULL; 3449 bf->bf_node = NULL; 3450 3451 ATH_TXBUF_LOCK(sc); 3452 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 3453 ATH_TXBUF_UNLOCK(sc); 3454 } 3455} 3456 3457/* 3458 * Deferred processing of transmit interrupt; special-cased 3459 * for a single hardware transmit queue (e.g. 5210 and 5211). 3460 */ 3461static void 3462ath_tx_proc_q0(void *arg, int npending) 3463{ 3464 struct ath_softc *sc = arg; 3465 struct ifnet *ifp = &sc->sc_if; 3466 3467 ath_tx_processq(sc, &sc->sc_txq[0]); 3468 ath_tx_processq(sc, sc->sc_cabq); 3469 ifp->if_flags &= ~IFF_OACTIVE; 3470 sc->sc_tx_timer = 0; 3471 3472 ath_start(ifp); 3473} 3474 3475/* 3476 * Deferred processing of transmit interrupt; special-cased 3477 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). 3478 */ 3479static void 3480ath_tx_proc_q0123(void *arg, int npending) 3481{ 3482 struct ath_softc *sc = arg; 3483 struct ifnet *ifp = &sc->sc_if; 3484 3485 /* 3486 * Process each active queue. 
3487 */ 3488 ath_tx_processq(sc, &sc->sc_txq[0]); 3489 ath_tx_processq(sc, &sc->sc_txq[1]); 3490 ath_tx_processq(sc, &sc->sc_txq[2]); 3491 ath_tx_processq(sc, &sc->sc_txq[3]); 3492 ath_tx_processq(sc, sc->sc_cabq); 3493 3494 ifp->if_flags &= ~IFF_OACTIVE; 3495 sc->sc_tx_timer = 0; 3496 3497 ath_start(ifp); 3498} 3499 3500/* 3501 * Deferred processing of transmit interrupt. 3502 */ 3503static void 3504ath_tx_proc(void *arg, int npending) 3505{ 3506 struct ath_softc *sc = arg; 3507 struct ifnet *ifp = &sc->sc_if; 3508 int i; 3509 3510 /* 3511 * Process each active queue. 3512 */ 3513 /* XXX faster to read ISR_S0_S and ISR_S1_S to determine q's? */ 3514 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 3515 if (ATH_TXQ_SETUP(sc, i)) 3516 ath_tx_processq(sc, &sc->sc_txq[i]); 3517 3518 ifp->if_flags &= ~IFF_OACTIVE; 3519 sc->sc_tx_timer = 0; 3520 3521 ath_start(ifp); 3522} 3523 3524static void 3525ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) 3526{ 3527 struct ath_hal *ah = sc->sc_ah; 3528 struct ieee80211_node *ni; 3529 struct ath_buf *bf; 3530 3531 /* 3532 * NB: this assumes output has been stopped and 3533 * we do not need to block ath_tx_tasklet 3534 */ 3535 for (;;) { 3536 ATH_TXQ_LOCK(txq); 3537 bf = STAILQ_FIRST(&txq->axq_q); 3538 if (bf == NULL) { 3539 txq->axq_link = NULL; 3540 ATH_TXQ_UNLOCK(txq); 3541 break; 3542 } 3543 ATH_TXQ_REMOVE_HEAD(txq, bf_list); 3544 ATH_TXQ_UNLOCK(txq); 3545#ifdef AR_DEBUG 3546 if (sc->sc_debug & ATH_DEBUG_RESET) 3547 ath_printtxbuf(bf, 3548 ath_hal_txprocdesc(ah, bf->bf_desc) == HAL_OK); 3549#endif /* AR_DEBUG */ 3550 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3551 m_freem(bf->bf_m); 3552 bf->bf_m = NULL; 3553 ni = bf->bf_node; 3554 bf->bf_node = NULL; 3555 if (ni != NULL) { 3556 /* 3557 * Reclaim node reference. 
3558 */ 3559 ieee80211_free_node(ni); 3560 } 3561 ATH_TXBUF_LOCK(sc); 3562 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 3563 ATH_TXBUF_UNLOCK(sc); 3564 } 3565} 3566 3567static void 3568ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) 3569{ 3570 struct ath_hal *ah = sc->sc_ah; 3571 3572 (void) ath_hal_stoptxdma(ah, txq->axq_qnum); 3573 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 3574 __func__, txq->axq_qnum, 3575 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), 3576 txq->axq_link); 3577} 3578 3579/* 3580 * Drain the transmit queues and reclaim resources. 3581 */ 3582static void 3583ath_draintxq(struct ath_softc *sc) 3584{ 3585 struct ath_hal *ah = sc->sc_ah; 3586 struct ifnet *ifp = &sc->sc_if; 3587 int i; 3588 3589 /* XXX return value */ 3590 if (!sc->sc_invalid) { 3591 /* don't touch the hardware if marked invalid */ 3592 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); 3593 DPRINTF(sc, ATH_DEBUG_RESET, 3594 "%s: beacon queue %p\n", __func__, 3595 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq)); 3596 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 3597 if (ATH_TXQ_SETUP(sc, i)) 3598 ath_tx_stopdma(sc, &sc->sc_txq[i]); 3599 } 3600 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 3601 if (ATH_TXQ_SETUP(sc, i)) 3602 ath_tx_draintxq(sc, &sc->sc_txq[i]); 3603 ifp->if_flags &= ~IFF_OACTIVE; 3604 sc->sc_tx_timer = 0; 3605} 3606 3607/* 3608 * Disable the receive h/w in preparation for a reset. 
3609 */ 3610static void 3611ath_stoprecv(struct ath_softc *sc) 3612{ 3613#define PA2DESC(_sc, _pa) \ 3614 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ 3615 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) 3616 struct ath_hal *ah = sc->sc_ah; 3617 3618 ath_hal_stoppcurecv(ah); /* disable PCU */ 3619 ath_hal_setrxfilter(ah, 0); /* clear recv filter */ 3620 ath_hal_stopdmarecv(ah); /* disable DMA engine */ 3621 DELAY(3000); /* 3ms is long enough for 1 frame */ 3622#ifdef AR_DEBUG 3623 if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) { 3624 struct ath_buf *bf; 3625 3626 printf("%s: rx queue %p, link %p\n", __func__, 3627 (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink); 3628 STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 3629 struct ath_desc *ds = bf->bf_desc; 3630 HAL_STATUS status = ath_hal_rxprocdesc(ah, ds, 3631 bf->bf_daddr, PA2DESC(sc, ds->ds_link)); 3632 if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL)) 3633 ath_printrxbuf(bf, status == HAL_OK); 3634 } 3635 } 3636#endif 3637 sc->sc_rxlink = NULL; /* just in case */ 3638#undef PA2DESC 3639} 3640 3641/* 3642 * Enable the receive h/w following a reset. 3643 */ 3644static int 3645ath_startrecv(struct ath_softc *sc) 3646{ 3647 struct ath_hal *ah = sc->sc_ah; 3648 struct ath_buf *bf; 3649 3650 sc->sc_rxlink = NULL; 3651 STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 3652 int error = ath_rxbuf_init(sc, bf); 3653 if (error != 0) { 3654 DPRINTF(sc, ATH_DEBUG_RECV, 3655 "%s: ath_rxbuf_init failed %d\n", 3656 __func__, error); 3657 return error; 3658 } 3659 } 3660 3661 bf = STAILQ_FIRST(&sc->sc_rxbuf); 3662 ath_hal_putrxbuf(ah, bf->bf_daddr); 3663 ath_hal_rxena(ah); /* enable recv descriptors */ 3664 ath_mode_init(sc); /* set filters, etc. */ 3665 ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */ 3666 return 0; 3667} 3668 3669/* 3670 * Update internal state after a channel change. 
3671 */ 3672static void 3673ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) 3674{ 3675 struct ieee80211com *ic = &sc->sc_ic; 3676 enum ieee80211_phymode mode; 3677 u_int16_t flags; 3678 3679 /* 3680 * Change channels and update the h/w rate map 3681 * if we're switching; e.g. 11a to 11b/g. 3682 */ 3683 mode = ieee80211_chan2mode(ic, chan); 3684 if (mode != sc->sc_curmode) 3685 ath_setcurmode(sc, mode); 3686 /* 3687 * Update BPF state. NB: ethereal et. al. don't handle 3688 * merged flags well so pick a unique mode for their use. 3689 */ 3690 if (IEEE80211_IS_CHAN_A(chan)) 3691 flags = IEEE80211_CHAN_A; 3692 /* XXX 11g schizophrenia */ 3693 else if (IEEE80211_IS_CHAN_G(chan) || 3694 IEEE80211_IS_CHAN_PUREG(chan)) 3695 flags = IEEE80211_CHAN_G; 3696 else 3697 flags = IEEE80211_CHAN_B; 3698 if (IEEE80211_IS_CHAN_T(chan)) 3699 flags |= IEEE80211_CHAN_TURBO; 3700 sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq = 3701 htole16(chan->ic_freq); 3702 sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags = 3703 htole16(flags); 3704} 3705 3706/* 3707 * Set/change channels. If the channel is really being changed, 3708 * it's done by reseting the chip. To accomplish this we must 3709 * first cleanup any pending DMA, then restart stuff after a la 3710 * ath_init. 3711 */ 3712static int 3713ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 3714{ 3715 struct ath_hal *ah = sc->sc_ah; 3716 struct ieee80211com *ic = &sc->sc_ic; 3717 HAL_CHANNEL hchan; 3718 3719 /* 3720 * Convert to a HAL channel description with 3721 * the flags constrained to reflect the current 3722 * operating mode. 
3723 */ 3724 hchan.channel = chan->ic_freq; 3725 hchan.channelFlags = ath_chan2flags(ic, chan); 3726 3727 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz) -> %u (%u MHz)\n", 3728 __func__, 3729 ath_hal_mhz2ieee(sc->sc_curchan.channel, 3730 sc->sc_curchan.channelFlags), 3731 sc->sc_curchan.channel, 3732 ath_hal_mhz2ieee(hchan.channel, hchan.channelFlags), hchan.channel); 3733 if (hchan.channel != sc->sc_curchan.channel || 3734 hchan.channelFlags != sc->sc_curchan.channelFlags) { 3735 HAL_STATUS status; 3736 3737 /* 3738 * To switch channels clear any pending DMA operations; 3739 * wait long enough for the RX fifo to drain, reset the 3740 * hardware at the new frequency, and then re-enable 3741 * the relevant bits of the h/w. 3742 */ 3743 ath_hal_intrset(ah, 0); /* disable interrupts */ 3744 ath_draintxq(sc); /* clear pending tx frames */ 3745 ath_stoprecv(sc); /* turn off frame recv */ 3746 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE, &status)) { 3747 if_printf(ic->ic_ifp, "ath_chan_set: unable to reset " 3748 "channel %u (%u Mhz)\n", 3749 ieee80211_chan2ieee(ic, chan), chan->ic_freq); 3750 return EIO; 3751 } 3752 sc->sc_curchan = hchan; 3753 ath_update_txpow(sc); /* update tx power state */ 3754 3755 /* 3756 * Re-enable rx framework. 3757 */ 3758 if (ath_startrecv(sc) != 0) { 3759 if_printf(ic->ic_ifp, 3760 "ath_chan_set: unable to restart recv logic\n"); 3761 return EIO; 3762 } 3763 3764 /* 3765 * Change channels and update the h/w rate map 3766 * if we're switching; e.g. 11a to 11b/g. 3767 */ 3768 ic->ic_ibss_chan = chan; 3769 ath_chan_change(sc, chan); 3770 3771 /* 3772 * Re-enable interrupts. 
3773 */ 3774 ath_hal_intrset(ah, sc->sc_imask); 3775 } 3776 return 0; 3777} 3778 3779static void 3780ath_next_scan(void *arg) 3781{ 3782 struct ath_softc *sc = arg; 3783 struct ieee80211com *ic = &sc->sc_ic; 3784 3785 if (ic->ic_state == IEEE80211_S_SCAN) 3786 ieee80211_next_scan(ic); 3787} 3788 3789/* 3790 * Periodically recalibrate the PHY to account 3791 * for temperature/environment changes. 3792 */ 3793static void 3794ath_calibrate(void *arg) 3795{ 3796 struct ath_softc *sc = arg; 3797 struct ath_hal *ah = sc->sc_ah; 3798 3799 sc->sc_stats.ast_per_cal++; 3800 3801 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: channel %u/%x\n", 3802 __func__, sc->sc_curchan.channel, sc->sc_curchan.channelFlags); 3803 3804 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 3805 /* 3806 * Rfgain is out of bounds, reset the chip 3807 * to load new gain values. 3808 */ 3809 sc->sc_stats.ast_per_rfgain++; 3810 ath_reset(&sc->sc_if); 3811 } 3812 if (!ath_hal_calibrate(ah, &sc->sc_curchan)) { 3813 DPRINTF(sc, ATH_DEBUG_ANY, 3814 "%s: calibration of channel %u failed\n", 3815 __func__, sc->sc_curchan.channel); 3816 sc->sc_stats.ast_per_calfail++; 3817 } 3818 callout_reset(&sc->sc_cal_ch, ath_calinterval * hz, ath_calibrate, sc); 3819} 3820 3821static int 3822ath_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg) 3823{ 3824 struct ifnet *ifp = ic->ic_ifp; 3825 struct ath_softc *sc = ifp->if_softc; 3826 struct ath_hal *ah = sc->sc_ah; 3827 struct ieee80211_node *ni; 3828 int i, error; 3829 const u_int8_t *bssid; 3830 u_int32_t rfilt; 3831 static const HAL_LED_STATE leds[] = { 3832 HAL_LED_INIT, /* IEEE80211_S_INIT */ 3833 HAL_LED_SCAN, /* IEEE80211_S_SCAN */ 3834 HAL_LED_AUTH, /* IEEE80211_S_AUTH */ 3835 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ 3836 HAL_LED_RUN, /* IEEE80211_S_RUN */ 3837 }; 3838 3839 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, 3840 ieee80211_state_name[ic->ic_state], 3841 ieee80211_state_name[nstate]); 3842 3843 callout_stop(&sc->sc_scan_ch); 
3844 callout_stop(&sc->sc_cal_ch); 3845 ath_hal_setledstate(ah, leds[nstate]); /* set LED */ 3846 3847 if (nstate == IEEE80211_S_INIT) { 3848 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 3849 ath_hal_intrset(ah, sc->sc_imask); 3850 /* 3851 * Notify the rate control algorithm. 3852 */ 3853 ath_rate_newstate(sc, nstate); 3854 goto done; 3855 } 3856 ni = ic->ic_bss; 3857 error = ath_chan_set(sc, ni->ni_chan); 3858 if (error != 0) 3859 goto bad; 3860 rfilt = ath_calcrxfilter(sc, nstate); 3861 if (nstate == IEEE80211_S_SCAN) 3862 bssid = ifp->if_broadcastaddr; 3863 else 3864 bssid = ni->ni_bssid; 3865 ath_hal_setrxfilter(ah, rfilt); 3866 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s\n", 3867 __func__, rfilt, ether_sprintf(bssid)); 3868 3869 if (nstate == IEEE80211_S_RUN && ic->ic_opmode == IEEE80211_M_STA) 3870 ath_hal_setassocid(ah, bssid, ni->ni_associd); 3871 else 3872 ath_hal_setassocid(ah, bssid, 0); 3873 if (ic->ic_flags & IEEE80211_F_PRIVACY) { 3874 for (i = 0; i < IEEE80211_WEP_NKID; i++) 3875 if (ath_hal_keyisvalid(ah, i)) 3876 ath_hal_keysetmac(ah, i, bssid); 3877 } 3878 3879 /* 3880 * Notify the rate control algorithm so rates 3881 * are setup should ath_beacon_alloc be called. 3882 */ 3883 ath_rate_newstate(sc, nstate); 3884 3885 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 3886 /* nothing to do */; 3887 } else if (nstate == IEEE80211_S_RUN) { 3888 DPRINTF(sc, ATH_DEBUG_STATE, 3889 "%s(RUN): ic_flags=0x%08x iv=%d bssid=%s " 3890 "capinfo=0x%04x chan=%d\n" 3891 , __func__ 3892 , ic->ic_flags 3893 , ni->ni_intval 3894 , ether_sprintf(ni->ni_bssid) 3895 , ni->ni_capinfo 3896 , ieee80211_chan2ieee(ic, ni->ni_chan)); 3897 3898 /* 3899 * Allocate and setup the beacon frame for AP or adhoc mode. 3900 */ 3901 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 3902 ic->ic_opmode == IEEE80211_M_IBSS) { 3903 error = ath_beacon_alloc(sc, ni); 3904 if (error != 0) 3905 goto bad; 3906 } 3907 3908 /* 3909 * Configure the beacon and sleep timers. 
3910 */ 3911 ath_beacon_config(sc); 3912 } else { 3913 ath_hal_intrset(ah, 3914 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); 3915 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 3916 } 3917done: 3918 /* 3919 * Invoke the parent method to complete the work. 3920 */ 3921 error = sc->sc_newstate(ic, nstate, arg); 3922 /* 3923 * Finally, start any timers. 3924 */ 3925 if (nstate == IEEE80211_S_RUN) { 3926 /* start periodic recalibration timer */ 3927 callout_reset(&sc->sc_cal_ch, ath_calinterval * hz, 3928 ath_calibrate, sc); 3929 } else if (nstate == IEEE80211_S_SCAN) { 3930 /* start ap/neighbor scan timer */ 3931 callout_reset(&sc->sc_scan_ch, (ath_dwelltime * hz) / 1000, 3932 ath_next_scan, sc); 3933 } 3934bad: 3935 return error; 3936} 3937 3938/* 3939 * Setup driver-specific state for a newly associated node. 3940 * Note that we're called also on a re-associate, the isnew 3941 * param tells us if this is the first time or not. 3942 */ 3943static void 3944ath_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew) 3945{ 3946 struct ath_softc *sc = ic->ic_ifp->if_softc; 3947 3948 ath_rate_newassoc(sc, ATH_NODE(ni), isnew); 3949} 3950 3951static int 3952ath_getchannels(struct ath_softc *sc, u_int cc, 3953 HAL_BOOL outdoor, HAL_BOOL xchanmode) 3954{ 3955 struct ieee80211com *ic = &sc->sc_ic; 3956 struct ifnet *ifp = &sc->sc_if; 3957 struct ath_hal *ah = sc->sc_ah; 3958 HAL_CHANNEL *chans; 3959 int i, ix, nchan; 3960 3961 chans = malloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL), 3962 M_TEMP, M_NOWAIT); 3963 if (chans == NULL) { 3964 if_printf(ifp, "unable to allocate channel table\n"); 3965 return ENOMEM; 3966 } 3967 if (!ath_hal_init_channels(ah, chans, IEEE80211_CHAN_MAX, &nchan, 3968 cc, HAL_MODE_ALL, outdoor, xchanmode)) { 3969 u_int32_t rd; 3970 3971 ath_hal_getregdomain(ah, &rd); 3972 if_printf(ifp, "unable to collect channel list from hal; " 3973 "regdomain likely %u country code %u\n", rd, cc); 3974 free(chans, M_TEMP); 3975 return EINVAL; 
3976 } 3977 3978 /* 3979 * Convert HAL channels to ieee80211 ones and insert 3980 * them in the table according to their channel number. 3981 */ 3982 for (i = 0; i < nchan; i++) { 3983 HAL_CHANNEL *c = &chans[i]; 3984 ix = ath_hal_mhz2ieee(c->channel, c->channelFlags); 3985 if (ix > IEEE80211_CHAN_MAX) { 3986 if_printf(ifp, "bad hal channel %u (%u/%x) ignored\n", 3987 ix, c->channel, c->channelFlags); 3988 continue; 3989 } 3990 /* NB: flags are known to be compatible */ 3991 if (ic->ic_channels[ix].ic_freq == 0) { 3992 ic->ic_channels[ix].ic_freq = c->channel; 3993 ic->ic_channels[ix].ic_flags = c->channelFlags; 3994 } else { 3995 /* channels overlap; e.g. 11g and 11b */ 3996 ic->ic_channels[ix].ic_flags |= c->channelFlags; 3997 } 3998 } 3999 free(chans, M_TEMP); 4000 return 0; 4001} 4002 4003static void 4004ath_update_led(struct ath_softc *sc) 4005{ 4006 struct ieee80211com *ic = &sc->sc_ic; 4007 struct ath_hal *ah = sc->sc_ah; 4008 u_int32_t threshold; 4009 4010 /* 4011 * When not associated, flash LED on for 5s, off for 200ms. 4012 * XXX this assumes 100ms beacon interval. 4013 */ 4014 if (ic->ic_state != IEEE80211_S_RUN) { 4015 threshold = 2 + sc->sc_ledstate * 48; 4016 } else { 4017 threshold = 2 + sc->sc_ledstate * 18; 4018 } 4019 if (ic->ic_stats.is_rx_beacon - sc->sc_beacons >= threshold) { 4020 ath_hal_gpioCfgOutput(ah, sc->sc_ledpin); 4021 ath_hal_gpioset(ah, sc->sc_ledpin, sc->sc_ledstate); 4022 sc->sc_ledstate ^= 1; 4023 sc->sc_beacons = ic->ic_stats.is_rx_beacon; 4024 } 4025} 4026 4027static void 4028ath_update_txpow(struct ath_softc *sc) 4029{ 4030 struct ieee80211com *ic = &sc->sc_ic; 4031 struct ath_hal *ah = sc->sc_ah; 4032 u_int32_t txpow; 4033 4034 if (sc->sc_curtxpow != ic->ic_txpowlimit) { 4035 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit); 4036 /* read back in case value is clamped */ 4037 ath_hal_gettxpowlimit(ah, &txpow); 4038 ic->ic_txpowlimit = sc->sc_curtxpow = txpow; 4039 } 4040 /* 4041 * Fetch max tx power level for status requests. 
4042 */ 4043 ath_hal_getmaxtxpow(sc->sc_ah, &txpow); 4044 ic->ic_bss->ni_txpower = txpow; 4045} 4046 4047static int 4048ath_rate_setup(struct ath_softc *sc, u_int mode) 4049{ 4050 struct ath_hal *ah = sc->sc_ah; 4051 struct ieee80211com *ic = &sc->sc_ic; 4052 const HAL_RATE_TABLE *rt; 4053 struct ieee80211_rateset *rs; 4054 int i, maxrates; 4055 4056 switch (mode) { 4057 case IEEE80211_MODE_11A: 4058 sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_11A); 4059 break; 4060 case IEEE80211_MODE_11B: 4061 sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_11B); 4062 break; 4063 case IEEE80211_MODE_11G: 4064 sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_11G); 4065 break; 4066 case IEEE80211_MODE_TURBO_A: 4067 sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_TURBO); 4068 break; 4069 case IEEE80211_MODE_TURBO_G: 4070 sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_108G); 4071 break; 4072 default: 4073 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", 4074 __func__, mode); 4075 return 0; 4076 } 4077 rt = sc->sc_rates[mode]; 4078 if (rt == NULL) 4079 return 0; 4080 if (rt->rateCount > IEEE80211_RATE_MAXSIZE) { 4081 DPRINTF(sc, ATH_DEBUG_ANY, 4082 "%s: rate table too small (%u > %u)\n", 4083 __func__, rt->rateCount, IEEE80211_RATE_MAXSIZE); 4084 maxrates = IEEE80211_RATE_MAXSIZE; 4085 } else 4086 maxrates = rt->rateCount; 4087 rs = &ic->ic_sup_rates[mode]; 4088 for (i = 0; i < maxrates; i++) 4089 rs->rs_rates[i] = rt->info[i].dot11Rate; 4090 rs->rs_nrates = maxrates; 4091 return 1; 4092} 4093 4094static void 4095ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 4096{ 4097 const HAL_RATE_TABLE *rt; 4098 int i; 4099 4100 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); 4101 rt = sc->sc_rates[mode]; 4102 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); 4103 for (i = 0; i < rt->rateCount; i++) 4104 sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i; 4105 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); 
4106 memset(sc->sc_hwflags, 0, sizeof(sc->sc_hwflags)); 4107 for (i = 0; i < 32; i++) { 4108 u_int8_t ix = rt->rateCodeToIndex[i]; 4109 if (ix == 0xff) 4110 continue; 4111 sc->sc_hwmap[i] = rt->info[ix].dot11Rate & IEEE80211_RATE_VAL; 4112 if (rt->info[ix].shortPreamble || 4113 rt->info[ix].phy == IEEE80211_T_OFDM) 4114 sc->sc_hwflags[i] |= IEEE80211_RADIOTAP_F_SHORTPRE; 4115 } 4116 sc->sc_currates = rt; 4117 sc->sc_curmode = mode; 4118 /* 4119 * All protection frames are transmited at 2Mb/s for 4120 * 11g, otherwise at 1Mb/s. 4121 * XXX select protection rate index from rate table. 4122 */ 4123 sc->sc_protrix = (mode == IEEE80211_MODE_11G ? 1 : 0); 4124 /* NB: caller is responsible for reseting rate control state */ 4125} 4126 4127#ifdef AR_DEBUG 4128static void 4129ath_printrxbuf(struct ath_buf *bf, int done) 4130{ 4131 struct ath_desc *ds; 4132 int i; 4133 4134 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { 4135 printf("R%d (%p %p) %08x %08x %08x %08x %08x %08x %c\n", 4136 i, ds, (struct ath_desc *)bf->bf_daddr + i, 4137 ds->ds_link, ds->ds_data, 4138 ds->ds_ctl0, ds->ds_ctl1, 4139 ds->ds_hw[0], ds->ds_hw[1], 4140 !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!'); 4141 } 4142} 4143 4144static void 4145ath_printtxbuf(struct ath_buf *bf, int done) 4146{ 4147 struct ath_desc *ds; 4148 int i; 4149 4150 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { 4151 printf("T%d (%p %p) %08x %08x %08x %08x %08x %08x %08x %08x %c\n", 4152 i, ds, (struct ath_desc *)bf->bf_daddr + i, 4153 ds->ds_link, ds->ds_data, 4154 ds->ds_ctl0, ds->ds_ctl1, 4155 ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3], 4156 !done ? ' ' : (ds->ds_txstat.ts_status == 0) ? 
'*' : '!'); 4157 } 4158} 4159#endif /* AR_DEBUG */ 4160 4161static void 4162ath_watchdog(struct ifnet *ifp) 4163{ 4164 struct ath_softc *sc = ifp->if_softc; 4165 struct ieee80211com *ic = &sc->sc_ic; 4166 4167 ifp->if_timer = 0; 4168 if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid) 4169 return; 4170 if (sc->sc_tx_timer) { 4171 if (--sc->sc_tx_timer == 0) { 4172 if_printf(ifp, "device timeout\n"); 4173 ath_reset(ifp); 4174 ifp->if_oerrors++; 4175 sc->sc_stats.ast_watchdog++; 4176 } else 4177 ifp->if_timer = 1; 4178 } 4179 ieee80211_watchdog(ic); 4180} 4181 4182/* 4183 * Diagnostic interface to the HAL. This is used by various 4184 * tools to do things like retrieve register contents for 4185 * debugging. The mechanism is intentionally opaque so that 4186 * it can change frequently w/o concern for compatiblity. 4187 */ 4188static int 4189ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad) 4190{ 4191 struct ath_hal *ah = sc->sc_ah; 4192 u_int id = ad->ad_id & ATH_DIAG_ID; 4193 void *indata = NULL; 4194 void *outdata = NULL; 4195 u_int32_t insize = ad->ad_in_size; 4196 u_int32_t outsize = ad->ad_out_size; 4197 int error = 0; 4198 4199 if (ad->ad_id & ATH_DIAG_IN) { 4200 /* 4201 * Copy in data. 4202 */ 4203 indata = malloc(insize, M_TEMP, M_NOWAIT); 4204 if (indata == NULL) { 4205 error = ENOMEM; 4206 goto bad; 4207 } 4208 error = copyin(ad->ad_in_data, indata, insize); 4209 if (error) 4210 goto bad; 4211 } 4212 if (ad->ad_id & ATH_DIAG_DYN) { 4213 /* 4214 * Allocate a buffer for the results (otherwise the HAL 4215 * returns a pointer to a buffer where we can read the 4216 * results). Note that we depend on the HAL leaving this 4217 * pointer for us to use below in reclaiming the buffer; 4218 * may want to be more defensive. 
4219 */ 4220 outdata = malloc(outsize, M_TEMP, M_NOWAIT); 4221 if (outdata == NULL) { 4222 error = ENOMEM; 4223 goto bad; 4224 } 4225 } 4226 if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) { 4227 if (outsize < ad->ad_out_size) 4228 ad->ad_out_size = outsize; 4229 if (outdata != NULL) 4230 error = copyout(outdata, ad->ad_out_data, 4231 ad->ad_out_size); 4232 } else { 4233 error = EINVAL; 4234 } 4235bad: 4236 if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL) 4237 free(indata, M_TEMP); 4238 if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL) 4239 free(outdata, M_TEMP); 4240 return error; 4241} 4242 4243static int 4244ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 4245{ 4246#define IS_RUNNING(ifp) \ 4247 ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) == (IFF_RUNNING|IFF_UP)) 4248 struct ath_softc *sc = ifp->if_softc; 4249 struct ieee80211com *ic = &sc->sc_ic; 4250 struct ifreq *ifr = (struct ifreq *)data; 4251 int error = 0; 4252 4253 ATH_LOCK(sc); 4254 switch (cmd) { 4255 case SIOCSIFFLAGS: 4256 if (IS_RUNNING(ifp)) { 4257 /* 4258 * To avoid rescanning another access point, 4259 * do not call ath_init() here. Instead, 4260 * only reflect promisc mode settings. 4261 */ 4262 ath_mode_init(sc); 4263 } else if (ifp->if_flags & IFF_UP) { 4264 /* 4265 * Beware of being called during attach/detach 4266 * to reset promiscuous mode. In that case we 4267 * will still be marked UP but not RUNNING. 4268 * However trying to re-init the interface 4269 * is the wrong thing to do as we've already 4270 * torn down much of our state. There's 4271 * probably a better way to deal with this. 4272 */ 4273 if (!sc->sc_invalid && ic->ic_bss != NULL) 4274 ath_init(ifp); /* XXX lose error */ 4275 } else 4276 ath_stop_locked(ifp); 4277 break; 4278 case SIOCADDMULTI: 4279 case SIOCDELMULTI: 4280 /* 4281 * The upper layer has already installed/removed 4282 * the multicast address(es), just recalculate the 4283 * multicast filter for the card. 
4284 */ 4285 if (ifp->if_flags & IFF_RUNNING) 4286 ath_mode_init(sc); 4287 break; 4288 case SIOCGATHSTATS: 4289 /* NB: embed these numbers to get a consistent view */ 4290 sc->sc_stats.ast_tx_packets = ifp->if_opackets; 4291 sc->sc_stats.ast_rx_packets = ifp->if_ipackets; 4292 sc->sc_stats.ast_rx_rssi = ieee80211_getrssi(ic); 4293 ATH_UNLOCK(sc); 4294 /* 4295 * NB: Drop the softc lock in case of a page fault; 4296 * we'll accept any potential inconsisentcy in the 4297 * statistics. The alternative is to copy the data 4298 * to a local structure. 4299 */ 4300 return copyout(&sc->sc_stats, 4301 ifr->ifr_data, sizeof (sc->sc_stats)); 4302 case SIOCGATHDIAG: 4303 error = ath_ioctl_diag(sc, (struct ath_diag *) ifr); 4304 break; 4305 default: 4306 error = ieee80211_ioctl(ic, cmd, data); 4307 if (error == ENETRESET) { 4308 if (IS_RUNNING(ifp) && 4309 ic->ic_roaming != IEEE80211_ROAMING_MANUAL) 4310 ath_init(ifp); /* XXX lose error */ 4311 error = 0; 4312 } 4313 if (error == ERESTART) 4314 error = IS_RUNNING(ifp) ? ath_reset(ifp) : 0; 4315 break; 4316 } 4317 ATH_UNLOCK(sc); 4318 return error; 4319#undef IS_RUNNING 4320} 4321 4322static int 4323ath_sysctl_slottime(SYSCTL_HANDLER_ARGS) 4324{ 4325 struct ath_softc *sc = arg1; 4326 u_int slottime = ath_hal_getslottime(sc->sc_ah); 4327 int error; 4328 4329 error = sysctl_handle_int(oidp, &slottime, 0, req); 4330 if (error || !req->newptr) 4331 return error; 4332 return !ath_hal_setslottime(sc->sc_ah, slottime) ? EINVAL : 0; 4333} 4334 4335static int 4336ath_sysctl_acktimeout(SYSCTL_HANDLER_ARGS) 4337{ 4338 struct ath_softc *sc = arg1; 4339 u_int acktimeout = ath_hal_getacktimeout(sc->sc_ah); 4340 int error; 4341 4342 error = sysctl_handle_int(oidp, &acktimeout, 0, req); 4343 if (error || !req->newptr) 4344 return error; 4345 return !ath_hal_setacktimeout(sc->sc_ah, acktimeout) ? 
EINVAL : 0; 4346} 4347 4348static int 4349ath_sysctl_ctstimeout(SYSCTL_HANDLER_ARGS) 4350{ 4351 struct ath_softc *sc = arg1; 4352 u_int ctstimeout = ath_hal_getctstimeout(sc->sc_ah); 4353 int error; 4354 4355 error = sysctl_handle_int(oidp, &ctstimeout, 0, req); 4356 if (error || !req->newptr) 4357 return error; 4358 return !ath_hal_setctstimeout(sc->sc_ah, ctstimeout) ? EINVAL : 0; 4359} 4360 4361static int 4362ath_sysctl_softled(SYSCTL_HANDLER_ARGS) 4363{ 4364 struct ath_softc *sc = arg1; 4365 int softled = sc->sc_softled; 4366 int error; 4367 4368 error = sysctl_handle_int(oidp, &softled, 0, req); 4369 if (error || !req->newptr) 4370 return error; 4371 if (softled > 1) 4372 softled = 1; 4373 if (softled != sc->sc_softled) { 4374 if (softled) 4375 ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin); 4376 ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !softled); 4377 sc->sc_softled = softled; 4378 } 4379 return 0; 4380} 4381 4382static int 4383ath_sysctl_rxantenna(SYSCTL_HANDLER_ARGS) 4384{ 4385 struct ath_softc *sc = arg1; 4386 u_int defantenna = ath_hal_getdefantenna(sc->sc_ah); 4387 int error; 4388 4389 error = sysctl_handle_int(oidp, &defantenna, 0, req); 4390 if (!error && req->newptr) 4391 ath_hal_setdefantenna(sc->sc_ah, defantenna); 4392 return error; 4393} 4394 4395static int 4396ath_sysctl_diversity(SYSCTL_HANDLER_ARGS) 4397{ 4398 struct ath_softc *sc = arg1; 4399 u_int diversity = sc->sc_diversity; 4400 int error; 4401 4402 error = sysctl_handle_int(oidp, &diversity, 0, req); 4403 if (error || !req->newptr) 4404 return error; 4405 sc->sc_diversity = diversity; 4406 return !ath_hal_setdiversity(sc->sc_ah, diversity) ? 
EINVAL : 0; 4407} 4408 4409static int 4410ath_sysctl_diag(SYSCTL_HANDLER_ARGS) 4411{ 4412 struct ath_softc *sc = arg1; 4413 u_int32_t diag; 4414 int error; 4415 4416 if (!ath_hal_getdiag(sc->sc_ah, &diag)) 4417 return EINVAL; 4418 error = sysctl_handle_int(oidp, &diag, 0, req); 4419 if (error || !req->newptr) 4420 return error; 4421 return !ath_hal_setdiag(sc->sc_ah, diag) ? EINVAL : 0; 4422} 4423 4424static int 4425ath_sysctl_tpscale(SYSCTL_HANDLER_ARGS) 4426{ 4427 struct ath_softc *sc = arg1; 4428 struct ifnet *ifp = &sc->sc_if; 4429 u_int32_t scale; 4430 int error; 4431 4432 ath_hal_gettpscale(sc->sc_ah, &scale); 4433 error = sysctl_handle_int(oidp, &scale, 0, req); 4434 if (error || !req->newptr) 4435 return error; 4436 return !ath_hal_settpscale(sc->sc_ah, scale) ? EINVAL : ath_reset(ifp); 4437} 4438 4439static int 4440ath_sysctl_tpc(SYSCTL_HANDLER_ARGS) 4441{ 4442 struct ath_softc *sc = arg1; 4443 u_int tpc = ath_hal_gettpc(sc->sc_ah); 4444 int error; 4445 4446 error = sysctl_handle_int(oidp, &tpc, 0, req); 4447 if (error || !req->newptr) 4448 return error; 4449 return !ath_hal_settpc(sc->sc_ah, tpc) ? 
EINVAL : 0; 4450} 4451 4452static void 4453ath_sysctlattach(struct ath_softc *sc) 4454{ 4455 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 4456 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 4457 4458 ath_hal_getcountrycode(sc->sc_ah, &sc->sc_countrycode); 4459 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4460 "countrycode", CTLFLAG_RD, &sc->sc_countrycode, 0, 4461 "EEPROM country code"); 4462 ath_hal_getregdomain(sc->sc_ah, &sc->sc_regdomain); 4463 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4464 "regdomain", CTLFLAG_RD, &sc->sc_regdomain, 0, 4465 "EEPROM regdomain code"); 4466 sc->sc_debug = ath_debug; 4467 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4468 "debug", CTLFLAG_RW, &sc->sc_debug, 0, 4469 "control debugging printfs"); 4470 4471 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4472 "slottime", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 4473 ath_sysctl_slottime, "I", "802.11 slot time (us)"); 4474 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4475 "acktimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 4476 ath_sysctl_acktimeout, "I", "802.11 ACK timeout (us)"); 4477 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4478 "ctstimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 4479 ath_sysctl_ctstimeout, "I", "802.11 CTS timeout (us)"); 4480 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4481 "softled", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 4482 ath_sysctl_softled, "I", "enable/disable software LED support"); 4483 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4484 "ledpin", CTLFLAG_RW, &sc->sc_ledpin, 0, 4485 "GPIO pin connected to LED"); 4486 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4487 "txantenna", CTLFLAG_RW, &sc->sc_txantenna, 0, 4488 "tx antenna (0=auto)"); 4489 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4490 "rxantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 4491 ath_sysctl_rxantenna, "I", "default/rx antenna"); 4492 if (sc->sc_hasdiversity) 4493 SYSCTL_ADD_PROC(ctx, 
SYSCTL_CHILDREN(tree), OID_AUTO, 4494 "diversity", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 4495 ath_sysctl_diversity, "I", "antenna diversity"); 4496 sc->sc_txintrperiod = ATH_TXINTR_PERIOD; 4497 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4498 "txintrperiod", CTLFLAG_RW, &sc->sc_txintrperiod, 0, 4499 "tx descriptor batching"); 4500 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4501 "diag", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 4502 ath_sysctl_diag, "I", "h/w diagnostic control"); 4503 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4504 "tpscale", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 4505 ath_sysctl_tpscale, "I", "tx power scaling"); 4506 if (sc->sc_hastpc) 4507 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4508 "tpc", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 4509 ath_sysctl_tpc, "I", "enable/disable per-packet TPC"); 4510} 4511 4512static void 4513ath_bpfattach(struct ath_softc *sc) 4514{ 4515 struct ifnet *ifp = &sc->sc_if; 4516 4517 bpfattach2(ifp, DLT_IEEE802_11_RADIO, 4518 sizeof(struct ieee80211_frame) + sizeof(sc->sc_tx_th), 4519 &sc->sc_drvbpf); 4520 /* 4521 * Initialize constant fields. 4522 * XXX make header lengths a multiple of 32-bits so subsequent 4523 * headers are properly aligned; this is a kludge to keep 4524 * certain applications happy. 4525 * 4526 * NB: the channel is setup each time we transition to the 4527 * RUN state to avoid filling it in for each frame. 4528 */ 4529 sc->sc_tx_th_len = roundup(sizeof(sc->sc_tx_th), sizeof(u_int32_t)); 4530 sc->sc_tx_th.wt_ihdr.it_len = htole16(sc->sc_tx_th_len); 4531 sc->sc_tx_th.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT); 4532 4533 sc->sc_rx_rt_len = roundup(sizeof(sc->sc_rx_th), sizeof(u_int32_t)); 4534 sc->sc_rx_th.wr_ihdr.it_len = htole16(sc->sc_rx_rt_len); 4535 sc->sc_rx_th.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT); 4536} 4537 4538/* 4539 * Announce various information on device/driver attach. 
4540 */ 4541static void 4542ath_announce(struct ath_softc *sc) 4543{ 4544#define HAL_MODE_DUALBAND (HAL_MODE_11A|HAL_MODE_11B) 4545 struct ifnet *ifp = &sc->sc_if; 4546 struct ath_hal *ah = sc->sc_ah; 4547 u_int modes, cc; 4548 4549 if_printf(ifp, "mac %d.%d phy %d.%d", 4550 ah->ah_macVersion, ah->ah_macRev, 4551 ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf); 4552 /* 4553 * Print radio revision(s). We check the wireless modes 4554 * to avoid falsely printing revs for inoperable parts. 4555 * Dual-band radio revs are returned in the 5Ghz rev number. 4556 */ 4557 ath_hal_getcountrycode(ah, &cc); 4558 modes = ath_hal_getwirelessmodes(ah, cc); 4559 if ((modes & HAL_MODE_DUALBAND) == HAL_MODE_DUALBAND) { 4560 if (ah->ah_analog5GhzRev && ah->ah_analog2GhzRev) 4561 printf(" 5ghz radio %d.%d 2ghz radio %d.%d", 4562 ah->ah_analog5GhzRev >> 4, 4563 ah->ah_analog5GhzRev & 0xf, 4564 ah->ah_analog2GhzRev >> 4, 4565 ah->ah_analog2GhzRev & 0xf); 4566 else 4567 printf(" radio %d.%d", ah->ah_analog5GhzRev >> 4, 4568 ah->ah_analog5GhzRev & 0xf); 4569 } else 4570 printf(" radio %d.%d", ah->ah_analog5GhzRev >> 4, 4571 ah->ah_analog5GhzRev & 0xf); 4572 printf("\n"); 4573 if (bootverbose) { 4574 int i; 4575 for (i = 0; i <= WME_AC_VO; i++) { 4576 struct ath_txq *txq = sc->sc_ac2q[i]; 4577 if_printf(ifp, "Use hw queue %u for %s traffic\n", 4578 txq->axq_qnum, ieee80211_wme_acnames[i]); 4579 } 4580 if_printf(ifp, "Use hw queue %u for CAB traffic\n", 4581 sc->sc_cabq->axq_qnum); 4582 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq); 4583 } 4584#undef HAL_MODE_DUALBAND 4585} 4586