if_ath.c revision 138878
1/*- 2 * Copyright (c) 2002-2004 Sam Leffler, Errno Consulting 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 13 * redistribution must be conditioned upon including a substantially 14 * similar Disclaimer requirement for further binary redistribution. 15 * 3. Neither the names of the above-listed copyright holders nor the names 16 * of any contributors may be used to endorse or promote products derived 17 * from this software without specific prior written permission. 18 * 19 * Alternatively, this software may be distributed under the terms of the 20 * GNU General Public License ("GPL") version 2 as published by the Free 21 * Software Foundation. 22 * 23 * NO WARRANTY 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 26 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 27 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 28 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 29 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 32 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 34 * THE POSSIBILITY OF SUCH DAMAGES. 
35 */ 36 37#include <sys/cdefs.h> 38__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 138878 2004-12-15 02:25:21Z peter $"); 39 40/* 41 * Driver for the Atheros Wireless LAN controller. 42 * 43 * This software is derived from work of Atsushi Onoe; his contribution 44 * is greatly appreciated. 45 */ 46 47#include "opt_inet.h" 48 49#include <sys/param.h> 50#include <sys/systm.h> 51#include <sys/sysctl.h> 52#include <sys/mbuf.h> 53#include <sys/malloc.h> 54#include <sys/lock.h> 55#include <sys/mutex.h> 56#include <sys/kernel.h> 57#include <sys/socket.h> 58#include <sys/sockio.h> 59#include <sys/errno.h> 60#include <sys/callout.h> 61#include <sys/bus.h> 62#include <sys/endian.h> 63 64#include <machine/bus.h> 65 66#include <net/if.h> 67#include <net/if_dl.h> 68#include <net/if_media.h> 69#include <net/if_arp.h> 70#include <net/ethernet.h> 71#include <net/if_llc.h> 72 73#include <net80211/ieee80211_var.h> 74 75#include <net/bpf.h> 76 77#ifdef INET 78#include <netinet/in.h> 79#include <netinet/if_ether.h> 80#endif 81 82#define AR_DEBUG 83#include <dev/ath/if_athvar.h> 84#include <contrib/dev/ath/ah_desc.h> 85#include <contrib/dev/ath/ah_devid.h> /* XXX for softled */ 86 87/* unalligned little endian access */ 88#define LE_READ_2(p) \ 89 ((u_int16_t) \ 90 ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8))) 91#define LE_READ_4(p) \ 92 ((u_int32_t) \ 93 ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8) | \ 94 (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24))) 95 96static void ath_init(void *); 97static void ath_stop_locked(struct ifnet *); 98static void ath_stop(struct ifnet *); 99static void ath_start(struct ifnet *); 100static int ath_reset(struct ifnet *); 101static int ath_media_change(struct ifnet *); 102static void ath_watchdog(struct ifnet *); 103static int ath_ioctl(struct ifnet *, u_long, caddr_t); 104static void ath_fatal_proc(void *, int); 105static void ath_rxorn_proc(void *, int); 106static void ath_bmiss_proc(void *, int); 107static void 
ath_initkeytable(struct ath_softc *); 108static int ath_key_alloc(struct ieee80211com *, 109 const struct ieee80211_key *); 110static int ath_key_delete(struct ieee80211com *, 111 const struct ieee80211_key *); 112static int ath_key_set(struct ieee80211com *, const struct ieee80211_key *, 113 const u_int8_t mac[IEEE80211_ADDR_LEN]); 114static void ath_key_update_begin(struct ieee80211com *); 115static void ath_key_update_end(struct ieee80211com *); 116static void ath_mode_init(struct ath_softc *); 117static void ath_setslottime(struct ath_softc *); 118static void ath_updateslot(struct ifnet *); 119static int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *); 120static void ath_beacon_setup(struct ath_softc *, struct ath_buf *); 121static void ath_beacon_proc(void *, int); 122static void ath_bstuck_proc(void *, int); 123static void ath_beacon_free(struct ath_softc *); 124static void ath_beacon_config(struct ath_softc *); 125static void ath_descdma_cleanup(struct ath_softc *sc, 126 struct ath_descdma *, ath_bufhead *); 127static int ath_desc_alloc(struct ath_softc *); 128static void ath_desc_free(struct ath_softc *); 129static struct ieee80211_node *ath_node_alloc(struct ieee80211_node_table *); 130static void ath_node_free(struct ieee80211_node *); 131static u_int8_t ath_node_getrssi(const struct ieee80211_node *); 132static int ath_rxbuf_init(struct ath_softc *, struct ath_buf *); 133static void ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m, 134 struct ieee80211_node *ni, 135 int subtype, int rssi, u_int32_t rstamp); 136static void ath_setdefantenna(struct ath_softc *, u_int); 137static void ath_rx_proc(void *, int); 138static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype); 139static int ath_tx_setup(struct ath_softc *, int, int); 140static int ath_wme_update(struct ieee80211com *); 141static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *); 142static void ath_tx_cleanup(struct ath_softc *); 143static int 
ath_tx_start(struct ath_softc *, struct ieee80211_node *, 144 struct ath_buf *, struct mbuf *); 145static void ath_tx_proc_q0(void *, int); 146static void ath_tx_proc_q0123(void *, int); 147static void ath_tx_proc(void *, int); 148static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); 149static void ath_draintxq(struct ath_softc *); 150static void ath_stoprecv(struct ath_softc *); 151static int ath_startrecv(struct ath_softc *); 152static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *); 153static void ath_next_scan(void *); 154static void ath_calibrate(void *); 155static int ath_newstate(struct ieee80211com *, enum ieee80211_state, int); 156static void ath_newassoc(struct ieee80211com *, 157 struct ieee80211_node *, int); 158static int ath_getchannels(struct ath_softc *, u_int cc, 159 HAL_BOOL outdoor, HAL_BOOL xchanmode); 160static void ath_update_led(struct ath_softc *); 161static void ath_update_txpow(struct ath_softc *); 162 163static int ath_rate_setup(struct ath_softc *, u_int mode); 164static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); 165 166static void ath_sysctlattach(struct ath_softc *); 167static void ath_bpfattach(struct ath_softc *); 168static void ath_announce(struct ath_softc *); 169 170SYSCTL_DECL(_hw_ath); 171 172/* XXX validate sysctl values */ 173static int ath_dwelltime = 200; /* 5 channels/second */ 174SYSCTL_INT(_hw_ath, OID_AUTO, dwell, CTLFLAG_RW, &ath_dwelltime, 175 0, "channel dwell time (ms) for AP/station scanning"); 176static int ath_calinterval = 30; /* calibrate every 30 secs */ 177SYSCTL_INT(_hw_ath, OID_AUTO, calibrate, CTLFLAG_RW, &ath_calinterval, 178 0, "chip calibration interval (secs)"); 179static int ath_outdoor = AH_TRUE; /* outdoor operation */ 180SYSCTL_INT(_hw_ath, OID_AUTO, outdoor, CTLFLAG_RD, &ath_outdoor, 181 0, "outdoor operation"); 182TUNABLE_INT("hw.ath.outdoor", &ath_outdoor); 183static int ath_xchanmode = AH_TRUE; /* extended channel use */ 
SYSCTL_INT(_hw_ath, OID_AUTO, xchanmode, CTLFLAG_RD, &ath_xchanmode,
	    0, "extended channel mode");
TUNABLE_INT("hw.ath.xchanmode", &ath_xchanmode);
static	int ath_countrycode = CTRY_DEFAULT;	/* country code */
SYSCTL_INT(_hw_ath, OID_AUTO, countrycode, CTLFLAG_RD, &ath_countrycode,
	    0, "country code");
TUNABLE_INT("hw.ath.countrycode", &ath_countrycode);
static	int ath_regdomain = 0;			/* regulatory domain */
SYSCTL_INT(_hw_ath, OID_AUTO, regdomain, CTLFLAG_RD, &ath_regdomain,
	    0, "regulatory domain");

#ifdef AR_DEBUG
/* Debug bitmask gating DPRINTF output; settable via hw.ath.debug. */
static	int ath_debug = 0;
SYSCTL_INT(_hw_ath, OID_AUTO, debug, CTLFLAG_RW, &ath_debug,
	    0, "control debugging printfs");
TUNABLE_INT("hw.ath.debug", &ath_debug);
/* Bit assignments for sc_debug / hw.ath.debug. */
enum {
	ATH_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
	ATH_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
	ATH_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
	ATH_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
	ATH_DEBUG_RATE		= 0x00000010,	/* rate control */
	ATH_DEBUG_RESET		= 0x00000020,	/* reset processing */
	ATH_DEBUG_MODE		= 0x00000040,	/* mode init/setup */
	ATH_DEBUG_BEACON	= 0x00000080,	/* beacon handling */
	ATH_DEBUG_WATCHDOG	= 0x00000100,	/* watchdog timeout */
	ATH_DEBUG_INTR		= 0x00001000,	/* ISR */
	ATH_DEBUG_TX_PROC	= 0x00002000,	/* tx ISR proc */
	ATH_DEBUG_RX_PROC	= 0x00004000,	/* rx ISR proc */
	ATH_DEBUG_BEACON_PROC	= 0x00008000,	/* beacon ISR proc */
	ATH_DEBUG_CALIBRATE	= 0x00010000,	/* periodic calibration */
	ATH_DEBUG_KEYCACHE	= 0x00020000,	/* key cache management */
	ATH_DEBUG_STATE		= 0x00040000,	/* 802.11 state transitions */
	ATH_DEBUG_NODE		= 0x00080000,	/* node management */
	ATH_DEBUG_FATAL		= 0x80000000,	/* fatal errors */
	ATH_DEBUG_ANY		= 0xffffffff
};
/* Dump packets when the matching debug bit or IFF_DEBUG|IFF_LINK2 is set. */
#define	IFF_DUMPPKTS(sc, m) \
	((sc->sc_debug & m) || \
	    (sc->sc_if.if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#define	DPRINTF(sc, m, fmt, ...) do {				\
	if (sc->sc_debug & m)					\
		printf(fmt, __VA_ARGS__);			\
} while (0)
#define	KEYPRINTF(sc, ix, hk, mac) do {				\
	if (sc->sc_debug & ATH_DEBUG_KEYCACHE)			\
		ath_keyprint(__func__, ix, hk, mac);		\
} while (0)
static	void ath_printrxbuf(struct ath_buf *bf, int);
static	void ath_printtxbuf(struct ath_buf *bf, int);
#else
#define	IFF_DUMPPKTS(sc, m) \
	((sc->sc_if.if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#define	DPRINTF(m, fmt, ...)
#define	KEYPRINTF(sc, k, ix, mac)
#endif

MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

/*
 * Attach the device: probe the HAL, size and reset the key cache,
 * collect the channel list, set up tx queues and rate control, and
 * hook the driver into the net80211 layer.  Returns 0 on success or
 * an errno on failure; on failure sc_invalid is set so the (possibly
 * shared) interrupt handler ignores the device.
 */
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah;
	HAL_STATUS status;
	int error = 0, i;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
			status);
		error = ENXIO;
		goto bad;
	}
	/* reject a HAL binary built against a different driver ABI */
	if (ah->ah_abi != HAL_ABI_VERSION) {
		if_printf(ifp, "HAL ABI mismatch detected "
			"(HAL:0x%x != driver:0x%x)\n",
			ah->ah_abi, HAL_ABI_VERSION);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.
	 * If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	/* clamp to what the sc_keymap bitmap can track */
	if (sc->sc_keymax > sizeof(sc->sc_keymap) * NBBY) {
		if_printf(ifp,
			"Warning, using only %zu of %u key cache slots\n",
			sizeof(sc->sc_keymap) * NBBY, sc->sc_keymax);
		sc->sc_keymax = sizeof(sc->sc_keymap) * NBBY;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 * XXX only for splitmic.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(sc->sc_keymap, i);
		setbit(sc->sc_keymap, i+32);
		setbit(sc->sc_keymap, i+64);
		setbit(sc->sc_keymap, i+32+64);
	}

	/*
	 * Collect the channel list using the default country
	 * code and including outdoor channels.  The 802.11 layer
	 * is responsible for filtering this list based on settings
	 * like the phy mode.
	 */
	error = ath_getchannels(sc, ath_countrycode,
			ath_outdoor, ath_xchanmode);
	if (error != 0)
		goto bad;
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	error = ath_desc_alloc(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate descriptors: %d\n", error);
		goto bad;
	}
	/* scan callout is MPSAFE only when the network stack is */
	callout_init(&sc->sc_scan_ch, debug_mpsafenet ? CALLOUT_MPSAFE : 0);
	callout_init(&sc->sc_cal_ch, CALLOUT_MPSAFE);

	ATH_TXBUF_LOCK_INIT(sc);

	/* deferred work run from the swi taskqueue by ath_intr */
	TASK_INIT(&sc->sc_rxtask, 0, ath_rx_proc, sc);
	TASK_INIT(&sc->sc_rxorntask, 0, ath_rxorn_proc, sc);
	TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask, 0, ath_bstuck_proc, sc);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 *
	 * XXX PS-Poll
	 */
	sc->sc_bhalq = ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, NULL);
	if (sc->sc_bhalq == (u_int) -1) {
		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		if_printf(ifp, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	/* NB: ensure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		/* fall back: everything shares the (lowest priority) BK queue */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}

	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 * 0x01 = only q0 in use, 0x0f = exactly q0-q3 in use;
	 * those get streamlined tx-completion handlers.
	 */
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
	case 0x01:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		break;
	case 0x0f:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		break;
	default:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
		break;
	}

	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the antenna state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}

	sc->sc_ledstate = 1;
	/*
	 * Auto-enable soft led processing for IBM cards and for
	 * 5211 minipci cards.  Users can also manually enable/disable
	 * support with a sysctl.
	 */
	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
	if (sc->sc_softled) {
		ath_hal_gpioCfgOutput(ah, sc->sc_ledpin);
		ath_hal_gpioset(ah, sc->sc_ledpin, 0);
	}

	/* wire up ifnet methods and transmit queue */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ath_start;
	ifp->if_watchdog = ath_watchdog;
	ifp->if_ioctl = ath_ioctl;
	ifp->if_init = ath_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	ic->ic_reset = ath_reset;
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		;
	/*
	 * Query the hal to figure out h/w crypto support.
	 */
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
		ic->ic_caps |= IEEE80211_C_WEP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
		ic->ic_caps |= IEEE80211_C_AES;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
		ic->ic_caps |= IEEE80211_C_AES_CCM;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
		ic->ic_caps |= IEEE80211_C_CKIP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
		ic->ic_caps |= IEEE80211_C_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
			ic->ic_caps |= IEEE80211_C_TKIPMIC;
		if (ath_hal_tkipsplit(ah))
			sc->sc_splitmic = 1;	/* separate tx/rx MIC key slots */
	}
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	sc->sc_hastpc = ath_hal_hastpc(ah);
	if (sc->sc_hastpc || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for frame bursting capability.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;

	/*
	 * Indicate we need the 802.11 header padded to a
	 * 32-bit boundary for 4-address and QoS frames.
	 */
	ic->ic_flags |= IEEE80211_F_DATAPAD;

	/*
	 * Query the hal about antenna support.
	 */
	if (ath_hal_hasdiversity(ah)) {
		sc->sc_hasdiversity = 1;
		sc->sc_diversity = ath_hal_getdiversity(ah);
	}
	sc->sc_defant = ath_hal_getdefantenna(ah);

	/*
	 * Not all chips have the VEOL support we want to
	 * use with IBSS beacons; check here for it.
	 */
	sc->sc_hasveol = ath_hal_hasveol(ah);

	/* get mac address from hardware */
	ath_hal_getmac(ah, ic->ic_myaddr);

	/* call MI attach routine.
	 */
	ieee80211_ifattach(ic);
	/* override default methods; save the originals we must chain to */
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	ic->ic_node_getrssi = ath_node_getrssi;
	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
	ic->ic_recv_mgmt = ath_recv_mgmt;
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = ath_newstate;
	ic->ic_crypto.cs_key_alloc = ath_key_alloc;
	ic->ic_crypto.cs_key_delete = ath_key_delete;
	ic->ic_crypto.cs_key_set = ath_key_set;
	ic->ic_crypto.cs_key_update_begin = ath_key_update_begin;
	ic->ic_crypto.cs_key_update_end = ath_key_update_end;
	/* complete initialization */
	ieee80211_media_init(ic, ath_media_change, ieee80211_media_status);

	ath_bpfattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	ath_announce(sc);
	return 0;
bad2:
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
bad:
	if (ah)
		ath_hal_detach(ah);
	sc->sc_invalid = 1;	/* mark unusable; ath_intr will ignore us */
	return error;
}

/*
 * Detach the device; undo ath_attach.  Always returns 0.
 * NB: teardown ordering matters, see the comment below.
 */
int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	ath_stop(ifp);
	bpfdetach(ifp);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   ensure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(&sc->sc_ic);
	ath_rate_detach(sc->sc_rc);
	ath_desc_free(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);

	return 0;
}

/* Power-management suspend hook: just bring the interface down. */
void
ath_suspend(struct ath_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	ath_stop(ifp);
}

/* Power-management resume hook: re-initialize if the interface was up. */
void
ath_resume(struct ath_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	if (ifp->if_flags & IFF_UP) {
		ath_init(ifp);
		if (ifp->if_flags & IFF_RUNNING)
			ath_start(ifp);
	}
}

/* System shutdown hook: quiesce the hardware. */
void
ath_shutdown(struct ath_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	ath_stop(ifp);
}

/*
 * Interrupt handler.  Most of the actual processing is deferred.
 */
void
ath_intr(void *arg)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;
	struct ath_hal *ah = sc->sc_ah;
	HAL_INT status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	if (!ath_hal_intrpend(ah))		/* shared irq, not for us */
		return;
	if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) != (IFF_RUNNING|IFF_UP)) {
		/* interface is down; ack and squelch further interrupts */
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
			__func__, ifp->if_flags);
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.  Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to ensure we only process bits we requested.
	 */
	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
	status &= sc->sc_imask;			/* discard unasked for bits */
	if (status & HAL_INT_FATAL) {
		/*
		 * Fatal errors are unrecoverable.  Typically
		 * these are caused by DMA errors.  Unfortunately
		 * the exact reason is not (presently) returned
		 * by the hal.
		 */
		sc->sc_stats.ast_hardware++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		taskqueue_enqueue(taskqueue_swi, &sc->sc_fataltask);
	} else if (status & HAL_INT_RXORN) {
		sc->sc_stats.ast_rxorn++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		taskqueue_enqueue(taskqueue_swi, &sc->sc_rxorntask);
	} else {
		if (status & HAL_INT_SWBA) {
			/*
			 * Software beacon alert--time to send a beacon.
			 * Handle beacon transmission directly; deferring
			 * this is too slow to meet timing constraints
			 * under load.
			 */
			ath_beacon_proc(sc, 0);
		}
		if (status & HAL_INT_RXEOL) {
			/*
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			sc->sc_stats.ast_rxeol++;
			sc->sc_rxlink = NULL;
		}
		if (status & HAL_INT_TXURN) {
			sc->sc_stats.ast_txurn++;
			/* bump tx trigger level */
			ath_hal_updatetxtriglevel(ah, AH_TRUE);
		}
		/* rx/tx completion is deferred to taskqueue context */
		if (status & HAL_INT_RX)
			taskqueue_enqueue(taskqueue_swi, &sc->sc_rxtask);
		if (status & HAL_INT_TX)
			taskqueue_enqueue(taskqueue_swi, &sc->sc_txtask);
		if (status & HAL_INT_BMISS) {
			sc->sc_stats.ast_bmiss++;
			taskqueue_enqueue(taskqueue_swi, &sc->sc_bmisstask);
		}
		if (status & HAL_INT_MIB) {
			sc->sc_stats.ast_mib++;
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to fire.
			 */
			ath_hal_intrset(ah, 0);
			/*
			 * Let the hal handle the event.
			 * We assume it will clear whatever condition
			 * caused the interrupt.
			 */
			ath_hal_mibevent(ah,
			    &ATH_NODE(sc->sc_ic.ic_bss)->an_halstats);
			ath_hal_intrset(ah, sc->sc_imask);
		}
	}
}

/* Deferred handler for fatal hardware errors: log and reset the chip. */
static void
ath_fatal_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;

	if_printf(ifp, "hardware error; resetting\n");
	ath_reset(ifp);
}

/* Deferred handler for rx FIFO overrun: log and reset the chip. */
static void
ath_rxorn_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;

	if_printf(ifp, "rx FIFO overrun; resetting\n");
	ath_reset(ifp);
}

/*
 * Deferred handler for beacon-miss interrupts.  Only meaningful in
 * station mode (asserted below).
 */
static void
ath_bmiss_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
	KASSERT(ic->ic_opmode == IEEE80211_M_STA,
		("unexpect operating mode %u", ic->ic_opmode));
	if (ic->ic_state == IEEE80211_S_RUN) {
		/*
		 * Rather than go directly to scan state, try to
		 * reassociate first.  If that fails then the state
		 * machine will drop us into scanning after timing
		 * out waiting for a probe response.
		 */
		NET_LOCK_GIANT();
		ieee80211_new_state(ic, IEEE80211_S_ASSOC, -1);
		NET_UNLOCK_GIANT();
	}
}

/*
 * Map a net80211 channel to the HAL channelFlags word for its phy mode.
 * Panics (KASSERT) on a mode with no HAL equivalent (AUTO/FH).
 */
static u_int
ath_chan2flags(struct ieee80211com *ic, struct ieee80211_channel *chan)
{
#define	N(a)	(sizeof(a) / sizeof(a[0]))
	/* indexed by enum ieee80211_phymode */
	static const u_int modeflags[] = {
		0,			/* IEEE80211_MODE_AUTO */
		CHANNEL_A,		/* IEEE80211_MODE_11A */
		CHANNEL_B,		/* IEEE80211_MODE_11B */
		CHANNEL_PUREG,		/* IEEE80211_MODE_11G */
		0,			/* IEEE80211_MODE_FH */
		CHANNEL_T,		/* IEEE80211_MODE_TURBO_A */
		CHANNEL_108G		/* IEEE80211_MODE_TURBO_G */
	};
	enum ieee80211_phymode mode = ieee80211_chan2mode(ic, chan);

	KASSERT(mode < N(modeflags), ("unexpected phy mode %u", mode));
	KASSERT(modeflags[mode] != 0, ("mode %u undefined", mode));
	return modeflags[mode];
#undef N
}

/*
 * if_init entry point: (re)start the hardware and kick the 802.11
 * state machine.  arg is the softc (set up in ath_attach).
 */
static void
ath_init(void *arg)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_if;
	struct ieee80211_node *ni;
	enum ieee80211_phymode mode;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	ATH_LOCK(sc);
	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop_locked(ifp);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	sc->sc_curchan.channel = ic->ic_ibss_chan->ic_freq;
	sc->sc_curchan.channelFlags = ath_chan2flags(ic, ic->ic_ibss_chan);
	if (!ath_hal_reset(ah, ic->ic_opmode, &sc->sc_curchan, AH_FALSE, &status)) {
		if_printf(ifp, "unable to reset hardware; hal status %u\n",
			status);
		goto done;
	}

	/*
	 * This is needed only to setup initial state
	 * but it's best done after a reset.
	 */
	ath_update_txpow(sc);

	/*
	 * Setup the hardware after reset: the key cache
	 * is filled as needed and the receive engine is
	 * set going.  Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	ath_initkeytable(sc);		/* XXX still needed? */
	if (ath_startrecv(sc) != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		goto done;
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = HAL_INT_RX | HAL_INT_TX
		  | HAL_INT_RXEOL | HAL_INT_RXORN
		  | HAL_INT_FATAL | HAL_INT_GLOBAL;
	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
		sc->sc_imask |= HAL_INT_MIB;
	ath_hal_intrset(ah, sc->sc_imask);

	ifp->if_flags |= IFF_RUNNING;
	ic->ic_state = IEEE80211_S_INIT;

	/*
	 * The hardware should be ready to go now so it's safe
	 * to kick the 802.11 state machine as it's likely to
	 * immediately call back to us to send mgmt frames.
	 */
	ni = ic->ic_bss;
	ni->ni_chan = ic->ic_ibss_chan;
	mode = ieee80211_chan2mode(ic, ni->ni_chan);
	if (mode != sc->sc_curmode)
		ath_setcurmode(sc, mode);
	if (ic->ic_opmode != IEEE80211_M_MONITOR) {
		if (ic->ic_roaming != IEEE80211_ROAMING_MANUAL)
			ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
	} else
		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
done:
	ATH_UNLOCK(sc);
}

/*
 * Stop the interface with the softc lock already held; see ath_stop
 * for the unlocked wrapper that also powers the chip down.
 */
static void
ath_stop_locked(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	ATH_LOCK_ASSERT(sc);
	if (ifp->if_flags & IFF_RUNNING) {
		/*
		 * Shutdown the hardware and driver:
		 *    reset 802.11 state machine
		 *    turn off timers
		 *    disable interrupts
		 *    turn off the radio
		 *    clear transmit machinery
		 *    clear receive machinery
		 *    drain and release tx queues
		 *    reclaim beacon resources
		 *    power down hardware
		 *
		 * Note that some of this work is not possible if the
		 * hardware is gone (invalid).
		 */
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
		ifp->if_flags &= ~IFF_RUNNING;
		ifp->if_timer = 0;
		if (!sc->sc_invalid) {
			if (sc->sc_softled)
				ath_hal_gpioset(ah, sc->sc_ledpin, 1);
			ath_hal_intrset(ah, 0);
		}
		ath_draintxq(sc);
		if (!sc->sc_invalid) {
			ath_stoprecv(sc);
			ath_hal_phydisable(ah);
		} else
			sc->sc_rxlink = NULL;
		IFQ_DRV_PURGE(&ifp->if_snd);
		ath_beacon_free(sc);
	}
}

/*
 * Stop the interface and, when the hardware is still present,
 * put the chip into full sleep.
 */
static void
ath_stop(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;

	ATH_LOCK(sc);
	ath_stop_locked(ifp);
	if (!sc->sc_invalid) {
		/*
		 * Set the chip in full sleep mode.  Note that we are
		 * careful to do this only when bringing the interface
		 * completely to a stop.
When the chip is in this state 966 * it must be carefully woken up or references to 967 * registers in the PCI clock domain may freeze the bus 968 * (and system). This varies by chip and is mostly an 969 * issue with newer parts that go to sleep more quickly. 970 */ 971 ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP, 0); 972 } 973 ATH_UNLOCK(sc); 974} 975 976/* 977 * Reset the hardware w/o losing operational state. This is 978 * basically a more efficient way of doing ath_stop, ath_init, 979 * followed by state transitions to the current 802.11 980 * operational state. Used to recover from various errors and 981 * to reset or reload hardware state. 982 */ 983static int 984ath_reset(struct ifnet *ifp) 985{ 986 struct ath_softc *sc = ifp->if_softc; 987 struct ieee80211com *ic = &sc->sc_ic; 988 struct ath_hal *ah = sc->sc_ah; 989 struct ieee80211_channel *c; 990 HAL_STATUS status; 991 992 /* 993 * Convert to a HAL channel description with the flags 994 * constrained to reflect the current operating mode. 995 */ 996 c = ic->ic_ibss_chan; 997 sc->sc_curchan.channel = c->ic_freq; 998 sc->sc_curchan.channelFlags = ath_chan2flags(ic, c); 999 1000 ath_hal_intrset(ah, 0); /* disable interrupts */ 1001 ath_draintxq(sc); /* stop xmit side */ 1002 ath_stoprecv(sc); /* stop recv side */ 1003 /* NB: indicate channel change so we do a full reset */ 1004 if (!ath_hal_reset(ah, ic->ic_opmode, &sc->sc_curchan, AH_TRUE, &status)) 1005 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n", 1006 __func__, status); 1007 ath_update_txpow(sc); /* update tx power state */ 1008 if (ath_startrecv(sc) != 0) /* restart recv */ 1009 if_printf(ifp, "%s: unable to start recv logic\n", __func__); 1010 /* 1011 * We may be doing a reset in response to an ioctl 1012 * that changes the channel so update any state that 1013 * might change as a result. 
1014 */ 1015 ath_chan_change(sc, c); 1016 if (ic->ic_state == IEEE80211_S_RUN) 1017 ath_beacon_config(sc); /* restart beacons */ 1018 ath_hal_intrset(ah, sc->sc_imask); 1019 1020 ath_start(ifp); /* restart xmit */ 1021 return 0; 1022} 1023 1024static void 1025ath_start(struct ifnet *ifp) 1026{ 1027 struct ath_softc *sc = ifp->if_softc; 1028 struct ath_hal *ah = sc->sc_ah; 1029 struct ieee80211com *ic = &sc->sc_ic; 1030 struct ieee80211_node *ni; 1031 struct ath_buf *bf; 1032 struct mbuf *m; 1033 struct ieee80211_frame *wh; 1034 struct ether_header *eh; 1035 1036 if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid) 1037 return; 1038 for (;;) { 1039 /* 1040 * Grab a TX buffer and associated resources. 1041 */ 1042 ATH_TXBUF_LOCK(sc); 1043 bf = STAILQ_FIRST(&sc->sc_txbuf); 1044 if (bf != NULL) 1045 STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list); 1046 ATH_TXBUF_UNLOCK(sc); 1047 if (bf == NULL) { 1048 DPRINTF(sc, ATH_DEBUG_ANY, "%s: out of xmit buffers\n", 1049 __func__); 1050 sc->sc_stats.ast_tx_qstop++; 1051 ifp->if_flags |= IFF_OACTIVE; 1052 break; 1053 } 1054 /* 1055 * Poll the management queue for frames; they 1056 * have priority over normal data frames. 1057 */ 1058 IF_DEQUEUE(&ic->ic_mgtq, m); 1059 if (m == NULL) { 1060 /* 1061 * No data frames go out unless we're associated. 1062 */ 1063 if (ic->ic_state != IEEE80211_S_RUN) { 1064 DPRINTF(sc, ATH_DEBUG_ANY, 1065 "%s: ignore data packet, state %u\n", 1066 __func__, ic->ic_state); 1067 sc->sc_stats.ast_tx_discard++; 1068 ATH_TXBUF_LOCK(sc); 1069 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 1070 ATH_TXBUF_UNLOCK(sc); 1071 break; 1072 } 1073 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); /* XXX: LOCK */ 1074 if (m == NULL) { 1075 ATH_TXBUF_LOCK(sc); 1076 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 1077 ATH_TXBUF_UNLOCK(sc); 1078 break; 1079 } 1080 /* 1081 * Find the node for the destination so we can do 1082 * things like power save and fast frames aggregation. 
1083 */ 1084 if (m->m_len < sizeof(struct ether_header) && 1085 (m = m_pullup(m, sizeof(struct ether_header))) == NULL) { 1086 ic->ic_stats.is_tx_nobuf++; /* XXX */ 1087 ni = NULL; 1088 goto bad; 1089 } 1090 eh = mtod(m, struct ether_header *); 1091 ni = ieee80211_find_txnode(ic, eh->ether_dhost); 1092 if (ni == NULL) { 1093 /* NB: ieee80211_find_txnode does stat+msg */ 1094 goto bad; 1095 } 1096 if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) && 1097 (m->m_flags & M_PWR_SAV) == 0) { 1098 /* 1099 * Station in power save mode; pass the frame 1100 * to the 802.11 layer and continue. We'll get 1101 * the frame back when the time is right. 1102 */ 1103 ieee80211_pwrsave(ic, ni, m); 1104 goto reclaim; 1105 } 1106 /* calculate priority so we can find the tx queue */ 1107 if (ieee80211_classify(ic, m, ni)) { 1108 DPRINTF(sc, ATH_DEBUG_XMIT, 1109 "%s: discard, classification failure\n", 1110 __func__); 1111 goto bad; 1112 } 1113 ifp->if_opackets++; 1114 BPF_MTAP(ifp, m); 1115 /* 1116 * Encapsulate the packet in prep for transmission. 1117 */ 1118 m = ieee80211_encap(ic, m, ni); 1119 if (m == NULL) { 1120 DPRINTF(sc, ATH_DEBUG_ANY, 1121 "%s: encapsulation failure\n", 1122 __func__); 1123 sc->sc_stats.ast_tx_encap++; 1124 goto bad; 1125 } 1126 } else { 1127 /* 1128 * Hack! The referenced node pointer is in the 1129 * rcvif field of the packet header. This is 1130 * placed there by ieee80211_mgmt_output because 1131 * we need to hold the reference with the frame 1132 * and there's no other way (other than packet 1133 * tags which we consider too expensive to use) 1134 * to pass it along. 
1135 */ 1136 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; 1137 m->m_pkthdr.rcvif = NULL; 1138 1139 wh = mtod(m, struct ieee80211_frame *); 1140 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 1141 IEEE80211_FC0_SUBTYPE_PROBE_RESP) { 1142 /* fill time stamp */ 1143 u_int64_t tsf; 1144 u_int32_t *tstamp; 1145 1146 tsf = ath_hal_gettsf64(ah); 1147 /* XXX: adjust 100us delay to xmit */ 1148 tsf += 100; 1149 tstamp = (u_int32_t *)&wh[1]; 1150 tstamp[0] = htole32(tsf & 0xffffffff); 1151 tstamp[1] = htole32(tsf >> 32); 1152 } 1153 sc->sc_stats.ast_tx_mgmt++; 1154 } 1155 1156 if (ath_tx_start(sc, ni, bf, m)) { 1157 bad: 1158 ifp->if_oerrors++; 1159 reclaim: 1160 ATH_TXBUF_LOCK(sc); 1161 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 1162 ATH_TXBUF_UNLOCK(sc); 1163 if (ni != NULL) 1164 ieee80211_free_node(ni); 1165 continue; 1166 } 1167 1168 sc->sc_tx_timer = 5; 1169 ifp->if_timer = 1; 1170 } 1171} 1172 1173static int 1174ath_media_change(struct ifnet *ifp) 1175{ 1176#define IS_UP(ifp) \ 1177 ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) == (IFF_RUNNING|IFF_UP)) 1178 int error; 1179 1180 error = ieee80211_media_change(ifp); 1181 if (error == ENETRESET) { 1182 if (IS_UP(ifp)) 1183 ath_init(ifp); /* XXX lose error */ 1184 error = 0; 1185 } 1186 return error; 1187#undef IS_UP 1188} 1189 1190#ifdef AR_DEBUG 1191static void 1192ath_keyprint(const char *tag, u_int ix, 1193 const HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN]) 1194{ 1195 static const char *ciphers[] = { 1196 "WEP", 1197 "AES-OCB", 1198 "AES-CCM", 1199 "CKIP", 1200 "TKIP", 1201 "CLR", 1202 }; 1203 int i, n; 1204 1205 printf("%s: [%02u] %-7s ", tag, ix, ciphers[hk->kv_type]); 1206 for (i = 0, n = hk->kv_len; i < n; i++) 1207 printf("%02x", hk->kv_val[i]); 1208 printf(" mac %s", ether_sprintf(mac)); 1209 if (hk->kv_type == HAL_CIPHER_TKIP) { 1210 printf(" mic "); 1211 for (i = 0; i < sizeof(hk->kv_mic); i++) 1212 printf("%02x", hk->kv_mic[i]); 1213 } 1214 printf("\n"); 1215} 1216#endif 1217 1218/* 1219 * 
Set a TKIP key into the hardware. This handles the 1220 * potential distribution of key state to multiple key 1221 * cache slots for TKIP. 1222 */ 1223static int 1224ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k, 1225 HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN]) 1226{ 1227#define IEEE80211_KEY_XR (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV) 1228 static const u_int8_t zerobssid[IEEE80211_ADDR_LEN]; 1229 struct ath_hal *ah = sc->sc_ah; 1230 1231 KASSERT(k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP, 1232 ("got a non-TKIP key, cipher %u", k->wk_cipher->ic_cipher)); 1233 KASSERT(sc->sc_splitmic, ("key cache !split")); 1234 if ((k->wk_flags & IEEE80211_KEY_XR) == IEEE80211_KEY_XR) { 1235 /* 1236 * TX key goes at first index, RX key at +32. 1237 * The hal handles the MIC keys at index+64. 1238 */ 1239 memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_mic)); 1240 KEYPRINTF(sc, k->wk_keyix, hk, zerobssid); 1241 if (!ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid)) 1242 return 0; 1243 1244 memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic)); 1245 KEYPRINTF(sc, k->wk_keyix+32, hk, mac); 1246 /* XXX delete tx key on failure? */ 1247 return ath_hal_keyset(ah, k->wk_keyix+32, hk, mac); 1248 } else if (k->wk_flags & IEEE80211_KEY_XR) { 1249 /* 1250 * TX/RX key goes at first index. 1251 * The hal handles the MIC keys are index+64. 1252 */ 1253 KASSERT(k->wk_keyix < IEEE80211_WEP_NKID, 1254 ("group key at index %u", k->wk_keyix)); 1255 memcpy(hk->kv_mic, k->wk_flags & IEEE80211_KEY_XMIT ? 1256 k->wk_txmic : k->wk_rxmic, sizeof(hk->kv_mic)); 1257 KEYPRINTF(sc, k->wk_keyix, hk, zerobssid); 1258 return ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid); 1259 } 1260 /* XXX key w/o xmit/recv; need this for compression? */ 1261 return 0; 1262#undef IEEE80211_KEY_XR 1263} 1264 1265/* 1266 * Set a net80211 key into the hardware. This handles the 1267 * potential distribution of key state to multiple key 1268 * cache slots for TKIP with hardware MIC support. 
1269 */ 1270static int 1271ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k, 1272 const u_int8_t mac[IEEE80211_ADDR_LEN]) 1273{ 1274#define N(a) (sizeof(a)/sizeof(a[0])) 1275 static const u_int8_t ciphermap[] = { 1276 HAL_CIPHER_WEP, /* IEEE80211_CIPHER_WEP */ 1277 HAL_CIPHER_TKIP, /* IEEE80211_CIPHER_TKIP */ 1278 HAL_CIPHER_AES_OCB, /* IEEE80211_CIPHER_AES_OCB */ 1279 HAL_CIPHER_AES_CCM, /* IEEE80211_CIPHER_AES_CCM */ 1280 (u_int8_t) -1, /* 4 is not allocated */ 1281 HAL_CIPHER_CKIP, /* IEEE80211_CIPHER_CKIP */ 1282 HAL_CIPHER_CLR, /* IEEE80211_CIPHER_NONE */ 1283 }; 1284 struct ath_hal *ah = sc->sc_ah; 1285 const struct ieee80211_cipher *cip = k->wk_cipher; 1286 HAL_KEYVAL hk; 1287 1288 memset(&hk, 0, sizeof(hk)); 1289 /* 1290 * Software crypto uses a "clear key" so non-crypto 1291 * state kept in the key cache are maintained and 1292 * so that rx frames have an entry to match. 1293 */ 1294 if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) { 1295 KASSERT(cip->ic_cipher < N(ciphermap), 1296 ("invalid cipher type %u", cip->ic_cipher)); 1297 hk.kv_type = ciphermap[cip->ic_cipher]; 1298 hk.kv_len = k->wk_keylen; 1299 memcpy(hk.kv_val, k->wk_key, k->wk_keylen); 1300 } else 1301 hk.kv_type = HAL_CIPHER_CLR; 1302 1303 if (hk.kv_type == HAL_CIPHER_TKIP && 1304 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && 1305 sc->sc_splitmic) { 1306 return ath_keyset_tkip(sc, k, &hk, mac); 1307 } else { 1308 KEYPRINTF(sc, k->wk_keyix, &hk, mac); 1309 return ath_hal_keyset(ah, k->wk_keyix, &hk, mac); 1310 } 1311#undef N 1312} 1313 1314/* 1315 * Fill the hardware key cache with key entries. 
1316 */ 1317static void 1318ath_initkeytable(struct ath_softc *sc) 1319{ 1320 struct ieee80211com *ic = &sc->sc_ic; 1321 struct ifnet *ifp = &sc->sc_if; 1322 struct ath_hal *ah = sc->sc_ah; 1323 const u_int8_t *bssid; 1324 int i; 1325 1326 /* XXX maybe should reset all keys when !PRIVACY */ 1327 if (ic->ic_state == IEEE80211_S_SCAN) 1328 bssid = ifp->if_broadcastaddr; 1329 else 1330 bssid = ic->ic_bss->ni_bssid; 1331 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 1332 struct ieee80211_key *k = &ic->ic_nw_keys[i]; 1333 1334 if (k->wk_keylen == 0) { 1335 ath_hal_keyreset(ah, i); 1336 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: reset key %u\n", 1337 __func__, i); 1338 } else { 1339 ath_keyset(sc, k, bssid); 1340 } 1341 } 1342} 1343 1344/* 1345 * Allocate tx/rx key slots for TKIP. We allocate two slots for 1346 * each key, one for decrypt/encrypt and the other for the MIC. 1347 */ 1348static u_int16_t 1349key_alloc_2pair(struct ath_softc *sc) 1350{ 1351#define N(a) (sizeof(a)/sizeof(a[0])) 1352 u_int i, keyix; 1353 1354 KASSERT(sc->sc_splitmic, ("key cache !split")); 1355 /* XXX could optimize */ 1356 for (i = 0; i < N(sc->sc_keymap)/4; i++) { 1357 u_int8_t b = sc->sc_keymap[i]; 1358 if (b != 0xff) { 1359 /* 1360 * One or more slots in this byte are free. 
1361 */ 1362 keyix = i*NBBY; 1363 while (b & 1) { 1364 again: 1365 keyix++; 1366 b >>= 1; 1367 } 1368 /* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */ 1369 if (isset(sc->sc_keymap, keyix+32) || 1370 isset(sc->sc_keymap, keyix+64) || 1371 isset(sc->sc_keymap, keyix+32+64)) { 1372 /* full pair unavailable */ 1373 /* XXX statistic */ 1374 if (keyix == (i+1)*NBBY) { 1375 /* no slots were appropriate, advance */ 1376 continue; 1377 } 1378 goto again; 1379 } 1380 setbit(sc->sc_keymap, keyix); 1381 setbit(sc->sc_keymap, keyix+64); 1382 setbit(sc->sc_keymap, keyix+32); 1383 setbit(sc->sc_keymap, keyix+32+64); 1384 DPRINTF(sc, ATH_DEBUG_KEYCACHE, 1385 "%s: key pair %u,%u %u,%u\n", 1386 __func__, keyix, keyix+64, 1387 keyix+32, keyix+32+64); 1388 return keyix; 1389 } 1390 } 1391 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__); 1392 return IEEE80211_KEYIX_NONE; 1393#undef N 1394} 1395 1396/* 1397 * Allocate a single key cache slot. 1398 */ 1399static u_int16_t 1400key_alloc_single(struct ath_softc *sc) 1401{ 1402#define N(a) (sizeof(a)/sizeof(a[0])) 1403 u_int i, keyix; 1404 1405 /* XXX try i,i+32,i+64,i+32+64 to minimize key pair conflicts */ 1406 for (i = 0; i < N(sc->sc_keymap); i++) { 1407 u_int8_t b = sc->sc_keymap[i]; 1408 if (b != 0xff) { 1409 /* 1410 * One or more slots are free. 1411 */ 1412 keyix = i*NBBY; 1413 while (b & 1) 1414 keyix++, b >>= 1; 1415 setbit(sc->sc_keymap, keyix); 1416 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key %u\n", 1417 __func__, keyix); 1418 return keyix; 1419 } 1420 } 1421 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of space\n", __func__); 1422 return IEEE80211_KEYIX_NONE; 1423#undef N 1424} 1425 1426/* 1427 * Allocate one or more key cache slots for a uniacst key. The 1428 * key itself is needed only to identify the cipher. For hardware 1429 * TKIP with split cipher+MIC keys we allocate two key cache slot 1430 * pairs so that we can setup separate TX and RX MIC keys. 
Note 1431 * that the MIC key for a TKIP key at slot i is assumed by the 1432 * hardware to be at slot i+64. This limits TKIP keys to the first 1433 * 64 entries. 1434 */ 1435static int 1436ath_key_alloc(struct ieee80211com *ic, const struct ieee80211_key *k) 1437{ 1438 struct ath_softc *sc = ic->ic_ifp->if_softc; 1439 1440 /* 1441 * We allocate two pair for TKIP when using the h/w to do 1442 * the MIC. For everything else, including software crypto, 1443 * we allocate a single entry. Note that s/w crypto requires 1444 * a pass-through slot on the 5211 and 5212. The 5210 does 1445 * not support pass-through cache entries and we map all 1446 * those requests to slot 0. 1447 */ 1448 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { 1449 return key_alloc_single(sc); 1450 } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP && 1451 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic) { 1452 return key_alloc_2pair(sc); 1453 } else { 1454 return key_alloc_single(sc); 1455 } 1456} 1457 1458/* 1459 * Delete an entry in the key cache allocated by ath_key_alloc. 1460 */ 1461static int 1462ath_key_delete(struct ieee80211com *ic, const struct ieee80211_key *k) 1463{ 1464 struct ath_softc *sc = ic->ic_ifp->if_softc; 1465 struct ath_hal *ah = sc->sc_ah; 1466 const struct ieee80211_cipher *cip = k->wk_cipher; 1467 u_int keyix = k->wk_keyix; 1468 1469 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, keyix); 1470 1471 ath_hal_keyreset(ah, keyix); 1472 /* 1473 * Handle split tx/rx keying required for TKIP with h/w MIC. 1474 */ 1475 if (cip->ic_cipher == IEEE80211_CIPHER_TKIP && 1476 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic) 1477 ath_hal_keyreset(ah, keyix+32); /* RX key */ 1478 if (keyix >= IEEE80211_WEP_NKID) { 1479 /* 1480 * Don't touch keymap entries for global keys so 1481 * they are never considered for dynamic allocation. 
1482 */ 1483 clrbit(sc->sc_keymap, keyix); 1484 if (cip->ic_cipher == IEEE80211_CIPHER_TKIP && 1485 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && 1486 sc->sc_splitmic) { 1487 clrbit(sc->sc_keymap, keyix+64); /* TX key MIC */ 1488 clrbit(sc->sc_keymap, keyix+32); /* RX key */ 1489 clrbit(sc->sc_keymap, keyix+32+64); /* RX key MIC */ 1490 } 1491 } 1492 return 1; 1493} 1494 1495/* 1496 * Set the key cache contents for the specified key. Key cache 1497 * slot(s) must already have been allocated by ath_key_alloc. 1498 */ 1499static int 1500ath_key_set(struct ieee80211com *ic, const struct ieee80211_key *k, 1501 const u_int8_t mac[IEEE80211_ADDR_LEN]) 1502{ 1503 struct ath_softc *sc = ic->ic_ifp->if_softc; 1504 1505 return ath_keyset(sc, k, mac); 1506} 1507 1508/* 1509 * Block/unblock tx+rx processing while a key change is done. 1510 * We assume the caller serializes key management operations 1511 * so we only need to worry about synchronization with other 1512 * uses that originate in the driver. 
1513 */ 1514static void 1515ath_key_update_begin(struct ieee80211com *ic) 1516{ 1517 struct ifnet *ifp = ic->ic_ifp; 1518 struct ath_softc *sc = ifp->if_softc; 1519 1520 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 1521#if 0 1522 tasklet_disable(&sc->sc_rxtq); 1523#endif 1524 IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */ 1525} 1526 1527static void 1528ath_key_update_end(struct ieee80211com *ic) 1529{ 1530 struct ifnet *ifp = ic->ic_ifp; 1531 struct ath_softc *sc = ifp->if_softc; 1532 1533 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 1534 IF_UNLOCK(&ifp->if_snd); 1535#if 0 1536 tasklet_enable(&sc->sc_rxtq); 1537#endif 1538} 1539 1540/* 1541 * Calculate the receive filter according to the 1542 * operating mode and state: 1543 * 1544 * o always accept unicast, broadcast, and multicast traffic 1545 * o maintain current state of phy error reception (the hal 1546 * may enable phy error frames for noise immunity work) 1547 * o probe request frames are accepted only when operating in 1548 * hostap, adhoc, or monitor modes 1549 * o enable promiscuous mode according to the interface state 1550 * o accept beacons: 1551 * - when operating in adhoc mode so the 802.11 layer creates 1552 * node table entries for peers, 1553 * - when operating in station mode for collecting rssi data when 1554 * the station is otherwise quiet, or 1555 * - when scanning 1556 */ 1557static u_int32_t 1558ath_calcrxfilter(struct ath_softc *sc, enum ieee80211_state state) 1559{ 1560 struct ieee80211com *ic = &sc->sc_ic; 1561 struct ath_hal *ah = sc->sc_ah; 1562 struct ifnet *ifp = &sc->sc_if; 1563 u_int32_t rfilt; 1564 1565 rfilt = (ath_hal_getrxfilter(ah) & HAL_RX_FILTER_PHYERR) 1566 | HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST; 1567 if (ic->ic_opmode != IEEE80211_M_STA) 1568 rfilt |= HAL_RX_FILTER_PROBEREQ; 1569 if (ic->ic_opmode != IEEE80211_M_HOSTAP && 1570 (ifp->if_flags & IFF_PROMISC)) 1571 rfilt |= HAL_RX_FILTER_PROM; 1572 if (ic->ic_opmode == 
IEEE80211_M_STA || 1573 ic->ic_opmode == IEEE80211_M_IBSS || 1574 state == IEEE80211_S_SCAN) 1575 rfilt |= HAL_RX_FILTER_BEACON; 1576 return rfilt; 1577} 1578 1579static void 1580ath_mode_init(struct ath_softc *sc) 1581{ 1582 struct ieee80211com *ic = &sc->sc_ic; 1583 struct ath_hal *ah = sc->sc_ah; 1584 struct ifnet *ifp = &sc->sc_if; 1585 u_int32_t rfilt, mfilt[2], val; 1586 u_int8_t pos; 1587 struct ifmultiaddr *ifma; 1588 1589 /* configure rx filter */ 1590 rfilt = ath_calcrxfilter(sc, ic->ic_state); 1591 ath_hal_setrxfilter(ah, rfilt); 1592 1593 /* configure operational mode */ 1594 ath_hal_setopmode(ah); 1595 1596 /* 1597 * Handle any link-level address change. Note that we only 1598 * need to force ic_myaddr; any other addresses are handled 1599 * as a byproduct of the ifnet code marking the interface 1600 * down then up. 1601 * 1602 * XXX should get from lladdr instead of arpcom but that's more work 1603 */ 1604 IEEE80211_ADDR_COPY(ic->ic_myaddr, IFP2AC(ifp)->ac_enaddr); 1605 ath_hal_setmac(ah, ic->ic_myaddr); 1606 1607 /* calculate and install multicast filter */ 1608 if ((ifp->if_flags & IFF_ALLMULTI) == 0) { 1609 mfilt[0] = mfilt[1] = 0; 1610 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1611 caddr_t dl; 1612 1613 /* calculate XOR of eight 6bit values */ 1614 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 1615 val = LE_READ_4(dl + 0); 1616 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 1617 val = LE_READ_4(dl + 3); 1618 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 1619 pos &= 0x3f; 1620 mfilt[pos / 32] |= (1 << (pos % 32)); 1621 } 1622 } else { 1623 mfilt[0] = mfilt[1] = ~0; 1624 } 1625 ath_hal_setmcastfilter(ah, mfilt[0], mfilt[1]); 1626 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, MC filter %08x:%08x\n", 1627 __func__, rfilt, mfilt[0], mfilt[1]); 1628} 1629 1630static void 1631ath_mbuf_load_cb(void *arg, bus_dma_segment_t *seg, int nseg, bus_size_t mapsize, int error) 1632{ 1633 struct ath_buf *bf = arg; 1634 1635 
KASSERT(nseg <= ATH_MAX_SCATTER, 1636 ("%s: too many DMA segments %u", __func__, nseg)); 1637 KASSERT(error == 0, 1638 ("%s: error %u on bus_dma callback", __func__, error)); 1639 bf->bf_mapsize = mapsize; 1640 bf->bf_nseg = nseg; 1641 bcopy(seg, bf->bf_segs, nseg * sizeof (seg[0])); 1642} 1643 1644/* 1645 * Set the slot time based on the current setting. 1646 */ 1647static void 1648ath_setslottime(struct ath_softc *sc) 1649{ 1650 struct ieee80211com *ic = &sc->sc_ic; 1651 struct ath_hal *ah = sc->sc_ah; 1652 1653 if (ic->ic_flags & IEEE80211_F_SHSLOT) 1654 ath_hal_setslottime(ah, HAL_SLOT_TIME_9); 1655 else 1656 ath_hal_setslottime(ah, HAL_SLOT_TIME_20); 1657 sc->sc_updateslot = OK; 1658} 1659 1660/* 1661 * Callback from the 802.11 layer to update the 1662 * slot time based on the current setting. 1663 */ 1664static void 1665ath_updateslot(struct ifnet *ifp) 1666{ 1667 struct ath_softc *sc = ifp->if_softc; 1668 struct ieee80211com *ic = &sc->sc_ic; 1669 1670 /* 1671 * When not coordinating the BSS, change the hardware 1672 * immediately. For other operation we defer the change 1673 * until beacon updates have propagated to the stations. 1674 */ 1675 if (ic->ic_opmode == IEEE80211_M_HOSTAP) 1676 sc->sc_updateslot = UPDATE; 1677 else 1678 ath_setslottime(sc); 1679} 1680 1681/* 1682 * Allocate and setup an initial beacon frame. 
1683 */ 1684static int 1685ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni) 1686{ 1687 struct ieee80211com *ic = ni->ni_ic; 1688 struct ath_buf *bf; 1689 struct mbuf *m; 1690 int error; 1691 1692 bf = STAILQ_FIRST(&sc->sc_bbuf); 1693 if (bf == NULL) { 1694 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: no dma buffers\n", __func__); 1695 sc->sc_stats.ast_be_nombuf++; /* XXX */ 1696 return ENOMEM; /* XXX */ 1697 } 1698 if (bf->bf_m != NULL) { 1699 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1700 m_freem(bf->bf_m); 1701 bf->bf_m = NULL; 1702 bf->bf_node = NULL; 1703 } 1704 /* 1705 * NB: the beacon data buffer must be 32-bit aligned; 1706 * we assume the mbuf routines will return us something 1707 * with this alignment (perhaps should assert). 1708 */ 1709 m = ieee80211_beacon_alloc(ic, ni, &sc->sc_boff); 1710 if (m == NULL) { 1711 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: cannot get mbuf\n", 1712 __func__); 1713 sc->sc_stats.ast_be_nombuf++; 1714 return ENOMEM; 1715 } 1716 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m, 1717 ath_mbuf_load_cb, bf, 1718 BUS_DMA_NOWAIT); 1719 if (error == 0) { 1720 bf->bf_m = m; 1721 bf->bf_node = ni; /* NB: no held reference */ 1722 } else { 1723 m_freem(m); 1724 } 1725 return error; 1726} 1727 1728/* 1729 * Setup the beacon frame for transmit. 
1730 */ 1731static void 1732ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf) 1733{ 1734#define USE_SHPREAMBLE(_ic) \ 1735 (((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\ 1736 == IEEE80211_F_SHPREAMBLE) 1737 struct ieee80211_node *ni = bf->bf_node; 1738 struct ieee80211com *ic = ni->ni_ic; 1739 struct mbuf *m = bf->bf_m; 1740 struct ath_hal *ah = sc->sc_ah; 1741 struct ath_node *an = ATH_NODE(ni); 1742 struct ath_desc *ds; 1743 int flags, antenna; 1744 u_int8_t rate; 1745 1746 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: m %p len %u\n", 1747 __func__, m, m->m_len); 1748 1749 /* setup descriptors */ 1750 ds = bf->bf_desc; 1751 1752 flags = HAL_TXDESC_NOACK; 1753 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) { 1754 ds->ds_link = bf->bf_daddr; /* self-linked */ 1755 flags |= HAL_TXDESC_VEOL; 1756 /* 1757 * Let hardware handle antenna switching. 1758 */ 1759 antenna = 0; 1760 } else { 1761 ds->ds_link = 0; 1762 /* 1763 * Switch antenna every 4 beacons. 1764 * XXX assumes two antenna 1765 */ 1766 antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1); 1767 } 1768 1769 KASSERT(bf->bf_nseg == 1, 1770 ("multi-segment beacon frame; nseg %u", bf->bf_nseg)); 1771 ds->ds_data = bf->bf_segs[0].ds_addr; 1772 /* 1773 * Calculate rate code. 
1774 * XXX everything at min xmit rate 1775 */ 1776 if (USE_SHPREAMBLE(ic)) 1777 rate = an->an_tx_mgtratesp; 1778 else 1779 rate = an->an_tx_mgtrate; 1780 ath_hal_setuptxdesc(ah, ds 1781 , m->m_len + IEEE80211_CRC_LEN /* frame length */ 1782 , sizeof(struct ieee80211_frame)/* header length */ 1783 , HAL_PKT_TYPE_BEACON /* Atheros packet type */ 1784 , ni->ni_txpower /* txpower XXX */ 1785 , rate, 1 /* series 0 rate/tries */ 1786 , HAL_TXKEYIX_INVALID /* no encryption */ 1787 , antenna /* antenna mode */ 1788 , flags /* no ack, veol for beacons */ 1789 , 0 /* rts/cts rate */ 1790 , 0 /* rts/cts duration */ 1791 ); 1792 /* NB: beacon's BufLen must be a multiple of 4 bytes */ 1793 ath_hal_filltxdesc(ah, ds 1794 , roundup(m->m_len, 4) /* buffer length */ 1795 , AH_TRUE /* first segment */ 1796 , AH_TRUE /* last segment */ 1797 , ds /* first descriptor */ 1798 ); 1799#undef USE_SHPREAMBLE 1800} 1801 1802/* 1803 * Transmit a beacon frame at SWBA. Dynamic updates to the 1804 * frame contents are done as needed and the slot time is 1805 * also adjusted based on current state. 1806 */ 1807static void 1808ath_beacon_proc(void *arg, int pending) 1809{ 1810 struct ath_softc *sc = arg; 1811 struct ath_buf *bf = STAILQ_FIRST(&sc->sc_bbuf); 1812 struct ieee80211_node *ni = bf->bf_node; 1813 struct ieee80211com *ic = ni->ni_ic; 1814 struct ath_hal *ah = sc->sc_ah; 1815 struct mbuf *m; 1816 int ncabq, error, otherant; 1817 1818 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n", 1819 __func__, pending); 1820 1821 if (ic->ic_opmode == IEEE80211_M_STA || 1822 ic->ic_opmode == IEEE80211_M_MONITOR || 1823 bf == NULL || bf->bf_m == NULL) { 1824 DPRINTF(sc, ATH_DEBUG_ANY, "%s: ic_flags=%x bf=%p bf_m=%p\n", 1825 __func__, ic->ic_flags, bf, bf ? bf->bf_m : NULL); 1826 return; 1827 } 1828 /* 1829 * Check if the previous beacon has gone out. If 1830 * not don't don't try to post another, skip this 1831 * period and wait for the next. 
Missed beacons 1832 * indicate a problem and should not occur. If we 1833 * miss too many consecutive beacons reset the device. 1834 */ 1835 if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) { 1836 sc->sc_bmisscount++; 1837 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, 1838 "%s: missed %u consecutive beacons\n", 1839 __func__, sc->sc_bmisscount); 1840 if (sc->sc_bmisscount > 3) /* NB: 3 is a guess */ 1841 taskqueue_enqueue(taskqueue_swi, &sc->sc_bstucktask); 1842 return; 1843 } 1844 if (sc->sc_bmisscount != 0) { 1845 DPRINTF(sc, ATH_DEBUG_BEACON, 1846 "%s: resume beacon xmit after %u misses\n", 1847 __func__, sc->sc_bmisscount); 1848 sc->sc_bmisscount = 0; 1849 } 1850 1851 /* 1852 * Update dynamic beacon contents. If this returns 1853 * non-zero then we need to remap the memory because 1854 * the beacon frame changed size (probably because 1855 * of the TIM bitmap). 1856 */ 1857 m = bf->bf_m; 1858 ncabq = ath_hal_numtxpending(ah, sc->sc_cabq->axq_qnum); 1859 if (ieee80211_beacon_update(ic, bf->bf_node, &sc->sc_boff, m, ncabq)) { 1860 /* XXX too conservative? */ 1861 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1862 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m, 1863 ath_mbuf_load_cb, bf, 1864 BUS_DMA_NOWAIT); 1865 if (error != 0) { 1866 if_printf(ic->ic_ifp, 1867 "%s: bus_dmamap_load_mbuf failed, error %u\n", 1868 __func__, error); 1869 return; 1870 } 1871 } 1872 1873 /* 1874 * Handle slot time change when a non-ERP station joins/leaves 1875 * an 11g network. The 802.11 layer notifies us via callback, 1876 * we mark updateslot, then wait one beacon before effecting 1877 * the change. This gives associated stations at least one 1878 * beacon interval to note the state change. 
1879 */ 1880 /* XXX locking */ 1881 if (sc->sc_updateslot == UPDATE) 1882 sc->sc_updateslot = COMMIT; /* commit next beacon */ 1883 else if (sc->sc_updateslot == COMMIT) 1884 ath_setslottime(sc); /* commit change to h/w */ 1885 1886 /* 1887 * Check recent per-antenna transmit statistics and flip 1888 * the default antenna if noticeably more frames went out 1889 * on the non-default antenna. 1890 * XXX assumes 2 anntenae 1891 */ 1892 otherant = sc->sc_defant & 1 ? 2 : 1; 1893 if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2) 1894 ath_setdefantenna(sc, otherant); 1895 sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0; 1896 1897 /* 1898 * Construct tx descriptor. 1899 */ 1900 ath_beacon_setup(sc, bf); 1901 1902 /* 1903 * Stop any current dma and put the new frame on the queue. 1904 * This should never fail since we check above that no frames 1905 * are still pending on the queue. 1906 */ 1907 if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) { 1908 DPRINTF(sc, ATH_DEBUG_ANY, 1909 "%s: beacon queue %u did not stop?\n", 1910 __func__, sc->sc_bhalq); 1911 } 1912 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 1913 1914 /* 1915 * Enable the CAB queue before the beacon queue to 1916 * insure cab frames are triggered by this beacon. 1917 */ 1918 if (sc->sc_boff.bo_tim[4] & 1) /* NB: only at DTIM */ 1919 ath_hal_txstart(ah, sc->sc_cabq->axq_qnum); 1920 ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr); 1921 ath_hal_txstart(ah, sc->sc_bhalq); 1922 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, 1923 "%s: TXDP[%u] = %p (%p)\n", __func__, 1924 sc->sc_bhalq, (caddr_t)bf->bf_daddr, bf->bf_desc); 1925 1926 sc->sc_stats.ast_be_xmit++; 1927} 1928 1929/* 1930 * Reset the hardware after detecting beacons have stopped. 
1931 */ 1932static void 1933ath_bstuck_proc(void *arg, int pending) 1934{ 1935 struct ath_softc *sc = arg; 1936 struct ifnet *ifp = &sc->sc_if; 1937 1938 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n", 1939 sc->sc_bmisscount); 1940 ath_reset(ifp); 1941} 1942 1943/* 1944 * Reclaim beacon resources. 1945 */ 1946static void 1947ath_beacon_free(struct ath_softc *sc) 1948{ 1949 struct ath_buf *bf; 1950 1951 STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) 1952 if (bf->bf_m != NULL) { 1953 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 1954 m_freem(bf->bf_m); 1955 bf->bf_m = NULL; 1956 bf->bf_node = NULL; 1957 } 1958} 1959 1960/* 1961 * Configure the beacon and sleep timers. 1962 * 1963 * When operating as an AP this resets the TSF and sets 1964 * up the hardware to notify us when we need to issue beacons. 1965 * 1966 * When operating in station mode this sets up the beacon 1967 * timers according to the timestamp of the last received 1968 * beacon and the current TSF, configures PCF and DTIM 1969 * handling, programs the sleep registers so the hardware 1970 * will wakeup in time to receive beacons, and configures 1971 * the beacon miss handling so we'll receive a BMISS 1972 * interrupt when we stop seeing beacons from the AP 1973 * we've associated with. 
 */
static void
ath_beacon_config(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	u_int32_t nexttbtt, intval;

	/*
	 * Convert the bss node's little-endian 64-bit TSF timestamp
	 * to TU's (1024us units): >>10 on the low word with the
	 * remaining bits of the high word shifted in (32-10 == 22).
	 */
	nexttbtt = (LE_READ_4(ni->ni_tstamp.data + 4) << 22) |
	    (LE_READ_4(ni->ni_tstamp.data) >> 10);
	DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u\n",
		__func__, nexttbtt, ni->ni_intval);
	nexttbtt += ni->ni_intval;	/* next TBTT one beacon interval on */
	intval = ni->ni_intval & HAL_BEACON_PERIOD;
	if (ic->ic_opmode == IEEE80211_M_STA) {
		HAL_BEACON_STATE bs;
		u_int32_t bmisstime;

		/* NB: no PCF support right now */
		memset(&bs, 0, sizeof(bs));
		/*
		 * Reset our tsf so the hardware will update the
		 * tsf register to reflect timestamps found in
		 * received beacons.
		 */
		bs.bs_intval = intval | HAL_BEACON_RESET_TSF;
		bs.bs_nexttbtt = nexttbtt;
		/*
		 * NOTE(review): dtim period inherits the RESET_TSF flag
		 * from bs_intval here -- verify the HAL masks it off.
		 */
		bs.bs_dtimperiod = bs.bs_intval;
		bs.bs_nextdtim = nexttbtt;
		/*
		 * The 802.11 layer records the offset to the DTIM
		 * bitmap while receiving beacons; use it here to
		 * enable h/w detection of our AID being marked in
		 * the bitmap vector (to indicate frames for us are
		 * pending at the AP).
		 */
		bs.bs_timoffset = ni->ni_timoff;
		/*
		 * Calculate the number of consecutive beacons to miss
		 * before taking a BMISS interrupt.  The configuration
		 * is specified in ms, so we need to convert that to
		 * TU's and then calculate based on the beacon interval.
		 * Note that we clamp the result to at most 10 beacons.
		 */
		bmisstime = (ic->ic_bmisstimeout * 1000) / 1024;
		bs.bs_bmissthreshold = howmany(bmisstime,ni->ni_intval);
		if (bs.bs_bmissthreshold > 10)
			bs.bs_bmissthreshold = 10;
		else if (bs.bs_bmissthreshold <= 0)
			bs.bs_bmissthreshold = 1;

		/*
		 * Calculate sleep duration.  The configuration is
		 * given in ms.  We insure a multiple of the beacon
		 * period is used.  Also, if the sleep duration is
		 * greater than the DTIM period then it makes senses
		 * to make it a multiple of that.
		 *
		 * XXX fixed at 100ms
		 */
		bs.bs_sleepduration =
			roundup((100 * 1000) / 1024, bs.bs_intval);
		if (bs.bs_sleepduration > bs.bs_dtimperiod)
			bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);

		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
			, __func__
			, bs.bs_intval
			, bs.bs_nexttbtt
			, bs.bs_dtimperiod
			, bs.bs_nextdtim
			, bs.bs_bmissthreshold
			, bs.bs_sleepduration
			, bs.bs_cfpperiod
			, bs.bs_cfpmaxduration
			, bs.bs_cfpnext
			, bs.bs_timoffset
		);
		/* block interrupts while the beacon state is loaded */
		ath_hal_intrset(ah, 0);
		ath_hal_beacontimers(ah, &bs);
		sc->sc_imask |= HAL_INT_BMISS;
		ath_hal_intrset(ah, sc->sc_imask);
	} else {
		ath_hal_intrset(ah, 0);
		if (nexttbtt == ni->ni_intval)
			intval |= HAL_BEACON_RESET_TSF;
		if (ic->ic_opmode == IEEE80211_M_IBSS) {
			/*
			 * In IBSS mode enable the beacon timers but only
			 * enable SWBA interrupts if we need to manually
			 * prepare beacon frames.  Otherwise we use a
			 * self-linked tx descriptor and let the hardware
			 * deal with things.
			 */
			intval |= HAL_BEACON_ENA;
			if (!sc->sc_hasveol)
				sc->sc_imask |= HAL_INT_SWBA;
		} else if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
			/*
			 * In AP mode we enable the beacon timers and
			 * SWBA interrupts to prepare beacon frames.
			 */
			intval |= HAL_BEACON_ENA;
			sc->sc_imask |= HAL_INT_SWBA;	/* beacon prepare */
		}
		ath_hal_beaconinit(ah, nexttbtt, intval);
		sc->sc_bmisscount = 0;
		ath_hal_intrset(ah, sc->sc_imask);
		/*
		 * When using a self-linked beacon descriptor in
		 * ibss mode load it once here.
		 */
		if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol)
			ath_beacon_proc(sc, 0);
	}
}

/*
 * bus_dma callback used when loading a descriptor area; records
 * the (single) segment's bus address for the caller.
 */
static void
ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	KASSERT(error == 0,
		("%s: error %u on bus_dma callback", __func__, error));
	*paddr = segs->ds_addr;
}

/*
 * Allocate and map a DMA descriptor area plus the associated array
 * of ath_buf structures: nbuf buffers of ndesc descriptors each.
 * On success the buffers are threaded onto `head'.  Returns 0 or
 * an errno; on failure everything allocated here is released.
 */
static int
ath_descdma_setup(struct ath_softc *sc,
	struct ath_descdma *dd, ath_bufhead *head,
	const char *name, int nbuf, int ndesc)
{
#define	DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
	struct ifnet *ifp = &sc->sc_if;
	struct ath_desc *ds;
	struct ath_buf *bf;
	int i, bsize, error;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n",
	    __func__, name, nbuf, ndesc);

	dd->dd_name = name;
	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;

	/*
	 * Setup DMA descriptor area.
2122 */ 2123 error = bus_dma_tag_create(NULL, /* parent */ 2124 PAGE_SIZE, 0, /* alignment, bounds */ 2125 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2126 BUS_SPACE_MAXADDR, /* highaddr */ 2127 NULL, NULL, /* filter, filterarg */ 2128 dd->dd_desc_len, /* maxsize */ 2129 1, /* nsegments */ 2130 BUS_SPACE_MAXADDR, /* maxsegsize */ 2131 BUS_DMA_ALLOCNOW, /* flags */ 2132 NULL, /* lockfunc */ 2133 NULL, /* lockarg */ 2134 &dd->dd_dmat); 2135 if (error != 0) { 2136 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); 2137 return error; 2138 } 2139 2140 /* allocate descriptors */ 2141 error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap); 2142 if (error != 0) { 2143 if_printf(ifp, "unable to create dmamap for %s descriptors, " 2144 "error %u\n", dd->dd_name, error); 2145 goto fail0; 2146 } 2147 2148 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, 2149 BUS_DMA_NOWAIT, &dd->dd_dmamap); 2150 if (error != 0) { 2151 if_printf(ifp, "unable to alloc memory for %u %s descriptors, " 2152 "error %u\n", nbuf * ndesc, dd->dd_name, error); 2153 goto fail1; 2154 } 2155 2156 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, 2157 dd->dd_desc, dd->dd_desc_len, 2158 ath_load_cb, &dd->dd_desc_paddr, 2159 BUS_DMA_NOWAIT); 2160 if (error != 0) { 2161 if_printf(ifp, "unable to map %s descriptors, error %u\n", 2162 dd->dd_name, error); 2163 goto fail2; 2164 } 2165 2166 ds = dd->dd_desc; 2167 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", 2168 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, 2169 (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); 2170 2171 /* allocate rx buffers */ 2172 bsize = sizeof(struct ath_buf) * nbuf; 2173 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 2174 if (bf == NULL) { 2175 if_printf(ifp, "malloc of %s buffers failed, size %u\n", 2176 dd->dd_name, bsize); 2177 goto fail3; 2178 } 2179 dd->dd_bufptr = bf; 2180 2181 STAILQ_INIT(head); 2182 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) { 
2183 bf->bf_desc = ds; 2184 bf->bf_daddr = DS2PHYS(dd, ds); 2185 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 2186 &bf->bf_dmamap); 2187 if (error != 0) { 2188 if_printf(ifp, "unable to create dmamap for %s " 2189 "buffer %u, error %u\n", dd->dd_name, i, error); 2190 ath_descdma_cleanup(sc, dd, head); 2191 return error; 2192 } 2193 STAILQ_INSERT_TAIL(head, bf, bf_list); 2194 } 2195 return 0; 2196fail3: 2197 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 2198fail2: 2199 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 2200fail1: 2201 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 2202fail0: 2203 bus_dma_tag_destroy(dd->dd_dmat); 2204 memset(dd, 0, sizeof(*dd)); 2205 return error; 2206#undef DS2PHYS 2207} 2208 2209static void 2210ath_descdma_cleanup(struct ath_softc *sc, 2211 struct ath_descdma *dd, ath_bufhead *head) 2212{ 2213 struct ath_buf *bf; 2214 struct ieee80211_node *ni; 2215 2216 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 2217 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 2218 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 2219 bus_dma_tag_destroy(dd->dd_dmat); 2220 2221 STAILQ_FOREACH(bf, head, bf_list) { 2222 if (bf->bf_m) { 2223 m_freem(bf->bf_m); 2224 bf->bf_m = NULL; 2225 } 2226 if (bf->bf_dmamap != NULL) { 2227 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 2228 bf->bf_dmamap = NULL; 2229 } 2230 ni = bf->bf_node; 2231 bf->bf_node = NULL; 2232 if (ni != NULL) { 2233 /* 2234 * Reclaim node reference. 
 */
			ieee80211_free_node(ni);
		}
	}

	STAILQ_INIT(head);
	free(dd->dd_bufptr, M_ATHDEV);
	memset(dd, 0, sizeof(*dd));
}

/*
 * Allocate the rx, tx, and beacon descriptor areas; on any failure
 * the areas already set up are torn down before returning the error.
 */
static int
ath_desc_alloc(struct ath_softc *sc)
{
	int error;

	error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
			"rx", ATH_RXBUF, 1);
	if (error != 0)
		return error;

	error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
			"tx", ATH_TXBUF, ATH_TXDESC);
	if (error != 0) {
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
		return error;
	}

	error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
			"beacon", 1, 1);
	if (error != 0) {
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
		return error;
	}
	return 0;
}

/*
 * Free all descriptor areas.  dd_desc_len != 0 is the "this area
 * was set up" marker; ath_descdma_cleanup memset()s it back to 0.
 */
static void
ath_desc_free(struct ath_softc *sc)
{

	if (sc->sc_bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
	if (sc->sc_txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
}

/*
 * net80211 node-allocation hook: allocate an ath_node with the rate
 * control module's per-node state (arc_space bytes) appended, and
 * seed the rssi averages with the "no samples yet" marker so
 * ath_node_getrssi can fall back to the 802.11 layer's value.
 */
static struct ieee80211_node *
ath_node_alloc(struct ieee80211_node_table *nt)
{
	struct ieee80211com *ic = nt->nt_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
	struct ath_node *an;

	an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
	if (an == NULL) {
		/* XXX stat+msg */
		return NULL;
	}
	an->an_avgrssi = ATH_RSSI_DUMMY_MARKER;
	an->an_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
	an->an_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
	an->an_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
	ath_rate_node_init(sc, an);

	DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
	return
&an->an_node;
}

/*
 * Clear any references to a node in a transmit queue.
 * This happens when the node is cleaned so we don't
 * need to worry about the reference count going to zero;
 * we just reclaim the reference w/o dropping the txq lock.
 * Then we null the pointer and the right thing happens
 * when the buffer is cleaned in ath_tx_processq.
 */
static void
ath_tx_cleanq(struct ieee80211com *ic, struct ath_txq *txq,
	struct ieee80211_node *ni)
{
	struct ath_buf *bf;

	ATH_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->axq_q, bf_list) {
		if (bf->bf_node == ni) {
			/* NB: this clears the pointer too */
			ieee80211_unref_node(&bf->bf_node);
		}
	}
	ATH_TXQ_UNLOCK(txq);
}

/*
 * net80211 node-free hook: scrub every active tx queue of dangling
 * references to the node, let the rate control module release its
 * per-node state, then chain to the 802.11 layer's original free
 * routine saved in sc_node_free.
 */
static void
ath_node_free(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	int i;

	DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);

	/* XXX can this happen since refcnt must be zero for us to be called? */
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanq(ic, &sc->sc_txq[i], ni);
	ath_rate_node_cleanup(sc, ATH_NODE(ni));
	sc->sc_node_free(ni);
}

/*
 * net80211 getrssi hook: return the node's exponentially-averaged
 * rx rssi, rounded back from EP fixed-point form and clamped to
 * the 0..127 range of the u_int8_t return type.
 */
static u_int8_t
ath_node_getrssi(const struct ieee80211_node *ni)
{
#define	HAL_EP_RND(x, mul) \
	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
	u_int32_t avgrssi = ATH_NODE_CONST(ni)->an_avgrssi;
	int32_t rssi;

	/*
	 * When only one frame is received there will be no state in
	 * avgrssi so fallback on the value recorded by the 802.11 layer.
	 */
	if (avgrssi != ATH_RSSI_DUMMY_MARKER)
		rssi = HAL_EP_RND(avgrssi, HAL_RSSI_EP_MULTIPLIER);
	else
		rssi = ni->ni_rssi;
	/* NB: theoretically we shouldn't need this, but be paranoid */
	return rssi < 0 ? 0 : rssi > 127 ?
127 : rssi;
#undef HAL_EP_RND
}

/*
 * (Re)initialize an rx buffer: allocate an mbuf cluster if the
 * buffer has none, load its DMA map, and write a self-linked rx
 * descriptor that is appended to the hardware rx chain via
 * sc_rxlink.  Returns 0 or an errno.
 */
static int
ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	int error;
	struct mbuf *m;
	struct ath_desc *ds;

	m = bf->bf_m;
	if (m == NULL) {
		/*
		 * NB: by assigning a page to the rx dma buffer we
		 * implicitly satisfy the Atheros requirement that
		 * this buffer be cache-line-aligned and sized to be
		 * multiple of the cache line size.  Not doing this
		 * causes weird stuff to happen (for the 5210 at least).
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: no mbuf/cluster\n", __func__);
			sc->sc_stats.ast_rx_nombuf++;
			return ENOMEM;
		}
		KASSERT(m->m_next == NULL, ("m_next %p (1)", m->m_next));
		bf->bf_m = m;
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf(sc->sc_dmat,
				bf->bf_dmamap, m,
				ath_mbuf_load_cb, bf,
				BUS_DMA_NOWAIT);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: bus_dmamap_load_mbuf failed; error %d\n",
				__func__, error);
			sc->sc_stats.ast_rx_busdma++;
			return error;
		}
		KASSERT(bf->bf_nseg == 1, ("%s: multi-segment packet; nseg %u",
			__func__, bf->bf_nseg));
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To insure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This insures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->bf_desc;
	ds->ds_link = bf->bf_daddr;	/* link to self */
	ds->ds_data = bf->bf_segs[0].ds_addr;
	ath_hal_setuprxdesc(ah, ds
		, m->m_len		/* buffer size */
		, 0
	);

	/* patch the previous tail's link to point at this descriptor */
	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
	return 0;
}

/*
 * Intercept management frames to collect beacon rssi data
 * and to do ibss merges.
 */
static void
ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m,
	struct ieee80211_node *ni,
	int subtype, int rssi, u_int32_t rstamp)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	/*
	 * Call up first so subsequent work can use information
	 * potentially stored in the node (e.g. for ibss merge).
	 */
	sc->sc_recv_mgmt(ic, m, ni, subtype, rssi, rstamp);
	switch (subtype) {
	case IEEE80211_FC0_SUBTYPE_BEACON:
		/* update rssi statistics for use by the hal */
		ATH_RSSI_LPF(ATH_NODE(ni)->an_halstats.ns_avgbrssi, rssi);
		/* fall thru... */
	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
		if (ic->ic_opmode == IEEE80211_M_IBSS &&
		    ic->ic_state == IEEE80211_S_RUN) {
			struct ath_hal *ah = sc->sc_ah;
			/* XXX extend rstamp */
			u_int64_t tsf = ath_hal_gettsf64(ah);

			/*
			 * Handle ibss merge as needed; check the tsf on the
			 * frame before attempting the merge.  The 802.11 spec
			 * says the station should change it's bssid to match
			 * the oldest station with the same ssid, where oldest
			 * is determined by the tsf.
			 */
			if (le64toh(ni->ni_tstamp.tsf) >= tsf &&
			    ieee80211_ibss_merge(ic, ni))
				ath_hal_setassocid(ah, ic->ic_bss->ni_bssid, 0);
		}
		break;
	}
}

/*
 * Set the default antenna.
 */
static void
ath_setdefantenna(struct ath_softc *sc, u_int antenna)
{
	struct ath_hal *ah = sc->sc_ah;

	/* XXX block beacon interrupts */
	ath_hal_setdefantenna(ah, antenna);
	if (sc->sc_defant != antenna)
		sc->sc_stats.ast_ant_defswitch++;
	sc->sc_defant = antenna;
	sc->sc_rxotherant = 0;	/* restart the "other antenna" streak count */
}

/*
 * Receive processing task: walk the rx buffer list draining frames
 * the hardware has completed, count/handle errors, feed radiotap
 * taps, manage rx antenna diversity, and pass good frames to
 * ieee80211_input().  Each drained buffer is put back on the list
 * and rearmed via ath_rxbuf_init().
 */
static void
ath_rx_proc(void *arg, int npending)
{
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_softc *sc = arg;
	struct ath_buf *bf;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_if;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct mbuf *m;
	struct ieee80211_node *ni;
	struct ath_node *an;
	int len;
	u_int phyerr;
	HAL_STATUS status;

	NET_LOCK_GIANT();		/* XXX */

	DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
	do {
		bf = STAILQ_FIRST(&sc->sc_rxbuf);
		if (bf == NULL) {		/* NB: shouldn't happen */
			if_printf(ifp, "%s: no buffer!\n", __func__);
			break;
		}
		ds = bf->bf_desc;
		if (ds->ds_link == bf->bf_daddr) {
			/* NB: never process the self-linked entry at the end */
			break;
		}
		m = bf->bf_m;
		if (m == NULL) {		/* NB: shouldn't happen */
			if_printf(ifp, "%s: no mbuf!\n", __func__);
			continue;
		}
		/* XXX sync descriptor memory */
		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on.  All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		status = ath_hal_rxprocdesc(ah, ds,
				bf->bf_daddr, PA2DESC(sc, ds->ds_link));
#ifdef AR_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(bf, status == HAL_OK);
#endif
		if (status == HAL_EINPROGRESS)
			break;
		STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
		if (ds->ds_rxstat.rs_more) {
			/*
			 * Frame spans multiple descriptors; this
			 * cannot happen yet as we don't support
			 * jumbograms.  If not in monitor mode,
			 * discard the frame.
			 */
			if (ic->ic_opmode != IEEE80211_M_MONITOR) {
				sc->sc_stats.ast_rx_toobig++;
				goto rx_next;
			}
			/* fall thru for monitor mode handling... */
		} else if (ds->ds_rxstat.rs_status != 0) {
			if (ds->ds_rxstat.rs_status & HAL_RXERR_CRC)
				sc->sc_stats.ast_rx_crcerr++;
			if (ds->ds_rxstat.rs_status & HAL_RXERR_FIFO)
				sc->sc_stats.ast_rx_fifoerr++;
			if (ds->ds_rxstat.rs_status & HAL_RXERR_PHY) {
				sc->sc_stats.ast_rx_phyerr++;
				phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
				sc->sc_stats.ast_rx_phy[phyerr]++;
				goto rx_next;
			}
			if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT) {
				/*
				 * Decrypt error.  If the error occurred
				 * because there was no hardware key, then
				 * let the frame through so the upper layers
				 * can process it.  This is necessary for 5210
				 * parts which have no way to setup a ``clear''
				 * key cache entry.
				 *
				 * XXX do key cache faulting
				 */
				if (ds->ds_rxstat.rs_keyix == HAL_RXKEYIX_INVALID)
					goto rx_accept;
				sc->sc_stats.ast_rx_badcrypt++;
			}
			if (ds->ds_rxstat.rs_status & HAL_RXERR_MIC) {
				sc->sc_stats.ast_rx_badmic++;
				/*
				 * Do minimal work required to hand off
				 * the 802.11 header for notifcation.
				 */
				/* XXX frag's and qos frames */
				len = ds->ds_rxstat.rs_datalen;
				if (len >= sizeof (struct ieee80211_frame)) {
					bus_dmamap_sync(sc->sc_dmat,
					    bf->bf_dmamap,
					    BUS_DMASYNC_POSTREAD);
					ieee80211_notify_michael_failure(ic,
					    mtod(m, struct ieee80211_frame *),
					    ds->ds_rxstat.rs_keyix);
				}
			}
			ifp->if_ierrors++;
			/*
			 * Reject error frames, we normally don't want
			 * to see them in monitor mode (in monitor mode
			 * allow through packets that have crypto problems).
			 */
			if ((ds->ds_rxstat.rs_status &~
				(HAL_RXERR_DECRYPT|HAL_RXERR_MIC)) ||
			    sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR)
				goto rx_next;
		}
rx_accept:
		KASSERT(m->m_next == NULL, ("m_next %p (3)", m->m_next));
		/*
		 * Sync and unmap the frame.  At this point we're
		 * committed to passing the mbuf somewhere so clear
		 * bf_m; this means a new sk_buff must be allocated
		 * when the rx descriptor is setup again to receive
		 * another frame.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		bf->bf_m = NULL;

		m->m_pkthdr.rcvif = ifp;
		len = ds->ds_rxstat.rs_datalen;
		m->m_pkthdr.len = m->m_len = len;

		if (sc->sc_softled)
			ath_update_led(sc);
		sc->sc_stats.ast_ant_rx[ds->ds_rxstat.rs_antenna]++;

		if (sc->sc_drvbpf) {
			/*
			 * Discard anything shorter than an ack or cts.
			 */
			if (len < IEEE80211_ACK_LEN) {
				DPRINTF(sc, ATH_DEBUG_RECV,
					"%s: runt packet %d\n",
					__func__, len);
				sc->sc_stats.ast_rx_tooshort++;
				m_freem(m);
				goto rx_next;
			}
			sc->sc_rx_th.wr_rate =
				sc->sc_hwmap[ds->ds_rxstat.rs_rate];
			sc->sc_rx_th.wr_antsignal = ds->ds_rxstat.rs_rssi;
			sc->sc_rx_th.wr_antenna = ds->ds_rxstat.rs_antenna;
			/* XXX TSF */

			bpf_mtap2(sc->sc_drvbpf,
				&sc->sc_rx_th, sc->sc_rx_th_len, m);
		}

		/*
		 * From this point on we assume the frame is at least
		 * as large as ieee80211_frame_min; verify that.
		 */
		if (len < IEEE80211_MIN_LEN) {
			DPRINTF(sc, ATH_DEBUG_RECV, "%s: short packet %d\n",
				__func__, len);
			sc->sc_stats.ast_rx_tooshort++;
			m_freem(m);
			goto rx_next;
		}

		if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
			ieee80211_dump_pkt(mtod(m, caddr_t), len,
				   sc->sc_hwmap[ds->ds_rxstat.rs_rate],
				   ds->ds_rxstat.rs_rssi);
		}

		/* trim the FCS before handing the frame up */
		m_adj(m, -IEEE80211_CRC_LEN);

		/*
		 * Locate the node for sender, track state, and then
		 * pass the (referenced) node up to the 802.11 layer
		 * for its use.
		 */
		ni = ieee80211_find_rxnode(ic,
			mtod(m, const struct ieee80211_frame_min *));

		/*
		 * Track rx rssi and do any rx antenna management.
		 */
		an = ATH_NODE(ni);
		ATH_RSSI_LPF(an->an_avgrssi, ds->ds_rxstat.rs_rssi);
		if (sc->sc_diversity) {
			/*
			 * When using fast diversity, change the default rx
			 * antenna if diversity chooses the other antenna 3
			 * times in a row.
			 */
			if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
				if (++sc->sc_rxotherant >= 3)
					ath_setdefantenna(sc,
						ds->ds_rxstat.rs_antenna);
			} else
				sc->sc_rxotherant = 0;
		}

		/*
		 * Send frame up for processing.
		 */
		ieee80211_input(ic, m, ni,
			ds->ds_rxstat.rs_rssi, ds->ds_rxstat.rs_tstamp);

		/*
		 * Reclaim node reference.
		 */
		ieee80211_free_node(ni);
rx_next:
		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
	} while (ath_rxbuf_init(sc, bf) == 0);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &ATH_NODE(ic->ic_bss)->an_halstats);

	NET_UNLOCK_GIANT();		/* XXX */
#undef PA2DESC
}

/*
 * Setup a h/w transmit queue.
 */
static struct ath_txq *
ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;
	int qnum;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
 */
	qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= N(sc->sc_txq)) {
		device_printf(sc->sc_dev,
			"hal qnum %u out of range, max %zu!\n",
			qnum, N(sc->sc_txq));
		ath_hal_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->sc_txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_depth = 0;
		txq->axq_intrcnt = 0;
		txq->axq_link = NULL;
		STAILQ_INIT(&txq->axq_q);
		ATH_TXQ_LOCK_INIT(sc, txq);
		sc->sc_txqsetup |= 1<<qnum;	/* mark queue in use */
	}
	return &sc->sc_txq[qnum];
#undef N
}

/*
 * Setup a hardware data transmit queue for the specified
 * access control.  The hal may not support all requested
 * queues in which case it will return a reference to a
 * previously setup queue.  We record the mapping from ac's
 * to h/w queues for use by ath_tx_start and also track
 * the set of h/w queues being used to optimize work in the
 * transmit interrupt handler and related routines.
 */
static int
ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	struct ath_txq *txq;

	if (ac >= N(sc->sc_ac2q)) {
		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
			ac, N(sc->sc_ac2q));
		return 0;
	}
	txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->sc_ac2q[ac] = txq;
		return 1;
	} else
		return 0;
#undef N
}

/*
 * Update WME parameters for a transmit queue.
 */
static int
ath_txq_update(struct ath_softc *sc, int ac)
{
/* WME stores cwmin/cwmax as exponents; hardware wants 2^v - 1 */
#define	ATH_EXPONENT_TO_VALUE(v)	((1<<v)-1)
/* WME txop limit is in 32us units; hardware burst time is in us */
#define	ATH_TXOP_TO_US(v)		(v<<5)
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_txq *txq = sc->sc_ac2q[ac];
	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;

	ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
	qi.tqi_aifs = wmep->wmep_aifsn;
	qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
	qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
	qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);

	if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
		device_printf(sc->sc_dev, "unable to update hardware queue "
			"parameters for %s traffic!\n",
			ieee80211_wme_acnames[ac]);
		return 0;
	} else {
		ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
		return 1;
	}
#undef ATH_TXOP_TO_US
#undef ATH_EXPONENT_TO_VALUE
}

/*
 * Callback from the 802.11 layer to update WME parameters.
 * Returns 0 on success or EIO if any queue update failed.
 */
static int
ath_wme_update(struct ieee80211com *ic)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	return !ath_txq_update(sc, WME_AC_BE) ||
	    !ath_txq_update(sc, WME_AC_BK) ||
	    !ath_txq_update(sc, WME_AC_VI) ||
	    !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
}

/*
 * Reclaim resources for a setup queue: release the h/w queue,
 * destroy its lock, and clear its bit in the setup mask.
 */
static void
ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{

	ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	ATH_TXQ_LOCK_DESTROY(txq);
	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
}

/*
 * Reclaim all tx queue resources.
 */
static void
ath_tx_cleanup(struct ath_softc *sc)
{
	int i;

	ATH_TXBUF_LOCK_DESTROY(sc);
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
}

/*
 * Prepare and queue a frame for transmission: apply crypto
 * encapsulation if needed, load the DMA map (linearizing mbuf
 * chains that need too many descriptors), choose rate, h/w queue
 * and descriptor flags, and fill in the tx descriptors.
 */
static int
ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf,
    struct mbuf *m0)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = &sc->sc_if;
	int i, error, iswep, ismcast, keyix, hdrlen, pktlen, try0;
	u_int8_t rix, txrate, ctsrate;
	u_int8_t cix = 0xff;		/* NB: silence compiler */
	struct ath_desc *ds, *ds0;
	struct ath_txq *txq;
	struct mbuf *m;
	struct ieee80211_frame *wh;
	u_int subtype, flags, ctsduration;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	HAL_BOOL shortPreamble;
	struct ath_node *an;

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include by any
	 * pad bytes; deduct it here.
	 */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3);

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ic, ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			return EIO;
		}
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index.
When 2951 * a s/w mic is done the frame will have had any mic 2952 * added to it prior to entry so skb->len above will 2953 * account for it. Otherwise we need to add it to the 2954 * packet length. 2955 */ 2956 cip = k->wk_cipher; 2957 hdrlen += cip->ic_header; 2958 pktlen += cip->ic_header + cip->ic_trailer; 2959 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0) 2960 pktlen += cip->ic_miclen; 2961 keyix = k->wk_keyix; 2962 2963 /* packet header may have moved, reset our local pointer */ 2964 wh = mtod(m0, struct ieee80211_frame *); 2965 } else 2966 keyix = HAL_TXKEYIX_INVALID; 2967 2968 pktlen += IEEE80211_CRC_LEN; 2969 2970 /* 2971 * Load the DMA map so any coalescing is done. This 2972 * also calculates the number of descriptors we need. 2973 */ 2974 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0, 2975 ath_mbuf_load_cb, bf, 2976 BUS_DMA_NOWAIT); 2977 if (error == EFBIG) { 2978 /* XXX packet requires too many descriptors */ 2979 bf->bf_nseg = ATH_TXDESC+1; 2980 } else if (error != 0) { 2981 sc->sc_stats.ast_tx_busdma++; 2982 m_freem(m0); 2983 return error; 2984 } 2985 /* 2986 * Discard null packets and check for packets that 2987 * require too many TX descriptors. We try to convert 2988 * the latter to a cluster. 
2989 */ 2990 if (bf->bf_nseg > ATH_TXDESC) { /* too many desc's, linearize */ 2991 sc->sc_stats.ast_tx_linear++; 2992 MGETHDR(m, M_DONTWAIT, MT_DATA); 2993 if (m == NULL) { 2994 sc->sc_stats.ast_tx_nombuf++; 2995 m_freem(m0); 2996 return ENOMEM; 2997 } 2998 M_MOVE_PKTHDR(m, m0); 2999 MCLGET(m, M_DONTWAIT); 3000 if ((m->m_flags & M_EXT) == 0) { 3001 sc->sc_stats.ast_tx_nomcl++; 3002 m_freem(m0); 3003 m_free(m); 3004 return ENOMEM; 3005 } 3006 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t)); 3007 m_freem(m0); 3008 m->m_len = m->m_pkthdr.len; 3009 m0 = m; 3010 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0, 3011 ath_mbuf_load_cb, bf, 3012 BUS_DMA_NOWAIT); 3013 if (error != 0) { 3014 sc->sc_stats.ast_tx_busdma++; 3015 m_freem(m0); 3016 return error; 3017 } 3018 KASSERT(bf->bf_nseg == 1, 3019 ("ath_tx_start: packet not one segment; nseg %u", 3020 bf->bf_nseg)); 3021 } else if (bf->bf_nseg == 0) { /* null packet, discard */ 3022 sc->sc_stats.ast_tx_nodata++; 3023 m_freem(m0); 3024 return EIO; 3025 } 3026 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n", __func__, m0, pktlen); 3027 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 3028 bf->bf_m = m0; 3029 bf->bf_node = ni; /* NB: held reference */ 3030 3031 /* setup descriptors */ 3032 ds = bf->bf_desc; 3033 rt = sc->sc_currates; 3034 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 3035 3036 /* 3037 * NB: the 802.11 layer marks whether or not we should 3038 * use short preamble based on the current mode and 3039 * negotiated parameters. 
3040 */ 3041 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 3042 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { 3043 shortPreamble = AH_TRUE; 3044 sc->sc_stats.ast_tx_shortpre++; 3045 } else { 3046 shortPreamble = AH_FALSE; 3047 } 3048 3049 an = ATH_NODE(ni); 3050 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 3051 /* 3052 * Calculate Atheros packet type from IEEE80211 packet header, 3053 * setup for rate calculations, and select h/w transmit queue. 3054 */ 3055 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { 3056 case IEEE80211_FC0_TYPE_MGT: 3057 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3058 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) 3059 atype = HAL_PKT_TYPE_BEACON; 3060 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 3061 atype = HAL_PKT_TYPE_PROBE_RESP; 3062 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) 3063 atype = HAL_PKT_TYPE_ATIM; 3064 else 3065 atype = HAL_PKT_TYPE_NORMAL; /* XXX */ 3066 rix = 0; /* XXX lowest rate */ 3067 try0 = ATH_TXMAXTRY; 3068 if (shortPreamble) 3069 txrate = an->an_tx_mgtratesp; 3070 else 3071 txrate = an->an_tx_mgtrate; 3072 /* NB: force all management frames to highest queue */ 3073 if (ni->ni_flags & IEEE80211_NODE_QOS) { 3074 /* NB: force all management frames to highest queue */ 3075 txq = sc->sc_ac2q[WME_AC_VO]; 3076 } else 3077 txq = sc->sc_ac2q[WME_AC_BE]; 3078 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 3079 break; 3080 case IEEE80211_FC0_TYPE_CTL: 3081 atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */ 3082 rix = 0; /* XXX lowest rate */ 3083 try0 = ATH_TXMAXTRY; 3084 if (shortPreamble) 3085 txrate = an->an_tx_mgtratesp; 3086 else 3087 txrate = an->an_tx_mgtrate; 3088 /* NB: force all ctl frames to highest queue */ 3089 if (ni->ni_flags & IEEE80211_NODE_QOS) { 3090 /* NB: force all ctl frames to highest queue */ 3091 txq = sc->sc_ac2q[WME_AC_VO]; 3092 } else 3093 txq = sc->sc_ac2q[WME_AC_BE]; 3094 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 3095 break; 3096 
case IEEE80211_FC0_TYPE_DATA: 3097 atype = HAL_PKT_TYPE_NORMAL; /* default */ 3098 /* 3099 * Data frames; consult the rate control module. 3100 */ 3101 ath_rate_findrate(sc, an, shortPreamble, pktlen, 3102 &rix, &try0, &txrate); 3103 /* 3104 * Default all non-QoS traffic to the background queue. 3105 */ 3106 if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) { 3107 u_int pri = M_WME_GETAC(m0); 3108 txq = sc->sc_ac2q[pri]; 3109 if (ic->ic_wme.wme_wmeChanParams.cap_wmeParams[pri].wmep_noackPolicy) 3110 flags |= HAL_TXDESC_NOACK; 3111 } else 3112 txq = sc->sc_ac2q[WME_AC_BE]; 3113 break; 3114 default: 3115 if_printf(ifp, "bogus frame type 0x%x (%s)\n", 3116 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); 3117 /* XXX statistic */ 3118 m_freem(m0); 3119 return EIO; 3120 } 3121 3122 /* 3123 * When servicing one or more stations in power-save mode 3124 * multicast frames must be buffered until after the beacon. 3125 * We use the CAB queue for that. 3126 */ 3127 if (ismcast && ic->ic_ps_sta) { 3128 txq = sc->sc_cabq; 3129 /* XXX? more bit in 802.11 frame header */ 3130 } 3131 3132 /* 3133 * Calculate miscellaneous flags. 3134 */ 3135 if (ismcast) { 3136 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ 3137 sc->sc_stats.ast_tx_noack++; 3138 } else if (pktlen > ic->ic_rtsthreshold) { 3139 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ 3140 cix = rt->info[rix].controlRate; 3141 sc->sc_stats.ast_tx_rts++; 3142 } 3143 3144 /* 3145 * If 802.11g protection is enabled, determine whether 3146 * to use RTS/CTS or just CTS. Note that this is only 3147 * done for OFDM unicast frames. 
3148 */ 3149 if ((ic->ic_flags & IEEE80211_F_USEPROT) && 3150 rt->info[rix].phy == IEEE80211_T_OFDM && 3151 (flags & HAL_TXDESC_NOACK) == 0) { 3152 /* XXX fragments must use CCK rates w/ protection */ 3153 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 3154 flags |= HAL_TXDESC_RTSENA; 3155 else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 3156 flags |= HAL_TXDESC_CTSENA; 3157 cix = rt->info[sc->sc_protrix].controlRate; 3158 sc->sc_stats.ast_tx_protect++; 3159 } 3160 3161 /* 3162 * Calculate duration. This logically belongs in the 802.11 3163 * layer but it lacks sufficient information to calculate it. 3164 */ 3165 if ((flags & HAL_TXDESC_NOACK) == 0 && 3166 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { 3167 u_int16_t dur; 3168 /* 3169 * XXX not right with fragmentation. 3170 */ 3171 if (shortPreamble) 3172 dur = rt->info[rix].spAckDuration; 3173 else 3174 dur = rt->info[rix].lpAckDuration; 3175 *(u_int16_t *)wh->i_dur = htole16(dur); 3176 } 3177 3178 /* 3179 * Calculate RTS/CTS rate and duration if needed. 3180 */ 3181 ctsduration = 0; 3182 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) { 3183 /* 3184 * CTS transmit rate is derived from the transmit rate 3185 * by looking in the h/w rate table. We must also factor 3186 * in whether or not a short preamble is to be used. 3187 */ 3188 /* NB: cix is set above where RTS/CTS is enabled */ 3189 KASSERT(cix != 0xff, ("cix not setup")); 3190 ctsrate = rt->info[cix].rateCode; 3191 /* 3192 * Compute the transmit duration based on the frame 3193 * size and the size of an ACK frame. We call into the 3194 * HAL to do the computation since it depends on the 3195 * characteristics of the actual PHY being used. 3196 * 3197 * NB: CTS is assumed the same size as an ACK so we can 3198 * use the precalculated ACK durations. 
3199 */ 3200 if (shortPreamble) { 3201 ctsrate |= rt->info[cix].shortPreamble; 3202 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 3203 ctsduration += rt->info[cix].spAckDuration; 3204 ctsduration += ath_hal_computetxtime(ah, 3205 rt, pktlen, rix, AH_TRUE); 3206 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 3207 ctsduration += rt->info[cix].spAckDuration; 3208 } else { 3209 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 3210 ctsduration += rt->info[cix].lpAckDuration; 3211 ctsduration += ath_hal_computetxtime(ah, 3212 rt, pktlen, rix, AH_FALSE); 3213 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 3214 ctsduration += rt->info[cix].lpAckDuration; 3215 } 3216 /* 3217 * Must disable multi-rate retry when using RTS/CTS. 3218 */ 3219 try0 = ATH_TXMAXTRY; 3220 } else 3221 ctsrate = 0; 3222 3223 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 3224 ieee80211_dump_pkt(mtod(m0, caddr_t), m0->m_len, 3225 sc->sc_hwmap[txrate], -1); 3226 3227 if (ic->ic_rawbpf) 3228 bpf_mtap(ic->ic_rawbpf, m0); 3229 if (sc->sc_drvbpf) { 3230 sc->sc_tx_th.wt_flags = 0; 3231 if (shortPreamble) 3232 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 3233 if (iswep) 3234 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 3235 sc->sc_tx_th.wt_rate = ni->ni_rates.rs_rates[ni->ni_txrate]; 3236 sc->sc_tx_th.wt_txpower = ni->ni_txpower; 3237 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 3238 3239 bpf_mtap2(sc->sc_drvbpf, 3240 &sc->sc_tx_th, sc->sc_tx_th_len, m0); 3241 } 3242 3243 /* 3244 * Determine if a tx interrupt should be generated for 3245 * this descriptor. We take a tx interrupt to reap 3246 * descriptors when the h/w hits an EOL condition or 3247 * when the descriptor is specifically marked to generate 3248 * an interrupt. We periodically mark descriptors in this 3249 * way to insure timely replenishing of the supply needed 3250 * for sending frames. 
Defering interrupts reduces system 3251 * load and potentially allows more concurrent work to be 3252 * done but if done to aggressively can cause senders to 3253 * backup. 3254 * 3255 * NB: use >= to deal with sc_txintrperiod changing 3256 * dynamically through sysctl. 3257 */ 3258 if (flags & HAL_TXDESC_INTREQ) { 3259 txq->axq_intrcnt = 0; 3260 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) { 3261 flags |= HAL_TXDESC_INTREQ; 3262 txq->axq_intrcnt = 0; 3263 } 3264 3265 /* 3266 * Formulate first tx descriptor with tx controls. 3267 */ 3268 /* XXX check return value? */ 3269 ath_hal_setuptxdesc(ah, ds 3270 , pktlen /* packet length */ 3271 , hdrlen /* header length */ 3272 , atype /* Atheros packet type */ 3273 , ni->ni_txpower /* txpower */ 3274 , txrate, try0 /* series 0 rate/tries */ 3275 , keyix /* key cache index */ 3276 , sc->sc_txantenna /* antenna mode */ 3277 , flags /* flags */ 3278 , ctsrate /* rts/cts rate */ 3279 , ctsduration /* rts/cts duration */ 3280 ); 3281 /* 3282 * Setup the multi-rate retry state only when we're 3283 * going to use it. This assumes ath_hal_setuptxdesc 3284 * initializes the descriptors (so we don't have to) 3285 * when the hardware supports multi-rate retry and 3286 * we don't use it. 3287 */ 3288 if (try0 != ATH_TXMAXTRY) 3289 ath_rate_setupxtxdesc(sc, an, ds, shortPreamble, rix); 3290 3291 /* 3292 * Fillin the remainder of the descriptor info. 
3293 */ 3294 ds0 = ds; 3295 for (i = 0; i < bf->bf_nseg; i++, ds++) { 3296 ds->ds_data = bf->bf_segs[i].ds_addr; 3297 if (i == bf->bf_nseg - 1) 3298 ds->ds_link = 0; 3299 else 3300 ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1); 3301 ath_hal_filltxdesc(ah, ds 3302 , bf->bf_segs[i].ds_len /* segment length */ 3303 , i == 0 /* first segment */ 3304 , i == bf->bf_nseg - 1 /* last segment */ 3305 , ds0 /* first descriptor */ 3306 ); 3307 DPRINTF(sc, ATH_DEBUG_XMIT, 3308 "%s: %d: %08x %08x %08x %08x %08x %08x\n", 3309 __func__, i, ds->ds_link, ds->ds_data, 3310 ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]); 3311 } 3312#if 0 3313 if ((flags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) && 3314 !ath_hal_updateCTSForBursting(ah, ds 3315 , txq->axq_linkbuf != NULL ? 3316 txq->axq_linkbuf->bf_desc : NULL 3317 , txq->axq_lastdsWithCTS 3318 , txq->axq_gatingds 3319 , IEEE80211_TXOP_TO_US(ic->ic_chanParams.cap_wmeParams[skb->priority].wmep_txopLimit) 3320 , ath_hal_computetxtime(ah, rt, IEEE80211_ACK_LEN, cix, AH_TRUE))) { 3321 ATH_TXQ_LOCK(txq); 3322 txq->axq_lastdsWithCTS = ds; 3323 /* set gating Desc to final desc */ 3324 txq->axq_gatingds = (struct ath_desc *)txq->axq_link; 3325 ATH_TXQ_UNLOCK(txq); 3326 } 3327#endif 3328 /* 3329 * Insert the frame on the outbound list and 3330 * pass it on to the hardware. 
3331 */ 3332 ATH_TXQ_LOCK(txq); 3333 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 3334 if (txq->axq_link == NULL) { 3335 ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 3336 DPRINTF(sc, ATH_DEBUG_XMIT, 3337 "%s: TXDP[%u] = %p (%p) depth %d\n", __func__, 3338 txq->axq_qnum, (caddr_t)bf->bf_daddr, bf->bf_desc, 3339 txq->axq_depth); 3340 } else { 3341 *txq->axq_link = bf->bf_daddr; 3342 DPRINTF(sc, ATH_DEBUG_XMIT, 3343 "%s: link[%u](%p)=%p (%p) depth %d\n", __func__, 3344 txq->axq_qnum, txq->axq_link, 3345 (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth); 3346 } 3347 txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link; 3348 ATH_TXQ_UNLOCK(txq); 3349 3350 if (sc->sc_softled) 3351 ath_update_led(sc); 3352 3353 /* 3354 * The CAB queue is started from the SWBA handler since 3355 * frames only go out on DTIM and to avoid possible races. 3356 */ 3357 if (txq != sc->sc_cabq) 3358 ath_hal_txstart(ah, txq->axq_qnum); 3359 return 0; 3360} 3361 3362/* 3363 * Process completed xmit descriptors from the specified queue. 
3364 */ 3365static void 3366ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) 3367{ 3368 struct ath_hal *ah = sc->sc_ah; 3369 struct ieee80211com *ic = &sc->sc_ic; 3370 struct ath_buf *bf; 3371 struct ath_desc *ds; 3372 struct ieee80211_node *ni; 3373 struct ath_node *an; 3374 int sr, lr, pri; 3375 HAL_STATUS status; 3376 3377 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", 3378 __func__, txq->axq_qnum, 3379 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 3380 txq->axq_link); 3381 for (;;) { 3382 ATH_TXQ_LOCK(txq); 3383 txq->axq_intrcnt = 0; /* reset periodic desc intr count */ 3384 bf = STAILQ_FIRST(&txq->axq_q); 3385 if (bf == NULL) { 3386 txq->axq_link = NULL; 3387 ATH_TXQ_UNLOCK(txq); 3388 break; 3389 } 3390 /* only the last descriptor is needed */ 3391 ds = &bf->bf_desc[bf->bf_nseg - 1]; 3392 status = ath_hal_txprocdesc(ah, ds); 3393#ifdef AR_DEBUG 3394 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) 3395 ath_printtxbuf(bf, status == HAL_OK); 3396#endif 3397 if (status == HAL_EINPROGRESS) { 3398 ATH_TXQ_UNLOCK(txq); 3399 break; 3400 } 3401#if 0 3402 if (bf->bf_desc == txq->axq_lastdsWithCTS) 3403 txq->axq_lastdsWithCTS = NULL; 3404 if (ds == txq->axq_gatingds) 3405 txq->axq_gatingds = NULL; 3406#endif 3407 ATH_TXQ_REMOVE_HEAD(txq, bf_list); 3408 ATH_TXQ_UNLOCK(txq); 3409 3410 ni = bf->bf_node; 3411 if (ni != NULL) { 3412 an = ATH_NODE(ni); 3413 if (ds->ds_txstat.ts_status == 0) { 3414 u_int8_t txant = ds->ds_txstat.ts_antenna; 3415 sc->sc_stats.ast_ant_tx[txant]++; 3416 sc->sc_ant_tx[txant]++; 3417 if (ds->ds_txstat.ts_rate & HAL_TXSTAT_ALTRATE) 3418 sc->sc_stats.ast_tx_altrate++; 3419 sc->sc_stats.ast_tx_rssi = 3420 ds->ds_txstat.ts_rssi; 3421 ATH_RSSI_LPF(an->an_halstats.ns_avgtxrssi, 3422 ds->ds_txstat.ts_rssi); 3423 pri = M_WME_GETAC(bf->bf_m); 3424 if (pri >= WME_AC_VO) 3425 ic->ic_wme.wme_hipri_traffic++; 3426 ni->ni_inact = ni->ni_inact_reload; 3427 } else { 3428 if (ds->ds_txstat.ts_status & HAL_TXERR_XRETRY) 3429 
sc->sc_stats.ast_tx_xretries++; 3430 if (ds->ds_txstat.ts_status & HAL_TXERR_FIFO) 3431 sc->sc_stats.ast_tx_fifoerr++; 3432 if (ds->ds_txstat.ts_status & HAL_TXERR_FILT) 3433 sc->sc_stats.ast_tx_filtered++; 3434 } 3435 sr = ds->ds_txstat.ts_shortretry; 3436 lr = ds->ds_txstat.ts_longretry; 3437 sc->sc_stats.ast_tx_shortretry += sr; 3438 sc->sc_stats.ast_tx_longretry += lr; 3439 /* 3440 * Hand the descriptor to the rate control algorithm. 3441 */ 3442 ath_rate_tx_complete(sc, an, ds); 3443 /* 3444 * Reclaim reference to node. 3445 * 3446 * NB: the node may be reclaimed here if, for example 3447 * this is a DEAUTH message that was sent and the 3448 * node was timed out due to inactivity. 3449 */ 3450 ieee80211_free_node(ni); 3451 } 3452 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 3453 BUS_DMASYNC_POSTWRITE); 3454 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3455 m_freem(bf->bf_m); 3456 bf->bf_m = NULL; 3457 bf->bf_node = NULL; 3458 3459 ATH_TXBUF_LOCK(sc); 3460 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 3461 ATH_TXBUF_UNLOCK(sc); 3462 } 3463} 3464 3465/* 3466 * Deferred processing of transmit interrupt; special-cased 3467 * for a single hardware transmit queue (e.g. 5210 and 5211). 3468 */ 3469static void 3470ath_tx_proc_q0(void *arg, int npending) 3471{ 3472 struct ath_softc *sc = arg; 3473 struct ifnet *ifp = &sc->sc_if; 3474 3475 ath_tx_processq(sc, &sc->sc_txq[0]); 3476 ath_tx_processq(sc, sc->sc_cabq); 3477 ifp->if_flags &= ~IFF_OACTIVE; 3478 sc->sc_tx_timer = 0; 3479 3480 ath_start(ifp); 3481} 3482 3483/* 3484 * Deferred processing of transmit interrupt; special-cased 3485 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). 3486 */ 3487static void 3488ath_tx_proc_q0123(void *arg, int npending) 3489{ 3490 struct ath_softc *sc = arg; 3491 struct ifnet *ifp = &sc->sc_if; 3492 3493 /* 3494 * Process each active queue. 
3495 */ 3496 ath_tx_processq(sc, &sc->sc_txq[0]); 3497 ath_tx_processq(sc, &sc->sc_txq[1]); 3498 ath_tx_processq(sc, &sc->sc_txq[2]); 3499 ath_tx_processq(sc, &sc->sc_txq[3]); 3500 ath_tx_processq(sc, sc->sc_cabq); 3501 3502 ifp->if_flags &= ~IFF_OACTIVE; 3503 sc->sc_tx_timer = 0; 3504 3505 ath_start(ifp); 3506} 3507 3508/* 3509 * Deferred processing of transmit interrupt. 3510 */ 3511static void 3512ath_tx_proc(void *arg, int npending) 3513{ 3514 struct ath_softc *sc = arg; 3515 struct ifnet *ifp = &sc->sc_if; 3516 int i; 3517 3518 /* 3519 * Process each active queue. 3520 */ 3521 /* XXX faster to read ISR_S0_S and ISR_S1_S to determine q's? */ 3522 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 3523 if (ATH_TXQ_SETUP(sc, i)) 3524 ath_tx_processq(sc, &sc->sc_txq[i]); 3525 3526 ifp->if_flags &= ~IFF_OACTIVE; 3527 sc->sc_tx_timer = 0; 3528 3529 ath_start(ifp); 3530} 3531 3532static void 3533ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) 3534{ 3535 struct ath_hal *ah = sc->sc_ah; 3536 struct ieee80211_node *ni; 3537 struct ath_buf *bf; 3538 3539 /* 3540 * NB: this assumes output has been stopped and 3541 * we do not need to block ath_tx_tasklet 3542 */ 3543 for (;;) { 3544 ATH_TXQ_LOCK(txq); 3545 bf = STAILQ_FIRST(&txq->axq_q); 3546 if (bf == NULL) { 3547 txq->axq_link = NULL; 3548 ATH_TXQ_UNLOCK(txq); 3549 break; 3550 } 3551 ATH_TXQ_REMOVE_HEAD(txq, bf_list); 3552 ATH_TXQ_UNLOCK(txq); 3553#ifdef AR_DEBUG 3554 if (sc->sc_debug & ATH_DEBUG_RESET) 3555 ath_printtxbuf(bf, 3556 ath_hal_txprocdesc(ah, bf->bf_desc) == HAL_OK); 3557#endif /* AR_DEBUG */ 3558 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3559 m_freem(bf->bf_m); 3560 bf->bf_m = NULL; 3561 ni = bf->bf_node; 3562 bf->bf_node = NULL; 3563 if (ni != NULL) { 3564 /* 3565 * Reclaim node reference. 
3566 */ 3567 ieee80211_free_node(ni); 3568 } 3569 ATH_TXBUF_LOCK(sc); 3570 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 3571 ATH_TXBUF_UNLOCK(sc); 3572 } 3573} 3574 3575static void 3576ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) 3577{ 3578 struct ath_hal *ah = sc->sc_ah; 3579 3580 (void) ath_hal_stoptxdma(ah, txq->axq_qnum); 3581 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 3582 __func__, txq->axq_qnum, 3583 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), 3584 txq->axq_link); 3585} 3586 3587/* 3588 * Drain the transmit queues and reclaim resources. 3589 */ 3590static void 3591ath_draintxq(struct ath_softc *sc) 3592{ 3593 struct ath_hal *ah = sc->sc_ah; 3594 struct ifnet *ifp = &sc->sc_if; 3595 int i; 3596 3597 /* XXX return value */ 3598 if (!sc->sc_invalid) { 3599 /* don't touch the hardware if marked invalid */ 3600 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); 3601 DPRINTF(sc, ATH_DEBUG_RESET, 3602 "%s: beacon queue %p\n", __func__, 3603 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq)); 3604 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 3605 if (ATH_TXQ_SETUP(sc, i)) 3606 ath_tx_stopdma(sc, &sc->sc_txq[i]); 3607 } 3608 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 3609 if (ATH_TXQ_SETUP(sc, i)) 3610 ath_tx_draintxq(sc, &sc->sc_txq[i]); 3611 ifp->if_flags &= ~IFF_OACTIVE; 3612 sc->sc_tx_timer = 0; 3613} 3614 3615/* 3616 * Disable the receive h/w in preparation for a reset. 
3617 */ 3618static void 3619ath_stoprecv(struct ath_softc *sc) 3620{ 3621#define PA2DESC(_sc, _pa) \ 3622 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ 3623 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) 3624 struct ath_hal *ah = sc->sc_ah; 3625 3626 ath_hal_stoppcurecv(ah); /* disable PCU */ 3627 ath_hal_setrxfilter(ah, 0); /* clear recv filter */ 3628 ath_hal_stopdmarecv(ah); /* disable DMA engine */ 3629 DELAY(3000); /* 3ms is long enough for 1 frame */ 3630#ifdef AR_DEBUG 3631 if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) { 3632 struct ath_buf *bf; 3633 3634 printf("%s: rx queue %p, link %p\n", __func__, 3635 (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink); 3636 STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 3637 struct ath_desc *ds = bf->bf_desc; 3638 HAL_STATUS status = ath_hal_rxprocdesc(ah, ds, 3639 bf->bf_daddr, PA2DESC(sc, ds->ds_link)); 3640 if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL)) 3641 ath_printrxbuf(bf, status == HAL_OK); 3642 } 3643 } 3644#endif 3645 sc->sc_rxlink = NULL; /* just in case */ 3646#undef PA2DESC 3647} 3648 3649/* 3650 * Enable the receive h/w following a reset. 3651 */ 3652static int 3653ath_startrecv(struct ath_softc *sc) 3654{ 3655 struct ath_hal *ah = sc->sc_ah; 3656 struct ath_buf *bf; 3657 3658 sc->sc_rxlink = NULL; 3659 STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 3660 int error = ath_rxbuf_init(sc, bf); 3661 if (error != 0) { 3662 DPRINTF(sc, ATH_DEBUG_RECV, 3663 "%s: ath_rxbuf_init failed %d\n", 3664 __func__, error); 3665 return error; 3666 } 3667 } 3668 3669 bf = STAILQ_FIRST(&sc->sc_rxbuf); 3670 ath_hal_putrxbuf(ah, bf->bf_daddr); 3671 ath_hal_rxena(ah); /* enable recv descriptors */ 3672 ath_mode_init(sc); /* set filters, etc. */ 3673 ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */ 3674 return 0; 3675} 3676 3677/* 3678 * Update internal state after a channel change. 
3679 */ 3680static void 3681ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) 3682{ 3683 struct ieee80211com *ic = &sc->sc_ic; 3684 enum ieee80211_phymode mode; 3685 3686 /* 3687 * Change channels and update the h/w rate map 3688 * if we're switching; e.g. 11a to 11b/g. 3689 */ 3690 mode = ieee80211_chan2mode(ic, chan); 3691 if (mode != sc->sc_curmode) 3692 ath_setcurmode(sc, mode); 3693 /* 3694 * Update BPF state. 3695 */ 3696 sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq = 3697 htole16(chan->ic_freq); 3698 sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags = 3699 htole16(chan->ic_flags); 3700} 3701 3702/* 3703 * Set/change channels. If the channel is really being changed, 3704 * it's done by reseting the chip. To accomplish this we must 3705 * first cleanup any pending DMA, then restart stuff after a la 3706 * ath_init. 3707 */ 3708static int 3709ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 3710{ 3711 struct ath_hal *ah = sc->sc_ah; 3712 struct ieee80211com *ic = &sc->sc_ic; 3713 HAL_CHANNEL hchan; 3714 3715 /* 3716 * Convert to a HAL channel description with 3717 * the flags constrained to reflect the current 3718 * operating mode. 3719 */ 3720 hchan.channel = chan->ic_freq; 3721 hchan.channelFlags = ath_chan2flags(ic, chan); 3722 3723 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz) -> %u (%u MHz)\n", 3724 __func__, 3725 ath_hal_mhz2ieee(sc->sc_curchan.channel, 3726 sc->sc_curchan.channelFlags), 3727 sc->sc_curchan.channel, 3728 ath_hal_mhz2ieee(hchan.channel, hchan.channelFlags), hchan.channel); 3729 if (hchan.channel != sc->sc_curchan.channel || 3730 hchan.channelFlags != sc->sc_curchan.channelFlags) { 3731 HAL_STATUS status; 3732 3733 /* 3734 * To switch channels clear any pending DMA operations; 3735 * wait long enough for the RX fifo to drain, reset the 3736 * hardware at the new frequency, and then re-enable 3737 * the relevant bits of the h/w. 
3738 */ 3739 ath_hal_intrset(ah, 0); /* disable interrupts */ 3740 ath_draintxq(sc); /* clear pending tx frames */ 3741 ath_stoprecv(sc); /* turn off frame recv */ 3742 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE, &status)) { 3743 if_printf(ic->ic_ifp, "ath_chan_set: unable to reset " 3744 "channel %u (%u Mhz)\n", 3745 ieee80211_chan2ieee(ic, chan), chan->ic_freq); 3746 return EIO; 3747 } 3748 sc->sc_curchan = hchan; 3749 ath_update_txpow(sc); /* update tx power state */ 3750 3751 /* 3752 * Re-enable rx framework. 3753 */ 3754 if (ath_startrecv(sc) != 0) { 3755 if_printf(ic->ic_ifp, 3756 "ath_chan_set: unable to restart recv logic\n"); 3757 return EIO; 3758 } 3759 3760 /* 3761 * Change channels and update the h/w rate map 3762 * if we're switching; e.g. 11a to 11b/g. 3763 */ 3764 ic->ic_ibss_chan = chan; 3765 ath_chan_change(sc, chan); 3766 3767 /* 3768 * Re-enable interrupts. 3769 */ 3770 ath_hal_intrset(ah, sc->sc_imask); 3771 } 3772 return 0; 3773} 3774 3775static void 3776ath_next_scan(void *arg) 3777{ 3778 struct ath_softc *sc = arg; 3779 struct ieee80211com *ic = &sc->sc_ic; 3780 3781 if (ic->ic_state == IEEE80211_S_SCAN) 3782 ieee80211_next_scan(ic); 3783} 3784 3785/* 3786 * Periodically recalibrate the PHY to account 3787 * for temperature/environment changes. 3788 */ 3789static void 3790ath_calibrate(void *arg) 3791{ 3792 struct ath_softc *sc = arg; 3793 struct ath_hal *ah = sc->sc_ah; 3794 3795 sc->sc_stats.ast_per_cal++; 3796 3797 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: channel %u/%x\n", 3798 __func__, sc->sc_curchan.channel, sc->sc_curchan.channelFlags); 3799 3800 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 3801 /* 3802 * Rfgain is out of bounds, reset the chip 3803 * to load new gain values. 
3804 */ 3805 sc->sc_stats.ast_per_rfgain++; 3806 ath_reset(&sc->sc_if); 3807 } 3808 if (!ath_hal_calibrate(ah, &sc->sc_curchan)) { 3809 DPRINTF(sc, ATH_DEBUG_ANY, 3810 "%s: calibration of channel %u failed\n", 3811 __func__, sc->sc_curchan.channel); 3812 sc->sc_stats.ast_per_calfail++; 3813 } 3814 callout_reset(&sc->sc_cal_ch, ath_calinterval * hz, ath_calibrate, sc); 3815} 3816 3817static int 3818ath_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg) 3819{ 3820 struct ifnet *ifp = ic->ic_ifp; 3821 struct ath_softc *sc = ifp->if_softc; 3822 struct ath_hal *ah = sc->sc_ah; 3823 struct ieee80211_node *ni; 3824 int i, error; 3825 const u_int8_t *bssid; 3826 u_int32_t rfilt; 3827 static const HAL_LED_STATE leds[] = { 3828 HAL_LED_INIT, /* IEEE80211_S_INIT */ 3829 HAL_LED_SCAN, /* IEEE80211_S_SCAN */ 3830 HAL_LED_AUTH, /* IEEE80211_S_AUTH */ 3831 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ 3832 HAL_LED_RUN, /* IEEE80211_S_RUN */ 3833 }; 3834 3835 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, 3836 ieee80211_state_name[ic->ic_state], 3837 ieee80211_state_name[nstate]); 3838 3839 callout_stop(&sc->sc_scan_ch); 3840 callout_stop(&sc->sc_cal_ch); 3841 ath_hal_setledstate(ah, leds[nstate]); /* set LED */ 3842 3843 if (nstate == IEEE80211_S_INIT) { 3844 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 3845 ath_hal_intrset(ah, sc->sc_imask); 3846 /* 3847 * Notify the rate control algorithm. 
3848 */ 3849 ath_rate_newstate(sc, nstate); 3850 goto done; 3851 } 3852 ni = ic->ic_bss; 3853 error = ath_chan_set(sc, ni->ni_chan); 3854 if (error != 0) 3855 goto bad; 3856 rfilt = ath_calcrxfilter(sc, nstate); 3857 if (nstate == IEEE80211_S_SCAN) 3858 bssid = ifp->if_broadcastaddr; 3859 else 3860 bssid = ni->ni_bssid; 3861 ath_hal_setrxfilter(ah, rfilt); 3862 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s\n", 3863 __func__, rfilt, ether_sprintf(bssid)); 3864 3865 if (nstate == IEEE80211_S_RUN && ic->ic_opmode == IEEE80211_M_STA) 3866 ath_hal_setassocid(ah, bssid, ni->ni_associd); 3867 else 3868 ath_hal_setassocid(ah, bssid, 0); 3869 if (ic->ic_flags & IEEE80211_F_PRIVACY) { 3870 for (i = 0; i < IEEE80211_WEP_NKID; i++) 3871 if (ath_hal_keyisvalid(ah, i)) 3872 ath_hal_keysetmac(ah, i, bssid); 3873 } 3874 3875 /* 3876 * Notify the rate control algorithm so rates 3877 * are setup should ath_beacon_alloc be called. 3878 */ 3879 ath_rate_newstate(sc, nstate); 3880 3881 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 3882 /* nothing to do */; 3883 } else if (nstate == IEEE80211_S_RUN) { 3884 DPRINTF(sc, ATH_DEBUG_STATE, 3885 "%s(RUN): ic_flags=0x%08x iv=%d bssid=%s " 3886 "capinfo=0x%04x chan=%d\n" 3887 , __func__ 3888 , ic->ic_flags 3889 , ni->ni_intval 3890 , ether_sprintf(ni->ni_bssid) 3891 , ni->ni_capinfo 3892 , ieee80211_chan2ieee(ic, ni->ni_chan)); 3893 3894 /* 3895 * Allocate and setup the beacon frame for AP or adhoc mode. 3896 */ 3897 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 3898 ic->ic_opmode == IEEE80211_M_IBSS) { 3899 error = ath_beacon_alloc(sc, ni); 3900 if (error != 0) 3901 goto bad; 3902 } 3903 3904 /* 3905 * Configure the beacon and sleep timers. 3906 */ 3907 ath_beacon_config(sc); 3908 } else { 3909 ath_hal_intrset(ah, 3910 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); 3911 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 3912 } 3913done: 3914 /* 3915 * Invoke the parent method to complete the work. 
3916 */ 3917 error = sc->sc_newstate(ic, nstate, arg); 3918 /* 3919 * Finally, start any timers. 3920 */ 3921 if (nstate == IEEE80211_S_RUN) { 3922 /* start periodic recalibration timer */ 3923 callout_reset(&sc->sc_cal_ch, ath_calinterval * hz, 3924 ath_calibrate, sc); 3925 } else if (nstate == IEEE80211_S_SCAN) { 3926 /* start ap/neighbor scan timer */ 3927 callout_reset(&sc->sc_scan_ch, (ath_dwelltime * hz) / 1000, 3928 ath_next_scan, sc); 3929 } 3930bad: 3931 return error; 3932} 3933 3934/* 3935 * Setup driver-specific state for a newly associated node. 3936 * Note that we're called also on a re-associate, the isnew 3937 * param tells us if this is the first time or not. 3938 */ 3939static void 3940ath_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew) 3941{ 3942 struct ath_softc *sc = ic->ic_ifp->if_softc; 3943 3944 ath_rate_newassoc(sc, ATH_NODE(ni), isnew); 3945} 3946 3947static int 3948ath_getchannels(struct ath_softc *sc, u_int cc, 3949 HAL_BOOL outdoor, HAL_BOOL xchanmode) 3950{ 3951 struct ieee80211com *ic = &sc->sc_ic; 3952 struct ifnet *ifp = &sc->sc_if; 3953 struct ath_hal *ah = sc->sc_ah; 3954 HAL_CHANNEL *chans; 3955 int i, ix, nchan; 3956 3957 chans = malloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL), 3958 M_TEMP, M_NOWAIT); 3959 if (chans == NULL) { 3960 if_printf(ifp, "unable to allocate channel table\n"); 3961 return ENOMEM; 3962 } 3963 if (!ath_hal_init_channels(ah, chans, IEEE80211_CHAN_MAX, &nchan, 3964 cc, HAL_MODE_ALL, outdoor, xchanmode)) { 3965 u_int32_t rd; 3966 3967 ath_hal_getregdomain(ah, &rd); 3968 if_printf(ifp, "unable to collect channel list from hal; " 3969 "regdomain likely %u country code %u\n", rd, cc); 3970 free(chans, M_TEMP); 3971 return EINVAL; 3972 } 3973 3974 /* 3975 * Convert HAL channels to ieee80211 ones and insert 3976 * them in the table according to their channel number. 
3977 */ 3978 for (i = 0; i < nchan; i++) { 3979 HAL_CHANNEL *c = &chans[i]; 3980 ix = ath_hal_mhz2ieee(c->channel, c->channelFlags); 3981 if (ix > IEEE80211_CHAN_MAX) { 3982 if_printf(ifp, "bad hal channel %u (%u/%x) ignored\n", 3983 ix, c->channel, c->channelFlags); 3984 continue; 3985 } 3986 /* NB: flags are known to be compatible */ 3987 if (ic->ic_channels[ix].ic_freq == 0) { 3988 ic->ic_channels[ix].ic_freq = c->channel; 3989 ic->ic_channels[ix].ic_flags = c->channelFlags; 3990 } else { 3991 /* channels overlap; e.g. 11g and 11b */ 3992 ic->ic_channels[ix].ic_flags |= c->channelFlags; 3993 } 3994 } 3995 free(chans, M_TEMP); 3996 return 0; 3997} 3998 3999static void 4000ath_update_led(struct ath_softc *sc) 4001{ 4002 struct ieee80211com *ic = &sc->sc_ic; 4003 struct ath_hal *ah = sc->sc_ah; 4004 u_int32_t threshold; 4005 4006 /* 4007 * When not associated, flash LED on for 5s, off for 200ms. 4008 * XXX this assumes 100ms beacon interval. 4009 */ 4010 if (ic->ic_state != IEEE80211_S_RUN) { 4011 threshold = 2 + sc->sc_ledstate * 48; 4012 } else { 4013 threshold = 2 + sc->sc_ledstate * 18; 4014 } 4015 if (ic->ic_stats.is_rx_beacon - sc->sc_beacons >= threshold) { 4016 ath_hal_gpioCfgOutput(ah, sc->sc_ledpin); 4017 ath_hal_gpioset(ah, sc->sc_ledpin, sc->sc_ledstate); 4018 sc->sc_ledstate ^= 1; 4019 sc->sc_beacons = ic->ic_stats.is_rx_beacon; 4020 } 4021} 4022 4023static void 4024ath_update_txpow(struct ath_softc *sc) 4025{ 4026 struct ieee80211com *ic = &sc->sc_ic; 4027 struct ath_hal *ah = sc->sc_ah; 4028 u_int32_t txpow; 4029 4030 if (sc->sc_curtxpow != ic->ic_txpowlimit) { 4031 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit); 4032 /* read back in case value is clamped */ 4033 ath_hal_gettxpowlimit(ah, &txpow); 4034 ic->ic_txpowlimit = sc->sc_curtxpow = txpow; 4035 } 4036 /* 4037 * Fetch max tx power level for status requests. 
 */
	ath_hal_getmaxtxpow(sc->sc_ah, &txpow);
	ic->ic_bss->ni_txpower = txpow;
}

/*
 * Setup the rate table for the specified phy mode: fetch the
 * HAL rate table and mirror it into the net80211 supported-rate
 * set so the 802.11 layer advertises what the hardware supports.
 *
 * Returns 1 on success, 0 if the mode is unknown or the HAL
 * provides no rate table for it.
 */
static int
ath_rate_setup(struct ath_softc *sc, u_int mode)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	const HAL_RATE_TABLE *rt;
	struct ieee80211_rateset *rs;
	int i, maxrates;

	switch (mode) {
	case IEEE80211_MODE_11A:
		sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_11A);
		break;
	case IEEE80211_MODE_11B:
		sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_11B);
		break;
	case IEEE80211_MODE_11G:
		sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_11G);
		break;
	case IEEE80211_MODE_TURBO_A:
		sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_TURBO);
		break;
	case IEEE80211_MODE_TURBO_G:
		sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_108G);
		break;
	default:
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
			__func__, mode);
		return 0;
	}
	rt = sc->sc_rates[mode];
	if (rt == NULL)
		return 0;
	/* net80211's rate set is fixed size; clip the HAL table if needed */
	if (rt->rateCount > IEEE80211_RATE_MAXSIZE) {
		DPRINTF(sc, ATH_DEBUG_ANY,
			"%s: rate table too small (%u > %u)\n",
			__func__, rt->rateCount, IEEE80211_RATE_MAXSIZE);
		maxrates = IEEE80211_RATE_MAXSIZE;
	} else
		maxrates = rt->rateCount;
	rs = &ic->ic_sup_rates[mode];
	for (i = 0; i < maxrates; i++)
		rs->rs_rates[i] = rt->info[i].dot11Rate;
	rs->rs_nrates = maxrates;
	return 1;
}

/*
 * Switch to the specified phy mode: rebuild the rate-code <->
 * rate-index mappings for the mode's rate table and record it
 * as the current table/mode.
 */
static void
ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
{
	const HAL_RATE_TABLE *rt;
	int i;

	/* 0xff marks "no h/w rate index" entries */
	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = sc->sc_rates[mode];
	KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
	/* IEEE rate (sans basic-rate flag) -> h/w rate table index */
	for (i = 0; i < rt->rateCount; i++)
		sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i;
	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
	/*
	 * h/w rate code -> IEEE rate.  NB: assumes 32 covers both
	 * rateCodeToIndex and sc_hwmap -- TODO confirm against the
	 * HAL/softc definitions.
	 */
	for (i = 0; i < 32; i++) {
		u_int8_t ix = rt->rateCodeToIndex[i];
		if (ix != 0xff)
			sc->sc_hwmap[i] = rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
	}
	sc->sc_currates = rt;
	sc->sc_curmode = mode;
	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 * XXX select protection rate index from rate table.
	 */
	sc->sc_protrix = (mode == IEEE80211_MODE_11G ? 1 : 0);
	/* NB: caller is responsible for resetting rate control state */
}

#ifdef AR_DEBUG
/*
 * Dump the h/w descriptors of a receive buffer.  The trailing
 * char is ' ' when not yet completed, '*' on clean completion,
 * '!' on error.
 */
static void
ath_printrxbuf(struct ath_buf *bf, int done)
{
	struct ath_desc *ds;
	int i;

	for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
		printf("R%d (%p %p) %08x %08x %08x %08x %08x %08x %c\n",
		    i, ds, (struct ath_desc *)bf->bf_daddr + i,
		    ds->ds_link, ds->ds_data,
		    ds->ds_ctl0, ds->ds_ctl1,
		    ds->ds_hw[0], ds->ds_hw[1],
		    !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!');
	}
}

/*
 * Dump the h/w descriptors of a transmit buffer; same status
 * marker convention as ath_printrxbuf().
 */
static void
ath_printtxbuf(struct ath_buf *bf, int done)
{
	struct ath_desc *ds;
	int i;

	for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
		printf("T%d (%p %p) %08x %08x %08x %08x %08x %08x %08x %08x %c\n",
		    i, ds, (struct ath_desc *)bf->bf_daddr + i,
		    ds->ds_link, ds->ds_data,
		    ds->ds_ctl0, ds->ds_ctl1,
		    ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3],
		    !done ? ' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!');
	}
}
#endif /* AR_DEBUG */

/*
 * Per-second interface watchdog: count down the tx timer and
 * reset the device on a transmit timeout; also drives the
 * net80211 layer's watchdog.
 */
static void
ath_watchdog(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	ifp->if_timer = 0;
	if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid)
		return;
	if (sc->sc_tx_timer) {
		if (--sc->sc_tx_timer == 0) {
			if_printf(ifp, "device timeout\n");
			ath_reset(ifp);
			ifp->if_oerrors++;
			sc->sc_stats.ast_watchdog++;
		} else
			ifp->if_timer = 1;	/* re-arm for next second */
	}
	ieee80211_watchdog(ic);
}

/*
 * Diagnostic interface to the HAL.  This is used by various
 * tools to do things like retrieve register contents for
 * debugging.  The mechanism is intentionally opaque so that
 * it can change frequently w/o concern for compatibility.
 */
static int
ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
{
	struct ath_hal *ah = sc->sc_ah;
	u_int id = ad->ad_id & ATH_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = ad->ad_in_size;
	u_int32_t outsize = ad->ad_out_size;
	int error = 0;

	if (ad->ad_id & ATH_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(ad->ad_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (ad->ad_id & ATH_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
		if (outsize < ad->ad_out_size)
			ad->ad_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, ad->ad_out_data,
					ad->ad_out_size);
	} else {
		error = EINVAL;
	}
bad:
	if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}

/*
 * Handle an ioctl request on the interface.
 */
static int
ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & (IFF_RUNNING|IFF_UP)) == (IFF_RUNNING|IFF_UP))
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;

	ATH_LOCK(sc);
	switch (cmd) {
	case SIOCSIFFLAGS:
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call ath_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			ath_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid && ic->ic_bss != NULL)
				ath_init(ifp);	/* XXX lose error */
		} else
			ath_stop_locked(ifp);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * The upper layer has already installed/removed
		 * the multicast address(es), just recalculate the
		 * multicast filter for the card.
4275 */ 4276 if (ifp->if_flags & IFF_RUNNING) 4277 ath_mode_init(sc); 4278 break; 4279 case SIOCGATHSTATS: 4280 /* NB: embed these numbers to get a consistent view */ 4281 sc->sc_stats.ast_tx_packets = ifp->if_opackets; 4282 sc->sc_stats.ast_rx_packets = ifp->if_ipackets; 4283 sc->sc_stats.ast_rx_rssi = ieee80211_getrssi(ic); 4284 ATH_UNLOCK(sc); 4285 /* 4286 * NB: Drop the softc lock in case of a page fault; 4287 * we'll accept any potential inconsisentcy in the 4288 * statistics. The alternative is to copy the data 4289 * to a local structure. 4290 */ 4291 return copyout(&sc->sc_stats, 4292 ifr->ifr_data, sizeof (sc->sc_stats)); 4293 case SIOCGATHDIAG: 4294 error = ath_ioctl_diag(sc, (struct ath_diag *) ifr); 4295 break; 4296 default: 4297 error = ieee80211_ioctl(ic, cmd, data); 4298 if (error == ENETRESET) { 4299 if (IS_RUNNING(ifp) && 4300 ic->ic_roaming != IEEE80211_ROAMING_MANUAL) 4301 ath_init(ifp); /* XXX lose error */ 4302 error = 0; 4303 } 4304 if (error == ERESTART) 4305 error = IS_RUNNING(ifp) ? ath_reset(ifp) : 0; 4306 break; 4307 } 4308 ATH_UNLOCK(sc); 4309 return error; 4310#undef IS_UP 4311} 4312 4313static int 4314ath_sysctl_slottime(SYSCTL_HANDLER_ARGS) 4315{ 4316 struct ath_softc *sc = arg1; 4317 u_int slottime = ath_hal_getslottime(sc->sc_ah); 4318 int error; 4319 4320 error = sysctl_handle_int(oidp, &slottime, 0, req); 4321 if (error || !req->newptr) 4322 return error; 4323 return !ath_hal_setslottime(sc->sc_ah, slottime) ? EINVAL : 0; 4324} 4325 4326static int 4327ath_sysctl_acktimeout(SYSCTL_HANDLER_ARGS) 4328{ 4329 struct ath_softc *sc = arg1; 4330 u_int acktimeout = ath_hal_getacktimeout(sc->sc_ah); 4331 int error; 4332 4333 error = sysctl_handle_int(oidp, &acktimeout, 0, req); 4334 if (error || !req->newptr) 4335 return error; 4336 return !ath_hal_setacktimeout(sc->sc_ah, acktimeout) ? 
EINVAL : 0; 4337} 4338 4339static int 4340ath_sysctl_ctstimeout(SYSCTL_HANDLER_ARGS) 4341{ 4342 struct ath_softc *sc = arg1; 4343 u_int ctstimeout = ath_hal_getctstimeout(sc->sc_ah); 4344 int error; 4345 4346 error = sysctl_handle_int(oidp, &ctstimeout, 0, req); 4347 if (error || !req->newptr) 4348 return error; 4349 return !ath_hal_setctstimeout(sc->sc_ah, ctstimeout) ? EINVAL : 0; 4350} 4351 4352static int 4353ath_sysctl_softled(SYSCTL_HANDLER_ARGS) 4354{ 4355 struct ath_softc *sc = arg1; 4356 int softled = sc->sc_softled; 4357 int error; 4358 4359 error = sysctl_handle_int(oidp, &softled, 0, req); 4360 if (error || !req->newptr) 4361 return error; 4362 if (softled > 1) 4363 softled = 1; 4364 if (softled != sc->sc_softled) { 4365 if (softled) 4366 ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin); 4367 ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !softled); 4368 sc->sc_softled = softled; 4369 } 4370 return 0; 4371} 4372 4373static int 4374ath_sysctl_rxantenna(SYSCTL_HANDLER_ARGS) 4375{ 4376 struct ath_softc *sc = arg1; 4377 u_int defantenna = ath_hal_getdefantenna(sc->sc_ah); 4378 int error; 4379 4380 error = sysctl_handle_int(oidp, &defantenna, 0, req); 4381 if (!error && req->newptr) 4382 ath_hal_setdefantenna(sc->sc_ah, defantenna); 4383 return error; 4384} 4385 4386static int 4387ath_sysctl_diversity(SYSCTL_HANDLER_ARGS) 4388{ 4389 struct ath_softc *sc = arg1; 4390 u_int diversity = sc->sc_diversity; 4391 int error; 4392 4393 error = sysctl_handle_int(oidp, &diversity, 0, req); 4394 if (error || !req->newptr) 4395 return error; 4396 sc->sc_diversity = diversity; 4397 return !ath_hal_setdiversity(sc->sc_ah, diversity) ? 
EINVAL : 0; 4398} 4399 4400static int 4401ath_sysctl_diag(SYSCTL_HANDLER_ARGS) 4402{ 4403 struct ath_softc *sc = arg1; 4404 u_int32_t diag; 4405 int error; 4406 4407 if (!ath_hal_getdiag(sc->sc_ah, &diag)) 4408 return EINVAL; 4409 error = sysctl_handle_int(oidp, &diag, 0, req); 4410 if (error || !req->newptr) 4411 return error; 4412 return !ath_hal_setdiag(sc->sc_ah, diag) ? EINVAL : 0; 4413} 4414 4415static int 4416ath_sysctl_tpscale(SYSCTL_HANDLER_ARGS) 4417{ 4418 struct ath_softc *sc = arg1; 4419 struct ifnet *ifp = &sc->sc_if; 4420 u_int32_t scale; 4421 int error; 4422 4423 ath_hal_gettpscale(sc->sc_ah, &scale); 4424 error = sysctl_handle_int(oidp, &scale, 0, req); 4425 if (error || !req->newptr) 4426 return error; 4427 return !ath_hal_settpscale(sc->sc_ah, scale) ? EINVAL : ath_reset(ifp); 4428} 4429 4430static int 4431ath_sysctl_tpc(SYSCTL_HANDLER_ARGS) 4432{ 4433 struct ath_softc *sc = arg1; 4434 u_int tpc = ath_hal_gettpc(sc->sc_ah); 4435 int error; 4436 4437 error = sysctl_handle_int(oidp, &tpc, 0, req); 4438 if (error || !req->newptr) 4439 return error; 4440 return !ath_hal_settpc(sc->sc_ah, tpc) ? 
EINVAL : 0; 4441} 4442 4443static void 4444ath_sysctlattach(struct ath_softc *sc) 4445{ 4446 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 4447 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 4448 4449 ath_hal_getcountrycode(sc->sc_ah, &sc->sc_countrycode); 4450 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4451 "countrycode", CTLFLAG_RD, &sc->sc_countrycode, 0, 4452 "EEPROM country code"); 4453 ath_hal_getregdomain(sc->sc_ah, &sc->sc_regdomain); 4454 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4455 "regdomain", CTLFLAG_RD, &sc->sc_regdomain, 0, 4456 "EEPROM regdomain code"); 4457 sc->sc_debug = ath_debug; 4458 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4459 "debug", CTLFLAG_RW, &sc->sc_debug, 0, 4460 "control debugging printfs"); 4461 4462 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4463 "slottime", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 4464 ath_sysctl_slottime, "I", "802.11 slot time (us)"); 4465 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4466 "acktimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 4467 ath_sysctl_acktimeout, "I", "802.11 ACK timeout (us)"); 4468 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4469 "ctstimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 4470 ath_sysctl_ctstimeout, "I", "802.11 CTS timeout (us)"); 4471 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4472 "softled", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 4473 ath_sysctl_softled, "I", "enable/disable software LED support"); 4474 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4475 "ledpin", CTLFLAG_RW, &sc->sc_ledpin, 0, 4476 "GPIO pin connected to LED"); 4477 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4478 "txantenna", CTLFLAG_RW, &sc->sc_txantenna, 0, 4479 "tx antenna (0=auto)"); 4480 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4481 "rxantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 4482 ath_sysctl_rxantenna, "I", "default/rx antenna"); 4483 if (sc->sc_hasdiversity) 4484 SYSCTL_ADD_PROC(ctx, 
SYSCTL_CHILDREN(tree), OID_AUTO, 4485 "diversity", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 4486 ath_sysctl_diversity, "I", "antenna diversity"); 4487 sc->sc_txintrperiod = ATH_TXINTR_PERIOD; 4488 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4489 "txintrperiod", CTLFLAG_RW, &sc->sc_txintrperiod, 0, 4490 "tx descriptor batching"); 4491 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4492 "diag", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 4493 ath_sysctl_diag, "I", "h/w diagnostic control"); 4494 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4495 "tpscale", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 4496 ath_sysctl_tpscale, "I", "tx power scaling"); 4497 if (sc->sc_hastpc) 4498 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4499 "tpc", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 4500 ath_sysctl_tpc, "I", "enable/disable per-packet TPC"); 4501} 4502 4503static void 4504ath_bpfattach(struct ath_softc *sc) 4505{ 4506 struct ifnet *ifp = &sc->sc_if; 4507 4508 bpfattach2(ifp, DLT_IEEE802_11_RADIO, 4509 sizeof(struct ieee80211_frame) + sizeof(sc->sc_tx_th), 4510 &sc->sc_drvbpf); 4511 /* 4512 * Initialize constant fields. 4513 * XXX make header lengths a multiple of 32-bits so subsequent 4514 * headers are properly aligned; this is a kludge to keep 4515 * certain applications happy. 4516 * 4517 * NB: the channel is setup each time we transition to the 4518 * RUN state to avoid filling it in for each frame. 4519 */ 4520 sc->sc_tx_th_len = roundup(sizeof(sc->sc_tx_th), sizeof(u_int32_t)); 4521 sc->sc_tx_th.wt_ihdr.it_len = htole16(sc->sc_tx_th_len); 4522 sc->sc_tx_th.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT); 4523 4524 sc->sc_rx_th_len = roundup(sizeof(sc->sc_rx_th), sizeof(u_int32_t)); 4525 sc->sc_rx_th.wr_ihdr.it_len = htole16(sc->sc_rx_th_len); 4526 sc->sc_rx_th.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT); 4527} 4528 4529/* 4530 * Announce various information on device/driver attach. 
4531 */ 4532static void 4533ath_announce(struct ath_softc *sc) 4534{ 4535#define HAL_MODE_DUALBAND (HAL_MODE_11A|HAL_MODE_11B) 4536 struct ifnet *ifp = &sc->sc_if; 4537 struct ath_hal *ah = sc->sc_ah; 4538 u_int modes, cc; 4539 4540 if_printf(ifp, "mac %d.%d phy %d.%d", 4541 ah->ah_macVersion, ah->ah_macRev, 4542 ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf); 4543 /* 4544 * Print radio revision(s). We check the wireless modes 4545 * to avoid falsely printing revs for inoperable parts. 4546 * Dual-band radio revs are returned in the 5Ghz rev number. 4547 */ 4548 ath_hal_getcountrycode(ah, &cc); 4549 modes = ath_hal_getwirelessmodes(ah, cc); 4550 if ((modes & HAL_MODE_DUALBAND) == HAL_MODE_DUALBAND) { 4551 if (ah->ah_analog5GhzRev && ah->ah_analog2GhzRev) 4552 printf(" 5ghz radio %d.%d 2ghz radio %d.%d", 4553 ah->ah_analog5GhzRev >> 4, 4554 ah->ah_analog5GhzRev & 0xf, 4555 ah->ah_analog2GhzRev >> 4, 4556 ah->ah_analog2GhzRev & 0xf); 4557 else 4558 printf(" radio %d.%d", ah->ah_analog5GhzRev >> 4, 4559 ah->ah_analog5GhzRev & 0xf); 4560 } else 4561 printf(" radio %d.%d", ah->ah_analog5GhzRev >> 4, 4562 ah->ah_analog5GhzRev & 0xf); 4563 printf("\n"); 4564 if (bootverbose) { 4565 int i; 4566 for (i = 0; i <= WME_AC_VO; i++) { 4567 struct ath_txq *txq = sc->sc_ac2q[i]; 4568 if_printf(ifp, "Use hw queue %u for %s traffic\n", 4569 txq->axq_qnum, ieee80211_wme_acnames[i]); 4570 } 4571 if_printf(ifp, "Use hw queue %u for CAB traffic\n", 4572 sc->sc_cabq->axq_qnum); 4573 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq); 4574 } 4575#undef HAL_MODE_DUALBAND 4576} 4577