Deleted Added
sdiff udiff text old ( 234089 ) new ( 234090 )
full compact
1/*-
2 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
15 *
16 * NO WARRANTY
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 234090 2012-04-10 07:23:37Z adrian $");
32
33/*
34 * Driver for the Atheros Wireless LAN controller.
35 *
36 * This software is derived from work of Atsushi Onoe; his contribution
37 * is greatly appreciated.
38 */
39
40#include "opt_inet.h"
41#include "opt_ath.h"
42/*
43 * This is needed for register operations which are performed
44 * by the driver - eg, calls to ath_hal_gettsf32().
45 *
46 * It's also required for any AH_DEBUG checks in here, eg the
47 * module dependencies.
48 */
49#include "opt_ah.h"
50#include "opt_wlan.h"
51
52#include <sys/param.h>
53#include <sys/systm.h>
54#include <sys/sysctl.h>
55#include <sys/mbuf.h>
56#include <sys/malloc.h>
57#include <sys/lock.h>
58#include <sys/mutex.h>
59#include <sys/kernel.h>
60#include <sys/socket.h>
61#include <sys/sockio.h>
62#include <sys/errno.h>
63#include <sys/callout.h>
64#include <sys/bus.h>
65#include <sys/endian.h>
66#include <sys/kthread.h>
67#include <sys/taskqueue.h>
68#include <sys/priv.h>
69#include <sys/module.h>
70#include <sys/ktr.h>
71#include <sys/smp.h> /* for mp_ncpus */
72
73#include <machine/bus.h>
74
75#include <net/if.h>
76#include <net/if_dl.h>
77#include <net/if_media.h>
78#include <net/if_types.h>
79#include <net/if_arp.h>
80#include <net/ethernet.h>
81#include <net/if_llc.h>
82
83#include <net80211/ieee80211_var.h>
84#include <net80211/ieee80211_regdomain.h>
85#ifdef IEEE80211_SUPPORT_SUPERG
86#include <net80211/ieee80211_superg.h>
87#endif
88#ifdef IEEE80211_SUPPORT_TDMA
89#include <net80211/ieee80211_tdma.h>
90#endif
91
92#include <net/bpf.h>
93
94#ifdef INET
95#include <netinet/in.h>
96#include <netinet/if_ether.h>
97#endif
98
99#include <dev/ath/if_athvar.h>
100#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */
101#include <dev/ath/ath_hal/ah_diagcodes.h>
102
103#include <dev/ath/if_ath_debug.h>
104#include <dev/ath/if_ath_misc.h>
105#include <dev/ath/if_ath_tx.h>
106#include <dev/ath/if_ath_sysctl.h>
107#include <dev/ath/if_ath_led.h>
108#include <dev/ath/if_ath_keycache.h>
109#include <dev/ath/if_athdfs.h>
110
111#ifdef ATH_TX99_DIAG
112#include <dev/ath/ath_tx99/ath_tx99.h>
113#endif
114
/* KTR trace classes used by this driver (see ktr(4)). */
#define ATH_KTR_INTR	KTR_SPARE4
#define ATH_KTR_ERR	KTR_SPARE3

/*
 * ATH_BCBUF determines the number of vap's that can transmit
 * beacons and also (currently) the number of vap's that can
 * have unique mac addresses/bssid.  When staggering beacons
 * 4 is probably a good max as otherwise the beacons become
 * very closely spaced and there is limited time for cab q traffic
 * to go out.  You can burst beacons instead but that is not good
 * for stations in power save and at some point you really want
 * another radio (and channel).
 *
 * The limit on the number of mac addresses is tied to our use of
 * the U/L bit and tracking addresses in a byte; it would be
 * worthwhile to allow more for applications like proxy sta.
 */
CTASSERT(ATH_BCBUF <= 8);

/* Forward declarations for the file-local (static) driver methods. */
static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_vap_delete(struct ieee80211vap *);
static void	ath_init(void *);
static void	ath_stop_locked(struct ifnet *);
static void	ath_stop(struct ifnet *);
static void	ath_start(struct ifnet *);
static int	ath_reset_vap(struct ieee80211vap *, u_long);
static int	ath_media_change(struct ifnet *);
static void	ath_watchdog(void *);
static int	ath_ioctl(struct ifnet *, u_long, caddr_t);
static void	ath_fatal_proc(void *, int);
static void	ath_bmiss_vap(struct ieee80211vap *);
static void	ath_bmiss_proc(void *, int);
static void	ath_key_update_begin(struct ieee80211vap *);
static void	ath_key_update_end(struct ieee80211vap *);
static void	ath_update_mcast(struct ifnet *);
static void	ath_update_promisc(struct ifnet *);
static void	ath_mode_init(struct ath_softc *);
static void	ath_setslottime(struct ath_softc *);
static void	ath_updateslot(struct ifnet *);
static int	ath_beaconq_setup(struct ath_hal *);
static int	ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
static void	ath_beacon_update(struct ieee80211vap *, int item);
static void	ath_beacon_setup(struct ath_softc *, struct ath_buf *);
static void	ath_beacon_proc(void *, int);
static struct ath_buf *ath_beacon_generate(struct ath_softc *,
			struct ieee80211vap *);
static void	ath_bstuck_proc(void *, int);
static void	ath_reset_proc(void *, int);
static void	ath_beacon_return(struct ath_softc *, struct ath_buf *);
static void	ath_beacon_free(struct ath_softc *);
static void	ath_beacon_config(struct ath_softc *, struct ieee80211vap *);
static void	ath_descdma_cleanup(struct ath_softc *sc,
			struct ath_descdma *, ath_bufhead *);
static int	ath_desc_alloc(struct ath_softc *);
static void	ath_desc_free(struct ath_softc *);
static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
			const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_node_cleanup(struct ieee80211_node *);
static void	ath_node_free(struct ieee80211_node *);
static void	ath_node_getsignal(const struct ieee80211_node *,
			int8_t *, int8_t *);
static int	ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
static void	ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
			int subtype, int rssi, int nf);
static void	ath_setdefantenna(struct ath_softc *, u_int);
static void	ath_rx_proc(struct ath_softc *sc, int);
static void	ath_rx_tasklet(void *, int);
static void	ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
static int	ath_tx_setup(struct ath_softc *, int, int);
static int	ath_wme_update(struct ieee80211com *);
static void	ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
static void	ath_tx_cleanup(struct ath_softc *);
static void	ath_tx_proc_q0(void *, int);
static void	ath_tx_proc_q0123(void *, int);
static void	ath_tx_proc(void *, int);
static void	ath_txq_sched_tasklet(void *, int);
static int	ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
static void	ath_draintxq(struct ath_softc *, ATH_RESET_TYPE reset_type);
static void	ath_stoprecv(struct ath_softc *, int);
static int	ath_startrecv(struct ath_softc *);
static void	ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
static void	ath_scan_start(struct ieee80211com *);
static void	ath_scan_end(struct ieee80211com *);
static void	ath_set_channel(struct ieee80211com *);
#ifdef	ATH_ENABLE_11N
static void	ath_update_chw(struct ieee80211com *);
#endif	/* ATH_ENABLE_11N */
static void	ath_calibrate(void *);
static int	ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	ath_setup_stationkey(struct ieee80211_node *);
static void	ath_newassoc(struct ieee80211_node *, int);
static int	ath_setregdomain(struct ieee80211com *,
		    struct ieee80211_regdomain *, int,
		    struct ieee80211_channel []);
static void	ath_getradiocaps(struct ieee80211com *, int, int *,
		    struct ieee80211_channel []);
static int	ath_getchannels(struct ath_softc *);

static int	ath_rate_setup(struct ath_softc *, u_int mode);
static void	ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);

static void	ath_announce(struct ath_softc *);

static void	ath_dfs_tasklet(void *, int);

#ifdef IEEE80211_SUPPORT_TDMA
static void	ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt,
		    u_int32_t bintval);
static void	ath_tdma_bintvalsetup(struct ath_softc *sc,
		    const struct ieee80211_tdma_state *tdma);
static void	ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap);
static void	ath_tdma_update(struct ieee80211_node *ni,
		    const struct ieee80211_tdma_param *tdma, int);
static void	ath_tdma_beacon_send(struct ath_softc *sc,
		    struct ieee80211vap *vap);

/*
 * Fixed-point (x * 2^10) exponential/low-pass filter helpers used by
 * the TDMA slot timing code.
 */
#define	TDMA_EP_MULTIPLIER	(1<<10) /* pow2 to optimize out * and / */
#define	TDMA_LPF_LEN		6
#define	TDMA_DUMMY_MARKER	0x127
#define	TDMA_EP_MUL(x, mul)	((x) * (mul))
#define	TDMA_IN(x)		(TDMA_EP_MUL((x), TDMA_EP_MULTIPLIER))
#define	TDMA_LPF(x, y, len) \
    ((x != TDMA_DUMMY_MARKER) ? (((x) * ((len)-1) + (y)) / (len)) : (y))
#define	TDMA_SAMPLE(x, y) do {					\
	x = TDMA_LPF((x), TDMA_IN(y), TDMA_LPF_LEN);		\
} while (0)
#define	TDMA_EP_RND(x,mul) \
	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
#define	TDMA_AVG(x)		TDMA_EP_RND(x, TDMA_EP_MULTIPLIER)
#endif /* IEEE80211_SUPPORT_TDMA */

SYSCTL_DECL(_hw_ath);

/* XXX validate sysctl values */
static	int ath_longcalinterval = 30;		/* long cals every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
	    0, "long chip calibration interval (secs)");
static	int ath_shortcalinterval = 100;		/* short cals every 100 ms */
SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
	    0, "short chip calibration interval (msecs)");
static	int ath_resetcalinterval = 20*60;	/* reset cal state 20 mins */
SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
	    0, "reset chip calibration results (secs)");
static	int ath_anicalinterval = 100;		/* ANI calibration - 100 msec */
SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
	    0, "ANI calibration (msecs)");

static	int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
static	int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);

static	int ath_bstuck_threshold = 4;		/* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
	    0, "max missed beacon xmits before chip reset");

MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

/* Convenience masks covering all 2GHz/5GHz HT20 and HT40 HAL modes. */
#define	HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define	HAL_MODE_HT40 \
	(HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
	HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)
/*
 * Attach the driver to the device identified by 'devid'.
 *
 * Allocates and initialises the ifnet, attaches the HAL, resets the
 * key cache, builds the channel/rate tables, allocates descriptors,
 * creates the driver taskqueue and hardware tx queues, probes
 * crypto/HT/misc hardware capabilities and finally hooks the driver's
 * methods into the net80211 ieee80211com.
 *
 * Returns 0 on success or an errno on failure; on failure any
 * partially-constructed state is torn down via the bad2/bad labels and
 * sc->sc_invalid is set so interrupts are ignored.
 */
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct ath_hal *ah = NULL;
	HAL_STATUS status;
	int error = 0, i;
	u_int wmodes;
	uint8_t macaddr[IEEE80211_ADDR_LEN];
	int rx_chainmask, tx_chainmask;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto bad;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
	    sc->sc_eepromdata, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
			status);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */
#ifdef	ATH_DEBUG
	sc->sc_debug = ath_debug;
#endif

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.  If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	if (sc->sc_keymax > ATH_KEYMAX) {
		if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
			ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);

	/*
	 * Collect the default channel list.
	 */
	error = ath_getchannels(sc);
	if (error != 0)
		goto bad;

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_11NA);
	ath_rate_setup(sc, IEEE80211_MODE_11NG);
	ath_rate_setup(sc, IEEE80211_MODE_HALF);
	ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	error = ath_desc_alloc(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate descriptors: %d\n", error);
		goto bad;
	}
	callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0);

	ATH_TXBUF_LOCK_INIT(sc);

	/* Per-device taskqueue serviced by a single dedicated thread. */
	sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, ath_rx_tasklet, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
	TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc);
	TASK_INIT(&sc->sc_txqtask,0, ath_txq_sched_tasklet, sc);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 *
	 * XXX PS-Poll
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == (u_int) -1) {
		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		if_printf(ifp, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}

	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 */
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
	case 0x01:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		break;
	case 0x0f:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		break;
	default:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
		break;
	}

	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the antenna state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}

	/* Attach DFS module */
	if (! ath_dfs_attach(sc)) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach DFS\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Start DFS processing tasklet */
	TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);

	/* Configure LED state */
	sc->sc_blinking = 0;
	sc->sc_ledstate = 1;
	sc->sc_ledon = 0;			/* low true */
	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7sec */
	callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);

	/*
	 * Don't setup hardware-based blinking.
	 *
	 * Although some NICs may have this configured in the
	 * default reset register values, the user may wish
	 * to alter which pins have which function.
	 *
	 * The reference driver attaches the MAC network LED to GPIO1 and
	 * the MAC power LED to GPIO2.  However, the DWA-552 cardbus
	 * NIC has these reversed.
	 */
	sc->sc_hardled = (1 == 0);	/* hardware LED blinking disabled */
	sc->sc_led_net_pin = -1;
	sc->sc_led_pwr_pin = -1;
	/*
	 * Auto-enable soft led processing for IBM cards and for
	 * 5211 minipci cards.  Users can also manually enable/disable
	 * support with a sysctl.
	 */
	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);

	/* Hook up the ifnet methods and queue limits. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ath_start;
	ifp->if_ioctl = ath_ioctl;
	ifp->if_init = ath_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode */
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
		| IEEE80211_C_WDS		/* 4-address traffic works */
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
#ifdef	ATH_ENABLE_DFS
		| IEEE80211_C_DFS		/* Enable radar detection */
#endif
		;
	/*
	 * Query the hal to figure out h/w crypto support.
	 */
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		/*
		 * If the h/w supports storing tx+rx MIC keys
		 * in one cache slot automatically enable use.
		 */
		if (ath_hal_hastkipsplit(ah) ||
		    !ath_hal_settkipsplit(ah, AH_FALSE))
			sc->sc_splitmic = 1;
		/*
		 * If the h/w can do TKIP MIC together with WME then
		 * we use it; otherwise we force the MIC to be done
		 * in software by the net80211 layer.
		 */
		if (ath_hal_haswmetkipmic(ah))
			sc->sc_wmetkipmic = 1;
	}
	sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
	/*
	 * Check for multicast key search support.
	 */
	if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
	    !ath_hal_getmcastkeysearch(sc->sc_ah)) {
		ath_hal_setmcastkeysearch(sc->sc_ah, 1);
	}
	sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(sc->sc_keymap, i);
		setbit(sc->sc_keymap, i+64);
		if (sc->sc_splitmic) {
			setbit(sc->sc_keymap, i+32);
			setbit(sc->sc_keymap, i+32+64);
		}
	}
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for misc other capabilities.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;
	sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
	sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
	sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
	sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
	sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
	if (ath_hal_hasfastframes(ah))
		ic->ic_caps |= IEEE80211_C_FF;
	wmodes = ath_hal_getwirelessmodes(ah);
	if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
		ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
	if (ath_hal_macversion(ah) > 0x78) {
		ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
		ic->ic_tdma_update = ath_tdma_update;
	}
#endif

	/*
	 * TODO: enforce that at least this many frames are available
	 * in the txbuf list before allowing data frames (raw or
	 * otherwise) to be transmitted.
	 */
	sc->sc_txq_data_minfree = 10;
	/*
	 * Leave this as default to maintain legacy behaviour.
	 * Shortening the cabq/mcastq may end up causing some
	 * undesirable behaviour.
	 */
	sc->sc_txq_mcastq_maxdepth = ath_txbuf;

	/*
	 * Allow the TX and RX chainmasks to be overridden by
	 * environment variables and/or device.hints.
	 *
	 * This must be done early - before the hardware is
	 * calibrated or before the 802.11n stream calculation
	 * is done.
	 */
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "rx_chainmask",
	    &rx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n",
		    rx_chainmask);
		(void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask);
	}
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "tx_chainmask",
	    &tx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n",
		    tx_chainmask);
		(void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask);
	}

	/*
	 * The if_ath 11n support is completely not ready for normal use.
	 * Enabling this option will likely break everything and anything.
	 * Don't think of doing that unless you know what you're doing.
	 */

#ifdef	ATH_ENABLE_11N
	/*
	 * Query HT capabilities
	 */
	if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
	    (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
		int rxs, txs;

		device_printf(sc->sc_dev, "[HT] enabling HT modes\n");
		ic->ic_htcaps = IEEE80211_HTC_HT	/* HT operation */
			    | IEEE80211_HTC_AMPDU	/* A-MPDU tx/rx */
			    | IEEE80211_HTC_AMSDU	/* A-MSDU tx/rx */
			    | IEEE80211_HTCAP_MAXAMSDU_3839
						/* max A-MSDU length */
			    | IEEE80211_HTCAP_SMPS_OFF;	/* SM power save off */
			;

		/*
		 * Enable short-GI for HT20 only if the hardware
		 * advertises support.
		 * Notably, anything earlier than the AR9287 doesn't.
		 */
		if ((ath_hal_getcapability(ah,
		    HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
		    (wmodes & HAL_MODE_HT20)) {
			device_printf(sc->sc_dev,
			    "[HT] enabling short-GI in 20MHz mode\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
		}

		if (wmodes & HAL_MODE_HT40)
			ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
			    |  IEEE80211_HTCAP_SHORTGI40;

		/*
		 * TX/RX streams need to be taken into account when
		 * negotiating which MCS rates it'll receive and
		 * what MCS rates are available for TX.
		 */
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs);
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs);

		ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
		ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);

		ic->ic_txstream = txs;
		ic->ic_rxstream = rxs;

		(void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1,
		    &sc->sc_rts_aggr_limit);
		if (sc->sc_rts_aggr_limit != (64 * 1024))
			device_printf(sc->sc_dev,
			    "[HT] RTS aggregates limited to %d KiB\n",
			    sc->sc_rts_aggr_limit / 1024);

		device_printf(sc->sc_dev,
		    "[HT] %d RX streams; %d TX streams\n", rxs, txs);
	}
#endif

	/*
	 * Check if the hardware requires PCI register serialisation.
	 * Some of the Owl based MACs require this.
	 */
	if (mp_ncpus > 1 &&
	    ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
	     0, NULL) == HAL_OK) {
		sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
		device_printf(sc->sc_dev,
		    "Enabling register serialisation\n");
	}

	/*
	 * Indicate we need the 802.11 header padded to a
	 * 32-bit boundary for 4-address and QoS frames.
	 */
	ic->ic_flags |= IEEE80211_F_DATAPAD;

	/*
	 * Query the hal about antenna support.
	 */
	sc->sc_defant = ath_hal_getdefantenna(ah);

	/*
	 * Not all chips have the VEOL support we want to
	 * use with IBSS beacons; check here for it.
	 */
	sc->sc_hasveol = ath_hal_hasveol(ah);

	/* get mac address from hardware */
	ath_hal_getmac(ah, macaddr);
	if (sc->sc_hasbmask)
		ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);

	/* NB: used to size node table key mapping array */
	ic->ic_max_keyix = sc->sc_keymax;
	/* call MI attach routine. */
	ieee80211_ifattach(ic, macaddr);
	ic->ic_setregdomain = ath_setregdomain;
	ic->ic_getradiocaps = ath_getradiocaps;
	sc->sc_opmode = HAL_M_STA;

	/* override default methods */
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	ic->ic_vap_create = ath_vap_create;
	ic->ic_vap_delete = ath_vap_delete;
	ic->ic_raw_xmit = ath_raw_xmit;
	ic->ic_update_mcast = ath_update_mcast;
	ic->ic_update_promisc = ath_update_promisc;
	ic->ic_node_alloc = ath_node_alloc;
	/* preserve net80211's handlers so ours can chain to them */
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = ath_node_cleanup;
	ic->ic_node_getsignal = ath_node_getsignal;
	ic->ic_scan_start = ath_scan_start;
	ic->ic_scan_end = ath_scan_end;
	ic->ic_set_channel = ath_set_channel;
#ifdef	ATH_ENABLE_11N
	/* 802.11n specific - but just override anyway */
	sc->sc_addba_request = ic->ic_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	sc->sc_bar_response = ic->ic_bar_response;
	sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;

	ic->ic_addba_request = ath_addba_request;
	ic->ic_addba_response = ath_addba_response;
	ic->ic_addba_response_timeout = ath_addba_response_timeout;
	ic->ic_addba_stop = ath_addba_stop;
	ic->ic_bar_response = ath_bar_response;

	ic->ic_update_chw = ath_update_chw;
#endif	/* ATH_ENABLE_11N */

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		ATH_RX_RADIOTAP_PRESENT);

	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);
	ath_sysctl_stats_attach(sc);
	ath_sysctl_hal_attach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	ath_announce(sc);
	return 0;
bad2:
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
bad:
	if (ah)
		ath_hal_detach(ah);
	if (ifp != NULL)
		if_free(ifp);
	sc->sc_invalid = 1;
	return error;
}
855
/*
 * Detach the driver from the device: undo everything ath_attach() did.
 * Always returns 0.
 */
int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * NB: the order of these is important:
	 * o stop the chip so no more interrupts will fire
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o free the taskqueue which drains any pending tasks
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ath_stop(ifp);
	ieee80211_ifdetach(ifp->if_l2com);
	taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ath_rate_detach(sc->sc_rc);

	ath_dfs_detach(sc);
	ath_desc_free(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */
	if_free(ifp);

	return 0;
}
895
896/*
897 * MAC address handling for multiple BSS on the same radio.
898 * The first vap uses the MAC address from the EEPROM. For
899 * subsequent vap's we set the U/L bit (bit 1) in the MAC
900 * address and use the next six bits as an index.
901 */
static void
assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	/* Index of the bssid slot assigned to this vap (0 == EEPROM address). */
	int i;

	if (clone && sc->sc_hasbmask) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 8; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		/*
		 * Encode the slot index in bits 2-4 of the first address
		 * octet and set the U/L (locally administered) bit 0x2.
		 * NOTE(review): if all 8 slots are busy, i == 8 here and
		 * the mask/index arithmetic below overflows a single octet;
		 * presumably callers bound the vap count — verify.
		 */
		if (i != 0)
			mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	/* Record the slot as in use and widen the h/w bssid mask. */
	sc->sc_bssidmask |= 1<<i;
	sc->sc_hwbssidmask[0] &= ~mac[0];
	/* Slot 0 (the EEPROM address) is reference-counted. */
	if (i == 0)
		sc->sc_nbssid0++;
}
921
static void
reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	/* Recover the slot index encoded by assign_address() in bits 2-4. */
	int i = mac[0] >> 2;
	uint8_t mask;

	/*
	 * Slot 0 is shared (reference-counted in sc_nbssid0); only
	 * release it when the last user goes away.  Non-zero slots
	 * are released unconditionally.
	 */
	if (i != 0 || --sc->sc_nbssid0 == 0) {
		sc->sc_bssidmask &= ~(1<<i);
		/* recalculate bssid mask from remaining addresses */
		mask = 0xff;
		for (i = 1; i < 8; i++)
			if (sc->sc_bssidmask & (1<<i))
				mask &= ~((i<<2)|0x2);
		sc->sc_hwbssidmask[0] |= mask;
	}
}
938
939/*
940 * Assign a beacon xmit slot. We try to space out
941 * assignments so when beacons are staggered the
942 * traffic coming out of the cab q has maximal time
943 * to go out before the next beacon is scheduled.
944 */
945static int
946assign_bslot(struct ath_softc *sc)
947{
948 u_int slot, free;
949
950 free = 0;
951 for (slot = 0; slot < ATH_BCBUF; slot++)
952 if (sc->sc_bslot[slot] == NULL) {
953 if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
954 sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
955 return slot;
956 free = slot;
957 /* NB: keep looking for a double slot */
958 }
959 return free;
960}
961
962static struct ieee80211vap *
963ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
964 enum ieee80211_opmode opmode, int flags,
965 const uint8_t bssid[IEEE80211_ADDR_LEN],
966 const uint8_t mac0[IEEE80211_ADDR_LEN])
967{
968 struct ath_softc *sc = ic->ic_ifp->if_softc;
969 struct ath_vap *avp;
970 struct ieee80211vap *vap;
971 uint8_t mac[IEEE80211_ADDR_LEN];
972 int needbeacon, error;
973 enum ieee80211_opmode ic_opmode;
974
975 avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
976 M_80211_VAP, M_WAITOK | M_ZERO);
977 needbeacon = 0;
978 IEEE80211_ADDR_COPY(mac, mac0);
979
980 ATH_LOCK(sc);
981 ic_opmode = opmode; /* default to opmode of new vap */
982 switch (opmode) {
983 case IEEE80211_M_STA:
984 if (sc->sc_nstavaps != 0) { /* XXX only 1 for now */
985 device_printf(sc->sc_dev, "only 1 sta vap supported\n");
986 goto bad;
987 }
988 if (sc->sc_nvaps) {
989 /*
990 * With multiple vaps we must fall back
991 * to s/w beacon miss handling.
992 */
993 flags |= IEEE80211_CLONE_NOBEACONS;
994 }
995 if (flags & IEEE80211_CLONE_NOBEACONS) {
996 /*
997 * Station mode w/o beacons are implemented w/ AP mode.
998 */
999 ic_opmode = IEEE80211_M_HOSTAP;
1000 }
1001 break;
1002 case IEEE80211_M_IBSS:
1003 if (sc->sc_nvaps != 0) { /* XXX only 1 for now */
1004 device_printf(sc->sc_dev,
1005 "only 1 ibss vap supported\n");
1006 goto bad;
1007 }
1008 needbeacon = 1;
1009 break;
1010 case IEEE80211_M_AHDEMO:
1011#ifdef IEEE80211_SUPPORT_TDMA
1012 if (flags & IEEE80211_CLONE_TDMA) {
1013 if (sc->sc_nvaps != 0) {
1014 device_printf(sc->sc_dev,
1015 "only 1 tdma vap supported\n");
1016 goto bad;
1017 }
1018 needbeacon = 1;
1019 flags |= IEEE80211_CLONE_NOBEACONS;
1020 }
1021 /* fall thru... */
1022#endif
1023 case IEEE80211_M_MONITOR:
1024 if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
1025 /*
1026 * Adopt existing mode. Adding a monitor or ahdemo
1027 * vap to an existing configuration is of dubious
1028 * value but should be ok.
1029 */
1030 /* XXX not right for monitor mode */
1031 ic_opmode = ic->ic_opmode;
1032 }
1033 break;
1034 case IEEE80211_M_HOSTAP:
1035 case IEEE80211_M_MBSS:
1036 needbeacon = 1;
1037 break;
1038 case IEEE80211_M_WDS:
1039 if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
1040 device_printf(sc->sc_dev,
1041 "wds not supported in sta mode\n");
1042 goto bad;
1043 }
1044 /*
1045 * Silently remove any request for a unique
1046 * bssid; WDS vap's always share the local
1047 * mac address.
1048 */
1049 flags &= ~IEEE80211_CLONE_BSSID;
1050 if (sc->sc_nvaps == 0)
1051 ic_opmode = IEEE80211_M_HOSTAP;
1052 else
1053 ic_opmode = ic->ic_opmode;
1054 break;
1055 default:
1056 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
1057 goto bad;
1058 }
1059 /*
1060 * Check that a beacon buffer is available; the code below assumes it.
1061 */
1062 if (needbeacon & TAILQ_EMPTY(&sc->sc_bbuf)) {
1063 device_printf(sc->sc_dev, "no beacon buffer available\n");
1064 goto bad;
1065 }
1066
1067 /* STA, AHDEMO? */
1068 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
1069 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
1070 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
1071 }
1072
1073 vap = &avp->av_vap;
1074 /* XXX can't hold mutex across if_alloc */
1075 ATH_UNLOCK(sc);
1076 error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
1077 bssid, mac);
1078 ATH_LOCK(sc);
1079 if (error != 0) {
1080 device_printf(sc->sc_dev, "%s: error %d creating vap\n",
1081 __func__, error);
1082 goto bad2;
1083 }
1084
1085 /* h/w crypto support */
1086 vap->iv_key_alloc = ath_key_alloc;
1087 vap->iv_key_delete = ath_key_delete;
1088 vap->iv_key_set = ath_key_set;
1089 vap->iv_key_update_begin = ath_key_update_begin;
1090 vap->iv_key_update_end = ath_key_update_end;
1091
1092 /* override various methods */
1093 avp->av_recv_mgmt = vap->iv_recv_mgmt;
1094 vap->iv_recv_mgmt = ath_recv_mgmt;
1095 vap->iv_reset = ath_reset_vap;
1096 vap->iv_update_beacon = ath_beacon_update;
1097 avp->av_newstate = vap->iv_newstate;
1098 vap->iv_newstate = ath_newstate;
1099 avp->av_bmiss = vap->iv_bmiss;
1100 vap->iv_bmiss = ath_bmiss_vap;
1101
1102 /* Set default parameters */
1103
1104 /*
1105 * Anything earlier than some AR9300 series MACs don't
1106 * support a smaller MPDU density.
1107 */
1108 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
1109 /*
1110 * All NICs can handle the maximum size, however
1111 * AR5416 based MACs can only TX aggregates w/ RTS
1112 * protection when the total aggregate size is <= 8k.
1113 * However, for now that's enforced by the TX path.
1114 */
1115 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
1116
1117 avp->av_bslot = -1;
1118 if (needbeacon) {
1119 /*
1120 * Allocate beacon state and setup the q for buffered
1121 * multicast frames. We know a beacon buffer is
1122 * available because we checked above.
1123 */
1124 avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
1125 TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
1126 if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
1127 /*
1128 * Assign the vap to a beacon xmit slot. As above
1129 * this cannot fail to find a free one.
1130 */
1131 avp->av_bslot = assign_bslot(sc);
1132 KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
1133 ("beacon slot %u not empty", avp->av_bslot));
1134 sc->sc_bslot[avp->av_bslot] = vap;
1135 sc->sc_nbcnvaps++;
1136 }
1137 if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
1138 /*
1139 * Multple vaps are to transmit beacons and we
1140 * have h/w support for TSF adjusting; enable
1141 * use of staggered beacons.
1142 */
1143 sc->sc_stagbeacons = 1;
1144 }
1145 ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
1146 }
1147
1148 ic->ic_opmode = ic_opmode;
1149 if (opmode != IEEE80211_M_WDS) {
1150 sc->sc_nvaps++;
1151 if (opmode == IEEE80211_M_STA)
1152 sc->sc_nstavaps++;
1153 if (opmode == IEEE80211_M_MBSS)
1154 sc->sc_nmeshvaps++;
1155 }
1156 switch (ic_opmode) {
1157 case IEEE80211_M_IBSS:
1158 sc->sc_opmode = HAL_M_IBSS;
1159 break;
1160 case IEEE80211_M_STA:
1161 sc->sc_opmode = HAL_M_STA;
1162 break;
1163 case IEEE80211_M_AHDEMO:
1164#ifdef IEEE80211_SUPPORT_TDMA
1165 if (vap->iv_caps & IEEE80211_C_TDMA) {
1166 sc->sc_tdma = 1;
1167 /* NB: disable tsf adjust */
1168 sc->sc_stagbeacons = 0;
1169 }
1170 /*
1171 * NB: adhoc demo mode is a pseudo mode; to the hal it's
1172 * just ap mode.
1173 */
1174 /* fall thru... */
1175#endif
1176 case IEEE80211_M_HOSTAP:
1177 case IEEE80211_M_MBSS:
1178 sc->sc_opmode = HAL_M_HOSTAP;
1179 break;
1180 case IEEE80211_M_MONITOR:
1181 sc->sc_opmode = HAL_M_MONITOR;
1182 break;
1183 default:
1184 /* XXX should not happen */
1185 break;
1186 }
1187 if (sc->sc_hastsfadd) {
1188 /*
1189 * Configure whether or not TSF adjust should be done.
1190 */
1191 ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
1192 }
1193 if (flags & IEEE80211_CLONE_NOBEACONS) {
1194 /*
1195 * Enable s/w beacon miss handling.
1196 */
1197 sc->sc_swbmiss = 1;
1198 }
1199 ATH_UNLOCK(sc);
1200
1201 /* complete setup */
1202 ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
1203 return vap;
1204bad2:
1205 reclaim_address(sc, mac);
1206 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
1207bad:
1208 free(avp, M_80211_VAP);
1209 ATH_UNLOCK(sc);
1210 return NULL;
1211}
1212
/*
 * Destroy a vap: quiesce the hardware, detach the net80211 state,
 * reclaim the beacon slot/buffer and mcast queue, fix up the per-mode
 * vap accounting, and restart rx/beacons if other vaps remain.
 */
static void
ath_vap_delete(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp = ATH_VAP(vap);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Quiesce the hardware while we remove the vap.  In
		 * particular we need to reclaim all references to
		 * the vap state by any frames pending on the tx queues.
		 */
		ath_hal_intrset(ah, 0);		/* disable interrupts */
		ath_draintxq(sc, ATH_RESET_DEFAULT);	/* stop hw xmit side */
		/* XXX Do all frames from all vaps/nodes need draining here? */
		ath_stoprecv(sc, 1);		/* stop recv side */
	}

	ieee80211_vap_detach(vap);

	/*
	 * XXX Danger Will Robinson! Danger!
	 *
	 * Because ieee80211_vap_detach() can queue a frame (the station
	 * diassociate message?) after we've drained the TXQ and
	 * flushed the software TXQ, we will end up with a frame queued
	 * to a node whose vap is about to be freed.
	 *
	 * To work around this, flush the hardware/software again.
	 * This may be racy - the ath task may be running and the packet
	 * may be being scheduled between sw->hw txq. Tsk.
	 *
	 * TODO: figure out why a new node gets allocated somewhere around
	 * here (after the ath_tx_swq() call; and after an ath_stop_locked()
	 * call!)
	 */

	ath_draintxq(sc, ATH_RESET_DEFAULT);

	ATH_LOCK(sc);
	/*
	 * Reclaim beacon state.  Note this must be done before
	 * the vap instance is reclaimed as we may have a reference
	 * to it in the buffer for the beacon frame.
	 */
	if (avp->av_bcbuf != NULL) {
		if (avp->av_bslot != -1) {
			sc->sc_bslot[avp->av_bslot] = NULL;
			sc->sc_nbcnvaps--;
		}
		ath_beacon_return(sc, avp->av_bcbuf);
		avp->av_bcbuf = NULL;
		/* Last beaconing vap gone: disable staggering/TSF adjust. */
		if (sc->sc_nbcnvaps == 0) {
			sc->sc_stagbeacons = 0;
			if (sc->sc_hastsfadd)
				ath_hal_settsfadjust(sc->sc_ah, 0);
		}
		/*
		 * Reclaim any pending mcast frames for the vap.
		 */
		ath_tx_draintxq(sc, &avp->av_mcastq);
		ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);
	}
	/*
	 * Update bookkeeping.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		sc->sc_nstavaps--;
		/* Last sta vap gone: s/w beacon miss no longer needed. */
		if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
			sc->sc_swbmiss = 0;
	} else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	    vap->iv_opmode == IEEE80211_M_MBSS) {
		/* Release the cloned MAC slot and reprogram the bssid mask. */
		reclaim_address(sc, vap->iv_myaddr);
		ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
		if (vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps--;
	}
	if (vap->iv_opmode != IEEE80211_M_WDS)
		sc->sc_nvaps--;
#ifdef IEEE80211_SUPPORT_TDMA
	/* TDMA operation ceases when the last vap is destroyed */
	if (sc->sc_tdma && sc->sc_nvaps == 0) {
		sc->sc_tdma = 0;
		sc->sc_swbmiss = 0;
	}
#endif
	free(avp, M_80211_VAP);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Restart rx+tx machines if still running (RUNNING will
		 * be reset if we just destroyed the last vap).
		 */
		if (ath_startrecv(sc) != 0)
			if_printf(ifp, "%s: unable to restart recv logic\n",
			    __func__);
		if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma)
				ath_tdma_config(sc, NULL);
			else
#endif
				ath_beacon_config(sc, NULL);
		}
		ath_hal_intrset(ah, sc->sc_imask);
	}
	ATH_UNLOCK(sc);
}
1325
1326void
1327ath_suspend(struct ath_softc *sc)
1328{
1329 struct ifnet *ifp = sc->sc_ifp;
1330 struct ieee80211com *ic = ifp->if_l2com;
1331
1332 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1333 __func__, ifp->if_flags);
1334
1335 sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;
1336 if (ic->ic_opmode == IEEE80211_M_STA)
1337 ath_stop(ifp);
1338 else
1339 ieee80211_suspend_all(ic);
1340 /*
1341 * NB: don't worry about putting the chip in low power
1342 * mode; pci will power off our socket on suspend and
1343 * CardBus detaches the device.
1344 */
1345}
1346
1347/*
1348 * Reset the key cache since some parts do not reset the
1349 * contents on resume. First we clear all entries, then
1350 * re-load keys that the 802.11 layer assumes are setup
1351 * in h/w.
1352 */
1353static void
1354ath_reset_keycache(struct ath_softc *sc)
1355{
1356 struct ifnet *ifp = sc->sc_ifp;
1357 struct ieee80211com *ic = ifp->if_l2com;
1358 struct ath_hal *ah = sc->sc_ah;
1359 int i;
1360
1361 for (i = 0; i < sc->sc_keymax; i++)
1362 ath_hal_keyreset(ah, i);
1363 ieee80211_crypto_reload_keys(ic);
1364}
1365
/*
 * Resume handler: bring the chip back up after suspend.  The chip
 * must be reset before the key cache is reloaded since it was
 * powered down; afterwards restore DFS/LED state and, if the
 * interface was up at suspend time, restart operation.
 */
void
ath_resume(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * Must reset the chip before we reload the
	 * keycache as we were powered down on suspend.
	 */
	ath_hal_reset(ah, sc->sc_opmode,
	    sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
	    AH_FALSE, &status);
	ath_reset_keycache(sc);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/* Restore the LED configuration */
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);

	/* sc_resume_up was latched by ath_suspend(). */
	if (sc->sc_resume_up) {
		if (ic->ic_opmode == IEEE80211_M_STA) {
			ath_init(sc);
			ath_hal_setledstate(ah, HAL_LED_RUN);
			/*
			 * Program the beacon registers using the last rx'd
			 * beacon frame and enable sync on the next beacon
			 * we see.  This should handle the case where we
			 * wakeup and find the same AP and also the case where
			 * we wakeup and need to roam.  For the latter we
			 * should get bmiss events that trigger a roam.
			 */
			ath_beacon_config(sc, NULL);
			sc->sc_syncbeacon = 1;
		} else
			ieee80211_resume_all(ic);
	}

	/* XXX beacons ? */
}
1413
1414void
1415ath_shutdown(struct ath_softc *sc)
1416{
1417 struct ifnet *ifp = sc->sc_ifp;
1418
1419 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1420 __func__, ifp->if_flags);
1421
1422 ath_stop(ifp);
1423 /* NB: no point powering down chip as we're about to reboot */
1424}
1425
1426/*
1427 * Interrupt handler. Most of the actual processing is deferred.
1428 */
1429void
1430ath_intr(void *arg)
1431{
1432 struct ath_softc *sc = arg;
1433 struct ifnet *ifp = sc->sc_ifp;
1434 struct ath_hal *ah = sc->sc_ah;
1435 HAL_INT status = 0;
1436 uint32_t txqs;
1437
1438 /*
1439 * If we're inside a reset path, just print a warning and
1440 * clear the ISR. The reset routine will finish it for us.
1441 */
1442 ATH_PCU_LOCK(sc);
1443 if (sc->sc_inreset_cnt) {
1444 HAL_INT status;
1445 ath_hal_getisr(ah, &status); /* clear ISR */
1446 ath_hal_intrset(ah, 0); /* disable further intr's */
1447 DPRINTF(sc, ATH_DEBUG_ANY,
1448 "%s: in reset, ignoring: status=0x%x\n",
1449 __func__, status);
1450 ATH_PCU_UNLOCK(sc);
1451 return;
1452 }
1453
1454 if (sc->sc_invalid) {
1455 /*
1456 * The hardware is not ready/present, don't touch anything.
1457 * Note this can happen early on if the IRQ is shared.
1458 */
1459 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
1460 ATH_PCU_UNLOCK(sc);
1461 return;
1462 }
1463 if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */
1464 ATH_PCU_UNLOCK(sc);
1465 return;
1466 }
1467
1468 if ((ifp->if_flags & IFF_UP) == 0 ||
1469 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1470 HAL_INT status;
1471
1472 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1473 __func__, ifp->if_flags);
1474 ath_hal_getisr(ah, &status); /* clear ISR */
1475 ath_hal_intrset(ah, 0); /* disable further intr's */
1476 ATH_PCU_UNLOCK(sc);
1477 return;
1478 }
1479
1480 /*
1481 * Figure out the reason(s) for the interrupt. Note
1482 * that the hal returns a pseudo-ISR that may include
1483 * bits we haven't explicitly enabled so we mask the
1484 * value to insure we only process bits we requested.
1485 */
1486 ath_hal_getisr(ah, &status); /* NB: clears ISR too */
1487 DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
1488 CTR1(ATH_KTR_INTR, "ath_intr: mask=0x%.8x", status);
1489#ifdef ATH_KTR_INTR_DEBUG
1490 CTR5(ATH_KTR_INTR,
1491 "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
1492 ah->ah_intrstate[0],
1493 ah->ah_intrstate[1],
1494 ah->ah_intrstate[2],
1495 ah->ah_intrstate[3],
1496 ah->ah_intrstate[6]);
1497#endif
1498
1499 /* Squirrel away SYNC interrupt debugging */
1500 if (ah->ah_syncstate != 0) {
1501 int i;
1502 for (i = 0; i < 32; i++)
1503 if (ah->ah_syncstate & (i << i))
1504 sc->sc_intr_stats.sync_intr[i]++;
1505 }
1506
1507 status &= sc->sc_imask; /* discard unasked for bits */
1508
1509 /* Short-circuit un-handled interrupts */
1510 if (status == 0x0) {
1511 ATH_PCU_UNLOCK(sc);
1512 return;
1513 }
1514
1515 /*
1516 * Take a note that we're inside the interrupt handler, so
1517 * the reset routines know to wait.
1518 */
1519 sc->sc_intr_cnt++;
1520 ATH_PCU_UNLOCK(sc);
1521
1522 /*
1523 * Handle the interrupt. We won't run concurrent with the reset
1524 * or channel change routines as they'll wait for sc_intr_cnt
1525 * to be 0 before continuing.
1526 */
1527 if (status & HAL_INT_FATAL) {
1528 sc->sc_stats.ast_hardware++;
1529 ath_hal_intrset(ah, 0); /* disable intr's until reset */
1530 ath_fatal_proc(sc, 0);
1531 } else {
1532 if (status & HAL_INT_SWBA) {
1533 /*
1534 * Software beacon alert--time to send a beacon.
1535 * Handle beacon transmission directly; deferring
1536 * this is too slow to meet timing constraints
1537 * under load.
1538 */
1539#ifdef IEEE80211_SUPPORT_TDMA
1540 if (sc->sc_tdma) {
1541 if (sc->sc_tdmaswba == 0) {
1542 struct ieee80211com *ic = ifp->if_l2com;
1543 struct ieee80211vap *vap =
1544 TAILQ_FIRST(&ic->ic_vaps);
1545 ath_tdma_beacon_send(sc, vap);
1546 sc->sc_tdmaswba =
1547 vap->iv_tdma->tdma_bintval;
1548 } else
1549 sc->sc_tdmaswba--;
1550 } else
1551#endif
1552 {
1553 ath_beacon_proc(sc, 0);
1554#ifdef IEEE80211_SUPPORT_SUPERG
1555 /*
1556 * Schedule the rx taskq in case there's no
1557 * traffic so any frames held on the staging
1558 * queue are aged and potentially flushed.
1559 */
1560 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1561#endif
1562 }
1563 }
1564 if (status & HAL_INT_RXEOL) {
1565 int imask;
1566 CTR0(ATH_KTR_ERR, "ath_intr: RXEOL");
1567 ATH_PCU_LOCK(sc);
1568 /*
1569 * NB: the hardware should re-read the link when
1570 * RXE bit is written, but it doesn't work at
1571 * least on older hardware revs.
1572 */
1573 sc->sc_stats.ast_rxeol++;
1574 /*
1575 * Disable RXEOL/RXORN - prevent an interrupt
1576 * storm until the PCU logic can be reset.
1577 * In case the interface is reset some other
1578 * way before "sc_kickpcu" is called, don't
1579 * modify sc_imask - that way if it is reset
1580 * by a call to ath_reset() somehow, the
1581 * interrupt mask will be correctly reprogrammed.
1582 */
1583 imask = sc->sc_imask;
1584 imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
1585 ath_hal_intrset(ah, imask);
1586 /*
1587 * Only blank sc_rxlink if we've not yet kicked
1588 * the PCU.
1589 *
1590 * This isn't entirely correct - the correct solution
1591 * would be to have a PCU lock and engage that for
1592 * the duration of the PCU fiddling; which would include
1593 * running the RX process. Otherwise we could end up
1594 * messing up the RX descriptor chain and making the
1595 * RX desc list much shorter.
1596 */
1597 if (! sc->sc_kickpcu)
1598 sc->sc_rxlink = NULL;
1599 sc->sc_kickpcu = 1;
1600 /*
1601 * Enqueue an RX proc, to handled whatever
1602 * is in the RX queue.
1603 * This will then kick the PCU.
1604 */
1605 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1606 ATH_PCU_UNLOCK(sc);
1607 }
1608 if (status & HAL_INT_TXURN) {
1609 sc->sc_stats.ast_txurn++;
1610 /* bump tx trigger level */
1611 ath_hal_updatetxtriglevel(ah, AH_TRUE);
1612 }
1613 if (status & HAL_INT_RX) {
1614 sc->sc_stats.ast_rx_intr++;
1615 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1616 }
1617 if (status & HAL_INT_TX) {
1618 sc->sc_stats.ast_tx_intr++;
1619 /*
1620 * Grab all the currently set bits in the HAL txq bitmap
1621 * and blank them. This is the only place we should be
1622 * doing this.
1623 */
1624 ATH_PCU_LOCK(sc);
1625 txqs = 0xffffffff;
1626 ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
1627 sc->sc_txq_active |= txqs;
1628 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
1629 ATH_PCU_UNLOCK(sc);
1630 }
1631 if (status & HAL_INT_BMISS) {
1632 sc->sc_stats.ast_bmiss++;
1633 taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
1634 }
1635 if (status & HAL_INT_GTT)
1636 sc->sc_stats.ast_tx_timeout++;
1637 if (status & HAL_INT_CST)
1638 sc->sc_stats.ast_tx_cst++;
1639 if (status & HAL_INT_MIB) {
1640 sc->sc_stats.ast_mib++;
1641 ATH_PCU_LOCK(sc);
1642 /*
1643 * Disable interrupts until we service the MIB
1644 * interrupt; otherwise it will continue to fire.
1645 */
1646 ath_hal_intrset(ah, 0);
1647 /*
1648 * Let the hal handle the event. We assume it will
1649 * clear whatever condition caused the interrupt.
1650 */
1651 ath_hal_mibevent(ah, &sc->sc_halstats);
1652 /*
1653 * Don't reset the interrupt if we've just
1654 * kicked the PCU, or we may get a nested
1655 * RXEOL before the rxproc has had a chance
1656 * to run.
1657 */
1658 if (sc->sc_kickpcu == 0)
1659 ath_hal_intrset(ah, sc->sc_imask);
1660 ATH_PCU_UNLOCK(sc);
1661 }
1662 if (status & HAL_INT_RXORN) {
1663 /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
1664 CTR0(ATH_KTR_ERR, "ath_intr: RXORN");
1665 sc->sc_stats.ast_rxorn++;
1666 }
1667 }
1668 ATH_PCU_LOCK(sc);
1669 sc->sc_intr_cnt--;
1670 ATH_PCU_UNLOCK(sc);
1671}
1672
1673static void
1674ath_fatal_proc(void *arg, int pending)
1675{
1676 struct ath_softc *sc = arg;
1677 struct ifnet *ifp = sc->sc_ifp;
1678 u_int32_t *state;
1679 u_int32_t len;
1680 void *sp;
1681
1682 if_printf(ifp, "hardware error; resetting\n");
1683 /*
1684 * Fatal errors are unrecoverable. Typically these
1685 * are caused by DMA errors. Collect h/w state from
1686 * the hal so we can diagnose what's going on.
1687 */
1688 if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
1689 KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
1690 state = sp;
1691 if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
1692 state[0], state[1] , state[2], state[3],
1693 state[4], state[5]);
1694 }
1695 ath_reset(ifp, ATH_RESET_NOLOSS);
1696}
1697
/*
 * Per-vap beacon-miss handler installed over the net80211 default.
 * Filters phantom h/w beacon-miss interrupts before chaining to the
 * saved net80211 handler.
 */
static void
ath_bmiss_vap(struct ieee80211vap *vap)
{
	/*
	 * Workaround phantom bmiss interrupts by sanity-checking
	 * the time of our last rx'd frame.  If it is within the
	 * beacon miss interval then ignore the interrupt.  If it's
	 * truly a bmiss we'll get another interrupt soon and that'll
	 * be dispatched up for processing.  Note this applies only
	 * for h/w beacon miss events.
	 */
	if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
		struct ifnet *ifp = vap->iv_ic->ic_ifp;
		struct ath_softc *sc = ifp->if_softc;
		u_int64_t lastrx = sc->sc_lastrx;
		u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
		/* XXX should take a locked ref to iv_bss */
		/* Threshold in TSF units: intervals * TU-per-interval * 1024us. */
		u_int bmisstimeout =
			vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
		    __func__, (unsigned long long) tsf,
		    (unsigned long long)(tsf - lastrx),
		    (unsigned long long) lastrx, bmisstimeout);

		/* Recent rx means the bmiss is phantom: count it and bail. */
		if (tsf - lastrx <= bmisstimeout) {
			sc->sc_stats.ast_bmiss_phantom++;
			return;
		}
	}
	/* Chain to the net80211 handler saved at vap creation. */
	ATH_VAP(vap)->av_bmiss(vap);
}
1731
1732static int
1733ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
1734{
1735 uint32_t rsize;
1736 void *sp;
1737
1738 if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize))
1739 return 0;
1740 KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
1741 *hangs = *(uint32_t *)sp;
1742 return 1;
1743}
1744
1745static void
1746ath_bmiss_proc(void *arg, int pending)
1747{
1748 struct ath_softc *sc = arg;
1749 struct ifnet *ifp = sc->sc_ifp;
1750 uint32_t hangs;
1751
1752 DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
1753
1754 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
1755 if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs);
1756 ath_reset(ifp, ATH_RESET_NOLOSS);
1757 } else
1758 ieee80211_beacon_miss(ifp->if_l2com);
1759}
1760
1761/*
1762 * Handle TKIP MIC setup to deal hardware that doesn't do MIC
1763 * calcs together with WME. If necessary disable the crypto
1764 * hardware and mark the 802.11 state so keys will be setup
1765 * with the MIC work done in software.
1766 */
1767static void
1768ath_settkipmic(struct ath_softc *sc)
1769{
1770 struct ifnet *ifp = sc->sc_ifp;
1771 struct ieee80211com *ic = ifp->if_l2com;
1772
1773 if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
1774 if (ic->ic_flags & IEEE80211_F_WME) {
1775 ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
1776 ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
1777 } else {
1778 ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
1779 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
1780 }
1781 }
1782}
1783
/*
 * Bring the interface up: reset the chip into a known state, restart
 * the receive engine, program the interrupt mask and kick off all vaps.
 * Safe to call whether or not the interface was previously running.
 */
static void
ath_init(void *arg)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	ATH_LOCK(sc);
	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop_locked(ifp);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ath_settkipmic(sc);
	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) {
		if_printf(ifp, "unable to reset hardware; hal status %u\n",
			status);
		ATH_UNLOCK(sc);
		return;
	}
	ath_chan_change(sc, ic->ic_curchan);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/*
	 * Likewise this is set during reset so update
	 * state cached in the driver.
	 */
	sc->sc_diversity = ath_hal_getdiversity(ah);
	/* Reset calibration/ANI bookkeeping; the chip state is fresh. */
	sc->sc_lastlongcal = 0;
	sc->sc_resetcal = 1;
	sc->sc_lastcalreset = 0;
	sc->sc_lastani = 0;
	sc->sc_lastshortcal = 0;
	sc->sc_doresetcal = AH_FALSE;
	/*
	 * Beacon timers were cleared here; give ath_newstate()
	 * a hint that the beacon timers should be poked when
	 * things transition to the RUN state.
	 */
	sc->sc_beacons = 0;

	/*
	 * Initial aggregation settings.
	 */
	sc->sc_hwq_limit = ATH_AGGR_MIN_QDEPTH;
	sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW;
	sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH;

	/*
	 * Setup the hardware after reset: the key cache
	 * is filled as needed and the receive engine is
	 * set going.  Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		ATH_UNLOCK(sc);
		return;
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = HAL_INT_RX | HAL_INT_TX
		  | HAL_INT_RXEOL | HAL_INT_RXORN
		  | HAL_INT_FATAL | HAL_INT_GLOBAL;
	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
		sc->sc_imask |= HAL_INT_MIB;

	/* Enable global TX timeout and carrier sense timeout if available */
	if (ath_hal_gtxto_supported(ah))
		sc->sc_imask |= HAL_INT_GTT;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n",
		__func__, sc->sc_imask);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
	ath_hal_intrset(ah, sc->sc_imask);

	ATH_UNLOCK(sc);

#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->start(sc->sc_tx99);
	else
#endif
	ieee80211_start_all(ic);		/* start all vap's */
}
1893
/*
 * Stop the interface with the softc lock held.  Tears down timers,
 * interrupts, tx/rx machinery and beacon state; portions touching
 * the hardware are skipped when the device has gone away (invalid).
 */
static void
ath_stop_locked(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	ATH_LOCK_ASSERT(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Shutdown the hardware and driver:
		 *    reset 802.11 state machine
		 *    turn off timers
		 *    disable interrupts
		 *    turn off the radio
		 *    clear transmit machinery
		 *    clear receive machinery
		 *    drain and release tx queues
		 *    reclaim beacon resources
		 *    power down hardware
		 *
		 * Note that some of this work is not possible if the
		 * hardware is gone (invalid).
		 */
#ifdef ATH_TX99_DIAG
		if (sc->sc_tx99 != NULL)
			sc->sc_tx99->stop(sc->sc_tx99);
#endif
		callout_stop(&sc->sc_wd_ch);
		sc->sc_wd_timer = 0;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		if (!sc->sc_invalid) {
			if (sc->sc_softled) {
				callout_stop(&sc->sc_ledtimer);
				/* Park the LED in its "off" state. */
				ath_hal_gpioset(ah, sc->sc_ledpin,
					!sc->sc_ledon);
				sc->sc_blinking = 0;
			}
			ath_hal_intrset(ah, 0);
		}
		ath_draintxq(sc, ATH_RESET_DEFAULT);
		if (!sc->sc_invalid) {
			ath_stoprecv(sc, 1);
			ath_hal_phydisable(ah);
		} else
			sc->sc_rxlink = NULL;
		ath_beacon_free(sc);	/* XXX not needed */
	}
}
1945
1946#define MAX_TXRX_ITERATIONS 1000
1947static void
1948ath_txrx_stop_locked(struct ath_softc *sc)
1949{
1950 int i = MAX_TXRX_ITERATIONS;
1951
1952 ATH_UNLOCK_ASSERT(sc);
1953 ATH_PCU_LOCK_ASSERT(sc);
1954
1955 /*
1956 * Sleep until all the pending operations have completed.
1957 *
1958 * The caller must ensure that reset has been incremented
1959 * or the pending operations may continue being queued.
1960 */
1961 while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
1962 sc->sc_txstart_cnt || sc->sc_intr_cnt) {
1963 if (i <= 0)
1964 break;
1965 msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 1);
1966 i--;
1967 }
1968
1969 if (i <= 0)
1970 device_printf(sc->sc_dev,
1971 "%s: didn't finish after %d iterations\n",
1972 __func__, MAX_TXRX_ITERATIONS);
1973}
1974#undef MAX_TXRX_ITERATIONS
1975
#if 0
/*
 * Currently unused wrapper: take the PCU lock and wait for pending
 * tx/rx/interrupt processing to drain.
 */
static void
ath_txrx_stop(struct ath_softc *sc)
{
	ATH_UNLOCK_ASSERT(sc);
	ATH_PCU_UNLOCK_ASSERT(sc);

	ATH_PCU_LOCK(sc);
	ath_txrx_stop_locked(sc);
	ATH_PCU_UNLOCK(sc);
}
#endif
1988
/*
 * Re-enable TX/RX processing by unblocking the driver taskqueue.
 *
 * This undoes the taskqueue_block() done at the start of a reset;
 * any tasks queued whilst blocked will now run.
 */
static void
ath_txrx_start(struct ath_softc *sc)
{

	taskqueue_unblock(sc->sc_tq);
}
1995
1996/*
1997 * Grab the reset lock, and wait around until noone else
1998 * is trying to do anything with it.
1999 *
2000 * This is totally horrible but we can't hold this lock for
2001 * long enough to do TX/RX or we end up with net80211/ip stack
2002 * LORs and eventual deadlock.
2003 *
2004 * "dowait" signals whether to spin, waiting for the reset
2005 * lock count to reach 0. This should (for now) only be used
2006 * during the reset path, as the rest of the code may not
2007 * be locking-reentrant enough to behave correctly.
2008 *
2009 * Another, cleaner way should be found to serialise all of
2010 * these operations.
2011 */
2012#define MAX_RESET_ITERATIONS 10
2013static int
2014ath_reset_grablock(struct ath_softc *sc, int dowait)
2015{
2016 int w = 0;
2017 int i = MAX_RESET_ITERATIONS;
2018
2019 ATH_PCU_LOCK_ASSERT(sc);
2020 do {
2021 if (sc->sc_inreset_cnt == 0) {
2022 w = 1;
2023 break;
2024 }
2025 if (dowait == 0) {
2026 w = 0;
2027 break;
2028 }
2029 ATH_PCU_UNLOCK(sc);
2030 pause("ath_reset_grablock", 1);
2031 i--;
2032 ATH_PCU_LOCK(sc);
2033 } while (i > 0);
2034
2035 /*
2036 * We always increment the refcounter, regardless
2037 * of whether we succeeded to get it in an exclusive
2038 * way.
2039 */
2040 sc->sc_inreset_cnt++;
2041
2042 if (i <= 0)
2043 device_printf(sc->sc_dev,
2044 "%s: didn't finish after %d iterations\n",
2045 __func__, MAX_RESET_ITERATIONS);
2046
2047 if (w == 0)
2048 device_printf(sc->sc_dev,
2049 "%s: warning, recursive reset path!\n",
2050 __func__);
2051
2052 return w;
2053}
2054#undef MAX_RESET_ITERATIONS
2055
2056/*
2057 * XXX TODO: write ath_reset_releaselock
2058 */
2059
2060static void
2061ath_stop(struct ifnet *ifp)
2062{
2063 struct ath_softc *sc = ifp->if_softc;
2064
2065 ATH_LOCK(sc);
2066 ath_stop_locked(ifp);
2067 ATH_UNLOCK(sc);
2068}
2069
2070/*
2071 * Reset the hardware w/o losing operational state. This is
2072 * basically a more efficient way of doing ath_stop, ath_init,
2073 * followed by state transitions to the current 802.11
2074 * operational state. Used to recover from various errors and
2075 * to reset or reload hardware state.
2076 */
2077int
2078ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
2079{
2080 struct ath_softc *sc = ifp->if_softc;
2081 struct ieee80211com *ic = ifp->if_l2com;
2082 struct ath_hal *ah = sc->sc_ah;
2083 HAL_STATUS status;
2084 int i;
2085
2086 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
2087
2088 /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
2089 ATH_PCU_UNLOCK_ASSERT(sc);
2090 ATH_UNLOCK_ASSERT(sc);
2091
2092 /* Try to (stop any further TX/RX from occuring */
2093 taskqueue_block(sc->sc_tq);
2094
2095 ATH_PCU_LOCK(sc);
2096 ath_hal_intrset(ah, 0); /* disable interrupts */
2097 ath_txrx_stop_locked(sc); /* Ensure TX/RX is stopped */
2098 if (ath_reset_grablock(sc, 1) == 0) {
2099 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
2100 __func__);
2101 }
2102 ATH_PCU_UNLOCK(sc);
2103
2104 /*
2105 * Should now wait for pending TX/RX to complete
2106 * and block future ones from occuring. This needs to be
2107 * done before the TX queue is drained.
2108 */
2109 ath_draintxq(sc, reset_type); /* stop xmit side */
2110
2111 /*
2112 * Regardless of whether we're doing a no-loss flush or
2113 * not, stop the PCU and handle what's in the RX queue.
2114 * That way frames aren't dropped which shouldn't be.
2115 */
2116 ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
2117 ath_rx_proc(sc, 0);
2118
2119 ath_settkipmic(sc); /* configure TKIP MIC handling */
2120 /* NB: indicate channel change so we do a full reset */
2121 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
2122 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
2123 __func__, status);
2124 sc->sc_diversity = ath_hal_getdiversity(ah);
2125
2126 /* Let DFS at it in case it's a DFS channel */
2127 ath_dfs_radar_enable(sc, ic->ic_curchan);
2128
2129 if (ath_startrecv(sc) != 0) /* restart recv */
2130 if_printf(ifp, "%s: unable to start recv logic\n", __func__);
2131 /*
2132 * We may be doing a reset in response to an ioctl
2133 * that changes the channel so update any state that
2134 * might change as a result.
2135 */
2136 ath_chan_change(sc, ic->ic_curchan);
2137 if (sc->sc_beacons) { /* restart beacons */
2138#ifdef IEEE80211_SUPPORT_TDMA
2139 if (sc->sc_tdma)
2140 ath_tdma_config(sc, NULL);
2141 else
2142#endif
2143 ath_beacon_config(sc, NULL);
2144 }
2145
2146 /*
2147 * Release the reset lock and re-enable interrupts here.
2148 * If an interrupt was being processed in ath_intr(),
2149 * it would disable interrupts at this point. So we have
2150 * to atomically enable interrupts and decrement the
2151 * reset counter - this way ath_intr() doesn't end up
2152 * disabling interrupts without a corresponding enable
2153 * in the rest or channel change path.
2154 */
2155 ATH_PCU_LOCK(sc);
2156 sc->sc_inreset_cnt--;
2157 /* XXX only do this if sc_inreset_cnt == 0? */
2158 ath_hal_intrset(ah, sc->sc_imask);
2159 ATH_PCU_UNLOCK(sc);
2160
2161 /*
2162 * TX and RX can be started here. If it were started with
2163 * sc_inreset_cnt > 0, the TX and RX path would abort.
2164 * Thus if this is a nested call through the reset or
2165 * channel change code, TX completion will occur but
2166 * RX completion and ath_start / ath_tx_start will not
2167 * run.
2168 */
2169
2170 /* Restart TX/RX as needed */
2171 ath_txrx_start(sc);
2172
2173 /* XXX Restart TX completion and pending TX */
2174 if (reset_type == ATH_RESET_NOLOSS) {
2175 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
2176 if (ATH_TXQ_SETUP(sc, i)) {
2177 ATH_TXQ_LOCK(&sc->sc_txq[i]);
2178 ath_txq_restart_dma(sc, &sc->sc_txq[i]);
2179 ath_txq_sched(sc, &sc->sc_txq[i]);
2180 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
2181 }
2182 }
2183 }
2184
2185 /*
2186 * This may have been set during an ath_start() call which
2187 * set this once it detected a concurrent TX was going on.
2188 * So, clear it.
2189 */
2190 IF_LOCK(&ifp->if_snd);
2191 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2192 IF_UNLOCK(&ifp->if_snd);
2193
2194 /* Handle any frames in the TX queue */
2195 /*
2196 * XXX should this be done by the caller, rather than
2197 * ath_reset() ?
2198 */
2199 ath_start(ifp); /* restart xmit */
2200 return 0;
2201}
2202
2203static int
2204ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
2205{
2206 struct ieee80211com *ic = vap->iv_ic;
2207 struct ifnet *ifp = ic->ic_ifp;
2208 struct ath_softc *sc = ifp->if_softc;
2209 struct ath_hal *ah = sc->sc_ah;
2210
2211 switch (cmd) {
2212 case IEEE80211_IOC_TXPOWER:
2213 /*
2214 * If per-packet TPC is enabled, then we have nothing
2215 * to do; otherwise we need to force the global limit.
2216 * All this can happen directly; no need to reset.
2217 */
2218 if (!ath_hal_gettpc(ah))
2219 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
2220 return 0;
2221 }
2222 /* XXX? Full or NOLOSS? */
2223 return ath_reset(ifp, ATH_RESET_FULL);
2224}
2225
2226struct ath_buf *
2227_ath_getbuf_locked(struct ath_softc *sc)
2228{
2229 struct ath_buf *bf;
2230
2231 ATH_TXBUF_LOCK_ASSERT(sc);
2232
2233 bf = TAILQ_FIRST(&sc->sc_txbuf);
2234 if (bf == NULL) {
2235 sc->sc_stats.ast_tx_getnobuf++;
2236 } else {
2237 if (bf->bf_flags & ATH_BUF_BUSY) {
2238 sc->sc_stats.ast_tx_getbusybuf++;
2239 bf = NULL;
2240 }
2241 }
2242
2243 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0)
2244 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
2245 else
2246 bf = NULL;
2247
2248 if (bf == NULL) {
2249 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
2250 TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
2251 "out of xmit buffers" : "xmit buffer busy");
2252 return NULL;
2253 }
2254
2255 /* Valid bf here; clear some basic fields */
2256 bf->bf_next = NULL; /* XXX just to be sure */
2257 bf->bf_last = NULL; /* XXX again, just to be sure */
2258 bf->bf_comp = NULL; /* XXX again, just to be sure */
2259 bzero(&bf->bf_state, sizeof(bf->bf_state));
2260
2261 return bf;
2262}
2263
2264/*
2265 * When retrying a software frame, buffers marked ATH_BUF_BUSY
2266 * can't be thrown back on the queue as they could still be
2267 * in use by the hardware.
2268 *
2269 * This duplicates the buffer, or returns NULL.
2270 *
2271 * The descriptor is also copied but the link pointers and
2272 * the DMA segments aren't copied; this frame should thus
2273 * be again passed through the descriptor setup/chain routines
2274 * so the link is correct.
2275 *
2276 * The caller must free the buffer using ath_freebuf().
2277 *
2278 * XXX TODO: this call shouldn't fail as it'll cause packet loss
2279 * XXX in the TX pathway when retries are needed.
2280 * XXX Figure out how to keep some buffers free, or factor the
2281 * XXX number of busy buffers into the xmit path (ath_start())
2282 * XXX so we don't over-commit.
2283 */
2284struct ath_buf *
2285ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf)
2286{
2287 struct ath_buf *tbf;
2288
2289 tbf = ath_getbuf(sc);
2290 if (tbf == NULL)
2291 return NULL; /* XXX failure? Why? */
2292
2293 /* Copy basics */
2294 tbf->bf_next = NULL;
2295 tbf->bf_nseg = bf->bf_nseg;
2296 tbf->bf_flags = bf->bf_flags & ~ATH_BUF_BUSY;
2297 tbf->bf_status = bf->bf_status;
2298 tbf->bf_m = bf->bf_m;
2299 tbf->bf_node = bf->bf_node;
2300 /* will be setup by the chain/setup function */
2301 tbf->bf_lastds = NULL;
2302 /* for now, last == self */
2303 tbf->bf_last = tbf;
2304 tbf->bf_comp = bf->bf_comp;
2305
2306 /* NOTE: DMA segments will be setup by the setup/chain functions */
2307
2308 /* The caller has to re-init the descriptor + links */
2309
2310 /* Copy state */
2311 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state));
2312
2313 return tbf;
2314}
2315
2316struct ath_buf *
2317ath_getbuf(struct ath_softc *sc)
2318{
2319 struct ath_buf *bf;
2320
2321 ATH_TXBUF_LOCK(sc);
2322 bf = _ath_getbuf_locked(sc);
2323 ATH_TXBUF_UNLOCK(sc);
2324 if (bf == NULL) {
2325 struct ifnet *ifp = sc->sc_ifp;
2326
2327 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
2328 sc->sc_stats.ast_tx_qstop++;
2329 IF_LOCK(&ifp->if_snd);
2330 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2331 IF_UNLOCK(&ifp->if_snd);
2332 }
2333 return bf;
2334}
2335
/*
 * Transmit entry point: drain the interface send queue, mapping
 * each frame (and any 802.11 fragments chained via m_nextpkt)
 * onto an ath_buf and handing it to ath_tx_start().
 *
 * Bails out early (setting IFF_DRV_OACTIVE) if a reset is in
 * progress; otherwise holds a sc_txstart_cnt reference across
 * the loop so a concurrent reset waits for us to finish.
 */
static void
ath_start(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct ath_buf *bf;
	struct mbuf *m, *next;
	ath_bufhead frags;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;

	/* XXX is it ok to hold the ATH_LOCK here? */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev,
		    "%s: sc_inreset_cnt > 0; bailing\n", __func__);
		ATH_PCU_UNLOCK(sc);
		IF_LOCK(&ifp->if_snd);
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		IF_UNLOCK(&ifp->if_snd);
		return;
	}
	sc->sc_txstart_cnt++;
	ATH_PCU_UNLOCK(sc);

	for (;;) {
		/*
		 * Grab a TX buffer and associated resources.
		 */
		bf = ath_getbuf(sc);
		if (bf == NULL)
			break;

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* Queue empty: return the unused buffer */
			ATH_TXBUF_LOCK(sc);
			TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
			ATH_TXBUF_UNLOCK(sc);
			break;
		}
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		/*
		 * Check for fragmentation. If this frame
		 * has been broken up verify we have enough
		 * buffers to send all the fragments so all
		 * go out or none...
		 */
		TAILQ_INIT(&frags);
		if ((m->m_flags & M_FRAG) &&
		    !ath_txfrag_setup(sc, &frags, m, ni)) {
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: out of txfrag buffers\n", __func__);
			sc->sc_stats.ast_tx_nofrag++;
			ifp->if_oerrors++;
			ath_freetx(m);
			goto bad;
		}
		ifp->if_opackets++;
	nextfrag:
		/*
		 * Pass the frame to the h/w for transmission.
		 * Fragmented frames have each frag chained together
		 * with m_nextpkt. We know there are sufficient ath_buf's
		 * to send all the frags because of work done by
		 * ath_txfrag_setup. We leave m_nextpkt set while
		 * calling ath_tx_start so it can use it to extend the
		 * the tx duration to cover the subsequent frag and
		 * so it can reclaim all the mbufs in case of an error;
		 * ath_tx_start clears m_nextpkt once it commits to
		 * handing the frame to the hardware.
		 */
		next = m->m_nextpkt;
		if (ath_tx_start(sc, ni, bf, m)) {
	bad:
			ifp->if_oerrors++;
	reclaim:
			/* Return the buffer and any frag buffers */
			bf->bf_m = NULL;
			bf->bf_node = NULL;
			ATH_TXBUF_LOCK(sc);
			TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
			ath_txfrag_cleanup(sc, &frags, ni);
			ATH_TXBUF_UNLOCK(sc);
			if (ni != NULL)
				ieee80211_free_node(ni);
			continue;
		}
		if (next != NULL) {
			/*
			 * Beware of state changing between frags.
			 * XXX check sta power-save state?
			 */
			if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
				DPRINTF(sc, ATH_DEBUG_XMIT,
				    "%s: flush fragmented packet, state %s\n",
				    __func__,
				    ieee80211_state_name[ni->ni_vap->iv_state]);
				ath_freetx(next);
				goto reclaim;
			}
			m = next;
			bf = TAILQ_FIRST(&frags);
			KASSERT(bf != NULL, ("no buf for txfrag"));
			TAILQ_REMOVE(&frags, bf, bf_list);
			goto nextfrag;
		}

		/* Arm the watchdog now that a frame is in flight */
		sc->sc_wd_timer = 5;
	}

	ATH_PCU_LOCK(sc);
	sc->sc_txstart_cnt--;
	ATH_PCU_UNLOCK(sc);
}
2450
2451static int
2452ath_media_change(struct ifnet *ifp)
2453{
2454 int error = ieee80211_media_change(ifp);
2455 /* NB: only the fixed rate can change and that doesn't need a reset */
2456 return (error == ENETRESET ? 0 : error);
2457}
2458
2459/*
2460 * Block/unblock tx+rx processing while a key change is done.
2461 * We assume the caller serializes key management operations
2462 * so we only need to worry about synchronization with other
2463 * uses that originate in the driver.
2464 */
2465static void
2466ath_key_update_begin(struct ieee80211vap *vap)
2467{
2468 struct ifnet *ifp = vap->iv_ic->ic_ifp;
2469 struct ath_softc *sc = ifp->if_softc;
2470
2471 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
2472 taskqueue_block(sc->sc_tq);
2473 IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */
2474}
2475
/*
 * Companion to ath_key_update_begin(): release the send queue
 * lock and resume taskqueue processing, in the reverse order of
 * acquisition.
 */
static void
ath_key_update_end(struct ieee80211vap *vap)
{
	struct ifnet *ifp = vap->iv_ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	IF_UNLOCK(&ifp->if_snd);
	taskqueue_unblock(sc->sc_tq);
}
2486
2487/*
2488 * Calculate the receive filter according to the
2489 * operating mode and state:
2490 *
2491 * o always accept unicast, broadcast, and multicast traffic
2492 * o accept PHY error frames when hardware doesn't have MIB support
2493 * to count and we need them for ANI (sta mode only until recently)
2494 * and we are not scanning (ANI is disabled)
2495 * NB: older hal's add rx filter bits out of sight and we need to
2496 * blindly preserve them
2497 * o probe request frames are accepted only when operating in
2498 * hostap, adhoc, mesh, or monitor modes
2499 * o enable promiscuous mode
2500 * - when in monitor mode
2501 * - if interface marked PROMISC (assumes bridge setting is filtered)
2502 * o accept beacons:
2503 * - when operating in station mode for collecting rssi data when
2504 * the station is otherwise quiet, or
2505 * - when operating in adhoc mode so the 802.11 layer creates
2506 * node table entries for peers,
2507 * - when scanning
2508 * - when doing s/w beacon miss (e.g. for ap+sta)
2509 * - when operating in ap mode in 11g to detect overlapping bss that
2510 * require protection
2511 * - when operating in mesh mode to detect neighbors
2512 * o accept control frames:
2513 * - when in monitor mode
2514 * XXX HT protection for 11n
2515 */
2516static u_int32_t
2517ath_calcrxfilter(struct ath_softc *sc)
2518{
2519 struct ifnet *ifp = sc->sc_ifp;
2520 struct ieee80211com *ic = ifp->if_l2com;
2521 u_int32_t rfilt;
2522
2523 rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
2524 if (!sc->sc_needmib && !sc->sc_scanning)
2525 rfilt |= HAL_RX_FILTER_PHYERR;
2526 if (ic->ic_opmode != IEEE80211_M_STA)
2527 rfilt |= HAL_RX_FILTER_PROBEREQ;
2528 /* XXX ic->ic_monvaps != 0? */
2529 if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC))
2530 rfilt |= HAL_RX_FILTER_PROM;
2531 if (ic->ic_opmode == IEEE80211_M_STA ||
2532 ic->ic_opmode == IEEE80211_M_IBSS ||
2533 sc->sc_swbmiss || sc->sc_scanning)
2534 rfilt |= HAL_RX_FILTER_BEACON;
2535 /*
2536 * NB: We don't recalculate the rx filter when
2537 * ic_protmode changes; otherwise we could do
2538 * this only when ic_protmode != NONE.
2539 */
2540 if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
2541 IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
2542 rfilt |= HAL_RX_FILTER_BEACON;
2543
2544 /*
2545 * Enable hardware PS-POLL RX only for hostap mode;
2546 * STA mode sends PS-POLL frames but never
2547 * receives them.
2548 */
2549 if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PSPOLL,
2550 0, NULL) == HAL_OK &&
2551 ic->ic_opmode == IEEE80211_M_HOSTAP)
2552 rfilt |= HAL_RX_FILTER_PSPOLL;
2553
2554 if (sc->sc_nmeshvaps) {
2555 rfilt |= HAL_RX_FILTER_BEACON;
2556 if (sc->sc_hasbmatch)
2557 rfilt |= HAL_RX_FILTER_BSSID;
2558 else
2559 rfilt |= HAL_RX_FILTER_PROM;
2560 }
2561 if (ic->ic_opmode == IEEE80211_M_MONITOR)
2562 rfilt |= HAL_RX_FILTER_CONTROL;
2563
2564 /*
2565 * Enable RX of compressed BAR frames only when doing
2566 * 802.11n. Required for A-MPDU.
2567 */
2568 if (IEEE80211_IS_CHAN_HT(ic->ic_curchan))
2569 rfilt |= HAL_RX_FILTER_COMPBAR;
2570
2571 /*
2572 * Enable radar PHY errors if requested by the
2573 * DFS module.
2574 */
2575 if (sc->sc_dodfs)
2576 rfilt |= HAL_RX_FILTER_PHYRADAR;
2577
2578 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n",
2579 __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags);
2580 return rfilt;
2581}
2582
2583static void
2584ath_update_promisc(struct ifnet *ifp)
2585{
2586 struct ath_softc *sc = ifp->if_softc;
2587 u_int32_t rfilt;
2588
2589 /* configure rx filter */
2590 rfilt = ath_calcrxfilter(sc);
2591 ath_hal_setrxfilter(sc->sc_ah, rfilt);
2592
2593 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
2594}
2595
/*
 * net80211 callback: the interface's multicast address list
 * changed; recompute the 64-bit hardware multicast hash filter
 * and program it into the HAL.
 */
static void
ath_update_mcast(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	u_int32_t mfilt[2];	/* two 32-bit words = 64 filter bits */

	/* calculate and install multicast filter */
	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		struct ifmultiaddr *ifma;
		/*
		 * Merge multicast addresses to form the hardware filter.
		 */
		mfilt[0] = mfilt[1] = 0;
		if_maddr_rlock(ifp);	/* XXX need some fiddling to remove? */
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			caddr_t dl;
			u_int32_t val;
			u_int8_t pos;

			/* calculate XOR of eight 6bit values */
			dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
			val = LE_READ_4(dl + 0);
			pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			val = LE_READ_4(dl + 3);
			pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			/* pos (0..63) selects a single hash filter bit */
			pos &= 0x3f;
			mfilt[pos / 32] |= (1 << (pos % 32));
		}
		if_maddr_runlock(ifp);
	} else
		mfilt[0] = mfilt[1] = ~0;	/* ALLMULTI: accept everything */
	ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
	DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
	    __func__, mfilt[0], mfilt[1]);
}
2631
/*
 * Program basic operating state into the hardware: RX filter,
 * operating mode, MAC address and multicast filter.
 */
static void
ath_mode_init(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t rfilt;

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath_hal_setrxfilter(ah, rfilt);

	/* configure operational mode */
	ath_hal_setopmode(ah);

	/* handle any link-level address change */
	ath_hal_setmac(ah, IF_LLADDR(ifp));

	/* calculate and install multicast filter */
	ath_update_mcast(ifp);
}
2652
2653/*
2654 * Set the slot time based on the current setting.
2655 */
2656static void
2657ath_setslottime(struct ath_softc *sc)
2658{
2659 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2660 struct ath_hal *ah = sc->sc_ah;
2661 u_int usec;
2662
2663 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
2664 usec = 13;
2665 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
2666 usec = 21;
2667 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
2668 /* honor short/long slot time only in 11g */
2669 /* XXX shouldn't honor on pure g or turbo g channel */
2670 if (ic->ic_flags & IEEE80211_F_SHSLOT)
2671 usec = HAL_SLOT_TIME_9;
2672 else
2673 usec = HAL_SLOT_TIME_20;
2674 } else
2675 usec = HAL_SLOT_TIME_9;
2676
2677 DPRINTF(sc, ATH_DEBUG_RESET,
2678 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
2679 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
2680 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
2681
2682 ath_hal_setslottime(ah, usec);
2683 sc->sc_updateslot = OK;
2684}
2685
2686/*
2687 * Callback from the 802.11 layer to update the
2688 * slot time based on the current setting.
2689 */
2690static void
2691ath_updateslot(struct ifnet *ifp)
2692{
2693 struct ath_softc *sc = ifp->if_softc;
2694 struct ieee80211com *ic = ifp->if_l2com;
2695
2696 /*
2697 * When not coordinating the BSS, change the hardware
2698 * immediately. For other operation we defer the change
2699 * until beacon updates have propagated to the stations.
2700 */
2701 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2702 ic->ic_opmode == IEEE80211_M_MBSS)
2703 sc->sc_updateslot = UPDATE;
2704 else
2705 ath_setslottime(sc);
2706}
2707
2708/*
2709 * Setup a h/w transmit queue for beacons.
2710 */
2711static int
2712ath_beaconq_setup(struct ath_hal *ah)
2713{
2714 HAL_TXQ_INFO qi;
2715
2716 memset(&qi, 0, sizeof(qi));
2717 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
2718 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
2719 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
2720 /* NB: for dynamic turbo, don't enable any other interrupts */
2721 qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE;
2722 return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
2723}
2724
2725/*
2726 * Setup the transmit queue parameters for the beacon queue.
2727 */
2728static int
2729ath_beaconq_config(struct ath_softc *sc)
2730{
2731#define ATH_EXPONENT_TO_VALUE(v) ((1<<(v))-1)
2732 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2733 struct ath_hal *ah = sc->sc_ah;
2734 HAL_TXQ_INFO qi;
2735
2736 ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
2737 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2738 ic->ic_opmode == IEEE80211_M_MBSS) {
2739 /*
2740 * Always burst out beacon and CAB traffic.
2741 */
2742 qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
2743 qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT;
2744 qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT;
2745 } else {
2746 struct wmeParams *wmep =
2747 &ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
2748 /*
2749 * Adhoc mode; important thing is to use 2x cwmin.
2750 */
2751 qi.tqi_aifs = wmep->wmep_aifsn;
2752 qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2753 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2754 }
2755
2756 if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
2757 device_printf(sc->sc_dev, "unable to update parameters for "
2758 "beacon hardware queue!\n");
2759 return 0;
2760 } else {
2761 ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
2762 return 1;
2763 }
2764#undef ATH_EXPONENT_TO_VALUE
2765}
2766
2767/*
2768 * Allocate and setup an initial beacon frame.
2769 */
2770static int
2771ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
2772{
2773 struct ieee80211vap *vap = ni->ni_vap;
2774 struct ath_vap *avp = ATH_VAP(vap);
2775 struct ath_buf *bf;
2776 struct mbuf *m;
2777 int error;
2778
2779 bf = avp->av_bcbuf;
2780 DPRINTF(sc, ATH_DEBUG_NODE, "%s: bf_m=%p, bf_node=%p\n",
2781 __func__, bf->bf_m, bf->bf_node);
2782 if (bf->bf_m != NULL) {
2783 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2784 m_freem(bf->bf_m);
2785 bf->bf_m = NULL;
2786 }
2787 if (bf->bf_node != NULL) {
2788 ieee80211_free_node(bf->bf_node);
2789 bf->bf_node = NULL;
2790 }
2791
2792 /*
2793 * NB: the beacon data buffer must be 32-bit aligned;
2794 * we assume the mbuf routines will return us something
2795 * with this alignment (perhaps should assert).
2796 */
2797 m = ieee80211_beacon_alloc(ni, &avp->av_boff);
2798 if (m == NULL) {
2799 device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__);
2800 sc->sc_stats.ast_be_nombuf++;
2801 return ENOMEM;
2802 }
2803 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
2804 bf->bf_segs, &bf->bf_nseg,
2805 BUS_DMA_NOWAIT);
2806 if (error != 0) {
2807 device_printf(sc->sc_dev,
2808 "%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n",
2809 __func__, error);
2810 m_freem(m);
2811 return error;
2812 }
2813
2814 /*
2815 * Calculate a TSF adjustment factor required for staggered
2816 * beacons. Note that we assume the format of the beacon
2817 * frame leaves the tstamp field immediately following the
2818 * header.
2819 */
2820 if (sc->sc_stagbeacons && avp->av_bslot > 0) {
2821 uint64_t tsfadjust;
2822 struct ieee80211_frame *wh;
2823
2824 /*
2825 * The beacon interval is in TU's; the TSF is in usecs.
2826 * We figure out how many TU's to add to align the timestamp
2827 * then convert to TSF units and handle byte swapping before
2828 * inserting it in the frame. The hardware will then add this
2829 * each time a beacon frame is sent. Note that we align vap's
2830 * 1..N and leave vap 0 untouched. This means vap 0 has a
2831 * timestamp in one beacon interval while the others get a
2832 * timstamp aligned to the next interval.
2833 */
2834 tsfadjust = ni->ni_intval *
2835 (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF;
2836 tsfadjust = htole64(tsfadjust << 10); /* TU -> TSF */
2837
2838 DPRINTF(sc, ATH_DEBUG_BEACON,
2839 "%s: %s beacons bslot %d intval %u tsfadjust %llu\n",
2840 __func__, sc->sc_stagbeacons ? "stagger" : "burst",
2841 avp->av_bslot, ni->ni_intval,
2842 (long long unsigned) le64toh(tsfadjust));
2843
2844 wh = mtod(m, struct ieee80211_frame *);
2845 memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust));
2846 }
2847 bf->bf_m = m;
2848 bf->bf_node = ieee80211_ref_node(ni);
2849
2850 return 0;
2851}
2852
2853/*
2854 * Setup the beacon frame for transmit.
2855 */
2856static void
2857ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
2858{
2859#define USE_SHPREAMBLE(_ic) \
2860 (((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
2861 == IEEE80211_F_SHPREAMBLE)
2862 struct ieee80211_node *ni = bf->bf_node;
2863 struct ieee80211com *ic = ni->ni_ic;
2864 struct mbuf *m = bf->bf_m;
2865 struct ath_hal *ah = sc->sc_ah;
2866 struct ath_desc *ds;
2867 int flags, antenna;
2868 const HAL_RATE_TABLE *rt;
2869 u_int8_t rix, rate;
2870
2871 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
2872 __func__, m, m->m_len);
2873
2874 /* setup descriptors */
2875 ds = bf->bf_desc;
2876 bf->bf_last = bf;
2877 bf->bf_lastds = ds;
2878
2879 flags = HAL_TXDESC_NOACK;
2880 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
2881 ds->ds_link = bf->bf_daddr; /* self-linked */
2882 flags |= HAL_TXDESC_VEOL;
2883 /*
2884 * Let hardware handle antenna switching.
2885 */
2886 antenna = sc->sc_txantenna;
2887 } else {
2888 ds->ds_link = 0;
2889 /*
2890 * Switch antenna every 4 beacons.
2891 * XXX assumes two antenna
2892 */
2893 if (sc->sc_txantenna != 0)
2894 antenna = sc->sc_txantenna;
2895 else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0)
2896 antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1);
2897 else
2898 antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
2899 }
2900
2901 KASSERT(bf->bf_nseg == 1,
2902 ("multi-segment beacon frame; nseg %u", bf->bf_nseg));
2903 ds->ds_data = bf->bf_segs[0].ds_addr;
2904 /*
2905 * Calculate rate code.
2906 * XXX everything at min xmit rate
2907 */
2908 rix = 0;
2909 rt = sc->sc_currates;
2910 rate = rt->info[rix].rateCode;
2911 if (USE_SHPREAMBLE(ic))
2912 rate |= rt->info[rix].shortPreamble;
2913 ath_hal_setuptxdesc(ah, ds
2914 , m->m_len + IEEE80211_CRC_LEN /* frame length */
2915 , sizeof(struct ieee80211_frame)/* header length */
2916 , HAL_PKT_TYPE_BEACON /* Atheros packet type */
2917 , ni->ni_txpower /* txpower XXX */
2918 , rate, 1 /* series 0 rate/tries */
2919 , HAL_TXKEYIX_INVALID /* no encryption */
2920 , antenna /* antenna mode */
2921 , flags /* no ack, veol for beacons */
2922 , 0 /* rts/cts rate */
2923 , 0 /* rts/cts duration */
2924 );
2925 /* NB: beacon's BufLen must be a multiple of 4 bytes */
2926 ath_hal_filltxdesc(ah, ds
2927 , roundup(m->m_len, 4) /* buffer length */
2928 , AH_TRUE /* first segment */
2929 , AH_TRUE /* last segment */
2930 , ds /* first descriptor */
2931 );
2932#if 0
2933 ath_desc_swap(ds);
2934#endif
2935#undef USE_SHPREAMBLE
2936}
2937
/*
 * net80211 callback: flag a beacon content item (identified by
 * "item") as changed so the next beacon regeneration picks it up.
 */
static void
ath_beacon_update(struct ieee80211vap *vap, int item)
{
	struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff;

	setbit(bo->bo_flags, item);
}
2945
2946/*
2947 * Append the contents of src to dst; both queues
2948 * are assumed to be locked.
2949 */
2950static void
2951ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
2952{
2953
2954 ATH_TXQ_LOCK_ASSERT(dst);
2955 ATH_TXQ_LOCK_ASSERT(src);
2956
2957 TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list);
2958 dst->axq_link = src->axq_link;
2959 src->axq_link = NULL;
2960 dst->axq_depth += src->axq_depth;
2961 dst->axq_aggr_depth += src->axq_aggr_depth;
2962 src->axq_depth = 0;
2963 src->axq_aggr_depth = 0;
2964}
2965
2966/*
2967 * Transmit a beacon frame at SWBA. Dynamic updates to the
2968 * frame contents are done as needed and the slot time is
2969 * also adjusted based on current state.
2970 */
2971static void
2972ath_beacon_proc(void *arg, int pending)
2973{
2974 struct ath_softc *sc = arg;
2975 struct ath_hal *ah = sc->sc_ah;
2976 struct ieee80211vap *vap;
2977 struct ath_buf *bf;
2978 int slot, otherant;
2979 uint32_t bfaddr;
2980
2981 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n",
2982 __func__, pending);
2983 /*
2984 * Check if the previous beacon has gone out. If
2985 * not don't try to post another, skip this period
2986 * and wait for the next. Missed beacons indicate
2987 * a problem and should not occur. If we miss too
2988 * many consecutive beacons reset the device.
2989 */
2990 if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
2991 sc->sc_bmisscount++;
2992 sc->sc_stats.ast_be_missed++;
2993 DPRINTF(sc, ATH_DEBUG_BEACON,
2994 "%s: missed %u consecutive beacons\n",
2995 __func__, sc->sc_bmisscount);
2996 if (sc->sc_bmisscount >= ath_bstuck_threshold)
2997 taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
2998 return;
2999 }
3000 if (sc->sc_bmisscount != 0) {
3001 DPRINTF(sc, ATH_DEBUG_BEACON,
3002 "%s: resume beacon xmit after %u misses\n",
3003 __func__, sc->sc_bmisscount);
3004 sc->sc_bmisscount = 0;
3005 }
3006
3007 if (sc->sc_stagbeacons) { /* staggered beacons */
3008 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
3009 uint32_t tsftu;
3010
3011 tsftu = ath_hal_gettsf32(ah) >> 10;
3012 /* XXX lintval */
3013 slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval;
3014 vap = sc->sc_bslot[(slot+1) % ATH_BCBUF];
3015 bfaddr = 0;
3016 if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) {
3017 bf = ath_beacon_generate(sc, vap);
3018 if (bf != NULL)
3019 bfaddr = bf->bf_daddr;
3020 }
3021 } else { /* burst'd beacons */
3022 uint32_t *bflink = &bfaddr;
3023
3024 for (slot = 0; slot < ATH_BCBUF; slot++) {
3025 vap = sc->sc_bslot[slot];
3026 if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) {
3027 bf = ath_beacon_generate(sc, vap);
3028 if (bf != NULL) {
3029 *bflink = bf->bf_daddr;
3030 bflink = &bf->bf_desc->ds_link;
3031 }
3032 }
3033 }
3034 *bflink = 0; /* terminate list */
3035 }
3036
3037 /*
3038 * Handle slot time change when a non-ERP station joins/leaves
3039 * an 11g network. The 802.11 layer notifies us via callback,
3040 * we mark updateslot, then wait one beacon before effecting
3041 * the change. This gives associated stations at least one
3042 * beacon interval to note the state change.
3043 */
3044 /* XXX locking */
3045 if (sc->sc_updateslot == UPDATE) {
3046 sc->sc_updateslot = COMMIT; /* commit next beacon */
3047 sc->sc_slotupdate = slot;
3048 } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
3049 ath_setslottime(sc); /* commit change to h/w */
3050
3051 /*
3052 * Check recent per-antenna transmit statistics and flip
3053 * the default antenna if noticeably more frames went out
3054 * on the non-default antenna.
3055 * XXX assumes 2 anntenae
3056 */
3057 if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) {
3058 otherant = sc->sc_defant & 1 ? 2 : 1;
3059 if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
3060 ath_setdefantenna(sc, otherant);
3061 sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
3062 }
3063
3064 if (bfaddr != 0) {
3065 /*
3066 * Stop any current dma and put the new frame on the queue.
3067 * This should never fail since we check above that no frames
3068 * are still pending on the queue.
3069 */
3070 if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
3071 DPRINTF(sc, ATH_DEBUG_ANY,
3072 "%s: beacon queue %u did not stop?\n",
3073 __func__, sc->sc_bhalq);
3074 }
3075 /* NB: cabq traffic should already be queued and primed */
3076 ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr);
3077 ath_hal_txstart(ah, sc->sc_bhalq);
3078
3079 sc->sc_stats.ast_be_xmit++;
3080 }
3081}
3082
/*
 * Construct the beacon frame for the given vap and return the
 * ath_buf holding it, ready to be chained onto the beacon queue.
 *
 * Returns NULL if the beacon contents changed size and the mbuf
 * could not be re-mapped for DMA.  As a side effect, at DTIM any
 * frames pending on the vap's software multicast queue are moved
 * onto the hardware CAB queue and the CAB queue is started.
 */
static struct ath_buf *
ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_txq *cabq = sc->sc_cabq;
	struct ath_buf *bf;
	struct mbuf *m;
	int nmcastq, error;

	KASSERT(vap->iv_state >= IEEE80211_S_RUN,
	    ("not running, state %d", vap->iv_state));
	KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));

	/*
	 * Update dynamic beacon contents.  If this returns
	 * non-zero then we need to remap the memory because
	 * the beacon frame changed size (probably because
	 * of the TIM bitmap).
	 */
	bf = avp->av_bcbuf;
	m = bf->bf_m;
	/* XXX lock mcastq? */
	nmcastq = avp->av_mcastq.axq_depth;

	if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) {
		/* XXX too conservative? */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
		    bf->bf_segs, &bf->bf_nseg,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			if_printf(vap->iv_ifp,
			    "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
			    __func__, error);
			return NULL;
		}
	}
	/* bo_tim[4] bit 0 is the DTIM indicator in the TIM IE */
	if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) {
		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: cabq did not drain, mcastq %u cabq %u\n",
		    __func__, nmcastq, cabq->axq_depth);
		sc->sc_stats.ast_cabq_busy++;
		if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) {
			/*
			 * CABQ traffic from a previous vap is still pending.
			 * We must drain the q before this beacon frame goes
			 * out as otherwise this vap's stations will get cab
			 * frames from a different vap.
			 * XXX could be slow causing us to miss DBA
			 */
			ath_tx_draintxq(sc, cabq);
		}
	}
	ath_beacon_setup(sc, bf);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);

	/*
	 * Enable the CAB queue before the beacon queue to
	 * insure cab frames are triggered by this beacon.
	 */
	if (avp->av_boff.bo_tim[4] & 1) {
		struct ath_hal *ah = sc->sc_ah;

		/* NB: only at DTIM */
		ATH_TXQ_LOCK(cabq);
		ATH_TXQ_LOCK(&avp->av_mcastq);
		if (nmcastq) {
			struct ath_buf *bfm;

			/*
			 * Move frames from the s/w mcast q to the h/w cab q.
			 * XXX MORE_DATA bit
			 */
			bfm = TAILQ_FIRST(&avp->av_mcastq.axq_q);
			/* Chain onto the in-flight CABQ list, or prime it */
			if (cabq->axq_link != NULL) {
				*cabq->axq_link = bfm->bf_daddr;
			} else
				ath_hal_puttxbuf(ah, cabq->axq_qnum,
				    bfm->bf_daddr);
			ath_txqmove(cabq, &avp->av_mcastq);

			sc->sc_stats.ast_cabq_xmit += nmcastq;
		}
		/* NB: gated by beacon so safe to start here */
		if (! TAILQ_EMPTY(&(cabq->axq_q)))
			ath_hal_txstart(ah, cabq->axq_qnum);
		ATH_TXQ_UNLOCK(&avp->av_mcastq);
		ATH_TXQ_UNLOCK(cabq);
	}
	return bf;
}
3174
/*
 * Refresh the vap's pre-allocated beacon frame and hand it to the
 * beacon queue.  Used in adhoc/IBSS mode; the frame is loaded once
 * and the hardware is started (NB: caller must have stopped beacon
 * tx dma first — see the note below).
 *
 * Silently returns if a resized beacon cannot be re-mapped for DMA.
 */
static void
ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;
	struct mbuf *m;
	int error;

	KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));

	/*
	 * Update dynamic beacon contents.  If this returns
	 * non-zero then we need to remap the memory because
	 * the beacon frame changed size (probably because
	 * of the TIM bitmap).
	 */
	bf = avp->av_bcbuf;
	m = bf->bf_m;
	if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) {
		/* XXX too conservative? */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
		    bf->bf_segs, &bf->bf_nseg,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			if_printf(vap->iv_ifp,
			    "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
			    __func__, error);
			return;
		}
	}
	ath_beacon_setup(sc, bf);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);

	/* NB: caller is known to have already stopped tx dma */
	ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
	ath_hal_txstart(ah, sc->sc_bhalq);
}
3214
3215/*
3216 * Reset the hardware, with no loss.
3217 *
3218 * This can't be used for a general case reset.
3219 */
3220static void
3221ath_reset_proc(void *arg, int pending)
3222{
3223 struct ath_softc *sc = arg;
3224 struct ifnet *ifp = sc->sc_ifp;
3225
3226#if 0
3227 if_printf(ifp, "%s: resetting\n", __func__);
3228#endif
3229 ath_reset(ifp, ATH_RESET_NOLOSS);
3230}
3231
3232/*
3233 * Reset the hardware after detecting beacons have stopped.
3234 */
3235static void
3236ath_bstuck_proc(void *arg, int pending)
3237{
3238 struct ath_softc *sc = arg;
3239 struct ifnet *ifp = sc->sc_ifp;
3240 uint32_t hangs = 0;
3241
3242 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0)
3243 if_printf(ifp, "bb hang detected (0x%x)\n", hangs);
3244
3245 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
3246 sc->sc_bmisscount);
3247 sc->sc_stats.ast_bstuck++;
3248 /*
3249 * This assumes that there's no simultaneous channel mode change
3250 * occuring.
3251 */
3252 ath_reset(ifp, ATH_RESET_NOLOSS);
3253}
3254
3255/*
3256 * Reclaim beacon resources and return buffer to the pool.
3257 */
3258static void
3259ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf)
3260{
3261
3262 DPRINTF(sc, ATH_DEBUG_NODE, "%s: free bf=%p, bf_m=%p, bf_node=%p\n",
3263 __func__, bf, bf->bf_m, bf->bf_node);
3264 if (bf->bf_m != NULL) {
3265 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3266 m_freem(bf->bf_m);
3267 bf->bf_m = NULL;
3268 }
3269 if (bf->bf_node != NULL) {
3270 ieee80211_free_node(bf->bf_node);
3271 bf->bf_node = NULL;
3272 }
3273 TAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list);
3274}
3275
3276/*
3277 * Reclaim beacon resources.
3278 */
3279static void
3280ath_beacon_free(struct ath_softc *sc)
3281{
3282 struct ath_buf *bf;
3283
3284 TAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
3285 DPRINTF(sc, ATH_DEBUG_NODE,
3286 "%s: free bf=%p, bf_m=%p, bf_node=%p\n",
3287 __func__, bf, bf->bf_m, bf->bf_node);
3288 if (bf->bf_m != NULL) {
3289 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3290 m_freem(bf->bf_m);
3291 bf->bf_m = NULL;
3292 }
3293 if (bf->bf_node != NULL) {
3294 ieee80211_free_node(bf->bf_node);
3295 bf->bf_node = NULL;
3296 }
3297 }
3298}
3299
3300/*
3301 * Configure the beacon and sleep timers.
3302 *
3303 * When operating as an AP this resets the TSF and sets
3304 * up the hardware to notify us when we need to issue beacons.
3305 *
3306 * When operating in station mode this sets up the beacon
3307 * timers according to the timestamp of the last received
3308 * beacon and the current TSF, configures PCF and DTIM
3309 * handling, programs the sleep registers so the hardware
3310 * will wakeup in time to receive beacons, and configures
3311 * the beacon miss handling so we'll receive a BMISS
3312 * interrupt when we stop seeing beacons from the AP
3313 * we've associated with.
3314 */
3315static void
3316ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
3317{
3318#define TSF_TO_TU(_h,_l) \
3319 ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
3320#define FUDGE 2
3321 struct ath_hal *ah = sc->sc_ah;
3322 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
3323 struct ieee80211_node *ni;
3324 u_int32_t nexttbtt, intval, tsftu;
3325 u_int64_t tsf;
3326
3327 if (vap == NULL)
3328 vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */
3329 ni = ieee80211_ref_node(vap->iv_bss);
3330
3331 /* extract tstamp from last beacon and convert to TU */
3332 nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4),
3333 LE_READ_4(ni->ni_tstamp.data));
3334 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
3335 ic->ic_opmode == IEEE80211_M_MBSS) {
3336 /*
3337 * For multi-bss ap/mesh support beacons are either staggered
3338 * evenly over N slots or burst together. For the former
3339 * arrange for the SWBA to be delivered for each slot.
3340 * Slots that are not occupied will generate nothing.
3341 */
3342 /* NB: the beacon interval is kept internally in TU's */
3343 intval = ni->ni_intval & HAL_BEACON_PERIOD;
3344 if (sc->sc_stagbeacons)
3345 intval /= ATH_BCBUF;
3346 } else {
3347 /* NB: the beacon interval is kept internally in TU's */
3348 intval = ni->ni_intval & HAL_BEACON_PERIOD;
3349 }
3350 if (nexttbtt == 0) /* e.g. for ap mode */
3351 nexttbtt = intval;
3352 else if (intval) /* NB: can be 0 for monitor mode */
3353 nexttbtt = roundup(nexttbtt, intval);
3354 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
3355 __func__, nexttbtt, intval, ni->ni_intval);
3356 if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) {
3357 HAL_BEACON_STATE bs;
3358 int dtimperiod, dtimcount;
3359 int cfpperiod, cfpcount;
3360
3361 /*
3362 * Setup dtim and cfp parameters according to
3363 * last beacon we received (which may be none).
3364 */
3365 dtimperiod = ni->ni_dtim_period;
3366 if (dtimperiod <= 0) /* NB: 0 if not known */
3367 dtimperiod = 1;
3368 dtimcount = ni->ni_dtim_count;
3369 if (dtimcount >= dtimperiod) /* NB: sanity check */
3370 dtimcount = 0; /* XXX? */
3371 cfpperiod = 1; /* NB: no PCF support yet */
3372 cfpcount = 0;
3373 /*
3374 * Pull nexttbtt forward to reflect the current
3375 * TSF and calculate dtim+cfp state for the result.
3376 */
3377 tsf = ath_hal_gettsf64(ah);
3378 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
3379 do {
3380 nexttbtt += intval;
3381 if (--dtimcount < 0) {
3382 dtimcount = dtimperiod - 1;
3383 if (--cfpcount < 0)
3384 cfpcount = cfpperiod - 1;
3385 }
3386 } while (nexttbtt < tsftu);
3387 memset(&bs, 0, sizeof(bs));
3388 bs.bs_intval = intval;
3389 bs.bs_nexttbtt = nexttbtt;
3390 bs.bs_dtimperiod = dtimperiod*intval;
3391 bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
3392 bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
3393 bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
3394 bs.bs_cfpmaxduration = 0;
3395#if 0
3396 /*
3397 * The 802.11 layer records the offset to the DTIM
3398 * bitmap while receiving beacons; use it here to
3399 * enable h/w detection of our AID being marked in
3400 * the bitmap vector (to indicate frames for us are
3401 * pending at the AP).
3402 * XXX do DTIM handling in s/w to WAR old h/w bugs
3403 * XXX enable based on h/w rev for newer chips
3404 */
3405 bs.bs_timoffset = ni->ni_timoff;
3406#endif
3407 /*
3408 * Calculate the number of consecutive beacons to miss
3409 * before taking a BMISS interrupt.
3410 * Note that we clamp the result to at most 10 beacons.
3411 */
3412 bs.bs_bmissthreshold = vap->iv_bmissthreshold;
3413 if (bs.bs_bmissthreshold > 10)
3414 bs.bs_bmissthreshold = 10;
3415 else if (bs.bs_bmissthreshold <= 0)
3416 bs.bs_bmissthreshold = 1;
3417
3418 /*
3419 * Calculate sleep duration. The configuration is
3420 * given in ms. We insure a multiple of the beacon
3421 * period is used. Also, if the sleep duration is
3422 * greater than the DTIM period then it makes senses
3423 * to make it a multiple of that.
3424 *
3425 * XXX fixed at 100ms
3426 */
3427 bs.bs_sleepduration =
3428 roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
3429 if (bs.bs_sleepduration > bs.bs_dtimperiod)
3430 bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);
3431
3432 DPRINTF(sc, ATH_DEBUG_BEACON,
3433 "%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
3434 , __func__
3435 , tsf, tsftu
3436 , bs.bs_intval
3437 , bs.bs_nexttbtt
3438 , bs.bs_dtimperiod
3439 , bs.bs_nextdtim
3440 , bs.bs_bmissthreshold
3441 , bs.bs_sleepduration
3442 , bs.bs_cfpperiod
3443 , bs.bs_cfpmaxduration
3444 , bs.bs_cfpnext
3445 , bs.bs_timoffset
3446 );
3447 ath_hal_intrset(ah, 0);
3448 ath_hal_beacontimers(ah, &bs);
3449 sc->sc_imask |= HAL_INT_BMISS;
3450 ath_hal_intrset(ah, sc->sc_imask);
3451 } else {
3452 ath_hal_intrset(ah, 0);
3453 if (nexttbtt == intval)
3454 intval |= HAL_BEACON_RESET_TSF;
3455 if (ic->ic_opmode == IEEE80211_M_IBSS) {
3456 /*
3457 * In IBSS mode enable the beacon timers but only
3458 * enable SWBA interrupts if we need to manually
3459 * prepare beacon frames. Otherwise we use a
3460 * self-linked tx descriptor and let the hardware
3461 * deal with things.
3462 */
3463 intval |= HAL_BEACON_ENA;
3464 if (!sc->sc_hasveol)
3465 sc->sc_imask |= HAL_INT_SWBA;
3466 if ((intval & HAL_BEACON_RESET_TSF) == 0) {
3467 /*
3468 * Pull nexttbtt forward to reflect
3469 * the current TSF.
3470 */
3471 tsf = ath_hal_gettsf64(ah);
3472 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
3473 do {
3474 nexttbtt += intval;
3475 } while (nexttbtt < tsftu);
3476 }
3477 ath_beaconq_config(sc);
3478 } else if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
3479 ic->ic_opmode == IEEE80211_M_MBSS) {
3480 /*
3481 * In AP/mesh mode we enable the beacon timers
3482 * and SWBA interrupts to prepare beacon frames.
3483 */
3484 intval |= HAL_BEACON_ENA;
3485 sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */
3486 ath_beaconq_config(sc);
3487 }
3488 ath_hal_beaconinit(ah, nexttbtt, intval);
3489 sc->sc_bmisscount = 0;
3490 ath_hal_intrset(ah, sc->sc_imask);
3491 /*
3492 * When using a self-linked beacon descriptor in
3493 * ibss mode load it once here.
3494 */
3495 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol)
3496 ath_beacon_start_adhoc(sc, vap);
3497 }
3498 sc->sc_syncbeacon = 0;
3499 ieee80211_free_node(ni);
3500#undef FUDGE
3501#undef TSF_TO_TU
3502}
3503
3504static void
3505ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3506{
3507 bus_addr_t *paddr = (bus_addr_t*) arg;
3508 KASSERT(error == 0, ("error %u on bus_dma callback", error));
3509 *paddr = segs->ds_addr;
3510}
3511
/*
 * Allocate a DMA-able descriptor area plus an array of ath_bufs
 * that index into it.  Each of 'nbuf' buffers gets 'ndesc'
 * descriptors and its own dmamap; on success the buffers are
 * placed on 'head'.  Returns 0 or an errno; on failure all
 * partially-acquired resources are released and *dd is zeroed.
 */
static int
ath_descdma_setup(struct ath_softc *sc,
	struct ath_descdma *dd, ath_bufhead *head,
	const char *name, int nbuf, int ndesc)
{
#define	DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define	ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
	((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
	struct ifnet *ifp = sc->sc_ifp;
	uint8_t *ds;
	struct ath_buf *bf;
	int i, bsize, error;
	int desc_len;

	desc_len = sizeof(struct ath_desc);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n",
	    __func__, name, nbuf, ndesc);

	dd->dd_name = name;
	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Merlin work-around:
	 * Descriptors that cross the 4KB boundary can't be used.
	 * Assume one skipped descriptor per 4KB page.
	 */
	if (! ath_hal_split4ktrans(sc->sc_ah)) {
		int numdescpage = 4096 / (desc_len * ndesc);
		dd->dd_desc_len = (nbuf / numdescpage + 1) * 4096;
	}

	/*
	 * Setup DMA descriptor area.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
		       PAGE_SIZE, 0,		/* alignment, bounds */
		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		       BUS_SPACE_MAXADDR,	/* highaddr */
		       NULL, NULL,		/* filter, filterarg */
		       dd->dd_desc_len,		/* maxsize */
		       1,			/* nsegments */
		       dd->dd_desc_len,		/* maxsegsize */
		       BUS_DMA_ALLOCNOW,	/* flags */
		       NULL,			/* lockfunc */
		       NULL,			/* lockarg */
		       &dd->dd_dmat);
	if (error != 0) {
		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
		return error;
	}

	/* allocate descriptors */
	error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
	if (error != 0) {
		if_printf(ifp, "unable to create dmamap for %s descriptors, "
			"error %u\n", dd->dd_name, error);
		goto fail0;
	}

	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
				 &dd->dd_dmamap);
	if (error != 0) {
		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
			"error %u\n", nbuf * ndesc, dd->dd_name, error);
		goto fail1;
	}

	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
				dd->dd_desc, dd->dd_desc_len,
				ath_load_cb, &dd->dd_desc_paddr,
				BUS_DMA_NOWAIT);
	if (error != 0) {
		if_printf(ifp, "unable to map %s descriptors, error %u\n",
			dd->dd_name, error);
		goto fail2;
	}

	ds = (uint8_t *) dd->dd_desc;
	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
	    (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);

	/* allocate rx buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
	if (bf == NULL) {
		if_printf(ifp, "malloc of %s buffers failed, size %u\n",
			dd->dd_name, bsize);
		goto fail3;
	}
	dd->dd_bufptr = bf;

	TAILQ_INIT(head);
	for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * desc_len)) {
		bf->bf_desc = (struct ath_desc *) ds;
		bf->bf_daddr = DS2PHYS(dd, ds);
		if (! ath_hal_split4ktrans(sc->sc_ah)) {
			/*
			 * Merlin WAR: Skip descriptor addresses which
			 * cause 4KB boundary crossing along any point
			 * in the descriptor.
			 */
			if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr,
			    desc_len * ndesc)) {
				/* Start at the next page */
				ds += 0x1000 - (bf->bf_daddr & 0xFFF);
				bf->bf_desc = (struct ath_desc *) ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
				&bf->bf_dmamap);
		if (error != 0) {
			if_printf(ifp, "unable to create dmamap for %s "
				"buffer %u, error %u\n", dd->dd_name, i, error);
			/* cleanup releases everything acquired so far */
			ath_descdma_cleanup(sc, dd, head);
			return error;
		}
		bf->bf_lastds = bf->bf_desc;	/* Just an initial value */
		TAILQ_INSERT_TAIL(head, bf, bf_list);
	}
	return 0;
fail3:
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
fail2:
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
fail1:
	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
fail0:
	bus_dma_tag_destroy(dd->dd_dmat);
	memset(dd, 0, sizeof(*dd));
	return error;
#undef DS2PHYS
#undef ATH_DESC_4KB_BOUND_CHECK
}
3650
/*
 * Tear down a descriptor DMA area created by ath_descdma_setup():
 * unload/free/destroy the descriptor memory and tag, release any
 * mbufs, per-buffer dmamaps and node references still held by the
 * buffers, then free the ath_buf array and zero *dd.
 */
static void
ath_descdma_cleanup(struct ath_softc *sc,
	struct ath_descdma *dd, ath_bufhead *head)
{
	struct ath_buf *bf;
	struct ieee80211_node *ni;

	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	TAILQ_FOREACH(bf, head, bf_list) {
		if (bf->bf_m) {
			m_freem(bf->bf_m);
			bf->bf_m = NULL;
		}
		if (bf->bf_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
			bf->bf_dmamap = NULL;
		}
		ni = bf->bf_node;
		bf->bf_node = NULL;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
	}

	TAILQ_INIT(head);
	free(dd->dd_bufptr, M_ATHDEV);
	memset(dd, 0, sizeof(*dd));
}
3686
3687static int
3688ath_desc_alloc(struct ath_softc *sc)
3689{
3690 int error;
3691
3692 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
3693 "rx", ath_rxbuf, 1);
3694 if (error != 0)
3695 return error;
3696
3697 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
3698 "tx", ath_txbuf, ATH_TXDESC);
3699 if (error != 0) {
3700 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3701 return error;
3702 }
3703
3704 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
3705 "beacon", ATH_BCBUF, 1);
3706 if (error != 0) {
3707 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3708 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3709 return error;
3710 }
3711 return 0;
3712}
3713
/*
 * Free all descriptor DMA areas (beacon, tx, rx).  A non-zero
 * dd_desc_len serves as the "was allocated" marker since
 * ath_descdma_cleanup() zeroes the descdma state on teardown.
 */
static void
ath_desc_free(struct ath_softc *sc)
{

	if (sc->sc_bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
	if (sc->sc_txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
}
3725
/*
 * net80211 node allocation hook: allocate an ath_node with the
 * rate control module's per-node state (arc_space bytes) carved
 * out of the same allocation, initialize the rate control state,
 * per-node mutex and software TX (TID) state.
 * Returns the embedded ieee80211_node or NULL if out of memory.
 */
static struct ieee80211_node *
ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
	struct ath_node *an;

	an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
	if (an == NULL) {
		/* XXX stat+msg */
		return NULL;
	}
	ath_rate_node_init(sc, an);

	/* Setup the mutex - there's no associd yet so set the name to NULL */
	snprintf(an->an_name, sizeof(an->an_name), "%s: node %p",
	    device_get_nameunit(sc->sc_dev), an);
	mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF);

	/* XXX setup ath_tid */
	ath_tx_tid_init(sc, an);

	DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
	return &an->an_node;
}
3752
/*
 * net80211 node cleanup hook: flush the node's software TX state
 * and rate control state, then chain to the stack's original
 * cleanup method saved in sc_node_cleanup.
 */
static void
ath_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	/* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */
	ath_tx_node_flush(sc, ATH_NODE(ni));
	ath_rate_node_cleanup(sc, ATH_NODE(ni));
	sc->sc_node_cleanup(ni);
}
3764
3765static void
3766ath_node_free(struct ieee80211_node *ni)
3767{
3768 struct ieee80211com *ic = ni->ni_ic;
3769 struct ath_softc *sc = ic->ic_ifp->if_softc;
3770
3771 DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);
3772 mtx_destroy(&ATH_NODE(ni)->an_mtx);
3773 sc->sc_node_free(ni);
3774}
3775
3776static void
3777ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
3778{
3779 struct ieee80211com *ic = ni->ni_ic;
3780 struct ath_softc *sc = ic->ic_ifp->if_softc;
3781 struct ath_hal *ah = sc->sc_ah;
3782
3783 *rssi = ic->ic_node_getrssi(ni);
3784 if (ni->ni_chan != IEEE80211_CHAN_ANYC)
3785 *noise = ath_hal_getchannoise(ah, ni->ni_chan);
3786 else
3787 *noise = -95; /* nominally correct */
3788}
3789
/*
 * Attach an mbuf cluster to a receive buffer (if it doesn't
 * already have one), DMA-map it, set up the rx descriptor and
 * append it to the hardware rx list via sc_rxlink.
 * Returns 0 or an errno when no cluster / DMA mapping is
 * available.
 */
static int
ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	int error;
	struct mbuf *m;
	struct ath_desc *ds;

	m = bf->bf_m;
	if (m == NULL) {
		/*
		 * NB: by assigning a page to the rx dma buffer we
		 * implicitly satisfy the Atheros requirement that
		 * this buffer be cache-line-aligned and sized to be
		 * multiple of the cache line size.  Not doing this
		 * causes weird stuff to happen (for the 5210 at least).
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: no mbuf/cluster\n", __func__);
			sc->sc_stats.ast_rx_nombuf++;
			return ENOMEM;
		}
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
					     bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
			    __func__, error);
			sc->sc_stats.ast_rx_busdma++;
			m_freem(m);
			return error;
		}
		KASSERT(bf->bf_nseg == 1,
			("multi-segment packet; nseg %u", bf->bf_nseg));
		bf->bf_m = m;
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To insure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This insures the hardware always has
	 * someplace to write a new frame.
	 */
	/*
	 * 11N: we can no longer afford to self link the last descriptor.
	 * MAC acknowledges BA status as long as it copies frames to host
	 * buffer (or rx fifo).  This can incorrectly acknowledge packets
	 * to a sender if last desc is self-linked.
	 */
	ds = bf->bf_desc;
	if (sc->sc_rxslink)
		ds->ds_link = bf->bf_daddr;	/* link to self */
	else
		ds->ds_link = 0;		/* terminate the list */
	ds->ds_data = bf->bf_segs[0].ds_addr;
	ath_hal_setuprxdesc(ah, ds
		, m->m_len		/* buffer size */
		, 0
	);

	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
	return 0;
}
3871
3872/*
3873 * Extend 15-bit time stamp from rx descriptor to
3874 * a full 64-bit TSF using the specified TSF.
3875 */
3876static __inline u_int64_t
3877ath_extend_tsf15(u_int32_t rstamp, u_int64_t tsf)
3878{
3879 if ((tsf & 0x7fff) < rstamp)
3880 tsf -= 0x8000;
3881
3882 return ((tsf &~ 0x7fff) | rstamp);
3883}
3884
3885/*
3886 * Extend 32-bit time stamp from rx descriptor to
3887 * a full 64-bit TSF using the specified TSF.
3888 */
3889static __inline u_int64_t
3890ath_extend_tsf32(u_int32_t rstamp, u_int64_t tsf)
3891{
3892 u_int32_t tsf_low = tsf & 0xffffffff;
3893 u_int64_t tsf64 = (tsf & ~0xffffffffULL) | rstamp;
3894
3895 if (rstamp > tsf_low && (rstamp - tsf_low > 0x10000000))
3896 tsf64 -= 0x100000000ULL;
3897
3898 if (rstamp < tsf_low && (tsf_low - rstamp > 0x10000000))
3899 tsf64 += 0x100000000ULL;
3900
3901 return tsf64;
3902}
3903
3904/*
3905 * Extend the TSF from the RX descriptor to a full 64 bit TSF.
3906 * Earlier hardware versions only wrote the low 15 bits of the
3907 * TSF into the RX descriptor; later versions (AR5416 and up)
3908 * include the 32 bit TSF value.
3909 */
3910static __inline u_int64_t
3911ath_extend_tsf(struct ath_softc *sc, u_int32_t rstamp, u_int64_t tsf)
3912{
3913 if (sc->sc_rxtsf32)
3914 return ath_extend_tsf32(rstamp, tsf);
3915 else
3916 return ath_extend_tsf15(rstamp, tsf);
3917}
3918
3919/*
3920 * Intercept management frames to collect beacon rssi data
3921 * and to do ibss merges.
3922 */
3923static void
3924ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
3925 int subtype, int rssi, int nf)
3926{
3927 struct ieee80211vap *vap = ni->ni_vap;
3928 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
3929
3930 /*
3931 * Call up first so subsequent work can use information
3932 * potentially stored in the node (e.g. for ibss merge).
3933 */
3934 ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, nf);
3935 switch (subtype) {
3936 case IEEE80211_FC0_SUBTYPE_BEACON:
3937 /* update rssi statistics for use by the hal */
3938 /* XXX unlocked check against vap->iv_bss? */
3939 ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
3940 if (sc->sc_syncbeacon &&
3941 ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) {
3942 /*
3943 * Resync beacon timers using the tsf of the beacon
3944 * frame we just received.
3945 */
3946 ath_beacon_config(sc, vap);
3947 }
3948 /* fall thru... */
3949 case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
3950 if (vap->iv_opmode == IEEE80211_M_IBSS &&
3951 vap->iv_state == IEEE80211_S_RUN) {
3952 uint32_t rstamp = sc->sc_lastrs->rs_tstamp;
3953 uint64_t tsf = ath_extend_tsf(sc, rstamp,
3954 ath_hal_gettsf64(sc->sc_ah));
3955 /*
3956 * Handle ibss merge as needed; check the tsf on the
3957 * frame before attempting the merge. The 802.11 spec
3958 * says the station should change it's bssid to match
3959 * the oldest station with the same ssid, where oldest
3960 * is determined by the tsf. Note that hardware
3961 * reconfiguration happens through callback to
3962 * ath_newstate as the state machine will go from
3963 * RUN -> RUN when this happens.
3964 */
3965 if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
3966 DPRINTF(sc, ATH_DEBUG_STATE,
3967 "ibss merge, rstamp %u tsf %ju "
3968 "tstamp %ju\n", rstamp, (uintmax_t)tsf,
3969 (uintmax_t)ni->ni_tstamp.tsf);
3970 (void) ieee80211_ibss_merge(ni);
3971 }
3972 }
3973 break;
3974 }
3975}
3976
3977/*
3978 * Set the default antenna.
3979 */
3980static void
3981ath_setdefantenna(struct ath_softc *sc, u_int antenna)
3982{
3983 struct ath_hal *ah = sc->sc_ah;
3984
3985 /* XXX block beacon interrupts */
3986 ath_hal_setdefantenna(ah, antenna);
3987 if (sc->sc_defant != antenna)
3988 sc->sc_stats.ast_ant_defswitch++;
3989 sc->sc_defant = antenna;
3990 sc->sc_rxotherant = 0;
3991}
3992
/*
 * Fill in the radiotap receive header (sc_rx_th) for a received
 * frame: rate, HT channel flags, full 64-bit TSF, noise floor,
 * signal and antenna.
 */
static void
ath_rx_tap(struct ifnet *ifp, struct mbuf *m,
	const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
{
#define	CHAN_HT20	htole32(IEEE80211_CHAN_HT20)
#define	CHAN_HT40U	htole32(IEEE80211_CHAN_HT40U)
#define	CHAN_HT40D	htole32(IEEE80211_CHAN_HT40D)
#define	CHAN_HT		(CHAN_HT20|CHAN_HT40U|CHAN_HT40D)
	struct ath_softc *sc = ifp->if_softc;
	const HAL_RATE_TABLE *rt;
	uint8_t rix;

	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
	rix = rt->rateCodeToIndex[rs->rs_rate];
	sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
	sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
#ifdef AH_SUPPORT_AR5416
	sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT;
	if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) {	/* HT rate */
		struct ieee80211com *ic = ifp->if_l2com;

		if ((rs->rs_flags & HAL_RX_2040) == 0)
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
		else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan))
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
		else
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
		/*
		 * NOTE(review): SHORTGI is flagged when HAL_RX_GI is
		 * clear — confirm the HAL_RX_GI bit polarity against
		 * the HAL headers.
		 */
		if ((rs->rs_flags & HAL_RX_GI) == 0)
			sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI;
	}
#endif
	sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(sc, rs->rs_tstamp, tsf));
	if (rs->rs_status & HAL_RXERR_CRC)
		sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
	/* XXX propagate other error flags from descriptor */
	sc->sc_rx_th.wr_antnoise = nf;
	sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi;
	sc->sc_rx_th.wr_antenna = rs->rs_antenna;
#undef CHAN_HT
#undef CHAN_HT20
#undef CHAN_HT40U
#undef CHAN_HT40D
}
4037
4038static void
4039ath_handle_micerror(struct ieee80211com *ic,
4040 struct ieee80211_frame *wh, int keyix)
4041{
4042 struct ieee80211_node *ni;
4043
4044 /* XXX recheck MIC to deal w/ chips that lie */
4045 /* XXX discard MIC errors on !data frames */
4046 ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
4047 if (ni != NULL) {
4048 ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix);
4049 ieee80211_free_node(ni);
4050 }
4051}
4052
4053/*
4054 * Only run the RX proc if it's not already running.
4055 * Since this may get run as part of the reset/flush path,
4056 * the task can't clash with an existing, running tasklet.
4057 */
4058static void
4059ath_rx_tasklet(void *arg, int npending)
4060{
4061 struct ath_softc *sc = arg;
4062
4063 CTR1(ATH_KTR_INTR, "ath_rx_proc: pending=%d", npending);
4064 DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
4065 ATH_PCU_LOCK(sc);
4066 if (sc->sc_inreset_cnt > 0) {
4067 device_printf(sc->sc_dev,
4068 "%s: sc_inreset_cnt > 0; skipping\n", __func__);
4069 ATH_PCU_UNLOCK(sc);
4070 return;
4071 }
4072 ATH_PCU_UNLOCK(sc);
4073 ath_rx_proc(sc, 1);
4074}
4075
/*
 * Reap completed frames from the RX descriptor list.
 *
 * Called from the RX tasklet (resched == 1) and from other paths
 * (resched == 0); when resched is 0 no follow-up work (DFS tasklet,
 * PCU kick, ath_start) is scheduled.  Must be entered without the
 * ATH_LOCK or ATH_PCU lock held; sc_rxproc_cnt brackets the run
 * (NB: presumably so the reset path can wait for RX to quiesce --
 * confirm against the reset code, which is outside this view).
 */
static void
ath_rx_proc(struct ath_softc *sc, int resched)
{
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_buf *bf;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct ath_rx_status *rs;
	struct mbuf *m;
	struct ieee80211_node *ni;
	int len, type, ngood;
	HAL_STATUS status;
	int16_t nf;
	u_int64_t tsf, rstamp;
	int npkts = 0;

	/* XXX we must not hold the ATH_LOCK here */
	ATH_UNLOCK_ASSERT(sc);
	ATH_PCU_UNLOCK_ASSERT(sc);

	/* Mark an RX pass as in progress. */
	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: called\n", __func__);
	ngood = 0;
	/* Sample noise floor and TSF once for the whole pass. */
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;
	tsf = ath_hal_gettsf64(ah);
	do {
		bf = TAILQ_FIRST(&sc->sc_rxbuf);
		if (sc->sc_rxslink && bf == NULL) {	/* NB: shouldn't happen */
			if_printf(ifp, "%s: no buffer!\n", __func__);
			break;
		} else if (bf == NULL) {
			/*
			 * End of List:
			 * this can happen for non-self-linked RX chains
			 */
			sc->sc_stats.ast_rx_hitqueueend++;
			break;
		}
		m = bf->bf_m;
		if (m == NULL) {		/* NB: shouldn't happen */
			/*
			 * If mbuf allocation failed previously there
			 * will be no mbuf; try again to re-populate it.
			 */
			/* XXX make debug msg */
			if_printf(ifp, "%s: no mbuf!\n", __func__);
			TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
			goto rx_next;
		}
		ds = bf->bf_desc;
		if (ds->ds_link == bf->bf_daddr) {
			/* NB: never process the self-linked entry at the end */
			sc->sc_stats.ast_rx_hitqueueend++;
			break;
		}
		/* XXX sync descriptor memory */
		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on.  All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		rs = &bf->bf_status.ds_rxstat;
		status = ath_hal_rxprocdesc(ah, ds,
				bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, status == HAL_OK);
#endif
		if (status == HAL_EINPROGRESS)
			break;

		TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
		npkts++;

		/*
		 * Calculate the correct 64 bit TSF given
		 * the TSF64 register value and rs_tstamp.
		 */
		rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf);

		/* These aren't specifically errors */
#ifdef	AH_SUPPORT_AR5416
		if (rs->rs_flags & HAL_RX_GI)
			sc->sc_stats.ast_rx_halfgi++;
		if (rs->rs_flags & HAL_RX_2040)
			sc->sc_stats.ast_rx_2040++;
		if (rs->rs_flags & HAL_RX_DELIM_CRC_PRE)
			sc->sc_stats.ast_rx_pre_crc_err++;
		if (rs->rs_flags & HAL_RX_DELIM_CRC_POST)
			sc->sc_stats.ast_rx_post_crc_err++;
		if (rs->rs_flags & HAL_RX_DECRYPT_BUSY)
			sc->sc_stats.ast_rx_decrypt_busy_err++;
		if (rs->rs_flags & HAL_RX_HI_RX_CHAIN)
			sc->sc_stats.ast_rx_hi_rx_chain++;
#endif /* AH_SUPPORT_AR5416 */

		if (rs->rs_status != 0) {
			if (rs->rs_status & HAL_RXERR_CRC)
				sc->sc_stats.ast_rx_crcerr++;
			if (rs->rs_status & HAL_RXERR_FIFO)
				sc->sc_stats.ast_rx_fifoerr++;
			if (rs->rs_status & HAL_RXERR_PHY) {
				sc->sc_stats.ast_rx_phyerr++;
				/* Process DFS radar events */
				if ((rs->rs_phyerr == HAL_PHYERR_RADAR) ||
				    (rs->rs_phyerr == HAL_PHYERR_FALSE_RADAR_EXT)) {
					/* Since we're touching the frame data, sync it */
					bus_dmamap_sync(sc->sc_dmat,
					    bf->bf_dmamap,
					    BUS_DMASYNC_POSTREAD);
					/* Now pass it to the radar processing code */
					ath_dfs_process_phy_err(sc, mtod(m, char *), rstamp, rs);
				}

				/* Be suitably paranoid about receiving phy errors out of the stats array bounds */
				if (rs->rs_phyerr < 64)
					sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++;
				goto rx_error;	/* NB: don't count in ierrors */
			}
			if (rs->rs_status & HAL_RXERR_DECRYPT) {
				/*
				 * Decrypt error.  If the error occurred
				 * because there was no hardware key, then
				 * let the frame through so the upper layers
				 * can process it.  This is necessary for 5210
				 * parts which have no way to setup a ``clear''
				 * key cache entry.
				 *
				 * XXX do key cache faulting
				 */
				if (rs->rs_keyix == HAL_RXKEYIX_INVALID)
					goto rx_accept;
				sc->sc_stats.ast_rx_badcrypt++;
			}
			if (rs->rs_status & HAL_RXERR_MIC) {
				sc->sc_stats.ast_rx_badmic++;
				/*
				 * Do minimal work required to hand off
				 * the 802.11 header for notification.
				 */
				/* XXX frag's and qos frames */
				len = rs->rs_datalen;
				if (len >= sizeof (struct ieee80211_frame)) {
					bus_dmamap_sync(sc->sc_dmat,
					    bf->bf_dmamap,
					    BUS_DMASYNC_POSTREAD);
					ath_handle_micerror(ic,
					    mtod(m, struct ieee80211_frame *),
					    sc->sc_splitmic ?
						rs->rs_keyix-32 : rs->rs_keyix);
				}
			}
			ifp->if_ierrors++;
rx_error:
			/*
			 * Cleanup any pending partial frame.
			 */
			if (sc->sc_rxpending != NULL) {
				m_freem(sc->sc_rxpending);
				sc->sc_rxpending = NULL;
			}
			/*
			 * When a tap is present pass error frames
			 * that have been requested.  By default we
			 * pass decrypt+mic errors but others may be
			 * interesting (e.g. crc).
			 */
			if (ieee80211_radiotap_active(ic) &&
			    (rs->rs_status & sc->sc_monpass)) {
				bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
				    BUS_DMASYNC_POSTREAD);
				/* NB: bpf needs the mbuf length setup */
				len = rs->rs_datalen;
				m->m_pkthdr.len = m->m_len = len;
				bf->bf_m = NULL;
				ath_rx_tap(ifp, m, rs, rstamp, nf);
				ieee80211_radiotap_rx_all(ic, m);
				m_freem(m);
			}
			/* XXX pass MIC errors up for s/w reclaculation */
			goto rx_next;
		}
rx_accept:
		/*
		 * Sync and unmap the frame.  At this point we're
		 * committed to passing the mbuf somewhere so clear
		 * bf_m; this means a new mbuf must be allocated
		 * when the rx descriptor is setup again to receive
		 * another frame.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		bf->bf_m = NULL;

		len = rs->rs_datalen;
		m->m_len = len;

		if (rs->rs_more) {
			/*
			 * Frame spans multiple descriptors; save
			 * it for the next completed descriptor, it
			 * will be used to construct a jumbogram.
			 */
			if (sc->sc_rxpending != NULL) {
				/* NB: max frame size is currently 2 clusters */
				sc->sc_stats.ast_rx_toobig++;
				m_freem(sc->sc_rxpending);
			}
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
			sc->sc_rxpending = m;
			goto rx_next;
		} else if (sc->sc_rxpending != NULL) {
			/*
			 * This is the second part of a jumbogram,
			 * chain it to the first mbuf, adjust the
			 * frame length, and clear the rxpending state.
			 */
			sc->sc_rxpending->m_next = m;
			sc->sc_rxpending->m_pkthdr.len += len;
			m = sc->sc_rxpending;
			sc->sc_rxpending = NULL;
		} else {
			/*
			 * Normal single-descriptor receive; setup
			 * the rcvif and packet length.
			 */
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
		}

		/*
		 * Validate rs->rs_antenna.
		 *
		 * Some users w/ AR9285 NICs have reported crashes
		 * here because rs_antenna field is bogusly large.
		 * Let's enforce the maximum antenna limit of 8
		 * (and it shouldn't be hard coded, but that's a
		 * separate problem) and if there's an issue, print
		 * out an error and adjust rs_antenna to something
		 * sensible.
		 *
		 * This code should be removed once the actual
		 * root cause of the issue has been identified.
		 * For example, it may be that the rs_antenna
		 * field is only valid for the last frame of
		 * an aggregate and it just happens that it is
		 * "mostly" right.  (This is a general statement -
		 * the majority of the statistics are only valid
		 * for the last frame in an aggregate.)
		 */
		if (rs->rs_antenna > 7) {
			device_printf(sc->sc_dev, "%s: rs_antenna > 7 (%d)\n",
			    __func__, rs->rs_antenna);
#ifdef	ATH_DEBUG
			ath_printrxbuf(sc, bf, 0, status == HAL_OK);
#endif /* ATH_DEBUG */
			rs->rs_antenna = 0;	/* XXX better than nothing */
		}

		ifp->if_ipackets++;
		sc->sc_stats.ast_ant_rx[rs->rs_antenna]++;

		/*
		 * Populate the rx status block.  When there are bpf
		 * listeners we do the additional work to provide
		 * complete status.  Otherwise we fill in only the
		 * material required by ieee80211_input.  Note that
		 * noise setting is filled in above.
		 */
		if (ieee80211_radiotap_active(ic))
			ath_rx_tap(ifp, m, rs, rstamp, nf);

		/*
		 * From this point on we assume the frame is at least
		 * as large as ieee80211_frame_min; verify that.
		 */
		if (len < IEEE80211_MIN_LEN) {
			if (!ieee80211_radiotap_active(ic)) {
				DPRINTF(sc, ATH_DEBUG_RECV,
				    "%s: short packet %d\n", __func__, len);
				sc->sc_stats.ast_rx_tooshort++;
			} else {
				/* NB: in particular this captures ack's */
				ieee80211_radiotap_rx_all(ic, m);
			}
			m_freem(m);
			goto rx_next;
		}

		if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
			const HAL_RATE_TABLE *rt = sc->sc_currates;
			uint8_t rix = rt->rateCodeToIndex[rs->rs_rate];

			ieee80211_dump_pkt(ic, mtod(m, caddr_t), len,
			    sc->sc_hwmap[rix].ieeerate, rs->rs_rssi);
		}

		/* Strip the FCS before handing the frame to net80211. */
		m_adj(m, -IEEE80211_CRC_LEN);

		/*
		 * Locate the node for sender, track state, and then
		 * pass the (referenced) node up to the 802.11 layer
		 * for its use.
		 */
		ni = ieee80211_find_rxnode_withkey(ic,
			mtod(m, const struct ieee80211_frame_min *),
			rs->rs_keyix == HAL_RXKEYIX_INVALID ?
				IEEE80211_KEYIX_NONE : rs->rs_keyix);
		sc->sc_lastrs = rs;

#ifdef	AH_SUPPORT_AR5416
		if (rs->rs_isaggr)
			sc->sc_stats.ast_rx_agg++;
#endif /* AH_SUPPORT_AR5416 */

		if (ni != NULL) {
			/*
			 * Only punt packets for ampdu reorder processing for
			 * 11n nodes; net80211 enforces that M_AMPDU is only
			 * set for 11n nodes.
			 */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;

			/*
			 * Sending station is known, dispatch directly.
			 */
			type = ieee80211_input(ni, m, rs->rs_rssi, nf);
			ieee80211_free_node(ni);
			/*
			 * Arrange to update the last rx timestamp only for
			 * frames from our ap when operating in station mode.
			 * This assumes the rx key is always setup when
			 * associated.
			 */
			if (ic->ic_opmode == IEEE80211_M_STA &&
			    rs->rs_keyix != HAL_RXKEYIX_INVALID)
				ngood++;
		} else {
			type = ieee80211_input_all(ic, m, rs->rs_rssi, nf);
		}
		/*
		 * Track rx rssi and do any rx antenna management.
		 */
		ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi);
		if (sc->sc_diversity) {
			/*
			 * When using fast diversity, change the default rx
			 * antenna if diversity chooses the other antenna 3
			 * times in a row.
			 */
			if (sc->sc_defant != rs->rs_antenna) {
				if (++sc->sc_rxotherant >= 3)
					ath_setdefantenna(sc, rs->rs_antenna);
			} else
				sc->sc_rxotherant = 0;
		}

		/* Newer school diversity - kite specific for now */
		/* XXX perhaps migrate the normal diversity code to this? */
		if ((ah)->ah_rxAntCombDiversity)
			(*(ah)->ah_rxAntCombDiversity)(ah, rs, ticks, hz);

		if (sc->sc_softled) {
			/*
			 * Blink for any data frame.  Otherwise do a
			 * heartbeat-style blink when idle.  The latter
			 * is mainly for station mode where we depend on
			 * periodic beacon frames to trigger the poll event.
			 */
			if (type == IEEE80211_FC0_TYPE_DATA) {
				const HAL_RATE_TABLE *rt = sc->sc_currates;
				ath_led_event(sc,
				    rt->rateCodeToIndex[rs->rs_rate]);
			} else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
				ath_led_event(sc, 0);
		}
rx_next:
		/* Re-queue the buffer and re-arm its descriptor. */
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
	} while (ath_rxbuf_init(sc, bf) == 0);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);
	if (ngood)
		sc->sc_lastrx = tsf;

	CTR2(ATH_KTR_INTR, "ath_rx_proc: npkts=%d, ngood=%d", npkts, ngood);
	/* Queue DFS tasklet if needed */
	if (resched && ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

	/*
	 * Now that all the RX frames were handled that
	 * need to be handled, kick the PCU if there's
	 * been an RXEOL condition.
	 */
	ATH_PCU_LOCK(sc);
	if (resched && sc->sc_kickpcu) {
		CTR0(ATH_KTR_ERR, "ath_rx_proc: kickpcu");
		device_printf(sc->sc_dev, "%s: kickpcu; handled %d packets\n",
		    __func__, npkts);

		/* XXX rxslink? */
		/*
		 * XXX can we hold the PCU lock here?
		 * Are there any net80211 buffer calls involved?
		 */
		bf = TAILQ_FIRST(&sc->sc_rxbuf);
		ath_hal_putrxbuf(ah, bf->bf_daddr);
		ath_hal_rxena(ah);		/* enable recv descriptors */
		ath_mode_init(sc);		/* set filters, etc. */
		ath_hal_startpcurecv(ah);	/* re-enable PCU/DMA engine */

		ath_hal_intrset(ah, sc->sc_imask);
		sc->sc_kickpcu = 0;
	}
	ATH_PCU_UNLOCK(sc);

	/* XXX check this inside of IF_LOCK? */
	if (resched && (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
#ifdef IEEE80211_SUPPORT_SUPERG
		ieee80211_ff_age_all(ic, 100);
#endif
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			ath_start(ifp);
	}
#undef PA2DESC

	/* RX pass complete. */
	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}
4524
4525static void
4526ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
4527{
4528 txq->axq_qnum = qnum;
4529 txq->axq_ac = 0;
4530 txq->axq_depth = 0;
4531 txq->axq_aggr_depth = 0;
4532 txq->axq_intrcnt = 0;
4533 txq->axq_link = NULL;
4534 txq->axq_softc = sc;
4535 TAILQ_INIT(&txq->axq_q);
4536 TAILQ_INIT(&txq->axq_tidq);
4537 ATH_TXQ_LOCK_INIT(sc, txq);
4538}
4539
4540/*
4541 * Setup a h/w transmit queue.
4542 */
4543static struct ath_txq *
4544ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
4545{
4546#define N(a) (sizeof(a)/sizeof(a[0]))
4547 struct ath_hal *ah = sc->sc_ah;
4548 HAL_TXQ_INFO qi;
4549 int qnum;
4550
4551 memset(&qi, 0, sizeof(qi));
4552 qi.tqi_subtype = subtype;
4553 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
4554 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
4555 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
4556 /*
4557 * Enable interrupts only for EOL and DESC conditions.
4558 * We mark tx descriptors to receive a DESC interrupt
4559 * when a tx queue gets deep; otherwise waiting for the
4560 * EOL to reap descriptors. Note that this is done to
4561 * reduce interrupt load and this only defers reaping
4562 * descriptors, never transmitting frames. Aside from
4563 * reducing interrupts this also permits more concurrency.
4564 * The only potential downside is if the tx queue backs
4565 * up in which case the top half of the kernel may backup
4566 * due to a lack of tx descriptors.
4567 */
4568 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE;
4569 qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
4570 if (qnum == -1) {
4571 /*
4572 * NB: don't print a message, this happens
4573 * normally on parts with too few tx queues
4574 */
4575 return NULL;
4576 }
4577 if (qnum >= N(sc->sc_txq)) {
4578 device_printf(sc->sc_dev,
4579 "hal qnum %u out of range, max %zu!\n",
4580 qnum, N(sc->sc_txq));
4581 ath_hal_releasetxqueue(ah, qnum);
4582 return NULL;
4583 }
4584 if (!ATH_TXQ_SETUP(sc, qnum)) {
4585 ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
4586 sc->sc_txqsetup |= 1<<qnum;
4587 }
4588 return &sc->sc_txq[qnum];
4589#undef N
4590}
4591
4592/*
4593 * Setup a hardware data transmit queue for the specified
4594 * access control. The hal may not support all requested
4595 * queues in which case it will return a reference to a
4596 * previously setup queue. We record the mapping from ac's
4597 * to h/w queues for use by ath_tx_start and also track
4598 * the set of h/w queues being used to optimize work in the
4599 * transmit interrupt handler and related routines.
4600 */
4601static int
4602ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
4603{
4604#define N(a) (sizeof(a)/sizeof(a[0]))
4605 struct ath_txq *txq;
4606
4607 if (ac >= N(sc->sc_ac2q)) {
4608 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
4609 ac, N(sc->sc_ac2q));
4610 return 0;
4611 }
4612 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
4613 if (txq != NULL) {
4614 txq->axq_ac = ac;
4615 sc->sc_ac2q[ac] = txq;
4616 return 1;
4617 } else
4618 return 0;
4619#undef N
4620}
4621
/*
 * Update WME parameters for a transmit queue.
 *
 * Pulls the current WME parameters for the given AC from net80211
 * and pushes them to the hardware queue; in TDMA mode the queue is
 * instead programmed for slot-gated, no-backoff operation.
 * Returns 1 on success, 0 if the HAL rejected the parameters.
 */
static int
ath_txq_update(struct ath_softc *sc, int ac)
{
#define	ATH_EXPONENT_TO_VALUE(v)	((1<<v)-1)
#define	ATH_TXOP_TO_US(v)		(v<<5)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_txq *txq = sc->sc_ac2q[ac];
	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;

	/* Start from the queue's current properties, then override. */
	ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
#ifdef IEEE80211_SUPPORT_TDMA
	if (sc->sc_tdma) {
		/*
		 * AIFS is zero so there's no pre-transmit wait.  The
		 * burst time defines the slot duration and is configured
		 * through net80211.  The QCU is setup to not do post-xmit
		 * back off, lockout all lower-priority QCU's, and fire
		 * off the DMA beacon alert timer which is setup based
		 * on the slot configuration.
		 */
		qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
			      | HAL_TXQ_TXERRINT_ENABLE
			      | HAL_TXQ_TXURNINT_ENABLE
			      | HAL_TXQ_TXEOLINT_ENABLE
			      | HAL_TXQ_DBA_GATED
			      | HAL_TXQ_BACKOFF_DISABLE
			      | HAL_TXQ_ARB_LOCKOUT_GLOBAL
			      ;
		qi.tqi_aifs = 0;
		/* XXX +dbaprep? */
		qi.tqi_readyTime = sc->sc_tdmaslotlen;
		qi.tqi_burstTime = qi.tqi_readyTime;
	} else {
#endif
		/*
		 * XXX shouldn't this just use the default flags
		 * used in the previous queue setup?
		 */
		qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
			      | HAL_TXQ_TXERRINT_ENABLE
			      | HAL_TXQ_TXDESCINT_ENABLE
			      | HAL_TXQ_TXURNINT_ENABLE
			      | HAL_TXQ_TXEOLINT_ENABLE
			      ;
		qi.tqi_aifs = wmep->wmep_aifsn;
		qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
		qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
		qi.tqi_readyTime = 0;
		qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
#ifdef IEEE80211_SUPPORT_TDMA
	}
#endif

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n",
	    __func__, txq->axq_qnum, qi.tqi_qflags,
	    qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime);

	if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
		if_printf(ifp, "unable to update hardware queue "
			"parameters for %s traffic!\n",
			ieee80211_wme_acnames[ac]);
		return 0;
	} else {
		ath_hal_resettxqueue(ah, txq->axq_qnum);	/* push to h/w */
		return 1;
	}
#undef ATH_TXOP_TO_US
#undef ATH_EXPONENT_TO_VALUE
}
4698
4699/*
4700 * Callback from the 802.11 layer to update WME parameters.
4701 */
4702static int
4703ath_wme_update(struct ieee80211com *ic)
4704{
4705 struct ath_softc *sc = ic->ic_ifp->if_softc;
4706
4707 return !ath_txq_update(sc, WME_AC_BE) ||
4708 !ath_txq_update(sc, WME_AC_BK) ||
4709 !ath_txq_update(sc, WME_AC_VI) ||
4710 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
4711}
4712
4713/*
4714 * Reclaim resources for a setup queue.
4715 */
4716static void
4717ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
4718{
4719
4720 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
4721 ATH_TXQ_LOCK_DESTROY(txq);
4722 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
4723}
4724
4725/*
4726 * Reclaim all tx queue resources.
4727 */
4728static void
4729ath_tx_cleanup(struct ath_softc *sc)
4730{
4731 int i;
4732
4733 ATH_TXBUF_LOCK_DESTROY(sc);
4734 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4735 if (ATH_TXQ_SETUP(sc, i))
4736 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
4737}
4738
4739/*
4740 * Return h/w rate index for an IEEE rate (w/o basic rate bit)
4741 * using the current rates in sc_rixmap.
4742 */
4743int
4744ath_tx_findrix(const struct ath_softc *sc, uint8_t rate)
4745{
4746 int rix = sc->sc_rixmap[rate];
4747 /* NB: return lowest rix for invalid rate */
4748 return (rix == 0xff ? 0 : rix);
4749}
4750
4751static void
4752ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts,
4753 struct ath_buf *bf)
4754{
4755 struct ieee80211_node *ni = bf->bf_node;
4756 struct ifnet *ifp = sc->sc_ifp;
4757 struct ieee80211com *ic = ifp->if_l2com;
4758 int sr, lr, pri;
4759
4760 if (ts->ts_status == 0) {
4761 u_int8_t txant = ts->ts_antenna;
4762 sc->sc_stats.ast_ant_tx[txant]++;
4763 sc->sc_ant_tx[txant]++;
4764 if (ts->ts_finaltsi != 0)
4765 sc->sc_stats.ast_tx_altrate++;
4766 pri = M_WME_GETAC(bf->bf_m);
4767 if (pri >= WME_AC_VO)
4768 ic->ic_wme.wme_hipri_traffic++;
4769 if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)
4770 ni->ni_inact = ni->ni_inact_reload;
4771 } else {
4772 if (ts->ts_status & HAL_TXERR_XRETRY)
4773 sc->sc_stats.ast_tx_xretries++;
4774 if (ts->ts_status & HAL_TXERR_FIFO)
4775 sc->sc_stats.ast_tx_fifoerr++;
4776 if (ts->ts_status & HAL_TXERR_FILT)
4777 sc->sc_stats.ast_tx_filtered++;
4778 if (ts->ts_status & HAL_TXERR_XTXOP)
4779 sc->sc_stats.ast_tx_xtxop++;
4780 if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
4781 sc->sc_stats.ast_tx_timerexpired++;
4782
4783 if (ts->ts_status & HAL_TX_DATA_UNDERRUN)
4784 sc->sc_stats.ast_tx_data_underrun++;
4785 if (ts->ts_status & HAL_TX_DELIM_UNDERRUN)
4786 sc->sc_stats.ast_tx_delim_underrun++;
4787
4788 if (bf->bf_m->m_flags & M_FF)
4789 sc->sc_stats.ast_ff_txerr++;
4790 }
4791 /* XXX when is this valid? */
4792 if (ts->ts_status & HAL_TX_DESC_CFG_ERR)
4793 sc->sc_stats.ast_tx_desccfgerr++;
4794
4795 sr = ts->ts_shortretry;
4796 lr = ts->ts_longretry;
4797 sc->sc_stats.ast_tx_shortretry += sr;
4798 sc->sc_stats.ast_tx_longretry += lr;
4799
4800}
4801
4802/*
4803 * The default completion. If fail is 1, this means
4804 * "please don't retry the frame, and just return -1 status
4805 * to the net80211 stack.
4806 */
4807void
4808ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4809{
4810 struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4811 int st;
4812
4813 if (fail == 1)
4814 st = -1;
4815 else
4816 st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ?
4817 ts->ts_status : HAL_TXERR_XRETRY;
4818
4819 if (bf->bf_state.bfs_dobaw)
4820 device_printf(sc->sc_dev,
4821 "%s: bf %p: seqno %d: dobaw should've been cleared!\n",
4822 __func__,
4823 bf,
4824 SEQNO(bf->bf_state.bfs_seqno));
4825 if (bf->bf_next != NULL)
4826 device_printf(sc->sc_dev,
4827 "%s: bf %p: seqno %d: bf_next not NULL!\n",
4828 __func__,
4829 bf,
4830 SEQNO(bf->bf_state.bfs_seqno));
4831
4832 /*
4833 * Do any tx complete callback. Note this must
4834 * be done before releasing the node reference.
4835 * This will free the mbuf, release the net80211
4836 * node and recycle the ath_buf.
4837 */
4838 ath_tx_freebuf(sc, bf, st);
4839}
4840
4841/*
4842 * Update rate control with the given completion status.
4843 */
4844void
4845ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
4846 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen,
4847 int nframes, int nbad)
4848{
4849 struct ath_node *an;
4850
4851 /* Only for unicast frames */
4852 if (ni == NULL)
4853 return;
4854
4855 an = ATH_NODE(ni);
4856
4857 if ((ts->ts_status & HAL_TXERR_FILT) == 0) {
4858 ATH_NODE_LOCK(an);
4859 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad);
4860 ATH_NODE_UNLOCK(an);
4861 }
4862}
4863
4864/*
4865 * Update the busy status of the last frame on the free list.
4866 * When doing TDMA, the busy flag tracks whether the hardware
4867 * currently points to this buffer or not, and thus gated DMA
4868 * may restart by re-reading the last descriptor in this
4869 * buffer.
4870 *
4871 * This should be called in the completion function once one
4872 * of the buffers has been used.
4873 */
4874static void
4875ath_tx_update_busy(struct ath_softc *sc)
4876{
4877 struct ath_buf *last;
4878
4879 /*
4880 * Since the last frame may still be marked
4881 * as ATH_BUF_BUSY, unmark it here before
4882 * finishing the frame processing.
4883 * Since we've completed a frame (aggregate
4884 * or otherwise), the hardware has moved on
4885 * and is no longer referencing the previous
4886 * descriptor.
4887 */
4888 ATH_TXBUF_LOCK_ASSERT(sc);
4889 last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
4890 if (last != NULL)
4891 last->bf_flags &= ~ATH_BUF_BUSY;
4892}
4893
4894
4895/*
4896 * Process completed xmit descriptors from the specified queue.
4897 * Kick the packet scheduler if needed. This can occur from this
4898 * particular task.
4899 */
4900static int
4901ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
4902{
4903 struct ath_hal *ah = sc->sc_ah;
4904 struct ath_buf *bf;
4905 struct ath_desc *ds;
4906 struct ath_tx_status *ts;
4907 struct ieee80211_node *ni;
4908 struct ath_node *an;
4909 int nacked;
4910 HAL_STATUS status;
4911
4912 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
4913 __func__, txq->axq_qnum,
4914 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
4915 txq->axq_link);
4916 nacked = 0;
4917 for (;;) {
4918 ATH_TXQ_LOCK(txq);
4919 txq->axq_intrcnt = 0; /* reset periodic desc intr count */
4920 bf = TAILQ_FIRST(&txq->axq_q);
4921 if (bf == NULL) {
4922 ATH_TXQ_UNLOCK(txq);
4923 break;
4924 }
4925 ds = bf->bf_lastds; /* XXX must be setup correctly! */
4926 ts = &bf->bf_status.ds_txstat;
4927 status = ath_hal_txprocdesc(ah, ds, ts);
4928#ifdef ATH_DEBUG
4929 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
4930 ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
4931 status == HAL_OK);
4932 else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0)) {
4933 ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
4934 status == HAL_OK);
4935 }
4936#endif
4937 if (status == HAL_EINPROGRESS) {
4938 ATH_TXQ_UNLOCK(txq);
4939 break;
4940 }
4941 ATH_TXQ_REMOVE(txq, bf, bf_list);
4942#ifdef IEEE80211_SUPPORT_TDMA
4943 if (txq->axq_depth > 0) {
4944 /*
4945 * More frames follow. Mark the buffer busy
4946 * so it's not re-used while the hardware may
4947 * still re-read the link field in the descriptor.
4948 *
4949 * Use the last buffer in an aggregate as that
4950 * is where the hardware may be - intermediate
4951 * descriptors won't be "busy".
4952 */
4953 bf->bf_last->bf_flags |= ATH_BUF_BUSY;
4954 } else
4955#else
4956 if (txq->axq_depth == 0)
4957#endif
4958 txq->axq_link = NULL;
4959 if (bf->bf_state.bfs_aggr)
4960 txq->axq_aggr_depth--;
4961
4962 ni = bf->bf_node;
4963 /*
4964 * If unicast frame was ack'd update RSSI,
4965 * including the last rx time used to
4966 * workaround phantom bmiss interrupts.
4967 */
4968 if (ni != NULL && ts->ts_status == 0 &&
4969 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) {
4970 nacked++;
4971 sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
4972 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
4973 ts->ts_rssi);
4974 }
4975 ATH_TXQ_UNLOCK(txq);
4976
4977 /* If unicast frame, update general statistics */
4978 if (ni != NULL) {
4979 an = ATH_NODE(ni);
4980 /* update statistics */
4981 ath_tx_update_stats(sc, ts, bf);
4982 }
4983
4984 /*
4985 * Call the completion handler.
4986 * The completion handler is responsible for
4987 * calling the rate control code.
4988 *
4989 * Frames with no completion handler get the
4990 * rate control code called here.
4991 */
4992 if (bf->bf_comp == NULL) {
4993 if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
4994 (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) {
4995 /*
4996 * XXX assume this isn't an aggregate
4997 * frame.
4998 */
4999 ath_tx_update_ratectrl(sc, ni,
5000 bf->bf_state.bfs_rc, ts,
5001 bf->bf_state.bfs_pktlen, 1,
5002 (ts->ts_status == 0 ? 0 : 1));
5003 }
5004 ath_tx_default_comp(sc, bf, 0);
5005 } else
5006 bf->bf_comp(sc, bf, 0);
5007 }
5008#ifdef IEEE80211_SUPPORT_SUPERG
5009 /*
5010 * Flush fast-frame staging queue when traffic slows.
5011 */
5012 if (txq->axq_depth <= 1)
5013 ieee80211_ff_flush(ic, txq->axq_ac);
5014#endif
5015
5016 /* Kick the TXQ scheduler */
5017 if (dosched) {
5018 ATH_TXQ_LOCK(txq);
5019 ath_txq_sched(sc, txq);
5020 ATH_TXQ_UNLOCK(txq);
5021 }
5022
5023 return nacked;
5024}
5025
5026#define TXQACTIVE(t, q) ( (t) & (1 << (q)))
5027
/*
 * Deferred processing of transmit interrupt; special-cased
 * for a single hardware transmit queue (e.g. 5210 and 5211).
 */
static void
ath_tx_proc_q0(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t txqs;

	/* Atomically claim the set of queues needing service. */
	ATH_PCU_LOCK(sc);
	sc->sc_txproc_cnt++;
	txqs = sc->sc_txq_active;
	sc->sc_txq_active &= ~txqs;
	ATH_PCU_UNLOCK(sc);

	if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1))
		/* XXX why is lastrx updated in tx code? */
		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
	if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
		ath_tx_processq(sc, sc->sc_cabq, 1);
	/* Descriptors were reaped; clear OACTIVE and the watchdog timer. */
	IF_LOCK(&ifp->if_snd);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	IF_UNLOCK(&ifp->if_snd);
	sc->sc_wd_timer = 0;

	if (sc->sc_softled)
		ath_led_event(sc, sc->sc_txrix);

	ATH_PCU_LOCK(sc);
	sc->sc_txproc_cnt--;
	ATH_PCU_UNLOCK(sc);

	/* Restart transmit now that tx buffers are available again. */
	ath_start(ifp);
}
5064
/*
 * Deferred processing of transmit interrupt; special-cased
 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
 */
static void
ath_tx_proc_q0123(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	int nacked;
	uint32_t txqs;

	/* Claim (and clear) the active TXQ bitmap under the PCU lock. */
	ATH_PCU_LOCK(sc);
	sc->sc_txproc_cnt++;
	txqs = sc->sc_txq_active;
	sc->sc_txq_active &= ~txqs;
	ATH_PCU_UNLOCK(sc);

	/*
	 * Process each active queue.
	 */
	nacked = 0;
	if (TXQACTIVE(txqs, 0))
		nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1);
	if (TXQACTIVE(txqs, 1))
		nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1);
	if (TXQACTIVE(txqs, 2))
		nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1);
	if (TXQACTIVE(txqs, 3))
		nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1);
	if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
		ath_tx_processq(sc, sc->sc_cabq, 1);
	/* XXX as in ath_tx_proc_q0: why is lastrx updated in TX code? */
	if (nacked)
		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);

	/* TX buffers were reclaimed; clear OACTIVE and reset watchdog. */
	IF_LOCK(&ifp->if_snd);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	IF_UNLOCK(&ifp->if_snd);
	sc->sc_wd_timer = 0;

	if (sc->sc_softled)
		ath_led_event(sc, sc->sc_txrix);

	ATH_PCU_LOCK(sc);
	sc->sc_txproc_cnt--;
	ATH_PCU_UNLOCK(sc);

	/* Kick transmit now that buffers are available again. */
	ath_start(ifp);
}
5114
/*
 * Deferred processing of transmit interrupt.
 *
 * General case: walk every configured hardware TXQ whose "active"
 * bit was set by the interrupt handler and reap completed frames.
 */
static void
ath_tx_proc(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	int i, nacked;
	uint32_t txqs;

	/* Claim (and clear) the active TXQ bitmap under the PCU lock. */
	ATH_PCU_LOCK(sc);
	sc->sc_txproc_cnt++;
	txqs = sc->sc_txq_active;
	sc->sc_txq_active &= ~txqs;
	ATH_PCU_UNLOCK(sc);

	/*
	 * Process each active queue.
	 */
	nacked = 0;
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i))
			nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1);
	if (nacked)
		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);

	/* XXX check this inside of IF_LOCK? */
	IF_LOCK(&ifp->if_snd);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	IF_UNLOCK(&ifp->if_snd);
	sc->sc_wd_timer = 0;

	if (sc->sc_softled)
		ath_led_event(sc, sc->sc_txrix);

	ATH_PCU_LOCK(sc);
	sc->sc_txproc_cnt--;
	ATH_PCU_UNLOCK(sc);

	/* Kick transmit now that buffers are available again. */
	ath_start(ifp);
}
5157#undef TXQACTIVE
5158
/*
 * Deferred processing of TXQ rescheduling.
 *
 * Runs the software TXQ scheduler over every configured hardware
 * queue.  Holds the txproc refcount so a concurrent reset knows
 * TX work is in flight.
 */
static void
ath_txq_sched_tasklet(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	int i;

	/* XXX is skipping ok? */
	ATH_PCU_LOCK(sc);
#if 0
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev,
		    "%s: sc_inreset_cnt > 0; skipping\n", __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
#endif
	sc->sc_txproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	/* Schedule each configured TXQ under its own lock. */
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			ATH_TXQ_LOCK(&sc->sc_txq[i]);
			ath_txq_sched(sc, &sc->sc_txq[i]);
			ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
		}
	}

	ATH_PCU_LOCK(sc);
	sc->sc_txproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}
5193
/*
 * Return a buffer to the pool and update the 'busy' flag on the
 * previous 'tail' entry.
 *
 * This _must_ only be called when the buffer is involved in a completed
 * TX. The logic is that if it was part of an active TX, the previous
 * buffer on the list is now not involved in a halted TX DMA queue, waiting
 * for restart (eg for TDMA.)
 *
 * The caller must free the mbuf and recycle the node reference.
 */
void
ath_freebuf(struct ath_softc *sc, struct ath_buf *bf)
{
	/* Release the DMA mapping before the buffer is recycled. */
	bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE);

	/* Caller must have detached the node ref and mbuf already. */
	KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__));
	KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__));

	ATH_TXBUF_LOCK(sc);
	ath_tx_update_busy(sc);
	TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
	ATH_TXBUF_UNLOCK(sc);
}
5219
/*
 * This is currently used by ath_tx_draintxq() and
 * ath_tx_tid_free_pkts().
 *
 * It recycles a single ath_buf: returns the buffer to the free pool,
 * runs any net80211 TX callback and releases the node reference and
 * mbuf that were attached to it.
 */
void
ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status)
{
	struct ieee80211_node *ni = bf->bf_node;
	struct mbuf *m0 = bf->bf_m;

	/* Detach node/mbuf first; ath_freebuf() asserts both are NULL. */
	bf->bf_node = NULL;
	bf->bf_m = NULL;

	/* Free the buffer, it's not needed any longer */
	ath_freebuf(sc, bf);

	if (ni != NULL) {
		/*
		 * Do any callback and reclaim the node reference.
		 *
		 * NOTE(review): this assumes m0 is non-NULL whenever
		 * ni is non-NULL — confirm against callers.
		 */
		if (m0->m_flags & M_TXCB)
			ieee80211_process_callback(ni, m0, status);
		ieee80211_free_node(ni);
	}
	m_freem(m0);

	/*
	 * XXX the buffer used to be freed -after-, but the DMA map was
	 * freed where ath_freebuf() now is. I've no idea what this
	 * will do.
	 */
}
5254
/*
 * Drain every frame from a hardware transmit queue, completing each
 * one with an error status, then drain the software queues feeding it.
 */
void
ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
#ifdef ATH_DEBUG
	struct ath_hal *ah = sc->sc_ah;
#endif
	struct ath_buf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 * we do not need to block ath_tx_proc
	 */
	/* Clear the busy flag on the current free-list tail. */
	ATH_TXBUF_LOCK(sc);
	bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
	if (bf != NULL)
		bf->bf_flags &= ~ATH_BUF_BUSY;
	ATH_TXBUF_UNLOCK(sc);

	/* Pop and complete each queued buffer until the queue is empty. */
	for (ix = 0;; ix++) {
		ATH_TXQ_LOCK(txq);
		bf = TAILQ_FIRST(&txq->axq_q);
		if (bf == NULL) {
			txq->axq_link = NULL;
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		ATH_TXQ_REMOVE(txq, bf, bf_list);
		if (bf->bf_state.bfs_aggr)
			txq->axq_aggr_depth--;
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RESET) {
			struct ieee80211com *ic = sc->sc_ifp->if_l2com;

			ath_printtxbuf(sc, bf, txq->axq_qnum, ix,
			    ath_hal_txprocdesc(ah, bf->bf_lastds,
			    &bf->bf_status.ds_txstat) == HAL_OK);
			ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *),
			    bf->bf_m->m_len, 0, -1);
		}
#endif /* ATH_DEBUG */
		/*
		 * Since we're now doing magic in the completion
		 * functions, we -must- call it for aggregation
		 * destinations or BAW tracking will get upset.
		 */
		/*
		 * Clear ATH_BUF_BUSY; the completion handler
		 * will free the buffer.
		 */
		ATH_TXQ_UNLOCK(txq);
		bf->bf_flags &= ~ATH_BUF_BUSY;
		if (bf->bf_comp)
			bf->bf_comp(sc, bf, 1);
		else
			ath_tx_default_comp(sc, bf, 1);
	}

	/*
	 * Drain software queued frames which are on
	 * active TIDs.
	 */
	ath_tx_txq_drain(sc, txq);
}
5319
5320static void
5321ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
5322{
5323 struct ath_hal *ah = sc->sc_ah;
5324
5325 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
5326 __func__, txq->axq_qnum,
5327 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
5328 txq->axq_link);
5329 (void) ath_hal_stoptxdma(ah, txq->axq_qnum);
5330}
5331
5332static int
5333ath_stoptxdma(struct ath_softc *sc)
5334{
5335 struct ath_hal *ah = sc->sc_ah;
5336 int i;
5337
5338 /* XXX return value */
5339 if (sc->sc_invalid)
5340 return 0;
5341
5342 if (!sc->sc_invalid) {
5343 /* don't touch the hardware if marked invalid */
5344 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
5345 __func__, sc->sc_bhalq,
5346 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
5347 NULL);
5348 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
5349 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
5350 if (ATH_TXQ_SETUP(sc, i))
5351 ath_tx_stopdma(sc, &sc->sc_txq[i]);
5352 }
5353
5354 return 1;
5355}
5356
/*
 * Drain the transmit queues and reclaim resources.
 *
 * For a loss-free reset only completed frames are reaped; otherwise
 * each queue is fully drained (frames completed with error status).
 */
static void
ath_draintxq(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
{
#ifdef ATH_DEBUG
	struct ath_hal *ah = sc->sc_ah;
#endif
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	/* Halt TX DMA before touching the queues. */
	(void) ath_stoptxdma(sc);

	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
		/*
		 * XXX TODO: should we just handle the completed TX frames
		 * here, whether or not the reset is a full one or not?
		 */
		if (ATH_TXQ_SETUP(sc, i)) {
			if (reset_type == ATH_RESET_NOLOSS)
				ath_tx_processq(sc, &sc->sc_txq[i], 0);
			else
				ath_tx_draintxq(sc, &sc->sc_txq[i]);
		}
	}
#ifdef ATH_DEBUG
	if (sc->sc_debug & ATH_DEBUG_RESET) {
		struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf);
		if (bf != NULL && bf->bf_m != NULL) {
			ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
			    ath_hal_txprocdesc(ah, bf->bf_lastds,
			    &bf->bf_status.ds_txstat) == HAL_OK);
			ieee80211_dump_pkt(ifp->if_l2com,
			    mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
			    0, -1);
		}
	}
#endif /* ATH_DEBUG */
	/* Allow transmit to resume and disarm the watchdog. */
	IF_LOCK(&ifp->if_snd);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	IF_UNLOCK(&ifp->if_snd);
	sc->sc_wd_timer = 0;
}
5401
/*
 * Disable the receive h/w in preparation for a reset.
 *
 * Stops the PCU, clears the RX filter and halts RX DMA; optionally
 * delays to let any in-flight frame land.  Any pending partial
 * (jumbo) frame is discarded.
 */
static void
ath_stoprecv(struct ath_softc *sc, int dodelay)
{
/* Map a descriptor physical address back to its virtual address. */
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_hal *ah = sc->sc_ah;

	ath_hal_stoppcurecv(ah);	/* disable PCU */
	ath_hal_setrxfilter(ah, 0);	/* clear recv filter */
	ath_hal_stopdmarecv(ah);	/* disable DMA engine */
	if (dodelay)
		DELAY(3000);		/* 3ms is long enough for 1 frame */
#ifdef ATH_DEBUG
	if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
		struct ath_buf *bf;
		u_int ix;

		device_printf(sc->sc_dev,
		    "%s: rx queue %p, link %p\n",
		    __func__,
		    (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah),
		    sc->sc_rxlink);
		ix = 0;
		TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			struct ath_desc *ds = bf->bf_desc;
			struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
			HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
			    bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
			if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
				ath_printrxbuf(sc, bf, ix, status == HAL_OK);
			ix++;
		}
	}
#endif
	/* Drop any partially-reassembled frame. */
	if (sc->sc_rxpending != NULL) {
		m_freem(sc->sc_rxpending);
		sc->sc_rxpending = NULL;
	}
	sc->sc_rxlink = NULL;		/* just in case */
#undef PA2DESC
}
5447
5448/*
5449 * Enable the receive h/w following a reset.
5450 */
5451static int
5452ath_startrecv(struct ath_softc *sc)
5453{
5454 struct ath_hal *ah = sc->sc_ah;
5455 struct ath_buf *bf;
5456
5457 sc->sc_rxlink = NULL;
5458 sc->sc_rxpending = NULL;
5459 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
5460 int error = ath_rxbuf_init(sc, bf);
5461 if (error != 0) {
5462 DPRINTF(sc, ATH_DEBUG_RECV,
5463 "%s: ath_rxbuf_init failed %d\n",
5464 __func__, error);
5465 return error;
5466 }
5467 }
5468
5469 bf = TAILQ_FIRST(&sc->sc_rxbuf);
5470 ath_hal_putrxbuf(ah, bf->bf_daddr);
5471 ath_hal_rxena(ah); /* enable recv descriptors */
5472 ath_mode_init(sc); /* set filters, etc. */
5473 ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */
5474 return 0;
5475}
5476
5477/*
5478 * Update internal state after a channel change.
5479 */
5480static void
5481ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
5482{
5483 enum ieee80211_phymode mode;
5484
5485 /*
5486 * Change channels and update the h/w rate map
5487 * if we're switching; e.g. 11a to 11b/g.
5488 */
5489 mode = ieee80211_chan2mode(chan);
5490 if (mode != sc->sc_curmode)
5491 ath_setcurmode(sc, mode);
5492 sc->sc_curchan = chan;
5493}
5494
/*
 * Set/change channels. If the channel is really being changed,
 * it's done by resetting the chip. To accomplish this we must
 * first cleanup any pending DMA, then restart stuff after a la
 * ath_init.
 *
 * Returns 0 on success, EIO if the chip reset or RX restart fails.
 */
static int
ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	int ret = 0;

	/* Treat this as an interface reset */
	ATH_PCU_UNLOCK_ASSERT(sc);
	ATH_UNLOCK_ASSERT(sc);

	/* (Try to) stop TX/RX from occuring */
	taskqueue_block(sc->sc_tq);

	ATH_PCU_LOCK(sc);
	ath_hal_intrset(ah, 0);		/* Stop new RX/TX completion */
	ath_txrx_stop_locked(sc);	/* Stop pending RX/TX completion */
	if (ath_reset_grablock(sc, 1) == 0) {
		device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
		    __func__);
	}
	ATH_PCU_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
	    __func__, ieee80211_chan2ieee(ic, chan),
	    chan->ic_freq, chan->ic_flags);
	if (chan != sc->sc_curchan) {
		HAL_STATUS status;
		/*
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
#if 0
		ath_hal_intrset(ah, 0);		/* disable interrupts */
#endif
		ath_stoprecv(sc, 1);		/* turn off frame recv */
		/*
		 * First, handle completed TX/RX frames.
		 */
		ath_rx_proc(sc, 0);
		ath_draintxq(sc, ATH_RESET_NOLOSS);
		/*
		 * Next, flush the non-scheduled frames.
		 */
		ath_draintxq(sc, ATH_RESET_FULL);	/* clear pending tx frames */

		if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) {
			if_printf(ifp, "%s: unable to reset "
			    "channel %u (%u MHz, flags 0x%x), hal status %u\n",
			    __func__, ieee80211_chan2ieee(ic, chan),
			    chan->ic_freq, chan->ic_flags, status);
			ret = EIO;
			goto finish;
		}
		sc->sc_diversity = ath_hal_getdiversity(ah);

		/* Let DFS at it in case it's a DFS channel */
		ath_dfs_radar_enable(sc, chan);

		/*
		 * Re-enable rx framework.
		 */
		if (ath_startrecv(sc) != 0) {
			if_printf(ifp, "%s: unable to restart recv logic\n",
			    __func__);
			ret = EIO;
			goto finish;
		}

		/*
		 * Change channels and update the h/w rate map
		 * if we're switching; e.g. 11a to 11b/g.
		 */
		ath_chan_change(sc, chan);

		/*
		 * Reset clears the beacon timers; reset them
		 * here if needed.
		 */
		if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma)
				ath_tdma_config(sc, NULL);
			else
#endif
			ath_beacon_config(sc, NULL);
		}

		/*
		 * Re-enable interrupts.
		 */
#if 0
		ath_hal_intrset(ah, sc->sc_imask);
#endif
	}

finish:
	/* Release the reset refcount taken by ath_reset_grablock(). */
	ATH_PCU_LOCK(sc);
	sc->sc_inreset_cnt--;
	/* XXX only do this if sc_inreset_cnt == 0? */
	ath_hal_intrset(ah, sc->sc_imask);
	ATH_PCU_UNLOCK(sc);

	IF_LOCK(&ifp->if_snd);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	IF_UNLOCK(&ifp->if_snd);
	ath_txrx_start(sc);
	/* XXX ath_start? */

	return ret;
}
5615
5616/*
5617 * Periodically recalibrate the PHY to account
5618 * for temperature/environment changes.
5619 */
5620static void
5621ath_calibrate(void *arg)
5622{
5623 struct ath_softc *sc = arg;
5624 struct ath_hal *ah = sc->sc_ah;
5625 struct ifnet *ifp = sc->sc_ifp;
5626 struct ieee80211com *ic = ifp->if_l2com;
5627 HAL_BOOL longCal, isCalDone;
5628 HAL_BOOL aniCal, shortCal = AH_FALSE;
5629 int nextcal;
5630
5631 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */
5632 goto restart;
5633 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz);
5634 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000);
5635 if (sc->sc_doresetcal)
5636 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000);
5637
5638 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal);
5639 if (aniCal) {
5640 sc->sc_stats.ast_ani_cal++;
5641 sc->sc_lastani = ticks;
5642 ath_hal_ani_poll(ah, sc->sc_curchan);
5643 }
5644
5645 if (longCal) {
5646 sc->sc_stats.ast_per_cal++;
5647 sc->sc_lastlongcal = ticks;
5648 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
5649 /*
5650 * Rfgain is out of bounds, reset the chip
5651 * to load new gain values.
5652 */
5653 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
5654 "%s: rfgain change\n", __func__);
5655 sc->sc_stats.ast_per_rfgain++;
5656 sc->sc_resetcal = 0;
5657 sc->sc_doresetcal = AH_TRUE;
5658 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
5659 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
5660 return;
5661 }
5662 /*
5663 * If this long cal is after an idle period, then
5664 * reset the data collection state so we start fresh.
5665 */
5666 if (sc->sc_resetcal) {
5667 (void) ath_hal_calreset(ah, sc->sc_curchan);
5668 sc->sc_lastcalreset = ticks;
5669 sc->sc_lastshortcal = ticks;
5670 sc->sc_resetcal = 0;
5671 sc->sc_doresetcal = AH_TRUE;
5672 }
5673 }
5674
5675 /* Only call if we're doing a short/long cal, not for ANI calibration */
5676 if (shortCal || longCal) {
5677 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) {
5678 if (longCal) {
5679 /*
5680 * Calibrate noise floor data again in case of change.
5681 */
5682 ath_hal_process_noisefloor(ah);
5683 }
5684 } else {
5685 DPRINTF(sc, ATH_DEBUG_ANY,
5686 "%s: calibration of channel %u failed\n",
5687 __func__, sc->sc_curchan->ic_freq);
5688 sc->sc_stats.ast_per_calfail++;
5689 }
5690 if (shortCal)
5691 sc->sc_lastshortcal = ticks;
5692 }
5693 if (!isCalDone) {
5694restart:
5695 /*
5696 * Use a shorter interval to potentially collect multiple
5697 * data samples required to complete calibration. Once
5698 * we're told the work is done we drop back to a longer
5699 * interval between requests. We're more aggressive doing
5700 * work when operating as an AP to improve operation right
5701 * after startup.
5702 */
5703 sc->sc_lastshortcal = ticks;
5704 nextcal = ath_shortcalinterval*hz/1000;
5705 if (sc->sc_opmode != HAL_M_HOSTAP)
5706 nextcal *= 10;
5707 sc->sc_doresetcal = AH_TRUE;
5708 } else {
5709 /* nextcal should be the shortest time for next event */
5710 nextcal = ath_longcalinterval*hz;
5711 if (sc->sc_lastcalreset == 0)
5712 sc->sc_lastcalreset = sc->sc_lastlongcal;
5713 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz)
5714 sc->sc_resetcal = 1; /* setup reset next trip */
5715 sc->sc_doresetcal = AH_FALSE;
5716 }
5717 /* ANI calibration may occur more often than short/long/resetcal */
5718 if (ath_anicalinterval > 0)
5719 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000);
5720
5721 if (nextcal != 0) {
5722 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n",
5723 __func__, nextcal, isCalDone ? "" : "!");
5724 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc);
5725 } else {
5726 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n",
5727 __func__);
5728 /* NB: don't rearm timer */
5729 }
5730}
5731
/*
 * net80211 scan-start hook: widen the RX filter and clear the BSSID
 * so beacons/probe responses from all APs are received while scanning.
 */
static void
ath_scan_start(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t rfilt;

	/* XXX calibration timer? */

	/* Compute the scan RX filter with the scanning flag set. */
	ATH_LOCK(sc);
	sc->sc_scanning = 1;
	sc->sc_syncbeacon = 0;
	rfilt = ath_calcrxfilter(sc);
	ATH_UNLOCK(sc);

	/* Program the h/w filter and a wildcard (broadcast) BSSID. */
	ATH_PCU_LOCK(sc);
	ath_hal_setrxfilter(ah, rfilt);
	ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0);
	ATH_PCU_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
		 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr));
}
5756
/*
 * net80211 scan-end hook: restore the operational RX filter and the
 * current BSSID/AID, and let the HAL digest noise floor data
 * collected during the scan.
 */
static void
ath_scan_end(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t rfilt;

	/* Recompute the RX filter with scanning cleared. */
	ATH_LOCK(sc);
	sc->sc_scanning = 0;
	rfilt = ath_calcrxfilter(sc);
	ATH_UNLOCK(sc);

	ATH_PCU_LOCK(sc);
	ath_hal_setrxfilter(ah, rfilt);
	ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);

	ath_hal_process_noisefloor(ah);
	ATH_PCU_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
		 __func__, rfilt, ether_sprintf(sc->sc_curbssid),
		 sc->sc_curaid);
}
5781
#ifdef	ATH_ENABLE_11N
/*
 * For now, just do a channel change.
 *
 * Later, we'll go through the hard slog of suspending tx/rx, changing rate
 * control state and resetting the hardware without dropping frames out
 * of the queue.
 *
 * The unfortunate trouble here is making absolutely sure that the
 * channel width change has propagated enough so the hardware
 * absolutely isn't handed bogus frames for it's current operating
 * mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and
 * does occur in parallel, we need to make certain we've blocked
 * any further ongoing TX (and RX, that can cause raw TX)
 * before we do this.
 */
static void
ath_update_chw(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__);
	/* Delegate to the normal channel-set path for now. */
	ath_set_channel(ic);
}
#endif	/* ATH_ENABLE_11N */
5808
/*
 * net80211 channel-change hook: reset the chip onto the new channel.
 */
static void
ath_set_channel(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	(void) ath_chan_set(sc, ic->ic_curchan);
	/*
	 * If we are returning to our bss channel then mark state
	 * so the next recv'd beacon's tsf will be used to sync the
	 * beacon timers.  Note that since we only hear beacons in
	 * sta/ibss mode this has no effect in other operating modes.
	 */
	ATH_LOCK(sc);
	if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
		sc->sc_syncbeacon = 1;
	ATH_UNLOCK(sc);
}
5827
5828/*
5829 * Walk the vap list and check if there any vap's in RUN state.
5830 */
5831static int
5832ath_isanyrunningvaps(struct ieee80211vap *this)
5833{
5834 struct ieee80211com *ic = this->iv_ic;
5835 struct ieee80211vap *vap;
5836
5837 IEEE80211_LOCK_ASSERT(ic);
5838
5839 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
5840 if (vap != this && vap->iv_state >= IEEE80211_S_RUN)
5841 return 1;
5842 }
5843 return 0;
5844}
5845
/*
 * Driver hook for net80211 vap state transitions.
 *
 * Programs the LED, RX filter, BSSID/AID and key cache for the new
 * state, chains to the net80211 handler (av_newstate), and then does
 * per-opmode RUN-state setup (beacons, timers, calibration) or
 * INIT-state teardown.  Returns 0 or the error from av_newstate /
 * beacon allocation.
 */
static int
ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_node *ni = NULL;
	int i, error, stamode;
	u_int32_t rfilt;
	int csa_run_transition = 0;
	/* LED state indexed by ieee80211_state. */
	static const HAL_LED_STATE leds[] = {
	    HAL_LED_INIT,	/* IEEE80211_S_INIT */
	    HAL_LED_SCAN,	/* IEEE80211_S_SCAN */
	    HAL_LED_AUTH,	/* IEEE80211_S_AUTH */
	    HAL_LED_ASSOC, 	/* IEEE80211_S_ASSOC */
	    HAL_LED_RUN, 	/* IEEE80211_S_CAC */
	    HAL_LED_RUN, 	/* IEEE80211_S_RUN */
	    HAL_LED_RUN, 	/* IEEE80211_S_CSA */
	    HAL_LED_RUN, 	/* IEEE80211_S_SLEEP */
	};

	DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
		ieee80211_state_name[vap->iv_state],
		ieee80211_state_name[nstate]);

	/*
	 * net80211 _should_ have the comlock asserted at this point.
	 * There are some comments around the calls to vap->iv_newstate
	 * which indicate that it (newstate) may end up dropping the
	 * lock.  This and the subsequent lock assert check after newstate
	 * are an attempt to catch these and figure out how/why.
	 */
	IEEE80211_LOCK_ASSERT(ic);

	/* Remember a CSA -> RUN transition; STA mode acts on it below. */
	if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN)
		csa_run_transition = 1;

	callout_drain(&sc->sc_cal_ch);
	ath_hal_setledstate(ah, leds[nstate]);	/* set LED */

	if (nstate == IEEE80211_S_SCAN) {
		/*
		 * Scanning: turn off beacon miss and don't beacon.
		 * Mark beacon state so when we reach RUN state we'll
		 * [re]setup beacons.  Unblock the task q thread so
		 * deferred interrupt processing is done.
		 */
		ath_hal_intrset(ah,
		    sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
		sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
		sc->sc_beacons = 0;
		taskqueue_unblock(sc->sc_tq);
	}

	ni = ieee80211_ref_node(vap->iv_bss);
	rfilt = ath_calcrxfilter(sc);
	/* Modes which track a BSSID/AID in the hardware. */
	stamode = (vap->iv_opmode == IEEE80211_M_STA ||
		   vap->iv_opmode == IEEE80211_M_AHDEMO ||
		   vap->iv_opmode == IEEE80211_M_IBSS);
	if (stamode && nstate == IEEE80211_S_RUN) {
		sc->sc_curaid = ni->ni_associd;
		IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
		ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
	}
	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
	   __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
	ath_hal_setrxfilter(ah, rfilt);

	/* XXX is this to restore keycache on resume? */
	if (vap->iv_opmode != IEEE80211_M_STA &&
	    (vap->iv_flags & IEEE80211_F_PRIVACY)) {
		for (i = 0; i < IEEE80211_WEP_NKID; i++)
			if (ath_hal_keyisvalid(ah, i))
				ath_hal_keysetmac(ah, i, ni->ni_bssid);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = avp->av_newstate(vap, nstate, arg);
	if (error != 0)
		goto bad;

	/*
	 * See above: ensure av_newstate() doesn't drop the lock
	 * on us.
	 */
	IEEE80211_LOCK_ASSERT(ic);

	if (nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ieee80211_free_node(ni);
		ni = ieee80211_ref_node(vap->iv_bss);

		DPRINTF(sc, ATH_DEBUG_STATE,
		    "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n", __func__,
		    vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid),
		    ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));

		switch (vap->iv_opmode) {
#ifdef IEEE80211_SUPPORT_TDMA
		case IEEE80211_M_AHDEMO:
			if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
				break;
			/* fall thru... */
#endif
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_IBSS:
		case IEEE80211_M_MBSS:
			/*
			 * Allocate and setup the beacon frame.
			 *
			 * Stop any previous beacon DMA.  This may be
			 * necessary, for example, when an ibss merge
			 * causes reconfiguration; there will be a state
			 * transition from RUN->RUN that means we may
			 * be called with beacon transmission active.
			 */
			ath_hal_stoptxdma(ah, sc->sc_bhalq);

			error = ath_beacon_alloc(sc, ni);
			if (error != 0)
				goto bad;
			/*
			 * If joining an adhoc network defer beacon timer
			 * configuration to the next beacon frame so we
			 * have a current TSF to use.  Otherwise we're
			 * starting an ibss/bss so there's no need to delay;
			 * if this is the first vap moving to RUN state, then
			 * beacon state needs to be [re]configured.
			 */
			if (vap->iv_opmode == IEEE80211_M_IBSS &&
			    ni->ni_tstamp.tsf != 0) {
				sc->sc_syncbeacon = 1;
			} else if (!sc->sc_beacons) {
#ifdef IEEE80211_SUPPORT_TDMA
				if (vap->iv_caps & IEEE80211_C_TDMA)
					ath_tdma_config(sc, vap);
				else
#endif
					ath_beacon_config(sc, vap);
				sc->sc_beacons = 1;
			}
			break;
		case IEEE80211_M_STA:
			/*
			 * Defer beacon timer configuration to the next
			 * beacon frame so we have a current TSF to use
			 * (any TSF collected when scanning is likely old).
			 * However if it's due to a CSA -> RUN transition,
			 * force a beacon update so we pick up a lack of
			 * beacons from an AP in CAC and thus force a
			 * scan.
			 */
			sc->sc_syncbeacon = 1;
			if (csa_run_transition)
				ath_beacon_config(sc, vap);
			break;
		case IEEE80211_M_MONITOR:
			/*
			 * Monitor mode vaps have only INIT->RUN and RUN->RUN
			 * transitions so we must re-enable interrupts here to
			 * handle the case of a single monitor mode vap.
			 */
			ath_hal_intrset(ah, sc->sc_imask);
			break;
		case IEEE80211_M_WDS:
			break;
		default:
			break;
		}
		/*
		 * Let the hal process statistics collected during a
		 * scan so it can provide calibrated noise floor data.
		 */
		ath_hal_process_noisefloor(ah);
		/*
		 * Reset rssi stats; maybe not the best place...
		 */
		sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
		/*
		 * Finally, start any timers and the task q thread
		 * (in case we didn't go through SCAN state).
		 */
		if (ath_longcalinterval != 0) {
			/* start periodic recalibration timer */
			callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
		} else {
			DPRINTF(sc, ATH_DEBUG_CALIBRATE,
			    "%s: calibration disabled\n", __func__);
		}
		taskqueue_unblock(sc->sc_tq);
	} else if (nstate == IEEE80211_S_INIT) {
		/*
		 * If there are no vaps left in RUN state then
		 * shutdown host/driver operation:
		 * o disable interrupts
		 * o disable the task queue thread
		 * o mark beacon processing as stopped
		 */
		if (!ath_isanyrunningvaps(vap)) {
			sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
			/* disable interrupts  */
			ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
			taskqueue_block(sc->sc_tq);
			sc->sc_beacons = 0;
		}
#ifdef IEEE80211_SUPPORT_TDMA
		ath_hal_setcca(ah, AH_TRUE);
#endif
	}
bad:
	ieee80211_free_node(ni);
	return error;
}
6065
/*
 * Allocate a key cache slot to the station so we can
 * setup a mapping from key index to node. The key cache
 * slot is needed for managing antenna state and for
 * compression when stations do not use crypto. We do
 * it uniliaterally here; if crypto is employed this slot
 * will be reassigned.
 */
static void
ath_setup_stationkey(struct ieee80211_node *ni)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	ieee80211_keyix keyix, rxkeyix;

	/* XXX should take a locked ref to vap->iv_bss */
	if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
		/*
		 * Key cache is full; we'll fall back to doing
		 * the more expensive lookup in software.  Note
		 * this also means no h/w compression.
		 */
		/* XXX msg+statistic */
	} else {
		/* XXX locking? */
		ni->ni_ucastkey.wk_keyix = keyix;
		ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
		/* NB: must mark device key to get called back on delete */
		ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
		IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
		/* NB: this will create a pass-thru key entry */
		ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss);
	}
}
6100
6101/*
6102 * Setup driver-specific state for a newly associated node.
6103 * Note that we're called also on a re-associate, the isnew
6104 * param tells us if this is the first time or not.
6105 */
6106static void
6107ath_newassoc(struct ieee80211_node *ni, int isnew)
6108{
6109 struct ath_node *an = ATH_NODE(ni);
6110 struct ieee80211vap *vap = ni->ni_vap;
6111 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
6112 const struct ieee80211_txparam *tp = ni->ni_txparms;
6113
6114 an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
6115 an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);
6116
6117 ath_rate_newassoc(sc, an, isnew);
6118 if (isnew &&
6119 (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
6120 ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
6121 ath_setup_stationkey(ni);
6122}
6123
6124static int
6125ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
6126 int nchans, struct ieee80211_channel chans[])
6127{
6128 struct ath_softc *sc = ic->ic_ifp->if_softc;
6129 struct ath_hal *ah = sc->sc_ah;
6130 HAL_STATUS status;
6131
6132 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
6133 "%s: rd %u cc %u location %c%s\n",
6134 __func__, reg->regdomain, reg->country, reg->location,
6135 reg->ecm ? " ecm" : "");
6136
6137 status = ath_hal_set_channels(ah, chans, nchans,
6138 reg->country, reg->regdomain);
6139 if (status != HAL_OK) {
6140 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
6141 __func__, status);
6142 return EINVAL; /* XXX */
6143 }
6144
6145 return 0;
6146}
6147
/*
 * net80211 radiocaps hook: report the full channel list the radio
 * supports, using the debug SKU / default country so nothing is
 * filtered by regulatory state.
 */
static void
ath_getradiocaps(struct ieee80211com *ic,
	int maxchans, int *nchans, struct ieee80211_channel chans[])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
	    __func__, SKU_DEBUG, CTRY_DEFAULT);

	/* XXX check return */
	(void) ath_hal_getchannels(ah, chans, maxchans, nchans,
	    HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);

}
6163
/*
 * Build the initial channel list from the EEPROM contents and
 * seed the net80211 regulatory state (regdomain, country code)
 * from what the EEPROM reports.  Returns 0 on success or EINVAL
 * if the HAL cannot produce a channel list.
 */
static int
ath_getchannels(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	/*
	 * Collect channel set based on EEPROM contents.
	 */
	status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
	    &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
	if (status != HAL_OK) {
		if_printf(ifp, "%s: unable to collect channel list from hal, "
		    "status %d\n", __func__, status);
		return EINVAL;
	}
	/* Stash the EEPROM regdomain/country for later reference. */
	(void) ath_hal_getregdomain(ah, &sc->sc_eerd);
	ath_hal_getcountrycode(ah, &sc->sc_eecc);	/* NB: cannot fail */
	/* XXX map Atheros sku's to net80211 SKU's */
	/* XXX net80211 types too small */
	ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
	ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
	ic->ic_regdomain.isocc[0] = ' ';	/* XXX don't know */
	ic->ic_regdomain.isocc[1] = ' ';

	/* NOTE(review): hardcoded defaults; 'I' presumably indoor -- confirm */
	ic->ic_regdomain.ecm = 1;
	ic->ic_regdomain.location = 'I';

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
	    "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
	    __func__, sc->sc_eerd, sc->sc_eecc,
	    ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
	    ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : "");
	return 0;
}
6201
6202static int
6203ath_rate_setup(struct ath_softc *sc, u_int mode)
6204{
6205 struct ath_hal *ah = sc->sc_ah;
6206 const HAL_RATE_TABLE *rt;
6207
6208 switch (mode) {
6209 case IEEE80211_MODE_11A:
6210 rt = ath_hal_getratetable(ah, HAL_MODE_11A);
6211 break;
6212 case IEEE80211_MODE_HALF:
6213 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
6214 break;
6215 case IEEE80211_MODE_QUARTER:
6216 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
6217 break;
6218 case IEEE80211_MODE_11B:
6219 rt = ath_hal_getratetable(ah, HAL_MODE_11B);
6220 break;
6221 case IEEE80211_MODE_11G:
6222 rt = ath_hal_getratetable(ah, HAL_MODE_11G);
6223 break;
6224 case IEEE80211_MODE_TURBO_A:
6225 rt = ath_hal_getratetable(ah, HAL_MODE_108A);
6226 break;
6227 case IEEE80211_MODE_TURBO_G:
6228 rt = ath_hal_getratetable(ah, HAL_MODE_108G);
6229 break;
6230 case IEEE80211_MODE_STURBO_A:
6231 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
6232 break;
6233 case IEEE80211_MODE_11NA:
6234 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
6235 break;
6236 case IEEE80211_MODE_11NG:
6237 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
6238 break;
6239 default:
6240 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
6241 __func__, mode);
6242 return 0;
6243 }
6244 sc->sc_rates[mode] = rt;
6245 return (rt != NULL);
6246}
6247
/*
 * Switch the current phy mode: rebuild the 802.11 rate -> h/w rate
 * index map (sc_rixmap), the per-h/w-rate tx/rx radiotap and LED
 * blink map (sc_hwmap), and select the protection rate index for
 * the new mode.  The rate table for `mode' must already have been
 * loaded by ath_rate_setup().
 */
static void
ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
{
#define N(a)	(sizeof(a)/sizeof(a[0]))
	/* NB: on/off times from the Atheros NDIS driver, w/ permission */
	static const struct {
		u_int		rate;		/* tx/rx 802.11 rate */
		u_int16_t	timeOn;		/* LED on time (ms) */
		u_int16_t	timeOff;	/* LED off time (ms) */
	} blinkrates[] = {
		{ 108,  40,  10 },
		{  96,  44,  11 },
		{  72,  50,  13 },
		{  48,  57,  14 },
		{  36,  67,  16 },
		{  24,  80,  20 },
		{  22, 100,  25 },
		{  18, 133,  34 },
		{  12, 160,  40 },
		{  10, 200,  50 },
		{   6, 240,  58 },
		{   4, 267,  66 },
		{   2, 400, 100 },
		{   0, 500, 130 },
		/* XXX half/quarter rates */
	};
	const HAL_RATE_TABLE *rt;
	int i, j;

	/* Invalidate all entries, then fill in what this mode supports. */
	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = sc->sc_rates[mode];
	KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
	for (i = 0; i < rt->rateCount; i++) {
		uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		/* HT (MCS) rates are keyed with the MCS bit set. */
		if (rt->info[i].phy != IEEE80211_T_HT)
			sc->sc_rixmap[ieeerate] = i;
		else
			sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
	}
	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
	for (i = 0; i < N(sc->sc_hwmap); i++) {
		if (i >= rt->rateCount) {
			/* Unused h/w rate code: default blink timing. */
			sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
			sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
			continue;
		}
		sc->sc_hwmap[i].ieeerate =
			rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy == IEEE80211_T_HT)
			sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
		sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
		if (rt->info[i].shortPreamble ||
		    rt->info[i].phy == IEEE80211_T_OFDM)
			sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
		for (j = 0; j < N(blinkrates)-1; j++)
			if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
				break;
		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
		/* ms -> system ticks */
		sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
		sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
	}
	sc->sc_currates = rt;
	sc->sc_curmode = mode;
	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 */
	if (mode == IEEE80211_MODE_11G)
		sc->sc_protrix = ath_tx_findrix(sc, 2*2);
	else
		sc->sc_protrix = ath_tx_findrix(sc, 2*1);
	/* NB: caller is responsible for resetting rate control state */
#undef N
}
6324
6325static void
6326ath_watchdog(void *arg)
6327{
6328 struct ath_softc *sc = arg;
6329 int do_reset = 0;
6330
6331 if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
6332 struct ifnet *ifp = sc->sc_ifp;
6333 uint32_t hangs;
6334
6335 if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
6336 hangs != 0) {
6337 if_printf(ifp, "%s hang detected (0x%x)\n",
6338 hangs & 0xff ? "bb" : "mac", hangs);
6339 } else
6340 if_printf(ifp, "device timeout\n");
6341 do_reset = 1;
6342 ifp->if_oerrors++;
6343 sc->sc_stats.ast_watchdog++;
6344 }
6345
6346 /*
6347 * We can't hold the lock across the ath_reset() call.
6348 *
6349 * And since this routine can't hold a lock and sleep,
6350 * do the reset deferred.
6351 */
6352 if (do_reset) {
6353 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
6354 }
6355
6356 callout_schedule(&sc->sc_wd_ch, hz);
6357}
6358
#ifdef ATH_DIAGAPI
/*
 * Diagnostic interface to the HAL.  This is used by various
 * tools to do things like retrieve register contents for
 * debugging.  The mechanism is intentionally opaque so that
 * it can change frequently w/o concern for compatibility.
 */
static int
ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
{
	struct ath_hal *ah = sc->sc_ah;
	u_int id = ad->ad_id & ATH_DIAG_ID;	/* opcode w/ flags stripped */
	void *indata = NULL;			/* kernel copy of user input */
	void *outdata = NULL;			/* result buffer, if dynamic */
	u_int32_t insize = ad->ad_in_size;
	u_int32_t outsize = ad->ad_out_size;
	int error = 0;

	if (ad->ad_id & ATH_DIAG_IN) {
		/*
		 * Copy in data.
		 * NOTE(review): insize comes from userland unvalidated;
		 * an absurd size just fails the M_NOWAIT malloc below.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(ad->ad_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (ad->ad_id & ATH_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
		/* Clamp the reported size to what the HAL produced. */
		if (outsize < ad->ad_out_size)
			ad->ad_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, ad->ad_out_data,
					ad->ad_out_size);
	} else {
		error = EINVAL;
	}
bad:
	/* Release only buffers we allocated ourselves. */
	if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
#endif /* ATH_DIAGAPI */
6421
/*
 * Driver ioctl entry point.  Handles interface flag changes,
 * media, driver statistics retrieval/reset, the diagnostic
 * API (when compiled in) and address ioctls; everything else
 * is rejected with EINVAL.
 */
static int
ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	const HAL_RATE_TABLE *rt;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		ATH_LOCK(sc);
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call ath_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			ath_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid)
				ath_init(sc);	/* XXX lose error */
		} else {
			ath_stop_locked(ifp);
#ifdef notyet
			/* XXX must wakeup in places like ath_vap_delete */
			if (!sc->sc_invalid)
				ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
#endif
		}
		ATH_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGATHSTATS:
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.ast_tx_packets = ifp->if_opackets;
		sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
		sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
		sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
#ifdef IEEE80211_SUPPORT_TDMA
		sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap);
		sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam);
#endif
		rt = sc->sc_currates;
		sc->sc_stats.ast_tx_rate =
		    rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
		if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT)
			sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS;
		/*
		 * NOTE(review): stats are updated and copied out without
		 * holding ATH_LOCK -- presumably a tolerated race; confirm.
		 */
		return copyout(&sc->sc_stats,
		    ifr->ifr_data, sizeof (sc->sc_stats));
	case SIOCZATHSTATS:
		/* Zeroing stats is a privileged operation. */
		error = priv_check(curthread, PRIV_DRIVER);
		if (error == 0) {
			memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
			memset(&sc->sc_intr_stats, 0,
			    sizeof(sc->sc_intr_stats));
		}
		break;
#ifdef ATH_DIAGAPI
	case SIOCGATHDIAG:
		error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
		break;
	case SIOCGATHPHYERR:
		error = ath_ioctl_phyerr(sc,(struct ath_diag*) ifr);
		break;
#endif
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}
6512
6513/*
6514 * Announce various information on device/driver attach.
6515 */
6516static void
6517ath_announce(struct ath_softc *sc)
6518{
6519 struct ifnet *ifp = sc->sc_ifp;
6520 struct ath_hal *ah = sc->sc_ah;
6521
6522 if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n",
6523 ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
6524 ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
6525 if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n",
6526 ah->ah_analog2GhzRev, ah->ah_analog5GhzRev);
6527 if (bootverbose) {
6528 int i;
6529 for (i = 0; i <= WME_AC_VO; i++) {
6530 struct ath_txq *txq = sc->sc_ac2q[i];
6531 if_printf(ifp, "Use hw queue %u for %s traffic\n",
6532 txq->axq_qnum, ieee80211_wme_acnames[i]);
6533 }
6534 if_printf(ifp, "Use hw queue %u for CAB traffic\n",
6535 sc->sc_cabq->axq_qnum);
6536 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
6537 }
6538 if (ath_rxbuf != ATH_RXBUF)
6539 if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
6540 if (ath_txbuf != ATH_TXBUF)
6541 if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
6542 if (sc->sc_mcastkey && bootverbose)
6543 if_printf(ifp, "using multicast key search\n");
6544}
6545
6546#ifdef IEEE80211_SUPPORT_TDMA
6547static void
6548ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, u_int32_t bintval)
6549{
6550 struct ath_hal *ah = sc->sc_ah;
6551 HAL_BEACON_TIMERS bt;
6552
6553 bt.bt_intval = bintval | HAL_BEACON_ENA;
6554 bt.bt_nexttbtt = nexttbtt;
6555 bt.bt_nextdba = (nexttbtt<<3) - sc->sc_tdmadbaprep;
6556 bt.bt_nextswba = (nexttbtt<<3) - sc->sc_tdmaswbaprep;
6557 bt.bt_nextatim = nexttbtt+1;
6558 /* Enables TBTT, DBA, SWBA timers by default */
6559 bt.bt_flags = 0;
6560 ath_hal_beaconsettimers(ah, &bt);
6561}
6562
6563/*
6564 * Calculate the beacon interval. This is periodic in the
6565 * superframe for the bss. We assume each station is configured
6566 * identically wrt transmit rate so the guard time we calculate
6567 * above will be the same on all stations. Note we need to
6568 * factor in the xmit time because the hardware will schedule
6569 * a frame for transmit if the start of the frame is within
6570 * the burst time. When we get hardware that properly kills
6571 * frames in the PCU we can reduce/eliminate the guard time.
6572 *
6573 * Roundup to 1024 is so we have 1 TU buffer in the guard time
6574 * to deal with the granularity of the nexttbtt timer. 11n MAC's
6575 * with 1us timer granularity should allow us to reduce/eliminate
6576 * this.
6577 */
6578static void
6579ath_tdma_bintvalsetup(struct ath_softc *sc,
6580 const struct ieee80211_tdma_state *tdma)
6581{
6582 /* copy from vap state (XXX check all vaps have same value?) */
6583 sc->sc_tdmaslotlen = tdma->tdma_slotlen;
6584
6585 sc->sc_tdmabintval = roundup((sc->sc_tdmaslotlen+sc->sc_tdmaguard) *
6586 tdma->tdma_slotcnt, 1024);
6587 sc->sc_tdmabintval >>= 10; /* TSF -> TU */
6588 if (sc->sc_tdmabintval & 1)
6589 sc->sc_tdmabintval++;
6590
6591 if (tdma->tdma_slot == 0) {
6592 /*
6593 * Only slot 0 beacons; other slots respond.
6594 */
6595 sc->sc_imask |= HAL_INT_SWBA;
6596 sc->sc_tdmaswba = 0; /* beacon immediately */
6597 } else {
6598 /* XXX all vaps must be slot 0 or slot !0 */
6599 sc->sc_imask &= ~HAL_INT_SWBA;
6600 }
6601}
6602
6603/*
6604 * Max 802.11 overhead. This assumes no 4-address frames and
6605 * the encapsulation done by ieee80211_encap (llc). We also
6606 * include potential crypto overhead.
6607 */
6608#define IEEE80211_MAXOVERHEAD \
6609 (sizeof(struct ieee80211_qosframe) \
6610 + sizeof(struct llc) \
6611 + IEEE80211_ADDR_LEN \
6612 + IEEE80211_WEP_IVLEN \
6613 + IEEE80211_WEP_KIDLEN \
6614 + IEEE80211_WEP_CRCLEN \
6615 + IEEE80211_WEP_MICLEN \
6616 + IEEE80211_CRC_LEN)
6617
6618/*
6619 * Setup initially for tdma operation. Start the beacon
6620 * timers and enable SWBA if we are slot 0. Otherwise
6621 * we wait for slot 0 to arrive so we can sync up before
6622 * starting to transmit.
6623 */
static void
ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	const struct ieee80211_txparam *tp;
	const struct ieee80211_tdma_state *tdma = NULL;
	int rix;

	/* No vap supplied: fall back to the first configured vap. */
	if (vap == NULL) {
		vap = TAILQ_FIRST(&ic->ic_vaps);	/* XXX */
		if (vap == NULL) {
			if_printf(ifp, "%s: no vaps?\n", __func__);
			return;
		}
	}
	/* XXX should take a locked ref to iv_bss */
	tp = vap->iv_bss->ni_txparms;
	/*
	 * Calculate the guard time for each slot.  This is the
	 * time to send a maximal-size frame according to the
	 * fixed/lowest transmit rate.  Note that the interface
	 * mtu does not include the 802.11 overhead so we must
	 * tack that on (ath_hal_computetxtime includes the
	 * preamble and plcp in its calculation).
	 */
	tdma = vap->iv_tdma;
	if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
		rix = ath_tx_findrix(sc, tp->ucastrate);
	else
		rix = ath_tx_findrix(sc, tp->mcastrate);
	/* XXX short preamble assumed */
	sc->sc_tdmaguard = ath_hal_computetxtime(ah, sc->sc_currates,
		ifp->if_mtu + IEEE80211_MAXOVERHEAD, rix, AH_TRUE);

	/* Quiesce interrupts while reprogramming the beacon machinery. */
	ath_hal_intrset(ah, 0);

	ath_beaconq_config(sc);			/* setup h/w beacon q */
	if (sc->sc_setcca)
		ath_hal_setcca(ah, AH_FALSE);	/* disable CCA */
	ath_tdma_bintvalsetup(sc, tdma);	/* calculate beacon interval */
	ath_tdma_settimers(sc, sc->sc_tdmabintval,
		sc->sc_tdmabintval | HAL_BEACON_RESET_TSF);
	sc->sc_syncbeacon = 0;

	/* Reset the TSF-delta averages used by ath_tdma_update(). */
	sc->sc_avgtsfdeltap = TDMA_DUMMY_MARKER;
	sc->sc_avgtsfdeltam = TDMA_DUMMY_MARKER;

	ath_hal_intrset(ah, sc->sc_imask);

	DPRINTF(sc, ATH_DEBUG_TDMA, "%s: slot %u len %uus cnt %u "
	    "bsched %u guard %uus bintval %u TU dba prep %u\n", __func__,
	    tdma->tdma_slot, tdma->tdma_slotlen, tdma->tdma_slotcnt,
	    tdma->tdma_bintval, sc->sc_tdmaguard, sc->sc_tdmabintval,
	    sc->sc_tdmadbaprep);
}
6681
6682/*
6683 * Update tdma operation. Called from the 802.11 layer
6684 * when a beacon is received from the TDMA station operating
6685 * in the slot immediately preceding us in the bss. Use
6686 * the rx timestamp for the beacon frame to update our
6687 * beacon timers so we follow their schedule. Note that
6688 * by using the rx timestamp we implicitly include the
6689 * propagation delay in our schedule.
6690 */
static void
ath_tdma_update(struct ieee80211_node *ni,
	const struct ieee80211_tdma_param *tdma, int changed)
{
#define	TSF_TO_TU(_h,_l) \
	((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
#define	TU_TO_TSF(_tu)	(((u_int64_t)(_tu)) << 10)
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	u_int64_t tsf, rstamp, nextslot, nexttbtt;
	u_int32_t txtime, nextslottu;
	int32_t tudelta, tsfdelta;
	const struct ath_rx_status *rs;
	int rix;

	sc->sc_stats.ast_tdma_update++;

	/*
	 * Check for and adopt configuration changes.
	 */
	if (changed != 0) {
		const struct ieee80211_tdma_state *ts = vap->iv_tdma;

		ath_tdma_bintvalsetup(sc, ts);
		if (changed & TDMA_UPDATE_SLOTLEN)
			ath_wme_update(ic);

		DPRINTF(sc, ATH_DEBUG_TDMA,
		    "%s: adopt slot %u slotcnt %u slotlen %u us "
		    "bintval %u TU\n", __func__,
		    ts->tdma_slot, ts->tdma_slotcnt, ts->tdma_slotlen,
		    sc->sc_tdmabintval);

		/* XXX right? */
		ath_hal_intrset(ah, sc->sc_imask);
		/* NB: beacon timers programmed below */
	}

	/* extend rx timestamp to 64 bits */
	rs = sc->sc_lastrs;
	tsf = ath_hal_gettsf64(ah);
	rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf);
	/*
	 * The rx timestamp is set by the hardware on completing
	 * reception (at the point where the rx descriptor is DMA'd
	 * to the host).  To find the start of our next slot we
	 * must adjust this time by the time required to send
	 * the packet just received.
	 */
	rix = rt->rateCodeToIndex[rs->rs_rate];
	txtime = ath_hal_computetxtime(ah, rt, rs->rs_datalen, rix,
	    rt->info[rix].shortPreamble);
	/* NB: << 9 is to cvt to TU and /2 */
	nextslot = (rstamp - txtime) + (sc->sc_tdmabintval << 9);
	nextslottu = TSF_TO_TU(nextslot>>32, nextslot) & HAL_BEACON_PERIOD;

	/*
	 * Retrieve the hardware NextTBTT in usecs
	 * and calculate the difference between what the
	 * other station thinks and what we have programmed.  This
	 * lets us figure how to adjust our timers to match.  The
	 * adjustments are done by pulling the TSF forward and possibly
	 * rewriting the beacon timers.
	 */
	nexttbtt = ath_hal_getnexttbtt(ah);
	tsfdelta = (int32_t)((nextslot % TU_TO_TSF(HAL_BEACON_PERIOD + 1)) - nexttbtt);

	DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
	    "tsfdelta %d avg +%d/-%d\n", tsfdelta,
	    TDMA_AVG(sc->sc_avgtsfdeltap), TDMA_AVG(sc->sc_avgtsfdeltam));

	/*
	 * Fold the delta into the running +/- averages and round
	 * the required TSF adjustment up to the next TU boundary.
	 */
	if (tsfdelta < 0) {
		TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
		TDMA_SAMPLE(sc->sc_avgtsfdeltam, -tsfdelta);
		tsfdelta = -tsfdelta % 1024;
		nextslottu++;
	} else if (tsfdelta > 0) {
		TDMA_SAMPLE(sc->sc_avgtsfdeltap, tsfdelta);
		TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
		tsfdelta = 1024 - (tsfdelta % 1024);
		nextslottu++;
	} else {
		TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
		TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
	}
	tudelta = nextslottu - TSF_TO_TU(nexttbtt >> 32, nexttbtt);

	/*
	 * Copy sender's timetstamp into tdma ie so they can
	 * calculate roundtrip time.  We submit a beacon frame
	 * below after any timer adjustment.  The frame goes out
	 * at the next TBTT so the sender can calculate the
	 * roundtrip by inspecting the tdma ie in our beacon frame.
	 *
	 * NB: This tstamp is subtlely preserved when
	 *     IEEE80211_BEACON_TDMA is marked (e.g. when the
	 *     slot position changes) because ieee80211_add_tdma
	 *     skips over the data.
	 */
	memcpy(ATH_VAP(vap)->av_boff.bo_tdma +
		__offsetof(struct ieee80211_tdma_param, tdma_tstamp),
		&ni->ni_tstamp.data, 8);
#if 0
	DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
	    "tsf %llu nextslot %llu (%d, %d) nextslottu %u nexttbtt %llu (%d)\n",
	    (unsigned long long) tsf, (unsigned long long) nextslot,
	    (int)(nextslot - tsf), tsfdelta, nextslottu, nexttbtt, tudelta);
#endif
	/*
	 * Adjust the beacon timers only when pulling them forward
	 * or when going back by less than the beacon interval.
	 * Negative jumps larger than the beacon interval seem to
	 * cause the timers to stop and generally cause instability.
	 * This basically filters out jumps due to missed beacons.
	 */
	if (tudelta != 0 && (tudelta > 0 || -tudelta < sc->sc_tdmabintval)) {
		ath_tdma_settimers(sc, nextslottu, sc->sc_tdmabintval);
		sc->sc_stats.ast_tdma_timers++;
	}
	if (tsfdelta > 0) {
		ath_hal_adjusttsf(ah, tsfdelta);
		sc->sc_stats.ast_tdma_tsf++;
	}
	ath_tdma_beacon_send(sc, vap);	/* prepare response */
#undef TU_TO_TSF
#undef TSF_TO_TU
}
6821
6822/*
6823 * Transmit a beacon frame at SWBA. Dynamic updates
6824 * to the frame contents are done as needed.
6825 */
6826static void
6827ath_tdma_beacon_send(struct ath_softc *sc, struct ieee80211vap *vap)
6828{
6829 struct ath_hal *ah = sc->sc_ah;
6830 struct ath_buf *bf;
6831 int otherant;
6832
6833 /*
6834 * Check if the previous beacon has gone out. If
6835 * not don't try to post another, skip this period
6836 * and wait for the next. Missed beacons indicate
6837 * a problem and should not occur. If we miss too
6838 * many consecutive beacons reset the device.
6839 */
6840 if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
6841 sc->sc_bmisscount++;
6842 DPRINTF(sc, ATH_DEBUG_BEACON,
6843 "%s: missed %u consecutive beacons\n",
6844 __func__, sc->sc_bmisscount);
6845 if (sc->sc_bmisscount >= ath_bstuck_threshold)
6846 taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
6847 return;
6848 }
6849 if (sc->sc_bmisscount != 0) {
6850 DPRINTF(sc, ATH_DEBUG_BEACON,
6851 "%s: resume beacon xmit after %u misses\n",
6852 __func__, sc->sc_bmisscount);
6853 sc->sc_bmisscount = 0;
6854 }
6855
6856 /*
6857 * Check recent per-antenna transmit statistics and flip
6858 * the default antenna if noticeably more frames went out
6859 * on the non-default antenna.
6860 * XXX assumes 2 anntenae
6861 */
6862 if (!sc->sc_diversity) {
6863 otherant = sc->sc_defant & 1 ? 2 : 1;
6864 if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
6865 ath_setdefantenna(sc, otherant);
6866 sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
6867 }
6868
6869 bf = ath_beacon_generate(sc, vap);
6870 if (bf != NULL) {
6871 /*
6872 * Stop any current dma and put the new frame on the queue.
6873 * This should never fail since we check above that no frames
6874 * are still pending on the queue.
6875 */
6876 if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
6877 DPRINTF(sc, ATH_DEBUG_ANY,
6878 "%s: beacon queue %u did not stop?\n",
6879 __func__, sc->sc_bhalq);
6880 /* NB: the HAL still stops DMA, so proceed */
6881 }
6882 ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
6883 ath_hal_txstart(ah, sc->sc_bhalq);
6884
6885 sc->sc_stats.ast_be_xmit++; /* XXX per-vap? */
6886
6887 /*
6888 * Record local TSF for our last send for use
6889 * in arbitrating slot collisions.
6890 */
6891 /* XXX should take a locked ref to iv_bss */
6892 vap->iv_bss->ni_tstamp.tsf = ath_hal_gettsf64(ah);
6893 }
6894}
6895#endif /* IEEE80211_SUPPORT_TDMA */
6896
6897static void
6898ath_dfs_tasklet(void *p, int npending)
6899{
6900 struct ath_softc *sc = (struct ath_softc *) p;
6901 struct ifnet *ifp = sc->sc_ifp;
6902 struct ieee80211com *ic = ifp->if_l2com;
6903
6904 /*
6905 * If previous processing has found a radar event,
6906 * signal this to the net80211 layer to begin DFS
6907 * processing.
6908 */
6909 if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
6910 /* DFS event found, initiate channel change */
6911 /*
6912 * XXX doesn't currently tell us whether the event
6913 * XXX was found in the primary or extension
6914 * XXX channel!
6915 */
6916 IEEE80211_LOCK(ic);
6917 ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
6918 IEEE80211_UNLOCK(ic);
6919 }
6920}
6921
6922MODULE_VERSION(if_ath, 1);
6923MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */
6924#if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ)
6925MODULE_DEPEND(if_ath, alq, 1, 1, 1);
6926#endif