if_ath.c: r228887 (deleted lines) vs r228888 (added lines)
1/*-
2 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
15 *
16 * NO WARRANTY
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 228887 2011-12-26 05:37:09Z adrian $");
31__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 228888 2011-12-26 05:46:22Z adrian $");
32
33/*
34 * Driver for the Atheros Wireless LAN controller.
35 *
36 * This software is derived from work of Atsushi Onoe; his contribution
37 * is greatly appreciated.
38 */
39
40#include "opt_inet.h"
41#include "opt_ath.h"
42/*
43 * This is needed for register operations which are performed
44 * by the driver - eg, calls to ath_hal_gettsf32().
45 */
46#include "opt_ah.h"
47#include "opt_wlan.h"
48
49#include <sys/param.h>
50#include <sys/systm.h>
51#include <sys/sysctl.h>
52#include <sys/mbuf.h>
53#include <sys/malloc.h>
54#include <sys/lock.h>
55#include <sys/mutex.h>
56#include <sys/kernel.h>
57#include <sys/socket.h>
58#include <sys/sockio.h>
59#include <sys/errno.h>
60#include <sys/callout.h>
61#include <sys/bus.h>
62#include <sys/endian.h>
63#include <sys/kthread.h>
64#include <sys/taskqueue.h>
65#include <sys/priv.h>
66#include <sys/module.h>
67#include <sys/ktr.h>
68#include <sys/smp.h> /* for mp_ncpus */
69
70#include <machine/bus.h>
71
72#include <net/if.h>
73#include <net/if_dl.h>
74#include <net/if_media.h>
75#include <net/if_types.h>
76#include <net/if_arp.h>
77#include <net/ethernet.h>
78#include <net/if_llc.h>
79
80#include <net80211/ieee80211_var.h>
81#include <net80211/ieee80211_regdomain.h>
82#ifdef IEEE80211_SUPPORT_SUPERG
83#include <net80211/ieee80211_superg.h>
84#endif
85#ifdef IEEE80211_SUPPORT_TDMA
86#include <net80211/ieee80211_tdma.h>
87#endif
88
89#include <net/bpf.h>
90
91#ifdef INET
92#include <netinet/in.h>
93#include <netinet/if_ether.h>
94#endif
95
96#include <dev/ath/if_athvar.h>
97#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */
98#include <dev/ath/ath_hal/ah_diagcodes.h>
99
100#include <dev/ath/if_ath_debug.h>
101#include <dev/ath/if_ath_misc.h>
102#include <dev/ath/if_ath_tx.h>
103#include <dev/ath/if_ath_sysctl.h>
104#include <dev/ath/if_ath_led.h>
105#include <dev/ath/if_ath_keycache.h>
106#include <dev/ath/if_athdfs.h>
107
108#ifdef ATH_TX99_DIAG
109#include <dev/ath/ath_tx99/ath_tx99.h>
110#endif
111
112#define ATH_KTR_INTR KTR_SPARE4
113#define ATH_KTR_ERR KTR_SPARE3
114
115/*
116 * ATH_BCBUF determines the number of vap's that can transmit
117 * beacons and also (currently) the number of vap's that can
118 * have unique mac addresses/bssid. When staggering beacons
119 * 4 is probably a good max as otherwise the beacons become
120 * very closely spaced and there is limited time for cab q traffic
121 * to go out. You can burst beacons instead but that is not good
122 * for stations in power save and at some point you really want
123 * another radio (and channel).
124 *
125 * The limit on the number of mac addresses is tied to our use of
126 * the U/L bit and tracking addresses in a byte; it would be
127 * worthwhile to allow more for applications like proxy sta.
128 */
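/*
 * NB: an illustrative case: with staggered beacons, a 100 TU beacon
 * interval and ATH_BCBUF = 4, SWBA fires every 25 TU, which is the
 * window each cab q burst has to drain before the next beacon.
 */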
129CTASSERT(ATH_BCBUF <= 8);
130
131static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
132 const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
133 const uint8_t [IEEE80211_ADDR_LEN],
134 const uint8_t [IEEE80211_ADDR_LEN]);
135static void ath_vap_delete(struct ieee80211vap *);
136static void ath_init(void *);
137static void ath_stop_locked(struct ifnet *);
138static void ath_stop(struct ifnet *);
139static void ath_start(struct ifnet *);
140static int ath_reset_vap(struct ieee80211vap *, u_long);
141static int ath_media_change(struct ifnet *);
142static void ath_watchdog(void *);
143static int ath_ioctl(struct ifnet *, u_long, caddr_t);
144static void ath_fatal_proc(void *, int);
145static void ath_bmiss_vap(struct ieee80211vap *);
146static void ath_bmiss_proc(void *, int);
147static void ath_key_update_begin(struct ieee80211vap *);
148static void ath_key_update_end(struct ieee80211vap *);
149static void ath_update_mcast(struct ifnet *);
150static void ath_update_promisc(struct ifnet *);
151static void ath_mode_init(struct ath_softc *);
152static void ath_setslottime(struct ath_softc *);
153static void ath_updateslot(struct ifnet *);
154static int ath_beaconq_setup(struct ath_hal *);
155static int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
156static void ath_beacon_update(struct ieee80211vap *, int item);
157static void ath_beacon_setup(struct ath_softc *, struct ath_buf *);
158static void ath_beacon_proc(void *, int);
159static struct ath_buf *ath_beacon_generate(struct ath_softc *,
160 struct ieee80211vap *);
161static void ath_bstuck_proc(void *, int);
162static void ath_beacon_return(struct ath_softc *, struct ath_buf *);
163static void ath_beacon_free(struct ath_softc *);
164static void ath_beacon_config(struct ath_softc *, struct ieee80211vap *);
165static void ath_descdma_cleanup(struct ath_softc *sc,
166 struct ath_descdma *, ath_bufhead *);
167static int ath_desc_alloc(struct ath_softc *);
168static void ath_desc_free(struct ath_softc *);
169static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
170 const uint8_t [IEEE80211_ADDR_LEN]);
171static void ath_node_cleanup(struct ieee80211_node *);
172static void ath_node_free(struct ieee80211_node *);
173static void ath_node_getsignal(const struct ieee80211_node *,
174 int8_t *, int8_t *);
175static int ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
176static void ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
177 int subtype, int rssi, int nf);
178static void ath_setdefantenna(struct ath_softc *, u_int);
179static void ath_rx_proc(struct ath_softc *sc, int);
180static void ath_rx_tasklet(void *, int);
181static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
182static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
183static int ath_tx_setup(struct ath_softc *, int, int);
184static int ath_wme_update(struct ieee80211com *);
185static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
186static void ath_tx_cleanup(struct ath_softc *);
187static void ath_tx_proc_q0(void *, int);
188static void ath_tx_proc_q0123(void *, int);
189static void ath_tx_proc(void *, int);
190static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
191static void ath_draintxq(struct ath_softc *, ATH_RESET_TYPE reset_type);
192static void ath_stoprecv(struct ath_softc *, int);
193static int ath_startrecv(struct ath_softc *);
194static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
195static void ath_scan_start(struct ieee80211com *);
196static void ath_scan_end(struct ieee80211com *);
197static void ath_set_channel(struct ieee80211com *);
198static void ath_calibrate(void *);
199static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
200static void ath_setup_stationkey(struct ieee80211_node *);
201static void ath_newassoc(struct ieee80211_node *, int);
202static int ath_setregdomain(struct ieee80211com *,
203 struct ieee80211_regdomain *, int,
204 struct ieee80211_channel []);
205static void ath_getradiocaps(struct ieee80211com *, int, int *,
206 struct ieee80211_channel []);
207static int ath_getchannels(struct ath_softc *);
208
209static int ath_rate_setup(struct ath_softc *, u_int mode);
210static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);
211
212static void ath_announce(struct ath_softc *);
213
214static void ath_dfs_tasklet(void *, int);
215
216#ifdef IEEE80211_SUPPORT_TDMA
217static void ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt,
218 u_int32_t bintval);
219static void ath_tdma_bintvalsetup(struct ath_softc *sc,
220 const struct ieee80211_tdma_state *tdma);
221static void ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap);
222static void ath_tdma_update(struct ieee80211_node *ni,
223 const struct ieee80211_tdma_param *tdma, int);
224static void ath_tdma_beacon_send(struct ath_softc *sc,
225 struct ieee80211vap *vap);
226
227#define TDMA_EP_MULTIPLIER (1<<10) /* pow2 to optimize out * and / */
228#define TDMA_LPF_LEN 6
229#define TDMA_DUMMY_MARKER 0x127
230#define TDMA_EP_MUL(x, mul) ((x) * (mul))
231#define TDMA_IN(x) (TDMA_EP_MUL((x), TDMA_EP_MULTIPLIER))
232#define TDMA_LPF(x, y, len) \
233 ((x != TDMA_DUMMY_MARKER) ? (((x) * ((len)-1) + (y)) / (len)) : (y))
234#define TDMA_SAMPLE(x, y) do { \
235 x = TDMA_LPF((x), TDMA_IN(y), TDMA_LPF_LEN); \
236} while (0)
237#define TDMA_EP_RND(x,mul) \
238 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
239#define TDMA_AVG(x) TDMA_EP_RND(x, TDMA_EP_MULTIPLIER)
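/*
 * NB: a worked example of the fixed-point averaging: TDMA_IN(3) scales
 * a 3us sample up to 3072; TDMA_SAMPLE then applies the IIR step
 * x' = (x*(TDMA_LPF_LEN-1) + y) / TDMA_LPF_LEN, and TDMA_AVG rounds
 * the running value back down to usecs.
 */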
240#endif /* IEEE80211_SUPPORT_TDMA */
241
242SYSCTL_DECL(_hw_ath);
243
244/* XXX validate sysctl values */
245static int ath_longcalinterval = 30; /* long cals every 30 secs */
246SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
247 0, "long chip calibration interval (secs)");
248static int ath_shortcalinterval = 100; /* short cals every 100 ms */
249SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
250 0, "short chip calibration interval (msecs)");
251static int ath_resetcalinterval = 20*60; /* reset cal state 20 mins */
252SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
253 0, "reset chip calibration results (secs)");
254static int ath_anicalinterval = 100; /* ANI calibration - 100 msec */
255SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
256 0, "ANI calibration (msecs)");
257
258static int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */
259SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
260 0, "rx buffers allocated");
261TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
262static int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */
263SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
264 0, "tx buffers allocated");
265TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
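/*
 * NB: both counts may also be set as loader tunables (e.g.
 * hw.ath.rxbuf="80" in /boot/loader.conf); changing the sysctl after
 * attach does not resize lists that were already allocated.
 */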
266
267static int ath_bstuck_threshold = 4; /* max missed beacons */
268SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
269 0, "max missed beacon xmits before chip reset");
270
271MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");
272
273#define HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
274#define HAL_MODE_HT40 \
275 (HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
276 HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)
277int
278ath_attach(u_int16_t devid, struct ath_softc *sc)
279{
280 struct ifnet *ifp;
281 struct ieee80211com *ic;
282 struct ath_hal *ah = NULL;
283 HAL_STATUS status;
284 int error = 0, i;
285 u_int wmodes;
286 uint8_t macaddr[IEEE80211_ADDR_LEN];
287
288 DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
289
290 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
291 if (ifp == NULL) {
292 device_printf(sc->sc_dev, "can not if_alloc()\n");
293 error = ENOSPC;
294 goto bad;
295 }
296 ic = ifp->if_l2com;
297
298 /* set these up early for if_printf use */
299 if_initname(ifp, device_get_name(sc->sc_dev),
300 device_get_unit(sc->sc_dev));
301
302 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
303 sc->sc_eepromdata, &status);
304 if (ah == NULL) {
305 if_printf(ifp, "unable to attach hardware; HAL status %u\n",
306 status);
307 error = ENXIO;
308 goto bad;
309 }
310 sc->sc_ah = ah;
311 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */
312#ifdef ATH_DEBUG
313 sc->sc_debug = ath_debug;
314#endif
315
316 /*
317 * Check if the MAC has multi-rate retry support.
318 * We do this by trying to setup a fake extended
 319	 * descriptor.  MACs that don't have support will
 320	 * return false w/o doing anything.  MACs that do
321 * support it will return true w/o doing anything.
322 */
323 sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);
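	/*
	 * NB: with MRR each frame carries up to four rate/try pairs
	 * (e.g. 54M/2 -> 36M/2 -> 24M/2 -> 6M/4) rather than a single
	 * rate; the rate control modules key off sc_mrretry for this.
	 */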
324
325 /*
326 * Check if the device has hardware counters for PHY
327 * errors. If so we need to enable the MIB interrupt
328 * so we can act on stat triggers.
329 */
330 if (ath_hal_hwphycounters(ah))
331 sc->sc_needmib = 1;
332
333 /*
334 * Get the hardware key cache size.
335 */
336 sc->sc_keymax = ath_hal_keycachesize(ah);
337 if (sc->sc_keymax > ATH_KEYMAX) {
338 if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
339 ATH_KEYMAX, sc->sc_keymax);
340 sc->sc_keymax = ATH_KEYMAX;
341 }
342 /*
343 * Reset the key cache since some parts do not
344 * reset the contents on initial power up.
345 */
346 for (i = 0; i < sc->sc_keymax; i++)
347 ath_hal_keyreset(ah, i);
348
349 /*
350 * Collect the default channel list.
351 */
352 error = ath_getchannels(sc);
353 if (error != 0)
354 goto bad;
355
356 /*
357 * Setup rate tables for all potential media types.
358 */
359 ath_rate_setup(sc, IEEE80211_MODE_11A);
360 ath_rate_setup(sc, IEEE80211_MODE_11B);
361 ath_rate_setup(sc, IEEE80211_MODE_11G);
362 ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
363 ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
364 ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
365 ath_rate_setup(sc, IEEE80211_MODE_11NA);
366 ath_rate_setup(sc, IEEE80211_MODE_11NG);
367 ath_rate_setup(sc, IEEE80211_MODE_HALF);
368 ath_rate_setup(sc, IEEE80211_MODE_QUARTER);
369
370 /* NB: setup here so ath_rate_update is happy */
371 ath_setcurmode(sc, IEEE80211_MODE_11A);
372
373 /*
374 * Allocate tx+rx descriptors and populate the lists.
375 */
376 error = ath_desc_alloc(sc);
377 if (error != 0) {
378 if_printf(ifp, "failed to allocate descriptors: %d\n", error);
379 goto bad;
380 }
381 callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0);
382 callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0);
383
384 ATH_TXBUF_LOCK_INIT(sc);
385
386 sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
387 taskqueue_thread_enqueue, &sc->sc_tq);
388 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
389 "%s taskq", ifp->if_xname);
390
391 TASK_INIT(&sc->sc_rxtask, 0, ath_rx_tasklet, sc);
392 TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
393 TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
394
395 /*
396 * Allocate hardware transmit queues: one queue for
397 * beacon frames and one data queue for each QoS
398 * priority. Note that the hal handles resetting
399 * these queues at the needed time.
400 *
401 * XXX PS-Poll
402 */
403 sc->sc_bhalq = ath_beaconq_setup(ah);
404 if (sc->sc_bhalq == (u_int) -1) {
405 if_printf(ifp, "unable to setup a beacon xmit queue!\n");
406 error = EIO;
407 goto bad2;
408 }
409 sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
410 if (sc->sc_cabq == NULL) {
411 if_printf(ifp, "unable to setup CAB xmit queue!\n");
412 error = EIO;
413 goto bad2;
414 }
 415	/* NB: ensure BK queue is the lowest priority h/w queue */
416 if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
417 if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
418 ieee80211_wme_acnames[WME_AC_BK]);
419 error = EIO;
420 goto bad2;
421 }
422 if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
423 !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
424 !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
425 /*
426 * Not enough hardware tx queues to properly do WME;
427 * just punt and assign them all to the same h/w queue.
428 * We could do a better job of this if, for example,
 429		 * we allocated queues when switching from station to
430 * AP mode.
431 */
432 if (sc->sc_ac2q[WME_AC_VI] != NULL)
433 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
434 if (sc->sc_ac2q[WME_AC_BE] != NULL)
435 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
436 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
437 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
438 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
439 }
440
441 /*
442 * Special case certain configurations. Note the
443 * CAB queue is handled by these specially so don't
444 * include them when checking the txq setup mask.
445 */
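	/*
	 * NB: a mask of 0x01 means only one data queue (q0) was set
	 * up, 0x0f means one h/w queue per WME AC (q0-q3); anything
	 * else takes the generic per-queue completion path.
	 */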
446 switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
447 case 0x01:
448 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
449 break;
450 case 0x0f:
451 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
452 break;
453 default:
454 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
455 break;
456 }
457
458 /*
459 * Setup rate control. Some rate control modules
 460	 * call back to change the antenna state so expose
461 * the necessary entry points.
462 * XXX maybe belongs in struct ath_ratectrl?
463 */
464 sc->sc_setdefantenna = ath_setdefantenna;
465 sc->sc_rc = ath_rate_attach(sc);
466 if (sc->sc_rc == NULL) {
467 error = EIO;
468 goto bad2;
469 }
470
471 /* Attach DFS module */
472 if (! ath_dfs_attach(sc)) {
473 device_printf(sc->sc_dev,
474 "%s: unable to attach DFS\n", __func__);
475 error = EIO;
476 goto bad2;
477 }
478
479 /* Start DFS processing tasklet */
480 TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);
481
482 sc->sc_blinking = 0;
483 sc->sc_ledstate = 1;
484 sc->sc_ledon = 0; /* low true */
485 sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */
486 callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);
487 /*
488 * Auto-enable soft led processing for IBM cards and for
489 * 5211 minipci cards. Users can also manually enable/disable
490 * support with a sysctl.
491 */
492 sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
493 if (sc->sc_softled) {
494 ath_hal_gpioCfgOutput(ah, sc->sc_ledpin,
495 HAL_GPIO_MUX_MAC_NETWORK_LED);
496 ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
497 }
493 ath_led_config(sc);
498
499 ifp->if_softc = sc;
500 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
501 ifp->if_start = ath_start;
502 ifp->if_ioctl = ath_ioctl;
503 ifp->if_init = ath_init;
504 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
505 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
506 IFQ_SET_READY(&ifp->if_snd);
507
508 ic->ic_ifp = ifp;
509 /* XXX not right but it's not used anywhere important */
510 ic->ic_phytype = IEEE80211_T_OFDM;
511 ic->ic_opmode = IEEE80211_M_STA;
512 ic->ic_caps =
513 IEEE80211_C_STA /* station mode */
514 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */
515 | IEEE80211_C_HOSTAP /* hostap mode */
516 | IEEE80211_C_MONITOR /* monitor mode */
517 | IEEE80211_C_AHDEMO /* adhoc demo mode */
518 | IEEE80211_C_WDS /* 4-address traffic works */
519 | IEEE80211_C_MBSS /* mesh point link mode */
520 | IEEE80211_C_SHPREAMBLE /* short preamble supported */
521 | IEEE80211_C_SHSLOT /* short slot time supported */
522 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */
523 | IEEE80211_C_BGSCAN /* capable of bg scanning */
524 | IEEE80211_C_TXFRAG /* handle tx frags */
525#ifdef ATH_ENABLE_DFS
526 | IEEE80211_C_DFS /* Enable radar detection */
527#endif
528 ;
529 /*
530 * Query the hal to figure out h/w crypto support.
531 */
532 if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
533 ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
534 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
535 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
536 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
537 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
538 if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
539 ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
540 if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
541 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
542 /*
543 * Check if h/w does the MIC and/or whether the
544 * separate key cache entries are required to
545 * handle both tx+rx MIC keys.
546 */
547 if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
548 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
549 /*
550 * If the h/w supports storing tx+rx MIC keys
551 * in one cache slot automatically enable use.
552 */
553 if (ath_hal_hastkipsplit(ah) ||
554 !ath_hal_settkipsplit(ah, AH_FALSE))
555 sc->sc_splitmic = 1;
556 /*
557 * If the h/w can do TKIP MIC together with WME then
558 * we use it; otherwise we force the MIC to be done
559 * in software by the net80211 layer.
560 */
561 if (ath_hal_haswmetkipmic(ah))
562 sc->sc_wmetkipmic = 1;
563 }
564 sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
565 /*
566 * Check for multicast key search support.
567 */
568 if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
569 !ath_hal_getmcastkeysearch(sc->sc_ah)) {
570 ath_hal_setmcastkeysearch(sc->sc_ah, 1);
571 }
572 sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
573 /*
574 * Mark key cache slots associated with global keys
575 * as in use. If we knew TKIP was not to be used we
576 * could leave the +32, +64, and +32+64 slots free.
577 */
578 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
579 setbit(sc->sc_keymap, i);
580 setbit(sc->sc_keymap, i+64);
581 if (sc->sc_splitmic) {
582 setbit(sc->sc_keymap, i+32);
583 setbit(sc->sc_keymap, i+32+64);
584 }
585 }
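	/*
	 * NB: e.g. global key 0 reserves slot 0 for the key itself and
	 * slot 64 for its MIC key; with split-mic h/w the companion
	 * slots 32 and 32+64 are reserved too (the exact MIC slot
	 * layout is handled in if_ath_keycache.c).
	 */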
586 /*
587 * TPC support can be done either with a global cap or
588 * per-packet support. The latter is not available on
589 * all parts. We're a bit pedantic here as all parts
590 * support a global cap.
591 */
592 if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
593 ic->ic_caps |= IEEE80211_C_TXPMGT;
594
595 /*
596 * Mark WME capability only if we have sufficient
597 * hardware queues to do proper priority scheduling.
598 */
599 if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
600 ic->ic_caps |= IEEE80211_C_WME;
601 /*
602 * Check for misc other capabilities.
603 */
604 if (ath_hal_hasbursting(ah))
605 ic->ic_caps |= IEEE80211_C_BURST;
606 sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
607 sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
608 sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
609 sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
610 sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
611 if (ath_hal_hasfastframes(ah))
612 ic->ic_caps |= IEEE80211_C_FF;
613 wmodes = ath_hal_getwirelessmodes(ah);
614 if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
615 ic->ic_caps |= IEEE80211_C_TURBOP;
616#ifdef IEEE80211_SUPPORT_TDMA
617 if (ath_hal_macversion(ah) > 0x78) {
618 ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
619 ic->ic_tdma_update = ath_tdma_update;
620 }
621#endif
622
623 /*
 624 * The if_ath 11n support is not yet ready for normal use;
 625 * enabling this option will likely break anything and everything.
 626 * Don't enable it unless you know what you're doing.
627 */
628
629#ifdef ATH_ENABLE_11N
630 /*
631 * Query HT capabilities
632 */
633 if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
634 (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
635 int rxs, txs;
636
637 device_printf(sc->sc_dev, "[HT] enabling HT modes\n");
638 ic->ic_htcaps = IEEE80211_HTC_HT /* HT operation */
639 | IEEE80211_HTC_AMPDU /* A-MPDU tx/rx */
640 | IEEE80211_HTC_AMSDU /* A-MSDU tx/rx */
641 | IEEE80211_HTCAP_MAXAMSDU_3839
642 /* max A-MSDU length */
 643		    | IEEE80211_HTCAP_SMPS_OFF;	/* SM power save off */
 644
645
646 /*
647 * Enable short-GI for HT20 only if the hardware
648 * advertises support.
649 * Notably, anything earlier than the AR9287 doesn't.
650 */
651 if ((ath_hal_getcapability(ah,
652 HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
653 (wmodes & HAL_MODE_HT20)) {
654 device_printf(sc->sc_dev,
655 "[HT] enabling short-GI in 20MHz mode\n");
656 ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
657 }
658
659 if (wmodes & HAL_MODE_HT40)
660 ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
661 | IEEE80211_HTCAP_SHORTGI40;
662
663 /*
664 * TX/RX streams need to be taken into account when
665 * negotiating which MCS rates it'll receive and
666 * what MCS rates are available for TX.
667 */
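		/*
		 * NB: e.g. a 2x2 part reports rxs = txs = 2 here, which
		 * net80211 turns into an MCS 0-15 rate set.
		 */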
668 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &rxs);
669 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &txs);
670
671 ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
672 ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);
673
674 ic->ic_txstream = txs;
675 ic->ic_rxstream = rxs;
676
677 device_printf(sc->sc_dev,
678 "[HT] %d RX streams; %d TX streams\n", rxs, txs);
679 }
680#endif
681
682 /*
683 * Check if the hardware requires PCI register serialisation.
684 * Some of the Owl based MACs require this.
685 */
686 if (mp_ncpus > 1 &&
687 ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
688 0, NULL) == HAL_OK) {
689 sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
690 device_printf(sc->sc_dev,
691 "Enabling register serialisation\n");
692 }
693
694 /*
695 * Indicate we need the 802.11 header padded to a
696 * 32-bit boundary for 4-address and QoS frames.
697 */
698 ic->ic_flags |= IEEE80211_F_DATAPAD;
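	/*
	 * NB: e.g. a QoS data header is 26 bytes, so two pad bytes are
	 * inserted to keep the payload 32-bit aligned for DMA.
	 */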
699
700 /*
701 * Query the hal about antenna support.
702 */
703 sc->sc_defant = ath_hal_getdefantenna(ah);
704
705 /*
706 * Not all chips have the VEOL support we want to
707 * use with IBSS beacons; check here for it.
708 */
709 sc->sc_hasveol = ath_hal_hasveol(ah);
710
711 /* get mac address from hardware */
712 ath_hal_getmac(ah, macaddr);
713 if (sc->sc_hasbmask)
714 ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);
715
716 /* NB: used to size node table key mapping array */
717 ic->ic_max_keyix = sc->sc_keymax;
718 /* call MI attach routine. */
719 ieee80211_ifattach(ic, macaddr);
720 ic->ic_setregdomain = ath_setregdomain;
721 ic->ic_getradiocaps = ath_getradiocaps;
722 sc->sc_opmode = HAL_M_STA;
723
724 /* override default methods */
725 ic->ic_newassoc = ath_newassoc;
726 ic->ic_updateslot = ath_updateslot;
727 ic->ic_wme.wme_update = ath_wme_update;
728 ic->ic_vap_create = ath_vap_create;
729 ic->ic_vap_delete = ath_vap_delete;
730 ic->ic_raw_xmit = ath_raw_xmit;
731 ic->ic_update_mcast = ath_update_mcast;
732 ic->ic_update_promisc = ath_update_promisc;
733 ic->ic_node_alloc = ath_node_alloc;
734 sc->sc_node_free = ic->ic_node_free;
735 ic->ic_node_free = ath_node_free;
736 sc->sc_node_cleanup = ic->ic_node_cleanup;
737 ic->ic_node_cleanup = ath_node_cleanup;
738 ic->ic_node_getsignal = ath_node_getsignal;
739 ic->ic_scan_start = ath_scan_start;
740 ic->ic_scan_end = ath_scan_end;
741 ic->ic_set_channel = ath_set_channel;
742
743 /* 802.11n specific - but just override anyway */
744 sc->sc_addba_request = ic->ic_addba_request;
745 sc->sc_addba_response = ic->ic_addba_response;
746 sc->sc_addba_stop = ic->ic_addba_stop;
747 sc->sc_bar_response = ic->ic_bar_response;
748 sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;
749
750 ic->ic_addba_request = ath_addba_request;
751 ic->ic_addba_response = ath_addba_response;
752 ic->ic_addba_response_timeout = ath_addba_response_timeout;
753 ic->ic_addba_stop = ath_addba_stop;
754 ic->ic_bar_response = ath_bar_response;
755
756 ieee80211_radiotap_attach(ic,
757 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
758 ATH_TX_RADIOTAP_PRESENT,
759 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
760 ATH_RX_RADIOTAP_PRESENT);
761
762 /*
763 * Setup dynamic sysctl's now that country code and
764 * regdomain are available from the hal.
765 */
766 ath_sysctlattach(sc);
767 ath_sysctl_stats_attach(sc);
768 ath_sysctl_hal_attach(sc);
769
770 if (bootverbose)
771 ieee80211_announce(ic);
772 ath_announce(sc);
773 return 0;
774bad2:
775 ath_tx_cleanup(sc);
776 ath_desc_free(sc);
777bad:
778 if (ah)
779 ath_hal_detach(ah);
780 if (ifp != NULL)
781 if_free(ifp);
782 sc->sc_invalid = 1;
783 return error;
784}
785
786int
787ath_detach(struct ath_softc *sc)
788{
789 struct ifnet *ifp = sc->sc_ifp;
790
791 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
792 __func__, ifp->if_flags);
793
794 /*
795 * NB: the order of these is important:
796 * o stop the chip so no more interrupts will fire
797 * o call the 802.11 layer before detaching the hal to
 798	 *   ensure callbacks into the driver to delete global
799 * key cache entries can be handled
800 * o free the taskqueue which drains any pending tasks
801 * o reclaim the tx queue data structures after calling
802 * the 802.11 layer as we'll get called back to reclaim
803 * node state and potentially want to use them
804 * o to cleanup the tx queues the hal is called, so detach
805 * it last
806 * Other than that, it's straightforward...
807 */
808 ath_stop(ifp);
809 ieee80211_ifdetach(ifp->if_l2com);
810 taskqueue_free(sc->sc_tq);
811#ifdef ATH_TX99_DIAG
812 if (sc->sc_tx99 != NULL)
813 sc->sc_tx99->detach(sc->sc_tx99);
814#endif
815 ath_rate_detach(sc->sc_rc);
816
817 ath_dfs_detach(sc);
818 ath_desc_free(sc);
819 ath_tx_cleanup(sc);
820 ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */
821 if_free(ifp);
822
823 return 0;
824}
825
826/*
827 * MAC address handling for multiple BSS on the same radio.
828 * The first vap uses the MAC address from the EEPROM. For
829 * subsequent vap's we set the U/L bit (bit 1) in the MAC
830 * address and use the next six bits as an index.
831 */
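/*
 * For example (illustrative addresses): with EEPROM address
 * 00:11:22:33:44:55, the vap assigned index 1 is cloned as
 * 06:11:22:33:44:55 ((1<<2)|0x2 merged into the first byte) and
 * index 2 as 0a:11:22:33:44:55.
 */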
832static void
833assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
834{
835 int i;
836
837 if (clone && sc->sc_hasbmask) {
838 /* NB: we only do this if h/w supports multiple bssid */
839 for (i = 0; i < 8; i++)
840 if ((sc->sc_bssidmask & (1<<i)) == 0)
841 break;
842 if (i != 0)
843 mac[0] |= (i << 2)|0x2;
844 } else
845 i = 0;
846 sc->sc_bssidmask |= 1<<i;
847 sc->sc_hwbssidmask[0] &= ~mac[0];
848 if (i == 0)
849 sc->sc_nbssid0++;
850}
851
852static void
853reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
854{
855 int i = mac[0] >> 2;
856 uint8_t mask;
857
858 if (i != 0 || --sc->sc_nbssid0 == 0) {
859 sc->sc_bssidmask &= ~(1<<i);
860 /* recalculate bssid mask from remaining addresses */
861 mask = 0xff;
862 for (i = 1; i < 8; i++)
863 if (sc->sc_bssidmask & (1<<i))
864 mask &= ~((i<<2)|0x2);
865 sc->sc_hwbssidmask[0] |= mask;
866 }
867}
868
869/*
870 * Assign a beacon xmit slot. We try to space out
871 * assignments so when beacons are staggered the
872 * traffic coming out of the cab q has maximal time
873 * to go out before the next beacon is scheduled.
874 */
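/*
 * NB: e.g. with ATH_BCBUF = 4 and only slot 0 occupied, slot 2 is
 * chosen since both of its neighbours (1 and 3, modulo ATH_BCBUF)
 * are free.
 */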
875static int
876assign_bslot(struct ath_softc *sc)
877{
878 u_int slot, free;
879
880 free = 0;
881 for (slot = 0; slot < ATH_BCBUF; slot++)
882 if (sc->sc_bslot[slot] == NULL) {
883 if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
884 sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
885 return slot;
886 free = slot;
887 /* NB: keep looking for a double slot */
888 }
889 return free;
890}
891
892static struct ieee80211vap *
893ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
894 enum ieee80211_opmode opmode, int flags,
895 const uint8_t bssid[IEEE80211_ADDR_LEN],
896 const uint8_t mac0[IEEE80211_ADDR_LEN])
897{
898 struct ath_softc *sc = ic->ic_ifp->if_softc;
899 struct ath_vap *avp;
900 struct ieee80211vap *vap;
901 uint8_t mac[IEEE80211_ADDR_LEN];
902 int needbeacon, error;
903 enum ieee80211_opmode ic_opmode;
904
905 avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
906 M_80211_VAP, M_WAITOK | M_ZERO);
907 needbeacon = 0;
908 IEEE80211_ADDR_COPY(mac, mac0);
909
910 ATH_LOCK(sc);
911 ic_opmode = opmode; /* default to opmode of new vap */
912 switch (opmode) {
913 case IEEE80211_M_STA:
914 if (sc->sc_nstavaps != 0) { /* XXX only 1 for now */
915 device_printf(sc->sc_dev, "only 1 sta vap supported\n");
916 goto bad;
917 }
918 if (sc->sc_nvaps) {
919 /*
920 * With multiple vaps we must fall back
921 * to s/w beacon miss handling.
922 */
923 flags |= IEEE80211_CLONE_NOBEACONS;
924 }
925 if (flags & IEEE80211_CLONE_NOBEACONS) {
926 /*
 927			 * Station mode w/o beacons is implemented w/ AP mode.
928 */
929 ic_opmode = IEEE80211_M_HOSTAP;
930 }
931 break;
932 case IEEE80211_M_IBSS:
933 if (sc->sc_nvaps != 0) { /* XXX only 1 for now */
934 device_printf(sc->sc_dev,
935 "only 1 ibss vap supported\n");
936 goto bad;
937 }
938 needbeacon = 1;
939 break;
940 case IEEE80211_M_AHDEMO:
941#ifdef IEEE80211_SUPPORT_TDMA
942 if (flags & IEEE80211_CLONE_TDMA) {
943 if (sc->sc_nvaps != 0) {
944 device_printf(sc->sc_dev,
945 "only 1 tdma vap supported\n");
946 goto bad;
947 }
948 needbeacon = 1;
949 flags |= IEEE80211_CLONE_NOBEACONS;
950 }
951 /* fall thru... */
952#endif
953 case IEEE80211_M_MONITOR:
954 if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
955 /*
956 * Adopt existing mode. Adding a monitor or ahdemo
957 * vap to an existing configuration is of dubious
958 * value but should be ok.
959 */
960 /* XXX not right for monitor mode */
961 ic_opmode = ic->ic_opmode;
962 }
963 break;
964 case IEEE80211_M_HOSTAP:
965 case IEEE80211_M_MBSS:
966 needbeacon = 1;
967 break;
968 case IEEE80211_M_WDS:
969 if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
970 device_printf(sc->sc_dev,
971 "wds not supported in sta mode\n");
972 goto bad;
973 }
974 /*
975 * Silently remove any request for a unique
976 * bssid; WDS vap's always share the local
977 * mac address.
978 */
979 flags &= ~IEEE80211_CLONE_BSSID;
980 if (sc->sc_nvaps == 0)
981 ic_opmode = IEEE80211_M_HOSTAP;
982 else
983 ic_opmode = ic->ic_opmode;
984 break;
985 default:
986 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
987 goto bad;
988 }
989 /*
990 * Check that a beacon buffer is available; the code below assumes it.
991 */
 992	if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
993 device_printf(sc->sc_dev, "no beacon buffer available\n");
994 goto bad;
995 }
996
997 /* STA, AHDEMO? */
998 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
999 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
1000 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
1001 }
1002
1003 vap = &avp->av_vap;
1004 /* XXX can't hold mutex across if_alloc */
1005 ATH_UNLOCK(sc);
1006 error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
1007 bssid, mac);
1008 ATH_LOCK(sc);
1009 if (error != 0) {
1010 device_printf(sc->sc_dev, "%s: error %d creating vap\n",
1011 __func__, error);
1012 goto bad2;
1013 }
1014
1015 /* h/w crypto support */
1016 vap->iv_key_alloc = ath_key_alloc;
1017 vap->iv_key_delete = ath_key_delete;
1018 vap->iv_key_set = ath_key_set;
1019 vap->iv_key_update_begin = ath_key_update_begin;
1020 vap->iv_key_update_end = ath_key_update_end;
1021
1022 /* override various methods */
1023 avp->av_recv_mgmt = vap->iv_recv_mgmt;
1024 vap->iv_recv_mgmt = ath_recv_mgmt;
1025 vap->iv_reset = ath_reset_vap;
1026 vap->iv_update_beacon = ath_beacon_update;
1027 avp->av_newstate = vap->iv_newstate;
1028 vap->iv_newstate = ath_newstate;
1029 avp->av_bmiss = vap->iv_bmiss;
1030 vap->iv_bmiss = ath_bmiss_vap;
1031
1032 /* Set default parameters */
1033
1034 /*
 1035	 * MACs earlier than the AR9300 series don't
 1036	 * support a smaller MPDU density.
1037 */
1038 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
1039 /*
 1040	 * All NICs can handle the maximum size; however,
 1041	 * AR5416 based MACs can only TX aggregates w/ RTS
 1042	 * protection when the total aggregate size is <= 8k.
 1043	 * For now that's enforced by the TX path.
1044 */
1045 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
1046
1047 avp->av_bslot = -1;
1048 if (needbeacon) {
1049 /*
1050 * Allocate beacon state and setup the q for buffered
1051 * multicast frames. We know a beacon buffer is
1052 * available because we checked above.
1053 */
1054 avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
1055 TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
1056 if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
1057 /*
1058 * Assign the vap to a beacon xmit slot. As above
1059 * this cannot fail to find a free one.
1060 */
1061 avp->av_bslot = assign_bslot(sc);
1062 KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
1063 ("beacon slot %u not empty", avp->av_bslot));
1064 sc->sc_bslot[avp->av_bslot] = vap;
1065 sc->sc_nbcnvaps++;
1066 }
1067 if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
1068 /*
 1069			 * Multiple vaps are to transmit beacons and we
1070 * have h/w support for TSF adjusting; enable
1071 * use of staggered beacons.
1072 */
1073 sc->sc_stagbeacons = 1;
1074 }
1075 ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
1076 }
1077
1078 ic->ic_opmode = ic_opmode;
1079 if (opmode != IEEE80211_M_WDS) {
1080 sc->sc_nvaps++;
1081 if (opmode == IEEE80211_M_STA)
1082 sc->sc_nstavaps++;
1083 if (opmode == IEEE80211_M_MBSS)
1084 sc->sc_nmeshvaps++;
1085 }
1086 switch (ic_opmode) {
1087 case IEEE80211_M_IBSS:
1088 sc->sc_opmode = HAL_M_IBSS;
1089 break;
1090 case IEEE80211_M_STA:
1091 sc->sc_opmode = HAL_M_STA;
1092 break;
1093 case IEEE80211_M_AHDEMO:
1094#ifdef IEEE80211_SUPPORT_TDMA
1095 if (vap->iv_caps & IEEE80211_C_TDMA) {
1096 sc->sc_tdma = 1;
1097 /* NB: disable tsf adjust */
1098 sc->sc_stagbeacons = 0;
1099 }
1100 /*
1101 * NB: adhoc demo mode is a pseudo mode; to the hal it's
1102 * just ap mode.
1103 */
1104 /* fall thru... */
1105#endif
1106 case IEEE80211_M_HOSTAP:
1107 case IEEE80211_M_MBSS:
1108 sc->sc_opmode = HAL_M_HOSTAP;
1109 break;
1110 case IEEE80211_M_MONITOR:
1111 sc->sc_opmode = HAL_M_MONITOR;
1112 break;
1113 default:
1114 /* XXX should not happen */
1115 break;
1116 }
1117 if (sc->sc_hastsfadd) {
1118 /*
1119 * Configure whether or not TSF adjust should be done.
1120 */
1121 ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
1122 }
1123 if (flags & IEEE80211_CLONE_NOBEACONS) {
1124 /*
1125 * Enable s/w beacon miss handling.
1126 */
1127 sc->sc_swbmiss = 1;
1128 }
1129 ATH_UNLOCK(sc);
1130
1131 /* complete setup */
1132 ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
1133 return vap;
1134bad2:
1135 reclaim_address(sc, mac);
1136 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
1137bad:
1138 free(avp, M_80211_VAP);
1139 ATH_UNLOCK(sc);
1140 return NULL;
1141}
1142
1143static void
1144ath_vap_delete(struct ieee80211vap *vap)
1145{
1146 struct ieee80211com *ic = vap->iv_ic;
1147 struct ifnet *ifp = ic->ic_ifp;
1148 struct ath_softc *sc = ifp->if_softc;
1149 struct ath_hal *ah = sc->sc_ah;
1150 struct ath_vap *avp = ATH_VAP(vap);
1151
1152 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
1153 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1154 /*
1155 * Quiesce the hardware while we remove the vap. In
1156 * particular we need to reclaim all references to
1157 * the vap state by any frames pending on the tx queues.
1158 */
1159 ath_hal_intrset(ah, 0); /* disable interrupts */
1160 ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */
1161 /* XXX Do all frames from all vaps/nodes need draining here? */
1162 ath_stoprecv(sc, 1); /* stop recv side */
1163 }
1164
1165 ieee80211_vap_detach(vap);
1166
1167 /*
1168 * XXX Danger Will Robinson! Danger!
1169 *
1170 * Because ieee80211_vap_detach() can queue a frame (the station
1171 * diassociate message?) after we've drained the TXQ and
1172 * flushed the software TXQ, we will end up with a frame queued
1173 * to a node whose vap is about to be freed.
1174 *
1175 * To work around this, flush the hardware/software again.
1176 * This may be racy - the ath task may be running and the packet
1177 * may be being scheduled between sw->hw txq. Tsk.
1178 *
1179 * TODO: figure out why a new node gets allocated somewhere around
1180 * here (after the ath_tx_swq() call; and after an ath_stop_locked()
1181 * call!)
1182 */
1183
1184 ath_draintxq(sc, ATH_RESET_DEFAULT);
1185
1186 ATH_LOCK(sc);
1187 /*
1188 * Reclaim beacon state. Note this must be done before
1189 * the vap instance is reclaimed as we may have a reference
1190 * to it in the buffer for the beacon frame.
1191 */
1192 if (avp->av_bcbuf != NULL) {
1193 if (avp->av_bslot != -1) {
1194 sc->sc_bslot[avp->av_bslot] = NULL;
1195 sc->sc_nbcnvaps--;
1196 }
1197 ath_beacon_return(sc, avp->av_bcbuf);
1198 avp->av_bcbuf = NULL;
1199 if (sc->sc_nbcnvaps == 0) {
1200 sc->sc_stagbeacons = 0;
1201 if (sc->sc_hastsfadd)
1202 ath_hal_settsfadjust(sc->sc_ah, 0);
1203 }
1204 /*
1205 * Reclaim any pending mcast frames for the vap.
1206 */
1207 ath_tx_draintxq(sc, &avp->av_mcastq);
1208 ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);
1209 }
1210 /*
1211 * Update bookkeeping.
1212 */
1213 if (vap->iv_opmode == IEEE80211_M_STA) {
1214 sc->sc_nstavaps--;
1215 if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
1216 sc->sc_swbmiss = 0;
1217 } else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
1218 vap->iv_opmode == IEEE80211_M_MBSS) {
1219 reclaim_address(sc, vap->iv_myaddr);
1220 ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
1221 if (vap->iv_opmode == IEEE80211_M_MBSS)
1222 sc->sc_nmeshvaps--;
1223 }
1224 if (vap->iv_opmode != IEEE80211_M_WDS)
1225 sc->sc_nvaps--;
1226#ifdef IEEE80211_SUPPORT_TDMA
1227 /* TDMA operation ceases when the last vap is destroyed */
1228 if (sc->sc_tdma && sc->sc_nvaps == 0) {
1229 sc->sc_tdma = 0;
1230 sc->sc_swbmiss = 0;
1231 }
1232#endif
1233 free(avp, M_80211_VAP);
1234
1235 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1236 /*
1237 * Restart rx+tx machines if still running (RUNNING will
1238 * be reset if we just destroyed the last vap).
1239 */
1240 if (ath_startrecv(sc) != 0)
1241 if_printf(ifp, "%s: unable to restart recv logic\n",
1242 __func__);
1243 if (sc->sc_beacons) { /* restart beacons */
1244#ifdef IEEE80211_SUPPORT_TDMA
1245 if (sc->sc_tdma)
1246 ath_tdma_config(sc, NULL);
1247 else
1248#endif
1249 ath_beacon_config(sc, NULL);
1250 }
1251 ath_hal_intrset(ah, sc->sc_imask);
1252 }
1253 ATH_UNLOCK(sc);
1254}
1255
1256void
1257ath_suspend(struct ath_softc *sc)
1258{
1259 struct ifnet *ifp = sc->sc_ifp;
1260 struct ieee80211com *ic = ifp->if_l2com;
1261
1262 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1263 __func__, ifp->if_flags);
1264
1265 sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;
1266 if (ic->ic_opmode == IEEE80211_M_STA)
1267 ath_stop(ifp);
1268 else
1269 ieee80211_suspend_all(ic);
1270 /*
1271 * NB: don't worry about putting the chip in low power
1272 * mode; pci will power off our socket on suspend and
1273 * CardBus detaches the device.
1274 */
1275}
1276
1277/*
1278 * Reset the key cache since some parts do not reset the
1279 * contents on resume. First we clear all entries, then
1280 * re-load keys that the 802.11 layer assumes are setup
1281 * in h/w.
1282 */
1283static void
1284ath_reset_keycache(struct ath_softc *sc)
1285{
1286 struct ifnet *ifp = sc->sc_ifp;
1287 struct ieee80211com *ic = ifp->if_l2com;
1288 struct ath_hal *ah = sc->sc_ah;
1289 int i;
1290
1291 for (i = 0; i < sc->sc_keymax; i++)
1292 ath_hal_keyreset(ah, i);
1293 ieee80211_crypto_reload_keys(ic);
1294}
1295
1296void
1297ath_resume(struct ath_softc *sc)
1298{
1299 struct ifnet *ifp = sc->sc_ifp;
1300 struct ieee80211com *ic = ifp->if_l2com;
1301 struct ath_hal *ah = sc->sc_ah;
1302 HAL_STATUS status;
1303
1304 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1305 __func__, ifp->if_flags);
1306
1307 /*
1308 * Must reset the chip before we reload the
1309 * keycache as we were powered down on suspend.
1310 */
1311 ath_hal_reset(ah, sc->sc_opmode,
1312 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
1313 AH_FALSE, &status);
1314 ath_reset_keycache(sc);
1315
1316 /* Let DFS at it in case it's a DFS channel */
1317 ath_dfs_radar_enable(sc, ic->ic_curchan);
1318
1319 if (sc->sc_resume_up) {
1320 if (ic->ic_opmode == IEEE80211_M_STA) {
1321 ath_init(sc);
1322 /*
1323 * Program the beacon registers using the last rx'd
1324 * beacon frame and enable sync on the next beacon
1325 * we see. This should handle the case where we
1326 * wakeup and find the same AP and also the case where
1327 * we wakeup and need to roam. For the latter we
1328 * should get bmiss events that trigger a roam.
1329 */
1330 ath_beacon_config(sc, NULL);
1331 sc->sc_syncbeacon = 1;
1332 } else
1333 ieee80211_resume_all(ic);
1334 }
494
495 ifp->if_softc = sc;
496 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
497 ifp->if_start = ath_start;
498 ifp->if_ioctl = ath_ioctl;
499 ifp->if_init = ath_init;
500 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
501 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
502 IFQ_SET_READY(&ifp->if_snd);
503
504 ic->ic_ifp = ifp;
505 /* XXX not right but it's not used anywhere important */
506 ic->ic_phytype = IEEE80211_T_OFDM;
507 ic->ic_opmode = IEEE80211_M_STA;
508 ic->ic_caps =
509 IEEE80211_C_STA /* station mode */
510 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */
511 | IEEE80211_C_HOSTAP /* hostap mode */
512 | IEEE80211_C_MONITOR /* monitor mode */
513 | IEEE80211_C_AHDEMO /* adhoc demo mode */
514 | IEEE80211_C_WDS /* 4-address traffic works */
515 | IEEE80211_C_MBSS /* mesh point link mode */
516 | IEEE80211_C_SHPREAMBLE /* short preamble supported */
517 | IEEE80211_C_SHSLOT /* short slot time supported */
518 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */
519 | IEEE80211_C_BGSCAN /* capable of bg scanning */
520 | IEEE80211_C_TXFRAG /* handle tx frags */
521#ifdef ATH_ENABLE_DFS
522 | IEEE80211_C_DFS /* Enable radar detection */
523#endif
524 ;
525 /*
526 * Query the hal to figure out h/w crypto support.
527 */
528 if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
529 ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
530 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
531 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
532 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
533 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
534 if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
535 ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
536 if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
537 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
538 /*
539 * Check if h/w does the MIC and/or whether the
540 * separate key cache entries are required to
541 * handle both tx+rx MIC keys.
542 */
543 if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
544 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
545 /*
546 * If the h/w cannot store the tx+rx MIC keys in one
547 * cache slot, note that we must split them.
548 */
549 if (ath_hal_hastkipsplit(ah) ||
550 !ath_hal_settkipsplit(ah, AH_FALSE))
551 sc->sc_splitmic = 1;
552 /*
553 * If the h/w can do TKIP MIC together with WME then
554 * we use it; otherwise we force the MIC to be done
555 * in software by the net80211 layer.
556 */
557 if (ath_hal_haswmetkipmic(ah))
558 sc->sc_wmetkipmic = 1;
559 }
560 sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
561 /*
562 * Check for multicast key search support.
563 */
564 if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
565 !ath_hal_getmcastkeysearch(sc->sc_ah)) {
566 ath_hal_setmcastkeysearch(sc->sc_ah, 1);
567 }
568 sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
569 /*
570 * Mark key cache slots associated with global keys
571 * as in use. If we knew TKIP was not to be used we
572 * could leave the +32, +64, and +32+64 slots free.
573 */
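	/*
	 * NB: for TKIP, the key in slot i may be paired with a MIC
	 * key in slot i+64 and, when split MIC keys are in use,
	 * with additional entries at i+32 and i+32+64; that is why
	 * all four slots are reserved in the loop below.
	 */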
574 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
575 setbit(sc->sc_keymap, i);
576 setbit(sc->sc_keymap, i+64);
577 if (sc->sc_splitmic) {
578 setbit(sc->sc_keymap, i+32);
579 setbit(sc->sc_keymap, i+32+64);
580 }
581 }
582 /*
583 * TPC support can be done either with a global cap or
584 * per-packet support. The latter is not available on
585 * all parts. We're a bit pedantic here as all parts
586 * support a global cap.
587 */
588 if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
589 ic->ic_caps |= IEEE80211_C_TXPMGT;
590
591 /*
592 * Mark WME capability only if we have sufficient
593 * hardware queues to do proper priority scheduling.
594 */
595 if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
596 ic->ic_caps |= IEEE80211_C_WME;
597 /*
598 * Check for misc other capabilities.
599 */
600 if (ath_hal_hasbursting(ah))
601 ic->ic_caps |= IEEE80211_C_BURST;
602 sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
603 sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
604 sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
605 sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
606 sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
607 if (ath_hal_hasfastframes(ah))
608 ic->ic_caps |= IEEE80211_C_FF;
609 wmodes = ath_hal_getwirelessmodes(ah);
610 if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
611 ic->ic_caps |= IEEE80211_C_TURBOP;
612#ifdef IEEE80211_SUPPORT_TDMA
613 if (ath_hal_macversion(ah) > 0x78) {
614 ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
615 ic->ic_tdma_update = ath_tdma_update;
616 }
617#endif
618
619 /*
620 * The if_ath 11n support is nowhere near ready for normal use.
621 * Enabling this option will likely break anything and everything.
622 * Don't enable it unless you know what you're doing.
623 */
624
625#ifdef ATH_ENABLE_11N
626 /*
627 * Query HT capabilities
628 */
629 if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
630 (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
631 int rxs, txs;
632
633 device_printf(sc->sc_dev, "[HT] enabling HT modes\n");
634 ic->ic_htcaps = IEEE80211_HTC_HT /* HT operation */
635 | IEEE80211_HTC_AMPDU /* A-MPDU tx/rx */
636 | IEEE80211_HTC_AMSDU /* A-MSDU tx/rx */
637 | IEEE80211_HTCAP_MAXAMSDU_3839
638 /* max A-MSDU length */
639 | IEEE80211_HTCAP_SMPS_OFF; /* SM power save off */
640
641
642 /*
643 * Enable short-GI for HT20 only if the hardware
644 * advertises support.
645 * Notably, anything earlier than the AR9287 doesn't.
646 */
647 if ((ath_hal_getcapability(ah,
648 HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
649 (wmodes & HAL_MODE_HT20)) {
650 device_printf(sc->sc_dev,
651 "[HT] enabling short-GI in 20MHz mode\n");
652 ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
653 }
654
655 if (wmodes & HAL_MODE_HT40)
656 ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
657 | IEEE80211_HTCAP_SHORTGI40;
658
659 /*
660 * TX/RX streams need to be taken into account when
661 * negotiating which MCS rates we can receive and
662 * which MCS rates are available for TX.
663 */
664 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &rxs);
665 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &txs);
666
667 ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
668 ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);
669
670 ic->ic_txstream = txs;
671 ic->ic_rxstream = rxs;
672
673 device_printf(sc->sc_dev,
674 "[HT] %d RX streams; %d TX streams\n", rxs, txs);
675 }
676#endif
677
678 /*
679 * Check if the hardware requires PCI register serialisation.
680 * Some of the Owl based MACs require this.
681 */
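	/* NB: "Owl" is Atheros' codename for the AR5416 family. */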
682 if (mp_ncpus > 1 &&
683 ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
684 0, NULL) == HAL_OK) {
685 sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
686 device_printf(sc->sc_dev,
687 "Enabling register serialisation\n");
688 }
689
690 /*
691 * Indicate we need the 802.11 header padded to a
692 * 32-bit boundary for 4-address and QoS frames.
693 */
694 ic->ic_flags |= IEEE80211_F_DATAPAD;
695
696 /*
697 * Query the hal about antenna support.
698 */
699 sc->sc_defant = ath_hal_getdefantenna(ah);
700
701 /*
702 * Not all chips have the VEOL support we want to
703 * use with IBSS beacons; check here for it.
704 */
705 sc->sc_hasveol = ath_hal_hasveol(ah);
706
707 /* get mac address from hardware */
708 ath_hal_getmac(ah, macaddr);
709 if (sc->sc_hasbmask)
710 ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);
711
712 /* NB: used to size node table key mapping array */
713 ic->ic_max_keyix = sc->sc_keymax;
714 /* call MI attach routine. */
715 ieee80211_ifattach(ic, macaddr);
716 ic->ic_setregdomain = ath_setregdomain;
717 ic->ic_getradiocaps = ath_getradiocaps;
718 sc->sc_opmode = HAL_M_STA;
719
720 /* override default methods */
721 ic->ic_newassoc = ath_newassoc;
722 ic->ic_updateslot = ath_updateslot;
723 ic->ic_wme.wme_update = ath_wme_update;
724 ic->ic_vap_create = ath_vap_create;
725 ic->ic_vap_delete = ath_vap_delete;
726 ic->ic_raw_xmit = ath_raw_xmit;
727 ic->ic_update_mcast = ath_update_mcast;
728 ic->ic_update_promisc = ath_update_promisc;
729 ic->ic_node_alloc = ath_node_alloc;
730 sc->sc_node_free = ic->ic_node_free;
731 ic->ic_node_free = ath_node_free;
732 sc->sc_node_cleanup = ic->ic_node_cleanup;
733 ic->ic_node_cleanup = ath_node_cleanup;
734 ic->ic_node_getsignal = ath_node_getsignal;
735 ic->ic_scan_start = ath_scan_start;
736 ic->ic_scan_end = ath_scan_end;
737 ic->ic_set_channel = ath_set_channel;
738
739 /* 802.11n specific - but just override anyway */
740 sc->sc_addba_request = ic->ic_addba_request;
741 sc->sc_addba_response = ic->ic_addba_response;
742 sc->sc_addba_stop = ic->ic_addba_stop;
743 sc->sc_bar_response = ic->ic_bar_response;
744 sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;
745
746 ic->ic_addba_request = ath_addba_request;
747 ic->ic_addba_response = ath_addba_response;
748 ic->ic_addba_response_timeout = ath_addba_response_timeout;
749 ic->ic_addba_stop = ath_addba_stop;
750 ic->ic_bar_response = ath_bar_response;
751
752 ieee80211_radiotap_attach(ic,
753 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
754 ATH_TX_RADIOTAP_PRESENT,
755 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
756 ATH_RX_RADIOTAP_PRESENT);
757
758 /*
759 * Setup dynamic sysctl's now that country code and
760 * regdomain are available from the hal.
761 */
762 ath_sysctlattach(sc);
763 ath_sysctl_stats_attach(sc);
764 ath_sysctl_hal_attach(sc);
765
766 if (bootverbose)
767 ieee80211_announce(ic);
768 ath_announce(sc);
769 return 0;
770bad2:
771 ath_tx_cleanup(sc);
772 ath_desc_free(sc);
773bad:
774 if (ah)
775 ath_hal_detach(ah);
776 if (ifp != NULL)
777 if_free(ifp);
778 sc->sc_invalid = 1;
779 return error;
780}
781
782int
783ath_detach(struct ath_softc *sc)
784{
785 struct ifnet *ifp = sc->sc_ifp;
786
787 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
788 __func__, ifp->if_flags);
789
790 /*
791 * NB: the order of these is important:
792 * o stop the chip so no more interrupts will fire
793 * o call the 802.11 layer before detaching the hal to
794 * ensure callbacks into the driver to delete global
795 * key cache entries can be handled
796 * o free the taskqueue which drains any pending tasks
797 * o reclaim the tx queue data structures after calling
798 * the 802.11 layer as we'll get called back to reclaim
799 * node state and potentially want to use them
800 * o to cleanup the tx queues the hal is called, so detach
801 * it last
802 * Other than that, it's straightforward...
803 */
804 ath_stop(ifp);
805 ieee80211_ifdetach(ifp->if_l2com);
806 taskqueue_free(sc->sc_tq);
807#ifdef ATH_TX99_DIAG
808 if (sc->sc_tx99 != NULL)
809 sc->sc_tx99->detach(sc->sc_tx99);
810#endif
811 ath_rate_detach(sc->sc_rc);
812
813 ath_dfs_detach(sc);
814 ath_desc_free(sc);
815 ath_tx_cleanup(sc);
816 ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */
817 if_free(ifp);
818
819 return 0;
820}
821
822/*
823 * MAC address handling for multiple BSS on the same radio.
824 * The first vap uses the MAC address from the EEPROM. For
825 * subsequent vap's we set the U/L bit (bit 1) in the MAC
826 * address and use the next six bits as an index.
827 */
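/*
 * For example: with index 1 the code below ORs (1 << 2) | 0x2
 * = 0x06 into byte 0, turning a base address like
 * 00:03:7f:aa:bb:cc into the locally-administered address
 * 06:03:7f:aa:bb:cc; index 0 leaves the EEPROM address intact.
 */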
828static void
829assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
830{
831 int i;
832
833 if (clone && sc->sc_hasbmask) {
834 /* NB: we only do this if h/w supports multiple bssid */
835 for (i = 0; i < 8; i++)
836 if ((sc->sc_bssidmask & (1<<i)) == 0)
837 break;
838 if (i != 0)
839 mac[0] |= (i << 2)|0x2;
840 } else
841 i = 0;
842 sc->sc_bssidmask |= 1<<i;
843 sc->sc_hwbssidmask[0] &= ~mac[0];
844 if (i == 0)
845 sc->sc_nbssid0++;
846}
847
848static void
849reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
850{
851 int i = mac[0] >> 2;
852 uint8_t mask;
853
854 if (i != 0 || --sc->sc_nbssid0 == 0) {
855 sc->sc_bssidmask &= ~(1<<i);
856 /* recalculate bssid mask from remaining addresses */
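		/*
		 * NB: an index-i address differs from the base
		 * address in bits ((i << 2) | 0x2) of byte 0, so
		 * those bit positions must stay don't-care in the
		 * h/w BSSID mask while index i is in use; only the
		 * bits of unused indices are turned back on here.
		 */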
857 mask = 0xff;
858 for (i = 1; i < 8; i++)
859 if (sc->sc_bssidmask & (1<<i))
860 mask &= ~((i<<2)|0x2);
861 sc->sc_hwbssidmask[0] |= mask;
862 }
863}
864
865/*
866 * Assign a beacon xmit slot. We try to space out
867 * assignments so when beacons are staggered the
868 * traffic coming out of the cab q has maximal time
869 * to go out before the next beacon is scheduled.
870 */
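/*
 * NB: a "double" slot is one whose neighbours (modulo ATH_BCBUF)
 * are both free.  The (slot-1) % ATH_BCBUF test below relies on
 * unsigned wraparound when slot == 0; that is correct so long as
 * ATH_BCBUF is a power of two.
 */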
871static int
872assign_bslot(struct ath_softc *sc)
873{
874 u_int slot, free;
875
876 free = 0;
877 for (slot = 0; slot < ATH_BCBUF; slot++)
878 if (sc->sc_bslot[slot] == NULL) {
879 if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
880 sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
881 return slot;
882 free = slot;
883 /* NB: keep looking for a double slot */
884 }
885 return free;
886}
887
888static struct ieee80211vap *
889ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
890 enum ieee80211_opmode opmode, int flags,
891 const uint8_t bssid[IEEE80211_ADDR_LEN],
892 const uint8_t mac0[IEEE80211_ADDR_LEN])
893{
894 struct ath_softc *sc = ic->ic_ifp->if_softc;
895 struct ath_vap *avp;
896 struct ieee80211vap *vap;
897 uint8_t mac[IEEE80211_ADDR_LEN];
898 int needbeacon, error;
899 enum ieee80211_opmode ic_opmode;
900
901 avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
902 M_80211_VAP, M_WAITOK | M_ZERO);
903 needbeacon = 0;
904 IEEE80211_ADDR_COPY(mac, mac0);
905
906 ATH_LOCK(sc);
907 ic_opmode = opmode; /* default to opmode of new vap */
908 switch (opmode) {
909 case IEEE80211_M_STA:
910 if (sc->sc_nstavaps != 0) { /* XXX only 1 for now */
911 device_printf(sc->sc_dev, "only 1 sta vap supported\n");
912 goto bad;
913 }
914 if (sc->sc_nvaps) {
915 /*
916 * With multiple vaps we must fall back
917 * to s/w beacon miss handling.
918 */
919 flags |= IEEE80211_CLONE_NOBEACONS;
920 }
921 if (flags & IEEE80211_CLONE_NOBEACONS) {
922 /*
923 * Station mode w/o beacons is implemented w/ AP mode.
924 */
925 ic_opmode = IEEE80211_M_HOSTAP;
926 }
927 break;
928 case IEEE80211_M_IBSS:
929 if (sc->sc_nvaps != 0) { /* XXX only 1 for now */
930 device_printf(sc->sc_dev,
931 "only 1 ibss vap supported\n");
932 goto bad;
933 }
934 needbeacon = 1;
935 break;
936 case IEEE80211_M_AHDEMO:
937#ifdef IEEE80211_SUPPORT_TDMA
938 if (flags & IEEE80211_CLONE_TDMA) {
939 if (sc->sc_nvaps != 0) {
940 device_printf(sc->sc_dev,
941 "only 1 tdma vap supported\n");
942 goto bad;
943 }
944 needbeacon = 1;
945 flags |= IEEE80211_CLONE_NOBEACONS;
946 }
947 /* fall thru... */
948#endif
949 case IEEE80211_M_MONITOR:
950 if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
951 /*
952 * Adopt existing mode. Adding a monitor or ahdemo
953 * vap to an existing configuration is of dubious
954 * value but should be ok.
955 */
956 /* XXX not right for monitor mode */
957 ic_opmode = ic->ic_opmode;
958 }
959 break;
960 case IEEE80211_M_HOSTAP:
961 case IEEE80211_M_MBSS:
962 needbeacon = 1;
963 break;
964 case IEEE80211_M_WDS:
965 if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
966 device_printf(sc->sc_dev,
967 "wds not supported in sta mode\n");
968 goto bad;
969 }
970 /*
971 * Silently remove any request for a unique
972 * bssid; WDS vap's always share the local
973 * mac address.
974 */
975 flags &= ~IEEE80211_CLONE_BSSID;
976 if (sc->sc_nvaps == 0)
977 ic_opmode = IEEE80211_M_HOSTAP;
978 else
979 ic_opmode = ic->ic_opmode;
980 break;
981 default:
982 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
983 goto bad;
984 }
985 /*
986 * Check that a beacon buffer is available; the code below assumes it.
987 */
988 if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
989 device_printf(sc->sc_dev, "no beacon buffer available\n");
990 goto bad;
991 }
992
993 /* STA, AHDEMO? */
994 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
995 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
996 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
997 }
998
999 vap = &avp->av_vap;
1000 /* XXX can't hold mutex across if_alloc */
1001 ATH_UNLOCK(sc);
1002 error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
1003 bssid, mac);
1004 ATH_LOCK(sc);
1005 if (error != 0) {
1006 device_printf(sc->sc_dev, "%s: error %d creating vap\n",
1007 __func__, error);
1008 goto bad2;
1009 }
1010
1011 /* h/w crypto support */
1012 vap->iv_key_alloc = ath_key_alloc;
1013 vap->iv_key_delete = ath_key_delete;
1014 vap->iv_key_set = ath_key_set;
1015 vap->iv_key_update_begin = ath_key_update_begin;
1016 vap->iv_key_update_end = ath_key_update_end;
1017
1018 /* override various methods */
1019 avp->av_recv_mgmt = vap->iv_recv_mgmt;
1020 vap->iv_recv_mgmt = ath_recv_mgmt;
1021 vap->iv_reset = ath_reset_vap;
1022 vap->iv_update_beacon = ath_beacon_update;
1023 avp->av_newstate = vap->iv_newstate;
1024 vap->iv_newstate = ath_newstate;
1025 avp->av_bmiss = vap->iv_bmiss;
1026 vap->iv_bmiss = ath_bmiss_vap;
1027
1028 /* Set default parameters */
1029
1030 /*
1031 * MACs earlier than some in the AR9300 series don't
1032 * support a smaller MPDU density.
1033 */
1034 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
1035 /*
1036 * All NICs can handle the maximum RX A-MPDU size; however,
1037 * AR5416 based MACs can only TX aggregates w/ RTS
1038 * protection when the total aggregate size is <= 8k.
1039 * For now that limit is enforced by the TX path.
1040 */
1041 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
1042
1043 avp->av_bslot = -1;
1044 if (needbeacon) {
1045 /*
1046 * Allocate beacon state and setup the q for buffered
1047 * multicast frames. We know a beacon buffer is
1048 * available because we checked above.
1049 */
1050 avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
1051 TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
1052 if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
1053 /*
1054 * Assign the vap to a beacon xmit slot. As above
1055 * this cannot fail to find a free one.
1056 */
1057 avp->av_bslot = assign_bslot(sc);
1058 KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
1059 ("beacon slot %u not empty", avp->av_bslot));
1060 sc->sc_bslot[avp->av_bslot] = vap;
1061 sc->sc_nbcnvaps++;
1062 }
1063 if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
1064 /*
1065 * Multiple vaps are to transmit beacons and we
1066 * have h/w support for TSF adjusting; enable
1067 * use of staggered beacons.
1068 */
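			/*
			 * NB: with staggering, SWBA fires (and a
			 * beacon goes out) every intval/ATH_BCBUF TU
			 * rather than all slots bursting at one TBTT.
			 */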
1069 sc->sc_stagbeacons = 1;
1070 }
1071 ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
1072 }
1073
1074 ic->ic_opmode = ic_opmode;
1075 if (opmode != IEEE80211_M_WDS) {
1076 sc->sc_nvaps++;
1077 if (opmode == IEEE80211_M_STA)
1078 sc->sc_nstavaps++;
1079 if (opmode == IEEE80211_M_MBSS)
1080 sc->sc_nmeshvaps++;
1081 }
1082 switch (ic_opmode) {
1083 case IEEE80211_M_IBSS:
1084 sc->sc_opmode = HAL_M_IBSS;
1085 break;
1086 case IEEE80211_M_STA:
1087 sc->sc_opmode = HAL_M_STA;
1088 break;
1089 case IEEE80211_M_AHDEMO:
1090#ifdef IEEE80211_SUPPORT_TDMA
1091 if (vap->iv_caps & IEEE80211_C_TDMA) {
1092 sc->sc_tdma = 1;
1093 /* NB: disable tsf adjust */
1094 sc->sc_stagbeacons = 0;
1095 }
1096 /*
1097 * NB: adhoc demo mode is a pseudo mode; to the hal it's
1098 * just ap mode.
1099 */
1100 /* fall thru... */
1101#endif
1102 case IEEE80211_M_HOSTAP:
1103 case IEEE80211_M_MBSS:
1104 sc->sc_opmode = HAL_M_HOSTAP;
1105 break;
1106 case IEEE80211_M_MONITOR:
1107 sc->sc_opmode = HAL_M_MONITOR;
1108 break;
1109 default:
1110 /* XXX should not happen */
1111 break;
1112 }
1113 if (sc->sc_hastsfadd) {
1114 /*
1115 * Configure whether or not TSF adjust should be done.
1116 */
1117 ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
1118 }
1119 if (flags & IEEE80211_CLONE_NOBEACONS) {
1120 /*
1121 * Enable s/w beacon miss handling.
1122 */
1123 sc->sc_swbmiss = 1;
1124 }
1125 ATH_UNLOCK(sc);
1126
1127 /* complete setup */
1128 ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
1129 return vap;
1130bad2:
1131 reclaim_address(sc, mac);
1132 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
1133bad:
1134 free(avp, M_80211_VAP);
1135 ATH_UNLOCK(sc);
1136 return NULL;
1137}
1138
1139static void
1140ath_vap_delete(struct ieee80211vap *vap)
1141{
1142 struct ieee80211com *ic = vap->iv_ic;
1143 struct ifnet *ifp = ic->ic_ifp;
1144 struct ath_softc *sc = ifp->if_softc;
1145 struct ath_hal *ah = sc->sc_ah;
1146 struct ath_vap *avp = ATH_VAP(vap);
1147
1148 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
1149 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1150 /*
1151 * Quiesce the hardware while we remove the vap. In
1152 * particular we need to reclaim all references to
1153 * the vap state by any frames pending on the tx queues.
1154 */
1155 ath_hal_intrset(ah, 0); /* disable interrupts */
1156 ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */
1157 /* XXX Do all frames from all vaps/nodes need draining here? */
1158 ath_stoprecv(sc, 1); /* stop recv side */
1159 }
1160
1161 ieee80211_vap_detach(vap);
1162
1163 /*
1164 * XXX Danger Will Robinson! Danger!
1165 *
1166 * Because ieee80211_vap_detach() can queue a frame (the station
1167 * disassociate message?) after we've drained the TXQ and
1168 * flushed the software TXQ, we will end up with a frame queued
1169 * to a node whose vap is about to be freed.
1170 *
1171 * To work around this, flush the hardware/software again.
1172 * This may be racy - the ath task may be running and the packet
1173 * may be being scheduled between sw->hw txq. Tsk.
1174 *
1175 * TODO: figure out why a new node gets allocated somewhere around
1176 * here (after the ath_tx_swq() call; and after an ath_stop_locked()
1177 * call!)
1178 */
1179
1180 ath_draintxq(sc, ATH_RESET_DEFAULT);
1181
1182 ATH_LOCK(sc);
1183 /*
1184 * Reclaim beacon state. Note this must be done before
1185 * the vap instance is reclaimed as we may have a reference
1186 * to it in the buffer for the beacon frame.
1187 */
1188 if (avp->av_bcbuf != NULL) {
1189 if (avp->av_bslot != -1) {
1190 sc->sc_bslot[avp->av_bslot] = NULL;
1191 sc->sc_nbcnvaps--;
1192 }
1193 ath_beacon_return(sc, avp->av_bcbuf);
1194 avp->av_bcbuf = NULL;
1195 if (sc->sc_nbcnvaps == 0) {
1196 sc->sc_stagbeacons = 0;
1197 if (sc->sc_hastsfadd)
1198 ath_hal_settsfadjust(sc->sc_ah, 0);
1199 }
1200 /*
1201 * Reclaim any pending mcast frames for the vap.
1202 */
1203 ath_tx_draintxq(sc, &avp->av_mcastq);
1204 ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);
1205 }
1206 /*
1207 * Update bookkeeping.
1208 */
1209 if (vap->iv_opmode == IEEE80211_M_STA) {
1210 sc->sc_nstavaps--;
1211 if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
1212 sc->sc_swbmiss = 0;
1213 } else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
1214 vap->iv_opmode == IEEE80211_M_MBSS) {
1215 reclaim_address(sc, vap->iv_myaddr);
1216 ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
1217 if (vap->iv_opmode == IEEE80211_M_MBSS)
1218 sc->sc_nmeshvaps--;
1219 }
1220 if (vap->iv_opmode != IEEE80211_M_WDS)
1221 sc->sc_nvaps--;
1222#ifdef IEEE80211_SUPPORT_TDMA
1223 /* TDMA operation ceases when the last vap is destroyed */
1224 if (sc->sc_tdma && sc->sc_nvaps == 0) {
1225 sc->sc_tdma = 0;
1226 sc->sc_swbmiss = 0;
1227 }
1228#endif
1229 free(avp, M_80211_VAP);
1230
1231 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1232 /*
1233 * Restart rx+tx machines if still running (RUNNING will
1234 * be reset if we just destroyed the last vap).
1235 */
1236 if (ath_startrecv(sc) != 0)
1237 if_printf(ifp, "%s: unable to restart recv logic\n",
1238 __func__);
1239 if (sc->sc_beacons) { /* restart beacons */
1240#ifdef IEEE80211_SUPPORT_TDMA
1241 if (sc->sc_tdma)
1242 ath_tdma_config(sc, NULL);
1243 else
1244#endif
1245 ath_beacon_config(sc, NULL);
1246 }
1247 ath_hal_intrset(ah, sc->sc_imask);
1248 }
1249 ATH_UNLOCK(sc);
1250}
1251
1252void
1253ath_suspend(struct ath_softc *sc)
1254{
1255 struct ifnet *ifp = sc->sc_ifp;
1256 struct ieee80211com *ic = ifp->if_l2com;
1257
1258 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1259 __func__, ifp->if_flags);
1260
1261 sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;
1262 if (ic->ic_opmode == IEEE80211_M_STA)
1263 ath_stop(ifp);
1264 else
1265 ieee80211_suspend_all(ic);
1266 /*
1267 * NB: don't worry about putting the chip in low power
1268 * mode; pci will power off our socket on suspend and
1269 * CardBus detaches the device.
1270 */
1271}
1272
1273/*
1274 * Reset the key cache since some parts do not reset the
1275 * contents on resume. First we clear all entries, then
1276 * re-load keys that the 802.11 layer assumes are setup
1277 * in h/w.
1278 */
1279static void
1280ath_reset_keycache(struct ath_softc *sc)
1281{
1282 struct ifnet *ifp = sc->sc_ifp;
1283 struct ieee80211com *ic = ifp->if_l2com;
1284 struct ath_hal *ah = sc->sc_ah;
1285 int i;
1286
1287 for (i = 0; i < sc->sc_keymax; i++)
1288 ath_hal_keyreset(ah, i);
1289 ieee80211_crypto_reload_keys(ic);
1290}
1291
1292void
1293ath_resume(struct ath_softc *sc)
1294{
1295 struct ifnet *ifp = sc->sc_ifp;
1296 struct ieee80211com *ic = ifp->if_l2com;
1297 struct ath_hal *ah = sc->sc_ah;
1298 HAL_STATUS status;
1299
1300 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1301 __func__, ifp->if_flags);
1302
1303 /*
1304 * Must reset the chip before we reload the
1305 * keycache as we were powered down on suspend.
1306 */
1307 ath_hal_reset(ah, sc->sc_opmode,
1308 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
1309 AH_FALSE, &status);
1310 ath_reset_keycache(sc);
1311
1312 /* Let DFS at it in case it's a DFS channel */
1313 ath_dfs_radar_enable(sc, ic->ic_curchan);
1314
1315 if (sc->sc_resume_up) {
1316 if (ic->ic_opmode == IEEE80211_M_STA) {
1317 ath_init(sc);
1318 /*
1319 * Program the beacon registers using the last rx'd
1320 * beacon frame and enable sync on the next beacon
1321 * we see. This should handle the case where we
1322 * wake up and find the same AP and also the case where
1323 * we wake up and need to roam. For the latter we
1324 * should get bmiss events that trigger a roam.
1325 */
1326 ath_beacon_config(sc, NULL);
1327 sc->sc_syncbeacon = 1;
1328 } else
1329 ieee80211_resume_all(ic);
1330 }
1335 if (sc->sc_softled) {
1336 ath_hal_gpioCfgOutput(ah, sc->sc_ledpin,
1337 HAL_GPIO_MUX_MAC_NETWORK_LED);
1338 ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
1339 }
1331 ath_led_config(sc);
1340
1341 /* XXX beacons ? */
1342}
1343
1344void
1345ath_shutdown(struct ath_softc *sc)
1346{
1347 struct ifnet *ifp = sc->sc_ifp;
1348
1349 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1350 __func__, ifp->if_flags);
1351
1352 ath_stop(ifp);
1353 /* NB: no point powering down chip as we're about to reboot */
1354}
1355
1356/*
1357 * Interrupt handler. Most of the actual processing is deferred.
1358 */
1359void
1360ath_intr(void *arg)
1361{
1362 struct ath_softc *sc = arg;
1363 struct ifnet *ifp = sc->sc_ifp;
1364 struct ath_hal *ah = sc->sc_ah;
1365 HAL_INT status = 0;
1366 uint32_t txqs;
1367
1368 /*
1369 * If we're inside a reset path, just print a warning and
1370 * clear the ISR. The reset routine will finish it for us.
1371 */
1372 ATH_PCU_LOCK(sc);
1373 if (sc->sc_inreset_cnt) {
1374 HAL_INT status;
1375 ath_hal_getisr(ah, &status); /* clear ISR */
1376 ath_hal_intrset(ah, 0); /* disable further intr's */
1377 DPRINTF(sc, ATH_DEBUG_ANY,
1378 "%s: in reset, ignoring: status=0x%x\n",
1379 __func__, status);
1380 ATH_PCU_UNLOCK(sc);
1381 return;
1382 }
1383
1384 if (sc->sc_invalid) {
1385 /*
1386 * The hardware is not ready/present, don't touch anything.
1387 * Note this can happen early on if the IRQ is shared.
1388 */
1389 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
1390 ATH_PCU_UNLOCK(sc);
1391 return;
1392 }
1393 if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */
1394 ATH_PCU_UNLOCK(sc);
1395 return;
1396 }
1397
1398 if ((ifp->if_flags & IFF_UP) == 0 ||
1399 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1400 HAL_INT status;
1401
1402 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1403 __func__, ifp->if_flags);
1404 ath_hal_getisr(ah, &status); /* clear ISR */
1405 ath_hal_intrset(ah, 0); /* disable further intr's */
1406 ATH_PCU_UNLOCK(sc);
1407 return;
1408 }
1409
1410 /*
1411 * Figure out the reason(s) for the interrupt. Note
1412 * that the hal returns a pseudo-ISR that may include
1413 * bits we haven't explicitly enabled so we mask the
1414 * value to ensure we only process bits we requested.
1415 */
1416 ath_hal_getisr(ah, &status); /* NB: clears ISR too */
1417 DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
1418 CTR1(ATH_KTR_INTR, "ath_intr: mask=0x%.8x", status);
1419#ifdef ATH_KTR_INTR_DEBUG
1420 CTR5(ATH_KTR_INTR,
1421 "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
1422 ah->ah_intrstate[0],
1423 ah->ah_intrstate[1],
1424 ah->ah_intrstate[2],
1425 ah->ah_intrstate[3],
1426 ah->ah_intrstate[6]);
1427#endif
1428 status &= sc->sc_imask; /* discard unasked for bits */
1429
1430 /* Short-circuit unhandled interrupts */
1431 if (status == 0x0) {
1432 ATH_PCU_UNLOCK(sc);
1433 return;
1434 }
1435
1436 /*
1437 * Take a note that we're inside the interrupt handler, so
1438 * the reset routines know to wait.
1439 */
1440 sc->sc_intr_cnt++;
1441 ATH_PCU_UNLOCK(sc);
1442
1443 /*
1444 * Handle the interrupt. We won't run concurrent with the reset
1445 * or channel change routines as they'll wait for sc_intr_cnt
1446 * to be 0 before continuing.
1447 */
1448 if (status & HAL_INT_FATAL) {
1449 sc->sc_stats.ast_hardware++;
1450 ath_hal_intrset(ah, 0); /* disable intr's until reset */
1451 ath_fatal_proc(sc, 0);
1452 } else {
1453 if (status & HAL_INT_SWBA) {
1454 /*
1455 * Software beacon alert--time to send a beacon.
1456 * Handle beacon transmission directly; deferring
1457 * this is too slow to meet timing constraints
1458 * under load.
1459 */
1460#ifdef IEEE80211_SUPPORT_TDMA
1461 if (sc->sc_tdma) {
1462 if (sc->sc_tdmaswba == 0) {
1463 struct ieee80211com *ic = ifp->if_l2com;
1464 struct ieee80211vap *vap =
1465 TAILQ_FIRST(&ic->ic_vaps);
1466 ath_tdma_beacon_send(sc, vap);
1467 sc->sc_tdmaswba =
1468 vap->iv_tdma->tdma_bintval;
1469 } else
1470 sc->sc_tdmaswba--;
1471 } else
1472#endif
1473 {
1474 ath_beacon_proc(sc, 0);
1475#ifdef IEEE80211_SUPPORT_SUPERG
1476 /*
1477 * Schedule the rx taskq in case there's no
1478 * traffic so any frames held on the staging
1479 * queue are aged and potentially flushed.
1480 */
1481 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1482#endif
1483 }
1484 }
1485 if (status & HAL_INT_RXEOL) {
1486 int imask;
1487 CTR0(ATH_KTR_ERR, "ath_intr: RXEOL");
1488 ATH_PCU_LOCK(sc);
1489 /*
1490 * NB: the hardware should re-read the link when
1491 * RXE bit is written, but it doesn't work at
1492 * least on older hardware revs.
1493 */
1494 sc->sc_stats.ast_rxeol++;
1495 /*
1496 * Disable RXEOL/RXORN - prevent an interrupt
1497 * storm until the PCU logic can be reset.
1498 * In case the interface is reset some other
1499 * way before "sc_kickpcu" is called, don't
1500 * modify sc_imask - that way if it is reset
1501 * by a call to ath_reset() somehow, the
1502 * interrupt mask will be correctly reprogrammed.
1503 */
1504 imask = sc->sc_imask;
1505 imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
1506 ath_hal_intrset(ah, imask);
1507 /*
1508 * Only blank sc_rxlink if we've not yet kicked
1509 * the PCU.
1510 *
1511 * This isn't entirely correct - the correct solution
1512 * would be to have a PCU lock and engage that for
1513 * the duration of the PCU fiddling; which would include
1514 * running the RX process. Otherwise we could end up
1515 * messing up the RX descriptor chain and making the
1516 * RX desc list much shorter.
1517 */
1518 if (! sc->sc_kickpcu)
1519 sc->sc_rxlink = NULL;
1520 sc->sc_kickpcu = 1;
1521 /*
1522 * Enqueue an RX proc to handle whatever
1523 * is in the RX queue.
1524 * This will then kick the PCU.
1525 */
1526 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1527 ATH_PCU_UNLOCK(sc);
1528 }
1529 if (status & HAL_INT_TXURN) {
1530 sc->sc_stats.ast_txurn++;
1531 /* bump tx trigger level */
1532 ath_hal_updatetxtriglevel(ah, AH_TRUE);
1533 }
1534 if (status & HAL_INT_RX) {
1535 sc->sc_stats.ast_rx_intr++;
1536 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1537 }
1538 if (status & HAL_INT_TX) {
1539 sc->sc_stats.ast_tx_intr++;
1540 /*
1541 * Grab all the currently set bits in the HAL txq bitmap
1542 * and blank them. This is the only place we should be
1543 * doing this.
1544 */
1545 ATH_PCU_LOCK(sc);
1546 txqs = 0xffffffff;
1547 ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
1548 sc->sc_txq_active |= txqs;
1549 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
1550 ATH_PCU_UNLOCK(sc);
1551 }
1552 if (status & HAL_INT_BMISS) {
1553 sc->sc_stats.ast_bmiss++;
1554 taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
1555 }
1556 if (status & HAL_INT_GTT)
1557 sc->sc_stats.ast_tx_timeout++;
1558 if (status & HAL_INT_CST)
1559 sc->sc_stats.ast_tx_cst++;
1560 if (status & HAL_INT_MIB) {
1561 sc->sc_stats.ast_mib++;
1562 ATH_PCU_LOCK(sc);
1563 /*
1564 * Disable interrupts until we service the MIB
1565 * interrupt; otherwise it will continue to fire.
1566 */
1567 ath_hal_intrset(ah, 0);
1568 /*
1569 * Let the hal handle the event. We assume it will
1570 * clear whatever condition caused the interrupt.
1571 */
1572 ath_hal_mibevent(ah, &sc->sc_halstats);
1573 /*
1574 * Don't re-enable interrupts if we've just
1575 * kicked the PCU, or we may get a nested
1576 * RXEOL before the rxproc has had a chance
1577 * to run.
1578 */
1579 if (sc->sc_kickpcu == 0)
1580 ath_hal_intrset(ah, sc->sc_imask);
1581 ATH_PCU_UNLOCK(sc);
1582 }
1583 if (status & HAL_INT_RXORN) {
1584 /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
1585 CTR0(ATH_KTR_ERR, "ath_intr: RXORN");
1586 sc->sc_stats.ast_rxorn++;
1587 }
1588 }
1589 ATH_PCU_LOCK(sc);
1590 sc->sc_intr_cnt--;
1591 ATH_PCU_UNLOCK(sc);
1592}
1593
1594static void
1595ath_fatal_proc(void *arg, int pending)
1596{
1597 struct ath_softc *sc = arg;
1598 struct ifnet *ifp = sc->sc_ifp;
1599 u_int32_t *state;
1600 u_int32_t len;
1601 void *sp;
1602
1603 if_printf(ifp, "hardware error; resetting\n");
1604 /*
1605 * Fatal errors are unrecoverable. Typically these
1606 * are caused by DMA errors. Collect h/w state from
1607 * the hal so we can diagnose what's going on.
1608 */
1609 if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
1610 KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
1611 state = sp;
1612 if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
1613 state[0], state[1], state[2], state[3],
1614 state[4], state[5]);
1615 }
1616 ath_reset(ifp, ATH_RESET_NOLOSS);
1617}
1618
1619static void
1620ath_bmiss_vap(struct ieee80211vap *vap)
1621{
1622 /*
1623 * Work around phantom bmiss interrupts by sanity-checking
1624 * the time of our last rx'd frame. If it is within the
1625 * beacon miss interval then ignore the interrupt. If it's
1626 * truly a bmiss we'll get another interrupt soon and that'll
1627 * be dispatched up for processing. Note this applies only
1628 * for h/w beacon miss events.
1629 */
1630 if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
1631 struct ifnet *ifp = vap->iv_ic->ic_ifp;
1632 struct ath_softc *sc = ifp->if_softc;
1633 u_int64_t lastrx = sc->sc_lastrx;
1634 u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
1635 u_int bmisstimeout =
1636 vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;
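		/* NB: ni_intval is in TU; 1 TU = 1024us, so the
		 * timeout is in microseconds, same units as the TSF. */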
1637
1638 DPRINTF(sc, ATH_DEBUG_BEACON,
1639 "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
1640 __func__, (unsigned long long) tsf,
1641 (unsigned long long)(tsf - lastrx),
1642 (unsigned long long) lastrx, bmisstimeout);
1643
1644 if (tsf - lastrx <= bmisstimeout) {
1645 sc->sc_stats.ast_bmiss_phantom++;
1646 return;
1647 }
1648 }
1649 ATH_VAP(vap)->av_bmiss(vap);
1650}
1651
1652static int
1653ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
1654{
1655 uint32_t rsize;
1656 void *sp;
1657
1658 if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize))
1659 return 0;
1660 KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
1661 *hangs = *(uint32_t *)sp;
1662 return 1;
1663}
1664
1665static void
1666ath_bmiss_proc(void *arg, int pending)
1667{
1668 struct ath_softc *sc = arg;
1669 struct ifnet *ifp = sc->sc_ifp;
1670 uint32_t hangs;
1671
1672 DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
1673
1674 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
1675 if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs);
1676 ath_reset(ifp, ATH_RESET_NOLOSS);
1677 } else
1678 ieee80211_beacon_miss(ifp->if_l2com);
1679}
1680
1681/*
1682 * Handle TKIP MIC setup to deal with hardware that doesn't do MIC
1683 * calcs together with WME. If necessary disable the crypto
1684 * hardware and mark the 802.11 state so keys will be setup
1685 * with the MIC work done in software.
1686 */
1687static void
1688ath_settkipmic(struct ath_softc *sc)
1689{
1690 struct ifnet *ifp = sc->sc_ifp;
1691 struct ieee80211com *ic = ifp->if_l2com;
1692
1693 if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
1694 if (ic->ic_flags & IEEE80211_F_WME) {
1695 ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
1696 ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
1697 } else {
1698 ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
1699 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
1700 }
1701 }
1702}
1703
1704static void
1705ath_init(void *arg)
1706{
1707 struct ath_softc *sc = (struct ath_softc *) arg;
1708 struct ifnet *ifp = sc->sc_ifp;
1709 struct ieee80211com *ic = ifp->if_l2com;
1710 struct ath_hal *ah = sc->sc_ah;
1711 HAL_STATUS status;
1712
1713 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1714 __func__, ifp->if_flags);
1715
1716 ATH_LOCK(sc);
1717 /*
1718 * Stop anything previously setup. This is safe
1719 * whether this is the first time through or not.
1720 */
1721 ath_stop_locked(ifp);
1722
1723 /*
1724 * The basic interface to setting the hardware in a good
1725 * state is ``reset''. On return the hardware is known to
1726 * be powered up and with interrupts disabled. This must
1727 * be followed by initialization of the appropriate bits
1728 * and then setup of the interrupt mask.
1729 */
1730 ath_settkipmic(sc);
1731 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) {
1732 if_printf(ifp, "unable to reset hardware; hal status %u\n",
1733 status);
1734 ATH_UNLOCK(sc);
1735 return;
1736 }
1737 ath_chan_change(sc, ic->ic_curchan);
1738
1739 /* Let DFS at it in case it's a DFS channel */
1740 ath_dfs_radar_enable(sc, ic->ic_curchan);
1741
1742 /*
1743 * Likewise this is set during reset so update
1744 * state cached in the driver.
1745 */
1746 sc->sc_diversity = ath_hal_getdiversity(ah);
1747 sc->sc_lastlongcal = 0;
1748 sc->sc_resetcal = 1;
1749 sc->sc_lastcalreset = 0;
1750 sc->sc_lastani = 0;
1751 sc->sc_lastshortcal = 0;
1752 sc->sc_doresetcal = AH_FALSE;
1753 /*
1754 * Beacon timers were cleared here; give ath_newstate()
1755 * a hint that the beacon timers should be poked when
1756 * things transition to the RUN state.
1757 */
1758 sc->sc_beacons = 0;
1759
1760 /*
1761 * Initial aggregation settings.
1762 */
1763 sc->sc_hwq_limit = ATH_AGGR_MIN_QDEPTH;
1764 sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW;
1765 sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH;
1766
1767 /*
1768 * Setup the hardware after reset: the key cache
1769 * is filled as needed and the receive engine is
1770 * set going. Frame transmit is handled entirely
1771 * in the frame output path; there's nothing to do
1772 * here except setup the interrupt mask.
1773 */
1774 if (ath_startrecv(sc) != 0) {
1775 if_printf(ifp, "unable to start recv logic\n");
1776 ATH_UNLOCK(sc);
1777 return;
1778 }
1779
1780 /*
1781 * Enable interrupts.
1782 */
1783 sc->sc_imask = HAL_INT_RX | HAL_INT_TX
1784 | HAL_INT_RXEOL | HAL_INT_RXORN
1785 | HAL_INT_FATAL | HAL_INT_GLOBAL;
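	/* NB: HAL_INT_GLOBAL is the master interrupt enable bit;
	 * without it none of the other sources are delivered. */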
1786 /*
1787 * Enable MIB interrupts when there are hardware phy counters.
1788 * Note we only do this (at the moment) for station mode.
1789 */
1790 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
1791 sc->sc_imask |= HAL_INT_MIB;
1792
1793 /* Enable global TX timeout and carrier sense timeout if available */
1794 if (ath_hal_gtxto_supported(ah))
1795 sc->sc_imask |= HAL_INT_GTT;
1796
1797 DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n",
1798 __func__, sc->sc_imask);
1799
1800 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1801 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
1802 ath_hal_intrset(ah, sc->sc_imask);
1803
1804 ATH_UNLOCK(sc);
1805
1806#ifdef ATH_TX99_DIAG
1807 if (sc->sc_tx99 != NULL)
1808 sc->sc_tx99->start(sc->sc_tx99);
1809 else
1810#endif
1811 ieee80211_start_all(ic); /* start all vap's */
1812}
1813
1814static void
1815ath_stop_locked(struct ifnet *ifp)
1816{
1817 struct ath_softc *sc = ifp->if_softc;
1818 struct ath_hal *ah = sc->sc_ah;
1819
1820 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
1821 __func__, sc->sc_invalid, ifp->if_flags);
1822
1823 ATH_LOCK_ASSERT(sc);
1824 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1825 /*
1826 * Shutdown the hardware and driver:
1827 * reset 802.11 state machine
1828 * turn off timers
1829 * disable interrupts
1830 * turn off the radio
1831 * clear transmit machinery
1832 * clear receive machinery
1833 * drain and release tx queues
1834 * reclaim beacon resources
1835 * power down hardware
1836 *
1837 * Note that some of this work is not possible if the
1838 * hardware is gone (invalid).
1839 */
1840#ifdef ATH_TX99_DIAG
1841 if (sc->sc_tx99 != NULL)
1842 sc->sc_tx99->stop(sc->sc_tx99);
1843#endif
1844 callout_stop(&sc->sc_wd_ch);
1845 sc->sc_wd_timer = 0;
1846 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1847 if (!sc->sc_invalid) {
1848 if (sc->sc_softled) {
1849 callout_stop(&sc->sc_ledtimer);
1850 ath_hal_gpioset(ah, sc->sc_ledpin,
1851 !sc->sc_ledon);
1852 sc->sc_blinking = 0;
1853 }
1854 ath_hal_intrset(ah, 0);
1855 }
1856 ath_draintxq(sc, ATH_RESET_DEFAULT);
1857 if (!sc->sc_invalid) {
1858 ath_stoprecv(sc, 1);
1859 ath_hal_phydisable(ah);
1860 } else
1861 sc->sc_rxlink = NULL;
1862 ath_beacon_free(sc); /* XXX not needed */
1863 }
1864}
1865
1866#define MAX_TXRX_ITERATIONS 1000
1867static void
1868ath_txrx_stop(struct ath_softc *sc)
1869{
1870 int i = MAX_TXRX_ITERATIONS;
1871
1872 ATH_UNLOCK_ASSERT(sc);
1873 /* Stop any new TX/RX from occurring */
1874 taskqueue_block(sc->sc_tq);
1875
1876 ATH_PCU_LOCK(sc);
1877 /*
1878 * Sleep until all the pending operations have completed.
1879 *
1880 * The caller must ensure sc_inreset_cnt has been incremented
1881 * or the pending operations may continue being queued.
1882 */
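	/*
	 * NB: each failed check sleeps for one tick, so the total
	 * wait is bounded at roughly MAX_TXRX_ITERATIONS ticks.
	 */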
1883 while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
1884 sc->sc_txstart_cnt || sc->sc_intr_cnt) {
1885 if (i <= 0)
1886 break;
1887 msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 1);
1888 i--;
1889 }
1890 ATH_PCU_UNLOCK(sc);
1891
1892 if (i <= 0)
1893 device_printf(sc->sc_dev,
1894 "%s: didn't finish after %d iterations\n",
1895 __func__, MAX_TXRX_ITERATIONS);
1896}
1897#undef MAX_TXRX_ITERATIONS
1898
1899static void
1900ath_txrx_start(struct ath_softc *sc)
1901{
1902
1903 taskqueue_unblock(sc->sc_tq);
1904}
1905
1906/*
1907 * Grab the reset lock, and wait around until no one else
1908 * is trying to do anything with it.
1909 *
1910 * This is totally horrible but we can't hold this lock for
1911 * long enough to do TX/RX or we end up with net80211/ip stack
1912 * LORs and eventual deadlock.
1913 *
1914 * "dowait" signals whether to spin, waiting for the reset
1915 * lock count to reach 0. This should (for now) only be used
1916 * during the reset path, as the rest of the code may not
1917 * be locking-reentrant enough to behave correctly.
1918 *
1919 * Another, cleaner way should be found to serialise all of
1920 * these operations.
1921 */
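/*
 * A sketch of the expected calling pattern (cf. ath_reset() below):
 *
 *	ATH_PCU_LOCK(sc);
 *	ath_reset_grablock(sc, 1);
 *	ath_hal_intrset(ah, 0);
 *	ATH_PCU_UNLOCK(sc);
 *	... do the reset work ...
 *	ATH_PCU_LOCK(sc);
 *	sc->sc_inreset_cnt--;
 *	ath_hal_intrset(ah, sc->sc_imask);
 *	ATH_PCU_UNLOCK(sc);
 */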
1922#define MAX_RESET_ITERATIONS 10
1923static int
1924ath_reset_grablock(struct ath_softc *sc, int dowait)
1925{
1926 int w = 0;
1927 int i = MAX_RESET_ITERATIONS;
1928
1929 ATH_PCU_LOCK_ASSERT(sc);
1930 do {
1931 if (sc->sc_inreset_cnt == 0) {
1932 w = 1;
1933 break;
1934 }
1935 if (dowait == 0) {
1936 w = 0;
1937 break;
1938 }
1939 ATH_PCU_UNLOCK(sc);
1940 pause("ath_reset_grablock", 1);
1941 i--;
1942 ATH_PCU_LOCK(sc);
1943 } while (i > 0);
1944
1945 /*
1946 * We always increment the refcounter, regardless
1947 * of whether we succeeded in getting it in an exclusive
1948 * way.
1949 */
1950 sc->sc_inreset_cnt++;
1951
1952 if (i <= 0)
1953 device_printf(sc->sc_dev,
1954 "%s: didn't finish after %d iterations\n",
1955 __func__, MAX_RESET_ITERATIONS);
1956
1957 if (w == 0)
1958 device_printf(sc->sc_dev,
1959 "%s: warning, recursive reset path!\n",
1960 __func__);
1961
1962 return w;
1963}
1964#undef MAX_RESET_ITERATIONS
1965
1966/*
1967 * XXX TODO: write ath_reset_releaselock
1968 */
1969
1970static void
1971ath_stop(struct ifnet *ifp)
1972{
1973 struct ath_softc *sc = ifp->if_softc;
1974
1975 ATH_LOCK(sc);
1976 ath_stop_locked(ifp);
1977 ATH_UNLOCK(sc);
1978}
1979
1980/*
1981 * Reset the hardware w/o losing operational state. This is
1982 * basically a more efficient way of doing ath_stop, ath_init,
1983 * followed by state transitions to the current 802.11
1984 * operational state. Used to recover from various errors and
1985 * to reset or reload hardware state.
1986 */
1987int
1988ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
1989{
1990 struct ath_softc *sc = ifp->if_softc;
1991 struct ieee80211com *ic = ifp->if_l2com;
1992 struct ath_hal *ah = sc->sc_ah;
1993 HAL_STATUS status;
1994 int i;
1995
1996 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
1997
1998 /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
1999 ATH_PCU_UNLOCK_ASSERT(sc);
2000 ATH_UNLOCK_ASSERT(sc);
2001
2002 ATH_PCU_LOCK(sc);
2003 if (ath_reset_grablock(sc, 1) == 0) {
2004 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
2005 __func__);
2006 }
2007 ath_hal_intrset(ah, 0); /* disable interrupts */
2008 ATH_PCU_UNLOCK(sc);
2009
2010 /*
2011 * Should now wait for pending TX/RX to complete
2012 * and block future ones from occurring. This needs to be
2013 * done before the TX queue is drained.
2014 */
2015 ath_txrx_stop(sc);
2016 ath_draintxq(sc, reset_type); /* stop xmit side */
2017
2018 /*
2019 * Regardless of whether we're doing a no-loss flush or
2020 * not, stop the PCU and handle what's in the RX queue.
2021 * That way frames aren't dropped which shouldn't be.
2022 */
2023 ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
2024 ath_rx_proc(sc, 0);
2025
2026 ath_settkipmic(sc); /* configure TKIP MIC handling */
2027 /* NB: indicate channel change so we do a full reset */
2028 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
2029 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
2030 __func__, status);
2031 sc->sc_diversity = ath_hal_getdiversity(ah);
2032
2033 /* Let DFS at it in case it's a DFS channel */
2034 ath_dfs_radar_enable(sc, ic->ic_curchan);
2035
2036 if (ath_startrecv(sc) != 0) /* restart recv */
2037 if_printf(ifp, "%s: unable to start recv logic\n", __func__);
2038 /*
2039 * We may be doing a reset in response to an ioctl
2040 * that changes the channel so update any state that
2041 * might change as a result.
2042 */
2043 ath_chan_change(sc, ic->ic_curchan);
2044 if (sc->sc_beacons) { /* restart beacons */
2045#ifdef IEEE80211_SUPPORT_TDMA
2046 if (sc->sc_tdma)
2047 ath_tdma_config(sc, NULL);
2048 else
2049#endif
2050 ath_beacon_config(sc, NULL);
2051 }
2052
2053 /*
2054 * Release the reset lock and re-enable interrupts here.
2055 * If an interrupt was being processed in ath_intr(),
2056 * it would disable interrupts at this point. So we have
2057 * to atomically enable interrupts and decrement the
2058 * reset counter - this way ath_intr() doesn't end up
2059 * disabling interrupts without a corresponding enable
2060 * in the reset or channel change path.
2061 */
2062 ATH_PCU_LOCK(sc);
2063 sc->sc_inreset_cnt--;
2064 /* XXX only do this if sc_inreset_cnt == 0? */
2065 ath_hal_intrset(ah, sc->sc_imask);
2066 ATH_PCU_UNLOCK(sc);
2067
2068 /*
2069 * TX and RX can be started here. If it were started with
2070 * sc_inreset_cnt > 0, the TX and RX path would abort.
2071 * Thus if this is a nested call through the reset or
2072 * channel change code, TX completion will occur but
2073 * RX completion and ath_start / ath_tx_start will not
2074 * run.
2075 */
2076
2077 /* Restart TX/RX as needed */
2078 ath_txrx_start(sc);
2079
2080 /* XXX Restart TX completion and pending TX */
2081 if (reset_type == ATH_RESET_NOLOSS) {
2082 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
2083 if (ATH_TXQ_SETUP(sc, i)) {
2084 ATH_TXQ_LOCK(&sc->sc_txq[i]);
2085 ath_txq_restart_dma(sc, &sc->sc_txq[i]);
2086 ath_txq_sched(sc, &sc->sc_txq[i]);
2087 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
2088 }
2089 }
2090 }
2091
2092 /*
2093 * This may have been set during an ath_start() call
2094 * that detected a concurrent TX in progress.
2095 * So, clear it.
2096 */
2097 /* XXX do this inside of IF_LOCK? */
2098 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2099
2100 /* Handle any frames in the TX queue */
2101 /*
2102 * XXX should this be done by the caller, rather than
2103 * ath_reset() ?
2104 */
2105 ath_start(ifp); /* restart xmit */
2106 return 0;
2107}
2108
2109static int
2110ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
2111{
2112 struct ieee80211com *ic = vap->iv_ic;
2113 struct ifnet *ifp = ic->ic_ifp;
2114 struct ath_softc *sc = ifp->if_softc;
2115 struct ath_hal *ah = sc->sc_ah;
2116
2117 switch (cmd) {
2118 case IEEE80211_IOC_TXPOWER:
2119 /*
2120 * If per-packet TPC is enabled, then we have nothing
2121 * to do; otherwise we need to force the global limit.
2122 * All this can happen directly; no need to reset.
2123 */
2124 if (!ath_hal_gettpc(ah))
2125 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
2126 return 0;
2127 }
2128 /* XXX? Full or NOLOSS? */
2129 return ath_reset(ifp, ATH_RESET_FULL);
2130}
2131
2132struct ath_buf *
2133_ath_getbuf_locked(struct ath_softc *sc)
2134{
2135 struct ath_buf *bf;
2136
2137 ATH_TXBUF_LOCK_ASSERT(sc);
2138
2139 bf = TAILQ_FIRST(&sc->sc_txbuf);
2140 if (bf == NULL) {
2141 sc->sc_stats.ast_tx_getnobuf++;
2142 } else {
2143 if (bf->bf_flags & ATH_BUF_BUSY) {
2144 sc->sc_stats.ast_tx_getbusybuf++;
2145 bf = NULL;
2146 }
2147 }
2148
2149 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0)
2150 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
2151 else
2152 bf = NULL;
2153
2154 if (bf == NULL) {
2155 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
2156 TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
2157 "out of xmit buffers" : "xmit buffer busy");
2158 return NULL;
2159 }
2160
2161 /* Valid bf here; clear some basic fields */
2162 bf->bf_next = NULL; /* XXX just to be sure */
2163 bf->bf_last = NULL; /* XXX again, just to be sure */
2164 bf->bf_comp = NULL; /* XXX again, just to be sure */
2165 bzero(&bf->bf_state, sizeof(bf->bf_state));
2166
2167 return bf;
2168}
2169
2170/*
2171 * When retrying a software frame, buffers marked ATH_BUF_BUSY
2172 * can't be thrown back on the queue as they could still be
2173 * in use by the hardware.
2174 *
2175 * This duplicates the buffer, or returns NULL.
2176 *
2177 * The descriptor is also copied but the link pointers and
2178 * the DMA segments aren't copied; this frame should thus
2179 * be again passed through the descriptor setup/chain routines
2180 * so the link is correct.
2181 *
2182 * The caller must free the buffer using ath_freebuf().
2183 *
2184 * XXX TODO: this call shouldn't fail as it'll cause packet loss
2185 * XXX in the TX pathway when retries are needed.
2186 * XXX Figure out how to keep some buffers free, or factor the
2187 * XXX number of busy buffers into the xmit path (ath_start())
2188 * XXX so we don't over-commit.
2189 */
2190struct ath_buf *
2191ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf)
2192{
2193 struct ath_buf *tbf;
2194
2195 tbf = ath_getbuf(sc);
2196 if (tbf == NULL)
2197 return NULL; /* XXX failure? Why? */
2198
2199 /* Copy basics */
2200 tbf->bf_next = NULL;
2201 tbf->bf_nseg = bf->bf_nseg;
2202 tbf->bf_txflags = bf->bf_txflags;
2203 tbf->bf_flags = bf->bf_flags & ~ATH_BUF_BUSY;
2204 tbf->bf_status = bf->bf_status;
2205 tbf->bf_m = bf->bf_m;
2206 tbf->bf_node = bf->bf_node;
2207 /* will be setup by the chain/setup function */
2208 tbf->bf_lastds = NULL;
2209 /* for now, last == self */
2210 tbf->bf_last = tbf;
2211 tbf->bf_comp = bf->bf_comp;
2212
2213 /* NOTE: DMA segments will be setup by the setup/chain functions */
2214
2215 /* The caller has to re-init the descriptor + links */
2216
2217 /* Copy state */
2218 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state));
2219
2220 return tbf;
2221}
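/*
 * Editor's note: a minimal sketch of how a software-retry path might
 * use ath_buf_clone(), per the comments above.  The descriptor setup
 * step is represented by a hypothetical helper name; the real driver
 * re-runs its own setup/chain routines before handing the clone to
 * the hardware, and the original buffer is still released through
 * ath_freebuf() as noted above.
 */
#if 0
	struct ath_buf *nbf;

	nbf = ath_buf_clone(sc, bf);	/* bf is marked ATH_BUF_BUSY */
	if (nbf == NULL)
		return;			/* XXX frame is lost! */
	/* Link pointers/DMA segments weren't copied; redo descriptor setup */
	ath_tx_do_desc_setup(sc, nbf);	/* hypothetical helper */
	/* ... queue nbf; free bf via ath_freebuf() when hw releases it */
#endif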
2222
2223struct ath_buf *
2224ath_getbuf(struct ath_softc *sc)
2225{
2226 struct ath_buf *bf;
2227
2228 ATH_TXBUF_LOCK(sc);
2229 bf = _ath_getbuf_locked(sc);
2230 if (bf == NULL) {
2231 struct ifnet *ifp = sc->sc_ifp;
2232
2233 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
2234 sc->sc_stats.ast_tx_qstop++;
2235 /* XXX do this inside of IF_LOCK? */
2236 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2237 }
2238 ATH_TXBUF_UNLOCK(sc);
2239 return bf;
2240}
2241
2242static void
2243ath_start(struct ifnet *ifp)
2244{
2245 struct ath_softc *sc = ifp->if_softc;
2246 struct ieee80211_node *ni;
2247 struct ath_buf *bf;
2248 struct mbuf *m, *next;
2249 ath_bufhead frags;
2250
2251 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
2252 return;
2253
2254 /* XXX is it ok to hold the ATH_LOCK here? */
2255 ATH_PCU_LOCK(sc);
2256 if (sc->sc_inreset_cnt > 0) {
2257 device_printf(sc->sc_dev,
2258 "%s: sc_inreset_cnt > 0; bailing\n", __func__);
2259 /* XXX do this inside of IF_LOCK? */
2260 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2261 ATH_PCU_UNLOCK(sc);
2262 return;
2263 }
2264 sc->sc_txstart_cnt++;
2265 ATH_PCU_UNLOCK(sc);
2266
2267 for (;;) {
2268 /*
2269 * Grab a TX buffer and associated resources.
2270 */
2271 bf = ath_getbuf(sc);
2272 if (bf == NULL)
2273 break;
2274
2275 IFQ_DEQUEUE(&ifp->if_snd, m);
2276 if (m == NULL) {
2277 ATH_TXBUF_LOCK(sc);
2278 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
2279 ATH_TXBUF_UNLOCK(sc);
2280 break;
2281 }
2282 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
2283 /*
2284 * Check for fragmentation. If this frame
2285 * has been broken up, verify we have enough
2286 * buffers to send all the fragments so all
2287 * go out or none...
2288 */
2289 TAILQ_INIT(&frags);
2290 if ((m->m_flags & M_FRAG) &&
2291 !ath_txfrag_setup(sc, &frags, m, ni)) {
2292 DPRINTF(sc, ATH_DEBUG_XMIT,
2293 "%s: out of txfrag buffers\n", __func__);
2294 sc->sc_stats.ast_tx_nofrag++;
2295 ifp->if_oerrors++;
2296 ath_freetx(m);
2297 goto bad;
2298 }
2299 ifp->if_opackets++;
2300 nextfrag:
2301 /*
2302 * Pass the frame to the h/w for transmission.
2303 * Fragmented frames have each frag chained together
2304 * with m_nextpkt. We know there are sufficient ath_buf's
2305 * to send all the frags because of work done by
2306 * ath_txfrag_setup. We leave m_nextpkt set while
2307 * calling ath_tx_start so it can use it to extend the
2308 * tx duration to cover the subsequent frag and
2309 * so it can reclaim all the mbufs in case of an error;
2310 * ath_tx_start clears m_nextpkt once it commits to
2311 * handing the frame to the hardware.
2312 */
2313 next = m->m_nextpkt;
2314 if (ath_tx_start(sc, ni, bf, m)) {
2315 bad:
2316 ifp->if_oerrors++;
2317 reclaim:
2318 bf->bf_m = NULL;
2319 bf->bf_node = NULL;
2320 ATH_TXBUF_LOCK(sc);
2321 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
2322 ath_txfrag_cleanup(sc, &frags, ni);
2323 ATH_TXBUF_UNLOCK(sc);
2324 if (ni != NULL)
2325 ieee80211_free_node(ni);
2326 continue;
2327 }
2328 if (next != NULL) {
2329 /*
2330 * Beware of state changing between frags.
2331 * XXX check sta power-save state?
2332 */
2333 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
2334 DPRINTF(sc, ATH_DEBUG_XMIT,
2335 "%s: flush fragmented packet, state %s\n",
2336 __func__,
2337 ieee80211_state_name[ni->ni_vap->iv_state]);
2338 ath_freetx(next);
2339 goto reclaim;
2340 }
2341 m = next;
2342 bf = TAILQ_FIRST(&frags);
2343 KASSERT(bf != NULL, ("no buf for txfrag"));
2344 TAILQ_REMOVE(&frags, bf, bf_list);
2345 goto nextfrag;
2346 }
2347
2348 sc->sc_wd_timer = 5;
2349 }
2350
2351 ATH_PCU_LOCK(sc);
2352 sc->sc_txstart_cnt--;
2353 ATH_PCU_UNLOCK(sc);
2354}
2355
2356static int
2357ath_media_change(struct ifnet *ifp)
2358{
2359 int error = ieee80211_media_change(ifp);
2360 /* NB: only the fixed rate can change and that doesn't need a reset */
2361 return (error == ENETRESET ? 0 : error);
2362}
2363
2364/*
2365 * Block/unblock tx+rx processing while a key change is done.
2366 * We assume the caller serializes key management operations
2367 * so we only need to worry about synchronization with other
2368 * uses that originate in the driver.
2369 */
2370static void
2371ath_key_update_begin(struct ieee80211vap *vap)
2372{
2373 struct ifnet *ifp = vap->iv_ic->ic_ifp;
2374 struct ath_softc *sc = ifp->if_softc;
2375
2376 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
2377 taskqueue_block(sc->sc_tq);
2378 IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */
2379}
2380
2381static void
2382ath_key_update_end(struct ieee80211vap *vap)
2383{
2384 struct ifnet *ifp = vap->iv_ic->ic_ifp;
2385 struct ath_softc *sc = ifp->if_softc;
2386
2387 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
2388 IF_UNLOCK(&ifp->if_snd);
2389 taskqueue_unblock(sc->sc_tq);
2390}
2391
2392/*
2393 * Calculate the receive filter according to the
2394 * operating mode and state:
2395 *
2396 * o always accept unicast, broadcast, and multicast traffic
2397 * o accept PHY error frames when hardware doesn't have MIB support
2398 * to count and we need them for ANI (sta mode only until recently)
2399 * and we are not scanning (ANI is disabled)
2400 * NB: older hals add rx filter bits out of sight and we need to
2401 * blindly preserve them
2402 * o probe request frames are accepted only when operating in
2403 * hostap, adhoc, mesh, or monitor modes
2404 * o enable promiscuous mode
2405 * - when in monitor mode
2406 * - if interface marked PROMISC (assumes bridge setting is filtered)
2407 * o accept beacons:
2408 * - when operating in station mode for collecting rssi data when
2409 * the station is otherwise quiet, or
2410 * - when operating in adhoc mode so the 802.11 layer creates
2411 * node table entries for peers,
2412 * - when scanning
2413 * - when doing s/w beacon miss (e.g. for ap+sta)
2414 * - when operating in ap mode in 11g to detect overlapping bss that
2415 * require protection
2416 * - when operating in mesh mode to detect neighbors
2417 * o accept control frames:
2418 * - when in monitor mode
2419 * XXX HT protection for 11n
2420 */
2421static u_int32_t
2422ath_calcrxfilter(struct ath_softc *sc)
2423{
2424 struct ifnet *ifp = sc->sc_ifp;
2425 struct ieee80211com *ic = ifp->if_l2com;
2426 u_int32_t rfilt;
2427
2428 rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
2429 if (!sc->sc_needmib && !sc->sc_scanning)
2430 rfilt |= HAL_RX_FILTER_PHYERR;
2431 if (ic->ic_opmode != IEEE80211_M_STA)
2432 rfilt |= HAL_RX_FILTER_PROBEREQ;
2433 /* XXX ic->ic_monvaps != 0? */
2434 if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC))
2435 rfilt |= HAL_RX_FILTER_PROM;
2436 if (ic->ic_opmode == IEEE80211_M_STA ||
2437 ic->ic_opmode == IEEE80211_M_IBSS ||
2438 sc->sc_swbmiss || sc->sc_scanning)
2439 rfilt |= HAL_RX_FILTER_BEACON;
2440 /*
2441 * NB: We don't recalculate the rx filter when
2442 * ic_protmode changes; otherwise we could do
2443 * this only when ic_protmode != NONE.
2444 */
2445 if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
2446 IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
2447 rfilt |= HAL_RX_FILTER_BEACON;
2448
2449 /*
2450 * Enable hardware PS-POLL RX only for hostap mode;
2451 * STA mode sends PS-POLL frames but never
2452 * receives them.
2453 */
2454 if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PSPOLL,
2455 0, NULL) == HAL_OK &&
2456 ic->ic_opmode == IEEE80211_M_HOSTAP)
2457 rfilt |= HAL_RX_FILTER_PSPOLL;
2458
2459 if (sc->sc_nmeshvaps) {
2460 rfilt |= HAL_RX_FILTER_BEACON;
2461 if (sc->sc_hasbmatch)
2462 rfilt |= HAL_RX_FILTER_BSSID;
2463 else
2464 rfilt |= HAL_RX_FILTER_PROM;
2465 }
2466 if (ic->ic_opmode == IEEE80211_M_MONITOR)
2467 rfilt |= HAL_RX_FILTER_CONTROL;
2468
2469 /*
2470 * Enable RX of compressed BAR frames only when doing
2471 * 802.11n. Required for A-MPDU.
2472 */
2473 if (IEEE80211_IS_CHAN_HT(ic->ic_curchan))
2474 rfilt |= HAL_RX_FILTER_COMPBAR;
2475
2476 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n",
2477 __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags);
2478 return rfilt;
2479}
2480
2481static void
2482ath_update_promisc(struct ifnet *ifp)
2483{
2484 struct ath_softc *sc = ifp->if_softc;
2485 u_int32_t rfilt;
2486
2487 /* configure rx filter */
2488 rfilt = ath_calcrxfilter(sc);
2489 ath_hal_setrxfilter(sc->sc_ah, rfilt);
2490
2491 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
2492}
2493
2494static void
2495ath_update_mcast(struct ifnet *ifp)
2496{
2497 struct ath_softc *sc = ifp->if_softc;
2498 u_int32_t mfilt[2];
2499
2500 /* calculate and install multicast filter */
2501 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2502 struct ifmultiaddr *ifma;
2503 /*
2504 * Merge multicast addresses to form the hardware filter.
2505 */
2506 mfilt[0] = mfilt[1] = 0;
2507 if_maddr_rlock(ifp); /* XXX need some fiddling to remove? */
2508 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2509 caddr_t dl;
2510 u_int32_t val;
2511 u_int8_t pos;
2512
2513 /* calculate XOR of eight 6bit values */
2514 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2515 val = LE_READ_4(dl + 0);
2516 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2517 val = LE_READ_4(dl + 3);
2518 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2519 pos &= 0x3f;
2520 mfilt[pos / 32] |= (1 << (pos % 32));
2521 }
2522 if_maddr_runlock(ifp);
2523 } else
2524 mfilt[0] = mfilt[1] = ~0;
2525 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
2526 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
2527 __func__, mfilt[0], mfilt[1]);
2528}
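/*
 * Editor's note: the multicast hash above can be checked standalone.
 * This is a self-contained userland sketch (not driver code) that
 * mirrors the XOR-of-6-bit-values computation for one MAC address;
 * le_read_4() matches the little-endian LE_READ_4 used above.  The
 * buffer is padded to 8 bytes since the second read starts at
 * offset 3 and pulls in bytes past the 6-byte address, just as the
 * driver's read of LLADDR storage does.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t
le_read_4(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) |
	    ((uint32_t)p[3] << 24);
}

int
main(void)
{
	uint8_t dl[8] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01, 0, 0 };
	uint32_t val, mfilt[2] = { 0, 0 };
	uint8_t pos;

	val = le_read_4(dl + 0);
	pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	val = le_read_4(dl + 3);
	pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	pos &= 0x3f;
	mfilt[pos / 32] |= (1 << (pos % 32));
	printf("bit %u -> mfilt %08x:%08x\n", (unsigned)pos,
	    mfilt[0], mfilt[1]);
	return 0;
}
#endif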
2529
2530static void
2531ath_mode_init(struct ath_softc *sc)
2532{
2533 struct ifnet *ifp = sc->sc_ifp;
2534 struct ath_hal *ah = sc->sc_ah;
2535 u_int32_t rfilt;
2536
2537 /* configure rx filter */
2538 rfilt = ath_calcrxfilter(sc);
2539 ath_hal_setrxfilter(ah, rfilt);
2540
2541 /* configure operational mode */
2542 ath_hal_setopmode(ah);
2543
2544 /* handle any link-level address change */
2545 ath_hal_setmac(ah, IF_LLADDR(ifp));
2546
2547 /* calculate and install multicast filter */
2548 ath_update_mcast(ifp);
2549}
2550
2551/*
2552 * Set the slot time based on the current setting.
2553 */
2554static void
2555ath_setslottime(struct ath_softc *sc)
2556{
2557 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2558 struct ath_hal *ah = sc->sc_ah;
2559 u_int usec;
2560
2561 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
2562 usec = 13;
2563 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
2564 usec = 21;
2565 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
2566 /* honor short/long slot time only in 11g */
2567 /* XXX shouldn't honor on pure g or turbo g channel */
2568 if (ic->ic_flags & IEEE80211_F_SHSLOT)
2569 usec = HAL_SLOT_TIME_9;
2570 else
2571 usec = HAL_SLOT_TIME_20;
2572 } else
2573 usec = HAL_SLOT_TIME_9;
2574
2575 DPRINTF(sc, ATH_DEBUG_RESET,
2576 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
2577 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
2578 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
2579
2580 ath_hal_setslottime(ah, usec);
2581 sc->sc_updateslot = OK;
2582}
2583
2584/*
2585 * Callback from the 802.11 layer to update the
2586 * slot time based on the current setting.
2587 */
2588static void
2589ath_updateslot(struct ifnet *ifp)
2590{
2591 struct ath_softc *sc = ifp->if_softc;
2592 struct ieee80211com *ic = ifp->if_l2com;
2593
2594 /*
2595 * When not coordinating the BSS, change the hardware
2596 * immediately. For other operating modes we defer the change
2597 * until beacon updates have propagated to the stations.
2598 */
2599 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2600 ic->ic_opmode == IEEE80211_M_MBSS)
2601 sc->sc_updateslot = UPDATE;
2602 else
2603 ath_setslottime(sc);
2604}
2605
2606/*
2607 * Setup a h/w transmit queue for beacons.
2608 */
2609static int
2610ath_beaconq_setup(struct ath_hal *ah)
2611{
2612 HAL_TXQ_INFO qi;
2613
2614 memset(&qi, 0, sizeof(qi));
2615 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
2616 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
2617 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
2618 /* NB: for dynamic turbo, don't enable any other interrupts */
2619 qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE;
2620 return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
2621}
2622
2623/*
2624 * Setup the transmit queue parameters for the beacon queue.
2625 */
2626static int
2627ath_beaconq_config(struct ath_softc *sc)
2628{
2629#define ATH_EXPONENT_TO_VALUE(v) ((1<<(v))-1)
2630 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2631 struct ath_hal *ah = sc->sc_ah;
2632 HAL_TXQ_INFO qi;
2633
2634 ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
2635 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2636 ic->ic_opmode == IEEE80211_M_MBSS) {
2637 /*
2638 * Always burst out beacon and CAB traffic.
2639 */
2640 qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
2641 qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT;
2642 qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT;
2643 } else {
2644 struct wmeParams *wmep =
2645 &ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
2646 /*
2647 * Adhoc mode; important thing is to use 2x cwmin.
2648 */
2649 qi.tqi_aifs = wmep->wmep_aifsn;
2650 qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2651 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2652 }
2653
2654 if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
2655 device_printf(sc->sc_dev, "unable to update parameters for "
2656 "beacon hardware queue!\n");
2657 return 0;
2658 } else {
2659 ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
2660 return 1;
2661 }
2662#undef ATH_EXPONENT_TO_VALUE
2663}
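/*
 * Editor's note: a worked example of the exponent conversion above.
 * WME stores contention-window values as exponents, and
 * ATH_EXPONENT_TO_VALUE(v) yields (2^v - 1).  With a typical
 * best-effort logcwmin of 4 and logcwmax of 10, the adhoc branch
 * programs:
 *
 *	tqi_cwmin = 2 * ((1 << 4) - 1) = 2 * 15 = 30
 *	tqi_cwmax =      (1 << 10) - 1 = 1023
 */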
2664
2665/*
2666 * Allocate and setup an initial beacon frame.
2667 */
2668static int
2669ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
2670{
2671 struct ieee80211vap *vap = ni->ni_vap;
2672 struct ath_vap *avp = ATH_VAP(vap);
2673 struct ath_buf *bf;
2674 struct mbuf *m;
2675 int error;
2676
2677 bf = avp->av_bcbuf;
2678 if (bf->bf_m != NULL) {
2679 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2680 m_freem(bf->bf_m);
2681 bf->bf_m = NULL;
2682 }
2683 if (bf->bf_node != NULL) {
2684 ieee80211_free_node(bf->bf_node);
2685 bf->bf_node = NULL;
2686 }
2687
2688 /*
2689 * NB: the beacon data buffer must be 32-bit aligned;
2690 * we assume the mbuf routines will return us something
2691 * with this alignment (perhaps should assert).
2692 */
2693 m = ieee80211_beacon_alloc(ni, &avp->av_boff);
2694 if (m == NULL) {
2695 device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__);
2696 sc->sc_stats.ast_be_nombuf++;
2697 return ENOMEM;
2698 }
2699 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
2700 bf->bf_segs, &bf->bf_nseg,
2701 BUS_DMA_NOWAIT);
2702 if (error != 0) {
2703 device_printf(sc->sc_dev,
2704 "%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n",
2705 __func__, error);
2706 m_freem(m);
2707 return error;
2708 }
2709
2710 /*
2711 * Calculate a TSF adjustment factor required for staggered
2712 * beacons. Note that we assume the format of the beacon
2713 * frame leaves the tstamp field immediately following the
2714 * header.
2715 */
2716 if (sc->sc_stagbeacons && avp->av_bslot > 0) {
2717 uint64_t tsfadjust;
2718 struct ieee80211_frame *wh;
2719
2720 /*
2721 * The beacon interval is in TU's; the TSF is in usecs.
2722 * We figure out how many TU's to add to align the timestamp
2723 * then convert to TSF units and handle byte swapping before
2724 * inserting it in the frame. The hardware will then add this
2725 * each time a beacon frame is sent. Note that we align vap's
2726 * 1..N and leave vap 0 untouched. This means vap 0 has a
2727 * timestamp in one beacon interval while the others get a
2728 * timestamp aligned to the next interval.
2729 */
2730 tsfadjust = ni->ni_intval *
2731 (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF;
2732 tsfadjust = htole64(tsfadjust << 10); /* TU -> TSF */
2733
2734 DPRINTF(sc, ATH_DEBUG_BEACON,
2735 "%s: %s beacons bslot %d intval %u tsfadjust %llu\n",
2736 __func__, sc->sc_stagbeacons ? "stagger" : "burst",
2737 avp->av_bslot, ni->ni_intval,
2738 (long long unsigned) le64toh(tsfadjust));
2739
2740 wh = mtod(m, struct ieee80211_frame *);
2741 memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust));
2742 }
2743 bf->bf_m = m;
2744 bf->bf_node = ieee80211_ref_node(ni);
2745
2746 return 0;
2747}
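/*
 * Editor's note: a worked example of the staggered-beacon TSF
 * adjustment above.  With ATH_BCBUF = 4, ni_intval = 100 TU and
 * av_bslot = 1:
 *
 *	tsfadjust = 100 * (4 - 1) / 4 = 75 TU
 *	75 TU << 10 = 76800 usec
 *
 * The hardware adds this to the timestamp of every beacon sent
 * from slot 1, so a beacon transmitted partway through the interval
 * carries a timestamp aligned to the interval boundary, as the
 * comment above describes.
 */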
2748
2749/*
2750 * Setup the beacon frame for transmit.
2751 */
2752static void
2753ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
2754{
2755#define USE_SHPREAMBLE(_ic) \
2756 (((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
2757 == IEEE80211_F_SHPREAMBLE)
2758 struct ieee80211_node *ni = bf->bf_node;
2759 struct ieee80211com *ic = ni->ni_ic;
2760 struct mbuf *m = bf->bf_m;
2761 struct ath_hal *ah = sc->sc_ah;
2762 struct ath_desc *ds;
2763 int flags, antenna;
2764 const HAL_RATE_TABLE *rt;
2765 u_int8_t rix, rate;
2766
2767 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
2768 __func__, m, m->m_len);
2769
2770 /* setup descriptors */
2771 ds = bf->bf_desc;
2772 bf->bf_last = bf;
2773 bf->bf_lastds = ds;
2774
2775 flags = HAL_TXDESC_NOACK;
2776 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
2777 ds->ds_link = bf->bf_daddr; /* self-linked */
2778 flags |= HAL_TXDESC_VEOL;
2779 /*
2780 * Let hardware handle antenna switching.
2781 */
2782 antenna = sc->sc_txantenna;
2783 } else {
2784 ds->ds_link = 0;
2785 /*
2786 * Switch antenna every 4 beacons.
2787 * XXX assumes two antennas
2788 */
2789 if (sc->sc_txantenna != 0)
2790 antenna = sc->sc_txantenna;
2791 else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0)
2792 antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1);
2793 else
2794 antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
2795 }
2796
2797 KASSERT(bf->bf_nseg == 1,
2798 ("multi-segment beacon frame; nseg %u", bf->bf_nseg));
2799 ds->ds_data = bf->bf_segs[0].ds_addr;
2800 /*
2801 * Calculate rate code.
2802 * XXX everything at min xmit rate
2803 */
2804 rix = 0;
2805 rt = sc->sc_currates;
2806 rate = rt->info[rix].rateCode;
2807 if (USE_SHPREAMBLE(ic))
2808 rate |= rt->info[rix].shortPreamble;
2809 ath_hal_setuptxdesc(ah, ds
2810 , m->m_len + IEEE80211_CRC_LEN /* frame length */
2811 , sizeof(struct ieee80211_frame)/* header length */
2812 , HAL_PKT_TYPE_BEACON /* Atheros packet type */
2813 , ni->ni_txpower /* txpower XXX */
2814 , rate, 1 /* series 0 rate/tries */
2815 , HAL_TXKEYIX_INVALID /* no encryption */
2816 , antenna /* antenna mode */
2817 , flags /* no ack, veol for beacons */
2818 , 0 /* rts/cts rate */
2819 , 0 /* rts/cts duration */
2820 );
2821 /* NB: beacon's BufLen must be a multiple of 4 bytes */
2822 ath_hal_filltxdesc(ah, ds
2823 , roundup(m->m_len, 4) /* buffer length */
2824 , AH_TRUE /* first segment */
2825 , AH_TRUE /* last segment */
2826 , ds /* first descriptor */
2827 );
2828#if 0
2829 ath_desc_swap(ds);
2830#endif
2831#undef USE_SHPREAMBLE
2832}
2833
2834static void
2835ath_beacon_update(struct ieee80211vap *vap, int item)
2836{
2837 struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff;
2838
2839 setbit(bo->bo_flags, item);
2840}
2841
2842/*
2843 * Append the contents of src to dst; both queues
2844 * are assumed to be locked.
2845 */
2846static void
2847ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
2848{
2849 TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list);
2850 dst->axq_link = src->axq_link;
2851 src->axq_link = NULL;
2852 dst->axq_depth += src->axq_depth;
2853 dst->axq_aggr_depth += src->axq_aggr_depth;
2854 src->axq_depth = 0;
2855 src->axq_aggr_depth = 0;
2856}
2857
2858/*
2859 * Transmit a beacon frame at SWBA. Dynamic updates to the
2860 * frame contents are done as needed and the slot time is
2861 * also adjusted based on current state.
2862 */
2863static void
2864ath_beacon_proc(void *arg, int pending)
2865{
2866 struct ath_softc *sc = arg;
2867 struct ath_hal *ah = sc->sc_ah;
2868 struct ieee80211vap *vap;
2869 struct ath_buf *bf;
2870 int slot, otherant;
2871 uint32_t bfaddr;
2872
2873 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n",
2874 __func__, pending);
2875 /*
2876 * Check if the previous beacon has gone out. If
2877 * not, don't try to post another; skip this period
2878 * and wait for the next. Missed beacons indicate
2879 * a problem and should not occur. If we miss too
2880 * many consecutive beacons, reset the device.
2881 */
2882 if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
2883 sc->sc_bmisscount++;
2884 sc->sc_stats.ast_be_missed++;
2885 DPRINTF(sc, ATH_DEBUG_BEACON,
2886 "%s: missed %u consecutive beacons\n",
2887 __func__, sc->sc_bmisscount);
2888 if (sc->sc_bmisscount >= ath_bstuck_threshold)
2889 taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
2890 return;
2891 }
2892 if (sc->sc_bmisscount != 0) {
2893 DPRINTF(sc, ATH_DEBUG_BEACON,
2894 "%s: resume beacon xmit after %u misses\n",
2895 __func__, sc->sc_bmisscount);
2896 sc->sc_bmisscount = 0;
2897 }
2898
2899 if (sc->sc_stagbeacons) { /* staggered beacons */
2900 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2901 uint32_t tsftu;
2902
2903 tsftu = ath_hal_gettsf32(ah) >> 10;
2904 /* XXX lintval */
2905 slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval;
2906 vap = sc->sc_bslot[(slot+1) % ATH_BCBUF];
2907 bfaddr = 0;
2908 if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) {
2909 bf = ath_beacon_generate(sc, vap);
2910 if (bf != NULL)
2911 bfaddr = bf->bf_daddr;
2912 }
2913 } else { /* burst'd beacons */
2914 uint32_t *bflink = &bfaddr;
2915
2916 for (slot = 0; slot < ATH_BCBUF; slot++) {
2917 vap = sc->sc_bslot[slot];
2918 if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) {
2919 bf = ath_beacon_generate(sc, vap);
2920 if (bf != NULL) {
2921 *bflink = bf->bf_daddr;
2922 bflink = &bf->bf_desc->ds_link;
2923 }
2924 }
2925 }
2926 *bflink = 0; /* terminate list */
2927 }
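	/*
	 * Editor's note: a worked example of the staggered slot math
	 * above.  With ATH_BCBUF = 4 and ic_lintval = 100 TU, SWBA
	 * interrupts land every 25 TU.  At tsftu % 100 == 30:
	 *
	 *	slot = (30 * 4) / 100 = 1
	 *
	 * and the vap in sc_bslot[(1 + 1) % 4] = sc_bslot[2] is
	 * beaconed, i.e. the slot after the one the TSF currently
	 * sits in.
	 */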
2928
2929 /*
2930 * Handle slot time change when a non-ERP station joins/leaves
2931 * an 11g network. The 802.11 layer notifies us via callback,
2932 * we mark updateslot, then wait one beacon before effecting
2933 * the change. This gives associated stations at least one
2934 * beacon interval to note the state change.
2935 */
2936 /* XXX locking */
2937 if (sc->sc_updateslot == UPDATE) {
2938 sc->sc_updateslot = COMMIT; /* commit next beacon */
2939 sc->sc_slotupdate = slot;
2940 } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
2941 ath_setslottime(sc); /* commit change to h/w */
2942
2943 /*
2944 * Check recent per-antenna transmit statistics and flip
2945 * the default antenna if noticeably more frames went out
2946 * on the non-default antenna.
2947 * XXX assumes 2 antennas
2948 */
2949 if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) {
2950 otherant = sc->sc_defant & 1 ? 2 : 1;
2951 if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
2952 ath_setdefantenna(sc, otherant);
2953 sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
2954 }
2955
2956 if (bfaddr != 0) {
2957 /*
2958 * Stop any current dma and put the new frame on the queue.
2959 * This should never fail since we check above that no frames
2960 * are still pending on the queue.
2961 */
2962 if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
2963 DPRINTF(sc, ATH_DEBUG_ANY,
2964 "%s: beacon queue %u did not stop?\n",
2965 __func__, sc->sc_bhalq);
2966 }
2967 /* NB: cabq traffic should already be queued and primed */
2968 ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr);
2969 ath_hal_txstart(ah, sc->sc_bhalq);
2970
2971 sc->sc_stats.ast_be_xmit++;
2972 }
2973}
2974
2975static struct ath_buf *
2976ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap)
2977{
2978 struct ath_vap *avp = ATH_VAP(vap);
2979 struct ath_txq *cabq = sc->sc_cabq;
2980 struct ath_buf *bf;
2981 struct mbuf *m;
2982 int nmcastq, error;
2983
2984 KASSERT(vap->iv_state >= IEEE80211_S_RUN,
2985 ("not running, state %d", vap->iv_state));
2986 KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));
2987
2988 /*
2989 * Update dynamic beacon contents. If this returns
2990 * non-zero then we need to remap the memory because
2991 * the beacon frame changed size (probably because
2992 * of the TIM bitmap).
2993 */
2994 bf = avp->av_bcbuf;
2995 m = bf->bf_m;
2996 nmcastq = avp->av_mcastq.axq_depth;
2997 if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) {
2998 /* XXX too conservative? */
2999 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3000 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
3001 bf->bf_segs, &bf->bf_nseg,
3002 BUS_DMA_NOWAIT);
3003 if (error != 0) {
3004 if_printf(vap->iv_ifp,
3005 "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
3006 __func__, error);
3007 return NULL;
3008 }
3009 }
3010 if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) {
3011 DPRINTF(sc, ATH_DEBUG_BEACON,
3012 "%s: cabq did not drain, mcastq %u cabq %u\n",
3013 __func__, nmcastq, cabq->axq_depth);
3014 sc->sc_stats.ast_cabq_busy++;
3015 if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) {
3016 /*
3017 * CABQ traffic from a previous vap is still pending.
3018 * We must drain the q before this beacon frame goes
3019 * out as otherwise this vap's stations will get cab
3020 * frames from a different vap.
3021 * XXX could be slow causing us to miss DBA
3022 */
3023 ath_tx_draintxq(sc, cabq);
3024 }
3025 }
3026 ath_beacon_setup(sc, bf);
3027 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
3028
3029 /*
3030 * Enable the CAB queue before the beacon queue to
3031 * ensure cab frames are triggered by this beacon.
3032 */
3033 if (avp->av_boff.bo_tim[4] & 1) {
3034 struct ath_hal *ah = sc->sc_ah;
3035
3036 /* NB: only at DTIM */
3037 ATH_TXQ_LOCK(cabq);
3038 ATH_TXQ_LOCK(&avp->av_mcastq);
3039 if (nmcastq) {
3040 struct ath_buf *bfm;
3041
3042 /*
3043 * Move frames from the s/w mcast q to the h/w cab q.
3044 * XXX MORE_DATA bit
3045 */
3046 bfm = TAILQ_FIRST(&avp->av_mcastq.axq_q);
3047 if (cabq->axq_link != NULL) {
3048 *cabq->axq_link = bfm->bf_daddr;
3049 } else
3050 ath_hal_puttxbuf(ah, cabq->axq_qnum,
3051 bfm->bf_daddr);
3052 ath_txqmove(cabq, &avp->av_mcastq);
3053
3054 sc->sc_stats.ast_cabq_xmit += nmcastq;
3055 }
3056 /* NB: gated by beacon so safe to start here */
3057 if (! TAILQ_EMPTY(&(cabq->axq_q)))
3058 ath_hal_txstart(ah, cabq->axq_qnum);
3059 ATH_TXQ_UNLOCK(&avp->av_mcastq);
3060 ATH_TXQ_UNLOCK(cabq);
3061 }
3062 return bf;
3063}
3064
3065static void
3066ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap)
3067{
3068 struct ath_vap *avp = ATH_VAP(vap);
3069 struct ath_hal *ah = sc->sc_ah;
3070 struct ath_buf *bf;
3071 struct mbuf *m;
3072 int error;
3073
3074 KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));
3075
3076 /*
3077 * Update dynamic beacon contents. If this returns
3078 * non-zero then we need to remap the memory because
3079 * the beacon frame changed size (probably because
3080 * of the TIM bitmap).
3081 */
3082 bf = avp->av_bcbuf;
3083 m = bf->bf_m;
3084 if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) {
3085 /* XXX too conservative? */
3086 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3087 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
3088 bf->bf_segs, &bf->bf_nseg,
3089 BUS_DMA_NOWAIT);
3090 if (error != 0) {
3091 if_printf(vap->iv_ifp,
3092 "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
3093 __func__, error);
3094 return;
3095 }
3096 }
3097 ath_beacon_setup(sc, bf);
3098 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
3099
3100 /* NB: caller is known to have already stopped tx dma */
3101 ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
3102 ath_hal_txstart(ah, sc->sc_bhalq);
3103}
3104
3105/*
3106 * Reset the hardware after detecting beacons have stopped.
3107 */
3108static void
3109ath_bstuck_proc(void *arg, int pending)
3110{
3111 struct ath_softc *sc = arg;
3112 struct ifnet *ifp = sc->sc_ifp;
3113 uint32_t hangs = 0;
3114
3115 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0)
3116 if_printf(ifp, "bb hang detected (0x%x)\n", hangs);
3117
3118 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
3119 sc->sc_bmisscount);
3120 sc->sc_stats.ast_bstuck++;
3121 /*
3122 * This assumes that there's no simultaneous channel mode change
3123 * occurring.
3124 */
3125 ath_reset(ifp, ATH_RESET_NOLOSS);
3126}
3127
3128/*
3129 * Reclaim beacon resources and return buffer to the pool.
3130 */
3131static void
3132ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf)
3133{
3134
3135 if (bf->bf_m != NULL) {
3136 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3137 m_freem(bf->bf_m);
3138 bf->bf_m = NULL;
3139 }
3140 if (bf->bf_node != NULL) {
3141 ieee80211_free_node(bf->bf_node);
3142 bf->bf_node = NULL;
3143 }
3144 TAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list);
3145}
3146
3147/*
3148 * Reclaim beacon resources.
3149 */
3150static void
3151ath_beacon_free(struct ath_softc *sc)
3152{
3153 struct ath_buf *bf;
3154
3155 TAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
3156 if (bf->bf_m != NULL) {
3157 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3158 m_freem(bf->bf_m);
3159 bf->bf_m = NULL;
3160 }
3161 if (bf->bf_node != NULL) {
3162 ieee80211_free_node(bf->bf_node);
3163 bf->bf_node = NULL;
3164 }
3165 }
3166}
3167
3168/*
3169 * Configure the beacon and sleep timers.
3170 *
3171 * When operating as an AP this resets the TSF and sets
3172 * up the hardware to notify us when we need to issue beacons.
3173 *
3174 * When operating in station mode this sets up the beacon
3175 * timers according to the timestamp of the last received
3176 * beacon and the current TSF, configures PCF and DTIM
3177 * handling, programs the sleep registers so the hardware
3178 * will wakeup in time to receive beacons, and configures
3179 * the beacon miss handling so we'll receive a BMISS
3180 * interrupt when we stop seeing beacons from the AP
3181 * we've associated with.
3182 */
3183static void
3184ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
3185{
3186#define TSF_TO_TU(_h,_l) \
3187 ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
3188#define FUDGE 2
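	/*
	 * Editor's note: TSF_TO_TU() above converts the 64-bit usec
	 * TSF to a 32-bit TU count (1 TU = 1024 usec): the low word
	 * contributes its top 22 bits and the high word its bottom
	 * 10.  E.g. a TSF of 0x100000000 usec yields
	 * (1 << 22) | 0 = 0x400000 TU, i.e. 2^32 / 1024.
	 */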
3189 struct ath_hal *ah = sc->sc_ah;
3190 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
3191 struct ieee80211_node *ni;
3192 u_int32_t nexttbtt, intval, tsftu;
3193 u_int64_t tsf;
3194
3195 if (vap == NULL)
3196 vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */
3197 ni = vap->iv_bss;
3198
3199 /* extract tstamp from last beacon and convert to TU */
3200 nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4),
3201 LE_READ_4(ni->ni_tstamp.data));
3202 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
3203 ic->ic_opmode == IEEE80211_M_MBSS) {
3204 /*
3205 * For multi-bss ap/mesh support beacons are either staggered
3206 * evenly over N slots or burst together. For the former
3207 * arrange for the SWBA to be delivered for each slot.
3208 * Slots that are not occupied will generate nothing.
3209 */
3210 /* NB: the beacon interval is kept internally in TU's */
3211 intval = ni->ni_intval & HAL_BEACON_PERIOD;
3212 if (sc->sc_stagbeacons)
3213 intval /= ATH_BCBUF;
3214 } else {
3215 /* NB: the beacon interval is kept internally in TU's */
3216 intval = ni->ni_intval & HAL_BEACON_PERIOD;
3217 }
3218 if (nexttbtt == 0) /* e.g. for ap mode */
3219 nexttbtt = intval;
3220 else if (intval) /* NB: can be 0 for monitor mode */
3221 nexttbtt = roundup(nexttbtt, intval);
3222 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
3223 __func__, nexttbtt, intval, ni->ni_intval);
3224 if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) {
3225 HAL_BEACON_STATE bs;
3226 int dtimperiod, dtimcount;
3227 int cfpperiod, cfpcount;
3228
3229 /*
3230 * Setup dtim and cfp parameters according to
3231 * last beacon we received (which may be none).
3232 */
3233 dtimperiod = ni->ni_dtim_period;
3234 if (dtimperiod <= 0) /* NB: 0 if not known */
3235 dtimperiod = 1;
3236 dtimcount = ni->ni_dtim_count;
3237 if (dtimcount >= dtimperiod) /* NB: sanity check */
3238 dtimcount = 0; /* XXX? */
3239 cfpperiod = 1; /* NB: no PCF support yet */
3240 cfpcount = 0;
3241 /*
3242 * Pull nexttbtt forward to reflect the current
3243 * TSF and calculate dtim+cfp state for the result.
3244 */
3245 tsf = ath_hal_gettsf64(ah);
3246 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
3247 do {
3248 nexttbtt += intval;
3249 if (--dtimcount < 0) {
3250 dtimcount = dtimperiod - 1;
3251 if (--cfpcount < 0)
3252 cfpcount = cfpperiod - 1;
3253 }
3254 } while (nexttbtt < tsftu);
3255 memset(&bs, 0, sizeof(bs));
3256 bs.bs_intval = intval;
3257 bs.bs_nexttbtt = nexttbtt;
3258 bs.bs_dtimperiod = dtimperiod*intval;
3259 bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
3260 bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
3261 bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
3262 bs.bs_cfpmaxduration = 0;
3263#if 0
3264 /*
3265 * The 802.11 layer records the offset to the DTIM
3266 * bitmap while receiving beacons; use it here to
3267 * enable h/w detection of our AID being marked in
3268 * the bitmap vector (to indicate frames for us are
3269 * pending at the AP).
3270 * XXX do DTIM handling in s/w to WAR old h/w bugs
3271 * XXX enable based on h/w rev for newer chips
3272 */
3273 bs.bs_timoffset = ni->ni_timoff;
3274#endif
3275 /*
3276 * Calculate the number of consecutive beacons to miss
3277 * before taking a BMISS interrupt.
3278 * Note that we clamp the result to at most 10 beacons.
3279 */
3280 bs.bs_bmissthreshold = vap->iv_bmissthreshold;
3281 if (bs.bs_bmissthreshold > 10)
3282 bs.bs_bmissthreshold = 10;
3283 else if (bs.bs_bmissthreshold <= 0)
3284 bs.bs_bmissthreshold = 1;
3285
3286 /*
3287 * Calculate sleep duration. The configuration is
3288 * given in ms. We ensure a multiple of the beacon
3289 * period is used. Also, if the sleep duration is
3290 * greater than the DTIM period then it makes sense
3291 * to make it a multiple of that.
3292 *
3293 * XXX fixed at 100ms
3294 */
3295 bs.bs_sleepduration =
3296 roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
3297 if (bs.bs_sleepduration > bs.bs_dtimperiod)
3298 bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);
3299
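		/*
		 * Editor's note: a worked example, assuming the usual
		 * IEEE80211_MS_TO_TU() conversion of (ms * 1000) / 1024.
		 * With a 100 TU beacon interval: 100 ms -> 97 TU,
		 * rounded up to one beacon period = 100 TU.  With a
		 * DTIM period of 3 beacons (300 TU) that is already
		 * shorter than the DTIM period, so bs_sleepduration
		 * stays at 100 TU.
		 */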
3300 DPRINTF(sc, ATH_DEBUG_BEACON,
3301 "%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
3302 , __func__
3303 , tsf, tsftu
3304 , bs.bs_intval
3305 , bs.bs_nexttbtt
3306 , bs.bs_dtimperiod
3307 , bs.bs_nextdtim
3308 , bs.bs_bmissthreshold
3309 , bs.bs_sleepduration
3310 , bs.bs_cfpperiod
3311 , bs.bs_cfpmaxduration
3312 , bs.bs_cfpnext
3313 , bs.bs_timoffset
3314 );
3315 ath_hal_intrset(ah, 0);
3316 ath_hal_beacontimers(ah, &bs);
3317 sc->sc_imask |= HAL_INT_BMISS;
3318 ath_hal_intrset(ah, sc->sc_imask);
3319 } else {
3320 ath_hal_intrset(ah, 0);
3321 if (nexttbtt == intval)
3322 intval |= HAL_BEACON_RESET_TSF;
3323 if (ic->ic_opmode == IEEE80211_M_IBSS) {
3324 /*
3325 * In IBSS mode enable the beacon timers but only
3326 * enable SWBA interrupts if we need to manually
3327 * prepare beacon frames. Otherwise we use a
3328 * self-linked tx descriptor and let the hardware
3329 * deal with things.
3330 */
3331 intval |= HAL_BEACON_ENA;
3332 if (!sc->sc_hasveol)
3333 sc->sc_imask |= HAL_INT_SWBA;
3334 if ((intval & HAL_BEACON_RESET_TSF) == 0) {
3335 /*
3336 * Pull nexttbtt forward to reflect
3337 * the current TSF.
3338 */
3339 tsf = ath_hal_gettsf64(ah);
3340 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
3341 do {
3342 nexttbtt += intval;
3343 } while (nexttbtt < tsftu);
3344 }
3345 ath_beaconq_config(sc);
3346 } else if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
3347 ic->ic_opmode == IEEE80211_M_MBSS) {
3348 /*
3349 * In AP/mesh mode we enable the beacon timers
3350 * and SWBA interrupts to prepare beacon frames.
3351 */
3352 intval |= HAL_BEACON_ENA;
3353 sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */
3354 ath_beaconq_config(sc);
3355 }
3356 ath_hal_beaconinit(ah, nexttbtt, intval);
3357 sc->sc_bmisscount = 0;
3358 ath_hal_intrset(ah, sc->sc_imask);
3359 /*
3360 * When using a self-linked beacon descriptor in
3361 * ibss mode load it once here.
3362 */
3363 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol)
3364 ath_beacon_start_adhoc(sc, vap);
3365 }
3366 sc->sc_syncbeacon = 0;
3367#undef FUDGE
3368#undef TSF_TO_TU
3369}
3370
3371static void
3372ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3373{
3374 bus_addr_t *paddr = (bus_addr_t*) arg;
3375 KASSERT(error == 0, ("error %u on bus_dma callback", error));
3376 *paddr = segs->ds_addr;
3377}
3378
3379static int
3380ath_descdma_setup(struct ath_softc *sc,
3381 struct ath_descdma *dd, ath_bufhead *head,
3382 const char *name, int nbuf, int ndesc)
3383{
3384#define DS2PHYS(_dd, _ds) \
3385 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
3386#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
3387 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
3388 struct ifnet *ifp = sc->sc_ifp;
3389 uint8_t *ds;
3390 struct ath_buf *bf;
3391 int i, bsize, error;
3392 int desc_len;
3393
3394 desc_len = sizeof(struct ath_desc);
3395
3396 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n",
3397 __func__, name, nbuf, ndesc);
3398
3399 dd->dd_name = name;
3400 dd->dd_desc_len = desc_len * nbuf * ndesc;
3401
3402 /*
3403 * Merlin work-around:
3404 * Descriptors that cross the 4KB boundary can't be used.
3405 * Assume one skipped descriptor per 4KB page.
3406 */
3407 if (! ath_hal_split4ktrans(sc->sc_ah)) {
3408 int numdescpage = 4096 / (desc_len * ndesc);
3409 dd->dd_desc_len = (nbuf / numdescpage + 1) * 4096;
3410 }
3411
3412 /*
3413 * Setup DMA descriptor area.
3414 */
3415 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
3416 PAGE_SIZE, 0, /* alignment, bounds */
3417 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
3418 BUS_SPACE_MAXADDR, /* highaddr */
3419 NULL, NULL, /* filter, filterarg */
3420 dd->dd_desc_len, /* maxsize */
3421 1, /* nsegments */
3422 dd->dd_desc_len, /* maxsegsize */
3423 BUS_DMA_ALLOCNOW, /* flags */
3424 NULL, /* lockfunc */
3425 NULL, /* lockarg */
3426 &dd->dd_dmat);
3427 if (error != 0) {
3428 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
3429 return error;
3430 }
3431
3432 /* allocate descriptors */
3433 error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
3434 if (error != 0) {
3435 if_printf(ifp, "unable to create dmamap for %s descriptors, "
3436 "error %u\n", dd->dd_name, error);
3437 goto fail0;
3438 }
3439
3440 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
3441 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
3442 &dd->dd_dmamap);
3443 if (error != 0) {
3444 if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
3445 "error %u\n", nbuf * ndesc, dd->dd_name, error);
3446 goto fail1;
3447 }
3448
3449 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
3450 dd->dd_desc, dd->dd_desc_len,
3451 ath_load_cb, &dd->dd_desc_paddr,
3452 BUS_DMA_NOWAIT);
3453 if (error != 0) {
3454 if_printf(ifp, "unable to map %s descriptors, error %u\n",
3455 dd->dd_name, error);
3456 goto fail2;
3457 }
3458
3459 ds = (uint8_t *) dd->dd_desc;
3460 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
3461 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
3462 (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
3463
3464 /* allocate rx buffers */
3465 bsize = sizeof(struct ath_buf) * nbuf;
3466 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
3467 if (bf == NULL) {
3468 if_printf(ifp, "malloc of %s buffers failed, size %u\n",
3469 dd->dd_name, bsize);
3470 goto fail3;
3471 }
3472 dd->dd_bufptr = bf;
3473
3474 TAILQ_INIT(head);
3475 for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * desc_len)) {
3476 bf->bf_desc = (struct ath_desc *) ds;
3477 bf->bf_daddr = DS2PHYS(dd, ds);
3478 if (! ath_hal_split4ktrans(sc->sc_ah)) {
3479 /*
3480 * Merlin WAR: Skip descriptor addresses which
3481 * cause 4KB boundary crossing along any point
3482 * in the descriptor.
3483 */
3484 if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr,
3485 desc_len * ndesc)) {
3486 /* Start at the next page */
3487 ds += 0x1000 - (bf->bf_daddr & 0xFFF);
3488 bf->bf_desc = (struct ath_desc *) ds;
3489 bf->bf_daddr = DS2PHYS(dd, ds);
3490 }
3491 }
3492 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
3493 &bf->bf_dmamap);
3494 if (error != 0) {
3495 if_printf(ifp, "unable to create dmamap for %s "
3496 "buffer %u, error %u\n", dd->dd_name, i, error);
3497 ath_descdma_cleanup(sc, dd, head);
3498 return error;
3499 }
3500 bf->bf_lastds = bf->bf_desc; /* Just an initial value */
3501 TAILQ_INSERT_TAIL(head, bf, bf_list);
3502 }
3503 return 0;
3504fail3:
3505 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3506fail2:
3507 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3508fail1:
3509 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
3510fail0:
3511 bus_dma_tag_destroy(dd->dd_dmat);
3512 memset(dd, 0, sizeof(*dd));
3513 return error;
3514#undef DS2PHYS
3515#undef ATH_DESC_4KB_BOUND_CHECK
3516}
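/*
 * Editor's note: a worked example of ATH_DESC_4KB_BOUND_CHECK()
 * above.  For a descriptor block of length 0x20 whose bus address
 * ends in 0xFF8:
 *
 *	(0xFF8 & 0xFFF) = 0xFF8 > (0x1000 - 0x20) = 0xFE0  -> 1
 *
 * i.e. the block would spill across the 4KB page edge (0xFF8 + 0x20
 * = 0x1018), so the setup loop above bumps ds to the next page
 * before using it.
 */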
3517
3518static void
3519ath_descdma_cleanup(struct ath_softc *sc,
3520 struct ath_descdma *dd, ath_bufhead *head)
3521{
3522 struct ath_buf *bf;
3523 struct ieee80211_node *ni;
3524
3525 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3526 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3527 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
3528 bus_dma_tag_destroy(dd->dd_dmat);
3529
3530 TAILQ_FOREACH(bf, head, bf_list) {
3531 if (bf->bf_m) {
3532 m_freem(bf->bf_m);
3533 bf->bf_m = NULL;
3534 }
3535 if (bf->bf_dmamap != NULL) {
3536 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
3537 bf->bf_dmamap = NULL;
3538 }
3539 ni = bf->bf_node;
3540 bf->bf_node = NULL;
3541 if (ni != NULL) {
3542 /*
3543 * Reclaim node reference.
3544 */
3545 ieee80211_free_node(ni);
3546 }
3547 }
3548
3549 TAILQ_INIT(head);
3550 free(dd->dd_bufptr, M_ATHDEV);
3551 memset(dd, 0, sizeof(*dd));
3552}
3553
3554static int
3555ath_desc_alloc(struct ath_softc *sc)
3556{
3557 int error;
3558
3559 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
3560 "rx", ath_rxbuf, 1);
3561 if (error != 0)
3562 return error;
3563
3564 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
3565 "tx", ath_txbuf, ATH_TXDESC);
3566 if (error != 0) {
3567 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3568 return error;
3569 }
3570
3571 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
3572 "beacon", ATH_BCBUF, 1);
3573 if (error != 0) {
3574 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3575 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3576 return error;
3577 }
3578 return 0;
3579}
3580
3581static void
3582ath_desc_free(struct ath_softc *sc)
3583{
3584
3585 if (sc->sc_bdma.dd_desc_len != 0)
3586 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
3587 if (sc->sc_txdma.dd_desc_len != 0)
3588 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3589 if (sc->sc_rxdma.dd_desc_len != 0)
3590 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3591}
3592
3593static struct ieee80211_node *
3594ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3595{
3596 struct ieee80211com *ic = vap->iv_ic;
3597 struct ath_softc *sc = ic->ic_ifp->if_softc;
3598 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
3599 struct ath_node *an;
3600
3601 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
3602 if (an == NULL) {
3603 /* XXX stat+msg */
3604 return NULL;
3605 }
3606 ath_rate_node_init(sc, an);
3607
3608 /* Setup the mutex - there's no associd yet so set the name to NULL */
3609 snprintf(an->an_name, sizeof(an->an_name), "%s: node %p",
3610 device_get_nameunit(sc->sc_dev), an);
3611 mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF);
3612
3613 /* XXX setup ath_tid */
3614 ath_tx_tid_init(sc, an);
3615
3616 DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
3617 return &an->an_node;
3618}
3619
3620static void
3621ath_node_cleanup(struct ieee80211_node *ni)
3622{
3623 struct ieee80211com *ic = ni->ni_ic;
3624 struct ath_softc *sc = ic->ic_ifp->if_softc;
3625
3626 /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */
3627 ath_tx_node_flush(sc, ATH_NODE(ni));
3628 ath_rate_node_cleanup(sc, ATH_NODE(ni));
3629 sc->sc_node_cleanup(ni);
3630}
3631
3632static void
3633ath_node_free(struct ieee80211_node *ni)
3634{
3635 struct ieee80211com *ic = ni->ni_ic;
3636 struct ath_softc *sc = ic->ic_ifp->if_softc;
3637
3638 DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);
3639 mtx_destroy(&ATH_NODE(ni)->an_mtx);
3640 sc->sc_node_free(ni);
3641}
3642
3643static void
3644ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
3645{
3646 struct ieee80211com *ic = ni->ni_ic;
3647 struct ath_softc *sc = ic->ic_ifp->if_softc;
3648 struct ath_hal *ah = sc->sc_ah;
3649
3650 *rssi = ic->ic_node_getrssi(ni);
3651 if (ni->ni_chan != IEEE80211_CHAN_ANYC)
3652 *noise = ath_hal_getchannoise(ah, ni->ni_chan);
3653 else
3654 *noise = -95; /* nominally correct */
3655}
3656
3657static int
3658ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
3659{
3660 struct ath_hal *ah = sc->sc_ah;
3661 int error;
3662 struct mbuf *m;
3663 struct ath_desc *ds;
3664
3665 m = bf->bf_m;
3666 if (m == NULL) {
3667 /*
3668 * NB: by assigning a page to the rx dma buffer we
3669 * implicitly satisfy the Atheros requirement that
3670 * this buffer be cache-line-aligned and sized to be
3671 * a multiple of the cache line size. Not doing this
3672 * causes weird stuff to happen (for the 5210 at least).
3673 */
3674 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3675 if (m == NULL) {
3676 DPRINTF(sc, ATH_DEBUG_ANY,
3677 "%s: no mbuf/cluster\n", __func__);
3678 sc->sc_stats.ast_rx_nombuf++;
3679 return ENOMEM;
3680 }
3681 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
3682
3683 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
3684 bf->bf_dmamap, m,
3685 bf->bf_segs, &bf->bf_nseg,
3686 BUS_DMA_NOWAIT);
3687 if (error != 0) {
3688 DPRINTF(sc, ATH_DEBUG_ANY,
3689 "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
3690 __func__, error);
3691 sc->sc_stats.ast_rx_busdma++;
3692 m_freem(m);
3693 return error;
3694 }
3695 KASSERT(bf->bf_nseg == 1,
3696 ("multi-segment packet; nseg %u", bf->bf_nseg));
3697 bf->bf_m = m;
3698 }
3699 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);
3700
3701 /*
3702 * Setup descriptors. For receive we always terminate
3703 * the descriptor list with a self-linked entry so we'll
3704 * not get overrun under high load (as can happen with a
3705 * 5212 when ANI processing enables PHY error frames).
3706 *
3707 * To ensure the last descriptor is self-linked we create
3708 * each descriptor as self-linked and add it to the end. As
3709 * each additional descriptor is added the previous self-linked
3710 * entry is ``fixed'' naturally. This should be safe even
3711 * if DMA is happening. When processing RX interrupts we
3712 * never remove/process the last, self-linked, entry on the
3713 * descriptor list. This ensures the hardware always has
3714 * someplace to write a new frame.
3715 */
3716 /*
3717 * 11N: we can no longer afford to self link the last descriptor.
3718 * MAC acknowledges BA status as long as it copies frames to host
3719 * buffer (or rx fifo). This can incorrectly acknowledge packets
3720 * to a sender if last desc is self-linked.
3721 */
3722 ds = bf->bf_desc;
3723 if (sc->sc_rxslink)
3724 ds->ds_link = bf->bf_daddr; /* link to self */
3725 else
3726 ds->ds_link = 0; /* terminate the list */
3727 ds->ds_data = bf->bf_segs[0].ds_addr;
3728 ath_hal_setuprxdesc(ah, ds
3729 , m->m_len /* buffer size */
3730 , 0
3731 );
3732
3733 if (sc->sc_rxlink != NULL)
3734 *sc->sc_rxlink = bf->bf_daddr;
3735 sc->sc_rxlink = &ds->ds_link;
3736 return 0;
3737}
3738
3739/*
3740 * Extend 15-bit time stamp from rx descriptor to
3741 * a full 64-bit TSF using the specified TSF.
3742 */
3743static __inline u_int64_t
3744ath_extend_tsf15(u_int32_t rstamp, u_int64_t tsf)
3745{
3746 if ((tsf & 0x7fff) < rstamp)
3747 tsf -= 0x8000;
3748
3749 return ((tsf &~ 0x7fff) | rstamp);
3750}
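/*
 * Editor's note: a worked example of the wrap handling above.  With
 * tsf = 0x12345678 (low 15 bits 0x5678) and rstamp = 0x7000, the
 * 15-bit counter must have wrapped since the frame arrived, so the
 * epoch is stepped back by 0x8000:
 *
 *	0x12345678 - 0x8000 = 0x1233d678
 *	(0x1233d678 & ~0x7fff) | 0x7000 = 0x1233f000
 *
 * which is slightly in the past relative to the current TSF, as a
 * receive timestamp should be.
 */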
3751
3752/*
3753 * Extend 32-bit time stamp from rx descriptor to
3754 * a full 64-bit TSF using the specified TSF.
3755 */
3756static __inline u_int64_t
3757ath_extend_tsf32(u_int32_t rstamp, u_int64_t tsf)
3758{
3759 u_int32_t tsf_low = tsf & 0xffffffff;
3760 u_int64_t tsf64 = (tsf & ~0xffffffffULL) | rstamp;
3761
3762 if (rstamp > tsf_low && (rstamp - tsf_low > 0x10000000))
3763 tsf64 -= 0x100000000ULL;
3764
3765 if (rstamp < tsf_low && (tsf_low - rstamp > 0x10000000))
3766 tsf64 += 0x100000000ULL;
3767
3768 return tsf64;
3769}
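/*
 * Editor's note: a worked example of the 32-bit variant.  With
 * tsf = 0x500001000 (tsf_low = 0x1000) and rstamp = 0xfffff000,
 * rstamp is far ahead of tsf_low, so the 32-bit counter rolled
 * over after the frame was received:
 *
 *	(0x500000000 | 0xfffff000) - 0x100000000 = 0x4fffff000
 *
 * placing the timestamp just before the rollover, in the previous
 * 2^32 usec epoch.
 */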
3770
3771/*
3772 * Extend the TSF from the RX descriptor to a full 64 bit TSF.
3773 * Earlier hardware versions only wrote the low 15 bits of the
3774 * TSF into the RX descriptor; later versions (AR5416 and up)
3775 * include the 32 bit TSF value.
3776 */
3777static __inline u_int64_t
3778ath_extend_tsf(struct ath_softc *sc, u_int32_t rstamp, u_int64_t tsf)
3779{
3780 if (sc->sc_rxtsf32)
3781 return ath_extend_tsf32(rstamp, tsf);
3782 else
3783 return ath_extend_tsf15(rstamp, tsf);
3784}
3785
3786/*
3787 * Intercept management frames to collect beacon rssi data
3788 * and to do ibss merges.
3789 */
3790static void
3791ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
3792 int subtype, int rssi, int nf)
3793{
3794 struct ieee80211vap *vap = ni->ni_vap;
3795 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
3796
3797 /*
3798 * Call up first so subsequent work can use information
3799 * potentially stored in the node (e.g. for ibss merge).
3800 */
3801 ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, nf);
3802 switch (subtype) {
3803 case IEEE80211_FC0_SUBTYPE_BEACON:
3804 /* update rssi statistics for use by the hal */
3805 ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
3806 if (sc->sc_syncbeacon &&
3807 ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) {
3808 /*
3809 * Resync beacon timers using the tsf of the beacon
3810 * frame we just received.
3811 */
3812 ath_beacon_config(sc, vap);
3813 }
3814 /* fall thru... */
3815 case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
3816 if (vap->iv_opmode == IEEE80211_M_IBSS &&
3817 vap->iv_state == IEEE80211_S_RUN) {
3818 uint32_t rstamp = sc->sc_lastrs->rs_tstamp;
3819 uint64_t tsf = ath_extend_tsf(sc, rstamp,
3820 ath_hal_gettsf64(sc->sc_ah));
3821 /*
3822 * Handle ibss merge as needed; check the tsf on the
3823 * frame before attempting the merge. The 802.11 spec
3824 * says the station should change its bssid to match
3825 * the oldest station with the same ssid, where oldest
3826 * is determined by the tsf. Note that hardware
3827 * reconfiguration happens through callback to
3828 * ath_newstate as the state machine will go from
3829 * RUN -> RUN when this happens.
3830 */
3831 if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
3832 DPRINTF(sc, ATH_DEBUG_STATE,
3833 "ibss merge, rstamp %u tsf %ju "
3834 "tstamp %ju\n", rstamp, (uintmax_t)tsf,
3835 (uintmax_t)ni->ni_tstamp.tsf);
3836 (void) ieee80211_ibss_merge(ni);
3837 }
3838 }
3839 break;
3840 }
3841}
3842
3843/*
3844 * Set the default antenna.
3845 */
3846static void
3847ath_setdefantenna(struct ath_softc *sc, u_int antenna)
3848{
3849 struct ath_hal *ah = sc->sc_ah;
3850
3851 /* XXX block beacon interrupts */
3852 ath_hal_setdefantenna(ah, antenna);
3853 if (sc->sc_defant != antenna)
3854 sc->sc_stats.ast_ant_defswitch++;
3855 sc->sc_defant = antenna;
3856 sc->sc_rxotherant = 0;
3857}
3858
3859static void
3860ath_rx_tap(struct ifnet *ifp, struct mbuf *m,
3861 const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
3862{
3863#define CHAN_HT20 htole32(IEEE80211_CHAN_HT20)
3864#define CHAN_HT40U htole32(IEEE80211_CHAN_HT40U)
3865#define CHAN_HT40D htole32(IEEE80211_CHAN_HT40D)
3866#define CHAN_HT (CHAN_HT20|CHAN_HT40U|CHAN_HT40D)
3867 struct ath_softc *sc = ifp->if_softc;
3868 const HAL_RATE_TABLE *rt;
3869 uint8_t rix;
3870
3871 rt = sc->sc_currates;
3872 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
3873 rix = rt->rateCodeToIndex[rs->rs_rate];
3874 sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
3875 sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
3876#ifdef AH_SUPPORT_AR5416
3877 sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT;
3878 if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) { /* HT rate */
3879 struct ieee80211com *ic = ifp->if_l2com;
3880
3881 if ((rs->rs_flags & HAL_RX_2040) == 0)
3882 sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
3883 else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan))
3884 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
3885 else
3886 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
3887 if ((rs->rs_flags & HAL_RX_GI) == 0)
3888 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI;
3889 }
3890#endif
3891 sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(sc, rs->rs_tstamp, tsf));
3892 if (rs->rs_status & HAL_RXERR_CRC)
3893 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
3894 /* XXX propagate other error flags from descriptor */
3895 sc->sc_rx_th.wr_antnoise = nf;
3896 sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi;
3897 sc->sc_rx_th.wr_antenna = rs->rs_antenna;
3898#undef CHAN_HT
3899#undef CHAN_HT20
3900#undef CHAN_HT40U
3901#undef CHAN_HT40D
3902}
3903
3904static void
3905ath_handle_micerror(struct ieee80211com *ic,
3906 struct ieee80211_frame *wh, int keyix)
3907{
3908 struct ieee80211_node *ni;
3909
3910 /* XXX recheck MIC to deal w/ chips that lie */
3911 /* XXX discard MIC errors on !data frames */
3912 ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
3913 if (ni != NULL) {
3914 ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix);
3915 ieee80211_free_node(ni);
3916 }
3917}
3918
3919/*
3920 * Only run the RX proc if it's not already running.
3921 * Since this may get run as part of the reset/flush path,
3922 * the task can't clash with an existing, running tasklet.
3923 */
3924static void
3925ath_rx_tasklet(void *arg, int npending)
3926{
3927 struct ath_softc *sc = arg;
3928
3929 CTR1(ATH_KTR_INTR, "ath_rx_proc: pending=%d", npending);
3930 DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
3931 ATH_PCU_LOCK(sc);
3932 if (sc->sc_inreset_cnt > 0) {
3933 device_printf(sc->sc_dev,
3934 "%s: sc_inreset_cnt > 0; skipping\n", __func__);
3935 ATH_PCU_UNLOCK(sc);
3936 return;
3937 }
3938 ATH_PCU_UNLOCK(sc);
3939 ath_rx_proc(sc, 1);
3940}
3941
3942static void
3943ath_rx_proc(struct ath_softc *sc, int resched)
3944{
3945#define PA2DESC(_sc, _pa) \
3946 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
3947 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
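	/*
	 * Editor's note: PA2DESC() maps a descriptor's bus address back
	 * to its virtual address by offsetting into the contiguous rx
	 * DMA block.  E.g. with dd_desc_paddr = 0x04000000 and a
	 * ds_link of 0x04000140, the result is dd_desc + 0x140.  This
	 * is what lets ath_hal_rxprocdesc() below peek at the next
	 * descriptor in the chain.
	 */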
3948 struct ath_buf *bf;
3949 struct ifnet *ifp = sc->sc_ifp;
3950 struct ieee80211com *ic = ifp->if_l2com;
3951 struct ath_hal *ah = sc->sc_ah;
3952 struct ath_desc *ds;
3953 struct ath_rx_status *rs;
3954 struct mbuf *m;
3955 struct ieee80211_node *ni;
3956 int len, type, ngood;
3957 HAL_STATUS status;
3958 int16_t nf;
3959 u_int64_t tsf;
3960 int npkts = 0;
3961
3962 /* XXX we must not hold the ATH_LOCK here */
3963 ATH_UNLOCK_ASSERT(sc);
3964 ATH_PCU_UNLOCK_ASSERT(sc);
3965
3966 ATH_PCU_LOCK(sc);
3967 sc->sc_rxproc_cnt++;
3968 ATH_PCU_UNLOCK(sc);
3969
3970 DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: called\n", __func__);
3971 ngood = 0;
3972 nf = ath_hal_getchannoise(ah, sc->sc_curchan);
3973 sc->sc_stats.ast_rx_noise = nf;
3974 tsf = ath_hal_gettsf64(ah);
3975 do {
3976 bf = TAILQ_FIRST(&sc->sc_rxbuf);
3977 if (sc->sc_rxslink && bf == NULL) { /* NB: shouldn't happen */
3978 if_printf(ifp, "%s: no buffer!\n", __func__);
3979 break;
3980 } else if (bf == NULL) {
3981 /*
3982 * End of List:
3983 * this can happen for non-self-linked RX chains
3984 */
3985 sc->sc_stats.ast_rx_hitqueueend++;
3986 break;
3987 }
3988 m = bf->bf_m;
3989 if (m == NULL) { /* NB: shouldn't happen */
3990 /*
3991 * If mbuf allocation failed previously there
3992 * will be no mbuf; try again to re-populate it.
3993 */
3994 /* XXX make debug msg */
3995 if_printf(ifp, "%s: no mbuf!\n", __func__);
3996 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
3997 goto rx_next;
3998 }
3999 ds = bf->bf_desc;
4000 if (ds->ds_link == bf->bf_daddr) {
4001 /* NB: never process the self-linked entry at the end */
4002 sc->sc_stats.ast_rx_hitqueueend++;
4003 break;
4004 }
4005 /* XXX sync descriptor memory */
4006 /*
4007 * Must provide the virtual address of the current
4008 * descriptor, the physical address, and the virtual
4009 * address of the next descriptor in the h/w chain.
4010 * This allows the HAL to look ahead to see if the
4011 * hardware is done with a descriptor by checking the
4012 * done bit in the following descriptor and the address
4013 * of the current descriptor the DMA engine is working
4014 * on. All this is necessary because of our use of
4015 * a self-linked list to avoid rx overruns.
4016 */
4017 rs = &bf->bf_status.ds_rxstat;
4018 status = ath_hal_rxprocdesc(ah, ds,
4019 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
4020#ifdef ATH_DEBUG
4021 if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
4022 ath_printrxbuf(sc, bf, 0, status == HAL_OK);
4023#endif
4024 if (status == HAL_EINPROGRESS)
4025 break;
4026
4027 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
4028 npkts++;
4029
4030 /* These aren't specifically errors */
4031#ifdef AH_SUPPORT_AR5416
4032 if (rs->rs_flags & HAL_RX_GI)
4033 sc->sc_stats.ast_rx_halfgi++;
4034 if (rs->rs_flags & HAL_RX_2040)
4035 sc->sc_stats.ast_rx_2040++;
4036 if (rs->rs_flags & HAL_RX_DELIM_CRC_PRE)
4037 sc->sc_stats.ast_rx_pre_crc_err++;
4038 if (rs->rs_flags & HAL_RX_DELIM_CRC_POST)
4039 sc->sc_stats.ast_rx_post_crc_err++;
4040 if (rs->rs_flags & HAL_RX_DECRYPT_BUSY)
4041 sc->sc_stats.ast_rx_decrypt_busy_err++;
4042 if (rs->rs_flags & HAL_RX_HI_RX_CHAIN)
4043 sc->sc_stats.ast_rx_hi_rx_chain++;
4044#endif /* AH_SUPPORT_AR5416 */
4045
4046 if (rs->rs_status != 0) {
4047 if (rs->rs_status & HAL_RXERR_CRC)
4048 sc->sc_stats.ast_rx_crcerr++;
4049 if (rs->rs_status & HAL_RXERR_FIFO)
4050 sc->sc_stats.ast_rx_fifoerr++;
4051 if (rs->rs_status & HAL_RXERR_PHY) {
4052 sc->sc_stats.ast_rx_phyerr++;
4053 /* Process DFS radar events */
4054 if ((rs->rs_phyerr == HAL_PHYERR_RADAR) ||
4055 (rs->rs_phyerr == HAL_PHYERR_FALSE_RADAR_EXT)) {
4056 /* Since we're touching the frame data, sync it */
4057 bus_dmamap_sync(sc->sc_dmat,
4058 bf->bf_dmamap,
4059 BUS_DMASYNC_POSTREAD);
4060 /* Now pass it to the radar processing code */
4061 ath_dfs_process_phy_err(sc, mtod(m, char *), tsf, rs);
4062 }
4063
4064 /* Be suitably paranoid about receiving phy errors out of the stats array bounds */
4065 if (rs->rs_phyerr < 64)
4066 sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++;
4067 goto rx_error; /* NB: don't count in ierrors */
4068 }
4069 if (rs->rs_status & HAL_RXERR_DECRYPT) {
4070 /*
4071 * Decrypt error. If the error occurred
4072 * because there was no hardware key, then
4073 * let the frame through so the upper layers
4074 * can process it. This is necessary for 5210
4075 * parts which have no way to setup a ``clear''
4076 * key cache entry.
4077 *
4078 * XXX do key cache faulting
4079 */
4080 if (rs->rs_keyix == HAL_RXKEYIX_INVALID)
4081 goto rx_accept;
4082 sc->sc_stats.ast_rx_badcrypt++;
4083 }
4084 if (rs->rs_status & HAL_RXERR_MIC) {
4085 sc->sc_stats.ast_rx_badmic++;
4086 /*
4087 * Do minimal work required to hand off
4088 * the 802.11 header for notification.
4089 */
4090 /* XXX frag's and qos frames */
4091 len = rs->rs_datalen;
4092 if (len >= sizeof (struct ieee80211_frame)) {
4093 bus_dmamap_sync(sc->sc_dmat,
4094 bf->bf_dmamap,
4095 BUS_DMASYNC_POSTREAD);
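				/*
				 * NB: on split-mic parts the h/w
				 * reports the slot of the separate
				 * RX MIC key, which sits 32 entries
				 * above the base key; adjust back
				 * to the base keyix.
				 */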
4096 ath_handle_micerror(ic,
4097 mtod(m, struct ieee80211_frame *),
4098 sc->sc_splitmic ?
4099 rs->rs_keyix-32 : rs->rs_keyix);
4100 }
4101 }
4102 ifp->if_ierrors++;
4103rx_error:
4104 /*
4105 * Cleanup any pending partial frame.
4106 */
4107 if (sc->sc_rxpending != NULL) {
4108 m_freem(sc->sc_rxpending);
4109 sc->sc_rxpending = NULL;
4110 }
4111 /*
4112 * When a tap is present pass error frames
4113 * that have been requested. By default we
4114 * pass decrypt+mic errors but others may be
4115 * interesting (e.g. crc).
4116 */
4117 if (ieee80211_radiotap_active(ic) &&
4118 (rs->rs_status & sc->sc_monpass)) {
4119 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
4120 BUS_DMASYNC_POSTREAD);
4121 /* NB: bpf needs the mbuf length setup */
4122 len = rs->rs_datalen;
4123 m->m_pkthdr.len = m->m_len = len;
4124 bf->bf_m = NULL;
4125 ath_rx_tap(ifp, m, rs, tsf, nf);
4126 ieee80211_radiotap_rx_all(ic, m);
4127 m_freem(m);
4128 }
4129			/* XXX pass MIC errors up for s/w recalculation */
4130 goto rx_next;
4131 }
4132rx_accept:
4133 /*
4134 * Sync and unmap the frame. At this point we're
4135 * committed to passing the mbuf somewhere so clear
4136 * bf_m; this means a new mbuf must be allocated
4137 * when the rx descriptor is setup again to receive
4138 * another frame.
4139 */
4140 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
4141 BUS_DMASYNC_POSTREAD);
4142 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
4143 bf->bf_m = NULL;
4144
4145 len = rs->rs_datalen;
4146 m->m_len = len;
4147
4148 if (rs->rs_more) {
4149 /*
4150 * Frame spans multiple descriptors; save
4151 * it for the next completed descriptor, it
4152 * will be used to construct a jumbogram.
4153 */
4154 if (sc->sc_rxpending != NULL) {
4155 /* NB: max frame size is currently 2 clusters */
4156 sc->sc_stats.ast_rx_toobig++;
4157 m_freem(sc->sc_rxpending);
4158 }
4159 m->m_pkthdr.rcvif = ifp;
4160 m->m_pkthdr.len = len;
4161 sc->sc_rxpending = m;
4162 goto rx_next;
4163 } else if (sc->sc_rxpending != NULL) {
4164 /*
4165 * This is the second part of a jumbogram,
4166 * chain it to the first mbuf, adjust the
4167 * frame length, and clear the rxpending state.
4168 */
4169 sc->sc_rxpending->m_next = m;
4170 sc->sc_rxpending->m_pkthdr.len += len;
4171 m = sc->sc_rxpending;
4172 sc->sc_rxpending = NULL;
4173 } else {
4174 /*
4175 * Normal single-descriptor receive; setup
4176 * the rcvif and packet length.
4177 */
4178 m->m_pkthdr.rcvif = ifp;
4179 m->m_pkthdr.len = len;
4180 }
4181
4182 /*
4183 * Validate rs->rs_antenna.
4184 *
4185 * Some users w/ AR9285 NICs have reported crashes
4186		 * here because the rs_antenna field is bogusly large.
4187 * Let's enforce the maximum antenna limit of 8
4188 * (and it shouldn't be hard coded, but that's a
4189 * separate problem) and if there's an issue, print
4190 * out an error and adjust rs_antenna to something
4191 * sensible.
4192 *
4193 * This code should be removed once the actual
4194 * root cause of the issue has been identified.
4195 * For example, it may be that the rs_antenna
4196		 * field is only valid for the last frame of
4197		 * an aggregate and it just happens that it is
4198		 * "mostly" right. (This is a general statement -
4199		 * the majority of the statistics are only valid
4200		 * for the last frame in an aggregate.)
4201 */
4202 if (rs->rs_antenna > 7) {
4203 device_printf(sc->sc_dev, "%s: rs_antenna > 7 (%d)\n",
4204 __func__, rs->rs_antenna);
4205#ifdef ATH_DEBUG
4206 ath_printrxbuf(sc, bf, 0, status == HAL_OK);
4207#endif /* ATH_DEBUG */
4208 rs->rs_antenna = 0; /* XXX better than nothing */
4209 }
4210
4211 ifp->if_ipackets++;
4212 sc->sc_stats.ast_ant_rx[rs->rs_antenna]++;
4213
4214 /*
4215 * Populate the rx status block. When there are bpf
4216 * listeners we do the additional work to provide
4217 * complete status. Otherwise we fill in only the
4218 * material required by ieee80211_input. Note that
4219 * noise setting is filled in above.
4220 */
4221 if (ieee80211_radiotap_active(ic))
4222 ath_rx_tap(ifp, m, rs, tsf, nf);
4223
4224 /*
4225 * From this point on we assume the frame is at least
4226 * as large as ieee80211_frame_min; verify that.
4227 */
4228 if (len < IEEE80211_MIN_LEN) {
4229 if (!ieee80211_radiotap_active(ic)) {
4230 DPRINTF(sc, ATH_DEBUG_RECV,
4231 "%s: short packet %d\n", __func__, len);
4232 sc->sc_stats.ast_rx_tooshort++;
4233 } else {
4234 /* NB: in particular this captures ack's */
4235 ieee80211_radiotap_rx_all(ic, m);
4236 }
4237 m_freem(m);
4238 goto rx_next;
4239 }
4240
4241 if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
4242 const HAL_RATE_TABLE *rt = sc->sc_currates;
4243 uint8_t rix = rt->rateCodeToIndex[rs->rs_rate];
4244
4245 ieee80211_dump_pkt(ic, mtod(m, caddr_t), len,
4246 sc->sc_hwmap[rix].ieeerate, rs->rs_rssi);
4247 }
4248
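		/* NB: trim the 4-byte FCS from the tail before handing up */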
4249 m_adj(m, -IEEE80211_CRC_LEN);
4250
4251 /*
4252 * Locate the node for sender, track state, and then
4253 * pass the (referenced) node up to the 802.11 layer
4254 * for its use.
4255 */
4256 ni = ieee80211_find_rxnode_withkey(ic,
4257 mtod(m, const struct ieee80211_frame_min *),
4258 rs->rs_keyix == HAL_RXKEYIX_INVALID ?
4259 IEEE80211_KEYIX_NONE : rs->rs_keyix);
4260 sc->sc_lastrs = rs;
4261
4262#ifdef AH_SUPPORT_AR5416
4263 if (rs->rs_isaggr)
4264 sc->sc_stats.ast_rx_agg++;
4265#endif /* AH_SUPPORT_AR5416 */
4266
4267 if (ni != NULL) {
4268 /*
4269 * Only punt packets for ampdu reorder processing for
4270 * 11n nodes; net80211 enforces that M_AMPDU is only
4271 * set for 11n nodes.
4272 */
4273 if (ni->ni_flags & IEEE80211_NODE_HT)
4274 m->m_flags |= M_AMPDU;
4275
4276 /*
4277 * Sending station is known, dispatch directly.
4278 */
4279 type = ieee80211_input(ni, m, rs->rs_rssi, nf);
4280 ieee80211_free_node(ni);
4281 /*
4282 * Arrange to update the last rx timestamp only for
4283 * frames from our ap when operating in station mode.
4284 * This assumes the rx key is always setup when
4285 * associated.
4286 */
4287 if (ic->ic_opmode == IEEE80211_M_STA &&
4288 rs->rs_keyix != HAL_RXKEYIX_INVALID)
4289 ngood++;
4290 } else {
4291 type = ieee80211_input_all(ic, m, rs->rs_rssi, nf);
4292 }
4293 /*
4294 * Track rx rssi and do any rx antenna management.
4295 */
4296 ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi);
4297 if (sc->sc_diversity) {
4298 /*
4299 * When using fast diversity, change the default rx
4300 * antenna if diversity chooses the other antenna 3
4301 * times in a row.
4302 */
4303 if (sc->sc_defant != rs->rs_antenna) {
4304 if (++sc->sc_rxotherant >= 3)
4305 ath_setdefantenna(sc, rs->rs_antenna);
4306 } else
4307 sc->sc_rxotherant = 0;
4308 }
4309
4310 /* Newer school diversity - kite specific for now */
4311 /* XXX perhaps migrate the normal diversity code to this? */
4312 if ((ah)->ah_rxAntCombDiversity)
4313 (*(ah)->ah_rxAntCombDiversity)(ah, rs, ticks, hz);
4314
4315 if (sc->sc_softled) {
4316 /*
4317 * Blink for any data frame. Otherwise do a
4318 * heartbeat-style blink when idle. The latter
4319 * is mainly for station mode where we depend on
4320 * periodic beacon frames to trigger the poll event.
4321 */
4322 if (type == IEEE80211_FC0_TYPE_DATA) {
4323 const HAL_RATE_TABLE *rt = sc->sc_currates;
4324 ath_led_event(sc,
4325 rt->rateCodeToIndex[rs->rs_rate]);
4326 } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
4327 ath_led_event(sc, 0);
4328 }
4329rx_next:
4330 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
4331 } while (ath_rxbuf_init(sc, bf) == 0);
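	/*
	 * NB: the loop also terminates if ath_rxbuf_init() fails, e.g.
	 * on mbuf allocation failure; the buffer is left in place and
	 * re-populated on a later pass (see the no-mbuf case above).
	 */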
4332
4333 /* rx signal state monitoring */
4334 ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);
4335 if (ngood)
4336 sc->sc_lastrx = tsf;
4337
4338 CTR2(ATH_KTR_INTR, "ath_rx_proc: npkts=%d, ngood=%d", npkts, ngood);
4339 /* Queue DFS tasklet if needed */
4340 if (resched && ath_dfs_tasklet_needed(sc, sc->sc_curchan))
4341 taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);
4342
4343 /*
4344	 * Now that all the RX frames that needed handling
4345	 * have been handled, kick the PCU if there's
4346	 * been an RXEOL condition.
4347 */
4348 ATH_PCU_LOCK(sc);
4349 if (resched && sc->sc_kickpcu) {
4350 CTR0(ATH_KTR_ERR, "ath_rx_proc: kickpcu");
4351 device_printf(sc->sc_dev, "%s: kickpcu; handled %d packets\n",
4352 __func__, npkts);
4353
4354 /* XXX rxslink? */
4355 /*
4356 * XXX can we hold the PCU lock here?
4357 * Are there any net80211 buffer calls involved?
4358 */
4359 bf = TAILQ_FIRST(&sc->sc_rxbuf);
4360 ath_hal_putrxbuf(ah, bf->bf_daddr);
4361 ath_hal_rxena(ah); /* enable recv descriptors */
4362 ath_mode_init(sc); /* set filters, etc. */
4363 ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */
4364
4365 ath_hal_intrset(ah, sc->sc_imask);
4366 sc->sc_kickpcu = 0;
4367 }
4368 ATH_PCU_UNLOCK(sc);
4369
4370 /* XXX check this inside of IF_LOCK? */
4371 if (resched && (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
4372#ifdef IEEE80211_SUPPORT_SUPERG
4373 ieee80211_ff_age_all(ic, 100);
4374#endif
4375 if (!IFQ_IS_EMPTY(&ifp->if_snd))
4376 ath_start(ifp);
4377 }
4378#undef PA2DESC
4379
4380 ATH_PCU_LOCK(sc);
4381 sc->sc_rxproc_cnt--;
4382 ATH_PCU_UNLOCK(sc);
4383}
4384
4385static void
4386ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
4387{
4388 txq->axq_qnum = qnum;
4389 txq->axq_ac = 0;
4390 txq->axq_depth = 0;
4391 txq->axq_aggr_depth = 0;
4392 txq->axq_intrcnt = 0;
4393 txq->axq_link = NULL;
4394 txq->axq_softc = sc;
4395 TAILQ_INIT(&txq->axq_q);
4396 TAILQ_INIT(&txq->axq_tidq);
4397 ATH_TXQ_LOCK_INIT(sc, txq);
4398}
4399
4400/*
4401 * Setup a h/w transmit queue.
4402 */
4403static struct ath_txq *
4404ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
4405{
4406#define N(a) (sizeof(a)/sizeof(a[0]))
4407 struct ath_hal *ah = sc->sc_ah;
4408 HAL_TXQ_INFO qi;
4409 int qnum;
4410
4411 memset(&qi, 0, sizeof(qi));
4412 qi.tqi_subtype = subtype;
4413 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
4414 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
4415 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
4416 /*
4417 * Enable interrupts only for EOL and DESC conditions.
4418 * We mark tx descriptors to receive a DESC interrupt
4419	 * when a tx queue gets deep; otherwise we wait for the
4420	 * EOL interrupt to reap descriptors. Note that this is
4421	 * done to reduce interrupt load and that it only defers
4422	 * reaping descriptors, never transmitting frames. Aside
4423	 * from reducing interrupts this also permits more
4424	 * concurrency. The only potential downside is if the tx
4425	 * queue backs up, in which case the top half of the kernel
4426	 * may back up due to a lack of tx descriptors.
4427 */
4428 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE;
4429 qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
4430 if (qnum == -1) {
4431 /*
4432 * NB: don't print a message, this happens
4433 * normally on parts with too few tx queues
4434 */
4435 return NULL;
4436 }
4437 if (qnum >= N(sc->sc_txq)) {
4438 device_printf(sc->sc_dev,
4439 "hal qnum %u out of range, max %zu!\n",
4440 qnum, N(sc->sc_txq));
4441 ath_hal_releasetxqueue(ah, qnum);
4442 return NULL;
4443 }
4444 if (!ATH_TXQ_SETUP(sc, qnum)) {
4445 ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
4446 sc->sc_txqsetup |= 1<<qnum;
4447 }
4448 return &sc->sc_txq[qnum];
4449#undef N
4450}
4451
4452/*
4453 * Setup a hardware data transmit queue for the specified
4454 * access category. The hal may not support all requested
4455 * queues in which case it will return a reference to a
4456 * previously setup queue. We record the mapping from ac's
4457 * to h/w queues for use by ath_tx_start and also track
4458 * the set of h/w queues being used to optimize work in the
4459 * transmit interrupt handler and related routines.
4460 */
4461static int
4462ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
4463{
4464#define N(a) (sizeof(a)/sizeof(a[0]))
4465 struct ath_txq *txq;
4466
4467 if (ac >= N(sc->sc_ac2q)) {
4468 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
4469 ac, N(sc->sc_ac2q));
4470 return 0;
4471 }
4472 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
4473 if (txq != NULL) {
4474 txq->axq_ac = ac;
4475 sc->sc_ac2q[ac] = txq;
4476 return 1;
4477 } else
4478 return 0;
4479#undef N
4480}
4481
4482/*
4483 * Update WME parameters for a transmit queue.
4484 */
4485static int
4486ath_txq_update(struct ath_softc *sc, int ac)
4487{
4488#define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1)
4489#define ATH_TXOP_TO_US(v) (v<<5)
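/*
 * NB: net80211 supplies cwmin/cwmax as log2 exponents and the TXOP
 * limit in units of 32us; e.g. wmep_logcwmin = 4 yields a cwmin of
 * (1<<4)-1 = 15 slots and wmep_txopLimit = 94 yields a burst time
 * of 94<<5 = 3008us.
 */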
4490 struct ifnet *ifp = sc->sc_ifp;
4491 struct ieee80211com *ic = ifp->if_l2com;
4492 struct ath_txq *txq = sc->sc_ac2q[ac];
4493 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
4494 struct ath_hal *ah = sc->sc_ah;
4495 HAL_TXQ_INFO qi;
4496
4497 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
4498#ifdef IEEE80211_SUPPORT_TDMA
4499 if (sc->sc_tdma) {
4500 /*
4501 * AIFS is zero so there's no pre-transmit wait. The
4502 * burst time defines the slot duration and is configured
4503 * through net80211. The QCU is setup to not do post-xmit
4504 * back off, lockout all lower-priority QCU's, and fire
4505 * off the DMA beacon alert timer which is setup based
4506 * on the slot configuration.
4507 */
4508 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
4509 | HAL_TXQ_TXERRINT_ENABLE
4510 | HAL_TXQ_TXURNINT_ENABLE
4511 | HAL_TXQ_TXEOLINT_ENABLE
4512 | HAL_TXQ_DBA_GATED
4513 | HAL_TXQ_BACKOFF_DISABLE
4514 | HAL_TXQ_ARB_LOCKOUT_GLOBAL
4515 ;
4516 qi.tqi_aifs = 0;
4517 /* XXX +dbaprep? */
4518 qi.tqi_readyTime = sc->sc_tdmaslotlen;
4519 qi.tqi_burstTime = qi.tqi_readyTime;
4520 } else {
4521#endif
4522 /*
4523 * XXX shouldn't this just use the default flags
4524 * used in the previous queue setup?
4525 */
4526 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
4527 | HAL_TXQ_TXERRINT_ENABLE
4528 | HAL_TXQ_TXDESCINT_ENABLE
4529 | HAL_TXQ_TXURNINT_ENABLE
4530 | HAL_TXQ_TXEOLINT_ENABLE
4531 ;
4532 qi.tqi_aifs = wmep->wmep_aifsn;
4533 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
4534 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
4535 qi.tqi_readyTime = 0;
4536 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
4537#ifdef IEEE80211_SUPPORT_TDMA
4538 }
4539#endif
4540
4541 DPRINTF(sc, ATH_DEBUG_RESET,
4542 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n",
4543 __func__, txq->axq_qnum, qi.tqi_qflags,
4544 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime);
4545
4546 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
4547 if_printf(ifp, "unable to update hardware queue "
4548 "parameters for %s traffic!\n",
4549 ieee80211_wme_acnames[ac]);
4550 return 0;
4551 } else {
4552 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
4553 return 1;
4554 }
4555#undef ATH_TXOP_TO_US
4556#undef ATH_EXPONENT_TO_VALUE
4557}
4558
4559/*
4560 * Callback from the 802.11 layer to update WME parameters.
4561 */
4562static int
4563ath_wme_update(struct ieee80211com *ic)
4564{
4565 struct ath_softc *sc = ic->ic_ifp->if_softc;
4566
4567 return !ath_txq_update(sc, WME_AC_BE) ||
4568 !ath_txq_update(sc, WME_AC_BK) ||
4569 !ath_txq_update(sc, WME_AC_VI) ||
4570 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
4571}
4572
4573/*
4574 * Reclaim resources for a setup queue.
4575 */
4576static void
4577ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
4578{
4579
4580 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
4581 ATH_TXQ_LOCK_DESTROY(txq);
4582 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
4583}
4584
4585/*
4586 * Reclaim all tx queue resources.
4587 */
4588static void
4589ath_tx_cleanup(struct ath_softc *sc)
4590{
4591 int i;
4592
4593 ATH_TXBUF_LOCK_DESTROY(sc);
4594 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4595 if (ATH_TXQ_SETUP(sc, i))
4596 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
4597}
4598
4599/*
4600 * Return h/w rate index for an IEEE rate (w/o basic rate bit)
4601 * using the current rates in sc_rixmap.
4602 */
4603int
4604ath_tx_findrix(const struct ath_softc *sc, uint8_t rate)
4605{
4606 int rix = sc->sc_rixmap[rate];
4607 /* NB: return lowest rix for invalid rate */
4608 return (rix == 0xff ? 0 : rix);
4609}
4610
4611static void
4612ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts,
4613 struct ath_buf *bf)
4614{
4615 struct ieee80211_node *ni = bf->bf_node;
4616 struct ifnet *ifp = sc->sc_ifp;
4617 struct ieee80211com *ic = ifp->if_l2com;
4618 int sr, lr, pri;
4619
4620 if (ts->ts_status == 0) {
4621 u_int8_t txant = ts->ts_antenna;
4622 sc->sc_stats.ast_ant_tx[txant]++;
4623 sc->sc_ant_tx[txant]++;
4624 if (ts->ts_finaltsi != 0)
4625 sc->sc_stats.ast_tx_altrate++;
4626 pri = M_WME_GETAC(bf->bf_m);
4627 if (pri >= WME_AC_VO)
4628 ic->ic_wme.wme_hipri_traffic++;
4629 if ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)
4630 ni->ni_inact = ni->ni_inact_reload;
4631 } else {
4632 if (ts->ts_status & HAL_TXERR_XRETRY)
4633 sc->sc_stats.ast_tx_xretries++;
4634 if (ts->ts_status & HAL_TXERR_FIFO)
4635 sc->sc_stats.ast_tx_fifoerr++;
4636 if (ts->ts_status & HAL_TXERR_FILT)
4637 sc->sc_stats.ast_tx_filtered++;
4638 if (ts->ts_status & HAL_TXERR_XTXOP)
4639 sc->sc_stats.ast_tx_xtxop++;
4640 if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
4641 sc->sc_stats.ast_tx_timerexpired++;
4642
4643 if (ts->ts_status & HAL_TX_DATA_UNDERRUN)
4644 sc->sc_stats.ast_tx_data_underrun++;
4645 if (ts->ts_status & HAL_TX_DELIM_UNDERRUN)
4646 sc->sc_stats.ast_tx_delim_underrun++;
4647
4648 if (bf->bf_m->m_flags & M_FF)
4649 sc->sc_stats.ast_ff_txerr++;
4650 }
4651 /* XXX when is this valid? */
4652 if (ts->ts_status & HAL_TX_DESC_CFG_ERR)
4653 sc->sc_stats.ast_tx_desccfgerr++;
4654
4655 sr = ts->ts_shortretry;
4656 lr = ts->ts_longretry;
4657 sc->sc_stats.ast_tx_shortretry += sr;
4658 sc->sc_stats.ast_tx_longretry += lr;
4659
4660}
4661
4662/*
4663 * The default completion. If fail is 1, this means
4664 * "please don't retry the frame, and just return -1 status
4665 * to the net80211 stack".
4666 */
4667void
4668ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4669{
4670 struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4671 int st;
4672
4673 if (fail == 1)
4674 st = -1;
4675 else
4676 st = ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0) ?
4677 ts->ts_status : HAL_TXERR_XRETRY;
4678
4679 if (bf->bf_state.bfs_dobaw)
4680 device_printf(sc->sc_dev,
4681 "%s: dobaw should've been cleared!\n", __func__);
4682 if (bf->bf_next != NULL)
4683 device_printf(sc->sc_dev,
4684 "%s: bf_next not NULL!\n", __func__);
4685
4686 /*
4687 * Do any tx complete callback. Note this must
4688 * be done before releasing the node reference.
4689 * This will free the mbuf, release the net80211
4690 * node and recycle the ath_buf.
4691 */
4692 ath_tx_freebuf(sc, bf, st);
4693}
4694
4695/*
4696 * Update rate control with the given completion status.
4697 */
4698void
4699ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
4700 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen,
4701 int nframes, int nbad)
4702{
4703 struct ath_node *an;
4704
4705 /* Only for unicast frames */
4706 if (ni == NULL)
4707 return;
4708
4709 an = ATH_NODE(ni);
4710
4711 if ((ts->ts_status & HAL_TXERR_FILT) == 0) {
4712 ATH_NODE_LOCK(an);
4713 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad);
4714 ATH_NODE_UNLOCK(an);
4715 }
4716}
4717
4718/*
4719 * Update the busy status of the last frame on the free list.
4720 * When doing TDMA, the busy flag tracks whether the hardware
4721 * currently points to this buffer or not, and thus gated DMA
4722 * may restart by re-reading the last descriptor in this
4723 * buffer.
4724 *
4725 * This should be called in the completion function once one
4726 * of the buffers has been used.
4727 */
4728static void
4729ath_tx_update_busy(struct ath_softc *sc)
4730{
4731 struct ath_buf *last;
4732
4733 /*
4734 * Since the last frame may still be marked
4735 * as ATH_BUF_BUSY, unmark it here before
4736 * finishing the frame processing.
4737 * Since we've completed a frame (aggregate
4738 * or otherwise), the hardware has moved on
4739 * and is no longer referencing the previous
4740 * descriptor.
4741 */
4742 ATH_TXBUF_LOCK_ASSERT(sc);
4743 last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
4744 if (last != NULL)
4745 last->bf_flags &= ~ATH_BUF_BUSY;
4746}
4747
4748
4749/*
4750 * Process completed xmit descriptors from the specified queue.
4751 * Kick the packet scheduler if needed. This can occur from this
4752 * particular task.
4753 */
4754static int
4755ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
4756{
4757 struct ath_hal *ah = sc->sc_ah;
4758 struct ath_buf *bf;
4759 struct ath_desc *ds;
4760 struct ath_tx_status *ts;
4761 struct ieee80211_node *ni;
4762	struct ath_node *an;
#ifdef IEEE80211_SUPPORT_SUPERG
	struct ieee80211com *ic = sc->sc_ifp->if_l2com; /* NB: for ieee80211_ff_flush() */
#endif
4763 int nacked;
4764 HAL_STATUS status;
4765
4766 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
4767 __func__, txq->axq_qnum,
4768 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
4769 txq->axq_link);
4770 nacked = 0;
4771 for (;;) {
4772 ATH_TXQ_LOCK(txq);
4773 txq->axq_intrcnt = 0; /* reset periodic desc intr count */
4774 bf = TAILQ_FIRST(&txq->axq_q);
4775 if (bf == NULL) {
4776 ATH_TXQ_UNLOCK(txq);
4777 break;
4778 }
4779 ds = bf->bf_lastds; /* XXX must be setup correctly! */
4780 ts = &bf->bf_status.ds_txstat;
4781 status = ath_hal_txprocdesc(ah, ds, ts);
4782#ifdef ATH_DEBUG
4783 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
4784 ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
4785 status == HAL_OK);
4786#endif
4787 if (status == HAL_EINPROGRESS) {
4788 ATH_TXQ_UNLOCK(txq);
4789 break;
4790 }
4791 ATH_TXQ_REMOVE(txq, bf, bf_list);
4792#ifdef IEEE80211_SUPPORT_TDMA
4793 if (txq->axq_depth > 0) {
4794 /*
4795 * More frames follow. Mark the buffer busy
4796 * so it's not re-used while the hardware may
4797 * still re-read the link field in the descriptor.
4798 *
4799 * Use the last buffer in an aggregate as that
4800 * is where the hardware may be - intermediate
4801 * descriptors won't be "busy".
4802 */
4803 bf->bf_last->bf_flags |= ATH_BUF_BUSY;
4804 } else
4805#else
4806 if (txq->axq_depth == 0)
4807#endif
4808 txq->axq_link = NULL;
4809 if (bf->bf_state.bfs_aggr)
4810 txq->axq_aggr_depth--;
4811
4812 ni = bf->bf_node;
4813 /*
4814 * If unicast frame was ack'd update RSSI,
4815 * including the last rx time used to
4816 * workaround phantom bmiss interrupts.
4817 */
4818 if (ni != NULL && ts->ts_status == 0 &&
4819 ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)) {
4820 nacked++;
4821 sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
4822 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
4823 ts->ts_rssi);
4824 }
4825 ATH_TXQ_UNLOCK(txq);
4826
4827 /* If unicast frame, update general statistics */
4828 if (ni != NULL) {
4829 an = ATH_NODE(ni);
4830 /* update statistics */
4831 ath_tx_update_stats(sc, ts, bf);
4832 }
4833
4834 /*
4835 * Call the completion handler.
4836 * The completion handler is responsible for
4837 * calling the rate control code.
4838 *
4839		 * For frames with no completion handler, the
4840		 * rate control code is invoked here.
4841 */
4842 if (bf->bf_comp == NULL) {
4843 if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
4844 (bf->bf_txflags & HAL_TXDESC_NOACK) == 0) {
4845 /*
4846 * XXX assume this isn't an aggregate
4847 * frame.
4848 */
4849 ath_tx_update_ratectrl(sc, ni,
4850 bf->bf_state.bfs_rc, ts,
4851 bf->bf_state.bfs_pktlen, 1,
4852 (ts->ts_status == 0 ? 0 : 1));
4853 }
4854 ath_tx_default_comp(sc, bf, 0);
4855 } else
4856 bf->bf_comp(sc, bf, 0);
4857 }
4858#ifdef IEEE80211_SUPPORT_SUPERG
4859 /*
4860 * Flush fast-frame staging queue when traffic slows.
4861 */
4862 if (txq->axq_depth <= 1)
4863 ieee80211_ff_flush(ic, txq->axq_ac);
4864#endif
4865
4866 /* Kick the TXQ scheduler */
4867 if (dosched) {
4868 ATH_TXQ_LOCK(txq);
4869 ath_txq_sched(sc, txq);
4870 ATH_TXQ_UNLOCK(txq);
4871 }
4872
4873 return nacked;
4874}
4875
4876#define TXQACTIVE(t, q) ( (t) & (1 << (q)))
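/*
 * NB: sc_txq_active is a bitmask of h/w tx queues needing service
 * (set in the interrupt path); each handler below snapshots and
 * clears it under the PCU lock.
 */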
4877
4878/*
4879 * Deferred processing of transmit interrupt; special-cased
4880 * for a single hardware transmit queue (e.g. 5210 and 5211).
4881 */
4882static void
4883ath_tx_proc_q0(void *arg, int npending)
4884{
4885 struct ath_softc *sc = arg;
4886 struct ifnet *ifp = sc->sc_ifp;
4887 uint32_t txqs;
4888
4889 ATH_PCU_LOCK(sc);
4890 sc->sc_txproc_cnt++;
4891 txqs = sc->sc_txq_active;
4892 sc->sc_txq_active &= ~txqs;
4893 ATH_PCU_UNLOCK(sc);
4894
4895 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1))
4896 /* XXX why is lastrx updated in tx code? */
4897 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4898 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
4899 ath_tx_processq(sc, sc->sc_cabq, 1);
4900 /* XXX check this inside of IF_LOCK? */
4901 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4902 sc->sc_wd_timer = 0;
4903
4904 if (sc->sc_softled)
4905 ath_led_event(sc, sc->sc_txrix);
4906
4907 ATH_PCU_LOCK(sc);
4908 sc->sc_txproc_cnt--;
4909 ATH_PCU_UNLOCK(sc);
4910
4911 ath_start(ifp);
4912}
4913
4914/*
4915 * Deferred processing of transmit interrupt; special-cased
4916 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
4917 */
4918static void
4919ath_tx_proc_q0123(void *arg, int npending)
4920{
4921 struct ath_softc *sc = arg;
4922 struct ifnet *ifp = sc->sc_ifp;
4923 int nacked;
4924 uint32_t txqs;
4925
4926 ATH_PCU_LOCK(sc);
4927 sc->sc_txproc_cnt++;
4928 txqs = sc->sc_txq_active;
4929 sc->sc_txq_active &= ~txqs;
4930 ATH_PCU_UNLOCK(sc);
4931
4932 /*
4933 * Process each active queue.
4934 */
4935 nacked = 0;
4936 if (TXQACTIVE(txqs, 0))
4937 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1);
4938 if (TXQACTIVE(txqs, 1))
4939 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1);
4940 if (TXQACTIVE(txqs, 2))
4941 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1);
4942 if (TXQACTIVE(txqs, 3))
4943 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1);
4944 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
4945 ath_tx_processq(sc, sc->sc_cabq, 1);
4946 if (nacked)
4947 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4948
4949 /* XXX check this inside of IF_LOCK? */
4950 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4951 sc->sc_wd_timer = 0;
4952
4953 if (sc->sc_softled)
4954 ath_led_event(sc, sc->sc_txrix);
4955
4956 ATH_PCU_LOCK(sc);
4957 sc->sc_txproc_cnt--;
4958 ATH_PCU_UNLOCK(sc);
4959
4960 ath_start(ifp);
4961}
4962
4963/*
4964 * Deferred processing of transmit interrupt.
4965 */
4966static void
4967ath_tx_proc(void *arg, int npending)
4968{
4969 struct ath_softc *sc = arg;
4970 struct ifnet *ifp = sc->sc_ifp;
4971 int i, nacked;
4972 uint32_t txqs;
4973
4974 ATH_PCU_LOCK(sc);
4975 sc->sc_txproc_cnt++;
4976 txqs = sc->sc_txq_active;
4977 sc->sc_txq_active &= ~txqs;
4978 ATH_PCU_UNLOCK(sc);
4979
4980 /*
4981 * Process each active queue.
4982 */
4983 nacked = 0;
4984 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4985 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i))
4986 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1);
4987 if (nacked)
4988 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4989
4990 /* XXX check this inside of IF_LOCK? */
4991 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4992 sc->sc_wd_timer = 0;
4993
4994 if (sc->sc_softled)
4995 ath_led_event(sc, sc->sc_txrix);
4996
4997 ATH_PCU_LOCK(sc);
4998 sc->sc_txproc_cnt--;
4999 ATH_PCU_UNLOCK(sc);
5000
5001 ath_start(ifp);
5002}
5003#undef TXQACTIVE
5004
5005/*
5006 * Return a buffer to the pool and update the 'busy' flag on the
5007 * previous 'tail' entry.
5008 *
5009 * This _must_ only be called when the buffer is involved in a completed
5010 * TX. The logic is that since this buffer's TX has completed, the previous
5011 * buffer on the list can no longer be referenced by a halted TX DMA queue
5012 * waiting for restart (e.g. for TDMA.)
5013 *
5014 * The caller must free the mbuf and recycle the node reference.
5015 */
5016void
5017ath_freebuf(struct ath_softc *sc, struct ath_buf *bf)
5018{
5019 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
5020 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE);
5021
5022 KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__));
5023 KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__));
5024
5025 ATH_TXBUF_LOCK(sc);
5026 ath_tx_update_busy(sc);
5027 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
5028 ATH_TXBUF_UNLOCK(sc);
5029}
5030
5031/*
5032 * This is currently used by ath_tx_draintxq() and
5033 * ath_tx_tid_free_pkts().
5034 *
5035 * It recycles a single ath_buf.
5036 */
5037void
5038ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status)
5039{
5040 struct ieee80211_node *ni = bf->bf_node;
5041 struct mbuf *m0 = bf->bf_m;
5042
5043 bf->bf_node = NULL;
5044 bf->bf_m = NULL;
5045
5046 /* Free the buffer, it's not needed any longer */
5047 ath_freebuf(sc, bf);
5048
5049 if (ni != NULL) {
5050 /*
5051 * Do any callback and reclaim the node reference.
5052 */
5053 if (m0->m_flags & M_TXCB)
5054 ieee80211_process_callback(ni, m0, status);
5055 ieee80211_free_node(ni);
5056 }
5057 m_freem(m0);
5058
5059 /*
5060 * XXX the buffer used to be freed -after-, but the DMA map was
5061 * freed where ath_freebuf() now is. I've no idea what this
5062 * will do.
5063 */
5064}
5065
5066void
5067ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
5068{
5069#ifdef ATH_DEBUG
5070 struct ath_hal *ah = sc->sc_ah;
5071#endif
5072 struct ath_buf *bf;
5073 u_int ix;
5074
5075 /*
5076 * NB: this assumes output has been stopped and
5077 * we do not need to block ath_tx_proc
5078 */
5079 ATH_TXBUF_LOCK(sc);
5080 bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
5081 if (bf != NULL)
5082 bf->bf_flags &= ~ATH_BUF_BUSY;
5083 ATH_TXBUF_UNLOCK(sc);
5084
5085 for (ix = 0;; ix++) {
5086 ATH_TXQ_LOCK(txq);
5087 bf = TAILQ_FIRST(&txq->axq_q);
5088 if (bf == NULL) {
5089 txq->axq_link = NULL;
5090 ATH_TXQ_UNLOCK(txq);
5091 break;
5092 }
5093 ATH_TXQ_REMOVE(txq, bf, bf_list);
5094 if (bf->bf_state.bfs_aggr)
5095 txq->axq_aggr_depth--;
5096#ifdef ATH_DEBUG
5097 if (sc->sc_debug & ATH_DEBUG_RESET) {
5098 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
5099
5100 ath_printtxbuf(sc, bf, txq->axq_qnum, ix,
5101 ath_hal_txprocdesc(ah, bf->bf_lastds,
5102 &bf->bf_status.ds_txstat) == HAL_OK);
5103 ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *),
5104 bf->bf_m->m_len, 0, -1);
5105 }
5106#endif /* ATH_DEBUG */
5107 /*
5108 * Since we're now doing magic in the completion
5109 * functions, we -must- call it for aggregation
5110 * destinations or BAW tracking will get upset.
5111 */
5112 /*
5113 * Clear ATH_BUF_BUSY; the completion handler
5114 * will free the buffer.
5115 */
5116 ATH_TXQ_UNLOCK(txq);
5117 bf->bf_flags &= ~ATH_BUF_BUSY;
5118 if (bf->bf_comp)
5119 bf->bf_comp(sc, bf, 1);
5120 else
5121 ath_tx_default_comp(sc, bf, 1);
5122 }
5123
5124 /*
5125 * Drain software queued frames which are on
5126 * active TIDs.
5127 */
5128 ath_tx_txq_drain(sc, txq);
5129}
5130
5131static void
5132ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
5133{
5134 struct ath_hal *ah = sc->sc_ah;
5135
5136 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
5137 __func__, txq->axq_qnum,
5138 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
5139 txq->axq_link);
5140 (void) ath_hal_stoptxdma(ah, txq->axq_qnum);
5141}
5142
5143static int
5144ath_stoptxdma(struct ath_softc *sc)
5145{
5146 struct ath_hal *ah = sc->sc_ah;
5147 int i;
5148
5149 /* XXX return value */
5150 if (sc->sc_invalid)
5151 return 0;
5152
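	/* NB: sc_invalid was already checked above; this re-check is redundant */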
5153 if (!sc->sc_invalid) {
5154 /* don't touch the hardware if marked invalid */
5155 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
5156 __func__, sc->sc_bhalq,
5157 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
5158 NULL);
5159 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
5160 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
5161 if (ATH_TXQ_SETUP(sc, i))
5162 ath_tx_stopdma(sc, &sc->sc_txq[i]);
5163 }
5164
5165 return 1;
5166}
5167
5168/*
5169 * Drain the transmit queues and reclaim resources.
5170 */
5171static void
5172ath_draintxq(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
5173{
5174#ifdef ATH_DEBUG
5175 struct ath_hal *ah = sc->sc_ah;
5176#endif
5177 struct ifnet *ifp = sc->sc_ifp;
5178 int i;
5179
5180 (void) ath_stoptxdma(sc);
5181
5182 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
5183 /*
5184 * XXX TODO: should we just handle the completed TX frames
5185 * here, whether or not the reset is a full one or not?
5186 */
5187 if (ATH_TXQ_SETUP(sc, i)) {
5188 if (reset_type == ATH_RESET_NOLOSS)
5189 ath_tx_processq(sc, &sc->sc_txq[i], 0);
5190 else
5191 ath_tx_draintxq(sc, &sc->sc_txq[i]);
5192 }
5193 }
5194#ifdef ATH_DEBUG
5195 if (sc->sc_debug & ATH_DEBUG_RESET) {
5196 struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf);
5197 if (bf != NULL && bf->bf_m != NULL) {
5198 ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
5199 ath_hal_txprocdesc(ah, bf->bf_lastds,
5200 &bf->bf_status.ds_txstat) == HAL_OK);
5201 ieee80211_dump_pkt(ifp->if_l2com,
5202 mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
5203 0, -1);
5204 }
5205 }
5206#endif /* ATH_DEBUG */
5207 /* XXX check this inside of IF_LOCK? */
5208 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5209 sc->sc_wd_timer = 0;
5210}
5211
5212/*
5213 * Disable the receive h/w in preparation for a reset.
5214 */
5215static void
5216ath_stoprecv(struct ath_softc *sc, int dodelay)
5217{
5218#define PA2DESC(_sc, _pa) \
5219 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
5220 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
5221 struct ath_hal *ah = sc->sc_ah;
5222
5223 ath_hal_stoppcurecv(ah); /* disable PCU */
5224 ath_hal_setrxfilter(ah, 0); /* clear recv filter */
5225 ath_hal_stopdmarecv(ah); /* disable DMA engine */
5226 if (dodelay)
5227 DELAY(3000); /* 3ms is long enough for 1 frame */
5228#ifdef ATH_DEBUG
5229 if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
5230 struct ath_buf *bf;
5231 u_int ix;
5232
5233 printf("%s: rx queue %p, link %p\n", __func__,
5234 (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink);
5235 ix = 0;
5236 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
5237 struct ath_desc *ds = bf->bf_desc;
5238 struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
5239 HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
5240 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
5241 if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
5242 ath_printrxbuf(sc, bf, ix, status == HAL_OK);
5243 ix++;
5244 }
5245 }
5246#endif
5247 if (sc->sc_rxpending != NULL) {
5248 m_freem(sc->sc_rxpending);
5249 sc->sc_rxpending = NULL;
5250 }
5251 sc->sc_rxlink = NULL; /* just in case */
5252#undef PA2DESC
5253}
5254
5255/*
5256 * Enable the receive h/w following a reset.
5257 */
5258static int
5259ath_startrecv(struct ath_softc *sc)
5260{
5261 struct ath_hal *ah = sc->sc_ah;
5262 struct ath_buf *bf;
5263
5264 sc->sc_rxlink = NULL;
5265 sc->sc_rxpending = NULL;
5266 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
5267 int error = ath_rxbuf_init(sc, bf);
5268 if (error != 0) {
5269 DPRINTF(sc, ATH_DEBUG_RECV,
5270 "%s: ath_rxbuf_init failed %d\n",
5271 __func__, error);
5272 return error;
5273 }
5274 }
5275
5276 bf = TAILQ_FIRST(&sc->sc_rxbuf);
5277 ath_hal_putrxbuf(ah, bf->bf_daddr);
5278 ath_hal_rxena(ah); /* enable recv descriptors */
5279 ath_mode_init(sc); /* set filters, etc. */
5280 ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */
5281 return 0;
5282}
5283
5284/*
5285 * Update internal state after a channel change.
5286 */
5287static void
5288ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
5289{
5290 enum ieee80211_phymode mode;
5291
5292 /*
5293 * Change channels and update the h/w rate map
5294 * if we're switching; e.g. 11a to 11b/g.
5295 */
5296 mode = ieee80211_chan2mode(chan);
5297 if (mode != sc->sc_curmode)
5298 ath_setcurmode(sc, mode);
5299 sc->sc_curchan = chan;
5300}
5301
5302/*
5303 * Set/change channels. If the channel is really being changed,
5304 * it's done by resetting the chip. To accomplish this we must
5305 * first clean up any pending DMA, then restart things
5306 * a la ath_init.
5307 */
5308static int
5309ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
5310{
5311 struct ifnet *ifp = sc->sc_ifp;
5312 struct ieee80211com *ic = ifp->if_l2com;
5313 struct ath_hal *ah = sc->sc_ah;
5314 int ret = 0;
5315 int dointr = 0;
5316
5317 /* Treat this as an interface reset */
5318 ATH_PCU_LOCK(sc);
5319 if (ath_reset_grablock(sc, 1) == 0) {
5320 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
5321 __func__);
5322 }
5323 if (chan != sc->sc_curchan) {
5324 dointr = 1;
5325 /* XXX only do this if inreset_cnt is 1? */
5326 ath_hal_intrset(ah, 0);
5327 }
5328 ATH_PCU_UNLOCK(sc);
5329 ath_txrx_stop(sc);
5330
5331 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
5332 __func__, ieee80211_chan2ieee(ic, chan),
5333 chan->ic_freq, chan->ic_flags);
5334 if (chan != sc->sc_curchan) {
5335 HAL_STATUS status;
5336 /*
5337 * To switch channels clear any pending DMA operations;
5338 * wait long enough for the RX fifo to drain, reset the
5339 * hardware at the new frequency, and then re-enable
5340 * the relevant bits of the h/w.
5341 */
5342#if 0
5343 ath_hal_intrset(ah, 0); /* disable interrupts */
5344#endif
5345 ath_stoprecv(sc, 1); /* turn off frame recv */
5346 /*
5347 * First, handle completed TX/RX frames.
5348 */
5349 ath_rx_proc(sc, 0);
5350 ath_draintxq(sc, ATH_RESET_NOLOSS);
5351 /*
5352 * Next, flush the non-scheduled frames.
5353 */
5354 ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */
5355
5356 if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) {
5357 if_printf(ifp, "%s: unable to reset "
5358 "channel %u (%u MHz, flags 0x%x), hal status %u\n",
5359 __func__, ieee80211_chan2ieee(ic, chan),
5360 chan->ic_freq, chan->ic_flags, status);
5361 ret = EIO;
5362 goto finish;
5363 }
5364 sc->sc_diversity = ath_hal_getdiversity(ah);
5365
5366 /* Let DFS at it in case it's a DFS channel */
5367 ath_dfs_radar_enable(sc, ic->ic_curchan);
5368
5369 /*
5370 * Re-enable rx framework.
5371 */
5372 if (ath_startrecv(sc) != 0) {
5373 if_printf(ifp, "%s: unable to restart recv logic\n",
5374 __func__);
5375 ret = EIO;
5376 goto finish;
5377 }
5378
5379 /*
5380 * Change channels and update the h/w rate map
5381 * if we're switching; e.g. 11a to 11b/g.
5382 */
5383 ath_chan_change(sc, chan);
5384
5385 /*
5386 * Reset clears the beacon timers; reset them
5387 * here if needed.
5388 */
5389 if (sc->sc_beacons) { /* restart beacons */
5390#ifdef IEEE80211_SUPPORT_TDMA
5391 if (sc->sc_tdma)
5392 ath_tdma_config(sc, NULL);
5393 else
5394#endif
5395 ath_beacon_config(sc, NULL);
5396 }
5397
5398#if 0
5399 /*
5400 * Re-enable interrupts.
5401 */
5402 ath_hal_intrset(ah, sc->sc_imask);
5403#endif
5404 }
5405
5406finish:
5407 ATH_PCU_LOCK(sc);
5408 sc->sc_inreset_cnt--;
5409 /* XXX only do this if sc_inreset_cnt == 0? */
5410 if (dointr)
5411 ath_hal_intrset(ah, sc->sc_imask);
5412 ATH_PCU_UNLOCK(sc);
5413
5414 /* XXX do this inside of IF_LOCK? */
5415 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5416 ath_txrx_start(sc);
5417 /* XXX ath_start? */
5418
5419 return ret;
5420}
5421
5422/*
5423 * Periodically recalibrate the PHY to account
5424 * for temperature/environment changes.
5425 */
5426static void
5427ath_calibrate(void *arg)
5428{
5429 struct ath_softc *sc = arg;
5430 struct ath_hal *ah = sc->sc_ah;
5431 struct ifnet *ifp = sc->sc_ifp;
5432 struct ieee80211com *ic = ifp->if_l2com;
5433	HAL_BOOL longCal, isCalDone = AH_TRUE;	/* NB: init so an ANI-only poll doesn't test garbage below */
5434 HAL_BOOL aniCal, shortCal = AH_FALSE;
5435 int nextcal;
5436
5437 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */
5438 goto restart;
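	/*
	 * NB: ath_longcalinterval is in seconds while the short and ANI
	 * calibration intervals are in milliseconds, hence the differing
	 * conversions to ticks below.
	 */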
5439 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz);
5440 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000);
5441 if (sc->sc_doresetcal)
5442 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000);
5443
5444 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal);
5445 if (aniCal) {
5446 sc->sc_stats.ast_ani_cal++;
5447 sc->sc_lastani = ticks;
5448 ath_hal_ani_poll(ah, sc->sc_curchan);
5449 }
5450
5451 if (longCal) {
5452 sc->sc_stats.ast_per_cal++;
5453 sc->sc_lastlongcal = ticks;
5454 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
5455 /*
5456 * Rfgain is out of bounds, reset the chip
5457 * to load new gain values.
5458 */
5459 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
5460 "%s: rfgain change\n", __func__);
5461 sc->sc_stats.ast_per_rfgain++;
5462 /*
5463 * Drop lock - we can't hold it across the
5464 * ath_reset() call. Instead, we'll drop
5465 * out here, do a reset, then reschedule
5466 * the callout.
5467 */
5468 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
5469 sc->sc_resetcal = 0;
5470 sc->sc_doresetcal = AH_TRUE;
5471 ATH_UNLOCK(sc);
5472 ath_reset(ifp, ATH_RESET_NOLOSS);
5473 ATH_LOCK(sc);
5474 return;
5475 }
5476 /*
5477 * If this long cal is after an idle period, then
5478 * reset the data collection state so we start fresh.
5479 */
5480 if (sc->sc_resetcal) {
5481 (void) ath_hal_calreset(ah, sc->sc_curchan);
5482 sc->sc_lastcalreset = ticks;
5483 sc->sc_lastshortcal = ticks;
5484 sc->sc_resetcal = 0;
5485 sc->sc_doresetcal = AH_TRUE;
5486 }
5487 }
5488
5489 /* Only call if we're doing a short/long cal, not for ANI calibration */
5490 if (shortCal || longCal) {
5491 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) {
5492 if (longCal) {
5493 /*
5494 * Calibrate noise floor data again in case of change.
5495 */
5496 ath_hal_process_noisefloor(ah);
5497 }
5498 } else {
5499 DPRINTF(sc, ATH_DEBUG_ANY,
5500 "%s: calibration of channel %u failed\n",
5501 __func__, sc->sc_curchan->ic_freq);
5502 sc->sc_stats.ast_per_calfail++;
5503 }
5504 if (shortCal)
5505 sc->sc_lastshortcal = ticks;
5506 }
5507 if (!isCalDone) {
5508restart:
5509 /*
5510 * Use a shorter interval to potentially collect multiple
5511 * data samples required to complete calibration. Once
5512 * we're told the work is done we drop back to a longer
5513 * interval between requests. We're more aggressive doing
5514 * work when operating as an AP to improve operation right
5515 * after startup.
5516 */
5517 sc->sc_lastshortcal = ticks;
5518 nextcal = ath_shortcalinterval*hz/1000;
5519 if (sc->sc_opmode != HAL_M_HOSTAP)
5520 nextcal *= 10;
5521 sc->sc_doresetcal = AH_TRUE;
5522 } else {
5523 /* nextcal should be the shortest time for next event */
5524 nextcal = ath_longcalinterval*hz;
5525 if (sc->sc_lastcalreset == 0)
5526 sc->sc_lastcalreset = sc->sc_lastlongcal;
5527 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz)
5528 sc->sc_resetcal = 1; /* setup reset next trip */
5529 sc->sc_doresetcal = AH_FALSE;
5530 }
5531 /* ANI calibration may occur more often than short/long/resetcal */
5532 if (ath_anicalinterval > 0)
5533 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000);
5534
5535 if (nextcal != 0) {
5536 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n",
5537 __func__, nextcal, isCalDone ? "" : "!");
5538 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc);
5539 } else {
5540 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n",
5541 __func__);
5542 /* NB: don't rearm timer */
5543 }
5544}
5545
5546static void
5547ath_scan_start(struct ieee80211com *ic)
5548{
5549 struct ifnet *ifp = ic->ic_ifp;
5550 struct ath_softc *sc = ifp->if_softc;
5551 struct ath_hal *ah = sc->sc_ah;
5552 u_int32_t rfilt;
5553
5554 /* XXX calibration timer? */
5555
5556 sc->sc_scanning = 1;
5557 sc->sc_syncbeacon = 0;
5558 rfilt = ath_calcrxfilter(sc);
5559 ath_hal_setrxfilter(ah, rfilt);
5560 ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0);
5561
5562 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
5563 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr));
5564}
5565
5566static void
5567ath_scan_end(struct ieee80211com *ic)
5568{
5569 struct ifnet *ifp = ic->ic_ifp;
5570 struct ath_softc *sc = ifp->if_softc;
5571 struct ath_hal *ah = sc->sc_ah;
5572 u_int32_t rfilt;
5573
5574 sc->sc_scanning = 0;
5575 rfilt = ath_calcrxfilter(sc);
5576 ath_hal_setrxfilter(ah, rfilt);
5577 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
5578
5579 ath_hal_process_noisefloor(ah);
5580
5581 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
5582 __func__, rfilt, ether_sprintf(sc->sc_curbssid),
5583 sc->sc_curaid);
5584}
5585
5586static void
5587ath_set_channel(struct ieee80211com *ic)
5588{
5589 struct ifnet *ifp = ic->ic_ifp;
5590 struct ath_softc *sc = ifp->if_softc;
5591
5592 (void) ath_chan_set(sc, ic->ic_curchan);
5593 /*
5594 * If we are returning to our bss channel then mark state
5595 * so the next recv'd beacon's tsf will be used to sync the
5596 * beacon timers. Note that since we only hear beacons in
5597 * sta/ibss mode this has no effect in other operating modes.
5598 */
5599 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
5600 sc->sc_syncbeacon = 1;
5601}
5602
5603/*
5604 * Walk the vap list and check if there are any vaps in RUN state.
5605 */
5606static int
5607ath_isanyrunningvaps(struct ieee80211vap *this)
5608{
5609 struct ieee80211com *ic = this->iv_ic;
5610 struct ieee80211vap *vap;
5611
5612 IEEE80211_LOCK_ASSERT(ic);
5613
5614 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
5615 if (vap != this && vap->iv_state >= IEEE80211_S_RUN)
5616 return 1;
5617 }
5618 return 0;
5619}
5620
5621static int
5622ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
5623{
5624 struct ieee80211com *ic = vap->iv_ic;
5625 struct ath_softc *sc = ic->ic_ifp->if_softc;
5626 struct ath_vap *avp = ATH_VAP(vap);
5627 struct ath_hal *ah = sc->sc_ah;
5628 struct ieee80211_node *ni = NULL;
5629 int i, error, stamode;
5630 u_int32_t rfilt;
5631 int csa_run_transition = 0;
5632 static const HAL_LED_STATE leds[] = {
5633 HAL_LED_INIT, /* IEEE80211_S_INIT */
5634 HAL_LED_SCAN, /* IEEE80211_S_SCAN */
5635 HAL_LED_AUTH, /* IEEE80211_S_AUTH */
5636 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */
5637 HAL_LED_RUN, /* IEEE80211_S_CAC */
5638 HAL_LED_RUN, /* IEEE80211_S_RUN */
5639 HAL_LED_RUN, /* IEEE80211_S_CSA */
5640 HAL_LED_RUN, /* IEEE80211_S_SLEEP */
5641 };
5642
5643 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
5644 ieee80211_state_name[vap->iv_state],
5645 ieee80211_state_name[nstate]);
5646
5647 if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN)
5648 csa_run_transition = 1;
5649
5650 callout_drain(&sc->sc_cal_ch);
5651 ath_hal_setledstate(ah, leds[nstate]); /* set LED */
5652
5653 if (nstate == IEEE80211_S_SCAN) {
5654 /*
5655 * Scanning: turn off beacon miss and don't beacon.
5656 * Mark beacon state so when we reach RUN state we'll
5657 * [re]setup beacons. Unblock the task q thread so
5658 * deferred interrupt processing is done.
5659 */
5660 ath_hal_intrset(ah,
5661 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
5662 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
5663 sc->sc_beacons = 0;
5664 taskqueue_unblock(sc->sc_tq);
5665 }
5666
5667 ni = vap->iv_bss;
5668 rfilt = ath_calcrxfilter(sc);
5669 stamode = (vap->iv_opmode == IEEE80211_M_STA ||
5670 vap->iv_opmode == IEEE80211_M_AHDEMO ||
5671 vap->iv_opmode == IEEE80211_M_IBSS);
5672 if (stamode && nstate == IEEE80211_S_RUN) {
5673 sc->sc_curaid = ni->ni_associd;
5674 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
5675 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
5676 }
5677 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
5678 __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
5679 ath_hal_setrxfilter(ah, rfilt);
5680
5681 /* XXX is this to restore keycache on resume? */
5682 if (vap->iv_opmode != IEEE80211_M_STA &&
5683 (vap->iv_flags & IEEE80211_F_PRIVACY)) {
5684 for (i = 0; i < IEEE80211_WEP_NKID; i++)
5685 if (ath_hal_keyisvalid(ah, i))
5686 ath_hal_keysetmac(ah, i, ni->ni_bssid);
5687 }
5688
5689 /*
5690 * Invoke the parent method to do net80211 work.
5691 */
5692 error = avp->av_newstate(vap, nstate, arg);
5693 if (error != 0)
5694 goto bad;
5695
5696 if (nstate == IEEE80211_S_RUN) {
5697 /* NB: collect bss node again, it may have changed */
5698 ni = vap->iv_bss;
5699
5700 DPRINTF(sc, ATH_DEBUG_STATE,
5701 "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
5702 "capinfo 0x%04x chan %d\n", __func__,
5703 vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid),
5704 ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));
5705
5706 switch (vap->iv_opmode) {
5707#ifdef IEEE80211_SUPPORT_TDMA
5708 case IEEE80211_M_AHDEMO:
5709 if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
5710 break;
5711 /* fall thru... */
5712#endif
5713 case IEEE80211_M_HOSTAP:
5714 case IEEE80211_M_IBSS:
5715 case IEEE80211_M_MBSS:
5716 /*
5717 * Allocate and setup the beacon frame.
5718 *
5719 * Stop any previous beacon DMA. This may be
5720 * necessary, for example, when an ibss merge
5721 * causes reconfiguration; there will be a state
5722			 * transition from RUN->RUN, which means we may
5723			 * be called with beacon transmission active.
5724 */
5725 ath_hal_stoptxdma(ah, sc->sc_bhalq);
5726
5727 error = ath_beacon_alloc(sc, ni);
5728 if (error != 0)
5729 goto bad;
5730 /*
5731 * If joining an adhoc network defer beacon timer
5732 * configuration to the next beacon frame so we
5733 * have a current TSF to use. Otherwise we're
5734 * starting an ibss/bss so there's no need to delay;
5735 * if this is the first vap moving to RUN state, then
5736 * beacon state needs to be [re]configured.
5737 */
5738 if (vap->iv_opmode == IEEE80211_M_IBSS &&
5739 ni->ni_tstamp.tsf != 0) {
5740 sc->sc_syncbeacon = 1;
5741 } else if (!sc->sc_beacons) {
5742#ifdef IEEE80211_SUPPORT_TDMA
5743 if (vap->iv_caps & IEEE80211_C_TDMA)
5744 ath_tdma_config(sc, vap);
5745 else
5746#endif
5747 ath_beacon_config(sc, vap);
5748 sc->sc_beacons = 1;
5749 }
5750 break;
5751 case IEEE80211_M_STA:
5752 /*
5753 * Defer beacon timer configuration to the next
5754 * beacon frame so we have a current TSF to use
5755 * (any TSF collected when scanning is likely old).
5756 * However if it's due to a CSA -> RUN transition,
5757 * force a beacon update so we pick up a lack of
5758 * beacons from an AP in CAC and thus force a
5759 * scan.
5760 */
5761 sc->sc_syncbeacon = 1;
5762 if (csa_run_transition)
5763 ath_beacon_config(sc, vap);
5764 break;
5765 case IEEE80211_M_MONITOR:
5766 /*
5767 * Monitor mode vaps have only INIT->RUN and RUN->RUN
5768 * transitions so we must re-enable interrupts here to
5769 * handle the case of a single monitor mode vap.
5770 */
5771 ath_hal_intrset(ah, sc->sc_imask);
5772 break;
5773 case IEEE80211_M_WDS:
5774 break;
5775 default:
5776 break;
5777 }
5778 /*
5779 * Let the hal process statistics collected during a
5780 * scan so it can provide calibrated noise floor data.
5781 */
5782 ath_hal_process_noisefloor(ah);
5783 /*
5784 * Reset rssi stats; maybe not the best place...
5785 */
5786 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
5787 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
5788 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
5789 /*
5790 * Finally, start any timers and the task q thread
5791 * (in case we didn't go through SCAN state).
5792 */
5793 if (ath_longcalinterval != 0) {
5794 /* start periodic recalibration timer */
5795 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
5796 } else {
5797 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
5798 "%s: calibration disabled\n", __func__);
5799 }
5800 taskqueue_unblock(sc->sc_tq);
5801 } else if (nstate == IEEE80211_S_INIT) {
5802 /*
5803 * If there are no vaps left in RUN state then
5804 * shutdown host/driver operation:
5805 * o disable interrupts
5806 * o disable the task queue thread
5807 * o mark beacon processing as stopped
5808 */
5809 if (!ath_isanyrunningvaps(vap)) {
5810 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
5811 /* disable interrupts */
5812 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
5813 taskqueue_block(sc->sc_tq);
5814 sc->sc_beacons = 0;
5815 }
5816#ifdef IEEE80211_SUPPORT_TDMA
5817 ath_hal_setcca(ah, AH_TRUE);
5818#endif
5819 }
5820bad:
5821 return error;
5822}
5823
5824/*
5825 * Allocate a key cache slot to the station so we can
5826 * setup a mapping from key index to node. The key cache
5827 * slot is needed for managing antenna state and for
5828 * compression when stations do not use crypto. We do
5829 * it unilaterally here; if crypto is employed this slot
5830 * will be reassigned.
5831 */
5832static void
5833ath_setup_stationkey(struct ieee80211_node *ni)
5834{
5835 struct ieee80211vap *vap = ni->ni_vap;
5836 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
5837 ieee80211_keyix keyix, rxkeyix;
5838
5839 if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
5840 /*
5841 * Key cache is full; we'll fall back to doing
5842 * the more expensive lookup in software. Note
5843 * this also means no h/w compression.
5844 */
5845 /* XXX msg+statistic */
5846 } else {
5847 /* XXX locking? */
5848 ni->ni_ucastkey.wk_keyix = keyix;
5849 ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
5850 /* NB: must mark device key to get called back on delete */
5851 ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
5852 IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
5853 /* NB: this will create a pass-thru key entry */
5854 ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss);
5855 }
5856}
5857
5858/*
5859 * Setup driver-specific state for a newly associated node.
5860 * Note that we're called also on a re-associate, the isnew
5861 * param tells us if this is the first time or not.
5862 */
5863static void
5864ath_newassoc(struct ieee80211_node *ni, int isnew)
5865{
5866 struct ath_node *an = ATH_NODE(ni);
5867 struct ieee80211vap *vap = ni->ni_vap;
5868 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
5869 const struct ieee80211_txparam *tp = ni->ni_txparms;
5870
5871 an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
5872 an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);
5873
5874 ath_rate_newassoc(sc, an, isnew);
5875 if (isnew &&
5876 (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
5877 ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
5878 ath_setup_stationkey(ni);
5879}
5880
5881static int
5882ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
5883 int nchans, struct ieee80211_channel chans[])
5884{
5885 struct ath_softc *sc = ic->ic_ifp->if_softc;
5886 struct ath_hal *ah = sc->sc_ah;
5887 HAL_STATUS status;
5888
5889 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
5890 "%s: rd %u cc %u location %c%s\n",
5891 __func__, reg->regdomain, reg->country, reg->location,
5892 reg->ecm ? " ecm" : "");
5893
5894 status = ath_hal_set_channels(ah, chans, nchans,
5895 reg->country, reg->regdomain);
5896 if (status != HAL_OK) {
5897 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
5898 __func__, status);
5899 return EINVAL; /* XXX */
5900 }
5901
5902 return 0;
5903}
5904
5905static void
5906ath_getradiocaps(struct ieee80211com *ic,
5907 int maxchans, int *nchans, struct ieee80211_channel chans[])
5908{
5909 struct ath_softc *sc = ic->ic_ifp->if_softc;
5910 struct ath_hal *ah = sc->sc_ah;
5911
5912 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
5913 __func__, SKU_DEBUG, CTRY_DEFAULT);
5914
5915 /* XXX check return */
5916 (void) ath_hal_getchannels(ah, chans, maxchans, nchans,
5917 HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
5918
5919}
5920
5921static int
5922ath_getchannels(struct ath_softc *sc)
5923{
5924 struct ifnet *ifp = sc->sc_ifp;
5925 struct ieee80211com *ic = ifp->if_l2com;
5926 struct ath_hal *ah = sc->sc_ah;
5927 HAL_STATUS status;
5928
5929 /*
5930 * Collect channel set based on EEPROM contents.
5931 */
5932 status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
5933 &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
5934 if (status != HAL_OK) {
5935 if_printf(ifp, "%s: unable to collect channel list from hal, "
5936 "status %d\n", __func__, status);
5937 return EINVAL;
5938 }
5939 (void) ath_hal_getregdomain(ah, &sc->sc_eerd);
5940 ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */
5941	/* XXX map Atheros SKUs to net80211 SKUs */
5942 /* XXX net80211 types too small */
5943 ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
5944 ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
5945 ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */
5946 ic->ic_regdomain.isocc[1] = ' ';
5947
5948 ic->ic_regdomain.ecm = 1;
5949 ic->ic_regdomain.location = 'I';
5950
5951 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
5952 "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
5953 __func__, sc->sc_eerd, sc->sc_eecc,
5954 ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
5955 ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : "");
5956 return 0;
5957}
5958
5959static int
5960ath_rate_setup(struct ath_softc *sc, u_int mode)
5961{
5962 struct ath_hal *ah = sc->sc_ah;
5963 const HAL_RATE_TABLE *rt;
5964
5965 switch (mode) {
5966 case IEEE80211_MODE_11A:
5967 rt = ath_hal_getratetable(ah, HAL_MODE_11A);
5968 break;
5969 case IEEE80211_MODE_HALF:
5970 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
5971 break;
5972 case IEEE80211_MODE_QUARTER:
5973 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
5974 break;
5975 case IEEE80211_MODE_11B:
5976 rt = ath_hal_getratetable(ah, HAL_MODE_11B);
5977 break;
5978 case IEEE80211_MODE_11G:
5979 rt = ath_hal_getratetable(ah, HAL_MODE_11G);
5980 break;
5981 case IEEE80211_MODE_TURBO_A:
5982 rt = ath_hal_getratetable(ah, HAL_MODE_108A);
5983 break;
5984 case IEEE80211_MODE_TURBO_G:
5985 rt = ath_hal_getratetable(ah, HAL_MODE_108G);
5986 break;
5987 case IEEE80211_MODE_STURBO_A:
5988 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
5989 break;
5990 case IEEE80211_MODE_11NA:
5991 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
5992 break;
5993 case IEEE80211_MODE_11NG:
5994 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
5995 break;
5996 default:
5997 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
5998 __func__, mode);
5999 return 0;
6000 }
6001 sc->sc_rates[mode] = rt;
6002 return (rt != NULL);
6003}
6004
6005static void
6006ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
6007{
6008#define N(a) (sizeof(a)/sizeof(a[0]))
6009 /* NB: on/off times from the Atheros NDIS driver, w/ permission */
6010 static const struct {
6011 u_int rate; /* tx/rx 802.11 rate */
6012 u_int16_t timeOn; /* LED on time (ms) */
6013 u_int16_t timeOff; /* LED off time (ms) */
6014 } blinkrates[] = {
6015 { 108, 40, 10 },
6016 { 96, 44, 11 },
6017 { 72, 50, 13 },
6018 { 48, 57, 14 },
6019 { 36, 67, 16 },
6020 { 24, 80, 20 },
6021 { 22, 100, 25 },
6022 { 18, 133, 34 },
6023 { 12, 160, 40 },
6024 { 10, 200, 50 },
6025 { 6, 240, 58 },
6026 { 4, 267, 66 },
6027 { 2, 400, 100 },
6028 { 0, 500, 130 },
6029 /* XXX half/quarter rates */
6030 };
6031 const HAL_RATE_TABLE *rt;
6032 int i, j;
6033
6034 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
6035 rt = sc->sc_rates[mode];
6036 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
6037 for (i = 0; i < rt->rateCount; i++) {
6038 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
6039 if (rt->info[i].phy != IEEE80211_T_HT)
6040 sc->sc_rixmap[ieeerate] = i;
6041 else
6042 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
6043 }
6044 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
6045 for (i = 0; i < N(sc->sc_hwmap); i++) {
6046 if (i >= rt->rateCount) {
6047 sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
6048 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
6049 continue;
6050 }
6051 sc->sc_hwmap[i].ieeerate =
6052 rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
6053 if (rt->info[i].phy == IEEE80211_T_HT)
6054 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
6055 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
6056 if (rt->info[i].shortPreamble ||
6057 rt->info[i].phy == IEEE80211_T_OFDM)
6058 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
6059 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
6060 for (j = 0; j < N(blinkrates)-1; j++)
6061 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
6062 break;
6063 /* NB: this uses the last entry if the rate isn't found */
6064		/* XXX beware of overflow */
6065 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
6066 sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
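		/*
		 * Illustrative: assuming hz=1000, the 108 (54 Mb/s) entry
		 * above maps to ledon/ledoff of 40/10 ticks, i.e. 40ms on
		 * and 10ms off.
		 */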
6067 }
6068 sc->sc_currates = rt;
6069 sc->sc_curmode = mode;
6070 /*
6071	 * All protection frames are transmitted at 2 Mb/s for
6072	 * 11g, otherwise at 1 Mb/s.
6073 */
6074 if (mode == IEEE80211_MODE_11G)
6075 sc->sc_protrix = ath_tx_findrix(sc, 2*2);
6076 else
6077 sc->sc_protrix = ath_tx_findrix(sc, 2*1);
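	/*
	 * NB: ath_tx_findrix() takes rates in 802.11 0.5 Mb/s units,
	 * so 2*2 above is 2 Mb/s and 2*1 is 1 Mb/s.
	 */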
6078 /* NB: caller is responsible for resetting rate control state */
6079#undef N
6080}
6081
6082static void
6083ath_watchdog(void *arg)
6084{
6085 struct ath_softc *sc = arg;
6086 int do_reset = 0;
6087
6088 if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
6089 struct ifnet *ifp = sc->sc_ifp;
6090 uint32_t hangs;
6091
6092 if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
6093 hangs != 0) {
6094 if_printf(ifp, "%s hang detected (0x%x)\n",
6095 hangs & 0xff ? "bb" : "mac", hangs);
6096 } else
6097 if_printf(ifp, "device timeout\n");
6098 do_reset = 1;
6099 ifp->if_oerrors++;
6100 sc->sc_stats.ast_watchdog++;
6101 }
6102
6103 /*
6104 * We can't hold the lock across the ath_reset() call.
6105 */
6106 if (do_reset) {
6107 ATH_UNLOCK(sc);
6108 ath_reset(sc->sc_ifp, ATH_RESET_NOLOSS);
6109 ATH_LOCK(sc);
6110 }
6111
6112 callout_schedule(&sc->sc_wd_ch, hz);
6113}
6114
6115#ifdef ATH_DIAGAPI
6116/*
6117 * Diagnostic interface to the HAL. This is used by various
6118 * tools to do things like retrieve register contents for
6119 * debugging. The mechanism is intentionally opaque so that
6120 * it can change frequently w/o concern for compatibility.
6121 */
6122static int
6123ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
6124{
6125 struct ath_hal *ah = sc->sc_ah;
6126 u_int id = ad->ad_id & ATH_DIAG_ID;
6127 void *indata = NULL;
6128 void *outdata = NULL;
6129 u_int32_t insize = ad->ad_in_size;
6130 u_int32_t outsize = ad->ad_out_size;
6131 int error = 0;
6132
6133 if (ad->ad_id & ATH_DIAG_IN) {
6134 /*
6135 * Copy in data.
6136 */
6137 indata = malloc(insize, M_TEMP, M_NOWAIT);
6138 if (indata == NULL) {
6139 error = ENOMEM;
6140 goto bad;
6141 }
6142 error = copyin(ad->ad_in_data, indata, insize);
6143 if (error)
6144 goto bad;
6145 }
6146 if (ad->ad_id & ATH_DIAG_DYN) {
6147 /*
6148 * Allocate a buffer for the results (otherwise the HAL
6149 * returns a pointer to a buffer where we can read the
6150 * results). Note that we depend on the HAL leaving this
6151 * pointer for us to use below in reclaiming the buffer;
6152 * may want to be more defensive.
6153 */
6154 outdata = malloc(outsize, M_TEMP, M_NOWAIT);
6155 if (outdata == NULL) {
6156 error = ENOMEM;
6157 goto bad;
6158 }
6159 }
6160 if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
6161 if (outsize < ad->ad_out_size)
6162 ad->ad_out_size = outsize;
6163 if (outdata != NULL)
6164 error = copyout(outdata, ad->ad_out_data,
6165 ad->ad_out_size);
6166 } else {
6167 error = EINVAL;
6168 }
6169bad:
6170 if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
6171 free(indata, M_TEMP);
6172 if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
6173 free(outdata, M_TEMP);
6174 return error;
6175}
6176#endif /* ATH_DIAGAPI */
6177
6178static int
6179ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
6180{
6181#define IS_RUNNING(ifp) \
6182 ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
6183 struct ath_softc *sc = ifp->if_softc;
6184 struct ieee80211com *ic = ifp->if_l2com;
6185 struct ifreq *ifr = (struct ifreq *)data;
6186 const HAL_RATE_TABLE *rt;
6187 int error = 0;
6188
6189 switch (cmd) {
6190 case SIOCSIFFLAGS:
6191 ATH_LOCK(sc);
6192 if (IS_RUNNING(ifp)) {
6193 /*
6194 * To avoid rescanning another access point,
6195 * do not call ath_init() here. Instead,
6196 * only reflect promisc mode settings.
6197 */
6198 ath_mode_init(sc);
6199 } else if (ifp->if_flags & IFF_UP) {
6200 /*
6201 * Beware of being called during attach/detach
6202 * to reset promiscuous mode. In that case we
6203 * will still be marked UP but not RUNNING.
6204 * However trying to re-init the interface
6205 * is the wrong thing to do as we've already
6206 * torn down much of our state. There's
6207 * probably a better way to deal with this.
6208 */
6209 if (!sc->sc_invalid)
6210 ath_init(sc); /* XXX lose error */
6211 } else {
6212 ath_stop_locked(ifp);
6213#ifdef notyet
6214 /* XXX must wakeup in places like ath_vap_delete */
6215 if (!sc->sc_invalid)
6216 ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
6217#endif
6218 }
6219 ATH_UNLOCK(sc);
6220 break;
6221 case SIOCGIFMEDIA:
6222 case SIOCSIFMEDIA:
6223 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
6224 break;
6225 case SIOCGATHSTATS:
6226 /* NB: embed these numbers to get a consistent view */
6227 sc->sc_stats.ast_tx_packets = ifp->if_opackets;
6228 sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
6229 sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
6230 sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
6231#ifdef IEEE80211_SUPPORT_TDMA
6232 sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap);
6233 sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam);
6234#endif
6235 rt = sc->sc_currates;
6236 sc->sc_stats.ast_tx_rate =
6237 rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
6238 if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT)
6239 sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS;
6240 return copyout(&sc->sc_stats,
6241 ifr->ifr_data, sizeof (sc->sc_stats));
6242 case SIOCZATHSTATS:
6243 error = priv_check(curthread, PRIV_DRIVER);
6244 if (error == 0)
6245 memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
6246 break;
6247#ifdef ATH_DIAGAPI
6248 case SIOCGATHDIAG:
6249 error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
6250 break;
6251 case SIOCGATHPHYERR:
6252		error = ath_ioctl_phyerr(sc, (struct ath_diag *) ifr);
6253 break;
6254#endif
6255 case SIOCGIFADDR:
6256 error = ether_ioctl(ifp, cmd, data);
6257 break;
6258 default:
6259 error = EINVAL;
6260 break;
6261 }
6262 return error;
6263#undef IS_RUNNING
6264}
6265
6266/*
6267 * Announce various information on device/driver attach.
6268 */
6269static void
6270ath_announce(struct ath_softc *sc)
6271{
6272 struct ifnet *ifp = sc->sc_ifp;
6273 struct ath_hal *ah = sc->sc_ah;
6274
6275 if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n",
6276 ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
6277 ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
6278 if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n",
6279 ah->ah_analog2GhzRev, ah->ah_analog5GhzRev);
6280 if (bootverbose) {
6281 int i;
6282 for (i = 0; i <= WME_AC_VO; i++) {
6283 struct ath_txq *txq = sc->sc_ac2q[i];
6284 if_printf(ifp, "Use hw queue %u for %s traffic\n",
6285 txq->axq_qnum, ieee80211_wme_acnames[i]);
6286 }
6287 if_printf(ifp, "Use hw queue %u for CAB traffic\n",
6288 sc->sc_cabq->axq_qnum);
6289 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
6290 }
6291 if (ath_rxbuf != ATH_RXBUF)
6292 if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
6293 if (ath_txbuf != ATH_TXBUF)
6294 if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
6295 if (sc->sc_mcastkey && bootverbose)
6296 if_printf(ifp, "using multicast key search\n");
6297}
6298
6299#ifdef IEEE80211_SUPPORT_TDMA
6300static void
6301ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, u_int32_t bintval)
6302{
6303 struct ath_hal *ah = sc->sc_ah;
6304 HAL_BEACON_TIMERS bt;
6305
6306 bt.bt_intval = bintval | HAL_BEACON_ENA;
6307 bt.bt_nexttbtt = nexttbtt;
6308 bt.bt_nextdba = (nexttbtt<<3) - sc->sc_tdmadbaprep;
6309 bt.bt_nextswba = (nexttbtt<<3) - sc->sc_tdmaswbaprep;
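	/* NB: the DBA/SWBA hardware timers count in 1/8 TU, hence the <<3 */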
6310 bt.bt_nextatim = nexttbtt+1;
6311 /* Enables TBTT, DBA, SWBA timers by default */
6312 bt.bt_flags = 0;
6313 ath_hal_beaconsettimers(ah, &bt);
6314}
6315
6316/*
6317 * Calculate the beacon interval. This is periodic in the
6318 * superframe for the bss. We assume each station is configured
6319 * identically wrt transmit rate so the guard time we calculate
6320 * will be the same on all stations. Note we need to
6321 * factor in the xmit time because the hardware will schedule
6322 * a frame for transmit if the start of the frame is within
6323 * the burst time. When we get hardware that properly kills
6324 * frames in the PCU we can reduce/eliminate the guard time.
6325 *
6326 * Roundup to 1024 is so we have 1 TU buffer in the guard time
6327 * to deal with the granularity of the nexttbtt timer. 11n MACs
6328 * with 1us timer granularity should allow us to reduce/eliminate
6329 * this.
6330 */
6331static void
6332ath_tdma_bintvalsetup(struct ath_softc *sc,
6333 const struct ieee80211_tdma_state *tdma)
6334{
6335 /* copy from vap state (XXX check all vaps have same value?) */
6336 sc->sc_tdmaslotlen = tdma->tdma_slotlen;
6337
6338 sc->sc_tdmabintval = roundup((sc->sc_tdmaslotlen+sc->sc_tdmaguard) *
6339 tdma->tdma_slotcnt, 1024);
6340 sc->sc_tdmabintval >>= 10; /* TSF -> TU */
6341 if (sc->sc_tdmabintval & 1)
6342 sc->sc_tdmabintval++;
6343
6344 if (tdma->tdma_slot == 0) {
6345 /*
6346 * Only slot 0 beacons; other slots respond.
6347 */
6348 sc->sc_imask |= HAL_INT_SWBA;
6349 sc->sc_tdmaswba = 0; /* beacon immediately */
6350 } else {
6351 /* XXX all vaps must be slot 0 or slot !0 */
6352 sc->sc_imask &= ~HAL_INT_SWBA;
6353 }
6354}
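
/*
 * Worked example of the interval math above, with illustrative numbers:
 * tdma_slotlen 10000us, sc_tdmaguard 2000us and tdma_slotcnt 2 give
 * roundup((10000 + 2000) * 2, 1024) == 24576us; >>10 yields 24 TU,
 * which is already even so no further adjustment is made.
 */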
6355
6356/*
6357 * Max 802.11 overhead. This assumes no 4-address frames and
6358 * the encapsulation done by ieee80211_encap (llc). We also
6359 * include potential crypto overhead.
6360 */
6361#define IEEE80211_MAXOVERHEAD \
6362 (sizeof(struct ieee80211_qosframe) \
6363 + sizeof(struct llc) \
6364 + IEEE80211_ADDR_LEN \
6365 + IEEE80211_WEP_IVLEN \
6366 + IEEE80211_WEP_KIDLEN \
6367 + IEEE80211_WEP_CRCLEN \
6368 + IEEE80211_WEP_MICLEN \
6369 + IEEE80211_CRC_LEN)
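
/*
 * NB: with typical net80211 sizes (26-byte QoS header, 8-byte LLC,
 * 6-byte 4th address, 3+1+4 bytes of WEP IV/KID/CRC, 8-byte MIC and
 * 4-byte FCS) this works out to roughly 60 bytes; the exact figure
 * depends on the net80211 definitions in use.
 */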
6370
6371/*
6372 * Setup initially for tdma operation. Start the beacon
6373 * timers and enable SWBA if we are slot 0. Otherwise
6374 * we wait for slot 0 to arrive so we can sync up before
6375 * starting to transmit.
6376 */
6377static void
6378ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap)
6379{
6380 struct ath_hal *ah = sc->sc_ah;
6381 struct ifnet *ifp = sc->sc_ifp;
6382 struct ieee80211com *ic = ifp->if_l2com;
6383 const struct ieee80211_txparam *tp;
6384 const struct ieee80211_tdma_state *tdma = NULL;
6385 int rix;
6386
6387 if (vap == NULL) {
6388 vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */
6389 if (vap == NULL) {
6390 if_printf(ifp, "%s: no vaps?\n", __func__);
6391 return;
6392 }
6393 }
6394 tp = vap->iv_bss->ni_txparms;
6395 /*
6396 * Calculate the guard time for each slot. This is the
6397 * time to send a maximal-size frame according to the
6398 * fixed/lowest transmit rate. Note that the interface
6399 * mtu does not include the 802.11 overhead so we must
6400 * tack that on (ath_hal_computetxtime includes the
6401	 * preamble and plcp in its calculation).
6402 */
6403 tdma = vap->iv_tdma;
6404 if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
6405 rix = ath_tx_findrix(sc, tp->ucastrate);
6406 else
6407 rix = ath_tx_findrix(sc, tp->mcastrate);
6408 /* XXX short preamble assumed */
6409 sc->sc_tdmaguard = ath_hal_computetxtime(ah, sc->sc_currates,
6410 ifp->if_mtu + IEEE80211_MAXOVERHEAD, rix, AH_TRUE);
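	/*
	 * Illustrative: a 1500-byte MTU plus ~60 bytes of 802.11 overhead
	 * at 24 Mb/s OFDM works out to roughly 550us of guard time.
	 */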
6411
6412 ath_hal_intrset(ah, 0);
6413
6414 ath_beaconq_config(sc); /* setup h/w beacon q */
6415 if (sc->sc_setcca)
6416 ath_hal_setcca(ah, AH_FALSE); /* disable CCA */
6417 ath_tdma_bintvalsetup(sc, tdma); /* calculate beacon interval */
6418 ath_tdma_settimers(sc, sc->sc_tdmabintval,
6419 sc->sc_tdmabintval | HAL_BEACON_RESET_TSF);
6420 sc->sc_syncbeacon = 0;
6421
6422 sc->sc_avgtsfdeltap = TDMA_DUMMY_MARKER;
6423 sc->sc_avgtsfdeltam = TDMA_DUMMY_MARKER;
6424
6425 ath_hal_intrset(ah, sc->sc_imask);
6426
6427 DPRINTF(sc, ATH_DEBUG_TDMA, "%s: slot %u len %uus cnt %u "
6428 "bsched %u guard %uus bintval %u TU dba prep %u\n", __func__,
6429 tdma->tdma_slot, tdma->tdma_slotlen, tdma->tdma_slotcnt,
6430 tdma->tdma_bintval, sc->sc_tdmaguard, sc->sc_tdmabintval,
6431 sc->sc_tdmadbaprep);
6432}
6433
6434/*
6435 * Update tdma operation. Called from the 802.11 layer
6436 * when a beacon is received from the TDMA station operating
6437 * in the slot immediately preceding us in the bss. Use
6438 * the rx timestamp for the beacon frame to update our
6439 * beacon timers so we follow their schedule. Note that
6440 * by using the rx timestamp we implicitly include the
6441 * propagation delay in our schedule.
6442 */
6443static void
6444ath_tdma_update(struct ieee80211_node *ni,
6445 const struct ieee80211_tdma_param *tdma, int changed)
6446{
6447#define TSF_TO_TU(_h,_l) \
6448 ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
6449#define TU_TO_TSF(_tu) (((u_int64_t)(_tu)) << 10)
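/* NB: 1 TU == 1024us, so e.g. TSF_TO_TU(0, 2048) == 2 and TU_TO_TSF(2) == 2048 */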
6450 struct ieee80211vap *vap = ni->ni_vap;
6451 struct ieee80211com *ic = ni->ni_ic;
6452 struct ath_softc *sc = ic->ic_ifp->if_softc;
6453 struct ath_hal *ah = sc->sc_ah;
6454 const HAL_RATE_TABLE *rt = sc->sc_currates;
6455 u_int64_t tsf, rstamp, nextslot, nexttbtt;
6456 u_int32_t txtime, nextslottu;
6457 int32_t tudelta, tsfdelta;
6458 const struct ath_rx_status *rs;
6459 int rix;
6460
6461 sc->sc_stats.ast_tdma_update++;
6462
6463 /*
6464 * Check for and adopt configuration changes.
6465 */
6466 if (changed != 0) {
6467 const struct ieee80211_tdma_state *ts = vap->iv_tdma;
6468
6469 ath_tdma_bintvalsetup(sc, ts);
6470 if (changed & TDMA_UPDATE_SLOTLEN)
6471 ath_wme_update(ic);
6472
6473 DPRINTF(sc, ATH_DEBUG_TDMA,
6474 "%s: adopt slot %u slotcnt %u slotlen %u us "
6475 "bintval %u TU\n", __func__,
6476 ts->tdma_slot, ts->tdma_slotcnt, ts->tdma_slotlen,
6477 sc->sc_tdmabintval);
6478
6479 /* XXX right? */
6480 ath_hal_intrset(ah, sc->sc_imask);
6481 /* NB: beacon timers programmed below */
6482 }
6483
6484 /* extend rx timestamp to 64 bits */
6485 rs = sc->sc_lastrs;
6486 tsf = ath_hal_gettsf64(ah);
6487 rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf);
6488 /*
6489 * The rx timestamp is set by the hardware on completing
6490 * reception (at the point where the rx descriptor is DMA'd
6491 * to the host). To find the start of our next slot we
6492 * must adjust this time by the time required to send
6493 * the packet just received.
6494 */
6495 rix = rt->rateCodeToIndex[rs->rs_rate];
6496 txtime = ath_hal_computetxtime(ah, rt, rs->rs_datalen, rix,
6497 rt->info[rix].shortPreamble);
6498	/* NB: << 9 cvts bintval from TU to TSF (<<10) and halves it (>>1) */
6499 nextslot = (rstamp - txtime) + (sc->sc_tdmabintval << 9);
6500 nextslottu = TSF_TO_TU(nextslot>>32, nextslot) & HAL_BEACON_PERIOD;
6501
6502 /*
6503 * Retrieve the hardware NextTBTT in usecs
6504 * and calculate the difference between what the
6505 * other station thinks and what we have programmed. This
6506 * lets us figure how to adjust our timers to match. The
6507 * adjustments are done by pulling the TSF forward and possibly
6508 * rewriting the beacon timers.
6509 */
6510 nexttbtt = ath_hal_getnexttbtt(ah);
6511 tsfdelta = (int32_t)((nextslot % TU_TO_TSF(HAL_BEACON_PERIOD + 1)) - nexttbtt);
6512
6513 DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
6514 "tsfdelta %d avg +%d/-%d\n", tsfdelta,
6515 TDMA_AVG(sc->sc_avgtsfdeltap), TDMA_AVG(sc->sc_avgtsfdeltam));
6516
6517 if (tsfdelta < 0) {
6518 TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
6519 TDMA_SAMPLE(sc->sc_avgtsfdeltam, -tsfdelta);
6520 tsfdelta = -tsfdelta % 1024;
6521 nextslottu++;
6522 } else if (tsfdelta > 0) {
6523 TDMA_SAMPLE(sc->sc_avgtsfdeltap, tsfdelta);
6524 TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
6525 tsfdelta = 1024 - (tsfdelta % 1024);
6526 nextslottu++;
6527 } else {
6528 TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
6529 TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
6530 }
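	/*
	 * Illustrative arithmetic for the above: tsfdelta == -100 becomes
	 * a forward adjustment of 100 % 1024 == 100; tsfdelta == +100
	 * becomes 1024 - 100 == 924. Either way nextslottu is bumped, so
	 * the TSF is only ever pulled forward.
	 */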
6531 tudelta = nextslottu - TSF_TO_TU(nexttbtt >> 32, nexttbtt);
6532
6533 /*
6534	 * Copy sender's timestamp into tdma ie so they can
6535 * calculate roundtrip time. We submit a beacon frame
6536 * below after any timer adjustment. The frame goes out
6537 * at the next TBTT so the sender can calculate the
6538 * roundtrip by inspecting the tdma ie in our beacon frame.
6539 *
6540	 * NB: This tstamp is subtly preserved when
6541 * IEEE80211_BEACON_TDMA is marked (e.g. when the
6542 * slot position changes) because ieee80211_add_tdma
6543 * skips over the data.
6544 */
6545 memcpy(ATH_VAP(vap)->av_boff.bo_tdma +
6546 __offsetof(struct ieee80211_tdma_param, tdma_tstamp),
6547 &ni->ni_tstamp.data, 8);
6548#if 0
6549 DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
6550 "tsf %llu nextslot %llu (%d, %d) nextslottu %u nexttbtt %llu (%d)\n",
6551 (unsigned long long) tsf, (unsigned long long) nextslot,
6552 (int)(nextslot - tsf), tsfdelta, nextslottu, nexttbtt, tudelta);
6553#endif
6554 /*
6555 * Adjust the beacon timers only when pulling them forward
6556 * or when going back by less than the beacon interval.
6557 * Negative jumps larger than the beacon interval seem to
6558 * cause the timers to stop and generally cause instability.
6559 * This basically filters out jumps due to missed beacons.
6560 */
6561 if (tudelta != 0 && (tudelta > 0 || -tudelta < sc->sc_tdmabintval)) {
6562 ath_tdma_settimers(sc, nextslottu, sc->sc_tdmabintval);
6563 sc->sc_stats.ast_tdma_timers++;
6564 }
6565 if (tsfdelta > 0) {
6566 ath_hal_adjusttsf(ah, tsfdelta);
6567 sc->sc_stats.ast_tdma_tsf++;
6568 }
6569 ath_tdma_beacon_send(sc, vap); /* prepare response */
6570#undef TU_TO_TSF
6571#undef TSF_TO_TU
6572}
6573
6574/*
6575 * Transmit a beacon frame at SWBA. Dynamic updates
6576 * to the frame contents are done as needed.
6577 */
6578static void
6579ath_tdma_beacon_send(struct ath_softc *sc, struct ieee80211vap *vap)
6580{
6581 struct ath_hal *ah = sc->sc_ah;
6582 struct ath_buf *bf;
6583 int otherant;
6584
6585 /*
6586 * Check if the previous beacon has gone out. If
6587 * not don't try to post another, skip this period
6588 * and wait for the next. Missed beacons indicate
6589 * a problem and should not occur. If we miss too
6590 * many consecutive beacons reset the device.
6591 */
6592 if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
6593 sc->sc_bmisscount++;
6594 DPRINTF(sc, ATH_DEBUG_BEACON,
6595 "%s: missed %u consecutive beacons\n",
6596 __func__, sc->sc_bmisscount);
6597 if (sc->sc_bmisscount >= ath_bstuck_threshold)
6598 taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
6599 return;
6600 }
6601 if (sc->sc_bmisscount != 0) {
6602 DPRINTF(sc, ATH_DEBUG_BEACON,
6603 "%s: resume beacon xmit after %u misses\n",
6604 __func__, sc->sc_bmisscount);
6605 sc->sc_bmisscount = 0;
6606 }
6607
6608 /*
6609 * Check recent per-antenna transmit statistics and flip
6610 * the default antenna if noticeably more frames went out
6611 * on the non-default antenna.
6612	 * XXX assumes 2 antennas
6613 */
6614 if (!sc->sc_diversity) {
6615 otherant = sc->sc_defant & 1 ? 2 : 1;
6616 if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
6617 ath_setdefantenna(sc, otherant);
6618 sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
6619 }
6620
6621 bf = ath_beacon_generate(sc, vap);
6622 if (bf != NULL) {
6623 /*
6624 * Stop any current dma and put the new frame on the queue.
6625 * This should never fail since we check above that no frames
6626 * are still pending on the queue.
6627 */
6628 if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
6629 DPRINTF(sc, ATH_DEBUG_ANY,
6630 "%s: beacon queue %u did not stop?\n",
6631 __func__, sc->sc_bhalq);
6632 /* NB: the HAL still stops DMA, so proceed */
6633 }
6634 ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
6635 ath_hal_txstart(ah, sc->sc_bhalq);
6636
6637 sc->sc_stats.ast_be_xmit++; /* XXX per-vap? */
6638
6639 /*
6640 * Record local TSF for our last send for use
6641 * in arbitrating slot collisions.
6642 */
6643 vap->iv_bss->ni_tstamp.tsf = ath_hal_gettsf64(ah);
6644 }
6645}
6646#endif /* IEEE80211_SUPPORT_TDMA */
6647
6648static void
6649ath_dfs_tasklet(void *p, int npending)
6650{
6651 struct ath_softc *sc = (struct ath_softc *) p;
6652 struct ifnet *ifp = sc->sc_ifp;
6653 struct ieee80211com *ic = ifp->if_l2com;
6654
6655 /*
6656 * If previous processing has found a radar event,
6657 * signal this to the net80211 layer to begin DFS
6658 * processing.
6659 */
6660 if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
6661 /* DFS event found, initiate channel change */
6662 ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
6663 }
6664}
6665
6666MODULE_VERSION(if_ath, 1);
6667MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */
1332
1333 /* XXX beacons ? */
1334}
1335
1336void
1337ath_shutdown(struct ath_softc *sc)
1338{
1339 struct ifnet *ifp = sc->sc_ifp;
1340
1341 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1342 __func__, ifp->if_flags);
1343
1344 ath_stop(ifp);
1345 /* NB: no point powering down chip as we're about to reboot */
1346}
1347
1348/*
1349 * Interrupt handler. Most of the actual processing is deferred.
1350 */
1351void
1352ath_intr(void *arg)
1353{
1354 struct ath_softc *sc = arg;
1355 struct ifnet *ifp = sc->sc_ifp;
1356 struct ath_hal *ah = sc->sc_ah;
1357 HAL_INT status = 0;
1358 uint32_t txqs;
1359
1360 /*
1361 * If we're inside a reset path, just print a warning and
1362 * clear the ISR. The reset routine will finish it for us.
1363 */
1364 ATH_PCU_LOCK(sc);
1365 if (sc->sc_inreset_cnt) {
1366 HAL_INT status;
1367 ath_hal_getisr(ah, &status); /* clear ISR */
1368 ath_hal_intrset(ah, 0); /* disable further intr's */
1369 DPRINTF(sc, ATH_DEBUG_ANY,
1370 "%s: in reset, ignoring: status=0x%x\n",
1371 __func__, status);
1372 ATH_PCU_UNLOCK(sc);
1373 return;
1374 }
1375
1376 if (sc->sc_invalid) {
1377 /*
1378 * The hardware is not ready/present, don't touch anything.
1379 * Note this can happen early on if the IRQ is shared.
1380 */
1381 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
1382 ATH_PCU_UNLOCK(sc);
1383 return;
1384 }
1385 if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */
1386 ATH_PCU_UNLOCK(sc);
1387 return;
1388 }
1389
1390 if ((ifp->if_flags & IFF_UP) == 0 ||
1391 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1392 HAL_INT status;
1393
1394 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1395 __func__, ifp->if_flags);
1396 ath_hal_getisr(ah, &status); /* clear ISR */
1397 ath_hal_intrset(ah, 0); /* disable further intr's */
1398 ATH_PCU_UNLOCK(sc);
1399 return;
1400 }
1401
1402 /*
1403 * Figure out the reason(s) for the interrupt. Note
1404 * that the hal returns a pseudo-ISR that may include
1405 * bits we haven't explicitly enabled so we mask the
1406	 * value to ensure we only process bits we requested.
1407 */
1408 ath_hal_getisr(ah, &status); /* NB: clears ISR too */
1409 DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
1410 CTR1(ATH_KTR_INTR, "ath_intr: mask=0x%.8x", status);
1411#ifdef ATH_KTR_INTR_DEBUG
1412 CTR5(ATH_KTR_INTR,
1413 "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
1414 ah->ah_intrstate[0],
1415 ah->ah_intrstate[1],
1416 ah->ah_intrstate[2],
1417 ah->ah_intrstate[3],
1418 ah->ah_intrstate[6]);
1419#endif
1420 status &= sc->sc_imask; /* discard unasked for bits */
1421
1422	/* Short-circuit unhandled interrupts */
1423 if (status == 0x0) {
1424 ATH_PCU_UNLOCK(sc);
1425 return;
1426 }
1427
1428 /*
1429 * Take a note that we're inside the interrupt handler, so
1430 * the reset routines know to wait.
1431 */
1432 sc->sc_intr_cnt++;
1433 ATH_PCU_UNLOCK(sc);
1434
1435 /*
1436 * Handle the interrupt. We won't run concurrent with the reset
1437 * or channel change routines as they'll wait for sc_intr_cnt
1438 * to be 0 before continuing.
1439 */
1440 if (status & HAL_INT_FATAL) {
1441 sc->sc_stats.ast_hardware++;
1442 ath_hal_intrset(ah, 0); /* disable intr's until reset */
1443 ath_fatal_proc(sc, 0);
1444 } else {
1445 if (status & HAL_INT_SWBA) {
1446 /*
1447 * Software beacon alert--time to send a beacon.
1448 * Handle beacon transmission directly; deferring
1449 * this is too slow to meet timing constraints
1450 * under load.
1451 */
1452#ifdef IEEE80211_SUPPORT_TDMA
1453 if (sc->sc_tdma) {
1454 if (sc->sc_tdmaswba == 0) {
1455 struct ieee80211com *ic = ifp->if_l2com;
1456 struct ieee80211vap *vap =
1457 TAILQ_FIRST(&ic->ic_vaps);
1458 ath_tdma_beacon_send(sc, vap);
1459 sc->sc_tdmaswba =
1460 vap->iv_tdma->tdma_bintval;
1461 } else
1462 sc->sc_tdmaswba--;
1463 } else
1464#endif
1465 {
1466 ath_beacon_proc(sc, 0);
1467#ifdef IEEE80211_SUPPORT_SUPERG
1468 /*
1469 * Schedule the rx taskq in case there's no
1470 * traffic so any frames held on the staging
1471 * queue are aged and potentially flushed.
1472 */
1473 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1474#endif
1475 }
1476 }
1477 if (status & HAL_INT_RXEOL) {
1478 int imask;
1479 CTR0(ATH_KTR_ERR, "ath_intr: RXEOL");
1480 ATH_PCU_LOCK(sc);
1481 /*
1482 * NB: the hardware should re-read the link when
1483 * RXE bit is written, but it doesn't work at
1484 * least on older hardware revs.
1485 */
1486 sc->sc_stats.ast_rxeol++;
1487 /*
1488 * Disable RXEOL/RXORN - prevent an interrupt
1489 * storm until the PCU logic can be reset.
1490 * In case the interface is reset some other
1491 * way before "sc_kickpcu" is called, don't
1492 * modify sc_imask - that way if it is reset
1493 * by a call to ath_reset() somehow, the
1494 * interrupt mask will be correctly reprogrammed.
1495 */
1496 imask = sc->sc_imask;
1497 imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
1498 ath_hal_intrset(ah, imask);
1499 /*
1500 * Only blank sc_rxlink if we've not yet kicked
1501 * the PCU.
1502 *
1503 * This isn't entirely correct - the correct solution
1504 * would be to have a PCU lock and engage that for
1505 * the duration of the PCU fiddling; which would include
1506 * running the RX process. Otherwise we could end up
1507 * messing up the RX descriptor chain and making the
1508 * RX desc list much shorter.
1509 */
1510 if (! sc->sc_kickpcu)
1511 sc->sc_rxlink = NULL;
1512 sc->sc_kickpcu = 1;
1513 /*
1514			 * Enqueue an RX proc to handle whatever
1515 * is in the RX queue.
1516 * This will then kick the PCU.
1517 */
1518 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1519 ATH_PCU_UNLOCK(sc);
1520 }
1521 if (status & HAL_INT_TXURN) {
1522 sc->sc_stats.ast_txurn++;
1523 /* bump tx trigger level */
1524 ath_hal_updatetxtriglevel(ah, AH_TRUE);
1525 }
1526 if (status & HAL_INT_RX) {
1527 sc->sc_stats.ast_rx_intr++;
1528 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1529 }
1530 if (status & HAL_INT_TX) {
1531 sc->sc_stats.ast_tx_intr++;
1532 /*
1533 * Grab all the currently set bits in the HAL txq bitmap
1534 * and blank them. This is the only place we should be
1535 * doing this.
1536 */
1537 ATH_PCU_LOCK(sc);
1538 txqs = 0xffffffff;
1539 ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
1540 sc->sc_txq_active |= txqs;
1541 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
1542 ATH_PCU_UNLOCK(sc);
1543 }
1544 if (status & HAL_INT_BMISS) {
1545 sc->sc_stats.ast_bmiss++;
1546 taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
1547 }
1548 if (status & HAL_INT_GTT)
1549 sc->sc_stats.ast_tx_timeout++;
1550 if (status & HAL_INT_CST)
1551 sc->sc_stats.ast_tx_cst++;
1552 if (status & HAL_INT_MIB) {
1553 sc->sc_stats.ast_mib++;
1554 ATH_PCU_LOCK(sc);
1555 /*
1556 * Disable interrupts until we service the MIB
1557 * interrupt; otherwise it will continue to fire.
1558 */
1559 ath_hal_intrset(ah, 0);
1560 /*
1561 * Let the hal handle the event. We assume it will
1562 * clear whatever condition caused the interrupt.
1563 */
1564 ath_hal_mibevent(ah, &sc->sc_halstats);
1565 /*
1566 * Don't reset the interrupt if we've just
1567 * kicked the PCU, or we may get a nested
1568 * RXEOL before the rxproc has had a chance
1569 * to run.
1570 */
1571 if (sc->sc_kickpcu == 0)
1572 ath_hal_intrset(ah, sc->sc_imask);
1573 ATH_PCU_UNLOCK(sc);
1574 }
1575 if (status & HAL_INT_RXORN) {
1576 /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
1577 CTR0(ATH_KTR_ERR, "ath_intr: RXORN");
1578 sc->sc_stats.ast_rxorn++;
1579 }
1580 }
1581 ATH_PCU_LOCK(sc);
1582 sc->sc_intr_cnt--;
1583 ATH_PCU_UNLOCK(sc);
1584}
1585
1586static void
1587ath_fatal_proc(void *arg, int pending)
1588{
1589 struct ath_softc *sc = arg;
1590 struct ifnet *ifp = sc->sc_ifp;
1591 u_int32_t *state;
1592 u_int32_t len;
1593 void *sp;
1594
1595 if_printf(ifp, "hardware error; resetting\n");
1596 /*
1597 * Fatal errors are unrecoverable. Typically these
1598 * are caused by DMA errors. Collect h/w state from
1599 * the hal so we can diagnose what's going on.
1600 */
1601 if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
1602 KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
1603 state = sp;
1604 if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
1605		    state[0], state[1], state[2], state[3],
1606 state[4], state[5]);
1607 }
1608 ath_reset(ifp, ATH_RESET_NOLOSS);
1609}
1610
1611static void
1612ath_bmiss_vap(struct ieee80211vap *vap)
1613{
1614 /*
1615 * Workaround phantom bmiss interrupts by sanity-checking
1616 * the time of our last rx'd frame. If it is within the
1617 * beacon miss interval then ignore the interrupt. If it's
1618 * truly a bmiss we'll get another interrupt soon and that'll
1619 * be dispatched up for processing. Note this applies only
1620 * for h/w beacon miss events.
1621 */
1622 if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
1623 struct ifnet *ifp = vap->iv_ic->ic_ifp;
1624 struct ath_softc *sc = ifp->if_softc;
1625 u_int64_t lastrx = sc->sc_lastrx;
1626 u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
1627 u_int bmisstimeout =
1628 vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;
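		/*
		 * Illustrative: a threshold of 10 beacons at a 100 TU
		 * beacon interval gives 10 * 100 * 1024 == 1024000us (~1s).
		 */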
1629
1630 DPRINTF(sc, ATH_DEBUG_BEACON,
1631 "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
1632 __func__, (unsigned long long) tsf,
1633 (unsigned long long)(tsf - lastrx),
1634 (unsigned long long) lastrx, bmisstimeout);
1635
1636 if (tsf - lastrx <= bmisstimeout) {
1637 sc->sc_stats.ast_bmiss_phantom++;
1638 return;
1639 }
1640 }
1641 ATH_VAP(vap)->av_bmiss(vap);
1642}
1643
1644static int
1645ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
1646{
1647 uint32_t rsize;
1648 void *sp;
1649
1650 if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize))
1651 return 0;
1652 KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
1653 *hangs = *(uint32_t *)sp;
1654 return 1;
1655}
1656
1657static void
1658ath_bmiss_proc(void *arg, int pending)
1659{
1660 struct ath_softc *sc = arg;
1661 struct ifnet *ifp = sc->sc_ifp;
1662 uint32_t hangs;
1663
1664 DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
1665
1666 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
1667 if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs);
1668 ath_reset(ifp, ATH_RESET_NOLOSS);
1669 } else
1670 ieee80211_beacon_miss(ifp->if_l2com);
1671}
1672
1673/*
1674 * Handle TKIP MIC setup to deal with hardware that doesn't do MIC
1675 * calcs together with WME. If necessary disable the crypto
1676 * hardware and mark the 802.11 state so keys will be setup
1677 * with the MIC work done in software.
1678 */
1679static void
1680ath_settkipmic(struct ath_softc *sc)
1681{
1682 struct ifnet *ifp = sc->sc_ifp;
1683 struct ieee80211com *ic = ifp->if_l2com;
1684
1685 if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
1686 if (ic->ic_flags & IEEE80211_F_WME) {
1687 ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
1688 ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
1689 } else {
1690 ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
1691 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
1692 }
1693 }
1694}
1695
1696static void
1697ath_init(void *arg)
1698{
1699 struct ath_softc *sc = (struct ath_softc *) arg;
1700 struct ifnet *ifp = sc->sc_ifp;
1701 struct ieee80211com *ic = ifp->if_l2com;
1702 struct ath_hal *ah = sc->sc_ah;
1703 HAL_STATUS status;
1704
1705 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1706 __func__, ifp->if_flags);
1707
1708 ATH_LOCK(sc);
1709 /*
1710 * Stop anything previously setup. This is safe
1711 * whether this is the first time through or not.
1712 */
1713 ath_stop_locked(ifp);
1714
1715 /*
1716 * The basic interface to setting the hardware in a good
1717 * state is ``reset''. On return the hardware is known to
1718 * be powered up and with interrupts disabled. This must
1719 * be followed by initialization of the appropriate bits
1720 * and then setup of the interrupt mask.
1721 */
1722 ath_settkipmic(sc);
1723 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) {
1724 if_printf(ifp, "unable to reset hardware; hal status %u\n",
1725 status);
1726 ATH_UNLOCK(sc);
1727 return;
1728 }
1729 ath_chan_change(sc, ic->ic_curchan);
1730
1731 /* Let DFS at it in case it's a DFS channel */
1732 ath_dfs_radar_enable(sc, ic->ic_curchan);
1733
1734 /*
1735 * Likewise this is set during reset so update
1736 * state cached in the driver.
1737 */
1738 sc->sc_diversity = ath_hal_getdiversity(ah);
1739 sc->sc_lastlongcal = 0;
1740 sc->sc_resetcal = 1;
1741 sc->sc_lastcalreset = 0;
1742 sc->sc_lastani = 0;
1743 sc->sc_lastshortcal = 0;
1744 sc->sc_doresetcal = AH_FALSE;
1745 /*
1746 * Beacon timers were cleared here; give ath_newstate()
1747 * a hint that the beacon timers should be poked when
1748 * things transition to the RUN state.
1749 */
1750 sc->sc_beacons = 0;
1751
1752 /*
1753 * Initial aggregation settings.
1754 */
1755 sc->sc_hwq_limit = ATH_AGGR_MIN_QDEPTH;
1756 sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW;
1757 sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH;
1758
1759 /*
1760 * Setup the hardware after reset: the key cache
1761 * is filled as needed and the receive engine is
1762 * set going. Frame transmit is handled entirely
1763 * in the frame output path; there's nothing to do
1764 * here except setup the interrupt mask.
1765 */
1766 if (ath_startrecv(sc) != 0) {
1767 if_printf(ifp, "unable to start recv logic\n");
1768 ATH_UNLOCK(sc);
1769 return;
1770 }
1771
1772 /*
1773 * Enable interrupts.
1774 */
1775 sc->sc_imask = HAL_INT_RX | HAL_INT_TX
1776 | HAL_INT_RXEOL | HAL_INT_RXORN
1777 | HAL_INT_FATAL | HAL_INT_GLOBAL;
1778 /*
1779 * Enable MIB interrupts when there are hardware phy counters.
1780 * Note we only do this (at the moment) for station mode.
1781 */
1782 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
1783 sc->sc_imask |= HAL_INT_MIB;
1784
1785 /* Enable global TX timeout and carrier sense timeout if available */
1786 if (ath_hal_gtxto_supported(ah))
1787 sc->sc_imask |= HAL_INT_GTT;
1788
1789 DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n",
1790 __func__, sc->sc_imask);
1791
1792 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1793 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
1794 ath_hal_intrset(ah, sc->sc_imask);
1795
1796 ATH_UNLOCK(sc);
1797
1798#ifdef ATH_TX99_DIAG
1799 if (sc->sc_tx99 != NULL)
1800 sc->sc_tx99->start(sc->sc_tx99);
1801 else
1802#endif
1803	ieee80211_start_all(ic);	/* start all vaps */
1804}
1805
1806static void
1807ath_stop_locked(struct ifnet *ifp)
1808{
1809 struct ath_softc *sc = ifp->if_softc;
1810 struct ath_hal *ah = sc->sc_ah;
1811
1812 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
1813 __func__, sc->sc_invalid, ifp->if_flags);
1814
1815 ATH_LOCK_ASSERT(sc);
1816 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1817 /*
1818 * Shutdown the hardware and driver:
1819 * reset 802.11 state machine
1820 * turn off timers
1821 * disable interrupts
1822 * turn off the radio
1823 * clear transmit machinery
1824 * clear receive machinery
1825 * drain and release tx queues
1826 * reclaim beacon resources
1827 * power down hardware
1828 *
1829 * Note that some of this work is not possible if the
1830 * hardware is gone (invalid).
1831 */
1832#ifdef ATH_TX99_DIAG
1833 if (sc->sc_tx99 != NULL)
1834 sc->sc_tx99->stop(sc->sc_tx99);
1835#endif
1836 callout_stop(&sc->sc_wd_ch);
1837 sc->sc_wd_timer = 0;
1838 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1839 if (!sc->sc_invalid) {
1840 if (sc->sc_softled) {
1841 callout_stop(&sc->sc_ledtimer);
1842 ath_hal_gpioset(ah, sc->sc_ledpin,
1843 !sc->sc_ledon);
1844 sc->sc_blinking = 0;
1845 }
1846 ath_hal_intrset(ah, 0);
1847 }
1848 ath_draintxq(sc, ATH_RESET_DEFAULT);
1849 if (!sc->sc_invalid) {
1850 ath_stoprecv(sc, 1);
1851 ath_hal_phydisable(ah);
1852 } else
1853 sc->sc_rxlink = NULL;
1854 ath_beacon_free(sc); /* XXX not needed */
1855 }
1856}
1857
1858#define MAX_TXRX_ITERATIONS 1000
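/*
 * NB: each pass of the wait loop below msleep()s for a single tick,
 * so assuming hz=1000 the total wait is bounded at roughly one second.
 */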
1859static void
1860ath_txrx_stop(struct ath_softc *sc)
1861{
1862 int i = MAX_TXRX_ITERATIONS;
1863
1864 ATH_UNLOCK_ASSERT(sc);
1865	/* Stop any new TX/RX from occurring */
1866 taskqueue_block(sc->sc_tq);
1867
1868 ATH_PCU_LOCK(sc);
1869 /*
1870 * Sleep until all the pending operations have completed.
1871 *
1872 * The caller must ensure that reset has been incremented
1873 * or the pending operations may continue being queued.
1874 */
1875 while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
1876 sc->sc_txstart_cnt || sc->sc_intr_cnt) {
1877 if (i <= 0)
1878 break;
1879 msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 1);
1880 i--;
1881 }
1882 ATH_PCU_UNLOCK(sc);
1883
1884 if (i <= 0)
1885 device_printf(sc->sc_dev,
1886 "%s: didn't finish after %d iterations\n",
1887 __func__, MAX_TXRX_ITERATIONS);
1888}
1889#undef MAX_TXRX_ITERATIONS
1890
1891static void
1892ath_txrx_start(struct ath_softc *sc)
1893{
1894
1895 taskqueue_unblock(sc->sc_tq);
1896}
1897
1898/*
1899 * Grab the reset lock, and wait around until no one else
1900 * is trying to do anything with it.
1901 *
1902 * This is totally horrible but we can't hold this lock for
1903 * long enough to do TX/RX or we end up with net80211/ip stack
1904 * LORs and eventual deadlock.
1905 *
1906 * "dowait" signals whether to spin, waiting for the reset
1907 * lock count to reach 0. This should (for now) only be used
1908 * during the reset path, as the rest of the code may not
1909 * be locking-reentrant enough to behave correctly.
1910 *
1911 * Another, cleaner way should be found to serialise all of
1912 * these operations.
1913 */
1914#define MAX_RESET_ITERATIONS 10
1915static int
1916ath_reset_grablock(struct ath_softc *sc, int dowait)
1917{
1918 int w = 0;
1919 int i = MAX_RESET_ITERATIONS;
1920
1921 ATH_PCU_LOCK_ASSERT(sc);
1922 do {
1923 if (sc->sc_inreset_cnt == 0) {
1924 w = 1;
1925 break;
1926 }
1927 if (dowait == 0) {
1928 w = 0;
1929 break;
1930 }
1931 ATH_PCU_UNLOCK(sc);
1932 pause("ath_reset_grablock", 1);
1933 i--;
1934 ATH_PCU_LOCK(sc);
1935 } while (i > 0);
1936
1937 /*
1938 * We always increment the refcounter, regardless
1939	 * of whether we managed to get it in an exclusive
1940 * way.
1941 */
1942 sc->sc_inreset_cnt++;
1943
1944 if (i <= 0)
1945 device_printf(sc->sc_dev,
1946 "%s: didn't finish after %d iterations\n",
1947 __func__, MAX_RESET_ITERATIONS);
1948
1949 if (w == 0)
1950 device_printf(sc->sc_dev,
1951 "%s: warning, recursive reset path!\n",
1952 __func__);
1953
1954 return w;
1955}
1956#undef MAX_RESET_ITERATIONS
1957
1958/*
1959 * XXX TODO: write ath_reset_releaselock
1960 */
1961
1962static void
1963ath_stop(struct ifnet *ifp)
1964{
1965 struct ath_softc *sc = ifp->if_softc;
1966
1967 ATH_LOCK(sc);
1968 ath_stop_locked(ifp);
1969 ATH_UNLOCK(sc);
1970}
1971
1972/*
1973 * Reset the hardware w/o losing operational state. This is
1974 * basically a more efficient way of doing ath_stop, ath_init,
1975 * followed by state transitions to the current 802.11
1976 * operational state. Used to recover from various errors and
1977 * to reset or reload hardware state.
1978 */
1979int
1980ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
1981{
1982 struct ath_softc *sc = ifp->if_softc;
1983 struct ieee80211com *ic = ifp->if_l2com;
1984 struct ath_hal *ah = sc->sc_ah;
1985 HAL_STATUS status;
1986 int i;
1987
1988 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
1989
1990 /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
1991 ATH_PCU_UNLOCK_ASSERT(sc);
1992 ATH_UNLOCK_ASSERT(sc);
1993
1994 ATH_PCU_LOCK(sc);
1995 if (ath_reset_grablock(sc, 1) == 0) {
1996 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
1997 __func__);
1998 }
1999 ath_hal_intrset(ah, 0); /* disable interrupts */
2000 ATH_PCU_UNLOCK(sc);
2001
2002 /*
2003 * Should now wait for pending TX/RX to complete
2004	 * and block future ones from occurring. This needs to be
2005 * done before the TX queue is drained.
2006 */
2007 ath_txrx_stop(sc);
2008 ath_draintxq(sc, reset_type); /* stop xmit side */
2009
2010 /*
2011 * Regardless of whether we're doing a no-loss flush or
2012 * not, stop the PCU and handle what's in the RX queue.
2013	 * That way frames which shouldn't be dropped aren't.
2014 */
2015 ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
2016 ath_rx_proc(sc, 0);
2017
2018 ath_settkipmic(sc); /* configure TKIP MIC handling */
2019 /* NB: indicate channel change so we do a full reset */
2020 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
2021 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
2022 __func__, status);
2023 sc->sc_diversity = ath_hal_getdiversity(ah);
2024
2025 /* Let DFS at it in case it's a DFS channel */
2026 ath_dfs_radar_enable(sc, ic->ic_curchan);
2027
2028 if (ath_startrecv(sc) != 0) /* restart recv */
2029 if_printf(ifp, "%s: unable to start recv logic\n", __func__);
2030 /*
2031 * We may be doing a reset in response to an ioctl
2032 * that changes the channel so update any state that
2033 * might change as a result.
2034 */
2035 ath_chan_change(sc, ic->ic_curchan);
2036 if (sc->sc_beacons) { /* restart beacons */
2037#ifdef IEEE80211_SUPPORT_TDMA
2038 if (sc->sc_tdma)
2039 ath_tdma_config(sc, NULL);
2040 else
2041#endif
2042 ath_beacon_config(sc, NULL);
2043 }
2044
2045 /*
2046 * Release the reset lock and re-enable interrupts here.
2047 * If an interrupt was being processed in ath_intr(),
2048 * it would disable interrupts at this point. So we have
2049 * to atomically enable interrupts and decrement the
2050 * reset counter - this way ath_intr() doesn't end up
2051 * disabling interrupts without a corresponding enable
2052	 * in the reset or channel change path.
2053 */
2054 ATH_PCU_LOCK(sc);
2055 sc->sc_inreset_cnt--;
2056 /* XXX only do this if sc_inreset_cnt == 0? */
2057 ath_hal_intrset(ah, sc->sc_imask);
2058 ATH_PCU_UNLOCK(sc);
2059
2060 /*
2061 * TX and RX can be started here. If it were started with
2062 * sc_inreset_cnt > 0, the TX and RX path would abort.
2063 * Thus if this is a nested call through the reset or
2064 * channel change code, TX completion will occur but
2065 * RX completion and ath_start / ath_tx_start will not
2066 * run.
2067 */
2068
2069 /* Restart TX/RX as needed */
2070 ath_txrx_start(sc);
2071
2072 /* XXX Restart TX completion and pending TX */
2073 if (reset_type == ATH_RESET_NOLOSS) {
2074 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
2075 if (ATH_TXQ_SETUP(sc, i)) {
2076 ATH_TXQ_LOCK(&sc->sc_txq[i]);
2077 ath_txq_restart_dma(sc, &sc->sc_txq[i]);
2078 ath_txq_sched(sc, &sc->sc_txq[i]);
2079 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
2080 }
2081 }
2082 }
2083
2084 /*
2085 * This may have been set during an ath_start() call which
2086 * set this once it detected a concurrent TX was going on.
2087 * So, clear it.
2088 */
2089 /* XXX do this inside of IF_LOCK? */
2090 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2091
2092 /* Handle any frames in the TX queue */
2093 /*
2094 * XXX should this be done by the caller, rather than
2095 * ath_reset() ?
2096 */
2097 ath_start(ifp); /* restart xmit */
2098 return 0;
2099}
2100
2101static int
2102ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
2103{
2104 struct ieee80211com *ic = vap->iv_ic;
2105 struct ifnet *ifp = ic->ic_ifp;
2106 struct ath_softc *sc = ifp->if_softc;
2107 struct ath_hal *ah = sc->sc_ah;
2108
2109 switch (cmd) {
2110 case IEEE80211_IOC_TXPOWER:
2111 /*
2112 * If per-packet TPC is enabled, then we have nothing
2113 * to do; otherwise we need to force the global limit.
2114 * All this can happen directly; no need to reset.
2115 */
2116 if (!ath_hal_gettpc(ah))
2117 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
2118 return 0;
2119 }
2120 /* XXX? Full or NOLOSS? */
2121 return ath_reset(ifp, ATH_RESET_FULL);
2122}
2123
2124struct ath_buf *
2125_ath_getbuf_locked(struct ath_softc *sc)
2126{
2127 struct ath_buf *bf;
2128
2129 ATH_TXBUF_LOCK_ASSERT(sc);
2130
2131 bf = TAILQ_FIRST(&sc->sc_txbuf);
2132 if (bf == NULL) {
2133 sc->sc_stats.ast_tx_getnobuf++;
2134 } else {
2135 if (bf->bf_flags & ATH_BUF_BUSY) {
2136 sc->sc_stats.ast_tx_getbusybuf++;
2137 bf = NULL;
2138 }
2139 }
2140
2141 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0)
2142 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
2143 else
2144 bf = NULL;
2145
2146 if (bf == NULL) {
2147 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
2148 TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
2149 "out of xmit buffers" : "xmit buffer busy");
2150 return NULL;
2151 }
2152
2153 /* Valid bf here; clear some basic fields */
2154 bf->bf_next = NULL; /* XXX just to be sure */
2155 bf->bf_last = NULL; /* XXX again, just to be sure */
2156 bf->bf_comp = NULL; /* XXX again, just to be sure */
2157 bzero(&bf->bf_state, sizeof(bf->bf_state));
2158
2159 return bf;
2160}
2161
2162/*
2163 * When retrying a software frame, buffers marked ATH_BUF_BUSY
2164 * can't be thrown back on the queue as they could still be
2165 * in use by the hardware.
2166 *
2167 * This duplicates the buffer, or returns NULL.
2168 *
2169 * The descriptor is also copied but the link pointers and
2170 * the DMA segments aren't copied; this frame should thus
2171 * be again passed through the descriptor setup/chain routines
2172 * so the link is correct.
2173 *
2174 * The caller must free the buffer using ath_freebuf().
2175 *
2176 * XXX TODO: this call shouldn't fail as it'll cause packet loss
2177 * XXX in the TX pathway when retries are needed.
2178 * XXX Figure out how to keep some buffers free, or factor the
2179 * XXX number of busy buffers into the xmit path (ath_start())
2180 * XXX so we don't over-commit.
2181 */
2182struct ath_buf *
2183ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf)
2184{
2185 struct ath_buf *tbf;
2186
2187 tbf = ath_getbuf(sc);
2188 if (tbf == NULL)
2189 return NULL; /* XXX failure? Why? */
2190
2191 /* Copy basics */
2192 tbf->bf_next = NULL;
2193 tbf->bf_nseg = bf->bf_nseg;
2194 tbf->bf_txflags = bf->bf_txflags;
2195 tbf->bf_flags = bf->bf_flags & ~ATH_BUF_BUSY;
2196 tbf->bf_status = bf->bf_status;
2197 tbf->bf_m = bf->bf_m;
2198 tbf->bf_node = bf->bf_node;
2199 /* will be setup by the chain/setup function */
2200 tbf->bf_lastds = NULL;
2201 /* for now, last == self */
2202 tbf->bf_last = tbf;
2203 tbf->bf_comp = bf->bf_comp;
2204
2205 /* NOTE: DMA segments will be setup by the setup/chain functions */
2206
2207 /* The caller has to re-init the descriptor + links */
2208
2209 /* Copy state */
2210 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state));
2211
2212 return tbf;
2213}
2214
2215struct ath_buf *
2216ath_getbuf(struct ath_softc *sc)
2217{
2218 struct ath_buf *bf;
2219
2220 ATH_TXBUF_LOCK(sc);
2221 bf = _ath_getbuf_locked(sc);
2222 if (bf == NULL) {
2223 struct ifnet *ifp = sc->sc_ifp;
2224
2225 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
2226 sc->sc_stats.ast_tx_qstop++;
2227 /* XXX do this inside of IF_LOCK? */
2228 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2229 }
2230 ATH_TXBUF_UNLOCK(sc);
2231 return bf;
2232}
2233
2234static void
2235ath_start(struct ifnet *ifp)
2236{
2237 struct ath_softc *sc = ifp->if_softc;
2238 struct ieee80211_node *ni;
2239 struct ath_buf *bf;
2240 struct mbuf *m, *next;
2241 ath_bufhead frags;
2242
2243 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
2244 return;
2245
2246 /* XXX is it ok to hold the ATH_LOCK here? */
2247 ATH_PCU_LOCK(sc);
2248 if (sc->sc_inreset_cnt > 0) {
2249 device_printf(sc->sc_dev,
2250 "%s: sc_inreset_cnt > 0; bailing\n", __func__);
2251 /* XXX do this inside of IF_LOCK? */
2252 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2253 ATH_PCU_UNLOCK(sc);
2254 return;
2255 }
2256 sc->sc_txstart_cnt++;
2257 ATH_PCU_UNLOCK(sc);
2258
2259 for (;;) {
2260 /*
2261 * Grab a TX buffer and associated resources.
2262 */
2263 bf = ath_getbuf(sc);
2264 if (bf == NULL)
2265 break;
2266
2267 IFQ_DEQUEUE(&ifp->if_snd, m);
2268 if (m == NULL) {
2269 ATH_TXBUF_LOCK(sc);
2270 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
2271 ATH_TXBUF_UNLOCK(sc);
2272 break;
2273 }
2274 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
2275 /*
2276 * Check for fragmentation. If this frame
2277 * has been broken up verify we have enough
2278 * buffers to send all the fragments so all
2279 * go out or none...
2280 */
2281 TAILQ_INIT(&frags);
2282 if ((m->m_flags & M_FRAG) &&
2283 !ath_txfrag_setup(sc, &frags, m, ni)) {
2284 DPRINTF(sc, ATH_DEBUG_XMIT,
2285 "%s: out of txfrag buffers\n", __func__);
2286 sc->sc_stats.ast_tx_nofrag++;
2287 ifp->if_oerrors++;
2288 ath_freetx(m);
2289 goto bad;
2290 }
2291 ifp->if_opackets++;
2292 nextfrag:
2293 /*
2294 * Pass the frame to the h/w for transmission.
2295 * Fragmented frames have each frag chained together
2296 * with m_nextpkt. We know there are sufficient ath_buf's
2297 * to send all the frags because of work done by
2298 * ath_txfrag_setup. We leave m_nextpkt set while
2299 * calling ath_tx_start so it can use it to extend the
2300	 * tx duration to cover the subsequent frag and
2301 * so it can reclaim all the mbufs in case of an error;
2302 * ath_tx_start clears m_nextpkt once it commits to
2303 * handing the frame to the hardware.
2304 */
2305 next = m->m_nextpkt;
2306 if (ath_tx_start(sc, ni, bf, m)) {
2307 bad:
2308 ifp->if_oerrors++;
2309 reclaim:
2310 bf->bf_m = NULL;
2311 bf->bf_node = NULL;
2312 ATH_TXBUF_LOCK(sc);
2313 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
2314 ath_txfrag_cleanup(sc, &frags, ni);
2315 ATH_TXBUF_UNLOCK(sc);
2316 if (ni != NULL)
2317 ieee80211_free_node(ni);
2318 continue;
2319 }
2320 if (next != NULL) {
2321 /*
2322 * Beware of state changing between frags.
2323 * XXX check sta power-save state?
2324 */
2325 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
2326 DPRINTF(sc, ATH_DEBUG_XMIT,
2327 "%s: flush fragmented packet, state %s\n",
2328 __func__,
2329 ieee80211_state_name[ni->ni_vap->iv_state]);
2330 ath_freetx(next);
2331 goto reclaim;
2332 }
2333 m = next;
2334 bf = TAILQ_FIRST(&frags);
2335 KASSERT(bf != NULL, ("no buf for txfrag"));
2336 TAILQ_REMOVE(&frags, bf, bf_list);
2337 goto nextfrag;
2338 }
2339
2340 sc->sc_wd_timer = 5;
2341 }
2342
2343 ATH_PCU_LOCK(sc);
2344 sc->sc_txstart_cnt--;
2345 ATH_PCU_UNLOCK(sc);
2346}
2347
2348static int
2349ath_media_change(struct ifnet *ifp)
2350{
2351 int error = ieee80211_media_change(ifp);
2352 /* NB: only the fixed rate can change and that doesn't need a reset */
2353 return (error == ENETRESET ? 0 : error);
2354}
2355
2356/*
2357 * Block/unblock tx+rx processing while a key change is done.
2358 * We assume the caller serializes key management operations
2359 * so we only need to worry about synchronization with other
2360 * uses that originate in the driver.
2361 */
2362static void
2363ath_key_update_begin(struct ieee80211vap *vap)
2364{
2365 struct ifnet *ifp = vap->iv_ic->ic_ifp;
2366 struct ath_softc *sc = ifp->if_softc;
2367
2368 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
2369 taskqueue_block(sc->sc_tq);
2370 IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */
2371}
2372
2373static void
2374ath_key_update_end(struct ieee80211vap *vap)
2375{
2376 struct ifnet *ifp = vap->iv_ic->ic_ifp;
2377 struct ath_softc *sc = ifp->if_softc;
2378
2379 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
2380 IF_UNLOCK(&ifp->if_snd);
2381 taskqueue_unblock(sc->sc_tq);
2382}
2383
2384/*
2385 * Calculate the receive filter according to the
2386 * operating mode and state:
2387 *
2388 * o always accept unicast, broadcast, and multicast traffic
2389 * o accept PHY error frames when the hardware doesn't have MIB
2390 * support to count them and we need them for ANI (sta mode only
2391 * until recently) and we are not scanning (ANI is disabled)
2392 * NB: older hals add rx filter bits out of sight and we need to
2393 * blindly preserve them
2394 * o probe request frames are accepted only when operating in
2395 * hostap, adhoc, mesh, or monitor modes
2396 * o enable promiscuous mode
2397 * - when in monitor mode
2398 * - if interface marked PROMISC (assumes bridge setting is filtered)
2399 * o accept beacons:
2400 * - when operating in station mode for collecting rssi data when
2401 * the station is otherwise quiet, or
2402 * - when operating in adhoc mode so the 802.11 layer creates
2403 * node table entries for peers,
2404 * - when scanning
2405 * - when doing s/w beacon miss (e.g. for ap+sta)
2406 * - when operating in ap mode in 11g to detect overlapping bss that
2407 * require protection
2408 * - when operating in mesh mode to detect neighbors
2409 * o accept control frames:
2410 * - when in monitor mode
2411 * XXX HT protection for 11n
2412 */
2413static u_int32_t
2414ath_calcrxfilter(struct ath_softc *sc)
2415{
2416 struct ifnet *ifp = sc->sc_ifp;
2417 struct ieee80211com *ic = ifp->if_l2com;
2418 u_int32_t rfilt;
2419
2420 rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
2421 if (!sc->sc_needmib && !sc->sc_scanning)
2422 rfilt |= HAL_RX_FILTER_PHYERR;
2423 if (ic->ic_opmode != IEEE80211_M_STA)
2424 rfilt |= HAL_RX_FILTER_PROBEREQ;
2425 /* XXX ic->ic_monvaps != 0? */
2426 if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC))
2427 rfilt |= HAL_RX_FILTER_PROM;
2428 if (ic->ic_opmode == IEEE80211_M_STA ||
2429 ic->ic_opmode == IEEE80211_M_IBSS ||
2430 sc->sc_swbmiss || sc->sc_scanning)
2431 rfilt |= HAL_RX_FILTER_BEACON;
2432 /*
2433 * NB: We don't recalculate the rx filter when
2434 * ic_protmode changes; otherwise we could do
2435 * this only when ic_protmode != NONE.
2436 */
2437 if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
2438 IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
2439 rfilt |= HAL_RX_FILTER_BEACON;
2440
2441 /*
2442 * Enable hardware PS-POLL RX only for hostap mode;
2443 * STA mode sends PS-POLL frames but never
2444 * receives them.
2445 */
2446 if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PSPOLL,
2447 0, NULL) == HAL_OK &&
2448 ic->ic_opmode == IEEE80211_M_HOSTAP)
2449 rfilt |= HAL_RX_FILTER_PSPOLL;
2450
2451 if (sc->sc_nmeshvaps) {
2452 rfilt |= HAL_RX_FILTER_BEACON;
2453 if (sc->sc_hasbmatch)
2454 rfilt |= HAL_RX_FILTER_BSSID;
2455 else
2456 rfilt |= HAL_RX_FILTER_PROM;
2457 }
2458 if (ic->ic_opmode == IEEE80211_M_MONITOR)
2459 rfilt |= HAL_RX_FILTER_CONTROL;
2460
2461 /*
2462 * Enable RX of compressed BAR frames only when doing
2463 * 802.11n. Required for A-MPDU.
2464 */
2465 if (IEEE80211_IS_CHAN_HT(ic->ic_curchan))
2466 rfilt |= HAL_RX_FILTER_COMPBAR;
2467
2468 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n",
2469 __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags);
2470 return rfilt;
2471}
2472
2473static void
2474ath_update_promisc(struct ifnet *ifp)
2475{
2476 struct ath_softc *sc = ifp->if_softc;
2477 u_int32_t rfilt;
2478
2479 /* configure rx filter */
2480 rfilt = ath_calcrxfilter(sc);
2481 ath_hal_setrxfilter(sc->sc_ah, rfilt);
2482
2483 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
2484}
2485
2486static void
2487ath_update_mcast(struct ifnet *ifp)
2488{
2489 struct ath_softc *sc = ifp->if_softc;
2490 u_int32_t mfilt[2];
2491
2492 /* calculate and install multicast filter */
2493 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2494 struct ifmultiaddr *ifma;
2495 /*
2496 * Merge multicast addresses to form the hardware filter.
2497 */
2498 mfilt[0] = mfilt[1] = 0;
2499 if_maddr_rlock(ifp); /* XXX need some fiddling to remove? */
2500 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2501 caddr_t dl;
2502 u_int32_t val;
2503 u_int8_t pos;
2504
2505 /* calculate XOR of eight 6bit values */
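 /*
  * Each 32-bit read is folded down to 6 bits by XOR-ing
  * together its 6-bit groups; the two reads at offsets 0
  * and 3 between them cover all 48 address bits (the second
  * read overlaps the first by a byte).  The final pos
  * (0..63) selects one bit of the 64-bit hardware filter:
  * mfilt[0] holds bits 0..31 and mfilt[1] bits 32..63.
  */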
2506 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2507 val = LE_READ_4(dl + 0);
2508 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2509 val = LE_READ_4(dl + 3);
2510 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2511 pos &= 0x3f;
2512 mfilt[pos / 32] |= (1 << (pos % 32));
2513 }
2514 if_maddr_runlock(ifp);
2515 } else
2516 mfilt[0] = mfilt[1] = ~0;
2517 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
2518 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
2519 __func__, mfilt[0], mfilt[1]);
2520}
2521
2522static void
2523ath_mode_init(struct ath_softc *sc)
2524{
2525 struct ifnet *ifp = sc->sc_ifp;
2526 struct ath_hal *ah = sc->sc_ah;
2527 u_int32_t rfilt;
2528
2529 /* configure rx filter */
2530 rfilt = ath_calcrxfilter(sc);
2531 ath_hal_setrxfilter(ah, rfilt);
2532
2533 /* configure operational mode */
2534 ath_hal_setopmode(ah);
2535
2536 /* handle any link-level address change */
2537 ath_hal_setmac(ah, IF_LLADDR(ifp));
2538
2539 /* calculate and install multicast filter */
2540 ath_update_mcast(ifp);
2541}
2542
2543/*
2544 * Set the slot time based on the current setting.
2545 */
2546static void
2547ath_setslottime(struct ath_softc *sc)
2548{
2549 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2550 struct ath_hal *ah = sc->sc_ah;
2551 u_int usec;
2552
2553 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
2554 usec = 13;
2555 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
2556 usec = 21;
2557 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
2558 /* honor short/long slot time only in 11g */
2559 /* XXX shouldn't honor on pure g or turbo g channel */
2560 if (ic->ic_flags & IEEE80211_F_SHSLOT)
2561 usec = HAL_SLOT_TIME_9;
2562 else
2563 usec = HAL_SLOT_TIME_20;
2564 } else
2565 usec = HAL_SLOT_TIME_9;
2566
2567 DPRINTF(sc, ATH_DEBUG_RESET,
2568 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
2569 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
2570 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
2571
2572 ath_hal_setslottime(ah, usec);
2573 sc->sc_updateslot = OK;
2574}
2575
2576/*
2577 * Callback from the 802.11 layer to update the
2578 * slot time based on the current setting.
2579 */
2580static void
2581ath_updateslot(struct ifnet *ifp)
2582{
2583 struct ath_softc *sc = ifp->if_softc;
2584 struct ieee80211com *ic = ifp->if_l2com;
2585
2586 /*
2587 * When not coordinating the BSS, change the hardware
2588 * immediately. For other operation we defer the change
2589 * until beacon updates have propagated to the stations.
2590 */
2591 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2592 ic->ic_opmode == IEEE80211_M_MBSS)
2593 sc->sc_updateslot = UPDATE;
2594 else
2595 ath_setslottime(sc);
2596}
2597
2598/*
2599 * Setup a h/w transmit queue for beacons.
2600 */
2601static int
2602ath_beaconq_setup(struct ath_hal *ah)
2603{
2604 HAL_TXQ_INFO qi;
2605
2606 memset(&qi, 0, sizeof(qi));
2607 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
2608 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
2609 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
2610 /* NB: for dynamic turbo, don't enable any other interrupts */
2611 qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE;
2612 return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
2613}
2614
2615/*
2616 * Setup the transmit queue parameters for the beacon queue.
2617 */
2618static int
2619ath_beaconq_config(struct ath_softc *sc)
2620{
2621#define ATH_EXPONENT_TO_VALUE(v) ((1<<(v))-1)
2622 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2623 struct ath_hal *ah = sc->sc_ah;
2624 HAL_TXQ_INFO qi;
2625
2626 ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
2627 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2628 ic->ic_opmode == IEEE80211_M_MBSS) {
2629 /*
2630 * Always burst out beacon and CAB traffic.
2631 */
2632 qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
2633 qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT;
2634 qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT;
2635 } else {
2636 struct wmeParams *wmep =
2637 &ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
2638 /*
2639 * Adhoc mode; important thing is to use 2x cwmin.
2640 */
2641 qi.tqi_aifs = wmep->wmep_aifsn;
2642 qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2643 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
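 /*
  * e.g. logcwmin 4 yields (2^4 - 1) = 15, doubled to 30
  * above; presumably the wider window reduces beacon
  * collisions between IBSS peers.
  */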
2644 }
2645
2646 if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
2647 device_printf(sc->sc_dev, "unable to update parameters for "
2648 "beacon hardware queue!\n");
2649 return 0;
2650 } else {
2651 ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
2652 return 1;
2653 }
2654#undef ATH_EXPONENT_TO_VALUE
2655}
2656
2657/*
2658 * Allocate and setup an initial beacon frame.
2659 */
2660static int
2661ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
2662{
2663 struct ieee80211vap *vap = ni->ni_vap;
2664 struct ath_vap *avp = ATH_VAP(vap);
2665 struct ath_buf *bf;
2666 struct mbuf *m;
2667 int error;
2668
2669 bf = avp->av_bcbuf;
2670 if (bf->bf_m != NULL) {
2671 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2672 m_freem(bf->bf_m);
2673 bf->bf_m = NULL;
2674 }
2675 if (bf->bf_node != NULL) {
2676 ieee80211_free_node(bf->bf_node);
2677 bf->bf_node = NULL;
2678 }
2679
2680 /*
2681 * NB: the beacon data buffer must be 32-bit aligned;
2682 * we assume the mbuf routines will return us something
2683 * with this alignment (perhaps should assert).
2684 */
2685 m = ieee80211_beacon_alloc(ni, &avp->av_boff);
2686 if (m == NULL) {
2687 device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__);
2688 sc->sc_stats.ast_be_nombuf++;
2689 return ENOMEM;
2690 }
2691 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
2692 bf->bf_segs, &bf->bf_nseg,
2693 BUS_DMA_NOWAIT);
2694 if (error != 0) {
2695 device_printf(sc->sc_dev,
2696 "%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n",
2697 __func__, error);
2698 m_freem(m);
2699 return error;
2700 }
2701
2702 /*
2703 * Calculate a TSF adjustment factor required for staggered
2704 * beacons. Note that we assume the format of the beacon
2705 * frame leaves the tstamp field immediately following the
2706 * header.
2707 */
2708 if (sc->sc_stagbeacons && avp->av_bslot > 0) {
2709 uint64_t tsfadjust;
2710 struct ieee80211_frame *wh;
2711
2712 /*
2713 * The beacon interval is in TU's; the TSF is in usecs.
2714 * We figure out how many TU's to add to align the timestamp
2715 * then convert to TSF units and handle byte swapping before
2716 * inserting it in the frame. The hardware will then add this
2717 * each time a beacon frame is sent. Note that we align vaps
2718 * 1..N and leave vap 0 untouched. This means vap 0 has a
2719 * timestamp in one beacon interval while the others get a
2720 * timestamp aligned to the next interval.
2721 */
2722 tsfadjust = ni->ni_intval *
2723 (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF;
2724 tsfadjust = htole64(tsfadjust << 10); /* TU -> TSF */
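 /*
  * e.g. with intval 100 TU and ATH_BCBUF 4, slots 1..3 get
  * adjustments of 75, 50 and 25 TU respectively; the << 10
  * scales TU to TSF units (1 TU == 1024 usec), so slot 1
  * is offset by 76800 usec.
  */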
2725
2726 DPRINTF(sc, ATH_DEBUG_BEACON,
2727 "%s: %s beacons bslot %d intval %u tsfadjust %llu\n",
2728 __func__, sc->sc_stagbeacons ? "stagger" : "burst",
2729 avp->av_bslot, ni->ni_intval,
2730 (long long unsigned) le64toh(tsfadjust));
2731
2732 wh = mtod(m, struct ieee80211_frame *);
2733 memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust));
2734 }
2735 bf->bf_m = m;
2736 bf->bf_node = ieee80211_ref_node(ni);
2737
2738 return 0;
2739}
2740
2741/*
2742 * Setup the beacon frame for transmit.
2743 */
2744static void
2745ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
2746{
2747#define USE_SHPREAMBLE(_ic) \
2748 (((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
2749 == IEEE80211_F_SHPREAMBLE)
2750 struct ieee80211_node *ni = bf->bf_node;
2751 struct ieee80211com *ic = ni->ni_ic;
2752 struct mbuf *m = bf->bf_m;
2753 struct ath_hal *ah = sc->sc_ah;
2754 struct ath_desc *ds;
2755 int flags, antenna;
2756 const HAL_RATE_TABLE *rt;
2757 u_int8_t rix, rate;
2758
2759 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
2760 __func__, m, m->m_len);
2761
2762 /* setup descriptors */
2763 ds = bf->bf_desc;
2764 bf->bf_last = bf;
2765 bf->bf_lastds = ds;
2766
2767 flags = HAL_TXDESC_NOACK;
2768 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
2769 ds->ds_link = bf->bf_daddr; /* self-linked */
2770 flags |= HAL_TXDESC_VEOL;
2771 /*
2772 * Let hardware handle antenna switching.
2773 */
2774 antenna = sc->sc_txantenna;
2775 } else {
2776 ds->ds_link = 0;
2777 /*
2778 * Switch antenna every 4 beacons.
2779 * XXX assumes two antennas
2780 */
2781 if (sc->sc_txantenna != 0)
2782 antenna = sc->sc_txantenna;
2783 else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0)
2784 antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1);
2785 else
2786 antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
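 /*
  * NB: the "& 4" test inspects bit 2 of the beacon counter,
  * which alternates every 4 counts; so the antenna toggles
  * between 1 and 2 every 4 beacon transmissions.
  */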
2787 }
2788
2789 KASSERT(bf->bf_nseg == 1,
2790 ("multi-segment beacon frame; nseg %u", bf->bf_nseg));
2791 ds->ds_data = bf->bf_segs[0].ds_addr;
2792 /*
2793 * Calculate rate code.
2794 * XXX everything at min xmit rate
2795 */
2796 rix = 0;
2797 rt = sc->sc_currates;
2798 rate = rt->info[rix].rateCode;
2799 if (USE_SHPREAMBLE(ic))
2800 rate |= rt->info[rix].shortPreamble;
2801 ath_hal_setuptxdesc(ah, ds
2802 , m->m_len + IEEE80211_CRC_LEN /* frame length */
2803 , sizeof(struct ieee80211_frame)/* header length */
2804 , HAL_PKT_TYPE_BEACON /* Atheros packet type */
2805 , ni->ni_txpower /* txpower XXX */
2806 , rate, 1 /* series 0 rate/tries */
2807 , HAL_TXKEYIX_INVALID /* no encryption */
2808 , antenna /* antenna mode */
2809 , flags /* no ack, veol for beacons */
2810 , 0 /* rts/cts rate */
2811 , 0 /* rts/cts duration */
2812 );
2813 /* NB: beacon's BufLen must be a multiple of 4 bytes */
2814 ath_hal_filltxdesc(ah, ds
2815 , roundup(m->m_len, 4) /* buffer length */
2816 , AH_TRUE /* first segment */
2817 , AH_TRUE /* last segment */
2818 , ds /* first descriptor */
2819 );
2820#if 0
2821 ath_desc_swap(ds);
2822#endif
2823#undef USE_SHPREAMBLE
2824}
2825
2826static void
2827ath_beacon_update(struct ieee80211vap *vap, int item)
2828{
2829 struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff;
2830
2831 setbit(bo->bo_flags, item);
2832}
2833
2834/*
2835 * Append the contents of src to dst; both queues
2836 * are assumed to be locked.
2837 */
2838static void
2839ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
2840{
2841 TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list);
2842 dst->axq_link = src->axq_link;
2843 src->axq_link = NULL;
2844 dst->axq_depth += src->axq_depth;
2845 dst->axq_aggr_depth += src->axq_aggr_depth;
2846 src->axq_depth = 0;
2847 src->axq_aggr_depth = 0;
2848}
2849
2850/*
2851 * Transmit a beacon frame at SWBA. Dynamic updates to the
2852 * frame contents are done as needed and the slot time is
2853 * also adjusted based on current state.
2854 */
2855static void
2856ath_beacon_proc(void *arg, int pending)
2857{
2858 struct ath_softc *sc = arg;
2859 struct ath_hal *ah = sc->sc_ah;
2860 struct ieee80211vap *vap;
2861 struct ath_buf *bf;
2862 int slot, otherant;
2863 uint32_t bfaddr;
2864
2865 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n",
2866 __func__, pending);
2867 /*
2868 * Check if the previous beacon has gone out. If
2869 * not don't try to post another, skip this period
2870 * and wait for the next. Missed beacons indicate
2871 * a problem and should not occur. If we miss too
2872 * many consecutive beacons, reset the device.
2873 */
2874 if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
2875 sc->sc_bmisscount++;
2876 sc->sc_stats.ast_be_missed++;
2877 DPRINTF(sc, ATH_DEBUG_BEACON,
2878 "%s: missed %u consecutive beacons\n",
2879 __func__, sc->sc_bmisscount);
2880 if (sc->sc_bmisscount >= ath_bstuck_threshold)
2881 taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
2882 return;
2883 }
2884 if (sc->sc_bmisscount != 0) {
2885 DPRINTF(sc, ATH_DEBUG_BEACON,
2886 "%s: resume beacon xmit after %u misses\n",
2887 __func__, sc->sc_bmisscount);
2888 sc->sc_bmisscount = 0;
2889 }
2890
2891 if (sc->sc_stagbeacons) { /* staggered beacons */
2892 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2893 uint32_t tsftu;
2894
2895 tsftu = ath_hal_gettsf32(ah) >> 10;
2896 /* XXX lintval */
2897 slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval;
2898 vap = sc->sc_bslot[(slot+1) % ATH_BCBUF];
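 /*
  * e.g. with lintval 100 TU and ATH_BCBUF 4 the SWBA fires
  * every 25 TU; tsftu % 100 in [0,25) maps to slot 0,
  * [25,50) to slot 1, and so on.  The +1 picks the next
  * slot, presumably because the SWBA is delivered ahead of
  * that slot's target TBTT.
  */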
2899 bfaddr = 0;
2900 if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) {
2901 bf = ath_beacon_generate(sc, vap);
2902 if (bf != NULL)
2903 bfaddr = bf->bf_daddr;
2904 }
2905 } else { /* burst'd beacons */
2906 uint32_t *bflink = &bfaddr;
2907
2908 for (slot = 0; slot < ATH_BCBUF; slot++) {
2909 vap = sc->sc_bslot[slot];
2910 if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) {
2911 bf = ath_beacon_generate(sc, vap);
2912 if (bf != NULL) {
2913 *bflink = bf->bf_daddr;
2914 bflink = &bf->bf_desc->ds_link;
2915 }
2916 }
2917 }
2918 *bflink = 0; /* terminate list */
2919 }
2920
2921 /*
2922 * Handle slot time change when a non-ERP station joins/leaves
2923 * an 11g network. The 802.11 layer notifies us via callback,
2924 * we mark updateslot, then wait one beacon before effecting
2925 * the change. This gives associated stations at least one
2926 * beacon interval to note the state change.
2927 */
2928 /* XXX locking */
2929 if (sc->sc_updateslot == UPDATE) {
2930 sc->sc_updateslot = COMMIT; /* commit next beacon */
2931 sc->sc_slotupdate = slot;
2932 } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
2933 ath_setslottime(sc); /* commit change to h/w */
2934
2935 /*
2936 * Check recent per-antenna transmit statistics and flip
2937 * the default antenna if noticeably more frames went out
2938 * on the non-default antenna.
2939 * XXX assumes 2 antennae
2940 */
2941 if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) {
2942 otherant = sc->sc_defant & 1 ? 2 : 1;
2943 if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
2944 ath_setdefantenna(sc, otherant);
2945 sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
2946 }
2947
2948 if (bfaddr != 0) {
2949 /*
2950 * Stop any current dma and put the new frame on the queue.
2951 * This should never fail since we check above that no frames
2952 * are still pending on the queue.
2953 */
2954 if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
2955 DPRINTF(sc, ATH_DEBUG_ANY,
2956 "%s: beacon queue %u did not stop?\n",
2957 __func__, sc->sc_bhalq);
2958 }
2959 /* NB: cabq traffic should already be queued and primed */
2960 ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr);
2961 ath_hal_txstart(ah, sc->sc_bhalq);
2962
2963 sc->sc_stats.ast_be_xmit++;
2964 }
2965}
2966
2967static struct ath_buf *
2968ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap)
2969{
2970 struct ath_vap *avp = ATH_VAP(vap);
2971 struct ath_txq *cabq = sc->sc_cabq;
2972 struct ath_buf *bf;
2973 struct mbuf *m;
2974 int nmcastq, error;
2975
2976 KASSERT(vap->iv_state >= IEEE80211_S_RUN,
2977 ("not running, state %d", vap->iv_state));
2978 KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));
2979
2980 /*
2981 * Update dynamic beacon contents. If this returns
2982 * non-zero then we need to remap the memory because
2983 * the beacon frame changed size (probably because
2984 * of the TIM bitmap).
2985 */
2986 bf = avp->av_bcbuf;
2987 m = bf->bf_m;
2988 nmcastq = avp->av_mcastq.axq_depth;
2989 if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) {
2990 /* XXX too conservative? */
2991 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2992 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
2993 bf->bf_segs, &bf->bf_nseg,
2994 BUS_DMA_NOWAIT);
2995 if (error != 0) {
2996 if_printf(vap->iv_ifp,
2997 "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
2998 __func__, error);
2999 return NULL;
3000 }
3001 }
3002 if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) {
3003 DPRINTF(sc, ATH_DEBUG_BEACON,
3004 "%s: cabq did not drain, mcastq %u cabq %u\n",
3005 __func__, nmcastq, cabq->axq_depth);
3006 sc->sc_stats.ast_cabq_busy++;
3007 if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) {
3008 /*
3009 * CABQ traffic from a previous vap is still pending.
3010 * We must drain the q before this beacon frame goes
3011 * out as otherwise this vap's stations will get cab
3012 * frames from a different vap.
3013 * XXX could be slow causing us to miss DBA
3014 */
3015 ath_tx_draintxq(sc, cabq);
3016 }
3017 }
3018 ath_beacon_setup(sc, bf);
3019 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
3020
3021 /*
3022 * Enable the CAB queue before the beacon queue to
3023 * insure cab frames are triggered by this beacon.
3024 */
3025 if (avp->av_boff.bo_tim[4] & 1) {
3026 struct ath_hal *ah = sc->sc_ah;
3027
3028 /* NB: only at DTIM */
3029 ATH_TXQ_LOCK(cabq);
3030 ATH_TXQ_LOCK(&avp->av_mcastq);
3031 if (nmcastq) {
3032 struct ath_buf *bfm;
3033
3034 /*
3035 * Move frames from the s/w mcast q to the h/w cab q.
3036 * XXX MORE_DATA bit
3037 */
3038 bfm = TAILQ_FIRST(&avp->av_mcastq.axq_q);
3039 if (cabq->axq_link != NULL) {
3040 *cabq->axq_link = bfm->bf_daddr;
3041 } else
3042 ath_hal_puttxbuf(ah, cabq->axq_qnum,
3043 bfm->bf_daddr);
3044 ath_txqmove(cabq, &avp->av_mcastq);
3045
3046 sc->sc_stats.ast_cabq_xmit += nmcastq;
3047 }
3048 /* NB: gated by beacon so safe to start here */
3049 if (! TAILQ_EMPTY(&(cabq->axq_q)))
3050 ath_hal_txstart(ah, cabq->axq_qnum);
3051 ATH_TXQ_UNLOCK(&avp->av_mcastq);
3052 ATH_TXQ_UNLOCK(cabq);
3053 }
3054 return bf;
3055}
3056
3057static void
3058ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap)
3059{
3060 struct ath_vap *avp = ATH_VAP(vap);
3061 struct ath_hal *ah = sc->sc_ah;
3062 struct ath_buf *bf;
3063 struct mbuf *m;
3064 int error;
3065
3066 KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));
3067
3068 /*
3069 * Update dynamic beacon contents. If this returns
3070 * non-zero then we need to remap the memory because
3071 * the beacon frame changed size (probably because
3072 * of the TIM bitmap).
3073 */
3074 bf = avp->av_bcbuf;
3075 m = bf->bf_m;
3076 if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) {
3077 /* XXX too conservative? */
3078 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3079 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
3080 bf->bf_segs, &bf->bf_nseg,
3081 BUS_DMA_NOWAIT);
3082 if (error != 0) {
3083 if_printf(vap->iv_ifp,
3084 "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
3085 __func__, error);
3086 return;
3087 }
3088 }
3089 ath_beacon_setup(sc, bf);
3090 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
3091
3092 /* NB: caller is known to have already stopped tx dma */
3093 ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
3094 ath_hal_txstart(ah, sc->sc_bhalq);
3095}
3096
3097/*
3098 * Reset the hardware after detecting beacons have stopped.
3099 */
3100static void
3101ath_bstuck_proc(void *arg, int pending)
3102{
3103 struct ath_softc *sc = arg;
3104 struct ifnet *ifp = sc->sc_ifp;
3105 uint32_t hangs = 0;
3106
3107 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0)
3108 if_printf(ifp, "bb hang detected (0x%x)\n", hangs);
3109
3110 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
3111 sc->sc_bmisscount);
3112 sc->sc_stats.ast_bstuck++;
3113 /*
3114 * This assumes that there's no simultaneous channel mode change
3115 * occurring.
3116 */
3117 ath_reset(ifp, ATH_RESET_NOLOSS);
3118}
3119
3120/*
3121 * Reclaim beacon resources and return buffer to the pool.
3122 */
3123static void
3124ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf)
3125{
3126
3127 if (bf->bf_m != NULL) {
3128 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3129 m_freem(bf->bf_m);
3130 bf->bf_m = NULL;
3131 }
3132 if (bf->bf_node != NULL) {
3133 ieee80211_free_node(bf->bf_node);
3134 bf->bf_node = NULL;
3135 }
3136 TAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list);
3137}
3138
3139/*
3140 * Reclaim beacon resources.
3141 */
3142static void
3143ath_beacon_free(struct ath_softc *sc)
3144{
3145 struct ath_buf *bf;
3146
3147 TAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
3148 if (bf->bf_m != NULL) {
3149 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3150 m_freem(bf->bf_m);
3151 bf->bf_m = NULL;
3152 }
3153 if (bf->bf_node != NULL) {
3154 ieee80211_free_node(bf->bf_node);
3155 bf->bf_node = NULL;
3156 }
3157 }
3158}
3159
3160/*
3161 * Configure the beacon and sleep timers.
3162 *
3163 * When operating as an AP this resets the TSF and sets
3164 * up the hardware to notify us when we need to issue beacons.
3165 *
3166 * When operating in station mode this sets up the beacon
3167 * timers according to the timestamp of the last received
3168 * beacon and the current TSF, configures PCF and DTIM
3169 * handling, programs the sleep registers so the hardware
3170 * will wakeup in time to receive beacons, and configures
3171 * the beacon miss handling so we'll receive a BMISS
3172 * interrupt when we stop seeing beacons from the AP
3173 * we've associated with.
3174 */
3175static void
3176ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
3177{
3178#define TSF_TO_TU(_h,_l) \
3179 ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
3180#define FUDGE 2
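 /*
  * NB: 1 TU == 1024 usec, so TSF_TO_TU is simply tsf >> 10
  * carried across the two 32-bit halves, i.e.
  * (high << (32-10)) | (low >> 10).  FUDGE adds 2 TU of
  * slop, presumably to cover the time spent computing and
  * programming the timers below.
  */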
3181 struct ath_hal *ah = sc->sc_ah;
3182 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
3183 struct ieee80211_node *ni;
3184 u_int32_t nexttbtt, intval, tsftu;
3185 u_int64_t tsf;
3186
3187 if (vap == NULL)
3188 vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */
3189 ni = vap->iv_bss;
3190
3191 /* extract tstamp from last beacon and convert to TU */
3192 nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4),
3193 LE_READ_4(ni->ni_tstamp.data));
3194 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
3195 ic->ic_opmode == IEEE80211_M_MBSS) {
3196 /*
3197 * For multi-bss ap/mesh support beacons are either staggered
3198 * evenly over N slots or burst together. For the former
3199 * arrange for the SWBA to be delivered for each slot.
3200 * Slots that are not occupied will generate nothing.
3201 */
3202 /* NB: the beacon interval is kept internally in TU's */
3203 intval = ni->ni_intval & HAL_BEACON_PERIOD;
3204 if (sc->sc_stagbeacons)
3205 intval /= ATH_BCBUF;
3206 } else {
3207 /* NB: the beacon interval is kept internally in TU's */
3208 intval = ni->ni_intval & HAL_BEACON_PERIOD;
3209 }
3210 if (nexttbtt == 0) /* e.g. for ap mode */
3211 nexttbtt = intval;
3212 else if (intval) /* NB: can be 0 for monitor mode */
3213 nexttbtt = roundup(nexttbtt, intval);
3214 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
3215 __func__, nexttbtt, intval, ni->ni_intval);
3216 if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) {
3217 HAL_BEACON_STATE bs;
3218 int dtimperiod, dtimcount;
3219 int cfpperiod, cfpcount;
3220
3221 /*
3222 * Setup dtim and cfp parameters according to
3223 * last beacon we received (which may be none).
3224 */
3225 dtimperiod = ni->ni_dtim_period;
3226 if (dtimperiod <= 0) /* NB: 0 if not known */
3227 dtimperiod = 1;
3228 dtimcount = ni->ni_dtim_count;
3229 if (dtimcount >= dtimperiod) /* NB: sanity check */
3230 dtimcount = 0; /* XXX? */
3231 cfpperiod = 1; /* NB: no PCF support yet */
3232 cfpcount = 0;
3233 /*
3234 * Pull nexttbtt forward to reflect the current
3235 * TSF and calculate dtim+cfp state for the result.
3236 */
3237 tsf = ath_hal_gettsf64(ah);
3238 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
3239 do {
3240 nexttbtt += intval;
3241 if (--dtimcount < 0) {
3242 dtimcount = dtimperiod - 1;
3243 if (--cfpcount < 0)
3244 cfpcount = cfpperiod - 1;
3245 }
3246 } while (nexttbtt < tsftu);
3247 memset(&bs, 0, sizeof(bs));
3248 bs.bs_intval = intval;
3249 bs.bs_nexttbtt = nexttbtt;
3250 bs.bs_dtimperiod = dtimperiod*intval;
3251 bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
3252 bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
3253 bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
3254 bs.bs_cfpmaxduration = 0;
3255#if 0
3256 /*
3257 * The 802.11 layer records the offset to the DTIM
3258 * bitmap while receiving beacons; use it here to
3259 * enable h/w detection of our AID being marked in
3260 * the bitmap vector (to indicate frames for us are
3261 * pending at the AP).
3262 * XXX do DTIM handling in s/w to WAR old h/w bugs
3263 * XXX enable based on h/w rev for newer chips
3264 */
3265 bs.bs_timoffset = ni->ni_timoff;
3266#endif
3267 /*
3268 * Calculate the number of consecutive beacons to miss
3269 * before taking a BMISS interrupt.
3270 * Note that we clamp the result to at most 10 beacons.
3271 */
3272 bs.bs_bmissthreshold = vap->iv_bmissthreshold;
3273 if (bs.bs_bmissthreshold > 10)
3274 bs.bs_bmissthreshold = 10;
3275 else if (bs.bs_bmissthreshold <= 0)
3276 bs.bs_bmissthreshold = 1;
3277
3278 /*
3279 * Calculate sleep duration. The configuration is
3280 * given in ms. We insure a multiple of the beacon
3281 * period is used. Also, if the sleep duration is
3282 * greater than the DTIM period then it makes sense
3283 * to make it a multiple of that.
3284 *
3285 * XXX fixed at 100ms
3286 */
3287 bs.bs_sleepduration =
3288 roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
3289 if (bs.bs_sleepduration > bs.bs_dtimperiod)
3290 bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);
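 /*
  * e.g. IEEE80211_MS_TO_TU(100) is roughly 97 TU
  * (100000 / 1024), so with a 100 TU beacon interval the
  * sleep duration rounds up to 100 TU; with a dtim period
  * of 300 TU it then stays at 100 TU since it is not
  * greater than the dtim period.
  */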
3291
3292 DPRINTF(sc, ATH_DEBUG_BEACON,
3293 "%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
3294 , __func__
3295 , tsf, tsftu
3296 , bs.bs_intval
3297 , bs.bs_nexttbtt
3298 , bs.bs_dtimperiod
3299 , bs.bs_nextdtim
3300 , bs.bs_bmissthreshold
3301 , bs.bs_sleepduration
3302 , bs.bs_cfpperiod
3303 , bs.bs_cfpmaxduration
3304 , bs.bs_cfpnext
3305 , bs.bs_timoffset
3306 );
3307 ath_hal_intrset(ah, 0);
3308 ath_hal_beacontimers(ah, &bs);
3309 sc->sc_imask |= HAL_INT_BMISS;
3310 ath_hal_intrset(ah, sc->sc_imask);
3311 } else {
3312 ath_hal_intrset(ah, 0);
3313 if (nexttbtt == intval)
3314 intval |= HAL_BEACON_RESET_TSF;
3315 if (ic->ic_opmode == IEEE80211_M_IBSS) {
3316 /*
3317 * In IBSS mode enable the beacon timers but only
3318 * enable SWBA interrupts if we need to manually
3319 * prepare beacon frames. Otherwise we use a
3320 * self-linked tx descriptor and let the hardware
3321 * deal with things.
3322 */
3323 intval |= HAL_BEACON_ENA;
3324 if (!sc->sc_hasveol)
3325 sc->sc_imask |= HAL_INT_SWBA;
3326 if ((intval & HAL_BEACON_RESET_TSF) == 0) {
3327 /*
3328 * Pull nexttbtt forward to reflect
3329 * the current TSF.
3330 */
3331 tsf = ath_hal_gettsf64(ah);
3332 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
3333 do {
3334 nexttbtt += intval;
3335 } while (nexttbtt < tsftu);
3336 }
3337 ath_beaconq_config(sc);
3338 } else if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
3339 ic->ic_opmode == IEEE80211_M_MBSS) {
3340 /*
3341 * In AP/mesh mode we enable the beacon timers
3342 * and SWBA interrupts to prepare beacon frames.
3343 */
3344 intval |= HAL_BEACON_ENA;
3345 sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */
3346 ath_beaconq_config(sc);
3347 }
3348 ath_hal_beaconinit(ah, nexttbtt, intval);
3349 sc->sc_bmisscount = 0;
3350 ath_hal_intrset(ah, sc->sc_imask);
3351 /*
3352 * When using a self-linked beacon descriptor in
3353 * ibss mode load it once here.
3354 */
3355 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol)
3356 ath_beacon_start_adhoc(sc, vap);
3357 }
3358 sc->sc_syncbeacon = 0;
3359#undef FUDGE
3360#undef TSF_TO_TU
3361}
3362
3363static void
3364ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3365{
3366 bus_addr_t *paddr = (bus_addr_t*) arg;
3367 KASSERT(error == 0, ("error %u on bus_dma callback", error));
3368 *paddr = segs->ds_addr;
3369}
3370
3371static int
3372ath_descdma_setup(struct ath_softc *sc,
3373 struct ath_descdma *dd, ath_bufhead *head,
3374 const char *name, int nbuf, int ndesc)
3375{
3376#define DS2PHYS(_dd, _ds) \
3377 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
3378#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
3379 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
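/*
 * NB: ATH_DESC_4KB_BOUND_CHECK is true when a descriptor of _len
 * bytes starting at _daddr would cross a 4KB page boundary, i.e.
 * when its offset within the page exceeds 0x1000 - _len.  e.g. a
 * 64-byte descriptor at page offset 0xFD0 crosses (0xFD0 > 0xFC0)
 * while one at 0xFC0 ends exactly on the boundary and does not.
 */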
3380 struct ifnet *ifp = sc->sc_ifp;
3381 uint8_t *ds;
3382 struct ath_buf *bf;
3383 int i, bsize, error;
3384 int desc_len;
3385
3386 desc_len = sizeof(struct ath_desc);
3387
3388 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n",
3389 __func__, name, nbuf, ndesc);
3390
3391 dd->dd_name = name;
3392 dd->dd_desc_len = desc_len * nbuf * ndesc;
3393
3394 /*
3395 * Merlin work-around:
3396 * Descriptors that cross the 4KB boundary can't be used.
3397 * Assume one skipped descriptor per 4KB page.
3398 */
3399 if (! ath_hal_split4ktrans(sc->sc_ah)) {
3400 int numdescpage = 4096 / (desc_len * ndesc);
3401 dd->dd_desc_len = (nbuf / numdescpage + 1) * 4096;
3402 }
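 /*
  * e.g. with a (hypothetical) 64-byte descriptor and ndesc 1,
  * numdescpage is 64; 512 buffers then round up to
  * (512/64 + 1) * 4096 = 9 pages of descriptor memory,
  * leaving room for one skipped descriptor per page.
  */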
3403
3404 /*
3405 * Setup DMA descriptor area.
3406 */
3407 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
3408 PAGE_SIZE, 0, /* alignment, bounds */
3409 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
3410 BUS_SPACE_MAXADDR, /* highaddr */
3411 NULL, NULL, /* filter, filterarg */
3412 dd->dd_desc_len, /* maxsize */
3413 1, /* nsegments */
3414 dd->dd_desc_len, /* maxsegsize */
3415 BUS_DMA_ALLOCNOW, /* flags */
3416 NULL, /* lockfunc */
3417 NULL, /* lockarg */
3418 &dd->dd_dmat);
3419 if (error != 0) {
3420 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
3421 return error;
3422 }
3423
3424 /* allocate descriptors */
3425 error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
3426 if (error != 0) {
3427 if_printf(ifp, "unable to create dmamap for %s descriptors, "
3428 "error %u\n", dd->dd_name, error);
3429 goto fail0;
3430 }
3431
3432 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
3433 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
3434 &dd->dd_dmamap);
3435 if (error != 0) {
3436 if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
3437 "error %u\n", nbuf * ndesc, dd->dd_name, error);
3438 goto fail1;
3439 }
3440
3441 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
3442 dd->dd_desc, dd->dd_desc_len,
3443 ath_load_cb, &dd->dd_desc_paddr,
3444 BUS_DMA_NOWAIT);
3445 if (error != 0) {
3446 if_printf(ifp, "unable to map %s descriptors, error %u\n",
3447 dd->dd_name, error);
3448 goto fail2;
3449 }
3450
3451 ds = (uint8_t *) dd->dd_desc;
3452 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
3453 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
3454 (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
3455
3456 /* allocate rx buffers */
3457 bsize = sizeof(struct ath_buf) * nbuf;
3458 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
3459 if (bf == NULL) {
3460 if_printf(ifp, "malloc of %s buffers failed, size %u\n",
3461 dd->dd_name, bsize);
3462 goto fail3;
3463 }
3464 dd->dd_bufptr = bf;
3465
3466 TAILQ_INIT(head);
3467 for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * desc_len)) {
3468 bf->bf_desc = (struct ath_desc *) ds;
3469 bf->bf_daddr = DS2PHYS(dd, ds);
3470 if (! ath_hal_split4ktrans(sc->sc_ah)) {
3471 /*
3472 * Merlin WAR: Skip descriptor addresses which
3473 * cause a 4KB boundary crossing at any point
3474 * in the descriptor.
3475 */
3476 if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr,
3477 desc_len * ndesc)) {
3478 /* Start at the next page */
3479 ds += 0x1000 - (bf->bf_daddr & 0xFFF);
3480 bf->bf_desc = (struct ath_desc *) ds;
3481 bf->bf_daddr = DS2PHYS(dd, ds);
3482 }
3483 }
3484 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
3485 &bf->bf_dmamap);
3486 if (error != 0) {
3487 if_printf(ifp, "unable to create dmamap for %s "
3488 "buffer %u, error %u\n", dd->dd_name, i, error);
3489 ath_descdma_cleanup(sc, dd, head);
3490 return error;
3491 }
3492 bf->bf_lastds = bf->bf_desc; /* Just an initial value */
3493 TAILQ_INSERT_TAIL(head, bf, bf_list);
3494 }
3495 return 0;
3496fail3:
3497 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3498fail2:
3499 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3500fail1:
3501 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
3502fail0:
3503 bus_dma_tag_destroy(dd->dd_dmat);
3504 memset(dd, 0, sizeof(*dd));
3505 return error;
3506#undef DS2PHYS
3507#undef ATH_DESC_4KB_BOUND_CHECK
3508}
3509
3510static void
3511ath_descdma_cleanup(struct ath_softc *sc,
3512 struct ath_descdma *dd, ath_bufhead *head)
3513{
3514 struct ath_buf *bf;
3515 struct ieee80211_node *ni;
3516
3517 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3518 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3519 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
3520 bus_dma_tag_destroy(dd->dd_dmat);
3521
3522 TAILQ_FOREACH(bf, head, bf_list) {
3523 if (bf->bf_m) {
3524 m_freem(bf->bf_m);
3525 bf->bf_m = NULL;
3526 }
3527 if (bf->bf_dmamap != NULL) {
3528 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
3529 bf->bf_dmamap = NULL;
3530 }
3531 ni = bf->bf_node;
3532 bf->bf_node = NULL;
3533 if (ni != NULL) {
3534 /*
3535 * Reclaim node reference.
3536 */
3537 ieee80211_free_node(ni);
3538 }
3539 }
3540
3541 TAILQ_INIT(head);
3542 free(dd->dd_bufptr, M_ATHDEV);
3543 memset(dd, 0, sizeof(*dd));
3544}
3545
3546static int
3547ath_desc_alloc(struct ath_softc *sc)
3548{
3549 int error;
3550
3551 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
3552 "rx", ath_rxbuf, 1);
3553 if (error != 0)
3554 return error;
3555
3556 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
3557 "tx", ath_txbuf, ATH_TXDESC);
3558 if (error != 0) {
3559 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3560 return error;
3561 }
3562
3563 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
3564 "beacon", ATH_BCBUF, 1);
3565 if (error != 0) {
3566 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3567 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3568 return error;
3569 }
3570 return 0;
3571}
3572
3573static void
3574ath_desc_free(struct ath_softc *sc)
3575{
3576
3577 if (sc->sc_bdma.dd_desc_len != 0)
3578 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
3579 if (sc->sc_txdma.dd_desc_len != 0)
3580 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3581 if (sc->sc_rxdma.dd_desc_len != 0)
3582 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3583}
3584
3585static struct ieee80211_node *
3586ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3587{
3588 struct ieee80211com *ic = vap->iv_ic;
3589 struct ath_softc *sc = ic->ic_ifp->if_softc;
3590 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
3591 struct ath_node *an;
3592
3593 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
3594 if (an == NULL) {
3595 /* XXX stat+msg */
3596 return NULL;
3597 }
3598 ath_rate_node_init(sc, an);
3599
3600 /* Setup the mutex - there's no associd yet so set the name to NULL */
3601 snprintf(an->an_name, sizeof(an->an_name), "%s: node %p",
3602 device_get_nameunit(sc->sc_dev), an);
3603 mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF);
3604
3605 /* XXX setup ath_tid */
3606 ath_tx_tid_init(sc, an);
3607
3608 DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
3609 return &an->an_node;
3610}
3611
3612static void
3613ath_node_cleanup(struct ieee80211_node *ni)
3614{
3615 struct ieee80211com *ic = ni->ni_ic;
3616 struct ath_softc *sc = ic->ic_ifp->if_softc;
3617
3618 /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */
3619 ath_tx_node_flush(sc, ATH_NODE(ni));
3620 ath_rate_node_cleanup(sc, ATH_NODE(ni));
3621 sc->sc_node_cleanup(ni);
3622}
3623
3624static void
3625ath_node_free(struct ieee80211_node *ni)
3626{
3627 struct ieee80211com *ic = ni->ni_ic;
3628 struct ath_softc *sc = ic->ic_ifp->if_softc;
3629
3630 DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);
3631 mtx_destroy(&ATH_NODE(ni)->an_mtx);
3632 sc->sc_node_free(ni);
3633}
3634
3635static void
3636ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
3637{
3638 struct ieee80211com *ic = ni->ni_ic;
3639 struct ath_softc *sc = ic->ic_ifp->if_softc;
3640 struct ath_hal *ah = sc->sc_ah;
3641
3642 *rssi = ic->ic_node_getrssi(ni);
3643 if (ni->ni_chan != IEEE80211_CHAN_ANYC)
3644 *noise = ath_hal_getchannoise(ah, ni->ni_chan);
3645 else
3646 *noise = -95; /* nominally correct */
3647}
3648
3649static int
3650ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
3651{
3652 struct ath_hal *ah = sc->sc_ah;
3653 int error;
3654 struct mbuf *m;
3655 struct ath_desc *ds;
3656
3657 m = bf->bf_m;
3658 if (m == NULL) {
3659 /*
3660 * NB: by assigning a page to the rx dma buffer we
3661 * implicitly satisfy the Atheros requirement that
3662 * this buffer be cache-line-aligned and sized to be
3663 * a multiple of the cache line size. Not doing this
3664 * causes weird stuff to happen (for the 5210 at least).
3665 */
3666 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3667 if (m == NULL) {
3668 DPRINTF(sc, ATH_DEBUG_ANY,
3669 "%s: no mbuf/cluster\n", __func__);
3670 sc->sc_stats.ast_rx_nombuf++;
3671 return ENOMEM;
3672 }
3673 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
3674
3675 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
3676 bf->bf_dmamap, m,
3677 bf->bf_segs, &bf->bf_nseg,
3678 BUS_DMA_NOWAIT);
3679 if (error != 0) {
3680 DPRINTF(sc, ATH_DEBUG_ANY,
3681 "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
3682 __func__, error);
3683 sc->sc_stats.ast_rx_busdma++;
3684 m_freem(m);
3685 return error;
3686 }
3687 KASSERT(bf->bf_nseg == 1,
3688 ("multi-segment packet; nseg %u", bf->bf_nseg));
3689 bf->bf_m = m;
3690 }
3691 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);
3692
3693 /*
3694 * Setup descriptors. For receive we always terminate
3695 * the descriptor list with a self-linked entry so we'll
3696 * not get overrun under high load (as can happen with a
3697 * 5212 when ANI processing enables PHY error frames).
3698 *
3699 * To insure the last descriptor is self-linked we create
3700 * each descriptor as self-linked and add it to the end. As
3701 * each additional descriptor is added the previous self-linked
3702 * entry is ``fixed'' naturally. This should be safe even
3703 * if DMA is happening. When processing RX interrupts we
3704 * never remove/process the last, self-linked, entry on the
3705 * descriptor list. This insures the hardware always has
3706 * someplace to write a new frame.
3707 */
3708 /*
3709 * 11N: we can no longer afford to self-link the last descriptor.
3710 * The MAC acknowledges BA status as long as it copies frames to the
3711 * host buffer (or rx fifo). This can incorrectly acknowledge packets
3712 * to a sender if the last desc is self-linked.
3713 */
3714 ds = bf->bf_desc;
3715 if (sc->sc_rxslink)
3716 ds->ds_link = bf->bf_daddr; /* link to self */
3717 else
3718 ds->ds_link = 0; /* terminate the list */
3719 ds->ds_data = bf->bf_segs[0].ds_addr;
3720 ath_hal_setuprxdesc(ah, ds
3721 , m->m_len /* buffer size */
3722 , 0
3723 );
3724
3725 if (sc->sc_rxlink != NULL)
3726 *sc->sc_rxlink = bf->bf_daddr;
3727 sc->sc_rxlink = &ds->ds_link;
3728 return 0;
3729}
3730
3731/*
3732 * Extend 15-bit time stamp from rx descriptor to
3733 * a full 64-bit TSF using the specified TSF.
3734 */
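/*
 * e.g. if tsf is 0x1_8010 and rstamp is 0x7ff0, the low 15 bits of
 * the tsf (0x0010) are below rstamp, so the stamp was taken before
 * the TSF rolled into the current 0x8000 window; back up one window
 * and combine: (0x1_0000 | 0x7ff0) = 0x1_7ff0.
 */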
3735static __inline u_int64_t
3736ath_extend_tsf15(u_int32_t rstamp, u_int64_t tsf)
3737{
3738 if ((tsf & 0x7fff) < rstamp)
3739 tsf -= 0x8000;
3740
3741 return ((tsf &~ 0x7fff) | rstamp);
3742}
3743
3744/*
3745 * Extend 32-bit time stamp from rx descriptor to
3746 * a full 64-bit TSF using the specified TSF.
3747 */
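/*
 * Here a difference of more than 0x10000000 (1/16th of the 32-bit
 * range, roughly 4.5 minutes at 1 usec per tick) between rstamp and
 * the low 32 bits of the tsf is taken to mean the 32-bit counter
 * wrapped between the two samples, so the full tsf is adjusted by
 * one 2^32 step in the appropriate direction.
 */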
3748static __inline u_int64_t
3749ath_extend_tsf32(u_int32_t rstamp, u_int64_t tsf)
3750{
3751 u_int32_t tsf_low = tsf & 0xffffffff;
3752 u_int64_t tsf64 = (tsf & ~0xffffffffULL) | rstamp;
3753
3754 if (rstamp > tsf_low && (rstamp - tsf_low > 0x10000000))
3755 tsf64 -= 0x100000000ULL;
3756
3757 if (rstamp < tsf_low && (tsf_low - rstamp > 0x10000000))
3758 tsf64 += 0x100000000ULL;
3759
3760 return tsf64;
3761}
3762
3763/*
3764 * Extend the TSF from the RX descriptor to a full 64 bit TSF.
3765 * Earlier hardware versions only wrote the low 15 bits of the
3766 * TSF into the RX descriptor; later versions (AR5416 and up)
3767 * include the 32 bit TSF value.
3768 */
3769static __inline u_int64_t
3770ath_extend_tsf(struct ath_softc *sc, u_int32_t rstamp, u_int64_t tsf)
3771{
3772 if (sc->sc_rxtsf32)
3773 return ath_extend_tsf32(rstamp, tsf);
3774 else
3775 return ath_extend_tsf15(rstamp, tsf);
3776}
3777
3778/*
3779 * Intercept management frames to collect beacon rssi data
3780 * and to do ibss merges.
3781 */
3782static void
3783ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
3784 int subtype, int rssi, int nf)
3785{
3786 struct ieee80211vap *vap = ni->ni_vap;
3787 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
3788
3789 /*
3790 * Call up first so subsequent work can use information
3791 * potentially stored in the node (e.g. for ibss merge).
3792 */
3793 ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, nf);
3794 switch (subtype) {
3795 case IEEE80211_FC0_SUBTYPE_BEACON:
3796 /* update rssi statistics for use by the hal */
3797 ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
3798 if (sc->sc_syncbeacon &&
3799 ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) {
3800 /*
3801 * Resync beacon timers using the tsf of the beacon
3802 * frame we just received.
3803 */
3804 ath_beacon_config(sc, vap);
3805 }
3806 /* fall thru... */
3807 case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
3808 if (vap->iv_opmode == IEEE80211_M_IBSS &&
3809 vap->iv_state == IEEE80211_S_RUN) {
3810 uint32_t rstamp = sc->sc_lastrs->rs_tstamp;
3811 uint64_t tsf = ath_extend_tsf(sc, rstamp,
3812 ath_hal_gettsf64(sc->sc_ah));
3813 /*
3814 * Handle ibss merge as needed; check the tsf on the
3815 * frame before attempting the merge. The 802.11 spec
3816 * says the station should change its bssid to match
3817 * the oldest station with the same ssid, where oldest
3818 * is determined by the tsf. Note that hardware
3819 * reconfiguration happens through callback to
3820 * ath_newstate as the state machine will go from
3821 * RUN -> RUN when this happens.
3822 */
3823 if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
3824 DPRINTF(sc, ATH_DEBUG_STATE,
3825 "ibss merge, rstamp %u tsf %ju "
3826 "tstamp %ju\n", rstamp, (uintmax_t)tsf,
3827 (uintmax_t)ni->ni_tstamp.tsf);
3828 (void) ieee80211_ibss_merge(ni);
3829 }
3830 }
3831 break;
3832 }
3833}
3834
3835/*
3836 * Set the default antenna.
3837 */
3838static void
3839ath_setdefantenna(struct ath_softc *sc, u_int antenna)
3840{
3841 struct ath_hal *ah = sc->sc_ah;
3842
3843 /* XXX block beacon interrupts */
3844 ath_hal_setdefantenna(ah, antenna);
3845 if (sc->sc_defant != antenna)
3846 sc->sc_stats.ast_ant_defswitch++;
3847 sc->sc_defant = antenna;
3848 sc->sc_rxotherant = 0;
3849}
3850
3851static void
3852ath_rx_tap(struct ifnet *ifp, struct mbuf *m,
3853 const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
3854{
3855#define CHAN_HT20 htole32(IEEE80211_CHAN_HT20)
3856#define CHAN_HT40U htole32(IEEE80211_CHAN_HT40U)
3857#define CHAN_HT40D htole32(IEEE80211_CHAN_HT40D)
3858#define CHAN_HT (CHAN_HT20|CHAN_HT40U|CHAN_HT40D)
3859 struct ath_softc *sc = ifp->if_softc;
3860 const HAL_RATE_TABLE *rt;
3861 uint8_t rix;
3862
3863 rt = sc->sc_currates;
3864 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
3865 rix = rt->rateCodeToIndex[rs->rs_rate];
3866 sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
3867 sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
3868#ifdef AH_SUPPORT_AR5416
3869 sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT;
3870 if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) { /* HT rate */
3871 struct ieee80211com *ic = ifp->if_l2com;
3872
3873 if ((rs->rs_flags & HAL_RX_2040) == 0)
3874 sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
3875 else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan))
3876 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
3877 else
3878 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
3879 if ((rs->rs_flags & HAL_RX_GI) == 0)
3880 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI;
3881 }
3882#endif
3883 sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(sc, rs->rs_tstamp, tsf));
3884 if (rs->rs_status & HAL_RXERR_CRC)
3885 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
3886 /* XXX propagate other error flags from descriptor */
3887 sc->sc_rx_th.wr_antnoise = nf;
3888 sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi;
3889 sc->sc_rx_th.wr_antenna = rs->rs_antenna;
3890#undef CHAN_HT
3891#undef CHAN_HT20
3892#undef CHAN_HT40U
3893#undef CHAN_HT40D
3894}
3895
3896static void
3897ath_handle_micerror(struct ieee80211com *ic,
3898 struct ieee80211_frame *wh, int keyix)
3899{
3900 struct ieee80211_node *ni;
3901
3902 /* XXX recheck MIC to deal w/ chips that lie */
3903 /* XXX discard MIC errors on !data frames */
3904 ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
3905 if (ni != NULL) {
3906 ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix);
3907 ieee80211_free_node(ni);
3908 }
3909}
3910
3911/*
3912 * Only run the RX proc if it's not already running.
3913 * Since this may get run as part of the reset/flush path,
3914 * the task can't clash with an existing, running tasklet.
3915 */
3916static void
3917ath_rx_tasklet(void *arg, int npending)
3918{
3919 struct ath_softc *sc = arg;
3920
3921 CTR1(ATH_KTR_INTR, "ath_rx_proc: pending=%d", npending);
3922 DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
3923 ATH_PCU_LOCK(sc);
3924 if (sc->sc_inreset_cnt > 0) {
3925 device_printf(sc->sc_dev,
3926 "%s: sc_inreset_cnt > 0; skipping\n", __func__);
3927 ATH_PCU_UNLOCK(sc);
3928 return;
3929 }
3930 ATH_PCU_UNLOCK(sc);
3931 ath_rx_proc(sc, 1);
3932}
3933
3934static void
3935ath_rx_proc(struct ath_softc *sc, int resched)
3936{
3937#define PA2DESC(_sc, _pa) \
3938 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
3939 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
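/*
 * NB: PA2DESC maps a descriptor's physical (DMA) address back to its
 * kernel virtual address by applying the same offset within the
 * contiguous rx descriptor block; it is used below to hand the HAL
 * the next descriptor in the chain via ds->ds_link.
 */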
3940 struct ath_buf *bf;
3941 struct ifnet *ifp = sc->sc_ifp;
3942 struct ieee80211com *ic = ifp->if_l2com;
3943 struct ath_hal *ah = sc->sc_ah;
3944 struct ath_desc *ds;
3945 struct ath_rx_status *rs;
3946 struct mbuf *m;
3947 struct ieee80211_node *ni;
3948 int len, type, ngood;
3949 HAL_STATUS status;
3950 int16_t nf;
3951 u_int64_t tsf;
3952 int npkts = 0;
3953
3954 /* XXX we must not hold the ATH_LOCK here */
3955 ATH_UNLOCK_ASSERT(sc);
3956 ATH_PCU_UNLOCK_ASSERT(sc);
3957
3958 ATH_PCU_LOCK(sc);
3959 sc->sc_rxproc_cnt++;
3960 ATH_PCU_UNLOCK(sc);
3961
3962 DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: called\n", __func__);
3963 ngood = 0;
3964 nf = ath_hal_getchannoise(ah, sc->sc_curchan);
3965 sc->sc_stats.ast_rx_noise = nf;
3966 tsf = ath_hal_gettsf64(ah);
3967 do {
3968 bf = TAILQ_FIRST(&sc->sc_rxbuf);
3969 if (sc->sc_rxslink && bf == NULL) { /* NB: shouldn't happen */
3970 if_printf(ifp, "%s: no buffer!\n", __func__);
3971 break;
3972 } else if (bf == NULL) {
3973 /*
3974 * End of List:
3975 * this can happen for non-self-linked RX chains
3976 */
3977 sc->sc_stats.ast_rx_hitqueueend++;
3978 break;
3979 }
3980 m = bf->bf_m;
3981 if (m == NULL) { /* NB: shouldn't happen */
3982 /*
3983 * If mbuf allocation failed previously there
3984 * will be no mbuf; try again to re-populate it.
3985 */
3986 /* XXX make debug msg */
3987 if_printf(ifp, "%s: no mbuf!\n", __func__);
3988 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
3989 goto rx_next;
3990 }
3991 ds = bf->bf_desc;
3992 if (ds->ds_link == bf->bf_daddr) {
3993 /* NB: never process the self-linked entry at the end */
3994 sc->sc_stats.ast_rx_hitqueueend++;
3995 break;
3996 }
3997 /* XXX sync descriptor memory */
3998 /*
3999 * Must provide the virtual address of the current
4000 * descriptor, the physical address, and the virtual
4001 * address of the next descriptor in the h/w chain.
4002 * This allows the HAL to look ahead to see if the
4003 * hardware is done with a descriptor by checking the
4004 * done bit in the following descriptor and the address
4005 * of the current descriptor the DMA engine is working
4006 * on. All this is necessary because of our use of
4007 * a self-linked list to avoid rx overruns.
4008 */
4009 rs = &bf->bf_status.ds_rxstat;
4010 status = ath_hal_rxprocdesc(ah, ds,
4011 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
4012#ifdef ATH_DEBUG
4013 if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
4014 ath_printrxbuf(sc, bf, 0, status == HAL_OK);
4015#endif
4016 if (status == HAL_EINPROGRESS)
4017 break;
4018
4019 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
4020 npkts++;
4021
4022 /* These aren't specifically errors */
4023#ifdef AH_SUPPORT_AR5416
4024 if (rs->rs_flags & HAL_RX_GI)
4025 sc->sc_stats.ast_rx_halfgi++;
4026 if (rs->rs_flags & HAL_RX_2040)
4027 sc->sc_stats.ast_rx_2040++;
4028 if (rs->rs_flags & HAL_RX_DELIM_CRC_PRE)
4029 sc->sc_stats.ast_rx_pre_crc_err++;
4030 if (rs->rs_flags & HAL_RX_DELIM_CRC_POST)
4031 sc->sc_stats.ast_rx_post_crc_err++;
4032 if (rs->rs_flags & HAL_RX_DECRYPT_BUSY)
4033 sc->sc_stats.ast_rx_decrypt_busy_err++;
4034 if (rs->rs_flags & HAL_RX_HI_RX_CHAIN)
4035 sc->sc_stats.ast_rx_hi_rx_chain++;
4036#endif /* AH_SUPPORT_AR5416 */
4037
4038 if (rs->rs_status != 0) {
4039 if (rs->rs_status & HAL_RXERR_CRC)
4040 sc->sc_stats.ast_rx_crcerr++;
4041 if (rs->rs_status & HAL_RXERR_FIFO)
4042 sc->sc_stats.ast_rx_fifoerr++;
4043 if (rs->rs_status & HAL_RXERR_PHY) {
4044 sc->sc_stats.ast_rx_phyerr++;
4045 /* Process DFS radar events */
4046 if ((rs->rs_phyerr == HAL_PHYERR_RADAR) ||
4047 (rs->rs_phyerr == HAL_PHYERR_FALSE_RADAR_EXT)) {
4048 /* Since we're touching the frame data, sync it */
4049 bus_dmamap_sync(sc->sc_dmat,
4050 bf->bf_dmamap,
4051 BUS_DMASYNC_POSTREAD);
4052 /* Now pass it to the radar processing code */
4053 ath_dfs_process_phy_err(sc, mtod(m, char *), tsf, rs);
4054 }
4055
4056 /* Be suitably paranoid about receiving phy errors out of the stats array bounds */
4057 if (rs->rs_phyerr < 64)
4058 sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++;
4059 goto rx_error; /* NB: don't count in ierrors */
4060 }
4061 if (rs->rs_status & HAL_RXERR_DECRYPT) {
4062 /*
4063 * Decrypt error. If the error occurred
4064 * because there was no hardware key, then
4065 * let the frame through so the upper layers
4066 * can process it. This is necessary for 5210
4067 * parts which have no way to setup a ``clear''
4068 * key cache entry.
4069 *
4070 * XXX do key cache faulting
4071 */
4072 if (rs->rs_keyix == HAL_RXKEYIX_INVALID)
4073 goto rx_accept;
4074 sc->sc_stats.ast_rx_badcrypt++;
4075 }
4076 if (rs->rs_status & HAL_RXERR_MIC) {
4077 sc->sc_stats.ast_rx_badmic++;
4078 /*
4079 * Do minimal work required to hand off
4080 * the 802.11 header for notification.
4081 */
4082 /* XXX frag's and qos frames */
4083 len = rs->rs_datalen;
4084 if (len >= sizeof (struct ieee80211_frame)) {
4085 bus_dmamap_sync(sc->sc_dmat,
4086 bf->bf_dmamap,
4087 BUS_DMASYNC_POSTREAD);
4088 ath_handle_micerror(ic,
4089 mtod(m, struct ieee80211_frame *),
4090 sc->sc_splitmic ?
4091 rs->rs_keyix-32 : rs->rs_keyix);
4092 }
4093 }
4094 ifp->if_ierrors++;
4095rx_error:
4096 /*
4097 * Cleanup any pending partial frame.
4098 */
4099 if (sc->sc_rxpending != NULL) {
4100 m_freem(sc->sc_rxpending);
4101 sc->sc_rxpending = NULL;
4102 }
4103 /*
4104 * When a tap is present pass error frames
4105 * that have been requested. By default we
4106 * pass decrypt+mic errors but others may be
4107 * interesting (e.g. crc).
4108 */
4109 if (ieee80211_radiotap_active(ic) &&
4110 (rs->rs_status & sc->sc_monpass)) {
4111 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
4112 BUS_DMASYNC_POSTREAD);
4113 /* NB: bpf needs the mbuf length setup */
4114 len = rs->rs_datalen;
4115 m->m_pkthdr.len = m->m_len = len;
4116 bf->bf_m = NULL;
4117 ath_rx_tap(ifp, m, rs, tsf, nf);
4118 ieee80211_radiotap_rx_all(ic, m);
4119 m_freem(m);
4120 }
4121 /* XXX pass MIC errors up for s/w recalculation */
4122 goto rx_next;
4123 }
4124rx_accept:
4125 /*
4126 * Sync and unmap the frame. At this point we're
4127 * committed to passing the mbuf somewhere so clear
4128 * bf_m; this means a new mbuf must be allocated
4129 * when the rx descriptor is setup again to receive
4130 * another frame.
4131 */
4132 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
4133 BUS_DMASYNC_POSTREAD);
4134 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
4135 bf->bf_m = NULL;
4136
4137 len = rs->rs_datalen;
4138 m->m_len = len;
4139
4140 if (rs->rs_more) {
4141 /*
4142 * Frame spans multiple descriptors; save
4143 * it for the next completed descriptor, it
4144 * will be used to construct a jumbogram.
4145 */
4146 if (sc->sc_rxpending != NULL) {
4147 /* NB: max frame size is currently 2 clusters */
4148 sc->sc_stats.ast_rx_toobig++;
4149 m_freem(sc->sc_rxpending);
4150 }
4151 m->m_pkthdr.rcvif = ifp;
4152 m->m_pkthdr.len = len;
4153 sc->sc_rxpending = m;
4154 goto rx_next;
4155 } else if (sc->sc_rxpending != NULL) {
4156 /*
4157 * This is the second part of a jumbogram,
4158 * chain it to the first mbuf, adjust the
4159 * frame length, and clear the rxpending state.
4160 */
4161 sc->sc_rxpending->m_next = m;
4162 sc->sc_rxpending->m_pkthdr.len += len;
4163 m = sc->sc_rxpending;
4164 sc->sc_rxpending = NULL;
4165 } else {
4166 /*
4167 * Normal single-descriptor receive; setup
4168 * the rcvif and packet length.
4169 */
4170 m->m_pkthdr.rcvif = ifp;
4171 m->m_pkthdr.len = len;
4172 }
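		/*
		 * Illustrative note (not from the original source): a frame
		 * larger than a single rx cluster completes as two
		 * descriptors; the rs_more path above parks the first part
		 * in sc_rxpending and the next completion chains the second
		 * mbuf on and extends m_pkthdr.len to cover both.
		 */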
4173
4174 /*
4175 * Validate rs->rs_antenna.
4176 *
4177 * Some users w/ AR9285 NICs have reported crashes
4178 * here because rs_antenna field is bogusly large.
4179 * Let's enforce the maximum antenna limit of 8
4180 * (and it shouldn't be hard coded, but that's a
4181 * separate problem) and if there's an issue, print
4182 * out an error and adjust rs_antenna to something
4183 * sensible.
4184 *
4185 * This code should be removed once the actual
4186 * root cause of the issue has been identified.
4187 * For example, it may be that the rs_antenna
4188	 * field is only valid for the last frame of
4189	 * an aggregate and it just happens that it is
4190	 * "mostly" right. (This is a general statement -
4191	 * the majority of the statistics are only valid
4192	 * for the last frame in an aggregate.)
4193 */
4194 if (rs->rs_antenna > 7) {
4195 device_printf(sc->sc_dev, "%s: rs_antenna > 7 (%d)\n",
4196 __func__, rs->rs_antenna);
4197#ifdef ATH_DEBUG
4198 ath_printrxbuf(sc, bf, 0, status == HAL_OK);
4199#endif /* ATH_DEBUG */
4200 rs->rs_antenna = 0; /* XXX better than nothing */
4201 }
4202
4203 ifp->if_ipackets++;
4204 sc->sc_stats.ast_ant_rx[rs->rs_antenna]++;
4205
4206 /*
4207 * Populate the rx status block. When there are bpf
4208 * listeners we do the additional work to provide
4209 * complete status. Otherwise we fill in only the
4210 * material required by ieee80211_input. Note that
4211	 * the noise setting is filled in above.
4212 */
4213 if (ieee80211_radiotap_active(ic))
4214 ath_rx_tap(ifp, m, rs, tsf, nf);
4215
4216 /*
4217 * From this point on we assume the frame is at least
4218 * as large as ieee80211_frame_min; verify that.
4219 */
4220 if (len < IEEE80211_MIN_LEN) {
4221 if (!ieee80211_radiotap_active(ic)) {
4222 DPRINTF(sc, ATH_DEBUG_RECV,
4223 "%s: short packet %d\n", __func__, len);
4224 sc->sc_stats.ast_rx_tooshort++;
4225 } else {
4226 /* NB: in particular this captures ack's */
4227 ieee80211_radiotap_rx_all(ic, m);
4228 }
4229 m_freem(m);
4230 goto rx_next;
4231 }
4232
4233 if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
4234 const HAL_RATE_TABLE *rt = sc->sc_currates;
4235 uint8_t rix = rt->rateCodeToIndex[rs->rs_rate];
4236
4237 ieee80211_dump_pkt(ic, mtod(m, caddr_t), len,
4238 sc->sc_hwmap[rix].ieeerate, rs->rs_rssi);
4239 }
4240
4241 m_adj(m, -IEEE80211_CRC_LEN);
4242
4243 /*
4244 * Locate the node for sender, track state, and then
4245 * pass the (referenced) node up to the 802.11 layer
4246 * for its use.
4247 */
4248 ni = ieee80211_find_rxnode_withkey(ic,
4249 mtod(m, const struct ieee80211_frame_min *),
4250 rs->rs_keyix == HAL_RXKEYIX_INVALID ?
4251 IEEE80211_KEYIX_NONE : rs->rs_keyix);
4252 sc->sc_lastrs = rs;
4253
4254#ifdef AH_SUPPORT_AR5416
4255 if (rs->rs_isaggr)
4256 sc->sc_stats.ast_rx_agg++;
4257#endif /* AH_SUPPORT_AR5416 */
4258
4259 if (ni != NULL) {
4260 /*
4261 * Only punt packets for ampdu reorder processing for
4262 * 11n nodes; net80211 enforces that M_AMPDU is only
4263 * set for 11n nodes.
4264 */
4265 if (ni->ni_flags & IEEE80211_NODE_HT)
4266 m->m_flags |= M_AMPDU;
4267
4268 /*
4269 * Sending station is known, dispatch directly.
4270 */
4271 type = ieee80211_input(ni, m, rs->rs_rssi, nf);
4272 ieee80211_free_node(ni);
4273 /*
4274 * Arrange to update the last rx timestamp only for
4275 * frames from our ap when operating in station mode.
4276 * This assumes the rx key is always setup when
4277 * associated.
4278 */
4279 if (ic->ic_opmode == IEEE80211_M_STA &&
4280 rs->rs_keyix != HAL_RXKEYIX_INVALID)
4281 ngood++;
4282 } else {
4283 type = ieee80211_input_all(ic, m, rs->rs_rssi, nf);
4284 }
4285 /*
4286 * Track rx rssi and do any rx antenna management.
4287 */
4288 ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi);
4289 if (sc->sc_diversity) {
4290 /*
4291 * When using fast diversity, change the default rx
4292 * antenna if diversity chooses the other antenna 3
4293 * times in a row.
4294 */
4295 if (sc->sc_defant != rs->rs_antenna) {
4296 if (++sc->sc_rxotherant >= 3)
4297 ath_setdefantenna(sc, rs->rs_antenna);
4298 } else
4299 sc->sc_rxotherant = 0;
4300 }
4301
4302 /* Newer school diversity - kite specific for now */
4303 /* XXX perhaps migrate the normal diversity code to this? */
4304 if ((ah)->ah_rxAntCombDiversity)
4305 (*(ah)->ah_rxAntCombDiversity)(ah, rs, ticks, hz);
4306
4307 if (sc->sc_softled) {
4308 /*
4309 * Blink for any data frame. Otherwise do a
4310 * heartbeat-style blink when idle. The latter
4311 * is mainly for station mode where we depend on
4312 * periodic beacon frames to trigger the poll event.
4313 */
4314 if (type == IEEE80211_FC0_TYPE_DATA) {
4315 const HAL_RATE_TABLE *rt = sc->sc_currates;
4316 ath_led_event(sc,
4317 rt->rateCodeToIndex[rs->rs_rate]);
4318 } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
4319 ath_led_event(sc, 0);
4320 }
4321rx_next:
4322 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
4323 } while (ath_rxbuf_init(sc, bf) == 0);
4324
4325 /* rx signal state monitoring */
4326 ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);
4327 if (ngood)
4328 sc->sc_lastrx = tsf;
4329
4330 CTR2(ATH_KTR_INTR, "ath_rx_proc: npkts=%d, ngood=%d", npkts, ngood);
4331 /* Queue DFS tasklet if needed */
4332 if (resched && ath_dfs_tasklet_needed(sc, sc->sc_curchan))
4333 taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);
4334
4335 /*
4336 * Now that all the RX frames were handled that
4337 * need to be handled, kick the PCU if there's
4338 * been an RXEOL condition.
4339 */
4340 ATH_PCU_LOCK(sc);
4341 if (resched && sc->sc_kickpcu) {
4342 CTR0(ATH_KTR_ERR, "ath_rx_proc: kickpcu");
4343 device_printf(sc->sc_dev, "%s: kickpcu; handled %d packets\n",
4344 __func__, npkts);
4345
4346 /* XXX rxslink? */
4347 /*
4348 * XXX can we hold the PCU lock here?
4349 * Are there any net80211 buffer calls involved?
4350 */
4351 bf = TAILQ_FIRST(&sc->sc_rxbuf);
4352 ath_hal_putrxbuf(ah, bf->bf_daddr);
4353 ath_hal_rxena(ah); /* enable recv descriptors */
4354 ath_mode_init(sc); /* set filters, etc. */
4355 ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */
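		/*
		 * NB (added note): the putrxbuf/rxena/mode_init/
		 * startpcurecv sequence above mirrors the tail of
		 * ath_startrecv() below, minus the per-buffer
		 * ath_rxbuf_init() pass.
		 */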
4356
4357 ath_hal_intrset(ah, sc->sc_imask);
4358 sc->sc_kickpcu = 0;
4359 }
4360 ATH_PCU_UNLOCK(sc);
4361
4362 /* XXX check this inside of IF_LOCK? */
4363 if (resched && (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
4364#ifdef IEEE80211_SUPPORT_SUPERG
4365 ieee80211_ff_age_all(ic, 100);
4366#endif
4367 if (!IFQ_IS_EMPTY(&ifp->if_snd))
4368 ath_start(ifp);
4369 }
4370#undef PA2DESC
4371
4372 ATH_PCU_LOCK(sc);
4373 sc->sc_rxproc_cnt--;
4374 ATH_PCU_UNLOCK(sc);
4375}
4376
4377static void
4378ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
4379{
4380 txq->axq_qnum = qnum;
4381 txq->axq_ac = 0;
4382 txq->axq_depth = 0;
4383 txq->axq_aggr_depth = 0;
4384 txq->axq_intrcnt = 0;
4385 txq->axq_link = NULL;
4386 txq->axq_softc = sc;
4387 TAILQ_INIT(&txq->axq_q);
4388 TAILQ_INIT(&txq->axq_tidq);
4389 ATH_TXQ_LOCK_INIT(sc, txq);
4390}
4391
4392/*
4393 * Setup a h/w transmit queue.
4394 */
4395static struct ath_txq *
4396ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
4397{
4398#define N(a) (sizeof(a)/sizeof(a[0]))
4399 struct ath_hal *ah = sc->sc_ah;
4400 HAL_TXQ_INFO qi;
4401 int qnum;
4402
4403 memset(&qi, 0, sizeof(qi));
4404 qi.tqi_subtype = subtype;
4405 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
4406 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
4407 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
4408 /*
4409 * Enable interrupts only for EOL and DESC conditions.
4410 * We mark tx descriptors to receive a DESC interrupt
4411	 * when a tx queue gets deep; otherwise we wait for the
4412	 * EOL to reap descriptors. Note that this is done to
4413	 * reduce interrupt load; it only defers reaping
4414	 * descriptors, never transmitting frames. Aside from
4415	 * reducing interrupts this also permits more concurrency.
4416	 * The only potential downside is if the tx queue backs
4417	 * up, in which case the top half of the kernel may back up
4418 * due to a lack of tx descriptors.
4419 */
4420 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE;
4421 qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
4422 if (qnum == -1) {
4423 /*
4424 * NB: don't print a message, this happens
4425 * normally on parts with too few tx queues
4426 */
4427 return NULL;
4428 }
4429 if (qnum >= N(sc->sc_txq)) {
4430 device_printf(sc->sc_dev,
4431 "hal qnum %u out of range, max %zu!\n",
4432 qnum, N(sc->sc_txq));
4433 ath_hal_releasetxqueue(ah, qnum);
4434 return NULL;
4435 }
4436 if (!ATH_TXQ_SETUP(sc, qnum)) {
4437 ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
4438 sc->sc_txqsetup |= 1<<qnum;
4439 }
4440 return &sc->sc_txq[qnum];
4441#undef N
4442}
4443
4444/*
4445 * Setup a hardware data transmit queue for the specified
4446 * access category (AC). The hal may not support all requested
4447 * queues in which case it will return a reference to a
4448 * previously setup queue. We record the mapping from ac's
4449 * to h/w queues for use by ath_tx_start and also track
4450 * the set of h/w queues being used to optimize work in the
4451 * transmit interrupt handler and related routines.
4452 */
4453static int
4454ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
4455{
4456#define N(a) (sizeof(a)/sizeof(a[0]))
4457 struct ath_txq *txq;
4458
4459 if (ac >= N(sc->sc_ac2q)) {
4460 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
4461 ac, N(sc->sc_ac2q));
4462 return 0;
4463 }
4464 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
4465 if (txq != NULL) {
4466 txq->axq_ac = ac;
4467 sc->sc_ac2q[ac] = txq;
4468 return 1;
4469 } else
4470 return 0;
4471#undef N
4472}
4473
4474/*
4475 * Update WME parameters for a transmit queue.
4476 */
4477static int
4478ath_txq_update(struct ath_softc *sc, int ac)
4479{
4480#define	ATH_EXPONENT_TO_VALUE(v)	((1 << (v)) - 1)
4481#define	ATH_TXOP_TO_US(v)		((v) << 5)
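	/*
	 * Worked example (illustrative, not from the original source):
	 * a wmep_logcwmin of 4 expands to ATH_EXPONENT_TO_VALUE(4) =
	 * (1 << 4) - 1 = 15 slots, and a wmep_txopLimit of 94 (in
	 * 32us units) expands to ATH_TXOP_TO_US(94) = 94 << 5 = 3008us.
	 */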
4482 struct ifnet *ifp = sc->sc_ifp;
4483 struct ieee80211com *ic = ifp->if_l2com;
4484 struct ath_txq *txq = sc->sc_ac2q[ac];
4485 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
4486 struct ath_hal *ah = sc->sc_ah;
4487 HAL_TXQ_INFO qi;
4488
4489 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
4490#ifdef IEEE80211_SUPPORT_TDMA
4491 if (sc->sc_tdma) {
4492 /*
4493 * AIFS is zero so there's no pre-transmit wait. The
4494 * burst time defines the slot duration and is configured
4495 * through net80211. The QCU is setup to not do post-xmit
4496 * back off, lockout all lower-priority QCU's, and fire
4497 * off the DMA beacon alert timer which is setup based
4498 * on the slot configuration.
4499 */
4500 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
4501 | HAL_TXQ_TXERRINT_ENABLE
4502 | HAL_TXQ_TXURNINT_ENABLE
4503 | HAL_TXQ_TXEOLINT_ENABLE
4504 | HAL_TXQ_DBA_GATED
4505 | HAL_TXQ_BACKOFF_DISABLE
4506 | HAL_TXQ_ARB_LOCKOUT_GLOBAL
4507 ;
4508 qi.tqi_aifs = 0;
4509 /* XXX +dbaprep? */
4510 qi.tqi_readyTime = sc->sc_tdmaslotlen;
4511 qi.tqi_burstTime = qi.tqi_readyTime;
4512 } else {
4513#endif
4514 /*
4515 * XXX shouldn't this just use the default flags
4516 * used in the previous queue setup?
4517 */
4518 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
4519 | HAL_TXQ_TXERRINT_ENABLE
4520 | HAL_TXQ_TXDESCINT_ENABLE
4521 | HAL_TXQ_TXURNINT_ENABLE
4522 | HAL_TXQ_TXEOLINT_ENABLE
4523 ;
4524 qi.tqi_aifs = wmep->wmep_aifsn;
4525 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
4526 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
4527 qi.tqi_readyTime = 0;
4528 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
4529#ifdef IEEE80211_SUPPORT_TDMA
4530 }
4531#endif
4532
4533 DPRINTF(sc, ATH_DEBUG_RESET,
4534 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n",
4535 __func__, txq->axq_qnum, qi.tqi_qflags,
4536 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime);
4537
4538 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
4539 if_printf(ifp, "unable to update hardware queue "
4540 "parameters for %s traffic!\n",
4541 ieee80211_wme_acnames[ac]);
4542 return 0;
4543 } else {
4544 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
4545 return 1;
4546 }
4547#undef ATH_TXOP_TO_US
4548#undef ATH_EXPONENT_TO_VALUE
4549}
4550
4551/*
4552 * Callback from the 802.11 layer to update WME parameters.
4553 */
4554static int
4555ath_wme_update(struct ieee80211com *ic)
4556{
4557 struct ath_softc *sc = ic->ic_ifp->if_softc;
4558
4559 return !ath_txq_update(sc, WME_AC_BE) ||
4560 !ath_txq_update(sc, WME_AC_BK) ||
4561 !ath_txq_update(sc, WME_AC_VI) ||
4562 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
4563}
4564
4565/*
4566 * Reclaim resources for a setup queue.
4567 */
4568static void
4569ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
4570{
4571
4572 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
4573 ATH_TXQ_LOCK_DESTROY(txq);
4574 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
4575}
4576
4577/*
4578 * Reclaim all tx queue resources.
4579 */
4580static void
4581ath_tx_cleanup(struct ath_softc *sc)
4582{
4583 int i;
4584
4585 ATH_TXBUF_LOCK_DESTROY(sc);
4586 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4587 if (ATH_TXQ_SETUP(sc, i))
4588 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
4589}
4590
4591/*
4592 * Return h/w rate index for an IEEE rate (w/o basic rate bit)
4593 * using the current rates in sc_rixmap.
4594 */
4595int
4596ath_tx_findrix(const struct ath_softc *sc, uint8_t rate)
4597{
4598 int rix = sc->sc_rixmap[rate];
4599 /* NB: return lowest rix for invalid rate */
4600 return (rix == 0xff ? 0 : rix);
4601}
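/*
 * Example use (illustrative only): rates are keyed in half-Mb/s
 * units, so the rix for 24 Mb/s OFDM is ath_tx_findrix(sc, 2*24);
 * a rate with no sc_rixmap entry (0xff) falls back to rix 0.
 */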
4602
4603static void
4604ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts,
4605 struct ath_buf *bf)
4606{
4607 struct ieee80211_node *ni = bf->bf_node;
4608 struct ifnet *ifp = sc->sc_ifp;
4609 struct ieee80211com *ic = ifp->if_l2com;
4610 int sr, lr, pri;
4611
4612 if (ts->ts_status == 0) {
4613 u_int8_t txant = ts->ts_antenna;
4614 sc->sc_stats.ast_ant_tx[txant]++;
4615 sc->sc_ant_tx[txant]++;
4616 if (ts->ts_finaltsi != 0)
4617 sc->sc_stats.ast_tx_altrate++;
4618 pri = M_WME_GETAC(bf->bf_m);
4619 if (pri >= WME_AC_VO)
4620 ic->ic_wme.wme_hipri_traffic++;
4621 if ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)
4622 ni->ni_inact = ni->ni_inact_reload;
4623 } else {
4624 if (ts->ts_status & HAL_TXERR_XRETRY)
4625 sc->sc_stats.ast_tx_xretries++;
4626 if (ts->ts_status & HAL_TXERR_FIFO)
4627 sc->sc_stats.ast_tx_fifoerr++;
4628 if (ts->ts_status & HAL_TXERR_FILT)
4629 sc->sc_stats.ast_tx_filtered++;
4630 if (ts->ts_status & HAL_TXERR_XTXOP)
4631 sc->sc_stats.ast_tx_xtxop++;
4632 if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
4633 sc->sc_stats.ast_tx_timerexpired++;
4634
4635 if (ts->ts_status & HAL_TX_DATA_UNDERRUN)
4636 sc->sc_stats.ast_tx_data_underrun++;
4637 if (ts->ts_status & HAL_TX_DELIM_UNDERRUN)
4638 sc->sc_stats.ast_tx_delim_underrun++;
4639
4640 if (bf->bf_m->m_flags & M_FF)
4641 sc->sc_stats.ast_ff_txerr++;
4642 }
4643 /* XXX when is this valid? */
4644 if (ts->ts_status & HAL_TX_DESC_CFG_ERR)
4645 sc->sc_stats.ast_tx_desccfgerr++;
4646
4647 sr = ts->ts_shortretry;
4648 lr = ts->ts_longretry;
4649 sc->sc_stats.ast_tx_shortretry += sr;
4650 sc->sc_stats.ast_tx_longretry += lr;
4651
4652}
4653
4654/*
4655 * The default completion. If fail is 1, this means
4656 * "please don't retry the frame, and just return -1 status
4657 * to the net80211 stack".
4658 */
4659void
4660ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4661{
4662 struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4663 int st;
4664
4665 if (fail == 1)
4666 st = -1;
4667 else
4668 st = ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0) ?
4669 ts->ts_status : HAL_TXERR_XRETRY;
4670
4671 if (bf->bf_state.bfs_dobaw)
4672 device_printf(sc->sc_dev,
4673 "%s: dobaw should've been cleared!\n", __func__);
4674 if (bf->bf_next != NULL)
4675 device_printf(sc->sc_dev,
4676 "%s: bf_next not NULL!\n", __func__);
4677
4678 /*
4679 * Do any tx complete callback. Note this must
4680 * be done before releasing the node reference.
4681 * This will free the mbuf, release the net80211
4682 * node and recycle the ath_buf.
4683 */
4684 ath_tx_freebuf(sc, bf, st);
4685}
4686
4687/*
4688 * Update rate control with the given completion status.
4689 */
4690void
4691ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
4692 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen,
4693 int nframes, int nbad)
4694{
4695 struct ath_node *an;
4696
4697 /* Only for unicast frames */
4698 if (ni == NULL)
4699 return;
4700
4701 an = ATH_NODE(ni);
4702
4703 if ((ts->ts_status & HAL_TXERR_FILT) == 0) {
4704 ATH_NODE_LOCK(an);
4705 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad);
4706 ATH_NODE_UNLOCK(an);
4707 }
4708}
4709
4710/*
4711 * Update the busy status of the last frame on the free list.
4712 * When doing TDMA, the busy flag tracks whether the hardware
4713 * currently points to this buffer or not, and thus gated DMA
4714 * may restart by re-reading the last descriptor in this
4715 * buffer.
4716 *
4717 * This should be called in the completion function once one
4718 * of the buffers has been used.
4719 */
4720static void
4721ath_tx_update_busy(struct ath_softc *sc)
4722{
4723 struct ath_buf *last;
4724
4725 /*
4726 * Since the last frame may still be marked
4727 * as ATH_BUF_BUSY, unmark it here before
4728 * finishing the frame processing.
4729 * Since we've completed a frame (aggregate
4730 * or otherwise), the hardware has moved on
4731 * and is no longer referencing the previous
4732 * descriptor.
4733 */
4734 ATH_TXBUF_LOCK_ASSERT(sc);
4735 last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
4736 if (last != NULL)
4737 last->bf_flags &= ~ATH_BUF_BUSY;
4738}
4739
4740
4741/*
4742 * Process completed xmit descriptors from the specified queue.
4743 * Kick the packet scheduler if needed; that work may run from
4744 * within this task context.
4745 */
4746static int
4747ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
4748{
4749 struct ath_hal *ah = sc->sc_ah;
4750 struct ath_buf *bf;
4751 struct ath_desc *ds;
4752 struct ath_tx_status *ts;
4753 struct ieee80211_node *ni;
4754 struct ath_node *an;
4755 int nacked;
4756 HAL_STATUS status;
4757
4758 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
4759 __func__, txq->axq_qnum,
4760 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
4761 txq->axq_link);
4762 nacked = 0;
4763 for (;;) {
4764 ATH_TXQ_LOCK(txq);
4765 txq->axq_intrcnt = 0; /* reset periodic desc intr count */
4766 bf = TAILQ_FIRST(&txq->axq_q);
4767 if (bf == NULL) {
4768 ATH_TXQ_UNLOCK(txq);
4769 break;
4770 }
4771 ds = bf->bf_lastds; /* XXX must be setup correctly! */
4772 ts = &bf->bf_status.ds_txstat;
4773 status = ath_hal_txprocdesc(ah, ds, ts);
4774#ifdef ATH_DEBUG
4775 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
4776 ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
4777 status == HAL_OK);
4778#endif
4779 if (status == HAL_EINPROGRESS) {
4780 ATH_TXQ_UNLOCK(txq);
4781 break;
4782 }
4783 ATH_TXQ_REMOVE(txq, bf, bf_list);
4784#ifdef IEEE80211_SUPPORT_TDMA
4785 if (txq->axq_depth > 0) {
4786 /*
4787 * More frames follow. Mark the buffer busy
4788 * so it's not re-used while the hardware may
4789 * still re-read the link field in the descriptor.
4790 *
4791 * Use the last buffer in an aggregate as that
4792 * is where the hardware may be - intermediate
4793 * descriptors won't be "busy".
4794 */
4795 bf->bf_last->bf_flags |= ATH_BUF_BUSY;
4796 } else
4797#else
4798 if (txq->axq_depth == 0)
4799#endif
4800 txq->axq_link = NULL;
4801 if (bf->bf_state.bfs_aggr)
4802 txq->axq_aggr_depth--;
4803
4804 ni = bf->bf_node;
4805 /*
4806 * If unicast frame was ack'd update RSSI,
4807 * including the last rx time used to
4808 * workaround phantom bmiss interrupts.
4809 */
4810 if (ni != NULL && ts->ts_status == 0 &&
4811 ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)) {
4812 nacked++;
4813 sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
4814 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
4815 ts->ts_rssi);
4816 }
4817 ATH_TXQ_UNLOCK(txq);
4818
4819 /* If unicast frame, update general statistics */
4820 if (ni != NULL) {
4821 an = ATH_NODE(ni);
4822 /* update statistics */
4823 ath_tx_update_stats(sc, ts, bf);
4824 }
4825
4826 /*
4827 * Call the completion handler.
4828 * The completion handler is responsible for
4829 * calling the rate control code.
4830 *
4831 * Frames with no completion handler get the
4832 * rate control code called here.
4833 */
4834 if (bf->bf_comp == NULL) {
4835 if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
4836 (bf->bf_txflags & HAL_TXDESC_NOACK) == 0) {
4837 /*
4838 * XXX assume this isn't an aggregate
4839 * frame.
4840 */
4841 ath_tx_update_ratectrl(sc, ni,
4842 bf->bf_state.bfs_rc, ts,
4843 bf->bf_state.bfs_pktlen, 1,
4844 (ts->ts_status == 0 ? 0 : 1));
4845 }
4846 ath_tx_default_comp(sc, bf, 0);
4847 } else
4848 bf->bf_comp(sc, bf, 0);
4849 }
4850#ifdef IEEE80211_SUPPORT_SUPERG
4851 /*
4852 * Flush fast-frame staging queue when traffic slows.
4853 */
4854 if (txq->axq_depth <= 1)
4855 ieee80211_ff_flush(ic, txq->axq_ac);
4856#endif
4857
4858 /* Kick the TXQ scheduler */
4859 if (dosched) {
4860 ATH_TXQ_LOCK(txq);
4861 ath_txq_sched(sc, txq);
4862 ATH_TXQ_UNLOCK(txq);
4863 }
4864
4865 return nacked;
4866}
4867
4868#define TXQACTIVE(t, q) ( (t) & (1 << (q)))
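/*
 * Example (illustrative): if txqs == 0x5 then bits 0 and 2 are set,
 * so TXQACTIVE(txqs, 0) and TXQACTIVE(txqs, 2) are non-zero while
 * TXQACTIVE(txqs, 1) is zero.
 */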
4869
4870/*
4871 * Deferred processing of transmit interrupt; special-cased
4872 * for a single hardware transmit queue (e.g. 5210 and 5211).
4873 */
4874static void
4875ath_tx_proc_q0(void *arg, int npending)
4876{
4877 struct ath_softc *sc = arg;
4878 struct ifnet *ifp = sc->sc_ifp;
4879 uint32_t txqs;
4880
4881 ATH_PCU_LOCK(sc);
4882 sc->sc_txproc_cnt++;
4883 txqs = sc->sc_txq_active;
4884 sc->sc_txq_active &= ~txqs;
4885 ATH_PCU_UNLOCK(sc);
4886
4887 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1))
4888 /* XXX why is lastrx updated in tx code? */
4889 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4890 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
4891 ath_tx_processq(sc, sc->sc_cabq, 1);
4892 /* XXX check this inside of IF_LOCK? */
4893 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4894 sc->sc_wd_timer = 0;
4895
4896 if (sc->sc_softled)
4897 ath_led_event(sc, sc->sc_txrix);
4898
4899 ATH_PCU_LOCK(sc);
4900 sc->sc_txproc_cnt--;
4901 ATH_PCU_UNLOCK(sc);
4902
4903 ath_start(ifp);
4904}
4905
4906/*
4907 * Deferred processing of transmit interrupt; special-cased
4908 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
4909 */
4910static void
4911ath_tx_proc_q0123(void *arg, int npending)
4912{
4913 struct ath_softc *sc = arg;
4914 struct ifnet *ifp = sc->sc_ifp;
4915 int nacked;
4916 uint32_t txqs;
4917
4918 ATH_PCU_LOCK(sc);
4919 sc->sc_txproc_cnt++;
4920 txqs = sc->sc_txq_active;
4921 sc->sc_txq_active &= ~txqs;
4922 ATH_PCU_UNLOCK(sc);
4923
4924 /*
4925 * Process each active queue.
4926 */
4927 nacked = 0;
4928 if (TXQACTIVE(txqs, 0))
4929 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1);
4930 if (TXQACTIVE(txqs, 1))
4931 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1);
4932 if (TXQACTIVE(txqs, 2))
4933 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1);
4934 if (TXQACTIVE(txqs, 3))
4935 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1);
4936 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
4937 ath_tx_processq(sc, sc->sc_cabq, 1);
4938 if (nacked)
4939 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4940
4941 /* XXX check this inside of IF_LOCK? */
4942 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4943 sc->sc_wd_timer = 0;
4944
4945 if (sc->sc_softled)
4946 ath_led_event(sc, sc->sc_txrix);
4947
4948 ATH_PCU_LOCK(sc);
4949 sc->sc_txproc_cnt--;
4950 ATH_PCU_UNLOCK(sc);
4951
4952 ath_start(ifp);
4953}
4954
4955/*
4956 * Deferred processing of transmit interrupt.
4957 */
4958static void
4959ath_tx_proc(void *arg, int npending)
4960{
4961 struct ath_softc *sc = arg;
4962 struct ifnet *ifp = sc->sc_ifp;
4963 int i, nacked;
4964 uint32_t txqs;
4965
4966 ATH_PCU_LOCK(sc);
4967 sc->sc_txproc_cnt++;
4968 txqs = sc->sc_txq_active;
4969 sc->sc_txq_active &= ~txqs;
4970 ATH_PCU_UNLOCK(sc);
4971
4972 /*
4973 * Process each active queue.
4974 */
4975 nacked = 0;
4976 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4977 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i))
4978 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1);
4979 if (nacked)
4980 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4981
4982 /* XXX check this inside of IF_LOCK? */
4983 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4984 sc->sc_wd_timer = 0;
4985
4986 if (sc->sc_softled)
4987 ath_led_event(sc, sc->sc_txrix);
4988
4989 ATH_PCU_LOCK(sc);
4990 sc->sc_txproc_cnt--;
4991 ATH_PCU_UNLOCK(sc);
4992
4993 ath_start(ifp);
4994}
4995#undef TXQACTIVE
4996
4997/*
4998 * Return a buffer to the pool and update the 'busy' flag on the
4999 * previous 'tail' entry.
5000 *
5001 * This _must_ only be called when the buffer is involved in a completed
5002 * TX. The logic is that if it was part of an active TX, the previous
5003 * buffer on the list is now not involved in a halted TX DMA queue, waiting
5004 * for restart (e.g. for TDMA).
5005 *
5006 * The caller must free the mbuf and recycle the node reference.
5007 */
5008void
5009ath_freebuf(struct ath_softc *sc, struct ath_buf *bf)
5010{
5011 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
5012 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE);
5013
5014 KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__));
5015 KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__));
5016
5017 ATH_TXBUF_LOCK(sc);
5018 ath_tx_update_busy(sc);
5019 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
5020 ATH_TXBUF_UNLOCK(sc);
5021}
5022
5023/*
5024 * This is currently used by ath_tx_draintxq() and
5025 * ath_tx_tid_free_pkts().
5026 *
5027 * It recycles a single ath_buf.
5028 */
5029void
5030ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status)
5031{
5032 struct ieee80211_node *ni = bf->bf_node;
5033 struct mbuf *m0 = bf->bf_m;
5034
5035 bf->bf_node = NULL;
5036 bf->bf_m = NULL;
5037
5038 /* Free the buffer, it's not needed any longer */
5039 ath_freebuf(sc, bf);
5040
5041 if (ni != NULL) {
5042 /*
5043 * Do any callback and reclaim the node reference.
5044 */
5045 if (m0->m_flags & M_TXCB)
5046 ieee80211_process_callback(ni, m0, status);
5047 ieee80211_free_node(ni);
5048 }
5049 m_freem(m0);
5050
5051 /*
5052 * XXX the buffer used to be freed -after-, but the DMA map was
5053 * freed where ath_freebuf() now is. I've no idea what this
5054 * will do.
5055 */
5056}
5057
5058void
5059ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
5060{
5061#ifdef ATH_DEBUG
5062 struct ath_hal *ah = sc->sc_ah;
5063#endif
5064 struct ath_buf *bf;
5065 u_int ix;
5066
5067 /*
5068 * NB: this assumes output has been stopped and
5069 * we do not need to block ath_tx_proc
5070 */
5071 ATH_TXBUF_LOCK(sc);
5072 bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
5073 if (bf != NULL)
5074 bf->bf_flags &= ~ATH_BUF_BUSY;
5075 ATH_TXBUF_UNLOCK(sc);
5076
5077 for (ix = 0;; ix++) {
5078 ATH_TXQ_LOCK(txq);
5079 bf = TAILQ_FIRST(&txq->axq_q);
5080 if (bf == NULL) {
5081 txq->axq_link = NULL;
5082 ATH_TXQ_UNLOCK(txq);
5083 break;
5084 }
5085 ATH_TXQ_REMOVE(txq, bf, bf_list);
5086 if (bf->bf_state.bfs_aggr)
5087 txq->axq_aggr_depth--;
5088#ifdef ATH_DEBUG
5089 if (sc->sc_debug & ATH_DEBUG_RESET) {
5090 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
5091
5092 ath_printtxbuf(sc, bf, txq->axq_qnum, ix,
5093 ath_hal_txprocdesc(ah, bf->bf_lastds,
5094 &bf->bf_status.ds_txstat) == HAL_OK);
5095 ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *),
5096 bf->bf_m->m_len, 0, -1);
5097 }
5098#endif /* ATH_DEBUG */
5099 /*
5100 * Since we're now doing magic in the completion
5101 * functions, we -must- call it for aggregation
5102 * destinations or BAW tracking will get upset.
5103 */
5104 /*
5105 * Clear ATH_BUF_BUSY; the completion handler
5106 * will free the buffer.
5107 */
5108 ATH_TXQ_UNLOCK(txq);
5109 bf->bf_flags &= ~ATH_BUF_BUSY;
5110 if (bf->bf_comp)
5111 bf->bf_comp(sc, bf, 1);
5112 else
5113 ath_tx_default_comp(sc, bf, 1);
5114 }
5115
5116 /*
5117 * Drain software queued frames which are on
5118 * active TIDs.
5119 */
5120 ath_tx_txq_drain(sc, txq);
5121}
5122
5123static void
5124ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
5125{
5126 struct ath_hal *ah = sc->sc_ah;
5127
5128 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
5129 __func__, txq->axq_qnum,
5130 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
5131 txq->axq_link);
5132 (void) ath_hal_stoptxdma(ah, txq->axq_qnum);
5133}
5134
5135static int
5136ath_stoptxdma(struct ath_softc *sc)
5137{
5138 struct ath_hal *ah = sc->sc_ah;
5139 int i;
5140
5141 /* XXX return value */
5142 if (sc->sc_invalid)
5143 return 0;
5144
5145 if (!sc->sc_invalid) {
5146 /* don't touch the hardware if marked invalid */
5147 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
5148 __func__, sc->sc_bhalq,
5149 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
5150 NULL);
5151 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
5152 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
5153 if (ATH_TXQ_SETUP(sc, i))
5154 ath_tx_stopdma(sc, &sc->sc_txq[i]);
5155 }
5156
5157 return 1;
5158}
5159
5160/*
5161 * Drain the transmit queues and reclaim resources.
5162 */
5163static void
5164ath_draintxq(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
5165{
5166#ifdef ATH_DEBUG
5167 struct ath_hal *ah = sc->sc_ah;
5168#endif
5169 struct ifnet *ifp = sc->sc_ifp;
5170 int i;
5171
5172 (void) ath_stoptxdma(sc);
5173
5174 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
5175 /*
5176 * XXX TODO: should we just handle the completed TX frames
5177 * here, whether or not the reset is a full one or not?
5178 */
5179 if (ATH_TXQ_SETUP(sc, i)) {
5180 if (reset_type == ATH_RESET_NOLOSS)
5181 ath_tx_processq(sc, &sc->sc_txq[i], 0);
5182 else
5183 ath_tx_draintxq(sc, &sc->sc_txq[i]);
5184 }
5185 }
5186#ifdef ATH_DEBUG
5187 if (sc->sc_debug & ATH_DEBUG_RESET) {
5188 struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf);
5189 if (bf != NULL && bf->bf_m != NULL) {
5190 ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
5191 ath_hal_txprocdesc(ah, bf->bf_lastds,
5192 &bf->bf_status.ds_txstat) == HAL_OK);
5193 ieee80211_dump_pkt(ifp->if_l2com,
5194 mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
5195 0, -1);
5196 }
5197 }
5198#endif /* ATH_DEBUG */
5199 /* XXX check this inside of IF_LOCK? */
5200 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5201 sc->sc_wd_timer = 0;
5202}
5203
5204/*
5205 * Disable the receive h/w in preparation for a reset.
5206 */
5207static void
5208ath_stoprecv(struct ath_softc *sc, int dodelay)
5209{
5210#define PA2DESC(_sc, _pa) \
5211 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
5212 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
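	/*
	 * Added note: PA2DESC maps a physical descriptor address
	 * reported by the hardware back to its kernel virtual
	 * struct ath_desc by offsetting from the base of the rx
	 * descriptor block.
	 */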
5213 struct ath_hal *ah = sc->sc_ah;
5214
5215 ath_hal_stoppcurecv(ah); /* disable PCU */
5216 ath_hal_setrxfilter(ah, 0); /* clear recv filter */
5217 ath_hal_stopdmarecv(ah); /* disable DMA engine */
5218 if (dodelay)
5219 DELAY(3000); /* 3ms is long enough for 1 frame */
5220#ifdef ATH_DEBUG
5221 if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
5222 struct ath_buf *bf;
5223 u_int ix;
5224
5225 printf("%s: rx queue %p, link %p\n", __func__,
5226 (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink);
5227 ix = 0;
5228 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
5229 struct ath_desc *ds = bf->bf_desc;
5230 struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
5231 HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
5232 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
5233 if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
5234 ath_printrxbuf(sc, bf, ix, status == HAL_OK);
5235 ix++;
5236 }
5237 }
5238#endif
5239 if (sc->sc_rxpending != NULL) {
5240 m_freem(sc->sc_rxpending);
5241 sc->sc_rxpending = NULL;
5242 }
5243 sc->sc_rxlink = NULL; /* just in case */
5244#undef PA2DESC
5245}
5246
5247/*
5248 * Enable the receive h/w following a reset.
5249 */
5250static int
5251ath_startrecv(struct ath_softc *sc)
5252{
5253 struct ath_hal *ah = sc->sc_ah;
5254 struct ath_buf *bf;
5255
5256 sc->sc_rxlink = NULL;
5257 sc->sc_rxpending = NULL;
5258 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
5259 int error = ath_rxbuf_init(sc, bf);
5260 if (error != 0) {
5261 DPRINTF(sc, ATH_DEBUG_RECV,
5262 "%s: ath_rxbuf_init failed %d\n",
5263 __func__, error);
5264 return error;
5265 }
5266 }
5267
5268 bf = TAILQ_FIRST(&sc->sc_rxbuf);
5269 ath_hal_putrxbuf(ah, bf->bf_daddr);
5270 ath_hal_rxena(ah); /* enable recv descriptors */
5271 ath_mode_init(sc); /* set filters, etc. */
5272 ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */
5273 return 0;
5274}
5275
5276/*
5277 * Update internal state after a channel change.
5278 */
5279static void
5280ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
5281{
5282 enum ieee80211_phymode mode;
5283
5284 /*
5285 * Change channels and update the h/w rate map
5286 * if we're switching; e.g. 11a to 11b/g.
5287 */
5288 mode = ieee80211_chan2mode(chan);
5289 if (mode != sc->sc_curmode)
5290 ath_setcurmode(sc, mode);
5291 sc->sc_curchan = chan;
5292}
5293
5294/*
5295 * Set/change channels. If the channel is really being changed,
5296 * it's done by resetting the chip. To accomplish this we must
5297 * first cleanup any pending DMA, then restart stuff after a la
5298 * ath_init.
5299 */
5300static int
5301ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
5302{
5303 struct ifnet *ifp = sc->sc_ifp;
5304 struct ieee80211com *ic = ifp->if_l2com;
5305 struct ath_hal *ah = sc->sc_ah;
5306 int ret = 0;
5307 int dointr = 0;
5308
5309 /* Treat this as an interface reset */
5310 ATH_PCU_LOCK(sc);
5311 if (ath_reset_grablock(sc, 1) == 0) {
5312 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
5313 __func__);
5314 }
5315 if (chan != sc->sc_curchan) {
5316 dointr = 1;
5317 /* XXX only do this if inreset_cnt is 1? */
5318 ath_hal_intrset(ah, 0);
5319 }
5320 ATH_PCU_UNLOCK(sc);
5321 ath_txrx_stop(sc);
5322
5323 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
5324 __func__, ieee80211_chan2ieee(ic, chan),
5325 chan->ic_freq, chan->ic_flags);
5326 if (chan != sc->sc_curchan) {
5327 HAL_STATUS status;
5328 /*
5329 * To switch channels clear any pending DMA operations;
5330 * wait long enough for the RX fifo to drain, reset the
5331 * hardware at the new frequency, and then re-enable
5332 * the relevant bits of the h/w.
5333 */
5334#if 0
5335 ath_hal_intrset(ah, 0); /* disable interrupts */
5336#endif
5337 ath_stoprecv(sc, 1); /* turn off frame recv */
5338 /*
5339 * First, handle completed TX/RX frames.
5340 */
5341 ath_rx_proc(sc, 0);
5342 ath_draintxq(sc, ATH_RESET_NOLOSS);
5343 /*
5344 * Next, flush the non-scheduled frames.
5345 */
5346 ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */
5347
5348 if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) {
5349 if_printf(ifp, "%s: unable to reset "
5350 "channel %u (%u MHz, flags 0x%x), hal status %u\n",
5351 __func__, ieee80211_chan2ieee(ic, chan),
5352 chan->ic_freq, chan->ic_flags, status);
5353 ret = EIO;
5354 goto finish;
5355 }
5356 sc->sc_diversity = ath_hal_getdiversity(ah);
5357
5358 /* Let DFS at it in case it's a DFS channel */
5359 ath_dfs_radar_enable(sc, ic->ic_curchan);
5360
5361 /*
5362 * Re-enable rx framework.
5363 */
5364 if (ath_startrecv(sc) != 0) {
5365 if_printf(ifp, "%s: unable to restart recv logic\n",
5366 __func__);
5367 ret = EIO;
5368 goto finish;
5369 }
5370
5371 /*
5372 * Change channels and update the h/w rate map
5373 * if we're switching; e.g. 11a to 11b/g.
5374 */
5375 ath_chan_change(sc, chan);
5376
5377 /*
5378 * Reset clears the beacon timers; reset them
5379 * here if needed.
5380 */
5381 if (sc->sc_beacons) { /* restart beacons */
5382#ifdef IEEE80211_SUPPORT_TDMA
5383 if (sc->sc_tdma)
5384 ath_tdma_config(sc, NULL);
5385 else
5386#endif
5387 ath_beacon_config(sc, NULL);
5388 }
5389
5390#if 0
5391 /*
5392 * Re-enable interrupts.
5393 */
5394 ath_hal_intrset(ah, sc->sc_imask);
5395#endif
5396 }
5397
5398finish:
5399 ATH_PCU_LOCK(sc);
5400 sc->sc_inreset_cnt--;
5401 /* XXX only do this if sc_inreset_cnt == 0? */
5402 if (dointr)
5403 ath_hal_intrset(ah, sc->sc_imask);
5404 ATH_PCU_UNLOCK(sc);
5405
5406 /* XXX do this inside of IF_LOCK? */
5407 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5408 ath_txrx_start(sc);
5409 /* XXX ath_start? */
5410
5411 return ret;
5412}
5413
5414/*
5415 * Periodically recalibrate the PHY to account
5416 * for temperature/environment changes.
5417 */
5418static void
5419ath_calibrate(void *arg)
5420{
5421 struct ath_softc *sc = arg;
5422 struct ath_hal *ah = sc->sc_ah;
5423 struct ifnet *ifp = sc->sc_ifp;
5424 struct ieee80211com *ic = ifp->if_l2com;
5425 HAL_BOOL longCal, isCalDone;
5426 HAL_BOOL aniCal, shortCal = AH_FALSE;
5427 int nextcal;
5428
5429 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */
5430 goto restart;
5431 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz);
5432 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000);
5433 if (sc->sc_doresetcal)
5434 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000);
5435
5436 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal);
5437 if (aniCal) {
5438 sc->sc_stats.ast_ani_cal++;
5439 sc->sc_lastani = ticks;
5440 ath_hal_ani_poll(ah, sc->sc_curchan);
5441 }
5442
5443 if (longCal) {
5444 sc->sc_stats.ast_per_cal++;
5445 sc->sc_lastlongcal = ticks;
5446 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
5447 /*
5448 * Rfgain is out of bounds, reset the chip
5449 * to load new gain values.
5450 */
5451 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
5452 "%s: rfgain change\n", __func__);
5453 sc->sc_stats.ast_per_rfgain++;
5454 /*
5455 * Drop lock - we can't hold it across the
5456 * ath_reset() call. Instead, we'll drop
5457 * out here, do a reset, then reschedule
5458 * the callout.
5459 */
5460 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
5461 sc->sc_resetcal = 0;
5462 sc->sc_doresetcal = AH_TRUE;
5463 ATH_UNLOCK(sc);
5464 ath_reset(ifp, ATH_RESET_NOLOSS);
5465 ATH_LOCK(sc);
5466 return;
5467 }
5468 /*
5469 * If this long cal is after an idle period, then
5470 * reset the data collection state so we start fresh.
5471 */
5472 if (sc->sc_resetcal) {
5473 (void) ath_hal_calreset(ah, sc->sc_curchan);
5474 sc->sc_lastcalreset = ticks;
5475 sc->sc_lastshortcal = ticks;
5476 sc->sc_resetcal = 0;
5477 sc->sc_doresetcal = AH_TRUE;
5478 }
5479 }
5480
5481 /* Only call if we're doing a short/long cal, not for ANI calibration */
5482 if (shortCal || longCal) {
5483 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) {
5484 if (longCal) {
5485 /*
5486 * Calibrate noise floor data again in case of change.
5487 */
5488 ath_hal_process_noisefloor(ah);
5489 }
5490 } else {
5491 DPRINTF(sc, ATH_DEBUG_ANY,
5492 "%s: calibration of channel %u failed\n",
5493 __func__, sc->sc_curchan->ic_freq);
5494 sc->sc_stats.ast_per_calfail++;
5495 }
5496 if (shortCal)
5497 sc->sc_lastshortcal = ticks;
5498 }
5499 if (!isCalDone) {
5500restart:
5501 /*
5502 * Use a shorter interval to potentially collect multiple
5503 * data samples required to complete calibration. Once
5504 * we're told the work is done we drop back to a longer
5505 * interval between requests. We're more aggressive doing
5506 * work when operating as an AP to improve operation right
5507 * after startup.
5508 */
5509 sc->sc_lastshortcal = ticks;
5510 nextcal = ath_shortcalinterval*hz/1000;
5511 if (sc->sc_opmode != HAL_M_HOSTAP)
5512 nextcal *= 10;
5513 sc->sc_doresetcal = AH_TRUE;
5514 } else {
5515 /* nextcal should be the shortest time for next event */
5516 nextcal = ath_longcalinterval*hz;
5517 if (sc->sc_lastcalreset == 0)
5518 sc->sc_lastcalreset = sc->sc_lastlongcal;
5519 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz)
5520 sc->sc_resetcal = 1; /* setup reset next trip */
5521 sc->sc_doresetcal = AH_FALSE;
5522 }
5523 /* ANI calibration may occur more often than short/long/resetcal */
5524 if (ath_anicalinterval > 0)
5525 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000);
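	/*
	 * Worked example (illustrative; assumes hz = 1000 and an
	 * ath_shortcalinterval of 100ms): an incomplete calibration
	 * yields nextcal = 100 ticks (x10 when not in hostap mode),
	 * and a 100ms ath_anicalinterval caps nextcal at 100 ticks
	 * via the MIN() above.
	 */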
5526
5527 if (nextcal != 0) {
5528 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n",
5529 __func__, nextcal, isCalDone ? "" : "!");
5530 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc);
5531 } else {
5532 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n",
5533 __func__);
5534 /* NB: don't rearm timer */
5535 }
5536}
5537
5538static void
5539ath_scan_start(struct ieee80211com *ic)
5540{
5541 struct ifnet *ifp = ic->ic_ifp;
5542 struct ath_softc *sc = ifp->if_softc;
5543 struct ath_hal *ah = sc->sc_ah;
5544 u_int32_t rfilt;
5545
5546 /* XXX calibration timer? */
5547
5548 sc->sc_scanning = 1;
5549 sc->sc_syncbeacon = 0;
5550 rfilt = ath_calcrxfilter(sc);
5551 ath_hal_setrxfilter(ah, rfilt);
5552 ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0);
5553
5554 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
5555 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr));
5556}
5557
5558static void
5559ath_scan_end(struct ieee80211com *ic)
5560{
5561 struct ifnet *ifp = ic->ic_ifp;
5562 struct ath_softc *sc = ifp->if_softc;
5563 struct ath_hal *ah = sc->sc_ah;
5564 u_int32_t rfilt;
5565
5566 sc->sc_scanning = 0;
5567 rfilt = ath_calcrxfilter(sc);
5568 ath_hal_setrxfilter(ah, rfilt);
5569 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
5570
5571 ath_hal_process_noisefloor(ah);
5572
5573 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
5574 __func__, rfilt, ether_sprintf(sc->sc_curbssid),
5575 sc->sc_curaid);
5576}
5577
5578static void
5579ath_set_channel(struct ieee80211com *ic)
5580{
5581 struct ifnet *ifp = ic->ic_ifp;
5582 struct ath_softc *sc = ifp->if_softc;
5583
5584 (void) ath_chan_set(sc, ic->ic_curchan);
5585 /*
5586 * If we are returning to our bss channel then mark state
5587 * so the next recv'd beacon's tsf will be used to sync the
5588 * beacon timers. Note that since we only hear beacons in
5589 * sta/ibss mode this has no effect in other operating modes.
5590 */
5591 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
5592 sc->sc_syncbeacon = 1;
5593}
5594
5595/*
5596 * Walk the vap list and check if there are any vaps in RUN state.
5597 */
5598static int
5599ath_isanyrunningvaps(struct ieee80211vap *this)
5600{
5601 struct ieee80211com *ic = this->iv_ic;
5602 struct ieee80211vap *vap;
5603
5604 IEEE80211_LOCK_ASSERT(ic);
5605
5606 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
5607 if (vap != this && vap->iv_state >= IEEE80211_S_RUN)
5608 return 1;
5609 }
5610 return 0;
5611}
5612
5613static int
5614ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
5615{
5616 struct ieee80211com *ic = vap->iv_ic;
5617 struct ath_softc *sc = ic->ic_ifp->if_softc;
5618 struct ath_vap *avp = ATH_VAP(vap);
5619 struct ath_hal *ah = sc->sc_ah;
5620 struct ieee80211_node *ni = NULL;
5621 int i, error, stamode;
5622 u_int32_t rfilt;
5623 int csa_run_transition = 0;
5624 static const HAL_LED_STATE leds[] = {
5625 HAL_LED_INIT, /* IEEE80211_S_INIT */
5626 HAL_LED_SCAN, /* IEEE80211_S_SCAN */
5627 HAL_LED_AUTH, /* IEEE80211_S_AUTH */
5628 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */
5629 HAL_LED_RUN, /* IEEE80211_S_CAC */
5630 HAL_LED_RUN, /* IEEE80211_S_RUN */
5631 HAL_LED_RUN, /* IEEE80211_S_CSA */
5632 HAL_LED_RUN, /* IEEE80211_S_SLEEP */
5633 };
5634
5635 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
5636 ieee80211_state_name[vap->iv_state],
5637 ieee80211_state_name[nstate]);
5638
5639 if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN)
5640 csa_run_transition = 1;
5641
5642 callout_drain(&sc->sc_cal_ch);
5643 ath_hal_setledstate(ah, leds[nstate]); /* set LED */
5644
5645 if (nstate == IEEE80211_S_SCAN) {
5646 /*
5647 * Scanning: turn off beacon miss and don't beacon.
5648 * Mark beacon state so when we reach RUN state we'll
5649 * [re]setup beacons. Unblock the task q thread so
5650 * deferred interrupt processing is done.
5651 */
5652 ath_hal_intrset(ah,
5653 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
5654 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
5655 sc->sc_beacons = 0;
5656 taskqueue_unblock(sc->sc_tq);
5657 }
5658
5659 ni = vap->iv_bss;
5660 rfilt = ath_calcrxfilter(sc);
5661 stamode = (vap->iv_opmode == IEEE80211_M_STA ||
5662 vap->iv_opmode == IEEE80211_M_AHDEMO ||
5663 vap->iv_opmode == IEEE80211_M_IBSS);
5664 if (stamode && nstate == IEEE80211_S_RUN) {
5665 sc->sc_curaid = ni->ni_associd;
5666 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
5667 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
5668 }
5669 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
5670 __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
5671 ath_hal_setrxfilter(ah, rfilt);
5672
5673 /* XXX is this to restore keycache on resume? */
5674 if (vap->iv_opmode != IEEE80211_M_STA &&
5675 (vap->iv_flags & IEEE80211_F_PRIVACY)) {
5676 for (i = 0; i < IEEE80211_WEP_NKID; i++)
5677 if (ath_hal_keyisvalid(ah, i))
5678 ath_hal_keysetmac(ah, i, ni->ni_bssid);
5679 }
5680
5681 /*
5682 * Invoke the parent method to do net80211 work.
5683 */
5684 error = avp->av_newstate(vap, nstate, arg);
5685 if (error != 0)
5686 goto bad;
5687
5688 if (nstate == IEEE80211_S_RUN) {
5689 /* NB: collect bss node again, it may have changed */
5690 ni = vap->iv_bss;
5691
5692 DPRINTF(sc, ATH_DEBUG_STATE,
5693 "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
5694 "capinfo 0x%04x chan %d\n", __func__,
5695 vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid),
5696 ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));
5697
5698 switch (vap->iv_opmode) {
5699#ifdef IEEE80211_SUPPORT_TDMA
5700 case IEEE80211_M_AHDEMO:
5701 if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
5702 break;
5703 /* fall thru... */
5704#endif
5705 case IEEE80211_M_HOSTAP:
5706 case IEEE80211_M_IBSS:
5707 case IEEE80211_M_MBSS:
5708 /*
5709 * Allocate and setup the beacon frame.
5710 *
5711 * Stop any previous beacon DMA. This may be
5712 * necessary, for example, when an ibss merge
5713 * causes reconfiguration; there will be a state
5714 * transition from RUN->RUN that means we may
5715 * be called with beacon transmission active.
5716 */
5717 ath_hal_stoptxdma(ah, sc->sc_bhalq);
5718
5719 error = ath_beacon_alloc(sc, ni);
5720 if (error != 0)
5721 goto bad;
5722 /*
5723			 * If joining an adhoc network, defer beacon timer
5724 * configuration to the next beacon frame so we
5725 * have a current TSF to use. Otherwise we're
5726 * starting an ibss/bss so there's no need to delay;
5727 * if this is the first vap moving to RUN state, then
5728 * beacon state needs to be [re]configured.
5729 */
5730 if (vap->iv_opmode == IEEE80211_M_IBSS &&
5731 ni->ni_tstamp.tsf != 0) {
5732 sc->sc_syncbeacon = 1;
5733 } else if (!sc->sc_beacons) {
5734#ifdef IEEE80211_SUPPORT_TDMA
5735 if (vap->iv_caps & IEEE80211_C_TDMA)
5736 ath_tdma_config(sc, vap);
5737 else
5738#endif
5739 ath_beacon_config(sc, vap);
5740 sc->sc_beacons = 1;
5741 }
5742 break;
5743 case IEEE80211_M_STA:
5744 /*
5745 * Defer beacon timer configuration to the next
5746 * beacon frame so we have a current TSF to use
5747 * (any TSF collected when scanning is likely old).
5748 * However if it's due to a CSA -> RUN transition,
5749 * force a beacon update so we pick up a lack of
5750 * beacons from an AP in CAC and thus force a
5751 * scan.
5752 */
5753 sc->sc_syncbeacon = 1;
5754 if (csa_run_transition)
5755 ath_beacon_config(sc, vap);
5756 break;
5757 case IEEE80211_M_MONITOR:
5758 /*
5759 * Monitor mode vaps have only INIT->RUN and RUN->RUN
5760 * transitions so we must re-enable interrupts here to
5761 * handle the case of a single monitor mode vap.
5762 */
5763 ath_hal_intrset(ah, sc->sc_imask);
5764 break;
5765 case IEEE80211_M_WDS:
5766 break;
5767 default:
5768 break;
5769 }
5770 /*
5771 * Let the hal process statistics collected during a
5772 * scan so it can provide calibrated noise floor data.
5773 */
5774 ath_hal_process_noisefloor(ah);
5775 /*
5776 * Reset rssi stats; maybe not the best place...
5777 */
5778 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
5779 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
5780 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
5781 /*
5782 * Finally, start any timers and the task q thread
5783 * (in case we didn't go through SCAN state).
5784 */
5785 if (ath_longcalinterval != 0) {
5786 /* start periodic recalibration timer */
5787 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
5788 } else {
5789 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
5790 "%s: calibration disabled\n", __func__);
5791 }
5792 taskqueue_unblock(sc->sc_tq);
5793 } else if (nstate == IEEE80211_S_INIT) {
5794 /*
5795 * If there are no vaps left in RUN state then
5796 * shutdown host/driver operation:
5797 * o disable interrupts
5798 * o disable the task queue thread
5799 * o mark beacon processing as stopped
5800 */
5801 if (!ath_isanyrunningvaps(vap)) {
5802 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
5803 /* disable interrupts */
5804 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
5805 taskqueue_block(sc->sc_tq);
5806 sc->sc_beacons = 0;
5807 }
5808#ifdef IEEE80211_SUPPORT_TDMA
5809 ath_hal_setcca(ah, AH_TRUE);
5810#endif
5811 }
5812bad:
5813 return error;
5814}
5815
5816/*
5817 * Allocate a key cache slot to the station so we can
5818 * setup a mapping from key index to node. The key cache
5819 * slot is needed for managing antenna state and for
5820 * compression when stations do not use crypto. We do
5821 * it unilaterally here; if crypto is employed this slot
5822 * will be reassigned.
5823 */
5824static void
5825ath_setup_stationkey(struct ieee80211_node *ni)
5826{
5827 struct ieee80211vap *vap = ni->ni_vap;
5828 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
5829 ieee80211_keyix keyix, rxkeyix;
5830
5831 if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
5832 /*
5833 * Key cache is full; we'll fall back to doing
5834 * the more expensive lookup in software. Note
5835 * this also means no h/w compression.
5836 */
5837 /* XXX msg+statistic */
5838 } else {
5839 /* XXX locking? */
5840 ni->ni_ucastkey.wk_keyix = keyix;
5841 ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
5842 /* NB: must mark device key to get called back on delete */
5843 ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
5844 IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
5845 /* NB: this will create a pass-thru key entry */
5846 ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss);
5847 }
5848}
5849
5850/*
5851 * Setup driver-specific state for a newly associated node.
5852 * Note that we're also called on a re-associate; the isnew
5853 * param tells us if this is the first time or not.
5854 */
5855static void
5856ath_newassoc(struct ieee80211_node *ni, int isnew)
5857{
5858 struct ath_node *an = ATH_NODE(ni);
5859 struct ieee80211vap *vap = ni->ni_vap;
5860 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
5861 const struct ieee80211_txparam *tp = ni->ni_txparms;
5862
5863 an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
5864 an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);
5865
5866 ath_rate_newassoc(sc, an, isnew);
5867 if (isnew &&
5868 (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
5869 ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
5870 ath_setup_stationkey(ni);
5871}
5872
5873static int
5874ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
5875 int nchans, struct ieee80211_channel chans[])
5876{
5877 struct ath_softc *sc = ic->ic_ifp->if_softc;
5878 struct ath_hal *ah = sc->sc_ah;
5879 HAL_STATUS status;
5880
5881 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
5882 "%s: rd %u cc %u location %c%s\n",
5883 __func__, reg->regdomain, reg->country, reg->location,
5884 reg->ecm ? " ecm" : "");
5885
5886 status = ath_hal_set_channels(ah, chans, nchans,
5887 reg->country, reg->regdomain);
5888 if (status != HAL_OK) {
5889 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
5890 __func__, status);
5891 return EINVAL; /* XXX */
5892 }
5893
5894 return 0;
5895}
5896
5897static void
5898ath_getradiocaps(struct ieee80211com *ic,
5899 int maxchans, int *nchans, struct ieee80211_channel chans[])
5900{
5901 struct ath_softc *sc = ic->ic_ifp->if_softc;
5902 struct ath_hal *ah = sc->sc_ah;
5903
5904 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
5905 __func__, SKU_DEBUG, CTRY_DEFAULT);
5906
5907 /* XXX check return */
5908 (void) ath_hal_getchannels(ah, chans, maxchans, nchans,
5909 HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
5910
5911}
5912
5913static int
5914ath_getchannels(struct ath_softc *sc)
5915{
5916 struct ifnet *ifp = sc->sc_ifp;
5917 struct ieee80211com *ic = ifp->if_l2com;
5918 struct ath_hal *ah = sc->sc_ah;
5919 HAL_STATUS status;
5920
5921 /*
5922 * Collect channel set based on EEPROM contents.
5923 */
5924 status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
5925 &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
5926 if (status != HAL_OK) {
5927 if_printf(ifp, "%s: unable to collect channel list from hal, "
5928 "status %d\n", __func__, status);
5929 return EINVAL;
5930 }
5931 (void) ath_hal_getregdomain(ah, &sc->sc_eerd);
5932 ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */
5933 /* XXX map Atheros sku's to net80211 SKU's */
5934 /* XXX net80211 types too small */
5935 ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
5936 ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
5937 ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */
5938 ic->ic_regdomain.isocc[1] = ' ';
5939
5940 ic->ic_regdomain.ecm = 1;
5941 ic->ic_regdomain.location = 'I';
5942
5943 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
5944 "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
5945 __func__, sc->sc_eerd, sc->sc_eecc,
5946 ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
5947 ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : "");
5948 return 0;
5949}
5950
5951static int
5952ath_rate_setup(struct ath_softc *sc, u_int mode)
5953{
5954 struct ath_hal *ah = sc->sc_ah;
5955 const HAL_RATE_TABLE *rt;
5956
5957 switch (mode) {
5958 case IEEE80211_MODE_11A:
5959 rt = ath_hal_getratetable(ah, HAL_MODE_11A);
5960 break;
5961 case IEEE80211_MODE_HALF:
5962 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
5963 break;
5964 case IEEE80211_MODE_QUARTER:
5965 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
5966 break;
5967 case IEEE80211_MODE_11B:
5968 rt = ath_hal_getratetable(ah, HAL_MODE_11B);
5969 break;
5970 case IEEE80211_MODE_11G:
5971 rt = ath_hal_getratetable(ah, HAL_MODE_11G);
5972 break;
5973 case IEEE80211_MODE_TURBO_A:
5974 rt = ath_hal_getratetable(ah, HAL_MODE_108A);
5975 break;
5976 case IEEE80211_MODE_TURBO_G:
5977 rt = ath_hal_getratetable(ah, HAL_MODE_108G);
5978 break;
5979 case IEEE80211_MODE_STURBO_A:
5980 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
5981 break;
5982 case IEEE80211_MODE_11NA:
5983 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
5984 break;
5985 case IEEE80211_MODE_11NG:
5986 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
5987 break;
5988 default:
5989 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
5990 __func__, mode);
5991 return 0;
5992 }
5993 sc->sc_rates[mode] = rt;
5994 return (rt != NULL);
5995}
5996
5997static void
5998ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
5999{
6000#define N(a) (sizeof(a)/sizeof(a[0]))
6001 /* NB: on/off times from the Atheros NDIS driver, w/ permission */
6002 static const struct {
6003 u_int rate; /* tx/rx 802.11 rate */
6004 u_int16_t timeOn; /* LED on time (ms) */
6005 u_int16_t timeOff; /* LED off time (ms) */
6006 } blinkrates[] = {
6007 { 108, 40, 10 },
6008 { 96, 44, 11 },
6009 { 72, 50, 13 },
6010 { 48, 57, 14 },
6011 { 36, 67, 16 },
6012 { 24, 80, 20 },
6013 { 22, 100, 25 },
6014 { 18, 133, 34 },
6015 { 12, 160, 40 },
6016 { 10, 200, 50 },
6017 { 6, 240, 58 },
6018 { 4, 267, 66 },
6019 { 2, 400, 100 },
6020 { 0, 500, 130 },
6021 /* XXX half/quarter rates */
6022 };
	const HAL_RATE_TABLE *rt;
	int i, j;

	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = sc->sc_rates[mode];
	KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
	for (i = 0; i < rt->rateCount; i++) {
		uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy != IEEE80211_T_HT)
			sc->sc_rixmap[ieeerate] = i;
		else
			sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
	}
	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
	for (i = 0; i < N(sc->sc_hwmap); i++) {
		if (i >= rt->rateCount) {
			sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
			sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
			continue;
		}
		sc->sc_hwmap[i].ieeerate =
			rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy == IEEE80211_T_HT)
			sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
		sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
		if (rt->info[i].shortPreamble ||
		    rt->info[i].phy == IEEE80211_T_OFDM)
			sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
		for (j = 0; j < N(blinkrates)-1; j++)
			if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
				break;
		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
		sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
		sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
	}
	sc->sc_currates = rt;
	sc->sc_curmode = mode;
	/*
	 * All protection frames are transmitted at 2 Mb/s for
	 * 11g, otherwise at 1 Mb/s.
	 */
	if (mode == IEEE80211_MODE_11G)
		sc->sc_protrix = ath_tx_findrix(sc, 2*2);
	else
		sc->sc_protrix = ath_tx_findrix(sc, 2*1);
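	/* NB: ath_tx_findrix() takes 500 kb/s units: 2*2 == 2 Mb/s, 2*1 == 1 Mb/s */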
	/* NB: caller is responsible for resetting rate control state */
#undef N
}

static void
ath_watchdog(void *arg)
{
	struct ath_softc *sc = arg;
	int do_reset = 0;

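	/*
	 * sc_wd_timer is a countdown in seconds (this callout is
	 * rescheduled once per second below); the tx path re-arms it
	 * while work is pending, so reaching zero here means the
	 * hardware made no tx progress for the whole interval.
	 */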
	if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
		struct ifnet *ifp = sc->sc_ifp;
		uint32_t hangs;

		if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
		    hangs != 0) {
			if_printf(ifp, "%s hang detected (0x%x)\n",
			    hangs & 0xff ? "bb" : "mac", hangs);
		} else
			if_printf(ifp, "device timeout\n");
		do_reset = 1;
		ifp->if_oerrors++;
		sc->sc_stats.ast_watchdog++;
	}

	/*
	 * We can't hold the lock across the ath_reset() call.
	 */
	if (do_reset) {
		ATH_UNLOCK(sc);
		ath_reset(sc->sc_ifp, ATH_RESET_NOLOSS);
		ATH_LOCK(sc);
	}

	callout_schedule(&sc->sc_wd_ch, hz);
}

#ifdef ATH_DIAGAPI
/*
 * Diagnostic interface to the HAL. This is used by various
 * tools to do things like retrieve register contents for
 * debugging. The mechanism is intentionally opaque so that
 * it can change frequently w/o concern for compatibility.
 */
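/*
 * A minimal userland sketch (variable names hypothetical) of driving
 * this interface; a diagnostic tool fills in a struct ath_diag and
 * issues the ioctl against an open socket:
 *
 *	struct ath_diag ad;
 *
 *	memset(&ad, 0, sizeof(ad));
 *	strlcpy(ad.ad_name, "ath0", sizeof(ad.ad_name));
 *	ad.ad_id = id | ATH_DIAG_DYN;	// let the driver size the result
 *	ad.ad_out_data = (caddr_t) buf;
 *	ad.ad_out_size = sizeof(buf);
 *	if (ioctl(s, SIOCGATHDIAG, &ad) < 0)
 *		err(1, "SIOCGATHDIAG");
 */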
static int
ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
{
	struct ath_hal *ah = sc->sc_ah;
	u_int id = ad->ad_id & ATH_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = ad->ad_in_size;
	u_int32_t outsize = ad->ad_out_size;
	int error = 0;

	if (ad->ad_id & ATH_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(ad->ad_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (ad->ad_id & ATH_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results). Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
		if (outsize < ad->ad_out_size)
			ad->ad_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, ad->ad_out_data,
			    ad->ad_out_size);
	} else {
		error = EINVAL;
	}
bad:
	if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
#endif /* ATH_DIAGAPI */

static int
ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	const HAL_RATE_TABLE *rt;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		ATH_LOCK(sc);
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call ath_init() here. Instead,
			 * only reflect promisc mode settings.
			 */
			ath_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode. In that case we
			 * will still be marked UP but not RUNNING.
			 * However, trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state. There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid)
				ath_init(sc);	/* XXX lose error */
		} else {
			ath_stop_locked(ifp);
#ifdef notyet
			/* XXX must wakeup in places like ath_vap_delete */
			if (!sc->sc_invalid)
				ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
#endif
		}
		ATH_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGATHSTATS:
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.ast_tx_packets = ifp->if_opackets;
		sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
		sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
		sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
#ifdef IEEE80211_SUPPORT_TDMA
		sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap);
		sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam);
#endif
		rt = sc->sc_currates;
		sc->sc_stats.ast_tx_rate =
		    rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
		if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT)
			sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS;
		return copyout(&sc->sc_stats,
		    ifr->ifr_data, sizeof (sc->sc_stats));
	case SIOCZATHSTATS:
		error = priv_check(curthread, PRIV_DRIVER);
		if (error == 0)
			memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
		break;
#ifdef ATH_DIAGAPI
	case SIOCGATHDIAG:
		error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
		break;
	case SIOCGATHPHYERR:
		error = ath_ioctl_phyerr(sc, (struct ath_diag *) ifr);
		break;
#endif
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}

/*
 * Announce various information on device/driver attach.
 */
static void
ath_announce(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;

	if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n",
	    ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
	    ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
	if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n",
	    ah->ah_analog2GhzRev, ah->ah_analog5GhzRev);
	if (bootverbose) {
		int i;
		for (i = 0; i <= WME_AC_VO; i++) {
			struct ath_txq *txq = sc->sc_ac2q[i];
			if_printf(ifp, "Use hw queue %u for %s traffic\n",
			    txq->axq_qnum, ieee80211_wme_acnames[i]);
		}
		if_printf(ifp, "Use hw queue %u for CAB traffic\n",
		    sc->sc_cabq->axq_qnum);
		if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
	}
	if (ath_rxbuf != ATH_RXBUF)
		if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
	if (ath_txbuf != ATH_TXBUF)
		if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
	if (sc->sc_mcastkey && bootverbose)
		if_printf(ifp, "using multicast key search\n");
}

#ifdef IEEE80211_SUPPORT_TDMA
static void
ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, u_int32_t bintval)
{
	struct ath_hal *ah = sc->sc_ah;
	HAL_BEACON_TIMERS bt;

	bt.bt_intval = bintval | HAL_BEACON_ENA;
	bt.bt_nexttbtt = nexttbtt;
	bt.bt_nextdba = (nexttbtt<<3) - sc->sc_tdmadbaprep;
	bt.bt_nextswba = (nexttbtt<<3) - sc->sc_tdmaswbaprep;
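	/*
	 * NB: nexttbtt is in TU while the DBA/SWBA timers are
	 * programmed in 1/8 TU units, hence the <<3 above; the prep
	 * lead times are assumed to already be in the scaled units.
	 */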
	bt.bt_nextatim = nexttbtt+1;
	/* Enables TBTT, DBA, SWBA timers by default */
	bt.bt_flags = 0;
	ath_hal_beaconsettimers(ah, &bt);
}

/*
 * Calculate the beacon interval. This is periodic in the
 * superframe for the bss. We assume each station is configured
 * identically with respect to transmit rate so the guard time we
 * calculate above will be the same on all stations. Note we need
 * to factor in the xmit time because the hardware will schedule
 * a frame for transmit if the start of the frame is within
 * the burst time. When we get hardware that properly kills
 * frames in the PCU we can reduce/eliminate the guard time.
 *
 * Rounding up to 1024 is so we have 1 TU buffer in the guard time
 * to deal with the granularity of the nexttbtt timer. 11n MACs
 * with 1us timer granularity should allow us to reduce/eliminate
 * this.
 */
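/*
 * Worked example (hypothetical numbers): two slots of 10ms
 * (tdma_slotlen = 10000us) with a 1ms guard give
 * roundup((10000 + 1000) * 2, 1024) == 22528us, i.e. a 22 TU
 * beacon interval after the >>10 below (already even, so no bump).
 */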
static void
ath_tdma_bintvalsetup(struct ath_softc *sc,
	const struct ieee80211_tdma_state *tdma)
{
	/* copy from vap state (XXX check all vaps have same value?) */
	sc->sc_tdmaslotlen = tdma->tdma_slotlen;

	sc->sc_tdmabintval = roundup((sc->sc_tdmaslotlen+sc->sc_tdmaguard) *
	    tdma->tdma_slotcnt, 1024);
	sc->sc_tdmabintval >>= 10;		/* TSF -> TU */
	if (sc->sc_tdmabintval & 1)
		sc->sc_tdmabintval++;

	if (tdma->tdma_slot == 0) {
		/*
		 * Only slot 0 beacons; other slots respond.
		 */
		sc->sc_imask |= HAL_INT_SWBA;
		sc->sc_tdmaswba = 0;		/* beacon immediately */
	} else {
		/* XXX all vaps must be slot 0 or slot !0 */
		sc->sc_imask &= ~HAL_INT_SWBA;
	}
}

/*
 * Max 802.11 overhead. This assumes no 4-address frames and
 * the encapsulation done by ieee80211_encap (llc). We also
 * include potential crypto overhead.
 */
#define	IEEE80211_MAXOVERHEAD \
	(sizeof(struct ieee80211_qosframe) \
	 + sizeof(struct llc) \
	 + IEEE80211_ADDR_LEN \
	 + IEEE80211_WEP_IVLEN \
	 + IEEE80211_WEP_KIDLEN \
	 + IEEE80211_WEP_CRCLEN \
	 + IEEE80211_WEP_MICLEN \
	 + IEEE80211_CRC_LEN)

/*
 * Setup initially for tdma operation. Start the beacon
 * timers and enable SWBA if we are slot 0. Otherwise
 * we wait for slot 0 to arrive so we can sync up before
 * starting to transmit.
 */
static void
ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	const struct ieee80211_txparam *tp;
	const struct ieee80211_tdma_state *tdma = NULL;
	int rix;

	if (vap == NULL) {
		vap = TAILQ_FIRST(&ic->ic_vaps);	/* XXX */
		if (vap == NULL) {
			if_printf(ifp, "%s: no vaps?\n", __func__);
			return;
		}
	}
	tp = vap->iv_bss->ni_txparms;
	/*
	 * Calculate the guard time for each slot. This is the
	 * time to send a maximal-size frame according to the
	 * fixed/lowest transmit rate. Note that the interface
	 * mtu does not include the 802.11 overhead so we must
	 * tack that on (ath_hal_computetxtime includes the
	 * preamble and plcp in its calculation).
	 */
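	/*
	 * e.g. (hypothetical): a 1500 byte mtu plus the ~70 bytes of
	 * IEEE80211_MAXOVERHEAD at a fixed 24 Mb/s OFDM rate yields
	 * a guard time on the order of 550us per slot.
	 */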
	tdma = vap->iv_tdma;
	if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
		rix = ath_tx_findrix(sc, tp->ucastrate);
	else
		rix = ath_tx_findrix(sc, tp->mcastrate);
	/* XXX short preamble assumed */
	sc->sc_tdmaguard = ath_hal_computetxtime(ah, sc->sc_currates,
	    ifp->if_mtu + IEEE80211_MAXOVERHEAD, rix, AH_TRUE);

	ath_hal_intrset(ah, 0);

	ath_beaconq_config(sc);			/* setup h/w beacon q */
	if (sc->sc_setcca)
		ath_hal_setcca(ah, AH_FALSE);	/* disable CCA */
	ath_tdma_bintvalsetup(sc, tdma);	/* calculate beacon interval */
	ath_tdma_settimers(sc, sc->sc_tdmabintval,
	    sc->sc_tdmabintval | HAL_BEACON_RESET_TSF);
	sc->sc_syncbeacon = 0;

	sc->sc_avgtsfdeltap = TDMA_DUMMY_MARKER;
	sc->sc_avgtsfdeltam = TDMA_DUMMY_MARKER;

	ath_hal_intrset(ah, sc->sc_imask);

	DPRINTF(sc, ATH_DEBUG_TDMA, "%s: slot %u len %uus cnt %u "
	    "bsched %u guard %uus bintval %u TU dba prep %u\n", __func__,
	    tdma->tdma_slot, tdma->tdma_slotlen, tdma->tdma_slotcnt,
	    tdma->tdma_bintval, sc->sc_tdmaguard, sc->sc_tdmabintval,
	    sc->sc_tdmadbaprep);
}

/*
 * Update tdma operation. Called from the 802.11 layer
 * when a beacon is received from the TDMA station operating
 * in the slot immediately preceding us in the bss. Use
 * the rx timestamp for the beacon frame to update our
 * beacon timers so we follow their schedule. Note that
 * by using the rx timestamp we implicitly include the
 * propagation delay in our schedule.
 */
static void
ath_tdma_update(struct ieee80211_node *ni,
	const struct ieee80211_tdma_param *tdma, int changed)
{
#define	TSF_TO_TU(_h,_l) \
	((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
#define	TU_TO_TSF(_tu)	(((u_int64_t)(_tu)) << 10)
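	/*
	 * NB: a TU is 1024us, so TU_TO_TSF is a plain <<10 while
	 * TSF_TO_TU divides the 64-bit TSF by 1024 from its two
	 * 32-bit halves: the low word supplies bits 10..31 and the
	 * high word is shifted into the remaining (32-10) == 22 bits.
	 */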
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	u_int64_t tsf, rstamp, nextslot, nexttbtt;
	u_int32_t txtime, nextslottu;
	int32_t tudelta, tsfdelta;
	const struct ath_rx_status *rs;
	int rix;

	sc->sc_stats.ast_tdma_update++;

	/*
	 * Check for and adopt configuration changes.
	 */
	if (changed != 0) {
		const struct ieee80211_tdma_state *ts = vap->iv_tdma;

		ath_tdma_bintvalsetup(sc, ts);
		if (changed & TDMA_UPDATE_SLOTLEN)
			ath_wme_update(ic);

		DPRINTF(sc, ATH_DEBUG_TDMA,
		    "%s: adopt slot %u slotcnt %u slotlen %u us "
		    "bintval %u TU\n", __func__,
		    ts->tdma_slot, ts->tdma_slotcnt, ts->tdma_slotlen,
		    sc->sc_tdmabintval);

		/* XXX right? */
		ath_hal_intrset(ah, sc->sc_imask);
		/* NB: beacon timers programmed below */
	}

	/* extend rx timestamp to 64 bits */
	rs = sc->sc_lastrs;
	tsf = ath_hal_gettsf64(ah);
	rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf);
	/*
	 * The rx timestamp is set by the hardware on completing
	 * reception (at the point where the rx descriptor is DMA'd
	 * to the host). To find the start of our next slot we
	 * must adjust this time by the time required to send
	 * the packet just received.
	 */
	rix = rt->rateCodeToIndex[rs->rs_rate];
	txtime = ath_hal_computetxtime(ah, rt, rs->rs_datalen, rix,
	    rt->info[rix].shortPreamble);
	/* NB: sc_tdmabintval is in TU; <<9 == (<<10)/2, i.e. half the
	 * beacon interval converted to TSF (us) units */
	nextslot = (rstamp - txtime) + (sc->sc_tdmabintval << 9);
	nextslottu = TSF_TO_TU(nextslot>>32, nextslot) & HAL_BEACON_PERIOD;

	/*
	 * Retrieve the hardware NextTBTT in usecs
	 * and calculate the difference between what the
	 * other station thinks and what we have programmed. This
	 * lets us figure how to adjust our timers to match. The
	 * adjustments are done by pulling the TSF forward and possibly
	 * rewriting the beacon timers.
	 */
	nexttbtt = ath_hal_getnexttbtt(ah);
	tsfdelta = (int32_t)((nextslot % TU_TO_TSF(HAL_BEACON_PERIOD + 1)) - nexttbtt);

	DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
	    "tsfdelta %d avg +%d/-%d\n", tsfdelta,
	    TDMA_AVG(sc->sc_avgtsfdeltap), TDMA_AVG(sc->sc_avgtsfdeltam));

	if (tsfdelta < 0) {
		TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
		TDMA_SAMPLE(sc->sc_avgtsfdeltam, -tsfdelta);
		tsfdelta = -tsfdelta % 1024;
		nextslottu++;
	} else if (tsfdelta > 0) {
		TDMA_SAMPLE(sc->sc_avgtsfdeltap, tsfdelta);
		TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
		tsfdelta = 1024 - (tsfdelta % 1024);
		nextslottu++;
	} else {
		TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
		TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
	}
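	/*
	 * NB: in both non-zero cases the correction is applied by
	 * pulling the TSF forward to the next TU boundary (the
	 * % 1024 terms) and accounting for the skipped TU via
	 * nextslottu; the TSF is never rewound.
	 */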
	tudelta = nextslottu - TSF_TO_TU(nexttbtt >> 32, nexttbtt);

	/*
	 * Copy the sender's timestamp into the tdma ie so they can
	 * calculate the roundtrip time. We submit a beacon frame
	 * below after any timer adjustment. The frame goes out
	 * at the next TBTT so the sender can calculate the
	 * roundtrip by inspecting the tdma ie in our beacon frame.
	 *
	 * NB: This tstamp is subtly preserved when
	 * IEEE80211_BEACON_TDMA is marked (e.g. when the
	 * slot position changes) because ieee80211_add_tdma
	 * skips over the data.
	 */
	memcpy(ATH_VAP(vap)->av_boff.bo_tdma +
	    __offsetof(struct ieee80211_tdma_param, tdma_tstamp),
	    &ni->ni_tstamp.data, 8);
#if 0
	DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
	    "tsf %llu nextslot %llu (%d, %d) nextslottu %u nexttbtt %llu (%d)\n",
	    (unsigned long long) tsf, (unsigned long long) nextslot,
	    (int)(nextslot - tsf), tsfdelta, nextslottu, nexttbtt, tudelta);
#endif
	/*
	 * Adjust the beacon timers only when pulling them forward
	 * or when going back by less than the beacon interval.
	 * Negative jumps larger than the beacon interval seem to
	 * cause the timers to stop and generally cause instability.
	 * This basically filters out jumps due to missed beacons.
	 */
	if (tudelta != 0 && (tudelta > 0 || -tudelta < sc->sc_tdmabintval)) {
		ath_tdma_settimers(sc, nextslottu, sc->sc_tdmabintval);
		sc->sc_stats.ast_tdma_timers++;
	}
	if (tsfdelta > 0) {
		ath_hal_adjusttsf(ah, tsfdelta);
		sc->sc_stats.ast_tdma_tsf++;
	}
	ath_tdma_beacon_send(sc, vap);		/* prepare response */
#undef TU_TO_TSF
#undef TSF_TO_TU
}

/*
 * Transmit a beacon frame at SWBA. Dynamic updates
 * to the frame contents are done as needed.
 */
static void
ath_tdma_beacon_send(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;
	int otherant;

	/*
	 * Check if the previous beacon has gone out. If
	 * not, don't try to post another; skip this period
	 * and wait for the next. Missed beacons indicate
	 * a problem and should not occur. If we miss too
	 * many consecutive beacons reset the device.
	 */
	if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
		sc->sc_bmisscount++;
		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: missed %u consecutive beacons\n",
		    __func__, sc->sc_bmisscount);
		if (sc->sc_bmisscount >= ath_bstuck_threshold)
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
		return;
	}
	if (sc->sc_bmisscount != 0) {
		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: resume beacon xmit after %u misses\n",
		    __func__, sc->sc_bmisscount);
		sc->sc_bmisscount = 0;
	}

	/*
	 * Check recent per-antenna transmit statistics and flip
	 * the default antenna if noticeably more frames went out
	 * on the non-default antenna.
	 * XXX assumes 2 antennae
	 */
	if (!sc->sc_diversity) {
		otherant = sc->sc_defant & 1 ? 2 : 1;
		if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
			ath_setdefantenna(sc, otherant);
		sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
	}

	bf = ath_beacon_generate(sc, vap);
	if (bf != NULL) {
		/*
		 * Stop any current dma and put the new frame on the queue.
		 * This should never fail since we check above that no frames
		 * are still pending on the queue.
		 */
		if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: beacon queue %u did not stop?\n",
			    __func__, sc->sc_bhalq);
			/* NB: the HAL still stops DMA, so proceed */
		}
		ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
		ath_hal_txstart(ah, sc->sc_bhalq);

		sc->sc_stats.ast_be_xmit++;	/* XXX per-vap? */

		/*
		 * Record the local TSF for our last send for use
		 * in arbitrating slot collisions.
		 */
		vap->iv_bss->ni_tstamp.tsf = ath_hal_gettsf64(ah);
	}
}
#endif /* IEEE80211_SUPPORT_TDMA */

static void
ath_dfs_tasklet(void *p, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) p;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	/*
	 * If previous processing has found a radar event,
	 * signal this to the net80211 layer to begin DFS
	 * processing.
	 */
	if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
		/* DFS event found, initiate channel change */
		ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
	}
}

MODULE_VERSION(if_ath, 1);
MODULE_DEPEND(if_ath, wlan, 1, 1, 1);		/* 802.11 media layer */