/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 242271 2012-10-28 21:13:12Z adrian $");
31__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 242391 2012-10-31 06:27:58Z adrian $");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>	/* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_rx_edma.h>
#include <dev/ath/if_ath_tx_edma.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

/*
 * ATH_BCBUF determines the number of vap's that can transmit
 * beacons and also (currently) the number of vap's that can
 * have unique mac addresses/bssid.  When staggering beacons
 * 4 is probably a good max as otherwise the beacons become
 * very closely spaced and there is limited time for cab q traffic
 * to go out.  You can burst beacons instead but that is not good
 * for stations in power save and at some point you really want
 * another radio (and channel).
 *
 * The limit on the number of mac addresses is tied to our use of
 * the U/L bit and tracking addresses in a byte; it would be
 * worthwhile to allow more for applications like proxy sta.
 */
CTASSERT(ATH_BCBUF <= 8);
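/*
 * NB (explanatory note, not in the original source): CTASSERT() is a
 * compile-time check; more than 8 slots would not fit in the byte-wide
 * bssid index mask manipulated by assign_address()/reclaim_address()
 * below.
 */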

static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_vap_delete(struct ieee80211vap *);
static void	ath_init(void *);
static void	ath_stop_locked(struct ifnet *);
static void	ath_stop(struct ifnet *);
static int	ath_reset_vap(struct ieee80211vap *, u_long);
static void	ath_start_queue(struct ifnet *ifp);
static int	ath_media_change(struct ifnet *);
static void	ath_watchdog(void *);
static int	ath_ioctl(struct ifnet *, u_long, caddr_t);
static void	ath_fatal_proc(void *, int);
static void	ath_bmiss_vap(struct ieee80211vap *);
static void	ath_bmiss_proc(void *, int);
static void	ath_key_update_begin(struct ieee80211vap *);
static void	ath_key_update_end(struct ieee80211vap *);
static void	ath_update_mcast(struct ifnet *);
static void	ath_update_promisc(struct ifnet *);
static void	ath_updateslot(struct ifnet *);
static void	ath_bstuck_proc(void *, int);
static void	ath_reset_proc(void *, int);
static int	ath_desc_alloc(struct ath_softc *);
static void	ath_desc_free(struct ath_softc *);
static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
			const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_node_cleanup(struct ieee80211_node *);
static void	ath_node_free(struct ieee80211_node *);
static void	ath_node_getsignal(const struct ieee80211_node *,
			int8_t *, int8_t *);
static void	ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
static int	ath_tx_setup(struct ath_softc *, int, int);
static void	ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
static void	ath_tx_cleanup(struct ath_softc *);
static int	ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq,
		    int dosched);
static void	ath_tx_proc_q0(void *, int);
static void	ath_tx_proc_q0123(void *, int);
static void	ath_tx_proc(void *, int);
static void	ath_txq_sched_tasklet(void *, int);
static int	ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
static void	ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
static void	ath_scan_start(struct ieee80211com *);
static void	ath_scan_end(struct ieee80211com *);
static void	ath_set_channel(struct ieee80211com *);
#ifdef	ATH_ENABLE_11N
static void	ath_update_chw(struct ieee80211com *);
#endif	/* ATH_ENABLE_11N */
static void	ath_calibrate(void *);
static int	ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	ath_setup_stationkey(struct ieee80211_node *);
static void	ath_newassoc(struct ieee80211_node *, int);
static int	ath_setregdomain(struct ieee80211com *,
		    struct ieee80211_regdomain *, int,
		    struct ieee80211_channel []);
static void	ath_getradiocaps(struct ieee80211com *, int, int *,
		    struct ieee80211_channel []);
static int	ath_getchannels(struct ath_softc *);

static int	ath_rate_setup(struct ath_softc *, u_int mode);
static void	ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);

static void	ath_announce(struct ath_softc *);

static void	ath_dfs_tasklet(void *, int);
static void	ath_node_powersave(struct ieee80211_node *, int);
static int	ath_node_set_tim(struct ieee80211_node *, int);

#ifdef IEEE80211_SUPPORT_TDMA
#include <dev/ath/if_ath_tdma.h>
#endif

SYSCTL_DECL(_hw_ath);

/* XXX validate sysctl values */
static	int ath_longcalinterval = 30;		/* long cals every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
	    0, "long chip calibration interval (secs)");
static	int ath_shortcalinterval = 100;		/* short cals every 100 ms */
SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
	    0, "short chip calibration interval (msecs)");
static	int ath_resetcalinterval = 20*60;	/* reset cal state 20 mins */
SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
	    0, "reset chip calibration results (secs)");
static	int ath_anicalinterval = 100;		/* ANI calibration - 100 msec */
SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
	    0, "ANI calibration (msecs)");

int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
int ath_txbuf_mgmt = ATH_MGMT_TXBUF;	/* # mgmt tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RW, &ath_txbuf_mgmt,
	    0, "tx (mgmt) buffers allocated");
TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt);

int ath_bstuck_threshold = 4;		/* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
	    0, "max missed beacon xmits before chip reset");

MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

void
ath_legacy_attach_comp_func(struct ath_softc *sc)
{

	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 */
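	/*
	 * E.g. (explanatory note, not in the original): with the CAB
	 * queue bit masked off, a remaining mask of 0x01 means only
	 * h/w queue 0 is in use and 0x0f means queues 0-3 (the four
	 * WME queues) are in use; anything else takes the generic
	 * completion path.
	 */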
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
	case 0x01:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		break;
	case 0x0f:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		break;
	default:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
		break;
	}
}

#define	HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define	HAL_MODE_HT40 \
	(HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
	HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct ath_hal *ah = NULL;
	HAL_STATUS status;
	int error = 0, i;
	u_int wmodes;
	uint8_t macaddr[IEEE80211_ADDR_LEN];
	int rx_chainmask, tx_chainmask;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	CURVNET_SET(vnet0);
	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto bad;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));
	CURVNET_RESTORE();

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
	    sc->sc_eepromdata, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
			status);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */
#ifdef	ATH_DEBUG
	sc->sc_debug = ath_debug;
#endif

	/*
	 * Setup the DMA/EDMA functions based on the current
	 * hardware support.
	 *
	 * This is required before the descriptors are allocated.
	 */
	if (ath_hal_hasedma(sc->sc_ah)) {
		sc->sc_isedma = 1;
		ath_recv_setup_edma(sc);
		ath_xmit_setup_edma(sc);
	} else {
		ath_recv_setup_legacy(sc);
		ath_xmit_setup_legacy(sc);
	}

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.  If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	if (sc->sc_keymax > ATH_KEYMAX) {
		if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
			ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);

	/*
	 * Collect the default channel list.
	 */
	error = ath_getchannels(sc);
	if (error != 0)
		goto bad;

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_11NA);
	ath_rate_setup(sc, IEEE80211_MODE_11NG);
	ath_rate_setup(sc, IEEE80211_MODE_HALF);
	ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);

	/*
	 * Allocate TX descriptors and populate the lists.
	 */
	error = ath_desc_alloc(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate TX descriptors: %d\n",
		    error);
		goto bad;
	}
	error = ath_txdma_setup(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate TX descriptors: %d\n",
		    error);
		goto bad;
	}

	/*
	 * Allocate RX descriptors and populate the lists.
	 */
	error = ath_rxdma_setup(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate RX descriptors: %d\n",
		    error);
		goto bad;
	}

	callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0);

	ATH_TXBUF_LOCK_INIT(sc);

	sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
	TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc);
	TASK_INIT(&sc->sc_txqtask,0, ath_txq_sched_tasklet, sc);
	TASK_INIT(&sc->sc_fataltask,0, ath_fatal_proc, sc);
	TASK_INIT(&sc->sc_txsndtask, 0, ath_start_task, sc);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 *
	 * XXX PS-Poll
	 */
	sc->sc_bhalq = ath_beaconq_setup(sc);
	if (sc->sc_bhalq == (u_int) -1) {
		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		if_printf(ifp, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
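	/*
	 * NB (explanatory note, not in the original): in the fallback
	 * case above all four ACs share the BK h/w queue, so
	 * IEEE80211_C_WME is deliberately not advertised later (see
	 * the sc_ac2q[WME_AC_BE] != sc_ac2q[WME_AC_BK] check below).
	 */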

	/*
	 * Attach the TX completion function.
	 *
	 * The non-EDMA chips may have some special case optimisations;
	 * this method gives everyone a chance to attach cleanly.
	 */
	sc->sc_tx.xmit_attach_comp_func(sc);

	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the antenna state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}

	/* Attach DFS module */
	if (! ath_dfs_attach(sc)) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach DFS\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Start DFS processing tasklet */
	TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);

	/* Configure LED state */
	sc->sc_blinking = 0;
	sc->sc_ledstate = 1;
	sc->sc_ledon = 0;			/* low true */
	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7sec */
	callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);

	/*
	 * Don't setup hardware-based blinking.
	 *
	 * Although some NICs may have this configured in the
	 * default reset register values, the user may wish
	 * to alter which pins have which function.
	 *
	 * The reference driver attaches the MAC network LED to GPIO1 and
	 * the MAC power LED to GPIO2.  However, the DWA-552 cardbus
	 * NIC has these reversed.
	 */
	sc->sc_hardled = (1 == 0);
	sc->sc_led_net_pin = -1;
	sc->sc_led_pwr_pin = -1;
	/*
	 * Auto-enable soft led processing for IBM cards and for
	 * 5211 minipci cards.  Users can also manually enable/disable
	 * support with a sysctl.
	 */
	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ath_start_queue;
	ifp->if_ioctl = ath_ioctl;
	ifp->if_init = ath_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode */
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
		| IEEE80211_C_WDS		/* 4-address traffic works */
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
#ifndef	ATH_ENABLE_11N
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
#endif
		| IEEE80211_C_TXFRAG		/* handle tx frags */
#ifdef	ATH_ENABLE_DFS
		| IEEE80211_C_DFS		/* Enable radar detection */
#endif
		;
	/*
	 * Query the hal to figure out h/w crypto support.
	 */
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		/*
		 * If the h/w supports storing tx+rx MIC keys
		 * in one cache slot automatically enable use.
		 */
		if (ath_hal_hastkipsplit(ah) ||
		    !ath_hal_settkipsplit(ah, AH_FALSE))
			sc->sc_splitmic = 1;
		/*
		 * If the h/w can do TKIP MIC together with WME then
		 * we use it; otherwise we force the MIC to be done
		 * in software by the net80211 layer.
		 */
		if (ath_hal_haswmetkipmic(ah))
			sc->sc_wmetkipmic = 1;
	}
	sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
	/*
	 * Check for multicast key search support.
	 */
	if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
	    !ath_hal_getmcastkeysearch(sc->sc_ah)) {
		ath_hal_setmcastkeysearch(sc->sc_ah, 1);
	}
	sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
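	 *
	 * E.g. (illustrative): global key 0 occupies slot 0 and MIC
	 * slot 64; with split MIC (sc_splitmic) it also claims slots
	 * 32 and 96.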
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(sc->sc_keymap, i);
		setbit(sc->sc_keymap, i+64);
		if (sc->sc_splitmic) {
			setbit(sc->sc_keymap, i+32);
			setbit(sc->sc_keymap, i+32+64);
		}
	}
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for misc other capabilities.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;
	sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
	sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
	sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
	sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
	sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
	if (ath_hal_hasfastframes(ah))
		ic->ic_caps |= IEEE80211_C_FF;
	wmodes = ath_hal_getwirelessmodes(ah);
	if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
		ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
	if (ath_hal_macversion(ah) > 0x78) {
		ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
		ic->ic_tdma_update = ath_tdma_update;
	}
#endif

	/*
	 * TODO: enforce that at least this many frames are available
	 * in the txbuf list before allowing data frames (raw or
	 * otherwise) to be transmitted.
	 */
	sc->sc_txq_data_minfree = 10;
	/*
	 * Leave this as default to maintain legacy behaviour.
	 * Shortening the cabq/mcastq may end up causing some
	 * undesirable behaviour.
	 */
	sc->sc_txq_mcastq_maxdepth = ath_txbuf;

	/*
	 * Allow the TX and RX chainmasks to be overridden by
	 * environment variables and/or device.hints.
	 *
	 * This must be done early - before the hardware is
	 * calibrated or before the 802.11n stream calculation
	 * is done.
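	 *
	 * Usage sketch (assuming unit 0; not in the original source):
	 * in /boot/device.hints or loader.conf,
	 *
	 *	hint.ath.0.rx_chainmask=0x3
	 *	hint.ath.0.tx_chainmask=0x3
	 *
	 * would restrict ath0 to its first two chains.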
	 */
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "rx_chainmask",
	    &rx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n",
		    rx_chainmask);
		(void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask);
	}
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "tx_chainmask",
	    &tx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n",
		    tx_chainmask);
		(void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask);
	}

	/*
	 * Disable MRR with protected frames by default.
	 * Only 802.11n series NICs can handle this.
	 */
	sc->sc_mrrprot = 0;	/* XXX should be a capability */

#ifdef	ATH_ENABLE_11N
	/*
	 * Query HT capabilities
	 */
	if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
	    (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
		int rxs, txs;

		device_printf(sc->sc_dev, "[HT] enabling HT modes\n");

		sc->sc_mrrprot = 1;	/* XXX should be a capability */

		ic->ic_htcaps = IEEE80211_HTC_HT	/* HT operation */
			    | IEEE80211_HTC_AMPDU	/* A-MPDU tx/rx */
			    | IEEE80211_HTC_AMSDU	/* A-MSDU tx/rx */
			    | IEEE80211_HTCAP_MAXAMSDU_3839
							/* max A-MSDU length */
			    | IEEE80211_HTCAP_SMPS_OFF;	/* SM power save off */

		/*
		 * Enable short-GI for HT20 only if the hardware
		 * advertises support.
		 * Notably, anything earlier than the AR9287 doesn't.
		 */
		if ((ath_hal_getcapability(ah,
		    HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
		    (wmodes & HAL_MODE_HT20)) {
			device_printf(sc->sc_dev,
			    "[HT] enabling short-GI in 20MHz mode\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
		}

		if (wmodes & HAL_MODE_HT40)
			ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
			    | IEEE80211_HTCAP_SHORTGI40;

		/*
		 * TX/RX streams need to be taken into account when
		 * negotiating which MCS rates it'll receive and
		 * what MCS rates are available for TX.
		 */
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs);
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs);

		ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
		ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);

		ic->ic_txstream = txs;
		ic->ic_rxstream = rxs;

		(void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1,
		    &sc->sc_rts_aggr_limit);
		if (sc->sc_rts_aggr_limit != (64 * 1024))
			device_printf(sc->sc_dev,
			    "[HT] RTS aggregates limited to %d KiB\n",
			    sc->sc_rts_aggr_limit / 1024);

		device_printf(sc->sc_dev,
		    "[HT] %d RX streams; %d TX streams\n", rxs, txs);
	}
#endif

	/*
	 * Initial aggregation settings.
	 */
	sc->sc_hwq_limit = ATH_AGGR_MIN_QDEPTH;
	sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW;
	sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH;

	/*
	 * Check if the hardware requires PCI register serialisation.
	 * Some of the Owl based MACs require this.
	 */
	if (mp_ncpus > 1 &&
	    ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
	     0, NULL) == HAL_OK) {
		sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
		device_printf(sc->sc_dev,
		    "Enabling register serialisation\n");
	}

	/*
	 * Indicate we need the 802.11 header padded to a
	 * 32-bit boundary for 4-address and QoS frames.
	 */
	ic->ic_flags |= IEEE80211_F_DATAPAD;

	/*
	 * Query the hal about antenna support.
	 */
	sc->sc_defant = ath_hal_getdefantenna(ah);

	/*
	 * Not all chips have the VEOL support we want to
	 * use with IBSS beacons; check here for it.
	 */
	sc->sc_hasveol = ath_hal_hasveol(ah);

	/* get mac address from hardware */
	ath_hal_getmac(ah, macaddr);
	if (sc->sc_hasbmask)
		ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);

	/* NB: used to size node table key mapping array */
	ic->ic_max_keyix = sc->sc_keymax;
	/* call MI attach routine. */
	ieee80211_ifattach(ic, macaddr);
	ic->ic_setregdomain = ath_setregdomain;
	ic->ic_getradiocaps = ath_getradiocaps;
	sc->sc_opmode = HAL_M_STA;

	/* override default methods */
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	ic->ic_vap_create = ath_vap_create;
	ic->ic_vap_delete = ath_vap_delete;
	ic->ic_raw_xmit = ath_raw_xmit;
	ic->ic_update_mcast = ath_update_mcast;
	ic->ic_update_promisc = ath_update_promisc;
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = ath_node_cleanup;
	ic->ic_node_getsignal = ath_node_getsignal;
	ic->ic_scan_start = ath_scan_start;
	ic->ic_scan_end = ath_scan_end;
	ic->ic_set_channel = ath_set_channel;
#ifdef	ATH_ENABLE_11N
	/* 802.11n specific - but just override anyway */
	sc->sc_addba_request = ic->ic_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	sc->sc_bar_response = ic->ic_bar_response;
	sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;

	ic->ic_addba_request = ath_addba_request;
	ic->ic_addba_response = ath_addba_response;
	ic->ic_addba_response_timeout = ath_addba_response_timeout;
	ic->ic_addba_stop = ath_addba_stop;
	ic->ic_bar_response = ath_bar_response;

	ic->ic_update_chw = ath_update_chw;
#endif	/* ATH_ENABLE_11N */

#ifdef	ATH_ENABLE_RADIOTAP_VENDOR_EXT
	/*
	 * There's one vendor bitmap entry in the RX radiotap
	 * header; make sure that's taken into account.
	 */
	ieee80211_radiotap_attachv(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0,
	    ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1,
	    ATH_RX_RADIOTAP_PRESENT);
#else
	/*
	 * No vendor bitmap/extensions are present.
	 */
	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
	    ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
	    ATH_RX_RADIOTAP_PRESENT);
#endif	/* ATH_ENABLE_RADIOTAP_VENDOR_EXT */

	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);
	ath_sysctl_stats_attach(sc);
	ath_sysctl_hal_attach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	ath_announce(sc);
	return 0;
bad2:
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
	ath_txdma_teardown(sc);
	ath_rxdma_teardown(sc);
bad:
	if (ah)
		ath_hal_detach(ah);
	if (ifp != NULL) {
		CURVNET_SET(ifp->if_vnet);
		if_free(ifp);
		CURVNET_RESTORE();
	}
	sc->sc_invalid = 1;
	return error;
}

int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * NB: the order of these is important:
	 * o stop the chip so no more interrupts will fire
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o free the taskqueue which drains any pending tasks
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ath_stop(ifp);
	ieee80211_ifdetach(ifp->if_l2com);
	taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ath_rate_detach(sc->sc_rc);

	ath_dfs_detach(sc);
	ath_desc_free(sc);
	ath_txdma_teardown(sc);
	ath_rxdma_teardown(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */

	CURVNET_SET(ifp->if_vnet);
	if_free(ifp);
	CURVNET_RESTORE();

	return 0;
}

/*
 * MAC address handling for multiple BSS on the same radio.
 * The first vap uses the MAC address from the EEPROM.  For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
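 *
 * E.g. (illustrative addresses): with an EEPROM MAC of
 * 00:03:7f:aa:bb:cc, the vap assigned index i=1 gets
 * mac[0] |= (1 << 2) | 0x2, yielding the locally administered
 * address 06:03:7f:aa:bb:cc.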
 */
static void
assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && sc->sc_hasbmask) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 8; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		if (i != 0)
			mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	sc->sc_hwbssidmask[0] &= ~mac[0];
	if (i == 0)
		sc->sc_nbssid0++;
}

static void
reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	int i = mac[0] >> 2;
	uint8_t mask;

	if (i != 0 || --sc->sc_nbssid0 == 0) {
		sc->sc_bssidmask &= ~(1<<i);
		/* recalculate bssid mask from remaining addresses */
		mask = 0xff;
		for (i = 1; i < 8; i++)
			if (sc->sc_bssidmask & (1<<i))
				mask &= ~((i<<2)|0x2);
		sc->sc_hwbssidmask[0] |= mask;
	}
}

/*
 * Assign a beacon xmit slot.  We try to space out
 * assignments so when beacons are staggered the
 * traffic coming out of the cab q has maximal time
 * to go out before the next beacon is scheduled.
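 *
 * E.g. with ATH_BCBUF == 4 and only slot 0 occupied, slot 2 is
 * returned since both of its neighbours (slots 1 and 3) are free.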
 */
static int
assign_bslot(struct ath_softc *sc)
{
	u_int slot, free;

	free = 0;
	for (slot = 0; slot < ATH_BCBUF; slot++)
		if (sc->sc_bslot[slot] == NULL) {
			if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
			    /* NB: +ATH_BCBUF avoids unsigned wrap at slot 0 */
			    sc->sc_bslot[(slot+ATH_BCBUF-1)%ATH_BCBUF] == NULL)
				return slot;
			free = slot;
			/* NB: keep looking for a double slot */
		}
	return free;
}

static struct ieee80211vap *
ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp;
	struct ieee80211vap *vap;
	uint8_t mac[IEEE80211_ADDR_LEN];
	int needbeacon, error;
	enum ieee80211_opmode ic_opmode;

	avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
	    M_80211_VAP, M_WAITOK | M_ZERO);
	needbeacon = 0;
	IEEE80211_ADDR_COPY(mac, mac0);

	ATH_LOCK(sc);
	ic_opmode = opmode;		/* default to opmode of new vap */
	switch (opmode) {
	case IEEE80211_M_STA:
		if (sc->sc_nstavaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev, "only 1 sta vap supported\n");
			goto bad;
		}
		if (sc->sc_nvaps) {
			/*
			 * With multiple vaps we must fall back
			 * to s/w beacon miss handling.
			 */
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		if (flags & IEEE80211_CLONE_NOBEACONS) {
			/*
			 * Station mode w/o beacons is implemented w/ AP mode.
			 */
			ic_opmode = IEEE80211_M_HOSTAP;
		}
		break;
	case IEEE80211_M_IBSS:
		if (sc->sc_nvaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev,
			    "only 1 ibss vap supported\n");
			goto bad;
		}
		needbeacon = 1;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (flags & IEEE80211_CLONE_TDMA) {
			if (sc->sc_nvaps != 0) {
				device_printf(sc->sc_dev,
				    "only 1 tdma vap supported\n");
				goto bad;
			}
			needbeacon = 1;
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		/* fall thru... */
#endif
	case IEEE80211_M_MONITOR:
		if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
			/*
			 * Adopt existing mode.  Adding a monitor or ahdemo
			 * vap to an existing configuration is of dubious
			 * value but should be ok.
			 */
			/* XXX not right for monitor mode */
			ic_opmode = ic->ic_opmode;
		}
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		needbeacon = 1;
		break;
	case IEEE80211_M_WDS:
		if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
			device_printf(sc->sc_dev,
			    "wds not supported in sta mode\n");
			goto bad;
		}
		/*
		 * Silently remove any request for a unique
		 * bssid; WDS vap's always share the local
		 * mac address.
		 */
		flags &= ~IEEE80211_CLONE_BSSID;
		if (sc->sc_nvaps == 0)
			ic_opmode = IEEE80211_M_HOSTAP;
		else
			ic_opmode = ic->ic_opmode;
		break;
	default:
		device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
		goto bad;
	}
	/*
	 * Check that a beacon buffer is available; the code below assumes it.
	 */
	if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
		device_printf(sc->sc_dev, "no beacon buffer available\n");
		goto bad;
	}

	/* STA, AHDEMO? */
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
	}

	vap = &avp->av_vap;
	/* XXX can't hold mutex across if_alloc */
	ATH_UNLOCK(sc);
	error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
	    bssid, mac);
	ATH_LOCK(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: error %d creating vap\n",
		    __func__, error);
		goto bad2;
	}

	/* h/w crypto support */
	vap->iv_key_alloc = ath_key_alloc;
	vap->iv_key_delete = ath_key_delete;
	vap->iv_key_set = ath_key_set;
	vap->iv_key_update_begin = ath_key_update_begin;
	vap->iv_key_update_end = ath_key_update_end;

	/* override various methods */
	avp->av_recv_mgmt = vap->iv_recv_mgmt;
	vap->iv_recv_mgmt = ath_recv_mgmt;
	vap->iv_reset = ath_reset_vap;
	vap->iv_update_beacon = ath_beacon_update;
	avp->av_newstate = vap->iv_newstate;
	vap->iv_newstate = ath_newstate;
	avp->av_bmiss = vap->iv_bmiss;
	vap->iv_bmiss = ath_bmiss_vap;

	avp->av_node_ps = vap->iv_node_ps;
	vap->iv_node_ps = ath_node_powersave;

	avp->av_set_tim = vap->iv_set_tim;
	vap->iv_set_tim = ath_node_set_tim;

	/* Set default parameters */

	/*
	 * MACs earlier than some of the AR9300 series don't
	 * support a smaller MPDU density.
	 */
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
	/*
	 * All NICs can handle the maximum size, however
	 * AR5416 based MACs can only TX aggregates w/ RTS
	 * protection when the total aggregate size is <= 8k.
	 * However, for now that's enforced by the TX path.
	 */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;

	avp->av_bslot = -1;
	if (needbeacon) {
		/*
		 * Allocate beacon state and setup the q for buffered
		 * multicast frames.  We know a beacon buffer is
		 * available because we checked above.
		 */
		avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
		TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
		if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
			/*
			 * Assign the vap to a beacon xmit slot.  As above
			 * this cannot fail to find a free one.
			 */
			avp->av_bslot = assign_bslot(sc);
			KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
			    ("beacon slot %u not empty", avp->av_bslot));
			sc->sc_bslot[avp->av_bslot] = vap;
			sc->sc_nbcnvaps++;
		}
		if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
			/*
			 * Multiple vaps are to transmit beacons and we
			 * have h/w support for TSF adjusting; enable
			 * use of staggered beacons.
			 */
			sc->sc_stagbeacons = 1;
		}
		ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
	}

	ic->ic_opmode = ic_opmode;
	if (opmode != IEEE80211_M_WDS) {
		sc->sc_nvaps++;
		if (opmode == IEEE80211_M_STA)
			sc->sc_nstavaps++;
		if (opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps++;
	}
	switch (ic_opmode) {
	case IEEE80211_M_IBSS:
		sc->sc_opmode = HAL_M_IBSS;
		break;
	case IEEE80211_M_STA:
		sc->sc_opmode = HAL_M_STA;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (vap->iv_caps & IEEE80211_C_TDMA) {
			sc->sc_tdma = 1;
			/* NB: disable tsf adjust */
			sc->sc_stagbeacons = 0;
		}
		/*
		 * NB: adhoc demo mode is a pseudo mode; to the hal it's
		 * just ap mode.
		 */
		/* fall thru... */
#endif
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		sc->sc_opmode = HAL_M_HOSTAP;
		break;
	case IEEE80211_M_MONITOR:
		sc->sc_opmode = HAL_M_MONITOR;
		break;
	default:
		/* XXX should not happen */
		break;
	}
	if (sc->sc_hastsfadd) {
		/*
		 * Configure whether or not TSF adjust should be done.
		 */
		ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
	}
	if (flags & IEEE80211_CLONE_NOBEACONS) {
		/*
		 * Enable s/w beacon miss handling.
		 */
		sc->sc_swbmiss = 1;
	}
	ATH_UNLOCK(sc);

	/* complete setup */
	ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
	return vap;
bad2:
	reclaim_address(sc, mac);
	ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
bad:
	free(avp, M_80211_VAP);
	ATH_UNLOCK(sc);
	return NULL;
}

static void
ath_vap_delete(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp = ATH_VAP(vap);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Quiesce the hardware while we remove the vap.  In
		 * particular we need to reclaim all references to
		 * the vap state by any frames pending on the tx queues.
		 */
		ath_hal_intrset(ah, 0);		/* disable interrupts */
		ath_draintxq(sc, ATH_RESET_DEFAULT);	/* stop hw xmit side */
		/* XXX Do all frames from all vaps/nodes need draining here? */
		ath_stoprecv(sc, 1);		/* stop recv side */
	}

	ieee80211_vap_detach(vap);

	/*
	 * XXX Danger Will Robinson! Danger!
	 *
	 * Because ieee80211_vap_detach() can queue a frame (the station
1298 * disassociate message?) after we've drained the TXQ and
1299 * flushed the software TXQ, we will end up with a frame queued
1300 * to a node whose vap is about to be freed.
1301 *
1302 * To work around this, flush the hardware/software again.
1303 * This may be racy - the ath task may be running and the packet
1304 * may be being scheduled between sw->hw txq. Tsk.
1305 *
1306 * TODO: figure out why a new node gets allocated somewhere around
1307 * here (after the ath_tx_swq() call; and after an ath_stop_locked()
1308 * call!)
1309 */
1310
1311 ath_draintxq(sc, ATH_RESET_DEFAULT);
1312
1313 ATH_LOCK(sc);
1314 /*
1315 * Reclaim beacon state. Note this must be done before
1316 * the vap instance is reclaimed as we may have a reference
1317 * to it in the buffer for the beacon frame.
1318 */
1319 if (avp->av_bcbuf != NULL) {
1320 if (avp->av_bslot != -1) {
1321 sc->sc_bslot[avp->av_bslot] = NULL;
1322 sc->sc_nbcnvaps--;
1323 }
1324 ath_beacon_return(sc, avp->av_bcbuf);
1325 avp->av_bcbuf = NULL;
1326 if (sc->sc_nbcnvaps == 0) {
1327 sc->sc_stagbeacons = 0;
1328 if (sc->sc_hastsfadd)
1329 ath_hal_settsfadjust(sc->sc_ah, 0);
1330 }
1331 /*
1332 * Reclaim any pending mcast frames for the vap.
1333 */
1334 ath_tx_draintxq(sc, &avp->av_mcastq);
1335 ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);
1336 }
1337 /*
1338 * Update bookkeeping.
1339 */
1340 if (vap->iv_opmode == IEEE80211_M_STA) {
1341 sc->sc_nstavaps--;
1342 if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
1343 sc->sc_swbmiss = 0;
1344 } else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
1345 vap->iv_opmode == IEEE80211_M_MBSS) {
1346 reclaim_address(sc, vap->iv_myaddr);
1347 ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
1348 if (vap->iv_opmode == IEEE80211_M_MBSS)
1349 sc->sc_nmeshvaps--;
1350 }
1351 if (vap->iv_opmode != IEEE80211_M_WDS)
1352 sc->sc_nvaps--;
1353#ifdef IEEE80211_SUPPORT_TDMA
1354 /* TDMA operation ceases when the last vap is destroyed */
1355 if (sc->sc_tdma && sc->sc_nvaps == 0) {
1356 sc->sc_tdma = 0;
1357 sc->sc_swbmiss = 0;
1358 }
1359#endif
1360 free(avp, M_80211_VAP);
1361
1362 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1363 /*
1364 * Restart rx+tx machines if still running (RUNNING will
1365 * be reset if we just destroyed the last vap).
1366 */
1367 if (ath_startrecv(sc) != 0)
1368 if_printf(ifp, "%s: unable to restart recv logic\n",
1369 __func__);
1370 if (sc->sc_beacons) { /* restart beacons */
1371#ifdef IEEE80211_SUPPORT_TDMA
1372 if (sc->sc_tdma)
1373 ath_tdma_config(sc, NULL);
1374 else
1375#endif
1376 ath_beacon_config(sc, NULL);
1377 }
1378 ath_hal_intrset(ah, sc->sc_imask);
1379 }
1380 ATH_UNLOCK(sc);
1381}
1382
1383void
1384ath_suspend(struct ath_softc *sc)
1385{
1386 struct ifnet *ifp = sc->sc_ifp;
1387 struct ieee80211com *ic = ifp->if_l2com;
1388
1389 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1390 __func__, ifp->if_flags);
1391
1392 sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;
1393
1394 ieee80211_suspend_all(ic);
1395 /*
1396 * NB: don't worry about putting the chip in low power
1397 * mode; pci will power off our socket on suspend and
1398 * CardBus detaches the device.
1399 */
1400
1401 /*
1402 * XXX ensure none of the taskqueues are running
1403 * XXX ensure sc_invalid is 1
1404 * XXX ensure the calibration callout is disabled
1405 */
1406
1407 /* Disable the PCIe PHY, complete with workarounds */
1408 ath_hal_enablepcie(sc->sc_ah, 1, 1);
1409}
1410
1411/*
1412 * Reset the key cache since some parts do not reset the
1413 * contents on resume. First we clear all entries, then
1414 * re-load keys that the 802.11 layer assumes are setup
1415 * in h/w.
1416 */
1417static void
1418ath_reset_keycache(struct ath_softc *sc)
1419{
1420 struct ifnet *ifp = sc->sc_ifp;
1421 struct ieee80211com *ic = ifp->if_l2com;
1422 struct ath_hal *ah = sc->sc_ah;
1423 int i;
1424
1425 for (i = 0; i < sc->sc_keymax; i++)
1426 ath_hal_keyreset(ah, i);
1427 ieee80211_crypto_reload_keys(ic);
1428}
1429
1430void
1431ath_resume(struct ath_softc *sc)
1432{
1433 struct ifnet *ifp = sc->sc_ifp;
1434 struct ieee80211com *ic = ifp->if_l2com;
1435 struct ath_hal *ah = sc->sc_ah;
1436 HAL_STATUS status;
1437
1438 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1439 __func__, ifp->if_flags);
1440
1441 /* Re-enable PCIe, re-enable the PCIe bus */
1442 ath_hal_enablepcie(ah, 0, 0);
1443
1444 /*
1445 * Must reset the chip before we reload the
1446 * keycache as we were powered down on suspend.
1447 */
1448 ath_hal_reset(ah, sc->sc_opmode,
1449 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
1450 AH_FALSE, &status);
1451 ath_reset_keycache(sc);
1452
1453 /* Let DFS at it in case it's a DFS channel */
1454 ath_dfs_radar_enable(sc, ic->ic_curchan);
1455
1456 /* Restore the LED configuration */
1457 ath_led_config(sc);
1458 ath_hal_setledstate(ah, HAL_LED_INIT);
1459
1460 if (sc->sc_resume_up)
1461 ieee80211_resume_all(ic);
1462
1463 /* XXX beacons ? */
1464}
1465
1466void
1467ath_shutdown(struct ath_softc *sc)
1468{
1469 struct ifnet *ifp = sc->sc_ifp;
1470
1471 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1472 __func__, ifp->if_flags);
1473
1474 ath_stop(ifp);
1475 /* NB: no point powering down chip as we're about to reboot */
1476}
1477
1478/*
1479 * Interrupt handler. Most of the actual processing is deferred.
1480 */
1481void
1482ath_intr(void *arg)
1483{
1484 struct ath_softc *sc = arg;
1485 struct ifnet *ifp = sc->sc_ifp;
1486 struct ath_hal *ah = sc->sc_ah;
1487 HAL_INT status = 0;
1488 uint32_t txqs;
1489
1490 /*
1491 * If we're inside a reset path, just print a warning and
1492 * clear the ISR. The reset routine will finish it for us.
1493 */
1494 ATH_PCU_LOCK(sc);
1495 if (sc->sc_inreset_cnt) {
1496 HAL_INT status;
1497 ath_hal_getisr(ah, &status); /* clear ISR */
1498 ath_hal_intrset(ah, 0); /* disable further intr's */
1499 DPRINTF(sc, ATH_DEBUG_ANY,
1500 "%s: in reset, ignoring: status=0x%x\n",
1501 __func__, status);
1502 ATH_PCU_UNLOCK(sc);
1503 return;
1504 }
1505
1506 if (sc->sc_invalid) {
1507 /*
1508 * The hardware is not ready/present, don't touch anything.
1509 * Note this can happen early on if the IRQ is shared.
1510 */
1511 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
1512 ATH_PCU_UNLOCK(sc);
1513 return;
1514 }
1515 if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */
1516 ATH_PCU_UNLOCK(sc);
1517 return;
1518 }
1519
1520 if ((ifp->if_flags & IFF_UP) == 0 ||
1521 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1522 HAL_INT status;
1523
1524 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1525 __func__, ifp->if_flags);
1526 ath_hal_getisr(ah, &status); /* clear ISR */
1527 ath_hal_intrset(ah, 0); /* disable further intr's */
1528 ATH_PCU_UNLOCK(sc);
1529 return;
1530 }
1531
1532 /*
1533 * Figure out the reason(s) for the interrupt. Note
1534 * that the hal returns a pseudo-ISR that may include
1535 * bits we haven't explicitly enabled so we mask the
1536 * value to insure we only process bits we requested.
1537 */
1538 ath_hal_getisr(ah, &status); /* NB: clears ISR too */
1539 DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
1540 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, "ath_intr: mask=0x%.8x", status);
1541#ifdef ATH_KTR_INTR_DEBUG
1542 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 5,
1543 "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
1544 ah->ah_intrstate[0],
1545 ah->ah_intrstate[1],
1546 ah->ah_intrstate[2],
1547 ah->ah_intrstate[3],
1548 ah->ah_intrstate[6]);
1549#endif
1550
1551 /* Squirrel away SYNC interrupt debugging */
1552 if (ah->ah_syncstate != 0) {
1553 int i;
1554 for (i = 0; i < 32; i++)
1555 if (ah->ah_syncstate & (1 << i))
1556 sc->sc_intr_stats.sync_intr[i]++;
1557 }
1558
1559 status &= sc->sc_imask; /* discard unasked for bits */
1560
1561 /* Short-circuit un-handled interrupts */
1562 if (status == 0x0) {
1563 ATH_PCU_UNLOCK(sc);
1564 return;
1565 }
1566
1567 /*
1568 * Take a note that we're inside the interrupt handler, so
1569 * the reset routines know to wait.
1570 */
1571 sc->sc_intr_cnt++;
1572 ATH_PCU_UNLOCK(sc);
1573
1574 /*
1575 * Handle the interrupt. We won't run concurrent with the reset
1576 * or channel change routines as they'll wait for sc_intr_cnt
1577 * to be 0 before continuing.
1578 */
1579 if (status & HAL_INT_FATAL) {
1580 sc->sc_stats.ast_hardware++;
1581 ath_hal_intrset(ah, 0); /* disable intr's until reset */
1582 taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
1583 } else {
1584 if (status & HAL_INT_SWBA) {
1585 /*
1586 * Software beacon alert--time to send a beacon.
1587 * Handle beacon transmission directly; deferring
1588 * this is too slow to meet timing constraints
1589 * under load.
1590 */
1591#ifdef IEEE80211_SUPPORT_TDMA
1592 if (sc->sc_tdma) {
1593 if (sc->sc_tdmaswba == 0) {
1594 struct ieee80211com *ic = ifp->if_l2com;
1595 struct ieee80211vap *vap =
1596 TAILQ_FIRST(&ic->ic_vaps);
1597 ath_tdma_beacon_send(sc, vap);
1598 sc->sc_tdmaswba =
1599 vap->iv_tdma->tdma_bintval;
1600 } else
1601 sc->sc_tdmaswba--;
1602 } else
1603#endif
1604 {
1605 ath_beacon_proc(sc, 0);
1606#ifdef IEEE80211_SUPPORT_SUPERG
1607 /*
1608 * Schedule the rx taskq in case there's no
1609 * traffic so any frames held on the staging
1610 * queue are aged and potentially flushed.
1611 */
1612 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1613#endif
1614 }
1615 }
1616 if (status & HAL_INT_RXEOL) {
1617 int imask;
1618 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXEOL");
1619 ATH_PCU_LOCK(sc);
1620 /*
1621 * NB: the hardware should re-read the link when
1622 * RXE bit is written, but it doesn't work at
1623 * least on older hardware revs.
1624 */
1625 sc->sc_stats.ast_rxeol++;
1626 /*
1627 * Disable RXEOL/RXORN - prevent an interrupt
1628 * storm until the PCU logic can be reset.
1629 * In case the interface is reset some other
1630 * way before "sc_kickpcu" is called, don't
1631 * modify sc_imask - that way if it is reset
1632 * by a call to ath_reset() somehow, the
1633 * interrupt mask will be correctly reprogrammed.
1634 */
1635 imask = sc->sc_imask;
1636 imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
1637 ath_hal_intrset(ah, imask);
1638 /*
1639 * Only blank sc_rxlink if we've not yet kicked
1640 * the PCU.
1641 *
1642 * This isn't entirely correct - the correct solution
1643 * would be to have a PCU lock and engage that for
1644 * the duration of the PCU fiddling; which would include
1645 * running the RX process. Otherwise we could end up
1646 * messing up the RX descriptor chain and making the
1647 * RX desc list much shorter.
1648 */
1649 if (! sc->sc_kickpcu)
1650 sc->sc_rxlink = NULL;
1651 sc->sc_kickpcu = 1;
1652 /*
1653 * Enqueue an RX proc to handle whatever
1654 * is in the RX queue.
1655 * This will then kick the PCU.
1656 */
1657 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1658 ATH_PCU_UNLOCK(sc);
1659 }
1660 if (status & HAL_INT_TXURN) {
1661 sc->sc_stats.ast_txurn++;
1662 /* bump tx trigger level */
1663 ath_hal_updatetxtriglevel(ah, AH_TRUE);
1664 }
1665 /*
1666 * Handle both the legacy and RX EDMA interrupt bits.
1667 * Note that HAL_INT_RXLP is also HAL_INT_RXDESC.
1668 */
1669 if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) {
1670 sc->sc_stats.ast_rx_intr++;
1671 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1672 }
1673 if (status & HAL_INT_TX) {
1674 sc->sc_stats.ast_tx_intr++;
1675 /*
1676 * Grab all the currently set bits in the HAL txq bitmap
1677 * and blank them. This is the only place we should be
1678 * doing this.
1679 */
1680 if (! sc->sc_isedma) {
1681 ATH_PCU_LOCK(sc);
1682 txqs = 0xffffffff;
1683 ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
1684 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 3,
1685 "ath_intr: TX; txqs=0x%08x, txq_active was 0x%08x, now 0x%08x",
1686 txqs,
1687 sc->sc_txq_active,
1688 sc->sc_txq_active | txqs);
1689 sc->sc_txq_active |= txqs;
1690 ATH_PCU_UNLOCK(sc);
1691 }
1692 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
1693 }
1694 if (status & HAL_INT_BMISS) {
1695 sc->sc_stats.ast_bmiss++;
1696 taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
1697 }
1698 if (status & HAL_INT_GTT)
1699 sc->sc_stats.ast_tx_timeout++;
1700 if (status & HAL_INT_CST)
1701 sc->sc_stats.ast_tx_cst++;
1702 if (status & HAL_INT_MIB) {
1703 sc->sc_stats.ast_mib++;
1704 ATH_PCU_LOCK(sc);
1705 /*
1706 * Disable interrupts until we service the MIB
1707 * interrupt; otherwise it will continue to fire.
1708 */
1709 ath_hal_intrset(ah, 0);
1710 /*
1711 * Let the hal handle the event. We assume it will
1712 * clear whatever condition caused the interrupt.
1713 */
1714 ath_hal_mibevent(ah, &sc->sc_halstats);
1715 /*
1716 * Don't reset the interrupt if we've just
1717 * kicked the PCU, or we may get a nested
1718 * RXEOL before the rxproc has had a chance
1719 * to run.
1720 */
1721 if (sc->sc_kickpcu == 0)
1722 ath_hal_intrset(ah, sc->sc_imask);
1723 ATH_PCU_UNLOCK(sc);
1724 }
1725 if (status & HAL_INT_RXORN) {
1726 /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
1727 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXORN");
1728 sc->sc_stats.ast_rxorn++;
1729 }
1730 }
1731 ATH_PCU_LOCK(sc);
1732 sc->sc_intr_cnt--;
1733 ATH_PCU_UNLOCK(sc);
1734}
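/*
 * NB: a summary of the counter handshake used above.  The interrupt
 * path refuses to run while a reset is in progress, and the reset
 * path (see ath_reset() and ath_txrx_stop_locked() below) waits for
 * in-flight interrupt processing to drain:
 *
 *	interrupt path				reset path
 *	--------------				----------
 *	ATH_PCU_LOCK(sc);			ATH_PCU_LOCK(sc);
 *	if (sc->sc_inreset_cnt)			sc->sc_inreset_cnt++;
 *		clear ISR and bail;		wait for sc_intr_cnt,
 *	sc->sc_intr_cnt++;			    sc_txproc_cnt, etc. to hit 0
 *	ATH_PCU_UNLOCK(sc);			ATH_PCU_UNLOCK(sc);
 *	... process interrupt ...		... perform the reset ...
 *	ATH_PCU_LOCK(sc);			ATH_PCU_LOCK(sc);
 *	sc->sc_intr_cnt--;			sc->sc_inreset_cnt--;
 *	ATH_PCU_UNLOCK(sc);			ATH_PCU_UNLOCK(sc);
 */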
1735
1736static void
1737ath_fatal_proc(void *arg, int pending)
1738{
1739 struct ath_softc *sc = arg;
1740 struct ifnet *ifp = sc->sc_ifp;
1741 u_int32_t *state;
1742 u_int32_t len;
1743 void *sp;
1744
1745 if_printf(ifp, "hardware error; resetting\n");
1746 /*
1747 * Fatal errors are unrecoverable. Typically these
1748 * are caused by DMA errors. Collect h/w state from
1749 * the hal so we can diagnose what's going on.
1750 */
1751 if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
1752 KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
1753 state = sp;
1754 if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
1755 state[0], state[1], state[2], state[3],
1756 state[4], state[5]);
1757 }
1758 ath_reset(ifp, ATH_RESET_NOLOSS);
1759}
1760
1761static void
1762ath_bmiss_vap(struct ieee80211vap *vap)
1763{
1764 /*
1765 * Work around phantom bmiss interrupts by sanity-checking
1766 * the time of our last rx'd frame. If it is within the
1767 * beacon miss interval then ignore the interrupt. If it's
1768 * truly a bmiss we'll get another interrupt soon and that'll
1769 * be dispatched up for processing. Note this applies only
1770 * for h/w beacon miss events.
1771 */
1772 if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
1773 struct ifnet *ifp = vap->iv_ic->ic_ifp;
1774 struct ath_softc *sc = ifp->if_softc;
1775 u_int64_t lastrx = sc->sc_lastrx;
1776 u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
1777 /* XXX should take a locked ref to iv_bss */
1778 u_int bmisstimeout =
1779 vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;
1780
1781 DPRINTF(sc, ATH_DEBUG_BEACON,
1782 "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
1783 __func__, (unsigned long long) tsf,
1784 (unsigned long long)(tsf - lastrx),
1785 (unsigned long long) lastrx, bmisstimeout);
1786
1787 if (tsf - lastrx <= bmisstimeout) {
1788 sc->sc_stats.ast_bmiss_phantom++;
1789 return;
1790 }
1791 }
1792 ATH_VAP(vap)->av_bmiss(vap);
1793}
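/*
 * NB: a worked example of the timeout arithmetic above: ni_intval is
 * the beacon interval in TU and 1 TU = 1024 microseconds, so the
 * product is in microseconds, the same units as the 64-bit TSF.
 * With iv_bmissthreshold = 10 and a 100 TU beacon interval,
 * bmisstimeout = 10 * 100 * 1024 = 1,024,000us; any frame received
 * within roughly the last second of TSF time marks the interrupt
 * as a phantom miss.
 */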
1794
1795static int
1796ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
1797{
1798 uint32_t rsize;
1799 void *sp;
1800
1801 if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize))
1802 return 0;
1803 KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
1804 *hangs = *(uint32_t *)sp;
1805 return 1;
1806}
1807
1808static void
1809ath_bmiss_proc(void *arg, int pending)
1810{
1811 struct ath_softc *sc = arg;
1812 struct ifnet *ifp = sc->sc_ifp;
1813 uint32_t hangs;
1814
1815 DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
1816
1817 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
1818 if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs);
1819 ath_reset(ifp, ATH_RESET_NOLOSS);
1820 } else
1821 ieee80211_beacon_miss(ifp->if_l2com);
1822}
1823
1824/*
1825 * Handle TKIP MIC setup to deal with hardware that doesn't do MIC
1826 * calcs together with WME. If necessary disable the crypto
1827 * hardware and mark the 802.11 state so keys will be setup
1828 * with the MIC work done in software.
1829 */
1830static void
1831ath_settkipmic(struct ath_softc *sc)
1832{
1833 struct ifnet *ifp = sc->sc_ifp;
1834 struct ieee80211com *ic = ifp->if_l2com;
1835
1836 if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
1837 if (ic->ic_flags & IEEE80211_F_WME) {
1838 ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
1839 ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
1840 } else {
1841 ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
1842 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
1843 }
1844 }
1845}
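/*
 * NB: clearing IEEE80211_CRYPTO_TKIPMIC from ic_cryptocaps is what
 * pushes the Michael MIC into software; net80211's TKIP module
 * computes it itself when the driver doesn't advertise hardware
 * MIC support.
 */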
1846
1847static void
1848ath_init(void *arg)
1849{
1850 struct ath_softc *sc = (struct ath_softc *) arg;
1851 struct ifnet *ifp = sc->sc_ifp;
1852 struct ieee80211com *ic = ifp->if_l2com;
1853 struct ath_hal *ah = sc->sc_ah;
1854 HAL_STATUS status;
1855
1856 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1857 __func__, ifp->if_flags);
1858
1859 ATH_LOCK(sc);
1860 /*
1861 * Stop anything previously set up. This is safe
1862 * whether this is the first time through or not.
1863 */
1864 ath_stop_locked(ifp);
1865
1866 /*
1867 * The basic interface to setting the hardware in a good
1868 * state is ``reset''. On return the hardware is known to
1869 * be powered up and with interrupts disabled. This must
1870 * be followed by initialization of the appropriate bits
1871 * and then setup of the interrupt mask.
1872 */
1873 ath_settkipmic(sc);
1874 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) {
1875 if_printf(ifp, "unable to reset hardware; hal status %u\n",
1876 status);
1877 ATH_UNLOCK(sc);
1878 return;
1879 }
1880 ath_chan_change(sc, ic->ic_curchan);
1881
1882 /* Let DFS at it in case it's a DFS channel */
1883 ath_dfs_radar_enable(sc, ic->ic_curchan);
1884
1885 /*
1886 * Likewise this is set during reset so update
1887 * state cached in the driver.
1888 */
1889 sc->sc_diversity = ath_hal_getdiversity(ah);
1890 sc->sc_lastlongcal = 0;
1891 sc->sc_resetcal = 1;
1892 sc->sc_lastcalreset = 0;
1893 sc->sc_lastani = 0;
1894 sc->sc_lastshortcal = 0;
1895 sc->sc_doresetcal = AH_FALSE;
1896 /*
1897 * Beacon timers were cleared here; give ath_newstate()
1898 * a hint that the beacon timers should be poked when
1899 * things transition to the RUN state.
1900 */
1901 sc->sc_beacons = 0;
1902
1903 /*
1904 * Setup the hardware after reset: the key cache
1905 * is filled as needed and the receive engine is
1906 * set going. Frame transmit is handled entirely
1907 * in the frame output path; there's nothing to do
1908 * here except setup the interrupt mask.
1909 */
1910 if (ath_startrecv(sc) != 0) {
1911 if_printf(ifp, "unable to start recv logic\n");
1912 ATH_UNLOCK(sc);
1913 return;
1914 }
1915
1916 /*
1917 * Enable interrupts.
1918 */
1919 sc->sc_imask = HAL_INT_RX | HAL_INT_TX
1920 | HAL_INT_RXEOL | HAL_INT_RXORN
1921 | HAL_INT_FATAL | HAL_INT_GLOBAL;
1922
1923 /*
1924 * Enable RX EDMA bits. Note these overlap with
1925 * HAL_INT_RX and HAL_INT_RXDESC respectively.
1926 */
1927 if (sc->sc_isedma)
1928 sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP);
1929
1930 /*
1931 * Enable MIB interrupts when there are hardware phy counters.
1932 * Note we only do this (at the moment) for station mode.
1933 */
1934 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
1935 sc->sc_imask |= HAL_INT_MIB;
1936
1937 /* Enable global TX timeout and carrier sense timeout if available */
1938 if (ath_hal_gtxto_supported(ah))
1939 sc->sc_imask |= HAL_INT_GTT;
1940
1941 DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n",
1942 __func__, sc->sc_imask);
1943
1944 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1945 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
1946 ath_hal_intrset(ah, sc->sc_imask);
1947
1948 ATH_UNLOCK(sc);
1949
1950#ifdef ATH_TX99_DIAG
1951 if (sc->sc_tx99 != NULL)
1952 sc->sc_tx99->start(sc->sc_tx99);
1953 else
1954#endif
1955 ieee80211_start_all(ic); /* start all vap's */
1956}
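/*
 * NB: sc_imask is the driver's cached copy of the interrupt mask
 * programmed here; later ath_hal_intrset() calls restore the
 * hardware from it.  That is why the RXEOL path in ath_intr() above
 * masks a local copy instead of modifying sc_imask itself.
 */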
1957
1958static void
1959ath_stop_locked(struct ifnet *ifp)
1960{
1961 struct ath_softc *sc = ifp->if_softc;
1962 struct ath_hal *ah = sc->sc_ah;
1963
1964 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
1965 __func__, sc->sc_invalid, ifp->if_flags);
1966
1967 ATH_LOCK_ASSERT(sc);
1968 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1969 /*
1970 * Shutdown the hardware and driver:
1971 * reset 802.11 state machine
1972 * turn off timers
1973 * disable interrupts
1974 * turn off the radio
1975 * clear transmit machinery
1976 * clear receive machinery
1977 * drain and release tx queues
1978 * reclaim beacon resources
1979 * power down hardware
1980 *
1981 * Note that some of this work is not possible if the
1982 * hardware is gone (invalid).
1983 */
1984#ifdef ATH_TX99_DIAG
1985 if (sc->sc_tx99 != NULL)
1986 sc->sc_tx99->stop(sc->sc_tx99);
1987#endif
1988 callout_stop(&sc->sc_wd_ch);
1989 sc->sc_wd_timer = 0;
1990 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1991 if (!sc->sc_invalid) {
1992 if (sc->sc_softled) {
1993 callout_stop(&sc->sc_ledtimer);
1994 ath_hal_gpioset(ah, sc->sc_ledpin,
1995 !sc->sc_ledon);
1996 sc->sc_blinking = 0;
1997 }
1998 ath_hal_intrset(ah, 0);
1999 }
2000 ath_draintxq(sc, ATH_RESET_DEFAULT);
2001 if (!sc->sc_invalid) {
2002 ath_stoprecv(sc, 1);
2003 ath_hal_phydisable(ah);
2004 } else
2005 sc->sc_rxlink = NULL;
2006 ath_beacon_free(sc); /* XXX not needed */
2007 }
2008}
2009
2010#define MAX_TXRX_ITERATIONS 1000
2011static void
2012ath_txrx_stop_locked(struct ath_softc *sc)
2013{
2014 int i = MAX_TXRX_ITERATIONS;
2015
2016 ATH_UNLOCK_ASSERT(sc);
2017 ATH_PCU_LOCK_ASSERT(sc);
2018
2019 /*
2020 * Sleep until all the pending operations have completed.
2021 *
2022 * The caller must ensure that sc_inreset_cnt has been
2023 * incremented or the pending operations may continue being queued.
2024 */
2025 while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
2026 sc->sc_txstart_cnt || sc->sc_intr_cnt) {
2027 if (i <= 0)
2028 break;
2029 msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 1);
2030 i--;
2031 }
2032
2033 if (i <= 0)
2034 device_printf(sc->sc_dev,
2035 "%s: didn't finish after %d iterations\n",
2036 __func__, MAX_TXRX_ITERATIONS);
2037}
2038#undef MAX_TXRX_ITERATIONS
2039
2040#if 0
2041static void
2042ath_txrx_stop(struct ath_softc *sc)
2043{
2044 ATH_UNLOCK_ASSERT(sc);
2045 ATH_PCU_UNLOCK_ASSERT(sc);
2046
2047 ATH_PCU_LOCK(sc);
2048 ath_txrx_stop_locked(sc);
2049 ATH_PCU_UNLOCK(sc);
2050}
2051#endif
2052
2053static void
2054ath_txrx_start(struct ath_softc *sc)
2055{
2056
2057 taskqueue_unblock(sc->sc_tq);
2058}
2059
2060/*
2061 * Grab the reset lock, and wait around until no one else
2062 * is trying to do anything with it.
2063 *
2064 * This is totally horrible but we can't hold this lock for
2065 * long enough to do TX/RX or we end up with net80211/ip stack
2066 * LORs and eventual deadlock.
2067 *
2068 * "dowait" signals whether to spin, waiting for the reset
2069 * lock count to reach 0. This should (for now) only be used
2070 * during the reset path, as the rest of the code may not
2071 * be locking-reentrant enough to behave correctly.
2072 *
2073 * Another, cleaner way should be found to serialise all of
2074 * these operations.
2075 */
2076#define MAX_RESET_ITERATIONS 10
2077static int
2078ath_reset_grablock(struct ath_softc *sc, int dowait)
2079{
2080 int w = 0;
2081 int i = MAX_RESET_ITERATIONS;
2082
2083 ATH_PCU_LOCK_ASSERT(sc);
2084 do {
2085 if (sc->sc_inreset_cnt == 0) {
2086 w = 1;
2087 break;
2088 }
2089 if (dowait == 0) {
2090 w = 0;
2091 break;
2092 }
2093 ATH_PCU_UNLOCK(sc);
2094 pause("ath_reset_grablock", 1);
2095 i--;
2096 ATH_PCU_LOCK(sc);
2097 } while (i > 0);
2098
2099 /*
2100 * We always increment the refcounter, regardless
2101 * of whether we succeeded in acquiring it
2102 * exclusively.
2103 */
2104 sc->sc_inreset_cnt++;
2105
2106 if (i <= 0)
2107 device_printf(sc->sc_dev,
2108 "%s: didn't finish after %d iterations\n",
2109 __func__, MAX_RESET_ITERATIONS);
2110
2111 if (w == 0)
2112 device_printf(sc->sc_dev,
2113 "%s: warning, recursive reset path!\n",
2114 __func__);
2115
2116 return w;
2117}
2118#undef MAX_RESET_ITERATIONS
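/*
 * NB: the typical caller pattern is visible in ath_reset() below:
 * take ATH_PCU_LOCK, disable interrupts, stop TX/RX, then call
 * ath_reset_grablock(sc, 1).  There is no release routine yet (see
 * the XXX TODO below); callers decrement sc_inreset_cnt directly
 * once the reset has completed.
 */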
2119
2120/*
2121 * XXX TODO: write ath_reset_releaselock
2122 */
2123
2124static void
2125ath_stop(struct ifnet *ifp)
2126{
2127 struct ath_softc *sc = ifp->if_softc;
2128
2129 ATH_LOCK(sc);
2130 ath_stop_locked(ifp);
2131 ATH_UNLOCK(sc);
2132}
2133
2134/*
2135 * Reset the hardware w/o losing operational state. This is
2136 * basically a more efficient way of doing ath_stop, ath_init,
2137 * followed by state transitions to the current 802.11
2138 * operational state. Used to recover from various errors and
2139 * to reset or reload hardware state.
2140 */
2141int
2142ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
2143{
2144 struct ath_softc *sc = ifp->if_softc;
2145 struct ieee80211com *ic = ifp->if_l2com;
2146 struct ath_hal *ah = sc->sc_ah;
2147 HAL_STATUS status;
2148 int i;
2149
2150 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
2151
2152 /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
2153 ATH_PCU_UNLOCK_ASSERT(sc);
2154 ATH_UNLOCK_ASSERT(sc);
2155
2156 /* Try to stop any further TX/RX from occurring */
2157 taskqueue_block(sc->sc_tq);
2158
2159 ATH_PCU_LOCK(sc);
2160 ath_hal_intrset(ah, 0); /* disable interrupts */
2161 ath_txrx_stop_locked(sc); /* Ensure TX/RX is stopped */
2162 if (ath_reset_grablock(sc, 1) == 0) {
2163 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
2164 __func__);
2165 }
2166 ATH_PCU_UNLOCK(sc);
2167
2168 /*
2169 * Should now wait for pending TX/RX to complete
2170 * and block future ones from occurring. This needs to be
2171 * done before the TX queue is drained.
2172 */
2173 ath_draintxq(sc, reset_type); /* stop xmit side */
2174
2175 /*
2176 * Regardless of whether we're doing a no-loss flush or
2177 * not, stop the PCU and handle what's in the RX queue.
2178 * That way frames aren't dropped which shouldn't be.
2179 */
2180 ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
2181 ath_rx_flush(sc);
2182
2183 ath_settkipmic(sc); /* configure TKIP MIC handling */
2184 /* NB: indicate channel change so we do a full reset */
2185 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
2186 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
2187 __func__, status);
2188 sc->sc_diversity = ath_hal_getdiversity(ah);
2189
2190 /* Let DFS at it in case it's a DFS channel */
2191 ath_dfs_radar_enable(sc, ic->ic_curchan);
2192
2193 if (ath_startrecv(sc) != 0) /* restart recv */
2194 if_printf(ifp, "%s: unable to start recv logic\n", __func__);
2195 /*
2196 * We may be doing a reset in response to an ioctl
2197 * that changes the channel so update any state that
2198 * might change as a result.
2199 */
2200 ath_chan_change(sc, ic->ic_curchan);
2201 if (sc->sc_beacons) { /* restart beacons */
2202#ifdef IEEE80211_SUPPORT_TDMA
2203 if (sc->sc_tdma)
2204 ath_tdma_config(sc, NULL);
2205 else
2206#endif
2207 ath_beacon_config(sc, NULL);
2208 }
2209
2210 /*
2211 * Release the reset lock and re-enable interrupts here.
2212 * If an interrupt was being processed in ath_intr(),
2213 * it would disable interrupts at this point. So we have
2214 * to atomically enable interrupts and decrement the
2215 * reset counter - this way ath_intr() doesn't end up
2216 * disabling interrupts without a corresponding enable
2217 * in the reset or channel change path.
2218 */
2219 ATH_PCU_LOCK(sc);
2220 sc->sc_inreset_cnt--;
2221 /* XXX only do this if sc_inreset_cnt == 0? */
2222 ath_hal_intrset(ah, sc->sc_imask);
2223 ATH_PCU_UNLOCK(sc);
2224
2225 /*
2226 * TX and RX can be started here. If it were started with
2227 * sc_inreset_cnt > 0, the TX and RX path would abort.
2228 * Thus if this is a nested call through the reset or
2229 * channel change code, TX completion will occur but
2230 * RX completion and ath_start / ath_tx_start will not
2231 * run.
2232 */
2233
2234 /* Restart TX/RX as needed */
2235 ath_txrx_start(sc);
2236
2237 /* XXX Restart TX completion and pending TX */
2238 if (reset_type == ATH_RESET_NOLOSS) {
2239 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
2240 if (ATH_TXQ_SETUP(sc, i)) {
2241 ATH_TXQ_LOCK(&sc->sc_txq[i]);
2242 ath_txq_restart_dma(sc, &sc->sc_txq[i]);
2243 ath_txq_sched(sc, &sc->sc_txq[i]);
2244 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
2245 }
2246 }
2247 }
2248
2249 /*
2250 * This may have been set during an ath_start() call which
2251 * set this once it detected a concurrent TX was going on.
2252 * So, clear it.
2253 */
2254 IF_LOCK(&ifp->if_snd);
2255 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2256 IF_UNLOCK(&ifp->if_snd);
2257
2258 /* Handle any frames in the TX queue */
2259 /*
2260 * XXX should this be done by the caller, rather than
2261 * ath_reset() ?
2262 */
2263 ath_tx_kick(sc); /* restart xmit */
2264 return 0;
2265}
2266
2267static int
2268ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
2269{
2270 struct ieee80211com *ic = vap->iv_ic;
2271 struct ifnet *ifp = ic->ic_ifp;
2272 struct ath_softc *sc = ifp->if_softc;
2273 struct ath_hal *ah = sc->sc_ah;
2274
2275 switch (cmd) {
2276 case IEEE80211_IOC_TXPOWER:
2277 /*
2278 * If per-packet TPC is enabled, then we have nothing
2279 * to do; otherwise we need to force the global limit.
2280 * All this can happen directly; no need to reset.
2281 */
2282 if (!ath_hal_gettpc(ah))
2283 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
2284 return 0;
2285 }
2286 /* XXX? Full or NOLOSS? */
2287 return ath_reset(ifp, ATH_RESET_FULL);
2288}
2289
2290struct ath_buf *
2291_ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype)
2292{
2293 struct ath_buf *bf;
2294
2295 ATH_TXBUF_LOCK_ASSERT(sc);
2296
2297 if (btype == ATH_BUFTYPE_MGMT)
2298 bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt);
2299 else
2300 bf = TAILQ_FIRST(&sc->sc_txbuf);
2301
2302 if (bf == NULL) {
2303 sc->sc_stats.ast_tx_getnobuf++;
2304 } else {
2305 if (bf->bf_flags & ATH_BUF_BUSY) {
2306 sc->sc_stats.ast_tx_getbusybuf++;
2307 bf = NULL;
2308 }
2309 }
2310
2311 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) {
2312 if (btype == ATH_BUFTYPE_MGMT)
2313 TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list);
2314 else {
2315 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
2316 sc->sc_txbuf_cnt--;
2317
2318 /*
2319 * This shouldn't happen; however, just to be
2320 * safe, print a warning and fudge the txbuf
2321 * count.
2322 */
2323 if (sc->sc_txbuf_cnt < 0) {
2324 device_printf(sc->sc_dev,
2325 "%s: sc_txbuf_cnt < 0?\n",
2326 __func__);
2327 sc->sc_txbuf_cnt = 0;
2328 }
2329 }
2330 } else
2331 bf = NULL;
2332
2333 if (bf == NULL) {
2334 /* XXX should check which list, mgmt or otherwise */
2335 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
2336 TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
2337 "out of xmit buffers" : "xmit buffer busy");
2338 return NULL;
2339 }
2340
2341 /* XXX TODO: should do this at buffer list initialisation */
2342 /* XXX (then, ensure the buffer has the right flag set) */
2343 if (btype == ATH_BUFTYPE_MGMT)
2344 bf->bf_flags |= ATH_BUF_MGMT;
2345 else
2346 bf->bf_flags &= (~ATH_BUF_MGMT);
2347
2348 /* Valid bf here; clear some basic fields */
2349 bf->bf_next = NULL; /* XXX just to be sure */
2350 bf->bf_last = NULL; /* XXX again, just to be sure */
2351 bf->bf_comp = NULL; /* XXX again, just to be sure */
2352 bzero(&bf->bf_state, sizeof(bf->bf_state));
2353
2354 /*
2355 * Track the descriptor ID only if doing EDMA
2356 */
2357 if (sc->sc_isedma) {
2358 bf->bf_descid = sc->sc_txbuf_descid;
2359 sc->sc_txbuf_descid++;
2360 }
2361
2362 return bf;
2363}
2364
2365/*
2366 * When retrying a software frame, buffers marked ATH_BUF_BUSY
2367 * can't be thrown back on the queue as they could still be
2368 * in use by the hardware.
2369 *
2370 * This duplicates the buffer, or returns NULL.
2371 *
2372 * The descriptor is also copied but the link pointers and
2373 * the DMA segments aren't copied; this frame should thus
2374 * be again passed through the descriptor setup/chain routines
2375 * so the link is correct.
2376 *
2377 * The caller must free the buffer using ath_freebuf().
2378 *
2379 * XXX TODO: this call shouldn't fail as it'll cause packet loss
2380 * XXX in the TX pathway when retries are needed.
2381 * XXX Figure out how to keep some buffers free, or factor the
2382 * XXX number of busy buffers into the xmit path (ath_start())
2383 * XXX so we don't over-commit.
2384 */
2385struct ath_buf *
2386ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf)
2387{
2388 struct ath_buf *tbf;
2389
2390 tbf = ath_getbuf(sc,
2391 (bf->bf_flags & ATH_BUF_MGMT) ?
2392 ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL);
2393 if (tbf == NULL)
2394 return NULL; /* XXX failure? Why? */
2395
2396 /* Copy basics */
2397 tbf->bf_next = NULL;
2398 tbf->bf_nseg = bf->bf_nseg;
2399 tbf->bf_flags = bf->bf_flags & ~ATH_BUF_BUSY;
2400 tbf->bf_status = bf->bf_status;
2401 tbf->bf_m = bf->bf_m;
2402 tbf->bf_node = bf->bf_node;
2403 /* will be setup by the chain/setup function */
2404 tbf->bf_lastds = NULL;
2405 /* for now, last == self */
2406 tbf->bf_last = tbf;
2407 tbf->bf_comp = bf->bf_comp;
2408
2409 /* NOTE: DMA segments will be setup by the setup/chain functions */
2410
2411 /* The caller has to re-init the descriptor + links */
2412
2413 /* Copy state */
2414 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state));
2415
2416 return tbf;
2417}
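/*
 * NB: a hypothetical sketch, simplified from the retry path, of how
 * ath_buf_clone() is meant to be used when a frame must be re-queued
 * but its buffer is still marked ATH_BUF_BUSY:
 *
 *	struct ath_buf *nbf;
 *
 *	nbf = ath_buf_clone(sc, bf);
 *	if (nbf == NULL)
 *		(drop the frame; see the XXX above)
 *	... re-run descriptor setup/chain on nbf and queue it ...
 *	ath_freebuf(sc, bf);	(original returns once h/w releases it)
 */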
2418
2419struct ath_buf *
2420ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype)
2421{
2422 struct ath_buf *bf;
2423
2424 ATH_TXBUF_LOCK(sc);
2425 bf = _ath_getbuf_locked(sc, btype);
2426 /*
2427 * If a mgmt buffer was requested but we're out of those,
2428 * try requesting a normal one.
2429 */
2430 if (bf == NULL && btype == ATH_BUFTYPE_MGMT)
2431 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
2432 ATH_TXBUF_UNLOCK(sc);
2433 if (bf == NULL) {
2434 struct ifnet *ifp = sc->sc_ifp;
2435
2436 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
2437 sc->sc_stats.ast_tx_qstop++;
2438 IF_LOCK(&ifp->if_snd);
2439 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2440 IF_UNLOCK(&ifp->if_snd);
2441 }
2442 return bf;
2443}
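/*
 * NB: the fallback above is one-way, presumably because the mgmt
 * pool is a reserve: a management request may dip into the normal
 * pool, but a normal request never consumes the reserved management
 * buffers, so data traffic can't starve management frames.  When
 * both pools are exhausted, IFF_DRV_OACTIVE throttles the send
 * queue until buffers are returned.
 */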
2444
2445static void
2446ath_start_queue(struct ifnet *ifp)
2447{
2448 struct ath_softc *sc = ifp->if_softc;
2449
427
428 /*
429 * Allocate hardware transmit queues: one queue for
430 * beacon frames and one data queue for each QoS
431 * priority. Note that the hal handles resetting
432 * these queues at the needed time.
433 *
434 * XXX PS-Poll
435 */
436 sc->sc_bhalq = ath_beaconq_setup(sc);
437 if (sc->sc_bhalq == (u_int) -1) {
438 if_printf(ifp, "unable to setup a beacon xmit queue!\n");
439 error = EIO;
440 goto bad2;
441 }
442 sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
443 if (sc->sc_cabq == NULL) {
444 if_printf(ifp, "unable to setup CAB xmit queue!\n");
445 error = EIO;
446 goto bad2;
447 }
448 /* NB: insure BK queue is the lowest priority h/w queue */
449 if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
450 if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
451 ieee80211_wme_acnames[WME_AC_BK]);
452 error = EIO;
453 goto bad2;
454 }
455 if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
456 !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
457 !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
458 /*
459 * Not enough hardware tx queues to properly do WME;
460 * just punt and assign them all to the same h/w queue.
461 * We could do a better job of this if, for example,
462 * we allocate queues when we switch from station to
463 * AP mode.
464 */
465 if (sc->sc_ac2q[WME_AC_VI] != NULL)
466 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
467 if (sc->sc_ac2q[WME_AC_BE] != NULL)
468 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
469 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
470 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
471 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
472 }
473
474 /*
475 * Attach the TX completion function.
476 *
477 * The non-EDMA chips may have some special case optimisations;
478 * this method gives everyone a chance to attach cleanly.
479 */
480 sc->sc_tx.xmit_attach_comp_func(sc);
481
482 /*
483 * Setup rate control. Some rate control modules
484 * call back to change the antenna state so expose
485 * the necessary entry points.
486 * XXX maybe belongs in struct ath_ratectrl?
487 */
488 sc->sc_setdefantenna = ath_setdefantenna;
489 sc->sc_rc = ath_rate_attach(sc);
490 if (sc->sc_rc == NULL) {
491 error = EIO;
492 goto bad2;
493 }
494
495 /* Attach DFS module */
496 if (! ath_dfs_attach(sc)) {
497 device_printf(sc->sc_dev,
498 "%s: unable to attach DFS\n", __func__);
499 error = EIO;
500 goto bad2;
501 }
502
503 /* Start DFS processing tasklet */
504 TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);
505
506 /* Configure LED state */
507 sc->sc_blinking = 0;
508 sc->sc_ledstate = 1;
509 sc->sc_ledon = 0; /* low true */
510 sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */
511 callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);
512
513 /*
514 * Don't set up hardware-based blinking.
515 *
516 * Although some NICs may have this configured in the
517 * default reset register values, the user may wish
518 * to alter which pins have which function.
519 *
520 * The reference driver attaches the MAC network LED to GPIO1 and
521 * the MAC power LED to GPIO2. However, the DWA-552 cardbus
522 * NIC has these reversed.
523 */
524 sc->sc_hardled = (1 == 0);
525 sc->sc_led_net_pin = -1;
526 sc->sc_led_pwr_pin = -1;
527 /*
528 * Auto-enable soft led processing for IBM cards and for
529 * 5211 minipci cards. Users can also manually enable/disable
530 * support with a sysctl.
531 */
532 sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
533 ath_led_config(sc);
534 ath_hal_setledstate(ah, HAL_LED_INIT);
535
536 ifp->if_softc = sc;
537 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
538 ifp->if_start = ath_start_queue;
539 ifp->if_ioctl = ath_ioctl;
540 ifp->if_init = ath_init;
541 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
542 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
543 IFQ_SET_READY(&ifp->if_snd);
544
545 ic->ic_ifp = ifp;
546 /* XXX not right but it's not used anywhere important */
547 ic->ic_phytype = IEEE80211_T_OFDM;
548 ic->ic_opmode = IEEE80211_M_STA;
549 ic->ic_caps =
550 IEEE80211_C_STA /* station mode */
551 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */
552 | IEEE80211_C_HOSTAP /* hostap mode */
553 | IEEE80211_C_MONITOR /* monitor mode */
554 | IEEE80211_C_AHDEMO /* adhoc demo mode */
555 | IEEE80211_C_WDS /* 4-address traffic works */
556 | IEEE80211_C_MBSS /* mesh point link mode */
557 | IEEE80211_C_SHPREAMBLE /* short preamble supported */
558 | IEEE80211_C_SHSLOT /* short slot time supported */
559 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */
560#ifndef ATH_ENABLE_11N
561 | IEEE80211_C_BGSCAN /* capable of bg scanning */
562#endif
563 | IEEE80211_C_TXFRAG /* handle tx frags */
564#ifdef ATH_ENABLE_DFS
565 | IEEE80211_C_DFS /* Enable radar detection */
566#endif
567 ;
568 /*
569 * Query the hal to figure out h/w crypto support.
570 */
571 if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
572 ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
573 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
574 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
575 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
576 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
577 if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
578 ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
579 if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
580 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
581 /*
582 * Check if h/w does the MIC and/or whether the
583 * separate key cache entries are required to
584 * handle both tx+rx MIC keys.
585 */
586 if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
587 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
588 /*
589 * If the h/w supports storing tx+rx MIC keys
590 * in one cache slot automatically enable use.
591 */
592 if (ath_hal_hastkipsplit(ah) ||
593 !ath_hal_settkipsplit(ah, AH_FALSE))
594 sc->sc_splitmic = 1;
595 /*
596 * If the h/w can do TKIP MIC together with WME then
597 * we use it; otherwise we force the MIC to be done
598 * in software by the net80211 layer.
599 */
600 if (ath_hal_haswmetkipmic(ah))
601 sc->sc_wmetkipmic = 1;
602 }
603 sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
604 /*
605 * Check for multicast key search support.
606 */
607 if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
608 !ath_hal_getmcastkeysearch(sc->sc_ah)) {
609 ath_hal_setmcastkeysearch(sc->sc_ah, 1);
610 }
611 sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
612 /*
613 * Mark key cache slots associated with global keys
614 * as in use. If we knew TKIP was not to be used we
615 * could leave the +32, +64, and +32+64 slots free.
616 */
617 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
618 setbit(sc->sc_keymap, i);
619 setbit(sc->sc_keymap, i+64);
620 if (sc->sc_splitmic) {
621 setbit(sc->sc_keymap, i+32);
622 setbit(sc->sc_keymap, i+32+64);
623 }
624 }
625 /*
626 * TPC support can be done either with a global cap or
627 * per-packet support. The latter is not available on
628 * all parts. We're a bit pedantic here as all parts
629 * support a global cap.
630 */
631 if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
632 ic->ic_caps |= IEEE80211_C_TXPMGT;
633
634 /*
635 * Mark WME capability only if we have sufficient
636 * hardware queues to do proper priority scheduling.
637 */
638 if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
639 ic->ic_caps |= IEEE80211_C_WME;
640 /*
641 * Check for misc other capabilities.
642 */
643 if (ath_hal_hasbursting(ah))
644 ic->ic_caps |= IEEE80211_C_BURST;
645 sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
646 sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
647 sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
648 sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
649 sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
650 if (ath_hal_hasfastframes(ah))
651 ic->ic_caps |= IEEE80211_C_FF;
652 wmodes = ath_hal_getwirelessmodes(ah);
653 if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
654 ic->ic_caps |= IEEE80211_C_TURBOP;
655#ifdef IEEE80211_SUPPORT_TDMA
656 if (ath_hal_macversion(ah) > 0x78) {
657 ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
658 ic->ic_tdma_update = ath_tdma_update;
659 }
660#endif
661
662 /*
663 * TODO: enforce that at least this many frames are available
664 * in the txbuf list before allowing data frames (raw or
665 * otherwise) to be transmitted.
666 */
667 sc->sc_txq_data_minfree = 10;
668 /*
669 * Leave this as default to maintain legacy behaviour.
670 * Shortening the cabq/mcastq may end up causing some
671 * undesirable behaviour.
672 */
673 sc->sc_txq_mcastq_maxdepth = ath_txbuf;
674
675 /*
676 * Allow the TX and RX chainmasks to be overridden by
677 * environment variables and/or device.hints.
678 *
679 * This must be done early - before the hardware is
680 * calibrated or before the 802.11n stream calculation
681 * is done.
682 */
683 if (resource_int_value(device_get_name(sc->sc_dev),
684 device_get_unit(sc->sc_dev), "rx_chainmask",
685 &rx_chainmask) == 0) {
686 device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n",
687 rx_chainmask);
688 (void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask);
689 }
690 if (resource_int_value(device_get_name(sc->sc_dev),
691 device_get_unit(sc->sc_dev), "tx_chainmask",
692 &tx_chainmask) == 0) {
693 device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n",
694 tx_chainmask);
695 (void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask);
696 }
697
698 /*
699 * Disable MRR with protected frames by default.
700 * Only 802.11n series NICs can handle this.
701 */
702 sc->sc_mrrprot = 0; /* XXX should be a capability */
703
704#ifdef ATH_ENABLE_11N
705 /*
706 * Query HT capabilities
707 */
708 if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
709 (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
710 int rxs, txs;
711
712 device_printf(sc->sc_dev, "[HT] enabling HT modes\n");
713
714 sc->sc_mrrprot = 1; /* XXX should be a capability */
715
716 ic->ic_htcaps = IEEE80211_HTC_HT /* HT operation */
717 | IEEE80211_HTC_AMPDU /* A-MPDU tx/rx */
718 | IEEE80211_HTC_AMSDU /* A-MSDU tx/rx */
719 | IEEE80211_HTCAP_MAXAMSDU_3839
720 /* max A-MSDU length */
721 | IEEE80211_HTCAP_SMPS_OFF; /* SM power save off */
723
724 /*
725 * Enable short-GI for HT20 only if the hardware
726 * advertises support.
727 * Notably, anything earlier than the AR9287 doesn't.
728 */
729 if ((ath_hal_getcapability(ah,
730 HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
731 (wmodes & HAL_MODE_HT20)) {
732 device_printf(sc->sc_dev,
733 "[HT] enabling short-GI in 20MHz mode\n");
734 ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
735 }
736
737 if (wmodes & HAL_MODE_HT40)
738 ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
739 | IEEE80211_HTCAP_SHORTGI40;
740
741 /*
742 * TX/RX streams need to be taken into account when
743 * negotiating which MCS rates it'll receive and
744 * what MCS rates are available for TX.
745 */
746 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs);
747 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs);
748
749 ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
750 ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);
751
752 ic->ic_txstream = txs;
753 ic->ic_rxstream = rxs;
754
755 (void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1,
756 &sc->sc_rts_aggr_limit);
757 if (sc->sc_rts_aggr_limit != (64 * 1024))
758 device_printf(sc->sc_dev,
759 "[HT] RTS aggregates limited to %d KiB\n",
760 sc->sc_rts_aggr_limit / 1024);
761
762 device_printf(sc->sc_dev,
763 "[HT] %d RX streams; %d TX streams\n", rxs, txs);
764 }
765#endif
766
767 /*
768 * Initial aggregation settings.
769 */
770 sc->sc_hwq_limit = ATH_AGGR_MIN_QDEPTH;
771 sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW;
772 sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH;
773
774 /*
775 * Check if the hardware requires PCI register serialisation.
776 * Some of the Owl based MACs require this.
777 */
778 if (mp_ncpus > 1 &&
779 ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
780 0, NULL) == HAL_OK) {
781 sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
782 device_printf(sc->sc_dev,
783 "Enabling register serialisation\n");
784 }
785
786 /*
787 * Indicate we need the 802.11 header padded to a
788 * 32-bit boundary for 4-address and QoS frames.
789 */
790 ic->ic_flags |= IEEE80211_F_DATAPAD;
791
792 /*
793 * Query the hal about antenna support.
794 */
795 sc->sc_defant = ath_hal_getdefantenna(ah);
796
797 /*
798 * Not all chips have the VEOL support we want to
799 * use with IBSS beacons; check here for it.
800 */
801 sc->sc_hasveol = ath_hal_hasveol(ah);
802
803 /* get mac address from hardware */
804 ath_hal_getmac(ah, macaddr);
805 if (sc->sc_hasbmask)
806 ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);
807
808 /* NB: used to size node table key mapping array */
809 ic->ic_max_keyix = sc->sc_keymax;
810 /* call MI attach routine. */
811 ieee80211_ifattach(ic, macaddr);
812 ic->ic_setregdomain = ath_setregdomain;
813 ic->ic_getradiocaps = ath_getradiocaps;
814 sc->sc_opmode = HAL_M_STA;
815
816 /* override default methods */
817 ic->ic_newassoc = ath_newassoc;
818 ic->ic_updateslot = ath_updateslot;
819 ic->ic_wme.wme_update = ath_wme_update;
820 ic->ic_vap_create = ath_vap_create;
821 ic->ic_vap_delete = ath_vap_delete;
822 ic->ic_raw_xmit = ath_raw_xmit;
823 ic->ic_update_mcast = ath_update_mcast;
824 ic->ic_update_promisc = ath_update_promisc;
825 ic->ic_node_alloc = ath_node_alloc;
826 sc->sc_node_free = ic->ic_node_free;
827 ic->ic_node_free = ath_node_free;
828 sc->sc_node_cleanup = ic->ic_node_cleanup;
829 ic->ic_node_cleanup = ath_node_cleanup;
830 ic->ic_node_getsignal = ath_node_getsignal;
831 ic->ic_scan_start = ath_scan_start;
832 ic->ic_scan_end = ath_scan_end;
833 ic->ic_set_channel = ath_set_channel;
834#ifdef ATH_ENABLE_11N
835 /* 802.11n specific - but just override anyway */
836 sc->sc_addba_request = ic->ic_addba_request;
837 sc->sc_addba_response = ic->ic_addba_response;
838 sc->sc_addba_stop = ic->ic_addba_stop;
839 sc->sc_bar_response = ic->ic_bar_response;
840 sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;
841
842 ic->ic_addba_request = ath_addba_request;
843 ic->ic_addba_response = ath_addba_response;
844 ic->ic_addba_response_timeout = ath_addba_response_timeout;
845 ic->ic_addba_stop = ath_addba_stop;
846 ic->ic_bar_response = ath_bar_response;
847
848 ic->ic_update_chw = ath_update_chw;
849#endif /* ATH_ENABLE_11N */
850
851#ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT
852 /*
853 * There's one vendor bitmap entry in the RX radiotap
854 * header; make sure that's taken into account.
855 */
856 ieee80211_radiotap_attachv(ic,
857 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0,
858 ATH_TX_RADIOTAP_PRESENT,
859 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1,
860 ATH_RX_RADIOTAP_PRESENT);
861#else
862 /*
863 * No vendor bitmap/extensions are present.
864 */
865 ieee80211_radiotap_attach(ic,
866 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
867 ATH_TX_RADIOTAP_PRESENT,
868 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
869 ATH_RX_RADIOTAP_PRESENT);
870#endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
871
872 /*
873 * Setup dynamic sysctl's now that country code and
874 * regdomain are available from the hal.
875 */
876 ath_sysctlattach(sc);
877 ath_sysctl_stats_attach(sc);
878 ath_sysctl_hal_attach(sc);
879
880 if (bootverbose)
881 ieee80211_announce(ic);
882 ath_announce(sc);
883 return 0;
884bad2:
885 ath_tx_cleanup(sc);
886 ath_desc_free(sc);
887 ath_txdma_teardown(sc);
888 ath_rxdma_teardown(sc);
889bad:
890 if (ah)
891 ath_hal_detach(ah);
892 if (ifp != NULL) {
893 CURVNET_SET(ifp->if_vnet);
894 if_free(ifp);
895 CURVNET_RESTORE();
896 }
897 sc->sc_invalid = 1;
898 return error;
899}
900
901int
902ath_detach(struct ath_softc *sc)
903{
904 struct ifnet *ifp = sc->sc_ifp;
905
906 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
907 __func__, ifp->if_flags);
908
909 /*
910 * NB: the order of these is important:
911 * o stop the chip so no more interrupts will fire
912 * o call the 802.11 layer before detaching the hal to
913 * insure callbacks into the driver to delete global
914 * key cache entries can be handled
915 * o free the taskqueue which drains any pending tasks
916 * o reclaim the tx queue data structures after calling
917 * the 802.11 layer as we'll get called back to reclaim
918 * node state and potentially want to use them
919 * o to cleanup the tx queues the hal is called, so detach
920 * it last
921 * Other than that, it's straightforward...
922 */
923 ath_stop(ifp);
924 ieee80211_ifdetach(ifp->if_l2com);
925 taskqueue_free(sc->sc_tq);
926#ifdef ATH_TX99_DIAG
927 if (sc->sc_tx99 != NULL)
928 sc->sc_tx99->detach(sc->sc_tx99);
929#endif
930 ath_rate_detach(sc->sc_rc);
931
932 ath_dfs_detach(sc);
933 ath_desc_free(sc);
934 ath_txdma_teardown(sc);
935 ath_rxdma_teardown(sc);
936 ath_tx_cleanup(sc);
937 ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */
938
939 CURVNET_SET(ifp->if_vnet);
940 if_free(ifp);
941 CURVNET_RESTORE();
942
943 return 0;
944}
945
946/*
947 * MAC address handling for multiple BSS on the same radio.
948 * The first vap uses the MAC address from the EEPROM. For
949 * subsequent vap's we set the U/L bit (bit 1) in the MAC
950 * address and use the next six bits as an index.
951 */
952static void
953assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
954{
955 int i;
956
957 if (clone && sc->sc_hasbmask) {
958 /* NB: we only do this if h/w supports multiple bssid */
959 for (i = 0; i < 8; i++)
960 if ((sc->sc_bssidmask & (1<<i)) == 0)
961 break;
962 if (i != 0)
963 mac[0] |= (i << 2)|0x2;
964 } else
965 i = 0;
966 sc->sc_bssidmask |= 1<<i;
967 sc->sc_hwbssidmask[0] &= ~mac[0];
968 if (i == 0)
969 sc->sc_nbssid0++;
970}
971
972static void
973reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
974{
975 int i = mac[0] >> 2;
976 uint8_t mask;
977
978 if (i != 0 || --sc->sc_nbssid0 == 0) {
979 sc->sc_bssidmask &= ~(1<<i);
980 /* recalculate bssid mask from remaining addresses */
981 mask = 0xff;
982 for (i = 1; i < 8; i++)
983 if (sc->sc_bssidmask & (1<<i))
984 mask &= ~((i<<2)|0x2);
985 sc->sc_hwbssidmask[0] |= mask;
986 }
987}
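/*
 * NB: a worked example of the scheme above: with an EEPROM address
 * of 00:11:22:33:44:55, the vap at index 1 gets
 * mac[0] |= (1 << 2) | 0x2, i.e. 06:11:22:33:44:55 (the
 * locally-administered bit plus the index in bits 2-4), and those
 * bits are cleared in sc_hwbssidmask[0] so the hardware matches
 * every in-use address.  reclaim_address() rebuilds the mask from
 * the indices still marked in sc_bssidmask.
 */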
988
989/*
990 * Assign a beacon xmit slot. We try to space out
991 * assignments so when beacons are staggered the
992 * traffic coming out of the cab q has maximal time
993 * to go out before the next beacon is scheduled.
994 */
995static int
996assign_bslot(struct ath_softc *sc)
997{
998 u_int slot, free;
999
1000 free = 0;
1001 for (slot = 0; slot < ATH_BCBUF; slot++)
1002 if (sc->sc_bslot[slot] == NULL) {
1003 if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
1004 sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
1005 return slot;
1006 free = slot;
1007 /* NB: keep looking for a double slot */
1008 }
1009 return free;
1010}
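/*
 * NB: "slot" is unsigned, so (slot-1) wraps when slot == 0; the
 * modulo still yields the intended "previous slot" only because
 * ATH_BCBUF is a power of two.  The loop prefers a slot whose
 * neighbours are both free, and otherwise falls back to the last
 * free slot seen.
 */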
1011
1012static struct ieee80211vap *
1013ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
1014 enum ieee80211_opmode opmode, int flags,
1015 const uint8_t bssid[IEEE80211_ADDR_LEN],
1016 const uint8_t mac0[IEEE80211_ADDR_LEN])
1017{
1018 struct ath_softc *sc = ic->ic_ifp->if_softc;
1019 struct ath_vap *avp;
1020 struct ieee80211vap *vap;
1021 uint8_t mac[IEEE80211_ADDR_LEN];
1022 int needbeacon, error;
1023 enum ieee80211_opmode ic_opmode;
1024
1025 avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
1026 M_80211_VAP, M_WAITOK | M_ZERO);
1027 needbeacon = 0;
1028 IEEE80211_ADDR_COPY(mac, mac0);
1029
1030 ATH_LOCK(sc);
1031 ic_opmode = opmode; /* default to opmode of new vap */
1032 switch (opmode) {
1033 case IEEE80211_M_STA:
1034 if (sc->sc_nstavaps != 0) { /* XXX only 1 for now */
1035 device_printf(sc->sc_dev, "only 1 sta vap supported\n");
1036 goto bad;
1037 }
1038 if (sc->sc_nvaps) {
1039 /*
1040 * With multiple vaps we must fall back
1041 * to s/w beacon miss handling.
1042 */
1043 flags |= IEEE80211_CLONE_NOBEACONS;
1044 }
1045 if (flags & IEEE80211_CLONE_NOBEACONS) {
1046 /*
1047 * Station mode w/o beacons is implemented w/ AP mode.
1048 */
1049 ic_opmode = IEEE80211_M_HOSTAP;
1050 }
1051 break;
1052 case IEEE80211_M_IBSS:
1053 if (sc->sc_nvaps != 0) { /* XXX only 1 for now */
1054 device_printf(sc->sc_dev,
1055 "only 1 ibss vap supported\n");
1056 goto bad;
1057 }
1058 needbeacon = 1;
1059 break;
1060 case IEEE80211_M_AHDEMO:
1061#ifdef IEEE80211_SUPPORT_TDMA
1062 if (flags & IEEE80211_CLONE_TDMA) {
1063 if (sc->sc_nvaps != 0) {
1064 device_printf(sc->sc_dev,
1065 "only 1 tdma vap supported\n");
1066 goto bad;
1067 }
1068 needbeacon = 1;
1069 flags |= IEEE80211_CLONE_NOBEACONS;
1070 }
1071 /* fall thru... */
1072#endif
1073 case IEEE80211_M_MONITOR:
1074 if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
1075 /*
1076 * Adopt existing mode. Adding a monitor or ahdemo
1077 * vap to an existing configuration is of dubious
1078 * value but should be ok.
1079 */
1080 /* XXX not right for monitor mode */
1081 ic_opmode = ic->ic_opmode;
1082 }
1083 break;
1084 case IEEE80211_M_HOSTAP:
1085 case IEEE80211_M_MBSS:
1086 needbeacon = 1;
1087 break;
1088 case IEEE80211_M_WDS:
1089 if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
1090 device_printf(sc->sc_dev,
1091 "wds not supported in sta mode\n");
1092 goto bad;
1093 }
1094 /*
1095 * Silently remove any request for a unique
1096 * bssid; WDS vap's always share the local
1097 * mac address.
1098 */
1099 flags &= ~IEEE80211_CLONE_BSSID;
1100 if (sc->sc_nvaps == 0)
1101 ic_opmode = IEEE80211_M_HOSTAP;
1102 else
1103 ic_opmode = ic->ic_opmode;
1104 break;
1105 default:
1106 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
1107 goto bad;
1108 }
1109 /*
1110 * Check that a beacon buffer is available; the code below assumes it.
1111 */
1112 if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
1113 device_printf(sc->sc_dev, "no beacon buffer available\n");
1114 goto bad;
1115 }
1116
1117 /* STA, AHDEMO? */
1118 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
1119 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
1120 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
1121 }
1122
1123 vap = &avp->av_vap;
1124 /* XXX can't hold mutex across if_alloc */
1125 ATH_UNLOCK(sc);
1126 error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
1127 bssid, mac);
1128 ATH_LOCK(sc);
1129 if (error != 0) {
1130 device_printf(sc->sc_dev, "%s: error %d creating vap\n",
1131 __func__, error);
1132 goto bad2;
1133 }
1134
1135 /* h/w crypto support */
1136 vap->iv_key_alloc = ath_key_alloc;
1137 vap->iv_key_delete = ath_key_delete;
1138 vap->iv_key_set = ath_key_set;
1139 vap->iv_key_update_begin = ath_key_update_begin;
1140 vap->iv_key_update_end = ath_key_update_end;
1141
1142 /* override various methods */
1143 avp->av_recv_mgmt = vap->iv_recv_mgmt;
1144 vap->iv_recv_mgmt = ath_recv_mgmt;
1145 vap->iv_reset = ath_reset_vap;
1146 vap->iv_update_beacon = ath_beacon_update;
1147 avp->av_newstate = vap->iv_newstate;
1148 vap->iv_newstate = ath_newstate;
1149 avp->av_bmiss = vap->iv_bmiss;
1150 vap->iv_bmiss = ath_bmiss_vap;
1151
1152 avp->av_node_ps = vap->iv_node_ps;
1153 vap->iv_node_ps = ath_node_powersave;
1154
1155 avp->av_set_tim = vap->iv_set_tim;
1156 vap->iv_set_tim = ath_node_set_tim;
1157
1158 /* Set default parameters */
1159
1160 /*
1161 * Anything earlier than some AR9300 series MACs
1162 * doesn't support a smaller MPDU density.
1163 */
1164 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
1165 /*
1166 * All NICs can handle the maximum size; however,
1167 * AR5416 based MACs can only TX aggregates w/ RTS
1168 * protection when the total aggregate size is <= 8k.
1169 * For now that's enforced by the TX path.
1170 */
1171 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
1172
1173 avp->av_bslot = -1;
1174 if (needbeacon) {
1175 /*
1176 * Allocate beacon state and setup the q for buffered
1177 * multicast frames. We know a beacon buffer is
1178 * available because we checked above.
1179 */
1180 avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
1181 TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
1182 if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
1183 /*
1184 * Assign the vap to a beacon xmit slot. As above
1185 * this cannot fail to find a free one.
1186 */
1187 avp->av_bslot = assign_bslot(sc);
1188 KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
1189 ("beacon slot %u not empty", avp->av_bslot));
1190 sc->sc_bslot[avp->av_bslot] = vap;
1191 sc->sc_nbcnvaps++;
1192 }
1193 if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
1194 /*
1195 * Multiple vaps are to transmit beacons and we
1196 * have h/w support for TSF adjusting; enable
1197 * use of staggered beacons.
1198 */
1199 sc->sc_stagbeacons = 1;
1200 }
1201 ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
1202 }
1203
1204 ic->ic_opmode = ic_opmode;
1205 if (opmode != IEEE80211_M_WDS) {
1206 sc->sc_nvaps++;
1207 if (opmode == IEEE80211_M_STA)
1208 sc->sc_nstavaps++;
1209 if (opmode == IEEE80211_M_MBSS)
1210 sc->sc_nmeshvaps++;
1211 }
1212 switch (ic_opmode) {
1213 case IEEE80211_M_IBSS:
1214 sc->sc_opmode = HAL_M_IBSS;
1215 break;
1216 case IEEE80211_M_STA:
1217 sc->sc_opmode = HAL_M_STA;
1218 break;
1219 case IEEE80211_M_AHDEMO:
1220#ifdef IEEE80211_SUPPORT_TDMA
1221 if (vap->iv_caps & IEEE80211_C_TDMA) {
1222 sc->sc_tdma = 1;
1223 /* NB: disable tsf adjust */
1224 sc->sc_stagbeacons = 0;
1225 }
1226 /*
1227 * NB: adhoc demo mode is a pseudo mode; to the hal it's
1228 * just ap mode.
1229 */
1230 /* fall thru... */
1231#endif
1232 case IEEE80211_M_HOSTAP:
1233 case IEEE80211_M_MBSS:
1234 sc->sc_opmode = HAL_M_HOSTAP;
1235 break;
1236 case IEEE80211_M_MONITOR:
1237 sc->sc_opmode = HAL_M_MONITOR;
1238 break;
1239 default:
1240 /* XXX should not happen */
1241 break;
1242 }
1243 if (sc->sc_hastsfadd) {
1244 /*
1245 * Configure whether or not TSF adjust should be done.
1246 */
1247 ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
1248 }
1249 if (flags & IEEE80211_CLONE_NOBEACONS) {
1250 /*
1251 * Enable s/w beacon miss handling.
1252 */
1253 sc->sc_swbmiss = 1;
1254 }
1255 ATH_UNLOCK(sc);
1256
1257 /* complete setup */
1258 ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
1259 return vap;
1260bad2:
1261 reclaim_address(sc, mac);
1262 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
1263bad:
1264 free(avp, M_80211_VAP);
1265 ATH_UNLOCK(sc);
1266 return NULL;
1267}
1268
1269static void
1270ath_vap_delete(struct ieee80211vap *vap)
1271{
1272 struct ieee80211com *ic = vap->iv_ic;
1273 struct ifnet *ifp = ic->ic_ifp;
1274 struct ath_softc *sc = ifp->if_softc;
1275 struct ath_hal *ah = sc->sc_ah;
1276 struct ath_vap *avp = ATH_VAP(vap);
1277
1278 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
1279 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1280 /*
1281 * Quiesce the hardware while we remove the vap. In
1282 * particular we need to reclaim all references to
1283 * the vap state by any frames pending on the tx queues.
1284 */
1285 ath_hal_intrset(ah, 0); /* disable interrupts */
1286 ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */
1287 /* XXX Do all frames from all vaps/nodes need draining here? */
1288 ath_stoprecv(sc, 1); /* stop recv side */
1289 }
1290
1291 ieee80211_vap_detach(vap);
1292
1293 /*
1294 * XXX Danger Will Robinson! Danger!
1295 *
1296 * Because ieee80211_vap_detach() can queue a frame (the station
1297 * disassociate message?) after we've drained the TXQ and
1298 * flushed the software TXQ, we will end up with a frame queued
1299 * to a node whose vap is about to be freed.
1300 *
1301 * To work around this, flush the hardware/software again.
1302 * This may be racy - the ath task may be running and the packet
1303 * may be being scheduled between sw->hw txq. Tsk.
1304 *
1305 * TODO: figure out why a new node gets allocated somewhere around
1306 * here (after the ath_tx_swq() call; and after an ath_stop_locked()
1307 * call!)
1308 */
1309
1310 ath_draintxq(sc, ATH_RESET_DEFAULT);
1311
1312 ATH_LOCK(sc);
1313 /*
1314 * Reclaim beacon state. Note this must be done before
1315 * the vap instance is reclaimed as we may have a reference
1316 * to it in the buffer for the beacon frame.
1317 */
1318 if (avp->av_bcbuf != NULL) {
1319 if (avp->av_bslot != -1) {
1320 sc->sc_bslot[avp->av_bslot] = NULL;
1321 sc->sc_nbcnvaps--;
1322 }
1323 ath_beacon_return(sc, avp->av_bcbuf);
1324 avp->av_bcbuf = NULL;
1325 if (sc->sc_nbcnvaps == 0) {
1326 sc->sc_stagbeacons = 0;
1327 if (sc->sc_hastsfadd)
1328 ath_hal_settsfadjust(sc->sc_ah, 0);
1329 }
1330 /*
1331 * Reclaim any pending mcast frames for the vap.
1332 */
1333 ath_tx_draintxq(sc, &avp->av_mcastq);
1334 ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);
1335 }
1336 /*
1337 * Update bookkeeping.
1338 */
1339 if (vap->iv_opmode == IEEE80211_M_STA) {
1340 sc->sc_nstavaps--;
1341 if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
1342 sc->sc_swbmiss = 0;
1343 } else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
1344 vap->iv_opmode == IEEE80211_M_MBSS) {
1345 reclaim_address(sc, vap->iv_myaddr);
1346 ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
1347 if (vap->iv_opmode == IEEE80211_M_MBSS)
1348 sc->sc_nmeshvaps--;
1349 }
1350 if (vap->iv_opmode != IEEE80211_M_WDS)
1351 sc->sc_nvaps--;
1352#ifdef IEEE80211_SUPPORT_TDMA
1353 /* TDMA operation ceases when the last vap is destroyed */
1354 if (sc->sc_tdma && sc->sc_nvaps == 0) {
1355 sc->sc_tdma = 0;
1356 sc->sc_swbmiss = 0;
1357 }
1358#endif
1359 free(avp, M_80211_VAP);
1360
1361 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1362 /*
1363 * Restart rx+tx machines if still running (RUNNING will
1364 * be reset if we just destroyed the last vap).
1365 */
1366 if (ath_startrecv(sc) != 0)
1367 if_printf(ifp, "%s: unable to restart recv logic\n",
1368 __func__);
1369 if (sc->sc_beacons) { /* restart beacons */
1370#ifdef IEEE80211_SUPPORT_TDMA
1371 if (sc->sc_tdma)
1372 ath_tdma_config(sc, NULL);
1373 else
1374#endif
1375 ath_beacon_config(sc, NULL);
1376 }
1377 ath_hal_intrset(ah, sc->sc_imask);
1378 }
1379 ATH_UNLOCK(sc);
1380}
1381
1382void
1383ath_suspend(struct ath_softc *sc)
1384{
1385 struct ifnet *ifp = sc->sc_ifp;
1386 struct ieee80211com *ic = ifp->if_l2com;
1387
1388 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1389 __func__, ifp->if_flags);
1390
1391 sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;
1392
1393 ieee80211_suspend_all(ic);
1394 /*
1395 * NB: don't worry about putting the chip in low power
1396 * mode; pci will power off our socket on suspend and
1397 * CardBus detaches the device.
1398 */
1399
1400 /*
1401 * XXX ensure none of the taskqueues are running
1402 * XXX ensure sc_invalid is 1
1403 * XXX ensure the calibration callout is disabled
1404 */
1405
1406 /* Disable the PCIe PHY, complete with workarounds */
1407 ath_hal_enablepcie(sc->sc_ah, 1, 1);
1408}
1409
1410/*
1411 * Reset the key cache since some parts do not reset the
1412 * contents on resume. First we clear all entries, then
1413 * re-load keys that the 802.11 layer assumes are setup
1414 * in h/w.
1415 */
1416static void
1417ath_reset_keycache(struct ath_softc *sc)
1418{
1419 struct ifnet *ifp = sc->sc_ifp;
1420 struct ieee80211com *ic = ifp->if_l2com;
1421 struct ath_hal *ah = sc->sc_ah;
1422 int i;
1423
1424 for (i = 0; i < sc->sc_keymax; i++)
1425 ath_hal_keyreset(ah, i);
1426 ieee80211_crypto_reload_keys(ic);
1427}
1428
1429void
1430ath_resume(struct ath_softc *sc)
1431{
1432 struct ifnet *ifp = sc->sc_ifp;
1433 struct ieee80211com *ic = ifp->if_l2com;
1434 struct ath_hal *ah = sc->sc_ah;
1435 HAL_STATUS status;
1436
1437 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1438 __func__, ifp->if_flags);
1439
1440 /* Re-enable PCIe, re-enable the PCIe bus */
1441 ath_hal_enablepcie(ah, 0, 0);
1442
1443 /*
1444 * Must reset the chip before we reload the
1445 * keycache as we were powered down on suspend.
1446 */
1447 ath_hal_reset(ah, sc->sc_opmode,
1448 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
1449 AH_FALSE, &status);
1450 ath_reset_keycache(sc);
1451
1452 /* Let DFS at it in case it's a DFS channel */
1453 ath_dfs_radar_enable(sc, ic->ic_curchan);
1454
1455 /* Restore the LED configuration */
1456 ath_led_config(sc);
1457 ath_hal_setledstate(ah, HAL_LED_INIT);
1458
1459 if (sc->sc_resume_up)
1460 ieee80211_resume_all(ic);
1461
1462 /* XXX beacons ? */
1463}
1464
1465void
1466ath_shutdown(struct ath_softc *sc)
1467{
1468 struct ifnet *ifp = sc->sc_ifp;
1469
1470 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1471 __func__, ifp->if_flags);
1472
1473 ath_stop(ifp);
1474 /* NB: no point powering down chip as we're about to reboot */
1475}
1476
1477/*
1478 * Interrupt handler. Most of the actual processing is deferred.
1479 */
1480void
1481ath_intr(void *arg)
1482{
1483 struct ath_softc *sc = arg;
1484 struct ifnet *ifp = sc->sc_ifp;
1485 struct ath_hal *ah = sc->sc_ah;
1486 HAL_INT status = 0;
1487 uint32_t txqs;
1488
1489 /*
1490 * If we're inside a reset path, just print a warning and
1491 * clear the ISR. The reset routine will finish it for us.
1492 */
1493 ATH_PCU_LOCK(sc);
1494 if (sc->sc_inreset_cnt) {
1495 HAL_INT status;
1496 ath_hal_getisr(ah, &status); /* clear ISR */
1497 ath_hal_intrset(ah, 0); /* disable further intr's */
1498 DPRINTF(sc, ATH_DEBUG_ANY,
1499 "%s: in reset, ignoring: status=0x%x\n",
1500 __func__, status);
1501 ATH_PCU_UNLOCK(sc);
1502 return;
1503 }
1504
1505 if (sc->sc_invalid) {
1506 /*
1507 * The hardware is not ready/present, don't touch anything.
1508 * Note this can happen early on if the IRQ is shared.
1509 */
1510 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
1511 ATH_PCU_UNLOCK(sc);
1512 return;
1513 }
1514 if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */
1515 ATH_PCU_UNLOCK(sc);
1516 return;
1517 }
1518
1519 if ((ifp->if_flags & IFF_UP) == 0 ||
1520 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1521 HAL_INT status;
1522
1523 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1524 __func__, ifp->if_flags);
1525 ath_hal_getisr(ah, &status); /* clear ISR */
1526 ath_hal_intrset(ah, 0); /* disable further intr's */
1527 ATH_PCU_UNLOCK(sc);
1528 return;
1529 }
1530
1531 /*
1532 * Figure out the reason(s) for the interrupt. Note
1533 * that the hal returns a pseudo-ISR that may include
1534 * bits we haven't explicitly enabled so we mask the
1535 * value to ensure we only process bits we requested.
1536 */
1537 ath_hal_getisr(ah, &status); /* NB: clears ISR too */
1538 DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
1539 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, "ath_intr: mask=0x%.8x", status);
1540#ifdef ATH_KTR_INTR_DEBUG
1541 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 5,
1542 "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
1543 ah->ah_intrstate[0],
1544 ah->ah_intrstate[1],
1545 ah->ah_intrstate[2],
1546 ah->ah_intrstate[3],
1547 ah->ah_intrstate[6]);
1548#endif
1549
1550 /* Squirrel away SYNC interrupt debugging */
1551 if (ah->ah_syncstate != 0) {
1552 int i;
1553 for (i = 0; i < 32; i++)
1554 if (ah->ah_syncstate & (1 << i))
1555 sc->sc_intr_stats.sync_intr[i]++;
1556 }
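/* NB: sync_intr[] keeps one counter per sync-cause bit. */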
1557
1558 status &= sc->sc_imask; /* discard unasked for bits */
1559
1560 /* Short-circuit un-handled interrupts */
1561 if (status == 0x0) {
1562 ATH_PCU_UNLOCK(sc);
1563 return;
1564 }
1565
1566 /*
1567 * Take a note that we're inside the interrupt handler, so
1568 * the reset routines know to wait.
1569 */
1570 sc->sc_intr_cnt++;
1571 ATH_PCU_UNLOCK(sc);
1572
1573 /*
1574 * Handle the interrupt. We won't run concurrent with the reset
1575 * or channel change routines as they'll wait for sc_intr_cnt
1576 * to be 0 before continuing.
1577 */
1578 if (status & HAL_INT_FATAL) {
1579 sc->sc_stats.ast_hardware++;
1580 ath_hal_intrset(ah, 0); /* disable intr's until reset */
1581 taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
1582 } else {
1583 if (status & HAL_INT_SWBA) {
1584 /*
1585 * Software beacon alert--time to send a beacon.
1586 * Handle beacon transmission directly; deferring
1587 * this is too slow to meet timing constraints
1588 * under load.
1589 */
1590#ifdef IEEE80211_SUPPORT_TDMA
1591 if (sc->sc_tdma) {
1592 if (sc->sc_tdmaswba == 0) {
1593 struct ieee80211com *ic = ifp->if_l2com;
1594 struct ieee80211vap *vap =
1595 TAILQ_FIRST(&ic->ic_vaps);
1596 ath_tdma_beacon_send(sc, vap);
1597 sc->sc_tdmaswba =
1598 vap->iv_tdma->tdma_bintval;
1599 } else
1600 sc->sc_tdmaswba--;
1601 } else
1602#endif
1603 {
1604 ath_beacon_proc(sc, 0);
1605#ifdef IEEE80211_SUPPORT_SUPERG
1606 /*
1607 * Schedule the rx taskq in case there's no
1608 * traffic so any frames held on the staging
1609 * queue are aged and potentially flushed.
1610 */
1611 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1612#endif
1613 }
1614 }
1615 if (status & HAL_INT_RXEOL) {
1616 int imask;
1617 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXEOL");
1618 ATH_PCU_LOCK(sc);
1619 /*
1620 * NB: the hardware should re-read the link when
1621 * RXE bit is written, but it doesn't work at
1622 * least on older hardware revs.
1623 */
1624 sc->sc_stats.ast_rxeol++;
1625 /*
1626 * Disable RXEOL/RXORN - prevent an interrupt
1627 * storm until the PCU logic can be reset.
1628 * In case the interface is reset some other
1629 * way before "sc_kickpcu" is called, don't
1630 * modify sc_imask - that way if it is reset
1631 * by a call to ath_reset() somehow, the
1632 * interrupt mask will be correctly reprogrammed.
1633 */
1634 imask = sc->sc_imask;
1635 imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
1636 ath_hal_intrset(ah, imask);
1637 /*
1638 * Only blank sc_rxlink if we've not yet kicked
1639 * the PCU.
1640 *
1641 * This isn't entirely correct - the correct solution
1642 * would be to have a PCU lock and engage that for
1643 * the duration of the PCU fiddling; which would include
1644 * running the RX process. Otherwise we could end up
1645 * messing up the RX descriptor chain and making the
1646 * RX desc list much shorter.
1647 */
1648 if (! sc->sc_kickpcu)
1649 sc->sc_rxlink = NULL;
1650 sc->sc_kickpcu = 1;
1651 /*
1652 * Enqueue an RX proc to handle whatever
1653 * is in the RX queue.
1654 * This will then kick the PCU.
1655 */
1656 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1657 ATH_PCU_UNLOCK(sc);
1658 }
1659 if (status & HAL_INT_TXURN) {
1660 sc->sc_stats.ast_txurn++;
1661 /* bump tx trigger level */
1662 ath_hal_updatetxtriglevel(ah, AH_TRUE);
1663 }
1664 /*
1665 * Handle both the legacy and RX EDMA interrupt bits.
1666 * Note that HAL_INT_RXLP is also HAL_INT_RXDESC.
1667 */
1668 if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) {
1669 sc->sc_stats.ast_rx_intr++;
1670 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1671 }
1672 if (status & HAL_INT_TX) {
1673 sc->sc_stats.ast_tx_intr++;
1674 /*
1675 * Grab all the currently set bits in the HAL txq bitmap
1676 * and blank them. This is the only place we should be
1677 * doing this.
1678 */
1679 if (! sc->sc_isedma) {
1680 ATH_PCU_LOCK(sc);
1681 txqs = 0xffffffff;
1682 ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
1683 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 3,
1684 "ath_intr: TX; txqs=0x%08x, txq_active was 0x%08x, now 0x%08x",
1685 txqs,
1686 sc->sc_txq_active,
1687 sc->sc_txq_active | txqs);
1688 sc->sc_txq_active |= txqs;
1689 ATH_PCU_UNLOCK(sc);
1690 }
1691 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
1692 }
1693 if (status & HAL_INT_BMISS) {
1694 sc->sc_stats.ast_bmiss++;
1695 taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
1696 }
1697 if (status & HAL_INT_GTT)
1698 sc->sc_stats.ast_tx_timeout++;
1699 if (status & HAL_INT_CST)
1700 sc->sc_stats.ast_tx_cst++;
1701 if (status & HAL_INT_MIB) {
1702 sc->sc_stats.ast_mib++;
1703 ATH_PCU_LOCK(sc);
1704 /*
1705 * Disable interrupts until we service the MIB
1706 * interrupt; otherwise it will continue to fire.
1707 */
1708 ath_hal_intrset(ah, 0);
1709 /*
1710 * Let the hal handle the event. We assume it will
1711 * clear whatever condition caused the interrupt.
1712 */
1713 ath_hal_mibevent(ah, &sc->sc_halstats);
1714 /*
1715 * Don't reset the interrupt if we've just
1716 * kicked the PCU, or we may get a nested
1717 * RXEOL before the rxproc has had a chance
1718 * to run.
1719 */
1720 if (sc->sc_kickpcu == 0)
1721 ath_hal_intrset(ah, sc->sc_imask);
1722 ATH_PCU_UNLOCK(sc);
1723 }
1724 if (status & HAL_INT_RXORN) {
1725 /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
1726 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXORN");
1727 sc->sc_stats.ast_rxorn++;
1728 }
1729 }
1730 ATH_PCU_LOCK(sc);
1731 sc->sc_intr_cnt--;
1732 ATH_PCU_UNLOCK(sc);
1733}
1734
1735static void
1736ath_fatal_proc(void *arg, int pending)
1737{
1738 struct ath_softc *sc = arg;
1739 struct ifnet *ifp = sc->sc_ifp;
1740 u_int32_t *state;
1741 u_int32_t len;
1742 void *sp;
1743
1744 if_printf(ifp, "hardware error; resetting\n");
1745 /*
1746 * Fatal errors are unrecoverable. Typically these
1747 * are caused by DMA errors. Collect h/w state from
1748 * the hal so we can diagnose what's going on.
1749 */
1750 if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
1751 KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
1752 state = sp;
1753 if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
1754 state[0], state[1] , state[2], state[3],
1755 state[4], state[5]);
1756 }
1757 ath_reset(ifp, ATH_RESET_NOLOSS);
1758}
1759
1760static void
1761ath_bmiss_vap(struct ieee80211vap *vap)
1762{
1763 /*
1764 * Work around phantom bmiss interrupts by sanity-checking
1765 * the time of our last rx'd frame. If it is within the
1766 * beacon miss interval then ignore the interrupt. If it's
1767 * truly a bmiss we'll get another interrupt soon and that'll
1768 * be dispatched up for processing. Note this applies only
1769 * for h/w beacon miss events.
1770 */
1771 if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
1772 struct ifnet *ifp = vap->iv_ic->ic_ifp;
1773 struct ath_softc *sc = ifp->if_softc;
1774 u_int64_t lastrx = sc->sc_lastrx;
1775 u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
1776 /* XXX should take a locked ref to iv_bss */
1777 u_int bmisstimeout =
1778 vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;
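/*
 * NB: ni_intval is in TU (1024us each); e.g. a threshold of
 * 10 beacons at a 100 TU interval gives ~1.02 seconds.
 */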
1779
1780 DPRINTF(sc, ATH_DEBUG_BEACON,
1781 "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
1782 __func__, (unsigned long long) tsf,
1783 (unsigned long long)(tsf - lastrx),
1784 (unsigned long long) lastrx, bmisstimeout);
1785
1786 if (tsf - lastrx <= bmisstimeout) {
1787 sc->sc_stats.ast_bmiss_phantom++;
1788 return;
1789 }
1790 }
1791 ATH_VAP(vap)->av_bmiss(vap);
1792}
1793
1794static int
1795ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
1796{
1797 uint32_t rsize;
1798 void *sp;
1799
1800 if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize))
1801 return 0;
1802 KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
1803 *hangs = *(uint32_t *)sp;
1804 return 1;
1805}
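/*
 * NB: HAL_DIAG_CHECK_HANGS takes a bitmask selecting which hang
 * checks to run and returns a bitmap of detected hang signatures;
 * callers here pass 0xff to run them all.
 */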
1806
1807static void
1808ath_bmiss_proc(void *arg, int pending)
1809{
1810 struct ath_softc *sc = arg;
1811 struct ifnet *ifp = sc->sc_ifp;
1812 uint32_t hangs;
1813
1814 DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
1815
1816 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
1817 if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs);
1818 ath_reset(ifp, ATH_RESET_NOLOSS);
1819 } else
1820 ieee80211_beacon_miss(ifp->if_l2com);
1821}
1822
1823/*
1824 * Handle TKIP MIC setup to deal with hardware that doesn't do MIC
1825 * calcs together with WME. If necessary disable the crypto
1826 * hardware and mark the 802.11 state so keys will be setup
1827 * with the MIC work done in software.
1828 */
1829static void
1830ath_settkipmic(struct ath_softc *sc)
1831{
1832 struct ifnet *ifp = sc->sc_ifp;
1833 struct ieee80211com *ic = ifp->if_l2com;
1834
1835 if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
1836 if (ic->ic_flags & IEEE80211_F_WME) {
1837 ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
1838 ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
1839 } else {
1840 ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
1841 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
1842 }
1843 }
1844}
1845
1846static void
1847ath_init(void *arg)
1848{
1849 struct ath_softc *sc = (struct ath_softc *) arg;
1850 struct ifnet *ifp = sc->sc_ifp;
1851 struct ieee80211com *ic = ifp->if_l2com;
1852 struct ath_hal *ah = sc->sc_ah;
1853 HAL_STATUS status;
1854
1855 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1856 __func__, ifp->if_flags);
1857
1858 ATH_LOCK(sc);
1859 /*
1860 * Stop anything previously setup. This is safe
1861 * whether this is the first time through or not.
1862 */
1863 ath_stop_locked(ifp);
1864
1865 /*
1866 * The basic interface to setting the hardware in a good
1867 * state is ``reset''. On return the hardware is known to
1868 * be powered up and with interrupts disabled. This must
1869 * be followed by initialization of the appropriate bits
1870 * and then setup of the interrupt mask.
1871 */
1872 ath_settkipmic(sc);
1873 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) {
1874 if_printf(ifp, "unable to reset hardware; hal status %u\n",
1875 status);
1876 ATH_UNLOCK(sc);
1877 return;
1878 }
1879 ath_chan_change(sc, ic->ic_curchan);
1880
1881 /* Let DFS at it in case it's a DFS channel */
1882 ath_dfs_radar_enable(sc, ic->ic_curchan);
1883
1884 /*
1885 * Likewise this is set during reset so update
1886 * state cached in the driver.
1887 */
1888 sc->sc_diversity = ath_hal_getdiversity(ah);
1889 sc->sc_lastlongcal = 0;
1890 sc->sc_resetcal = 1;
1891 sc->sc_lastcalreset = 0;
1892 sc->sc_lastani = 0;
1893 sc->sc_lastshortcal = 0;
1894 sc->sc_doresetcal = AH_FALSE;
1895 /*
1896 * Beacon timers were cleared here; give ath_newstate()
1897 * a hint that the beacon timers should be poked when
1898 * things transition to the RUN state.
1899 */
1900 sc->sc_beacons = 0;
1901
1902 /*
1903 * Setup the hardware after reset: the key cache
1904 * is filled as needed and the receive engine is
1905 * set going. Frame transmit is handled entirely
1906 * in the frame output path; there's nothing to do
1907 * here except setup the interrupt mask.
1908 */
1909 if (ath_startrecv(sc) != 0) {
1910 if_printf(ifp, "unable to start recv logic\n");
1911 ATH_UNLOCK(sc);
1912 return;
1913 }
1914
1915 /*
1916 * Enable interrupts.
1917 */
1918 sc->sc_imask = HAL_INT_RX | HAL_INT_TX
1919 | HAL_INT_RXEOL | HAL_INT_RXORN
1920 | HAL_INT_FATAL | HAL_INT_GLOBAL;
1921
1922 /*
1923 * Enable RX EDMA bits. Note these overlap with
1924 * HAL_INT_RX and HAL_INT_RXDESC respectively.
1925 */
1926 if (sc->sc_isedma)
1927 sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP);
1928
1929 /*
1930 * Enable MIB interrupts when there are hardware phy counters.
1931 * Note we only do this (at the moment) for station mode.
1932 */
1933 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
1934 sc->sc_imask |= HAL_INT_MIB;
1935
1936 /* Enable global TX timeout and carrier sense timeout if available */
1937 if (ath_hal_gtxto_supported(ah))
1938 sc->sc_imask |= HAL_INT_GTT;
1939
1940 DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n",
1941 __func__, sc->sc_imask);
1942
1943 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1944 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
1945 ath_hal_intrset(ah, sc->sc_imask);
1946
1947 ATH_UNLOCK(sc);
1948
1949#ifdef ATH_TX99_DIAG
1950 if (sc->sc_tx99 != NULL)
1951 sc->sc_tx99->start(sc->sc_tx99);
1952 else
1953#endif
1954 ieee80211_start_all(ic); /* start all vap's */
1955}
1956
1957static void
1958ath_stop_locked(struct ifnet *ifp)
1959{
1960 struct ath_softc *sc = ifp->if_softc;
1961 struct ath_hal *ah = sc->sc_ah;
1962
1963 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
1964 __func__, sc->sc_invalid, ifp->if_flags);
1965
1966 ATH_LOCK_ASSERT(sc);
1967 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1968 /*
1969 * Shutdown the hardware and driver:
1970 * reset 802.11 state machine
1971 * turn off timers
1972 * disable interrupts
1973 * turn off the radio
1974 * clear transmit machinery
1975 * clear receive machinery
1976 * drain and release tx queues
1977 * reclaim beacon resources
1978 * power down hardware
1979 *
1980 * Note that some of this work is not possible if the
1981 * hardware is gone (invalid).
1982 */
1983#ifdef ATH_TX99_DIAG
1984 if (sc->sc_tx99 != NULL)
1985 sc->sc_tx99->stop(sc->sc_tx99);
1986#endif
1987 callout_stop(&sc->sc_wd_ch);
1988 sc->sc_wd_timer = 0;
1989 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1990 if (!sc->sc_invalid) {
1991 if (sc->sc_softled) {
1992 callout_stop(&sc->sc_ledtimer);
1993 ath_hal_gpioset(ah, sc->sc_ledpin,
1994 !sc->sc_ledon);
1995 sc->sc_blinking = 0;
1996 }
1997 ath_hal_intrset(ah, 0);
1998 }
1999 ath_draintxq(sc, ATH_RESET_DEFAULT);
2000 if (!sc->sc_invalid) {
2001 ath_stoprecv(sc, 1);
2002 ath_hal_phydisable(ah);
2003 } else
2004 sc->sc_rxlink = NULL;
2005 ath_beacon_free(sc); /* XXX not needed */
2006 }
2007}
2008
2009#define MAX_TXRX_ITERATIONS 1000
2010static void
2011ath_txrx_stop_locked(struct ath_softc *sc)
2012{
2013 int i = MAX_TXRX_ITERATIONS;
2014
2015 ATH_UNLOCK_ASSERT(sc);
2016 ATH_PCU_LOCK_ASSERT(sc);
2017
2018 /*
2019 * Sleep until all the pending operations have completed.
2020 *
2021 * The caller must ensure that reset has been incremented
2022 * or the pending operations may continue being queued.
2023 */
2024 while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
2025 sc->sc_txstart_cnt || sc->sc_intr_cnt) {
2026 if (i <= 0)
2027 break;
2028 msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 1);
2029 i--;
2030 }
2031
2032 if (i <= 0)
2033 device_printf(sc->sc_dev,
2034 "%s: didn't finish after %d iterations\n",
2035 __func__, MAX_TXRX_ITERATIONS);
2036}
2037#undef MAX_TXRX_ITERATIONS
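/*
 * NB: each msleep() above waits a single tick, so the loop bounds
 * the total wait to roughly MAX_TXRX_ITERATIONS ticks before
 * giving up and logging the failure.
 */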
2038
2039#if 0
2040static void
2041ath_txrx_stop(struct ath_softc *sc)
2042{
2043 ATH_UNLOCK_ASSERT(sc);
2044 ATH_PCU_UNLOCK_ASSERT(sc);
2045
2046 ATH_PCU_LOCK(sc);
2047 ath_txrx_stop_locked(sc);
2048 ATH_PCU_UNLOCK(sc);
2049}
2050#endif
2051
2052static void
2053ath_txrx_start(struct ath_softc *sc)
2054{
2055
2056 taskqueue_unblock(sc->sc_tq);
2057}
2058
2059/*
2060 * Grab the reset lock, and wait around until no one else
2061 * is trying to do anything with it.
2062 *
2063 * This is totally horrible but we can't hold this lock for
2064 * long enough to do TX/RX or we end up with net80211/ip stack
2065 * LORs and eventual deadlock.
2066 *
2067 * "dowait" signals whether to spin, waiting for the reset
2068 * lock count to reach 0. This should (for now) only be used
2069 * during the reset path, as the rest of the code may not
2070 * be locking-reentrant enough to behave correctly.
2071 *
2072 * Another, cleaner way should be found to serialise all of
2073 * these operations.
2074 */
2075#define MAX_RESET_ITERATIONS 10
2076static int
2077ath_reset_grablock(struct ath_softc *sc, int dowait)
2078{
2079 int w = 0;
2080 int i = MAX_RESET_ITERATIONS;
2081
2082 ATH_PCU_LOCK_ASSERT(sc);
2083 do {
2084 if (sc->sc_inreset_cnt == 0) {
2085 w = 1;
2086 break;
2087 }
2088 if (dowait == 0) {
2089 w = 0;
2090 break;
2091 }
2092 ATH_PCU_UNLOCK(sc);
2093 pause("ath_reset_grablock", 1);
2094 i--;
2095 ATH_PCU_LOCK(sc);
2096 } while (i > 0);
2097
2098 /*
2099 * We always increment the refcounter, regardless
2100 * of whether we succeeded in getting it in an exclusive
2101 * way.
2102 */
2103 sc->sc_inreset_cnt++;
2104
2105 if (i <= 0)
2106 device_printf(sc->sc_dev,
2107 "%s: didn't finish after %d iterations\n",
2108 __func__, MAX_RESET_ITERATIONS);
2109
2110 if (w == 0)
2111 device_printf(sc->sc_dev,
2112 "%s: warning, recursive reset path!\n",
2113 __func__);
2114
2115 return w;
2116}
2117#undef MAX_RESET_ITERATIONS
2118
2119/*
2120 * XXX TODO: write ath_reset_releaselock
2121 */
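/*
 * The usage pattern (see ath_reset() below) is: take ATH_PCU_LOCK,
 * call ath_reset_grablock(sc, 1), perform the reset, then decrement
 * sc_inreset_cnt and re-enable interrupts under the same lock.
 */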
2122
2123static void
2124ath_stop(struct ifnet *ifp)
2125{
2126 struct ath_softc *sc = ifp->if_softc;
2127
2128 ATH_LOCK(sc);
2129 ath_stop_locked(ifp);
2130 ATH_UNLOCK(sc);
2131}
2132
2133/*
2134 * Reset the hardware w/o losing operational state. This is
2135 * basically a more efficient way of doing ath_stop, ath_init,
2136 * followed by state transitions to the current 802.11
2137 * operational state. Used to recover from various errors and
2138 * to reset or reload hardware state.
2139 */
2140int
2141ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
2142{
2143 struct ath_softc *sc = ifp->if_softc;
2144 struct ieee80211com *ic = ifp->if_l2com;
2145 struct ath_hal *ah = sc->sc_ah;
2146 HAL_STATUS status;
2147 int i;
2148
2149 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
2150
2151 /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
2152 ATH_PCU_UNLOCK_ASSERT(sc);
2153 ATH_UNLOCK_ASSERT(sc);
2154
2155 /* Try to stop any further TX/RX from occurring */
2156 taskqueue_block(sc->sc_tq);
2157
2158 ATH_PCU_LOCK(sc);
2159 ath_hal_intrset(ah, 0); /* disable interrupts */
2160 ath_txrx_stop_locked(sc); /* Ensure TX/RX is stopped */
2161 if (ath_reset_grablock(sc, 1) == 0) {
2162 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
2163 __func__);
2164 }
2165 ATH_PCU_UNLOCK(sc);
2166
2167 /*
2168 * Should now wait for pending TX/RX to complete
2169 * and block future ones from occurring. This needs to be
2170 * done before the TX queue is drained.
2171 */
2172 ath_draintxq(sc, reset_type); /* stop xmit side */
2173
2174 /*
2175 * Regardless of whether we're doing a no-loss flush or
2176 * not, stop the PCU and handle what's in the RX queue.
2177 * That way frames aren't dropped which shouldn't be.
2178 */
2179 ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
2180 ath_rx_flush(sc);
2181
2182 ath_settkipmic(sc); /* configure TKIP MIC handling */
2183 /* NB: indicate channel change so we do a full reset */
2184 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
2185 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
2186 __func__, status);
2187 sc->sc_diversity = ath_hal_getdiversity(ah);
2188
2189 /* Let DFS at it in case it's a DFS channel */
2190 ath_dfs_radar_enable(sc, ic->ic_curchan);
2191
2192 if (ath_startrecv(sc) != 0) /* restart recv */
2193 if_printf(ifp, "%s: unable to start recv logic\n", __func__);
2194 /*
2195 * We may be doing a reset in response to an ioctl
2196 * that changes the channel so update any state that
2197 * might change as a result.
2198 */
2199 ath_chan_change(sc, ic->ic_curchan);
2200 if (sc->sc_beacons) { /* restart beacons */
2201#ifdef IEEE80211_SUPPORT_TDMA
2202 if (sc->sc_tdma)
2203 ath_tdma_config(sc, NULL);
2204 else
2205#endif
2206 ath_beacon_config(sc, NULL);
2207 }
2208
2209 /*
2210 * Release the reset lock and re-enable interrupts here.
2211 * If an interrupt was being processed in ath_intr(),
2212 * it would disable interrupts at this point. So we have
2213 * to atomically enable interrupts and decrement the
2214 * reset counter - this way ath_intr() doesn't end up
2215 * disabling interrupts without a corresponding enable
2216 * in the reset or channel change path.
2217 */
2218 ATH_PCU_LOCK(sc);
2219 sc->sc_inreset_cnt--;
2220 /* XXX only do this if sc_inreset_cnt == 0? */
2221 ath_hal_intrset(ah, sc->sc_imask);
2222 ATH_PCU_UNLOCK(sc);
2223
2224 /*
2225 * TX and RX can be started here. If it were started with
2226 * sc_inreset_cnt > 0, the TX and RX path would abort.
2227 * Thus if this is a nested call through the reset or
2228 * channel change code, TX completion will occur but
2229 * RX completion and ath_start / ath_tx_start will not
2230 * run.
2231 */
2232
2233 /* Restart TX/RX as needed */
2234 ath_txrx_start(sc);
2235
2236 /* XXX Restart TX completion and pending TX */
2237 if (reset_type == ATH_RESET_NOLOSS) {
2238 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
2239 if (ATH_TXQ_SETUP(sc, i)) {
2240 ATH_TXQ_LOCK(&sc->sc_txq[i]);
2241 ath_txq_restart_dma(sc, &sc->sc_txq[i]);
2242 ath_txq_sched(sc, &sc->sc_txq[i]);
2243 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
2244 }
2245 }
2246 }
2247
2248 /*
2249 * This may have been set during an ath_start() call which
2250 * set this once it detected a concurrent TX was going on.
2251 * So, clear it.
2252 */
2253 IF_LOCK(&ifp->if_snd);
2254 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2255 IF_UNLOCK(&ifp->if_snd);
2256
2257 /* Handle any frames in the TX queue */
2258 /*
2259 * XXX should this be done by the caller, rather than
2260 * ath_reset() ?
2261 */
2262 ath_tx_kick(sc); /* restart xmit */
2263 return 0;
2264}
2265
2266static int
2267ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
2268{
2269 struct ieee80211com *ic = vap->iv_ic;
2270 struct ifnet *ifp = ic->ic_ifp;
2271 struct ath_softc *sc = ifp->if_softc;
2272 struct ath_hal *ah = sc->sc_ah;
2273
2274 switch (cmd) {
2275 case IEEE80211_IOC_TXPOWER:
2276 /*
2277 * If per-packet TPC is enabled, then we have nothing
2278 * to do; otherwise we need to force the global limit.
2279 * All this can happen directly; no need to reset.
2280 */
2281 if (!ath_hal_gettpc(ah))
2282 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
2283 return 0;
2284 }
2285 /* XXX? Full or NOLOSS? */
2286 return ath_reset(ifp, ATH_RESET_FULL);
2287}
2288
2289struct ath_buf *
2290_ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype)
2291{
2292 struct ath_buf *bf;
2293
2294 ATH_TXBUF_LOCK_ASSERT(sc);
2295
2296 if (btype == ATH_BUFTYPE_MGMT)
2297 bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt);
2298 else
2299 bf = TAILQ_FIRST(&sc->sc_txbuf);
2300
2301 if (bf == NULL) {
2302 sc->sc_stats.ast_tx_getnobuf++;
2303 } else {
2304 if (bf->bf_flags & ATH_BUF_BUSY) {
2305 sc->sc_stats.ast_tx_getbusybuf++;
2306 bf = NULL;
2307 }
2308 }
2309
2310 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) {
2311 if (btype == ATH_BUFTYPE_MGMT)
2312 TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list);
2313 else {
2314 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
2315 sc->sc_txbuf_cnt--;
2316
2317 /*
2318 * This shouldn't happen; however just to be
2319 * safe print a warning and fudge the txbuf
2320 * count.
2321 */
2322 if (sc->sc_txbuf_cnt < 0) {
2323 device_printf(sc->sc_dev,
2324 "%s: sc_txbuf_cnt < 0?\n",
2325 __func__);
2326 sc->sc_txbuf_cnt = 0;
2327 }
2328 }
2329 } else
2330 bf = NULL;
2331
2332 if (bf == NULL) {
2333 /* XXX should check which list, mgmt or otherwise */
2334 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
2335 TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
2336 "out of xmit buffers" : "xmit buffer busy");
2337 return NULL;
2338 }
2339
2340 /* XXX TODO: should do this at buffer list initialisation */
2341 /* XXX (then, ensure the buffer has the right flag set) */
2342 if (btype == ATH_BUFTYPE_MGMT)
2343 bf->bf_flags |= ATH_BUF_MGMT;
2344 else
2345 bf->bf_flags &= (~ATH_BUF_MGMT);
2346
2347 /* Valid bf here; clear some basic fields */
2348 bf->bf_next = NULL; /* XXX just to be sure */
2349 bf->bf_last = NULL; /* XXX again, just to be sure */
2350 bf->bf_comp = NULL; /* XXX again, just to be sure */
2351 bzero(&bf->bf_state, sizeof(bf->bf_state));
2352
2353 /*
2354 * Track the descriptor ID only if doing EDMA
2355 */
2356 if (sc->sc_isedma) {
2357 bf->bf_descid = sc->sc_txbuf_descid;
2358 sc->sc_txbuf_descid++;
2359 }
2360
2361 return bf;
2362}
2363
2364/*
2365 * When retrying a software frame, buffers marked ATH_BUF_BUSY
2366 * can't be thrown back on the queue as they could still be
2367 * in use by the hardware.
2368 *
2369 * This duplicates the buffer, or returns NULL.
2370 *
2371 * The descriptor is also copied but the link pointers and
2372 * the DMA segments aren't copied; this frame should thus
2373 * be passed again through the descriptor setup/chain routines
2374 * so the link is correct.
2375 *
2376 * The caller must free the buffer using ath_freebuf().
2377 *
2378 * XXX TODO: this call shouldn't fail as it'll cause packet loss
2379 * XXX in the TX pathway when retries are needed.
2380 * XXX Figure out how to keep some buffers free, or factor the
2381 * XXX number of busy buffers into the xmit path (ath_start())
2382 * XXX so we don't over-commit.
2383 */
2384struct ath_buf *
2385ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf)
2386{
2387 struct ath_buf *tbf;
2388
2389 tbf = ath_getbuf(sc,
2390 (bf->bf_flags & ATH_BUF_MGMT) ?
2391 ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL);
2392 if (tbf == NULL)
2393 return NULL; /* XXX failure? Why? */
2394
2395 /* Copy basics */
2396 tbf->bf_next = NULL;
2397 tbf->bf_nseg = bf->bf_nseg;
2398 tbf->bf_flags = bf->bf_flags & ~ATH_BUF_BUSY;
2399 tbf->bf_status = bf->bf_status;
2400 tbf->bf_m = bf->bf_m;
2401 tbf->bf_node = bf->bf_node;
2402 /* will be setup by the chain/setup function */
2403 tbf->bf_lastds = NULL;
2404 /* for now, last == self */
2405 tbf->bf_last = tbf;
2406 tbf->bf_comp = bf->bf_comp;
2407
2408 /* NOTE: DMA segments will be setup by the setup/chain functions */
2409
2410 /* The caller has to re-init the descriptor + links */
2411
2412 /* Copy state */
2413 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state));
2414
2415 return tbf;
2416}
2417
2418struct ath_buf *
2419ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype)
2420{
2421 struct ath_buf *bf;
2422
2423 ATH_TXBUF_LOCK(sc);
2424 bf = _ath_getbuf_locked(sc, btype);
2425 /*
2426 * If a mgmt buffer was requested but we're out of those,
2427 * try requesting a normal one.
2428 */
2429 if (bf == NULL && btype == ATH_BUFTYPE_MGMT)
2430 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
2431 ATH_TXBUF_UNLOCK(sc);
2432 if (bf == NULL) {
2433 struct ifnet *ifp = sc->sc_ifp;
2434
2435 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
2436 sc->sc_stats.ast_tx_qstop++;
2437 IF_LOCK(&ifp->if_snd);
2438 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2439 IF_UNLOCK(&ifp->if_snd);
2440 }
2441 return bf;
2442}
2443
2444static void
2445ath_start_queue(struct ifnet *ifp)
2446{
2447 struct ath_softc *sc = ifp->if_softc;
2448
2449 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_queue: start");
2450 ath_tx_kick(sc);
2451 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_queue: finished");
2452}
2453
2454void
2455ath_start_task(void *arg, int npending)
2456{
2457 struct ath_softc *sc = (struct ath_softc *) arg;
2458 struct ifnet *ifp = sc->sc_ifp;
2459
2460 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: start");
2461
2462 /* XXX is it ok to hold the ATH_LOCK here? */
2463 ATH_PCU_LOCK(sc);
2464 if (sc->sc_inreset_cnt > 0) {
2465 device_printf(sc->sc_dev,
2466 "%s: sc_inreset_cnt > 0; bailing\n", __func__);
2467 ATH_PCU_UNLOCK(sc);
2468 IF_LOCK(&ifp->if_snd);
2469 sc->sc_stats.ast_tx_qstop++;
2470 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2471 IF_UNLOCK(&ifp->if_snd);
2472 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: OACTIVE, finish");
2473 return;
2474 }
2475 sc->sc_txstart_cnt++;
2476 ATH_PCU_UNLOCK(sc);
2477
2478 ath_start(sc->sc_ifp);
2479
2480 ATH_PCU_LOCK(sc);
2481 sc->sc_txstart_cnt--;
2482 ATH_PCU_UNLOCK(sc);
2483 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: finished");
2484}
2485
2486void
2487ath_start(struct ifnet *ifp)
2488{
2489 struct ath_softc *sc = ifp->if_softc;
2490 struct ieee80211_node *ni;
2491 struct ath_buf *bf;
2492 struct mbuf *m, *next;
2493 ath_bufhead frags;
2494 int npkts = 0;
2495
2496 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
2497 return;
2498
2499 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start: called");
2500
2501 for (;;) {
2502 ATH_TXBUF_LOCK(sc);
2503 if (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree) {
2504 /* XXX increment counter? */
2505 ATH_TXBUF_UNLOCK(sc);
2506 IF_LOCK(&ifp->if_snd);
2507 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2508 IF_UNLOCK(&ifp->if_snd);
2509 break;
2510 }
2511 ATH_TXBUF_UNLOCK(sc);
2512
2513 /*
2514 * Grab a TX buffer and associated resources.
2515 */
2516 bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);
2517 if (bf == NULL)
2518 break;
2519
2520 IFQ_DEQUEUE(&ifp->if_snd, m);
2521 if (m == NULL) {
2522 ATH_TXBUF_LOCK(sc);
2523 ath_returnbuf_head(sc, bf);
2524 ATH_TXBUF_UNLOCK(sc);
2525 break;
2526 }
2527 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
2528 npkts++;
2529 /*
2530 * Check for fragmentation. If this frame
2531 * has been broken up verify we have enough
2532 * buffers to send all the fragments so all
2533 * go out or none...
2534 */
2535 TAILQ_INIT(&frags);
2536 if ((m->m_flags & M_FRAG) &&
2537 !ath_txfrag_setup(sc, &frags, m, ni)) {
2538 DPRINTF(sc, ATH_DEBUG_XMIT,
2539 "%s: out of txfrag buffers\n", __func__);
2540 sc->sc_stats.ast_tx_nofrag++;
2541 ifp->if_oerrors++;
2542 ath_freetx(m);
2543 goto bad;
2544 }
2545 ifp->if_opackets++;
2546 nextfrag:
2547 /*
2548 * Pass the frame to the h/w for transmission.
2549 * Fragmented frames have each frag chained together
2550 * with m_nextpkt. We know there are sufficient ath_buf's
2551 * to send all the frags because of work done by
2552 * ath_txfrag_setup. We leave m_nextpkt set while
2553 * calling ath_tx_start so it can use it to extend the
2554 * tx duration to cover the subsequent frag and
2555 * so it can reclaim all the mbufs in case of an error;
2556 * ath_tx_start clears m_nextpkt once it commits to
2557 * handing the frame to the hardware.
2558 */
2559 next = m->m_nextpkt;
2560 if (ath_tx_start(sc, ni, bf, m)) {
2561 bad:
2562 ifp->if_oerrors++;
2563 reclaim:
2564 bf->bf_m = NULL;
2565 bf->bf_node = NULL;
2566 ATH_TXBUF_LOCK(sc);
2567 ath_returnbuf_head(sc, bf);
2568 ath_txfrag_cleanup(sc, &frags, ni);
2569 ATH_TXBUF_UNLOCK(sc);
2570 if (ni != NULL)
2571 ieee80211_free_node(ni);
2572 continue;
2573 }
2574
2575 /*
2576 * Check here if the node is in power save state.
2577 */
2578 ath_tx_update_tim(sc, ni, 1);
2579
2580 if (next != NULL) {
2581 /*
2582 * Beware of state changing between frags.
2583 * XXX check sta power-save state?
2584 */
2585 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
2586 DPRINTF(sc, ATH_DEBUG_XMIT,
2587 "%s: flush fragmented packet, state %s\n",
2588 __func__,
2589 ieee80211_state_name[ni->ni_vap->iv_state]);
2590 ath_freetx(next);
2591 goto reclaim;
2592 }
2593 m = next;
2594 bf = TAILQ_FIRST(&frags);
2595 KASSERT(bf != NULL, ("no buf for txfrag"));
2596 TAILQ_REMOVE(&frags, bf, bf_list);
2597 goto nextfrag;
2598 }
2599
2600 sc->sc_wd_timer = 5;
2601 }
2602 ATH_KTR(sc, ATH_KTR_TX, 1, "ath_start: finished; npkts=%d", npkts);
2593}
2594
2595static int
2596ath_media_change(struct ifnet *ifp)
2597{
2598 int error = ieee80211_media_change(ifp);
2599 /* NB: only the fixed rate can change and that doesn't need a reset */
2600 return (error == ENETRESET ? 0 : error);
2601}
2602
2603/*
2604 * Block/unblock tx+rx processing while a key change is done.
2605 * We assume the caller serializes key management operations
2606 * so we only need to worry about synchronization with other
2607 * uses that originate in the driver.
2608 */
2609static void
2610ath_key_update_begin(struct ieee80211vap *vap)
2611{
2612 struct ifnet *ifp = vap->iv_ic->ic_ifp;
2613 struct ath_softc *sc = ifp->if_softc;
2614
2615 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
2616 taskqueue_block(sc->sc_tq);
2617 IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */
2618}
2619
2620static void
2621ath_key_update_end(struct ieee80211vap *vap)
2622{
2623 struct ifnet *ifp = vap->iv_ic->ic_ifp;
2624 struct ath_softc *sc = ifp->if_softc;
2625
2626 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
2627 IF_UNLOCK(&ifp->if_snd);
2628 taskqueue_unblock(sc->sc_tq);
2629}
2630
2631static void
2632ath_update_promisc(struct ifnet *ifp)
2633{
2634 struct ath_softc *sc = ifp->if_softc;
2635 u_int32_t rfilt;
2636
2637 /* configure rx filter */
2638 rfilt = ath_calcrxfilter(sc);
2639 ath_hal_setrxfilter(sc->sc_ah, rfilt);
2640
2641 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
2642}
2643
2644static void
2645ath_update_mcast(struct ifnet *ifp)
2646{
2647 struct ath_softc *sc = ifp->if_softc;
2648 u_int32_t mfilt[2];
2649
2650 /* calculate and install multicast filter */
2651 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2652 struct ifmultiaddr *ifma;
2653 /*
2654 * Merge multicast addresses to form the hardware filter.
2655 */
2656 mfilt[0] = mfilt[1] = 0;
2657 if_maddr_rlock(ifp); /* XXX need some fiddling to remove? */
2658 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2659 caddr_t dl;
2660 u_int32_t val;
2661 u_int8_t pos;
2662
2663 /* calculate XOR of eight 6-bit values */
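/* NB: the 6-bit 'pos' computed below selects one bit of the 64-bit
 * hardware filter (mfilt[0] = bits 0..31, mfilt[1] = bits 32..63). */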
2664 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2665 val = LE_READ_4(dl + 0);
2666 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2667 val = LE_READ_4(dl + 3);
2668 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2669 pos &= 0x3f;
2670 mfilt[pos / 32] |= (1 << (pos % 32));
2671 }
2672 if_maddr_runlock(ifp);
2673 } else
2674 mfilt[0] = mfilt[1] = ~0;
2675 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
2676 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
2677 __func__, mfilt[0], mfilt[1]);
2678}
2679
2680void
2681ath_mode_init(struct ath_softc *sc)
2682{
2683 struct ifnet *ifp = sc->sc_ifp;
2684 struct ath_hal *ah = sc->sc_ah;
2685 u_int32_t rfilt;
2686
2687 /* configure rx filter */
2688 rfilt = ath_calcrxfilter(sc);
2689 ath_hal_setrxfilter(ah, rfilt);
2690
2691 /* configure operational mode */
2692 ath_hal_setopmode(ah);
2693
2694 DPRINTF(sc, ATH_DEBUG_STATE | ATH_DEBUG_MODE,
2695 "%s: ah=%p, ifp=%p, if_addr=%p\n",
2696 __func__,
2697 ah,
2698 ifp,
2699 (ifp == NULL) ? NULL : ifp->if_addr);
2700
2701 /* handle any link-level address change */
2702 ath_hal_setmac(ah, IF_LLADDR(ifp));
2703
2704 /* calculate and install multicast filter */
2705 ath_update_mcast(ifp);
2706}
2707
2708/*
2709 * Set the slot time based on the current setting.
2710 */
2711void
2712ath_setslottime(struct ath_softc *sc)
2713{
2714 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2715 struct ath_hal *ah = sc->sc_ah;
2716 u_int usec;
2717
2718 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
2719 usec = 13;
2720 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
2721 usec = 21;
2722 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
2723 /* honor short/long slot time only in 11g */
2724 /* XXX shouldn't honor on pure g or turbo g channel */
2725 if (ic->ic_flags & IEEE80211_F_SHSLOT)
2726 usec = HAL_SLOT_TIME_9;
2727 else
2728 usec = HAL_SLOT_TIME_20;
2729 } else
2730 usec = HAL_SLOT_TIME_9;
2731
2732 DPRINTF(sc, ATH_DEBUG_RESET,
2733 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
2734 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
2735 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
2736
2737 ath_hal_setslottime(ah, usec);
2738 sc->sc_updateslot = OK;
2739}
2740
2741/*
2742 * Callback from the 802.11 layer to update the
2743 * slot time based on the current setting.
2744 */
2745static void
2746ath_updateslot(struct ifnet *ifp)
2747{
2748 struct ath_softc *sc = ifp->if_softc;
2749 struct ieee80211com *ic = ifp->if_l2com;
2750
2751 /*
2752 * When not coordinating the BSS, change the hardware
2753 * immediately. For other operation we defer the change
2754 * until beacon updates have propagated to the stations.
2755 */
2756 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2757 ic->ic_opmode == IEEE80211_M_MBSS)
2758 sc->sc_updateslot = UPDATE;
2759 else
2760 ath_setslottime(sc);
2761}
2762
2763/*
2764 * Append the contents of src to dst; both queues
2765 * are assumed to be locked.
2766 */
2767void
2768ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
2769{
2770
2771 ATH_TXQ_LOCK_ASSERT(dst);
2772 ATH_TXQ_LOCK_ASSERT(src);
2773
2774 TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list);
2775 dst->axq_link = src->axq_link;
2776 src->axq_link = NULL;
2777 dst->axq_depth += src->axq_depth;
2778 dst->axq_aggr_depth += src->axq_aggr_depth;
2779 src->axq_depth = 0;
2780 src->axq_aggr_depth = 0;
2781}
2782
2783/*
2784 * Reset the hardware, with no loss.
2785 *
2786 * This can't be used for a general case reset.
2787 */
2788static void
2789ath_reset_proc(void *arg, int pending)
2790{
2791 struct ath_softc *sc = arg;
2792 struct ifnet *ifp = sc->sc_ifp;
2793
2794#if 0
2795 if_printf(ifp, "%s: resetting\n", __func__);
2796#endif
2797 ath_reset(ifp, ATH_RESET_NOLOSS);
2798}
2799
2800/*
2801 * Reset the hardware after detecting beacons have stopped.
2802 */
2803static void
2804ath_bstuck_proc(void *arg, int pending)
2805{
2806 struct ath_softc *sc = arg;
2807 struct ifnet *ifp = sc->sc_ifp;
2808 uint32_t hangs = 0;
2809
2810 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0)
2811 if_printf(ifp, "bb hang detected (0x%x)\n", hangs);
2812
2813 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
2814 sc->sc_bmisscount);
2815 sc->sc_stats.ast_bstuck++;
2816 /*
2817 * This assumes that there's no simultaneous channel mode change
2818 * occurring.
2819 */
2820 ath_reset(ifp, ATH_RESET_NOLOSS);
2821}
2822
2823static void
2824ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2825{
2826 bus_addr_t *paddr = (bus_addr_t*) arg;
2827 KASSERT(error == 0, ("error %u on bus_dma callback", error));
2828 *paddr = segs->ds_addr;
2829}
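/*
 * NB: the DMA tags created below specify a single segment
 * (nsegments = 1), so this callback only ever sees segs[0],
 * which covers the entire descriptor block.
 */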
2830
2831/*
2832 * Allocate the descriptors and appropriate DMA tag/setup.
2833 *
2834 * For some situations (eg EDMA TX completion), there isn't a requirement
2835 * for the ath_buf entries to be allocated.
2836 */
2837int
2838ath_descdma_alloc_desc(struct ath_softc *sc,
2839 struct ath_descdma *dd, ath_bufhead *head,
2840 const char *name, int ds_size, int ndesc)
2841{
2842#define DS2PHYS(_dd, _ds) \
2843 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2844#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
2845 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
2846 struct ifnet *ifp = sc->sc_ifp;
2847 int error;
2848
2849 dd->dd_descsize = ds_size;
2850
2851 DPRINTF(sc, ATH_DEBUG_RESET,
2852 "%s: %s DMA: %u desc, %d bytes per descriptor\n",
2853 __func__, name, ndesc, dd->dd_descsize);
2854
2855 dd->dd_name = name;
2856 dd->dd_desc_len = dd->dd_descsize * ndesc;
2857
2858 /*
2859 * Merlin work-around:
2860 * Descriptors that cross the 4KB boundary can't be used.
2861 * Assume one skipped descriptor per 4KB page.
2862 */
2863 if (! ath_hal_split4ktrans(sc->sc_ah)) {
2864 int numpages = dd->dd_desc_len / 4096;
2865 dd->dd_desc_len += ds_size * numpages;
2866 }
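/*
 * e.g. a 16KB descriptor block spans four 4KB pages, so space
 * for four extra (skipped) descriptors is added.
 */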
2867
2868 /*
2869 * Setup DMA descriptor area.
2870 */
2871 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
2872 PAGE_SIZE, 0, /* alignment, bounds */
2873 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2874 BUS_SPACE_MAXADDR, /* highaddr */
2875 NULL, NULL, /* filter, filterarg */
2876 dd->dd_desc_len, /* maxsize */
2877 1, /* nsegments */
2878 dd->dd_desc_len, /* maxsegsize */
2879 BUS_DMA_ALLOCNOW, /* flags */
2880 NULL, /* lockfunc */
2881 NULL, /* lockarg */
2882 &dd->dd_dmat);
2883 if (error != 0) {
2884 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
2885 return error;
2886 }
2887
2888 /* allocate descriptors */
2889 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
2890 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2891 &dd->dd_dmamap);
2892 if (error != 0) {
2893 if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
2894 "error %u\n", ndesc, dd->dd_name, error);
2895 goto fail1;
2896 }
2897
2898 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
2899 dd->dd_desc, dd->dd_desc_len,
2900 ath_load_cb, &dd->dd_desc_paddr,
2901 BUS_DMA_NOWAIT);
2902 if (error != 0) {
2903 if_printf(ifp, "unable to map %s descriptors, error %u\n",
2904 dd->dd_name, error);
2905 goto fail2;
2906 }
2907
2908 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
2909 __func__, dd->dd_name, (uint8_t *) dd->dd_desc,
2910 (u_long) dd->dd_desc_len, (caddr_t) dd->dd_desc_paddr,
2911 /*XXX*/ (u_long) dd->dd_desc_len);
2912
2913 return (0);
2914
2915fail2:
2916 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
2917fail1:
2918 bus_dma_tag_destroy(dd->dd_dmat);
2919 memset(dd, 0, sizeof(*dd));
2920 return error;
2921#undef DS2PHYS
2922#undef ATH_DESC_4KB_BOUND_CHECK
2923}
2924
2925int
2926ath_descdma_setup(struct ath_softc *sc,
2927 struct ath_descdma *dd, ath_bufhead *head,
2928 const char *name, int ds_size, int nbuf, int ndesc)
2929{
2930#define DS2PHYS(_dd, _ds) \
2931 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2932#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
2933 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
2934 struct ifnet *ifp = sc->sc_ifp;
2935 uint8_t *ds;
2936 struct ath_buf *bf;
2937 int i, bsize, error;
2938
2939 /* Allocate descriptors */
2940 error = ath_descdma_alloc_desc(sc, dd, head, name, ds_size,
2941 nbuf * ndesc);
2942
2943 /* Assume any errors during allocation were dealt with */
2944 if (error != 0) {
2945 return (error);
2946 }
2947
2948 ds = (uint8_t *) dd->dd_desc;
2949
2950	/* allocate buffers */
2951 bsize = sizeof(struct ath_buf) * nbuf;
2952 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
2953 if (bf == NULL) {
2954 if_printf(ifp, "malloc of %s buffers failed, size %u\n",
2955 dd->dd_name, bsize);
		error = ENOMEM;
2956		goto fail3;
2957 }
2958 dd->dd_bufptr = bf;
2959
2960 TAILQ_INIT(head);
2961 for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * dd->dd_descsize)) {
2962 bf->bf_desc = (struct ath_desc *) ds;
2963 bf->bf_daddr = DS2PHYS(dd, ds);
2964 if (! ath_hal_split4ktrans(sc->sc_ah)) {
2965 /*
2966 * Merlin WAR: Skip descriptor addresses which
2967 * cause 4KB boundary crossing along any point
2968 * in the descriptor.
2969 */
2970 if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr,
2971 dd->dd_descsize)) {
2972 /* Start at the next page */
2973 ds += 0x1000 - (bf->bf_daddr & 0xFFF);
2974 bf->bf_desc = (struct ath_desc *) ds;
2975 bf->bf_daddr = DS2PHYS(dd, ds);
2976 }
2977 }
2978 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
2979 &bf->bf_dmamap);
2980 if (error != 0) {
2981 if_printf(ifp, "unable to create dmamap for %s "
2982 "buffer %u, error %u\n", dd->dd_name, i, error);
2983 ath_descdma_cleanup(sc, dd, head);
2984 return error;
2985 }
2986 bf->bf_lastds = bf->bf_desc; /* Just an initial value */
2987 TAILQ_INSERT_TAIL(head, bf, bf_list);
2988 }
2989
2990 /*
2991 * XXX TODO: ensure that ds doesn't overflow the descriptor
2992 * allocation; otherwise weird stuff will occur and crash your
2993 * machine.
2994 */
2995 return 0;
2996 /* XXX this should likely just call ath_descdma_cleanup() */
2997fail3:
2998 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
2999 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3000 bus_dma_tag_destroy(dd->dd_dmat);
3001 memset(dd, 0, sizeof(*dd));
3002 return error;
3003#undef DS2PHYS
3004#undef ATH_DESC_4KB_BOUND_CHECK
3005}
3006
3007/*
3008 * Allocate ath_buf entries but no descriptor contents.
3009 *
3010 * This is for RX EDMA where the descriptors are the header part of
3011 * the RX buffer.
3012 */
3013int
3014ath_descdma_setup_rx_edma(struct ath_softc *sc,
3015 struct ath_descdma *dd, ath_bufhead *head,
3016 const char *name, int nbuf, int rx_status_len)
3017{
3018 struct ifnet *ifp = sc->sc_ifp;
3019 struct ath_buf *bf;
3020 int i, bsize, error;
3021
3022 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers\n",
3023 __func__, name, nbuf);
3024
3025 dd->dd_name = name;
3026 /*
3027 * This is (mostly) purely for show. We're not allocating any actual
3028 * descriptors here as EDMA RX has the descriptor be part
3029 * of the RX buffer.
3030 *
3031 * However, dd_desc_len is used by ath_descdma_free() to determine
3032 * whether we have already freed this DMA mapping.
3033 */
3034 dd->dd_desc_len = rx_status_len * nbuf;
3035 dd->dd_descsize = rx_status_len;
3036
3037 /* allocate rx buffers */
3038 bsize = sizeof(struct ath_buf) * nbuf;
3039 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
3040 if (bf == NULL) {
3041 if_printf(ifp, "malloc of %s buffers failed, size %u\n",
3042 dd->dd_name, bsize);
3043 error = ENOMEM;
3044 goto fail3;
3045 }
3046 dd->dd_bufptr = bf;
3047
3048 TAILQ_INIT(head);
3049 for (i = 0; i < nbuf; i++, bf++) {
3050 bf->bf_desc = NULL;
3051 bf->bf_daddr = 0;
3052 bf->bf_lastds = NULL; /* Just an initial value */
3053
3054 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
3055 &bf->bf_dmamap);
3056 if (error != 0) {
3057 if_printf(ifp, "unable to create dmamap for %s "
3058 "buffer %u, error %u\n", dd->dd_name, i, error);
3059 ath_descdma_cleanup(sc, dd, head);
3060 return error;
3061 }
3062 TAILQ_INSERT_TAIL(head, bf, bf_list);
3063 }
3064 return 0;
3065fail3:
3066 memset(dd, 0, sizeof(*dd));
3067 return error;
3068}
3069
3070void
3071ath_descdma_cleanup(struct ath_softc *sc,
3072 struct ath_descdma *dd, ath_bufhead *head)
3073{
3074 struct ath_buf *bf;
3075 struct ieee80211_node *ni;
3076
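	/*
	 * Note: dd_dmamap is only set when descriptor memory was actually
	 * allocated and mapped; the EDMA RX path above creates no
	 * descriptor memory, so for it this unload/free is skipped.
	 */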
3077 if (dd->dd_dmamap != 0) {
3078 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3079 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3080 bus_dma_tag_destroy(dd->dd_dmat);
3081 }
3082
3083 if (head != NULL) {
3084 TAILQ_FOREACH(bf, head, bf_list) {
3085 if (bf->bf_m) {
3086 m_freem(bf->bf_m);
3087 bf->bf_m = NULL;
3088 }
3089 if (bf->bf_dmamap != NULL) {
3090 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
3091 bf->bf_dmamap = NULL;
3092 }
3093 ni = bf->bf_node;
3094 bf->bf_node = NULL;
3095 if (ni != NULL) {
3096 /*
3097 * Reclaim node reference.
3098 */
3099 ieee80211_free_node(ni);
3100 }
3101 }
3102 }
3103
3104 if (head != NULL)
3105 TAILQ_INIT(head);
3106
3107 if (dd->dd_bufptr != NULL)
3108 free(dd->dd_bufptr, M_ATHDEV);
3109 memset(dd, 0, sizeof(*dd));
3110}
3111
3112static int
3113ath_desc_alloc(struct ath_softc *sc)
3114{
3115 int error;
3116
3117 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
3118 "tx", sc->sc_tx_desclen, ath_txbuf, ATH_TXDESC);
3119 if (error != 0) {
3120 return error;
3121 }
3122 sc->sc_txbuf_cnt = ath_txbuf;
3123
3124 error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt,
3125 "tx_mgmt", sc->sc_tx_desclen, ath_txbuf_mgmt,
3126 ATH_TXDESC);
3127 if (error != 0) {
3128 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3129 return error;
3130 }
3131
3132 /*
3133 * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the
3134 * flag doesn't have to be set in ath_getbuf_locked().
3135 */
3136
3137 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
3138 "beacon", sc->sc_tx_desclen, ATH_BCBUF, 1);
3139 if (error != 0) {
3140 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3141 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
3142 &sc->sc_txbuf_mgmt);
3143 return error;
3144 }
3145 return 0;
3146}
3147
3148static void
3149ath_desc_free(struct ath_softc *sc)
3150{
3151
3152 if (sc->sc_bdma.dd_desc_len != 0)
3153 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
3154 if (sc->sc_txdma.dd_desc_len != 0)
3155 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3156 if (sc->sc_txdma_mgmt.dd_desc_len != 0)
3157 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
3158 &sc->sc_txbuf_mgmt);
3159}
3160
3161static struct ieee80211_node *
3162ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3163{
3164 struct ieee80211com *ic = vap->iv_ic;
3165 struct ath_softc *sc = ic->ic_ifp->if_softc;
3166 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
3167 struct ath_node *an;
3168
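	/*
	 * Note: the rate control module's per-node state
	 * (sc_rc->arc_space bytes) is carved out of this same
	 * allocation, immediately after the ath_node itself.
	 */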
3169 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
3170 if (an == NULL) {
3171 /* XXX stat+msg */
3172 return NULL;
3173 }
3174 ath_rate_node_init(sc, an);
3175
3176	/* Setup the mutex - there's no associd yet so name it after the node pointer */
3177 snprintf(an->an_name, sizeof(an->an_name), "%s: node %p",
3178 device_get_nameunit(sc->sc_dev), an);
3179 mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF);
3180
3181 /* XXX setup ath_tid */
3182 ath_tx_tid_init(sc, an);
3183
3184 DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
3185 return &an->an_node;
3186}
3187
3188static void
3189ath_node_cleanup(struct ieee80211_node *ni)
3190{
3191 struct ieee80211com *ic = ni->ni_ic;
3192 struct ath_softc *sc = ic->ic_ifp->if_softc;
3193
3194 /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */
3195 ath_tx_node_flush(sc, ATH_NODE(ni));
3196 ath_rate_node_cleanup(sc, ATH_NODE(ni));
3197 sc->sc_node_cleanup(ni);
3198}
3199
3200static void
3201ath_node_free(struct ieee80211_node *ni)
3202{
3203 struct ieee80211com *ic = ni->ni_ic;
3204 struct ath_softc *sc = ic->ic_ifp->if_softc;
3205
3206 DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);
3207 mtx_destroy(&ATH_NODE(ni)->an_mtx);
3208 sc->sc_node_free(ni);
3209}
3210
3211static void
3212ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
3213{
3214 struct ieee80211com *ic = ni->ni_ic;
3215 struct ath_softc *sc = ic->ic_ifp->if_softc;
3216 struct ath_hal *ah = sc->sc_ah;
3217
3218 *rssi = ic->ic_node_getrssi(ni);
3219 if (ni->ni_chan != IEEE80211_CHAN_ANYC)
3220 *noise = ath_hal_getchannoise(ah, ni->ni_chan);
3221 else
3222 *noise = -95; /* nominally correct */
3223}
3224
3225/*
3226 * Set the default antenna.
3227 */
3228void
3229ath_setdefantenna(struct ath_softc *sc, u_int antenna)
3230{
3231 struct ath_hal *ah = sc->sc_ah;
3232
3233 /* XXX block beacon interrupts */
3234 ath_hal_setdefantenna(ah, antenna);
3235 if (sc->sc_defant != antenna)
3236 sc->sc_stats.ast_ant_defswitch++;
3237 sc->sc_defant = antenna;
3238 sc->sc_rxotherant = 0;
3239}
3240
3241static void
3242ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
3243{
3244 txq->axq_qnum = qnum;
3245 txq->axq_ac = 0;
3246 txq->axq_depth = 0;
3247 txq->axq_aggr_depth = 0;
3248 txq->axq_intrcnt = 0;
3249 txq->axq_link = NULL;
3250 txq->axq_softc = sc;
3251 TAILQ_INIT(&txq->axq_q);
3252 TAILQ_INIT(&txq->axq_tidq);
3253 ATH_TXQ_LOCK_INIT(sc, txq);
3254}
3255
3256/*
3257 * Setup a h/w transmit queue.
3258 */
3259static struct ath_txq *
3260ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
3261{
3262#define N(a) (sizeof(a)/sizeof(a[0]))
3263 struct ath_hal *ah = sc->sc_ah;
3264 HAL_TXQ_INFO qi;
3265 int qnum;
3266
3267 memset(&qi, 0, sizeof(qi));
3268 qi.tqi_subtype = subtype;
3269 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
3270 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
3271 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
3272 /*
3273 * Enable interrupts only for EOL and DESC conditions.
3274 * We mark tx descriptors to receive a DESC interrupt
3275 * when a tx queue gets deep; otherwise we wait for the
3276 * EOL to reap descriptors. Note that this is done to
3277 * reduce interrupt load, and it only defers reaping
3278 * descriptors, never transmitting frames. Aside from
3279 * reducing interrupts this also permits more concurrency.
3280 * The only potential downside is if the tx queue backs
3281 * up, in which case the top half of the kernel may back up
3282 * due to a lack of tx descriptors.
3283 */
3284 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE;
3285 qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
3286 if (qnum == -1) {
3287 /*
3288 * NB: don't print a message, this happens
3289 * normally on parts with too few tx queues
3290 */
3291 return NULL;
3292 }
3293 if (qnum >= N(sc->sc_txq)) {
3294 device_printf(sc->sc_dev,
3295 "hal qnum %u out of range, max %zu!\n",
3296 qnum, N(sc->sc_txq));
3297 ath_hal_releasetxqueue(ah, qnum);
3298 return NULL;
3299 }
3300 if (!ATH_TXQ_SETUP(sc, qnum)) {
3301 ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
3302 sc->sc_txqsetup |= 1<<qnum;
3303 }
3304 return &sc->sc_txq[qnum];
3305#undef N
3306}
3307
3308/*
3309 * Setup a hardware data transmit queue for the specified
3310 * access category (AC). The hal may not support all requested
3311 * queues, in which case it will return a reference to a
3312 * previously setup queue. We record the mapping from ACs
3313 * to h/w queues for use by ath_tx_start and also track
3314 * the set of h/w queues being used to optimize work in the
3315 * transmit interrupt handler and related routines.
3316 */
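/*
 * Example (illustrative, following the note above): on parts with too
 * few h/w queues ath_txq_setup() hands back an already-initialized
 * queue, so two ACs - say WME_AC_VI and WME_AC_VO - may end up sharing
 * a single sc_ac2q[] entry.
 */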
3317static int
3318ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
3319{
3320#define N(a) (sizeof(a)/sizeof(a[0]))
3321 struct ath_txq *txq;
3322
3323 if (ac >= N(sc->sc_ac2q)) {
3324 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
3325 ac, N(sc->sc_ac2q));
3326 return 0;
3327 }
3328 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
3329 if (txq != NULL) {
3330 txq->axq_ac = ac;
3331 sc->sc_ac2q[ac] = txq;
3332 return 1;
3333 } else
3334 return 0;
3335#undef N
3336}
3337
3338/*
3339 * Update WME parameters for a transmit queue.
3340 */
3341static int
3342ath_txq_update(struct ath_softc *sc, int ac)
3343{
3344#define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1)
3345#define ATH_TXOP_TO_US(v) (v<<5)
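	/*
	 * Note: wmep_logcwmin/wmep_logcwmax are CW exponents, so e.g.
	 * ATH_EXPONENT_TO_VALUE(4) = 15 slots; wmep_txopLimit is in the
	 * 802.11 unit of 32us, hence the <<5 (i.e. *32) conversion to us.
	 */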
3346 struct ifnet *ifp = sc->sc_ifp;
3347 struct ieee80211com *ic = ifp->if_l2com;
3348 struct ath_txq *txq = sc->sc_ac2q[ac];
3349 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
3350 struct ath_hal *ah = sc->sc_ah;
3351 HAL_TXQ_INFO qi;
3352
3353 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
3354#ifdef IEEE80211_SUPPORT_TDMA
3355 if (sc->sc_tdma) {
3356 /*
3357 * AIFS is zero so there's no pre-transmit wait. The
3358 * burst time defines the slot duration and is configured
3359 * through net80211. The QCU is setup to not do post-xmit
3360 * back off, lock out all lower-priority QCUs, and fire
3361 * off the DMA beacon alert timer, which is setup based
3362 * on the slot configuration.
3363 */
3364 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
3365 | HAL_TXQ_TXERRINT_ENABLE
3366 | HAL_TXQ_TXURNINT_ENABLE
3367 | HAL_TXQ_TXEOLINT_ENABLE
3368 | HAL_TXQ_DBA_GATED
3369 | HAL_TXQ_BACKOFF_DISABLE
3370 | HAL_TXQ_ARB_LOCKOUT_GLOBAL
3371 ;
3372 qi.tqi_aifs = 0;
3373 /* XXX +dbaprep? */
3374 qi.tqi_readyTime = sc->sc_tdmaslotlen;
3375 qi.tqi_burstTime = qi.tqi_readyTime;
3376 } else {
3377#endif
3378 /*
3379 * XXX shouldn't this just use the default flags
3380 * used in the previous queue setup?
3381 */
3382 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
3383 | HAL_TXQ_TXERRINT_ENABLE
3384 | HAL_TXQ_TXDESCINT_ENABLE
3385 | HAL_TXQ_TXURNINT_ENABLE
3386 | HAL_TXQ_TXEOLINT_ENABLE
3387 ;
3388 qi.tqi_aifs = wmep->wmep_aifsn;
3389 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
3390 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
3391 qi.tqi_readyTime = 0;
3392 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
3393#ifdef IEEE80211_SUPPORT_TDMA
3394 }
3395#endif
3396
3397 DPRINTF(sc, ATH_DEBUG_RESET,
3398 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n",
3399 __func__, txq->axq_qnum, qi.tqi_qflags,
3400 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime);
3401
3402 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
3403 if_printf(ifp, "unable to update hardware queue "
3404 "parameters for %s traffic!\n",
3405 ieee80211_wme_acnames[ac]);
3406 return 0;
3407 } else {
3408 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
3409 return 1;
3410 }
3411#undef ATH_TXOP_TO_US
3412#undef ATH_EXPONENT_TO_VALUE
3413}
3414
3415/*
3416 * Callback from the 802.11 layer to update WME parameters.
3417 */
3418int
3419ath_wme_update(struct ieee80211com *ic)
3420{
3421 struct ath_softc *sc = ic->ic_ifp->if_softc;
3422
3423 return !ath_txq_update(sc, WME_AC_BE) ||
3424 !ath_txq_update(sc, WME_AC_BK) ||
3425 !ath_txq_update(sc, WME_AC_VI) ||
3426 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
3427}
3428
3429/*
3430 * Reclaim resources for a setup queue.
3431 */
3432static void
3433ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
3434{
3435
3436 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
3437 ATH_TXQ_LOCK_DESTROY(txq);
3438 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
3439}
3440
3441/*
3442 * Reclaim all tx queue resources.
3443 */
3444static void
3445ath_tx_cleanup(struct ath_softc *sc)
3446{
3447 int i;
3448
3449 ATH_TXBUF_LOCK_DESTROY(sc);
3450 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
3451 if (ATH_TXQ_SETUP(sc, i))
3452 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
3453}
3454
3455/*
3456 * Return h/w rate index for an IEEE rate (w/o basic rate bit)
3457 * using the current rates in sc_rixmap.
3458 */
3459int
3460ath_tx_findrix(const struct ath_softc *sc, uint8_t rate)
3461{
3462 int rix = sc->sc_rixmap[rate];
3463 /* NB: return lowest rix for invalid rate */
3464 return (rix == 0xff ? 0 : rix);
3465}
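/*
 * Usage sketch (illustrative): net80211 rates are in 0.5 Mb/s units,
 * so looking up 36 Mb/s OFDM would be ath_tx_findrix(sc, 72); any
 * rate not present in sc_rixmap maps to rix 0, the lowest rate.
 */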
3466
3467static void
3468ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts,
3469 struct ath_buf *bf)
3470{
3471 struct ieee80211_node *ni = bf->bf_node;
3472 struct ifnet *ifp = sc->sc_ifp;
3473 struct ieee80211com *ic = ifp->if_l2com;
3474 int sr, lr, pri;
3475
3476 if (ts->ts_status == 0) {
3477 u_int8_t txant = ts->ts_antenna;
3478 sc->sc_stats.ast_ant_tx[txant]++;
3479 sc->sc_ant_tx[txant]++;
3480 if (ts->ts_finaltsi != 0)
3481 sc->sc_stats.ast_tx_altrate++;
3482 pri = M_WME_GETAC(bf->bf_m);
3483 if (pri >= WME_AC_VO)
3484 ic->ic_wme.wme_hipri_traffic++;
3485 if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)
3486 ni->ni_inact = ni->ni_inact_reload;
3487 } else {
3488 if (ts->ts_status & HAL_TXERR_XRETRY)
3489 sc->sc_stats.ast_tx_xretries++;
3490 if (ts->ts_status & HAL_TXERR_FIFO)
3491 sc->sc_stats.ast_tx_fifoerr++;
3492 if (ts->ts_status & HAL_TXERR_FILT)
3493 sc->sc_stats.ast_tx_filtered++;
3494 if (ts->ts_status & HAL_TXERR_XTXOP)
3495 sc->sc_stats.ast_tx_xtxop++;
3496 if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
3497 sc->sc_stats.ast_tx_timerexpired++;
3498
3499 if (ts->ts_status & HAL_TX_DATA_UNDERRUN)
3500 sc->sc_stats.ast_tx_data_underrun++;
3501 if (ts->ts_status & HAL_TX_DELIM_UNDERRUN)
3502 sc->sc_stats.ast_tx_delim_underrun++;
3503
3504 if (bf->bf_m->m_flags & M_FF)
3505 sc->sc_stats.ast_ff_txerr++;
3506 }
3507 /* XXX when is this valid? */
3508 if (ts->ts_status & HAL_TX_DESC_CFG_ERR)
3509 sc->sc_stats.ast_tx_desccfgerr++;
3510
3511 sr = ts->ts_shortretry;
3512 lr = ts->ts_longretry;
3513 sc->sc_stats.ast_tx_shortretry += sr;
3514 sc->sc_stats.ast_tx_longretry += lr;
3515
3516}
3517
3518/*
3519 * The default completion. If fail is 1, this means
3520 * "please don't retry the frame, and just return -1 status
3521 * to the net80211 stack".
3522 */
3523void
3524ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
3525{
3526 struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
3527 int st;
3528
3529 if (fail == 1)
3530 st = -1;
3531 else
3532 st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ?
3533 ts->ts_status : HAL_TXERR_XRETRY;
3534
3535 if (bf->bf_state.bfs_dobaw)
3536 device_printf(sc->sc_dev,
3537 "%s: bf %p: seqno %d: dobaw should've been cleared!\n",
3538 __func__,
3539 bf,
3540 SEQNO(bf->bf_state.bfs_seqno));
3541 if (bf->bf_next != NULL)
3542 device_printf(sc->sc_dev,
3543 "%s: bf %p: seqno %d: bf_next not NULL!\n",
3544 __func__,
3545 bf,
3546 SEQNO(bf->bf_state.bfs_seqno));
3547
3548 /*
3549 * Check if the node software queue is empty; if so
3550 * then clear the TIM.
3551 *
3552 * This needs to be done before the buffer is freed as
3553 * otherwise the node reference will have been released
3554 * and the node may not actually exist any longer.
3555 *
3556 * XXX I don't like this belonging here, but it's cleaner
3557 * to do it here right now than in all the other places
3558 * where ath_tx_default_comp() is called.
3559 *
3560 * XXX TODO: during drain, ensure that the callback is
3561 * being called so we get a chance to update the TIM.
3562 */
3563 if (bf->bf_node)
3564 ath_tx_update_tim(sc, bf->bf_node, 0);
3565
3566 /*
3567 * Do any tx complete callback. Note this must
3568 * be done before releasing the node reference.
3569 * This will free the mbuf, release the net80211
3570 * node and recycle the ath_buf.
3571 */
3572 ath_tx_freebuf(sc, bf, st);
3573}
3574
3575/*
3576 * Update rate control with the given completion status.
3577 */
3578void
3579ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
3580 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen,
3581 int nframes, int nbad)
3582{
3583 struct ath_node *an;
3584
3585 /* Only for unicast frames */
3586 if (ni == NULL)
3587 return;
3588
3589 an = ATH_NODE(ni);
3590 ATH_NODE_UNLOCK_ASSERT(an);
3591
3592 if ((ts->ts_status & HAL_TXERR_FILT) == 0) {
3593 ATH_NODE_LOCK(an);
3594 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad);
3595 ATH_NODE_UNLOCK(an);
3596 }
3597}
3598
3599/*
3600 * Update the busy status of the last frame on the free list.
3601 * When doing TDMA, the busy flag tracks whether the hardware
3602 * currently points to this buffer or not, and thus gated DMA
3603 * may restart by re-reading the last descriptor in this
3604 * buffer.
3605 *
3606 * This should be called in the completion function once one
3607 * of the buffers has been used.
3608 */
3609static void
3610ath_tx_update_busy(struct ath_softc *sc)
3611{
3612 struct ath_buf *last;
3613
3614 /*
3615 * Since the last frame may still be marked
3616 * as ATH_BUF_BUSY, unmark it here before
3617 * finishing the frame processing.
3618 * Since we've completed a frame (aggregate
3619 * or otherwise), the hardware has moved on
3620 * and is no longer referencing the previous
3621 * descriptor.
3622 */
3623 ATH_TXBUF_LOCK_ASSERT(sc);
3624 last = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s);
3625 if (last != NULL)
3626 last->bf_flags &= ~ATH_BUF_BUSY;
3627 last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
3628 if (last != NULL)
3629 last->bf_flags &= ~ATH_BUF_BUSY;
3630}
3631
3632/*
3633 * Process the completion of the given buffer.
3634 *
3635 * This calls the rate control update and then the buffer completion.
3636 * This will either free the buffer or requeue it. In any case, the
3637 * bf pointer should be treated as invalid after this function is called.
3638 */
3639void
3640ath_tx_process_buf_completion(struct ath_softc *sc, struct ath_txq *txq,
3641 struct ath_tx_status *ts, struct ath_buf *bf)
3642{
3643 struct ieee80211_node *ni = bf->bf_node;
3644 struct ath_node *an = NULL;
3645
3646 ATH_TXQ_UNLOCK_ASSERT(txq);
3647
3648 /* If unicast frame, update general statistics */
3649 if (ni != NULL) {
3650 an = ATH_NODE(ni);
3651 /* update statistics */
3652 ath_tx_update_stats(sc, ts, bf);
3653 }
3654
3655 /*
3656 * Call the completion handler.
3657 * The completion handler is responsible for
3658 * calling the rate control code.
3659 *
3660 * Frames with no completion handler get the
3661 * rate control code called here.
3662 */
3663 if (bf->bf_comp == NULL) {
3664 if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
3665 (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) {
3666 /*
3667 * XXX assume this isn't an aggregate
3668 * frame.
3669 */
3670 ath_tx_update_ratectrl(sc, ni,
3671 bf->bf_state.bfs_rc, ts,
3672 bf->bf_state.bfs_pktlen, 1,
3673 (ts->ts_status == 0 ? 0 : 1));
3674 }
3675 ath_tx_default_comp(sc, bf, 0);
3676 } else
3677 bf->bf_comp(sc, bf, 0);
3678}
3679
3680
3681
3682/*
3683 * Process completed xmit descriptors from the specified queue.
3684 * Kick the packet scheduler if needed; this can be done from
3685 * within this task.
3686 */
3687static int
3688ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
3689{
3690 struct ath_hal *ah = sc->sc_ah;
3691 struct ath_buf *bf;
3692 struct ath_desc *ds;
3693 struct ath_tx_status *ts;
3694 struct ieee80211_node *ni;
3695#ifdef IEEE80211_SUPPORT_SUPERG
3696 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
3697#endif /* IEEE80211_SUPPORT_SUPERG */
3698 int nacked;
3699 HAL_STATUS status;
3700
3701 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
3702 __func__, txq->axq_qnum,
3703 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
3704 txq->axq_link);
3705
3706 ATH_KTR(sc, ATH_KTR_TXCOMP, 4,
3707 "ath_tx_processq: txq=%u head %p link %p depth %p",
3708 txq->axq_qnum,
3709 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
3710 txq->axq_link,
3711 txq->axq_depth);
3712
3713 nacked = 0;
3714 for (;;) {
3715 ATH_TXQ_LOCK(txq);
3716 txq->axq_intrcnt = 0; /* reset periodic desc intr count */
3717 bf = TAILQ_FIRST(&txq->axq_q);
3718 if (bf == NULL) {
3719 ATH_TXQ_UNLOCK(txq);
3720 break;
3721 }
3722 ds = bf->bf_lastds; /* XXX must be setup correctly! */
3723 ts = &bf->bf_status.ds_txstat;
3724
3725 status = ath_hal_txprocdesc(ah, ds, ts);
3726#ifdef ATH_DEBUG
3727 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
3728 ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
3729 status == HAL_OK);
3730 else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0))
3731 ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
3732 status == HAL_OK);
3733#endif
3734
3735 if (status == HAL_EINPROGRESS) {
3736 ATH_KTR(sc, ATH_KTR_TXCOMP, 3,
3737 "ath_tx_processq: txq=%u, bf=%p ds=%p, HAL_EINPROGRESS",
3738 txq->axq_qnum, bf, ds);
3739 ATH_TXQ_UNLOCK(txq);
3740 break;
3741 }
3742 ATH_TXQ_REMOVE(txq, bf, bf_list);
3743#ifdef IEEE80211_SUPPORT_TDMA
3744 if (txq->axq_depth > 0) {
3745 /*
3746 * More frames follow. Mark the buffer busy
3747 * so it's not re-used while the hardware may
3748 * still re-read the link field in the descriptor.
3749 *
3750 * Use the last buffer in an aggregate as that
3751 * is where the hardware may be - intermediate
3752 * descriptors won't be "busy".
3753 */
3754 bf->bf_last->bf_flags |= ATH_BUF_BUSY;
3755 } else
3756#else
3757 if (txq->axq_depth == 0)
3758#endif
3759 txq->axq_link = NULL;
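		/*
		 * Note: mind the preprocessor above: with TDMA the
		 * busy-marking branch takes the depth > 0 case and its
		 * else arm clears axq_link; without TDMA there is no
		 * busy marking and axq_link is cleared once the queue
		 * is empty.
		 */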
3760 if (bf->bf_state.bfs_aggr)
3761 txq->axq_aggr_depth--;
3762
3763 ni = bf->bf_node;
3764
3765 ATH_KTR(sc, ATH_KTR_TXCOMP, 5,
3766 "ath_tx_processq: txq=%u, bf=%p, ds=%p, ni=%p, ts_status=0x%08x",
3767 txq->axq_qnum, bf, ds, ni, ts->ts_status);
3768 /*
3769 * If a unicast frame was ack'd, update the RSSI,
3770 * including the last rx time used to
3771 * work around phantom bmiss interrupts.
3772 */
3773 if (ni != NULL && ts->ts_status == 0 &&
3774 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) {
3775 nacked++;
3776 sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
3777 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
3778 ts->ts_rssi);
3779 }
3780 ATH_TXQ_UNLOCK(txq);
3781
3782 /*
3783 * Update statistics and call completion
3784 */
3785 ath_tx_process_buf_completion(sc, txq, ts, bf);
3786
3787 /* XXX at this point, bf and ni may be totally invalid */
3788 }
3789#ifdef IEEE80211_SUPPORT_SUPERG
3790 /*
3791 * Flush fast-frame staging queue when traffic slows.
3792 */
3793 if (txq->axq_depth <= 1)
3794 ieee80211_ff_flush(ic, txq->axq_ac);
3795#endif
3796
3797 /* Kick the TXQ scheduler */
3798 if (dosched) {
3799 ATH_TXQ_LOCK(txq);
3800 ath_txq_sched(sc, txq);
3801 ATH_TXQ_UNLOCK(txq);
3802 }
3803
3804 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
3805 "ath_tx_processq: txq=%u: done",
3806 txq->axq_qnum);
3807
3808 return nacked;
3809}
3810
3811#define TXQACTIVE(t, q) ( (t) & (1 << (q)))
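/* e.g. TXQACTIVE(0x09, 0) and TXQACTIVE(0x09, 3) are non-zero: queues 0 and 3 completed. */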
3812
3813/*
3814 * Deferred processing of transmit interrupt; special-cased
3815 * for a single hardware transmit queue (e.g. 5210 and 5211).
3816 */
3817static void
3818ath_tx_proc_q0(void *arg, int npending)
3819{
3820 struct ath_softc *sc = arg;
3821 struct ifnet *ifp = sc->sc_ifp;
3822 uint32_t txqs;
3823
3824 ATH_PCU_LOCK(sc);
3825 sc->sc_txproc_cnt++;
3826 txqs = sc->sc_txq_active;
3827 sc->sc_txq_active &= ~txqs;
3828 ATH_PCU_UNLOCK(sc);
3829
3830 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
3831 "ath_tx_proc_q0: txqs=0x%08x", txqs);
3832
3833 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1))
3834 /* XXX why is lastrx updated in tx code? */
3835 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
3836 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
3837 ath_tx_processq(sc, sc->sc_cabq, 1);
3838 IF_LOCK(&ifp->if_snd);
3839 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3840 IF_UNLOCK(&ifp->if_snd);
3841 sc->sc_wd_timer = 0;
3842
3843 if (sc->sc_softled)
3844 ath_led_event(sc, sc->sc_txrix);
3845
3846 ATH_PCU_LOCK(sc);
3847 sc->sc_txproc_cnt--;
3848 ATH_PCU_UNLOCK(sc);
3849
3850 ath_tx_kick(sc);
3851}
3852
3853/*
3854 * Deferred processing of transmit interrupt; special-cased
3855 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
3856 */
3857static void
3858ath_tx_proc_q0123(void *arg, int npending)
3859{
3860 struct ath_softc *sc = arg;
3861 struct ifnet *ifp = sc->sc_ifp;
3862 int nacked;
3863 uint32_t txqs;
3864
3865 ATH_PCU_LOCK(sc);
3866 sc->sc_txproc_cnt++;
3867 txqs = sc->sc_txq_active;
3868 sc->sc_txq_active &= ~txqs;
3869 ATH_PCU_UNLOCK(sc);
3870
3871 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
3872 "ath_tx_proc_q0123: txqs=0x%08x", txqs);
3873
3874 /*
3875 * Process each active queue.
3876 */
3877 nacked = 0;
3878 if (TXQACTIVE(txqs, 0))
3879 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1);
3880 if (TXQACTIVE(txqs, 1))
3881 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1);
3882 if (TXQACTIVE(txqs, 2))
3883 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1);
3884 if (TXQACTIVE(txqs, 3))
3885 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1);
3886 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
3887 ath_tx_processq(sc, sc->sc_cabq, 1);
3888 if (nacked)
3889 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
3890
3891 IF_LOCK(&ifp->if_snd);
3892 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3893 IF_UNLOCK(&ifp->if_snd);
3894 sc->sc_wd_timer = 0;
3895
3896 if (sc->sc_softled)
3897 ath_led_event(sc, sc->sc_txrix);
3898
3899 ATH_PCU_LOCK(sc);
3900 sc->sc_txproc_cnt--;
3901 ATH_PCU_UNLOCK(sc);
3902
3903 ath_tx_kick(sc);
3904}
3905
3906/*
3907 * Deferred processing of transmit interrupt.
3908 */
3909static void
3910ath_tx_proc(void *arg, int npending)
3911{
3912 struct ath_softc *sc = arg;
3913 struct ifnet *ifp = sc->sc_ifp;
3914 int i, nacked;
3915 uint32_t txqs;
3916
3917 ATH_PCU_LOCK(sc);
3918 sc->sc_txproc_cnt++;
3919 txqs = sc->sc_txq_active;
3920 sc->sc_txq_active &= ~txqs;
3921 ATH_PCU_UNLOCK(sc);
3922
3923 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc: txqs=0x%08x", txqs);
3924
3925 /*
3926 * Process each active queue.
3927 */
3928 nacked = 0;
3929 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
3930 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i))
3931 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1);
3932 if (nacked)
3933 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
3934
3935 /* XXX check this inside of IF_LOCK? */
3936 IF_LOCK(&ifp->if_snd);
3937 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3938 IF_UNLOCK(&ifp->if_snd);
3939 sc->sc_wd_timer = 0;
3940
3941 if (sc->sc_softled)
3942 ath_led_event(sc, sc->sc_txrix);
3943
3944 ATH_PCU_LOCK(sc);
3945 sc->sc_txproc_cnt--;
3946 ATH_PCU_UNLOCK(sc);
3947
3948 ath_tx_kick(sc);
3949}
3950#undef TXQACTIVE
3951
3952/*
3953 * Deferred processing of TXQ rescheduling.
3954 */
3955static void
3956ath_txq_sched_tasklet(void *arg, int npending)
3957{
3958 struct ath_softc *sc = arg;
3959 int i;
3960
3961 /* XXX is skipping ok? */
3962 ATH_PCU_LOCK(sc);
3963#if 0
3964 if (sc->sc_inreset_cnt > 0) {
3965 device_printf(sc->sc_dev,
3966 "%s: sc_inreset_cnt > 0; skipping\n", __func__);
3967 ATH_PCU_UNLOCK(sc);
3968 return;
3969 }
3970#endif
3971 sc->sc_txproc_cnt++;
3972 ATH_PCU_UNLOCK(sc);
3973
3974 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
3975 if (ATH_TXQ_SETUP(sc, i)) {
3976 ATH_TXQ_LOCK(&sc->sc_txq[i]);
3977 ath_txq_sched(sc, &sc->sc_txq[i]);
3978 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
3979 }
3980 }
3981
3982 ATH_PCU_LOCK(sc);
3983 sc->sc_txproc_cnt--;
3984 ATH_PCU_UNLOCK(sc);
3985}
3986
3987void
3988ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf)
3989{
3990
3991 ATH_TXBUF_LOCK_ASSERT(sc);
3992
3993 if (bf->bf_flags & ATH_BUF_MGMT)
3994 TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list);
3995 else {
3996 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
3997 sc->sc_txbuf_cnt++;
3998 if (sc->sc_txbuf_cnt > ath_txbuf) {
3999 device_printf(sc->sc_dev,
4000 "%s: sc_txbuf_cnt > %d?\n",
4001 __func__,
4002 ath_txbuf);
4003 sc->sc_txbuf_cnt = ath_txbuf;
4004 }
4005 }
4006}
4007
4008void
4009ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf)
4010{
4011
4012 ATH_TXBUF_LOCK_ASSERT(sc);
4013
4014 if (bf->bf_flags & ATH_BUF_MGMT)
4015 TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list);
4016 else {
4017 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
4018 sc->sc_txbuf_cnt++;
4019 if (sc->sc_txbuf_cnt > ATH_TXBUF) {
4020 device_printf(sc->sc_dev,
4021 "%s: sc_txbuf_cnt > %d?\n",
4022 __func__,
4023 ATH_TXBUF);
4024 sc->sc_txbuf_cnt = ATH_TXBUF;
4025 }
4026 }
4027}
4028
4029/*
4030 * Return a buffer to the pool and update the 'busy' flag on the
4031 * previous 'tail' entry.
4032 *
4033 * This _must_ only be called when the buffer is involved in a completed
4034 * TX. The logic is that if it was part of an active TX, the previous
4035 * buffer on the list is now not involved in a halted TX DMA queue, waiting
4036 * for restart (e.g. for TDMA).
4037 *
4038 * The caller must free the mbuf and recycle the node reference.
4039 */
4040void
4041ath_freebuf(struct ath_softc *sc, struct ath_buf *bf)
4042{
4043 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
4044 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE);
4045
4046 KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__));
4047 KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__));
4048
4049 ATH_TXBUF_LOCK(sc);
4050 ath_tx_update_busy(sc);
4051 ath_returnbuf_tail(sc, bf);
4052 ATH_TXBUF_UNLOCK(sc);
4053}
4054
4055/*
4056 * This is currently used by ath_tx_draintxq() and
4057 * ath_tx_tid_free_pkts().
4058 *
4059 * It recycles a single ath_buf.
4060 */
4061void
4062ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status)
4063{
4064 struct ieee80211_node *ni = bf->bf_node;
4065 struct mbuf *m0 = bf->bf_m;
4066
4067 bf->bf_node = NULL;
4068 bf->bf_m = NULL;
4069
4070 /* Free the buffer, it's not needed any longer */
4071 ath_freebuf(sc, bf);
4072
4073 if (ni != NULL) {
4074 /*
4075 * Do any callback and reclaim the node reference.
4076 */
4077 if (m0->m_flags & M_TXCB)
4078 ieee80211_process_callback(ni, m0, status);
4079 ieee80211_free_node(ni);
4080 }
4081 m_freem(m0);
4082
4083 /*
4084 * XXX the buffer used to be freed -after-, but the DMA map was
4085 * freed where ath_freebuf() now is. I've no idea what this
4086 * will do.
4087 */
4088}
4089
4090void
4091ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
4092{
4093#ifdef ATH_DEBUG
4094 struct ath_hal *ah = sc->sc_ah;
4095#endif
4096 struct ath_buf *bf;
4097 u_int ix;
4098
4099 /*
4100 * NB: this assumes output has been stopped and
4101 * we do not need to block ath_tx_proc
4102 */
4103 ATH_TXBUF_LOCK(sc);
4104 bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
4105 if (bf != NULL)
4106 bf->bf_flags &= ~ATH_BUF_BUSY;
4107 bf = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s);
4108 if (bf != NULL)
4109 bf->bf_flags &= ~ATH_BUF_BUSY;
4110 ATH_TXBUF_UNLOCK(sc);
4111
4112 for (ix = 0;; ix++) {
4113 ATH_TXQ_LOCK(txq);
4114 bf = TAILQ_FIRST(&txq->axq_q);
4115 if (bf == NULL) {
4116 txq->axq_link = NULL;
4117 /*
4118 * There's currently no flag that indicates
4119 * a buffer is on the FIFO. So until that
4120 * occurs, just clear the FIFO counter here.
4121 *
4122 * Yes, this means that if something in parallel
4123 * is pushing things onto this TXQ and pushing
4124 * _that_ into the hardware, things will get
4125 * very fruity very quickly.
4126 */
4127 txq->axq_fifo_depth = 0;
4128 ATH_TXQ_UNLOCK(txq);
4129 break;
4130 }
4131 ATH_TXQ_REMOVE(txq, bf, bf_list);
4132 if (bf->bf_state.bfs_aggr)
4133 txq->axq_aggr_depth--;
4134#ifdef ATH_DEBUG
4135 if (sc->sc_debug & ATH_DEBUG_RESET) {
4136 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
4137 int status = 0;
4138
4139 /*
4140 * EDMA operation has a TX completion FIFO
4141 * separate from the TX descriptor, so this
4142 * method of checking the "completion" status
4143 * is wrong.
4144 */
4145 if (! sc->sc_isedma) {
4146 status = (ath_hal_txprocdesc(ah,
4147 bf->bf_lastds,
4148 &bf->bf_status.ds_txstat) == HAL_OK);
4149 }
4150 ath_printtxbuf(sc, bf, txq->axq_qnum, ix, status);
4151 ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *),
4152 bf->bf_m->m_len, 0, -1);
4153 }
4154#endif /* ATH_DEBUG */
4155 /*
4156 * Since we're now doing magic in the completion
4157 * functions, we -must- call it for aggregation
4158 * destinations or BAW tracking will get upset.
4159 */
4160 /*
4161 * Clear ATH_BUF_BUSY; the completion handler
4162 * will free the buffer.
4163 */
4164 ATH_TXQ_UNLOCK(txq);
4165 bf->bf_flags &= ~ATH_BUF_BUSY;
4166 if (bf->bf_comp)
4167 bf->bf_comp(sc, bf, 1);
4168 else
4169 ath_tx_default_comp(sc, bf, 1);
4170 }
4171
4172 /*
4173 * Drain software queued frames which are on
4174 * active TIDs.
4175 */
4176 ath_tx_txq_drain(sc, txq);
4177}
4178
4179static void
4180ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
4181{
4182 struct ath_hal *ah = sc->sc_ah;
4183
4184 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
4185 __func__, txq->axq_qnum,
4186 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
4187 txq->axq_link);
4188 (void) ath_hal_stoptxdma(ah, txq->axq_qnum);
4189}
4190
4191int
4192ath_stoptxdma(struct ath_softc *sc)
4193{
4194 struct ath_hal *ah = sc->sc_ah;
4195 int i;
4196
4197 /* XXX return value */
4198 if (sc->sc_invalid)
4199 return 0;
4200
4201 if (!sc->sc_invalid) {
4202 /* don't touch the hardware if marked invalid */
4203 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
4204 __func__, sc->sc_bhalq,
4205 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
4206 NULL);
4207 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
4208 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4209 if (ATH_TXQ_SETUP(sc, i))
4210 ath_tx_stopdma(sc, &sc->sc_txq[i]);
4211 }
4212
4213 return 1;
4214}
4215
4216/*
4217 * Drain the transmit queues and reclaim resources.
4218 */
4219void
4220ath_legacy_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
4221{
4222#ifdef ATH_DEBUG
4223 struct ath_hal *ah = sc->sc_ah;
4224#endif
4225 struct ifnet *ifp = sc->sc_ifp;
4226 int i;
4227
4228 (void) ath_stoptxdma(sc);
4229
4230 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
4231 /*
4232 * XXX TODO: should we just handle the completed TX frames
4233 * here, whether or not the reset is a full one or not?
4234 */
4235 if (ATH_TXQ_SETUP(sc, i)) {
4236 if (reset_type == ATH_RESET_NOLOSS)
4237 ath_tx_processq(sc, &sc->sc_txq[i], 0);
4238 else
4239 ath_tx_draintxq(sc, &sc->sc_txq[i]);
4240 }
4241 }
4242#ifdef ATH_DEBUG
4243 if (sc->sc_debug & ATH_DEBUG_RESET) {
4244 struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf);
4245 if (bf != NULL && bf->bf_m != NULL) {
4246 ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
4247 ath_hal_txprocdesc(ah, bf->bf_lastds,
4248 &bf->bf_status.ds_txstat) == HAL_OK);
4249 ieee80211_dump_pkt(ifp->if_l2com,
4250 mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
4251 0, -1);
4252 }
4253 }
4254#endif /* ATH_DEBUG */
4255 IF_LOCK(&ifp->if_snd);
4256 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4257 IF_UNLOCK(&ifp->if_snd);
4258 sc->sc_wd_timer = 0;
4259}
4260
4261/*
4262 * Update internal state after a channel change.
4263 */
4264static void
4265ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
4266{
4267 enum ieee80211_phymode mode;
4268
4269 /*
4270 * Change channels and update the h/w rate map
4271 * if we're switching; e.g. 11a to 11b/g.
4272 */
4273 mode = ieee80211_chan2mode(chan);
4274 if (mode != sc->sc_curmode)
4275 ath_setcurmode(sc, mode);
4276 sc->sc_curchan = chan;
4277}
4278
4279/*
4280 * Set/change channels. If the channel is really being changed,
4281 * it's done by resetting the chip. To accomplish this we must
4282 * first clean up any pending DMA, then restart things as in
4283 * ath_init.
4284 */
4285static int
4286ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
4287{
4288 struct ifnet *ifp = sc->sc_ifp;
4289 struct ieee80211com *ic = ifp->if_l2com;
4290 struct ath_hal *ah = sc->sc_ah;
4291 int ret = 0;
4292
4293 /* Treat this as an interface reset */
4294 ATH_PCU_UNLOCK_ASSERT(sc);
4295 ATH_UNLOCK_ASSERT(sc);
4296
4297	/* (Try to) stop TX/RX from occurring */
4298 taskqueue_block(sc->sc_tq);
4299
4300 ATH_PCU_LOCK(sc);
4301 ath_hal_intrset(ah, 0); /* Stop new RX/TX completion */
4302 ath_txrx_stop_locked(sc); /* Stop pending RX/TX completion */
4303 if (ath_reset_grablock(sc, 1) == 0) {
4304 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
4305 __func__);
4306 }
4307 ATH_PCU_UNLOCK(sc);
4308
4309 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
4310 __func__, ieee80211_chan2ieee(ic, chan),
4311 chan->ic_freq, chan->ic_flags);
4312 if (chan != sc->sc_curchan) {
4313 HAL_STATUS status;
4314 /*
4315 * To switch channels clear any pending DMA operations;
4316 * wait long enough for the RX fifo to drain, reset the
4317 * hardware at the new frequency, and then re-enable
4318 * the relevant bits of the h/w.
4319 */
4320#if 0
4321 ath_hal_intrset(ah, 0); /* disable interrupts */
4322#endif
4323 ath_stoprecv(sc, 1); /* turn off frame recv */
4324 /*
4325 * First, handle completed TX/RX frames.
4326 */
4327 ath_rx_flush(sc);
4328 ath_draintxq(sc, ATH_RESET_NOLOSS);
4329 /*
4330 * Next, flush the non-scheduled frames.
4331 */
4332 ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */
4333
4334 if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) {
4335 if_printf(ifp, "%s: unable to reset "
4336 "channel %u (%u MHz, flags 0x%x), hal status %u\n",
4337 __func__, ieee80211_chan2ieee(ic, chan),
4338 chan->ic_freq, chan->ic_flags, status);
4339 ret = EIO;
4340 goto finish;
4341 }
4342 sc->sc_diversity = ath_hal_getdiversity(ah);
4343
4344 /* Let DFS at it in case it's a DFS channel */
4345 ath_dfs_radar_enable(sc, chan);
4346
4347 /*
4348 * Re-enable rx framework.
4349 */
4350 if (ath_startrecv(sc) != 0) {
4351 if_printf(ifp, "%s: unable to restart recv logic\n",
4352 __func__);
4353 ret = EIO;
4354 goto finish;
4355 }
4356
4357 /*
4358 * Change channels and update the h/w rate map
4359 * if we're switching; e.g. 11a to 11b/g.
4360 */
4361 ath_chan_change(sc, chan);
4362
4363 /*
4364 * Reset clears the beacon timers; reset them
4365 * here if needed.
4366 */
4367 if (sc->sc_beacons) { /* restart beacons */
4368#ifdef IEEE80211_SUPPORT_TDMA
4369 if (sc->sc_tdma)
4370 ath_tdma_config(sc, NULL);
4371 else
4372#endif
4373 ath_beacon_config(sc, NULL);
4374 }
4375
4376 /*
4377 * Re-enable interrupts.
4378 */
4379#if 0
4380 ath_hal_intrset(ah, sc->sc_imask);
4381#endif
4382 }
4383
4384finish:
4385 ATH_PCU_LOCK(sc);
4386 sc->sc_inreset_cnt--;
4387 /* XXX only do this if sc_inreset_cnt == 0? */
4388 ath_hal_intrset(ah, sc->sc_imask);
4389 ATH_PCU_UNLOCK(sc);
4390
4391 IF_LOCK(&ifp->if_snd);
4392 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4393 IF_UNLOCK(&ifp->if_snd);
4394 ath_txrx_start(sc);
4395 /* XXX ath_start? */
4396
4397 return ret;
4398}
4399
4400/*
4401 * Periodically recalibrate the PHY to account
4402 * for temperature/environment changes.
4403 */
4404static void
4405ath_calibrate(void *arg)
4406{
4407 struct ath_softc *sc = arg;
4408 struct ath_hal *ah = sc->sc_ah;
4409 struct ifnet *ifp = sc->sc_ifp;
4410 struct ieee80211com *ic = ifp->if_l2com;
4411 HAL_BOOL longCal, isCalDone = AH_TRUE;
4412 HAL_BOOL aniCal, shortCal = AH_FALSE;
4413 int nextcal;
4414
4415 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */
4416 goto restart;
4417 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz);
4418 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000);
4419 if (sc->sc_doresetcal)
4420 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000);
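	/*
	 * Note: the scale factors above imply ath_longcalinterval is in
	 * seconds (*hz) while the short and ANI cal intervals are in
	 * milliseconds (*hz/1000).
	 */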
4421
4422 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal);
4423 if (aniCal) {
4424 sc->sc_stats.ast_ani_cal++;
4425 sc->sc_lastani = ticks;
4426 ath_hal_ani_poll(ah, sc->sc_curchan);
4427 }
4428
4429 if (longCal) {
4430 sc->sc_stats.ast_per_cal++;
4431 sc->sc_lastlongcal = ticks;
4432 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
4433 /*
4434 * Rfgain is out of bounds, reset the chip
4435 * to load new gain values.
4436 */
4437 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
4438 "%s: rfgain change\n", __func__);
4439 sc->sc_stats.ast_per_rfgain++;
4440 sc->sc_resetcal = 0;
4441 sc->sc_doresetcal = AH_TRUE;
4442 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
4443 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
4444 return;
4445 }
4446 /*
4447 * If this long cal is after an idle period, then
4448 * reset the data collection state so we start fresh.
4449 */
4450 if (sc->sc_resetcal) {
4451 (void) ath_hal_calreset(ah, sc->sc_curchan);
4452 sc->sc_lastcalreset = ticks;
4453 sc->sc_lastshortcal = ticks;
4454 sc->sc_resetcal = 0;
4455 sc->sc_doresetcal = AH_TRUE;
4456 }
4457 }
4458
4459 /* Only call if we're doing a short/long cal, not for ANI calibration */
4460 if (shortCal || longCal) {
4461 isCalDone = AH_FALSE;
4462 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) {
4463 if (longCal) {
4464 /*
4465 * Calibrate noise floor data again in case of change.
4466 */
4467 ath_hal_process_noisefloor(ah);
4468 }
4469 } else {
4470 DPRINTF(sc, ATH_DEBUG_ANY,
4471 "%s: calibration of channel %u failed\n",
4472 __func__, sc->sc_curchan->ic_freq);
4473 sc->sc_stats.ast_per_calfail++;
4474 }
4475 if (shortCal)
4476 sc->sc_lastshortcal = ticks;
4477 }
4478 if (!isCalDone) {
4479restart:
4480 /*
4481 * Use a shorter interval to potentially collect multiple
4482 * data samples required to complete calibration. Once
4483 * we're told the work is done we drop back to a longer
4484 * interval between requests. We're more aggressive doing
4485 * work when operating as an AP to improve operation right
4486 * after startup.
4487 */
4488 sc->sc_lastshortcal = ticks;
4489 nextcal = ath_shortcalinterval*hz/1000;
4490 if (sc->sc_opmode != HAL_M_HOSTAP)
4491 nextcal *= 10;
4492 sc->sc_doresetcal = AH_TRUE;
4493 } else {
4494 /* nextcal should be the shortest time for next event */
4495 nextcal = ath_longcalinterval*hz;
4496 if (sc->sc_lastcalreset == 0)
4497 sc->sc_lastcalreset = sc->sc_lastlongcal;
4498 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz)
4499 sc->sc_resetcal = 1; /* setup reset next trip */
4500 sc->sc_doresetcal = AH_FALSE;
4501 }
4502 /* ANI calibration may occur more often than short/long/resetcal */
4503 if (ath_anicalinterval > 0)
4504 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000);
4505
4506 if (nextcal != 0) {
4507 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n",
4508 __func__, nextcal, isCalDone ? "" : "!");
4509 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc);
4510 } else {
4511 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n",
4512 __func__);
4513 /* NB: don't rearm timer */
4514 }
4515}
4516
4517static void
4518ath_scan_start(struct ieee80211com *ic)
4519{
4520 struct ifnet *ifp = ic->ic_ifp;
4521 struct ath_softc *sc = ifp->if_softc;
4522 struct ath_hal *ah = sc->sc_ah;
4523 u_int32_t rfilt;
4524
4525 /* XXX calibration timer? */
4526
4527 ATH_LOCK(sc);
4528 sc->sc_scanning = 1;
4529 sc->sc_syncbeacon = 0;
4530 rfilt = ath_calcrxfilter(sc);
4531 ATH_UNLOCK(sc);
4532
4533 ATH_PCU_LOCK(sc);
4534 ath_hal_setrxfilter(ah, rfilt);
4535 ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0);
4536 ATH_PCU_UNLOCK(sc);
4537
4538 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
4539 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr));
4540}
4541
4542static void
4543ath_scan_end(struct ieee80211com *ic)
4544{
4545 struct ifnet *ifp = ic->ic_ifp;
4546 struct ath_softc *sc = ifp->if_softc;
4547 struct ath_hal *ah = sc->sc_ah;
4548 u_int32_t rfilt;
4549
4550 ATH_LOCK(sc);
4551 sc->sc_scanning = 0;
4552 rfilt = ath_calcrxfilter(sc);
4553 ATH_UNLOCK(sc);
4554
4555 ATH_PCU_LOCK(sc);
4556 ath_hal_setrxfilter(ah, rfilt);
4557 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
4558
4559 ath_hal_process_noisefloor(ah);
4560 ATH_PCU_UNLOCK(sc);
4561
4562 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
4563 __func__, rfilt, ether_sprintf(sc->sc_curbssid),
4564 sc->sc_curaid);
4565}
4566
4567#ifdef ATH_ENABLE_11N
4568/*
4569 * For now, just do a channel change.
4570 *
4571 * Later, we'll go through the hard slog of suspending tx/rx, changing rate
4572 * control state and resetting the hardware without dropping frames out
4573 * of the queue.
4574 *
4575 * The unfortunate trouble here is making absolutely sure that the
4576 * channel width change has propagated enough so the hardware
4577 * absolutely isn't handed bogus frames for its current operating
4578 * mode. (E.g., 40MHz frames in 20MHz mode.) Since TX and RX can and
4579 * does occur in parallel, we need to make certain we've blocked
4580 * any further ongoing TX (and RX, that can cause raw TX)
4581 * before we do this.
4582 */
4583static void
4584ath_update_chw(struct ieee80211com *ic)
4585{
4586 struct ifnet *ifp = ic->ic_ifp;
4587 struct ath_softc *sc = ifp->if_softc;
4588
4589 DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__);
4590 ath_set_channel(ic);
4591}
4592#endif /* ATH_ENABLE_11N */
4593
4594static void
4595ath_set_channel(struct ieee80211com *ic)
4596{
4597 struct ifnet *ifp = ic->ic_ifp;
4598 struct ath_softc *sc = ifp->if_softc;
4599
4600 (void) ath_chan_set(sc, ic->ic_curchan);
4601 /*
4602 * If we are returning to our bss channel then mark state
4603 * so the next recv'd beacon's tsf will be used to sync the
4604 * beacon timers. Note that since we only hear beacons in
4605 * sta/ibss mode this has no effect in other operating modes.
4606 */
4607 ATH_LOCK(sc);
4608 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
4609 sc->sc_syncbeacon = 1;
4610 ATH_UNLOCK(sc);
4611}
4612
4613/*
4614 * Walk the vap list and check if there are any vaps in RUN state.
4615 */
4616static int
4617ath_isanyrunningvaps(struct ieee80211vap *this)
4618{
4619 struct ieee80211com *ic = this->iv_ic;
4620 struct ieee80211vap *vap;
4621
4622 IEEE80211_LOCK_ASSERT(ic);
4623
4624 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
4625 if (vap != this && vap->iv_state >= IEEE80211_S_RUN)
4626 return 1;
4627 }
4628 return 0;
4629}
4630
4631static int
4632ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4633{
4634 struct ieee80211com *ic = vap->iv_ic;
4635 struct ath_softc *sc = ic->ic_ifp->if_softc;
4636 struct ath_vap *avp = ATH_VAP(vap);
4637 struct ath_hal *ah = sc->sc_ah;
4638 struct ieee80211_node *ni = NULL;
4639 int i, error, stamode;
4640 u_int32_t rfilt;
4641 int csa_run_transition = 0;
4642 static const HAL_LED_STATE leds[] = {
4643 HAL_LED_INIT, /* IEEE80211_S_INIT */
4644 HAL_LED_SCAN, /* IEEE80211_S_SCAN */
4645 HAL_LED_AUTH, /* IEEE80211_S_AUTH */
4646 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */
4647 HAL_LED_RUN, /* IEEE80211_S_CAC */
4648 HAL_LED_RUN, /* IEEE80211_S_RUN */
4649 HAL_LED_RUN, /* IEEE80211_S_CSA */
4650 HAL_LED_RUN, /* IEEE80211_S_SLEEP */
4651 };
4652
4653 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
4654 ieee80211_state_name[vap->iv_state],
4655 ieee80211_state_name[nstate]);
4656
4657 /*
4658 * net80211 _should_ have the comlock asserted at this point.
4659 * There are some comments around the calls to vap->iv_newstate
4660 * which indicate that it (newstate) may end up dropping the
4661 * lock. This and the subsequent lock assert check after newstate
4662 * are an attempt to catch these and figure out how/why.
4663 */
4664 IEEE80211_LOCK_ASSERT(ic);
4665
4666 if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN)
4667 csa_run_transition = 1;
4668
4669 callout_drain(&sc->sc_cal_ch);
4670 ath_hal_setledstate(ah, leds[nstate]); /* set LED */
4671
4672 if (nstate == IEEE80211_S_SCAN) {
4673 /*
4674 * Scanning: turn off beacon miss and don't beacon.
4675 * Mark beacon state so when we reach RUN state we'll
4676 * [re]setup beacons. Unblock the task q thread so
4677 * deferred interrupt processing is done.
4678 */
4679 ath_hal_intrset(ah,
4680 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
4681 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
4682 sc->sc_beacons = 0;
4683 taskqueue_unblock(sc->sc_tq);
4684 }
4685
4686 ni = ieee80211_ref_node(vap->iv_bss);
4687 rfilt = ath_calcrxfilter(sc);
4688 stamode = (vap->iv_opmode == IEEE80211_M_STA ||
4689 vap->iv_opmode == IEEE80211_M_AHDEMO ||
4690 vap->iv_opmode == IEEE80211_M_IBSS);
4691 if (stamode && nstate == IEEE80211_S_RUN) {
4692 sc->sc_curaid = ni->ni_associd;
4693 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
4694 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
4695 }
4696 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
4697 __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
4698 ath_hal_setrxfilter(ah, rfilt);
4699
4700 /* XXX is this to restore keycache on resume? */
4701 if (vap->iv_opmode != IEEE80211_M_STA &&
4702 (vap->iv_flags & IEEE80211_F_PRIVACY)) {
4703 for (i = 0; i < IEEE80211_WEP_NKID; i++)
4704 if (ath_hal_keyisvalid(ah, i))
4705 ath_hal_keysetmac(ah, i, ni->ni_bssid);
4706 }
4707
4708 /*
4709 * Invoke the parent method to do net80211 work.
4710 */
4711 error = avp->av_newstate(vap, nstate, arg);
4712 if (error != 0)
4713 goto bad;
4714
4715 /*
4716 * See above: ensure av_newstate() doesn't drop the lock
4717 * on us.
4718 */
4719 IEEE80211_LOCK_ASSERT(ic);
4720
4721 if (nstate == IEEE80211_S_RUN) {
4722 /* NB: collect bss node again, it may have changed */
4723 ieee80211_free_node(ni);
4724 ni = ieee80211_ref_node(vap->iv_bss);
4725
4726 DPRINTF(sc, ATH_DEBUG_STATE,
4727 "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
4728 "capinfo 0x%04x chan %d\n", __func__,
4729 vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid),
4730 ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));
4731
4732 switch (vap->iv_opmode) {
4733#ifdef IEEE80211_SUPPORT_TDMA
4734 case IEEE80211_M_AHDEMO:
4735 if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
4736 break;
4737 /* fall thru... */
4738#endif
4739 case IEEE80211_M_HOSTAP:
4740 case IEEE80211_M_IBSS:
4741 case IEEE80211_M_MBSS:
4742 /*
4743 * Allocate and setup the beacon frame.
4744 *
4745 * Stop any previous beacon DMA. This may be
4746 * necessary, for example, when an ibss merge
4747 * causes reconfiguration; there will be a state
4748 * transition from RUN->RUN that means we may
4749 * be called with beacon transmission active.
4750 */
4751 ath_hal_stoptxdma(ah, sc->sc_bhalq);
4752
4753 error = ath_beacon_alloc(sc, ni);
4754 if (error != 0)
4755 goto bad;
4756 /*
4757 * If joining an adhoc network defer beacon timer
4758 * configuration to the next beacon frame so we
4759 * have a current TSF to use. Otherwise we're
4760 * starting an ibss/bss so there's no need to delay;
4761 * if this is the first vap moving to RUN state, then
4762 * beacon state needs to be [re]configured.
4763 */
4764 if (vap->iv_opmode == IEEE80211_M_IBSS &&
4765 ni->ni_tstamp.tsf != 0) {
4766 sc->sc_syncbeacon = 1;
4767 } else if (!sc->sc_beacons) {
4768#ifdef IEEE80211_SUPPORT_TDMA
4769 if (vap->iv_caps & IEEE80211_C_TDMA)
4770 ath_tdma_config(sc, vap);
4771 else
4772#endif
4773 ath_beacon_config(sc, vap);
4774 sc->sc_beacons = 1;
4775 }
4776 break;
4777 case IEEE80211_M_STA:
4778 /*
4779 * Defer beacon timer configuration to the next
4780 * beacon frame so we have a current TSF to use
4781 * (any TSF collected when scanning is likely old).
4782 * However if it's due to a CSA -> RUN transition,
4783 * force a beacon update so we pick up a lack of
4784 * beacons from an AP in CAC and thus force a
4785 * scan.
4786 */
4787 sc->sc_syncbeacon = 1;
4788 if (csa_run_transition)
4789 ath_beacon_config(sc, vap);
4790 break;
4791 case IEEE80211_M_MONITOR:
4792 /*
4793 * Monitor mode vaps have only INIT->RUN and RUN->RUN
4794 * transitions so we must re-enable interrupts here to
4795 * handle the case of a single monitor mode vap.
4796 */
4797 ath_hal_intrset(ah, sc->sc_imask);
4798 break;
4799 case IEEE80211_M_WDS:
4800 break;
4801 default:
4802 break;
4803 }
4804 /*
4805 * Let the hal process statistics collected during a
4806 * scan so it can provide calibrated noise floor data.
4807 */
4808 ath_hal_process_noisefloor(ah);
4809 /*
4810 * Reset rssi stats; maybe not the best place...
4811 */
4812 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
4813 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
4814 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
4815 /*
4816 * Finally, start any timers and the task q thread
4817 * (in case we didn't go through SCAN state).
4818 */
4819 if (ath_longcalinterval != 0) {
4820 /* start periodic recalibration timer */
4821 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
4822 } else {
4823 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
4824 "%s: calibration disabled\n", __func__);
4825 }
4826 taskqueue_unblock(sc->sc_tq);
4827 } else if (nstate == IEEE80211_S_INIT) {
4828 /*
4829 * If there are no vaps left in RUN state then
4830 * shutdown host/driver operation:
4831 * o disable interrupts
4832 * o disable the task queue thread
4833 * o mark beacon processing as stopped
4834 */
4835 if (!ath_isanyrunningvaps(vap)) {
4836 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
4837 /* disable interrupts */
4838 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
4839 taskqueue_block(sc->sc_tq);
4840 sc->sc_beacons = 0;
4841 }
4842#ifdef IEEE80211_SUPPORT_TDMA
4843 ath_hal_setcca(ah, AH_TRUE);
4844#endif
4845 }
4846bad:
4847 ieee80211_free_node(ni);
4848 return error;
4849}
4850
4851/*
4852 * Allocate a key cache slot to the station so we can
4853 * setup a mapping from key index to node. The key cache
4854 * slot is needed for managing antenna state and for
4855 * compression when stations do not use crypto. We do
4856 * it unilaterally here; if crypto is employed this slot
4857 * will be reassigned.
4858 */
4859static void
4860ath_setup_stationkey(struct ieee80211_node *ni)
4861{
4862 struct ieee80211vap *vap = ni->ni_vap;
4863 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4864 ieee80211_keyix keyix, rxkeyix;
4865
4866 /* XXX should take a locked ref to vap->iv_bss */
4867 if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
4868 /*
4869 * Key cache is full; we'll fall back to doing
4870 * the more expensive lookup in software. Note
4871 * this also means no h/w compression.
4872 */
4873 /* XXX msg+statistic */
4874 } else {
4875 /* XXX locking? */
4876 ni->ni_ucastkey.wk_keyix = keyix;
4877 ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
4878 /* NB: must mark device key to get called back on delete */
4879 ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
4880 IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
4881 /* NB: this will create a pass-thru key entry */
4882 ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss);
4883 }
4884}
4885
4886/*
4887 * Setup driver-specific state for a newly associated node.
4888 * Note that we're also called on a re-associate; the isnew
4889 * param tells us whether this is the first association.
4890 */
4891static void
4892ath_newassoc(struct ieee80211_node *ni, int isnew)
4893{
4894 struct ath_node *an = ATH_NODE(ni);
4895 struct ieee80211vap *vap = ni->ni_vap;
4896 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4897 const struct ieee80211_txparam *tp = ni->ni_txparms;
4898
4899 an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
4900 an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);
4901
4902 ath_rate_newassoc(sc, an, isnew);
4903 if (isnew &&
4904 (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
4905 ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
4906 ath_setup_stationkey(ni);
4907}
4908
4909static int
4910ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
4911 int nchans, struct ieee80211_channel chans[])
4912{
4913 struct ath_softc *sc = ic->ic_ifp->if_softc;
4914 struct ath_hal *ah = sc->sc_ah;
4915 HAL_STATUS status;
4916
4917 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
4918 "%s: rd %u cc %u location %c%s\n",
4919 __func__, reg->regdomain, reg->country, reg->location,
4920 reg->ecm ? " ecm" : "");
4921
4922 status = ath_hal_set_channels(ah, chans, nchans,
4923 reg->country, reg->regdomain);
4924 if (status != HAL_OK) {
4925 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
4926 __func__, status);
4927 return EINVAL; /* XXX */
4928 }
4929
4930 return 0;
4931}
4932
4933static void
4934ath_getradiocaps(struct ieee80211com *ic,
4935 int maxchans, int *nchans, struct ieee80211_channel chans[])
4936{
4937 struct ath_softc *sc = ic->ic_ifp->if_softc;
4938 struct ath_hal *ah = sc->sc_ah;
4939
4940 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
4941 __func__, SKU_DEBUG, CTRY_DEFAULT);
4942
4943 /* XXX check return */
4944 (void) ath_hal_getchannels(ah, chans, maxchans, nchans,
4945 HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
4946
4947}
4948
4949static int
4950ath_getchannels(struct ath_softc *sc)
4951{
4952 struct ifnet *ifp = sc->sc_ifp;
4953 struct ieee80211com *ic = ifp->if_l2com;
4954 struct ath_hal *ah = sc->sc_ah;
4955 HAL_STATUS status;
4956
4957 /*
4958 * Collect channel set based on EEPROM contents.
4959 */
4960 status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
4961 &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
4962 if (status != HAL_OK) {
4963 if_printf(ifp, "%s: unable to collect channel list from hal, "
4964 "status %d\n", __func__, status);
4965 return EINVAL;
4966 }
4967 (void) ath_hal_getregdomain(ah, &sc->sc_eerd);
4968 ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */
4969 /* XXX map Atheros SKUs to net80211 SKUs */
4970 /* XXX net80211 types too small */
4971 ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
4972 ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
4973 ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */
4974 ic->ic_regdomain.isocc[1] = ' ';
4975
4976 ic->ic_regdomain.ecm = 1;
4977 ic->ic_regdomain.location = 'I';
4978
4979 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
4980 "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
4981 __func__, sc->sc_eerd, sc->sc_eecc,
4982 ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
4983 ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : "");
4984 return 0;
4985}
4986
4987static int
4988ath_rate_setup(struct ath_softc *sc, u_int mode)
4989{
4990 struct ath_hal *ah = sc->sc_ah;
4991 const HAL_RATE_TABLE *rt;
4992
4993 switch (mode) {
4994 case IEEE80211_MODE_11A:
4995 rt = ath_hal_getratetable(ah, HAL_MODE_11A);
4996 break;
4997 case IEEE80211_MODE_HALF:
4998 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
4999 break;
5000 case IEEE80211_MODE_QUARTER:
5001 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
5002 break;
5003 case IEEE80211_MODE_11B:
5004 rt = ath_hal_getratetable(ah, HAL_MODE_11B);
5005 break;
5006 case IEEE80211_MODE_11G:
5007 rt = ath_hal_getratetable(ah, HAL_MODE_11G);
5008 break;
5009 case IEEE80211_MODE_TURBO_A:
5010 rt = ath_hal_getratetable(ah, HAL_MODE_108A);
5011 break;
5012 case IEEE80211_MODE_TURBO_G:
5013 rt = ath_hal_getratetable(ah, HAL_MODE_108G);
5014 break;
5015 case IEEE80211_MODE_STURBO_A:
5016 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
5017 break;
5018 case IEEE80211_MODE_11NA:
5019 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
5020 break;
5021 case IEEE80211_MODE_11NG:
5022 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
5023 break;
5024 default:
5025 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
5026 __func__, mode);
5027 return 0;
5028 }
5029 sc->sc_rates[mode] = rt;
5030 return (rt != NULL);
5031}
5032
5033static void
5034ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
5035{
5036#define N(a) (sizeof(a)/sizeof(a[0]))
5037 /* NB: on/off times from the Atheros NDIS driver, w/ permission */
5038 static const struct {
5039 u_int rate; /* tx/rx 802.11 rate */
5040 u_int16_t timeOn; /* LED on time (ms) */
5041 u_int16_t timeOff; /* LED off time (ms) */
5042 } blinkrates[] = {
5043 { 108, 40, 10 },
5044 { 96, 44, 11 },
5045 { 72, 50, 13 },
5046 { 48, 57, 14 },
5047 { 36, 67, 16 },
5048 { 24, 80, 20 },
5049 { 22, 100, 25 },
5050 { 18, 133, 34 },
5051 { 12, 160, 40 },
5052 { 10, 200, 50 },
5053 { 6, 240, 58 },
5054 { 4, 267, 66 },
5055 { 2, 400, 100 },
5056 { 0, 500, 130 },
5057 /* XXX half/quarter rates */
5058 };
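	/*
	 * NB: timeOn/timeOff above are in milliseconds; they are
	 * converted to callout ticks below as (ms * hz) / 1000,
	 * e.g. assuming hz = 1000, the 108 (54Mb/s) entry blinks
	 * 40 ticks on / 10 ticks off.
	 */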
5059 const HAL_RATE_TABLE *rt;
5060 int i, j;
5061
5062 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
5063 rt = sc->sc_rates[mode];
5064 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
5065 for (i = 0; i < rt->rateCount; i++) {
5066 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
5067 if (rt->info[i].phy != IEEE80211_T_HT)
5068 sc->sc_rixmap[ieeerate] = i;
5069 else
5070 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
5071 }
5072 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
5073 for (i = 0; i < N(sc->sc_hwmap); i++) {
5074 if (i >= rt->rateCount) {
5075 sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
5076 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
5077 continue;
5078 }
5079 sc->sc_hwmap[i].ieeerate =
5080 rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
5081 if (rt->info[i].phy == IEEE80211_T_HT)
5082 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
5083 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
5084 if (rt->info[i].shortPreamble ||
5085 rt->info[i].phy == IEEE80211_T_OFDM)
5086 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
5087 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
5088 for (j = 0; j < N(blinkrates)-1; j++)
5089 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
5090 break;
5091 /* NB: this uses the last entry if the rate isn't found */
5092 /* XXX beware of overflow */
5093 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
5094 sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
5095 }
5096 sc->sc_currates = rt;
5097 sc->sc_curmode = mode;
5098 /*
5099 * All protection frames are transmitted at 2Mb/s for
5100 * 11g, otherwise at 1Mb/s (rates below are in 500kb/s units).
5101 */
5102 if (mode == IEEE80211_MODE_11G)
5103 sc->sc_protrix = ath_tx_findrix(sc, 2*2);
5104 else
5105 sc->sc_protrix = ath_tx_findrix(sc, 2*1);
5106 /* NB: caller is responsible for resetting rate control state */
5107#undef N
5108}
5109
5110static void
5111ath_watchdog(void *arg)
5112{
5113 struct ath_softc *sc = arg;
5114 int do_reset = 0;
5115
5116 if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
5117 struct ifnet *ifp = sc->sc_ifp;
5118 uint32_t hangs;
5119
5120 if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
5121 hangs != 0) {
5122 if_printf(ifp, "%s hang detected (0x%x)\n",
5123 hangs & 0xff ? "bb" : "mac", hangs);
5124 } else
5125 if_printf(ifp, "device timeout\n");
5126 do_reset = 1;
5127 ifp->if_oerrors++;
5128 sc->sc_stats.ast_watchdog++;
5129 }
5130
5131 /*
5132 * We can't hold the lock across the ath_reset() call.
5133 *
5134 * And since this routine can't hold a lock and sleep,
5135 * do the reset deferred.
5136 */
5137 if (do_reset) {
5138 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
5139 }
5140
5141 callout_schedule(&sc->sc_wd_ch, hz);
5142}
5143
5144/*
5145 * Fetch the rate control statistics for the given node.
5146 */
5147static int
5148ath_ioctl_ratestats(struct ath_softc *sc, struct ath_rateioctl *rs)
5149{
5150 struct ath_node *an;
5151 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
5152 struct ieee80211_node *ni;
5153 int error = 0;
5154
5155 /* Perform a lookup on the given node */
5156 ni = ieee80211_find_node(&ic->ic_sta, rs->is_u.macaddr);
5157 if (ni == NULL) {
5158 error = EINVAL;
5159 goto bad;
5160 }
5161
5162 /* Lock the ath_node */
5163 an = ATH_NODE(ni);
5164 ATH_NODE_LOCK(an);
5165
5166 /* Fetch the rate control stats for this node */
5167 error = ath_rate_fetch_node_stats(sc, an, rs);
5168
5169 /* No matter what happens here, just drop through */
5170
5171 /* Unlock the ath_node */
5172 ATH_NODE_UNLOCK(an);
5173
5174 /* Unref the node */
5175 ieee80211_node_decref(ni);
5176
5177bad:
5178 return (error);
5179}
5180
5181#ifdef ATH_DIAGAPI
5182/*
5183 * Diagnostic interface to the HAL. This is used by various
5184 * tools to do things like retrieve register contents for
5185 * debugging. The mechanism is intentionally opaque so that
5186 * it can change frequently w/o concern for compatibility.
5187 */
5188static int
5189ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
5190{
5191 struct ath_hal *ah = sc->sc_ah;
5192 u_int id = ad->ad_id & ATH_DIAG_ID;
5193 void *indata = NULL;
5194 void *outdata = NULL;
5195 u_int32_t insize = ad->ad_in_size;
5196 u_int32_t outsize = ad->ad_out_size;
5197 int error = 0;
5198
5199 if (ad->ad_id & ATH_DIAG_IN) {
5200 /*
5201 * Copy in data.
5202 */
5203 indata = malloc(insize, M_TEMP, M_NOWAIT);
5204 if (indata == NULL) {
5205 error = ENOMEM;
5206 goto bad;
5207 }
5208 error = copyin(ad->ad_in_data, indata, insize);
5209 if (error)
5210 goto bad;
5211 }
5212 if (ad->ad_id & ATH_DIAG_DYN) {
5213 /*
5214 * Allocate a buffer for the results (otherwise the HAL
5215 * returns a pointer to a buffer where we can read the
5216 * results). Note that we depend on the HAL leaving this
5217 * pointer for us to use below in reclaiming the buffer;
5218 * may want to be more defensive.
5219 */
5220 outdata = malloc(outsize, M_TEMP, M_NOWAIT);
5221 if (outdata == NULL) {
5222 error = ENOMEM;
5223 goto bad;
5224 }
5225 }
5226 if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
5227 if (outsize < ad->ad_out_size)
5228 ad->ad_out_size = outsize;
5229 if (outdata != NULL)
5230 error = copyout(outdata, ad->ad_out_data,
5231 ad->ad_out_size);
5232 } else {
5233 error = EINVAL;
5234 }
5235bad:
5236 if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
5237 free(indata, M_TEMP);
5238 if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
5239 free(outdata, M_TEMP);
5240 return error;
5241}
5242#endif /* ATH_DIAGAPI */
5243
5244static int
5245ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
5246{
5247#define IS_RUNNING(ifp) \
5248 ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
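/* i.e. administratively UP and the driver has completed its init */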
5249 struct ath_softc *sc = ifp->if_softc;
5250 struct ieee80211com *ic = ifp->if_l2com;
5251 struct ifreq *ifr = (struct ifreq *)data;
5252 const HAL_RATE_TABLE *rt;
5253 int error = 0;
5254
5255 switch (cmd) {
5256 case SIOCSIFFLAGS:
5257 ATH_LOCK(sc);
5258 if (IS_RUNNING(ifp)) {
5259 /*
5260 * To avoid rescanning another access point,
5261 * do not call ath_init() here. Instead,
5262 * only reflect promisc mode settings.
5263 */
5264 ath_mode_init(sc);
5265 } else if (ifp->if_flags & IFF_UP) {
5266 /*
5267 * Beware of being called during attach/detach
5268 * to reset promiscuous mode. In that case we
5269 * will still be marked UP but not RUNNING.
5270 * However trying to re-init the interface
5271 * is the wrong thing to do as we've already
5272 * torn down much of our state. There's
5273 * probably a better way to deal with this.
5274 */
5275 if (!sc->sc_invalid)
5276 ath_init(sc); /* XXX lose error */
5277 } else {
5278 ath_stop_locked(ifp);
5279#ifdef notyet
5280 /* XXX must wakeup in places like ath_vap_delete */
5281 if (!sc->sc_invalid)
5282 ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
5283#endif
5284 }
5285 ATH_UNLOCK(sc);
5286 break;
5287 case SIOCGIFMEDIA:
5288 case SIOCSIFMEDIA:
5289 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
5290 break;
5291 case SIOCGATHSTATS:
5292 /* NB: embed these numbers to get a consistent view */
5293 sc->sc_stats.ast_tx_packets = ifp->if_opackets;
5294 sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
5295 sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
5296 sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
5297#ifdef IEEE80211_SUPPORT_TDMA
5298 sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap);
5299 sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam);
5300#endif
5301 rt = sc->sc_currates;
5302 sc->sc_stats.ast_tx_rate =
5303 rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
5304 if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT)
5305 sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS;
5306 return copyout(&sc->sc_stats,
5307 ifr->ifr_data, sizeof (sc->sc_stats));
5308 case SIOCGATHAGSTATS:
5309 return copyout(&sc->sc_aggr_stats,
5310 ifr->ifr_data, sizeof (sc->sc_aggr_stats));
5311 case SIOCZATHSTATS:
5312 error = priv_check(curthread, PRIV_DRIVER);
5313 if (error == 0) {
5314 memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
5315 memset(&sc->sc_aggr_stats, 0,
5316 sizeof(sc->sc_aggr_stats));
5317 memset(&sc->sc_intr_stats, 0,
5318 sizeof(sc->sc_intr_stats));
5319 }
5320 break;
5321#ifdef ATH_DIAGAPI
5322 case SIOCGATHDIAG:
5323 error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
5324 break;
5325 case SIOCGATHPHYERR:
5326 error = ath_ioctl_phyerr(sc,(struct ath_diag*) ifr);
5327 break;
5328#endif
5329 case SIOCGATHNODERATESTATS:
5330 error = ath_ioctl_ratestats(sc, (struct ath_rateioctl *) ifr);
5331 break;
5332 case SIOCGIFADDR:
5333 error = ether_ioctl(ifp, cmd, data);
5334 break;
5335 default:
5336 error = EINVAL;
5337 break;
5338 }
5339 return error;
5340#undef IS_RUNNING
5341}
5342
5343/*
5344 * Announce various information on device/driver attach.
5345 */
5346static void
5347ath_announce(struct ath_softc *sc)
5348{
5349 struct ifnet *ifp = sc->sc_ifp;
5350 struct ath_hal *ah = sc->sc_ah;
5351
5352 if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n",
5353 ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
5354 ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
5355 if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n",
5356 ah->ah_analog2GhzRev, ah->ah_analog5GhzRev);
5357 if (bootverbose) {
5358 int i;
5359 for (i = 0; i <= WME_AC_VO; i++) {
5360 struct ath_txq *txq = sc->sc_ac2q[i];
5361 if_printf(ifp, "Use hw queue %u for %s traffic\n",
5362 txq->axq_qnum, ieee80211_wme_acnames[i]);
5363 }
5364 if_printf(ifp, "Use hw queue %u for CAB traffic\n",
5365 sc->sc_cabq->axq_qnum);
5366 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
5367 }
5368 if (ath_rxbuf != ATH_RXBUF)
5369 if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
5370 if (ath_txbuf != ATH_TXBUF)
5371 if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
5372 if (sc->sc_mcastkey && bootverbose)
5373 if_printf(ifp, "using multicast key search\n");
5374}
5375
5376static void
5377ath_dfs_tasklet(void *p, int npending)
5378{
5379 struct ath_softc *sc = (struct ath_softc *) p;
5380 struct ifnet *ifp = sc->sc_ifp;
5381 struct ieee80211com *ic = ifp->if_l2com;
5382
5383 /*
5384 * If previous processing has found a radar event,
5385 * signal this to the net80211 layer to begin DFS
5386 * processing.
5387 */
5388 if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
5389 /* DFS event found, initiate channel change */
5390 /*
5391 * XXX doesn't currently tell us whether the event
5392 * XXX was found in the primary or extension
5393 * XXX channel!
5394 */
5395 IEEE80211_LOCK(ic);
5396 ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
5397 IEEE80211_UNLOCK(ic);
5398 }
5399}
5400
5401/*
5402 * Enable/disable power save. This must be called with
5403 * no TX driver locks currently held, so it should only
5404 * be called from the RX path (which doesn't hold any
5405 * TX driver locks.)
5406 */
5407static void
5408ath_node_powersave(struct ieee80211_node *ni, int enable)
5409{
5410 struct ath_node *an = ATH_NODE(ni);
5411 struct ieee80211com *ic = ni->ni_ic;
5412 struct ath_softc *sc = ic->ic_ifp->if_softc;
5413 struct ath_vap *avp = ATH_VAP(ni->ni_vap);
5414
5415 ATH_NODE_UNLOCK_ASSERT(an);
5416 /* XXX and no TXQ locks should be held here */
5417
5418 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: ni=%p, enable=%d\n",
5419 __func__, ni, enable);
5420
5421 /* Suspend or resume software queue handling */
5422 if (enable)
5423 ath_tx_node_sleep(sc, an);
5424 else
5425 ath_tx_node_wakeup(sc, an);
5426
5427 /* Update net80211 state */
5428 avp->av_node_ps(ni, enable);
5429}
5430
5431/*
5432 * Notification from net80211 that the powersave queue state has
5433 * changed.
5434 *
5435 * Since the software queue also may have some frames:
5436 *
5437 * + if the node software queue has frames and the TID state
5438 * is 0, we set the TIM;
5439 * + if the node and the stack are both empty, we clear the TIM bit;
5440 * + if the stack tries to set the bit, always set it;
5441 * + if the stack tries to clear the bit, only clear it if the
5442 * software queue in question is also empty.
5443 *
5444 * TODO: this is called during node teardown, so let's ensure
5445 * this is all correctly handled and that the TIM bit is cleared.
5446 * It may be that the node flush is called _AFTER_ the net80211
5447 * stack clears the TIM.
5448 *
5449 * Here is the racy part. Since >1 concurrent, overlapping
5450 * TXes may appear complete, each with a TX completion in
5451 * a different thread, the concurrent TIM calls may
5452 * clash. We can't hold the node lock here because setting the
5453 * TIM grabs the net80211 comlock and this may cause a LOR.
5454 * The solution is either to totally serialise _everything_ at
5455 * this point (ie, all TX, completion and any reset/flush go into
5456 * one taskqueue) or a new "ath TIM lock" needs to be created that
5457 * just wraps the driver state change and this call to avp->av_set_tim().
5458 *
5459 * The same race exists in the net80211 power save queue handling
5460 * as well. Since multiple transmitting threads may queue frames
5461 * into the driver, as well as ps-poll and the driver transmitting
5462 * frames (and thus clearing the psq), it's quite possible that
5463 * a packet entering the PSQ and a ps-poll being handled will
5464 * race, causing the TIM to be cleared and not re-set.
5465 */
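/*
 * A sketch of the decision logic implemented below:
 *
 *	enable, TIM already set		-> no-op
 *	enable, TIM not set		-> set the TIM
 *	disable, swq empty		-> clear the TIM
 *	disable, node not in powersave	-> clear the TIM
 *	disable, swq non-empty		-> leave the TIM set
 */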
5466static int
5467ath_node_set_tim(struct ieee80211_node *ni, int enable)
5468{
5469 struct ieee80211com *ic = ni->ni_ic;
5470 struct ath_softc *sc = ic->ic_ifp->if_softc;
5471 struct ath_node *an = ATH_NODE(ni);
5472 struct ath_vap *avp = ATH_VAP(ni->ni_vap);
5473 int changed = 0;
5474
5475 ATH_NODE_UNLOCK_ASSERT(an);
5476
5477 /*
5478 * For now, just track and then update the TIM.
5479 */
5480 ATH_NODE_LOCK(an);
5481 an->an_stack_psq = enable;
5482
5483 /*
5484 * This will get called for all operating modes,
5485 * even if avp->av_set_tim is unset.
5486 * It's currently set for hostap/ibss modes; but
5487 * the same infrastructure is used for both STA
5488 * and AP/IBSS node power save.
5489 */
5490 if (avp->av_set_tim == NULL) {
5491 ATH_NODE_UNLOCK(an);
5492 return (0);
5493 }
5494
5495 /*
5496 * If setting the bit, always set it here.
5497 * If clearing the bit, only clear it if the
5498 * software queue is also empty.
5499 *
5500 * If the node has left power save, just clear the TIM
5501 * bit regardless of the state of the power save queue.
5502 *
5503 * XXX TODO: although atomics are used, it's quite possible
5504 * that a race will occur between this and setting/clearing
5505 * in another thread. TX completion will always occur in
5506 * one thread; however, setting/clearing the TIM bit can come
5507 * from a variety of different process contexts!
5508 */
5509 if (enable && an->an_tim_set == 1) {
5510 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
5511 "%s: an=%p, enable=%d, tim_set=1, ignoring\n",
5512 __func__, an, enable);
5513 ATH_NODE_UNLOCK(an);
5514 } else if (enable) {
5515 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
5516 "%s: an=%p, enable=%d, enabling TIM\n",
5517 __func__, an, enable);
5518 an->an_tim_set = 1;
5519 ATH_NODE_UNLOCK(an);
5520 changed = avp->av_set_tim(ni, enable);
5521 } else if (atomic_load_acq_int(&an->an_swq_depth) == 0) {
5522 /* disable */
5523 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
5524 "%s: an=%p, enable=%d, an_swq_depth == 0, disabling\n",
5525 __func__, an, enable);
5526 an->an_tim_set = 0;
5527 ATH_NODE_UNLOCK(an);
5528 changed = avp->av_set_tim(ni, enable);
5529 } else if (! an->an_is_powersave) {
5530 /*
5531 * disable regardless; the node isn't in powersave now
5532 */
5533 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
5534 "%s: an=%p, enable=%d, an_pwrsave=0, disabling\n",
5535 __func__, an, enable);
5536 an->an_tim_set = 0;
5537 ATH_NODE_UNLOCK(an);
5538 changed = avp->av_set_tim(ni, enable);
5539 } else {
5540 /*
5541 * psq disable, node is currently in powersave, node
5542 * software queue isn't empty, so don't clear the TIM bit
5543 * for now.
5544 */
5545 ATH_NODE_UNLOCK(an);
5546 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
5547 "%s: enable=%d, an_swq_depth > 0, ignoring\n",
5548 __func__, enable);
5549 changed = 0;
5550 }
5551
5552 return (changed);
5553}
5554
5555/*
5556 * Set or update the TIM from the software queue.
5557 *
5558 * Check the software queue depth before attempting to
5559 * lock anything; that avoids needlessly taking the lock. Then,
5560 * re-check afterwards to ensure nothing has changed in the
5561 * meantime.
5562 *
5563 * set: This is designed to be called from the TX path, after
5564 * a frame has been queued, to see whether the swq depth is > 0.
5565 *
5566 * clear: This is designed to be called from the buffer completion point
5567 * (right now it's ath_tx_default_comp()) where the state of
5568 * a software queue has changed.
5569 *
5570 * It makes sense to place it at buffer free / completion rather
5571 * than after each software queue operation, as there's no real
5572 * point in churning the TIM bit while the last frames in the software
5573 * queue are transmitted. If they fail and we retry them, we'd
5574 * just be setting the TIM bit again anyway.
5575 */
5576void
5577ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni,
5578 int enable)
5579{
5580 struct ath_node *an;
5581 struct ath_vap *avp;
5582
5583 /* Don't do this for broadcast/etc frames */
5584 if (ni == NULL)
5585 return;
5586
5587 an = ATH_NODE(ni);
5588 avp = ATH_VAP(ni->ni_vap);
5589
5590 /*
5591 * And for operating modes without the TIM handler set, let's
5592 * just skip those.
5593 */
5594 if (avp->av_set_tim == NULL)
5595 return;
5596
5597 ATH_NODE_UNLOCK_ASSERT(an);
5598
5599 if (enable) {
5600 /*
5601 * Don't bother grabbing the lock unless the queue is
5602 * non-empty.
5603 */
5604 if (atomic_load_acq_int(&an->an_swq_depth) == 0)
5605 return;
5606
5607 ATH_NODE_LOCK(an);
5608 if (an->an_is_powersave &&
5609 an->an_tim_set == 0 &&
5610 atomic_load_acq_int(&an->an_swq_depth) != 0) {
5611 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
5612 "%s: an=%p, swq_depth>0, tim_set=0, set!\n",
5613 __func__, an);
5614 an->an_tim_set = 1;
5615 ATH_NODE_UNLOCK(an);
5616 (void) avp->av_set_tim(ni, 1);
5617 } else {
5618 ATH_NODE_UNLOCK(an);
5619 }
5620 } else {
5621 /*
5622 * Don't bother grabbing the lock unless the queue is empty.
5623 */
5624 if (atomic_load_acq_int(&an->an_swq_depth) != 0)
5625 return;
5626
5627 ATH_NODE_LOCK(an);
5628 if (an->an_is_powersave &&
5629 an->an_stack_psq == 0 &&
5630 an->an_tim_set == 1 &&
5631 atomic_load_acq_int(&an->an_swq_depth) == 0) {
5632 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
5633 "%s: an=%p, swq_depth=0, tim_set=1, psq_set=0,"
5634 " clear!\n",
5635 __func__, an);
5636 an->an_tim_set = 0;
5637 ATH_NODE_UNLOCK(an);
5638 (void) avp->av_set_tim(ni, 0);
5639 } else {
5640 ATH_NODE_UNLOCK(an);
5641 }
5642 }
5643}
5644
5645MODULE_VERSION(if_ath, 1);
5646MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */
5647#if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ)
5648MODULE_DEPEND(if_ath, alq, 1, 1, 1);
5649#endif
2603}
2604
2605static int
2606ath_media_change(struct ifnet *ifp)
2607{
2608 int error = ieee80211_media_change(ifp);
2609 /* NB: only the fixed rate can change and that doesn't need a reset */
2610 return (error == ENETRESET ? 0 : error);
2611}
2612
2613/*
2614 * Block/unblock tx+rx processing while a key change is done.
2615 * We assume the caller serializes key management operations
2616 * so we only need to worry about synchronization with other
2617 * uses that originate in the driver.
2618 */
2619static void
2620ath_key_update_begin(struct ieee80211vap *vap)
2621{
2622 struct ifnet *ifp = vap->iv_ic->ic_ifp;
2623 struct ath_softc *sc = ifp->if_softc;
2624
2625 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
2626 taskqueue_block(sc->sc_tq);
2627 IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */
2628}
2629
2630static void
2631ath_key_update_end(struct ieee80211vap *vap)
2632{
2633 struct ifnet *ifp = vap->iv_ic->ic_ifp;
2634 struct ath_softc *sc = ifp->if_softc;
2635
2636 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
2637 IF_UNLOCK(&ifp->if_snd);
2638 taskqueue_unblock(sc->sc_tq);
2639}
2640
2641static void
2642ath_update_promisc(struct ifnet *ifp)
2643{
2644 struct ath_softc *sc = ifp->if_softc;
2645 u_int32_t rfilt;
2646
2647 /* configure rx filter */
2648 rfilt = ath_calcrxfilter(sc);
2649 ath_hal_setrxfilter(sc->sc_ah, rfilt);
2650
2651 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
2652}
2653
2654static void
2655ath_update_mcast(struct ifnet *ifp)
2656{
2657 struct ath_softc *sc = ifp->if_softc;
2658 u_int32_t mfilt[2];
2659
2660 /* calculate and install multicast filter */
2661 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2662 struct ifmultiaddr *ifma;
2663 /*
2664 * Merge multicast addresses to form the hardware filter.
2665 */
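		/*
		 * NB: each address is folded below into a 6-bit
		 * hash (pos, 0..63) that selects one bit of the
		 * 64-bit hardware multicast mask, split across the
		 * two 32-bit words mfilt[0] and mfilt[1].
		 */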
2666 mfilt[0] = mfilt[1] = 0;
2667 if_maddr_rlock(ifp); /* XXX need some fiddling to remove? */
2668 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2669 caddr_t dl;
2670 u_int32_t val;
2671 u_int8_t pos;
2672
2673 /* calculate XOR of eight 6bit values */
2674 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2675 val = LE_READ_4(dl + 0);
2676 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2677 val = LE_READ_4(dl + 3);
2678 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2679 pos &= 0x3f;
2680 mfilt[pos / 32] |= (1 << (pos % 32));
2681 }
2682 if_maddr_runlock(ifp);
2683 } else
2684 mfilt[0] = mfilt[1] = ~0;
2685 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
2686 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
2687 __func__, mfilt[0], mfilt[1]);
2688}
2689
2690void
2691ath_mode_init(struct ath_softc *sc)
2692{
2693 struct ifnet *ifp = sc->sc_ifp;
2694 struct ath_hal *ah = sc->sc_ah;
2695 u_int32_t rfilt;
2696
2697 /* configure rx filter */
2698 rfilt = ath_calcrxfilter(sc);
2699 ath_hal_setrxfilter(ah, rfilt);
2700
2701 /* configure operational mode */
2702 ath_hal_setopmode(ah);
2703
2704 DPRINTF(sc, ATH_DEBUG_STATE | ATH_DEBUG_MODE,
2705 "%s: ah=%p, ifp=%p, if_addr=%p\n",
2706 __func__,
2707 ah,
2708 ifp,
2709 (ifp == NULL) ? NULL : ifp->if_addr);
2710
2711 /* handle any link-level address change */
2712 ath_hal_setmac(ah, IF_LLADDR(ifp));
2713
2714 /* calculate and install multicast filter */
2715 ath_update_mcast(ifp);
2716}
2717
2718/*
2719 * Set the slot time based on the current setting.
2720 */
2721void
2722ath_setslottime(struct ath_softc *sc)
2723{
2724 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2725 struct ath_hal *ah = sc->sc_ah;
2726 u_int usec;
2727
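	/*
	 * NB: 13 and 21 usec below are the slot times used for
	 * half- (10MHz) and quarter-rate (5MHz) channels,
	 * respectively.
	 */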
2728 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
2729 usec = 13;
2730 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
2731 usec = 21;
2732 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
2733 /* honor short/long slot time only in 11g */
2734 /* XXX shouldn't honor on pure g or turbo g channel */
2735 if (ic->ic_flags & IEEE80211_F_SHSLOT)
2736 usec = HAL_SLOT_TIME_9;
2737 else
2738 usec = HAL_SLOT_TIME_20;
2739 } else
2740 usec = HAL_SLOT_TIME_9;
2741
2742 DPRINTF(sc, ATH_DEBUG_RESET,
2743 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
2744 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
2745 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
2746
2747 ath_hal_setslottime(ah, usec);
2748 sc->sc_updateslot = OK;
2749}
2750
2751/*
2752 * Callback from the 802.11 layer to update the
2753 * slot time based on the current setting.
2754 */
2755static void
2756ath_updateslot(struct ifnet *ifp)
2757{
2758 struct ath_softc *sc = ifp->if_softc;
2759 struct ieee80211com *ic = ifp->if_l2com;
2760
2761 /*
2762 * When not coordinating the BSS, change the hardware
2763 * immediately. For other operating modes we defer the change
2764 * until beacon updates have propagated to the stations.
2765 */
2766 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2767 ic->ic_opmode == IEEE80211_M_MBSS)
2768 sc->sc_updateslot = UPDATE;
2769 else
2770 ath_setslottime(sc);
2771}
2772
2773/*
2774 * Append the contents of src to dst; both queues
2775 * are assumed to be locked.
2776 */
2777void
2778ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
2779{
2780
2781 ATH_TXQ_LOCK_ASSERT(dst);
2782 ATH_TXQ_LOCK_ASSERT(src);
2783
2784 TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list);
2785 dst->axq_link = src->axq_link;
2786 src->axq_link = NULL;
2787 dst->axq_depth += src->axq_depth;
2788 dst->axq_aggr_depth += src->axq_aggr_depth;
2789 src->axq_depth = 0;
2790 src->axq_aggr_depth = 0;
2791}
2792
2793/*
2794 * Reset the hardware, with no loss.
2795 *
2796 * This can't be used for a general-case reset.
2797 */
2798static void
2799ath_reset_proc(void *arg, int pending)
2800{
2801 struct ath_softc *sc = arg;
2802 struct ifnet *ifp = sc->sc_ifp;
2803
2804#if 0
2805 if_printf(ifp, "%s: resetting\n", __func__);
2806#endif
2807 ath_reset(ifp, ATH_RESET_NOLOSS);
2808}
2809
2810/*
2811 * Reset the hardware after detecting beacons have stopped.
2812 */
2813static void
2814ath_bstuck_proc(void *arg, int pending)
2815{
2816 struct ath_softc *sc = arg;
2817 struct ifnet *ifp = sc->sc_ifp;
2818 uint32_t hangs = 0;
2819
2820 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0)
2821 if_printf(ifp, "bb hang detected (0x%x)\n", hangs);
2822
2823 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
2824 sc->sc_bmisscount);
2825 sc->sc_stats.ast_bstuck++;
2826 /*
2827 * This assumes that there's no simultaneous channel mode change
2828 * occurring.
2829 */
2830 ath_reset(ifp, ATH_RESET_NOLOSS);
2831}
2832
2833static void
2834ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2835{
2836 bus_addr_t *paddr = (bus_addr_t*) arg;
2837 KASSERT(error == 0, ("error %u on bus_dma callback", error));
2838 *paddr = segs->ds_addr;
2839}
2840
2841/*
2842 * Allocate the descriptors and appropriate DMA tag/setup.
2843 *
2844 * For some situations (e.g. EDMA TX completion), there isn't a requirement
2845 * for the ath_buf entries to be allocated.
2846 */
2847int
2848ath_descdma_alloc_desc(struct ath_softc *sc,
2849 struct ath_descdma *dd, ath_bufhead *head,
2850 const char *name, int ds_size, int ndesc)
2851{
2852#define DS2PHYS(_dd, _ds) \
2853 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2854#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
2855 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
2856 struct ifnet *ifp = sc->sc_ifp;
2857 int error;
2858
2859 dd->dd_descsize = ds_size;
2860
2861 DPRINTF(sc, ATH_DEBUG_RESET,
2862 "%s: %s DMA: %u desc, %d bytes per descriptor\n",
2863 __func__, name, ndesc, dd->dd_descsize);
2864
2865 dd->dd_name = name;
2866 dd->dd_desc_len = dd->dd_descsize * ndesc;
2867
2868 /*
2869 * Merlin work-around:
2870 * Descriptors that cross the 4KB boundary can't be used.
2871 * Assume one skipped descriptor per 4KB page.
2872 */
2873 if (! ath_hal_split4ktrans(sc->sc_ah)) {
2874 int numpages = dd->dd_desc_len / 4096;
2875 dd->dd_desc_len += ds_size * numpages;
2876 }
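	/*
	 * For example (hypothetical sizes): 512 descriptors of
	 * 24 bytes each give dd_desc_len = 12288 bytes = 3 pages,
	 * so 3 * 24 = 72 bytes of slack are added, one skipped
	 * descriptor per 4KB page.
	 */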
2877
2878 /*
2879 * Setup DMA descriptor area.
2880 */
2881 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
2882 PAGE_SIZE, 0, /* alignment, bounds */
2883 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2884 BUS_SPACE_MAXADDR, /* highaddr */
2885 NULL, NULL, /* filter, filterarg */
2886 dd->dd_desc_len, /* maxsize */
2887 1, /* nsegments */
2888 dd->dd_desc_len, /* maxsegsize */
2889 BUS_DMA_ALLOCNOW, /* flags */
2890 NULL, /* lockfunc */
2891 NULL, /* lockarg */
2892 &dd->dd_dmat);
2893 if (error != 0) {
2894 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
2895 return error;
2896 }
2897
2898 /* allocate descriptors */
2899 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
2900 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2901 &dd->dd_dmamap);
2902 if (error != 0) {
2903 if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
2904 "error %u\n", ndesc, dd->dd_name, error);
2905 goto fail1;
2906 }
2907
2908 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
2909 dd->dd_desc, dd->dd_desc_len,
2910 ath_load_cb, &dd->dd_desc_paddr,
2911 BUS_DMA_NOWAIT);
2912 if (error != 0) {
2913 if_printf(ifp, "unable to map %s descriptors, error %u\n",
2914 dd->dd_name, error);
2915 goto fail2;
2916 }
2917
2918 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
2919 __func__, dd->dd_name, (uint8_t *) dd->dd_desc,
2920 (u_long) dd->dd_desc_len, (caddr_t) dd->dd_desc_paddr,
2921 /*XXX*/ (u_long) dd->dd_desc_len);
2922
2923 return (0);
2924
2925fail2:
2926 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
2927fail1:
2928 bus_dma_tag_destroy(dd->dd_dmat);
2929 memset(dd, 0, sizeof(*dd));
2930 return error;
2931#undef DS2PHYS
2932#undef ATH_DESC_4KB_BOUND_CHECK
2933}
2934
2935int
2936ath_descdma_setup(struct ath_softc *sc,
2937 struct ath_descdma *dd, ath_bufhead *head,
2938 const char *name, int ds_size, int nbuf, int ndesc)
2939{
2940#define DS2PHYS(_dd, _ds) \
2941 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2942#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
2943 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
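/*
 * e.g. with _len 0x10, a _daddr whose low 12 bits are 0xFF8 gives
 * 0xFF8 > (0x1000 - 0x10) = 0xFF0 -> 1 (the descriptor would straddle
 * the 4KB page); low bits of 0xFF0 fit exactly and the check gives 0.
 */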
2944 struct ifnet *ifp = sc->sc_ifp;
2945 uint8_t *ds;
2946 struct ath_buf *bf;
2947 int i, bsize, error;
2948
2949 /* Allocate descriptors */
2950 error = ath_descdma_alloc_desc(sc, dd, head, name, ds_size,
2951 nbuf * ndesc);
2952
2953 /* Assume any errors during allocation were dealt with */
2954 if (error != 0) {
2955 return (error);
2956 }
2957
2958 ds = (uint8_t *) dd->dd_desc;
2959
2960 /* allocate rx buffers */
2961 bsize = sizeof(struct ath_buf) * nbuf;
2962 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
2963 if (bf == NULL) {
2964 if_printf(ifp, "malloc of %s buffers failed, size %u\n",
2965 dd->dd_name, bsize);
2966 goto fail3;
2967 }
2968 dd->dd_bufptr = bf;
2969
2970 TAILQ_INIT(head);
2971 for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * dd->dd_descsize)) {
2972 bf->bf_desc = (struct ath_desc *) ds;
2973 bf->bf_daddr = DS2PHYS(dd, ds);
2974 if (! ath_hal_split4ktrans(sc->sc_ah)) {
2975 /*
2976 * Merlin WAR: Skip descriptor addresses which
2977 * cause a 4KB boundary crossing at any point
2978 * in the descriptor.
2979 */
2980 if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr,
2981 dd->dd_descsize)) {
2982 /* Start at the next page */
2983 ds += 0x1000 - (bf->bf_daddr & 0xFFF);
2984 bf->bf_desc = (struct ath_desc *) ds;
2985 bf->bf_daddr = DS2PHYS(dd, ds);
2986 }
2987 }
2988 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
2989 &bf->bf_dmamap);
2990 if (error != 0) {
2991 if_printf(ifp, "unable to create dmamap for %s "
2992 "buffer %u, error %u\n", dd->dd_name, i, error);
2993 ath_descdma_cleanup(sc, dd, head);
2994 return error;
2995 }
2996 bf->bf_lastds = bf->bf_desc; /* Just an initial value */
2997 TAILQ_INSERT_TAIL(head, bf, bf_list);
2998 }
2999
3000 /*
3001 * XXX TODO: ensure that ds doesn't overflow the descriptor
3002 * allocation; otherwise adjacent memory will be corrupted
3003 * and the machine will crash.
3004 */
3005 return 0;
3006 /* XXX this should likely just call ath_descdma_cleanup() */
3007fail3:
3008 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3009 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3010 bus_dma_tag_destroy(dd->dd_dmat);
3011 memset(dd, 0, sizeof(*dd));
3012 return error;
3013#undef DS2PHYS
3014#undef ATH_DESC_4KB_BOUND_CHECK
3015}
3016
3017/*
3018 * Allocate ath_buf entries but no descriptor contents.
3019 *
3020 * This is for RX EDMA where the descriptors are the header part of
3021 * the RX buffer.
3022 */
3023int
3024ath_descdma_setup_rx_edma(struct ath_softc *sc,
3025 struct ath_descdma *dd, ath_bufhead *head,
3026 const char *name, int nbuf, int rx_status_len)
3027{
3028 struct ifnet *ifp = sc->sc_ifp;
3029 struct ath_buf *bf;
3030 int i, bsize, error;
3031
3032 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers\n",
3033 __func__, name, nbuf);
3034
3035 dd->dd_name = name;
3036 /*
3037 * This is mostly for show. We're not allocating any actual
3038 * descriptors here as EDMA RX has the descriptor as part
3039 * of the RX buffer.
3040 *
3041 * However, dd_desc_len is used by ath_descdma_free() to determine
3042 * whether we have already freed this DMA mapping.
3043 */
3044 dd->dd_desc_len = rx_status_len * nbuf;
3045 dd->dd_descsize = rx_status_len;
3046
3047 /* allocate rx buffers */
3048 bsize = sizeof(struct ath_buf) * nbuf;
3049 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
3050 if (bf == NULL) {
3051 if_printf(ifp, "malloc of %s buffers failed, size %u\n",
3052 dd->dd_name, bsize);
3053 error = ENOMEM;
3054 goto fail3;
3055 }
3056 dd->dd_bufptr = bf;
3057
3058 TAILQ_INIT(head);
3059 for (i = 0; i < nbuf; i++, bf++) {
3060 bf->bf_desc = NULL;
3061 bf->bf_daddr = 0;
3062 bf->bf_lastds = NULL; /* Just an initial value */
3063
3064 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
3065 &bf->bf_dmamap);
3066 if (error != 0) {
3067 if_printf(ifp, "unable to create dmamap for %s "
3068 "buffer %u, error %u\n", dd->dd_name, i, error);
3069 ath_descdma_cleanup(sc, dd, head);
3070 return error;
3071 }
3072 TAILQ_INSERT_TAIL(head, bf, bf_list);
3073 }
3074 return 0;
3075fail3:
3076 memset(dd, 0, sizeof(*dd));
3077 return error;
3078}
3079
3080void
3081ath_descdma_cleanup(struct ath_softc *sc,
3082 struct ath_descdma *dd, ath_bufhead *head)
3083{
3084 struct ath_buf *bf;
3085 struct ieee80211_node *ni;
3086
3087 if (dd->dd_dmamap != 0) {
3088 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3089 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3090 bus_dma_tag_destroy(dd->dd_dmat);
3091 }
3092
3093 if (head != NULL) {
3094 TAILQ_FOREACH(bf, head, bf_list) {
3095 if (bf->bf_m) {
3096 m_freem(bf->bf_m);
3097 bf->bf_m = NULL;
3098 }
3099 if (bf->bf_dmamap != NULL) {
3100 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
3101 bf->bf_dmamap = NULL;
3102 }
3103 ni = bf->bf_node;
3104 bf->bf_node = NULL;
3105 if (ni != NULL) {
3106 /*
3107 * Reclaim node reference.
3108 */
3109 ieee80211_free_node(ni);
3110 }
3111 }
3112 }
3113
3114 if (head != NULL)
3115 TAILQ_INIT(head);
3116
3117 if (dd->dd_bufptr != NULL)
3118 free(dd->dd_bufptr, M_ATHDEV);
3119 memset(dd, 0, sizeof(*dd));
3120}
3121
3122static int
3123ath_desc_alloc(struct ath_softc *sc)
3124{
3125 int error;
3126
3127 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
3128 "tx", sc->sc_tx_desclen, ath_txbuf, ATH_TXDESC);
3129 if (error != 0) {
3130 return error;
3131 }
3132 sc->sc_txbuf_cnt = ath_txbuf;
3133
3134 error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt,
3135 "tx_mgmt", sc->sc_tx_desclen, ath_txbuf_mgmt,
3136 ATH_TXDESC);
3137 if (error != 0) {
3138 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3139 return error;
3140 }
3141
3142 /*
3143 * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the
3144 * flag doesn't have to be set in ath_getbuf_locked().
3145 */
3146
3147 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
3148 "beacon", sc->sc_tx_desclen, ATH_BCBUF, 1);
3149 if (error != 0) {
3150 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3151 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
3152 &sc->sc_txbuf_mgmt);
3153 return error;
3154 }
3155 return 0;
3156}
3157
3158static void
3159ath_desc_free(struct ath_softc *sc)
3160{
3161
3162 if (sc->sc_bdma.dd_desc_len != 0)
3163 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
3164 if (sc->sc_txdma.dd_desc_len != 0)
3165 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3166 if (sc->sc_txdma_mgmt.dd_desc_len != 0)
3167 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
3168 &sc->sc_txbuf_mgmt);
3169}
3170
3171static struct ieee80211_node *
3172ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3173{
3174 struct ieee80211com *ic = vap->iv_ic;
3175 struct ath_softc *sc = ic->ic_ifp->if_softc;
3176 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
3177 struct ath_node *an;
3178
3179 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
3180 if (an == NULL) {
3181 /* XXX stat+msg */
3182 return NULL;
3183 }
3184 ath_rate_node_init(sc, an);
3185
3186 /* Setup the mutex; no associd yet, so name the lock after the node pointer */
3187 snprintf(an->an_name, sizeof(an->an_name), "%s: node %p",
3188 device_get_nameunit(sc->sc_dev), an);
3189 mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF);
3190
3191 /* XXX setup ath_tid */
3192 ath_tx_tid_init(sc, an);
3193
3194 DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
3195 return &an->an_node;
3196}
3197
3198static void
3199ath_node_cleanup(struct ieee80211_node *ni)
3200{
3201 struct ieee80211com *ic = ni->ni_ic;
3202 struct ath_softc *sc = ic->ic_ifp->if_softc;
3203
3204 /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */
3205 ath_tx_node_flush(sc, ATH_NODE(ni));
3206 ath_rate_node_cleanup(sc, ATH_NODE(ni));
3207 sc->sc_node_cleanup(ni);
3208}
3209
3210static void
3211ath_node_free(struct ieee80211_node *ni)
3212{
3213 struct ieee80211com *ic = ni->ni_ic;
3214 struct ath_softc *sc = ic->ic_ifp->if_softc;
3215
3216 DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);
3217 mtx_destroy(&ATH_NODE(ni)->an_mtx);
3218 sc->sc_node_free(ni);
3219}
3220
3221static void
3222ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
3223{
3224 struct ieee80211com *ic = ni->ni_ic;
3225 struct ath_softc *sc = ic->ic_ifp->if_softc;
3226 struct ath_hal *ah = sc->sc_ah;
3227
3228 *rssi = ic->ic_node_getrssi(ni);
3229 if (ni->ni_chan != IEEE80211_CHAN_ANYC)
3230 *noise = ath_hal_getchannoise(ah, ni->ni_chan);
3231 else
3232 *noise = -95; /* nominally correct */
3233}
3234
3235/*
3236 * Set the default antenna.
3237 */
3238void
3239ath_setdefantenna(struct ath_softc *sc, u_int antenna)
3240{
3241 struct ath_hal *ah = sc->sc_ah;
3242
3243 /* XXX block beacon interrupts */
3244 ath_hal_setdefantenna(ah, antenna);
3245 if (sc->sc_defant != antenna)
3246 sc->sc_stats.ast_ant_defswitch++;
3247 sc->sc_defant = antenna;
3248 sc->sc_rxotherant = 0;
3249}
3250
3251static void
3252ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
3253{
3254 txq->axq_qnum = qnum;
3255 txq->axq_ac = 0;
3256 txq->axq_depth = 0;
3257 txq->axq_aggr_depth = 0;
3258 txq->axq_intrcnt = 0;
3259 txq->axq_link = NULL;
3260 txq->axq_softc = sc;
3261 TAILQ_INIT(&txq->axq_q);
3262 TAILQ_INIT(&txq->axq_tidq);
3263 ATH_TXQ_LOCK_INIT(sc, txq);
3264}
3265
3266/*
3267 * Setup a h/w transmit queue.
3268 */
3269static struct ath_txq *
3270ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
3271{
3272#define N(a) (sizeof(a)/sizeof(a[0]))
3273 struct ath_hal *ah = sc->sc_ah;
3274 HAL_TXQ_INFO qi;
3275 int qnum;
3276
3277 memset(&qi, 0, sizeof(qi));
3278 qi.tqi_subtype = subtype;
3279 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
3280 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
3281 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
3282 /*
3283 * Enable interrupts only for EOL and DESC conditions.
3284 * We mark tx descriptors to receive a DESC interrupt
3285 * when a tx queue gets deep; otherwise we wait for the
3286 * EOL to reap descriptors. Note that this is done to
3287 * reduce interrupt load; it only defers reaping
3288 * descriptors, never transmitting frames. Aside from
3289 * reducing interrupts this also permits more concurrency.
3290 * The only potential downside is if the tx queue backs
3291 * up, in which case the top half of the kernel may back up
3292 * due to a lack of tx descriptors.
3293 */
3294 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE;
3295 qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
3296 if (qnum == -1) {
3297 /*
3298 * NB: don't print a message, this happens
3299 * normally on parts with too few tx queues
3300 */
3301 return NULL;
3302 }
3303 if (qnum >= N(sc->sc_txq)) {
3304 device_printf(sc->sc_dev,
3305 "hal qnum %u out of range, max %zu!\n",
3306 qnum, N(sc->sc_txq));
3307 ath_hal_releasetxqueue(ah, qnum);
3308 return NULL;
3309 }
3310 if (!ATH_TXQ_SETUP(sc, qnum)) {
3311 ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
3312 sc->sc_txqsetup |= 1<<qnum;
3313 }
3314 return &sc->sc_txq[qnum];
3315#undef N
3316}
3317
3318/*
3319 * Setup a hardware data transmit queue for the specified
3320 * access category. The hal may not support all requested
3321 * queues, in which case it will return a reference to a
3322 * previously setup queue. We record the mapping from ACs
3323 * to h/w queues for use by ath_tx_start and also track
3324 * the set of h/w queues being used to optimize work in the
3325 * transmit interrupt handler and related routines.
3326 */
3327static int
3328ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
3329{
3330#define N(a) (sizeof(a)/sizeof(a[0]))
3331 struct ath_txq *txq;
3332
3333 if (ac >= N(sc->sc_ac2q)) {
3334 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
3335 ac, N(sc->sc_ac2q));
3336 return 0;
3337 }
3338 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
3339 if (txq != NULL) {
3340 txq->axq_ac = ac;
3341 sc->sc_ac2q[ac] = txq;
3342 return 1;
3343 } else
3344 return 0;
3345#undef N
3346}
3347
3348/*
3349 * Update WME parameters for a transmit queue.
3350 */
3351static int
3352ath_txq_update(struct ath_softc *sc, int ac)
3353{
3354#define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1)
3355#define ATH_TXOP_TO_US(v) (v<<5)
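/*
 * e.g. a logcwmin of 4 maps to a cwmin of (1<<4)-1 = 15 slots, and a
 * txopLimit of 94 (in 32us units) maps to 94<<5 = 3008us.
 */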
3356 struct ifnet *ifp = sc->sc_ifp;
3357 struct ieee80211com *ic = ifp->if_l2com;
3358 struct ath_txq *txq = sc->sc_ac2q[ac];
3359 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
3360 struct ath_hal *ah = sc->sc_ah;
3361 HAL_TXQ_INFO qi;
3362
3363 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
3364#ifdef IEEE80211_SUPPORT_TDMA
3365 if (sc->sc_tdma) {
3366 /*
3367 * AIFS is zero so there's no pre-transmit wait. The
3368 * burst time defines the slot duration and is configured
3369 * through net80211. The QCU is setup to skip post-xmit
3370 * back off, lock out all lower-priority QCUs, and fire
3371 * off the DMA beacon alert timer, which is setup based
3372 * on the slot configuration.
3373 */
3374 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
3375 | HAL_TXQ_TXERRINT_ENABLE
3376 | HAL_TXQ_TXURNINT_ENABLE
3377 | HAL_TXQ_TXEOLINT_ENABLE
3378 | HAL_TXQ_DBA_GATED
3379 | HAL_TXQ_BACKOFF_DISABLE
3380 | HAL_TXQ_ARB_LOCKOUT_GLOBAL
3381 ;
3382 qi.tqi_aifs = 0;
3383 /* XXX +dbaprep? */
3384 qi.tqi_readyTime = sc->sc_tdmaslotlen;
3385 qi.tqi_burstTime = qi.tqi_readyTime;
3386 } else {
3387#endif
3388 /*
3389 * XXX shouldn't this just use the default flags
3390 * used in the previous queue setup?
3391 */
3392 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
3393 | HAL_TXQ_TXERRINT_ENABLE
3394 | HAL_TXQ_TXDESCINT_ENABLE
3395 | HAL_TXQ_TXURNINT_ENABLE
3396 | HAL_TXQ_TXEOLINT_ENABLE
3397 ;
3398 qi.tqi_aifs = wmep->wmep_aifsn;
3399 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
3400 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
3401 qi.tqi_readyTime = 0;
3402 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
3403#ifdef IEEE80211_SUPPORT_TDMA
3404 }
3405#endif
3406
3407 DPRINTF(sc, ATH_DEBUG_RESET,
3408 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n",
3409 __func__, txq->axq_qnum, qi.tqi_qflags,
3410 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime);
3411
3412 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
3413 if_printf(ifp, "unable to update hardware queue "
3414 "parameters for %s traffic!\n",
3415 ieee80211_wme_acnames[ac]);
3416 return 0;
3417 } else {
3418 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
3419 return 1;
3420 }
3421#undef ATH_TXOP_TO_US
3422#undef ATH_EXPONENT_TO_VALUE
3423}
3424
3425/*
3426 * Callback from the 802.11 layer to update WME parameters.
3427 */
3428int
3429ath_wme_update(struct ieee80211com *ic)
3430{
3431 struct ath_softc *sc = ic->ic_ifp->if_softc;
3432
3433 return !ath_txq_update(sc, WME_AC_BE) ||
3434 !ath_txq_update(sc, WME_AC_BK) ||
3435 !ath_txq_update(sc, WME_AC_VI) ||
3436 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
3437}
3438
3439/*
3440 * Reclaim resources for a setup queue.
3441 */
3442static void
3443ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
3444{
3445
3446 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
3447 ATH_TXQ_LOCK_DESTROY(txq);
3448 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
3449}
3450
3451/*
3452 * Reclaim all tx queue resources.
3453 */
3454static void
3455ath_tx_cleanup(struct ath_softc *sc)
3456{
3457 int i;
3458
3459 ATH_TXBUF_LOCK_DESTROY(sc);
3460 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
3461 if (ATH_TXQ_SETUP(sc, i))
3462 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
3463}
3464
3465/*
3466 * Return h/w rate index for an IEEE rate (w/o basic rate bit)
3467 * using the current rates in sc_rixmap.
3468 */
3469int
3470ath_tx_findrix(const struct ath_softc *sc, uint8_t rate)
3471{
3472 int rix = sc->sc_rixmap[rate];
3473 /* NB: return lowest rix for invalid rate */
3474 return (rix == 0xff ? 0 : rix);
3475}
3476
3477static void
3478ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts,
3479 struct ath_buf *bf)
3480{
3481 struct ieee80211_node *ni = bf->bf_node;
3482 struct ifnet *ifp = sc->sc_ifp;
3483 struct ieee80211com *ic = ifp->if_l2com;
3484 int sr, lr, pri;
3485
3486 if (ts->ts_status == 0) {
3487 u_int8_t txant = ts->ts_antenna;
3488 sc->sc_stats.ast_ant_tx[txant]++;
3489 sc->sc_ant_tx[txant]++;
3490 if (ts->ts_finaltsi != 0)
3491 sc->sc_stats.ast_tx_altrate++;
3492 pri = M_WME_GETAC(bf->bf_m);
3493 if (pri >= WME_AC_VO)
3494 ic->ic_wme.wme_hipri_traffic++;
3495 if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)
3496 ni->ni_inact = ni->ni_inact_reload;
3497 } else {
3498 if (ts->ts_status & HAL_TXERR_XRETRY)
3499 sc->sc_stats.ast_tx_xretries++;
3500 if (ts->ts_status & HAL_TXERR_FIFO)
3501 sc->sc_stats.ast_tx_fifoerr++;
3502 if (ts->ts_status & HAL_TXERR_FILT)
3503 sc->sc_stats.ast_tx_filtered++;
3504 if (ts->ts_status & HAL_TXERR_XTXOP)
3505 sc->sc_stats.ast_tx_xtxop++;
3506 if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
3507 sc->sc_stats.ast_tx_timerexpired++;
3508
3509 if (ts->ts_status & HAL_TX_DATA_UNDERRUN)
3510 sc->sc_stats.ast_tx_data_underrun++;
3511 if (ts->ts_status & HAL_TX_DELIM_UNDERRUN)
3512 sc->sc_stats.ast_tx_delim_underrun++;
3513
3514 if (bf->bf_m->m_flags & M_FF)
3515 sc->sc_stats.ast_ff_txerr++;
3516 }
3517 /* XXX when is this valid? */
3518 if (ts->ts_status & HAL_TX_DESC_CFG_ERR)
3519 sc->sc_stats.ast_tx_desccfgerr++;
3520
3521 sr = ts->ts_shortretry;
3522 lr = ts->ts_longretry;
3523 sc->sc_stats.ast_tx_shortretry += sr;
3524 sc->sc_stats.ast_tx_longretry += lr;
3525
3526}
3527
3528/*
3529 * The default completion. If fail is 1, this means
3530 * "please don't retry the frame, and just return -1 status
3531 * to the net80211 stack".
3532 */
3533void
3534ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
3535{
3536 struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
3537 int st;
3538
3539 if (fail == 1)
3540 st = -1;
3541 else
3542 st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ?
3543 ts->ts_status : HAL_TXERR_XRETRY;
3544
3545 if (bf->bf_state.bfs_dobaw)
3546 device_printf(sc->sc_dev,
3547 "%s: bf %p: seqno %d: dobaw should've been cleared!\n",
3548 __func__,
3549 bf,
3550 SEQNO(bf->bf_state.bfs_seqno));
3551 if (bf->bf_next != NULL)
3552 device_printf(sc->sc_dev,
3553 "%s: bf %p: seqno %d: bf_next not NULL!\n",
3554 __func__,
3555 bf,
3556 SEQNO(bf->bf_state.bfs_seqno));
3557
3558 /*
3559 * Check if the node software queue is empty; if so
3560 * then clear the TIM.
3561 *
3562 * This needs to be done before the buffer is freed as
3563 * otherwise the node reference will have been released
3564 * and the node may not actually exist any longer.
3565 *
3566 * XXX I don't like this belonging here, but it's cleaner
3567 * to do it here right now than in all the other places
3568 * where ath_tx_default_comp() is called.
3569 *
3570 * XXX TODO: during drain, ensure that the callback is
3571 * being called so we get a chance to update the TIM.
3572 */
3573 if (bf->bf_node)
3574 ath_tx_update_tim(sc, bf->bf_node, 0);
3575
3576 /*
3577 * Do any tx complete callback. Note this must
3578 * be done before releasing the node reference.
3579 * This will free the mbuf, release the net80211
3580 * node and recycle the ath_buf.
3581 */
3582 ath_tx_freebuf(sc, bf, st);
3583}
3584
3585/*
3586 * Update rate control with the given completion status.
3587 */
3588void
3589ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
3590 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen,
3591 int nframes, int nbad)
3592{
3593 struct ath_node *an;
3594
3595 /* Only for unicast frames */
3596 if (ni == NULL)
3597 return;
3598
3599 an = ATH_NODE(ni);
3600 ATH_NODE_UNLOCK_ASSERT(an);
3601
3602 if ((ts->ts_status & HAL_TXERR_FILT) == 0) {
3603 ATH_NODE_LOCK(an);
3604 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad);
3605 ATH_NODE_UNLOCK(an);
3606 }
3607}
3608
3609/*
3610 * Update the busy status of the last frame on the free list.
3611 * When doing TDMA, the busy flag tracks whether the hardware
3612 * currently points to this buffer or not, and thus gated DMA
3613 * may restart by re-reading the last descriptor in this
3614 * buffer.
3615 *
3616 * This should be called in the completion function once one
3617 * of the buffers has been used.
3618 */
3619static void
3620ath_tx_update_busy(struct ath_softc *sc)
3621{
3622 struct ath_buf *last;
3623
3624 /*
3625 * Since the last frame may still be marked
3626 * as ATH_BUF_BUSY, unmark it here before
3627 * finishing the frame processing.
3628 * Since we've completed a frame (aggregate
3629 * or otherwise), the hardware has moved on
3630 * and is no longer referencing the previous
3631 * descriptor.
3632 */
3633 ATH_TXBUF_LOCK_ASSERT(sc);
3634 last = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s);
3635 if (last != NULL)
3636 last->bf_flags &= ~ATH_BUF_BUSY;
3637 last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
3638 if (last != NULL)
3639 last->bf_flags &= ~ATH_BUF_BUSY;
3640}
3641
3642/*
3643 * Process the completion of the given buffer.
3644 *
3645 * This calls the rate control update and then the buffer completion.
3646 * This will either free the buffer or requeue it. In any case, the
3647 * bf pointer should be treated as invalid after this function is called.
3648 */
3649void
3650ath_tx_process_buf_completion(struct ath_softc *sc, struct ath_txq *txq,
3651 struct ath_tx_status *ts, struct ath_buf *bf)
3652{
3653 struct ieee80211_node *ni = bf->bf_node;
3654 struct ath_node *an = NULL;
3655
3656 ATH_TXQ_UNLOCK_ASSERT(txq);
3657
3658 /* If unicast frame, update general statistics */
3659 if (ni != NULL) {
3660 an = ATH_NODE(ni);
3661 /* update statistics */
3662 ath_tx_update_stats(sc, ts, bf);
3663 }
3664
3665 /*
3666 * Call the completion handler.
3667 * The completion handler is responsible for
3668 * calling the rate control code.
3669 *
3670 * Frames with no completion handler get the
3671 * rate control code called here.
3672 */
3673 if (bf->bf_comp == NULL) {
3674 if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
3675 (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) {
3676 /*
3677 * XXX assume this isn't an aggregate
3678 * frame.
3679 */
3680 ath_tx_update_ratectrl(sc, ni,
3681 bf->bf_state.bfs_rc, ts,
3682 bf->bf_state.bfs_pktlen, 1,
3683 (ts->ts_status == 0 ? 0 : 1));
3684 }
3685 ath_tx_default_comp(sc, bf, 0);
3686 } else
3687 bf->bf_comp(sc, bf, 0);
3688}
3689
3690
3691
3692/*
3693 * Process completed xmit descriptors from the specified queue.
3694 * Kick the packet scheduler if needed; that can happen from
3695 * within this particular task.
3696 */
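/*
 * In outline: the loop below pops buffers off the head of the
 * queue until ath_hal_txprocdesc() returns HAL_EINPROGRESS,
 * i.e. until it reaches a descriptor the hardware hasn't
 * finished with; each completed buffer is handed to
 * ath_tx_process_buf_completion() with the TXQ lock dropped.
 */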
3697static int
3698ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
3699{
3700 struct ath_hal *ah = sc->sc_ah;
3701 struct ath_buf *bf;
3702 struct ath_desc *ds;
3703 struct ath_tx_status *ts;
3704 struct ieee80211_node *ni;
3705#ifdef IEEE80211_SUPPORT_SUPERG
3706 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
3707#endif /* IEEE80211_SUPPORT_SUPERG */
3708 int nacked;
3709 HAL_STATUS status;
3710
3711 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
3712 __func__, txq->axq_qnum,
3713 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
3714 txq->axq_link);
3715
3716 ATH_KTR(sc, ATH_KTR_TXCOMP, 4,
3717 "ath_tx_processq: txq=%u head %p link %p depth %p",
3718 txq->axq_qnum,
3719 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
3720 txq->axq_link,
3721 txq->axq_depth);
3722
3723 nacked = 0;
3724 for (;;) {
3725 ATH_TXQ_LOCK(txq);
3726 txq->axq_intrcnt = 0; /* reset periodic desc intr count */
3727 bf = TAILQ_FIRST(&txq->axq_q);
3728 if (bf == NULL) {
3729 ATH_TXQ_UNLOCK(txq);
3730 break;
3731 }
3732 ds = bf->bf_lastds; /* XXX must be setup correctly! */
3733 ts = &bf->bf_status.ds_txstat;
3734
3735 status = ath_hal_txprocdesc(ah, ds, ts);
3736#ifdef ATH_DEBUG
3737 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
3738 ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
3739 status == HAL_OK);
3740 else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0))
3741 ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
3742 status == HAL_OK);
3743#endif
3744
3745 if (status == HAL_EINPROGRESS) {
3746 ATH_KTR(sc, ATH_KTR_TXCOMP, 3,
3747 "ath_tx_processq: txq=%u, bf=%p ds=%p, HAL_EINPROGRESS",
3748 txq->axq_qnum, bf, ds);
3749 ATH_TXQ_UNLOCK(txq);
3750 break;
3751 }
3752 ATH_TXQ_REMOVE(txq, bf, bf_list);
3753#ifdef IEEE80211_SUPPORT_TDMA
3754 if (txq->axq_depth > 0) {
3755 /*
3756 * More frames follow. Mark the buffer busy
3757 * so it's not re-used while the hardware may
3758 * still re-read the link field in the descriptor.
3759 *
3760 * Use the last buffer in an aggregate as that
3761 * is where the hardware may be - intermediate
3762 * descriptors won't be "busy".
3763 */
3764 bf->bf_last->bf_flags |= ATH_BUF_BUSY;
3765 } else
3766#else
3767 if (txq->axq_depth == 0)
3768#endif
3769 txq->axq_link = NULL;
3770 if (bf->bf_state.bfs_aggr)
3771 txq->axq_aggr_depth--;
3772
3773 ni = bf->bf_node;
3774
3775 ATH_KTR(sc, ATH_KTR_TXCOMP, 5,
3776 "ath_tx_processq: txq=%u, bf=%p, ds=%p, ni=%p, ts_status=0x%08x",
3777 txq->axq_qnum, bf, ds, ni, ts->ts_status);
3778 /*
3779 * If unicast frame was ack'd update RSSI,
3780 * including the last rx time used to
3781 * workaround phantom bmiss interrupts.
3782 */
3783 if (ni != NULL && ts->ts_status == 0 &&
3784 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) {
3785 nacked++;
3786 sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
3787 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
3788 ts->ts_rssi);
3789 }
3790 ATH_TXQ_UNLOCK(txq);
3791
3792 /*
3793 * Update statistics and call completion
3794 */
3795 ath_tx_process_buf_completion(sc, txq, ts, bf);
3796
3797 /* XXX at this point, bf and ni may be totally invalid */
3798 }
3799#ifdef IEEE80211_SUPPORT_SUPERG
3800 /*
3801 * Flush fast-frame staging queue when traffic slows.
3802 */
3803 if (txq->axq_depth <= 1)
3804 ieee80211_ff_flush(ic, txq->axq_ac);
3805#endif
3806
3807 /* Kick the TXQ scheduler */
3808 if (dosched) {
3809 ATH_TXQ_LOCK(txq);
3810 ath_txq_sched(sc, txq);
3811 ATH_TXQ_UNLOCK(txq);
3812 }
3813
3814 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
3815 "ath_tx_processq: txq=%u: done",
3816 txq->axq_qnum);
3817
3818 return nacked;
3819}
3820
3821#define TXQACTIVE(t, q) ( (t) & (1 << (q)))
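/*
 * For example, if the snapshot of sc_txq_active taken in the
 * handlers below is txqs == 0x0005, then TXQACTIVE(txqs, 0) and
 * TXQACTIVE(txqs, 2) are non-zero and only hardware queues 0
 * and 2 get serviced on this pass.
 */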
3822
3823/*
3824 * Deferred processing of transmit interrupt; special-cased
3825 * for a single hardware transmit queue (e.g. 5210 and 5211).
3826 */
3827static void
3828ath_tx_proc_q0(void *arg, int npending)
3829{
3830 struct ath_softc *sc = arg;
3831 struct ifnet *ifp = sc->sc_ifp;
3832 uint32_t txqs;
3833
3834 ATH_PCU_LOCK(sc);
3835 sc->sc_txproc_cnt++;
3836 txqs = sc->sc_txq_active;
3837 sc->sc_txq_active &= ~txqs;
3838 ATH_PCU_UNLOCK(sc);
3839
3840 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
3841 "ath_tx_proc_q0: txqs=0x%08x", txqs);
3842
3843 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1))
3844 /* XXX why is lastrx updated in tx code? */
3845 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
3846 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
3847 ath_tx_processq(sc, sc->sc_cabq, 1);
3848 IF_LOCK(&ifp->if_snd);
3849 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3850 IF_UNLOCK(&ifp->if_snd);
3851 sc->sc_wd_timer = 0;
3852
3853 if (sc->sc_softled)
3854 ath_led_event(sc, sc->sc_txrix);
3855
3856 ATH_PCU_LOCK(sc);
3857 sc->sc_txproc_cnt--;
3858 ATH_PCU_UNLOCK(sc);
3859
3860 ath_tx_kick(sc);
3861}
3862
3863/*
3864 * Deferred processing of transmit interrupt; special-cased
3865 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
3866 */
3867static void
3868ath_tx_proc_q0123(void *arg, int npending)
3869{
3870 struct ath_softc *sc = arg;
3871 struct ifnet *ifp = sc->sc_ifp;
3872 int nacked;
3873 uint32_t txqs;
3874
3875 ATH_PCU_LOCK(sc);
3876 sc->sc_txproc_cnt++;
3877 txqs = sc->sc_txq_active;
3878 sc->sc_txq_active &= ~txqs;
3879 ATH_PCU_UNLOCK(sc);
3880
3881 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
3882 "ath_tx_proc_q0123: txqs=0x%08x", txqs);
3883
3884 /*
3885 * Process each active queue.
3886 */
3887 nacked = 0;
3888 if (TXQACTIVE(txqs, 0))
3889 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1);
3890 if (TXQACTIVE(txqs, 1))
3891 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1);
3892 if (TXQACTIVE(txqs, 2))
3893 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1);
3894 if (TXQACTIVE(txqs, 3))
3895 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1);
3896 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
3897 ath_tx_processq(sc, sc->sc_cabq, 1);
3898 if (nacked)
3899 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
3900
3901 IF_LOCK(&ifp->if_snd);
3902 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3903 IF_UNLOCK(&ifp->if_snd);
3904 sc->sc_wd_timer = 0;
3905
3906 if (sc->sc_softled)
3907 ath_led_event(sc, sc->sc_txrix);
3908
3909 ATH_PCU_LOCK(sc);
3910 sc->sc_txproc_cnt--;
3911 ATH_PCU_UNLOCK(sc);
3912
3913 ath_tx_kick(sc);
3914}
3915
3916/*
3917 * Deferred processing of transmit interrupt.
3918 */
3919static void
3920ath_tx_proc(void *arg, int npending)
3921{
3922 struct ath_softc *sc = arg;
3923 struct ifnet *ifp = sc->sc_ifp;
3924 int i, nacked;
3925 uint32_t txqs;
3926
3927 ATH_PCU_LOCK(sc);
3928 sc->sc_txproc_cnt++;
3929 txqs = sc->sc_txq_active;
3930 sc->sc_txq_active &= ~txqs;
3931 ATH_PCU_UNLOCK(sc);
3932
3933 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc: txqs=0x%08x", txqs);
3934
3935 /*
3936 * Process each active queue.
3937 */
3938 nacked = 0;
3939 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
3940 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i))
3941 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1);
3942 if (nacked)
3943 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
3944
3945 /* XXX check this inside of IF_LOCK? */
3946 IF_LOCK(&ifp->if_snd);
3947 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3948 IF_UNLOCK(&ifp->if_snd);
3949 sc->sc_wd_timer = 0;
3950
3951 if (sc->sc_softled)
3952 ath_led_event(sc, sc->sc_txrix);
3953
3954 ATH_PCU_LOCK(sc);
3955 sc->sc_txproc_cnt--;
3956 ATH_PCU_UNLOCK(sc);
3957
3958 ath_tx_kick(sc);
3959}
3960#undef TXQACTIVE
3961
3962/*
3963 * Deferred processing of TXQ rescheduling.
3964 */
3965static void
3966ath_txq_sched_tasklet(void *arg, int npending)
3967{
3968 struct ath_softc *sc = arg;
3969 int i;
3970
3971 /* XXX is skipping ok? */
3972 ATH_PCU_LOCK(sc);
3973#if 0
3974 if (sc->sc_inreset_cnt > 0) {
3975 device_printf(sc->sc_dev,
3976 "%s: sc_inreset_cnt > 0; skipping\n", __func__);
3977 ATH_PCU_UNLOCK(sc);
3978 return;
3979 }
3980#endif
3981 sc->sc_txproc_cnt++;
3982 ATH_PCU_UNLOCK(sc);
3983
3984 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
3985 if (ATH_TXQ_SETUP(sc, i)) {
3986 ATH_TXQ_LOCK(&sc->sc_txq[i]);
3987 ath_txq_sched(sc, &sc->sc_txq[i]);
3988 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
3989 }
3990 }
3991
3992 ATH_PCU_LOCK(sc);
3993 sc->sc_txproc_cnt--;
3994 ATH_PCU_UNLOCK(sc);
3995}
3996
3997void
3998ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf)
3999{
4000
4001 ATH_TXBUF_LOCK_ASSERT(sc);
4002
4003 if (bf->bf_flags & ATH_BUF_MGMT)
4004 TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list);
4005 else {
4006 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
4007 sc->sc_txbuf_cnt++;
4008 if (sc->sc_txbuf_cnt > ath_txbuf) {
4009 device_printf(sc->sc_dev,
4010 "%s: sc_txbuf_cnt > %d?\n",
4011 __func__,
4012 ath_txbuf);
4013 sc->sc_txbuf_cnt = ath_txbuf;
4014 }
4015 }
4016}
4017
4018void
4019ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf)
4020{
4021
4022 ATH_TXBUF_LOCK_ASSERT(sc);
4023
4024 if (bf->bf_flags & ATH_BUF_MGMT)
4025 TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list);
4026 else {
4027 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
4028 sc->sc_txbuf_cnt++;
4029 if (sc->sc_txbuf_cnt > ATH_TXBUF) {
4030 device_printf(sc->sc_dev,
4031 "%s: sc_txbuf_cnt > %d?\n",
4032 __func__,
4033 ATH_TXBUF);
4034 sc->sc_txbuf_cnt = ATH_TXBUF;
4035 }
4036 }
4037}
4038
4039/*
4040 * Return a buffer to the pool and update the 'busy' flag on the
4041 * previous 'tail' entry.
4042 *
4043 * This _must_ only be called when the buffer is involved in a completed
4044 * TX. The logic is that if it was part of an active TX, the previous
4045 * buffer on the list is no longer involved in a halted TX DMA queue
4046 * waiting for restart (e.g. for TDMA).
4047 *
4048 * The caller must free the mbuf and recycle the node reference.
4049 */
4050void
4051ath_freebuf(struct ath_softc *sc, struct ath_buf *bf)
4052{
4053 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
4054 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE);
4055
4056 KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__));
4057 KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__));
4058
4059 ATH_TXBUF_LOCK(sc);
4060 ath_tx_update_busy(sc);
4061 ath_returnbuf_tail(sc, bf);
4062 ATH_TXBUF_UNLOCK(sc);
4063}
4064
4065/*
4066 * This is currently used by ath_tx_draintxq() and
4067 * ath_tx_tid_free_pkts().
4068 *
4069 * It recycles a single ath_buf.
4070 */
4071void
4072ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status)
4073{
4074 struct ieee80211_node *ni = bf->bf_node;
4075 struct mbuf *m0 = bf->bf_m;
4076
4077 bf->bf_node = NULL;
4078 bf->bf_m = NULL;
4079
4080 /* Free the buffer, it's not needed any longer */
4081 ath_freebuf(sc, bf);
4082
4083 if (ni != NULL) {
4084 /*
4085 * Do any callback and reclaim the node reference.
4086 */
4087 if (m0->m_flags & M_TXCB)
4088 ieee80211_process_callback(ni, m0, status);
4089 ieee80211_free_node(ni);
4090 }
4091 m_freem(m0);
4092
4093 /*
4094 * XXX the buffer used to be freed -after- the callback, but the
4095 * DMA map was unloaded where ath_freebuf() now is. I've no idea
4096 * what effect this will have.
4097 */
4098}
4099
4100void
4101ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
4102{
4103#ifdef ATH_DEBUG
4104 struct ath_hal *ah = sc->sc_ah;
4105#endif
4106 struct ath_buf *bf;
4107 u_int ix;
4108
4109 /*
4110 * NB: this assumes output has been stopped and
4111 * we do not need to block ath_tx_proc
4112 */
4113 ATH_TXBUF_LOCK(sc);
4114 bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
4115 if (bf != NULL)
4116 bf->bf_flags &= ~ATH_BUF_BUSY;
4117 bf = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s);
4118 if (bf != NULL)
4119 bf->bf_flags &= ~ATH_BUF_BUSY;
4120 ATH_TXBUF_UNLOCK(sc);
4121
4122 for (ix = 0;; ix++) {
4123 ATH_TXQ_LOCK(txq);
4124 bf = TAILQ_FIRST(&txq->axq_q);
4125 if (bf == NULL) {
4126 txq->axq_link = NULL;
4127 /*
4128 * There's currently no flag that indicates
4129 * a buffer is on the FIFO. So until that
4130 * occurs, just clear the FIFO counter here.
4131 *
4132 * Yes, this means that if something in parallel
4133 * is pushing things onto this TXQ and pushing
4134 * _that_ into the hardware, things will get
4135 * very fruity very quickly.
4136 */
4137 txq->axq_fifo_depth = 0;
4138 ATH_TXQ_UNLOCK(txq);
4139 break;
4140 }
4141 ATH_TXQ_REMOVE(txq, bf, bf_list);
4142 if (bf->bf_state.bfs_aggr)
4143 txq->axq_aggr_depth--;
4144#ifdef ATH_DEBUG
4145 if (sc->sc_debug & ATH_DEBUG_RESET) {
4146 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
4147 int status = 0;
4148
4149 /*
4150 * EDMA operation has a TX completion FIFO
4151 * separate from the TX descriptor, so this
4152 * method of checking the "completion" status
4153 * is wrong.
4154 */
4155 if (! sc->sc_isedma) {
4156 status = (ath_hal_txprocdesc(ah,
4157 bf->bf_lastds,
4158 &bf->bf_status.ds_txstat) == HAL_OK);
4159 }
4160 ath_printtxbuf(sc, bf, txq->axq_qnum, ix, status);
4161 ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *),
4162 bf->bf_m->m_len, 0, -1);
4163 }
4164#endif /* ATH_DEBUG */
4165 /*
4166 * Since we're now doing magic in the completion
4167 * functions, we -must- call it for aggregation
4168 * destinations or BAW tracking will get upset.
4169 */
4170 /*
4171 * Clear ATH_BUF_BUSY; the completion handler
4172 * will free the buffer.
4173 */
4174 ATH_TXQ_UNLOCK(txq);
4175 bf->bf_flags &= ~ATH_BUF_BUSY;
4176 if (bf->bf_comp)
4177 bf->bf_comp(sc, bf, 1);
4178 else
4179 ath_tx_default_comp(sc, bf, 1);
4180 }
4181
4182 /*
4183 * Drain software queued frames which are on
4184 * active TIDs.
4185 */
4186 ath_tx_txq_drain(sc, txq);
4187}
4188
4189static void
4190ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
4191{
4192 struct ath_hal *ah = sc->sc_ah;
4193
4194 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
4195 __func__, txq->axq_qnum,
4196 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
4197 txq->axq_link);
4198 (void) ath_hal_stoptxdma(ah, txq->axq_qnum);
4199}
4200
4201int
4202ath_stoptxdma(struct ath_softc *sc)
4203{
4204 struct ath_hal *ah = sc->sc_ah;
4205 int i;
4206
4207 /* XXX return value */
4208 /* don't touch the hardware if marked invalid */
4209 if (sc->sc_invalid)
4210 return 0;
4211
4212 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
4213 __func__, sc->sc_bhalq,
4214 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
4215 NULL);
4216 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
4217 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4218 if (ATH_TXQ_SETUP(sc, i))
4219 ath_tx_stopdma(sc, &sc->sc_txq[i]);
4222
4223 return 1;
4224}
4225
4226/*
4227 * Drain the transmit queues and reclaim resources.
4228 */
4229void
4230ath_legacy_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
4231{
4232#ifdef ATH_DEBUG
4233 struct ath_hal *ah = sc->sc_ah;
4234#endif
4235 struct ifnet *ifp = sc->sc_ifp;
4236 int i;
4237
4238 (void) ath_stoptxdma(sc);
4239
4240 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
4241 /*
4242 * XXX TODO: should we just handle the completed TX frames
4243 * here, whether or not the reset is a full one or not?
4244 */
4245 if (ATH_TXQ_SETUP(sc, i)) {
4246 if (reset_type == ATH_RESET_NOLOSS)
4247 ath_tx_processq(sc, &sc->sc_txq[i], 0);
4248 else
4249 ath_tx_draintxq(sc, &sc->sc_txq[i]);
4250 }
4251 }
4252#ifdef ATH_DEBUG
4253 if (sc->sc_debug & ATH_DEBUG_RESET) {
4254 struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf);
4255 if (bf != NULL && bf->bf_m != NULL) {
4256 ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
4257 ath_hal_txprocdesc(ah, bf->bf_lastds,
4258 &bf->bf_status.ds_txstat) == HAL_OK);
4259 ieee80211_dump_pkt(ifp->if_l2com,
4260 mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
4261 0, -1);
4262 }
4263 }
4264#endif /* ATH_DEBUG */
4265 IF_LOCK(&ifp->if_snd);
4266 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4267 IF_UNLOCK(&ifp->if_snd);
4268 sc->sc_wd_timer = 0;
4269}
4270
4271/*
4272 * Update internal state after a channel change.
4273 */
4274static void
4275ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
4276{
4277 enum ieee80211_phymode mode;
4278
4279 /*
4280 * Change channels and update the h/w rate map
4281 * if we're switching; e.g. 11a to 11b/g.
4282 */
4283 mode = ieee80211_chan2mode(chan);
4284 if (mode != sc->sc_curmode)
4285 ath_setcurmode(sc, mode);
4286 sc->sc_curchan = chan;
4287}
4288
4289/*
4290 * Set/change channels. If the channel is really being changed,
4291 * it's done by resetting the chip. To accomplish this we must
4292 * first clean up any pending DMA, then restart things much
4293 * as ath_init does.
4294 */
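/*
 * In brief, the sequence below is: block the taskqueue and
 * disable interrupts, stop pending TX/RX completion work, take
 * the reset lock, then (if the channel really changed) stop RX,
 * drain completed and then pending TX, reset the hardware on
 * the new channel, restart RX, update the rate map and beacon
 * timers, and finally re-enable interrupts and TX/RX processing.
 */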
4295static int
4296ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
4297{
4298 struct ifnet *ifp = sc->sc_ifp;
4299 struct ieee80211com *ic = ifp->if_l2com;
4300 struct ath_hal *ah = sc->sc_ah;
4301 int ret = 0;
4302
4303 /* Treat this as an interface reset */
4304 ATH_PCU_UNLOCK_ASSERT(sc);
4305 ATH_UNLOCK_ASSERT(sc);
4306
4307 /* (Try to) stop TX/RX from occurring */
4308 taskqueue_block(sc->sc_tq);
4309
4310 ATH_PCU_LOCK(sc);
4311 ath_hal_intrset(ah, 0); /* Stop new RX/TX completion */
4312 ath_txrx_stop_locked(sc); /* Stop pending RX/TX completion */
4313 if (ath_reset_grablock(sc, 1) == 0) {
4314 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
4315 __func__);
4316 }
4317 ATH_PCU_UNLOCK(sc);
4318
4319 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
4320 __func__, ieee80211_chan2ieee(ic, chan),
4321 chan->ic_freq, chan->ic_flags);
4322 if (chan != sc->sc_curchan) {
4323 HAL_STATUS status;
4324 /*
4325 * To switch channels clear any pending DMA operations;
4326 * wait long enough for the RX fifo to drain, reset the
4327 * hardware at the new frequency, and then re-enable
4328 * the relevant bits of the h/w.
4329 */
4330#if 0
4331 ath_hal_intrset(ah, 0); /* disable interrupts */
4332#endif
4333 ath_stoprecv(sc, 1); /* turn off frame recv */
4334 /*
4335 * First, handle completed TX/RX frames.
4336 */
4337 ath_rx_flush(sc);
4338 ath_draintxq(sc, ATH_RESET_NOLOSS);
4339 /*
4340 * Next, flush the non-scheduled frames.
4341 */
4342 ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */
4343
4344 if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) {
4345 if_printf(ifp, "%s: unable to reset "
4346 "channel %u (%u MHz, flags 0x%x), hal status %u\n",
4347 __func__, ieee80211_chan2ieee(ic, chan),
4348 chan->ic_freq, chan->ic_flags, status);
4349 ret = EIO;
4350 goto finish;
4351 }
4352 sc->sc_diversity = ath_hal_getdiversity(ah);
4353
4354 /* Let DFS at it in case it's a DFS channel */
4355 ath_dfs_radar_enable(sc, chan);
4356
4357 /*
4358 * Re-enable rx framework.
4359 */
4360 if (ath_startrecv(sc) != 0) {
4361 if_printf(ifp, "%s: unable to restart recv logic\n",
4362 __func__);
4363 ret = EIO;
4364 goto finish;
4365 }
4366
4367 /*
4368 * Change channels and update the h/w rate map
4369 * if we're switching; e.g. 11a to 11b/g.
4370 */
4371 ath_chan_change(sc, chan);
4372
4373 /*
4374 * Reset clears the beacon timers; reset them
4375 * here if needed.
4376 */
4377 if (sc->sc_beacons) { /* restart beacons */
4378#ifdef IEEE80211_SUPPORT_TDMA
4379 if (sc->sc_tdma)
4380 ath_tdma_config(sc, NULL);
4381 else
4382#endif
4383 ath_beacon_config(sc, NULL);
4384 }
4385
4386 /*
4387 * Re-enable interrupts.
4388 */
4389#if 0
4390 ath_hal_intrset(ah, sc->sc_imask);
4391#endif
4392 }
4393
4394finish:
4395 ATH_PCU_LOCK(sc);
4396 sc->sc_inreset_cnt--;
4397 /* XXX only do this if sc_inreset_cnt == 0? */
4398 ath_hal_intrset(ah, sc->sc_imask);
4399 ATH_PCU_UNLOCK(sc);
4400
4401 IF_LOCK(&ifp->if_snd);
4402 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4403 IF_UNLOCK(&ifp->if_snd);
4404 ath_txrx_start(sc);
4405 /* XXX ath_start? */
4406
4407 return ret;
4408}
4409
4410/*
4411 * Periodically recalibrate the PHY to account
4412 * for temperature/environment changes.
4413 */
4414static void
4415ath_calibrate(void *arg)
4416{
4417 struct ath_softc *sc = arg;
4418 struct ath_hal *ah = sc->sc_ah;
4419 struct ifnet *ifp = sc->sc_ifp;
4420 struct ieee80211com *ic = ifp->if_l2com;
4421 HAL_BOOL longCal, isCalDone = AH_TRUE;
4422 HAL_BOOL aniCal, shortCal = AH_FALSE;
4423 int nextcal;
4424
4425 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */
4426 goto restart;
4427 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz);
4428 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000);
4429 if (sc->sc_doresetcal)
4430 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000);
4431
4432 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal);
4433 if (aniCal) {
4434 sc->sc_stats.ast_ani_cal++;
4435 sc->sc_lastani = ticks;
4436 ath_hal_ani_poll(ah, sc->sc_curchan);
4437 }
4438
4439 if (longCal) {
4440 sc->sc_stats.ast_per_cal++;
4441 sc->sc_lastlongcal = ticks;
4442 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
4443 /*
4444 * Rfgain is out of bounds, reset the chip
4445 * to load new gain values.
4446 */
4447 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
4448 "%s: rfgain change\n", __func__);
4449 sc->sc_stats.ast_per_rfgain++;
4450 sc->sc_resetcal = 0;
4451 sc->sc_doresetcal = AH_TRUE;
4452 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
4453 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
4454 return;
4455 }
4456 /*
4457 * If this long cal is after an idle period, then
4458 * reset the data collection state so we start fresh.
4459 */
4460 if (sc->sc_resetcal) {
4461 (void) ath_hal_calreset(ah, sc->sc_curchan);
4462 sc->sc_lastcalreset = ticks;
4463 sc->sc_lastshortcal = ticks;
4464 sc->sc_resetcal = 0;
4465 sc->sc_doresetcal = AH_TRUE;
4466 }
4467 }
4468
4469 /* Only call if we're doing a short/long cal, not for ANI calibration */
4470 if (shortCal || longCal) {
4471 isCalDone = AH_FALSE;
4472 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) {
4473 if (longCal) {
4474 /*
4475 * Calibrate noise floor data again in case of change.
4476 */
4477 ath_hal_process_noisefloor(ah);
4478 }
4479 } else {
4480 DPRINTF(sc, ATH_DEBUG_ANY,
4481 "%s: calibration of channel %u failed\n",
4482 __func__, sc->sc_curchan->ic_freq);
4483 sc->sc_stats.ast_per_calfail++;
4484 }
4485 if (shortCal)
4486 sc->sc_lastshortcal = ticks;
4487 }
4488 if (!isCalDone) {
4489restart:
4490 /*
4491 * Use a shorter interval to potentially collect multiple
4492 * data samples required to complete calibration. Once
4493 * we're told the work is done we drop back to a longer
4494 * interval between requests. We're more aggressive doing
4495 * work when operating as an AP to improve operation right
4496 * after startup.
4497 */
4498 sc->sc_lastshortcal = ticks;
4499 nextcal = ath_shortcalinterval*hz/1000;
4500 if (sc->sc_opmode != HAL_M_HOSTAP)
4501 nextcal *= 10;
4502 sc->sc_doresetcal = AH_TRUE;
4503 } else {
4504 /* nextcal should be the shortest time for next event */
4505 nextcal = ath_longcalinterval*hz;
4506 if (sc->sc_lastcalreset == 0)
4507 sc->sc_lastcalreset = sc->sc_lastlongcal;
4508 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz)
4509 sc->sc_resetcal = 1; /* setup reset next trip */
4510 sc->sc_doresetcal = AH_FALSE;
4511 }
4512 /* ANI calibration may occur more often than short/long/resetcal */
4513 if (ath_anicalinterval > 0)
4514 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000);
4515
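	/*
	 * As a worked example (with illustrative values, not
	 * necessarily the compiled-in defaults): with hz = 1000, a
	 * 100ms short-cal interval and a 30s long-cal interval, an
	 * incomplete calibration reschedules after 100 ticks (1000
	 * in non-AP modes), a completed one after 30000 ticks, and
	 * either is clamped down to 100 ticks when ANI polling is
	 * enabled with a 100ms interval.
	 */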
4516 if (nextcal != 0) {
4517 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n",
4518 __func__, nextcal, isCalDone ? "" : "!");
4519 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc);
4520 } else {
4521 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n",
4522 __func__);
4523 /* NB: don't rearm timer */
4524 }
4525}
4526
4527static void
4528ath_scan_start(struct ieee80211com *ic)
4529{
4530 struct ifnet *ifp = ic->ic_ifp;
4531 struct ath_softc *sc = ifp->if_softc;
4532 struct ath_hal *ah = sc->sc_ah;
4533 u_int32_t rfilt;
4534
4535 /* XXX calibration timer? */
4536
4537 ATH_LOCK(sc);
4538 sc->sc_scanning = 1;
4539 sc->sc_syncbeacon = 0;
4540 rfilt = ath_calcrxfilter(sc);
4541 ATH_UNLOCK(sc);
4542
4543 ATH_PCU_LOCK(sc);
4544 ath_hal_setrxfilter(ah, rfilt);
4545 ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0);
4546 ATH_PCU_UNLOCK(sc);
4547
4548 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
4549 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr));
4550}
4551
4552static void
4553ath_scan_end(struct ieee80211com *ic)
4554{
4555 struct ifnet *ifp = ic->ic_ifp;
4556 struct ath_softc *sc = ifp->if_softc;
4557 struct ath_hal *ah = sc->sc_ah;
4558 u_int32_t rfilt;
4559
4560 ATH_LOCK(sc);
4561 sc->sc_scanning = 0;
4562 rfilt = ath_calcrxfilter(sc);
4563 ATH_UNLOCK(sc);
4564
4565 ATH_PCU_LOCK(sc);
4566 ath_hal_setrxfilter(ah, rfilt);
4567 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
4568
4569 ath_hal_process_noisefloor(ah);
4570 ATH_PCU_UNLOCK(sc);
4571
4572 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
4573 __func__, rfilt, ether_sprintf(sc->sc_curbssid),
4574 sc->sc_curaid);
4575}
4576
4577#ifdef ATH_ENABLE_11N
4578/*
4579 * For now, just do a channel change.
4580 *
4581 * Later, we'll go through the hard slog of suspending tx/rx, changing rate
4582 * control state and resetting the hardware without dropping frames out
4583 * of the queue.
4584 *
4585 * The unfortunate trouble here is making absolutely sure that the
4586 * channel width change has propagated enough so the hardware
4587 * absolutely isn't handed bogus frames for its current operating
4588 * mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and
4589 * do occur in parallel, we need to make certain we've blocked
4590 * any further ongoing TX (and RX, that can cause raw TX)
4591 * before we do this.
4592 */
4593static void
4594ath_update_chw(struct ieee80211com *ic)
4595{
4596 struct ifnet *ifp = ic->ic_ifp;
4597 struct ath_softc *sc = ifp->if_softc;
4598
4599 DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__);
4600 ath_set_channel(ic);
4601}
4602#endif /* ATH_ENABLE_11N */
4603
4604static void
4605ath_set_channel(struct ieee80211com *ic)
4606{
4607 struct ifnet *ifp = ic->ic_ifp;
4608 struct ath_softc *sc = ifp->if_softc;
4609
4610 (void) ath_chan_set(sc, ic->ic_curchan);
4611 /*
4612 * If we are returning to our bss channel then mark state
4613 * so the next recv'd beacon's tsf will be used to sync the
4614 * beacon timers. Note that since we only hear beacons in
4615 * sta/ibss mode this has no effect in other operating modes.
4616 */
4617 ATH_LOCK(sc);
4618 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
4619 sc->sc_syncbeacon = 1;
4620 ATH_UNLOCK(sc);
4621}
4622
4623/*
4624 * Walk the vap list and check if there are any vaps in RUN state.
4625 */
4626static int
4627ath_isanyrunningvaps(struct ieee80211vap *this)
4628{
4629 struct ieee80211com *ic = this->iv_ic;
4630 struct ieee80211vap *vap;
4631
4632 IEEE80211_LOCK_ASSERT(ic);
4633
4634 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
4635 if (vap != this && vap->iv_state >= IEEE80211_S_RUN)
4636 return 1;
4637 }
4638 return 0;
4639}
4640
4641static int
4642ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4643{
4644 struct ieee80211com *ic = vap->iv_ic;
4645 struct ath_softc *sc = ic->ic_ifp->if_softc;
4646 struct ath_vap *avp = ATH_VAP(vap);
4647 struct ath_hal *ah = sc->sc_ah;
4648 struct ieee80211_node *ni = NULL;
4649 int i, error, stamode;
4650 u_int32_t rfilt;
4651 int csa_run_transition = 0;
4652 static const HAL_LED_STATE leds[] = {
4653 HAL_LED_INIT, /* IEEE80211_S_INIT */
4654 HAL_LED_SCAN, /* IEEE80211_S_SCAN */
4655 HAL_LED_AUTH, /* IEEE80211_S_AUTH */
4656 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */
4657 HAL_LED_RUN, /* IEEE80211_S_CAC */
4658 HAL_LED_RUN, /* IEEE80211_S_RUN */
4659 HAL_LED_RUN, /* IEEE80211_S_CSA */
4660 HAL_LED_RUN, /* IEEE80211_S_SLEEP */
4661 };
4662
4663 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
4664 ieee80211_state_name[vap->iv_state],
4665 ieee80211_state_name[nstate]);
4666
4667 /*
4668 * net80211 _should_ have the comlock asserted at this point.
4669 * There are some comments around the calls to vap->iv_newstate
4670 * which indicate that it (newstate) may end up dropping the
4671 * lock. This and the subsequent lock assert check after newstate
4672 * are an attempt to catch these and figure out how/why.
4673 */
4674 IEEE80211_LOCK_ASSERT(ic);
4675
4676 if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN)
4677 csa_run_transition = 1;
4678
4679 callout_drain(&sc->sc_cal_ch);
4680 ath_hal_setledstate(ah, leds[nstate]); /* set LED */
4681
4682 if (nstate == IEEE80211_S_SCAN) {
4683 /*
4684 * Scanning: turn off beacon miss and don't beacon.
4685 * Mark beacon state so when we reach RUN state we'll
4686 * [re]setup beacons. Unblock the task q thread so
4687 * deferred interrupt processing is done.
4688 */
4689 ath_hal_intrset(ah,
4690 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
4691 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
4692 sc->sc_beacons = 0;
4693 taskqueue_unblock(sc->sc_tq);
4694 }
4695
4696 ni = ieee80211_ref_node(vap->iv_bss);
4697 rfilt = ath_calcrxfilter(sc);
4698 stamode = (vap->iv_opmode == IEEE80211_M_STA ||
4699 vap->iv_opmode == IEEE80211_M_AHDEMO ||
4700 vap->iv_opmode == IEEE80211_M_IBSS);
4701 if (stamode && nstate == IEEE80211_S_RUN) {
4702 sc->sc_curaid = ni->ni_associd;
4703 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
4704 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
4705 }
4706 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
4707 __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
4708 ath_hal_setrxfilter(ah, rfilt);
4709
4710 /* XXX is this to restore keycache on resume? */
4711 if (vap->iv_opmode != IEEE80211_M_STA &&
4712 (vap->iv_flags & IEEE80211_F_PRIVACY)) {
4713 for (i = 0; i < IEEE80211_WEP_NKID; i++)
4714 if (ath_hal_keyisvalid(ah, i))
4715 ath_hal_keysetmac(ah, i, ni->ni_bssid);
4716 }
4717
4718 /*
4719 * Invoke the parent method to do net80211 work.
4720 */
4721 error = avp->av_newstate(vap, nstate, arg);
4722 if (error != 0)
4723 goto bad;
4724
4725 /*
4726 * See above: ensure av_newstate() doesn't drop the lock
4727 * on us.
4728 */
4729 IEEE80211_LOCK_ASSERT(ic);
4730
4731 if (nstate == IEEE80211_S_RUN) {
4732 /* NB: collect bss node again, it may have changed */
4733 ieee80211_free_node(ni);
4734 ni = ieee80211_ref_node(vap->iv_bss);
4735
4736 DPRINTF(sc, ATH_DEBUG_STATE,
4737 "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
4738 "capinfo 0x%04x chan %d\n", __func__,
4739 vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid),
4740 ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));
4741
4742 switch (vap->iv_opmode) {
4743#ifdef IEEE80211_SUPPORT_TDMA
4744 case IEEE80211_M_AHDEMO:
4745 if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
4746 break;
4747 /* fall thru... */
4748#endif
4749 case IEEE80211_M_HOSTAP:
4750 case IEEE80211_M_IBSS:
4751 case IEEE80211_M_MBSS:
4752 /*
4753 * Allocate and setup the beacon frame.
4754 *
4755 * Stop any previous beacon DMA. This may be
4756 * necessary, for example, when an ibss merge
4757 * causes reconfiguration; there will be a state
4758 * transition from RUN->RUN that means we may
4759 * be called with beacon transmission active.
4760 */
4761 ath_hal_stoptxdma(ah, sc->sc_bhalq);
4762
4763 error = ath_beacon_alloc(sc, ni);
4764 if (error != 0)
4765 goto bad;
4766 /*
4767 * If joining an adhoc network, defer beacon timer
4768 * configuration to the next beacon frame so we
4769 * have a current TSF to use. Otherwise we're
4770 * starting an ibss/bss so there's no need to delay;
4771 * if this is the first vap moving to RUN state, then
4772 * beacon state needs to be [re]configured.
4773 */
4774 if (vap->iv_opmode == IEEE80211_M_IBSS &&
4775 ni->ni_tstamp.tsf != 0) {
4776 sc->sc_syncbeacon = 1;
4777 } else if (!sc->sc_beacons) {
4778#ifdef IEEE80211_SUPPORT_TDMA
4779 if (vap->iv_caps & IEEE80211_C_TDMA)
4780 ath_tdma_config(sc, vap);
4781 else
4782#endif
4783 ath_beacon_config(sc, vap);
4784 sc->sc_beacons = 1;
4785 }
4786 break;
4787 case IEEE80211_M_STA:
4788 /*
4789 * Defer beacon timer configuration to the next
4790 * beacon frame so we have a current TSF to use
4791 * (any TSF collected when scanning is likely old).
4792 * However if it's due to a CSA -> RUN transition,
4793 * force a beacon update so we pick up a lack of
4794 * beacons from an AP in CAC and thus force a
4795 * scan.
4796 */
4797 sc->sc_syncbeacon = 1;
4798 if (csa_run_transition)
4799 ath_beacon_config(sc, vap);
4800 break;
4801 case IEEE80211_M_MONITOR:
4802 /*
4803 * Monitor mode vaps have only INIT->RUN and RUN->RUN
4804 * transitions so we must re-enable interrupts here to
4805 * handle the case of a single monitor mode vap.
4806 */
4807 ath_hal_intrset(ah, sc->sc_imask);
4808 break;
4809 case IEEE80211_M_WDS:
4810 break;
4811 default:
4812 break;
4813 }
4814 /*
4815 * Let the hal process statistics collected during a
4816 * scan so it can provide calibrated noise floor data.
4817 */
4818 ath_hal_process_noisefloor(ah);
4819 /*
4820 * Reset rssi stats; maybe not the best place...
4821 */
4822 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
4823 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
4824 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
4825 /*
4826 * Finally, start any timers and the task q thread
4827 * (in case we didn't go through SCAN state).
4828 */
4829 if (ath_longcalinterval != 0) {
4830 /* start periodic recalibration timer */
4831 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
4832 } else {
4833 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
4834 "%s: calibration disabled\n", __func__);
4835 }
4836 taskqueue_unblock(sc->sc_tq);
4837 } else if (nstate == IEEE80211_S_INIT) {
4838 /*
4839 * If there are no vaps left in RUN state then
4840 * shutdown host/driver operation:
4841 * o disable interrupts
4842 * o disable the task queue thread
4843 * o mark beacon processing as stopped
4844 */
4845 if (!ath_isanyrunningvaps(vap)) {
4846 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
4847 /* disable interrupts */
4848 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
4849 taskqueue_block(sc->sc_tq);
4850 sc->sc_beacons = 0;
4851 }
4852#ifdef IEEE80211_SUPPORT_TDMA
4853 ath_hal_setcca(ah, AH_TRUE);
4854#endif
4855 }
4856bad:
4857 ieee80211_free_node(ni);
4858 return error;
4859}
4860
4861/*
4862 * Allocate a key cache slot to the station so we can
4863 * set up a mapping from key index to node. The key cache
4864 * slot is needed for managing antenna state and for
4865 * compression when stations do not use crypto. We do
4866 * it unilaterally here; if crypto is employed this slot
4867 * will be reassigned.
4868 */
4869static void
4870ath_setup_stationkey(struct ieee80211_node *ni)
4871{
4872 struct ieee80211vap *vap = ni->ni_vap;
4873 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4874 ieee80211_keyix keyix, rxkeyix;
4875
4876 /* XXX should take a locked ref to vap->iv_bss */
4877 if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
4878 /*
4879 * Key cache is full; we'll fall back to doing
4880 * the more expensive lookup in software. Note
4881 * this also means no h/w compression.
4882 */
4883 /* XXX msg+statistic */
4884 } else {
4885 /* XXX locking? */
4886 ni->ni_ucastkey.wk_keyix = keyix;
4887 ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
4888 /* NB: must mark device key to get called back on delete */
4889 ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
4890 IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
4891 /* NB: this will create a pass-thru key entry */
4892 ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss);
4893 }
4894}
4895
4896/*
4897 * Setup driver-specific state for a newly associated node.
4898 * Note that we're also called on a re-associate; the isnew
4899 * param tells us whether this is the first time or not.
4900 */
4901static void
4902ath_newassoc(struct ieee80211_node *ni, int isnew)
4903{
4904 struct ath_node *an = ATH_NODE(ni);
4905 struct ieee80211vap *vap = ni->ni_vap;
4906 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4907 const struct ieee80211_txparam *tp = ni->ni_txparms;
4908
4909 an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
4910 an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);
4911
4912 ath_rate_newassoc(sc, an, isnew);
4913 if (isnew &&
4914 (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
4915 ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
4916 ath_setup_stationkey(ni);
4917}
4918
4919static int
4920ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
4921 int nchans, struct ieee80211_channel chans[])
4922{
4923 struct ath_softc *sc = ic->ic_ifp->if_softc;
4924 struct ath_hal *ah = sc->sc_ah;
4925 HAL_STATUS status;
4926
4927 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
4928 "%s: rd %u cc %u location %c%s\n",
4929 __func__, reg->regdomain, reg->country, reg->location,
4930 reg->ecm ? " ecm" : "");
4931
4932 status = ath_hal_set_channels(ah, chans, nchans,
4933 reg->country, reg->regdomain);
4934 if (status != HAL_OK) {
4935 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
4936 __func__, status);
4937 return EINVAL; /* XXX */
4938 }
4939
4940 return 0;
4941}
4942
4943static void
4944ath_getradiocaps(struct ieee80211com *ic,
4945 int maxchans, int *nchans, struct ieee80211_channel chans[])
4946{
4947 struct ath_softc *sc = ic->ic_ifp->if_softc;
4948 struct ath_hal *ah = sc->sc_ah;
4949
4950 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
4951 __func__, SKU_DEBUG, CTRY_DEFAULT);
4952
4953 /* XXX check return */
4954 (void) ath_hal_getchannels(ah, chans, maxchans, nchans,
4955 HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
4957}
4958
4959static int
4960ath_getchannels(struct ath_softc *sc)
4961{
4962 struct ifnet *ifp = sc->sc_ifp;
4963 struct ieee80211com *ic = ifp->if_l2com;
4964 struct ath_hal *ah = sc->sc_ah;
4965 HAL_STATUS status;
4966
4967 /*
4968 * Collect channel set based on EEPROM contents.
4969 */
4970 status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
4971 &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
4972 if (status != HAL_OK) {
4973 if_printf(ifp, "%s: unable to collect channel list from hal, "
4974 "status %d\n", __func__, status);
4975 return EINVAL;
4976 }
4977 (void) ath_hal_getregdomain(ah, &sc->sc_eerd);
4978 ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */
4979 /* XXX map Atheros sku's to net80211 SKU's */
4980 /* XXX net80211 types too small */
4981 ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
4982 ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
4983 ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */
4984 ic->ic_regdomain.isocc[1] = ' ';
4985
4986 ic->ic_regdomain.ecm = 1;
4987 ic->ic_regdomain.location = 'I';
4988
4989 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
4990 "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
4991 __func__, sc->sc_eerd, sc->sc_eecc,
4992 ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
4993 ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : "");
4994 return 0;
4995}
4996
4997static int
4998ath_rate_setup(struct ath_softc *sc, u_int mode)
4999{
5000 struct ath_hal *ah = sc->sc_ah;
5001 const HAL_RATE_TABLE *rt;
5002
5003 switch (mode) {
5004 case IEEE80211_MODE_11A:
5005 rt = ath_hal_getratetable(ah, HAL_MODE_11A);
5006 break;
5007 case IEEE80211_MODE_HALF:
5008 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
5009 break;
5010 case IEEE80211_MODE_QUARTER:
5011 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
5012 break;
5013 case IEEE80211_MODE_11B:
5014 rt = ath_hal_getratetable(ah, HAL_MODE_11B);
5015 break;
5016 case IEEE80211_MODE_11G:
5017 rt = ath_hal_getratetable(ah, HAL_MODE_11G);
5018 break;
5019 case IEEE80211_MODE_TURBO_A:
5020 rt = ath_hal_getratetable(ah, HAL_MODE_108A);
5021 break;
5022 case IEEE80211_MODE_TURBO_G:
5023 rt = ath_hal_getratetable(ah, HAL_MODE_108G);
5024 break;
5025 case IEEE80211_MODE_STURBO_A:
5026 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
5027 break;
5028 case IEEE80211_MODE_11NA:
5029 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
5030 break;
5031 case IEEE80211_MODE_11NG:
5032 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
5033 break;
5034 default:
5035 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
5036 __func__, mode);
5037 return 0;
5038 }
5039 sc->sc_rates[mode] = rt;
5040 return (rt != NULL);
5041}
5042
5043static void
5044ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
5045{
5046#define N(a) (sizeof(a)/sizeof(a[0]))
5047 /* NB: on/off times from the Atheros NDIS driver, w/ permission */
5048 static const struct {
5049 u_int rate; /* tx/rx 802.11 rate */
5050 u_int16_t timeOn; /* LED on time (ms) */
5051 u_int16_t timeOff; /* LED off time (ms) */
5052 } blinkrates[] = {
5053 { 108, 40, 10 },
5054 { 96, 44, 11 },
5055 { 72, 50, 13 },
5056 { 48, 57, 14 },
5057 { 36, 67, 16 },
5058 { 24, 80, 20 },
5059 { 22, 100, 25 },
5060 { 18, 133, 34 },
5061 { 12, 160, 40 },
5062 { 10, 200, 50 },
5063 { 6, 240, 58 },
5064 { 4, 267, 66 },
5065 { 2, 400, 100 },
5066 { 0, 500, 130 },
5067 /* XXX half/quarter rates */
5068 };
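/*
 * The on/off times above are in milliseconds; below they are
 * converted to scheduler ticks as (time * hz) / 1000. With
 * hz = 1000 the mapping is 1:1 (e.g. 40ms -> 40 ticks); with
 * hz = 100 sub-10ms precision is lost to integer truncation
 * (e.g. 13ms -> 1 tick).
 */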
5069 const HAL_RATE_TABLE *rt;
5070 int i, j;
5071
5072 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
5073 rt = sc->sc_rates[mode];
5074 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
5075 for (i = 0; i < rt->rateCount; i++) {
5076 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
5077 if (rt->info[i].phy != IEEE80211_T_HT)
5078 sc->sc_rixmap[ieeerate] = i;
5079 else
5080 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
5081 }
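/*
 * sc_rixmap is thus indexed by the 802.11 rate code in 500kb/s
 * units (e.g. 36 Mb/s OFDM appears as 72), with IEEE80211_RATE_MCS
 * kept set for HT entries so that an MCS index and a legacy rate
 * code sharing the same numeric value land in distinct slots.
 */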
5082 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
5083 for (i = 0; i < N(sc->sc_hwmap); i++) {
5084 if (i >= rt->rateCount) {
5085 sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
5086 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
5087 continue;
5088 }
5089 sc->sc_hwmap[i].ieeerate =
5090 rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
5091 if (rt->info[i].phy == IEEE80211_T_HT)
5092 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
5093 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
5094 if (rt->info[i].shortPreamble ||
5095 rt->info[i].phy == IEEE80211_T_OFDM)
5096 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
5097 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
5098 for (j = 0; j < N(blinkrates)-1; j++)
5099 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
5100 break;
5101 /* NB: this uses the last entry if the rate isn't found */
5102 /* XXX beware of overflow */
5103 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
5104 sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
5105 }
5106 sc->sc_currates = rt;
5107 sc->sc_curmode = mode;
5108 /*
5109 * All protection frames are transmitted at 2Mb/s for
5110 * 11g, otherwise at 1Mb/s.
5111 */
5112 if (mode == IEEE80211_MODE_11G)
5113 sc->sc_protrix = ath_tx_findrix(sc, 2*2);
5114 else
5115 sc->sc_protrix = ath_tx_findrix(sc, 2*1);
5116 /* NB: caller is responsible for resetting rate control state */
5117#undef N
5118}
5119
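/*
 * Watchdog: sc_wd_timer is armed (set to a small number of
 * seconds) when transmission starts and zeroed by the TX
 * completion paths above. Since this callout re-arms itself
 * every hz ticks, a hang is only declared after that many
 * seconds pass with no TX progress; the reset itself is
 * deferred to sc_resettask because this routine cannot sleep
 * while holding a lock.
 */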
5120static void
5121ath_watchdog(void *arg)
5122{
5123 struct ath_softc *sc = arg;
5124 int do_reset = 0;
5125
5126 if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
5127 struct ifnet *ifp = sc->sc_ifp;
5128 uint32_t hangs;
5129
5130 if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
5131 hangs != 0) {
5132 if_printf(ifp, "%s hang detected (0x%x)\n",
5133 hangs & 0xff ? "bb" : "mac", hangs);
5134 } else
5135 if_printf(ifp, "device timeout\n");
5136 do_reset = 1;
5137 ifp->if_oerrors++;
5138 sc->sc_stats.ast_watchdog++;
5139 }
5140
5141 /*
5142 * We can't hold the lock across the ath_reset() call.
5143 *
5144 * And since this routine can't hold a lock and sleep,
5145 * the reset is deferred to the reset task.
5146 */
5147 if (do_reset) {
5148 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
5149 }
5150
5151 callout_schedule(&sc->sc_wd_ch, hz);
5152}
5153
5154/*
5155 * Fetch the rate control statistics for the given node.
5156 */
5157static int
5158ath_ioctl_ratestats(struct ath_softc *sc, struct ath_rateioctl *rs)
5159{
5160 struct ath_node *an;
5161 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
5162 struct ieee80211_node *ni;
5163 int error = 0;
5164
5165 /* Perform a lookup on the given node */
5166 ni = ieee80211_find_node(&ic->ic_sta, rs->is_u.macaddr);
5167 if (ni == NULL) {
5168 error = EINVAL;
5169 goto bad;
5170 }
5171
5172 /* Lock the ath_node */
5173 an = ATH_NODE(ni);
5174 ATH_NODE_LOCK(an);
5175
5176 /* Fetch the rate control stats for this node */
5177 error = ath_rate_fetch_node_stats(sc, an, rs);
5178
5179 /* No matter what happens here, just drop through */
5180
5181 /* Unlock the ath_node */
5182 ATH_NODE_UNLOCK(an);
5183
5184 /* Unref the node */
5185 ieee80211_node_decref(ni);
5186
5187bad:
5188 return (error);
5189}
5190
5191#ifdef ATH_DIAGAPI
5192/*
5193 * Diagnostic interface to the HAL. This is used by various
5194 * tools to do things like retrieve register contents for
5195 * debugging. The mechanism is intentionally opaque so that
5196 * it can change frequently w/o concern for compatibility.
5197 */
5198static int
5199ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
5200{
5201 struct ath_hal *ah = sc->sc_ah;
5202 u_int id = ad->ad_id & ATH_DIAG_ID;
5203 void *indata = NULL;
5204 void *outdata = NULL;
5205 u_int32_t insize = ad->ad_in_size;
5206 u_int32_t outsize = ad->ad_out_size;
5207 int error = 0;
5208
5209 if (ad->ad_id & ATH_DIAG_IN) {
5210 /*
5211 * Copy in data.
5212 */
5213 indata = malloc(insize, M_TEMP, M_NOWAIT);
5214 if (indata == NULL) {
5215 error = ENOMEM;
5216 goto bad;
5217 }
5218 error = copyin(ad->ad_in_data, indata, insize);
5219 if (error)
5220 goto bad;
5221 }
5222 if (ad->ad_id & ATH_DIAG_DYN) {
5223 /*
5224 * Allocate a buffer for the results (otherwise the HAL
5225 * returns a pointer to a buffer where we can read the
5226 * results). Note that we depend on the HAL leaving this
5227 * pointer for us to use below in reclaiming the buffer;
5228 * may want to be more defensive.
5229 */
5230 outdata = malloc(outsize, M_TEMP, M_NOWAIT);
5231 if (outdata == NULL) {
5232 error = ENOMEM;
5233 goto bad;
5234 }
5235 }
5236 if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
5237 if (outsize < ad->ad_out_size)
5238 ad->ad_out_size = outsize;
5239 if (outdata != NULL)
5240 error = copyout(outdata, ad->ad_out_data,
5241 ad->ad_out_size);
5242 } else {
5243 error = EINVAL;
5244 }
5245bad:
5246 if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
5247 free(indata, M_TEMP);
5248 if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
5249 free(outdata, M_TEMP);
5250 return error;
5251}
5252#endif /* ATH_DIAGAPI */
5253
5254static int
5255ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
5256{
5257#define IS_RUNNING(ifp) \
5258 ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
5259 struct ath_softc *sc = ifp->if_softc;
5260 struct ieee80211com *ic = ifp->if_l2com;
5261 struct ifreq *ifr = (struct ifreq *)data;
5262 const HAL_RATE_TABLE *rt;
5263 int error = 0;
5264
5265 switch (cmd) {
5266 case SIOCSIFFLAGS:
5267 ATH_LOCK(sc);
5268 if (IS_RUNNING(ifp)) {
5269 /*
5270 * To avoid rescanning another access point,
5271 * do not call ath_init() here. Instead,
5272 * only reflect promisc mode settings.
5273 */
5274 ath_mode_init(sc);
5275 } else if (ifp->if_flags & IFF_UP) {
5276 /*
5277 * Beware of being called during attach/detach
5278 * to reset promiscuous mode. In that case we
5279 * will still be marked UP but not RUNNING.
5280 * However trying to re-init the interface
5281 * is the wrong thing to do as we've already
5282 * torn down much of our state. There's
5283 * probably a better way to deal with this.
5284 */
5285 if (!sc->sc_invalid)
5286 ath_init(sc); /* XXX lose error */
5287 } else {
5288 ath_stop_locked(ifp);
5289#ifdef notyet
5290 /* XXX must wakeup in places like ath_vap_delete */
5291 if (!sc->sc_invalid)
5292 ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
5293#endif
5294 }
5295 ATH_UNLOCK(sc);
5296 break;
5297 case SIOCGIFMEDIA:
5298 case SIOCSIFMEDIA:
5299 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
5300 break;
5301 case SIOCGATHSTATS:
5302 /* NB: embed these numbers to get a consistent view */
5303 sc->sc_stats.ast_tx_packets = ifp->if_opackets;
5304 sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
5305 sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
5306 sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
5307#ifdef IEEE80211_SUPPORT_TDMA
5308 sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap);
5309 sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam);
5310#endif
5311 rt = sc->sc_currates;
5312 sc->sc_stats.ast_tx_rate =
5313 rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
5314 if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT)
5315 sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS;
5316 return copyout(&sc->sc_stats,
5317 ifr->ifr_data, sizeof (sc->sc_stats));
5318 case SIOCGATHAGSTATS:
5319 return copyout(&sc->sc_aggr_stats,
5320 ifr->ifr_data, sizeof (sc->sc_aggr_stats));
5321 case SIOCZATHSTATS:
5322 error = priv_check(curthread, PRIV_DRIVER);
5323 if (error == 0) {
5324 memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
5325 memset(&sc->sc_aggr_stats, 0,
5326 sizeof(sc->sc_aggr_stats));
5327 memset(&sc->sc_intr_stats, 0,
5328 sizeof(sc->sc_intr_stats));
5329 }
5330 break;
5331#ifdef ATH_DIAGAPI
5332 case SIOCGATHDIAG:
5333 error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
5334 break;
5335 case SIOCGATHPHYERR:
5336 error = ath_ioctl_phyerr(sc,(struct ath_diag*) ifr);
5337 break;
5338#endif
5339 case SIOCGATHNODERATESTATS:
5340 error = ath_ioctl_ratestats(sc, (struct ath_rateioctl *) ifr);
5341 break;
5342 case SIOCGIFADDR:
5343 error = ether_ioctl(ifp, cmd, data);
5344 break;
5345 default:
5346 error = EINVAL;
5347 break;
5348 }
5349 return error;
5350#undef IS_RUNNING
5351}
5352
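/*
 * For illustration, a userland consumer of SIOCGATHSTATS (in the
 * style of the athstats tool) might look like the hedged sketch
 * below; the header locations and exact struct layout used by a
 * real consumer are assumptions, not part of this driver.
 */
#if 0
/* Userland fragment (not compiled here): assumes <sys/socket.h>,
 * <sys/sockio.h>, <net/if.h> and the installed if_athioctl.h for
 * struct ath_stats and SIOCGATHSTATS. */
static int
athstats_fetch(const char *ifname, struct ath_stats *stats)
{
	struct ifreq ifr;
	int s, ret;

	s = socket(AF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
	ifr.ifr_data = (caddr_t) stats;	/* SIOCGATHSTATS copies out here */
	ret = ioctl(s, SIOCGATHSTATS, &ifr);
	close(s);
	return ret;
}
#endif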
5353/*
5354 * Announce various information on device/driver attach.
5355 */
5356static void
5357ath_announce(struct ath_softc *sc)
5358{
5359 struct ifnet *ifp = sc->sc_ifp;
5360 struct ath_hal *ah = sc->sc_ah;
5361
5362 if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n",
5363 ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
5364 ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
5365 if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n",
5366 ah->ah_analog2GhzRev, ah->ah_analog5GhzRev);
5367 if (bootverbose) {
5368 int i;
5369 for (i = 0; i <= WME_AC_VO; i++) {
5370 struct ath_txq *txq = sc->sc_ac2q[i];
5371 if_printf(ifp, "Use hw queue %u for %s traffic\n",
5372 txq->axq_qnum, ieee80211_wme_acnames[i]);
5373 }
5374 if_printf(ifp, "Use hw queue %u for CAB traffic\n",
5375 sc->sc_cabq->axq_qnum);
5376 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
5377 }
5378 if (ath_rxbuf != ATH_RXBUF)
5379 if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
5380 if (ath_txbuf != ATH_TXBUF)
5381 if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
5382 if (sc->sc_mcastkey && bootverbose)
5383 if_printf(ifp, "using multicast key search\n");
5384}
5385
5386static void
5387ath_dfs_tasklet(void *p, int npending)
5388{
5389 struct ath_softc *sc = (struct ath_softc *) p;
5390 struct ifnet *ifp = sc->sc_ifp;
5391 struct ieee80211com *ic = ifp->if_l2com;
5392
5393 /*
5394 * If previous processing has found a radar event,
5395 * signal this to the net80211 layer to begin DFS
5396 * processing.
5397 */
5398 if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
5399 /* DFS event found, initiate channel change */
5400 /*
5401 * XXX doesn't currently tell us whether the event
5402 * XXX was found in the primary or extension
5403 * XXX channel!
5404 */
5405 IEEE80211_LOCK(ic);
5406 ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
5407 IEEE80211_UNLOCK(ic);
5408 }
5409}
5410
5411/*
5412 * Enable/disable power save. This must be called with
5413 * no TX driver locks currently held, so it should only
5414 * be called from the RX path (which doesn't hold any
5415 * TX driver locks).
5416 */
5417static void
5418ath_node_powersave(struct ieee80211_node *ni, int enable)
5419{
5420 struct ath_node *an = ATH_NODE(ni);
5421 struct ieee80211com *ic = ni->ni_ic;
5422 struct ath_softc *sc = ic->ic_ifp->if_softc;
5423 struct ath_vap *avp = ATH_VAP(ni->ni_vap);
5424
5425 ATH_NODE_UNLOCK_ASSERT(an);
5426 /* XXX and no TXQ locks should be held here */
5427
5428 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: ni=%p, enable=%d\n",
5429 __func__, ni, enable);
5430
5431 /* Suspend or resume software queue handling */
5432 if (enable)
5433 ath_tx_node_sleep(sc, an);
5434 else
5435 ath_tx_node_wakeup(sc, an);
5436
5437 /* Update net80211 state */
5438 avp->av_node_ps(ni, enable);
5439}
5440
5441/*
5442 * Notification from net80211 that the powersave queue state has
5443 * changed.
5444 *
5445 * Since the software queue may also have some frames:
5446 *
5447 * + if the node software queue has frames and the TID state
5448 * is 0, we set the TIM;
5449 * + if the node and the stack are both empty, we clear the TIM bit;
5450 * + if the stack tries to set the bit, always set it;
5451 * + if the stack tries to clear the bit, only clear it if the
5452 * software queue in question has also drained.
5453 *
5454 * TODO: this is called during node teardown; so let's ensure this
5455 * is all correctly handled and that the TIM bit is cleared.
5456 * It may be that the node flush is called _AFTER_ the net80211
5457 * stack clears the TIM.
5458 *
5459 * Here is the racy part. Since it's possible for more than one
5460 * concurrent, overlapping TX to appear complete, with the TX
5461 * completions running in other threads, concurrent TIM calls
5462 * may clash. We can't hold the node lock here because setting the
5463 * TIM grabs the net80211 comlock and this may cause a LOR.
5464 * The solution is either to totally serialise _everything_ at
5465 * this point (ie, all TX, completion and any reset/flush go into
5466 * one taskqueue) or a new "ath TIM lock" needs to be created that
5467 * just wraps the driver state change and this call to avp->av_set_tim().
5468 *
5469 * The same race exists in the net80211 power save queue handling
5470 * as well. Since multiple transmitting threads may queue frames
5471 * into the driver, as well as ps-poll and the driver transmitting
5472 * frames (and thus clearing the psq), it's quite possible that
5473 * a packet entering the PSQ and a ps-poll being handled will
5474 * race, causing the TIM to be cleared and not re-set.
5475 */
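/*
 * In summary, the cases handled below (a sketch of the existing
 * logic, not additional behaviour):
 *
 *   enable  tim_set  swq_depth  powersave  -> action
 *     1        1        any        any        ignore (already set)
 *     1        0        any        any        set TIM
 *     0       any        0         any        clear TIM
 *     0       any       >0         no         clear TIM
 *     0       any       >0         yes        leave TIM set
 */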
5476static int
5477ath_node_set_tim(struct ieee80211_node *ni, int enable)
5478{
5479 struct ieee80211com *ic = ni->ni_ic;
5480 struct ath_softc *sc = ic->ic_ifp->if_softc;
5481 struct ath_node *an = ATH_NODE(ni);
5482 struct ath_vap *avp = ATH_VAP(ni->ni_vap);
5483 int changed = 0;
5484
5485 ATH_NODE_UNLOCK_ASSERT(an);
5486
5487 /*
5488 * For now, just track and then update the TIM.
5489 */
5490 ATH_NODE_LOCK(an);
5491 an->an_stack_psq = enable;
5492
5493 /*
5494 * This will get called for all operating modes,
5495 * even if avp->av_set_tim is unset.
5496 * It's currently set for hostap/ibss modes, but
5497 * the same infrastructure is used for both STA
5498 * and AP/IBSS node power save.
5499 */
5500 if (avp->av_set_tim == NULL) {
5501 ATH_NODE_UNLOCK(an);
5502 return (0);
5503 }
5504
	/*
	 * If setting the bit, always set it here.
	 * If clearing the bit, only clear it if the
	 * software queue is also empty.
	 *
	 * If the node has left power save, just clear the TIM
	 * bit regardless of the state of the power save queue.
	 *
	 * XXX TODO: although atomics are used, it's quite possible
	 * that a race will occur between this and setting/clearing
	 * in another thread.  TX completion will always occur in
	 * one thread; however, setting/clearing the TIM bit can come
	 * from a variety of different process contexts!
	 */
	if (enable && an->an_tim_set == 1) {
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: an=%p, enable=%d, tim_set=1, ignoring\n",
		    __func__, an, enable);
		ATH_NODE_UNLOCK(an);
	} else if (enable) {
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: an=%p, enable=%d, enabling TIM\n",
		    __func__, an, enable);
		an->an_tim_set = 1;
		ATH_NODE_UNLOCK(an);
		changed = avp->av_set_tim(ni, enable);
	} else if (atomic_load_acq_int(&an->an_swq_depth) == 0) {
		/* disable */
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: an=%p, enable=%d, an_swq_depth == 0, disabling\n",
		    __func__, an, enable);
		an->an_tim_set = 0;
		ATH_NODE_UNLOCK(an);
		changed = avp->av_set_tim(ni, enable);
	} else if (! an->an_is_powersave) {
		/*
		 * disable regardless; the node isn't in powersave now
		 */
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: an=%p, enable=%d, an_pwrsave=0, disabling\n",
		    __func__, an, enable);
		an->an_tim_set = 0;
		ATH_NODE_UNLOCK(an);
		changed = avp->av_set_tim(ni, enable);
	} else {
		/*
		 * psq disable, node is currently in powersave, node
		 * software queue isn't empty, so don't clear the TIM bit
		 * for now.
		 */
		ATH_NODE_UNLOCK(an);
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: enable=%d, an_swq_depth > 0, ignoring\n",
		    __func__, enable);
		changed = 0;
	}

	return (changed);
}
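
/*
 * A minimal sketch of the "ath TIM lock" idea mentioned above:
 * a dedicated lock (ATH_TIM_LOCK is hypothetical and does not exist
 * in this driver) wrapping both the driver-side TIM state change and
 * the call into net80211, so a completion running in another thread
 * can't interleave between the two.  Lock ordering against the
 * net80211 comlock would still need checking.
 */
#if 0
	ATH_TIM_LOCK(an);			/* hypothetical */
	an->an_tim_set = enable;
	changed = avp->av_set_tim(ni, enable);
	ATH_TIM_UNLOCK(an);
#endif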

/*
 * Set or update the TIM from the software queue.
 *
 * Check the software queue depth before attempting to take any
 * locks; that avoids acquiring the lock when it isn't needed.
 * Then, re-check afterwards to ensure nothing has changed in the
 * meantime.
 *
 * set: This is designed to be called from the TX path, after
 *   a frame has been queued, to see whether the software queue
 *   depth has become non-zero (swq > 0).
 *
 * clear: This is designed to be called from the buffer completion
 *   point (right now it's ath_tx_default_comp()) where the state of
 *   a software queue has changed.
 *
 * It makes sense to place it at buffer free / completion rather
 * than after each software queue operation, as there's no real
 * point in churning the TIM bit as the last frames in the software
 * queue are transmitted.  If they fail and we retry them, we'd
 * just be setting the TIM bit again anyway.
 */
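/*
 * (Both paths below use a double-checked pattern: a cheap lock-free
 * read of an_swq_depth decides whether to take the node lock at all,
 * and the state is then re-validated under the lock before the TIM
 * bit is actually changed.)
 */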
void
ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni,
    int enable)
{
	struct ath_node *an;
	struct ath_vap *avp;

	/* Don't do this for broadcast/etc frames */
	if (ni == NULL)
		return;

	an = ATH_NODE(ni);
	avp = ATH_VAP(ni->ni_vap);

	/*
	 * For operating modes without the TIM handler set, just
	 * skip the update.
	 */
	if (avp->av_set_tim == NULL)
		return;

	ATH_NODE_UNLOCK_ASSERT(an);

	if (enable) {
		/*
		 * Don't bother grabbing the lock unless the queue is
		 * non-empty.
		 */
		if (atomic_load_acq_int(&an->an_swq_depth) == 0)
			return;

		ATH_NODE_LOCK(an);
		if (an->an_is_powersave &&
		    an->an_tim_set == 0 &&
		    atomic_load_acq_int(&an->an_swq_depth) != 0) {
			DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
			    "%s: an=%p, swq_depth>0, tim_set=0, set!\n",
			    __func__, an);
			an->an_tim_set = 1;
			ATH_NODE_UNLOCK(an);
			(void) avp->av_set_tim(ni, 1);
		} else {
			ATH_NODE_UNLOCK(an);
		}
	} else {
		/*
		 * Don't bother grabbing the lock unless the queue is
		 * empty.
		 */
		if (atomic_load_acq_int(&an->an_swq_depth) != 0)
			return;

		ATH_NODE_LOCK(an);
		if (an->an_is_powersave &&
		    an->an_stack_psq == 0 &&
		    an->an_tim_set == 1 &&
		    atomic_load_acq_int(&an->an_swq_depth) == 0) {
			DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
			    "%s: an=%p, swq_depth=0, tim_set=1, psq_set=0,"
			    " clear!\n",
			    __func__, an);
			an->an_tim_set = 0;
			ATH_NODE_UNLOCK(an);
			(void) avp->av_set_tim(ni, 0);
		} else {
			ATH_NODE_UNLOCK(an);
		}
	}
}
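
/*
 * Sketch of the intended call sites described in the comment above
 * (illustration only; the actual calls live in the TX and completion
 * paths elsewhere in this file):
 */
#if 0
	/* TX path, after queueing a frame to the node software queue: */
	ath_tx_update_tim(sc, ni, 1);

	/* Buffer completion (ath_tx_default_comp()), as the swq drains: */
	ath_tx_update_tim(sc, ni, 0);
#endif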

MODULE_VERSION(if_ath, 1);
MODULE_DEPEND(if_ath, wlan, 1, 1, 1);		/* 802.11 media layer */
#if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ)
MODULE_DEPEND(if_ath, alq, 1, 1, 1);
#endif