/* if_ath.c -- FreeBSD revision 223459 */
1/*-
2 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer,
10 *    without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 *    redistribution must be conditioned upon including a substantially
14 *    similar Disclaimer requirement for further binary redistribution.
15 *
16 * NO WARRANTY
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 223459 2011-06-23 02:38:36Z adrian $");
32
33/*
34 * Driver for the Atheros Wireless LAN controller.
35 *
36 * This software is derived from work of Atsushi Onoe; his contribution
37 * is greatly appreciated.
38 */
39
40#include "opt_inet.h"
41#include "opt_ath.h"
42#include "opt_wlan.h"
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/sysctl.h>
47#include <sys/mbuf.h>
48#include <sys/malloc.h>
49#include <sys/lock.h>
50#include <sys/mutex.h>
51#include <sys/kernel.h>
52#include <sys/socket.h>
53#include <sys/sockio.h>
54#include <sys/errno.h>
55#include <sys/callout.h>
56#include <sys/bus.h>
57#include <sys/endian.h>
58#include <sys/kthread.h>
59#include <sys/taskqueue.h>
60#include <sys/priv.h>
61#include <sys/module.h>
62
63#include <machine/bus.h>
64
65#include <net/if.h>
66#include <net/if_dl.h>
67#include <net/if_media.h>
68#include <net/if_types.h>
69#include <net/if_arp.h>
70#include <net/ethernet.h>
71#include <net/if_llc.h>
72
73#include <net80211/ieee80211_var.h>
74#include <net80211/ieee80211_regdomain.h>
75#ifdef IEEE80211_SUPPORT_SUPERG
76#include <net80211/ieee80211_superg.h>
77#endif
78#ifdef IEEE80211_SUPPORT_TDMA
79#include <net80211/ieee80211_tdma.h>
80#endif
81
82#include <net/bpf.h>
83
84#ifdef INET
85#include <netinet/in.h>
86#include <netinet/if_ether.h>
87#endif
88
89#include <dev/ath/if_athvar.h>
90#include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
91#include <dev/ath/ath_hal/ah_diagcodes.h>
92
93#include <dev/ath/if_ath_debug.h>
94#include <dev/ath/if_ath_misc.h>
95#include <dev/ath/if_ath_tx.h>
96#include <dev/ath/if_ath_sysctl.h>
97#include <dev/ath/if_ath_keycache.h>
98#include <dev/ath/if_athdfs.h>
99
100#ifdef ATH_TX99_DIAG
101#include <dev/ath/ath_tx99/ath_tx99.h>
102#endif
103
104
105/*
106 * ATH_BCBUF determines the number of vap's that can transmit
107 * beacons and also (currently) the number of vap's that can
108 * have unique mac addresses/bssid.  When staggering beacons
109 * 4 is probably a good max as otherwise the beacons become
110 * very closely spaced and there is limited time for cab q traffic
111 * to go out.  You can burst beacons instead but that is not good
112 * for stations in power save and at some point you really want
113 * another radio (and channel).
114 *
115 * The limit on the number of mac addresses is tied to our use of
116 * the U/L bit and tracking addresses in a byte; it would be
117 * worthwhile to allow more for applications like proxy sta.
118 */
119CTASSERT(ATH_BCBUF <= 8);
120
121static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
122		    const char name[IFNAMSIZ], int unit, int opmode,
123		    int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
124		    const uint8_t mac[IEEE80211_ADDR_LEN]);
125static void	ath_vap_delete(struct ieee80211vap *);
126static void	ath_init(void *);
127static void	ath_stop_locked(struct ifnet *);
128static void	ath_stop(struct ifnet *);
129static void	ath_start(struct ifnet *);
130static int	ath_reset_vap(struct ieee80211vap *, u_long);
131static int	ath_media_change(struct ifnet *);
132static void	ath_watchdog(void *);
133static int	ath_ioctl(struct ifnet *, u_long, caddr_t);
134static void	ath_fatal_proc(void *, int);
135static void	ath_bmiss_vap(struct ieee80211vap *);
136static void	ath_bmiss_proc(void *, int);
137static void	ath_key_update_begin(struct ieee80211vap *);
138static void	ath_key_update_end(struct ieee80211vap *);
139static void	ath_update_mcast(struct ifnet *);
140static void	ath_update_promisc(struct ifnet *);
141static void	ath_mode_init(struct ath_softc *);
142static void	ath_setslottime(struct ath_softc *);
143static void	ath_updateslot(struct ifnet *);
144static int	ath_beaconq_setup(struct ath_hal *);
145static int	ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
146static void	ath_beacon_update(struct ieee80211vap *, int item);
147static void	ath_beacon_setup(struct ath_softc *, struct ath_buf *);
148static void	ath_beacon_proc(void *, int);
149static struct ath_buf *ath_beacon_generate(struct ath_softc *,
150			struct ieee80211vap *);
151static void	ath_bstuck_proc(void *, int);
152static void	ath_beacon_return(struct ath_softc *, struct ath_buf *);
153static void	ath_beacon_free(struct ath_softc *);
154static void	ath_beacon_config(struct ath_softc *, struct ieee80211vap *);
155static void	ath_descdma_cleanup(struct ath_softc *sc,
156			struct ath_descdma *, ath_bufhead *);
157static int	ath_desc_alloc(struct ath_softc *);
158static void	ath_desc_free(struct ath_softc *);
159static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
160			const uint8_t [IEEE80211_ADDR_LEN]);
161static void	ath_node_free(struct ieee80211_node *);
162static void	ath_node_getsignal(const struct ieee80211_node *,
163			int8_t *, int8_t *);
164static int	ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
165static void	ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
166			int subtype, int rssi, int nf);
167static void	ath_setdefantenna(struct ath_softc *, u_int);
168static void	ath_rx_proc(void *, int);
169static void	ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
170static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
171static int	ath_tx_setup(struct ath_softc *, int, int);
172static int	ath_wme_update(struct ieee80211com *);
173static void	ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
174static void	ath_tx_cleanup(struct ath_softc *);
175static void	ath_tx_proc_q0(void *, int);
176static void	ath_tx_proc_q0123(void *, int);
177static void	ath_tx_proc(void *, int);
178static void	ath_tx_draintxq(struct ath_softc *, struct ath_txq *);
179static int	ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
180static void	ath_draintxq(struct ath_softc *);
181static void	ath_stoprecv(struct ath_softc *);
182static int	ath_startrecv(struct ath_softc *);
183static void	ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
184static void	ath_scan_start(struct ieee80211com *);
185static void	ath_scan_end(struct ieee80211com *);
186static void	ath_set_channel(struct ieee80211com *);
187static void	ath_calibrate(void *);
188static int	ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
189static void	ath_setup_stationkey(struct ieee80211_node *);
190static void	ath_newassoc(struct ieee80211_node *, int);
191static int	ath_setregdomain(struct ieee80211com *,
192		    struct ieee80211_regdomain *, int,
193		    struct ieee80211_channel []);
194static void	ath_getradiocaps(struct ieee80211com *, int, int *,
195		    struct ieee80211_channel []);
196static int	ath_getchannels(struct ath_softc *);
197static void	ath_led_event(struct ath_softc *, int);
198
199static int	ath_rate_setup(struct ath_softc *, u_int mode);
200static void	ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);
201
202static void	ath_announce(struct ath_softc *);
203
204static void	ath_dfs_tasklet(void *, int);
205
206#ifdef IEEE80211_SUPPORT_TDMA
207static void	ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt,
208		    u_int32_t bintval);
209static void	ath_tdma_bintvalsetup(struct ath_softc *sc,
210		    const struct ieee80211_tdma_state *tdma);
211static void	ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap);
212static void	ath_tdma_update(struct ieee80211_node *ni,
213		    const struct ieee80211_tdma_param *tdma, int);
214static void	ath_tdma_beacon_send(struct ath_softc *sc,
215		    struct ieee80211vap *vap);
216
/*
 * Enable/disable CCA (clear channel assessment) on the hardware,
 * used by the TDMA support.  Intentionally left a no-op; see the
 * NB: below for why no default implementation is supplied.
 */
static __inline void
ath_hal_setcca(struct ath_hal *ah, int ena)
{
	/*
	 * NB: fill me in; this is not provided by default because disabling
	 *     CCA in most locales violates regulatory.
	 */
}
225
226static __inline int
227ath_hal_getcca(struct ath_hal *ah)
228{
229	u_int32_t diag;
230	if (ath_hal_getcapability(ah, HAL_CAP_DIAG, 0, &diag) != HAL_OK)
231		return 1;
232	return ((diag & 0x500000) == 0);
233}
234
235#define	TDMA_EP_MULTIPLIER	(1<<10) /* pow2 to optimize out * and / */
236#define	TDMA_LPF_LEN		6
237#define	TDMA_DUMMY_MARKER	0x127
238#define	TDMA_EP_MUL(x, mul)	((x) * (mul))
239#define	TDMA_IN(x)		(TDMA_EP_MUL((x), TDMA_EP_MULTIPLIER))
240#define	TDMA_LPF(x, y, len) \
241    ((x != TDMA_DUMMY_MARKER) ? (((x) * ((len)-1) + (y)) / (len)) : (y))
242#define	TDMA_SAMPLE(x, y) do {					\
243	x = TDMA_LPF((x), TDMA_IN(y), TDMA_LPF_LEN);		\
244} while (0)
245#define	TDMA_EP_RND(x,mul) \
246	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
247#define	TDMA_AVG(x)		TDMA_EP_RND(x, TDMA_EP_MULTIPLIER)
248#endif /* IEEE80211_SUPPORT_TDMA */
249
250SYSCTL_DECL(_hw_ath);
251
252/* XXX validate sysctl values */
253static	int ath_longcalinterval = 30;		/* long cals every 30 secs */
254SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
255	    0, "long chip calibration interval (secs)");
256static	int ath_shortcalinterval = 100;		/* short cals every 100 ms */
257SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
258	    0, "short chip calibration interval (msecs)");
259static	int ath_resetcalinterval = 20*60;	/* reset cal state 20 mins */
260SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
261	    0, "reset chip calibration results (secs)");
262static	int ath_anicalinterval = 100;		/* ANI calibration - 100 msec */
263SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
264	    0, "ANI calibration (msecs)");
265
266static	int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
267SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
268	    0, "rx buffers allocated");
269TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
270static	int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
271SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
272	    0, "tx buffers allocated");
273TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
274
275static	int ath_bstuck_threshold = 4;		/* max missed beacons */
276SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
277	    0, "max missed beacon xmits before chip reset");
278
279MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");
280
281#define	HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
282#define	HAL_MODE_HT40 \
283	(HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
284	HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)
/*
 * Attach the driver to the device: probe and attach the HAL,
 * size and reset the key cache, collect the channel list and
 * rate tables, allocate tx/rx descriptors and hardware queues,
 * and wire the driver's methods into the ifnet and net80211
 * layers.
 *
 * Returns 0 on success or an errno on failure; on failure the
 * partially-acquired resources are released and sc_invalid is
 * set so the interrupt path ignores the device.
 */
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct ath_hal *ah = NULL;
	HAL_STATUS status;
	int error = 0, i;
	u_int wmodes;
	uint8_t macaddr[IEEE80211_ADDR_LEN];

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto bad;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, sc->sc_eepromdata, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
			status);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */
#ifdef	ATH_DEBUG
	sc->sc_debug = ath_debug;
#endif

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.  If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	if (sc->sc_keymax > ATH_KEYMAX) {
		if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
			ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);

	/*
	 * Collect the default channel list.
	 */
	error = ath_getchannels(sc);
	if (error != 0)
		goto bad;

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_11NA);
	ath_rate_setup(sc, IEEE80211_MODE_11NG);
	ath_rate_setup(sc, IEEE80211_MODE_HALF);
	ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	error = ath_desc_alloc(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate descriptors: %d\n", error);
		goto bad;
	}
	callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0);

	ATH_TXBUF_LOCK_INIT(sc);

	/*
	 * NOTE(review): taskqueue_create() is called with M_NOWAIT and may
	 * return NULL; the result is not checked before use below -- confirm
	 * whether an allocation failure here can be tolerated.
	 */
	sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, ath_rx_proc, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 *
	 * XXX PS-Poll
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == (u_int) -1) {
		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		if_printf(ifp, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}

	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 */
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
	case 0x01:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		break;
	case 0x0f:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		break;
	default:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
		break;
	}

	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the anntena state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}

	/* Attach DFS module */
	if (! ath_dfs_attach(sc)) {
		device_printf(sc->sc_dev, "%s: unable to attach DFS\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Start DFS processing tasklet */
	TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);

	/* Software LED blink state defaults. */
	sc->sc_blinking = 0;
	sc->sc_ledstate = 1;
	sc->sc_ledon = 0;			/* low true */
	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7sec */
	callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);
	/*
	 * Auto-enable soft led processing for IBM cards and for
	 * 5211 minipci cards.  Users can also manually enable/disable
	 * support with a sysctl.
	 */
	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
	if (sc->sc_softled) {
		ath_hal_gpioCfgOutput(ah, sc->sc_ledpin,
		    HAL_GPIO_MUX_MAC_NETWORK_LED);
		ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
	}

	/* Hook up the ifnet methods and transmit queue. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ath_start;
	ifp->if_ioctl = ath_ioctl;
	ifp->if_init = ath_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode */
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
		| IEEE80211_C_WDS		/* 4-address traffic works */
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		;
	/*
	 * Query the hal to figure out h/w crypto support.
	 */
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		/*
		 * If the h/w supports storing tx+rx MIC keys
		 * in one cache slot automatically enable use.
		 */
		if (ath_hal_hastkipsplit(ah) ||
		    !ath_hal_settkipsplit(ah, AH_FALSE))
			sc->sc_splitmic = 1;
		/*
		 * If the h/w can do TKIP MIC together with WME then
		 * we use it; otherwise we force the MIC to be done
		 * in software by the net80211 layer.
		 */
		if (ath_hal_haswmetkipmic(ah))
			sc->sc_wmetkipmic = 1;
	}
	sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
	/*
	 * Check for multicast key search support.
	 */
	if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
	    !ath_hal_getmcastkeysearch(sc->sc_ah)) {
		ath_hal_setmcastkeysearch(sc->sc_ah, 1);
	}
	sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(sc->sc_keymap, i);
		setbit(sc->sc_keymap, i+64);
		if (sc->sc_splitmic) {
			setbit(sc->sc_keymap, i+32);
			setbit(sc->sc_keymap, i+32+64);
		}
	}
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for misc other capabilities.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;
	sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
	sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
	sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
	sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
	if (ath_hal_hasfastframes(ah))
		ic->ic_caps |= IEEE80211_C_FF;
	wmodes = ath_hal_getwirelessmodes(ah);
	if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
		ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
	if (ath_hal_macversion(ah) > 0x78) {
		ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
		ic->ic_tdma_update = ath_tdma_update;
	}
#endif

	/*
	 * The if_ath 11n support is completely not ready for normal use.
	 * Enabling this option will likely break everything and everything.
	 * Don't think of doing that unless you know what you're doing.
	 */

#ifdef	ATH_ENABLE_11N
	/*
	 * Query HT capabilities
	 */
	if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
	    (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
		int rxs, txs;

		device_printf(sc->sc_dev, "[HT] enabling HT modes\n");
		ic->ic_htcaps = IEEE80211_HTC_HT		/* HT operation */
			    | IEEE80211_HTC_AMPDU		/* A-MPDU tx/rx */
			    | IEEE80211_HTC_AMSDU		/* A-MSDU tx/rx */
			    | IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
			    | IEEE80211_HTCAP_SMPS_OFF;		/* SM power save off */
			;	/* NOTE(review): stray empty statement, harmless */

		/*
		 * Enable short-GI for HT20 only if the hardware
		 * advertises support.
		 * Notably, anything earlier than the AR9287 doesn't.
		 */
		if ((ath_hal_getcapability(ah,
		    HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
		    (wmodes & HAL_MODE_HT20)) {
			device_printf(sc->sc_dev,
			    "[HT] enabling short-GI in 20MHz mode\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
		}

		if (wmodes & HAL_MODE_HT40)
			ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
			    |  IEEE80211_HTCAP_SHORTGI40;

		/*
		 * rx/tx stream is not currently used anywhere; it needs to be taken
		 * into account when negotiating which MCS rates it'll receive and
		 * what MCS rates are available for TX.
		 */
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &rxs);
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &txs);

		ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
		ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);

		ic->ic_txstream = txs;
		ic->ic_rxstream = rxs;

		device_printf(sc->sc_dev, "[HT] %d RX streams; %d TX streams\n", rxs, txs);
	}
#endif

	/*
	 * Indicate we need the 802.11 header padded to a
	 * 32-bit boundary for 4-address and QoS frames.
	 */
	ic->ic_flags |= IEEE80211_F_DATAPAD;

	/*
	 * Query the hal about antenna support.
	 */
	sc->sc_defant = ath_hal_getdefantenna(ah);

	/*
	 * Not all chips have the VEOL support we want to
	 * use with IBSS beacons; check here for it.
	 */
	sc->sc_hasveol = ath_hal_hasveol(ah);

	/* get mac address from hardware */
	ath_hal_getmac(ah, macaddr);
	if (sc->sc_hasbmask)
		ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);

	/* NB: used to size node table key mapping array */
	ic->ic_max_keyix = sc->sc_keymax;
	/* call MI attach routine. */
	ieee80211_ifattach(ic, macaddr);
	ic->ic_setregdomain = ath_setregdomain;
	ic->ic_getradiocaps = ath_getradiocaps;
	sc->sc_opmode = HAL_M_STA;

	/* override default methods */
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	ic->ic_vap_create = ath_vap_create;
	ic->ic_vap_delete = ath_vap_delete;
	ic->ic_raw_xmit = ath_raw_xmit;
	ic->ic_update_mcast = ath_update_mcast;
	ic->ic_update_promisc = ath_update_promisc;
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	ic->ic_node_getsignal = ath_node_getsignal;
	ic->ic_scan_start = ath_scan_start;
	ic->ic_scan_end = ath_scan_end;
	ic->ic_set_channel = ath_set_channel;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		ATH_RX_RADIOTAP_PRESENT);

	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);
	ath_sysctl_stats_attach(sc);
	ath_sysctl_hal_attach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	ath_announce(sc);
	return 0;
bad2:
	/* failure after queue/descriptor setup: undo it */
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
bad:
	/* common failure path: release the hal and ifnet */
	if (ah)
		ath_hal_detach(ah);
	if (ifp != NULL)
		if_free(ifp);
	sc->sc_invalid = 1;
	return error;
}
758
/*
 * Reverse of ath_attach: stop the chip and tear down driver
 * state.  The teardown order is load-bearing -- see the NB:
 * comment below.  Always returns 0.
 */
int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * NB: the order of these is important:
	 * o stop the chip so no more interrupts will fire
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o free the taskqueue which drains any pending tasks
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ath_stop(ifp);
	ieee80211_ifdetach(ifp->if_l2com);
	taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
	/* Optional tx99 continuous-transmit diagnostic module. */
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ath_rate_detach(sc->sc_rc);

	ath_dfs_detach(sc);
	ath_desc_free(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */
	if_free(ifp);

	return 0;
}
798
799/*
800 * MAC address handling for multiple BSS on the same radio.
801 * The first vap uses the MAC address from the EEPROM.  For
802 * subsequent vap's we set the U/L bit (bit 1) in the MAC
803 * address and use the next six bits as an index.
804 */
805static void
806assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
807{
808	int i;
809
810	if (clone && sc->sc_hasbmask) {
811		/* NB: we only do this if h/w supports multiple bssid */
812		for (i = 0; i < 8; i++)
813			if ((sc->sc_bssidmask & (1<<i)) == 0)
814				break;
815		if (i != 0)
816			mac[0] |= (i << 2)|0x2;
817	} else
818		i = 0;
819	sc->sc_bssidmask |= 1<<i;
820	sc->sc_hwbssidmask[0] &= ~mac[0];
821	if (i == 0)
822		sc->sc_nbssid0++;
823}
824
825static void
826reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
827{
828	int i = mac[0] >> 2;
829	uint8_t mask;
830
831	if (i != 0 || --sc->sc_nbssid0 == 0) {
832		sc->sc_bssidmask &= ~(1<<i);
833		/* recalculate bssid mask from remaining addresses */
834		mask = 0xff;
835		for (i = 1; i < 8; i++)
836			if (sc->sc_bssidmask & (1<<i))
837				mask &= ~((i<<2)|0x2);
838		sc->sc_hwbssidmask[0] |= mask;
839	}
840}
841
842/*
843 * Assign a beacon xmit slot.  We try to space out
844 * assignments so when beacons are staggered the
845 * traffic coming out of the cab q has maximal time
846 * to go out before the next beacon is scheduled.
847 */
848static int
849assign_bslot(struct ath_softc *sc)
850{
851	u_int slot, free;
852
853	free = 0;
854	for (slot = 0; slot < ATH_BCBUF; slot++)
855		if (sc->sc_bslot[slot] == NULL) {
856			if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
857			    sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
858				return slot;
859			free = slot;
860			/* NB: keep looking for a double slot */
861		}
862	return free;
863}
864
865static struct ieee80211vap *
866ath_vap_create(struct ieee80211com *ic,
867	const char name[IFNAMSIZ], int unit, int opmode, int flags,
868	const uint8_t bssid[IEEE80211_ADDR_LEN],
869	const uint8_t mac0[IEEE80211_ADDR_LEN])
870{
871	struct ath_softc *sc = ic->ic_ifp->if_softc;
872	struct ath_vap *avp;
873	struct ieee80211vap *vap;
874	uint8_t mac[IEEE80211_ADDR_LEN];
875	int ic_opmode, needbeacon, error;
876
877	avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
878	    M_80211_VAP, M_WAITOK | M_ZERO);
879	needbeacon = 0;
880	IEEE80211_ADDR_COPY(mac, mac0);
881
882	ATH_LOCK(sc);
883	ic_opmode = opmode;		/* default to opmode of new vap */
884	switch (opmode) {
885	case IEEE80211_M_STA:
886		if (sc->sc_nstavaps != 0) {	/* XXX only 1 for now */
887			device_printf(sc->sc_dev, "only 1 sta vap supported\n");
888			goto bad;
889		}
890		if (sc->sc_nvaps) {
891			/*
892			 * With multiple vaps we must fall back
893			 * to s/w beacon miss handling.
894			 */
895			flags |= IEEE80211_CLONE_NOBEACONS;
896		}
897		if (flags & IEEE80211_CLONE_NOBEACONS) {
898			/*
899			 * Station mode w/o beacons are implemented w/ AP mode.
900			 */
901			ic_opmode = IEEE80211_M_HOSTAP;
902		}
903		break;
904	case IEEE80211_M_IBSS:
905		if (sc->sc_nvaps != 0) {	/* XXX only 1 for now */
906			device_printf(sc->sc_dev,
907			    "only 1 ibss vap supported\n");
908			goto bad;
909		}
910		needbeacon = 1;
911		break;
912	case IEEE80211_M_AHDEMO:
913#ifdef IEEE80211_SUPPORT_TDMA
914		if (flags & IEEE80211_CLONE_TDMA) {
915			if (sc->sc_nvaps != 0) {
916				device_printf(sc->sc_dev,
917				    "only 1 tdma vap supported\n");
918				goto bad;
919			}
920			needbeacon = 1;
921			flags |= IEEE80211_CLONE_NOBEACONS;
922		}
923		/* fall thru... */
924#endif
925	case IEEE80211_M_MONITOR:
926		if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
927			/*
928			 * Adopt existing mode.  Adding a monitor or ahdemo
929			 * vap to an existing configuration is of dubious
930			 * value but should be ok.
931			 */
932			/* XXX not right for monitor mode */
933			ic_opmode = ic->ic_opmode;
934		}
935		break;
936	case IEEE80211_M_HOSTAP:
937	case IEEE80211_M_MBSS:
938		needbeacon = 1;
939		break;
940	case IEEE80211_M_WDS:
941		if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
942			device_printf(sc->sc_dev,
943			    "wds not supported in sta mode\n");
944			goto bad;
945		}
946		/*
947		 * Silently remove any request for a unique
948		 * bssid; WDS vap's always share the local
949		 * mac address.
950		 */
951		flags &= ~IEEE80211_CLONE_BSSID;
952		if (sc->sc_nvaps == 0)
953			ic_opmode = IEEE80211_M_HOSTAP;
954		else
955			ic_opmode = ic->ic_opmode;
956		break;
957	default:
958		device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
959		goto bad;
960	}
961	/*
962	 * Check that a beacon buffer is available; the code below assumes it.
963	 */
964	if (needbeacon & STAILQ_EMPTY(&sc->sc_bbuf)) {
965		device_printf(sc->sc_dev, "no beacon buffer available\n");
966		goto bad;
967	}
968
969	/* STA, AHDEMO? */
970	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
971		assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
972		ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
973	}
974
975	vap = &avp->av_vap;
976	/* XXX can't hold mutex across if_alloc */
977	ATH_UNLOCK(sc);
978	error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
979	    bssid, mac);
980	ATH_LOCK(sc);
981	if (error != 0) {
982		device_printf(sc->sc_dev, "%s: error %d creating vap\n",
983		    __func__, error);
984		goto bad2;
985	}
986
987	/* h/w crypto support */
988	vap->iv_key_alloc = ath_key_alloc;
989	vap->iv_key_delete = ath_key_delete;
990	vap->iv_key_set = ath_key_set;
991	vap->iv_key_update_begin = ath_key_update_begin;
992	vap->iv_key_update_end = ath_key_update_end;
993
994	/* override various methods */
995	avp->av_recv_mgmt = vap->iv_recv_mgmt;
996	vap->iv_recv_mgmt = ath_recv_mgmt;
997	vap->iv_reset = ath_reset_vap;
998	vap->iv_update_beacon = ath_beacon_update;
999	avp->av_newstate = vap->iv_newstate;
1000	vap->iv_newstate = ath_newstate;
1001	avp->av_bmiss = vap->iv_bmiss;
1002	vap->iv_bmiss = ath_bmiss_vap;
1003
1004	/* Set default parameters */
1005
1006	/*
1007	 * Anything earlier than some AR9300 series MACs don't
1008	 * support a smaller MPDU density.
1009	 */
1010	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
1011	/*
1012	 * All NICs can handle the maximum size, however
1013	 * AR5416 based MACs can only TX aggregates w/ RTS
1014	 * protection when the total aggregate size is <= 8k.
1015	 * However, for now that's enforced by the TX path.
1016	 */
1017	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
1018
1019	avp->av_bslot = -1;
1020	if (needbeacon) {
1021		/*
1022		 * Allocate beacon state and setup the q for buffered
1023		 * multicast frames.  We know a beacon buffer is
1024		 * available because we checked above.
1025		 */
1026		avp->av_bcbuf = STAILQ_FIRST(&sc->sc_bbuf);
1027		STAILQ_REMOVE_HEAD(&sc->sc_bbuf, bf_list);
1028		if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
1029			/*
1030			 * Assign the vap to a beacon xmit slot.  As above
1031			 * this cannot fail to find a free one.
1032			 */
1033			avp->av_bslot = assign_bslot(sc);
1034			KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
1035			    ("beacon slot %u not empty", avp->av_bslot));
1036			sc->sc_bslot[avp->av_bslot] = vap;
1037			sc->sc_nbcnvaps++;
1038		}
1039		if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
1040			/*
1041			 * Multple vaps are to transmit beacons and we
1042			 * have h/w support for TSF adjusting; enable
1043			 * use of staggered beacons.
1044			 */
1045			sc->sc_stagbeacons = 1;
1046		}
1047		ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
1048	}
1049
1050	ic->ic_opmode = ic_opmode;
1051	if (opmode != IEEE80211_M_WDS) {
1052		sc->sc_nvaps++;
1053		if (opmode == IEEE80211_M_STA)
1054			sc->sc_nstavaps++;
1055		if (opmode == IEEE80211_M_MBSS)
1056			sc->sc_nmeshvaps++;
1057	}
1058	switch (ic_opmode) {
1059	case IEEE80211_M_IBSS:
1060		sc->sc_opmode = HAL_M_IBSS;
1061		break;
1062	case IEEE80211_M_STA:
1063		sc->sc_opmode = HAL_M_STA;
1064		break;
1065	case IEEE80211_M_AHDEMO:
1066#ifdef IEEE80211_SUPPORT_TDMA
1067		if (vap->iv_caps & IEEE80211_C_TDMA) {
1068			sc->sc_tdma = 1;
1069			/* NB: disable tsf adjust */
1070			sc->sc_stagbeacons = 0;
1071		}
1072		/*
1073		 * NB: adhoc demo mode is a pseudo mode; to the hal it's
1074		 * just ap mode.
1075		 */
1076		/* fall thru... */
1077#endif
1078	case IEEE80211_M_HOSTAP:
1079	case IEEE80211_M_MBSS:
1080		sc->sc_opmode = HAL_M_HOSTAP;
1081		break;
1082	case IEEE80211_M_MONITOR:
1083		sc->sc_opmode = HAL_M_MONITOR;
1084		break;
1085	default:
1086		/* XXX should not happen */
1087		break;
1088	}
1089	if (sc->sc_hastsfadd) {
1090		/*
1091		 * Configure whether or not TSF adjust should be done.
1092		 */
1093		ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
1094	}
1095	if (flags & IEEE80211_CLONE_NOBEACONS) {
1096		/*
1097		 * Enable s/w beacon miss handling.
1098		 */
1099		sc->sc_swbmiss = 1;
1100	}
1101	ATH_UNLOCK(sc);
1102
1103	/* complete setup */
1104	ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
1105	return vap;
1106bad2:
1107	reclaim_address(sc, mac);
1108	ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
1109bad:
1110	free(avp, M_80211_VAP);
1111	ATH_UNLOCK(sc);
1112	return NULL;
1113}
1114
/*
 * Destroy a vap: quiesce the hardware, detach the net80211 state,
 * release beacon resources and per-mode bookkeeping, then restart
 * the rx/tx machinery if the interface is still running for other
 * vaps.  Inverse of ath_vap_create().
 */
static void
ath_vap_delete(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp = ATH_VAP(vap);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Quiesce the hardware while we remove the vap.  In
		 * particular we need to reclaim all references to
		 * the vap state by any frames pending on the tx queues.
		 */
		ath_hal_intrset(ah, 0);		/* disable interrupts */
		ath_draintxq(sc);		/* stop xmit side */
		ath_stoprecv(sc);		/* stop recv side */
	}

	/* NB: detach before taking ATH_LOCK; net80211 may sleep. */
	ieee80211_vap_detach(vap);
	ATH_LOCK(sc);
	/*
	 * Reclaim beacon state.  Note this must be done before
	 * the vap instance is reclaimed as we may have a reference
	 * to it in the buffer for the beacon frame.
	 */
	if (avp->av_bcbuf != NULL) {
		if (avp->av_bslot != -1) {
			sc->sc_bslot[avp->av_bslot] = NULL;
			sc->sc_nbcnvaps--;
		}
		ath_beacon_return(sc, avp->av_bcbuf);
		avp->av_bcbuf = NULL;
		/* Last beaconing vap gone: stop staggering/TSF adjust. */
		if (sc->sc_nbcnvaps == 0) {
			sc->sc_stagbeacons = 0;
			if (sc->sc_hastsfadd)
				ath_hal_settsfadjust(sc->sc_ah, 0);
		}
		/*
		 * Reclaim any pending mcast frames for the vap.
		 */
		ath_tx_draintxq(sc, &avp->av_mcastq);
		ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);
	}
	/*
	 * Update bookkeeping.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		sc->sc_nstavaps--;
		/* No sta vaps left: s/w beacon miss no longer needed. */
		if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
			sc->sc_swbmiss = 0;
	} else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	    vap->iv_opmode == IEEE80211_M_MBSS) {
		/* Give the BSSID byte back to the mask pool. */
		reclaim_address(sc, vap->iv_myaddr);
		ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
		if (vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps--;
	}
	if (vap->iv_opmode != IEEE80211_M_WDS)
		sc->sc_nvaps--;
#ifdef IEEE80211_SUPPORT_TDMA
	/* TDMA operation ceases when the last vap is destroyed */
	if (sc->sc_tdma && sc->sc_nvaps == 0) {
		sc->sc_tdma = 0;
		sc->sc_swbmiss = 0;
	}
#endif
	ATH_UNLOCK(sc);
	free(avp, M_80211_VAP);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Restart rx+tx machines if still running (RUNNING will
		 * be reset if we just destroyed the last vap).
		 */
		if (ath_startrecv(sc) != 0)
			if_printf(ifp, "%s: unable to restart recv logic\n",
			    __func__);
		if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma)
				ath_tdma_config(sc, NULL);
			else
#endif
				ath_beacon_config(sc, NULL);
		}
		ath_hal_intrset(ah, sc->sc_imask);
	}
}
1205
1206void
1207ath_suspend(struct ath_softc *sc)
1208{
1209	struct ifnet *ifp = sc->sc_ifp;
1210	struct ieee80211com *ic = ifp->if_l2com;
1211
1212	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1213		__func__, ifp->if_flags);
1214
1215	sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;
1216	if (ic->ic_opmode == IEEE80211_M_STA)
1217		ath_stop(ifp);
1218	else
1219		ieee80211_suspend_all(ic);
1220	/*
1221	 * NB: don't worry about putting the chip in low power
1222	 * mode; pci will power off our socket on suspend and
1223	 * CardBus detaches the device.
1224	 */
1225}
1226
1227/*
1228 * Reset the key cache since some parts do not reset the
1229 * contents on resume.  First we clear all entries, then
1230 * re-load keys that the 802.11 layer assumes are setup
1231 * in h/w.
1232 */
1233static void
1234ath_reset_keycache(struct ath_softc *sc)
1235{
1236	struct ifnet *ifp = sc->sc_ifp;
1237	struct ieee80211com *ic = ifp->if_l2com;
1238	struct ath_hal *ah = sc->sc_ah;
1239	int i;
1240
1241	for (i = 0; i < sc->sc_keymax; i++)
1242		ath_hal_keyreset(ah, i);
1243	ieee80211_crypto_reload_keys(ic);
1244}
1245
1246void
1247ath_resume(struct ath_softc *sc)
1248{
1249	struct ifnet *ifp = sc->sc_ifp;
1250	struct ieee80211com *ic = ifp->if_l2com;
1251	struct ath_hal *ah = sc->sc_ah;
1252	HAL_STATUS status;
1253
1254	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1255		__func__, ifp->if_flags);
1256
1257	/*
1258	 * Must reset the chip before we reload the
1259	 * keycache as we were powered down on suspend.
1260	 */
1261	ath_hal_reset(ah, sc->sc_opmode,
1262	    sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
1263	    AH_FALSE, &status);
1264	ath_reset_keycache(sc);
1265
1266	/* Let DFS at it in case it's a DFS channel */
1267	ath_dfs_radar_enable(sc, ic->ic_curchan);
1268
1269	if (sc->sc_resume_up) {
1270		if (ic->ic_opmode == IEEE80211_M_STA) {
1271			ath_init(sc);
1272			/*
1273			 * Program the beacon registers using the last rx'd
1274			 * beacon frame and enable sync on the next beacon
1275			 * we see.  This should handle the case where we
1276			 * wakeup and find the same AP and also the case where
1277			 * we wakeup and need to roam.  For the latter we
1278			 * should get bmiss events that trigger a roam.
1279			 */
1280			ath_beacon_config(sc, NULL);
1281			sc->sc_syncbeacon = 1;
1282		} else
1283			ieee80211_resume_all(ic);
1284	}
1285	if (sc->sc_softled) {
1286		ath_hal_gpioCfgOutput(ah, sc->sc_ledpin,
1287		    HAL_GPIO_MUX_MAC_NETWORK_LED);
1288		ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
1289	}
1290}
1291
1292void
1293ath_shutdown(struct ath_softc *sc)
1294{
1295	struct ifnet *ifp = sc->sc_ifp;
1296
1297	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1298		__func__, ifp->if_flags);
1299
1300	ath_stop(ifp);
1301	/* NB: no point powering down chip as we're about to reboot */
1302}
1303
1304/*
1305 * Interrupt handler.  Most of the actual processing is deferred.
1306 */
1307void
1308ath_intr(void *arg)
1309{
1310	struct ath_softc *sc = arg;
1311	struct ifnet *ifp = sc->sc_ifp;
1312	struct ath_hal *ah = sc->sc_ah;
1313	HAL_INT status = 0;
1314
1315	if (sc->sc_invalid) {
1316		/*
1317		 * The hardware is not ready/present, don't touch anything.
1318		 * Note this can happen early on if the IRQ is shared.
1319		 */
1320		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
1321		return;
1322	}
1323	if (!ath_hal_intrpend(ah))		/* shared irq, not for us */
1324		return;
1325	if ((ifp->if_flags & IFF_UP) == 0 ||
1326	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1327		HAL_INT status;
1328
1329		DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1330			__func__, ifp->if_flags);
1331		ath_hal_getisr(ah, &status);	/* clear ISR */
1332		ath_hal_intrset(ah, 0);		/* disable further intr's */
1333		return;
1334	}
1335	/*
1336	 * Figure out the reason(s) for the interrupt.  Note
1337	 * that the hal returns a pseudo-ISR that may include
1338	 * bits we haven't explicitly enabled so we mask the
1339	 * value to insure we only process bits we requested.
1340	 */
1341	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
1342	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
1343	status &= sc->sc_imask;			/* discard unasked for bits */
1344
1345	/* Short-circuit un-handled interrupts */
1346	if (status == 0x0)
1347		return;
1348
1349	if (status & HAL_INT_FATAL) {
1350		sc->sc_stats.ast_hardware++;
1351		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
1352		ath_fatal_proc(sc, 0);
1353	} else {
1354		if (status & HAL_INT_SWBA) {
1355			/*
1356			 * Software beacon alert--time to send a beacon.
1357			 * Handle beacon transmission directly; deferring
1358			 * this is too slow to meet timing constraints
1359			 * under load.
1360			 */
1361#ifdef IEEE80211_SUPPORT_TDMA
1362			if (sc->sc_tdma) {
1363				if (sc->sc_tdmaswba == 0) {
1364					struct ieee80211com *ic = ifp->if_l2com;
1365					struct ieee80211vap *vap =
1366					    TAILQ_FIRST(&ic->ic_vaps);
1367					ath_tdma_beacon_send(sc, vap);
1368					sc->sc_tdmaswba =
1369					    vap->iv_tdma->tdma_bintval;
1370				} else
1371					sc->sc_tdmaswba--;
1372			} else
1373#endif
1374			{
1375				ath_beacon_proc(sc, 0);
1376#ifdef IEEE80211_SUPPORT_SUPERG
1377				/*
1378				 * Schedule the rx taskq in case there's no
1379				 * traffic so any frames held on the staging
1380				 * queue are aged and potentially flushed.
1381				 */
1382				taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1383#endif
1384			}
1385		}
1386		if (status & HAL_INT_RXEOL) {
1387			/*
1388			 * NB: the hardware should re-read the link when
1389			 *     RXE bit is written, but it doesn't work at
1390			 *     least on older hardware revs.
1391			 */
1392			sc->sc_stats.ast_rxeol++;
1393			sc->sc_rxlink = NULL;
1394		}
1395		if (status & HAL_INT_TXURN) {
1396			sc->sc_stats.ast_txurn++;
1397			/* bump tx trigger level */
1398			ath_hal_updatetxtriglevel(ah, AH_TRUE);
1399		}
1400		if (status & HAL_INT_RX)
1401			taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1402		if (status & HAL_INT_TX)
1403			taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
1404		if (status & HAL_INT_BMISS) {
1405			sc->sc_stats.ast_bmiss++;
1406			taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
1407		}
1408		if (status & HAL_INT_GTT)
1409			sc->sc_stats.ast_tx_timeout++;
1410		if (status & HAL_INT_CST)
1411			sc->sc_stats.ast_tx_cst++;
1412		if (status & HAL_INT_MIB) {
1413			sc->sc_stats.ast_mib++;
1414			/*
1415			 * Disable interrupts until we service the MIB
1416			 * interrupt; otherwise it will continue to fire.
1417			 */
1418			ath_hal_intrset(ah, 0);
1419			/*
1420			 * Let the hal handle the event.  We assume it will
1421			 * clear whatever condition caused the interrupt.
1422			 */
1423			ath_hal_mibevent(ah, &sc->sc_halstats);
1424			ath_hal_intrset(ah, sc->sc_imask);
1425		}
1426		if (status & HAL_INT_RXORN) {
1427			/* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
1428			sc->sc_stats.ast_rxorn++;
1429		}
1430	}
1431}
1432
1433static void
1434ath_fatal_proc(void *arg, int pending)
1435{
1436	struct ath_softc *sc = arg;
1437	struct ifnet *ifp = sc->sc_ifp;
1438	u_int32_t *state;
1439	u_int32_t len;
1440	void *sp;
1441
1442	if_printf(ifp, "hardware error; resetting\n");
1443	/*
1444	 * Fatal errors are unrecoverable.  Typically these
1445	 * are caused by DMA errors.  Collect h/w state from
1446	 * the hal so we can diagnose what's going on.
1447	 */
1448	if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
1449		KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
1450		state = sp;
1451		if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
1452		    state[0], state[1] , state[2], state[3],
1453		    state[4], state[5]);
1454	}
1455	ath_reset(ifp);
1456}
1457
/*
 * Per-vap beacon-miss handler: filters phantom h/w bmiss
 * interrupts before passing real misses up to net80211 via the
 * saved av_bmiss method.
 */
static void
ath_bmiss_vap(struct ieee80211vap *vap)
{
	/*
	 * Workaround phantom bmiss interrupts by sanity-checking
	 * the time of our last rx'd frame.  If it is within the
	 * beacon miss interval then ignore the interrupt.  If it's
	 * truly a bmiss we'll get another interrupt soon and that'll
	 * be dispatched up for processing.  Note this applies only
	 * for h/w beacon miss events.
	 */
	if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
		struct ifnet *ifp = vap->iv_ic->ic_ifp;
		struct ath_softc *sc = ifp->if_softc;
		u_int64_t lastrx = sc->sc_lastrx;
		u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
		/*
		 * Threshold in TSF units: beacon periods scaled by 1024
		 * (presumably TU -> microseconds; 1 TU = 1024us).
		 * NOTE(review): product is u_int and could overflow for
		 * very large thresholds/intervals — verify upstream.
		 */
		u_int bmisstimeout =
			vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
		    __func__, (unsigned long long) tsf,
		    (unsigned long long)(tsf - lastrx),
		    (unsigned long long) lastrx, bmisstimeout);

		if (tsf - lastrx <= bmisstimeout) {
			/* Heard traffic recently: treat as phantom bmiss. */
			sc->sc_stats.ast_bmiss_phantom++;
			return;
		}
	}
	ATH_VAP(vap)->av_bmiss(vap);
}
1490
1491static int
1492ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
1493{
1494	uint32_t rsize;
1495	void *sp;
1496
1497	if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize))
1498		return 0;
1499	KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
1500	*hangs = *(uint32_t *)sp;
1501	return 1;
1502}
1503
1504static void
1505ath_bmiss_proc(void *arg, int pending)
1506{
1507	struct ath_softc *sc = arg;
1508	struct ifnet *ifp = sc->sc_ifp;
1509	uint32_t hangs;
1510
1511	DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
1512
1513	if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
1514		if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs);
1515		ath_reset(ifp);
1516	} else
1517		ieee80211_beacon_miss(ifp->if_l2com);
1518}
1519
1520/*
1521 * Handle TKIP MIC setup to deal hardware that doesn't do MIC
1522 * calcs together with WME.  If necessary disable the crypto
1523 * hardware and mark the 802.11 state so keys will be setup
1524 * with the MIC work done in software.
1525 */
1526static void
1527ath_settkipmic(struct ath_softc *sc)
1528{
1529	struct ifnet *ifp = sc->sc_ifp;
1530	struct ieee80211com *ic = ifp->if_l2com;
1531
1532	if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
1533		if (ic->ic_flags & IEEE80211_F_WME) {
1534			ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
1535			ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
1536		} else {
1537			ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
1538			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
1539		}
1540	}
1541}
1542
/*
 * Bring the interface up: stop any prior state, reset the chip,
 * restart the receive engine, program the interrupt mask and
 * start all vaps.  Takes ATH_LOCK internally.
 */
static void
ath_init(void *arg)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	ATH_LOCK(sc);
	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop_locked(ifp);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ath_settkipmic(sc);
	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) {
		if_printf(ifp, "unable to reset hardware; hal status %u\n",
			status);
		ATH_UNLOCK(sc);
		return;
	}
	ath_chan_change(sc, ic->ic_curchan);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/*
	 * Likewise this is set during reset so update
	 * state cached in the driver.
	 */
	sc->sc_diversity = ath_hal_getdiversity(ah);
	/* Restart the calibration/ANI state machine from scratch. */
	sc->sc_lastlongcal = 0;
	sc->sc_resetcal = 1;
	sc->sc_lastcalreset = 0;
	sc->sc_lastani = 0;
	sc->sc_lastshortcal = 0;
	sc->sc_doresetcal = AH_FALSE;

	/*
	 * Setup the hardware after reset: the key cache
	 * is filled as needed and the receive engine is
	 * set going.  Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		ATH_UNLOCK(sc);
		return;
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = HAL_INT_RX | HAL_INT_TX
		  | HAL_INT_RXEOL | HAL_INT_RXORN
		  | HAL_INT_FATAL | HAL_INT_GLOBAL;
	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
		sc->sc_imask |= HAL_INT_MIB;

	/* Enable global TX timeout and carrier sense timeout if available */
	if (ath_hal_gtxto_supported(ah))
		sc->sc_imask |= HAL_INT_GTT;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n",
		__func__, sc->sc_imask);

	/* Mark running and arm the watchdog before enabling interrupts. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
	ath_hal_intrset(ah, sc->sc_imask);

	ATH_UNLOCK(sc);

#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->start(sc->sc_tx99);
	else
#endif
	ieee80211_start_all(ic);		/* start all vap's */
}
1639
/*
 * Stop the interface; the inverse of ath_init().  Caller must
 * hold ATH_LOCK (asserted below).  Tolerates the hardware having
 * gone away (sc_invalid) by skipping register touches.
 */
static void
ath_stop_locked(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	ATH_LOCK_ASSERT(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Shutdown the hardware and driver:
		 *    reset 802.11 state machine
		 *    turn off timers
		 *    disable interrupts
		 *    turn off the radio
		 *    clear transmit machinery
		 *    clear receive machinery
		 *    drain and release tx queues
		 *    reclaim beacon resources
		 *    power down hardware
		 *
		 * Note that some of this work is not possible if the
		 * hardware is gone (invalid).
		 */
#ifdef ATH_TX99_DIAG
		if (sc->sc_tx99 != NULL)
			sc->sc_tx99->stop(sc->sc_tx99);
#endif
		/* Cancel the watchdog before clearing RUNNING. */
		callout_stop(&sc->sc_wd_ch);
		sc->sc_wd_timer = 0;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		if (!sc->sc_invalid) {
			if (sc->sc_softled) {
				callout_stop(&sc->sc_ledtimer);
				ath_hal_gpioset(ah, sc->sc_ledpin,
					!sc->sc_ledon);
				sc->sc_blinking = 0;
			}
			ath_hal_intrset(ah, 0);
		}
		ath_draintxq(sc);
		if (!sc->sc_invalid) {
			ath_stoprecv(sc);
			ath_hal_phydisable(ah);
		} else
			sc->sc_rxlink = NULL;
		ath_beacon_free(sc);	/* XXX not needed */
	}
}
1691
1692static void
1693ath_stop(struct ifnet *ifp)
1694{
1695	struct ath_softc *sc = ifp->if_softc;
1696
1697	ATH_LOCK(sc);
1698	ath_stop_locked(ifp);
1699	ATH_UNLOCK(sc);
1700}
1701
1702/*
1703 * Reset the hardware w/o losing operational state.  This is
1704 * basically a more efficient way of doing ath_stop, ath_init,
1705 * followed by state transitions to the current 802.11
1706 * operational state.  Used to recover from various errors and
1707 * to reset or reload hardware state.
1708 */
1709int
1710ath_reset(struct ifnet *ifp)
1711{
1712	struct ath_softc *sc = ifp->if_softc;
1713	struct ieee80211com *ic = ifp->if_l2com;
1714	struct ath_hal *ah = sc->sc_ah;
1715	HAL_STATUS status;
1716
1717	ath_hal_intrset(ah, 0);		/* disable interrupts */
1718	ath_draintxq(sc);		/* stop xmit side */
1719	ath_stoprecv(sc);		/* stop recv side */
1720	ath_settkipmic(sc);		/* configure TKIP MIC handling */
1721	/* NB: indicate channel change so we do a full reset */
1722	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
1723		if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
1724			__func__, status);
1725	sc->sc_diversity = ath_hal_getdiversity(ah);
1726
1727	/* Let DFS at it in case it's a DFS channel */
1728	ath_dfs_radar_enable(sc, ic->ic_curchan);
1729
1730	if (ath_startrecv(sc) != 0)	/* restart recv */
1731		if_printf(ifp, "%s: unable to start recv logic\n", __func__);
1732	/*
1733	 * We may be doing a reset in response to an ioctl
1734	 * that changes the channel so update any state that
1735	 * might change as a result.
1736	 */
1737	ath_chan_change(sc, ic->ic_curchan);
1738	if (sc->sc_beacons) {		/* restart beacons */
1739#ifdef IEEE80211_SUPPORT_TDMA
1740		if (sc->sc_tdma)
1741			ath_tdma_config(sc, NULL);
1742		else
1743#endif
1744			ath_beacon_config(sc, NULL);
1745	}
1746	ath_hal_intrset(ah, sc->sc_imask);
1747
1748	ath_start(ifp);			/* restart xmit */
1749	return 0;
1750}
1751
1752static int
1753ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
1754{
1755	struct ieee80211com *ic = vap->iv_ic;
1756	struct ifnet *ifp = ic->ic_ifp;
1757	struct ath_softc *sc = ifp->if_softc;
1758	struct ath_hal *ah = sc->sc_ah;
1759
1760	switch (cmd) {
1761	case IEEE80211_IOC_TXPOWER:
1762		/*
1763		 * If per-packet TPC is enabled, then we have nothing
1764		 * to do; otherwise we need to force the global limit.
1765		 * All this can happen directly; no need to reset.
1766		 */
1767		if (!ath_hal_gettpc(ah))
1768			ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
1769		return 0;
1770	}
1771	return ath_reset(ifp);
1772}
1773
1774struct ath_buf *
1775_ath_getbuf_locked(struct ath_softc *sc)
1776{
1777	struct ath_buf *bf;
1778
1779	ATH_TXBUF_LOCK_ASSERT(sc);
1780
1781	bf = STAILQ_FIRST(&sc->sc_txbuf);
1782	if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0)
1783		STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
1784	else
1785		bf = NULL;
1786	if (bf == NULL) {
1787		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
1788		    STAILQ_FIRST(&sc->sc_txbuf) == NULL ?
1789			"out of xmit buffers" : "xmit buffer busy");
1790	}
1791	return bf;
1792}
1793
1794struct ath_buf *
1795ath_getbuf(struct ath_softc *sc)
1796{
1797	struct ath_buf *bf;
1798
1799	ATH_TXBUF_LOCK(sc);
1800	bf = _ath_getbuf_locked(sc);
1801	if (bf == NULL) {
1802		struct ifnet *ifp = sc->sc_ifp;
1803
1804		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
1805		sc->sc_stats.ast_tx_qstop++;
1806		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1807	}
1808	ATH_TXBUF_UNLOCK(sc);
1809	return bf;
1810}
1811
/*
 * if_start callback: drain the interface send queue, mapping each
 * frame (and any fragments chained via m_nextpkt) onto tx buffers
 * and handing them to the hardware with ath_tx_start().
 */
static void
ath_start(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct ath_buf *bf;
	struct mbuf *m, *next;
	ath_bufhead frags;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	for (;;) {
		/*
		 * Grab a TX buffer and associated resources.
		 */
		bf = ath_getbuf(sc);
		if (bf == NULL)
			break;

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* Queue empty: return the unused buffer and stop. */
			ATH_TXBUF_LOCK(sc);
			STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
			ATH_TXBUF_UNLOCK(sc);
			break;
		}
		/* NB: net80211 stashes the node reference in rcvif. */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		/*
		 * Check for fragmentation.  If this frame
		 * has been broken up verify we have enough
		 * buffers to send all the fragments so all
		 * go out or none...
		 */
		STAILQ_INIT(&frags);
		if ((m->m_flags & M_FRAG) &&
		    !ath_txfrag_setup(sc, &frags, m, ni)) {
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: out of txfrag buffers\n", __func__);
			sc->sc_stats.ast_tx_nofrag++;
			ifp->if_oerrors++;
			ath_freetx(m);
			goto bad;
		}
		ifp->if_opackets++;
	nextfrag:
		/*
		 * Pass the frame to the h/w for transmission.
		 * Fragmented frames have each frag chained together
		 * with m_nextpkt.  We know there are sufficient ath_buf's
		 * to send all the frags because of work done by
		 * ath_txfrag_setup.  We leave m_nextpkt set while
		 * calling ath_tx_start so it can use it to extend the
		 * the tx duration to cover the subsequent frag and
		 * so it can reclaim all the mbufs in case of an error;
		 * ath_tx_start clears m_nextpkt once it commits to
		 * handing the frame to the hardware.
		 */
		next = m->m_nextpkt;
		if (ath_tx_start(sc, ni, bf, m)) {
	bad:
			ifp->if_oerrors++;
	reclaim:
			/* Return the buffer and any frag buffers, drop the
			 * node reference, then continue with the next frame. */
			bf->bf_m = NULL;
			bf->bf_node = NULL;
			ATH_TXBUF_LOCK(sc);
			STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
			ath_txfrag_cleanup(sc, &frags, ni);
			ATH_TXBUF_UNLOCK(sc);
			if (ni != NULL)
				ieee80211_free_node(ni);
			continue;
		}
		if (next != NULL) {
			/*
			 * Beware of state changing between frags.
			 * XXX check sta power-save state?
			 */
			if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
				DPRINTF(sc, ATH_DEBUG_XMIT,
				    "%s: flush fragmented packet, state %s\n",
				    __func__,
				    ieee80211_state_name[ni->ni_vap->iv_state]);
				ath_freetx(next);
				goto reclaim;
			}
			m = next;
			bf = STAILQ_FIRST(&frags);
			KASSERT(bf != NULL, ("no buf for txfrag"));
			STAILQ_REMOVE_HEAD(&frags, bf_list);
			goto nextfrag;
		}

		/* Arm the tx watchdog now that a frame is in flight. */
		sc->sc_wd_timer = 5;
	}
}
1907
1908static int
1909ath_media_change(struct ifnet *ifp)
1910{
1911	int error = ieee80211_media_change(ifp);
1912	/* NB: only the fixed rate can change and that doesn't need a reset */
1913	return (error == ENETRESET ? 0 : error);
1914}
1915
1916/*
1917 * Block/unblock tx+rx processing while a key change is done.
1918 * We assume the caller serializes key management operations
1919 * so we only need to worry about synchronization with other
1920 * uses that originate in the driver.
1921 */
1922static void
1923ath_key_update_begin(struct ieee80211vap *vap)
1924{
1925	struct ifnet *ifp = vap->iv_ic->ic_ifp;
1926	struct ath_softc *sc = ifp->if_softc;
1927
1928	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
1929	taskqueue_block(sc->sc_tq);
1930	IF_LOCK(&ifp->if_snd);		/* NB: doesn't block mgmt frames */
1931}
1932
1933static void
1934ath_key_update_end(struct ieee80211vap *vap)
1935{
1936	struct ifnet *ifp = vap->iv_ic->ic_ifp;
1937	struct ath_softc *sc = ifp->if_softc;
1938
1939	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
1940	IF_UNLOCK(&ifp->if_snd);
1941	taskqueue_unblock(sc->sc_tq);
1942}
1943
1944/*
1945 * Calculate the receive filter according to the
1946 * operating mode and state:
1947 *
1948 * o always accept unicast, broadcast, and multicast traffic
1949 * o accept PHY error frames when hardware doesn't have MIB support
1950 *   to count and we need them for ANI (sta mode only until recently)
1951 *   and we are not scanning (ANI is disabled)
1952 *   NB: older hal's add rx filter bits out of sight and we need to
1953 *	 blindly preserve them
1954 * o probe request frames are accepted only when operating in
1955 *   hostap, adhoc, mesh, or monitor modes
1956 * o enable promiscuous mode
1957 *   - when in monitor mode
1958 *   - if interface marked PROMISC (assumes bridge setting is filtered)
1959 * o accept beacons:
1960 *   - when operating in station mode for collecting rssi data when
1961 *     the station is otherwise quiet, or
1962 *   - when operating in adhoc mode so the 802.11 layer creates
1963 *     node table entries for peers,
1964 *   - when scanning
1965 *   - when doing s/w beacon miss (e.g. for ap+sta)
1966 *   - when operating in ap mode in 11g to detect overlapping bss that
1967 *     require protection
1968 *   - when operating in mesh mode to detect neighbors
1969 * o accept control frames:
1970 *   - when in monitor mode
1971 * XXX HT protection for 11n
1972 */
1973static u_int32_t
1974ath_calcrxfilter(struct ath_softc *sc)
1975{
1976	struct ifnet *ifp = sc->sc_ifp;
1977	struct ieee80211com *ic = ifp->if_l2com;
1978	u_int32_t rfilt;
1979
1980	rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
1981	if (!sc->sc_needmib && !sc->sc_scanning)
1982		rfilt |= HAL_RX_FILTER_PHYERR;
1983	if (ic->ic_opmode != IEEE80211_M_STA)
1984		rfilt |= HAL_RX_FILTER_PROBEREQ;
1985	/* XXX ic->ic_monvaps != 0? */
1986	if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC))
1987		rfilt |= HAL_RX_FILTER_PROM;
1988	if (ic->ic_opmode == IEEE80211_M_STA ||
1989	    ic->ic_opmode == IEEE80211_M_IBSS ||
1990	    sc->sc_swbmiss || sc->sc_scanning)
1991		rfilt |= HAL_RX_FILTER_BEACON;
1992	/*
1993	 * NB: We don't recalculate the rx filter when
1994	 * ic_protmode changes; otherwise we could do
1995	 * this only when ic_protmode != NONE.
1996	 */
1997	if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
1998	    IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
1999		rfilt |= HAL_RX_FILTER_BEACON;
2000
2001	/*
2002	 * Enable hardware PS-POLL RX only for hostap mode;
2003	 * STA mode sends PS-POLL frames but never
2004	 * receives them.
2005	 */
2006	if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PSPOLL,
2007	    0, NULL) == HAL_OK &&
2008	    ic->ic_opmode == IEEE80211_M_HOSTAP)
2009		rfilt |= HAL_RX_FILTER_PSPOLL;
2010
2011	if (sc->sc_nmeshvaps) {
2012		rfilt |= HAL_RX_FILTER_BEACON;
2013		if (sc->sc_hasbmatch)
2014			rfilt |= HAL_RX_FILTER_BSSID;
2015		else
2016			rfilt |= HAL_RX_FILTER_PROM;
2017	}
2018	if (ic->ic_opmode == IEEE80211_M_MONITOR)
2019		rfilt |= HAL_RX_FILTER_CONTROL;
2020
2021	if (sc->sc_dodfs) {
2022		rfilt |= HAL_RX_FILTER_PHYRADAR;
2023	}
2024
2025	/*
2026	 * Enable RX of compressed BAR frames only when doing
2027	 * 802.11n. Required for A-MPDU.
2028	 */
2029	if (IEEE80211_IS_CHAN_HT(ic->ic_curchan))
2030		rfilt |= HAL_RX_FILTER_COMPBAR;
2031
2032	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n",
2033	    __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags);
2034	return rfilt;
2035}
2036
2037static void
2038ath_update_promisc(struct ifnet *ifp)
2039{
2040	struct ath_softc *sc = ifp->if_softc;
2041	u_int32_t rfilt;
2042
2043	/* configure rx filter */
2044	rfilt = ath_calcrxfilter(sc);
2045	ath_hal_setrxfilter(sc->sc_ah, rfilt);
2046
2047	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
2048}
2049
2050static void
2051ath_update_mcast(struct ifnet *ifp)
2052{
2053	struct ath_softc *sc = ifp->if_softc;
2054	u_int32_t mfilt[2];
2055
2056	/* calculate and install multicast filter */
2057	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2058		struct ifmultiaddr *ifma;
2059		/*
2060		 * Merge multicast addresses to form the hardware filter.
2061		 */
2062		mfilt[0] = mfilt[1] = 0;
2063		if_maddr_rlock(ifp);	/* XXX need some fiddling to remove? */
2064		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2065			caddr_t dl;
2066			u_int32_t val;
2067			u_int8_t pos;
2068
2069			/* calculate XOR of eight 6bit values */
2070			dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2071			val = LE_READ_4(dl + 0);
2072			pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2073			val = LE_READ_4(dl + 3);
2074			pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2075			pos &= 0x3f;
2076			mfilt[pos / 32] |= (1 << (pos % 32));
2077		}
2078		if_maddr_runlock(ifp);
2079	} else
2080		mfilt[0] = mfilt[1] = ~0;
2081	ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
2082	DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
2083		__func__, mfilt[0], mfilt[1]);
2084}
2085
2086static void
2087ath_mode_init(struct ath_softc *sc)
2088{
2089	struct ifnet *ifp = sc->sc_ifp;
2090	struct ath_hal *ah = sc->sc_ah;
2091	u_int32_t rfilt;
2092
2093	/* configure rx filter */
2094	rfilt = ath_calcrxfilter(sc);
2095	ath_hal_setrxfilter(ah, rfilt);
2096
2097	/* configure operational mode */
2098	ath_hal_setopmode(ah);
2099
2100	/* handle any link-level address change */
2101	ath_hal_setmac(ah, IF_LLADDR(ifp));
2102
2103	/* calculate and install multicast filter */
2104	ath_update_mcast(ifp);
2105}
2106
2107/*
2108 * Set the slot time based on the current setting.
2109 */
2110static void
2111ath_setslottime(struct ath_softc *sc)
2112{
2113	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2114	struct ath_hal *ah = sc->sc_ah;
2115	u_int usec;
2116
2117	if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
2118		usec = 13;
2119	else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
2120		usec = 21;
2121	else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
2122		/* honor short/long slot time only in 11g */
2123		/* XXX shouldn't honor on pure g or turbo g channel */
2124		if (ic->ic_flags & IEEE80211_F_SHSLOT)
2125			usec = HAL_SLOT_TIME_9;
2126		else
2127			usec = HAL_SLOT_TIME_20;
2128	} else
2129		usec = HAL_SLOT_TIME_9;
2130
2131	DPRINTF(sc, ATH_DEBUG_RESET,
2132	    "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
2133	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
2134	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
2135
2136	ath_hal_setslottime(ah, usec);
2137	sc->sc_updateslot = OK;
2138}
2139
2140/*
2141 * Callback from the 802.11 layer to update the
2142 * slot time based on the current setting.
2143 */
2144static void
2145ath_updateslot(struct ifnet *ifp)
2146{
2147	struct ath_softc *sc = ifp->if_softc;
2148	struct ieee80211com *ic = ifp->if_l2com;
2149
2150	/*
2151	 * When not coordinating the BSS, change the hardware
2152	 * immediately.  For other operation we defer the change
2153	 * until beacon updates have propagated to the stations.
2154	 */
2155	if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2156	    ic->ic_opmode == IEEE80211_M_MBSS)
2157		sc->sc_updateslot = UPDATE;
2158	else
2159		ath_setslottime(sc);
2160}
2161
2162/*
2163 * Setup a h/w transmit queue for beacons.
2164 */
2165static int
2166ath_beaconq_setup(struct ath_hal *ah)
2167{
2168	HAL_TXQ_INFO qi;
2169
2170	memset(&qi, 0, sizeof(qi));
2171	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
2172	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
2173	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
2174	/* NB: for dynamic turbo, don't enable any other interrupts */
2175	qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE;
2176	return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
2177}
2178
2179/*
2180 * Setup the transmit queue parameters for the beacon queue.
2181 */
2182static int
2183ath_beaconq_config(struct ath_softc *sc)
2184{
2185#define	ATH_EXPONENT_TO_VALUE(v)	((1<<(v))-1)
2186	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2187	struct ath_hal *ah = sc->sc_ah;
2188	HAL_TXQ_INFO qi;
2189
2190	ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
2191	if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2192	    ic->ic_opmode == IEEE80211_M_MBSS) {
2193		/*
2194		 * Always burst out beacon and CAB traffic.
2195		 */
2196		qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
2197		qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT;
2198		qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT;
2199	} else {
2200		struct wmeParams *wmep =
2201			&ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
2202		/*
2203		 * Adhoc mode; important thing is to use 2x cwmin.
2204		 */
2205		qi.tqi_aifs = wmep->wmep_aifsn;
2206		qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2207		qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2208	}
2209
2210	if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
2211		device_printf(sc->sc_dev, "unable to update parameters for "
2212			"beacon hardware queue!\n");
2213		return 0;
2214	} else {
2215		ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
2216		return 1;
2217	}
2218#undef ATH_EXPONENT_TO_VALUE
2219}
2220
2221/*
2222 * Allocate and setup an initial beacon frame.
2223 */
static int
ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_buf *bf;
	struct mbuf *m;
	int error;

	/* Release any frame and node ref still held by the vap's beacon buf. */
	bf = avp->av_bcbuf;
	if (bf->bf_m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}
	if (bf->bf_node != NULL) {
		ieee80211_free_node(bf->bf_node);
		bf->bf_node = NULL;
	}

	/*
	 * NB: the beacon data buffer must be 32-bit aligned;
	 * we assume the mbuf routines will return us something
	 * with this alignment (perhaps should assert).
	 */
	m = ieee80211_beacon_alloc(ni, &avp->av_boff);
	if (m == NULL) {
		device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__);
		sc->sc_stats.ast_be_nombuf++;
		return ENOMEM;
	}
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n",
		    __func__, error);
		m_freem(m);
		return error;
	}

	/*
	 * Calculate a TSF adjustment factor required for staggered
	 * beacons.  Note that we assume the format of the beacon
	 * frame leaves the tstamp field immediately following the
	 * header.
	 */
	if (sc->sc_stagbeacons && avp->av_bslot > 0) {
		uint64_t tsfadjust;
		struct ieee80211_frame *wh;

		/*
		 * The beacon interval is in TU's; the TSF is in usecs.
		 * We figure out how many TU's to add to align the timestamp
		 * then convert to TSF units and handle byte swapping before
		 * inserting it in the frame.  The hardware will then add this
		 * each time a beacon frame is sent.  Note that we align vap's
		 * 1..N and leave vap 0 untouched.  This means vap 0 has a
		 * timestamp in one beacon interval while the others get a
		 * timstamp aligned to the next interval.
		 */
		tsfadjust = ni->ni_intval *
		    (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF;
		tsfadjust = htole64(tsfadjust << 10);	/* TU -> TSF */

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: %s beacons bslot %d intval %u tsfadjust %llu\n",
		    __func__, sc->sc_stagbeacons ? "stagger" : "burst",
		    avp->av_bslot, ni->ni_intval,
		    (long long unsigned) le64toh(tsfadjust));

		/* Write the adjustment into the tstamp field past the header. */
		wh = mtod(m, struct ieee80211_frame *);
		memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust));
	}
	/* Hang on to the frame and a node reference for later transmission. */
	bf->bf_m = m;
	bf->bf_node = ieee80211_ref_node(ni);

	return 0;
}
2304
2305/*
2306 * Setup the beacon frame for transmit.
2307 */
static void
ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
{
#define	USE_SHPREAMBLE(_ic) \
	(((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
		== IEEE80211_F_SHPREAMBLE)
	struct ieee80211_node *ni = bf->bf_node;
	struct ieee80211com *ic = ni->ni_ic;
	struct mbuf *m = bf->bf_m;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	int flags, antenna;
	const HAL_RATE_TABLE *rt;
	u_int8_t rix, rate;

	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
		__func__, m, m->m_len);

	/* setup descriptors */
	ds = bf->bf_desc;

	/* Beacons are never ACK'd. */
	flags = HAL_TXDESC_NOACK;
	if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
		ds->ds_link = bf->bf_daddr;	/* self-linked */
		flags |= HAL_TXDESC_VEOL;
		/*
		 * Let hardware handle antenna switching.
		 */
		antenna = sc->sc_txantenna;
	} else {
		ds->ds_link = 0;
		/*
		 * Switch antenna every 4 beacons.
		 * XXX assumes two antenna
		 */
		if (sc->sc_txantenna != 0)
			antenna = sc->sc_txantenna;
		else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0)
			antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1);
		else
			antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
	}

	KASSERT(bf->bf_nseg == 1,
		("multi-segment beacon frame; nseg %u", bf->bf_nseg));
	ds->ds_data = bf->bf_segs[0].ds_addr;
	/*
	 * Calculate rate code.
	 * XXX everything at min xmit rate
	 */
	rix = 0;
	rt = sc->sc_currates;
	rate = rt->info[rix].rateCode;
	if (USE_SHPREAMBLE(ic))
		rate |= rt->info[rix].shortPreamble;
	ath_hal_setuptxdesc(ah, ds
		, m->m_len + IEEE80211_CRC_LEN	/* frame length */
		, sizeof(struct ieee80211_frame)/* header length */
		, HAL_PKT_TYPE_BEACON		/* Atheros packet type */
		, ni->ni_txpower		/* txpower XXX */
		, rate, 1			/* series 0 rate/tries */
		, HAL_TXKEYIX_INVALID		/* no encryption */
		, antenna			/* antenna mode */
		, flags				/* no ack, veol for beacons */
		, 0				/* rts/cts rate */
		, 0				/* rts/cts duration */
	);
	/* NB: beacon's BufLen must be a multiple of 4 bytes */
	ath_hal_filltxdesc(ah, ds
		, roundup(m->m_len, 4)		/* buffer length */
		, AH_TRUE			/* first segment */
		, AH_TRUE			/* last segment */
		, ds				/* first descriptor */
	);
#if 0
	ath_desc_swap(ds);
#endif
#undef USE_SHPREAMBLE
}
2387
/*
 * Callback from net80211 when a dynamic beacon component changes;
 * mark it so it is regenerated on the next beacon transmission.
 */
static void
ath_beacon_update(struct ieee80211vap *vap, int item)
{
	struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff;

	setbit(bo->bo_flags, item);
}
2395
2396/*
2397 * Append the contents of src to dst; both queues
2398 * are assumed to be locked.
2399 */
2400static void
2401ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
2402{
2403	STAILQ_CONCAT(&dst->axq_q, &src->axq_q);
2404	dst->axq_link = src->axq_link;
2405	src->axq_link = NULL;
2406	dst->axq_depth += src->axq_depth;
2407	src->axq_depth = 0;
2408}
2409
2410/*
2411 * Transmit a beacon frame at SWBA.  Dynamic updates to the
2412 * frame contents are done as needed and the slot time is
2413 * also adjusted based on current state.
2414 */
static void
ath_beacon_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211vap *vap;
	struct ath_buf *bf;
	int slot, otherant;
	uint32_t bfaddr;

	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n",
		__func__, pending);
	/*
	 * Check if the previous beacon has gone out.  If
	 * not don't try to post another, skip this period
	 * and wait for the next.  Missed beacons indicate
	 * a problem and should not occur.  If we miss too
	 * many consecutive beacons reset the device.
	 */
	if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
		sc->sc_bmisscount++;
		sc->sc_stats.ast_be_missed++;
		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: missed %u consecutive beacons\n",
			__func__, sc->sc_bmisscount);
		if (sc->sc_bmisscount >= ath_bstuck_threshold)
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
		return;
	}
	if (sc->sc_bmisscount != 0) {
		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: resume beacon xmit after %u misses\n",
			__func__, sc->sc_bmisscount);
		sc->sc_bmisscount = 0;
	}

	if (sc->sc_stagbeacons) {			/* staggered beacons */
		struct ieee80211com *ic = sc->sc_ifp->if_l2com;
		uint32_t tsftu;

		/* Map the current TSF (converted to TU) onto a beacon slot. */
		tsftu = ath_hal_gettsf32(ah) >> 10;
		/* XXX lintval */
		slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval;
		/*
		 * NOTE(review): the vap for slot+1 is selected here, i.e.
		 * the frame for the *next* TBTT is prepared — presumably so
		 * it is ready ahead of time; confirm against SWBA timing.
		 */
		vap = sc->sc_bslot[(slot+1) % ATH_BCBUF];
		bfaddr = 0;
		if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) {
			bf = ath_beacon_generate(sc, vap);
			if (bf != NULL)
				bfaddr = bf->bf_daddr;
		}
	} else {					/* burst'd beacons */
		uint32_t *bflink = &bfaddr;

		/* Chain each vap's beacon via its descriptor's ds_link. */
		for (slot = 0; slot < ATH_BCBUF; slot++) {
			vap = sc->sc_bslot[slot];
			if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) {
				bf = ath_beacon_generate(sc, vap);
				if (bf != NULL) {
					*bflink = bf->bf_daddr;
					bflink = &bf->bf_desc->ds_link;
				}
			}
		}
		*bflink = 0;				/* terminate list */
	}

	/*
	 * Handle slot time change when a non-ERP station joins/leaves
	 * an 11g network.  The 802.11 layer notifies us via callback,
	 * we mark updateslot, then wait one beacon before effecting
	 * the change.  This gives associated stations at least one
	 * beacon interval to note the state change.
	 */
	/* XXX locking */
	if (sc->sc_updateslot == UPDATE) {
		sc->sc_updateslot = COMMIT;	/* commit next beacon */
		sc->sc_slotupdate = slot;
	} else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
		ath_setslottime(sc);		/* commit change to h/w */

	/*
	 * Check recent per-antenna transmit statistics and flip
	 * the default antenna if noticeably more frames went out
	 * on the non-default antenna.
	 * XXX assumes 2 anntenae
	 */
	if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) {
		otherant = sc->sc_defant & 1 ? 2 : 1;
		if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
			ath_setdefantenna(sc, otherant);
		sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
	}

	if (bfaddr != 0) {
		/*
		 * Stop any current dma and put the new frame on the queue.
		 * This should never fail since we check above that no frames
		 * are still pending on the queue.
		 */
		if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: beacon queue %u did not stop?\n",
				__func__, sc->sc_bhalq);
		}
		/* NB: cabq traffic should already be queued and primed */
		ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr);
		ath_hal_txstart(ah, sc->sc_bhalq);

		sc->sc_stats.ast_be_xmit++;
	}
}
2526
/*
 * Refresh a vap's beacon frame for transmission and, at DTIM, push any
 * queued s/w multicast frames onto the h/w CAB queue.  Returns the
 * beacon buffer, or NULL if the frame could not be remapped.
 */
static struct ath_buf *
ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_txq *cabq = sc->sc_cabq;
	struct ath_buf *bf;
	struct mbuf *m;
	int nmcastq, error;

	KASSERT(vap->iv_state >= IEEE80211_S_RUN,
	    ("not running, state %d", vap->iv_state));
	KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));

	/*
	 * Update dynamic beacon contents.  If this returns
	 * non-zero then we need to remap the memory because
	 * the beacon frame changed size (probably because
	 * of the TIM bitmap).
	 */
	bf = avp->av_bcbuf;
	m = bf->bf_m;
	nmcastq = avp->av_mcastq.axq_depth;
	if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) {
		/* XXX too conservative? */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			if_printf(vap->iv_ifp,
			    "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
			    __func__, error);
			return NULL;
		}
	}
	/*
	 * NOTE(review): bo_tim[4] bit 0 presumably indicates a DTIM beacon
	 * with group traffic pending — confirm against net80211 TIM layout.
	 */
	if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) {
		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: cabq did not drain, mcastq %u cabq %u\n",
		    __func__, nmcastq, cabq->axq_depth);
		sc->sc_stats.ast_cabq_busy++;
		if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) {
			/*
			 * CABQ traffic from a previous vap is still pending.
			 * We must drain the q before this beacon frame goes
			 * out as otherwise this vap's stations will get cab
			 * frames from a different vap.
			 * XXX could be slow causing us to miss DBA
			 */
			ath_tx_draintxq(sc, cabq);
		}
	}
	/* Rebuild the tx descriptor and flush the frame to memory. */
	ath_beacon_setup(sc, bf);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);

	/*
	 * Enable the CAB queue before the beacon queue to
	 * insure cab frames are triggered by this beacon.
	 */
	if (avp->av_boff.bo_tim[4] & 1) {
		struct ath_hal *ah = sc->sc_ah;

		/* NB: only at DTIM */
		ATH_TXQ_LOCK(cabq);
		ATH_TXQ_LOCK(&avp->av_mcastq);
		if (nmcastq) {
			struct ath_buf *bfm;

			/*
			 * Move frames from the s/w mcast q to the h/w cab q.
			 * XXX MORE_DATA bit
			 */
			bfm = STAILQ_FIRST(&avp->av_mcastq.axq_q);
			if (cabq->axq_link != NULL) {
				*cabq->axq_link = bfm->bf_daddr;
			} else
				ath_hal_puttxbuf(ah, cabq->axq_qnum,
					bfm->bf_daddr);
			ath_txqmove(cabq, &avp->av_mcastq);

			sc->sc_stats.ast_cabq_xmit += nmcastq;
		}
		/* NB: gated by beacon so safe to start here */
		ath_hal_txstart(ah, cabq->axq_qnum);
		ATH_TXQ_UNLOCK(cabq);
		ATH_TXQ_UNLOCK(&avp->av_mcastq);
	}
	return bf;
}
2615
/*
 * Load and start the adhoc (self-linked/VEOL) beacon frame after
 * refreshing its dynamic contents; the hardware then retransmits it
 * without further s/w involvement.
 */
static void
ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;
	struct mbuf *m;
	int error;

	KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));

	/*
	 * Update dynamic beacon contents.  If this returns
	 * non-zero then we need to remap the memory because
	 * the beacon frame changed size (probably because
	 * of the TIM bitmap).
	 */
	bf = avp->av_bcbuf;
	m = bf->bf_m;
	if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) {
		/* XXX too conservative? */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			if_printf(vap->iv_ifp,
			    "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
			    __func__, error);
			return;
		}
	}
	/* Rebuild the tx descriptor and flush the frame to memory. */
	ath_beacon_setup(sc, bf);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);

	/* NB: caller is known to have already stopped tx dma */
	ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
	ath_hal_txstart(ah, sc->sc_bhalq);
}
2655
2656/*
2657 * Reset the hardware after detecting beacons have stopped.
2658 */
2659static void
2660ath_bstuck_proc(void *arg, int pending)
2661{
2662	struct ath_softc *sc = arg;
2663	struct ifnet *ifp = sc->sc_ifp;
2664
2665	if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
2666		sc->sc_bmisscount);
2667	sc->sc_stats.ast_bstuck++;
2668	ath_reset(ifp);
2669}
2670
2671/*
2672 * Reclaim beacon resources and return buffer to the pool.
2673 */
2674static void
2675ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf)
2676{
2677
2678	if (bf->bf_m != NULL) {
2679		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2680		m_freem(bf->bf_m);
2681		bf->bf_m = NULL;
2682	}
2683	if (bf->bf_node != NULL) {
2684		ieee80211_free_node(bf->bf_node);
2685		bf->bf_node = NULL;
2686	}
2687	STAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list);
2688}
2689
2690/*
2691 * Reclaim beacon resources.
2692 */
2693static void
2694ath_beacon_free(struct ath_softc *sc)
2695{
2696	struct ath_buf *bf;
2697
2698	STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
2699		if (bf->bf_m != NULL) {
2700			bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2701			m_freem(bf->bf_m);
2702			bf->bf_m = NULL;
2703		}
2704		if (bf->bf_node != NULL) {
2705			ieee80211_free_node(bf->bf_node);
2706			bf->bf_node = NULL;
2707		}
2708	}
2709}
2710
2711/*
2712 * Configure the beacon and sleep timers.
2713 *
2714 * When operating as an AP this resets the TSF and sets
2715 * up the hardware to notify us when we need to issue beacons.
2716 *
2717 * When operating in station mode this sets up the beacon
2718 * timers according to the timestamp of the last received
2719 * beacon and the current TSF, configures PCF and DTIM
2720 * handling, programs the sleep registers so the hardware
2721 * will wakeup in time to receive beacons, and configures
2722 * the beacon miss handling so we'll receive a BMISS
2723 * interrupt when we stop seeing beacons from the AP
2724 * we've associated with.
2725 */
static void
ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
{
#define	TSF_TO_TU(_h,_l) \
	((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
#define	FUDGE	2
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	struct ieee80211_node *ni;
	u_int32_t nexttbtt, intval, tsftu;
	u_int64_t tsf;

	if (vap == NULL)
		vap = TAILQ_FIRST(&ic->ic_vaps);	/* XXX */
	ni = vap->iv_bss;

	/* extract tstamp from last beacon and convert to TU */
	nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4),
			     LE_READ_4(ni->ni_tstamp.data));
	if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
	    ic->ic_opmode == IEEE80211_M_MBSS) {
		/*
		 * For multi-bss ap/mesh support beacons are either staggered
		 * evenly over N slots or burst together.  For the former
		 * arrange for the SWBA to be delivered for each slot.
		 * Slots that are not occupied will generate nothing.
		 */
		/* NB: the beacon interval is kept internally in TU's */
		intval = ni->ni_intval & HAL_BEACON_PERIOD;
		if (sc->sc_stagbeacons)
			intval /= ATH_BCBUF;
	} else {
		/* NB: the beacon interval is kept internally in TU's */
		intval = ni->ni_intval & HAL_BEACON_PERIOD;
	}
	if (nexttbtt == 0)		/* e.g. for ap mode */
		nexttbtt = intval;
	else if (intval)		/* NB: can be 0 for monitor mode */
		nexttbtt = roundup(nexttbtt, intval);
	DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
		__func__, nexttbtt, intval, ni->ni_intval);
	if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) {
		/* Station mode with h/w beacon miss: full timer state setup. */
		HAL_BEACON_STATE bs;
		int dtimperiod, dtimcount;
		int cfpperiod, cfpcount;

		/*
		 * Setup dtim and cfp parameters according to
		 * last beacon we received (which may be none).
		 */
		dtimperiod = ni->ni_dtim_period;
		if (dtimperiod <= 0)		/* NB: 0 if not known */
			dtimperiod = 1;
		dtimcount = ni->ni_dtim_count;
		if (dtimcount >= dtimperiod)	/* NB: sanity check */
			dtimcount = 0;		/* XXX? */
		cfpperiod = 1;			/* NB: no PCF support yet */
		cfpcount = 0;
		/*
		 * Pull nexttbtt forward to reflect the current
		 * TSF and calculate dtim+cfp state for the result.
		 */
		tsf = ath_hal_gettsf64(ah);
		tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
		do {
			nexttbtt += intval;
			if (--dtimcount < 0) {
				dtimcount = dtimperiod - 1;
				if (--cfpcount < 0)
					cfpcount = cfpperiod - 1;
			}
		} while (nexttbtt < tsftu);
		memset(&bs, 0, sizeof(bs));
		bs.bs_intval = intval;
		bs.bs_nexttbtt = nexttbtt;
		bs.bs_dtimperiod = dtimperiod*intval;
		bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
		bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
		bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
		bs.bs_cfpmaxduration = 0;
#if 0
		/*
		 * The 802.11 layer records the offset to the DTIM
		 * bitmap while receiving beacons; use it here to
		 * enable h/w detection of our AID being marked in
		 * the bitmap vector (to indicate frames for us are
		 * pending at the AP).
		 * XXX do DTIM handling in s/w to WAR old h/w bugs
		 * XXX enable based on h/w rev for newer chips
		 */
		bs.bs_timoffset = ni->ni_timoff;
#endif
		/*
		 * Calculate the number of consecutive beacons to miss
		 * before taking a BMISS interrupt.
		 * Note that we clamp the result to at most 10 beacons.
		 */
		bs.bs_bmissthreshold = vap->iv_bmissthreshold;
		if (bs.bs_bmissthreshold > 10)
			bs.bs_bmissthreshold = 10;
		else if (bs.bs_bmissthreshold <= 0)
			bs.bs_bmissthreshold = 1;

		/*
		 * Calculate sleep duration.  The configuration is
		 * given in ms.  We insure a multiple of the beacon
		 * period is used.  Also, if the sleep duration is
		 * greater than the DTIM period then it makes senses
		 * to make it a multiple of that.
		 *
		 * XXX fixed at 100ms
		 */
		bs.bs_sleepduration =
			roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
		if (bs.bs_sleepduration > bs.bs_dtimperiod)
			bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);

		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
			, __func__
			, tsf, tsftu
			, bs.bs_intval
			, bs.bs_nexttbtt
			, bs.bs_dtimperiod
			, bs.bs_nextdtim
			, bs.bs_bmissthreshold
			, bs.bs_sleepduration
			, bs.bs_cfpperiod
			, bs.bs_cfpmaxduration
			, bs.bs_cfpnext
			, bs.bs_timoffset
		);
		/* Disable interrupts while reprogramming the beacon timers. */
		ath_hal_intrset(ah, 0);
		ath_hal_beacontimers(ah, &bs);
		sc->sc_imask |= HAL_INT_BMISS;
		ath_hal_intrset(ah, sc->sc_imask);
	} else {
		ath_hal_intrset(ah, 0);
		if (nexttbtt == intval)
			intval |= HAL_BEACON_RESET_TSF;
		if (ic->ic_opmode == IEEE80211_M_IBSS) {
			/*
			 * In IBSS mode enable the beacon timers but only
			 * enable SWBA interrupts if we need to manually
			 * prepare beacon frames.  Otherwise we use a
			 * self-linked tx descriptor and let the hardware
			 * deal with things.
			 */
			intval |= HAL_BEACON_ENA;
			if (!sc->sc_hasveol)
				sc->sc_imask |= HAL_INT_SWBA;
			if ((intval & HAL_BEACON_RESET_TSF) == 0) {
				/*
				 * Pull nexttbtt forward to reflect
				 * the current TSF.
				 */
				tsf = ath_hal_gettsf64(ah);
				tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
				do {
					nexttbtt += intval;
				} while (nexttbtt < tsftu);
			}
			ath_beaconq_config(sc);
		} else if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
		    ic->ic_opmode == IEEE80211_M_MBSS) {
			/*
			 * In AP/mesh mode we enable the beacon timers
			 * and SWBA interrupts to prepare beacon frames.
			 */
			intval |= HAL_BEACON_ENA;
			sc->sc_imask |= HAL_INT_SWBA;	/* beacon prepare */
			ath_beaconq_config(sc);
		}
		ath_hal_beaconinit(ah, nexttbtt, intval);
		sc->sc_bmisscount = 0;
		ath_hal_intrset(ah, sc->sc_imask);
		/*
		 * When using a self-linked beacon descriptor in
		 * ibss mode load it once here.
		 */
		if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol)
			ath_beacon_start_adhoc(sc, vap);
	}
	/*
	 * NOTE(review): clearing sc_syncbeacon presumably marks the timers
	 * as synced to a received beacon — confirm against the rx path.
	 */
	sc->sc_syncbeacon = 0;
#undef FUDGE
#undef TSF_TO_TU
}
2913
2914static void
2915ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2916{
2917	bus_addr_t *paddr = (bus_addr_t*) arg;
2918	KASSERT(error == 0, ("error %u on bus_dma callback", error));
2919	*paddr = segs->ds_addr;
2920}
2921
2922static int
2923ath_descdma_setup(struct ath_softc *sc,
2924	struct ath_descdma *dd, ath_bufhead *head,
2925	const char *name, int nbuf, int ndesc)
2926{
2927#define	DS2PHYS(_dd, _ds) \
2928	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2929	struct ifnet *ifp = sc->sc_ifp;
2930	struct ath_desc *ds;
2931	struct ath_buf *bf;
2932	int i, bsize, error;
2933
2934	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n",
2935	    __func__, name, nbuf, ndesc);
2936
2937	dd->dd_name = name;
2938	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
2939
2940	/*
2941	 * Setup DMA descriptor area.
2942	 */
2943	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
2944		       PAGE_SIZE, 0,		/* alignment, bounds */
2945		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2946		       BUS_SPACE_MAXADDR,	/* highaddr */
2947		       NULL, NULL,		/* filter, filterarg */
2948		       dd->dd_desc_len,		/* maxsize */
2949		       1,			/* nsegments */
2950		       dd->dd_desc_len,		/* maxsegsize */
2951		       BUS_DMA_ALLOCNOW,	/* flags */
2952		       NULL,			/* lockfunc */
2953		       NULL,			/* lockarg */
2954		       &dd->dd_dmat);
2955	if (error != 0) {
2956		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
2957		return error;
2958	}
2959
2960	/* allocate descriptors */
2961	error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
2962	if (error != 0) {
2963		if_printf(ifp, "unable to create dmamap for %s descriptors, "
2964			"error %u\n", dd->dd_name, error);
2965		goto fail0;
2966	}
2967
2968	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
2969				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2970				 &dd->dd_dmamap);
2971	if (error != 0) {
2972		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
2973			"error %u\n", nbuf * ndesc, dd->dd_name, error);
2974		goto fail1;
2975	}
2976
2977	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
2978				dd->dd_desc, dd->dd_desc_len,
2979				ath_load_cb, &dd->dd_desc_paddr,
2980				BUS_DMA_NOWAIT);
2981	if (error != 0) {
2982		if_printf(ifp, "unable to map %s descriptors, error %u\n",
2983			dd->dd_name, error);
2984		goto fail2;
2985	}
2986
2987	ds = dd->dd_desc;
2988	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
2989	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
2990	    (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
2991
2992	/* allocate rx buffers */
2993	bsize = sizeof(struct ath_buf) * nbuf;
2994	bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
2995	if (bf == NULL) {
2996		if_printf(ifp, "malloc of %s buffers failed, size %u\n",
2997			dd->dd_name, bsize);
2998		goto fail3;
2999	}
3000	dd->dd_bufptr = bf;
3001
3002	STAILQ_INIT(head);
3003	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
3004		bf->bf_desc = ds;
3005		bf->bf_daddr = DS2PHYS(dd, ds);
3006		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
3007				&bf->bf_dmamap);
3008		if (error != 0) {
3009			if_printf(ifp, "unable to create dmamap for %s "
3010				"buffer %u, error %u\n", dd->dd_name, i, error);
3011			ath_descdma_cleanup(sc, dd, head);
3012			return error;
3013		}
3014		STAILQ_INSERT_TAIL(head, bf, bf_list);
3015	}
3016	return 0;
3017fail3:
3018	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3019fail2:
3020	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3021fail1:
3022	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
3023fail0:
3024	bus_dma_tag_destroy(dd->dd_dmat);
3025	memset(dd, 0, sizeof(*dd));
3026	return error;
3027#undef DS2PHYS
3028}
3029
/*
 * Undo ath_descdma_setup(): release the descriptor block's DMA
 * resources, reclaim per-buffer state (mbufs, per-buffer dmamaps,
 * node references), free the ath_buf array, and zero *dd so a later
 * ath_desc_free() treats this area as torn down.
 */
static void
ath_descdma_cleanup(struct ath_softc *sc,
	struct ath_descdma *dd, ath_bufhead *head)
{
	struct ath_buf *bf;
	struct ieee80211_node *ni;

	/* Descriptor block teardown: unload, free memory, then map+tag. */
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	STAILQ_FOREACH(bf, head, bf_list) {
		if (bf->bf_m) {
			m_freem(bf->bf_m);
			bf->bf_m = NULL;
		}
		if (bf->bf_dmamap != NULL) {
			/* NB: per-buffer maps live on sc_dmat, not dd_dmat */
			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
			bf->bf_dmamap = NULL;
		}
		ni = bf->bf_node;
		bf->bf_node = NULL;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
	}

	/* The ath_bufs were allocated as one array; free it in one go. */
	STAILQ_INIT(head);
	free(dd->dd_bufptr, M_ATHDEV);
	memset(dd, 0, sizeof(*dd));
}
3065
3066static int
3067ath_desc_alloc(struct ath_softc *sc)
3068{
3069	int error;
3070
3071	error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
3072			"rx", ath_rxbuf, 1);
3073	if (error != 0)
3074		return error;
3075
3076	error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
3077			"tx", ath_txbuf, ATH_TXDESC);
3078	if (error != 0) {
3079		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3080		return error;
3081	}
3082
3083	error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
3084			"beacon", ATH_BCBUF, 1);
3085	if (error != 0) {
3086		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3087		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3088		return error;
3089	}
3090	return 0;
3091}
3092
3093static void
3094ath_desc_free(struct ath_softc *sc)
3095{
3096
3097	if (sc->sc_bdma.dd_desc_len != 0)
3098		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
3099	if (sc->sc_txdma.dd_desc_len != 0)
3100		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3101	if (sc->sc_rxdma.dd_desc_len != 0)
3102		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3103}
3104
3105static struct ieee80211_node *
3106ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3107{
3108	struct ieee80211com *ic = vap->iv_ic;
3109	struct ath_softc *sc = ic->ic_ifp->if_softc;
3110	const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
3111	struct ath_node *an;
3112
3113	an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
3114	if (an == NULL) {
3115		/* XXX stat+msg */
3116		return NULL;
3117	}
3118	ath_rate_node_init(sc, an);
3119
3120	DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
3121	return &an->an_node;
3122}
3123
3124static void
3125ath_node_free(struct ieee80211_node *ni)
3126{
3127	struct ieee80211com *ic = ni->ni_ic;
3128        struct ath_softc *sc = ic->ic_ifp->if_softc;
3129
3130	DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);
3131
3132	ath_rate_node_cleanup(sc, ATH_NODE(ni));
3133	sc->sc_node_free(ni);
3134}
3135
3136static void
3137ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
3138{
3139	struct ieee80211com *ic = ni->ni_ic;
3140	struct ath_softc *sc = ic->ic_ifp->if_softc;
3141	struct ath_hal *ah = sc->sc_ah;
3142
3143	*rssi = ic->ic_node_getrssi(ni);
3144	if (ni->ni_chan != IEEE80211_CHAN_ANYC)
3145		*noise = ath_hal_getchannoise(ah, ni->ni_chan);
3146	else
3147		*noise = -95;		/* nominally correct */
3148}
3149
/*
 * (Re)arm an rx buffer: attach an mbuf cluster if the buffer lacks
 * one, map it for DMA, fill in the rx descriptor, and chain the
 * descriptor onto the hardware rx list.  Returns 0 on success or an
 * errno (ENOMEM / DMA load failure) leaving the buffer unarmed.
 */
static int
ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	int error;
	struct mbuf *m;
	struct ath_desc *ds;

	m = bf->bf_m;
	if (m == NULL) {
		/*
		 * NB: by assigning a page to the rx dma buffer we
		 * implicitly satisfy the Atheros requirement that
		 * this buffer be cache-line-aligned and sized to be
		 * multiple of the cache line size.  Not doing this
		 * causes weird stuff to happen (for the 5210 at least).
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: no mbuf/cluster\n", __func__);
			sc->sc_stats.ast_rx_nombuf++;
			return ENOMEM;
		}
		/* expose the full cluster to the hardware */
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
					     bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
			    __func__, error);
			sc->sc_stats.ast_rx_busdma++;
			m_freem(m);
			return error;
		}
		/* a single cluster must map to exactly one DMA segment */
		KASSERT(bf->bf_nseg == 1,
			("multi-segment packet; nseg %u", bf->bf_nseg));
		bf->bf_m = m;
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To insure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This insures the hardware always has
	 * someplace to write a new frame.
	 */
	/*
	 * 11N: we can no longer afford to self link the last descriptor.
	 * MAC acknowledges BA status as long as it copies frames to host
	 * buffer (or rx fifo). This can incorrectly acknowledge packets
	 * to a sender if last desc is self-linked.
	 */
	ds = bf->bf_desc;
	if (sc->sc_rxslink)
		ds->ds_link = bf->bf_daddr;	/* link to self */
	else
		ds->ds_link = 0;		/* terminate the list */
	ds->ds_data = bf->bf_segs[0].ds_addr;
	ath_hal_setuprxdesc(ah, ds
		, m->m_len		/* buffer size */
		, 0
	);

	/* patch the previous tail's link to point at this descriptor */
	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
	return 0;
}
3231
3232/*
3233 * Extend 15-bit time stamp from rx descriptor to
3234 * a full 64-bit TSF using the specified TSF.
3235 */
3236static __inline u_int64_t
3237ath_extend_tsf(u_int32_t rstamp, u_int64_t tsf)
3238{
3239	if ((tsf & 0x7fff) < rstamp)
3240		tsf -= 0x8000;
3241	return ((tsf &~ 0x7fff) | rstamp);
3242}
3243
3244/*
3245 * Intercept management frames to collect beacon rssi data
3246 * and to do ibss merges.
3247 */
3248static void
3249ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
3250	int subtype, int rssi, int nf)
3251{
3252	struct ieee80211vap *vap = ni->ni_vap;
3253	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
3254
3255	/*
3256	 * Call up first so subsequent work can use information
3257	 * potentially stored in the node (e.g. for ibss merge).
3258	 */
3259	ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, nf);
3260	switch (subtype) {
3261	case IEEE80211_FC0_SUBTYPE_BEACON:
3262		/* update rssi statistics for use by the hal */
3263		ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
3264		if (sc->sc_syncbeacon &&
3265		    ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) {
3266			/*
3267			 * Resync beacon timers using the tsf of the beacon
3268			 * frame we just received.
3269			 */
3270			ath_beacon_config(sc, vap);
3271		}
3272		/* fall thru... */
3273	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
3274		if (vap->iv_opmode == IEEE80211_M_IBSS &&
3275		    vap->iv_state == IEEE80211_S_RUN) {
3276			uint32_t rstamp = sc->sc_lastrs->rs_tstamp;
3277			uint64_t tsf = ath_extend_tsf(rstamp,
3278				ath_hal_gettsf64(sc->sc_ah));
3279			/*
3280			 * Handle ibss merge as needed; check the tsf on the
3281			 * frame before attempting the merge.  The 802.11 spec
3282			 * says the station should change it's bssid to match
3283			 * the oldest station with the same ssid, where oldest
3284			 * is determined by the tsf.  Note that hardware
3285			 * reconfiguration happens through callback to
3286			 * ath_newstate as the state machine will go from
3287			 * RUN -> RUN when this happens.
3288			 */
3289			if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
3290				DPRINTF(sc, ATH_DEBUG_STATE,
3291				    "ibss merge, rstamp %u tsf %ju "
3292				    "tstamp %ju\n", rstamp, (uintmax_t)tsf,
3293				    (uintmax_t)ni->ni_tstamp.tsf);
3294				(void) ieee80211_ibss_merge(ni);
3295			}
3296		}
3297		break;
3298	}
3299}
3300
3301/*
3302 * Set the default antenna.
3303 */
3304static void
3305ath_setdefantenna(struct ath_softc *sc, u_int antenna)
3306{
3307	struct ath_hal *ah = sc->sc_ah;
3308
3309	/* XXX block beacon interrupts */
3310	ath_hal_setdefantenna(ah, antenna);
3311	if (sc->sc_defant != antenna)
3312		sc->sc_stats.ast_ant_defswitch++;
3313	sc->sc_defant = antenna;
3314	sc->sc_rxotherant = 0;
3315}
3316
/*
 * Fill in the radiotap rx header from the rx status block for a
 * frame about to be handed to radiotap/bpf listeners.
 */
static void
ath_rx_tap(struct ifnet *ifp, struct mbuf *m,
	const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
{
#define	CHAN_HT20	htole32(IEEE80211_CHAN_HT20)
#define	CHAN_HT40U	htole32(IEEE80211_CHAN_HT40U)
#define	CHAN_HT40D	htole32(IEEE80211_CHAN_HT40D)
#define	CHAN_HT		(CHAN_HT20|CHAN_HT40U|CHAN_HT40D)
	struct ath_softc *sc = ifp->if_softc;
	const HAL_RATE_TABLE *rt;
	uint8_t rix;

	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
	rix = rt->rateCodeToIndex[rs->rs_rate];
	sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
	sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
#ifdef AH_SUPPORT_AR5416
	sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT;
	if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) {	/* HT rate */
		struct ieee80211com *ic = ifp->if_l2com;

		/* derive the 20/40MHz channel flag from the rx status */
		if ((rs->rs_flags & HAL_RX_2040) == 0)
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
		else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan))
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
		else
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
		/*
		 * NOTE(review): HAL_RX_GI is counted as "halfgi" (short
		 * guard interval) in ath_rx_proc, so setting SHORTGI when
		 * the bit is *clear* looks inverted -- confirm against the
		 * HAL's HAL_RX_GI definition before changing.
		 */
		if ((rs->rs_flags & HAL_RX_GI) == 0)
			sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI;
	}
#endif
	sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(rs->rs_tstamp, tsf));
	if (rs->rs_status & HAL_RXERR_CRC)
		sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
	/* XXX propagate other error flags from descriptor */
	sc->sc_rx_th.wr_antnoise = nf;
	sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi;
	sc->sc_rx_th.wr_antenna = rs->rs_antenna;
#undef CHAN_HT
#undef CHAN_HT20
#undef CHAN_HT40U
#undef CHAN_HT40D
}
3361
3362static void
3363ath_handle_micerror(struct ieee80211com *ic,
3364	struct ieee80211_frame *wh, int keyix)
3365{
3366	struct ieee80211_node *ni;
3367
3368	/* XXX recheck MIC to deal w/ chips that lie */
3369	/* XXX discard MIC errors on !data frames */
3370	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
3371	if (ni != NULL) {
3372		ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix);
3373		ieee80211_free_node(ni);
3374	}
3375}
3376
/*
 * Deferred (taskqueue) processing of received frames.
 *
 * Reap completed rx descriptors from the head of sc_rxbuf: account
 * status/error bits, hand requested error frames to radiotap taps,
 * reassemble two-descriptor jumbograms via sc_rxpending, and
 * dispatch good frames into net80211.  Every reaped buffer is
 * re-armed with a fresh mbuf by ath_rxbuf_init() (the loop
 * condition) and appended back onto the rx list.
 */
static void
ath_rx_proc(void *arg, int npending)
{
/* Map a descriptor bus address back to its kernel virtual address. */
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_softc *sc = arg;
	struct ath_buf *bf;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct ath_rx_status *rs;
	struct mbuf *m;
	struct ieee80211_node *ni;
	int len, type, ngood;
	HAL_STATUS status;
	int16_t nf;
	u_int64_t tsf;

	DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
	ngood = 0;
	/* sample the noise floor and TSF once for the whole batch */
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;
	tsf = ath_hal_gettsf64(ah);
	do {
		bf = STAILQ_FIRST(&sc->sc_rxbuf);
		if (sc->sc_rxslink && bf == NULL) {	/* NB: shouldn't happen */
			if_printf(ifp, "%s: no buffer!\n", __func__);
			break;
		} else if (bf == NULL) {
			/*
			 * End of List:
			 * this can happen for non-self-linked RX chains
			 */
			sc->sc_stats.ast_rx_hitqueueend++;
			break;
		}
		m = bf->bf_m;
		if (m == NULL) {		/* NB: shouldn't happen */
			/*
			 * If mbuf allocation failed previously there
			 * will be no mbuf; try again to re-populate it.
			 */
			/* XXX make debug msg */
			if_printf(ifp, "%s: no mbuf!\n", __func__);
			STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
			goto rx_next;
		}
		ds = bf->bf_desc;
		if (ds->ds_link == bf->bf_daddr) {
			/* NB: never process the self-linked entry at the end */
			sc->sc_stats.ast_rx_hitqueueend++;
			break;
		}
		/* XXX sync descriptor memory */
		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on.  All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		rs = &bf->bf_status.ds_rxstat;
		status = ath_hal_rxprocdesc(ah, ds,
				bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, status == HAL_OK);
#endif
		if (status == HAL_EINPROGRESS)
			break;
		STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);

		/* These aren't specifically errors */
		if (rs->rs_flags & HAL_RX_GI)
			sc->sc_stats.ast_rx_halfgi++;
		if (rs->rs_flags & HAL_RX_2040)
			sc->sc_stats.ast_rx_2040++;
		if (rs->rs_flags & HAL_RX_DELIM_CRC_PRE)
			sc->sc_stats.ast_rx_pre_crc_err++;
		if (rs->rs_flags & HAL_RX_DELIM_CRC_POST)
			sc->sc_stats.ast_rx_post_crc_err++;
		if (rs->rs_flags & HAL_RX_DECRYPT_BUSY)
			sc->sc_stats.ast_rx_decrypt_busy_err++;
		if (rs->rs_flags & HAL_RX_HI_RX_CHAIN)
			sc->sc_stats.ast_rx_hi_rx_chain++;

		if (rs->rs_status != 0) {
			if (rs->rs_status & HAL_RXERR_CRC)
				sc->sc_stats.ast_rx_crcerr++;
			if (rs->rs_status & HAL_RXERR_FIFO)
				sc->sc_stats.ast_rx_fifoerr++;
			if (rs->rs_status & HAL_RXERR_PHY) {
				sc->sc_stats.ast_rx_phyerr++;
				/* Process DFS radar events */
				if ((rs->rs_phyerr == HAL_PHYERR_RADAR) ||
				    (rs->rs_phyerr == HAL_PHYERR_FALSE_RADAR_EXT)) {
					/* Since we're touching the frame data, sync it */
					bus_dmamap_sync(sc->sc_dmat,
					    bf->bf_dmamap,
					    BUS_DMASYNC_POSTREAD);
					/* Now pass it to the radar processing code */
					ath_dfs_process_phy_err(sc, mtod(m, char *), tsf, rs);
				}

				/* Be suitably paranoid about receiving phy errors out of the stats array bounds */
				if (rs->rs_phyerr < 64)
					sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++;
				goto rx_error;	/* NB: don't count in ierrors */
			}
			if (rs->rs_status & HAL_RXERR_DECRYPT) {
				/*
				 * Decrypt error.  If the error occurred
				 * because there was no hardware key, then
				 * let the frame through so the upper layers
				 * can process it.  This is necessary for 5210
				 * parts which have no way to setup a ``clear''
				 * key cache entry.
				 *
				 * XXX do key cache faulting
				 */
				if (rs->rs_keyix == HAL_RXKEYIX_INVALID)
					goto rx_accept;
				sc->sc_stats.ast_rx_badcrypt++;
			}
			if (rs->rs_status & HAL_RXERR_MIC) {
				sc->sc_stats.ast_rx_badmic++;
				/*
				 * Do minimal work required to hand off
				 * the 802.11 header for notification.
				 */
				/* XXX frag's and qos frames */
				len = rs->rs_datalen;
				if (len >= sizeof (struct ieee80211_frame)) {
					bus_dmamap_sync(sc->sc_dmat,
					    bf->bf_dmamap,
					    BUS_DMASYNC_POSTREAD);
					ath_handle_micerror(ic,
					    mtod(m, struct ieee80211_frame *),
					    sc->sc_splitmic ?
						rs->rs_keyix-32 : rs->rs_keyix);
				}
			}
			ifp->if_ierrors++;
rx_error:
			/*
			 * Cleanup any pending partial frame.
			 */
			if (sc->sc_rxpending != NULL) {
				m_freem(sc->sc_rxpending);
				sc->sc_rxpending = NULL;
			}
			/*
			 * When a tap is present pass error frames
			 * that have been requested.  By default we
			 * pass decrypt+mic errors but others may be
			 * interesting (e.g. crc).
			 */
			if (ieee80211_radiotap_active(ic) &&
			    (rs->rs_status & sc->sc_monpass)) {
				bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
				    BUS_DMASYNC_POSTREAD);
				/* NB: bpf needs the mbuf length setup */
				len = rs->rs_datalen;
				m->m_pkthdr.len = m->m_len = len;
				ath_rx_tap(ifp, m, rs, tsf, nf);
				ieee80211_radiotap_rx_all(ic, m);
			}
			/* XXX pass MIC errors up for s/w recalculation */
			goto rx_next;
		}
rx_accept:
		/*
		 * Sync and unmap the frame.  At this point we're
		 * committed to passing the mbuf somewhere so clear
		 * bf_m; this means a new mbuf must be allocated
		 * when the rx descriptor is setup again to receive
		 * another frame.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		bf->bf_m = NULL;

		len = rs->rs_datalen;
		m->m_len = len;

		if (rs->rs_more) {
			/*
			 * Frame spans multiple descriptors; save
			 * it for the next completed descriptor, it
			 * will be used to construct a jumbogram.
			 */
			if (sc->sc_rxpending != NULL) {
				/* NB: max frame size is currently 2 clusters */
				sc->sc_stats.ast_rx_toobig++;
				m_freem(sc->sc_rxpending);
			}
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
			sc->sc_rxpending = m;
			goto rx_next;
		} else if (sc->sc_rxpending != NULL) {
			/*
			 * This is the second part of a jumbogram,
			 * chain it to the first mbuf, adjust the
			 * frame length, and clear the rxpending state.
			 */
			sc->sc_rxpending->m_next = m;
			sc->sc_rxpending->m_pkthdr.len += len;
			m = sc->sc_rxpending;
			sc->sc_rxpending = NULL;
		} else {
			/*
			 * Normal single-descriptor receive; setup
			 * the rcvif and packet length.
			 */
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
		}

		ifp->if_ipackets++;
		sc->sc_stats.ast_ant_rx[rs->rs_antenna]++;

		/*
		 * Populate the rx status block.  When there are bpf
		 * listeners we do the additional work to provide
		 * complete status.  Otherwise we fill in only the
		 * material required by ieee80211_input.  Note that
		 * noise setting is filled in above.
		 */
		if (ieee80211_radiotap_active(ic))
			ath_rx_tap(ifp, m, rs, tsf, nf);

		/*
		 * From this point on we assume the frame is at least
		 * as large as ieee80211_frame_min; verify that.
		 */
		if (len < IEEE80211_MIN_LEN) {
			if (!ieee80211_radiotap_active(ic)) {
				DPRINTF(sc, ATH_DEBUG_RECV,
				    "%s: short packet %d\n", __func__, len);
				sc->sc_stats.ast_rx_tooshort++;
			} else {
				/* NB: in particular this captures ack's */
				ieee80211_radiotap_rx_all(ic, m);
			}
			m_freem(m);
			goto rx_next;
		}

		if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
			const HAL_RATE_TABLE *rt = sc->sc_currates;
			uint8_t rix = rt->rateCodeToIndex[rs->rs_rate];

			ieee80211_dump_pkt(ic, mtod(m, caddr_t), len,
			    sc->sc_hwmap[rix].ieeerate, rs->rs_rssi);
		}

		/* strip the FCS before handing the frame up */
		m_adj(m, -IEEE80211_CRC_LEN);

		/*
		 * Locate the node for sender, track state, and then
		 * pass the (referenced) node up to the 802.11 layer
		 * for its use.
		 */
		ni = ieee80211_find_rxnode_withkey(ic,
			mtod(m, const struct ieee80211_frame_min *),
			rs->rs_keyix == HAL_RXKEYIX_INVALID ?
				IEEE80211_KEYIX_NONE : rs->rs_keyix);
		sc->sc_lastrs = rs;

		if (rs->rs_isaggr)
			sc->sc_stats.ast_rx_agg++;

		if (ni != NULL) {
			/*
			 * Only punt packets for ampdu reorder processing for
			 * 11n nodes; net80211 enforces that M_AMPDU is only
			 * set for 11n nodes.
			 */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;

			/*
			 * Sending station is known, dispatch directly.
			 */
			type = ieee80211_input(ni, m, rs->rs_rssi, nf);
			ieee80211_free_node(ni);
			/*
			 * Arrange to update the last rx timestamp only for
			 * frames from our ap when operating in station mode.
			 * This assumes the rx key is always setup when
			 * associated.
			 */
			if (ic->ic_opmode == IEEE80211_M_STA &&
			    rs->rs_keyix != HAL_RXKEYIX_INVALID)
				ngood++;
		} else {
			type = ieee80211_input_all(ic, m, rs->rs_rssi, nf);
		}
		/*
		 * Track rx rssi and do any rx antenna management.
		 */
		ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi);
		if (sc->sc_diversity) {
			/*
			 * When using fast diversity, change the default rx
			 * antenna if diversity chooses the other antenna 3
			 * times in a row.
			 */
			if (sc->sc_defant != rs->rs_antenna) {
				if (++sc->sc_rxotherant >= 3)
					ath_setdefantenna(sc, rs->rs_antenna);
			} else
				sc->sc_rxotherant = 0;
		}

		/* Newer school diversity - kite specific for now */
		/* XXX perhaps migrate the normal diversity code to this? */
		if ((ah)->ah_rxAntCombDiversity)
			(*(ah)->ah_rxAntCombDiversity)(ah, rs, ticks, hz);

		if (sc->sc_softled) {
			/*
			 * Blink for any data frame.  Otherwise do a
			 * heartbeat-style blink when idle.  The latter
			 * is mainly for station mode where we depend on
			 * periodic beacon frames to trigger the poll event.
			 */
			if (type == IEEE80211_FC0_TYPE_DATA) {
				const HAL_RATE_TABLE *rt = sc->sc_currates;
				ath_led_event(sc,
				    rt->rateCodeToIndex[rs->rs_rate]);
			} else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
				ath_led_event(sc, 0);
		}
rx_next:
		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
	} while (ath_rxbuf_init(sc, bf) == 0);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);
	if (ngood)
		sc->sc_lastrx = tsf;

	/* Queue DFS tasklet if needed */
	if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

	/* kick the transmit path in case it stalled waiting for rx */
	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
#ifdef IEEE80211_SUPPORT_SUPERG
		ieee80211_ff_age_all(ic, 100);
#endif
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			ath_start(ifp);
	}
#undef PA2DESC
}
3741
3742static void
3743ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
3744{
3745	txq->axq_qnum = qnum;
3746	txq->axq_ac = 0;
3747	txq->axq_depth = 0;
3748	txq->axq_intrcnt = 0;
3749	txq->axq_link = NULL;
3750	STAILQ_INIT(&txq->axq_q);
3751	ATH_TXQ_LOCK_INIT(sc, txq);
3752}
3753
3754/*
3755 * Setup a h/w transmit queue.
3756 */
3757static struct ath_txq *
3758ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
3759{
3760#define	N(a)	(sizeof(a)/sizeof(a[0]))
3761	struct ath_hal *ah = sc->sc_ah;
3762	HAL_TXQ_INFO qi;
3763	int qnum;
3764
3765	memset(&qi, 0, sizeof(qi));
3766	qi.tqi_subtype = subtype;
3767	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
3768	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
3769	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
3770	/*
3771	 * Enable interrupts only for EOL and DESC conditions.
3772	 * We mark tx descriptors to receive a DESC interrupt
3773	 * when a tx queue gets deep; otherwise waiting for the
3774	 * EOL to reap descriptors.  Note that this is done to
3775	 * reduce interrupt load and this only defers reaping
3776	 * descriptors, never transmitting frames.  Aside from
3777	 * reducing interrupts this also permits more concurrency.
3778	 * The only potential downside is if the tx queue backs
3779	 * up in which case the top half of the kernel may backup
3780	 * due to a lack of tx descriptors.
3781	 */
3782	qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE;
3783	qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
3784	if (qnum == -1) {
3785		/*
3786		 * NB: don't print a message, this happens
3787		 * normally on parts with too few tx queues
3788		 */
3789		return NULL;
3790	}
3791	if (qnum >= N(sc->sc_txq)) {
3792		device_printf(sc->sc_dev,
3793			"hal qnum %u out of range, max %zu!\n",
3794			qnum, N(sc->sc_txq));
3795		ath_hal_releasetxqueue(ah, qnum);
3796		return NULL;
3797	}
3798	if (!ATH_TXQ_SETUP(sc, qnum)) {
3799		ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
3800		sc->sc_txqsetup |= 1<<qnum;
3801	}
3802	return &sc->sc_txq[qnum];
3803#undef N
3804}
3805
3806/*
3807 * Setup a hardware data transmit queue for the specified
3808 * access control.  The hal may not support all requested
3809 * queues in which case it will return a reference to a
3810 * previously setup queue.  We record the mapping from ac's
3811 * to h/w queues for use by ath_tx_start and also track
3812 * the set of h/w queues being used to optimize work in the
3813 * transmit interrupt handler and related routines.
3814 */
3815static int
3816ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
3817{
3818#define	N(a)	(sizeof(a)/sizeof(a[0]))
3819	struct ath_txq *txq;
3820
3821	if (ac >= N(sc->sc_ac2q)) {
3822		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
3823			ac, N(sc->sc_ac2q));
3824		return 0;
3825	}
3826	txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
3827	if (txq != NULL) {
3828		txq->axq_ac = ac;
3829		sc->sc_ac2q[ac] = txq;
3830		return 1;
3831	} else
3832		return 0;
3833#undef N
3834}
3835
3836/*
3837 * Update WME parameters for a transmit queue.
3838 */
3839static int
3840ath_txq_update(struct ath_softc *sc, int ac)
3841{
3842#define	ATH_EXPONENT_TO_VALUE(v)	((1<<v)-1)
3843#define	ATH_TXOP_TO_US(v)		(v<<5)
3844	struct ifnet *ifp = sc->sc_ifp;
3845	struct ieee80211com *ic = ifp->if_l2com;
3846	struct ath_txq *txq = sc->sc_ac2q[ac];
3847	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
3848	struct ath_hal *ah = sc->sc_ah;
3849	HAL_TXQ_INFO qi;
3850
3851	ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
3852#ifdef IEEE80211_SUPPORT_TDMA
3853	if (sc->sc_tdma) {
3854		/*
3855		 * AIFS is zero so there's no pre-transmit wait.  The
3856		 * burst time defines the slot duration and is configured
3857		 * through net80211.  The QCU is setup to not do post-xmit
3858		 * back off, lockout all lower-priority QCU's, and fire
3859		 * off the DMA beacon alert timer which is setup based
3860		 * on the slot configuration.
3861		 */
3862		qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
3863			      | HAL_TXQ_TXERRINT_ENABLE
3864			      | HAL_TXQ_TXURNINT_ENABLE
3865			      | HAL_TXQ_TXEOLINT_ENABLE
3866			      | HAL_TXQ_DBA_GATED
3867			      | HAL_TXQ_BACKOFF_DISABLE
3868			      | HAL_TXQ_ARB_LOCKOUT_GLOBAL
3869			      ;
3870		qi.tqi_aifs = 0;
3871		/* XXX +dbaprep? */
3872		qi.tqi_readyTime = sc->sc_tdmaslotlen;
3873		qi.tqi_burstTime = qi.tqi_readyTime;
3874	} else {
3875#endif
3876		qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
3877			      | HAL_TXQ_TXERRINT_ENABLE
3878			      | HAL_TXQ_TXDESCINT_ENABLE
3879			      | HAL_TXQ_TXURNINT_ENABLE
3880			      ;
3881		qi.tqi_aifs = wmep->wmep_aifsn;
3882		qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
3883		qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
3884		qi.tqi_readyTime = 0;
3885		qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
3886#ifdef IEEE80211_SUPPORT_TDMA
3887	}
3888#endif
3889
3890	DPRINTF(sc, ATH_DEBUG_RESET,
3891	    "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n",
3892	    __func__, txq->axq_qnum, qi.tqi_qflags,
3893	    qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime);
3894
3895	if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
3896		if_printf(ifp, "unable to update hardware queue "
3897			"parameters for %s traffic!\n",
3898			ieee80211_wme_acnames[ac]);
3899		return 0;
3900	} else {
3901		ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
3902		return 1;
3903	}
3904#undef ATH_TXOP_TO_US
3905#undef ATH_EXPONENT_TO_VALUE
3906}
3907
3908/*
3909 * Callback from the 802.11 layer to update WME parameters.
3910 */
3911static int
3912ath_wme_update(struct ieee80211com *ic)
3913{
3914	struct ath_softc *sc = ic->ic_ifp->if_softc;
3915
3916	return !ath_txq_update(sc, WME_AC_BE) ||
3917	    !ath_txq_update(sc, WME_AC_BK) ||
3918	    !ath_txq_update(sc, WME_AC_VI) ||
3919	    !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
3920}
3921
3922/*
3923 * Reclaim resources for a setup queue.
3924 */
3925static void
3926ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
3927{
3928
3929	ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
3930	ATH_TXQ_LOCK_DESTROY(txq);
3931	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
3932}
3933
3934/*
3935 * Reclaim all tx queue resources.
3936 */
3937static void
3938ath_tx_cleanup(struct ath_softc *sc)
3939{
3940	int i;
3941
3942	ATH_TXBUF_LOCK_DESTROY(sc);
3943	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
3944		if (ATH_TXQ_SETUP(sc, i))
3945			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
3946}
3947
3948/*
3949 * Return h/w rate index for an IEEE rate (w/o basic rate bit)
3950 * using the current rates in sc_rixmap.
3951 */
3952int
3953ath_tx_findrix(const struct ath_softc *sc, uint8_t rate)
3954{
3955	int rix = sc->sc_rixmap[rate];
3956	/* NB: return lowest rix for invalid rate */
3957	return (rix == 0xff ? 0 : rix);
3958}
3959
3960/*
3961 * Process completed xmit descriptors from the specified queue.
3962 */
3963static int
3964ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
3965{
3966	struct ath_hal *ah = sc->sc_ah;
3967	struct ifnet *ifp = sc->sc_ifp;
3968	struct ieee80211com *ic = ifp->if_l2com;
3969	struct ath_buf *bf, *last;
3970	struct ath_desc *ds, *ds0;
3971	struct ath_tx_status *ts;
3972	struct ieee80211_node *ni;
3973	struct ath_node *an;
3974	int sr, lr, pri, nacked;
3975	HAL_STATUS status;
3976
3977	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
3978		__func__, txq->axq_qnum,
3979		(caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
3980		txq->axq_link);
3981	nacked = 0;
3982	for (;;) {
3983		ATH_TXQ_LOCK(txq);
3984		txq->axq_intrcnt = 0;	/* reset periodic desc intr count */
3985		bf = STAILQ_FIRST(&txq->axq_q);
3986		if (bf == NULL) {
3987			ATH_TXQ_UNLOCK(txq);
3988			break;
3989		}
3990		ds0 = &bf->bf_desc[0];
3991		ds = &bf->bf_desc[bf->bf_nseg - 1];
3992		ts = &bf->bf_status.ds_txstat;
3993		status = ath_hal_txprocdesc(ah, ds, ts);
3994#ifdef ATH_DEBUG
3995		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
3996			ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
3997			    status == HAL_OK);
3998#endif
3999		if (status == HAL_EINPROGRESS) {
4000			ATH_TXQ_UNLOCK(txq);
4001			break;
4002		}
4003		ATH_TXQ_REMOVE_HEAD(txq, bf_list);
4004#ifdef IEEE80211_SUPPORT_TDMA
4005		if (txq->axq_depth > 0) {
4006			/*
4007			 * More frames follow.  Mark the buffer busy
4008			 * so it's not re-used while the hardware may
4009			 * still re-read the link field in the descriptor.
4010			 */
4011			bf->bf_flags |= ATH_BUF_BUSY;
4012		} else
4013#else
4014		if (txq->axq_depth == 0)
4015#endif
4016			txq->axq_link = NULL;
4017		ATH_TXQ_UNLOCK(txq);
4018
4019		ni = bf->bf_node;
4020		if (ni != NULL) {
4021			an = ATH_NODE(ni);
4022			if (ts->ts_status == 0) {
4023				u_int8_t txant = ts->ts_antenna;
4024				sc->sc_stats.ast_ant_tx[txant]++;
4025				sc->sc_ant_tx[txant]++;
4026				if (ts->ts_finaltsi != 0)
4027					sc->sc_stats.ast_tx_altrate++;
4028				pri = M_WME_GETAC(bf->bf_m);
4029				if (pri >= WME_AC_VO)
4030					ic->ic_wme.wme_hipri_traffic++;
4031				if ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)
4032					ni->ni_inact = ni->ni_inact_reload;
4033			} else {
4034				if (ts->ts_status & HAL_TXERR_XRETRY)
4035					sc->sc_stats.ast_tx_xretries++;
4036				if (ts->ts_status & HAL_TXERR_FIFO)
4037					sc->sc_stats.ast_tx_fifoerr++;
4038				if (ts->ts_status & HAL_TXERR_FILT)
4039					sc->sc_stats.ast_tx_filtered++;
4040				if (ts->ts_status & HAL_TXERR_XTXOP)
4041					sc->sc_stats.ast_tx_xtxop++;
4042				if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
4043					sc->sc_stats.ast_tx_timerexpired++;
4044
4045				/* XXX HAL_TX_DATA_UNDERRUN */
4046				/* XXX HAL_TX_DELIM_UNDERRUN */
4047
4048				if (bf->bf_m->m_flags & M_FF)
4049					sc->sc_stats.ast_ff_txerr++;
4050			}
4051			/* XXX when is this valid? */
4052			if (ts->ts_status & HAL_TX_DESC_CFG_ERR)
4053				sc->sc_stats.ast_tx_desccfgerr++;
4054
4055			sr = ts->ts_shortretry;
4056			lr = ts->ts_longretry;
4057			sc->sc_stats.ast_tx_shortretry += sr;
4058			sc->sc_stats.ast_tx_longretry += lr;
4059			/*
4060			 * Hand the descriptor to the rate control algorithm.
4061			 */
4062			if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
4063			    (bf->bf_txflags & HAL_TXDESC_NOACK) == 0) {
4064				/*
4065				 * If frame was ack'd update statistics,
4066				 * including the last rx time used to
4067				 * workaround phantom bmiss interrupts.
4068				 */
4069				if (ts->ts_status == 0) {
4070					nacked++;
4071					sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
4072					ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
4073						ts->ts_rssi);
4074				}
4075				ath_rate_tx_complete(sc, an, bf);
4076			}
4077			/*
4078			 * Do any tx complete callback.  Note this must
4079			 * be done before releasing the node reference.
4080			 */
4081			if (bf->bf_m->m_flags & M_TXCB)
4082				ieee80211_process_callback(ni, bf->bf_m,
4083				    (bf->bf_txflags & HAL_TXDESC_NOACK) == 0 ?
4084				        ts->ts_status : HAL_TXERR_XRETRY);
4085			ieee80211_free_node(ni);
4086		}
4087		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
4088		    BUS_DMASYNC_POSTWRITE);
4089		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
4090
4091		m_freem(bf->bf_m);
4092		bf->bf_m = NULL;
4093		bf->bf_node = NULL;
4094
4095		ATH_TXBUF_LOCK(sc);
4096		last = STAILQ_LAST(&sc->sc_txbuf, ath_buf, bf_list);
4097		if (last != NULL)
4098			last->bf_flags &= ~ATH_BUF_BUSY;
4099		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
4100		ATH_TXBUF_UNLOCK(sc);
4101	}
4102#ifdef IEEE80211_SUPPORT_SUPERG
4103	/*
4104	 * Flush fast-frame staging queue when traffic slows.
4105	 */
4106	if (txq->axq_depth <= 1)
4107		ieee80211_ff_flush(ic, txq->axq_ac);
4108#endif
4109	return nacked;
4110}
4111
4112static __inline int
4113txqactive(struct ath_hal *ah, int qnum)
4114{
4115	u_int32_t txqs = 1<<qnum;
4116	ath_hal_gettxintrtxqs(ah, &txqs);
4117	return (txqs & (1<<qnum));
4118}
4119
4120/*
4121 * Deferred processing of transmit interrupt; special-cased
4122 * for a single hardware transmit queue (e.g. 5210 and 5211).
4123 */
4124static void
4125ath_tx_proc_q0(void *arg, int npending)
4126{
4127	struct ath_softc *sc = arg;
4128	struct ifnet *ifp = sc->sc_ifp;
4129
4130	if (txqactive(sc->sc_ah, 0) && ath_tx_processq(sc, &sc->sc_txq[0]))
4131		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4132	if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
4133		ath_tx_processq(sc, sc->sc_cabq);
4134	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4135	sc->sc_wd_timer = 0;
4136
4137	if (sc->sc_softled)
4138		ath_led_event(sc, sc->sc_txrix);
4139
4140	ath_start(ifp);
4141}
4142
4143/*
4144 * Deferred processing of transmit interrupt; special-cased
4145 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
4146 */
4147static void
4148ath_tx_proc_q0123(void *arg, int npending)
4149{
4150	struct ath_softc *sc = arg;
4151	struct ifnet *ifp = sc->sc_ifp;
4152	int nacked;
4153
4154	/*
4155	 * Process each active queue.
4156	 */
4157	nacked = 0;
4158	if (txqactive(sc->sc_ah, 0))
4159		nacked += ath_tx_processq(sc, &sc->sc_txq[0]);
4160	if (txqactive(sc->sc_ah, 1))
4161		nacked += ath_tx_processq(sc, &sc->sc_txq[1]);
4162	if (txqactive(sc->sc_ah, 2))
4163		nacked += ath_tx_processq(sc, &sc->sc_txq[2]);
4164	if (txqactive(sc->sc_ah, 3))
4165		nacked += ath_tx_processq(sc, &sc->sc_txq[3]);
4166	if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
4167		ath_tx_processq(sc, sc->sc_cabq);
4168	if (nacked)
4169		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4170
4171	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4172	sc->sc_wd_timer = 0;
4173
4174	if (sc->sc_softled)
4175		ath_led_event(sc, sc->sc_txrix);
4176
4177	ath_start(ifp);
4178}
4179
4180/*
4181 * Deferred processing of transmit interrupt.
4182 */
4183static void
4184ath_tx_proc(void *arg, int npending)
4185{
4186	struct ath_softc *sc = arg;
4187	struct ifnet *ifp = sc->sc_ifp;
4188	int i, nacked;
4189
4190	/*
4191	 * Process each active queue.
4192	 */
4193	nacked = 0;
4194	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4195		if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
4196			nacked += ath_tx_processq(sc, &sc->sc_txq[i]);
4197	if (nacked)
4198		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4199
4200	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4201	sc->sc_wd_timer = 0;
4202
4203	if (sc->sc_softled)
4204		ath_led_event(sc, sc->sc_txrix);
4205
4206	ath_start(ifp);
4207}
4208
/*
 * Reclaim all buffers queued on a single tx queue and return them
 * to the free list.  Completion status from the h/w is ignored;
 * pending tx callbacks are invoked with status -1 so waiters see
 * the frames as failed.
 */
static void
ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
#ifdef ATH_DEBUG
	struct ath_hal *ah = sc->sc_ah;
#endif
	struct ieee80211_node *ni;
	struct ath_buf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block ath_tx_proc
	 */
	/* DMA is stopped, so the last free buffer can be reused safely */
	ATH_TXBUF_LOCK(sc);
	bf = STAILQ_LAST(&sc->sc_txbuf, ath_buf, bf_list);
	if (bf != NULL)
		bf->bf_flags &= ~ATH_BUF_BUSY;
	ATH_TXBUF_UNLOCK(sc);
	for (ix = 0;; ix++) {
		ATH_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->axq_q);
		if (bf == NULL) {
			/* queue drained; clear the s/w link pointer */
			txq->axq_link = NULL;
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		ATH_TXQ_REMOVE_HEAD(txq, bf_list);
		ATH_TXQ_UNLOCK(txq);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RESET) {
			struct ieee80211com *ic = sc->sc_ifp->if_l2com;

			ath_printtxbuf(sc, bf, txq->axq_qnum, ix,
				ath_hal_txprocdesc(ah, bf->bf_desc,
				    &bf->bf_status.ds_txstat) == HAL_OK);
			ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *),
			    bf->bf_m->m_len, 0, -1);
		}
#endif /* ATH_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		bf->bf_node = NULL;
		if (ni != NULL) {
			/*
			 * Do any callback and reclaim the node reference.
			 */
			if (bf->bf_m->m_flags & M_TXCB)
				ieee80211_process_callback(ni, bf->bf_m, -1);
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
		bf->bf_flags &= ~ATH_BUF_BUSY;

		/* back on the free list for reuse */
		ATH_TXBUF_LOCK(sc);
		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
		ATH_TXBUF_UNLOCK(sc);
	}
}
4269
4270static void
4271ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
4272{
4273	struct ath_hal *ah = sc->sc_ah;
4274
4275	DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
4276	    __func__, txq->axq_qnum,
4277	    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
4278	    txq->axq_link);
4279	(void) ath_hal_stoptxdma(ah, txq->axq_qnum);
4280}
4281
4282/*
4283 * Drain the transmit queues and reclaim resources.
4284 */
4285static void
4286ath_draintxq(struct ath_softc *sc)
4287{
4288	struct ath_hal *ah = sc->sc_ah;
4289	struct ifnet *ifp = sc->sc_ifp;
4290	int i;
4291
4292	/* XXX return value */
4293	if (!sc->sc_invalid) {
4294		/* don't touch the hardware if marked invalid */
4295		DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
4296		    __func__, sc->sc_bhalq,
4297		    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
4298		    NULL);
4299		(void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
4300		for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4301			if (ATH_TXQ_SETUP(sc, i))
4302				ath_tx_stopdma(sc, &sc->sc_txq[i]);
4303	}
4304	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4305		if (ATH_TXQ_SETUP(sc, i))
4306			ath_tx_draintxq(sc, &sc->sc_txq[i]);
4307#ifdef ATH_DEBUG
4308	if (sc->sc_debug & ATH_DEBUG_RESET) {
4309		struct ath_buf *bf = STAILQ_FIRST(&sc->sc_bbuf);
4310		if (bf != NULL && bf->bf_m != NULL) {
4311			ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
4312				ath_hal_txprocdesc(ah, bf->bf_desc,
4313				    &bf->bf_status.ds_txstat) == HAL_OK);
4314			ieee80211_dump_pkt(ifp->if_l2com,
4315			    mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
4316			    0, -1);
4317		}
4318	}
4319#endif /* ATH_DEBUG */
4320	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4321	sc->sc_wd_timer = 0;
4322}
4323
4324/*
4325 * Disable the receive h/w in preparation for a reset.
4326 */
4327static void
4328ath_stoprecv(struct ath_softc *sc)
4329{
4330#define	PA2DESC(_sc, _pa) \
4331	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
4332		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
4333	struct ath_hal *ah = sc->sc_ah;
4334
4335	ath_hal_stoppcurecv(ah);	/* disable PCU */
4336	ath_hal_setrxfilter(ah, 0);	/* clear recv filter */
4337	ath_hal_stopdmarecv(ah);	/* disable DMA engine */
4338	DELAY(3000);			/* 3ms is long enough for 1 frame */
4339#ifdef ATH_DEBUG
4340	if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
4341		struct ath_buf *bf;
4342		u_int ix;
4343
4344		printf("%s: rx queue %p, link %p\n", __func__,
4345			(caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink);
4346		ix = 0;
4347		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
4348			struct ath_desc *ds = bf->bf_desc;
4349			struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
4350			HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
4351				bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
4352			if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
4353				ath_printrxbuf(sc, bf, ix, status == HAL_OK);
4354			ix++;
4355		}
4356	}
4357#endif
4358	if (sc->sc_rxpending != NULL) {
4359		m_freem(sc->sc_rxpending);
4360		sc->sc_rxpending = NULL;
4361	}
4362	sc->sc_rxlink = NULL;		/* just in case */
4363#undef PA2DESC
4364}
4365
4366/*
4367 * Enable the receive h/w following a reset.
4368 */
4369static int
4370ath_startrecv(struct ath_softc *sc)
4371{
4372	struct ath_hal *ah = sc->sc_ah;
4373	struct ath_buf *bf;
4374
4375	sc->sc_rxlink = NULL;
4376	sc->sc_rxpending = NULL;
4377	STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
4378		int error = ath_rxbuf_init(sc, bf);
4379		if (error != 0) {
4380			DPRINTF(sc, ATH_DEBUG_RECV,
4381				"%s: ath_rxbuf_init failed %d\n",
4382				__func__, error);
4383			return error;
4384		}
4385	}
4386
4387	bf = STAILQ_FIRST(&sc->sc_rxbuf);
4388	ath_hal_putrxbuf(ah, bf->bf_daddr);
4389	ath_hal_rxena(ah);		/* enable recv descriptors */
4390	ath_mode_init(sc);		/* set filters, etc. */
4391	ath_hal_startpcurecv(ah);	/* re-enable PCU/DMA engine */
4392	return 0;
4393}
4394
4395/*
4396 * Update internal state after a channel change.
4397 */
4398static void
4399ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
4400{
4401	enum ieee80211_phymode mode;
4402
4403	/*
4404	 * Change channels and update the h/w rate map
4405	 * if we're switching; e.g. 11a to 11b/g.
4406	 */
4407	mode = ieee80211_chan2mode(chan);
4408	if (mode != sc->sc_curmode)
4409		ath_setcurmode(sc, mode);
4410	sc->sc_curchan = chan;
4411}
4412
4413/*
4414 * Set/change channels.  If the channel is really being changed,
4415 * it's done by resetting the chip.  To accomplish this we must
4416 * first cleanup any pending DMA, then restart stuff after a la
4417 * ath_init.
4418 */
4419static int
4420ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
4421{
4422	struct ifnet *ifp = sc->sc_ifp;
4423	struct ieee80211com *ic = ifp->if_l2com;
4424	struct ath_hal *ah = sc->sc_ah;
4425
4426	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
4427	    __func__, ieee80211_chan2ieee(ic, chan),
4428	    chan->ic_freq, chan->ic_flags);
4429	if (chan != sc->sc_curchan) {
4430		HAL_STATUS status;
4431		/*
4432		 * To switch channels clear any pending DMA operations;
4433		 * wait long enough for the RX fifo to drain, reset the
4434		 * hardware at the new frequency, and then re-enable
4435		 * the relevant bits of the h/w.
4436		 */
4437		ath_hal_intrset(ah, 0);		/* disable interrupts */
4438		ath_draintxq(sc);		/* clear pending tx frames */
4439		ath_stoprecv(sc);		/* turn off frame recv */
4440		if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) {
4441			if_printf(ifp, "%s: unable to reset "
4442			    "channel %u (%u MHz, flags 0x%x), hal status %u\n",
4443			    __func__, ieee80211_chan2ieee(ic, chan),
4444			    chan->ic_freq, chan->ic_flags, status);
4445			return EIO;
4446		}
4447		sc->sc_diversity = ath_hal_getdiversity(ah);
4448
4449		/* Let DFS at it in case it's a DFS channel */
4450		ath_dfs_radar_enable(sc, ic->ic_curchan);
4451
4452		/*
4453		 * Re-enable rx framework.
4454		 */
4455		if (ath_startrecv(sc) != 0) {
4456			if_printf(ifp, "%s: unable to restart recv logic\n",
4457			    __func__);
4458			return EIO;
4459		}
4460
4461		/*
4462		 * Change channels and update the h/w rate map
4463		 * if we're switching; e.g. 11a to 11b/g.
4464		 */
4465		ath_chan_change(sc, chan);
4466
4467		/*
4468		 * Re-enable interrupts.
4469		 */
4470		ath_hal_intrset(ah, sc->sc_imask);
4471	}
4472	return 0;
4473}
4474
4475/*
4476 * Periodically recalibrate the PHY to account
4477 * for temperature/environment changes.
4478 */
4479static void
4480ath_calibrate(void *arg)
4481{
4482	struct ath_softc *sc = arg;
4483	struct ath_hal *ah = sc->sc_ah;
4484	struct ifnet *ifp = sc->sc_ifp;
4485	struct ieee80211com *ic = ifp->if_l2com;
4486	HAL_BOOL longCal, isCalDone;
4487	HAL_BOOL aniCal, shortCal = AH_FALSE;
4488	int nextcal;
4489
4490	if (ic->ic_flags & IEEE80211_F_SCAN)	/* defer, off channel */
4491		goto restart;
4492	longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz);
4493	aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000);
4494	if (sc->sc_doresetcal)
4495		shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000);
4496
4497	DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal);
4498	if (aniCal) {
4499		sc->sc_stats.ast_ani_cal++;
4500		sc->sc_lastani = ticks;
4501		ath_hal_ani_poll(ah, sc->sc_curchan);
4502	}
4503
4504	if (longCal) {
4505		sc->sc_stats.ast_per_cal++;
4506		sc->sc_lastlongcal = ticks;
4507		if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
4508			/*
4509			 * Rfgain is out of bounds, reset the chip
4510			 * to load new gain values.
4511			 */
4512			DPRINTF(sc, ATH_DEBUG_CALIBRATE,
4513				"%s: rfgain change\n", __func__);
4514			sc->sc_stats.ast_per_rfgain++;
4515			ath_reset(ifp);
4516		}
4517		/*
4518		 * If this long cal is after an idle period, then
4519		 * reset the data collection state so we start fresh.
4520		 */
4521		if (sc->sc_resetcal) {
4522			(void) ath_hal_calreset(ah, sc->sc_curchan);
4523			sc->sc_lastcalreset = ticks;
4524			sc->sc_lastshortcal = ticks;
4525			sc->sc_resetcal = 0;
4526			sc->sc_doresetcal = AH_TRUE;
4527		}
4528	}
4529
4530	/* Only call if we're doing a short/long cal, not for ANI calibration */
4531	if (shortCal || longCal) {
4532		if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) {
4533			if (longCal) {
4534				/*
4535				 * Calibrate noise floor data again in case of change.
4536				 */
4537				ath_hal_process_noisefloor(ah);
4538			}
4539		} else {
4540			DPRINTF(sc, ATH_DEBUG_ANY,
4541				"%s: calibration of channel %u failed\n",
4542				__func__, sc->sc_curchan->ic_freq);
4543			sc->sc_stats.ast_per_calfail++;
4544		}
4545		if (shortCal)
4546			sc->sc_lastshortcal = ticks;
4547	}
4548	if (!isCalDone) {
4549restart:
4550		/*
4551		 * Use a shorter interval to potentially collect multiple
4552		 * data samples required to complete calibration.  Once
4553		 * we're told the work is done we drop back to a longer
4554		 * interval between requests.  We're more aggressive doing
4555		 * work when operating as an AP to improve operation right
4556		 * after startup.
4557		 */
4558		sc->sc_lastshortcal = ticks;
4559		nextcal = ath_shortcalinterval*hz/1000;
4560		if (sc->sc_opmode != HAL_M_HOSTAP)
4561			nextcal *= 10;
4562		sc->sc_doresetcal = AH_TRUE;
4563	} else {
4564		/* nextcal should be the shortest time for next event */
4565		nextcal = ath_longcalinterval*hz;
4566		if (sc->sc_lastcalreset == 0)
4567			sc->sc_lastcalreset = sc->sc_lastlongcal;
4568		else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz)
4569			sc->sc_resetcal = 1;	/* setup reset next trip */
4570		sc->sc_doresetcal = AH_FALSE;
4571	}
4572	/* ANI calibration may occur more often than short/long/resetcal */
4573	if (ath_anicalinterval > 0)
4574		nextcal = MIN(nextcal, ath_anicalinterval*hz/1000);
4575
4576	if (nextcal != 0) {
4577		DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n",
4578		    __func__, nextcal, isCalDone ? "" : "!");
4579		callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc);
4580	} else {
4581		DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n",
4582		    __func__);
4583		/* NB: don't rearm timer */
4584	}
4585}
4586
4587static void
4588ath_scan_start(struct ieee80211com *ic)
4589{
4590	struct ifnet *ifp = ic->ic_ifp;
4591	struct ath_softc *sc = ifp->if_softc;
4592	struct ath_hal *ah = sc->sc_ah;
4593	u_int32_t rfilt;
4594
4595	/* XXX calibration timer? */
4596
4597	sc->sc_scanning = 1;
4598	sc->sc_syncbeacon = 0;
4599	rfilt = ath_calcrxfilter(sc);
4600	ath_hal_setrxfilter(ah, rfilt);
4601	ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0);
4602
4603	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
4604		 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr));
4605}
4606
4607static void
4608ath_scan_end(struct ieee80211com *ic)
4609{
4610	struct ifnet *ifp = ic->ic_ifp;
4611	struct ath_softc *sc = ifp->if_softc;
4612	struct ath_hal *ah = sc->sc_ah;
4613	u_int32_t rfilt;
4614
4615	sc->sc_scanning = 0;
4616	rfilt = ath_calcrxfilter(sc);
4617	ath_hal_setrxfilter(ah, rfilt);
4618	ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
4619
4620	ath_hal_process_noisefloor(ah);
4621
4622	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
4623		 __func__, rfilt, ether_sprintf(sc->sc_curbssid),
4624		 sc->sc_curaid);
4625}
4626
4627static void
4628ath_set_channel(struct ieee80211com *ic)
4629{
4630	struct ifnet *ifp = ic->ic_ifp;
4631	struct ath_softc *sc = ifp->if_softc;
4632
4633	(void) ath_chan_set(sc, ic->ic_curchan);
4634	/*
4635	 * If we are returning to our bss channel then mark state
4636	 * so the next recv'd beacon's tsf will be used to sync the
4637	 * beacon timers.  Note that since we only hear beacons in
4638	 * sta/ibss mode this has no effect in other operating modes.
4639	 */
4640	if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
4641		sc->sc_syncbeacon = 1;
4642}
4643
4644/*
4645 * Walk the vap list and check if there any vap's in RUN state.
4646 */
4647static int
4648ath_isanyrunningvaps(struct ieee80211vap *this)
4649{
4650	struct ieee80211com *ic = this->iv_ic;
4651	struct ieee80211vap *vap;
4652
4653	IEEE80211_LOCK_ASSERT(ic);
4654
4655	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
4656		if (vap != this && vap->iv_state >= IEEE80211_S_RUN)
4657			return 1;
4658	}
4659	return 0;
4660}
4661
/*
 * net80211 state-machine hook.  Adjusts driver/h/w state for the
 * vap's transition to nstate, invokes the parent (net80211) state
 * handler, and then performs any post-transition work (beacon
 * setup, calibration timer, interrupt masking).  Returns 0 or the
 * error from the parent handler / beacon allocation.
 */
static int
ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_node *ni = NULL;
	int i, error, stamode;
	u_int32_t rfilt;
	/* LED pattern per target state; indexed by nstate */
	static const HAL_LED_STATE leds[] = {
	    HAL_LED_INIT,	/* IEEE80211_S_INIT */
	    HAL_LED_SCAN,	/* IEEE80211_S_SCAN */
	    HAL_LED_AUTH,	/* IEEE80211_S_AUTH */
	    HAL_LED_ASSOC, 	/* IEEE80211_S_ASSOC */
	    HAL_LED_RUN, 	/* IEEE80211_S_CAC */
	    HAL_LED_RUN, 	/* IEEE80211_S_RUN */
	    HAL_LED_RUN, 	/* IEEE80211_S_CSA */
	    HAL_LED_RUN, 	/* IEEE80211_S_SLEEP */
	};

	DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
		ieee80211_state_name[vap->iv_state],
		ieee80211_state_name[nstate]);

	/* stop the calibration timer across the transition */
	callout_drain(&sc->sc_cal_ch);
	ath_hal_setledstate(ah, leds[nstate]);	/* set LED */

	if (nstate == IEEE80211_S_SCAN) {
		/*
		 * Scanning: turn off beacon miss and don't beacon.
		 * Mark beacon state so when we reach RUN state we'll
		 * [re]setup beacons.  Unblock the task q thread so
		 * deferred interrupt processing is done.
		 */
		ath_hal_intrset(ah,
		    sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
		sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
		sc->sc_beacons = 0;
		taskqueue_unblock(sc->sc_tq);
	}

	ni = vap->iv_bss;
	rfilt = ath_calcrxfilter(sc);
	stamode = (vap->iv_opmode == IEEE80211_M_STA ||
		   vap->iv_opmode == IEEE80211_M_AHDEMO ||
		   vap->iv_opmode == IEEE80211_M_IBSS);
	if (stamode && nstate == IEEE80211_S_RUN) {
		/* latch the bss we're joining for rx filtering */
		sc->sc_curaid = ni->ni_associd;
		IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
		ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
	}
	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
	   __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
	ath_hal_setrxfilter(ah, rfilt);

	/* XXX is this to restore keycache on resume? */
	if (vap->iv_opmode != IEEE80211_M_STA &&
	    (vap->iv_flags & IEEE80211_F_PRIVACY)) {
		for (i = 0; i < IEEE80211_WEP_NKID; i++)
			if (ath_hal_keyisvalid(ah, i))
				ath_hal_keysetmac(ah, i, ni->ni_bssid);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = avp->av_newstate(vap, nstate, arg);
	if (error != 0)
		goto bad;

	if (nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, ATH_DEBUG_STATE,
		    "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n", __func__,
		    vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid),
		    ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));

		switch (vap->iv_opmode) {
#ifdef IEEE80211_SUPPORT_TDMA
		case IEEE80211_M_AHDEMO:
			if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
				break;
			/* fall thru... */
#endif
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_IBSS:
		case IEEE80211_M_MBSS:
			/*
			 * Allocate and setup the beacon frame.
			 *
			 * Stop any previous beacon DMA.  This may be
			 * necessary, for example, when an ibss merge
			 * causes reconfiguration; there will be a state
			 * transition from RUN->RUN that means we may
			 * be called with beacon transmission active.
			 */
			ath_hal_stoptxdma(ah, sc->sc_bhalq);

			error = ath_beacon_alloc(sc, ni);
			if (error != 0)
				goto bad;
			/*
			 * If joining an adhoc network defer beacon timer
			 * configuration to the next beacon frame so we
			 * have a current TSF to use.  Otherwise we're
			 * starting an ibss/bss so there's no need to delay;
			 * if this is the first vap moving to RUN state, then
			 * beacon state needs to be [re]configured.
			 */
			if (vap->iv_opmode == IEEE80211_M_IBSS &&
			    ni->ni_tstamp.tsf != 0) {
				sc->sc_syncbeacon = 1;
			} else if (!sc->sc_beacons) {
#ifdef IEEE80211_SUPPORT_TDMA
				if (vap->iv_caps & IEEE80211_C_TDMA)
					ath_tdma_config(sc, vap);
				else
#endif
					ath_beacon_config(sc, vap);
				sc->sc_beacons = 1;
			}
			break;
		case IEEE80211_M_STA:
			/*
			 * Defer beacon timer configuration to the next
			 * beacon frame so we have a current TSF to use
			 * (any TSF collected when scanning is likely old).
			 */
			sc->sc_syncbeacon = 1;
			break;
		case IEEE80211_M_MONITOR:
			/*
			 * Monitor mode vaps have only INIT->RUN and RUN->RUN
			 * transitions so we must re-enable interrupts here to
			 * handle the case of a single monitor mode vap.
			 */
			ath_hal_intrset(ah, sc->sc_imask);
			break;
		case IEEE80211_M_WDS:
			break;
		default:
			break;
		}
		/*
		 * Let the hal process statistics collected during a
		 * scan so it can provide calibrated noise floor data.
		 */
		ath_hal_process_noisefloor(ah);
		/*
		 * Reset rssi stats; maybe not the best place...
		 */
		sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
		/*
		 * Finally, start any timers and the task q thread
		 * (in case we didn't go through SCAN state).
		 */
		if (ath_longcalinterval != 0) {
			/* start periodic recalibration timer */
			callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
		} else {
			DPRINTF(sc, ATH_DEBUG_CALIBRATE,
			    "%s: calibration disabled\n", __func__);
		}
		taskqueue_unblock(sc->sc_tq);
	} else if (nstate == IEEE80211_S_INIT) {
		/*
		 * If there are no vaps left in RUN state then
		 * shutdown host/driver operation:
		 * o disable interrupts
		 * o disable the task queue thread
		 * o mark beacon processing as stopped
		 */
		if (!ath_isanyrunningvaps(vap)) {
			sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
			/* disable interrupts  */
			ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
			taskqueue_block(sc->sc_tq);
			sc->sc_beacons = 0;
		}
#ifdef IEEE80211_SUPPORT_TDMA
		ath_hal_setcca(ah, AH_TRUE);
#endif
	}
	/* NB: the success path also falls through to here with error == 0 */
bad:
	return error;
}
4854
4855/*
4856 * Allocate a key cache slot to the station so we can
4857 * setup a mapping from key index to node. The key cache
4858 * slot is needed for managing antenna state and for
4859 * compression when stations do not use crypto.  We do
4860 * it uniliaterally here; if crypto is employed this slot
4861 * will be reassigned.
4862 */
4863static void
4864ath_setup_stationkey(struct ieee80211_node *ni)
4865{
4866	struct ieee80211vap *vap = ni->ni_vap;
4867	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4868	ieee80211_keyix keyix, rxkeyix;
4869
4870	if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
4871		/*
4872		 * Key cache is full; we'll fall back to doing
4873		 * the more expensive lookup in software.  Note
4874		 * this also means no h/w compression.
4875		 */
4876		/* XXX msg+statistic */
4877	} else {
4878		/* XXX locking? */
4879		ni->ni_ucastkey.wk_keyix = keyix;
4880		ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
4881		/* NB: must mark device key to get called back on delete */
4882		ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
4883		IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
4884		/* NB: this will create a pass-thru key entry */
4885		ath_keyset(sc, &ni->ni_ucastkey, vap->iv_bss);
4886	}
4887}
4888
4889/*
4890 * Setup driver-specific state for a newly associated node.
4891 * Note that we're called also on a re-associate, the isnew
4892 * param tells us if this is the first time or not.
4893 */
4894static void
4895ath_newassoc(struct ieee80211_node *ni, int isnew)
4896{
4897	struct ath_node *an = ATH_NODE(ni);
4898	struct ieee80211vap *vap = ni->ni_vap;
4899	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4900	const struct ieee80211_txparam *tp = ni->ni_txparms;
4901
4902	an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
4903	an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);
4904
4905	ath_rate_newassoc(sc, an, isnew);
4906	if (isnew &&
4907	    (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
4908	    ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
4909		ath_setup_stationkey(ni);
4910}
4911
/*
 * net80211 regdomain callback: hand the proposed channel list and
 * country/regdomain codes to the HAL for validation/installation.
 * Returns 0 on success or EINVAL if the HAL rejects the combination.
 */
4912static int
4913ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
4914	int nchans, struct ieee80211_channel chans[])
4915{
4916	struct ath_softc *sc = ic->ic_ifp->if_softc;
4917	struct ath_hal *ah = sc->sc_ah;
4918	HAL_STATUS status;
4919
4920	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
4921	    "%s: rd %u cc %u location %c%s\n",
4922	    __func__, reg->regdomain, reg->country, reg->location,
4923	    reg->ecm ? " ecm" : "");
4924
4925	status = ath_hal_set_channels(ah, chans, nchans,
4926	    reg->country, reg->regdomain);
4927	if (status != HAL_OK) {
4928		DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
4929		    __func__, status);
4930		return EINVAL;		/* XXX */
4931	}
4932	return 0;
4933}
4934
/*
 * net80211 callback: report every channel the radio can support by
 * querying the HAL with the debug SKU and default country code so
 * the list is not filtered by current regulatory state.
 */
4935static void
4936ath_getradiocaps(struct ieee80211com *ic,
4937	int maxchans, int *nchans, struct ieee80211_channel chans[])
4938{
4939	struct ath_softc *sc = ic->ic_ifp->if_softc;
4940	struct ath_hal *ah = sc->sc_ah;
4941
4942	DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
4943	    __func__, SKU_DEBUG, CTRY_DEFAULT);
4944
4945	/* XXX check return */
4946	(void) ath_hal_getchannels(ah, chans, maxchans, nchans,
4947	    HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
4948
4949}
4950
/*
 * Build the initial channel list from EEPROM contents and record
 * the EEPROM regdomain/country codes so net80211 regulatory state
 * can be seeded.  Returns 0 on success or EINVAL on HAL failure.
 */
4951static int
4952ath_getchannels(struct ath_softc *sc)
4953{
4954	struct ifnet *ifp = sc->sc_ifp;
4955	struct ieee80211com *ic = ifp->if_l2com;
4956	struct ath_hal *ah = sc->sc_ah;
4957	HAL_STATUS status;
4958
4959	/*
4960	 * Collect channel set based on EEPROM contents.
4961	 */
4962	status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
4963	    &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
4964	if (status != HAL_OK) {
4965		if_printf(ifp, "%s: unable to collect channel list from hal, "
4966		    "status %d\n", __func__, status);
4967		return EINVAL;
4968	}
4969	(void) ath_hal_getregdomain(ah, &sc->sc_eerd);
4970	ath_hal_getcountrycode(ah, &sc->sc_eecc);	/* NB: cannot fail */
4971	/* XXX map Atheros sku's to net80211 SKU's */
4972	/* XXX net80211 types too small */
4973	ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
4974	ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
4975	ic->ic_regdomain.isocc[0] = ' ';	/* XXX don't know */
4976	ic->ic_regdomain.isocc[1] = ' ';
4977
	/* XXX assume ECM and indoor operation; not derivable from EEPROM */
4978	ic->ic_regdomain.ecm = 1;
4979	ic->ic_regdomain.location = 'I';
4980
4981	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
4982	    "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
4983	    __func__, sc->sc_eerd, sc->sc_eecc,
4984	    ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
4985	    ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : "");
4986	return 0;
4987}
4988
4989static void
4990ath_led_done(void *arg)
4991{
4992	struct ath_softc *sc = arg;
4993
4994	sc->sc_blinking = 0;
4995}
4996
4997/*
4998 * Turn the LED off: flip the pin and then set a timer so no
4999 * update will happen for the specified duration.
5000 */
5001static void
5002ath_led_off(void *arg)
5003{
5004	struct ath_softc *sc = arg;
5005
	/* Drive the pin to the inactive state... */
5006	ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon);
	/* ...then finish the blink cycle after the saved off interval. */
5007	callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, ath_led_done, sc);
5008}
5009
5010/*
5011 * Blink the LED according to the specified on/off times.
5012 */
5013static void
5014ath_led_blink(struct ath_softc *sc, int on, int off)
5015{
5016	DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n", __func__, on, off);
	/*
	 * Turn the LED on and schedule the off transition.  on/off are
	 * in ticks (ath_setcurmode converts the ms blink table to ticks).
	 */
5017	ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, sc->sc_ledon);
5018	sc->sc_blinking = 1;
5019	sc->sc_ledoff = off;		/* consumed by ath_led_off */
5020	callout_reset(&sc->sc_ledtimer, on, ath_led_off, sc);
5021}
5022
5023static void
5024ath_led_event(struct ath_softc *sc, int rix)
5025{
5026	sc->sc_ledevent = ticks;	/* time of last event */
5027	if (sc->sc_blinking)		/* don't interrupt active blink */
5028		return;
5029	ath_led_blink(sc, sc->sc_hwmap[rix].ledon, sc->sc_hwmap[rix].ledoff);
5030}
5031
5032static int
5033ath_rate_setup(struct ath_softc *sc, u_int mode)
5034{
5035	struct ath_hal *ah = sc->sc_ah;
5036	const HAL_RATE_TABLE *rt;
5037
5038	switch (mode) {
5039	case IEEE80211_MODE_11A:
5040		rt = ath_hal_getratetable(ah, HAL_MODE_11A);
5041		break;
5042	case IEEE80211_MODE_HALF:
5043		rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
5044		break;
5045	case IEEE80211_MODE_QUARTER:
5046		rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
5047		break;
5048	case IEEE80211_MODE_11B:
5049		rt = ath_hal_getratetable(ah, HAL_MODE_11B);
5050		break;
5051	case IEEE80211_MODE_11G:
5052		rt = ath_hal_getratetable(ah, HAL_MODE_11G);
5053		break;
5054	case IEEE80211_MODE_TURBO_A:
5055		rt = ath_hal_getratetable(ah, HAL_MODE_108A);
5056		break;
5057	case IEEE80211_MODE_TURBO_G:
5058		rt = ath_hal_getratetable(ah, HAL_MODE_108G);
5059		break;
5060	case IEEE80211_MODE_STURBO_A:
5061		rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
5062		break;
5063	case IEEE80211_MODE_11NA:
5064		rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
5065		break;
5066	case IEEE80211_MODE_11NG:
5067		rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
5068		break;
5069	default:
5070		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
5071			__func__, mode);
5072		return 0;
5073	}
5074	sc->sc_rates[mode] = rt;
5075	return (rt != NULL);
5076}
5077
/*
 * Switch the current phy mode: rebuild the rate-index map
 * (sc_rixmap), the per-rate radiotap/LED table (sc_hwmap), and
 * pick the rate index used for protection frames.  Caller must
 * have installed sc_rates[mode] via ath_rate_setup.
 */
5078static void
5079ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
5080{
5081#define	N(a)	(sizeof(a)/sizeof(a[0]))
5082	/* NB: on/off times from the Atheros NDIS driver, w/ permission */
5083	static const struct {
5084		u_int		rate;		/* tx/rx 802.11 rate */
5085		u_int16_t	timeOn;		/* LED on time (ms) */
5086		u_int16_t	timeOff;	/* LED off time (ms) */
5087	} blinkrates[] = {
5088		{ 108,  40,  10 },
5089		{  96,  44,  11 },
5090		{  72,  50,  13 },
5091		{  48,  57,  14 },
5092		{  36,  67,  16 },
5093		{  24,  80,  20 },
5094		{  22, 100,  25 },
5095		{  18, 133,  34 },
5096		{  12, 160,  40 },
5097		{  10, 200,  50 },
5098		{   6, 240,  58 },
5099		{   4, 267,  66 },
5100		{   2, 400, 100 },
5101		{   0, 500, 130 },
5102		/* XXX half/quarter rates */
5103	};
5104	const HAL_RATE_TABLE *rt;
5105	int i, j;
5106
5107	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
5108	rt = sc->sc_rates[mode];
5109	KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
	/* Build the reverse map: 802.11 rate code -> h/w rate index. */
5110	for (i = 0; i < rt->rateCount; i++) {
5111		uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
5112		if (rt->info[i].phy != IEEE80211_T_HT)
5113			sc->sc_rixmap[ieeerate] = i;
5114		else
5115			sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
5116	}
5117	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
5118	for (i = 0; i < N(sc->sc_hwmap); i++) {
5119		if (i >= rt->rateCount) {
			/* slots past the table get the slowest blink */
5120			sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
5121			sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
5122			continue;
5123		}
5124		sc->sc_hwmap[i].ieeerate =
5125			rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
5126		if (rt->info[i].phy == IEEE80211_T_HT)
5127			sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
5128		sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
5129		if (rt->info[i].shortPreamble ||
5130		    rt->info[i].phy == IEEE80211_T_OFDM)
5131			sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
5132		sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
5133		for (j = 0; j < N(blinkrates)-1; j++)
5134			if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
5135				break;
5136		/* NB: this uses the last entry if the rate isn't found */
5137		/* XXX beware of overflow */
5138		sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
5139		sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
5140	}
5141	sc->sc_currates = rt;
5142	sc->sc_curmode = mode;
5143	/*
5144	 * All protection frames are transmitted at 2Mb/s for
5145	 * 11g, otherwise at 1Mb/s.
5146	 */
5147	if (mode == IEEE80211_MODE_11G)
5148		sc->sc_protrix = ath_tx_findrix(sc, 2*2);
5149	else
5150		sc->sc_protrix = ath_tx_findrix(sc, 2*1);
5151	/* NB: caller is responsible for resetting rate control state */
5152#undef N
5153}
5154
/*
 * Per-second watchdog.  sc_wd_timer is armed elsewhere when tx is
 * pending; if it counts down to zero here the hardware is assumed
 * wedged: report a bb/mac hang (if the HAL can identify one) and
 * reset the chip.
 */
5155static void
5156ath_watchdog(void *arg)
5157{
5158	struct ath_softc *sc = arg;
5159
5160	if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
5161		struct ifnet *ifp = sc->sc_ifp;
5162		uint32_t hangs;
5163
5164		if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
5165		    hangs != 0) {
5166			if_printf(ifp, "%s hang detected (0x%x)\n",
5167			    hangs & 0xff ? "bb" : "mac", hangs);
5168		} else
5169			if_printf(ifp, "device timeout\n");
5170		ath_reset(ifp);
5171		ifp->if_oerrors++;
5172		sc->sc_stats.ast_watchdog++;
5173	}
	/* rearm: run again in one second */
5174	callout_schedule(&sc->sc_wd_ch, hz);
5175}
5176
5177#ifdef ATH_DIAGAPI
5178/*
5179 * Diagnostic interface to the HAL.  This is used by various
5180 * tools to do things like retrieve register contents for
5181 * debugging.  The mechanism is intentionally opaque so that
5182 * it can change frequently w/o concern for compatibility.
5183 */
5184static int
5185ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
5186{
5187	struct ath_hal *ah = sc->sc_ah;
5188	u_int id = ad->ad_id & ATH_DIAG_ID;
5189	void *indata = NULL;
5190	void *outdata = NULL;
5191	u_int32_t insize = ad->ad_in_size;
5192	u_int32_t outsize = ad->ad_out_size;
5193	int error = 0;
5194
	/*
	 * NOTE(review): insize/outsize come straight from userland and
	 * are not bounded before the malloc calls below — consider
	 * adding sanity limits.
	 */
5195	if (ad->ad_id & ATH_DIAG_IN) {
5196		/*
5197		 * Copy in data.
5198		 */
5199		indata = malloc(insize, M_TEMP, M_NOWAIT);
5200		if (indata == NULL) {
5201			error = ENOMEM;
5202			goto bad;
5203		}
5204		error = copyin(ad->ad_in_data, indata, insize);
5205		if (error)
5206			goto bad;
5207	}
5208	if (ad->ad_id & ATH_DIAG_DYN) {
5209		/*
5210		 * Allocate a buffer for the results (otherwise the HAL
5211		 * returns a pointer to a buffer where we can read the
5212		 * results).  Note that we depend on the HAL leaving this
5213		 * pointer for us to use below in reclaiming the buffer;
5214		 * may want to be more defensive.
5215		 */
5216		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
5217		if (outdata == NULL) {
5218			error = ENOMEM;
5219			goto bad;
5220		}
5221	}
5222	if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
		/* copy out no more than the caller asked for */
5223		if (outsize < ad->ad_out_size)
5224			ad->ad_out_size = outsize;
5225		if (outdata != NULL)
5226			error = copyout(outdata, ad->ad_out_data,
5227					ad->ad_out_size);
5228	} else {
5229		error = EINVAL;
5230	}
5231bad:
5232	if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
5233		free(indata, M_TEMP);
5234	if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
5235		free(outdata, M_TEMP);
5236	return error;
5237}
5238#endif /* ATH_DIAGAPI */
5239
/*
 * Network interface ioctl handler: interface flags, media,
 * driver statistics and (optionally) the HAL diagnostic API.
 */
5240static int
5241ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
5242{
5243#define	IS_RUNNING(ifp) \
5244	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
5245	struct ath_softc *sc = ifp->if_softc;
5246	struct ieee80211com *ic = ifp->if_l2com;
5247	struct ifreq *ifr = (struct ifreq *)data;
5248	const HAL_RATE_TABLE *rt;
5249	int error = 0;
5250
5251	switch (cmd) {
5252	case SIOCSIFFLAGS:
5253		ATH_LOCK(sc);
5254		if (IS_RUNNING(ifp)) {
5255			/*
5256			 * To avoid rescanning another access point,
5257			 * do not call ath_init() here.  Instead,
5258			 * only reflect promisc mode settings.
5259			 */
5260			ath_mode_init(sc);
5261		} else if (ifp->if_flags & IFF_UP) {
5262			/*
5263			 * Beware of being called during attach/detach
5264			 * to reset promiscuous mode.  In that case we
5265			 * will still be marked UP but not RUNNING.
5266			 * However trying to re-init the interface
5267			 * is the wrong thing to do as we've already
5268			 * torn down much of our state.  There's
5269			 * probably a better way to deal with this.
5270			 */
5271			if (!sc->sc_invalid)
5272				ath_init(sc);	/* XXX lose error */
5273		} else {
5274			ath_stop_locked(ifp);
5275#ifdef notyet
5276			/* XXX must wakeup in places like ath_vap_delete */
5277			if (!sc->sc_invalid)
5278				ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
5279#endif
5280		}
5281		ATH_UNLOCK(sc);
5282		break;
5283	case SIOCGIFMEDIA:
5284	case SIOCSIFMEDIA:
5285		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
5286		break;
5287	case SIOCGATHSTATS:
5288		/* NB: embed these numbers to get a consistent view */
5289		sc->sc_stats.ast_tx_packets = ifp->if_opackets;
5290		sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
5291		sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
5292		sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
5293#ifdef IEEE80211_SUPPORT_TDMA
5294		sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap);
5295		sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam);
5296#endif
5297		rt = sc->sc_currates;
5298		sc->sc_stats.ast_tx_rate =
5299		    rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
5300		if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT)
5301			sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS;
		/* NB: returns directly, bypassing the common exit below */
5302		return copyout(&sc->sc_stats,
5303		    ifr->ifr_data, sizeof (sc->sc_stats));
5304	case SIOCZATHSTATS:
		/* zeroing statistics is a privileged operation */
5305		error = priv_check(curthread, PRIV_DRIVER);
5306		if (error == 0)
5307			memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
5308		break;
5309#ifdef ATH_DIAGAPI
5310	case SIOCGATHDIAG:
5311		error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
5312		break;
5313#endif
5314	case SIOCGIFADDR:
5315		error = ether_ioctl(ifp, cmd, data);
5316		break;
5317	default:
5318		error = EINVAL;
5319		break;
5320	}
5321	return error;
5322#undef IS_RUNNING
5323}
5324
5325/*
5326 * Announce various information on device/driver attach.
5327 */
5328static void
5329ath_announce(struct ath_softc *sc)
5330{
5331	struct ifnet *ifp = sc->sc_ifp;
5332	struct ath_hal *ah = sc->sc_ah;
5333
	/* One-line summary of chip identity: MAC and RF revisions. */
5334	if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n",
5335		ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
5336		ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
	/* Verbose boot: dump WME AC -> h/w queue mappings. */
5337	if (bootverbose) {
5338		int i;
5339		for (i = 0; i <= WME_AC_VO; i++) {
5340			struct ath_txq *txq = sc->sc_ac2q[i];
5341			if_printf(ifp, "Use hw queue %u for %s traffic\n",
5342				txq->axq_qnum, ieee80211_wme_acnames[i]);
5343		}
5344		if_printf(ifp, "Use hw queue %u for CAB traffic\n",
5345			sc->sc_cabq->axq_qnum);
5346		if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
5347	}
	/* Only mention buffer counts when tuned away from the defaults. */
5348	if (ath_rxbuf != ATH_RXBUF)
5349		if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
5350	if (ath_txbuf != ATH_TXBUF)
5351		if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
5352	if (sc->sc_mcastkey && bootverbose)
5353		if_printf(ifp, "using multicast key search\n");
5354}
5355
5356#ifdef IEEE80211_SUPPORT_TDMA
/* NB: reaches below the HAL to read TIMER0 (NextTBTT, in TU). */
5357static __inline uint32_t
5358ath_hal_getnexttbtt(struct ath_hal *ah)
5359{
5360#define	AR_TIMER0	0x8028
5361	return OS_REG_READ(ah, AR_TIMER0);
5362}
5363
/* NB: reaches below the HAL; only the low 32 bits of the TSF are adjusted. */
5364static __inline void
5365ath_hal_adjusttsf(struct ath_hal *ah, int32_t tsfdelta)
5366{
5367	/* XXX handle wrap/overflow */
5368	OS_REG_WRITE(ah, AR_TSF_L32, OS_REG_READ(ah, AR_TSF_L32) + tsfdelta);
5369}
5370
/*
 * Program the beacon timers for TDMA operation.  nexttbtt and
 * bintval are in TU; the DBA/SWBA values appear to be in 1/8 TU
 * units (note the <<3) — confirm against the HAL if changing.
 */
5371static void
5372ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, u_int32_t bintval)
5373{
5374	struct ath_hal *ah = sc->sc_ah;
5375	HAL_BEACON_TIMERS bt;
5376
5377	bt.bt_intval = bintval | HAL_BEACON_ENA;
5378	bt.bt_nexttbtt = nexttbtt;
5379	bt.bt_nextdba = (nexttbtt<<3) - sc->sc_tdmadbaprep;
5380	bt.bt_nextswba = (nexttbtt<<3) - sc->sc_tdmaswbaprep;
5381	bt.bt_nextatim = nexttbtt+1;
5382	ath_hal_beaconsettimers(ah, &bt);
5383}
5384
5385/*
5386 * Calculate the beacon interval.  This is periodic in the
5387 * superframe for the bss.  We assume each station is configured
5388 * identically wrt transmit rate so the guard time we calculate
5389 * above will be the same on all stations.  Note we need to
5390 * factor in the xmit time because the hardware will schedule
5391 * a frame for transmit if the start of the frame is within
5392 * the burst time.  When we get hardware that properly kills
5393 * frames in the PCU we can reduce/eliminate the guard time.
5394 *
5395 * Roundup to 1024 is so we have 1 TU buffer in the guard time
5396 * to deal with the granularity of the nexttbtt timer.  11n MAC's
5397 * with 1us timer granularity should allow us to reduce/eliminate
5398 * this.
5399 */
5400static void
5401ath_tdma_bintvalsetup(struct ath_softc *sc,
5402	const struct ieee80211_tdma_state *tdma)
5403{
5404	/* copy from vap state (XXX check all vaps have same value?) */
5405	sc->sc_tdmaslotlen = tdma->tdma_slotlen;
5406
	/* superframe = (slot + guard) * slotcnt, rounded up to 1 TU */
5407	sc->sc_tdmabintval = roundup((sc->sc_tdmaslotlen+sc->sc_tdmaguard) *
5408		tdma->tdma_slotcnt, 1024);
5409	sc->sc_tdmabintval >>= 10;		/* TSF -> TU */
	/* round up to an even TU count */
5410	if (sc->sc_tdmabintval & 1)
5411		sc->sc_tdmabintval++;
5412
5413	if (tdma->tdma_slot == 0) {
5414		/*
5415		 * Only slot 0 beacons; other slots respond.
5416		 */
5417		sc->sc_imask |= HAL_INT_SWBA;
5418		sc->sc_tdmaswba = 0;		/* beacon immediately */
5419	} else {
5420		/* XXX all vaps must be slot 0 or slot !0 */
5421		sc->sc_imask &= ~HAL_INT_SWBA;
5422	}
5423}
5424
5425/*
5426 * Max 802.11 overhead.  This assumes no 4-address frames and
5427 * the encapsulation done by ieee80211_encap (llc).  We also
5428 * include potential crypto overhead.
5429 */
5430#define	IEEE80211_MAXOVERHEAD \
5431	(sizeof(struct ieee80211_qosframe) \
5432	 + sizeof(struct llc) \
5433	 + IEEE80211_ADDR_LEN \
5434	 + IEEE80211_WEP_IVLEN \
5435	 + IEEE80211_WEP_KIDLEN \
5436	 + IEEE80211_WEP_CRCLEN \
5437	 + IEEE80211_WEP_MICLEN \
5438	 + IEEE80211_CRC_LEN)
5439
5440/*
5441 * Setup initially for tdma operation.  Start the beacon
5442 * timers and enable SWBA if we are slot 0.  Otherwise
5443 * we wait for slot 0 to arrive so we can sync up before
5444 * starting to transmit.
5445 */
5446static void
5447ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap)
5448{
5449	struct ath_hal *ah = sc->sc_ah;
5450	struct ifnet *ifp = sc->sc_ifp;
5451	struct ieee80211com *ic = ifp->if_l2com;
5452	const struct ieee80211_txparam *tp;
5453	const struct ieee80211_tdma_state *tdma = NULL;
5454	int rix;
5455
5456	if (vap == NULL) {
5457		vap = TAILQ_FIRST(&ic->ic_vaps);   /* XXX */
5458		if (vap == NULL) {
5459			if_printf(ifp, "%s: no vaps?\n", __func__);
5460			return;
5461		}
5462	}
5463	tp = vap->iv_bss->ni_txparms;
5464	/*
5465	 * Calculate the guard time for each slot.  This is the
5466	 * time to send a maximal-size frame according to the
5467	 * fixed/lowest transmit rate.  Note that the interface
5468	 * mtu does not include the 802.11 overhead so we must
5469	 * tack that on (ath_hal_computetxtime includes the
5470	 * preamble and plcp in its calculation).
5471	 */
5472	tdma = vap->iv_tdma;
5473	if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
5474		rix = ath_tx_findrix(sc, tp->ucastrate);
5475	else
5476		rix = ath_tx_findrix(sc, tp->mcastrate);
5477	/* XXX short preamble assumed */
5478	sc->sc_tdmaguard = ath_hal_computetxtime(ah, sc->sc_currates,
5479		ifp->if_mtu + IEEE80211_MAXOVERHEAD, rix, AH_TRUE);
5480
	/* Quiesce interrupts while the beacon machinery is reprogrammed. */
5481	ath_hal_intrset(ah, 0);
5482
5483	ath_beaconq_config(sc);			/* setup h/w beacon q */
5484	if (sc->sc_setcca)
5485		ath_hal_setcca(ah, AH_FALSE);	/* disable CCA */
5486	ath_tdma_bintvalsetup(sc, tdma);	/* calculate beacon interval */
5487	ath_tdma_settimers(sc, sc->sc_tdmabintval,
5488		sc->sc_tdmabintval | HAL_BEACON_RESET_TSF);
5489	sc->sc_syncbeacon = 0;
5490
5491	sc->sc_avgtsfdeltap = TDMA_DUMMY_MARKER;
5492	sc->sc_avgtsfdeltam = TDMA_DUMMY_MARKER;
5493
5494	ath_hal_intrset(ah, sc->sc_imask);
5495
5496	DPRINTF(sc, ATH_DEBUG_TDMA, "%s: slot %u len %uus cnt %u "
5497	    "bsched %u guard %uus bintval %u TU dba prep %u\n", __func__,
5498	    tdma->tdma_slot, tdma->tdma_slotlen, tdma->tdma_slotcnt,
5499	    tdma->tdma_bintval, sc->sc_tdmaguard, sc->sc_tdmabintval,
5500	    sc->sc_tdmadbaprep);
5501}
5502
5503/*
5504 * Update tdma operation.  Called from the 802.11 layer
5505 * when a beacon is received from the TDMA station operating
5506 * in the slot immediately preceding us in the bss.  Use
5507 * the rx timestamp for the beacon frame to update our
5508 * beacon timers so we follow their schedule.  Note that
5509 * by using the rx timestamp we implicitly include the
5510 * propagation delay in our schedule.
5511 */
5512static void
5513ath_tdma_update(struct ieee80211_node *ni,
5514	const struct ieee80211_tdma_param *tdma, int changed)
5515{
5516#define	TSF_TO_TU(_h,_l) \
5517	((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
5518#define	TU_TO_TSF(_tu)	(((u_int64_t)(_tu)) << 10)
5519	struct ieee80211vap *vap = ni->ni_vap;
5520	struct ieee80211com *ic = ni->ni_ic;
5521	struct ath_softc *sc = ic->ic_ifp->if_softc;
5522	struct ath_hal *ah = sc->sc_ah;
5523	const HAL_RATE_TABLE *rt = sc->sc_currates;
5524	u_int64_t tsf, rstamp, nextslot;
5525	u_int32_t txtime, nextslottu, timer0;
5526	int32_t tudelta, tsfdelta;
5527	const struct ath_rx_status *rs;
5528	int rix;
5529
5530	sc->sc_stats.ast_tdma_update++;
5531
5532	/*
5533	 * Check for and adopt configuration changes.
5534	 */
5535	if (changed != 0) {
5536		const struct ieee80211_tdma_state *ts = vap->iv_tdma;
5537
5538		ath_tdma_bintvalsetup(sc, ts);
5539		if (changed & TDMA_UPDATE_SLOTLEN)
5540			ath_wme_update(ic);
5541
5542		DPRINTF(sc, ATH_DEBUG_TDMA,
5543		    "%s: adopt slot %u slotcnt %u slotlen %u us "
5544		    "bintval %u TU\n", __func__,
5545		    ts->tdma_slot, ts->tdma_slotcnt, ts->tdma_slotlen,
5546		    sc->sc_tdmabintval);
5547
5548		/* XXX right? */
5549		ath_hal_intrset(ah, sc->sc_imask);
5550		/* NB: beacon timers programmed below */
5551	}
5552
5553	/* extend rx timestamp to 64 bits */
5554	rs = sc->sc_lastrs;
5555	tsf = ath_hal_gettsf64(ah);
5556	rstamp = ath_extend_tsf(rs->rs_tstamp, tsf);
5557	/*
5558	 * The rx timestamp is set by the hardware on completing
5559	 * reception (at the point where the rx descriptor is DMA'd
5560	 * to the host).  To find the start of our next slot we
5561	 * must adjust this time by the time required to send
5562	 * the packet just received.
5563	 */
5564	rix = rt->rateCodeToIndex[rs->rs_rate];
5565	txtime = ath_hal_computetxtime(ah, rt, rs->rs_datalen, rix,
5566	    rt->info[rix].shortPreamble);
5567	/* NB: << 9 is to cvt to TU and /2 */
5568	nextslot = (rstamp - txtime) + (sc->sc_tdmabintval << 9);
5569	nextslottu = TSF_TO_TU(nextslot>>32, nextslot) & HAL_BEACON_PERIOD;
5570
5571	/*
5572	 * TIMER0 is the h/w's idea of NextTBTT (in TU's).  Convert
5573	 * to usecs and calculate the difference between what the
5574	 * other station thinks and what we have programmed.  This
5575	 * lets us figure how to adjust our timers to match.  The
5576	 * adjustments are done by pulling the TSF forward and possibly
5577	 * rewriting the beacon timers.
5578	 */
5579	timer0 = ath_hal_getnexttbtt(ah);
5580	tsfdelta = (int32_t)((nextslot % TU_TO_TSF(HAL_BEACON_PERIOD+1)) - TU_TO_TSF(timer0));
5581
5582	DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
5583	    "tsfdelta %d avg +%d/-%d\n", tsfdelta,
5584	    TDMA_AVG(sc->sc_avgtsfdeltap), TDMA_AVG(sc->sc_avgtsfdeltam));
5585
	/*
	 * Fold the delta into the running +/- averages and normalize
	 * the TSF adjustment to within one TU; the TSF is only ever
	 * pulled forward (see below), so a negative delta becomes a
	 * forward pull plus a one-TU bump of the next slot time.
	 */
5586	if (tsfdelta < 0) {
5587		TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
5588		TDMA_SAMPLE(sc->sc_avgtsfdeltam, -tsfdelta);
5589		tsfdelta = -tsfdelta % 1024;
5590		nextslottu++;
5591	} else if (tsfdelta > 0) {
5592		TDMA_SAMPLE(sc->sc_avgtsfdeltap, tsfdelta);
5593		TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
5594		tsfdelta = 1024 - (tsfdelta % 1024);
5595		nextslottu++;
5596	} else {
5597		TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
5598		TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
5599	}
5600	tudelta = nextslottu - timer0;
5601
5602	/*
5603	 * Copy sender's timestamp into tdma ie so they can
5604	 * calculate roundtrip time.  We submit a beacon frame
5605	 * below after any timer adjustment.  The frame goes out
5606	 * at the next TBTT so the sender can calculate the
5607	 * roundtrip by inspecting the tdma ie in our beacon frame.
5608	 *
5609	 * NB: This tstamp is subtly preserved when
5610	 *     IEEE80211_BEACON_TDMA is marked (e.g. when the
5611	 *     slot position changes) because ieee80211_add_tdma
5612	 *     skips over the data.
5613	 */
5614	memcpy(ATH_VAP(vap)->av_boff.bo_tdma +
5615		__offsetof(struct ieee80211_tdma_param, tdma_tstamp),
5616		&ni->ni_tstamp.data, 8);
5617#if 0
5618	DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
5619	    "tsf %llu nextslot %llu (%d, %d) nextslottu %u timer0 %u (%d)\n",
5620	    (unsigned long long) tsf, (unsigned long long) nextslot,
5621	    (int)(nextslot - tsf), tsfdelta,
5622	    nextslottu, timer0, tudelta);
5623#endif
5624	/*
5625	 * Adjust the beacon timers only when pulling them forward
5626	 * or when going back by less than the beacon interval.
5627	 * Negative jumps larger than the beacon interval seem to
5628	 * cause the timers to stop and generally cause instability.
5629	 * This basically filters out jumps due to missed beacons.
5630	 */
5631	if (tudelta != 0 && (tudelta > 0 || -tudelta < sc->sc_tdmabintval)) {
5632		ath_tdma_settimers(sc, nextslottu, sc->sc_tdmabintval);
5633		sc->sc_stats.ast_tdma_timers++;
5634	}
5635	if (tsfdelta > 0) {
5636		ath_hal_adjusttsf(ah, tsfdelta);
5637		sc->sc_stats.ast_tdma_tsf++;
5638	}
5639	ath_tdma_beacon_send(sc, vap);		/* prepare response */
5640#undef TU_TO_TSF
5641#undef TSF_TO_TU
5642}
5643
5644/*
5645 * Transmit a beacon frame at SWBA.  Dynamic updates
5646 * to the frame contents are done as needed.
5647 */
5648static void
5649ath_tdma_beacon_send(struct ath_softc *sc, struct ieee80211vap *vap)
5650{
5651	struct ath_hal *ah = sc->sc_ah;
5652	struct ath_buf *bf;
5653	int otherant;
5654
5655	/*
5656	 * Check if the previous beacon has gone out.  If
5657	 * not don't try to post another, skip this period
5658	 * and wait for the next.  Missed beacons indicate
5659	 * a problem and should not occur.  If we miss too
5660	 * many consecutive beacons reset the device.
5661	 */
5662	if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
5663		sc->sc_bmisscount++;
5664		DPRINTF(sc, ATH_DEBUG_BEACON,
5665			"%s: missed %u consecutive beacons\n",
5666			__func__, sc->sc_bmisscount);
5667		if (sc->sc_bmisscount >= ath_bstuck_threshold)
5668			taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
5669		return;
5670	}
5671	if (sc->sc_bmisscount != 0) {
5672		DPRINTF(sc, ATH_DEBUG_BEACON,
5673			"%s: resume beacon xmit after %u misses\n",
5674			__func__, sc->sc_bmisscount);
5675		sc->sc_bmisscount = 0;
5676	}
5677
5678	/*
5679	 * Check recent per-antenna transmit statistics and flip
5680	 * the default antenna if noticeably more frames went out
5681	 * on the non-default antenna.
5682	 * XXX assumes 2 antennae
5683	 */
5684	if (!sc->sc_diversity) {
5685		otherant = sc->sc_defant & 1 ? 2 : 1;
5686		if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
5687			ath_setdefantenna(sc, otherant);
5688		sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
5689	}
5690
5691	bf = ath_beacon_generate(sc, vap);
5692	if (bf != NULL) {
5693		/*
5694		 * Stop any current dma and put the new frame on the queue.
5695		 * This should never fail since we check above that no frames
5696		 * are still pending on the queue.
5697		 */
5698		if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
5699			DPRINTF(sc, ATH_DEBUG_ANY,
5700				"%s: beacon queue %u did not stop?\n",
5701				__func__, sc->sc_bhalq);
5702			/* NB: the HAL still stops DMA, so proceed */
5703		}
5704		ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
5705		ath_hal_txstart(ah, sc->sc_bhalq);
5706
5707		sc->sc_stats.ast_be_xmit++;		/* XXX per-vap? */
5708
5709		/*
5710		 * Record local TSF for our last send for use
5711		 * in arbitrating slot collisions.
5712		 */
5713		vap->iv_bss->ni_tstamp.tsf = ath_hal_gettsf64(ah);
5714	}
5715}
5716#endif /* IEEE80211_SUPPORT_TDMA */
5717
5718static void
5719ath_dfs_tasklet(void *p, int npending)
5720{
5721	struct ath_softc *sc = (struct ath_softc *) p;
5722	struct ifnet *ifp = sc->sc_ifp;
5723	struct ieee80211com *ic = ifp->if_l2com;
5724
5725	/*
5726	 * If previous processing has found a radar event,
5727	 * signal this to the net80211 layer to begin DFS
5728	 * processing.
5729	 */
5730	if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
5731		/* DFS event found, initiate channel change */
5732		ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
5733	}
5734}
5735
5736MODULE_VERSION(if_ath, 1);
5737MODULE_DEPEND(if_ath, wlan, 1, 1, 1);          /* 802.11 media layer */
5738