/* if_mwl.c revision 228621 */
1/*-
2 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
3 * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer,
11 *    without modification.
12 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
14 *    redistribution must be conditioned upon including a substantially
15 *    similar Disclaimer requirement for further binary redistribution.
16 *
17 * NO WARRANTY
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
21 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
23 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
26 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGES.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/dev/mwl/if_mwl.c 228621 2011-12-17 10:23:17Z bschmidt $");
33
34/*
35 * Driver for the Marvell 88W8363 Wireless LAN controller.
36 */
37
38#include "opt_inet.h"
39#include "opt_mwl.h"
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/sysctl.h>
44#include <sys/mbuf.h>
45#include <sys/malloc.h>
46#include <sys/lock.h>
47#include <sys/mutex.h>
48#include <sys/kernel.h>
49#include <sys/socket.h>
50#include <sys/sockio.h>
51#include <sys/errno.h>
52#include <sys/callout.h>
53#include <sys/bus.h>
54#include <sys/endian.h>
55#include <sys/kthread.h>
56#include <sys/taskqueue.h>
57
58#include <machine/bus.h>
59
60#include <net/if.h>
61#include <net/if_dl.h>
62#include <net/if_media.h>
63#include <net/if_types.h>
64#include <net/if_arp.h>
65#include <net/ethernet.h>
66#include <net/if_llc.h>
67
68#include <net/bpf.h>
69
70#include <net80211/ieee80211_var.h>
71#include <net80211/ieee80211_regdomain.h>
72
73#ifdef INET
74#include <netinet/in.h>
75#include <netinet/if_ether.h>
76#endif /* INET */
77
78#include <dev/mwl/if_mwlvar.h>
79#include <dev/mwl/mwldiag.h>
80
81/* idiomatic shorthands: MS = mask+shift, SM = shift+mask */
82#define	MS(v,x)	(((v) & x) >> x##_S)
83#define	SM(v,x)	(((v) << x##_S) & x)
84
85static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
86		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
87		    const uint8_t [IEEE80211_ADDR_LEN],
88		    const uint8_t [IEEE80211_ADDR_LEN]);
89static void	mwl_vap_delete(struct ieee80211vap *);
90static int	mwl_setupdma(struct mwl_softc *);
91static int	mwl_hal_reset(struct mwl_softc *sc);
92static int	mwl_init_locked(struct mwl_softc *);
93static void	mwl_init(void *);
94static void	mwl_stop_locked(struct ifnet *, int);
95static int	mwl_reset(struct ieee80211vap *, u_long);
96static void	mwl_stop(struct ifnet *, int);
97static void	mwl_start(struct ifnet *);
98static int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
99			const struct ieee80211_bpf_params *);
100static int	mwl_media_change(struct ifnet *);
101static void	mwl_watchdog(void *);
102static int	mwl_ioctl(struct ifnet *, u_long, caddr_t);
103static void	mwl_radar_proc(void *, int);
104static void	mwl_chanswitch_proc(void *, int);
105static void	mwl_bawatchdog_proc(void *, int);
106static int	mwl_key_alloc(struct ieee80211vap *,
107			struct ieee80211_key *,
108			ieee80211_keyix *, ieee80211_keyix *);
109static int	mwl_key_delete(struct ieee80211vap *,
110			const struct ieee80211_key *);
111static int	mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *,
112			const uint8_t mac[IEEE80211_ADDR_LEN]);
113static int	mwl_mode_init(struct mwl_softc *);
114static void	mwl_update_mcast(struct ifnet *);
115static void	mwl_update_promisc(struct ifnet *);
116static void	mwl_updateslot(struct ifnet *);
117static int	mwl_beacon_setup(struct ieee80211vap *);
118static void	mwl_beacon_update(struct ieee80211vap *, int);
119#ifdef MWL_HOST_PS_SUPPORT
120static void	mwl_update_ps(struct ieee80211vap *, int);
121static int	mwl_set_tim(struct ieee80211_node *, int);
122#endif
123static int	mwl_dma_setup(struct mwl_softc *);
124static void	mwl_dma_cleanup(struct mwl_softc *);
125static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
126		    const uint8_t [IEEE80211_ADDR_LEN]);
127static void	mwl_node_cleanup(struct ieee80211_node *);
128static void	mwl_node_drain(struct ieee80211_node *);
129static void	mwl_node_getsignal(const struct ieee80211_node *,
130			int8_t *, int8_t *);
131static void	mwl_node_getmimoinfo(const struct ieee80211_node *,
132			struct ieee80211_mimo_info *);
133static int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
134static void	mwl_rx_proc(void *, int);
135static void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
136static int	mwl_tx_setup(struct mwl_softc *, int, int);
137static int	mwl_wme_update(struct ieee80211com *);
138static void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
139static void	mwl_tx_cleanup(struct mwl_softc *);
140static uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
141static int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
142			     struct mwl_txbuf *, struct mbuf *);
143static void	mwl_tx_proc(void *, int);
144static int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
145static void	mwl_draintxq(struct mwl_softc *);
146static void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
147static int	mwl_recv_action(struct ieee80211_node *,
148			const struct ieee80211_frame *,
149			const uint8_t *, const uint8_t *);
150static int	mwl_addba_request(struct ieee80211_node *,
151			struct ieee80211_tx_ampdu *, int dialogtoken,
152			int baparamset, int batimeout);
153static int	mwl_addba_response(struct ieee80211_node *,
154			struct ieee80211_tx_ampdu *, int status,
155			int baparamset, int batimeout);
156static void	mwl_addba_stop(struct ieee80211_node *,
157			struct ieee80211_tx_ampdu *);
158static int	mwl_startrecv(struct mwl_softc *);
159static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
160			struct ieee80211_channel *);
161static int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
162static void	mwl_scan_start(struct ieee80211com *);
163static void	mwl_scan_end(struct ieee80211com *);
164static void	mwl_set_channel(struct ieee80211com *);
165static int	mwl_peerstadb(struct ieee80211_node *,
166			int aid, int staid, MWL_HAL_PEERINFO *pi);
167static int	mwl_localstadb(struct ieee80211vap *);
168static int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
169static int	allocstaid(struct mwl_softc *sc, int aid);
170static void	delstaid(struct mwl_softc *sc, int staid);
171static void	mwl_newassoc(struct ieee80211_node *, int);
172static void	mwl_agestations(void *);
173static int	mwl_setregdomain(struct ieee80211com *,
174			struct ieee80211_regdomain *, int,
175			struct ieee80211_channel []);
176static void	mwl_getradiocaps(struct ieee80211com *, int, int *,
177			struct ieee80211_channel []);
178static int	mwl_getchannels(struct mwl_softc *);
179
180static void	mwl_sysctlattach(struct mwl_softc *);
181static void	mwl_announce(struct mwl_softc *);
182
183SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");
184
185static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
186SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
187	    0, "rx descriptors allocated");
188static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
189SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RW, &mwl_rxbuf,
190	    0, "rx buffers allocated");
191TUNABLE_INT("hw.mwl.rxbuf", &mwl_rxbuf);
192static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
193SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RW, &mwl_txbuf,
194	    0, "tx buffers allocated");
195TUNABLE_INT("hw.mwl.txbuf", &mwl_txbuf);
196static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
197SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RW, &mwl_txcoalesce,
198	    0, "tx buffers to send at once");
199TUNABLE_INT("hw.mwl.txcoalesce", &mwl_txcoalesce);
200static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
201SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RW, &mwl_rxquota,
202	    0, "max rx buffers to process per interrupt");
203TUNABLE_INT("hw.mwl.rxquota", &mwl_rxquota);
204static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
205SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RW, &mwl_rxdmalow,
206	    0, "min free rx buffers before restarting traffic");
207TUNABLE_INT("hw.mwl.rxdmalow", &mwl_rxdmalow);
208
209#ifdef MWL_DEBUG
210static	int mwl_debug = 0;
211SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RW, &mwl_debug,
212	    0, "control debugging printfs");
213TUNABLE_INT("hw.mwl.debug", &mwl_debug);
214enum {
215	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
216	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
217	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
218	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
219	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
220	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
221	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
222	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
223	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
224	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
225	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
226	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
227	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
228	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
229	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
230	MWL_DEBUG_ANY		= 0xffffffff
231};
232#define	IS_BEACON(wh) \
233    ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
234	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
235#define	IFF_DUMPPKTS_RECV(sc, wh) \
236    (((sc->sc_debug & MWL_DEBUG_RECV) && \
237      ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh))) || \
238     (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
239#define	IFF_DUMPPKTS_XMIT(sc) \
240	((sc->sc_debug & MWL_DEBUG_XMIT) || \
241	 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
242#define	DPRINTF(sc, m, fmt, ...) do {				\
243	if (sc->sc_debug & (m))					\
244		printf(fmt, __VA_ARGS__);			\
245} while (0)
246#define	KEYPRINTF(sc, hk, mac) do {				\
247	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
248		mwl_keyprint(sc, __func__, hk, mac);		\
249} while (0)
250static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
251static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
252#else
253#define	IFF_DUMPPKTS_RECV(sc, wh) \
254	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
255#define	IFF_DUMPPKTS_XMIT(sc) \
256	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
257#define	DPRINTF(sc, m, fmt, ...) do {				\
258	(void) sc;						\
259} while (0)
260#define	KEYPRINTF(sc, k, mac) do {				\
261	(void) sc;						\
262} while (0)
263#endif
264
265static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");
266
267/*
268 * Each packet has fixed front matter: a 2-byte length
269 * of the payload, followed by a 4-address 802.11 header
270 * (regardless of the actual header and always w/o any
271 * QoS header).  The payload then follows.
272 */
273struct mwltxrec {
274	uint16_t fwlen;		/* length of the payload that follows, for the f/w */
275	struct ieee80211_frame_addr4 wh;	/* 4-address 802.11 header, always present, never QoS (see note above) */
276} __packed;
277
278/*
279 * Read/Write shorthands for accesses to BAR 0.  Note
280 * that all BAR 1 operations are done in the "hal" and
281 * there should be no reference to them here.
282 */
283static __inline uint32_t
284RD4(struct mwl_softc *sc, bus_size_t off)
285{
286	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
287}
288
289static __inline void
290WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
291{
292	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
293}
294
/*
 * Attach the driver to the device: allocate the ifnet, attach the
 * "hal", load (station) firmware, fetch h/w specs and channels, set
 * up DMA descriptors and tx queues, then register with net80211 and
 * install the driver's method overrides.  Returns 0 on success or an
 * errno on failure; on failure sc->sc_invalid is set so mwl_intr
 * ignores the (possibly shared) interrupt.
 */
295int
296mwl_attach(uint16_t devid, struct mwl_softc *sc)
297{
298	struct ifnet *ifp;
299	struct ieee80211com *ic;
300	struct mwl_hal *mh;
301	int error = 0;
302
303	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
304
305	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
306	if (ifp == NULL) {
307		device_printf(sc->sc_dev, "cannot if_alloc()\n");
308		return ENOSPC;
309	}
310	ic = ifp->if_l2com;
311
312	/* set these up early for if_printf use */
313	if_initname(ifp, device_get_name(sc->sc_dev),
314		device_get_unit(sc->sc_dev));
315
316	mh = mwl_hal_attach(sc->sc_dev, devid,
317	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
318	if (mh == NULL) {
319		if_printf(ifp, "unable to attach HAL\n");
320		error = EIO;
321		goto bad;
322	}
323	sc->sc_mh = mh;
324	/*
325	 * Load firmware so we can get setup.  We arbitrarily
326	 * pick station firmware; we'll re-load firmware as
327	 * needed so setting up the wrong mode isn't a big deal.
328	 */
329	if (mwl_hal_fwload(mh, NULL) != 0) {
330		if_printf(ifp, "unable to setup builtin firmware\n");
331		error = EIO;
332		goto bad1;
333	}
334	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
335		if_printf(ifp, "unable to fetch h/w specs\n");
336		error = EIO;
337		goto bad1;
338	}
339	error = mwl_getchannels(sc);
340	if (error != 0)
341		goto bad1;
342
343	sc->sc_txantenna = 0;		/* h/w default */
344	sc->sc_rxantenna = 0;		/* h/w default */
345	sc->sc_invalid = 0;		/* ready to go, enable int handling */
346	sc->sc_ageinterval = MWL_AGEINTERVAL;
347
348	/*
349	 * Allocate tx+rx descriptors and populate the lists.
350	 * We immediately push the information to the firmware
351	 * as otherwise it gets upset.
352	 */
353	error = mwl_dma_setup(sc);
354	if (error != 0) {
355		if_printf(ifp, "failed to setup descriptors: %d\n", error);
356		goto bad1;
357	}
358	error = mwl_setupdma(sc);	/* push to firmware */
359	if (error != 0)			/* NB: mwl_setupdma prints msg */
360		goto bad1;
361
362	callout_init(&sc->sc_timer, CALLOUT_MPSAFE);
363	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
364
365	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
366		taskqueue_thread_enqueue, &sc->sc_tq);
367	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
368		"%s taskq", ifp->if_xname);
369
	/* deferred work dispatched from the interrupt handler */
370	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
371	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
372	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
373	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);
374
375	/* NB: insure BK queue is the lowest priority h/w queue */
376	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
377		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
378			ieee80211_wme_acnames[WME_AC_BK]);
379		error = EIO;
380		goto bad2;
381	}
382	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
383	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
384	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
385		/*
386		 * Not enough hardware tx queues to properly do WME;
387		 * just punt and assign them all to the same h/w queue.
388		 * We could do a better job of this if, for example,
389		 * we allocate queues when we switch from station to
390		 * AP mode.
391		 */
392		if (sc->sc_ac2q[WME_AC_VI] != NULL)
393			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
394		if (sc->sc_ac2q[WME_AC_BE] != NULL)
395			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
396		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
397		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
398		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
399	}
400	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);
401
402	ifp->if_softc = sc;
403	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
404	ifp->if_start = mwl_start;
405	ifp->if_ioctl = mwl_ioctl;
406	ifp->if_init = mwl_init;
407	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
408	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
409	IFQ_SET_READY(&ifp->if_snd);
410
411	ic->ic_ifp = ifp;
412	/* XXX not right but it's not used anywhere important */
413	ic->ic_phytype = IEEE80211_T_OFDM;
414	ic->ic_opmode = IEEE80211_M_STA;
415	ic->ic_caps =
416		  IEEE80211_C_STA		/* station mode supported */
417		| IEEE80211_C_HOSTAP		/* hostap mode */
418		| IEEE80211_C_MONITOR		/* monitor mode */
419#if 0
420		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
421		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
422#endif
423		| IEEE80211_C_MBSS		/* mesh point link mode */
424		| IEEE80211_C_WDS		/* WDS supported */
425		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
426		| IEEE80211_C_SHSLOT		/* short slot time supported */
427		| IEEE80211_C_WME		/* WME/WMM supported */
428		| IEEE80211_C_BURST		/* xmit bursting supported */
429		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
430		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
431		| IEEE80211_C_TXFRAG		/* handle tx frags */
432		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
433		| IEEE80211_C_DFS		/* DFS supported */
434		;
435
436	ic->ic_htcaps =
437		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
438		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
439		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
440		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
441		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
442#if MWL_AGGR_SIZE == 7935
443		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
444#else
445		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
446#endif
447#if 0
448		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
449		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
450#endif
451		/* s/w capabilities */
452		| IEEE80211_HTC_HT		/* HT operation */
453		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
454		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
455		| IEEE80211_HTC_SMPS		/* SMPS available */
456		;
457
458	/*
459	 * Mark h/w crypto support.
460	 * XXX no way to query h/w support.
461	 */
462	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
463			  |  IEEE80211_CRYPTO_AES_CCM
464			  |  IEEE80211_CRYPTO_TKIP
465			  |  IEEE80211_CRYPTO_TKIPMIC
466			  ;
467	/*
468	 * Transmit requires space in the packet for a special
469	 * format transmit record and optional padding between
470	 * this record and the payload.  Ask the net80211 layer
471	 * to arrange this when encapsulating packets so we can
472	 * add it efficiently.
473	 */
474	ic->ic_headroom = sizeof(struct mwltxrec) -
475		sizeof(struct ieee80211_frame);
476
477	/* call MI attach routine. */
478	ieee80211_ifattach(ic, sc->sc_hwspecs.macAddr);
479	ic->ic_setregdomain = mwl_setregdomain;
480	ic->ic_getradiocaps = mwl_getradiocaps;
481	/* override default methods */
482	ic->ic_raw_xmit = mwl_raw_xmit;
483	ic->ic_newassoc = mwl_newassoc;
484	ic->ic_updateslot = mwl_updateslot;
485	ic->ic_update_mcast = mwl_update_mcast;
486	ic->ic_update_promisc = mwl_update_promisc;
487	ic->ic_wme.wme_update = mwl_wme_update;
488
	/* NB: save the default handlers before overriding them */
489	ic->ic_node_alloc = mwl_node_alloc;
490	sc->sc_node_cleanup = ic->ic_node_cleanup;
491	ic->ic_node_cleanup = mwl_node_cleanup;
492	sc->sc_node_drain = ic->ic_node_drain;
493	ic->ic_node_drain = mwl_node_drain;
494	ic->ic_node_getsignal = mwl_node_getsignal;
495	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;
496
497	ic->ic_scan_start = mwl_scan_start;
498	ic->ic_scan_end = mwl_scan_end;
499	ic->ic_set_channel = mwl_set_channel;
500
501	sc->sc_recv_action = ic->ic_recv_action;
502	ic->ic_recv_action = mwl_recv_action;
503	sc->sc_addba_request = ic->ic_addba_request;
504	ic->ic_addba_request = mwl_addba_request;
505	sc->sc_addba_response = ic->ic_addba_response;
506	ic->ic_addba_response = mwl_addba_response;
507	sc->sc_addba_stop = ic->ic_addba_stop;
508	ic->ic_addba_stop = mwl_addba_stop;
509
510	ic->ic_vap_create = mwl_vap_create;
511	ic->ic_vap_delete = mwl_vap_delete;
512
513	ieee80211_radiotap_attach(ic,
514	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
515		MWL_TX_RADIOTAP_PRESENT,
516	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
517		MWL_RX_RADIOTAP_PRESENT);
518	/*
519	 * Setup dynamic sysctl's now that country code and
520	 * regdomain are available from the hal.
521	 */
522	mwl_sysctlattach(sc);
523
524	if (bootverbose)
525		ieee80211_announce(ic);
526	mwl_announce(sc);
527	return 0;
	/* error unwind: release resources in reverse order of acquisition */
528bad2:
529	mwl_dma_cleanup(sc);
530bad1:
531	mwl_hal_detach(mh);
532bad:
533	if_free(ifp);
534	sc->sc_invalid = 1;
535	return error;
536}
537
/*
 * Detach the driver from the device: stop the h/w, unhook from
 * net80211, and release DMA, tx queue and hal state.  Always
 * returns 0.  The teardown ORDER below is load-bearing; see the
 * NB comment in the body before rearranging anything.
 */
538int
539mwl_detach(struct mwl_softc *sc)
540{
541	struct ifnet *ifp = sc->sc_ifp;
542	struct ieee80211com *ic = ifp->if_l2com;
543
544	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
545		__func__, ifp->if_flags);
546
547	mwl_stop(ifp, 1);
548	/*
549	 * NB: the order of these is important:
550	 * o call the 802.11 layer before detaching the hal to
551	 *   insure callbacks into the driver to delete global
552	 *   key cache entries can be handled
553	 * o reclaim the tx queue data structures after calling
554	 *   the 802.11 layer as we'll get called back to reclaim
555	 *   node state and potentially want to use them
556	 * o to cleanup the tx queues the hal is called, so detach
557	 *   it last
558	 * Other than that, it's straightforward...
559	 */
560	ieee80211_ifdetach(ic);
561	callout_drain(&sc->sc_watchdog);
562	mwl_dma_cleanup(sc);
563	mwl_tx_cleanup(sc);
564	mwl_hal_detach(sc->sc_mh);
565	if_free(ifp);
566
567	return 0;
568}
569
570/*
571 * MAC address handling for multiple BSS on the same radio.
572 * The first vap uses the MAC address from the EEPROM.  For
573 * subsequent vap's we set the U/L bit (bit 1) in the MAC
574 * address and use the next six bits as an index.
575 */
576static void
577assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
578{
579	int i;
580
581	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
582		/* NB: we only do this if h/w supports multiple bssid */
583		for (i = 0; i < 32; i++)
584			if ((sc->sc_bssidmask & (1<<i)) == 0)
585				break;
586		if (i != 0)
587			mac[0] |= (i << 2)|0x2;
588	} else
589		i = 0;
590	sc->sc_bssidmask |= 1<<i;
591	if (i == 0)
592		sc->sc_nbssid0++;
593}
594
595static void
596reclaim_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN])
597{
598	int i = mac[0] >> 2;
599	if (i != 0 || --sc->sc_nbssid0 == 0)
600		sc->sc_bssidmask &= ~(1<<i);
601}
602
/*
 * Create a vap: assign a MAC address, create the corresponding hal
 * vap where the opmode needs one (AP/MBSS/STA), set up net80211
 * state and install the driver's per-vap method overrides, then
 * update the vap counters and the overall operating mode.  Returns
 * the new vap or NULL on failure (unsupported opmode, hal vap
 * creation failure, or allocation failure); all error paths undo
 * the address assignment/hal vap already made.
 */
603static struct ieee80211vap *
604mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
605    enum ieee80211_opmode opmode, int flags,
606    const uint8_t bssid[IEEE80211_ADDR_LEN],
607    const uint8_t mac0[IEEE80211_ADDR_LEN])
608{
609	struct ifnet *ifp = ic->ic_ifp;
610	struct mwl_softc *sc = ifp->if_softc;
611	struct mwl_hal *mh = sc->sc_mh;
612	struct ieee80211vap *vap, *apvap;
613	struct mwl_hal_vap *hvap;
614	struct mwl_vap *mvp;
615	uint8_t mac[IEEE80211_ADDR_LEN];
616
617	IEEE80211_ADDR_COPY(mac, mac0);
618	switch (opmode) {
619	case IEEE80211_M_HOSTAP:
620	case IEEE80211_M_MBSS:
621		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
622			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
623		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
624		if (hvap == NULL) {
625			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
626				reclaim_address(sc, mac);
627			return NULL;
628		}
629		break;
630	case IEEE80211_M_STA:
631		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
632			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
633		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
634		if (hvap == NULL) {
635			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
636				reclaim_address(sc, mac);
637			return NULL;
638		}
639		/* no h/w beacon miss support; always use s/w */
640		flags |= IEEE80211_CLONE_NOBEACONS;
641		break;
642	case IEEE80211_M_WDS:
643		hvap = NULL;		/* NB: we use associated AP vap */
644		if (sc->sc_napvaps == 0)
645			return NULL;	/* no existing AP vap */
646		break;
647	case IEEE80211_M_MONITOR:
648		hvap = NULL;
649		break;
650	case IEEE80211_M_IBSS:
651	case IEEE80211_M_AHDEMO:
652	default:
		/* opmode not supported by this driver */
653		return NULL;
654	}
655
656	mvp = (struct mwl_vap *) malloc(sizeof(struct mwl_vap),
657	    M_80211_VAP, M_NOWAIT | M_ZERO);
658	if (mvp == NULL) {
		/* undo the hal vap + address assignment made above */
659		if (hvap != NULL) {
660			mwl_hal_delvap(hvap);
661			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
662				reclaim_address(sc, mac);
663		}
664		/* XXX msg */
665		return NULL;
666	}
667	mvp->mv_hvap = hvap;
668	if (opmode == IEEE80211_M_WDS) {
669		/*
670		 * WDS vaps must have an associated AP vap; find one.
671		 * XXX not right.
672		 */
673		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
674			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
675				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
676				break;
677			}
678		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
679	}
680	vap = &mvp->mv_vap;
681	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
682	if (hvap != NULL)
683		IEEE80211_ADDR_COPY(vap->iv_myaddr, mac);
684	/* override with driver methods */
685	mvp->mv_newstate = vap->iv_newstate;
686	vap->iv_newstate = mwl_newstate;
687	vap->iv_max_keyix = 0;	/* XXX */
688	vap->iv_key_alloc = mwl_key_alloc;
689	vap->iv_key_delete = mwl_key_delete;
690	vap->iv_key_set = mwl_key_set;
691#ifdef MWL_HOST_PS_SUPPORT
692	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
693		vap->iv_update_ps = mwl_update_ps;
694		mvp->mv_set_tim = vap->iv_set_tim;
695		vap->iv_set_tim = mwl_set_tim;
696	}
697#endif
698	vap->iv_reset = mwl_reset;
699	vap->iv_update_beacon = mwl_beacon_update;
700
701	/* override max aid so sta's cannot assoc when we're out of sta id's */
702	vap->iv_max_aid = MWL_MAXSTAID;
703	/* override default A-MPDU rx parameters */
704	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
705	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;
706
707	/* complete setup */
708	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status);
709
	/* account for the new vap by type */
710	switch (vap->iv_opmode) {
711	case IEEE80211_M_HOSTAP:
712	case IEEE80211_M_MBSS:
713	case IEEE80211_M_STA:
714		/*
715		 * Setup sta db entry for local address.
716		 */
717		mwl_localstadb(vap);
718		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
719		    vap->iv_opmode == IEEE80211_M_MBSS)
720			sc->sc_napvaps++;
721		else
722			sc->sc_nstavaps++;
723		break;
724	case IEEE80211_M_WDS:
725		sc->sc_nwdsvaps++;
726		break;
727	default:
728		break;
729	}
730	/*
731	 * Setup overall operating mode.
732	 */
733	if (sc->sc_napvaps)
734		ic->ic_opmode = IEEE80211_M_HOSTAP;
735	else if (sc->sc_nstavaps)
736		ic->ic_opmode = IEEE80211_M_STA;
737	else
738		ic->ic_opmode = opmode;
739
740	return vap;
741}
742
/*
 * Destroy a vap: quiesce interrupts while the device is running,
 * detach the vap from net80211, tear down the matching hal vap and
 * station entry (AP/MBSS/STA only), adjust the vap counters, drain
 * any queued tx frames for the vap and free the driver state.
 * Interrupts are re-enabled on exit if the device was running.
 */
743static void
744mwl_vap_delete(struct ieee80211vap *vap)
745{
746	struct mwl_vap *mvp = MWL_VAP(vap);
747	struct ifnet *parent = vap->iv_ic->ic_ifp;
748	struct mwl_softc *sc = parent->if_softc;
749	struct mwl_hal *mh = sc->sc_mh;
750	struct mwl_hal_vap *hvap = mvp->mv_hvap;
751	enum ieee80211_opmode opmode = vap->iv_opmode;
752
753	/* XXX disallow ap vap delete if WDS still present */
754	if (parent->if_drv_flags & IFF_DRV_RUNNING) {
755		/* quiesce h/w while we remove the vap */
756		mwl_hal_intrset(mh, 0);		/* disable interrupts */
757	}
758	ieee80211_vap_detach(vap);
759	switch (opmode) {
760	case IEEE80211_M_HOSTAP:
761	case IEEE80211_M_MBSS:
762	case IEEE80211_M_STA:
763		KASSERT(hvap != NULL, ("no hal vap handle"));
764		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
765		mwl_hal_delvap(hvap);
766		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
767			sc->sc_napvaps--;
768		else
769			sc->sc_nstavaps--;
770		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
771		reclaim_address(sc, vap->iv_myaddr);
772		break;
773	case IEEE80211_M_WDS:
774		sc->sc_nwdsvaps--;
775		break;
776	default:
777		break;
778	}
779	mwl_cleartxq(sc, vap);
780	free(mvp, M_80211_VAP);
	/* restore the interrupt mask saved in sc_imask */
781	if (parent->if_drv_flags & IFF_DRV_RUNNING)
782		mwl_hal_intrset(mh, sc->sc_imask);
783}
784
785void
786mwl_suspend(struct mwl_softc *sc)
787{
788	struct ifnet *ifp = sc->sc_ifp;
789
790	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
791		__func__, ifp->if_flags);
792
793	mwl_stop(ifp, 1);
794}
795
796void
797mwl_resume(struct mwl_softc *sc)
798{
799	struct ifnet *ifp = sc->sc_ifp;
800
801	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
802		__func__, ifp->if_flags);
803
804	if (ifp->if_flags & IFF_UP)
805		mwl_init(sc);
806}
807
808void
809mwl_shutdown(void *arg)
810{
811	struct mwl_softc *sc = arg;
812
813	mwl_stop(sc->sc_ifp, 1);
814}
815
816/*
817 * Interrupt handler.  Most of the actual processing is deferred.
818 */
819void
820mwl_intr(void *arg)
821{
822	struct mwl_softc *sc = arg;
823	struct mwl_hal *mh = sc->sc_mh;
824	uint32_t status;
825
826	if (sc->sc_invalid) {
827		/*
828		 * The hardware is not ready/present, don't touch anything.
829		 * Note this can happen early on if the IRQ is shared.
830		 */
831		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
832		return;
833	}
834	/*
835	 * Figure out the reason(s) for the interrupt.
836	 */
837	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
838	if (status == 0)			/* must be a shared irq */
839		return;
840
841	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
842	    __func__, status, sc->sc_imask);
	/* dispatch the heavy lifting to taskqueue tasks */
843	if (status & MACREG_A2HRIC_BIT_RX_RDY)
844		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
845	if (status & MACREG_A2HRIC_BIT_TX_DONE)
846		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
847	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
848		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
849	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
850		mwl_hal_cmddone(mh);
851	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
852		;	/* currently unhandled */
853	}
854	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
855		/* TKIP ICV error */
856		sc->sc_stats.mst_rx_badtkipicv++;
857	}
858	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
859		/* 11n aggregation queue is empty, re-fill */
860		;	/* currently unhandled */
861	}
862	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
863		;	/* currently unhandled */
864	}
865	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
866		/* radar detected, process event */
867		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
868	}
869	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
870		/* DFS channel switch */
871		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
872	}
873}
874
875static void
876mwl_radar_proc(void *arg, int pending)
877{
878	struct mwl_softc *sc = arg;
879	struct ifnet *ifp = sc->sc_ifp;
880	struct ieee80211com *ic = ifp->if_l2com;
881
882	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
883	    __func__, pending);
884
885	sc->sc_stats.mst_radardetect++;
886	/* XXX stop h/w BA streams? */
887
888	IEEE80211_LOCK(ic);
889	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
890	IEEE80211_UNLOCK(ic);
891}
892
893static void
894mwl_chanswitch_proc(void *arg, int pending)
895{
896	struct mwl_softc *sc = arg;
897	struct ifnet *ifp = sc->sc_ifp;
898	struct ieee80211com *ic = ifp->if_l2com;
899
900	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
901	    __func__, pending);
902
903	IEEE80211_LOCK(ic);
904	sc->sc_csapending = 0;
905	ieee80211_csa_completeswitch(ic);
906	IEEE80211_UNLOCK(ic);
907}
908
909static void
910mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
911{
912	struct ieee80211_node *ni = sp->data[0];
913
914	/* send DELBA and drop the stream */
915	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
916}
917
/*
 * Deferred task: the f/w BA watchdog fired.  Fetch the watchdog
 * bitmap and tear down the indicated BA stream(s).
 * NOTE(review): 0xff and 0xaa appear to be f/w sentinel values
 * (0xff = tear down all streams, 0xaa = nothing to do, otherwise
 * the value is a single stream index) -- confirm against the
 * firmware interface documentation.
 */
918static void
919mwl_bawatchdog_proc(void *arg, int pending)
920{
921	struct mwl_softc *sc = arg;
922	struct mwl_hal *mh = sc->sc_mh;
923	const MWL_HAL_BASTREAM *sp;
924	uint8_t bitmap, n;
925
926	sc->sc_stats.mst_bawatchdog++;
927
928	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
929		DPRINTF(sc, MWL_DEBUG_AMPDU,
930		    "%s: could not get bitmap\n", __func__);
931		sc->sc_stats.mst_bawatchdog_failed++;
932		return;
933	}
934	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
935	if (bitmap == 0xff) {
936		n = 0;
937		/* disable all ba streams */
938		for (bitmap = 0; bitmap < 8; bitmap++) {
939			sp = mwl_hal_bastream_lookup(mh, bitmap);
940			if (sp != NULL) {
941				mwl_bawatchdog(sp);
942				n++;
943			}
944		}
945		if (n == 0) {
946			DPRINTF(sc, MWL_DEBUG_AMPDU,
947			    "%s: no BA streams found\n", __func__);
948			sc->sc_stats.mst_bawatchdog_empty++;
949		}
950	} else if (bitmap != 0xaa) {
951		/* disable a single ba stream */
952		sp = mwl_hal_bastream_lookup(mh, bitmap);
953		if (sp != NULL) {
954			mwl_bawatchdog(sp);
955		} else {
956			DPRINTF(sc, MWL_DEBUG_AMPDU,
957			    "%s: no BA stream %d\n", __func__, bitmap);
958			sc->sc_stats.mst_bawatchdog_notfound++;
959		}
960	}
961}
962
963/*
964 * Convert net80211 channel to a HAL channel.
965 */
966static void
967mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
968{
969	hc->channel = chan->ic_ieee;
970
971	*(uint32_t *)&hc->channelFlags = 0;
972	if (IEEE80211_IS_CHAN_2GHZ(chan))
973		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
974	else if (IEEE80211_IS_CHAN_5GHZ(chan))
975		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
976	if (IEEE80211_IS_CHAN_HT40(chan)) {
977		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
978		if (IEEE80211_IS_CHAN_HT40U(chan))
979			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
980		else
981			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
982	} else
983		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
984	/* XXX 10MHz channels */
985}
986
987/*
988 * Inform firmware of our tx/rx dma setup.  The BAR 0
989 * writes below are for compatibility with older firmware.
990 * For current firmware we send this information with a
991 * cmd block via mwl_hal_sethwdma.
992 */
993static int
994mwl_setupdma(struct mwl_softc *sc)
995{
996	int error, i;
997
998	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
999	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
1000	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);
1001
1002	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
1003		struct mwl_txq *txq = &sc->sc_txq[i];
1004		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
1005		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
1006	}
1007	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
1008	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;
1009
1010	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
1011	if (error != 0) {
1012		device_printf(sc->sc_dev,
1013		    "unable to setup tx/rx dma; hal status %u\n", error);
1014		/* XXX */
1015	}
1016	return error;
1017}
1018
1019/*
1020 * Inform firmware of tx rate parameters.
1021 * Called after a channel change.
1022 */
1023static int
1024mwl_setcurchanrates(struct mwl_softc *sc)
1025{
1026	struct ifnet *ifp = sc->sc_ifp;
1027	struct ieee80211com *ic = ifp->if_l2com;
1028	const struct ieee80211_rateset *rs;
1029	MWL_HAL_TXRATE rates;
1030
1031	memset(&rates, 0, sizeof(rates));
1032	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1033	/* rate used to send management frames */
1034	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1035	/* rate used to send multicast frames */
1036	rates.McastRate = rates.MgtRate;
1037
1038	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1039}
1040
1041/*
1042 * Inform firmware of tx rate parameters.  Called whenever
1043 * user-settable params change and after a channel change.
1044 */
1045static int
1046mwl_setrates(struct ieee80211vap *vap)
1047{
1048	struct mwl_vap *mvp = MWL_VAP(vap);
1049	struct ieee80211_node *ni = vap->iv_bss;
1050	const struct ieee80211_txparam *tp = ni->ni_txparms;
1051	MWL_HAL_TXRATE rates;
1052
1053	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1054
1055	/*
1056	 * Update the h/w rate map.
1057	 * NB: 0x80 for MCS is passed through unchanged
1058	 */
1059	memset(&rates, 0, sizeof(rates));
1060	/* rate used to send management frames */
1061	rates.MgtRate = tp->mgmtrate;
1062	/* rate used to send multicast frames */
1063	rates.McastRate = tp->mcastrate;
1064
1065	/* while here calculate EAPOL fixed rate cookie */
1066	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
1067
1068	return mwl_hal_settxrate(mvp->mv_hvap,
1069	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
1070		RATE_FIXED : RATE_AUTO, &rates);
1071}
1072
1073/*
1074 * Setup a fixed xmit rate cookie for EAPOL frames.
1075 */
1076static void
1077mwl_seteapolformat(struct ieee80211vap *vap)
1078{
1079	struct mwl_vap *mvp = MWL_VAP(vap);
1080	struct ieee80211_node *ni = vap->iv_bss;
1081	enum ieee80211_phymode mode;
1082	uint8_t rate;
1083
1084	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1085
1086	mode = ieee80211_chan2mode(ni->ni_chan);
1087	/*
1088	 * Use legacy rates when operating a mixed HT+non-HT bss.
1089	 * NB: this may violate POLA for sta and wds vap's.
1090	 */
1091	if (mode == IEEE80211_MODE_11NA &&
1092	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1093		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1094	else if (mode == IEEE80211_MODE_11NG &&
1095	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1096		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1097	else
1098		rate = vap->iv_txparms[mode].mgmtrate;
1099
1100	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1101}
1102
1103/*
1104 * Map SKU+country code to region code for radar bin'ing.
1105 */
1106static int
1107mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1108{
1109	switch (rd->regdomain) {
1110	case SKU_FCC:
1111	case SKU_FCC3:
1112		return DOMAIN_CODE_FCC;
1113	case SKU_CA:
1114		return DOMAIN_CODE_IC;
1115	case SKU_ETSI:
1116	case SKU_ETSI2:
1117	case SKU_ETSI3:
1118		if (rd->country == CTRY_SPAIN)
1119			return DOMAIN_CODE_SPAIN;
1120		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1121			return DOMAIN_CODE_FRANCE;
1122		/* XXX force 1.3.1 radar type */
1123		return DOMAIN_CODE_ETSI_131;
1124	case SKU_JAPAN:
1125		return DOMAIN_CODE_MKK;
1126	case SKU_ROW:
1127		return DOMAIN_CODE_DGT;	/* Taiwan */
1128	case SKU_APAC:
1129	case SKU_APAC2:
1130	case SKU_APAC3:
1131		return DOMAIN_CODE_AUS;	/* Australia */
1132	}
1133	/* XXX KOREA? */
1134	return DOMAIN_CODE_FCC;			/* XXX? */
1135}
1136
/*
 * Push global (vap-independent) state to the firmware:
 * antenna config, radio, WMM, current channel, rate adaptation
 * and regulatory domain.  NB: return values of the individual
 * hal calls are ignored; this always reports success (1).
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	return 1;
}
1161
/*
 * (Re)initialize the hardware with the softc lock held: stop
 * any prior activity, push vap-independent state to the f/w,
 * start the rx path, then unmask interrupts and arm the
 * watchdog.  Returns 0 or an errno.
 */
static int
mwl_init_locked(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop_locked(ifp, 0);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		if_printf(ifp, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.
	 * NB: MACREQ_A2HRIC_BIT_TX_ACK spelling matches the
	 *     register header; do not "correct" it.
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	mwl_hal_intrset(mh, sc->sc_imask);
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);

	return 0;
}
1222
1223static void
1224mwl_init(void *arg)
1225{
1226	struct mwl_softc *sc = arg;
1227	struct ifnet *ifp = sc->sc_ifp;
1228	struct ieee80211com *ic = ifp->if_l2com;
1229	int error = 0;
1230
1231	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
1232		__func__, ifp->if_flags);
1233
1234	MWL_LOCK(sc);
1235	error = mwl_init_locked(sc);
1236	MWL_UNLOCK(sc);
1237
1238	if (error == 0)
1239		ieee80211_start_all(ic);	/* start all vap's */
1240}
1241
/*
 * Stop the device with the softc lock held: mark the interface
 * down, cancel the watchdog and reclaim queued tx buffers.
 * NB: the 'disable' argument is currently unused here (no
 *     radio power-down is performed).
 */
static void
mwl_stop_locked(struct ifnet *ifp, int disable)
{
	struct mwl_softc *sc = ifp->if_softc;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	MWL_LOCK_ASSERT(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Shutdown the hardware and driver.
		 */
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		callout_stop(&sc->sc_watchdog);
		sc->sc_tx_timer = 0;
		mwl_draintxq(sc);
	}
}
1261
/*
 * External entry point: stop the device, taking the softc lock
 * around mwl_stop_locked.
 */
static void
mwl_stop(struct ifnet *ifp, int disable)
{
	struct mwl_softc *sc = ifp->if_softc;

	MWL_LOCK(sc);
	mwl_stop_locked(ifp, disable);
	MWL_UNLOCK(sc);
}
1271
/*
 * Push vap-dependent state to the firmware for the given state:
 * tx rates (RUN only), RTS threshold, short-GI, HT protection
 * and, for ap-like vaps in RUN state, the beacon frame.
 * Returns 0 or an errno from beacon setup.
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_MBSS ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}
1301
1302/*
1303 * Reset the hardware w/o losing operational state.
1304 * Used to to reset or reload hardware state for a vap.
1305 */
1306static int
1307mwl_reset(struct ieee80211vap *vap, u_long cmd)
1308{
1309	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1310	int error = 0;
1311
1312	if (hvap != NULL) {			/* WDS, MONITOR, etc. */
1313		struct ieee80211com *ic = vap->iv_ic;
1314		struct ifnet *ifp = ic->ic_ifp;
1315		struct mwl_softc *sc = ifp->if_softc;
1316		struct mwl_hal *mh = sc->sc_mh;
1317
1318		/* XXX handle DWDS sta vap change */
1319		/* XXX do we need to disable interrupts? */
1320		mwl_hal_intrset(mh, 0);		/* disable interrupts */
1321		error = mwl_reset_vap(vap, vap->iv_state);
1322		mwl_hal_intrset(mh, sc->sc_imask);
1323	}
1324	return error;
1325}
1326
1327/*
1328 * Allocate a tx buffer for sending a frame.  The
1329 * packet is assumed to have the WME AC stored so
1330 * we can use it to select the appropriate h/w queue.
1331 */
1332static struct mwl_txbuf *
1333mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1334{
1335	struct mwl_txbuf *bf;
1336
1337	/*
1338	 * Grab a TX buffer and associated resources.
1339	 */
1340	MWL_TXQ_LOCK(txq);
1341	bf = STAILQ_FIRST(&txq->free);
1342	if (bf != NULL) {
1343		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1344		txq->nfree--;
1345	}
1346	MWL_TXQ_UNLOCK(txq);
1347	if (bf == NULL)
1348		DPRINTF(sc, MWL_DEBUG_XMIT,
1349		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1350	return bf;
1351}
1352
1353/*
1354 * Return a tx buffer to the queue it came from.  Note there
1355 * are two cases because we must preserve the order of buffers
1356 * as it reflects the fixed order of descriptors in memory
1357 * (the firmware pre-fetches descriptors so we cannot reorder).
1358 */
1359static void
1360mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1361{
1362	bf->bf_m = NULL;
1363	bf->bf_node = NULL;
1364	MWL_TXQ_LOCK(txq);
1365	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1366	txq->nfree++;
1367	MWL_TXQ_UNLOCK(txq);
1368}
1369
1370static void
1371mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
1372{
1373	bf->bf_m = NULL;
1374	bf->bf_node = NULL;
1375	MWL_TXQ_LOCK(txq);
1376	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
1377	txq->nfree++;
1378	MWL_TXQ_UNLOCK(txq);
1379}
1380
/*
 * if_start entry: drain the interface send queue, map each
 * frame to the h/w queue selected by the 802.11 layer's WME
 * classification, and hand frames to the firmware.  Ownership:
 * once dequeued, the mbuf and the node reference stashed in
 * m_pkthdr.rcvif belong to this routine; on any failure both
 * are released here (mwl_tx_start is assumed to consume the
 * mbuf on failure -- NOTE(review): confirm against its code).
 */
static void
mwl_start(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	nqueued = 0;
	for (;;) {
		bf = NULL;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		/*
		 * Grab the node for the destination.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			sc->sc_stats.mst_tx_qstop++;
			/* XXX blocks other traffic */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			ifp->if_oerrors++;
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host. So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1463
/*
 * Transmit a raw (caller-formed) 802.11 frame.  Returns 0 on
 * success; on failure the node reference and mbuf are released
 * (NOTE(review): on mwl_tx_start failure only the node is freed
 * here -- presumably mwl_tx_start consumes the mbuf; confirm).
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
		ieee80211_free_node(ni);
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		/* XXX blocks other traffic */
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		ieee80211_free_node(ni);
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		ifp->if_oerrors++;
		mwl_puttxbuf_head(txq, bf);

		ieee80211_free_node(ni);
		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host. So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1523
1524static int
1525mwl_media_change(struct ifnet *ifp)
1526{
1527	struct ieee80211vap *vap = ifp->if_softc;
1528	int error;
1529
1530	error = ieee80211_media_change(ifp);
1531	/* NB: only the fixed rate can change and that doesn't need a reset */
1532	if (error == ENETRESET) {
1533		mwl_setrates(vap);
1534		error = 0;
1535	}
1536	return error;
1537}
1538
1539#ifdef MWL_DEBUG
/*
 * Debug helper: dump a HAL key record -- cipher type, raw key
 * bytes, peer mac, per-direction TKIP MIC keys and flags.
 */
static void
mwl_keyprint(struct mwl_softc *sc, const char *tag,
	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	static const char *ciphers[] = {
		"WEP",
		"TKIP",
		"AES-CCM",
	};
	int i, n;

	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
	for (i = 0, n = hk->keyLen; i < n; i++)
		printf(" %02x", hk->key.aes[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
		printf(" %s", "rxmic");
		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
			printf(" %02x", hk->key.tkip.rxMic[i]);
		printf(" txmic");
		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
			printf(" %02x", hk->key.tkip.txMic[i]);
	}
	printf(" flags 0x%x\n", hk->keyFlags);
}
1565#endif
1566
1567/*
1568 * Allocate a key cache slot for a unicast key.  The
1569 * firmware handles key allocation and every station is
1570 * guaranteed key space so we are always successful.
1571 */
1572static int
1573mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
1574	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1575{
1576	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1577
1578	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
1579	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
1580		if (!(&vap->iv_nw_keys[0] <= k &&
1581		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
1582			/* should not happen */
1583			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1584				"%s: bogus group key\n", __func__);
1585			return 0;
1586		}
1587		/* give the caller what they requested */
1588		*keyix = *rxkeyix = k - vap->iv_nw_keys;
1589	} else {
1590		/*
1591		 * Firmware handles key allocation.
1592		 */
1593		*keyix = *rxkeyix = 0;
1594	}
1595	return 1;
1596}
1597
1598/*
1599 * Delete a key entry allocated by mwl_key_alloc.
1600 */
1601static int
1602mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
1603{
1604	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1605	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1606	MWL_HAL_KEYVAL hk;
1607	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
1608	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1609
1610	if (hvap == NULL) {
1611		if (vap->iv_opmode != IEEE80211_M_WDS) {
1612			/* XXX monitor mode? */
1613			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1614			    "%s: no hvap for opmode %d\n", __func__,
1615			    vap->iv_opmode);
1616			return 0;
1617		}
1618		hvap = MWL_VAP(vap)->mv_ap_hvap;
1619	}
1620
1621	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
1622	    __func__, k->wk_keyix);
1623
1624	memset(&hk, 0, sizeof(hk));
1625	hk.keyIndex = k->wk_keyix;
1626	switch (k->wk_cipher->ic_cipher) {
1627	case IEEE80211_CIPHER_WEP:
1628		hk.keyTypeId = KEY_TYPE_ID_WEP;
1629		break;
1630	case IEEE80211_CIPHER_TKIP:
1631		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1632		break;
1633	case IEEE80211_CIPHER_AES_CCM:
1634		hk.keyTypeId = KEY_TYPE_ID_AES;
1635		break;
1636	default:
1637		/* XXX should not happen */
1638		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1639		    __func__, k->wk_cipher->ic_cipher);
1640		return 0;
1641	}
1642	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
1643}
1644
1645static __inline int
1646addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1647{
1648	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1649		if (k->wk_flags & IEEE80211_KEY_XMIT)
1650			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1651		if (k->wk_flags & IEEE80211_KEY_RECV)
1652			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1653		return 1;
1654	} else
1655		return 0;
1656}
1657
1658/*
1659 * Set the key cache contents for the specified key.  Key cache
1660 * slot(s) must already have been allocated by mwl_key_alloc.
1661 */
1662static int
1663mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
1664	const uint8_t mac[IEEE80211_ADDR_LEN])
1665{
1666#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
1667/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
1668#define	IEEE80211_IS_STATICKEY(k) \
1669	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
1670	 (GRPXMIT|IEEE80211_KEY_RECV))
1671	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1672	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1673	const struct ieee80211_cipher *cip = k->wk_cipher;
1674	const uint8_t *macaddr;
1675	MWL_HAL_KEYVAL hk;
1676
1677	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
1678		("s/w crypto set?"));
1679
1680	if (hvap == NULL) {
1681		if (vap->iv_opmode != IEEE80211_M_WDS) {
1682			/* XXX monitor mode? */
1683			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1684			    "%s: no hvap for opmode %d\n", __func__,
1685			    vap->iv_opmode);
1686			return 0;
1687		}
1688		hvap = MWL_VAP(vap)->mv_ap_hvap;
1689	}
1690	memset(&hk, 0, sizeof(hk));
1691	hk.keyIndex = k->wk_keyix;
1692	switch (cip->ic_cipher) {
1693	case IEEE80211_CIPHER_WEP:
1694		hk.keyTypeId = KEY_TYPE_ID_WEP;
1695		hk.keyLen = k->wk_keylen;
1696		if (k->wk_keyix == vap->iv_def_txkey)
1697			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
1698		if (!IEEE80211_IS_STATICKEY(k)) {
1699			/* NB: WEP is never used for the PTK */
1700			(void) addgroupflags(&hk, k);
1701		}
1702		break;
1703	case IEEE80211_CIPHER_TKIP:
1704		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1705		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
1706		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
1707		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
1708		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
1709		if (!addgroupflags(&hk, k))
1710			hk.keyFlags |= KEY_FLAG_PAIRWISE;
1711		break;
1712	case IEEE80211_CIPHER_AES_CCM:
1713		hk.keyTypeId = KEY_TYPE_ID_AES;
1714		hk.keyLen = k->wk_keylen;
1715		if (!addgroupflags(&hk, k))
1716			hk.keyFlags |= KEY_FLAG_PAIRWISE;
1717		break;
1718	default:
1719		/* XXX should not happen */
1720		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1721		    __func__, k->wk_cipher->ic_cipher);
1722		return 0;
1723	}
1724	/*
1725	 * NB: tkip mic keys get copied here too; the layout
1726	 *     just happens to match that in ieee80211_key.
1727	 */
1728	memcpy(hk.key.aes, k->wk_key, hk.keyLen);
1729
1730	/*
1731	 * Locate address of sta db entry for writing key;
1732	 * the convention unfortunately is somewhat different
1733	 * than how net80211, hostapd, and wpa_supplicant think.
1734	 */
1735	if (vap->iv_opmode == IEEE80211_M_STA) {
1736		/*
1737		 * NB: keys plumbed before the sta reaches AUTH state
1738		 * will be discarded or written to the wrong sta db
1739		 * entry because iv_bss is meaningless.  This is ok
1740		 * (right now) because we handle deferred plumbing of
1741		 * WEP keys when the sta reaches AUTH state.
1742		 */
1743		macaddr = vap->iv_bss->ni_bssid;
1744		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
1745			/* XXX plumb to local sta db too for static key wep */
1746			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
1747		}
1748	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
1749	    vap->iv_state != IEEE80211_S_RUN) {
1750		/*
1751		 * Prior to RUN state a WDS vap will not it's BSS node
1752		 * setup so we will plumb the key to the wrong mac
1753		 * address (it'll be our local address).  Workaround
1754		 * this for the moment by grabbing the correct address.
1755		 */
1756		macaddr = vap->iv_des_bssid;
1757	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
1758		macaddr = vap->iv_myaddr;
1759	else
1760		macaddr = mac;
1761	KEYPRINTF(sc, &hk, macaddr);
1762	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
1763#undef IEEE80211_IS_STATICKEY
1764#undef GRPXMIT
1765}
1766
/*
 * Unaligned little-endian reads: assemble the value a byte at
 * a time so any alignment is safe.
 */
#define LE_READ_2(p)				\
	((uint16_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8)))
#define LE_READ_4(p)				\
	((uint32_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8) |	\
	  (((const uint8_t *)(p))[2] << 16) |	\
	  (((const uint8_t *)(p))[3] << 24)))
1778
1779/*
1780 * Set the multicast filter contents into the hardware.
1781 * XXX f/w has no support; just defer to the os.
1782 */
1783static void
1784mwl_setmcastfilter(struct mwl_softc *sc)
1785{
1786	struct ifnet *ifp = sc->sc_ifp;
1787#if 0
1788	struct ether_multi *enm;
1789	struct ether_multistep estep;
1790	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
1791	uint8_t *mp;
1792	int nmc;
1793
1794	mp = macs;
1795	nmc = 0;
1796	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
1797	while (enm != NULL) {
1798		/* XXX Punt on ranges. */
1799		if (nmc == MWL_HAL_MCAST_MAX ||
1800		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
1801			ifp->if_flags |= IFF_ALLMULTI;
1802			return;
1803		}
1804		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
1805		mp += IEEE80211_ADDR_LEN, nmc++;
1806		ETHER_NEXT_MULTI(estep, enm);
1807	}
1808	ifp->if_flags &= ~IFF_ALLMULTI;
1809	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
1810#else
1811	/* XXX no mcast filter support; we get everything */
1812	ifp->if_flags |= IFF_ALLMULTI;
1813#endif
1814}
1815
1816static int
1817mwl_mode_init(struct mwl_softc *sc)
1818{
1819	struct ifnet *ifp = sc->sc_ifp;
1820	struct ieee80211com *ic = ifp->if_l2com;
1821	struct mwl_hal *mh = sc->sc_mh;
1822
1823	/*
1824	 * NB: Ignore promisc in hostap mode; it's set by the
1825	 * bridge.  This is wrong but we have no way to
1826	 * identify internal requests (from the bridge)
1827	 * versus external requests such as for tcpdump.
1828	 */
1829	mwl_hal_setpromisc(mh, (ifp->if_flags & IFF_PROMISC) &&
1830	    ic->ic_opmode != IEEE80211_M_HOSTAP);
1831	mwl_setmcastfilter(sc);
1832
1833	return 0;
1834}
1835
1836/*
1837 * Callback from the 802.11 layer after a multicast state change.
1838 */
1839static void
1840mwl_update_mcast(struct ifnet *ifp)
1841{
1842	struct mwl_softc *sc = ifp->if_softc;
1843
1844	mwl_setmcastfilter(sc);
1845}
1846
1847/*
1848 * Callback from the 802.11 layer after a promiscuous mode change.
1849 * Note this interface does not check the operating mode as this
1850 * is an internal callback and we are expected to honor the current
1851 * state (e.g. this is used for setting the interface in promiscuous
1852 * mode when operating in hostap mode to do ACS).
1853 */
1854static void
1855mwl_update_promisc(struct ifnet *ifp)
1856{
1857	struct mwl_softc *sc = ifp->if_softc;
1858
1859	mwl_hal_setpromisc(sc->sc_mh, (ifp->if_flags & IFF_PROMISC) != 0);
1860}
1861
1862/*
1863 * Callback from the 802.11 layer to update the slot time
1864 * based on the current setting.  We use it to notify the
1865 * firmware of ERP changes and the f/w takes care of things
1866 * like slot time and preamble.
1867 */
1868static void
1869mwl_updateslot(struct ifnet *ifp)
1870{
1871	struct mwl_softc *sc = ifp->if_softc;
1872	struct ieee80211com *ic = ifp->if_l2com;
1873	struct mwl_hal *mh = sc->sc_mh;
1874	int prot;
1875
1876	/* NB: can be called early; suppress needless cmds */
1877	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1878		return;
1879
1880	/*
1881	 * Calculate the ERP flags.  The firwmare will use
1882	 * this to carry out the appropriate measures.
1883	 */
1884	prot = 0;
1885	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
1886		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
1887			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
1888		if (ic->ic_flags & IEEE80211_F_USEPROT)
1889			prot |= IEEE80211_ERP_USE_PROTECTION;
1890		if (ic->ic_flags & IEEE80211_F_USEBARKER)
1891			prot |= IEEE80211_ERP_LONG_PREAMBLE;
1892	}
1893
1894	DPRINTF(sc, MWL_DEBUG_RESET,
1895	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
1896	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
1897	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
1898	    ic->ic_flags);
1899
1900	mwl_hal_setgprot(mh, prot);
1901}
1902
1903/*
1904 * Setup the beacon frame.
1905 */
1906static int
1907mwl_beacon_setup(struct ieee80211vap *vap)
1908{
1909	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1910	struct ieee80211_node *ni = vap->iv_bss;
1911	struct ieee80211_beacon_offsets bo;
1912	struct mbuf *m;
1913
1914	m = ieee80211_beacon_alloc(ni, &bo);
1915	if (m == NULL)
1916		return ENOBUFS;
1917	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1918	m_free(m);
1919
1920	return 0;
1921}
1922
1923/*
1924 * Update the beacon frame in response to a change.
1925 */
1926static void
1927mwl_beacon_update(struct ieee80211vap *vap, int item)
1928{
1929	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1930	struct ieee80211com *ic = vap->iv_ic;
1931
1932	KASSERT(hvap != NULL, ("no beacon"));
1933	switch (item) {
1934	case IEEE80211_BEACON_ERP:
1935		mwl_updateslot(ic->ic_ifp);
1936		break;
1937	case IEEE80211_BEACON_HTINFO:
1938		mwl_hal_setnprotmode(hvap,
1939		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1940		break;
1941	case IEEE80211_BEACON_CAPS:
1942	case IEEE80211_BEACON_WME:
1943	case IEEE80211_BEACON_APPIE:
1944	case IEEE80211_BEACON_CSA:
1945		break;
1946	case IEEE80211_BEACON_TIM:
1947		/* NB: firmware always forms TIM */
1948		return;
1949	}
1950	/* XXX retain beacon frame and update */
1951	mwl_beacon_setup(vap);
1952}
1953
1954static void
1955mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1956{
1957	bus_addr_t *paddr = (bus_addr_t*) arg;
1958	KASSERT(error == 0, ("error %u on bus_dma callback", error));
1959	*paddr = segs->ds_addr;
1960}
1961
1962#ifdef MWL_HOST_PS_SUPPORT
1963/*
1964 * Handle power save station occupancy changes.
1965 */
1966static void
1967mwl_update_ps(struct ieee80211vap *vap, int nsta)
1968{
1969	struct mwl_vap *mvp = MWL_VAP(vap);
1970
1971	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
1972		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
1973	mvp->mv_last_ps_sta = nsta;
1974}
1975
1976/*
1977 * Handle associated station power save state changes.
1978 */
1979static int
1980mwl_set_tim(struct ieee80211_node *ni, int set)
1981{
1982	struct ieee80211vap *vap = ni->ni_vap;
1983	struct mwl_vap *mvp = MWL_VAP(vap);
1984
1985	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1986		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1987		    IEEE80211_AID(ni->ni_associd), set);
1988		return 1;
1989	} else
1990		return 0;
1991}
1992#endif /* MWL_HOST_PS_SUPPORT */
1993
1994static int
1995mwl_desc_setup(struct mwl_softc *sc, const char *name,
1996	struct mwl_descdma *dd,
1997	int nbuf, size_t bufsize, int ndesc, size_t descsize)
1998{
1999	struct ifnet *ifp = sc->sc_ifp;
2000	uint8_t *ds;
2001	int error;
2002
2003	DPRINTF(sc, MWL_DEBUG_RESET,
2004	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
2005	    __func__, name, nbuf, (uintmax_t) bufsize,
2006	    ndesc, (uintmax_t) descsize);
2007
2008	dd->dd_name = name;
2009	dd->dd_desc_len = nbuf * ndesc * descsize;
2010
2011	/*
2012	 * Setup DMA descriptor area.
2013	 */
2014	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
2015		       PAGE_SIZE, 0,		/* alignment, bounds */
2016		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2017		       BUS_SPACE_MAXADDR,	/* highaddr */
2018		       NULL, NULL,		/* filter, filterarg */
2019		       dd->dd_desc_len,		/* maxsize */
2020		       1,			/* nsegments */
2021		       dd->dd_desc_len,		/* maxsegsize */
2022		       BUS_DMA_ALLOCNOW,	/* flags */
2023		       NULL,			/* lockfunc */
2024		       NULL,			/* lockarg */
2025		       &dd->dd_dmat);
2026	if (error != 0) {
2027		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
2028		return error;
2029	}
2030
2031	/* allocate descriptors */
2032	error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
2033	if (error != 0) {
2034		if_printf(ifp, "unable to create dmamap for %s descriptors, "
2035			"error %u\n", dd->dd_name, error);
2036		goto fail0;
2037	}
2038
2039	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
2040				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2041				 &dd->dd_dmamap);
2042	if (error != 0) {
2043		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
2044			"error %u\n", nbuf * ndesc, dd->dd_name, error);
2045		goto fail1;
2046	}
2047
2048	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
2049				dd->dd_desc, dd->dd_desc_len,
2050				mwl_load_cb, &dd->dd_desc_paddr,
2051				BUS_DMA_NOWAIT);
2052	if (error != 0) {
2053		if_printf(ifp, "unable to map %s descriptors, error %u\n",
2054			dd->dd_name, error);
2055		goto fail2;
2056	}
2057
2058	ds = dd->dd_desc;
2059	memset(ds, 0, dd->dd_desc_len);
2060	DPRINTF(sc, MWL_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
2061	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
2062	    (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
2063
2064	return 0;
2065fail2:
2066	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
2067fail1:
2068	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
2069fail0:
2070	bus_dma_tag_destroy(dd->dd_dmat);
2071	memset(dd, 0, sizeof(*dd));
2072	return error;
2073#undef DS2PHYS
2074}
2075
/*
 * Release the resources acquired by mwl_desc_setup: unload and free
 * the descriptor memory, then destroy the map and tag.  *dd is
 * cleared afterwards; callers test dd_desc_len != 0 before invoking
 * us, so clearing makes repeated/partial cleanup benign.
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	/* NB: teardown in reverse order of setup */
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	memset(dd, 0, sizeof(*dd));
}
2086
2087/*
2088 * Construct a tx q's free list.  The order of entries on
2089 * the list must reflect the physical layout of tx descriptors
2090 * because the firmware pre-fetches descriptors.
2091 *
2092 * XXX might be better to use indices into the buffer array.
2093 */
2094static void
2095mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2096{
2097	struct mwl_txbuf *bf;
2098	int i;
2099
2100	bf = txq->dma.dd_bufptr;
2101	STAILQ_INIT(&txq->free);
2102	for (i = 0; i < mwl_txbuf; i++, bf++)
2103		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2104	txq->nfree = i;
2105}
2106
/* Translate a descriptor's KVA to its bus address within the dma region. */
#define	DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2109
/*
 * Allocate tx descriptors (MWL_TXDESC per buffer) and the driver
 * tx buffer state for a queue, wire each buffer to its descriptor
 * block, and construct the free list.  Returns 0 or an errno; on
 * error the caller is expected to run mwl_txdma_cleanup (see
 * mwl_dma_setup) to reclaim partial state.
 */
static int
mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ifnet *ifp = sc->sc_ifp;
	int error, bsize, i;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;

	error = mwl_desc_setup(sc, "tx", &txq->dma,
			mwl_txbuf, sizeof(struct mwl_txbuf),
			MWL_TXDESC, sizeof(struct mwl_txdesc));
	if (error != 0)
		return error;

	/* allocate and setup tx buffers */
	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
	if (bf == NULL) {
		if_printf(ifp, "malloc of %u tx buffers failed\n",
			mwl_txbuf);
		return ENOMEM;
	}
	txq->dma.dd_bufptr = bf;

	/* point each buffer at its block of MWL_TXDESC descriptors */
	ds = txq->dma.dd_desc;
	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
				&bf->bf_dmamap);
		if (error != 0) {
			if_printf(ifp, "unable to create dmamap for tx "
				"buffer %u, error %u\n", i, error);
			/* NB: partial state reclaimed by mwl_txdma_cleanup */
			return error;
		}
	}
	mwl_txq_reset(sc, txq);
	return 0;
}
2149
2150static void
2151mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2152{
2153	struct mwl_txbuf *bf;
2154	int i;
2155
2156	bf = txq->dma.dd_bufptr;
2157	for (i = 0; i < mwl_txbuf; i++, bf++) {
2158		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2159		KASSERT(bf->bf_node == NULL, ("node on free list"));
2160		if (bf->bf_dmamap != NULL)
2161			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2162	}
2163	STAILQ_INIT(&txq->free);
2164	txq->nfree = 0;
2165	if (txq->dma.dd_bufptr != NULL) {
2166		free(txq->dma.dd_bufptr, M_MWLDEV);
2167		txq->dma.dd_bufptr = NULL;
2168	}
2169	if (txq->dma.dd_desc_len != 0)
2170		mwl_desc_cleanup(sc, &txq->dma);
2171}
2172
2173static int
2174mwl_rxdma_setup(struct mwl_softc *sc)
2175{
2176	struct ifnet *ifp = sc->sc_ifp;
2177	int error, jumbosize, bsize, i;
2178	struct mwl_rxbuf *bf;
2179	struct mwl_jumbo *rbuf;
2180	struct mwl_rxdesc *ds;
2181	caddr_t data;
2182
2183	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2184			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2185			1, sizeof(struct mwl_rxdesc));
2186	if (error != 0)
2187		return error;
2188
2189	/*
2190	 * Receive is done to a private pool of jumbo buffers.
2191	 * This allows us to attach to mbuf's and avoid re-mapping
2192	 * memory on each rx we post.  We allocate a large chunk
2193	 * of memory and manage it in the driver.  The mbuf free
2194	 * callback method is used to reclaim frames after sending
2195	 * them up the stack.  By default we allocate 2x the number of
2196	 * rx descriptors configured so we have some slop to hold
2197	 * us while frames are processed.
2198	 */
2199	if (mwl_rxbuf < 2*mwl_rxdesc) {
2200		if_printf(ifp,
2201		    "too few rx dma buffers (%d); increasing to %d\n",
2202		    mwl_rxbuf, 2*mwl_rxdesc);
2203		mwl_rxbuf = 2*mwl_rxdesc;
2204	}
2205	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2206	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2207
2208	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2209		       PAGE_SIZE, 0,		/* alignment, bounds */
2210		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2211		       BUS_SPACE_MAXADDR,	/* highaddr */
2212		       NULL, NULL,		/* filter, filterarg */
2213		       sc->sc_rxmemsize,	/* maxsize */
2214		       1,			/* nsegments */
2215		       sc->sc_rxmemsize,	/* maxsegsize */
2216		       BUS_DMA_ALLOCNOW,	/* flags */
2217		       NULL,			/* lockfunc */
2218		       NULL,			/* lockarg */
2219		       &sc->sc_rxdmat);
2220	error = bus_dmamap_create(sc->sc_rxdmat, BUS_DMA_NOWAIT, &sc->sc_rxmap);
2221	if (error != 0) {
2222		if_printf(ifp, "could not create rx DMA map\n");
2223		return error;
2224	}
2225
2226	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2227				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2228				 &sc->sc_rxmap);
2229	if (error != 0) {
2230		if_printf(ifp, "could not alloc %ju bytes of rx DMA memory\n",
2231		    (uintmax_t) sc->sc_rxmemsize);
2232		return error;
2233	}
2234
2235	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2236				sc->sc_rxmem, sc->sc_rxmemsize,
2237				mwl_load_cb, &sc->sc_rxmem_paddr,
2238				BUS_DMA_NOWAIT);
2239	if (error != 0) {
2240		if_printf(ifp, "could not load rx DMA map\n");
2241		return error;
2242	}
2243
2244	/*
2245	 * Allocate rx buffers and set them up.
2246	 */
2247	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2248	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2249	if (bf == NULL) {
2250		if_printf(ifp, "malloc of %u rx buffers failed\n", bsize);
2251		return error;
2252	}
2253	sc->sc_rxdma.dd_bufptr = bf;
2254
2255	STAILQ_INIT(&sc->sc_rxbuf);
2256	ds = sc->sc_rxdma.dd_desc;
2257	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2258		bf->bf_desc = ds;
2259		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2260		/* pre-assign dma buffer */
2261		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2262		/* NB: tail is intentional to preserve descriptor order */
2263		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2264	}
2265
2266	/*
2267	 * Place remainder of dma memory buffers on the free list.
2268	 */
2269	SLIST_INIT(&sc->sc_rxfree);
2270	for (; i < mwl_rxbuf; i++) {
2271		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2272		rbuf = MWL_JUMBO_DATA2BUF(data);
2273		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2274		sc->sc_nrxfree++;
2275	}
2276	MWL_RXFREE_INIT(sc);
2277	return 0;
2278}
2279#undef DS2PHYS
2280
/*
 * Reclaim all rx dma state: the jumbo buffer pool, the rx buffer
 * array, the descriptors, and the free-list lock.  Each piece is
 * checked so this is safe after a partially failed mwl_rxdma_setup.
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	/* NB: unload before freeing the backing memory */
	if (sc->sc_rxmap != NULL)
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxmap != NULL) {
		bus_dmamap_destroy(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmap = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
	MWL_RXFREE_DESTROY(sc);
}
2302
2303static int
2304mwl_dma_setup(struct mwl_softc *sc)
2305{
2306	int error, i;
2307
2308	error = mwl_rxdma_setup(sc);
2309	if (error != 0) {
2310		mwl_rxdma_cleanup(sc);
2311		return error;
2312	}
2313
2314	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2315		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2316		if (error != 0) {
2317			mwl_dma_cleanup(sc);
2318			return error;
2319		}
2320	}
2321	return 0;
2322}
2323
2324static void
2325mwl_dma_cleanup(struct mwl_softc *sc)
2326{
2327	int i;
2328
2329	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2330		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2331	mwl_rxdma_cleanup(sc);
2332}
2333
2334static struct ieee80211_node *
2335mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2336{
2337	struct ieee80211com *ic = vap->iv_ic;
2338	struct mwl_softc *sc = ic->ic_ifp->if_softc;
2339	const size_t space = sizeof(struct mwl_node);
2340	struct mwl_node *mn;
2341
2342	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2343	if (mn == NULL) {
2344		/* XXX stat+msg */
2345		return NULL;
2346	}
2347	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2348	return &mn->mn_node;
2349}
2350
/*
 * Reclaim driver state for a node.  If the node holds a station
 * id the corresponding firmware station db entry is deleted and
 * the id released before chaining to net80211's cleanup method.
 */
static void
mwl_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
        struct mwl_softc *sc = ic->ic_ifp->if_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
	    __func__, ni, ni->ni_ic, mn->mn_staid);

	if (mn->mn_staid != 0) {
		struct ieee80211vap *vap = ni->ni_vap;

		/* delete the f/w station db entry via the owning hvap */
		if (mn->mn_hvap != NULL) {
			if (vap->iv_opmode == IEEE80211_M_STA)
				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
			else
				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
		}
		/*
		 * NB: legacy WDS peer sta db entry is installed using
		 * the associate ap's hvap; use it again to delete it.
		 * XXX can vap be NULL?
		 */
		else if (vap->iv_opmode == IEEE80211_M_WDS &&
		    MWL_VAP(vap)->mv_ap_hvap != NULL)
			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
			    ni->ni_macaddr);
		/* return the station id to the pool */
		delstaid(sc, mn->mn_staid);
		mn->mn_staid = 0;
	}
	sc->sc_node_cleanup(ni);
}
2384
2385/*
2386 * Reclaim rx dma buffers from packets sitting on the ampdu
2387 * reorder queue for a station.  We replace buffers with a
2388 * system cluster (if available).
2389 */
2390static void
2391mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
2392{
2393#if 0
2394	int i, n, off;
2395	struct mbuf *m;
2396	void *cl;
2397
2398	n = rap->rxa_qframes;
2399	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
2400		m = rap->rxa_m[i];
2401		if (m == NULL)
2402			continue;
2403		n--;
2404		/* our dma buffers have a well-known free routine */
2405		if ((m->m_flags & M_EXT) == 0 ||
2406		    m->m_ext.ext_free != mwl_ext_free)
2407			continue;
2408		/*
2409		 * Try to allocate a cluster and move the data.
2410		 */
2411		off = m->m_data - m->m_ext.ext_buf;
2412		if (off + m->m_pkthdr.len > MCLBYTES) {
2413			/* XXX no AMSDU for now */
2414			continue;
2415		}
2416		cl = pool_cache_get_paddr(&mclpool_cache, 0,
2417		    &m->m_ext.ext_paddr);
2418		if (cl != NULL) {
2419			/*
2420			 * Copy the existing data to the cluster, remove
2421			 * the rx dma buffer, and attach the cluster in
2422			 * its place.  Note we preserve the offset to the
2423			 * data so frames being bridged can still prepend
2424			 * their headers without adding another mbuf.
2425			 */
2426			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
2427			MEXTREMOVE(m);
2428			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
2429			/* setup mbuf like _MCLGET does */
2430			m->m_flags |= M_CLUSTER | M_EXT_RW;
2431			_MOWNERREF(m, M_EXT | M_CLUSTER);
2432			/* NB: m_data is clobbered by MEXTADDR, adjust */
2433			m->m_data += off;
2434		}
2435	}
2436#endif
2437}
2438
2439/*
2440 * Callback to reclaim resources.  We first let the
2441 * net80211 layer do it's thing, then if we are still
2442 * blocked by a lack of rx dma buffers we walk the ampdu
2443 * reorder q's to reclaim buffers by copying to a system
2444 * cluster.
2445 */
2446static void
2447mwl_node_drain(struct ieee80211_node *ni)
2448{
2449	struct ieee80211com *ic = ni->ni_ic;
2450        struct mwl_softc *sc = ic->ic_ifp->if_softc;
2451	struct mwl_node *mn = MWL_NODE(ni);
2452
2453	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2454	    __func__, ni, ni->ni_vap, mn->mn_staid);
2455
2456	/* NB: call up first to age out ampdu q's */
2457	sc->sc_node_drain(ni);
2458
2459	/* XXX better to not check low water mark? */
2460	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2461	    (ni->ni_flags & IEEE80211_NODE_HT)) {
2462		uint8_t tid;
2463		/*
2464		 * Walk the reorder q and reclaim rx dma buffers by copying
2465		 * the packet contents into clusters.
2466		 */
2467		for (tid = 0; tid < WME_NUM_TID; tid++) {
2468			struct ieee80211_rx_ampdu *rap;
2469
2470			rap = &ni->ni_rx_ampdu[tid];
2471			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2472				continue;
2473			if (rap->rxa_qframes)
2474				mwl_ampdu_rxdma_reclaim(rap);
2475		}
2476	}
2477}
2478
/*
 * Return rssi and noise floor for a station.  The rssi comes
 * from net80211's accumulated value; the noise floor is a fixed
 * placeholder until per-node data is smoothed (see XXX below).
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2494
2495/*
2496 * Convert Hardware per-antenna rssi info to common format:
2497 * Let a1, a2, a3 represent the amplitudes per chain
2498 * Let amax represent max[a1, a2, a3]
2499 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2500 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2501 * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2502 * maintain some extra precision.
2503 *
2504 * Values are stored in .5 db format capped at 127.
2505 */
2506static void
2507mwl_node_getmimoinfo(const struct ieee80211_node *ni,
2508	struct ieee80211_mimo_info *mi)
2509{
2510#define	CVT(_dst, _src) do {						\
2511	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
2512	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
2513} while (0)
2514	static const int8_t logdbtbl[32] = {
2515	       0,   0,  24,  38,  48,  56,  62,  68,
2516	      72,  76,  80,  83,  86,  89,  92,  94,
2517	      96,  98, 100, 102, 104, 106, 107, 109,
2518	     110, 112, 113, 115, 116, 117, 118, 119
2519	};
2520	const struct mwl_node *mn = MWL_NODE_CONST(ni);
2521	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
2522	uint32_t rssi_max;
2523
2524	rssi_max = mn->mn_ai.rssi_a;
2525	if (mn->mn_ai.rssi_b > rssi_max)
2526		rssi_max = mn->mn_ai.rssi_b;
2527	if (mn->mn_ai.rssi_c > rssi_max)
2528		rssi_max = mn->mn_ai.rssi_c;
2529
2530	CVT(mi->rssi[0], mn->mn_ai.rssi_a);
2531	CVT(mi->rssi[1], mn->mn_ai.rssi_b);
2532	CVT(mi->rssi[2], mn->mn_ai.rssi_c);
2533
2534	mi->noise[0] = mn->mn_ai.nf_a;
2535	mi->noise[1] = mn->mn_ai.nf_b;
2536	mi->noise[2] = mn->mn_ai.nf_c;
2537#undef CVT
2538}
2539
2540static __inline void *
2541mwl_getrxdma(struct mwl_softc *sc)
2542{
2543	struct mwl_jumbo *buf;
2544	void *data;
2545
2546	/*
2547	 * Allocate from jumbo pool.
2548	 */
2549	MWL_RXFREE_LOCK(sc);
2550	buf = SLIST_FIRST(&sc->sc_rxfree);
2551	if (buf == NULL) {
2552		DPRINTF(sc, MWL_DEBUG_ANY,
2553		    "%s: out of rx dma buffers\n", __func__);
2554		sc->sc_stats.mst_rx_nodmabuf++;
2555		data = NULL;
2556	} else {
2557		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2558		sc->sc_nrxfree--;
2559		data = MWL_JUMBO_BUF2DATA(buf);
2560	}
2561	MWL_RXFREE_UNLOCK(sc);
2562	return data;
2563}
2564
2565static __inline void
2566mwl_putrxdma(struct mwl_softc *sc, void *data)
2567{
2568	struct mwl_jumbo *buf;
2569
2570	/* XXX bounds check data */
2571	MWL_RXFREE_LOCK(sc);
2572	buf = MWL_JUMBO_DATA2BUF(data);
2573	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2574	sc->sc_nrxfree++;
2575	MWL_RXFREE_UNLOCK(sc);
2576}
2577
/*
 * (Re)initialize an rx descriptor and hand ownership to the h/w.
 * If the buffer has no dma memory attached try to get some; on
 * failure the descriptor is marked so the firmware skips it and
 * ENOMEM is returned (the caller retries later, see mwl_rx_proc).
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	/* NB: ownership transfer must come last, after fields are valid */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2616
/*
 * Mbuf external-storage free callback: return the rx dma buffer
 * to the free pool and, if rx was throttled for lack of buffers,
 * re-enable rx interrupts once the pool refills past the low
 * water mark.
 */
static void
mwl_ext_free(void *data, void *arg)
{
	struct mwl_softc *sc = arg;

	/* XXX bounds check data */
	mwl_putrxdma(sc, data);
	/*
	 * If we were previously blocked by a lack of rx dma buffers
	 * check if we now have enough to restart rx interrupt handling.
	 * NB: we know we are called at splvm which is above splnet.
	 */
	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
		sc->sc_rxblocked = 0;
		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
	}
}
2634
/* 802.11 BlockAckReq (BAR) control frame header layout. */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];	/* frame control */
	u_int8_t	i_dur[2];	/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2642
2643/*
2644 * Like ieee80211_anyhdrsize, but handles BAR frames
2645 * specially so the logic below to piece the 802.11
2646 * header together works.
2647 */
2648static __inline int
2649mwl_anyhdrsize(const void *data)
2650{
2651	const struct ieee80211_frame *wh = data;
2652
2653	if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2654		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2655		case IEEE80211_FC0_SUBTYPE_CTS:
2656		case IEEE80211_FC0_SUBTYPE_ACK:
2657			return sizeof(struct ieee80211_frame_ack);
2658		case IEEE80211_FC0_SUBTYPE_BAR:
2659			return sizeof(struct mwl_frame_bar);
2660		}
2661		return sizeof(struct ieee80211_frame_min);
2662	} else
2663		return ieee80211_hdrsize(data);
2664}
2665
2666static void
2667mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2668{
2669	const struct ieee80211_frame *wh;
2670	struct ieee80211_node *ni;
2671
2672	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2673	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2674	if (ni != NULL) {
2675		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2676		ieee80211_free_node(ni);
2677	}
2678}
2679
2680/*
2681 * Convert hardware signal strength to rssi.  The value
2682 * provided by the device has the noise floor added in;
2683 * we need to compensate for this but we don't have that
2684 * so we use a fixed value.
2685 *
2686 * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2687 * offset is already set as part of the initial gain.  This
2688 * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2689 */
2690static __inline int
2691cvtrssi(uint8_t ssi)
2692{
2693	int rssi = (int) ssi + 8;
2694	/* XXX hack guess until we have a real noise floor */
2695	rssi = 2*(87 - rssi);	/* NB: .5 dBm units */
2696	return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi);
2697}
2698
/*
 * Rx processing task.  Walk the rx descriptor ring reclaiming
 * frames from the firmware: validate status, attach each dma
 * buffer to an mbuf (replacing it with a fresh buffer from the
 * pool), reconstruct the 802.11 header in front of the payload,
 * and dispatch up to net80211.  At most mwl_rxquota frames are
 * handled per invocation; if the dma buffer pool runs dry rx
 * interrupts are disabled until mwl_ext_free refills the pool.
 */
static void
mwl_rx_proc(void *arg, int npending)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((((const struct ieee80211_frame *)wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct mwl_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_qosframe_addr4 *wh4;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX */
	/* resume from where the previous invocation stopped */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* stop at the first descriptor still owned by the dma engine */
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			ifp->if_ierrors++;
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free,
		    data, sc, 0, EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		m->m_pkthdr.rcvif = ifp;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh)) {
			if (IEEE80211_DIR_DSTODS(wh)) {
				wh4 = mtod(m,
				    struct ieee80211_qosframe_addr4*);
				*(uint16_t *)wh4->i_qos = ds->QosCtrl;
			} else {
				*(uint16_t *)wh->i_qos = ds->QosCtrl;
			}
		}
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_WEP | IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		ifp->if_ipackets++;

		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	/* remember where to resume next time */
	sc->sc_rxnext = bf;

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
	    !IFQ_IS_EMPTY(&ifp->if_snd)) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(ifp);
	}
#undef IEEE80211_DIR_DSTODS
}
2934
/*
 * Initialize a tx queue: set up the lock and link every tx
 * descriptor's pPhysNext to the next buffer's descriptor,
 * wrapping the last back to the first so the firmware sees a
 * circular chain.
 */
static void
mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
{
	struct mwl_txbuf *bf, *bn;
	struct mwl_txdesc *ds;

	MWL_TXQ_LOCK_INIT(sc, txq);
	txq->qnum = qnum;
	txq->txpri = 0;	/* XXX */
#if 0
	/* NB: q setup by mwl_txdma_setup XXX */
	STAILQ_INIT(&txq->free);
#endif
	STAILQ_FOREACH(bf, &txq->free, bf_list) {
		bf->bf_txq = txq;

		ds = bf->bf_desc;
		bn = STAILQ_NEXT(bf, bf_list);
		/* last descriptor wraps back to the head of the list */
		if (bn == NULL)
			bn = STAILQ_FIRST(&txq->free);
		ds->pPhysNext = htole32(bn->bf_daddr);
	}
	STAILQ_INIT(&txq->active);
}
2959
2960/*
2961 * Setup a hardware data transmit queue for the specified
2962 * access control.  We record the mapping from ac's
2963 * to h/w queues for use by mwl_tx_start.
2964 */
2965static int
2966mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2967{
2968#define	N(a)	(sizeof(a)/sizeof(a[0]))
2969	struct mwl_txq *txq;
2970
2971	if (ac >= N(sc->sc_ac2q)) {
2972		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2973			ac, N(sc->sc_ac2q));
2974		return 0;
2975	}
2976	if (mvtype >= MWL_NUM_TX_QUEUES) {
2977		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2978			mvtype, MWL_NUM_TX_QUEUES);
2979		return 0;
2980	}
2981	txq = &sc->sc_txq[mvtype];
2982	mwl_txq_init(sc, txq, mvtype);
2983	sc->sc_ac2q[ac] = txq;
2984	return 1;
2985#undef N
2986}
2987
2988/*
2989 * Update WME parameters for a transmit queue.
2990 */
2991static int
2992mwl_txq_update(struct mwl_softc *sc, int ac)
2993{
2994#define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
2995	struct ifnet *ifp = sc->sc_ifp;
2996	struct ieee80211com *ic = ifp->if_l2com;
2997	struct mwl_txq *txq = sc->sc_ac2q[ac];
2998	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
2999	struct mwl_hal *mh = sc->sc_mh;
3000	int aifs, cwmin, cwmax, txoplim;
3001
3002	aifs = wmep->wmep_aifsn;
3003	/* XXX in sta mode need to pass log values for cwmin/max */
3004	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
3005	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
3006	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */
3007
3008	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
3009		device_printf(sc->sc_dev, "unable to update hardware queue "
3010			"parameters for %s traffic!\n",
3011			ieee80211_wme_acnames[ac]);
3012		return 0;
3013	}
3014	return 1;
3015#undef MWL_EXPONENT_TO_VALUE
3016}
3017
3018/*
3019 * Callback from the 802.11 layer to update WME parameters.
3020 */
3021static int
3022mwl_wme_update(struct ieee80211com *ic)
3023{
3024	struct mwl_softc *sc = ic->ic_ifp->if_softc;
3025
3026	return !mwl_txq_update(sc, WME_AC_BE) ||
3027	    !mwl_txq_update(sc, WME_AC_BK) ||
3028	    !mwl_txq_update(sc, WME_AC_VI) ||
3029	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
3030}
3031
3032/*
3033 * Reclaim resources for a setup queue.
3034 */
static void
mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	/* XXX hal work? */
	/* NB: only the per-queue lock needs teardown here */
	MWL_TXQ_LOCK_DESTROY(txq);
}
3041
3042/*
3043 * Reclaim all tx queue resources.
3044 */
3045static void
3046mwl_tx_cleanup(struct mwl_softc *sc)
3047{
3048	int i;
3049
3050	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3051		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
3052}
3053
/*
 * DMA map an outbound mbuf chain for bf.  On success the segment
 * list is left in bf->bf_segs/bf->bf_nseg, the map is synced for a
 * host->device transfer and bf->bf_m records the (possibly
 * replaced) mbuf.  The mbuf is always reclaimed on error.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		/* NB: flag the overflow; handled by the linearize path below */
		bf->bf_nseg = MWL_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_DONTWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_DONTWAIT);
#endif
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* NB: retry the load with the compacted chain */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3115
/*
 * Map a legacy IEEE rate (in 500Kb/s units) to the firmware's
 * rate index; unknown rates map to index 0.
 */
static __inline int
mwl_cvtlegacyrate(int rate)
{
	static const int rates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	int ix;

	for (ix = 0; ix < (int)(sizeof(rates)/sizeof(rates[0])); ix++)
		if (rates[ix] == rate)
			return ix;
	return 0;
}
3136
3137/*
3138 * Calculate fixed tx rate information per client state;
3139 * this value is suitable for writing to the Format field
3140 * of a tx descriptor.
3141 */
3142static uint16_t
3143mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
3144{
3145	uint16_t fmt;
3146
3147	fmt = SM(3, EAGLE_TXD_ANTENNA)
3148	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
3149		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
3150	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
3151		fmt |= EAGLE_TXD_FORMAT_HT
3152		    /* NB: 0x80 implicitly stripped from ucastrate */
3153		    | SM(rate, EAGLE_TXD_RATE);
3154		/* XXX short/long GI may be wrong; re-check */
3155		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
3156			fmt |= EAGLE_TXD_CHW_40
3157			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
3158			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3159		} else {
3160			fmt |= EAGLE_TXD_CHW_20
3161			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
3162			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3163		}
3164	} else {			/* legacy rate */
3165		fmt |= EAGLE_TXD_FORMAT_LEGACY
3166		    | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
3167		    | EAGLE_TXD_CHW_20
3168		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
3169		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
3170			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
3171	}
3172	return fmt;
3173}
3174
/*
 * Form a tx descriptor for bf/m0 and hand the frame to the firmware.
 * The caller supplies a committed tx buffer and a held node reference;
 * on success (0) both are owned by the completion path, on error the
 * mbuf has been freed and the caller retains bf and the node ref.
 */
static int
mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
    struct mbuf *m0)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, iswep, ismcast;
	int hdrlen, copyhdrlen, pktlen;
	struct mwl_txdesc *ds;
	struct mwl_txq *txq;
	struct ieee80211_frame *wh;
	struct mwltxrec *tr;
	struct mwl_node *mn;
	uint16_t qos;
#if MWL_TXDESC > 1
	int i;
#endif

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	/* NOTE(review): copyhdrlen is computed but not referenced below
	 * in this revision -- confirm before removing */
	copyhdrlen = hdrlen;
	pktlen = m0->m_pkthdr.len;
	/*
	 * Extract the QoS control field, if present; in the 4-address
	 * (DSTODS) form it sits past the portion of the header copied.
	 */
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		if (IEEE80211_DIR_DSTODS(wh)) {
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
			copyhdrlen -= sizeof(qos);
		} else
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe *) wh)->i_qos);
	} else
		qos = 0;

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 *
		 * NB: we do this even though the firmware will ignore
		 *     what we've done for WEP and TKIP as we need the
		 *     ExtIV filled in for CCMP and this also adjusts
		 *     the headers which simplifies our work below.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			m_freem(m0);
			return EIO;
		}
		/*
		 * Adjust the packet length for the crypto additions
		 * done during encap and any other bits that the f/w
		 * will add later on.
		 */
		cip = k->wk_cipher;
		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;	/* XXX */
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
#if 0
		sc->sc_tx_th.wt_rate = ds->DataRate;
#endif
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}
	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 * We are assured the mbuf has sufficient headroom to
	 * prepend in-place by the setup of ic_headroom in
	 * mwl_attach.
	 */
	if (hdrlen < sizeof(struct mwltxrec)) {
		const int space = sizeof(struct mwltxrec) - hdrlen;
		if (M_LEADINGSPACE(m0) < space) {
			/* NB: should never happen */
			device_printf(sc->sc_dev,
			    "not enough headroom, need %d found %zd, "
			    "m_flags 0x%x m_len %d\n",
			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
			ieee80211_dump_pkt(ic,
			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
			m_freem(m0);
			sc->sc_stats.mst_tx_noheadroom++;
			return EIO;
		}
		/* NB: leading space was verified above, so this adjusts
		 * the existing mbuf in place rather than allocating */
		M_PREPEND(m0, space, M_NOWAIT);
	}
	tr = mtod(m0, struct mwltxrec *);
	if (wh != (struct ieee80211_frame *) &tr->wh)
		ovbcopy(wh, &tr->wh, hdrlen);
	/*
	 * Note: the "firmware length" is actually the length
	 * of the fully formed "802.11 payload".  That is, it's
	 * everything except for the 802.11 header.  In particular
	 * this includes all crypto material including the MIC!
	 */
	tr->fwlen = htole16(pktlen - hdrlen);

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = mwl_tx_dmasetup(sc, bf, m0);
	if (error != 0) {
		/* NB: stat collected in mwl_tx_dmasetup */
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: unable to setup dma\n", __func__);
		return error;
	}
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	tr = mtod(m0, struct mwltxrec *);
	wh = (struct ieee80211_frame *)&tr->wh;

	/*
	 * Formulate tx descriptor.
	 */
	ds = bf->bf_desc;
	txq = bf->bf_txq;

	ds->QosCtrl = qos;			/* NB: already little-endian */
#if MWL_TXDESC == 1
	/*
	 * NB: multiframes should be zero because the descriptors
	 *     are initialized to zero.  This should handle the case
	 *     where the driver is built with MWL_TXDESC=1 but we are
	 *     using firmware with multi-segment support.
	 */
	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
#else
	ds->multiframes = htole32(bf->bf_nseg);
	ds->PktLen = htole16(m0->m_pkthdr.len);
	for (i = 0; i < bf->bf_nseg; i++) {
		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
	}
#endif
	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
	ds->Format = 0;
	ds->pad = 0;
	ds->ack_wcb_addr = 0;

	mn = MWL_NODE(ni);
	/*
	 * Select transmit rate.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		sc->sc_stats.mst_tx_mgmt++;
		/* fall thru... */
	case IEEE80211_FC0_TYPE_CTL:
		/* NB: assign to BE q to avoid bursting */
		ds->TxPriority = MWL_WME_AC_BE;
		break;
	case IEEE80211_FC0_TYPE_DATA:
		if (!ismcast) {
			const struct ieee80211_txparam *tp = ni->ni_txparms;
			/*
			 * EAPOL frames get forced to a fixed rate and w/o
			 * aggregation; otherwise check for any fixed rate
			 * for the client (may depend on association state).
			 */
			if (m0->m_flags & M_EAPOL) {
				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
				ds->Format = mvp->mv_eapolformat;
				ds->pad = htole16(
				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
				/* XXX pre-calculate per node */
				ds->Format = htole16(
				    mwl_calcformat(tp->ucastrate, ni));
				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
			}
			/*
			 * Route the frame to the queue of any matching
			 * block-ack stream, else to the ac's default queue.
			 */
			/* NB: EAPOL frames will never have qos set */
			if (qos == 0)
				ds->TxPriority = txq->qnum;
#if MWL_MAXBA > 3
			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
				ds->TxPriority = mn->mn_ba[3].txq;
#endif
#if MWL_MAXBA > 2
			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
				ds->TxPriority = mn->mn_ba[2].txq;
#endif
#if MWL_MAXBA > 1
			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
				ds->TxPriority = mn->mn_ba[1].txq;
#endif
#if MWL_MAXBA > 0
			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
				ds->TxPriority = mn->mn_ba[0].txq;
#endif
			else
				ds->TxPriority = txq->qnum;
		} else
			ds->TxPriority = txq->qnum;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		sc->sc_stats.mst_tx_badframetype++;
		m_freem(m0);
		return EIO;
	}

	if (IFF_DUMPPKTS_XMIT(sc))
		ieee80211_dump_pkt(ic,
		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);

	/* Hand ownership to the firmware and queue on the active list. */
	MWL_TXQ_LOCK(txq);
	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ifp->if_opackets++;
	sc->sc_tx_timer = 5;
	MWL_TXQ_UNLOCK(txq);

	return 0;
#undef	IEEE80211_DIR_DSTODS
}
3423
/*
 * Map a firmware legacy rate index back to the IEEE rate
 * (500Kb/s units); out-of-range indices yield 0.
 */
static __inline int
mwl_cvtlegacyrix(int rix)
{
	static const int ieeerates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	const int nrates = (int)(sizeof(ieeerates) / sizeof(ieeerates[0]));

	if (rix < 0 || rix >= nrates)
		return 0;
	return ieeerates[rix];
}
3433
3434/*
3435 * Process completed xmit descriptors from the specified queue.
3436 */
3437static int
3438mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
3439{
3440#define	EAGLE_TXD_STATUS_MCAST \
3441	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
3442	struct ifnet *ifp = sc->sc_ifp;
3443	struct ieee80211com *ic = ifp->if_l2com;
3444	struct mwl_txbuf *bf;
3445	struct mwl_txdesc *ds;
3446	struct ieee80211_node *ni;
3447	struct mwl_node *an;
3448	int nreaped;
3449	uint32_t status;
3450
3451	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
3452	for (nreaped = 0;; nreaped++) {
3453		MWL_TXQ_LOCK(txq);
3454		bf = STAILQ_FIRST(&txq->active);
3455		if (bf == NULL) {
3456			MWL_TXQ_UNLOCK(txq);
3457			break;
3458		}
3459		ds = bf->bf_desc;
3460		MWL_TXDESC_SYNC(txq, ds,
3461		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3462		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
3463			MWL_TXQ_UNLOCK(txq);
3464			break;
3465		}
3466		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
3467		MWL_TXQ_UNLOCK(txq);
3468
3469#ifdef MWL_DEBUG
3470		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
3471			mwl_printtxbuf(bf, txq->qnum, nreaped);
3472#endif
3473		ni = bf->bf_node;
3474		if (ni != NULL) {
3475			an = MWL_NODE(ni);
3476			status = le32toh(ds->Status);
3477			if (status & EAGLE_TXD_STATUS_OK) {
3478				uint16_t Format = le16toh(ds->Format);
3479				uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);
3480
3481				sc->sc_stats.mst_ant_tx[txant]++;
3482				if (status & EAGLE_TXD_STATUS_OK_RETRY)
3483					sc->sc_stats.mst_tx_retries++;
3484				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
3485					sc->sc_stats.mst_tx_mretries++;
3486				if (txq->qnum >= MWL_WME_AC_VO)
3487					ic->ic_wme.wme_hipri_traffic++;
3488				ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
3489				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
3490					ni->ni_txrate = mwl_cvtlegacyrix(
3491					    ni->ni_txrate);
3492				} else
3493					ni->ni_txrate |= IEEE80211_RATE_MCS;
3494				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
3495			} else {
3496				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
3497					sc->sc_stats.mst_tx_linkerror++;
3498				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
3499					sc->sc_stats.mst_tx_xretries++;
3500				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
3501					sc->sc_stats.mst_tx_aging++;
3502				if (bf->bf_m->m_flags & M_FF)
3503					sc->sc_stats.mst_ff_txerr++;
3504			}
3505			/*
3506			 * Do any tx complete callback.  Note this must
3507			 * be done before releasing the node reference.
3508			 * XXX no way to figure out if frame was ACK'd
3509			 */
3510			if (bf->bf_m->m_flags & M_TXCB) {
3511				/* XXX strip fw len in case header inspected */
3512				m_adj(bf->bf_m, sizeof(uint16_t));
3513				ieee80211_process_callback(ni, bf->bf_m,
3514					(status & EAGLE_TXD_STATUS_OK) == 0);
3515			}
3516			/*
3517			 * Reclaim reference to node.
3518			 *
3519			 * NB: the node may be reclaimed here if, for example
3520			 *     this is a DEAUTH message that was sent and the
3521			 *     node was timed out due to inactivity.
3522			 */
3523			ieee80211_free_node(ni);
3524		}
3525		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);
3526
3527		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3528		    BUS_DMASYNC_POSTWRITE);
3529		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3530		m_freem(bf->bf_m);
3531
3532		mwl_puttxbuf_tail(txq, bf);
3533	}
3534	return nreaped;
3535#undef EAGLE_TXD_STATUS_MCAST
3536}
3537
3538/*
3539 * Deferred processing of transmit interrupt; special-cased
3540 * for four hardware queues, 0-3.
3541 */
3542static void
3543mwl_tx_proc(void *arg, int npending)
3544{
3545	struct mwl_softc *sc = arg;
3546	struct ifnet *ifp = sc->sc_ifp;
3547	int nreaped;
3548
3549	/*
3550	 * Process each active queue.
3551	 */
3552	nreaped = 0;
3553	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3554		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3555	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3556		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3557	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3558		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3559	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3560		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3561
3562	if (nreaped != 0) {
3563		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3564		sc->sc_tx_timer = 0;
3565		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
3566			/* NB: kick fw; the tx thread may have been preempted */
3567			mwl_hal_txstart(sc->sc_mh, 0);
3568			mwl_start(ifp);
3569		}
3570	}
3571}
3572
/*
 * Drain every frame pending on a single tx queue: unload the DMA
 * mapping, release the node reference taken at transmit time, free
 * the mbuf and return the buffer to the free list.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ifnet *ifp = sc->sc_ifp;
			struct ieee80211com *ic = ifp->if_l2com;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3617
3618/*
3619 * Drain the transmit queues and reclaim resources.
3620 */
3621static void
3622mwl_draintxq(struct mwl_softc *sc)
3623{
3624	struct ifnet *ifp = sc->sc_ifp;
3625	int i;
3626
3627	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3628		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3629	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3630	sc->sc_tx_timer = 0;
3631}
3632
3633#ifdef MWL_DIAGAPI
3634/*
3635 * Reset the transmit queues to a pristine state after a fw download.
3636 */
3637static void
3638mwl_resettxq(struct mwl_softc *sc)
3639{
3640	int i;
3641
3642	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3643		mwl_txq_reset(sc, &sc->sc_txq[i]);
3644}
3645#endif /* MWL_DIAGAPI */
3646
3647/*
3648 * Clear the transmit queues of any frames submitted for the
3649 * specified vap.  This is done when the vap is deleted so we
3650 * don't potentially reference the vap after it is gone.
3651 * Note we cannot remove the frames; we only reclaim the node
3652 * reference.
3653 */
3654static void
3655mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3656{
3657	struct mwl_txq *txq;
3658	struct mwl_txbuf *bf;
3659	int i;
3660
3661	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3662		txq = &sc->sc_txq[i];
3663		MWL_TXQ_LOCK(txq);
3664		STAILQ_FOREACH(bf, &txq->active, bf_list) {
3665			struct ieee80211_node *ni = bf->bf_node;
3666			if (ni != NULL && ni->ni_vap == vap) {
3667				bf->bf_node = NULL;
3668				ieee80211_free_node(ni);
3669			}
3670		}
3671		MWL_TXQ_UNLOCK(txq);
3672	}
3673}
3674
3675static int
3676mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
3677	const uint8_t *frm, const uint8_t *efrm)
3678{
3679	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3680	const struct ieee80211_action *ia;
3681
3682	ia = (const struct ieee80211_action *) frm;
3683	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3684	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3685		const struct ieee80211_action_ht_mimopowersave *mps =
3686		    (const struct ieee80211_action_ht_mimopowersave *) ia;
3687
3688		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3689		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3690		    MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
3691		return 0;
3692	} else
3693		return sc->sc_recv_action(ni, wh, frm, efrm);
3694}
3695
/*
 * Hook for net80211's ADDBA request path: reserve a firmware BA
 * stream slot for this node before the request goes out so a
 * positive response can be honored.  Returns 0 (refuse the request)
 * when no slot or stream is available, otherwise chains to the
 * saved net80211 method.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, WME_AC_TO_TID(tap->txa_ac), ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3767
/*
 * Hook for net80211's ADDBA response path: on a successful response
 * commit the pre-allocated BA stream to the firmware, otherwise
 * destroy it and return the slot.  Always chains to the saved
 * net80211 method; the firmware sends the BAR itself.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, AC %d\n",
		    __func__, tap->txa_ac);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d AC %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_ac, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_ac, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d AC %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_ac, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3834
3835static void
3836mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3837{
3838	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3839	struct mwl_bastate *bas;
3840
3841	bas = tap->txa_private;
3842	if (bas != NULL) {
3843		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3844		    __func__, bas->bastream);
3845		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3846		mwl_bastream_free(bas);
3847		tap->txa_private = NULL;
3848	}
3849	sc->sc_addba_stop(ni, tap);
3850}
3851
3852/*
3853 * Setup the rx data structures.  This should only be
3854 * done once or we may get out of sync with the firmware.
3855 */
static int
mwl_startrecv(struct mwl_softc *sc)
{
	if (!sc->sc_recvsetup) {
		struct mwl_rxbuf *bf, *prev;
		struct mwl_rxdesc *ds;

		/*
		 * Initialize each rx buffer and chain the descriptors
		 * together; the final one is linked back to the first
		 * so the list forms a ring.
		 */
		prev = NULL;
		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			int error = mwl_rxbuf_init(sc, bf);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_RECV,
					"%s: mwl_rxbuf_init failed %d\n",
					__func__, error);
				return error;
			}
			if (prev != NULL) {
				ds = prev->bf_desc;
				ds->pPhysNext = htole32(bf->bf_daddr);
			}
			prev = bf;
		}
		if (prev != NULL) {
			/* close the ring: last descriptor points at the head */
			ds = prev->bf_desc;
			ds->pPhysNext =
			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
		}
		/* NB: do this once only; redoing it would desync the f/w */
		sc->sc_recvsetup = 1;
	}
	mwl_mode_init(sc);		/* set filters, etc. */
	return 0;
}
3888
3889static MWL_HAL_APMODE
3890mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3891{
3892	MWL_HAL_APMODE mode;
3893
3894	if (IEEE80211_IS_CHAN_HT(chan)) {
3895		if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3896			mode = AP_MODE_N_ONLY;
3897		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3898			mode = AP_MODE_AandN;
3899		else if (vap->iv_flags & IEEE80211_F_PUREG)
3900			mode = AP_MODE_GandN;
3901		else
3902			mode = AP_MODE_BandGandN;
3903	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3904		if (vap->iv_flags & IEEE80211_F_PUREG)
3905			mode = AP_MODE_G_ONLY;
3906		else
3907			mode = AP_MODE_MIXED;
3908	} else if (IEEE80211_IS_CHAN_B(chan))
3909		mode = AP_MODE_B_ONLY;
3910	else if (IEEE80211_IS_CHAN_A(chan))
3911		mode = AP_MODE_A_ONLY;
3912	else
3913		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3914	return mode;
3915}
3916
static int
mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	/* Map the vap/channel configuration to a HAL AP mode and push it. */
	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
}
3923
3924/*
3925 * Set/change channels.
3926 */
static int
mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
{
	struct mwl_hal *mh = sc->sc_mh;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	MWL_HAL_CHANNEL hchan;
	int maxtxpow;

	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
	    __func__, chan->ic_freq, chan->ic_flags);

	/*
	 * Convert to a HAL channel description with
	 * the flags constrained to reflect the current
	 * operating mode.
	 */
	mwl_mapchan(&hchan, chan);
	mwl_hal_intrset(mh, 0);		/* disable interrupts */
#if 0
	mwl_draintxq(sc);		/* clear pending tx frames */
#endif
	mwl_hal_setchannel(mh, &hchan);
	/*
	 * Tx power is cap'd by the regulatory setting and
	 * possibly a user-set limit.  We pass the min of
	 * these to the hal to apply them to the cal data
	 * for this channel.
	 * XXX min bound?
	 */
	/* NOTE(review): the 2x scale suggests ic_txpowlimit is in
	 * half-dBm while ic_maxregpower is dBm -- confirm */
	maxtxpow = 2*chan->ic_maxregpower;
	if (maxtxpow > ic->ic_txpowlimit)
		maxtxpow = ic->ic_txpowlimit;
	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
	/* NB: potentially change mcast/mgt rates */
	mwl_setcurchanrates(sc);

	/*
	 * Update internal state.
	 */
	/* record channel info in the radiotap headers for tx/rx capture */
	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
	if (IEEE80211_IS_CHAN_A(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
	} else {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
	}
	sc->sc_curchan = hchan;
	mwl_hal_intrset(mh, sc->sc_imask);

	return 0;
}
3984
static void
mwl_scan_start(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;

	/* No hardware action needed here; just trace the event. */
	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3993
static void
mwl_scan_end(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;

	/* No hardware action needed here; just trace the event. */
	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
4002
4003static void
4004mwl_set_channel(struct ieee80211com *ic)
4005{
4006	struct ifnet *ifp = ic->ic_ifp;
4007	struct mwl_softc *sc = ifp->if_softc;
4008
4009	(void) mwl_chan_set(sc, ic->ic_curchan);
4010}
4011
4012/*
4013 * Handle a channel switch request.  We inform the firmware
4014 * and mark the global state to suppress various actions.
4015 * NB: we issue only one request to the fw; we may be called
4016 * multiple times if there are multiple vap's.
4017 */
4018static void
4019mwl_startcsa(struct ieee80211vap *vap)
4020{
4021	struct ieee80211com *ic = vap->iv_ic;
4022	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4023	MWL_HAL_CHANNEL hchan;
4024
4025	if (sc->sc_csapending)
4026		return;
4027
4028	mwl_mapchan(&hchan, ic->ic_csa_newchan);
4029	/* 1 =>'s quiet channel */
4030	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
4031	sc->sc_csapending = 1;
4032}
4033
4034/*
4035 * Plumb any static WEP key for the station.  This is
4036 * necessary as we must propagate the key from the
4037 * global key table of the vap to each sta db entry.
4038 */
4039static void
4040mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4041{
4042	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
4043		IEEE80211_F_PRIVACY &&
4044	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
4045	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
4046		(void) mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac);
4047}
4048
/*
 * Create/update the f/w station db entry for a peer and plumb
 * any static WEP key so crypto works for the new association.
 */
static int
mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_hal_vap *hvap;
	int error;

	if (vap->iv_opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vap's do not have a f/w vap; instead they piggyback
		 * on an AP vap and we must install the sta db entry and
		 * crypto state using that AP's handle (the WDS vap has none).
		 */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	} else
		hvap = MWL_VAP(vap)->mv_hvap;
	/* NB: QoS/HT flags and the WME info byte are passed host-side */
	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
	    aid, staid, pi,
	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
	if (error == 0) {
		/*
		 * Setup security for this station.  For sta mode this is
		 * needed even though do the same thing on transition to
		 * AUTH state because the call to mwl_hal_newstation
		 * clobbers the crypto state we setup.
		 */
		mwl_setanywepkey(vap, ni->ni_macaddr);
	}
	return error;
#undef WME
}
4082
4083static void
4084mwl_setglobalkeys(struct ieee80211vap *vap)
4085{
4086	struct ieee80211_key *wk;
4087
4088	wk = &vap->iv_nw_keys[0];
4089	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
4090		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
4091			(void) mwl_key_set(vap, wk, vap->iv_myaddr);
4092}
4093
4094/*
4095 * Convert a legacy rate set to a firmware bitmask.
4096 */
4097static uint32_t
4098get_rate_bitmap(const struct ieee80211_rateset *rs)
4099{
4100	uint32_t rates;
4101	int i;
4102
4103	rates = 0;
4104	for (i = 0; i < rs->rs_nrates; i++)
4105		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
4106		case 2:	  rates |= 0x001; break;
4107		case 4:	  rates |= 0x002; break;
4108		case 11:  rates |= 0x004; break;
4109		case 22:  rates |= 0x008; break;
4110		case 44:  rates |= 0x010; break;
4111		case 12:  rates |= 0x020; break;
4112		case 18:  rates |= 0x040; break;
4113		case 24:  rates |= 0x080; break;
4114		case 36:  rates |= 0x100; break;
4115		case 48:  rates |= 0x200; break;
4116		case 72:  rates |= 0x400; break;
4117		case 96:  rates |= 0x800; break;
4118		case 108: rates |= 0x1000; break;
4119		}
4120	return rates;
4121}
4122
4123/*
4124 * Construct an HT firmware bitmask from an HT rate set.
4125 */
4126static uint32_t
4127get_htrate_bitmap(const struct ieee80211_htrateset *rs)
4128{
4129	uint32_t rates;
4130	int i;
4131
4132	rates = 0;
4133	for (i = 0; i < rs->rs_nrates; i++) {
4134		if (rs->rs_rates[i] < 16)
4135			rates |= 1<<rs->rs_rates[i];
4136	}
4137	return rates;
4138}
4139
4140/*
4141 * Craft station database entry for station.
4142 * NB: use host byte order here, the hal handles byte swapping.
4143 */
static MWL_HAL_PEERINFO *
mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
{
	const struct ieee80211vap *vap = ni->ni_vap;

	/* start from a clean slate; legacy fields are always filled in */
	memset(pi, 0, sizeof(*pi));
	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
	pi->CapInfo = ni->ni_capinfo;
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* HT capabilities, etc */
		pi->HTCapabilitiesInfo = ni->ni_htcap;
		/* XXX pi.HTCapabilitiesInfo */
	        pi->MacHTParamInfo = ni->ni_htparam;
		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
		pi->AddHtInfo.OpMode = ni->ni_htopmode;
		pi->AddHtInfo.stbc = ni->ni_htstbc;

		/* constrain according to local configuration */
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
		/* peer not operating on a 40MHz channel => strip 40MHz cap */
		if (ni->ni_chw != 40)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
	}
	return pi;
}
4173
4174/*
4175 * Re-create the local sta db entry for a vap to ensure
4176 * up to date WME state is pushed to the firmware.  Because
4177 * this resets crypto state this must be followed by a
4178 * reload of any keys in the global key table.
4179 */
static int
mwl_localstadb(struct ieee80211vap *vap)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211_node *bss;
	MWL_HAL_PEERINFO pi;
	int error;

	switch (vap->iv_opmode) {
	case IEEE80211_M_STA:
		bss = vap->iv_bss;
		/* NB: peer info is supplied only once associated (RUN) */
		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
		    vap->iv_state == IEEE80211_S_RUN ?
			mkpeerinfo(&pi, bss) : NULL,
		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
		    bss->ni_ies.wme_ie != NULL ?
			WME(bss->ni_ies.wme_ie)->wme_info : 0);
		if (error == 0)
			/* reload global keys; newstation clobbered them */
			mwl_setglobalkeys(vap);
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
		if (error == 0)
			/* reload global keys; newstation clobbered them */
			mwl_setglobalkeys(vap);
		break;
	default:
		/* other opmodes need no local sta db entry */
		error = 0;
		break;
	}
	return error;
#undef WME
}
4215
/*
 * 802.11 state machine hook.  Performs device work around the
 * net80211 state transition: radar state teardown and pre-state
 * firmware pokes beforehand, then (after the parent method runs
 * and iv_bss is current) per-opmode RUN-state setup.
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    vap->iv_ifp->if_xname, __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	/* stop the station-age timer; re-armed below on entry to RUN */
	callout_stop(&sc->sc_timer);
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    vap->iv_ifp->if_xname, __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    vap->iv_ifp->if_xname, __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    vap->iv_ifp->if_xname, __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		/*
		 * NOTE(review): this decrement fires on every transition to
		 * a non-RUN/non-SLEEP state for a DWDS vap while the
		 * matching increment is guarded to run once; verify
		 * sc_ndwdsvaps cannot underflow across repeated transitions.
		 */
		mwl_hal_setdwds(mh, 0);
bad:
	return error;
}
4369
4370/*
4371 * Manage station id's; these are separate from AID's
4372 * as AID's may have values out of the range of possible
4373 * station id's acceptable to the firmware.
4374 */
4375static int
4376allocstaid(struct mwl_softc *sc, int aid)
4377{
4378	int staid;
4379
4380	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
4381		/* NB: don't use 0 */
4382		for (staid = 1; staid < MWL_MAXSTAID; staid++)
4383			if (isclr(sc->sc_staid, staid))
4384				break;
4385	} else
4386		staid = aid;
4387	setbit(sc->sc_staid, staid);
4388	return staid;
4389}
4390
/*
 * Return a station id to the free pool.
 */
static void
delstaid(struct mwl_softc *sc, int staid)
{
	clrbit(sc->sc_staid, staid);
}
4396
4397/*
4398 * Setup driver-specific state for a newly associated node.
4399 * Note that we're called also on a re-associate, the isnew
4400 * param tells us if this is the first time or not.
4401 */
4402static void
4403mwl_newassoc(struct ieee80211_node *ni, int isnew)
4404{
4405	struct ieee80211vap *vap = ni->ni_vap;
4406        struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4407	struct mwl_node *mn = MWL_NODE(ni);
4408	MWL_HAL_PEERINFO pi;
4409	uint16_t aid;
4410	int error;
4411
4412	aid = IEEE80211_AID(ni->ni_associd);
4413	if (isnew) {
4414		mn->mn_staid = allocstaid(sc, aid);
4415		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4416	} else {
4417		mn = MWL_NODE(ni);
4418		/* XXX reset BA stream? */
4419	}
4420	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4421	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4422	error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4423	if (error != 0) {
4424		DPRINTF(sc, MWL_DEBUG_NODE,
4425		    "%s: error %d creating sta db entry\n",
4426		    __func__, error);
4427		/* XXX how to deal with error? */
4428	}
4429}
4430
4431/*
4432 * Periodically poke the firmware to age out station state
4433 * (power save queues, pending tx aggregates).
4434 */
static void
mwl_agestations(void *arg)
{
	struct mwl_softc *sc = arg;

	/* prod the firmware; it ages per-station state on this cmd */
	mwl_hal_setkeepalive(sc->sc_mh);
	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
}
4444
4445static const struct mwl_hal_channel *
4446findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4447{
4448	int i;
4449
4450	for (i = 0; i < ci->nchannels; i++) {
4451		const struct mwl_hal_channel *hc = &ci->channels[i];
4452		if (hc->ieee == ieee)
4453			return hc;
4454	}
4455	return NULL;
4456}
4457
4458static int
4459mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
4460	int nchan, struct ieee80211_channel chans[])
4461{
4462	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4463	struct mwl_hal *mh = sc->sc_mh;
4464	const MWL_HAL_CHANNELINFO *ci;
4465	int i;
4466
4467	for (i = 0; i < nchan; i++) {
4468		struct ieee80211_channel *c = &chans[i];
4469		const struct mwl_hal_channel *hc;
4470
4471		if (IEEE80211_IS_CHAN_2GHZ(c)) {
4472			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
4473			    IEEE80211_IS_CHAN_HT40(c) ?
4474				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4475		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4476			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
4477			    IEEE80211_IS_CHAN_HT40(c) ?
4478				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4479		} else {
4480			if_printf(ic->ic_ifp,
4481			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
4482			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
4483			return EINVAL;
4484		}
4485		/*
4486		 * Verify channel has cal data and cap tx power.
4487		 */
4488		hc = findhalchannel(ci, c->ic_ieee);
4489		if (hc != NULL) {
4490			if (c->ic_maxpower > 2*hc->maxTxPow)
4491				c->ic_maxpower = 2*hc->maxTxPow;
4492			goto next;
4493		}
4494		if (IEEE80211_IS_CHAN_HT40(c)) {
4495			/*
4496			 * Look for the extension channel since the
4497			 * hal table only has the primary channel.
4498			 */
4499			hc = findhalchannel(ci, c->ic_extieee);
4500			if (hc != NULL) {
4501				if (c->ic_maxpower > 2*hc->maxTxPow)
4502					c->ic_maxpower = 2*hc->maxTxPow;
4503				goto next;
4504			}
4505		}
4506		if_printf(ic->ic_ifp,
4507		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
4508		    __func__, c->ic_ieee, c->ic_extieee,
4509		    c->ic_freq, c->ic_flags);
4510		return EINVAL;
4511	next:
4512		;
4513	}
4514	return 0;
4515}
4516
4517#define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4518#define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4519
4520static void
4521addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow)
4522{
4523	c->ic_freq = freq;
4524	c->ic_flags = flags;
4525	c->ic_ieee = ieee;
4526	c->ic_minpower = 0;
4527	c->ic_maxpower = 2*txpow;
4528	c->ic_maxregpower = txpow;
4529}
4530
4531static const struct ieee80211_channel *
4532findchannel(const struct ieee80211_channel chans[], int nchans,
4533	int freq, int flags)
4534{
4535	const struct ieee80211_channel *c;
4536	int i;
4537
4538	for (i = 0; i < nchans; i++) {
4539		c = &chans[i];
4540		if (c->ic_freq == freq && c->ic_flags == flags)
4541			return c;
4542	}
4543	return NULL;
4544}
4545
static void
addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	const struct ieee80211_channel *extc;
	const struct mwl_hal_channel *hc;
	int i;

	/* next free slot in the caller's channel array */
	c = &chans[*nchans];

	flags &= ~IEEE80211_CHAN_HT;
	for (i = 0; i < ci->nchannels; i++) {
		/*
		 * Each entry defines an HT40 channel pair; find the
		 * extension channel above and the insert the pair.
		 */
		hc = &ci->channels[i];
		/* NB: matching HT20 entries must already be in chans[] */
		extc = findchannel(chans, *nchans, hc->freq+20,
		    flags | IEEE80211_CHAN_HT20);
		if (extc != NULL) {
			if (*nchans >= maxchans)
				break;
			/* primary channel, extension above (HT40+) */
			addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U,
			    hc->ieee, hc->maxTxPow);
			c->ic_extieee = extc->ic_ieee;
			c++, (*nchans)++;
			if (*nchans >= maxchans)
				break;
			/* extension channel, primary below (HT40-) */
			addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D,
			    extc->ic_ieee, hc->maxTxPow);
			c->ic_extieee = hc->ieee;
			c++, (*nchans)++;
		}
	}
}
4582
/*
 * Translate a hal cal-data table into net80211 channel entries,
 * fabricating the legacy (b-only, g-only, a-only) variants that
 * accompany each g/HT channel.
 */
static void
addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	int i;

	/* next free slot in the caller's channel array */
	c = &chans[*nchans];

	for (i = 0; i < ci->nchannels; i++) {
		const struct mwl_hal_channel *hc;

		hc = &ci->channels[i];
		if (*nchans >= maxchans)
			break;
		addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
		c++, (*nchans)++;
		if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
			/* g channel have a separate b-only entry */
			if (*nchans >= maxchans)
				break;
			/* clone the just-added entry; original becomes b-only */
			c[0] = c[-1];
			c[-1].ic_flags = IEEE80211_CHAN_B;
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTG) {
			/* HT g channel have a separate g-only entry */
			if (*nchans >= maxchans)
				break;
			c[-1].ic_flags = IEEE80211_CHAN_G;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTA) {
			/* HT a channel have a separate a-only entry */
			if (*nchans >= maxchans)
				break;
			c[-1].ic_flags = IEEE80211_CHAN_A;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
	}
}
4630
static void
getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
	struct ieee80211_channel chans[])
{
	const MWL_HAL_CHANNELINFO *ci;

	/*
	 * Use the channel info from the hal to craft the
	 * channel list.  Note that we pass back an unsorted
	 * list; the caller is required to sort it for us
	 * (if desired).
	 */
	*nchans = 0;
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
	/*
	 * NB: the HT40 passes must come last; addht40channels pairs
	 * each entry with an HT20 channel added above.
	 */
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
}
4657
4658static void
4659mwl_getradiocaps(struct ieee80211com *ic,
4660	int maxchans, int *nchans, struct ieee80211_channel chans[])
4661{
4662	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4663
4664	getchannels(sc, maxchans, nchans, chans);
4665}
4666
/*
 * Build the initial channel list for net80211 at attach time and
 * install a default (debug SKU) regulatory domain.
 */
static int
mwl_getchannels(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	/*
	 * Use the channel info from the hal to craft the
	 * channel list for net80211.  Note that we pass up
	 * an unsorted list; net80211 will sort it for us.
	 */
	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
	ic->ic_nchans = 0;
	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);

	ic->ic_regdomain.regdomain = SKU_DEBUG;
	ic->ic_regdomain.country = CTRY_DEFAULT;
	ic->ic_regdomain.location = 'I';
	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
	ic->ic_regdomain.isocc[1] = ' ';
	/* fail attach if the hal yielded no usable channels */
	return (ic->ic_nchans == 0 ? EIO : 0);
}
4689#undef IEEE80211_CHAN_HTA
4690#undef IEEE80211_CHAN_HTG
4691
4692#ifdef MWL_DEBUG
/*
 * Dump one rx descriptor to the console (debug builds only).
 * NB: descriptor fields are little-endian and must be byte-swapped.
 */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("R[%2u] (DS.V:%p DS.P:%p) NEXT:%08x DATA:%08x RC:%02x%s\n"
	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (const struct mwl_desc *)bf->bf_daddr,
	    le32toh(ds->pPhysNext), le32toh(ds->pPhysBuffData),
	    ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2));
}
4709
/*
 * Dump one tx descriptor to the console (debug builds only).
 * NB: descriptor fields are little-endian and must be byte-swapped.
 */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:%p)\n",
	    ds, (const struct mwl_txdesc *)bf->bf_daddr);
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	/* multi-segment descriptors carry per-segment length/ptr arrays */
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
{ const uint8_t *cp = (const uint8_t *) ds;
  int i;
  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
	printf("%02x ", cp[i]);
	if (((i+1) % 16) == 0)
		printf("\n");
  }
  printf("\n");
}
#endif
}
4752#endif /* MWL_DEBUG */
4753
4754#if 0
/*
 * Dump the active descriptors of a tx queue (debug aid).
 */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		/* pull the latest device-written contents before printing */
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4773#endif
4774
/*
 * Per-second watchdog: self-rearming callout that counts down
 * sc_tx_timer; on expiry with the interface running it declares a
 * transmit timeout, using a keepalive command to probe whether the
 * firmware is still responsive.
 */
static void
mwl_watchdog(void *arg)
{
	struct mwl_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
	/* NB: timer of 0 means disarmed; otherwise tick it down */
	if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
		return;

	ifp = sc->sc_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) {
		/* a failed keepalive suggests the firmware is wedged */
		if (mwl_hal_setkeepalive(sc->sc_mh))
			if_printf(ifp, "transmit timeout (firmware hung?)\n");
		else
			if_printf(ifp, "transmit timeout\n");
#if 0
		mwl_reset(ifp);
mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
#endif
		ifp->if_oerrors++;
		sc->sc_stats.mst_watchdog++;
	}
}
4800
4801#ifdef MWL_DIAGAPI
4802/*
4803 * Diagnostic interface to the HAL.  This is used by various
4804 * tools to do things like retrieve register contents for
4805 * debugging.  The mechanism is intentionally opaque so that
4806 * it can change frequently w/o concern for compatiblity.
4807 */
static int
mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	u_int id = md->md_id & MWL_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = md->md_in_size;
	u_int32_t outsize = md->md_out_size;
	int error = 0;

	/*
	 * NOTE(review): insize/outsize come from userland and are not
	 * range-checked here before malloc; verify limits are enforced
	 * by the caller or the hal.
	 */
	if (md->md_id & MWL_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(md->md_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (md->md_id & MWL_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
		/* trim the copyout to what the hal actually produced */
		if (outsize < md->md_out_size)
			md->md_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, md->md_out_data,
					md->md_out_size);
	} else {
		error = EINVAL;
	}
bad:
	/* free only buffers we allocated above, not hal-owned ones */
	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
4862
/*
 * Diagnostic reset: optionally reload firmware (md_id == 0), then
 * re-fetch h/w specs, re-setup DMA and reset the driver's tx/rx
 * bookkeeping to match the freshly initialized device.
 */
static int
mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error;

	MWL_LOCK_ASSERT(sc);

	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to load firmware\n");
		return EIO;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		return EIO;
	}
	error = mwl_setupdma(sc);
	if (error != 0) {
		/* NB: mwl_setupdma prints a msg */
		return error;
	}
	/*
	 * Reset tx/rx data structures; after reload we must
	 * re-start the driver's notion of the next xmit/recv.
	 */
	mwl_draintxq(sc);		/* clear pending frames */
	mwl_resettxq(sc);		/* rebuild tx q lists */
	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
	return 0;
}
4893#endif /* MWL_DIAGAPI */
4894
/*
 * Network interface ioctl entry point: interface flag changes,
 * driver statistics, diagnostic hooks and media/address queries.
 */
static int
mwl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, startall;

	switch (cmd) {
	case SIOCSIFFLAGS:
		MWL_LOCK(sc);
		startall = 0;
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call mwl_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			mwl_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid) {
				mwl_init_locked(sc);	/* XXX lose error */
				startall = 1;
			}
		} else
			mwl_stop_locked(ifp, 1);
		MWL_UNLOCK(sc);
		if (startall)
			ieee80211_start_all(ic);
		break;
	case SIOCGMVSTATS:
		/* refresh the hardware counters before copying out */
		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.mst_tx_packets = ifp->if_opackets;
		sc->sc_stats.mst_rx_packets = ifp->if_ipackets;
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsisentcy in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return copyout(&sc->sc_stats,
				ifr->ifr_data, sizeof (sc->sc_stats));
#ifdef MWL_DIAGAPI
	case SIOCGMVDIAG:
		/* XXX check privs */
		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
	case SIOCGMVRESET:
		/* XXX check privs */
		MWL_LOCK(sc);
		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
		MWL_UNLOCK(sc);
		break;
#endif /* MWL_DIAGAPI */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}
4973
4974#ifdef	MWL_DEBUG
4975static int
4976mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4977{
4978	struct mwl_softc *sc = arg1;
4979	int debug, error;
4980
4981	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4982	error = sysctl_handle_int(oidp, &debug, 0, req);
4983	if (error || !req->newptr)
4984		return error;
4985	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4986	sc->sc_debug = debug & 0x00ffffff;
4987	return 0;
4988}
4989#endif /* MWL_DEBUG */
4990
/*
 * Attach driver sysctls; currently just the debug knob and only
 * when compiled with MWL_DEBUG.
 */
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef	MWL_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	/* seed from the global/tunable default */
	sc->sc_debug = mwl_debug;
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
5004
5005/*
5006 * Announce various information on device/driver attach.
5007 */
static void
mwl_announce(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* fw release number packs major.minor.patch.build, one per byte */
	if_printf(ifp, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
		sc->sc_hwspecs.hwVersion,
		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
		sc->sc_hwspecs.regionCode);
	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;

	if (bootverbose) {
		int i;
		/* report the WME access-category to hw-queue mapping */
		for (i = 0; i <= WME_AC_VO; i++) {
			struct mwl_txq *txq = sc->sc_ac2q[i];
			if_printf(ifp, "Use hw queue %u for %s traffic\n",
				txq->qnum, ieee80211_wme_acnames[i]);
		}
	}
	/* also report when tunables deviate from compiled-in defaults */
	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
		if_printf(ifp, "using %u rx descriptors\n", mwl_rxdesc);
	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
		if_printf(ifp, "using %u rx buffers\n", mwl_rxbuf);
	if (bootverbose || mwl_txbuf != MWL_TXBUF)
		if_printf(ifp, "using %u tx buffers\n", mwl_txbuf);
	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
		if_printf(ifp, "multi-bss support\n");
#ifdef MWL_TX_NODROP
	if (bootverbose)
		if_printf(ifp, "no tx drop\n");
#endif
}
5043