1/*-
2 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
3 * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer,
11 *    without modification.
12 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
14 *    redistribution must be conditioned upon including a substantially
15 *    similar Disclaimer requirement for further binary redistribution.
16 *
17 * NO WARRANTY
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
21 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
23 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
26 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGES.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD$");
33
34/*
35 * Driver for the Marvell 88W8363 Wireless LAN controller.
36 */
37
38#include "opt_inet.h"
39#include "opt_mwl.h"
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/sysctl.h>
44#include <sys/mbuf.h>
45#include <sys/malloc.h>
46#include <sys/lock.h>
47#include <sys/mutex.h>
48#include <sys/kernel.h>
49#include <sys/socket.h>
50#include <sys/sockio.h>
51#include <sys/errno.h>
52#include <sys/callout.h>
53#include <sys/bus.h>
54#include <sys/endian.h>
55#include <sys/kthread.h>
56#include <sys/taskqueue.h>
57
58#include <machine/bus.h>
59
60#include <net/if.h>
61#include <net/if_dl.h>
62#include <net/if_media.h>
63#include <net/if_types.h>
64#include <net/if_arp.h>
65#include <net/ethernet.h>
66#include <net/if_llc.h>
67
68#include <net/bpf.h>
69
70#include <net80211/ieee80211_var.h>
71#include <net80211/ieee80211_regdomain.h>
72
73#ifdef INET
74#include <netinet/in.h>
75#include <netinet/if_ether.h>
76#endif /* INET */
77
78#include <dev/mwl/if_mwlvar.h>
79#include <dev/mwl/mwldiag.h>
80
/* register field shorthands: MS(v,x) extracts field x (mask, then shift down);
   SM(v,x) composes field x (shift up, then mask) */
82#define	MS(v,x)	(((v) & x) >> x##_S)
83#define	SM(v,x)	(((v) << x##_S) & x)
84
85static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
86		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
87		    const uint8_t [IEEE80211_ADDR_LEN],
88		    const uint8_t [IEEE80211_ADDR_LEN]);
89static void	mwl_vap_delete(struct ieee80211vap *);
90static int	mwl_setupdma(struct mwl_softc *);
91static int	mwl_hal_reset(struct mwl_softc *sc);
92static int	mwl_init_locked(struct mwl_softc *);
93static void	mwl_init(void *);
94static void	mwl_stop_locked(struct ifnet *, int);
95static int	mwl_reset(struct ieee80211vap *, u_long);
96static void	mwl_stop(struct ifnet *, int);
97static void	mwl_start(struct ifnet *);
98static int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
99			const struct ieee80211_bpf_params *);
100static int	mwl_media_change(struct ifnet *);
101static void	mwl_watchdog(void *);
102static int	mwl_ioctl(struct ifnet *, u_long, caddr_t);
103static void	mwl_radar_proc(void *, int);
104static void	mwl_chanswitch_proc(void *, int);
105static void	mwl_bawatchdog_proc(void *, int);
106static int	mwl_key_alloc(struct ieee80211vap *,
107			struct ieee80211_key *,
108			ieee80211_keyix *, ieee80211_keyix *);
109static int	mwl_key_delete(struct ieee80211vap *,
110			const struct ieee80211_key *);
111static int	mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *,
112			const uint8_t mac[IEEE80211_ADDR_LEN]);
113static int	mwl_mode_init(struct mwl_softc *);
114static void	mwl_update_mcast(struct ifnet *);
115static void	mwl_update_promisc(struct ifnet *);
116static void	mwl_updateslot(struct ifnet *);
117static int	mwl_beacon_setup(struct ieee80211vap *);
118static void	mwl_beacon_update(struct ieee80211vap *, int);
119#ifdef MWL_HOST_PS_SUPPORT
120static void	mwl_update_ps(struct ieee80211vap *, int);
121static int	mwl_set_tim(struct ieee80211_node *, int);
122#endif
123static int	mwl_dma_setup(struct mwl_softc *);
124static void	mwl_dma_cleanup(struct mwl_softc *);
125static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
126		    const uint8_t [IEEE80211_ADDR_LEN]);
127static void	mwl_node_cleanup(struct ieee80211_node *);
128static void	mwl_node_drain(struct ieee80211_node *);
129static void	mwl_node_getsignal(const struct ieee80211_node *,
130			int8_t *, int8_t *);
131static void	mwl_node_getmimoinfo(const struct ieee80211_node *,
132			struct ieee80211_mimo_info *);
133static int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
134static void	mwl_rx_proc(void *, int);
135static void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
136static int	mwl_tx_setup(struct mwl_softc *, int, int);
137static int	mwl_wme_update(struct ieee80211com *);
138static void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
139static void	mwl_tx_cleanup(struct mwl_softc *);
140static uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
141static int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
142			     struct mwl_txbuf *, struct mbuf *);
143static void	mwl_tx_proc(void *, int);
144static int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
145static void	mwl_draintxq(struct mwl_softc *);
146static void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
147static int	mwl_recv_action(struct ieee80211_node *,
148			const struct ieee80211_frame *,
149			const uint8_t *, const uint8_t *);
150static int	mwl_addba_request(struct ieee80211_node *,
151			struct ieee80211_tx_ampdu *, int dialogtoken,
152			int baparamset, int batimeout);
153static int	mwl_addba_response(struct ieee80211_node *,
154			struct ieee80211_tx_ampdu *, int status,
155			int baparamset, int batimeout);
156static void	mwl_addba_stop(struct ieee80211_node *,
157			struct ieee80211_tx_ampdu *);
158static int	mwl_startrecv(struct mwl_softc *);
159static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
160			struct ieee80211_channel *);
161static int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
162static void	mwl_scan_start(struct ieee80211com *);
163static void	mwl_scan_end(struct ieee80211com *);
164static void	mwl_set_channel(struct ieee80211com *);
165static int	mwl_peerstadb(struct ieee80211_node *,
166			int aid, int staid, MWL_HAL_PEERINFO *pi);
167static int	mwl_localstadb(struct ieee80211vap *);
168static int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
169static int	allocstaid(struct mwl_softc *sc, int aid);
170static void	delstaid(struct mwl_softc *sc, int staid);
171static void	mwl_newassoc(struct ieee80211_node *, int);
172static void	mwl_agestations(void *);
173static int	mwl_setregdomain(struct ieee80211com *,
174			struct ieee80211_regdomain *, int,
175			struct ieee80211_channel []);
176static void	mwl_getradiocaps(struct ieee80211com *, int, int *,
177			struct ieee80211_channel []);
178static int	mwl_getchannels(struct mwl_softc *);
179
180static void	mwl_sysctlattach(struct mwl_softc *);
181static void	mwl_announce(struct mwl_softc *);
182
SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");

/*
 * Driver tunables; settable at boot via loader tunables
 * (TUNABLE_INT) and/or at runtime via sysctl hw.mwl.*.
 */
static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
	    0, "rx descriptors allocated");
static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RW, &mwl_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.mwl.rxbuf", &mwl_rxbuf);
static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RW, &mwl_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.mwl.txbuf", &mwl_txbuf);
static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RW, &mwl_txcoalesce,
	    0, "tx buffers to send at once");
TUNABLE_INT("hw.mwl.txcoalesce", &mwl_txcoalesce);
static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RW, &mwl_rxquota,
	    0, "max rx buffers to process per interrupt");
TUNABLE_INT("hw.mwl.rxquota", &mwl_rxquota);
static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RW, &mwl_rxdmalow,
	    0, "min free rx buffers before restarting traffic");
TUNABLE_INT("hw.mwl.rxdmalow", &mwl_rxdmalow);
208
209#ifdef MWL_DEBUG
210static	int mwl_debug = 0;
211SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RW, &mwl_debug,
212	    0, "control debugging printfs");
213TUNABLE_INT("hw.mwl.debug", &mwl_debug);
214enum {
215	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
216	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
217	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
218	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
219	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
220	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
221	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
222	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
223	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
224	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
225	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
226	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
227	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
228	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
229	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
230	MWL_DEBUG_ANY		= 0xffffffff
231};
232#define	IS_BEACON(wh) \
233    ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
234	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
235#define	IFF_DUMPPKTS_RECV(sc, wh) \
236    (((sc->sc_debug & MWL_DEBUG_RECV) && \
237      ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh))) || \
238     (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
239#define	IFF_DUMPPKTS_XMIT(sc) \
240	((sc->sc_debug & MWL_DEBUG_XMIT) || \
241	 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
242#define	DPRINTF(sc, m, fmt, ...) do {				\
243	if (sc->sc_debug & (m))					\
244		printf(fmt, __VA_ARGS__);			\
245} while (0)
246#define	KEYPRINTF(sc, hk, mac) do {				\
247	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
248		mwl_keyprint(sc, __func__, hk, mac);		\
249} while (0)
250static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
251static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
252#else
253#define	IFF_DUMPPKTS_RECV(sc, wh) \
254	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
255#define	IFF_DUMPPKTS_XMIT(sc) \
256	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
257#define	DPRINTF(sc, m, fmt, ...) do {				\
258	(void) sc;						\
259} while (0)
260#define	KEYPRINTF(sc, k, mac) do {				\
261	(void) sc;						\
262} while (0)
263#endif
264
MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");

/*
 * Each packet has fixed front matter: a 2-byte length
 * of the payload, followed by a 4-address 802.11 header
 * (regardless of the actual header and always w/o any
 * QoS header).  The payload then follows.
 */
struct mwltxrec {
	uint16_t fwlen;				/* payload length for the f/w */
	struct ieee80211_frame_addr4 wh;	/* 4-address 802.11 header */
} __packed;
277
278/*
279 * Read/Write shorthands for accesses to BAR 0.  Note
280 * that all BAR 1 operations are done in the "hal" and
281 * there should be no reference to them here.
282 */
283static __inline uint32_t
284RD4(struct mwl_softc *sc, bus_size_t off)
285{
286	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
287}
288
289static __inline void
290WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
291{
292	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
293}
294
/*
 * Device attach: attach the hal, load firmware, fetch h/w
 * specs and the channel list, set up DMA and the tx queues,
 * then hook the driver into net80211 and announce the device.
 * Returns 0 on success or an errno; on failure the partially
 * initialized state is unwound via the bad* labels below.
 */
int
mwl_attach(uint16_t devid, struct mwl_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct mwl_hal *mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "cannot if_alloc()\n");
		return ENOSPC;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	mh = mwl_hal_attach(sc->sc_dev, devid,
	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
	if (mh == NULL) {
		if_printf(ifp, "unable to attach HAL\n");
		error = EIO;
		goto bad;
	}
	sc->sc_mh = mh;
	/*
	 * Load firmware so we can get setup.  We arbitrarily
	 * pick station firmware; we'll re-load firmware as
	 * needed so setting up the wrong mode isn't a big deal.
	 */
	if (mwl_hal_fwload(mh, NULL) != 0) {
		if_printf(ifp, "unable to setup builtin firmware\n");
		error = EIO;
		goto bad1;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		if_printf(ifp, "unable to fetch h/w specs\n");
		error = EIO;
		goto bad1;
	}
	error = mwl_getchannels(sc);
	if (error != 0)
		goto bad1;

	sc->sc_txantenna = 0;		/* h/w default */
	sc->sc_rxantenna = 0;		/* h/w default */
	sc->sc_invalid = 0;		/* ready to go, enable int handling */
	sc->sc_ageinterval = MWL_AGEINTERVAL;

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 * We immediately push the information to the firmware
	 * as otherwise it gets upset.
	 */
	error = mwl_dma_setup(sc);
	if (error != 0) {
		if_printf(ifp, "failed to setup descriptors: %d\n", error);
		goto bad1;
	}
	error = mwl_setupdma(sc);	/* push to firmware */
	if (error != 0)			/* NB: mwl_setupdma prints msg */
		goto bad1;

	callout_init(&sc->sc_timer, CALLOUT_MPSAFE);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);

	/*
	 * NOTE(review): taskqueue_create(M_NOWAIT) can return NULL and
	 * the result is used unchecked below; confirm OOM at attach
	 * time is acceptable here.
	 */
	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	/* deferred work run from the taskqueue (enqueued by mwl_intr) */
	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);

	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = mwl_start;
	ifp->if_ioctl = mwl_ioctl;
	ifp->if_init = mwl_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
#if 0
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
#endif
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_WDS		/* WDS supported */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WME		/* WME/WMM supported */
		| IEEE80211_C_BURST		/* xmit bursting supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
		| IEEE80211_C_DFS		/* DFS supported */
		;

	ic->ic_htcaps =
		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
#if MWL_AGGR_SIZE == 7935
		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#if 0
		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
#endif
		/* s/w capabilities */
		| IEEE80211_HTC_HT		/* HT operation */
		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
		| IEEE80211_HTC_SMPS		/* SMPS available */
		;

	/*
	 * Mark h/w crypto support.
	 * XXX no way to query h/w support.
	 */
	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
			  |  IEEE80211_CRYPTO_AES_CCM
			  |  IEEE80211_CRYPTO_TKIP
			  |  IEEE80211_CRYPTO_TKIPMIC
			  ;
	/*
	 * Transmit requires space in the packet for a special
	 * format transmit record and optional padding between
	 * this record and the payload.  Ask the net80211 layer
	 * to arrange this when encapsulating packets so we can
	 * add it efficiently.
	 */
	ic->ic_headroom = sizeof(struct mwltxrec) -
		sizeof(struct ieee80211_frame);

	/* call MI attach routine. */
	ieee80211_ifattach(ic, sc->sc_hwspecs.macAddr);
	ic->ic_setregdomain = mwl_setregdomain;
	ic->ic_getradiocaps = mwl_getradiocaps;
	/* override default methods */
	ic->ic_raw_xmit = mwl_raw_xmit;
	ic->ic_newassoc = mwl_newassoc;
	ic->ic_updateslot = mwl_updateslot;
	ic->ic_update_mcast = mwl_update_mcast;
	ic->ic_update_promisc = mwl_update_promisc;
	ic->ic_wme.wme_update = mwl_wme_update;

	ic->ic_node_alloc = mwl_node_alloc;
	/* save net80211 defaults so the driver hooks can chain to them */
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = mwl_node_cleanup;
	sc->sc_node_drain = ic->ic_node_drain;
	ic->ic_node_drain = mwl_node_drain;
	ic->ic_node_getsignal = mwl_node_getsignal;
	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;

	ic->ic_scan_start = mwl_scan_start;
	ic->ic_scan_end = mwl_scan_end;
	ic->ic_set_channel = mwl_set_channel;

	sc->sc_recv_action = ic->ic_recv_action;
	ic->ic_recv_action = mwl_recv_action;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = mwl_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = mwl_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = mwl_addba_stop;

	ic->ic_vap_create = mwl_vap_create;
	ic->ic_vap_delete = mwl_vap_delete;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		MWL_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		MWL_RX_RADIOTAP_PRESENT);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	mwl_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	mwl_announce(sc);
	return 0;
bad2:
	/*
	 * NOTE(review): the taskqueue and callouts initialized above are
	 * not torn down on this error path — verify against mwl_detach.
	 */
	mwl_dma_cleanup(sc);
bad1:
	mwl_hal_detach(mh);
bad:
	if_free(ifp);
	sc->sc_invalid = 1;
	return error;
}
537
/*
 * Device detach; undoes mwl_attach.  The teardown ordering
 * below is significant — see the NB comment in the body.
 * Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	mwl_stop(ifp, 1);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	callout_drain(&sc->sc_watchdog);
	mwl_dma_cleanup(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	if_free(ifp);

	return 0;
}
569
570/*
571 * MAC address handling for multiple BSS on the same radio.
572 * The first vap uses the MAC address from the EEPROM.  For
573 * subsequent vap's we set the U/L bit (bit 1) in the MAC
574 * address and use the next six bits as an index.
575 */
576static void
577assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
578{
579	int i;
580
581	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
582		/* NB: we only do this if h/w supports multiple bssid */
583		for (i = 0; i < 32; i++)
584			if ((sc->sc_bssidmask & (1<<i)) == 0)
585				break;
586		if (i != 0)
587			mac[0] |= (i << 2)|0x2;
588	} else
589		i = 0;
590	sc->sc_bssidmask |= 1<<i;
591	if (i == 0)
592		sc->sc_nbssid0++;
593}
594
595static void
596reclaim_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN])
597{
598	int i = mac[0] >> 2;
599	if (i != 0 || --sc->sc_nbssid0 == 0)
600		sc->sc_bssidmask &= ~(1<<i);
601}
602
/*
 * net80211 vap create callback.  AP/mesh/station vaps get a
 * backing hal vap and a (possibly cloned) MAC address; WDS vaps
 * piggyback on an existing AP vap; monitor vaps need no h/w
 * state.  Driver methods are installed over the net80211
 * defaults (saving the originals for chaining) and the overall
 * operating mode is recalculated.  Returns NULL on failure.
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;		/* no h/w state needed */
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		return NULL;		/* mode not supported */
	}

	mvp = (struct mwl_vap *) malloc(sizeof(struct mwl_vap),
	    M_80211_VAP, M_NOWAIT | M_ZERO);
	if (mvp == NULL) {
		/* undo hal vap + address allocation on OOM */
		if (hvap != NULL) {
			mwl_hal_delvap(hvap);
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
		}
		/* XXX msg */
		return NULL;
	}
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
	if (hvap != NULL)
		IEEE80211_ADDR_COPY(vap->iv_myaddr, mac);
	/* override with driver methods */
	mvp->mv_newstate = vap->iv_newstate;	/* saved for chaining */
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status);

	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}
742
/*
 * net80211 vap delete callback.  Interrupts are blocked while
 * the hal vap and local station db entry are torn down; the vap
 * accounting and any cloned MAC address slot are then released.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ifnet *parent = vap->iv_ic->ic_ifp;
	struct mwl_softc *sc = parent->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (parent->if_drv_flags & IFF_DRV_RUNNING) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps--;	/* no hal vap to tear down */
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	if (parent->if_drv_flags & IFF_DRV_RUNNING)
		mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable interrupts */
}
784
785void
786mwl_suspend(struct mwl_softc *sc)
787{
788	struct ifnet *ifp = sc->sc_ifp;
789
790	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
791		__func__, ifp->if_flags);
792
793	mwl_stop(ifp, 1);
794}
795
796void
797mwl_resume(struct mwl_softc *sc)
798{
799	struct ifnet *ifp = sc->sc_ifp;
800
801	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
802		__func__, ifp->if_flags);
803
804	if (ifp->if_flags & IFF_UP)
805		mwl_init(sc);
806}
807
808void
809mwl_shutdown(void *arg)
810{
811	struct mwl_softc *sc = arg;
812
813	mwl_stop(sc->sc_ifp, 1);
814}
815
816/*
817 * Interrupt handler.  Most of the actual processing is deferred.
818 */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;			/* A2H interrupt cause bits */

#if !defined(__HAIKU__)
	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;
#else
	/* Haiku: the low-level ISR latched the cause bits for us */
	status = atomic_get((int32 *)&sc->sc_intr_status);
#endif

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	/* rx, tx-done and BA watchdog work is deferred to the taskqueue */
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;	/* nothing to do */
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;	/* nothing to do */
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}
878
879static void
880mwl_radar_proc(void *arg, int pending)
881{
882	struct mwl_softc *sc = arg;
883	struct ifnet *ifp = sc->sc_ifp;
884	struct ieee80211com *ic = ifp->if_l2com;
885
886	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
887	    __func__, pending);
888
889	sc->sc_stats.mst_radardetect++;
890	/* XXX stop h/w BA streams? */
891
892	IEEE80211_LOCK(ic);
893	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
894	IEEE80211_UNLOCK(ic);
895}
896
897static void
898mwl_chanswitch_proc(void *arg, int pending)
899{
900	struct mwl_softc *sc = arg;
901	struct ifnet *ifp = sc->sc_ifp;
902	struct ieee80211com *ic = ifp->if_l2com;
903
904	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
905	    __func__, pending);
906
907	IEEE80211_LOCK(ic);
908	sc->sc_csapending = 0;
909	ieee80211_csa_completeswitch(ic);
910	IEEE80211_UNLOCK(ic);
911}
912
913static void
914mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
915{
916	struct ieee80211_node *ni = sp->data[0];
917
918	/* send DELBA and drop the stream */
919	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
920}
921
922static void
923mwl_bawatchdog_proc(void *arg, int pending)
924{
925	struct mwl_softc *sc = arg;
926	struct mwl_hal *mh = sc->sc_mh;
927	const MWL_HAL_BASTREAM *sp;
928	uint8_t bitmap, n;
929
930	sc->sc_stats.mst_bawatchdog++;
931
932	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
933		DPRINTF(sc, MWL_DEBUG_AMPDU,
934		    "%s: could not get bitmap\n", __func__);
935		sc->sc_stats.mst_bawatchdog_failed++;
936		return;
937	}
938	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
939	if (bitmap == 0xff) {
940		n = 0;
941		/* disable all ba streams */
942		for (bitmap = 0; bitmap < 8; bitmap++) {
943			sp = mwl_hal_bastream_lookup(mh, bitmap);
944			if (sp != NULL) {
945				mwl_bawatchdog(sp);
946				n++;
947			}
948		}
949		if (n == 0) {
950			DPRINTF(sc, MWL_DEBUG_AMPDU,
951			    "%s: no BA streams found\n", __func__);
952			sc->sc_stats.mst_bawatchdog_empty++;
953		}
954	} else if (bitmap != 0xaa) {
955		/* disable a single ba stream */
956		sp = mwl_hal_bastream_lookup(mh, bitmap);
957		if (sp != NULL) {
958			mwl_bawatchdog(sp);
959		} else {
960			DPRINTF(sc, MWL_DEBUG_AMPDU,
961			    "%s: no BA stream %d\n", __func__, bitmap);
962			sc->sc_stats.mst_bawatchdog_notfound++;
963		}
964	}
965}
966
967/*
968 * Convert net80211 channel to a HAL channel.
969 */
970static void
971mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
972{
973	hc->channel = chan->ic_ieee;
974
975	*(uint32_t *)&hc->channelFlags = 0;
976	if (IEEE80211_IS_CHAN_2GHZ(chan))
977		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
978	else if (IEEE80211_IS_CHAN_5GHZ(chan))
979		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
980	if (IEEE80211_IS_CHAN_HT40(chan)) {
981		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
982		if (IEEE80211_IS_CHAN_HT40U(chan))
983			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
984		else
985			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
986	} else
987		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
988	/* XXX 10MHz channels */
989}
990
991/*
992 * Inform firmware of our tx/rx dma setup.  The BAR 0
993 * writes below are for compatibility with older firmware.
994 * For current firmware we send this information with a
995 * cmd block via mwl_hal_sethwdma.
996 */
997static int
998mwl_setupdma(struct mwl_softc *sc)
999{
1000	int error, i;
1001
1002	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
1003	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
1004	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);
1005
1006	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
1007		struct mwl_txq *txq = &sc->sc_txq[i];
1008		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
1009		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
1010	}
1011	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
1012	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;
1013
1014	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
1015	if (error != 0) {
1016		device_printf(sc->sc_dev,
1017		    "unable to setup tx/rx dma; hal status %u\n", error);
1018		/* XXX */
1019	}
1020	return error;
1021}
1022
1023/*
1024 * Inform firmware of tx rate parameters.
1025 * Called after a channel change.
1026 */
1027static int
1028mwl_setcurchanrates(struct mwl_softc *sc)
1029{
1030	struct ifnet *ifp = sc->sc_ifp;
1031	struct ieee80211com *ic = ifp->if_l2com;
1032	const struct ieee80211_rateset *rs;
1033	MWL_HAL_TXRATE rates;
1034
1035	memset(&rates, 0, sizeof(rates));
1036	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1037	/* rate used to send management frames */
1038	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1039	/* rate used to send multicast frames */
1040	rates.McastRate = rates.MgtRate;
1041
1042	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1043}
1044
1045/*
1046 * Inform firmware of tx rate parameters.  Called whenever
1047 * user-settable params change and after a channel change.
1048 */
1049static int
1050mwl_setrates(struct ieee80211vap *vap)
1051{
1052	struct mwl_vap *mvp = MWL_VAP(vap);
1053	struct ieee80211_node *ni = vap->iv_bss;
1054	const struct ieee80211_txparam *tp = ni->ni_txparms;
1055	MWL_HAL_TXRATE rates;
1056
1057	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1058
1059	/*
1060	 * Update the h/w rate map.
1061	 * NB: 0x80 for MCS is passed through unchanged
1062	 */
1063	memset(&rates, 0, sizeof(rates));
1064	/* rate used to send management frames */
1065	rates.MgtRate = tp->mgmtrate;
1066	/* rate used to send multicast frames */
1067	rates.McastRate = tp->mcastrate;
1068
1069	/* while here calculate EAPOL fixed rate cookie */
1070	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
1071
1072	return mwl_hal_settxrate(mvp->mv_hvap,
1073	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
1074		RATE_FIXED : RATE_AUTO, &rates);
1075}
1076
1077/*
1078 * Setup a fixed xmit rate cookie for EAPOL frames.
1079 */
1080static void
1081mwl_seteapolformat(struct ieee80211vap *vap)
1082{
1083	struct mwl_vap *mvp = MWL_VAP(vap);
1084	struct ieee80211_node *ni = vap->iv_bss;
1085	enum ieee80211_phymode mode;
1086	uint8_t rate;
1087
1088	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1089
1090	mode = ieee80211_chan2mode(ni->ni_chan);
1091	/*
1092	 * Use legacy rates when operating a mixed HT+non-HT bss.
1093	 * NB: this may violate POLA for sta and wds vap's.
1094	 */
1095	if (mode == IEEE80211_MODE_11NA &&
1096	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1097		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1098	else if (mode == IEEE80211_MODE_11NG &&
1099	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1100		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1101	else
1102		rate = vap->iv_txparms[mode].mgmtrate;
1103
1104	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1105}
1106
1107/*
1108 * Map SKU+country code to region code for radar bin'ing.
1109 */
1110static int
1111mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1112{
1113	switch (rd->regdomain) {
1114	case SKU_FCC:
1115	case SKU_FCC3:
1116		return DOMAIN_CODE_FCC;
1117	case SKU_CA:
1118		return DOMAIN_CODE_IC;
1119	case SKU_ETSI:
1120	case SKU_ETSI2:
1121	case SKU_ETSI3:
1122		if (rd->country == CTRY_SPAIN)
1123			return DOMAIN_CODE_SPAIN;
1124		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1125			return DOMAIN_CODE_FRANCE;
1126		/* XXX force 1.3.1 radar type */
1127		return DOMAIN_CODE_ETSI_131;
1128	case SKU_JAPAN:
1129		return DOMAIN_CODE_MKK;
1130	case SKU_ROW:
1131		return DOMAIN_CODE_DGT;	/* Taiwan */
1132	case SKU_APAC:
1133	case SKU_APAC2:
1134	case SKU_APAC3:
1135		return DOMAIN_CODE_AUS;	/* Australia */
1136	}
1137	/* XXX KOREA? */
1138	return DOMAIN_CODE_FCC;			/* XXX? */
1139}
1140
/*
 * Push vap-independent radio state to the firmware: antenna config,
 * radio/preamble, WMM, current channel, rate adaptation, burst
 * optimization, region code, and ampdu/cf-end settings.
 * NB: always reports success; individual hal call results are ignored.
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	return 1;
}
1165
/*
 * Bring the hardware/firmware up: stop any previous state, push
 * vap-independent state via mwl_hal_reset, start receive, then
 * enable interrupts and the watchdog.  Caller holds the softc lock
 * (MWL_LOCK_ASSERT below).  Returns 0 on success or an errno.
 */
static int
mwl_init_locked(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop_locked(ifp, 0);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		if_printf(ifp, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.
	 * NB: sc_imask is cached so it can be re-applied after any
	 * mwl_hal_intrset(mh, 0) critical section (see mwl_reset).
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	/* Mark running before enabling interrupts and the watchdog. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	mwl_hal_intrset(mh, sc->sc_imask);
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);

	return 0;
}
1226
1227static void
1228mwl_init(void *arg)
1229{
1230	struct mwl_softc *sc = arg;
1231	struct ifnet *ifp = sc->sc_ifp;
1232	struct ieee80211com *ic = ifp->if_l2com;
1233	int error = 0;
1234
1235	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
1236		__func__, ifp->if_flags);
1237
1238	MWL_LOCK(sc);
1239	error = mwl_init_locked(sc);
1240	MWL_UNLOCK(sc);
1241
1242	if (error == 0)
1243		ieee80211_start_all(ic);	/* start all vap's */
1244}
1245
1246static void
1247mwl_stop_locked(struct ifnet *ifp, int disable)
1248{
1249	struct mwl_softc *sc = ifp->if_softc;
1250
1251	DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
1252		__func__, sc->sc_invalid, ifp->if_flags);
1253
1254	MWL_LOCK_ASSERT(sc);
1255	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1256		/*
1257		 * Shutdown the hardware and driver.
1258		 */
1259		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1260		callout_stop(&sc->sc_watchdog);
1261		sc->sc_tx_timer = 0;
1262		mwl_draintxq(sc);
1263	}
1264}
1265
1266static void
1267mwl_stop(struct ifnet *ifp, int disable)
1268{
1269	struct mwl_softc *sc = ifp->if_softc;
1270
1271	MWL_LOCK(sc);
1272	mwl_stop_locked(ifp, disable);
1273	MWL_UNLOCK(sc);
1274}
1275
1276static int
1277mwl_reset_vap(struct ieee80211vap *vap, int state)
1278{
1279	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1280	struct ieee80211com *ic = vap->iv_ic;
1281
1282	if (state == IEEE80211_S_RUN)
1283		mwl_setrates(vap);
1284	/* XXX off by 1? */
1285	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
1286	/* XXX auto? 20/40 split? */
1287	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
1288	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
1289	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
1290	    HTPROTECT_NONE : HTPROTECT_AUTO);
1291	/* XXX txpower cap */
1292
1293	/* re-setup beacons */
1294	if (state == IEEE80211_S_RUN &&
1295	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
1296	     vap->iv_opmode == IEEE80211_M_MBSS ||
1297	     vap->iv_opmode == IEEE80211_M_IBSS)) {
1298		mwl_setapmode(vap, vap->iv_bss->ni_chan);
1299		mwl_hal_setnprotmode(hvap,
1300		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1301		return mwl_beacon_setup(vap);
1302	}
1303	return 0;
1304}
1305
1306/*
1307 * Reset the hardware w/o losing operational state.
1308 * Used to to reset or reload hardware state for a vap.
1309 */
1310static int
1311mwl_reset(struct ieee80211vap *vap, u_long cmd)
1312{
1313	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1314	int error = 0;
1315
1316	if (hvap != NULL) {			/* WDS, MONITOR, etc. */
1317		struct ieee80211com *ic = vap->iv_ic;
1318		struct ifnet *ifp = ic->ic_ifp;
1319		struct mwl_softc *sc = ifp->if_softc;
1320		struct mwl_hal *mh = sc->sc_mh;
1321
1322		/* XXX handle DWDS sta vap change */
1323		/* XXX do we need to disable interrupts? */
1324		mwl_hal_intrset(mh, 0);		/* disable interrupts */
1325		error = mwl_reset_vap(vap, vap->iv_state);
1326		mwl_hal_intrset(mh, sc->sc_imask);
1327	}
1328	return error;
1329}
1330
1331/*
1332 * Allocate a tx buffer for sending a frame.  The
1333 * packet is assumed to have the WME AC stored so
1334 * we can use it to select the appropriate h/w queue.
1335 */
1336static struct mwl_txbuf *
1337mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1338{
1339	struct mwl_txbuf *bf;
1340
1341	/*
1342	 * Grab a TX buffer and associated resources.
1343	 */
1344	MWL_TXQ_LOCK(txq);
1345	bf = STAILQ_FIRST(&txq->free);
1346	if (bf != NULL) {
1347		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1348		txq->nfree--;
1349	}
1350	MWL_TXQ_UNLOCK(txq);
1351	if (bf == NULL)
1352		DPRINTF(sc, MWL_DEBUG_XMIT,
1353		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1354	return bf;
1355}
1356
1357/*
1358 * Return a tx buffer to the queue it came from.  Note there
1359 * are two cases because we must preserve the order of buffers
1360 * as it reflects the fixed order of descriptors in memory
1361 * (the firmware pre-fetches descriptors so we cannot reorder).
1362 */
1363static void
1364mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1365{
1366	bf->bf_m = NULL;
1367	bf->bf_node = NULL;
1368	MWL_TXQ_LOCK(txq);
1369	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1370	txq->nfree++;
1371	MWL_TXQ_UNLOCK(txq);
1372}
1373
1374static void
1375mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
1376{
1377	bf->bf_m = NULL;
1378	bf->bf_node = NULL;
1379	MWL_TXQ_LOCK(txq);
1380	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
1381	txq->nfree++;
1382	MWL_TXQ_UNLOCK(txq);
1383}
1384
/*
 * Transmit start: drain the ifnet send queue, map each frame to
 * the h/w queue chosen by the 802.11 layer's WME classification,
 * and hand frames to the firmware, kicking it every mwl_txcoalesce
 * frames and once more at the end.
 */
static void
mwl_start(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	/* NB: nothing to do if the interface is down or detaching */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	nqueued = 0;
	for (;;) {
		bf = NULL;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		/*
		 * Grab the node for the destination.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			/* Out of tx buffers: reclaim frame and node ref. */
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			sc->sc_stats.mst_tx_qstop++;
			/* XXX blocks other traffic */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 * NOTE(review): on failure the mbuf is not freed here,
		 * presumably mwl_tx_start reclaims it -- confirm.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			ifp->if_oerrors++;
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host. So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1467
/*
 * Transmit a raw (caller-constructed) 802.11 frame.  The node
 * reference and mbuf are consumed on all paths.  Returns 0 on
 * success or an errno.
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
		ieee80211_free_node(ni);
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		/* XXX blocks other traffic */
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		ieee80211_free_node(ni);
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 * NOTE(review): the mbuf is not freed on this path;
	 * presumably mwl_tx_start reclaims it on failure -- confirm.
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		ifp->if_oerrors++;
		mwl_puttxbuf_head(txq, bf);

		ieee80211_free_node(ni);
		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host. So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1527
1528static int
1529mwl_media_change(struct ifnet *ifp)
1530{
1531	struct ieee80211vap *vap = ifp->if_softc;
1532	int error;
1533
1534	error = ieee80211_media_change(ifp);
1535	/* NB: only the fixed rate can change and that doesn't need a reset */
1536	if (error == ENETRESET) {
1537		mwl_setrates(vap);
1538		error = 0;
1539	}
1540	return error;
1541}
1542
#ifdef MWL_DEBUG
/*
 * Debug dump of a hal key descriptor: index, cipher name, key
 * bytes, target mac, TKIP mic keys (if any), and flag bits.
 */
static void
mwl_keyprint(struct mwl_softc *sc, const char *tag,
	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	static const char *ciphers[] = {
		"WEP",
		"TKIP",
		"AES-CCM",
	};
	int i;

	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
	for (i = 0; i < hk->keyLen; i++)
		printf(" %02x", hk->key.aes[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
		printf(" %s", "rxmic");
		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
			printf(" %02x", hk->key.tkip.rxMic[i]);
		printf(" txmic");
		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
			printf(" %02x", hk->key.tkip.txMic[i]);
	}
	printf(" flags 0x%x\n", hk->keyFlags);
}
#endif
1570
1571/*
1572 * Allocate a key cache slot for a unicast key.  The
1573 * firmware handles key allocation and every station is
1574 * guaranteed key space so we are always successful.
1575 */
1576static int
1577mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
1578	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1579{
1580	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1581
1582	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
1583	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
1584		if (!(&vap->iv_nw_keys[0] <= k &&
1585		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
1586			/* should not happen */
1587			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1588				"%s: bogus group key\n", __func__);
1589			return 0;
1590		}
1591		/* give the caller what they requested */
1592		*keyix = *rxkeyix = k - vap->iv_nw_keys;
1593	} else {
1594		/*
1595		 * Firmware handles key allocation.
1596		 */
1597		*keyix = *rxkeyix = 0;
1598	}
1599	return 1;
1600}
1601
1602/*
1603 * Delete a key entry allocated by mwl_key_alloc.
1604 */
1605static int
1606mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
1607{
1608	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1609	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1610	MWL_HAL_KEYVAL hk;
1611	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
1612	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1613
1614	if (hvap == NULL) {
1615		if (vap->iv_opmode != IEEE80211_M_WDS) {
1616			/* XXX monitor mode? */
1617			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1618			    "%s: no hvap for opmode %d\n", __func__,
1619			    vap->iv_opmode);
1620			return 0;
1621		}
1622		hvap = MWL_VAP(vap)->mv_ap_hvap;
1623	}
1624
1625	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
1626	    __func__, k->wk_keyix);
1627
1628	memset(&hk, 0, sizeof(hk));
1629	hk.keyIndex = k->wk_keyix;
1630	switch (k->wk_cipher->ic_cipher) {
1631	case IEEE80211_CIPHER_WEP:
1632		hk.keyTypeId = KEY_TYPE_ID_WEP;
1633		break;
1634	case IEEE80211_CIPHER_TKIP:
1635		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1636		break;
1637	case IEEE80211_CIPHER_AES_CCM:
1638		hk.keyTypeId = KEY_TYPE_ID_AES;
1639		break;
1640	default:
1641		/* XXX should not happen */
1642		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1643		    __func__, k->wk_cipher->ic_cipher);
1644		return 0;
1645	}
1646	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
1647}
1648
1649static __inline int
1650addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1651{
1652	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1653		if (k->wk_flags & IEEE80211_KEY_XMIT)
1654			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1655		if (k->wk_flags & IEEE80211_KEY_RECV)
1656			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1657		return 1;
1658	} else
1659		return 0;
1660}
1661
1662/*
1663 * Set the key cache contents for the specified key.  Key cache
1664 * slot(s) must already have been allocated by mwl_key_alloc.
1665 */
1666static int
1667mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
1668	const uint8_t mac[IEEE80211_ADDR_LEN])
1669{
1670#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
1671/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
1672#define	IEEE80211_IS_STATICKEY(k) \
1673	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
1674	 (GRPXMIT|IEEE80211_KEY_RECV))
1675	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1676	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1677	const struct ieee80211_cipher *cip = k->wk_cipher;
1678	const uint8_t *macaddr;
1679	MWL_HAL_KEYVAL hk;
1680
1681	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
1682		("s/w crypto set?"));
1683
1684	if (hvap == NULL) {
1685		if (vap->iv_opmode != IEEE80211_M_WDS) {
1686			/* XXX monitor mode? */
1687			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1688			    "%s: no hvap for opmode %d\n", __func__,
1689			    vap->iv_opmode);
1690			return 0;
1691		}
1692		hvap = MWL_VAP(vap)->mv_ap_hvap;
1693	}
1694	memset(&hk, 0, sizeof(hk));
1695	hk.keyIndex = k->wk_keyix;
1696	switch (cip->ic_cipher) {
1697	case IEEE80211_CIPHER_WEP:
1698		hk.keyTypeId = KEY_TYPE_ID_WEP;
1699		hk.keyLen = k->wk_keylen;
1700		if (k->wk_keyix == vap->iv_def_txkey)
1701			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
1702		if (!IEEE80211_IS_STATICKEY(k)) {
1703			/* NB: WEP is never used for the PTK */
1704			(void) addgroupflags(&hk, k);
1705		}
1706		break;
1707	case IEEE80211_CIPHER_TKIP:
1708		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1709		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
1710		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
1711		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
1712		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
1713		if (!addgroupflags(&hk, k))
1714			hk.keyFlags |= KEY_FLAG_PAIRWISE;
1715		break;
1716	case IEEE80211_CIPHER_AES_CCM:
1717		hk.keyTypeId = KEY_TYPE_ID_AES;
1718		hk.keyLen = k->wk_keylen;
1719		if (!addgroupflags(&hk, k))
1720			hk.keyFlags |= KEY_FLAG_PAIRWISE;
1721		break;
1722	default:
1723		/* XXX should not happen */
1724		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1725		    __func__, k->wk_cipher->ic_cipher);
1726		return 0;
1727	}
1728	/*
1729	 * NB: tkip mic keys get copied here too; the layout
1730	 *     just happens to match that in ieee80211_key.
1731	 */
1732	memcpy(hk.key.aes, k->wk_key, hk.keyLen);
1733
1734	/*
1735	 * Locate address of sta db entry for writing key;
1736	 * the convention unfortunately is somewhat different
1737	 * than how net80211, hostapd, and wpa_supplicant think.
1738	 */
1739	if (vap->iv_opmode == IEEE80211_M_STA) {
1740		/*
1741		 * NB: keys plumbed before the sta reaches AUTH state
1742		 * will be discarded or written to the wrong sta db
1743		 * entry because iv_bss is meaningless.  This is ok
1744		 * (right now) because we handle deferred plumbing of
1745		 * WEP keys when the sta reaches AUTH state.
1746		 */
1747		macaddr = vap->iv_bss->ni_bssid;
1748		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
1749			/* XXX plumb to local sta db too for static key wep */
1750			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
1751		}
1752	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
1753	    vap->iv_state != IEEE80211_S_RUN) {
1754		/*
1755		 * Prior to RUN state a WDS vap will not it's BSS node
1756		 * setup so we will plumb the key to the wrong mac
1757		 * address (it'll be our local address).  Workaround
1758		 * this for the moment by grabbing the correct address.
1759		 */
1760		macaddr = vap->iv_des_bssid;
1761	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
1762		macaddr = vap->iv_myaddr;
1763	else
1764		macaddr = mac;
1765	KEYPRINTF(sc, &hk, macaddr);
1766	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
1767#undef IEEE80211_IS_STATICKEY
1768#undef GRPXMIT
1769}
1770
/*
 * Unaligned little endian access: assemble 16/32-bit values
 * byte-by-byte so the reads are safe on strict-alignment CPUs.
 * NB: the argument is evaluated multiple times; pass only
 * side-effect-free pointer expressions.
 */
#define LE_READ_2(p)				\
	((uint16_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8)))
#define LE_READ_4(p)				\
	((uint32_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8) |	\
	  (((const uint8_t *)(p))[2] << 16) |	\
	  (((const uint8_t *)(p))[3] << 24)))
1782
1783/*
1784 * Set the multicast filter contents into the hardware.
1785 * XXX f/w has no support; just defer to the os.
1786 */
1787static void
1788mwl_setmcastfilter(struct mwl_softc *sc)
1789{
1790	struct ifnet *ifp = sc->sc_ifp;
1791#if 0
1792	struct ether_multi *enm;
1793	struct ether_multistep estep;
1794	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
1795	uint8_t *mp;
1796	int nmc;
1797
1798	mp = macs;
1799	nmc = 0;
1800	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
1801	while (enm != NULL) {
1802		/* XXX Punt on ranges. */
1803		if (nmc == MWL_HAL_MCAST_MAX ||
1804		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
1805			ifp->if_flags |= IFF_ALLMULTI;
1806			return;
1807		}
1808		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
1809		mp += IEEE80211_ADDR_LEN, nmc++;
1810		ETHER_NEXT_MULTI(estep, enm);
1811	}
1812	ifp->if_flags &= ~IFF_ALLMULTI;
1813	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
1814#else
1815	/* XXX no mcast filter support; we get everything */
1816	ifp->if_flags |= IFF_ALLMULTI;
1817#endif
1818}
1819
1820static int
1821mwl_mode_init(struct mwl_softc *sc)
1822{
1823	struct ifnet *ifp = sc->sc_ifp;
1824	struct ieee80211com *ic = ifp->if_l2com;
1825	struct mwl_hal *mh = sc->sc_mh;
1826
1827	/*
1828	 * NB: Ignore promisc in hostap mode; it's set by the
1829	 * bridge.  This is wrong but we have no way to
1830	 * identify internal requests (from the bridge)
1831	 * versus external requests such as for tcpdump.
1832	 */
1833	mwl_hal_setpromisc(mh, (ifp->if_flags & IFF_PROMISC) &&
1834	    ic->ic_opmode != IEEE80211_M_HOSTAP);
1835	mwl_setmcastfilter(sc);
1836
1837	return 0;
1838}
1839
1840/*
1841 * Callback from the 802.11 layer after a multicast state change.
1842 */
1843static void
1844mwl_update_mcast(struct ifnet *ifp)
1845{
1846	struct mwl_softc *sc = ifp->if_softc;
1847
1848	mwl_setmcastfilter(sc);
1849}
1850
1851/*
1852 * Callback from the 802.11 layer after a promiscuous mode change.
1853 * Note this interface does not check the operating mode as this
1854 * is an internal callback and we are expected to honor the current
1855 * state (e.g. this is used for setting the interface in promiscuous
1856 * mode when operating in hostap mode to do ACS).
1857 */
1858static void
1859mwl_update_promisc(struct ifnet *ifp)
1860{
1861	struct mwl_softc *sc = ifp->if_softc;
1862
1863	mwl_hal_setpromisc(sc->sc_mh, (ifp->if_flags & IFF_PROMISC) != 0);
1864}
1865
1866/*
1867 * Callback from the 802.11 layer to update the slot time
1868 * based on the current setting.  We use it to notify the
1869 * firmware of ERP changes and the f/w takes care of things
1870 * like slot time and preamble.
1871 */
1872static void
1873mwl_updateslot(struct ifnet *ifp)
1874{
1875	struct mwl_softc *sc = ifp->if_softc;
1876	struct ieee80211com *ic = ifp->if_l2com;
1877	struct mwl_hal *mh = sc->sc_mh;
1878	int prot;
1879
1880	/* NB: can be called early; suppress needless cmds */
1881	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1882		return;
1883
1884	/*
1885	 * Calculate the ERP flags.  The firwmare will use
1886	 * this to carry out the appropriate measures.
1887	 */
1888	prot = 0;
1889	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
1890		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
1891			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
1892		if (ic->ic_flags & IEEE80211_F_USEPROT)
1893			prot |= IEEE80211_ERP_USE_PROTECTION;
1894		if (ic->ic_flags & IEEE80211_F_USEBARKER)
1895			prot |= IEEE80211_ERP_LONG_PREAMBLE;
1896	}
1897
1898	DPRINTF(sc, MWL_DEBUG_RESET,
1899	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
1900	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
1901	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
1902	    ic->ic_flags);
1903
1904	mwl_hal_setgprot(mh, prot);
1905}
1906
1907/*
1908 * Setup the beacon frame.
1909 */
1910static int
1911mwl_beacon_setup(struct ieee80211vap *vap)
1912{
1913	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1914	struct ieee80211_node *ni = vap->iv_bss;
1915	struct ieee80211_beacon_offsets bo;
1916	struct mbuf *m;
1917
1918	m = ieee80211_beacon_alloc(ni, &bo);
1919	if (m == NULL)
1920		return ENOBUFS;
1921	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1922	m_free(m);
1923
1924	return 0;
1925}
1926
1927/*
1928 * Update the beacon frame in response to a change.
1929 */
1930static void
1931mwl_beacon_update(struct ieee80211vap *vap, int item)
1932{
1933	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1934	struct ieee80211com *ic = vap->iv_ic;
1935
1936	KASSERT(hvap != NULL, ("no beacon"));
1937	switch (item) {
1938	case IEEE80211_BEACON_ERP:
1939		mwl_updateslot(ic->ic_ifp);
1940		break;
1941	case IEEE80211_BEACON_HTINFO:
1942		mwl_hal_setnprotmode(hvap,
1943		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1944		break;
1945	case IEEE80211_BEACON_CAPS:
1946	case IEEE80211_BEACON_WME:
1947	case IEEE80211_BEACON_APPIE:
1948	case IEEE80211_BEACON_CSA:
1949		break;
1950	case IEEE80211_BEACON_TIM:
1951		/* NB: firmware always forms TIM */
1952		return;
1953	}
1954	/* XXX retain beacon frame and update */
1955	mwl_beacon_setup(vap);
1956}
1957
1958static void
1959mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1960{
1961	bus_addr_t *paddr = (bus_addr_t*) arg;
1962	KASSERT(error == 0, ("error %u on bus_dma callback", error));
1963	*paddr = segs->ds_addr;
1964}
1965
1966#ifdef MWL_HOST_PS_SUPPORT
1967/*
1968 * Handle power save station occupancy changes.
1969 */
1970static void
1971mwl_update_ps(struct ieee80211vap *vap, int nsta)
1972{
1973	struct mwl_vap *mvp = MWL_VAP(vap);
1974
1975	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
1976		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
1977	mvp->mv_last_ps_sta = nsta;
1978}
1979
1980/*
1981 * Handle associated station power save state changes.
1982 */
1983static int
1984mwl_set_tim(struct ieee80211_node *ni, int set)
1985{
1986	struct ieee80211vap *vap = ni->ni_vap;
1987	struct mwl_vap *mvp = MWL_VAP(vap);
1988
1989	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1990		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1991		    IEEE80211_AID(ni->ni_associd), set);
1992		return 1;
1993	} else
1994		return 0;
1995}
1996#endif /* MWL_HOST_PS_SUPPORT */
1997
1998static int
1999mwl_desc_setup(struct mwl_softc *sc, const char *name,
2000	struct mwl_descdma *dd,
2001	int nbuf, size_t bufsize, int ndesc, size_t descsize)
2002{
2003	struct ifnet *ifp = sc->sc_ifp;
2004	uint8_t *ds;
2005	int error;
2006
2007	DPRINTF(sc, MWL_DEBUG_RESET,
2008	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
2009	    __func__, name, nbuf, (uintmax_t) bufsize,
2010	    ndesc, (uintmax_t) descsize);
2011
2012	dd->dd_name = name;
2013	dd->dd_desc_len = nbuf * ndesc * descsize;
2014
2015	/*
2016	 * Setup DMA descriptor area.
2017	 */
2018	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
2019		       PAGE_SIZE, 0,		/* alignment, bounds */
2020		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2021		       BUS_SPACE_MAXADDR,	/* highaddr */
2022		       NULL, NULL,		/* filter, filterarg */
2023		       dd->dd_desc_len,		/* maxsize */
2024		       1,			/* nsegments */
2025		       dd->dd_desc_len,		/* maxsegsize */
2026		       BUS_DMA_ALLOCNOW,	/* flags */
2027		       NULL,			/* lockfunc */
2028		       NULL,			/* lockarg */
2029		       &dd->dd_dmat);
2030	if (error != 0) {
2031		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
2032		return error;
2033	}
2034
2035	/* allocate descriptors */
2036	error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
2037	if (error != 0) {
2038		if_printf(ifp, "unable to create dmamap for %s descriptors, "
2039			"error %u\n", dd->dd_name, error);
2040		goto fail0;
2041	}
2042
2043	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
2044				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2045				 &dd->dd_dmamap);
2046	if (error != 0) {
2047		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
2048			"error %u\n", nbuf * ndesc, dd->dd_name, error);
2049		goto fail1;
2050	}
2051
2052	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
2053				dd->dd_desc, dd->dd_desc_len,
2054				mwl_load_cb, &dd->dd_desc_paddr,
2055				BUS_DMA_NOWAIT);
2056	if (error != 0) {
2057		if_printf(ifp, "unable to map %s descriptors, error %u\n",
2058			dd->dd_name, error);
2059		goto fail2;
2060	}
2061
2062	ds = dd->dd_desc;
2063	memset(ds, 0, dd->dd_desc_len);
2064	DPRINTF(sc, MWL_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
2065	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
2066	    (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
2067
2068	return 0;
2069fail2:
2070	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
2071fail1:
2072	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
2073fail0:
2074	bus_dma_tag_destroy(dd->dd_dmat);
2075	memset(dd, 0, sizeof(*dd));
2076	return error;
2077#undef DS2PHYS
2078}
2079
/*
 * Undo mwl_desc_setup: unload the map, free the descriptor memory,
 * and destroy the map and tag, then clear the bookkeeping structure
 * so dd_desc_len == 0 marks the area as torn down.
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	memset(dd, 0, sizeof(*dd));
}
2090
2091/*
2092 * Construct a tx q's free list.  The order of entries on
2093 * the list must reflect the physical layout of tx descriptors
2094 * because the firmware pre-fetches descriptors.
2095 *
2096 * XXX might be better to use indices into the buffer array.
2097 */
2098static void
2099mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2100{
2101	struct mwl_txbuf *bf;
2102	int i;
2103
2104	bf = txq->dma.dd_bufptr;
2105	STAILQ_INIT(&txq->free);
2106	for (i = 0; i < mwl_txbuf; i++, bf++)
2107		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2108	txq->nfree = i;
2109}
2110
2111#define	DS2PHYS(_dd, _ds) \
2112	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2113
/*
 * Allocate tx state for a queue: the shared descriptor block (via
 * mwl_desc_setup), an array of mwl_txbuf's each fronting MWL_TXDESC
 * hardware descriptors, and a per-buffer dmamap for loading mbufs.
 * On partial failure the caller is expected to reclaim state via
 * mwl_txdma_cleanup (see mwl_dma_setup/mwl_dma_cleanup).
 */
static int
mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ifnet *ifp = sc->sc_ifp;
	int error, bsize, i;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;

	error = mwl_desc_setup(sc, "tx", &txq->dma,
			mwl_txbuf, sizeof(struct mwl_txbuf),
			MWL_TXDESC, sizeof(struct mwl_txdesc));
	if (error != 0)
		return error;

	/* allocate and setup tx buffers */
	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
	if (bf == NULL) {
		if_printf(ifp, "malloc of %u tx buffers failed\n",
			mwl_txbuf);
		return ENOMEM;
	}
	txq->dma.dd_bufptr = bf;

	/* point each buffer at its slice of the descriptor block */
	ds = txq->dma.dd_desc;
	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
				&bf->bf_dmamap);
		if (error != 0) {
			if_printf(ifp, "unable to create dmamap for tx "
				"buffer %u, error %u\n", i, error);
			return error;
		}
	}
	mwl_txq_reset(sc, txq);
	return 0;
}
2153
2154static void
2155mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2156{
2157	struct mwl_txbuf *bf;
2158	int i;
2159
2160	bf = txq->dma.dd_bufptr;
2161	for (i = 0; i < mwl_txbuf; i++, bf++) {
2162		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2163		KASSERT(bf->bf_node == NULL, ("node on free list"));
2164		if (bf->bf_dmamap != NULL)
2165			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2166	}
2167	STAILQ_INIT(&txq->free);
2168	txq->nfree = 0;
2169	if (txq->dma.dd_bufptr != NULL) {
2170		free(txq->dma.dd_bufptr, M_MWLDEV);
2171		txq->dma.dd_bufptr = NULL;
2172	}
2173	if (txq->dma.dd_desc_len != 0)
2174		mwl_desc_cleanup(sc, &txq->dma);
2175}
2176
2177static int
2178mwl_rxdma_setup(struct mwl_softc *sc)
2179{
2180	struct ifnet *ifp = sc->sc_ifp;
2181	int error, jumbosize, bsize, i;
2182	struct mwl_rxbuf *bf;
2183	struct mwl_jumbo *rbuf;
2184	struct mwl_rxdesc *ds;
2185	caddr_t data;
2186
2187	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2188			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2189			1, sizeof(struct mwl_rxdesc));
2190	if (error != 0)
2191		return error;
2192
2193	/*
2194	 * Receive is done to a private pool of jumbo buffers.
2195	 * This allows us to attach to mbuf's and avoid re-mapping
2196	 * memory on each rx we post.  We allocate a large chunk
2197	 * of memory and manage it in the driver.  The mbuf free
2198	 * callback method is used to reclaim frames after sending
2199	 * them up the stack.  By default we allocate 2x the number of
2200	 * rx descriptors configured so we have some slop to hold
2201	 * us while frames are processed.
2202	 */
2203	if (mwl_rxbuf < 2*mwl_rxdesc) {
2204		if_printf(ifp,
2205		    "too few rx dma buffers (%d); increasing to %d\n",
2206		    mwl_rxbuf, 2*mwl_rxdesc);
2207		mwl_rxbuf = 2*mwl_rxdesc;
2208	}
2209	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2210	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2211
2212	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2213		       PAGE_SIZE, 0,		/* alignment, bounds */
2214		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2215		       BUS_SPACE_MAXADDR,	/* highaddr */
2216		       NULL, NULL,		/* filter, filterarg */
2217		       sc->sc_rxmemsize,	/* maxsize */
2218		       1,			/* nsegments */
2219		       sc->sc_rxmemsize,	/* maxsegsize */
2220		       BUS_DMA_ALLOCNOW,	/* flags */
2221		       NULL,			/* lockfunc */
2222		       NULL,			/* lockarg */
2223		       &sc->sc_rxdmat);
2224	error = bus_dmamap_create(sc->sc_rxdmat, BUS_DMA_NOWAIT, &sc->sc_rxmap);
2225	if (error != 0) {
2226		if_printf(ifp, "could not create rx DMA map\n");
2227		return error;
2228	}
2229
2230	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2231				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2232				 &sc->sc_rxmap);
2233	if (error != 0) {
2234		if_printf(ifp, "could not alloc %ju bytes of rx DMA memory\n",
2235		    (uintmax_t) sc->sc_rxmemsize);
2236		return error;
2237	}
2238
2239	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2240				sc->sc_rxmem, sc->sc_rxmemsize,
2241				mwl_load_cb, &sc->sc_rxmem_paddr,
2242				BUS_DMA_NOWAIT);
2243	if (error != 0) {
2244		if_printf(ifp, "could not load rx DMA map\n");
2245		return error;
2246	}
2247
2248	/*
2249	 * Allocate rx buffers and set them up.
2250	 */
2251	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2252	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2253	if (bf == NULL) {
2254		if_printf(ifp, "malloc of %u rx buffers failed\n", bsize);
2255		return error;
2256	}
2257	sc->sc_rxdma.dd_bufptr = bf;
2258
2259	STAILQ_INIT(&sc->sc_rxbuf);
2260	ds = sc->sc_rxdma.dd_desc;
2261	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2262		bf->bf_desc = ds;
2263		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2264		/* pre-assign dma buffer */
2265		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2266		/* NB: tail is intentional to preserve descriptor order */
2267		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2268	}
2269
2270	/*
2271	 * Place remainder of dma memory buffers on the free list.
2272	 */
2273	SLIST_INIT(&sc->sc_rxfree);
2274	for (; i < mwl_rxbuf; i++) {
2275		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2276		rbuf = MWL_JUMBO_DATA2BUF(data);
2277		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2278		sc->sc_nrxfree++;
2279	}
2280	MWL_RXFREE_INIT(sc);
2281	return 0;
2282}
2283#undef DS2PHYS
2284
/*
 * Undo mwl_rxdma_setup: release the jumbo buffer region, the rx
 * buffer array, the descriptor ring, and the free-pool lock.  Each
 * step is guarded so partially-initialized state is handled.
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	if (sc->sc_rxmap != NULL)
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxmap != NULL) {
		bus_dmamap_destroy(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmap = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	/* NB: nonzero desc_len marks a completed mwl_desc_setup */
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
	MWL_RXFREE_DESTROY(sc);
}
2306
2307static int
2308mwl_dma_setup(struct mwl_softc *sc)
2309{
2310	int error, i;
2311
2312	error = mwl_rxdma_setup(sc);
2313	if (error != 0) {
2314		mwl_rxdma_cleanup(sc);
2315		return error;
2316	}
2317
2318	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2319		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2320		if (error != 0) {
2321			mwl_dma_cleanup(sc);
2322			return error;
2323		}
2324	}
2325	return 0;
2326}
2327
2328static void
2329mwl_dma_cleanup(struct mwl_softc *sc)
2330{
2331	int i;
2332
2333	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2334		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2335	mwl_rxdma_cleanup(sc);
2336}
2337
2338static struct ieee80211_node *
2339mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2340{
2341	struct ieee80211com *ic = vap->iv_ic;
2342	struct mwl_softc *sc = ic->ic_ifp->if_softc;
2343	const size_t space = sizeof(struct mwl_node);
2344	struct mwl_node *mn;
2345
2346	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2347	if (mn == NULL) {
2348		/* XXX stat+msg */
2349		return NULL;
2350	}
2351	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2352	return &mn->mn_node;
2353}
2354
/*
 * net80211 node cleanup override: if the node holds a firmware
 * station id, delete the firmware's station db entry through the
 * appropriate hal vap and release the id, then chain to the saved
 * net80211 cleanup method.
 */
static void
mwl_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
        struct mwl_softc *sc = ic->ic_ifp->if_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
	    __func__, ni, ni->ni_ic, mn->mn_staid);

	if (mn->mn_staid != 0) {
		struct ieee80211vap *vap = ni->ni_vap;

		if (mn->mn_hvap != NULL) {
			/* sta mode entries are keyed by our own address */
			if (vap->iv_opmode == IEEE80211_M_STA)
				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
			else
				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
		}
		/*
		 * NB: legacy WDS peer sta db entry is installed using
		 * the associate ap's hvap; use it again to delete it.
		 * XXX can vap be NULL?
		 */
		else if (vap->iv_opmode == IEEE80211_M_WDS &&
		    MWL_VAP(vap)->mv_ap_hvap != NULL)
			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
			    ni->ni_macaddr);
		delstaid(sc, mn->mn_staid);
		mn->mn_staid = 0;
	}
	sc->sc_node_cleanup(ni);
}
2388
2389/*
2390 * Reclaim rx dma buffers from packets sitting on the ampdu
2391 * reorder queue for a station.  We replace buffers with a
2392 * system cluster (if available).
2393 */
static void
mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
{
	/*
	 * NB: the entire implementation is compiled out (#if 0); it
	 * appears to use non-FreeBSD (NetBSD-style) mbuf/pool APIs
	 * (pool_cache_get_paddr, MEXTREMOVE) — TODO: port or remove.
	 * As it stands this function is a no-op.
	 */
#if 0
	int i, n, off;
	struct mbuf *m;
	void *cl;

	n = rap->rxa_qframes;
	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
		m = rap->rxa_m[i];
		if (m == NULL)
			continue;
		n--;
		/* our dma buffers have a well-known free routine */
		if ((m->m_flags & M_EXT) == 0 ||
		    m->m_ext.ext_free != mwl_ext_free)
			continue;
		/*
		 * Try to allocate a cluster and move the data.
		 */
		off = m->m_data - m->m_ext.ext_buf;
		if (off + m->m_pkthdr.len > MCLBYTES) {
			/* XXX no AMSDU for now */
			continue;
		}
		cl = pool_cache_get_paddr(&mclpool_cache, 0,
		    &m->m_ext.ext_paddr);
		if (cl != NULL) {
			/*
			 * Copy the existing data to the cluster, remove
			 * the rx dma buffer, and attach the cluster in
			 * its place.  Note we preserve the offset to the
			 * data so frames being bridged can still prepend
			 * their headers without adding another mbuf.
			 */
			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
			MEXTREMOVE(m);
			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
			/* setup mbuf like _MCLGET does */
			m->m_flags |= M_CLUSTER | M_EXT_RW;
			_MOWNERREF(m, M_EXT | M_CLUSTER);
			/* NB: m_data is clobbered by MEXTADDR, adjust */
			m->m_data += off;
		}
	}
#endif
}
2442
2443/*
2444 * Callback to reclaim resources.  We first let the
 * net80211 layer do its thing, then if we are still
2446 * blocked by a lack of rx dma buffers we walk the ampdu
2447 * reorder q's to reclaim buffers by copying to a system
2448 * cluster.
2449 */
static void
mwl_node_drain(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
        struct mwl_softc *sc = ic->ic_ifp->if_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
	    __func__, ni, ni->ni_vap, mn->mn_staid);

	/* NB: call up first to age out ampdu q's */
	sc->sc_node_drain(ni);

	/* XXX better to not check low water mark? */
	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
	    (ni->ni_flags & IEEE80211_NODE_HT)) {
		uint8_t tid;
		/*
		 * Walk the reorder q and reclaim rx dma buffers by copying
		 * the packet contents into clusters.
		 */
		for (tid = 0; tid < WME_NUM_TID; tid++) {
			struct ieee80211_rx_ampdu *rap;

			rap = &ni->ni_rx_ampdu[tid];
			/* only tids with an active BA exchange */
			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
				continue;
			if (rap->rxa_qframes)
				mwl_ampdu_rxdma_reclaim(rap);
		}
	}
}
2482
/*
 * Return signal state for a station: rssi from net80211's tracked
 * value; noise is a fixed -95 dBm since no live noise floor
 * measurement is available (see XXX notes).
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2498
2499/*
2500 * Convert Hardware per-antenna rssi info to common format:
2501 * Let a1, a2, a3 represent the amplitudes per chain
2502 * Let amax represent max[a1, a2, a3]
2503 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2504 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2505 * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2506 * maintain some extra precision.
2507 *
2508 * Values are stored in .5 db format capped at 127.
2509 */
/*
 * Fill in per-chain rssi/noise for net80211.  See the block comment
 * above for the math; CVT converts one chain's amplitude to .5 dB
 * units relative to the strongest chain, capped at 127.
 * NB: CVT evaluates its arguments more than once.
 */
static void
mwl_node_getmimoinfo(const struct ieee80211_node *ni,
	struct ieee80211_mimo_info *mi)
{
#define	CVT(_dst, _src) do {						\
	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
} while (0)
	/* 4*20*log10(idx) table (scaled for extra precision) */
	static const int8_t logdbtbl[32] = {
	       0,   0,  24,  38,  48,  56,  62,  68,
	      72,  76,  80,  83,  86,  89,  92,  94,
	      96,  98, 100, 102, 104, 106, 107, 109,
	     110, 112, 113, 115, 116, 117, 118, 119
	};
	const struct mwl_node *mn = MWL_NODE_CONST(ni);
	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
	uint32_t rssi_max;

	/* find the strongest chain; it anchors the relative values */
	rssi_max = mn->mn_ai.rssi_a;
	if (mn->mn_ai.rssi_b > rssi_max)
		rssi_max = mn->mn_ai.rssi_b;
	if (mn->mn_ai.rssi_c > rssi_max)
		rssi_max = mn->mn_ai.rssi_c;

	CVT(mi->rssi[0], mn->mn_ai.rssi_a);
	CVT(mi->rssi[1], mn->mn_ai.rssi_b);
	CVT(mi->rssi[2], mn->mn_ai.rssi_c);

	mi->noise[0] = mn->mn_ai.nf_a;
	mi->noise[1] = mn->mn_ai.nf_b;
	mi->noise[2] = mn->mn_ai.nf_c;
#undef CVT
}
2543
2544static __inline void *
2545mwl_getrxdma(struct mwl_softc *sc)
2546{
2547	struct mwl_jumbo *buf;
2548	void *data;
2549
2550	/*
2551	 * Allocate from jumbo pool.
2552	 */
2553	MWL_RXFREE_LOCK(sc);
2554	buf = SLIST_FIRST(&sc->sc_rxfree);
2555	if (buf == NULL) {
2556		DPRINTF(sc, MWL_DEBUG_ANY,
2557		    "%s: out of rx dma buffers\n", __func__);
2558		sc->sc_stats.mst_rx_nodmabuf++;
2559		data = NULL;
2560	} else {
2561		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2562		sc->sc_nrxfree--;
2563		data = MWL_JUMBO_BUF2DATA(buf);
2564	}
2565	MWL_RXFREE_UNLOCK(sc);
2566	return data;
2567}
2568
2569static __inline void
2570mwl_putrxdma(struct mwl_softc *sc, void *data)
2571{
2572	struct mwl_jumbo *buf;
2573
2574	/* XXX bounds check data */
2575	MWL_RXFREE_LOCK(sc);
2576	buf = MWL_JUMBO_DATA2BUF(data);
2577	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2578	sc->sc_nrxfree++;
2579	MWL_RXFREE_UNLOCK(sc);
2580}
2581
/*
 * (Re)initialize an rx descriptor for the firmware: attach a dma
 * buffer if the slot lost its previous one, reset the descriptor
 * fields, and hand ownership back to the hardware.  Returns ENOMEM
 * when no dma buffer is available; the descriptor is then marked
 * so the firmware skips it until we can repopulate it.
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2620
/*
 * mbuf external-storage free callback: return the rx dma buffer to
 * the jumbo pool and, if rx processing was throttled for lack of
 * buffers, re-enable rx interrupts once the pool refills past
 * mwl_rxdmalow.
 */
static void
mwl_ext_free(void *data, void *arg)
{
	struct mwl_softc *sc = arg;

	/* XXX bounds check data */
	mwl_putrxdma(sc, data);
	/*
	 * If we were previously blocked by a lack of rx dma buffers
	 * check if we now have enough to restart rx interrupt handling.
	 * NB: we know we are called at splvm which is above splnet.
	 */
	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
		sc->sc_rxblocked = 0;
		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
	}
}
2638
/*
 * 802.11 BlockAckReq (BAR) frame header layout; used by
 * mwl_anyhdrsize() below to size BAR control frames.
 */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];	/* frame control */
	u_int8_t	i_dur[2];	/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2646
2647/*
2648 * Like ieee80211_anyhdrsize, but handles BAR frames
2649 * specially so the logic below to piece the 802.11
2650 * header together works.
2651 */
2652static __inline int
2653mwl_anyhdrsize(const void *data)
2654{
2655	const struct ieee80211_frame *wh = data;
2656
2657	if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2658		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2659		case IEEE80211_FC0_SUBTYPE_CTS:
2660		case IEEE80211_FC0_SUBTYPE_ACK:
2661			return sizeof(struct ieee80211_frame_ack);
2662		case IEEE80211_FC0_SUBTYPE_BAR:
2663			return sizeof(struct mwl_frame_bar);
2664		}
2665		return sizeof(struct ieee80211_frame_min);
2666	} else
2667		return ieee80211_hdrsize(data);
2668}
2669
2670static void
2671mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2672{
2673	const struct ieee80211_frame *wh;
2674	struct ieee80211_node *ni;
2675
2676	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2677	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2678	if (ni != NULL) {
2679		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2680		ieee80211_free_node(ni);
2681	}
2682}
2683
2684/*
2685 * Convert hardware signal strength to rssi.  The value
2686 * provided by the device has the noise floor added in;
2687 * we need to compensate for this but we don't have that
2688 * so we use a fixed value.
2689 *
2690 * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2691 * offset is already set as part of the initial gain.  This
2692 * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2693 */
/*
 * Convert hardware signal strength to an rssi value in .5 dBm
 * units, clamped to [0, 127].  The device's value includes the
 * noise floor; lacking a measured floor we compensate with fixed
 * constants (the +8 offset works for both 2.4 and 5GHz bands).
 */
static __inline int
cvtrssi(uint8_t ssi)
{
	/* XXX hack guess until we have a real noise floor */
	int v = 2 * (87 - ((int) ssi + 8));	/* NB: .5 dBm units */

	if (v < 0)
		return 0;
	if (v > 127)
		return 127;
	return v;
}
2702
/*
 * Rx processing task (runs outside the interrupt handler).  Walks the
 * rx descriptor ring, handling up to mwl_rxquota entries per call:
 * each received frame's dma buffer is attached to an mbuf as external
 * storage (a replacement dma buffer is installed in the descriptor),
 * the 802.11 header is reconstructed in front of the payload, and the
 * frame is dispatched to net80211.  When the jumbo pool runs dry, rx
 * interrupts are disabled until buffers drain back (see mwl_ext_free).
 */
static void
mwl_rx_proc(void *arg, int npending)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((((const struct ieee80211_frame *)wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct mwl_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_qosframe_addr4 *wh4;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* stop at the first descriptor still owned by the dma engine */
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			ifp->if_ierrors++;
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mwl_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free,
		    data, sc, 0, EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		m->m_pkthdr.rcvif = ifp;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh)) {
			if (IEEE80211_DIR_DSTODS(wh)) {
				wh4 = mtod(m,
				    struct ieee80211_qosframe_addr4*);
				*(uint16_t *)wh4->i_qos = ds->QosCtrl;
			} else {
				*(uint16_t *)wh->i_qos = ds->QosCtrl;
			}
		}
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_WEP | IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		ifp->if_ipackets++;

		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	sc->sc_rxnext = bf;

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
	    !IFQ_IS_EMPTY(&ifp->if_snd)) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(ifp);
	}
#undef IEEE80211_DIR_DSTODS
}
2938
/*
 * Initialize a tx queue: set up its lock, link each buffer's
 * descriptor to the next buffer's via pPhysNext (wrapping at the
 * tail so the chain is circular, matching free-list order), and
 * reset the active list.
 */
static void
mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
{
	struct mwl_txbuf *bf, *bn;
	struct mwl_txdesc *ds;

	MWL_TXQ_LOCK_INIT(sc, txq);
	txq->qnum = qnum;
	txq->txpri = 0;	/* XXX */
#if 0
	/* NB: q setup by mwl_txdma_setup XXX */
	STAILQ_INIT(&txq->free);
#endif
	STAILQ_FOREACH(bf, &txq->free, bf_list) {
		bf->bf_txq = txq;

		ds = bf->bf_desc;
		bn = STAILQ_NEXT(bf, bf_list);
		if (bn == NULL)
			bn = STAILQ_FIRST(&txq->free);	/* wrap to head */
		ds->pPhysNext = htole32(bn->bf_daddr);
	}
	STAILQ_INIT(&txq->active);
}
2963
2964/*
2965 * Setup a hardware data transmit queue for the specified
2966 * access control.  We record the mapping from ac's
2967 * to h/w queues for use by mwl_tx_start.
2968 */
2969static int
2970mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2971{
2972#define	N(a)	(sizeof(a)/sizeof(a[0]))
2973	struct mwl_txq *txq;
2974
2975	if (ac >= N(sc->sc_ac2q)) {
2976		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2977			ac, N(sc->sc_ac2q));
2978		return 0;
2979	}
2980	if (mvtype >= MWL_NUM_TX_QUEUES) {
2981		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2982			mvtype, MWL_NUM_TX_QUEUES);
2983		return 0;
2984	}
2985	txq = &sc->sc_txq[mvtype];
2986	mwl_txq_init(sc, txq, mvtype);
2987	sc->sc_ac2q[ac] = txq;
2988	return 1;
2989#undef N
2990}
2991
2992/*
2993 * Update WME parameters for a transmit queue.
2994 */
2995static int
2996mwl_txq_update(struct mwl_softc *sc, int ac)
2997{
2998#define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
2999	struct ifnet *ifp = sc->sc_ifp;
3000	struct ieee80211com *ic = ifp->if_l2com;
3001	struct mwl_txq *txq = sc->sc_ac2q[ac];
3002	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
3003	struct mwl_hal *mh = sc->sc_mh;
3004	int aifs, cwmin, cwmax, txoplim;
3005
3006	aifs = wmep->wmep_aifsn;
3007	/* XXX in sta mode need to pass log values for cwmin/max */
3008	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
3009	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
3010	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */
3011
3012	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
3013		device_printf(sc->sc_dev, "unable to update hardware queue "
3014			"parameters for %s traffic!\n",
3015			ieee80211_wme_acnames[ac]);
3016		return 0;
3017	}
3018	return 1;
3019#undef MWL_EXPONENT_TO_VALUE
3020}
3021
3022/*
3023 * Callback from the 802.11 layer to update WME parameters.
3024 */
3025static int
3026mwl_wme_update(struct ieee80211com *ic)
3027{
3028	struct mwl_softc *sc = ic->ic_ifp->if_softc;
3029
3030	return !mwl_txq_update(sc, WME_AC_BE) ||
3031	    !mwl_txq_update(sc, WME_AC_BK) ||
3032	    !mwl_txq_update(sc, WME_AC_VI) ||
3033	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
3034}
3035
3036/*
3037 * Reclaim resources for a setup queue.
3038 */
3039static void
3040mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
3041{
3042	/* XXX hal work? */
3043	MWL_TXQ_LOCK_DESTROY(txq);
3044}
3045
3046/*
3047 * Reclaim all tx queue resources.
3048 */
3049static void
3050mwl_tx_cleanup(struct mwl_softc *sc)
3051{
3052	int i;
3053
3054	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3055		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
3056}
3057
/*
 * Map an mbuf chain for DMA, coalescing it to fit in at most
 * MWL_TXDESC segments.  On success the mapped segments are left
 * in bf->bf_segs/bf->bf_nseg and the (possibly replaced) mbuf is
 * recorded in bf->bf_m; returns 0.  On failure the mbuf chain is
 * freed and an errno value is returned.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = MWL_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_DONTWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_DONTWAIT);
#endif
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* retry the load now that the chain is compacted */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	/* flush CPU writes before the device reads the frame */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3119
/*
 * Convert a legacy rate in net80211 units (512kb/s) to the
 * rate index used in the tx descriptor Format field.
 * Unknown rates map to index 0 (1Mb/s).
 */
static __inline int
mwl_cvtlegacyrate(int rate)
{
	static const int legacyrates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	int ix;

	for (ix = 0; ix < (int)(sizeof(legacyrates)/sizeof(legacyrates[0])); ix++)
		if (legacyrates[ix] == rate)
			return ix;
	return 0;
}
3140
3141/*
3142 * Calculate fixed tx rate information per client state;
3143 * this value is suitable for writing to the Format field
3144 * of a tx descriptor.
3145 */
3146static uint16_t
3147mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
3148{
3149	uint16_t fmt;
3150
3151	fmt = SM(3, EAGLE_TXD_ANTENNA)
3152	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
3153		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
3154	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
3155		fmt |= EAGLE_TXD_FORMAT_HT
3156		    /* NB: 0x80 implicitly stripped from ucastrate */
3157		    | SM(rate, EAGLE_TXD_RATE);
3158		/* XXX short/long GI may be wrong; re-check */
3159		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
3160			fmt |= EAGLE_TXD_CHW_40
3161			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
3162			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3163		} else {
3164			fmt |= EAGLE_TXD_CHW_20
3165			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
3166			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3167		}
3168	} else {			/* legacy rate */
3169		fmt |= EAGLE_TXD_FORMAT_LEGACY
3170		    | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
3171		    | EAGLE_TXD_CHW_20
3172		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
3173		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
3174			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
3175	}
3176	return fmt;
3177}
3178
/*
 * Prepare and queue a frame for transmit: optionally encrypt,
 * prepend the firmware tx record ahead of the 802.11 header,
 * map the mbuf for DMA, fill in the tx descriptor (including
 * rate/priority selection), and hand ownership to the firmware.
 * Returns 0 on success or an errno; on failure the mbuf is
 * freed (the caller's node reference is NOT released here).
 */
static int
mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
    struct mbuf *m0)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, iswep, ismcast;
	int hdrlen, copyhdrlen, pktlen;
	struct mwl_txdesc *ds;
	struct mwl_txq *txq;
	struct ieee80211_frame *wh;
	struct mwltxrec *tr;
	struct mwl_node *mn;
	uint16_t qos;
#if MWL_TXDESC > 1
	int i;
#endif

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	copyhdrlen = hdrlen;
	pktlen = m0->m_pkthdr.len;
	/*
	 * Extract the QoS control field (already little-endian on
	 * the wire); for 4-address frames it sits past the fourth
	 * address so it is excluded from the header copy below.
	 */
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		if (IEEE80211_DIR_DSTODS(wh)) {
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
			copyhdrlen -= sizeof(qos);
		} else
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe *) wh)->i_qos);
	} else
		qos = 0;

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 *
		 * NB: we do this even though the firmware will ignore
		 *     what we've done for WEP and TKIP as we need the
		 *     ExtIV filled in for CCMP and this also adjusts
		 *     the headers which simplifies our work below.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			m_freem(m0);
			return EIO;
		}
		/*
		 * Adjust the packet length for the crypto additions
		 * done during encap and any other bits that the f/w
		 * will add later on.
		 */
		cip = k->wk_cipher;
		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;	/* XXX */
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
#if 0
		sc->sc_tx_th.wt_rate = ds->DataRate;
#endif
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}
	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 * We are assured the mbuf has sufficient headroom to
	 * prepend in-place by the setup of ic_headroom in
	 * mwl_attach.
	 */
	if (hdrlen < sizeof(struct mwltxrec)) {
		const int space = sizeof(struct mwltxrec) - hdrlen;
		if (M_LEADINGSPACE(m0) < space) {
			/* NB: should never happen */
			device_printf(sc->sc_dev,
			    "not enough headroom, need %d found %zd, "
			    "m_flags 0x%x m_len %d\n",
			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
			ieee80211_dump_pkt(ic,
			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
			m_freem(m0);
			sc->sc_stats.mst_tx_noheadroom++;
			return EIO;
		}
		/* NB: cannot fail; leading space was verified above */
		M_PREPEND(m0, space, M_NOWAIT);
	}
	tr = mtod(m0, struct mwltxrec *);
	if (wh != (struct ieee80211_frame *) &tr->wh)
		ovbcopy(wh, &tr->wh, hdrlen);
	/*
	 * Note: the "firmware length" is actually the length
	 * of the fully formed "802.11 payload".  That is, it's
	 * everything except for the 802.11 header.  In particular
	 * this includes all crypto material including the MIC!
	 */
	tr->fwlen = htole16(pktlen - hdrlen);

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = mwl_tx_dmasetup(sc, bf, m0);
	if (error != 0) {
		/* NB: stat collected in mwl_tx_dmasetup */
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: unable to setup dma\n", __func__);
		return error;
	}
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	tr = mtod(m0, struct mwltxrec *);
	wh = (struct ieee80211_frame *)&tr->wh;

	/*
	 * Formulate tx descriptor.
	 */
	ds = bf->bf_desc;
	txq = bf->bf_txq;

	ds->QosCtrl = qos;			/* NB: already little-endian */
#if MWL_TXDESC == 1
	/*
	 * NB: multiframes should be zero because the descriptors
	 *     are initialized to zero.  This should handle the case
	 *     where the driver is built with MWL_TXDESC=1 but we are
	 *     using firmware with multi-segment support.
	 */
	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
#else
	ds->multiframes = htole32(bf->bf_nseg);
	ds->PktLen = htole16(m0->m_pkthdr.len);
	for (i = 0; i < bf->bf_nseg; i++) {
		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
	}
#endif
	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
	ds->Format = 0;
	ds->pad = 0;
	ds->ack_wcb_addr = 0;

	mn = MWL_NODE(ni);
	/*
	 * Select transmit rate.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		sc->sc_stats.mst_tx_mgmt++;
		/* fall thru... */
	case IEEE80211_FC0_TYPE_CTL:
		/* NB: assign to BE q to avoid bursting */
		ds->TxPriority = MWL_WME_AC_BE;
		break;
	case IEEE80211_FC0_TYPE_DATA:
		if (!ismcast) {
			const struct ieee80211_txparam *tp = ni->ni_txparms;
			/*
			 * EAPOL frames get forced to a fixed rate and w/o
			 * aggregation; otherwise check for any fixed rate
			 * for the client (may depend on association state).
			 */
			if (m0->m_flags & M_EAPOL) {
				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
				ds->Format = mvp->mv_eapolformat;
				ds->pad = htole16(
				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
				/* XXX pre-calculate per node */
				ds->Format = htole16(
				    mwl_calcformat(tp->ucastrate, ni));
				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
			}
			/*
			 * Route the frame to the h/w queue of its BA
			 * stream (if any); otherwise fall back to the
			 * queue assigned to this AC.
			 * NB: EAPOL frames will never have qos set
			 */
			if (qos == 0)
				ds->TxPriority = txq->qnum;
#if MWL_MAXBA > 3
			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
				ds->TxPriority = mn->mn_ba[3].txq;
#endif
#if MWL_MAXBA > 2
			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
				ds->TxPriority = mn->mn_ba[2].txq;
#endif
#if MWL_MAXBA > 1
			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
				ds->TxPriority = mn->mn_ba[1].txq;
#endif
#if MWL_MAXBA > 0
			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
				ds->TxPriority = mn->mn_ba[0].txq;
#endif
			else
				ds->TxPriority = txq->qnum;
		} else
			ds->TxPriority = txq->qnum;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		sc->sc_stats.mst_tx_badframetype++;
		m_freem(m0);
		return EIO;
	}

	if (IFF_DUMPPKTS_XMIT(sc))
		ieee80211_dump_pkt(ic,
		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);

	/* hand the descriptor to the firmware under the queue lock */
	MWL_TXQ_LOCK(txq);
	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ifp->if_opackets++;
	sc->sc_tx_timer = 5;		/* watchdog: 5s to complete */
	MWL_TXQ_UNLOCK(txq);

	return 0;
#undef	IEEE80211_DIR_DSTODS
}
3427
/*
 * Convert a legacy rate index from the tx descriptor back to
 * net80211 rate units (512kb/s); the inverse of
 * mwl_cvtlegacyrate.  Out-of-range indices map to 0.
 */
static __inline int
mwl_cvtlegacyrix(int rix)
{
	switch (rix) {
	case 0:  return 2;
	case 1:  return 4;
	case 2:  return 11;
	case 3:  return 22;
	case 4:  return 44;
	case 5:  return 12;
	case 6:  return 18;
	case 7:  return 24;
	case 8:  return 36;
	case 9:  return 48;
	case 10: return 72;
	case 11: return 96;
	case 12: return 108;
	default: return 0;
	}
}
3437
3438/*
3439 * Process completed xmit descriptors from the specified queue.
3440 */
3441static int
3442mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
3443{
3444#define	EAGLE_TXD_STATUS_MCAST \
3445	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
3446	struct ifnet *ifp = sc->sc_ifp;
3447	struct ieee80211com *ic = ifp->if_l2com;
3448	struct mwl_txbuf *bf;
3449	struct mwl_txdesc *ds;
3450	struct ieee80211_node *ni;
3451	struct mwl_node *an;
3452	int nreaped;
3453	uint32_t status;
3454
3455	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
3456	for (nreaped = 0;; nreaped++) {
3457		MWL_TXQ_LOCK(txq);
3458		bf = STAILQ_FIRST(&txq->active);
3459		if (bf == NULL) {
3460			MWL_TXQ_UNLOCK(txq);
3461			break;
3462		}
3463		ds = bf->bf_desc;
3464		MWL_TXDESC_SYNC(txq, ds,
3465		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3466		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
3467			MWL_TXQ_UNLOCK(txq);
3468			break;
3469		}
3470		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
3471		MWL_TXQ_UNLOCK(txq);
3472
3473#ifdef MWL_DEBUG
3474		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
3475			mwl_printtxbuf(bf, txq->qnum, nreaped);
3476#endif
3477		ni = bf->bf_node;
3478		if (ni != NULL) {
3479			an = MWL_NODE(ni);
3480			status = le32toh(ds->Status);
3481			if (status & EAGLE_TXD_STATUS_OK) {
3482				uint16_t Format = le16toh(ds->Format);
3483				uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);
3484
3485				sc->sc_stats.mst_ant_tx[txant]++;
3486				if (status & EAGLE_TXD_STATUS_OK_RETRY)
3487					sc->sc_stats.mst_tx_retries++;
3488				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
3489					sc->sc_stats.mst_tx_mretries++;
3490				if (txq->qnum >= MWL_WME_AC_VO)
3491					ic->ic_wme.wme_hipri_traffic++;
3492				ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
3493				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
3494					ni->ni_txrate = mwl_cvtlegacyrix(
3495					    ni->ni_txrate);
3496				} else
3497					ni->ni_txrate |= IEEE80211_RATE_MCS;
3498				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
3499			} else {
3500				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
3501					sc->sc_stats.mst_tx_linkerror++;
3502				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
3503					sc->sc_stats.mst_tx_xretries++;
3504				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
3505					sc->sc_stats.mst_tx_aging++;
3506				if (bf->bf_m->m_flags & M_FF)
3507					sc->sc_stats.mst_ff_txerr++;
3508			}
3509			/*
3510			 * Do any tx complete callback.  Note this must
3511			 * be done before releasing the node reference.
3512			 * XXX no way to figure out if frame was ACK'd
3513			 */
3514			if (bf->bf_m->m_flags & M_TXCB) {
3515				/* XXX strip fw len in case header inspected */
3516				m_adj(bf->bf_m, sizeof(uint16_t));
3517				ieee80211_process_callback(ni, bf->bf_m,
3518					(status & EAGLE_TXD_STATUS_OK) == 0);
3519			}
3520			/*
3521			 * Reclaim reference to node.
3522			 *
3523			 * NB: the node may be reclaimed here if, for example
3524			 *     this is a DEAUTH message that was sent and the
3525			 *     node was timed out due to inactivity.
3526			 */
3527			ieee80211_free_node(ni);
3528		}
3529		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);
3530
3531		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3532		    BUS_DMASYNC_POSTWRITE);
3533		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3534		m_freem(bf->bf_m);
3535
3536		mwl_puttxbuf_tail(txq, bf);
3537	}
3538	return nreaped;
3539#undef EAGLE_TXD_STATUS_MCAST
3540}
3541
3542/*
3543 * Deferred processing of transmit interrupt; special-cased
3544 * for four hardware queues, 0-3.
3545 */
3546static void
3547mwl_tx_proc(void *arg, int npending)
3548{
3549	struct mwl_softc *sc = arg;
3550	struct ifnet *ifp = sc->sc_ifp;
3551	int nreaped;
3552
3553	/*
3554	 * Process each active queue.
3555	 */
3556	nreaped = 0;
3557	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3558		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3559	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3560		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3561	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3562		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3563	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3564		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3565
3566	if (nreaped != 0) {
3567		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3568		sc->sc_tx_timer = 0;
3569		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
3570			/* NB: kick fw; the tx thread may have been preempted */
3571			mwl_hal_txstart(sc->sc_mh, 0);
3572			mwl_start(ifp);
3573		}
3574	}
3575}
3576
/*
 * Drain all frames queued on a single tx queue: unload DMA
 * mappings, release node references, and return the buffers
 * to the free list.  Frames are discarded, not completed.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ifnet *ifp = sc->sc_ifp;
			struct ieee80211com *ic = ifp->if_l2com;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3621
3622/*
3623 * Drain the transmit queues and reclaim resources.
3624 */
3625static void
3626mwl_draintxq(struct mwl_softc *sc)
3627{
3628	struct ifnet *ifp = sc->sc_ifp;
3629	int i;
3630
3631	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3632		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3633	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3634	sc->sc_tx_timer = 0;
3635}
3636
3637#ifdef MWL_DIAGAPI
3638/*
3639 * Reset the transmit queues to a pristine state after a fw download.
3640 */
3641static void
3642mwl_resettxq(struct mwl_softc *sc)
3643{
3644	int i;
3645
3646	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3647		mwl_txq_reset(sc, &sc->sc_txq[i]);
3648}
3649#endif /* MWL_DIAGAPI */
3650
3651/*
3652 * Clear the transmit queues of any frames submitted for the
3653 * specified vap.  This is done when the vap is deleted so we
3654 * don't potentially reference the vap after it is gone.
3655 * Note we cannot remove the frames; we only reclaim the node
3656 * reference.
3657 */
3658static void
3659mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3660{
3661	struct mwl_txq *txq;
3662	struct mwl_txbuf *bf;
3663	int i;
3664
3665	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3666		txq = &sc->sc_txq[i];
3667		MWL_TXQ_LOCK(txq);
3668		STAILQ_FOREACH(bf, &txq->active, bf_list) {
3669			struct ieee80211_node *ni = bf->bf_node;
3670			if (ni != NULL && ni->ni_vap == vap) {
3671				bf->bf_node = NULL;
3672				ieee80211_free_node(ni);
3673			}
3674		}
3675		MWL_TXQ_UNLOCK(txq);
3676	}
3677}
3678
3679static int
3680mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
3681	const uint8_t *frm, const uint8_t *efrm)
3682{
3683	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3684	const struct ieee80211_action *ia;
3685
3686	ia = (const struct ieee80211_action *) frm;
3687	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3688	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3689		const struct ieee80211_action_ht_mimopowersave *mps =
3690		    (const struct ieee80211_action_ht_mimopowersave *) ia;
3691
3692		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3693		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3694		    MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
3695		return 0;
3696	} else
3697		return sc->sc_recv_action(ni, wh, frm, efrm);
3698}
3699
/*
 * Intercept an outbound ADDBA request: grab a free firmware
 * BA stream slot for the node and pre-allocate the stream
 * before letting net80211 send the request.  Returning 0
 * (no slot/stream available) disables a-mpdu aggregation
 * for this tid.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, WME_AC_TO_TID(tap->txa_ac), ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3771
/*
 * Handle the peer's ADDBA response: on success commit the
 * pre-allocated firmware BA stream (and cache its tx queue);
 * otherwise release the stream resources.  Always chains to
 * the saved net80211 handler.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, AC %d\n",
		    __func__, tap->txa_ac);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d AC %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_ac, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_ac, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d AC %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_ac, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3838
3839static void
3840mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3841{
3842	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3843	struct mwl_bastate *bas;
3844
3845	bas = tap->txa_private;
3846	if (bas != NULL) {
3847		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3848		    __func__, bas->bastream);
3849		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3850		mwl_bastream_free(bas);
3851		tap->txa_private = NULL;
3852	}
3853	sc->sc_addba_stop(ni, tap);
3854}
3855
3856/*
3857 * Setup the rx data structures.  This should only be
3858 * done once or we may get out of sync with the firmware.
3859 */
3860static int
3861mwl_startrecv(struct mwl_softc *sc)
3862{
3863	if (!sc->sc_recvsetup) {
3864		struct mwl_rxbuf *bf, *prev;
3865		struct mwl_rxdesc *ds;
3866
3867		prev = NULL;
3868		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
3869			int error = mwl_rxbuf_init(sc, bf);
3870			if (error != 0) {
3871				DPRINTF(sc, MWL_DEBUG_RECV,
3872					"%s: mwl_rxbuf_init failed %d\n",
3873					__func__, error);
3874				return error;
3875			}
3876			if (prev != NULL) {
3877				ds = prev->bf_desc;
3878				ds->pPhysNext = htole32(bf->bf_daddr);
3879			}
3880			prev = bf;
3881		}
3882		if (prev != NULL) {
3883			ds = prev->bf_desc;
3884			ds->pPhysNext =
3885			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
3886		}
3887		sc->sc_recvsetup = 1;
3888	}
3889	mwl_mode_init(sc);		/* set filters, etc. */
3890	return 0;
3891}
3892
3893static MWL_HAL_APMODE
3894mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3895{
3896	MWL_HAL_APMODE mode;
3897
3898	if (IEEE80211_IS_CHAN_HT(chan)) {
3899		if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3900			mode = AP_MODE_N_ONLY;
3901		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3902			mode = AP_MODE_AandN;
3903		else if (vap->iv_flags & IEEE80211_F_PUREG)
3904			mode = AP_MODE_GandN;
3905		else
3906			mode = AP_MODE_BandGandN;
3907	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3908		if (vap->iv_flags & IEEE80211_F_PUREG)
3909			mode = AP_MODE_G_ONLY;
3910		else
3911			mode = AP_MODE_MIXED;
3912	} else if (IEEE80211_IS_CHAN_B(chan))
3913		mode = AP_MODE_B_ONLY;
3914	else if (IEEE80211_IS_CHAN_A(chan))
3915		mode = AP_MODE_A_ONLY;
3916	else
3917		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3918	return mode;
3919}
3920
/*
 * Push the AP operating mode for the given channel to the
 * firmware vap; returns the hal status.
 */
static int
mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
}
3927
3928/*
3929 * Set/change channels.
3930 */
3931static int
3932mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
3933{
3934	struct mwl_hal *mh = sc->sc_mh;
3935	struct ifnet *ifp = sc->sc_ifp;
3936	struct ieee80211com *ic = ifp->if_l2com;
3937	MWL_HAL_CHANNEL hchan;
3938	int maxtxpow;
3939
3940	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
3941	    __func__, chan->ic_freq, chan->ic_flags);
3942
3943	/*
3944	 * Convert to a HAL channel description with
3945	 * the flags constrained to reflect the current
3946	 * operating mode.
3947	 */
3948	mwl_mapchan(&hchan, chan);
3949	mwl_hal_intrset(mh, 0);		/* disable interrupts */
3950#if 0
3951	mwl_draintxq(sc);		/* clear pending tx frames */
3952#endif
3953	mwl_hal_setchannel(mh, &hchan);
3954	/*
3955	 * Tx power is cap'd by the regulatory setting and
3956	 * possibly a user-set limit.  We pass the min of
3957	 * these to the hal to apply them to the cal data
3958	 * for this channel.
3959	 * XXX min bound?
3960	 */
3961	maxtxpow = 2*chan->ic_maxregpower;
3962	if (maxtxpow > ic->ic_txpowlimit)
3963		maxtxpow = ic->ic_txpowlimit;
3964	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
3965	/* NB: potentially change mcast/mgt rates */
3966	mwl_setcurchanrates(sc);
3967
3968	/*
3969	 * Update internal state.
3970	 */
3971	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
3972	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
3973	if (IEEE80211_IS_CHAN_A(chan)) {
3974		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
3975		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
3976	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3977		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
3978		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
3979	} else {
3980		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
3981		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
3982	}
3983	sc->sc_curchan = hchan;
3984	mwl_hal_intrset(mh, sc->sc_imask);
3985
3986	return 0;
3987}
3988
3989static void
3990mwl_scan_start(struct ieee80211com *ic)
3991{
3992	struct ifnet *ifp = ic->ic_ifp;
3993	struct mwl_softc *sc = ifp->if_softc;
3994
3995	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
3996}
3997
3998static void
3999mwl_scan_end(struct ieee80211com *ic)
4000{
4001	struct ifnet *ifp = ic->ic_ifp;
4002	struct mwl_softc *sc = ifp->if_softc;
4003
4004	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
4005}
4006
4007static void
4008mwl_set_channel(struct ieee80211com *ic)
4009{
4010	struct ifnet *ifp = ic->ic_ifp;
4011	struct mwl_softc *sc = ifp->if_softc;
4012
4013	(void) mwl_chan_set(sc, ic->ic_curchan);
4014}
4015
4016/*
4017 * Handle a channel switch request.  We inform the firmware
4018 * and mark the global state to suppress various actions.
4019 * NB: we issue only one request to the fw; we may be called
4020 * multiple times if there are multiple vap's.
4021 */
4022static void
4023mwl_startcsa(struct ieee80211vap *vap)
4024{
4025	struct ieee80211com *ic = vap->iv_ic;
4026	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4027	MWL_HAL_CHANNEL hchan;
4028
4029	if (sc->sc_csapending)
4030		return;
4031
4032	mwl_mapchan(&hchan, ic->ic_csa_newchan);
4033	/* 1 =>'s quiet channel */
4034	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
4035	sc->sc_csapending = 1;
4036}
4037
4038/*
4039 * Plumb any static WEP key for the station.  This is
4040 * necessary as we must propagate the key from the
4041 * global key table of the vap to each sta db entry.
4042 */
4043static void
4044mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4045{
4046	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
4047		IEEE80211_F_PRIVACY &&
4048	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
4049	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
4050		(void) mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac);
4051}
4052
/*
 * Create/refresh the firmware station db entry for a peer and
 * plumb any static WEP key for it.  aid/staid identify the
 * association; pi may be NULL when capabilities are not yet
 * known (e.g. pre-association in AUTH state).  Returns 0 or
 * the error from mwl_hal_newstation.
 */
static int
mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_hal_vap *hvap;
	int error;

	if (vap->iv_opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vap's do not have a f/w vap; instead they piggyback
		 * on an AP vap and we must install the sta db entry and
		 * crypto state using that AP's handle (the WDS vap has none).
		 */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	} else
		hvap = MWL_VAP(vap)->mv_hvap;
	/* NB: QoS info byte is taken from the peer's WME ie when present */
	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
	    aid, staid, pi,
	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
	if (error == 0) {
		/*
		 * Setup security for this station.  For sta mode this is
		 * needed even though do the same thing on transition to
		 * AUTH state because the call to mwl_hal_newstation
		 * clobbers the crypto state we setup.
		 */
		mwl_setanywepkey(vap, ni->ni_macaddr);
	}
	return error;
#undef WME
}
4086
4087static void
4088mwl_setglobalkeys(struct ieee80211vap *vap)
4089{
4090	struct ieee80211_key *wk;
4091
4092	wk = &vap->iv_nw_keys[0];
4093	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
4094		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
4095			(void) mwl_key_set(vap, wk, vap->iv_myaddr);
4096}
4097
4098/*
4099 * Convert a legacy rate set to a firmware bitmask.
4100 */
4101static uint32_t
4102get_rate_bitmap(const struct ieee80211_rateset *rs)
4103{
4104	uint32_t rates;
4105	int i;
4106
4107	rates = 0;
4108	for (i = 0; i < rs->rs_nrates; i++)
4109		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
4110		case 2:	  rates |= 0x001; break;
4111		case 4:	  rates |= 0x002; break;
4112		case 11:  rates |= 0x004; break;
4113		case 22:  rates |= 0x008; break;
4114		case 44:  rates |= 0x010; break;
4115		case 12:  rates |= 0x020; break;
4116		case 18:  rates |= 0x040; break;
4117		case 24:  rates |= 0x080; break;
4118		case 36:  rates |= 0x100; break;
4119		case 48:  rates |= 0x200; break;
4120		case 72:  rates |= 0x400; break;
4121		case 96:  rates |= 0x800; break;
4122		case 108: rates |= 0x1000; break;
4123		}
4124	return rates;
4125}
4126
4127/*
4128 * Construct an HT firmware bitmask from an HT rate set.
4129 */
4130static uint32_t
4131get_htrate_bitmap(const struct ieee80211_htrateset *rs)
4132{
4133	uint32_t rates;
4134	int i;
4135
4136	rates = 0;
4137	for (i = 0; i < rs->rs_nrates; i++) {
4138		if (rs->rs_rates[i] < 16)
4139			rates |= 1<<rs->rs_rates[i];
4140	}
4141	return rates;
4142}
4143
4144/*
4145 * Craft station database entry for station.
4146 * NB: use host byte order here, the hal handles byte swapping.
4147 */
4148static MWL_HAL_PEERINFO *
4149mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
4150{
4151	const struct ieee80211vap *vap = ni->ni_vap;
4152
4153	memset(pi, 0, sizeof(*pi));
4154	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
4155	pi->CapInfo = ni->ni_capinfo;
4156	if (ni->ni_flags & IEEE80211_NODE_HT) {
4157		/* HT capabilities, etc */
4158		pi->HTCapabilitiesInfo = ni->ni_htcap;
4159		/* XXX pi.HTCapabilitiesInfo */
4160	        pi->MacHTParamInfo = ni->ni_htparam;
4161		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
4162		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
4163		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
4164		pi->AddHtInfo.OpMode = ni->ni_htopmode;
4165		pi->AddHtInfo.stbc = ni->ni_htstbc;
4166
4167		/* constrain according to local configuration */
4168		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
4169			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
4170		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
4171			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
4172		if (ni->ni_chw != 40)
4173			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
4174	}
4175	return pi;
4176}
4177
4178/*
4179 * Re-create the local sta db entry for a vap to ensure
4180 * up to date WME state is pushed to the firmware.  Because
4181 * this resets crypto state this must be followed by a
4182 * reload of any keys in the global key table.
4183 */
4184static int
4185mwl_localstadb(struct ieee80211vap *vap)
4186{
4187#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
4188	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
4189	struct ieee80211_node *bss;
4190	MWL_HAL_PEERINFO pi;
4191	int error;
4192
4193	switch (vap->iv_opmode) {
4194	case IEEE80211_M_STA:
4195		bss = vap->iv_bss;
4196		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
4197		    vap->iv_state == IEEE80211_S_RUN ?
4198			mkpeerinfo(&pi, bss) : NULL,
4199		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
4200		    bss->ni_ies.wme_ie != NULL ?
4201			WME(bss->ni_ies.wme_ie)->wme_info : 0);
4202		if (error == 0)
4203			mwl_setglobalkeys(vap);
4204		break;
4205	case IEEE80211_M_HOSTAP:
4206	case IEEE80211_M_MBSS:
4207		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
4208		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
4209		if (error == 0)
4210			mwl_setglobalkeys(vap);
4211		break;
4212	default:
4213		error = 0;
4214		break;
4215	}
4216	return error;
4217#undef WME
4218}
4219
/*
 * VAP state-machine hook.  Performs device-specific work around
 * the net80211 state transition: radar detection start/stop,
 * firmware vap start/stop, sta db maintenance, CSA kickoff,
 * beacon setup, rate/power programming, and the station-aging
 * timer.  Returns 0 or an errno from the failing step.
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    vap->iv_ifp->if_xname, __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	callout_stop(&sc->sc_timer);	/* stop station-aging timer */
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    vap->iv_ifp->if_xname, __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    vap->iv_ifp->if_xname, __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			/* enable DWDS mode in the f/w when the first DWDS vap runs */
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    vap->iv_ifp->if_xname, __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		mwl_hal_setdwds(mh, 0);
bad:
	return error;
}
4373
4374/*
4375 * Manage station id's; these are separate from AID's
4376 * as AID's may have values out of the range of possible
4377 * station id's acceptable to the firmware.
4378 */
4379static int
4380allocstaid(struct mwl_softc *sc, int aid)
4381{
4382	int staid;
4383
4384	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
4385		/* NB: don't use 0 */
4386		for (staid = 1; staid < MWL_MAXSTAID; staid++)
4387			if (isclr(sc->sc_staid, staid))
4388				break;
4389	} else
4390		staid = aid;
4391	setbit(sc->sc_staid, staid);
4392	return staid;
4393}
4394
4395static void
4396delstaid(struct mwl_softc *sc, int staid)
4397{
4398	clrbit(sc->sc_staid, staid);
4399}
4400
4401/*
4402 * Setup driver-specific state for a newly associated node.
4403 * Note that we're called also on a re-associate, the isnew
4404 * param tells us if this is the first time or not.
4405 */
4406static void
4407mwl_newassoc(struct ieee80211_node *ni, int isnew)
4408{
4409	struct ieee80211vap *vap = ni->ni_vap;
4410        struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4411	struct mwl_node *mn = MWL_NODE(ni);
4412	MWL_HAL_PEERINFO pi;
4413	uint16_t aid;
4414	int error;
4415
4416	aid = IEEE80211_AID(ni->ni_associd);
4417	if (isnew) {
4418		mn->mn_staid = allocstaid(sc, aid);
4419		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4420	} else {
4421		mn = MWL_NODE(ni);
4422		/* XXX reset BA stream? */
4423	}
4424	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4425	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4426	error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4427	if (error != 0) {
4428		DPRINTF(sc, MWL_DEBUG_NODE,
4429		    "%s: error %d creating sta db entry\n",
4430		    __func__, error);
4431		/* XXX how to deal with error? */
4432	}
4433}
4434
4435/*
4436 * Periodically poke the firmware to age out station state
4437 * (power save queues, pending tx aggregates).
4438 */
4439static void
4440mwl_agestations(void *arg)
4441{
4442	struct mwl_softc *sc = arg;
4443
4444	mwl_hal_setkeepalive(sc->sc_mh);
4445	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
4446		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
4447}
4448
4449static const struct mwl_hal_channel *
4450findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4451{
4452	int i;
4453
4454	for (i = 0; i < ci->nchannels; i++) {
4455		const struct mwl_hal_channel *hc = &ci->channels[i];
4456		if (hc->ieee == ieee)
4457			return hc;
4458	}
4459	return NULL;
4460}
4461
/*
 * net80211 callback: validate a proposed regulatory change.
 * Every channel must have hal cal data; each channel's max tx
 * power is capped to the cal limit.  Returns EINVAL if any
 * channel is outside 2.4/5GHz or lacks cal data.
 */
static int
mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
	int nchan, struct ieee80211_channel chans[])
{
	struct mwl_softc *sc = ic->ic_ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_CHANNELINFO *ci;
	int i;

	for (i = 0; i < nchan; i++) {
		struct ieee80211_channel *c = &chans[i];
		const struct mwl_hal_channel *hc;

		/* select the hal table matching the channel's band/width */
		if (IEEE80211_IS_CHAN_2GHZ(c)) {
			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
			    IEEE80211_IS_CHAN_HT40(c) ?
				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
			    IEEE80211_IS_CHAN_HT40(c) ?
				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
		} else {
			if_printf(ic->ic_ifp,
			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
			return EINVAL;
		}
		/*
		 * Verify channel has cal data and cap tx power.
		 * NB: hal power is in dBm, ic_maxpower in .5 dBm.
		 */
		hc = findhalchannel(ci, c->ic_ieee);
		if (hc != NULL) {
			if (c->ic_maxpower > 2*hc->maxTxPow)
				c->ic_maxpower = 2*hc->maxTxPow;
			goto next;
		}
		if (IEEE80211_IS_CHAN_HT40(c)) {
			/*
			 * Look for the extension channel since the
			 * hal table only has the primary channel.
			 */
			hc = findhalchannel(ci, c->ic_extieee);
			if (hc != NULL) {
				if (c->ic_maxpower > 2*hc->maxTxPow)
					c->ic_maxpower = 2*hc->maxTxPow;
				goto next;
			}
		}
		/* no cal data anywhere; reject the whole request */
		if_printf(ic->ic_ifp,
		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
		    __func__, c->ic_ieee, c->ic_extieee,
		    c->ic_freq, c->ic_flags);
		return EINVAL;
	next:
		;
	}
	return 0;
}
4520
4521#define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4522#define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4523
/*
 * Fill in a net80211 channel entry.  txpow is in dBm from the
 * hal; ic_maxpower is kept in .5 dBm units, hence the 2x.
 */
static void
addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow)
{
	c->ic_freq = freq;
	c->ic_flags = flags;
	c->ic_ieee = ieee;
	c->ic_minpower = 0;
	c->ic_maxpower = 2*txpow;	/* .5 dBm units */
	c->ic_maxregpower = txpow;	/* dBm */
}
4534
4535static const struct ieee80211_channel *
4536findchannel(const struct ieee80211_channel chans[], int nchans,
4537	int freq, int flags)
4538{
4539	const struct ieee80211_channel *c;
4540	int i;
4541
4542	for (i = 0; i < nchans; i++) {
4543		c = &chans[i];
4544		if (c->ic_freq == freq && c->ic_flags == flags)
4545			return c;
4546	}
4547	return NULL;
4548}
4549
/*
 * Append HT40 channel pairs to the channel list.  For each hal
 * 40MHz entry we require that both the primary and the +20MHz
 * extension channel already exist as HT20 entries (added by a
 * prior addchannels pass); when they do we emit an HT40U entry
 * for the primary and an HT40D entry for the extension.
 */
static void
addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	const struct ieee80211_channel *extc;
	const struct mwl_hal_channel *hc;
	int i;

	/* append after the entries already present */
	c = &chans[*nchans];

	/* NB: flags carries the band; HT width bits are set per-entry below */
	flags &= ~IEEE80211_CHAN_HT;
	for (i = 0; i < ci->nchannels; i++) {
		/*
		 * Each entry defines an HT40 channel pair; find the
		 * extension channel above and the insert the pair.
		 */
		hc = &ci->channels[i];
		extc = findchannel(chans, *nchans, hc->freq+20,
		    flags | IEEE80211_CHAN_HT20);
		if (extc != NULL) {
			if (*nchans >= maxchans)
				break;
			/* primary: HT40 with extension above */
			addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U,
			    hc->ieee, hc->maxTxPow);
			c->ic_extieee = extc->ic_ieee;
			c++, (*nchans)++;
			if (*nchans >= maxchans)
				break;
			/* extension: HT40 with extension below */
			addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D,
			    extc->ic_ieee, hc->maxTxPow);
			c->ic_extieee = hc->ieee;
			c++, (*nchans)++;
		}
	}
}
4586
/*
 * Append 20MHz channels from a hal table to the channel list.
 * Depending on the requested flags each hal entry may expand to
 * several net80211 entries (e.g. an HTG channel also yields
 * b-only, g-only, and HT20 variants).  The c[0] = c[-1] idiom
 * clones the just-added entry and then rewrites the flags of
 * one or both copies.
 */
static void
addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	int i;

	/* append after the entries already present */
	c = &chans[*nchans];

	for (i = 0; i < ci->nchannels; i++) {
		const struct mwl_hal_channel *hc;

		hc = &ci->channels[i];
		if (*nchans >= maxchans)
			break;
		addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
		c++, (*nchans)++;
		if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
			/* g channel have a separate b-only entry */
			if (*nchans >= maxchans)
				break;
			/* duplicate the entry; demote the first copy to 11b */
			c[0] = c[-1];
			c[-1].ic_flags = IEEE80211_CHAN_B;
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTG) {
			/* HT g channel have a separate g-only entry */
			if (*nchans >= maxchans)
				break;
			/* demote previous copy to plain g, add HT20 variant */
			c[-1].ic_flags = IEEE80211_CHAN_G;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTA) {
			/* HT a channel have a separate a-only entry */
			if (*nchans >= maxchans)
				break;
			/* demote previous copy to plain a, add HT20 variant */
			c[-1].ic_flags = IEEE80211_CHAN_A;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
	}
}
4634
/*
 * Build the channel list from the hal's per-band cal data.
 * NB: the 20MHz passes must run before the 40MHz passes since
 * addht40channels locates extension channels among the HT20
 * entries added earlier; do not reorder these calls.
 */
static void
getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
	struct ieee80211_channel chans[])
{
	const MWL_HAL_CHANNELINFO *ci;

	/*
	 * Use the channel info from the hal to craft the
	 * channel list.  Note that we pass back an unsorted
	 * list; the caller is required to sort it for us
	 * (if desired).
	 */
	*nchans = 0;
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
}
4661
4662static void
4663mwl_getradiocaps(struct ieee80211com *ic,
4664	int maxchans, int *nchans, struct ieee80211_channel chans[])
4665{
4666	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4667
4668	getchannels(sc, maxchans, nchans, chans);
4669}
4670
4671static int
4672mwl_getchannels(struct mwl_softc *sc)
4673{
4674	struct ifnet *ifp = sc->sc_ifp;
4675	struct ieee80211com *ic = ifp->if_l2com;
4676
4677	/*
4678	 * Use the channel info from the hal to craft the
4679	 * channel list for net80211.  Note that we pass up
4680	 * an unsorted list; net80211 will sort it for us.
4681	 */
4682	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
4683	ic->ic_nchans = 0;
4684	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
4685
4686	ic->ic_regdomain.regdomain = SKU_DEBUG;
4687	ic->ic_regdomain.country = CTRY_DEFAULT;
4688	ic->ic_regdomain.location = 'I';
4689	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
4690	ic->ic_regdomain.isocc[1] = ' ';
4691	return (ic->ic_nchans == 0 ? EIO : 0);
4692}
4693#undef IEEE80211_CHAN_HTA
4694#undef IEEE80211_CHAN_HTG
4695
4696#ifdef MWL_DEBUG
/*
 * Dump a receive descriptor for debugging.  The trailing marker
 * is empty while the descriptor is not driver-owned, otherwise
 * " *" when EAGLE_RXD_STATUS_OK is set and " !" on error.
 */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("R[%2u] (DS.V:%p DS.P:%p) NEXT:%08x DATA:%08x RC:%02x%s\n"
	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (const struct mwl_desc *)bf->bf_daddr,
	    le32toh(ds->pPhysNext), le32toh(ds->pPhysBuffData),
	    ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2))/* NB: STAT prints the raw (un-swapped) Status byte */;
}
4713
/*
 * Dump a transmit descriptor for debugging.  The marker after
 * STAT is empty when EAGLE_TXD_STATUS_USED is set, otherwise
 * " *" or " !" depending on the low status bits.
 */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:%p)\n",
	    ds, (const struct mwl_txdesc *)bf->bf_daddr);
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !");
	printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	/* multi-segment descriptors carry per-segment lengths/pointers */
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
/* raw hex dump of the descriptor, normally compiled out */
{ const uint8_t *cp = (const uint8_t *) ds;
  int i;
  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
	printf("%02x ", cp[i]);
	if (((i+1) % 16) == 0)
		printf("\n");
  }
  printf("\n");
}
#endif
}
4756#endif /* MWL_DEBUG */
4757
4758#if 0
/*
 * Dump the active list of a tx queue (debug aid; the whole
 * function is compiled out by the surrounding #if 0).
 */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		/* sync so we read what the device last wrote */
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4777#endif
4778
4779static void
4780mwl_watchdog(void *arg)
4781{
4782	struct mwl_softc *sc;
4783	struct ifnet *ifp;
4784
4785	sc = arg;
4786	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
4787	if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
4788		return;
4789
4790	ifp = sc->sc_ifp;
4791	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) {
4792		if (mwl_hal_setkeepalive(sc->sc_mh))
4793			if_printf(ifp, "transmit timeout (firmware hung?)\n");
4794		else
4795			if_printf(ifp, "transmit timeout\n");
4796#if 0
4797		mwl_reset(ifp);
4798mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
4799#endif
4800		ifp->if_oerrors++;
4801		sc->sc_stats.mst_watchdog++;
4802	}
4803}
4804
#ifdef MWL_DIAGAPI
/*
 * Diagnostic interface to the HAL.  This is used by various
 * tools to do things like retrieve register contents for
 * debugging.  The mechanism is intentionally opaque so that
 * it can change frequently w/o concern for compatiblity.
 *
 * Flow: optionally copy in a request buffer (MWL_DIAG_IN),
 * optionally allocate a result buffer (MWL_DIAG_DYN), run the
 * hal diag op, copy results back out, then free on the goto
 * cleanup path.
 * NOTE(review): md_in_size/md_out_size come from userland and
 * are passed to malloc unvalidated — presumably bounded by the
 * caller/hal contract; confirm.
 */
static int
mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	u_int id = md->md_id & MWL_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = md->md_in_size;
	u_int32_t outsize = md->md_out_size;
	int error = 0;

	if (md->md_id & MWL_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(md->md_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (md->md_id & MWL_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
		/* clamp to what the caller asked for */
		if (outsize < md->md_out_size)
			md->md_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, md->md_out_data,
					md->md_out_size);
	} else {
		error = EINVAL;
	}
bad:
	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
4866
4867static int
4868mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
4869{
4870	struct mwl_hal *mh = sc->sc_mh;
4871	int error;
4872
4873	MWL_LOCK_ASSERT(sc);
4874
4875	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
4876		device_printf(sc->sc_dev, "unable to load firmware\n");
4877		return EIO;
4878	}
4879	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
4880		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
4881		return EIO;
4882	}
4883	error = mwl_setupdma(sc);
4884	if (error != 0) {
4885		/* NB: mwl_setupdma prints a msg */
4886		return error;
4887	}
4888	/*
4889	 * Reset tx/rx data structures; after reload we must
4890	 * re-start the driver's notion of the next xmit/recv.
4891	 */
4892	mwl_draintxq(sc);		/* clear pending frames */
4893	mwl_resettxq(sc);		/* rebuild tx q lists */
4894	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
4895	return 0;
4896}
4897#endif /* MWL_DIAGAPI */
4898
/*
 * Interface ioctl handler.  Takes the softc lock around state
 * changes; stats/diag requests are serviced without it (see the
 * notes below).
 */
static int
mwl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, startall;

	switch (cmd) {
	case SIOCSIFFLAGS:
		MWL_LOCK(sc);
		startall = 0;
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call mwl_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			mwl_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid) {
				mwl_init_locked(sc);	/* XXX lose error */
				startall = 1;
			}
		} else
			mwl_stop_locked(ifp, 1);
		MWL_UNLOCK(sc);
		if (startall)
			ieee80211_start_all(ic);
		break;
	case SIOCGMVSTATS:
		/* driver-private: export driver+firmware statistics */
		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.mst_tx_packets = ifp->if_opackets;
		sc->sc_stats.mst_rx_packets = ifp->if_ipackets;
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsisentcy in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return copyout(&sc->sc_stats,
				ifr->ifr_data, sizeof (sc->sc_stats));
#ifdef MWL_DIAGAPI
	case SIOCGMVDIAG:
		/* XXX check privs */
		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
	case SIOCGMVRESET:
		/* XXX check privs */
		MWL_LOCK(sc);
		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
		MWL_UNLOCK(sc);
		break;
#endif /* MWL_DIAGAPI */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}
4977
4978#ifdef	MWL_DEBUG
4979static int
4980mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4981{
4982	struct mwl_softc *sc = arg1;
4983	int debug, error;
4984
4985	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4986	error = sysctl_handle_int(oidp, &debug, 0, req);
4987	if (error || !req->newptr)
4988		return error;
4989	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4990	sc->sc_debug = debug & 0x00ffffff;
4991	return 0;
4992}
4993#endif /* MWL_DEBUG */
4994
/*
 * Attach per-device sysctl nodes.  Currently only the debug
 * knob (see mwl_sysctl_debug), compiled in with MWL_DEBUG.
 */
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef	MWL_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	/* seed from the loader/module tunable */
	sc->sc_debug = mwl_debug;
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
5008
5009/*
5010 * Announce various information on device/driver attach.
5011 */
5012static void
5013mwl_announce(struct mwl_softc *sc)
5014{
5015	struct ifnet *ifp = sc->sc_ifp;
5016
5017	if_printf(ifp, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
5018		sc->sc_hwspecs.hwVersion,
5019		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
5020		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
5021		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
5022		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
5023		sc->sc_hwspecs.regionCode);
5024	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;
5025
5026	if (bootverbose) {
5027		int i;
5028		for (i = 0; i <= WME_AC_VO; i++) {
5029			struct mwl_txq *txq = sc->sc_ac2q[i];
5030			if_printf(ifp, "Use hw queue %u for %s traffic\n",
5031				txq->qnum, ieee80211_wme_acnames[i]);
5032		}
5033	}
5034	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
5035		if_printf(ifp, "using %u rx descriptors\n", mwl_rxdesc);
5036	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
5037		if_printf(ifp, "using %u rx buffers\n", mwl_rxbuf);
5038	if (bootverbose || mwl_txbuf != MWL_TXBUF)
5039		if_printf(ifp, "using %u tx buffers\n", mwl_txbuf);
5040	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
5041		if_printf(ifp, "multi-bss support\n");
5042#ifdef MWL_TX_NODROP
5043	if (bootverbose)
5044		if_printf(ifp, "no tx drop\n");
5045#endif
5046}
5047