if_mwl.c revision 234367
1/*-
2 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
3 * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer,
11 *    without modification.
12 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
14 *    redistribution must be conditioned upon including a substantially
15 *    similar Disclaimer requirement for further binary redistribution.
16 *
17 * NO WARRANTY
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
21 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
23 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
26 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGES.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/dev/mwl/if_mwl.c 234367 2012-04-17 04:31:50Z adrian $");
33
34/*
35 * Driver for the Marvell 88W8363 Wireless LAN controller.
36 */
37
38#include "opt_inet.h"
39#include "opt_mwl.h"
40#include "opt_wlan.h"
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/sysctl.h>
45#include <sys/mbuf.h>
46#include <sys/malloc.h>
47#include <sys/lock.h>
48#include <sys/mutex.h>
49#include <sys/kernel.h>
50#include <sys/socket.h>
51#include <sys/sockio.h>
52#include <sys/errno.h>
53#include <sys/callout.h>
54#include <sys/bus.h>
55#include <sys/endian.h>
56#include <sys/kthread.h>
57#include <sys/taskqueue.h>
58
59#include <machine/bus.h>
60
61#include <net/if.h>
62#include <net/if_dl.h>
63#include <net/if_media.h>
64#include <net/if_types.h>
65#include <net/if_arp.h>
66#include <net/ethernet.h>
67#include <net/if_llc.h>
68
69#include <net/bpf.h>
70
71#include <net80211/ieee80211_var.h>
72#include <net80211/ieee80211_regdomain.h>
73
74#ifdef INET
75#include <netinet/in.h>
76#include <netinet/if_ether.h>
77#endif /* INET */
78
79#include <dev/mwl/if_mwlvar.h>
80#include <dev/mwl/mwldiag.h>
81
/* idiomatic shorthands: MS = mask+shift, SM = shift+mask */
#define	MS(v,x)	(((v) & x) >> x##_S)	/* extract field x from v (x##_S is the shift) */
#define	SM(v,x)	(((v) << x##_S) & x)	/* position v into field x */
85
86static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
87		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
88		    const uint8_t [IEEE80211_ADDR_LEN],
89		    const uint8_t [IEEE80211_ADDR_LEN]);
90static void	mwl_vap_delete(struct ieee80211vap *);
91static int	mwl_setupdma(struct mwl_softc *);
92static int	mwl_hal_reset(struct mwl_softc *sc);
93static int	mwl_init_locked(struct mwl_softc *);
94static void	mwl_init(void *);
95static void	mwl_stop_locked(struct ifnet *, int);
96static int	mwl_reset(struct ieee80211vap *, u_long);
97static void	mwl_stop(struct ifnet *, int);
98static void	mwl_start(struct ifnet *);
99static int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
100			const struct ieee80211_bpf_params *);
101static int	mwl_media_change(struct ifnet *);
102static void	mwl_watchdog(void *);
103static int	mwl_ioctl(struct ifnet *, u_long, caddr_t);
104static void	mwl_radar_proc(void *, int);
105static void	mwl_chanswitch_proc(void *, int);
106static void	mwl_bawatchdog_proc(void *, int);
107static int	mwl_key_alloc(struct ieee80211vap *,
108			struct ieee80211_key *,
109			ieee80211_keyix *, ieee80211_keyix *);
110static int	mwl_key_delete(struct ieee80211vap *,
111			const struct ieee80211_key *);
112static int	mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *,
113			const uint8_t mac[IEEE80211_ADDR_LEN]);
114static int	mwl_mode_init(struct mwl_softc *);
115static void	mwl_update_mcast(struct ifnet *);
116static void	mwl_update_promisc(struct ifnet *);
117static void	mwl_updateslot(struct ifnet *);
118static int	mwl_beacon_setup(struct ieee80211vap *);
119static void	mwl_beacon_update(struct ieee80211vap *, int);
120#ifdef MWL_HOST_PS_SUPPORT
121static void	mwl_update_ps(struct ieee80211vap *, int);
122static int	mwl_set_tim(struct ieee80211_node *, int);
123#endif
124static int	mwl_dma_setup(struct mwl_softc *);
125static void	mwl_dma_cleanup(struct mwl_softc *);
126static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
127		    const uint8_t [IEEE80211_ADDR_LEN]);
128static void	mwl_node_cleanup(struct ieee80211_node *);
129static void	mwl_node_drain(struct ieee80211_node *);
130static void	mwl_node_getsignal(const struct ieee80211_node *,
131			int8_t *, int8_t *);
132static void	mwl_node_getmimoinfo(const struct ieee80211_node *,
133			struct ieee80211_mimo_info *);
134static int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
135static void	mwl_rx_proc(void *, int);
136static void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
137static int	mwl_tx_setup(struct mwl_softc *, int, int);
138static int	mwl_wme_update(struct ieee80211com *);
139static void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
140static void	mwl_tx_cleanup(struct mwl_softc *);
141static uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
142static int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
143			     struct mwl_txbuf *, struct mbuf *);
144static void	mwl_tx_proc(void *, int);
145static int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
146static void	mwl_draintxq(struct mwl_softc *);
147static void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
148static int	mwl_recv_action(struct ieee80211_node *,
149			const struct ieee80211_frame *,
150			const uint8_t *, const uint8_t *);
151static int	mwl_addba_request(struct ieee80211_node *,
152			struct ieee80211_tx_ampdu *, int dialogtoken,
153			int baparamset, int batimeout);
154static int	mwl_addba_response(struct ieee80211_node *,
155			struct ieee80211_tx_ampdu *, int status,
156			int baparamset, int batimeout);
157static void	mwl_addba_stop(struct ieee80211_node *,
158			struct ieee80211_tx_ampdu *);
159static int	mwl_startrecv(struct mwl_softc *);
160static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
161			struct ieee80211_channel *);
162static int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
163static void	mwl_scan_start(struct ieee80211com *);
164static void	mwl_scan_end(struct ieee80211com *);
165static void	mwl_set_channel(struct ieee80211com *);
166static int	mwl_peerstadb(struct ieee80211_node *,
167			int aid, int staid, MWL_HAL_PEERINFO *pi);
168static int	mwl_localstadb(struct ieee80211vap *);
169static int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
170static int	allocstaid(struct mwl_softc *sc, int aid);
171static void	delstaid(struct mwl_softc *sc, int staid);
172static void	mwl_newassoc(struct ieee80211_node *, int);
173static void	mwl_agestations(void *);
174static int	mwl_setregdomain(struct ieee80211com *,
175			struct ieee80211_regdomain *, int,
176			struct ieee80211_channel []);
177static void	mwl_getradiocaps(struct ieee80211com *, int, int *,
178			struct ieee80211_channel []);
179static int	mwl_getchannels(struct mwl_softc *);
180
181static void	mwl_sysctlattach(struct mwl_softc *);
182static void	mwl_announce(struct mwl_softc *);
183
184SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");
185
186static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
187SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
188	    0, "rx descriptors allocated");
189static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
190SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RW, &mwl_rxbuf,
191	    0, "rx buffers allocated");
192TUNABLE_INT("hw.mwl.rxbuf", &mwl_rxbuf);
193static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
194SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RW, &mwl_txbuf,
195	    0, "tx buffers allocated");
196TUNABLE_INT("hw.mwl.txbuf", &mwl_txbuf);
197static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
198SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RW, &mwl_txcoalesce,
199	    0, "tx buffers to send at once");
200TUNABLE_INT("hw.mwl.txcoalesce", &mwl_txcoalesce);
201static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
202SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RW, &mwl_rxquota,
203	    0, "max rx buffers to process per interrupt");
204TUNABLE_INT("hw.mwl.rxquota", &mwl_rxquota);
205static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
206SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RW, &mwl_rxdmalow,
207	    0, "min free rx buffers before restarting traffic");
208TUNABLE_INT("hw.mwl.rxdmalow", &mwl_rxdmalow);
209
210#ifdef MWL_DEBUG
211static	int mwl_debug = 0;
212SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RW, &mwl_debug,
213	    0, "control debugging printfs");
214TUNABLE_INT("hw.mwl.debug", &mwl_debug);
215enum {
216	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
217	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
218	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
219	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
220	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
221	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
222	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
223	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
224	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
225	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
226	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
227	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
228	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
229	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
230	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
231	MWL_DEBUG_ANY		= 0xffffffff
232};
233#define	IS_BEACON(wh) \
234    ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
235	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
236#define	IFF_DUMPPKTS_RECV(sc, wh) \
237    (((sc->sc_debug & MWL_DEBUG_RECV) && \
238      ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh))) || \
239     (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
240#define	IFF_DUMPPKTS_XMIT(sc) \
241	((sc->sc_debug & MWL_DEBUG_XMIT) || \
242	 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
243#define	DPRINTF(sc, m, fmt, ...) do {				\
244	if (sc->sc_debug & (m))					\
245		printf(fmt, __VA_ARGS__);			\
246} while (0)
247#define	KEYPRINTF(sc, hk, mac) do {				\
248	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
249		mwl_keyprint(sc, __func__, hk, mac);		\
250} while (0)
251static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
252static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
253#else
254#define	IFF_DUMPPKTS_RECV(sc, wh) \
255	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
256#define	IFF_DUMPPKTS_XMIT(sc) \
257	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
258#define	DPRINTF(sc, m, fmt, ...) do {				\
259	(void) sc;						\
260} while (0)
261#define	KEYPRINTF(sc, k, mac) do {				\
262	(void) sc;						\
263} while (0)
264#endif
265
266static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");
267
268/*
269 * Each packet has fixed front matter: a 2-byte length
270 * of the payload, followed by a 4-address 802.11 header
271 * (regardless of the actual header and always w/o any
272 * QoS header).  The payload then follows.
273 */
/* Fixed front matter prepended to every tx packet for the firmware. */
struct mwltxrec {
	uint16_t fwlen;				/* payload length told to f/w */
	struct ieee80211_frame_addr4 wh;	/* always a 4-address header,
						   regardless of actual frame */
} __packed;
278
279/*
280 * Read/Write shorthands for accesses to BAR 0.  Note
281 * that all BAR 1 operations are done in the "hal" and
282 * there should be no reference to them here.
283 */
284static __inline uint32_t
285RD4(struct mwl_softc *sc, bus_size_t off)
286{
287	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
288}
289
290static __inline void
291WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
292{
293	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
294}
295
/*
 * Device attach: allocate the ifnet, attach the HAL and load station
 * firmware, fetch h/w specs and channels, set up DMA rings and tx
 * queues, then register capabilities and driver methods with net80211.
 * Returns 0 on success or an errno; on any failure the partially-done
 * setup is unwound (bad2/bad1/bad labels, reverse order of setup) and
 * sc->sc_invalid is set so the ISR ignores the hardware.
 */
int
mwl_attach(uint16_t devid, struct mwl_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct mwl_hal *mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "cannot if_alloc()\n");
		return ENOSPC;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	/* NB: the HAL owns BAR 1; this driver only touches BAR 0 */
	mh = mwl_hal_attach(sc->sc_dev, devid,
	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
	if (mh == NULL) {
		if_printf(ifp, "unable to attach HAL\n");
		error = EIO;
		goto bad;
	}
	sc->sc_mh = mh;
	/*
	 * Load firmware so we can get setup.  We arbitrarily
	 * pick station firmware; we'll re-load firmware as
	 * needed so setting up the wrong mode isn't a big deal.
	 */
	if (mwl_hal_fwload(mh, NULL) != 0) {
		if_printf(ifp, "unable to setup builtin firmware\n");
		error = EIO;
		goto bad1;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		if_printf(ifp, "unable to fetch h/w specs\n");
		error = EIO;
		goto bad1;
	}
	error = mwl_getchannels(sc);
	if (error != 0)
		goto bad1;

	sc->sc_txantenna = 0;		/* h/w default */
	sc->sc_rxantenna = 0;		/* h/w default */
	sc->sc_invalid = 0;		/* ready to go, enable int handling */
	sc->sc_ageinterval = MWL_AGEINTERVAL;

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 * We immediately push the information to the firmware
	 * as otherwise it gets upset.
	 */
	error = mwl_dma_setup(sc);
	if (error != 0) {
		if_printf(ifp, "failed to setup descriptors: %d\n", error);
		goto bad1;
	}
	error = mwl_setupdma(sc);	/* push to firmware */
	if (error != 0)			/* NB: mwl_setupdma prints msg */
		goto bad1;

	/* sc_timer is MPSAFE; the watchdog callout runs under sc_mtx */
	callout_init(&sc->sc_timer, CALLOUT_MPSAFE);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);

	/*
	 * Deferred interrupt work (rx, tx completion, radar, channel
	 * switch, BA watchdog) runs in a single private taskqueue thread.
	 */
	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);

	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = mwl_start;
	ifp->if_ioctl = mwl_ioctl;
	ifp->if_init = mwl_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
#if 0
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
#endif
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_WDS		/* WDS supported */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WME		/* WME/WMM supported */
		| IEEE80211_C_BURST		/* xmit bursting supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
		| IEEE80211_C_DFS		/* DFS supported */
		;

	ic->ic_htcaps =
		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
#if MWL_AGGR_SIZE == 7935
		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#if 0
		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
#endif
		/* s/w capabilities */
		| IEEE80211_HTC_HT		/* HT operation */
		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
		| IEEE80211_HTC_SMPS		/* SMPS available */
		;

	/*
	 * Mark h/w crypto support.
	 * XXX no way to query h/w support.
	 */
	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
			  |  IEEE80211_CRYPTO_AES_CCM
			  |  IEEE80211_CRYPTO_TKIP
			  |  IEEE80211_CRYPTO_TKIPMIC
			  ;
	/*
	 * Transmit requires space in the packet for a special
	 * format transmit record and optional padding between
	 * this record and the payload.  Ask the net80211 layer
	 * to arrange this when encapsulating packets so we can
	 * add it efficiently.
	 */
	ic->ic_headroom = sizeof(struct mwltxrec) -
		sizeof(struct ieee80211_frame);

	/* call MI attach routine. */
	ieee80211_ifattach(ic, sc->sc_hwspecs.macAddr);
	ic->ic_setregdomain = mwl_setregdomain;
	ic->ic_getradiocaps = mwl_getradiocaps;
	/* override default methods */
	ic->ic_raw_xmit = mwl_raw_xmit;
	ic->ic_newassoc = mwl_newassoc;
	ic->ic_updateslot = mwl_updateslot;
	ic->ic_update_mcast = mwl_update_mcast;
	ic->ic_update_promisc = mwl_update_promisc;
	ic->ic_wme.wme_update = mwl_wme_update;

	/* save the net80211 defaults we interpose so we can chain to them */
	ic->ic_node_alloc = mwl_node_alloc;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = mwl_node_cleanup;
	sc->sc_node_drain = ic->ic_node_drain;
	ic->ic_node_drain = mwl_node_drain;
	ic->ic_node_getsignal = mwl_node_getsignal;
	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;

	ic->ic_scan_start = mwl_scan_start;
	ic->ic_scan_end = mwl_scan_end;
	ic->ic_set_channel = mwl_set_channel;

	sc->sc_recv_action = ic->ic_recv_action;
	ic->ic_recv_action = mwl_recv_action;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = mwl_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = mwl_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = mwl_addba_stop;

	ic->ic_vap_create = mwl_vap_create;
	ic->ic_vap_delete = mwl_vap_delete;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		MWL_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		MWL_RX_RADIOTAP_PRESENT);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	mwl_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	mwl_announce(sc);
	return 0;
bad2:
	mwl_tx_cleanup(sc);
	mwl_dma_cleanup(sc);
bad1:
	mwl_hal_detach(mh);
bad:
	if_free(ifp);
	sc->sc_invalid = 1;
	return error;
}
538
/*
 * Device detach: stop the interface, then tear down net80211 state,
 * DMA rings, tx queues, and the HAL.  Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	mwl_stop(ifp, 1);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	callout_drain(&sc->sc_watchdog);
	mwl_dma_cleanup(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	if_free(ifp);

	return 0;
}
570
571/*
572 * MAC address handling for multiple BSS on the same radio.
573 * The first vap uses the MAC address from the EEPROM.  For
574 * subsequent vap's we set the U/L bit (bit 1) in the MAC
575 * address and use the next six bits as an index.
576 */
/*
 * Assign a BSSID slot for a new vap and, for non-zero slots, rewrite
 * mac[0] to carry the slot index (bits 2-7) with the U/L bit (0x2) set.
 * Slot 0 is the unmodified EEPROM address and may be shared by several
 * vaps; sc_nbssid0 counts those users (see reclaim_address).
 */
static void
assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 32; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		/*
		 * NOTE(review): if all 32 slots are in use, i == 32 here
		 * and both (i << 2) and 1<<i below misbehave (the latter
		 * is undefined for a 32-bit mask) — presumably callers
		 * never create that many vaps; confirm an upper bound is
		 * enforced elsewhere.
		 */
		if (i != 0)
			mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	if (i == 0)
		sc->sc_nbssid0++;	/* another user of the EEPROM address */
}
595
596static void
597reclaim_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN])
598{
599	int i = mac[0] >> 2;
600	if (i != 0 || --sc->sc_nbssid0 == 0)
601		sc->sc_bssidmask &= ~(1<<i);
602}
603
/*
 * Create a vap of the requested opmode.  AP/MBSS/STA vaps get a HAL
 * vap and (unless IEEE80211_CLONE_MACADDR) a synthesized MAC address;
 * WDS vaps piggyback on an existing AP vap's HAL handle; monitor vaps
 * need neither.  IBSS/AHDEMO are not supported.  On any failure the
 * assigned address and HAL vap are released.  Returns the new vap or
 * NULL.
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			/* undo the address assignment on failure */
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		return NULL;
	}

	mvp = (struct mwl_vap *) malloc(sizeof(struct mwl_vap),
	    M_80211_VAP, M_NOWAIT | M_ZERO);
	if (mvp == NULL) {
		if (hvap != NULL) {
			mwl_hal_delvap(hvap);
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
		}
		/* XXX msg */
		return NULL;
	}
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
	if (hvap != NULL)
		IEEE80211_ADDR_COPY(vap->iv_myaddr, mac);
	/* override with driver methods */
	mvp->mv_newstate = vap->iv_newstate;
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status);

	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}
743
/*
 * Destroy a vap: detach it from net80211, release its HAL vap and
 * BSSID slot (for AP/MBSS/STA), update the vap counters, and flush
 * any of its frames still on the tx queues.  Interrupts are masked
 * around the teardown when the parent interface is running.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ifnet *parent = vap->iv_ic->ic_ifp;
	struct mwl_softc *sc = parent->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (parent->if_drv_flags & IFF_DRV_RUNNING) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps--;
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	/* re-enable interrupts if we masked them above */
	if (parent->if_drv_flags & IFF_DRV_RUNNING)
		mwl_hal_intrset(mh, sc->sc_imask);
}
785
786void
787mwl_suspend(struct mwl_softc *sc)
788{
789	struct ifnet *ifp = sc->sc_ifp;
790
791	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
792		__func__, ifp->if_flags);
793
794	mwl_stop(ifp, 1);
795}
796
797void
798mwl_resume(struct mwl_softc *sc)
799{
800	struct ifnet *ifp = sc->sc_ifp;
801
802	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
803		__func__, ifp->if_flags);
804
805	if (ifp->if_flags & IFF_UP)
806		mwl_init(sc);
807}
808
809void
810mwl_shutdown(void *arg)
811{
812	struct mwl_softc *sc = arg;
813
814	mwl_stop(sc->sc_ifp, 1);
815}
816
817/*
818 * Interrupt handler.  Most of the actual processing is deferred.
819 */
/*
 * Interrupt handler.  Most of the actual processing is deferred.
 *
 * Reads (and clears) the interrupt status via the HAL, then dispatches
 * each cause bit: rx/tx/BA-watchdog/radar/channel-switch work is pushed
 * to the taskqueue, command completion is handled inline, and a few
 * causes are only counted or deliberately ignored (empty braces below).
 */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;				/* intentionally ignored */
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;				/* no action taken here */
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;				/* intentionally ignored */
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}
875
876static void
877mwl_radar_proc(void *arg, int pending)
878{
879	struct mwl_softc *sc = arg;
880	struct ifnet *ifp = sc->sc_ifp;
881	struct ieee80211com *ic = ifp->if_l2com;
882
883	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
884	    __func__, pending);
885
886	sc->sc_stats.mst_radardetect++;
887	/* XXX stop h/w BA streams? */
888
889	IEEE80211_LOCK(ic);
890	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
891	IEEE80211_UNLOCK(ic);
892}
893
894static void
895mwl_chanswitch_proc(void *arg, int pending)
896{
897	struct mwl_softc *sc = arg;
898	struct ifnet *ifp = sc->sc_ifp;
899	struct ieee80211com *ic = ifp->if_l2com;
900
901	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
902	    __func__, pending);
903
904	IEEE80211_LOCK(ic);
905	sc->sc_csapending = 0;
906	ieee80211_csa_completeswitch(ic);
907	IEEE80211_UNLOCK(ic);
908}
909
910static void
911mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
912{
913	struct ieee80211_node *ni = sp->data[0];
914
915	/* send DELBA and drop the stream */
916	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
917}
918
/*
 * Deferred handler for the BA watchdog interrupt.  The firmware
 * reports a value identifying which block-ack stream(s) wedged.
 * As handled below: 0xff means "check all streams", 0xaa is
 * treated as a no-op indication (ignored), and any other value
 * is taken as the index of a single stream to tear down.
 */
static void
mwl_bawatchdog_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_BASTREAM *sp;
	uint8_t bitmap, n;

	sc->sc_stats.mst_bawatchdog++;

	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: could not get bitmap\n", __func__);
		sc->sc_stats.mst_bawatchdog_failed++;
		return;
	}
	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
	if (bitmap == 0xff) {
		n = 0;
		/* disable all ba streams */
		/* NB: bitmap is reused here as the stream index */
		for (bitmap = 0; bitmap < 8; bitmap++) {
			sp = mwl_hal_bastream_lookup(mh, bitmap);
			if (sp != NULL) {
				mwl_bawatchdog(sp);
				n++;
			}
		}
		if (n == 0) {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA streams found\n", __func__);
			sc->sc_stats.mst_bawatchdog_empty++;
		}
	} else if (bitmap != 0xaa) {
		/* disable a single ba stream */
		sp = mwl_hal_bastream_lookup(mh, bitmap);
		if (sp != NULL) {
			mwl_bawatchdog(sp);
		} else {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA stream %d\n", __func__, bitmap);
			sc->sc_stats.mst_bawatchdog_notfound++;
		}
	}
}
963
964/*
965 * Convert net80211 channel to a HAL channel.
966 */
967static void
968mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
969{
970	hc->channel = chan->ic_ieee;
971
972	*(uint32_t *)&hc->channelFlags = 0;
973	if (IEEE80211_IS_CHAN_2GHZ(chan))
974		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
975	else if (IEEE80211_IS_CHAN_5GHZ(chan))
976		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
977	if (IEEE80211_IS_CHAN_HT40(chan)) {
978		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
979		if (IEEE80211_IS_CHAN_HT40U(chan))
980			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
981		else
982			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
983	} else
984		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
985	/* XXX 10MHz channels */
986}
987
988/*
989 * Inform firmware of our tx/rx dma setup.  The BAR 0
990 * writes below are for compatibility with older firmware.
991 * For current firmware we send this information with a
992 * cmd block via mwl_hal_sethwdma.
993 */
static int
mwl_setupdma(struct mwl_softc *sc)
{
	int error, i;

	/* rx descriptor ring: point both read and write pointers at the start */
	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);

	/* tx WCB rings, one per (non-ack) h/w transmit queue */
	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
		struct mwl_txq *txq = &sc->sc_txq[i];
		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
	}
	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;

	/* hand the same layout to current firmware via a command block */
	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to setup tx/rx dma; hal status %u\n", error);
		/* XXX */
	}
	return error;
}
1019
1020/*
1021 * Inform firmware of tx rate parameters.
1022 * Called after a channel change.
1023 */
1024static int
1025mwl_setcurchanrates(struct mwl_softc *sc)
1026{
1027	struct ifnet *ifp = sc->sc_ifp;
1028	struct ieee80211com *ic = ifp->if_l2com;
1029	const struct ieee80211_rateset *rs;
1030	MWL_HAL_TXRATE rates;
1031
1032	memset(&rates, 0, sizeof(rates));
1033	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1034	/* rate used to send management frames */
1035	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1036	/* rate used to send multicast frames */
1037	rates.McastRate = rates.MgtRate;
1038
1039	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1040}
1041
1042/*
1043 * Inform firmware of tx rate parameters.  Called whenever
1044 * user-settable params change and after a channel change.
1045 */
static int
mwl_setrates(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ieee80211_node *ni = vap->iv_bss;
	const struct ieee80211_txparam *tp = ni->ni_txparms;
	MWL_HAL_TXRATE rates;

	/* only meaningful once the vap is running with a bss node */
	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));

	/*
	 * Update the h/w rate map.
	 * NB: 0x80 for MCS is passed through unchanged
	 */
	memset(&rates, 0, sizeof(rates));
	/* rate used to send management frames */
	rates.MgtRate = tp->mgmtrate;
	/* rate used to send multicast frames */
	rates.McastRate = tp->mcastrate;

	/* while here calculate EAPOL fixed rate cookie */
	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));

	/* fixed rate if the user pinned a unicast rate, else f/w rate control */
	return mwl_hal_settxrate(mvp->mv_hvap,
	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
		RATE_FIXED : RATE_AUTO, &rates);
}
1073
1074/*
1075 * Setup a fixed xmit rate cookie for EAPOL frames.
1076 */
static void
mwl_seteapolformat(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ieee80211_node *ni = vap->iv_bss;
	enum ieee80211_phymode mode;
	uint8_t rate;

	/* bss node and curchan are only valid once running */
	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));

	mode = ieee80211_chan2mode(ni->ni_chan);
	/*
	 * Use legacy rates when operating a mixed HT+non-HT bss.
	 * NB: this may violate POLA for sta and wds vap's.
	 */
	if (mode == IEEE80211_MODE_11NA &&
	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
	else if (mode == IEEE80211_MODE_11NG &&
	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
	else
		rate = vap->iv_txparms[mode].mgmtrate;

	/* cache the fixed-rate cookie used when transmitting EAPOL frames */
	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
}
1103
1104/*
1105 * Map SKU+country code to region code for radar bin'ing.
1106 */
1107static int
1108mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1109{
1110	switch (rd->regdomain) {
1111	case SKU_FCC:
1112	case SKU_FCC3:
1113		return DOMAIN_CODE_FCC;
1114	case SKU_CA:
1115		return DOMAIN_CODE_IC;
1116	case SKU_ETSI:
1117	case SKU_ETSI2:
1118	case SKU_ETSI3:
1119		if (rd->country == CTRY_SPAIN)
1120			return DOMAIN_CODE_SPAIN;
1121		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1122			return DOMAIN_CODE_FRANCE;
1123		/* XXX force 1.3.1 radar type */
1124		return DOMAIN_CODE_ETSI_131;
1125	case SKU_JAPAN:
1126		return DOMAIN_CODE_MKK;
1127	case SKU_ROW:
1128		return DOMAIN_CODE_DGT;	/* Taiwan */
1129	case SKU_APAC:
1130	case SKU_APAC2:
1131	case SKU_APAC3:
1132		return DOMAIN_CODE_AUS;	/* Australia */
1133	}
1134	/* XXX KOREA? */
1135	return DOMAIN_CODE_FCC;			/* XXX? */
1136}
1137
/*
 * Push vap-independent configuration to the firmware after a
 * (re)start: antennas, radio/preamble, WMM, current channel,
 * rate adaptation mode, burst optimization, and region code.
 * NB: always reports success (returns 1); return values of the
 * individual hal calls are not checked.
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	return 1;
}
1162
/*
 * (Re)initialize the hardware with the softc lock held: stop any
 * previous activity, reset firmware state, start receive, and
 * enable interrupts.  Returns 0 on success or an errno.
 */
static int
mwl_init_locked(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop_locked(ifp, 0);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		if_printf(ifp, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.  The mask is cached in sc_imask so it
	 * can be restored after code paths that temporarily disable
	 * interrupts (e.g. mwl_reset).
	 * NB: "MACREQ_..._TX_ACK" spelling matches the HAL header.
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	mwl_hal_intrset(mh, sc->sc_imask);
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);

	return 0;
}
1223
1224static void
1225mwl_init(void *arg)
1226{
1227	struct mwl_softc *sc = arg;
1228	struct ifnet *ifp = sc->sc_ifp;
1229	struct ieee80211com *ic = ifp->if_l2com;
1230	int error = 0;
1231
1232	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
1233		__func__, ifp->if_flags);
1234
1235	MWL_LOCK(sc);
1236	error = mwl_init_locked(sc);
1237	MWL_UNLOCK(sc);
1238
1239	if (error == 0)
1240		ieee80211_start_all(ic);	/* start all vap's */
1241}
1242
1243static void
1244mwl_stop_locked(struct ifnet *ifp, int disable)
1245{
1246	struct mwl_softc *sc = ifp->if_softc;
1247
1248	DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
1249		__func__, sc->sc_invalid, ifp->if_flags);
1250
1251	MWL_LOCK_ASSERT(sc);
1252	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1253		/*
1254		 * Shutdown the hardware and driver.
1255		 */
1256		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1257		callout_stop(&sc->sc_watchdog);
1258		sc->sc_tx_timer = 0;
1259		mwl_draintxq(sc);
1260	}
1261}
1262
/* Locked wrapper around mwl_stop_locked. */
static void
mwl_stop(struct ifnet *ifp, int disable)
{
	struct mwl_softc *sc = ifp->if_softc;

	MWL_LOCK(sc);
	mwl_stop_locked(ifp, disable);
	MWL_UNLOCK(sc);
}
1272
/*
 * Re-push per-vap state to the firmware: tx rates (when running),
 * RTS threshold, HT short-GI, HT protection, and — for AP-like
 * vaps in RUN state — AP mode and the beacon frame.
 * Returns 0 on success or the mwl_beacon_setup result.
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_MBSS ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}
1302
1303/*
1304 * Reset the hardware w/o losing operational state.
 * Used to reset or reload hardware state for a vap.
1306 */
static int
mwl_reset(struct ieee80211vap *vap, u_long cmd)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	int error = 0;

	/* NB: vaps without h/w state (hvap == NULL) are a no-op */
	if (hvap != NULL) {			/* WDS, MONITOR, etc. */
		struct ieee80211com *ic = vap->iv_ic;
		struct ifnet *ifp = ic->ic_ifp;
		struct mwl_softc *sc = ifp->if_softc;
		struct mwl_hal *mh = sc->sc_mh;

		/* XXX handle DWDS sta vap change */
		/* XXX do we need to disable interrupts? */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
		error = mwl_reset_vap(vap, vap->iv_state);
		mwl_hal_intrset(mh, sc->sc_imask);	/* restore cached mask */
	}
	return error;
}
1327
1328/*
1329 * Allocate a tx buffer for sending a frame.  The
1330 * packet is assumed to have the WME AC stored so
1331 * we can use it to select the appropriate h/w queue.
1332 */
1333static struct mwl_txbuf *
1334mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1335{
1336	struct mwl_txbuf *bf;
1337
1338	/*
1339	 * Grab a TX buffer and associated resources.
1340	 */
1341	MWL_TXQ_LOCK(txq);
1342	bf = STAILQ_FIRST(&txq->free);
1343	if (bf != NULL) {
1344		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1345		txq->nfree--;
1346	}
1347	MWL_TXQ_UNLOCK(txq);
1348	if (bf == NULL)
1349		DPRINTF(sc, MWL_DEBUG_XMIT,
1350		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1351	return bf;
1352}
1353
1354/*
1355 * Return a tx buffer to the queue it came from.  Note there
1356 * are two cases because we must preserve the order of buffers
1357 * as it reflects the fixed order of descriptors in memory
1358 * (the firmware pre-fetches descriptors so we cannot reorder).
1359 */
/* Return a buffer to the HEAD of its free list (preserves descriptor order). */
static void
mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
{
	/* clear stale references before recycling */
	bf->bf_m = NULL;
	bf->bf_node = NULL;
	MWL_TXQ_LOCK(txq);
	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
	txq->nfree++;
	MWL_TXQ_UNLOCK(txq);
}
1370
/* Return a buffer to the TAIL of its free list (preserves descriptor order). */
static void
mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
{
	/* clear stale references before recycling */
	bf->bf_m = NULL;
	bf->bf_node = NULL;
	MWL_TXQ_LOCK(txq);
	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
	txq->nfree++;
	MWL_TXQ_UNLOCK(txq);
}
1381
/*
 * if_start entry point: drain the interface send queue, mapping
 * each frame onto the h/w queue chosen by its WME access class,
 * and periodically poke the firmware to fetch the new descriptors.
 */
static void
mwl_start(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	nqueued = 0;
	for (;;) {
		bf = NULL;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		/*
		 * Grab the node for the destination.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			sc->sc_stats.mst_tx_qstop++;
			/* XXX blocks other traffic */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 * NB: on failure the mbuf is not freed here —
		 * presumably mwl_tx_start consumes it; only the node
		 * ref and tx buffer are reclaimed.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			ifp->if_oerrors++;
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host. So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1464
/*
 * Transmit a frame on behalf of net80211 (raw/mgmt path).
 * On error the node reference is released; the mbuf is freed
 * here only on the paths where it was not handed to mwl_tx_start.
 * Returns 0, ENETDOWN, ENOBUFS, or EIO.
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
		ieee80211_free_node(ni);
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		/* XXX blocks other traffic */
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		ieee80211_free_node(ni);
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 * NB: no m_freem here — presumably mwl_tx_start consumes the
	 * mbuf on failure (matches the mwl_start error path).
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		ifp->if_oerrors++;
		mwl_puttxbuf_head(txq, bf);

		ieee80211_free_node(ni);
		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host. So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1524
1525static int
1526mwl_media_change(struct ifnet *ifp)
1527{
1528	struct ieee80211vap *vap = ifp->if_softc;
1529	int error;
1530
1531	error = ieee80211_media_change(ifp);
1532	/* NB: only the fixed rate can change and that doesn't need a reset */
1533	if (error == ENETRESET) {
1534		mwl_setrates(vap);
1535		error = 0;
1536	}
1537	return error;
1538}
1539
1540#ifdef MWL_DEBUG
/*
 * Debug dump of a HAL key: index, cipher name, key bytes, peer
 * mac, TKIP rx/tx MIC keys (if applicable), and flags.
 * NB: keyTypeId indexes ciphers[] directly; assumes it is in range.
 */
static void
mwl_keyprint(struct mwl_softc *sc, const char *tag,
	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	static const char *ciphers[] = {
		"WEP",
		"TKIP",
		"AES-CCM",
	};
	int i, n;

	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
	for (i = 0, n = hk->keyLen; i < n; i++)
		printf(" %02x", hk->key.aes[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
		printf(" %s", "rxmic");
		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
			printf(" %02x", hk->key.tkip.rxMic[i]);
		printf(" txmic");
		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
			printf(" %02x", hk->key.tkip.txMic[i]);
	}
	printf(" flags 0x%x\n", hk->keyFlags);
}
1566#endif
1567
1568/*
1569 * Allocate a key cache slot for a unicast key.  The
1570 * firmware handles key allocation and every station is
1571 * guaranteed key space so we are always successful.
1572 */
1573static int
1574mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
1575	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1576{
1577	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1578
1579	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
1580	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
1581		if (!(&vap->iv_nw_keys[0] <= k &&
1582		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
1583			/* should not happen */
1584			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1585				"%s: bogus group key\n", __func__);
1586			return 0;
1587		}
1588		/* give the caller what they requested */
1589		*keyix = *rxkeyix = k - vap->iv_nw_keys;
1590	} else {
1591		/*
1592		 * Firmware handles key allocation.
1593		 */
1594		*keyix = *rxkeyix = 0;
1595	}
1596	return 1;
1597}
1598
1599/*
1600 * Delete a key entry allocated by mwl_key_alloc.
1601 */
static int
mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	MWL_HAL_KEYVAL hk;
	/* NB: key is reset for all stations, hence the broadcast address */
	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* WDS vaps borrow the parent AP's h/w vap state */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}

	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
	    __func__, k->wk_keyix);

	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (k->wk_cipher->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
}
1645
1646static __inline int
1647addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1648{
1649	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1650		if (k->wk_flags & IEEE80211_KEY_XMIT)
1651			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1652		if (k->wk_flags & IEEE80211_KEY_RECV)
1653			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1654		return 1;
1655	} else
1656		return 0;
1657}
1658
1659/*
1660 * Set the key cache contents for the specified key.  Key cache
1661 * slot(s) must already have been allocated by mwl_key_alloc.
1662 */
static int
mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
	const uint8_t mac[IEEE80211_ADDR_LEN])
{
#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
#define	IEEE80211_IS_STATICKEY(k) \
	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
	 (GRPXMIT|IEEE80211_KEY_RECV))
	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	const uint8_t *macaddr;
	MWL_HAL_KEYVAL hk;

	/* this driver only plumbs h/w keys */
	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
		("s/w crypto set?"));

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* WDS vaps borrow the parent AP's h/w vap state */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}
	/* translate the net80211 key into the HAL's representation */
	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (cip->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		hk.keyLen = k->wk_keylen;
		if (k->wk_keyix == vap->iv_def_txkey)
			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
		if (!IEEE80211_IS_STATICKEY(k)) {
			/* NB: WEP is never used for the PTK */
			(void) addgroupflags(&hk, k);
		}
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		hk.keyLen = k->wk_keylen;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	/*
	 * NB: tkip mic keys get copied here too; the layout
	 *     just happens to match that in ieee80211_key.
	 */
	memcpy(hk.key.aes, k->wk_key, hk.keyLen);

	/*
	 * Locate address of sta db entry for writing key;
	 * the convention unfortunately is somewhat different
	 * than how net80211, hostapd, and wpa_supplicant think.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		/*
		 * NB: keys plumbed before the sta reaches AUTH state
		 * will be discarded or written to the wrong sta db
		 * entry because iv_bss is meaningless.  This is ok
		 * (right now) because we handle deferred plumbing of
		 * WEP keys when the sta reaches AUTH state.
		 */
		macaddr = vap->iv_bss->ni_bssid;
		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
			/* XXX plumb to local sta db too for static key wep */
			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
		}
	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
	    vap->iv_state != IEEE80211_S_RUN) {
		/*
		 * Prior to RUN state a WDS vap will not have its BSS
		 * node setup so we will plumb the key to the wrong mac
		 * address (it'll be our local address).  Workaround
		 * this for the moment by grabbing the correct address.
		 */
		macaddr = vap->iv_des_bssid;
	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
		macaddr = vap->iv_myaddr;
	else
		macaddr = mac;
	KEYPRINTF(sc, &hk, macaddr);
	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
#undef IEEE80211_IS_STATICKEY
#undef GRPXMIT
}
1767
/* unaligned little endian access */
/* NB: each macro evaluates its argument several times;
 *     pass only side-effect-free expressions. */
#define LE_READ_2(p)				\
	((uint16_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8)))
#define LE_READ_4(p)				\
	((uint32_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8) |	\
	  (((const uint8_t *)(p))[2] << 16) |	\
	  (((const uint8_t *)(p))[3] << 24)))
1779
1780/*
1781 * Set the multicast filter contents into the hardware.
1782 * XXX f/w has no support; just defer to the os.
1783 */
static void
mwl_setmcastfilter(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
#if 0
	/*
	 * NB: dead code retained from a port that enumerated the
	 * multicast list (ether_multi API); the f/w path below is
	 * what is actually compiled.
	 */
	struct ether_multi *enm;
	struct ether_multistep estep;
	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
	uint8_t *mp;
	int nmc;

	mp = macs;
	nmc = 0;
	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
	while (enm != NULL) {
		/* XXX Punt on ranges. */
		if (nmc == MWL_HAL_MCAST_MAX ||
		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
			ifp->if_flags |= IFF_ALLMULTI;
			return;
		}
		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
		mp += IEEE80211_ADDR_LEN, nmc++;
		ETHER_NEXT_MULTI(estep, enm);
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
#else
	/* XXX no mcast filter support; we get everything */
	ifp->if_flags |= IFF_ALLMULTI;
#endif
}
1816
1817static int
1818mwl_mode_init(struct mwl_softc *sc)
1819{
1820	struct ifnet *ifp = sc->sc_ifp;
1821	struct ieee80211com *ic = ifp->if_l2com;
1822	struct mwl_hal *mh = sc->sc_mh;
1823
1824	/*
1825	 * NB: Ignore promisc in hostap mode; it's set by the
1826	 * bridge.  This is wrong but we have no way to
1827	 * identify internal requests (from the bridge)
1828	 * versus external requests such as for tcpdump.
1829	 */
1830	mwl_hal_setpromisc(mh, (ifp->if_flags & IFF_PROMISC) &&
1831	    ic->ic_opmode != IEEE80211_M_HOSTAP);
1832	mwl_setmcastfilter(sc);
1833
1834	return 0;
1835}
1836
1837/*
1838 * Callback from the 802.11 layer after a multicast state change.
1839 */
static void
mwl_update_mcast(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;

	/* re-push the (degenerate) multicast filter state */
	mwl_setmcastfilter(sc);
}
1847
1848/*
1849 * Callback from the 802.11 layer after a promiscuous mode change.
1850 * Note this interface does not check the operating mode as this
1851 * is an internal callback and we are expected to honor the current
1852 * state (e.g. this is used for setting the interface in promiscuous
1853 * mode when operating in hostap mode to do ACS).
1854 */
static void
mwl_update_promisc(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;

	/* mirror the interface's promiscuous flag into the firmware */
	mwl_hal_setpromisc(sc->sc_mh, (ifp->if_flags & IFF_PROMISC) != 0);
}
1862
1863/*
1864 * Callback from the 802.11 layer to update the slot time
1865 * based on the current setting.  We use it to notify the
1866 * firmware of ERP changes and the f/w takes care of things
1867 * like slot time and preamble.
1868 */
static void
mwl_updateslot(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_hal *mh = sc->sc_mh;
	int prot;

	/* NB: can be called early; suppress needless cmds */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/*
	 * Calculate the ERP flags.  The firmware will use
	 * this to carry out the appropriate measures.
	 */
	prot = 0;
	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
		if (ic->ic_flags & IEEE80211_F_USEPROT)
			prot |= IEEE80211_ERP_USE_PROTECTION;
		if (ic->ic_flags & IEEE80211_F_USEBARKER)
			prot |= IEEE80211_ERP_LONG_PREAMBLE;
	}

	DPRINTF(sc, MWL_DEBUG_RESET,
	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
	    ic->ic_flags);

	mwl_hal_setgprot(mh, prot);
}
1903
1904/*
1905 * Setup the beacon frame.
1906 */
1907static int
1908mwl_beacon_setup(struct ieee80211vap *vap)
1909{
1910	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1911	struct ieee80211_node *ni = vap->iv_bss;
1912	struct ieee80211_beacon_offsets bo;
1913	struct mbuf *m;
1914
1915	m = ieee80211_beacon_alloc(ni, &bo);
1916	if (m == NULL)
1917		return ENOBUFS;
1918	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1919	m_free(m);
1920
1921	return 0;
1922}
1923
1924/*
1925 * Update the beacon frame in response to a change.
1926 */
static void
mwl_beacon_update(struct ieee80211vap *vap, int item)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	KASSERT(hvap != NULL, ("no beacon"));
	/* a few items have dedicated firmware plumbing... */
	switch (item) {
	case IEEE80211_BEACON_ERP:
		mwl_updateslot(ic->ic_ifp);
		break;
	case IEEE80211_BEACON_HTINFO:
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		break;
	case IEEE80211_BEACON_CAPS:
	case IEEE80211_BEACON_WME:
	case IEEE80211_BEACON_APPIE:
	case IEEE80211_BEACON_CSA:
		break;
	case IEEE80211_BEACON_TIM:
		/* NB: firmware always forms TIM */
		return;
	}
	/* ...otherwise rebuild and re-push the whole beacon frame */
	/* XXX retain beacon frame and update */
	mwl_beacon_setup(vap);
}
1954
1955static void
1956mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1957{
1958	bus_addr_t *paddr = (bus_addr_t*) arg;
1959	KASSERT(error == 0, ("error %u on bus_dma callback", error));
1960	*paddr = segs->ds_addr;
1961}
1962
1963#ifdef MWL_HOST_PS_SUPPORT
1964/*
1965 * Handle power save station occupancy changes.
1966 */
static void
mwl_update_ps(struct ieee80211vap *vap, int nsta)
{
	struct mwl_vap *mvp = MWL_VAP(vap);

	/*
	 * Only tell the firmware on transitions into or out of
	 * "no stations in power save" (0 -> n or n -> 0);
	 * intermediate count changes don't alter the bss setting.
	 */
	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
	mvp->mv_last_ps_sta = nsta;
}
1976
1977/*
1978 * Handle associated station power save state changes.
1979 */
1980static int
1981mwl_set_tim(struct ieee80211_node *ni, int set)
1982{
1983	struct ieee80211vap *vap = ni->ni_vap;
1984	struct mwl_vap *mvp = MWL_VAP(vap);
1985
1986	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1987		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1988		    IEEE80211_AID(ni->ni_associd), set);
1989		return 1;
1990	} else
1991		return 0;
1992}
1993#endif /* MWL_HOST_PS_SUPPORT */
1994
1995static int
1996mwl_desc_setup(struct mwl_softc *sc, const char *name,
1997	struct mwl_descdma *dd,
1998	int nbuf, size_t bufsize, int ndesc, size_t descsize)
1999{
2000	struct ifnet *ifp = sc->sc_ifp;
2001	uint8_t *ds;
2002	int error;
2003
2004	DPRINTF(sc, MWL_DEBUG_RESET,
2005	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
2006	    __func__, name, nbuf, (uintmax_t) bufsize,
2007	    ndesc, (uintmax_t) descsize);
2008
2009	dd->dd_name = name;
2010	dd->dd_desc_len = nbuf * ndesc * descsize;
2011
2012	/*
2013	 * Setup DMA descriptor area.
2014	 */
2015	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
2016		       PAGE_SIZE, 0,		/* alignment, bounds */
2017		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2018		       BUS_SPACE_MAXADDR,	/* highaddr */
2019		       NULL, NULL,		/* filter, filterarg */
2020		       dd->dd_desc_len,		/* maxsize */
2021		       1,			/* nsegments */
2022		       dd->dd_desc_len,		/* maxsegsize */
2023		       BUS_DMA_ALLOCNOW,	/* flags */
2024		       NULL,			/* lockfunc */
2025		       NULL,			/* lockarg */
2026		       &dd->dd_dmat);
2027	if (error != 0) {
2028		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
2029		return error;
2030	}
2031
2032	/* allocate descriptors */
2033	error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
2034	if (error != 0) {
2035		if_printf(ifp, "unable to create dmamap for %s descriptors, "
2036			"error %u\n", dd->dd_name, error);
2037		goto fail0;
2038	}
2039
2040	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
2041				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2042				 &dd->dd_dmamap);
2043	if (error != 0) {
2044		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
2045			"error %u\n", nbuf * ndesc, dd->dd_name, error);
2046		goto fail1;
2047	}
2048
2049	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
2050				dd->dd_desc, dd->dd_desc_len,
2051				mwl_load_cb, &dd->dd_desc_paddr,
2052				BUS_DMA_NOWAIT);
2053	if (error != 0) {
2054		if_printf(ifp, "unable to map %s descriptors, error %u\n",
2055			dd->dd_name, error);
2056		goto fail2;
2057	}
2058
2059	ds = dd->dd_desc;
2060	memset(ds, 0, dd->dd_desc_len);
2061	DPRINTF(sc, MWL_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
2062	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
2063	    (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
2064
2065	return 0;
2066fail2:
2067	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
2068fail1:
2069	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
2070fail0:
2071	bus_dma_tag_destroy(dd->dd_dmat);
2072	memset(dd, 0, sizeof(*dd));
2073	return error;
2074#undef DS2PHYS
2075}
2076
/*
 * Release a descriptor DMA area created by mwl_desc_setup.
 * NB: teardown order is the reverse of setup: unload map,
 * free memory, destroy map, destroy tag; *dd is then cleared
 * so callers can detect a torn-down area (dd_desc_len == 0).
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	memset(dd, 0, sizeof(*dd));
}
2087
2088/*
2089 * Construct a tx q's free list.  The order of entries on
2090 * the list must reflect the physical layout of tx descriptors
2091 * because the firmware pre-fetches descriptors.
2092 *
2093 * XXX might be better to use indices into the buffer array.
2094 */
2095static void
2096mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2097{
2098	struct mwl_txbuf *bf;
2099	int i;
2100
2101	bf = txq->dma.dd_bufptr;
2102	STAILQ_INIT(&txq->free);
2103	for (i = 0; i < mwl_txbuf; i++, bf++)
2104		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2105	txq->nfree = i;
2106}
2107
/* Convert a descriptor's kernel virtual address to its bus address. */
#define	DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2110
/*
 * Allocate tx descriptors and the shadow mwl_txbuf array for a
 * queue, then link each buffer to its MWL_TXDESC descriptors and
 * build the free list.  Returns 0 or an errno.
 * NB: on mid-loop failure no local unwind is done; the caller
 * (mwl_dma_setup) invokes mwl_dma_cleanup to reclaim resources.
 */
static int
mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ifnet *ifp = sc->sc_ifp;
	int error, bsize, i;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;

	error = mwl_desc_setup(sc, "tx", &txq->dma,
			mwl_txbuf, sizeof(struct mwl_txbuf),
			MWL_TXDESC, sizeof(struct mwl_txdesc));
	if (error != 0)
		return error;

	/* allocate and setup tx buffers */
	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
	if (bf == NULL) {
		if_printf(ifp, "malloc of %u tx buffers failed\n",
			mwl_txbuf);
		return ENOMEM;
	}
	txq->dma.dd_bufptr = bf;

	/* each buffer owns MWL_TXDESC consecutive descriptors */
	ds = txq->dma.dd_desc;
	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
				&bf->bf_dmamap);
		if (error != 0) {
			if_printf(ifp, "unable to create dmamap for tx "
				"buffer %u, error %u\n", i, error);
			return error;
		}
	}
	mwl_txq_reset(sc, txq);
	return 0;
}
2150
2151static void
2152mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2153{
2154	struct mwl_txbuf *bf;
2155	int i;
2156
2157	bf = txq->dma.dd_bufptr;
2158	for (i = 0; i < mwl_txbuf; i++, bf++) {
2159		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2160		KASSERT(bf->bf_node == NULL, ("node on free list"));
2161		if (bf->bf_dmamap != NULL)
2162			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2163	}
2164	STAILQ_INIT(&txq->free);
2165	txq->nfree = 0;
2166	if (txq->dma.dd_bufptr != NULL) {
2167		free(txq->dma.dd_bufptr, M_MWLDEV);
2168		txq->dma.dd_bufptr = NULL;
2169	}
2170	if (txq->dma.dd_desc_len != 0)
2171		mwl_desc_cleanup(sc, &txq->dma);
2172}
2173
2174static int
2175mwl_rxdma_setup(struct mwl_softc *sc)
2176{
2177	struct ifnet *ifp = sc->sc_ifp;
2178	int error, jumbosize, bsize, i;
2179	struct mwl_rxbuf *bf;
2180	struct mwl_jumbo *rbuf;
2181	struct mwl_rxdesc *ds;
2182	caddr_t data;
2183
2184	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2185			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2186			1, sizeof(struct mwl_rxdesc));
2187	if (error != 0)
2188		return error;
2189
2190	/*
2191	 * Receive is done to a private pool of jumbo buffers.
2192	 * This allows us to attach to mbuf's and avoid re-mapping
2193	 * memory on each rx we post.  We allocate a large chunk
2194	 * of memory and manage it in the driver.  The mbuf free
2195	 * callback method is used to reclaim frames after sending
2196	 * them up the stack.  By default we allocate 2x the number of
2197	 * rx descriptors configured so we have some slop to hold
2198	 * us while frames are processed.
2199	 */
2200	if (mwl_rxbuf < 2*mwl_rxdesc) {
2201		if_printf(ifp,
2202		    "too few rx dma buffers (%d); increasing to %d\n",
2203		    mwl_rxbuf, 2*mwl_rxdesc);
2204		mwl_rxbuf = 2*mwl_rxdesc;
2205	}
2206	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2207	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2208
2209	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2210		       PAGE_SIZE, 0,		/* alignment, bounds */
2211		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2212		       BUS_SPACE_MAXADDR,	/* highaddr */
2213		       NULL, NULL,		/* filter, filterarg */
2214		       sc->sc_rxmemsize,	/* maxsize */
2215		       1,			/* nsegments */
2216		       sc->sc_rxmemsize,	/* maxsegsize */
2217		       BUS_DMA_ALLOCNOW,	/* flags */
2218		       NULL,			/* lockfunc */
2219		       NULL,			/* lockarg */
2220		       &sc->sc_rxdmat);
2221	error = bus_dmamap_create(sc->sc_rxdmat, BUS_DMA_NOWAIT, &sc->sc_rxmap);
2222	if (error != 0) {
2223		if_printf(ifp, "could not create rx DMA map\n");
2224		return error;
2225	}
2226
2227	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2228				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2229				 &sc->sc_rxmap);
2230	if (error != 0) {
2231		if_printf(ifp, "could not alloc %ju bytes of rx DMA memory\n",
2232		    (uintmax_t) sc->sc_rxmemsize);
2233		return error;
2234	}
2235
2236	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2237				sc->sc_rxmem, sc->sc_rxmemsize,
2238				mwl_load_cb, &sc->sc_rxmem_paddr,
2239				BUS_DMA_NOWAIT);
2240	if (error != 0) {
2241		if_printf(ifp, "could not load rx DMA map\n");
2242		return error;
2243	}
2244
2245	/*
2246	 * Allocate rx buffers and set them up.
2247	 */
2248	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2249	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2250	if (bf == NULL) {
2251		if_printf(ifp, "malloc of %u rx buffers failed\n", bsize);
2252		return error;
2253	}
2254	sc->sc_rxdma.dd_bufptr = bf;
2255
2256	STAILQ_INIT(&sc->sc_rxbuf);
2257	ds = sc->sc_rxdma.dd_desc;
2258	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2259		bf->bf_desc = ds;
2260		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2261		/* pre-assign dma buffer */
2262		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2263		/* NB: tail is intentional to preserve descriptor order */
2264		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2265	}
2266
2267	/*
2268	 * Place remainder of dma memory buffers on the free list.
2269	 */
2270	SLIST_INIT(&sc->sc_rxfree);
2271	for (; i < mwl_rxbuf; i++) {
2272		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2273		rbuf = MWL_JUMBO_DATA2BUF(data);
2274		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2275		sc->sc_nrxfree++;
2276	}
2277	MWL_RXFREE_INIT(sc);
2278	return 0;
2279}
2280#undef DS2PHYS
2281
/*
 * Reclaim all rx DMA state: the jumbo buffer pool, the shadow
 * buffer array, and the rx descriptor area.  Each resource is
 * checked individually so this is safe to call after a partial
 * mwl_rxdma_setup failure.
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	if (sc->sc_rxmap != NULL)
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxmap != NULL) {
		bus_dmamap_destroy(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmap = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	/* NB: dd_desc_len != 0 means the descriptor area was set up */
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
	MWL_RXFREE_DESTROY(sc);
}
2303
2304static int
2305mwl_dma_setup(struct mwl_softc *sc)
2306{
2307	int error, i;
2308
2309	error = mwl_rxdma_setup(sc);
2310	if (error != 0) {
2311		mwl_rxdma_cleanup(sc);
2312		return error;
2313	}
2314
2315	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2316		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2317		if (error != 0) {
2318			mwl_dma_cleanup(sc);
2319			return error;
2320		}
2321	}
2322	return 0;
2323}
2324
2325static void
2326mwl_dma_cleanup(struct mwl_softc *sc)
2327{
2328	int i;
2329
2330	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2331		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2332	mwl_rxdma_cleanup(sc);
2333}
2334
2335static struct ieee80211_node *
2336mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2337{
2338	struct ieee80211com *ic = vap->iv_ic;
2339	struct mwl_softc *sc = ic->ic_ifp->if_softc;
2340	const size_t space = sizeof(struct mwl_node);
2341	struct mwl_node *mn;
2342
2343	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2344	if (mn == NULL) {
2345		/* XXX stat+msg */
2346		return NULL;
2347	}
2348	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2349	return &mn->mn_node;
2350}
2351
/*
 * Reclaim driver node state: remove the station from the firmware
 * db (if it was installed, i.e. mn_staid != 0) and release the
 * station id, then chain to the saved net80211 cleanup method.
 */
static void
mwl_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
        struct mwl_softc *sc = ic->ic_ifp->if_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
	    __func__, ni, ni->ni_ic, mn->mn_staid);

	if (mn->mn_staid != 0) {
		struct ieee80211vap *vap = ni->ni_vap;

		/* NB: sta mode entries are keyed by our own address */
		if (mn->mn_hvap != NULL) {
			if (vap->iv_opmode == IEEE80211_M_STA)
				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
			else
				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
		}
		/*
		 * NB: legacy WDS peer sta db entry is installed using
		 * the associate ap's hvap; use it again to delete it.
		 * XXX can vap be NULL?
		 */
		/* NB: this else-if pairs with the if (mn->mn_hvap != NULL) above */
		else if (vap->iv_opmode == IEEE80211_M_WDS &&
		    MWL_VAP(vap)->mv_ap_hvap != NULL)
			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
			    ni->ni_macaddr);
		delstaid(sc, mn->mn_staid);
		mn->mn_staid = 0;
	}
	sc->sc_node_cleanup(ni);
}
2385
2386/*
2387 * Reclaim rx dma buffers from packets sitting on the ampdu
2388 * reorder queue for a station.  We replace buffers with a
2389 * system cluster (if available).
2390 */
2391static void
2392mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
2393{
2394#if 0
2395	int i, n, off;
2396	struct mbuf *m;
2397	void *cl;
2398
2399	n = rap->rxa_qframes;
2400	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
2401		m = rap->rxa_m[i];
2402		if (m == NULL)
2403			continue;
2404		n--;
2405		/* our dma buffers have a well-known free routine */
2406		if ((m->m_flags & M_EXT) == 0 ||
2407		    m->m_ext.ext_free != mwl_ext_free)
2408			continue;
2409		/*
2410		 * Try to allocate a cluster and move the data.
2411		 */
2412		off = m->m_data - m->m_ext.ext_buf;
2413		if (off + m->m_pkthdr.len > MCLBYTES) {
2414			/* XXX no AMSDU for now */
2415			continue;
2416		}
2417		cl = pool_cache_get_paddr(&mclpool_cache, 0,
2418		    &m->m_ext.ext_paddr);
2419		if (cl != NULL) {
2420			/*
2421			 * Copy the existing data to the cluster, remove
2422			 * the rx dma buffer, and attach the cluster in
2423			 * its place.  Note we preserve the offset to the
2424			 * data so frames being bridged can still prepend
2425			 * their headers without adding another mbuf.
2426			 */
2427			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
2428			MEXTREMOVE(m);
2429			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
2430			/* setup mbuf like _MCLGET does */
2431			m->m_flags |= M_CLUSTER | M_EXT_RW;
2432			_MOWNERREF(m, M_EXT | M_CLUSTER);
2433			/* NB: m_data is clobbered by MEXTADDR, adjust */
2434			m->m_data += off;
2435		}
2436	}
2437#endif
2438}
2439
2440/*
2441 * Callback to reclaim resources.  We first let the
2442 * net80211 layer do it's thing, then if we are still
2443 * blocked by a lack of rx dma buffers we walk the ampdu
2444 * reorder q's to reclaim buffers by copying to a system
2445 * cluster.
2446 */
2447static void
2448mwl_node_drain(struct ieee80211_node *ni)
2449{
2450	struct ieee80211com *ic = ni->ni_ic;
2451        struct mwl_softc *sc = ic->ic_ifp->if_softc;
2452	struct mwl_node *mn = MWL_NODE(ni);
2453
2454	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2455	    __func__, ni, ni->ni_vap, mn->mn_staid);
2456
2457	/* NB: call up first to age out ampdu q's */
2458	sc->sc_node_drain(ni);
2459
2460	/* XXX better to not check low water mark? */
2461	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2462	    (ni->ni_flags & IEEE80211_NODE_HT)) {
2463		uint8_t tid;
2464		/*
2465		 * Walk the reorder q and reclaim rx dma buffers by copying
2466		 * the packet contents into clusters.
2467		 */
2468		for (tid = 0; tid < WME_NUM_TID; tid++) {
2469			struct ieee80211_rx_ampdu *rap;
2470
2471			rap = &ni->ni_rx_ampdu[tid];
2472			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2473				continue;
2474			if (rap->rxa_qframes)
2475				mwl_ampdu_rxdma_reclaim(rap);
2476		}
2477	}
2478}
2479
/*
 * Return rssi and noise floor for a node.  Rssi comes from the
 * net80211 method; the noise floor is a fixed -95 for now (use of
 * the per-antenna nf data is disabled below pending smoothing).
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2495
2496/*
2497 * Convert Hardware per-antenna rssi info to common format:
2498 * Let a1, a2, a3 represent the amplitudes per chain
2499 * Let amax represent max[a1, a2, a3]
2500 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2501 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2502 * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2503 * maintain some extra precision.
2504 *
2505 * Values are stored in .5 db format capped at 127.
2506 */
2507static void
2508mwl_node_getmimoinfo(const struct ieee80211_node *ni,
2509	struct ieee80211_mimo_info *mi)
2510{
2511#define	CVT(_dst, _src) do {						\
2512	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
2513	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
2514} while (0)
2515	static const int8_t logdbtbl[32] = {
2516	       0,   0,  24,  38,  48,  56,  62,  68,
2517	      72,  76,  80,  83,  86,  89,  92,  94,
2518	      96,  98, 100, 102, 104, 106, 107, 109,
2519	     110, 112, 113, 115, 116, 117, 118, 119
2520	};
2521	const struct mwl_node *mn = MWL_NODE_CONST(ni);
2522	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
2523	uint32_t rssi_max;
2524
2525	rssi_max = mn->mn_ai.rssi_a;
2526	if (mn->mn_ai.rssi_b > rssi_max)
2527		rssi_max = mn->mn_ai.rssi_b;
2528	if (mn->mn_ai.rssi_c > rssi_max)
2529		rssi_max = mn->mn_ai.rssi_c;
2530
2531	CVT(mi->rssi[0], mn->mn_ai.rssi_a);
2532	CVT(mi->rssi[1], mn->mn_ai.rssi_b);
2533	CVT(mi->rssi[2], mn->mn_ai.rssi_c);
2534
2535	mi->noise[0] = mn->mn_ai.nf_a;
2536	mi->noise[1] = mn->mn_ai.nf_b;
2537	mi->noise[2] = mn->mn_ai.nf_c;
2538#undef CVT
2539}
2540
2541static __inline void *
2542mwl_getrxdma(struct mwl_softc *sc)
2543{
2544	struct mwl_jumbo *buf;
2545	void *data;
2546
2547	/*
2548	 * Allocate from jumbo pool.
2549	 */
2550	MWL_RXFREE_LOCK(sc);
2551	buf = SLIST_FIRST(&sc->sc_rxfree);
2552	if (buf == NULL) {
2553		DPRINTF(sc, MWL_DEBUG_ANY,
2554		    "%s: out of rx dma buffers\n", __func__);
2555		sc->sc_stats.mst_rx_nodmabuf++;
2556		data = NULL;
2557	} else {
2558		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2559		sc->sc_nrxfree--;
2560		data = MWL_JUMBO_BUF2DATA(buf);
2561	}
2562	MWL_RXFREE_UNLOCK(sc);
2563	return data;
2564}
2565
2566static __inline void
2567mwl_putrxdma(struct mwl_softc *sc, void *data)
2568{
2569	struct mwl_jumbo *buf;
2570
2571	/* XXX bounds check data */
2572	MWL_RXFREE_LOCK(sc);
2573	buf = MWL_JUMBO_DATA2BUF(data);
2574	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2575	sc->sc_nrxfree++;
2576	MWL_RXFREE_UNLOCK(sc);
2577}
2578
/*
 * (Re)initialize an rx descriptor, attaching a dma buffer if the
 * slot has none, and hand ownership to the firmware.  Returns
 * ENOMEM when no dma buffer is available (the descriptor is then
 * marked so the firmware skips it).
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	/* hand the descriptor back to the firmware last */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2617
2618static void
2619mwl_ext_free(void *data, void *arg)
2620{
2621	struct mwl_softc *sc = arg;
2622
2623	/* XXX bounds check data */
2624	mwl_putrxdma(sc, data);
2625	/*
2626	 * If we were previously blocked by a lack of rx dma buffers
2627	 * check if we now have enough to restart rx interrupt handling.
2628	 * NB: we know we are called at splvm which is above splnet.
2629	 */
2630	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
2631		sc->sc_rxblocked = 0;
2632		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
2633	}
2634}
2635
/* 802.11 BlockAckReq (BAR) frame header, used for sizing in
 * mwl_anyhdrsize; the trailing BAR control, sequence control,
 * and FCS fields are intentionally omitted. */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];	/* frame control */
	u_int8_t	i_dur[2];	/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2643
2644/*
2645 * Like ieee80211_anyhdrsize, but handles BAR frames
2646 * specially so the logic below to piece the 802.11
2647 * header together works.
2648 */
2649static __inline int
2650mwl_anyhdrsize(const void *data)
2651{
2652	const struct ieee80211_frame *wh = data;
2653
2654	if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2655		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2656		case IEEE80211_FC0_SUBTYPE_CTS:
2657		case IEEE80211_FC0_SUBTYPE_ACK:
2658			return sizeof(struct ieee80211_frame_ack);
2659		case IEEE80211_FC0_SUBTYPE_BAR:
2660			return sizeof(struct mwl_frame_bar);
2661		}
2662		return sizeof(struct ieee80211_frame_min);
2663	} else
2664		return ieee80211_hdrsize(data);
2665}
2666
2667static void
2668mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2669{
2670	const struct ieee80211_frame *wh;
2671	struct ieee80211_node *ni;
2672
2673	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2674	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2675	if (ni != NULL) {
2676		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2677		ieee80211_free_node(ni);
2678	}
2679}
2680
2681/*
2682 * Convert hardware signal strength to rssi.  The value
2683 * provided by the device has the noise floor added in;
2684 * we need to compensate for this but we don't have that
2685 * so we use a fixed value.
2686 *
2687 * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2688 * offset is already set as part of the initial gain.  This
2689 * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2690 */
2691static __inline int
2692cvtrssi(uint8_t ssi)
2693{
2694	int rssi = (int) ssi + 8;
2695	/* XXX hack guess until we have a real noise floor */
2696	rssi = 2*(87 - rssi);	/* NB: .5 dBm units */
2697	return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi);
2698}
2699
/*
 * Rx processing task (deferred from the rx interrupt).  Walk the
 * rx descriptor ring processing up to mwl_rxquota completed
 * frames: attach each frame's dma buffer to an mbuf, rebuild the
 * 802.11 header in front of the payload, and dispatch to net80211.
 * Stops early on a descriptor still owned by the firmware, on a
 * slot missing its dma buffer, or when the replacement buffer pool
 * is exhausted (rx interrupts are then masked until mwl_ext_free
 * refills the pool).
 */
static void
mwl_rx_proc(void *arg, int npending)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((((const struct ieee80211_frame *)wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct mwl_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_qosframe_addr4 *wh4;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			ifp->if_ierrors++;
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free,
		    data, sc, 0, EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		m->m_pkthdr.rcvif = ifp;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh)) {
			if (IEEE80211_DIR_DSTODS(wh)) {
				wh4 = mtod(m,
				    struct ieee80211_qosframe_addr4*);
				*(uint16_t *)wh4->i_qos = ds->QosCtrl;
			} else {
				*(uint16_t *)wh->i_qos = ds->QosCtrl;
			}
		}
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_WEP | IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		ifp->if_ipackets++;

		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	/* remember where to resume on the next pass */
	sc->sc_rxnext = bf;

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
	    !IFQ_IS_EMPTY(&ifp->if_snd)) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(ifp);
	}
#undef IEEE80211_DIR_DSTODS
}
2935
/*
 * Initialize a tx queue: record queue number, point each buffer
 * at its queue, and chain the descriptors' pPhysNext fields into
 * a circular list (the last descriptor links back to the first)
 * in free-list order for the firmware's pre-fetching.
 */
static void
mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
{
	struct mwl_txbuf *bf, *bn;
	struct mwl_txdesc *ds;

	MWL_TXQ_LOCK_INIT(sc, txq);
	txq->qnum = qnum;
	txq->txpri = 0;	/* XXX */
#if 0
	/* NB: q setup by mwl_txdma_setup XXX */
	STAILQ_INIT(&txq->free);
#endif
	STAILQ_FOREACH(bf, &txq->free, bf_list) {
		bf->bf_txq = txq;

		ds = bf->bf_desc;
		bn = STAILQ_NEXT(bf, bf_list);
		/* wrap the last descriptor back to the first */
		if (bn == NULL)
			bn = STAILQ_FIRST(&txq->free);
		ds->pPhysNext = htole32(bn->bf_daddr);
	}
	STAILQ_INIT(&txq->active);
}
2960
2961/*
2962 * Setup a hardware data transmit queue for the specified
2963 * access control.  We record the mapping from ac's
2964 * to h/w queues for use by mwl_tx_start.
2965 */
2966static int
2967mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2968{
2969#define	N(a)	(sizeof(a)/sizeof(a[0]))
2970	struct mwl_txq *txq;
2971
2972	if (ac >= N(sc->sc_ac2q)) {
2973		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2974			ac, N(sc->sc_ac2q));
2975		return 0;
2976	}
2977	if (mvtype >= MWL_NUM_TX_QUEUES) {
2978		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2979			mvtype, MWL_NUM_TX_QUEUES);
2980		return 0;
2981	}
2982	txq = &sc->sc_txq[mvtype];
2983	mwl_txq_init(sc, txq, mvtype);
2984	sc->sc_ac2q[ac] = txq;
2985	return 1;
2986#undef N
2987}
2988
2989/*
2990 * Update WME parameters for a transmit queue.
2991 */
2992static int
2993mwl_txq_update(struct mwl_softc *sc, int ac)
2994{
2995#define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
2996	struct ifnet *ifp = sc->sc_ifp;
2997	struct ieee80211com *ic = ifp->if_l2com;
2998	struct mwl_txq *txq = sc->sc_ac2q[ac];
2999	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
3000	struct mwl_hal *mh = sc->sc_mh;
3001	int aifs, cwmin, cwmax, txoplim;
3002
3003	aifs = wmep->wmep_aifsn;
3004	/* XXX in sta mode need to pass log values for cwmin/max */
3005	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
3006	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
3007	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */
3008
3009	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
3010		device_printf(sc->sc_dev, "unable to update hardware queue "
3011			"parameters for %s traffic!\n",
3012			ieee80211_wme_acnames[ac]);
3013		return 0;
3014	}
3015	return 1;
3016#undef MWL_EXPONENT_TO_VALUE
3017}
3018
3019/*
3020 * Callback from the 802.11 layer to update WME parameters.
3021 */
3022static int
3023mwl_wme_update(struct ieee80211com *ic)
3024{
3025	struct mwl_softc *sc = ic->ic_ifp->if_softc;
3026
3027	return !mwl_txq_update(sc, WME_AC_BE) ||
3028	    !mwl_txq_update(sc, WME_AC_BK) ||
3029	    !mwl_txq_update(sc, WME_AC_VI) ||
3030	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
3031}
3032
3033/*
3034 * Reclaim resources for a setup queue.
3035 */
static void
mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	/* XXX hal work? */
	/* Only the queue lock needs tearing down here; the descriptor
	 * memory itself is reclaimed by the tx DMA cleanup path. */
	MWL_TXQ_LOCK_DESTROY(txq);
}
3042
3043/*
3044 * Reclaim all tx queue resources.
3045 */
3046static void
3047mwl_tx_cleanup(struct mwl_softc *sc)
3048{
3049	int i;
3050
3051	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3052		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
3053}
3054
/*
 * Load a frame's mbuf chain for DMA transmit.  Chains needing more
 * than MWL_TXDESC segments are linearized with m_collapse/m_defrag
 * and re-loaded.  On success the segment list is in bf->bf_segs /
 * bf->bf_nseg and the (possibly replaced) chain is in bf->bf_m; on
 * failure the mbuf chain is freed and an errno is returned.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		/* mark as over-long; the EFBIG branch below linearizes */
		bf->bf_nseg = MWL_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_DONTWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_DONTWAIT);
#endif
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* reload the map with the linearized chain */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3116
static __inline int
mwl_cvtlegacyrate(int rate)
{
	/*
	 * IEEE legacy rate codes (units of 500Kb/s) in firmware
	 * rate-index order; the index of a match is the value for
	 * the tx descriptor rate field.
	 */
	static const int legacyrates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	int ix;

	for (ix = 0; ix < (int)(sizeof(legacyrates)/sizeof(legacyrates[0])); ix++) {
		if (legacyrates[ix] == rate)
			return ix;
	}
	return 0;		/* unknown rate: default to index 0 */
}
3137
3138/*
3139 * Calculate fixed tx rate information per client state;
3140 * this value is suitable for writing to the Format field
3141 * of a tx descriptor.
3142 */
static uint16_t
mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
{
	uint16_t fmt;

	/* Base: antenna selection plus extension-channel position
	 * derived from the node's channel (HT40D => extension below). */
	fmt = SM(3, EAGLE_TXD_ANTENNA)
	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
		fmt |= EAGLE_TXD_FORMAT_HT
		    /* NB: 0x80 implicitly stripped from ucastrate */
		    | SM(rate, EAGLE_TXD_RATE);
		/* XXX short/long GI may be wrong; re-check */
		/* Channel width and guard interval from the node's
		 * channel and advertised HT capabilities. */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			fmt |= EAGLE_TXD_CHW_40
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		} else {
			fmt |= EAGLE_TXD_CHW_20
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		}
	} else {			/* legacy rate */
		fmt |= EAGLE_TXD_FORMAT_LEGACY
		    | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
		    | EAGLE_TXD_CHW_20
		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
	}
	return fmt;
}
3175
3176static int
3177mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
3178    struct mbuf *m0)
3179{
3180#define	IEEE80211_DIR_DSTODS(wh) \
3181	((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
3182	struct ifnet *ifp = sc->sc_ifp;
3183	struct ieee80211com *ic = ifp->if_l2com;
3184	struct ieee80211vap *vap = ni->ni_vap;
3185	int error, iswep, ismcast;
3186	int hdrlen, copyhdrlen, pktlen;
3187	struct mwl_txdesc *ds;
3188	struct mwl_txq *txq;
3189	struct ieee80211_frame *wh;
3190	struct mwltxrec *tr;
3191	struct mwl_node *mn;
3192	uint16_t qos;
3193#if MWL_TXDESC > 1
3194	int i;
3195#endif
3196
3197	wh = mtod(m0, struct ieee80211_frame *);
3198	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
3199	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
3200	hdrlen = ieee80211_anyhdrsize(wh);
3201	copyhdrlen = hdrlen;
3202	pktlen = m0->m_pkthdr.len;
3203	if (IEEE80211_QOS_HAS_SEQ(wh)) {
3204		if (IEEE80211_DIR_DSTODS(wh)) {
3205			qos = *(uint16_t *)
3206			    (((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
3207			copyhdrlen -= sizeof(qos);
3208		} else
3209			qos = *(uint16_t *)
3210			    (((struct ieee80211_qosframe *) wh)->i_qos);
3211	} else
3212		qos = 0;
3213
3214	if (iswep) {
3215		const struct ieee80211_cipher *cip;
3216		struct ieee80211_key *k;
3217
3218		/*
3219		 * Construct the 802.11 header+trailer for an encrypted
3220		 * frame. The only reason this can fail is because of an
3221		 * unknown or unsupported cipher/key type.
3222		 *
3223		 * NB: we do this even though the firmware will ignore
3224		 *     what we've done for WEP and TKIP as we need the
3225		 *     ExtIV filled in for CCMP and this also adjusts
3226		 *     the headers which simplifies our work below.
3227		 */
3228		k = ieee80211_crypto_encap(ni, m0);
3229		if (k == NULL) {
3230			/*
3231			 * This can happen when the key is yanked after the
3232			 * frame was queued.  Just discard the frame; the
3233			 * 802.11 layer counts failures and provides
3234			 * debugging/diagnostics.
3235			 */
3236			m_freem(m0);
3237			return EIO;
3238		}
3239		/*
3240		 * Adjust the packet length for the crypto additions
3241		 * done during encap and any other bits that the f/w
3242		 * will add later on.
3243		 */
3244		cip = k->wk_cipher;
3245		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;
3246
3247		/* packet header may have moved, reset our local pointer */
3248		wh = mtod(m0, struct ieee80211_frame *);
3249	}
3250
3251	if (ieee80211_radiotap_active_vap(vap)) {
3252		sc->sc_tx_th.wt_flags = 0;	/* XXX */
3253		if (iswep)
3254			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3255#if 0
3256		sc->sc_tx_th.wt_rate = ds->DataRate;
3257#endif
3258		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
3259		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
3260
3261		ieee80211_radiotap_tx(vap, m0);
3262	}
3263	/*
3264	 * Copy up/down the 802.11 header; the firmware requires
3265	 * we present a 2-byte payload length followed by a
3266	 * 4-address header (w/o QoS), followed (optionally) by
3267	 * any WEP/ExtIV header (but only filled in for CCMP).
3268	 * We are assured the mbuf has sufficient headroom to
3269	 * prepend in-place by the setup of ic_headroom in
3270	 * mwl_attach.
3271	 */
3272	if (hdrlen < sizeof(struct mwltxrec)) {
3273		const int space = sizeof(struct mwltxrec) - hdrlen;
3274		if (M_LEADINGSPACE(m0) < space) {
3275			/* NB: should never happen */
3276			device_printf(sc->sc_dev,
3277			    "not enough headroom, need %d found %zd, "
3278			    "m_flags 0x%x m_len %d\n",
3279			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
3280			ieee80211_dump_pkt(ic,
3281			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
3282			m_freem(m0);
3283			sc->sc_stats.mst_tx_noheadroom++;
3284			return EIO;
3285		}
3286		M_PREPEND(m0, space, M_NOWAIT);
3287	}
3288	tr = mtod(m0, struct mwltxrec *);
3289	if (wh != (struct ieee80211_frame *) &tr->wh)
3290		ovbcopy(wh, &tr->wh, hdrlen);
3291	/*
3292	 * Note: the "firmware length" is actually the length
3293	 * of the fully formed "802.11 payload".  That is, it's
3294	 * everything except for the 802.11 header.  In particular
3295	 * this includes all crypto material including the MIC!
3296	 */
3297	tr->fwlen = htole16(pktlen - hdrlen);
3298
3299	/*
3300	 * Load the DMA map so any coalescing is done.  This
3301	 * also calculates the number of descriptors we need.
3302	 */
3303	error = mwl_tx_dmasetup(sc, bf, m0);
3304	if (error != 0) {
3305		/* NB: stat collected in mwl_tx_dmasetup */
3306		DPRINTF(sc, MWL_DEBUG_XMIT,
3307		    "%s: unable to setup dma\n", __func__);
3308		return error;
3309	}
3310	bf->bf_node = ni;			/* NB: held reference */
3311	m0 = bf->bf_m;				/* NB: may have changed */
3312	tr = mtod(m0, struct mwltxrec *);
3313	wh = (struct ieee80211_frame *)&tr->wh;
3314
3315	/*
3316	 * Formulate tx descriptor.
3317	 */
3318	ds = bf->bf_desc;
3319	txq = bf->bf_txq;
3320
3321	ds->QosCtrl = qos;			/* NB: already little-endian */
3322#if MWL_TXDESC == 1
3323	/*
3324	 * NB: multiframes should be zero because the descriptors
3325	 *     are initialized to zero.  This should handle the case
3326	 *     where the driver is built with MWL_TXDESC=1 but we are
3327	 *     using firmware with multi-segment support.
3328	 */
3329	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
3330	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
3331#else
3332	ds->multiframes = htole32(bf->bf_nseg);
3333	ds->PktLen = htole16(m0->m_pkthdr.len);
3334	for (i = 0; i < bf->bf_nseg; i++) {
3335		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
3336		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
3337	}
3338#endif
3339	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
3340	ds->Format = 0;
3341	ds->pad = 0;
3342	ds->ack_wcb_addr = 0;
3343
3344	mn = MWL_NODE(ni);
3345	/*
3346	 * Select transmit rate.
3347	 */
3348	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
3349	case IEEE80211_FC0_TYPE_MGT:
3350		sc->sc_stats.mst_tx_mgmt++;
3351		/* fall thru... */
3352	case IEEE80211_FC0_TYPE_CTL:
3353		/* NB: assign to BE q to avoid bursting */
3354		ds->TxPriority = MWL_WME_AC_BE;
3355		break;
3356	case IEEE80211_FC0_TYPE_DATA:
3357		if (!ismcast) {
3358			const struct ieee80211_txparam *tp = ni->ni_txparms;
3359			/*
3360			 * EAPOL frames get forced to a fixed rate and w/o
3361			 * aggregation; otherwise check for any fixed rate
3362			 * for the client (may depend on association state).
3363			 */
3364			if (m0->m_flags & M_EAPOL) {
3365				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
3366				ds->Format = mvp->mv_eapolformat;
3367				ds->pad = htole16(
3368				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
3369			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3370				/* XXX pre-calculate per node */
3371				ds->Format = htole16(
3372				    mwl_calcformat(tp->ucastrate, ni));
3373				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
3374			}
3375			/* NB: EAPOL frames will never have qos set */
3376			if (qos == 0)
3377				ds->TxPriority = txq->qnum;
3378#if MWL_MAXBA > 3
3379			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
3380				ds->TxPriority = mn->mn_ba[3].txq;
3381#endif
3382#if MWL_MAXBA > 2
3383			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
3384				ds->TxPriority = mn->mn_ba[2].txq;
3385#endif
3386#if MWL_MAXBA > 1
3387			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
3388				ds->TxPriority = mn->mn_ba[1].txq;
3389#endif
3390#if MWL_MAXBA > 0
3391			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
3392				ds->TxPriority = mn->mn_ba[0].txq;
3393#endif
3394			else
3395				ds->TxPriority = txq->qnum;
3396		} else
3397			ds->TxPriority = txq->qnum;
3398		break;
3399	default:
3400		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
3401			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
3402		sc->sc_stats.mst_tx_badframetype++;
3403		m_freem(m0);
3404		return EIO;
3405	}
3406
3407	if (IFF_DUMPPKTS_XMIT(sc))
3408		ieee80211_dump_pkt(ic,
3409		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
3410		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);
3411
3412	MWL_TXQ_LOCK(txq);
3413	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
3414	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
3415	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3416
3417	ifp->if_opackets++;
3418	sc->sc_tx_timer = 5;
3419	MWL_TXQ_UNLOCK(txq);
3420
3421	return 0;
3422#undef	IEEE80211_DIR_DSTODS
3423}
3424
static __inline int
mwl_cvtlegacyrix(int rix)
{
	/* firmware rate index -> IEEE rate code (units of 500Kb/s) */
	static const int ieeerates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };

	/* NB: the unsigned cast also rejects negative indices */
	if ((size_t) rix >= sizeof(ieeerates) / sizeof(ieeerates[0]))
		return 0;
	return ieeerates[rix];
}
3434
3435/*
3436 * Process completed xmit descriptors from the specified queue.
3437 */
static int
mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
{
/* NB: this macro is currently unused in the function body */
#define	EAGLE_TXD_STATUS_MCAST \
	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;
	struct ieee80211_node *ni;
	struct mwl_node *an;
	int nreaped;
	uint32_t status;

	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
	/* Reap completed buffers from the head of the active list until
	 * it is empty or we hit one the firmware still owns.  The lock
	 * is dropped while processing each reaped buffer. */
	for (nreaped = 0;; nreaped++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		ds = bf->bf_desc;
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
			/* still owned by firmware; stop reaping */
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);

#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
			mwl_printtxbuf(bf, txq->qnum, nreaped);
#endif
		ni = bf->bf_node;
		if (ni != NULL) {
			an = MWL_NODE(ni);
			status = le32toh(ds->Status);
			if (status & EAGLE_TXD_STATUS_OK) {
				uint16_t Format = le16toh(ds->Format);
				uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);

				sc->sc_stats.mst_ant_tx[txant]++;
				if (status & EAGLE_TXD_STATUS_OK_RETRY)
					sc->sc_stats.mst_tx_retries++;
				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
					sc->sc_stats.mst_tx_mretries++;
				if (txq->qnum >= MWL_WME_AC_VO)
					ic->ic_wme.wme_hipri_traffic++;
				/* Record the rate the f/w actually used;
				 * legacy indices are converted back to
				 * IEEE rate codes, HT rates get the MCS
				 * flag set. */
				ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
					ni->ni_txrate = mwl_cvtlegacyrix(
					    ni->ni_txrate);
				} else
					ni->ni_txrate |= IEEE80211_RATE_MCS;
				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
			} else {
				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
					sc->sc_stats.mst_tx_linkerror++;
				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
					sc->sc_stats.mst_tx_xretries++;
				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
					sc->sc_stats.mst_tx_aging++;
				if (bf->bf_m->m_flags & M_FF)
					sc->sc_stats.mst_ff_txerr++;
			}
			/*
			 * Do any tx complete callback.  Note this must
			 * be done before releasing the node reference.
			 * XXX no way to figure out if frame was ACK'd
			 */
			if (bf->bf_m->m_flags & M_TXCB) {
				/* XXX strip fw len in case header inspected */
				m_adj(bf->bf_m, sizeof(uint16_t));
				ieee80211_process_callback(ni, bf->bf_m,
					(status & EAGLE_TXD_STATUS_OK) == 0);
			}
			/*
			 * Reclaim reference to node.
			 *
			 * NB: the node may be reclaimed here if, for example
			 *     this is a DEAUTH message that was sent and the
			 *     node was timed out due to inactivity.
			 */
			ieee80211_free_node(ni);
		}
		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);

		/* release DMA resources and return the buffer to the
		 * free list */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
	return nreaped;
#undef EAGLE_TXD_STATUS_MCAST
}
3538
3539/*
3540 * Deferred processing of transmit interrupt; special-cased
3541 * for four hardware queues, 0-3.
3542 */
3543static void
3544mwl_tx_proc(void *arg, int npending)
3545{
3546	struct mwl_softc *sc = arg;
3547	struct ifnet *ifp = sc->sc_ifp;
3548	int nreaped;
3549
3550	/*
3551	 * Process each active queue.
3552	 */
3553	nreaped = 0;
3554	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3555		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3556	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3557		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3558	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3559		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3560	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3561		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3562
3563	if (nreaped != 0) {
3564		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3565		sc->sc_tx_timer = 0;
3566		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
3567			/* NB: kick fw; the tx thread may have been preempted */
3568			mwl_hal_txstart(sc->sc_mh, 0);
3569			mwl_start(ifp);
3570		}
3571	}
3572}
3573
/*
 * Drain all frames queued on a single tx queue, reclaiming node
 * references, DMA mappings, and buffers.  Completion status is
 * ignored; this is used on reset/shutdown paths.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ifnet *ifp = sc->sc_ifp;
			struct ieee80211com *ic = ifp->if_l2com;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		/* return the buffer to the free list */
		mwl_puttxbuf_tail(txq, bf);
	}
}
3618
3619/*
3620 * Drain the transmit queues and reclaim resources.
3621 */
3622static void
3623mwl_draintxq(struct mwl_softc *sc)
3624{
3625	struct ifnet *ifp = sc->sc_ifp;
3626	int i;
3627
3628	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3629		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3630	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3631	sc->sc_tx_timer = 0;
3632}
3633
3634#ifdef MWL_DIAGAPI
3635/*
3636 * Reset the transmit queues to a pristine state after a fw download.
3637 */
static void
mwl_resettxq(struct mwl_softc *sc)
{
	int i;

	/* NB: assumes queues were previously drained/stopped */
	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
		mwl_txq_reset(sc, &sc->sc_txq[i]);
}
3646#endif /* MWL_DIAGAPI */
3647
3648/*
3649 * Clear the transmit queues of any frames submitted for the
3650 * specified vap.  This is done when the vap is deleted so we
3651 * don't potentially reference the vap after it is gone.
3652 * Note we cannot remove the frames; we only reclaim the node
3653 * reference.
3654 */
3655static void
3656mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3657{
3658	struct mwl_txq *txq;
3659	struct mwl_txbuf *bf;
3660	int i;
3661
3662	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3663		txq = &sc->sc_txq[i];
3664		MWL_TXQ_LOCK(txq);
3665		STAILQ_FOREACH(bf, &txq->active, bf_list) {
3666			struct ieee80211_node *ni = bf->bf_node;
3667			if (ni != NULL && ni->ni_vap == vap) {
3668				bf->bf_node = NULL;
3669				ieee80211_free_node(ni);
3670			}
3671		}
3672		MWL_TXQ_UNLOCK(txq);
3673	}
3674}
3675
3676static int
3677mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
3678	const uint8_t *frm, const uint8_t *efrm)
3679{
3680	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3681	const struct ieee80211_action *ia;
3682
3683	ia = (const struct ieee80211_action *) frm;
3684	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3685	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3686		const struct ieee80211_action_ht_mimopowersave *mps =
3687		    (const struct ieee80211_action_ht_mimopowersave *) ia;
3688
3689		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3690		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3691		    MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
3692		return 0;
3693	} else
3694		return sc->sc_recv_action(ni, wh, frm, efrm);
3695}
3696
/*
 * Intercept an outbound ADDBA request: allocate a firmware BA
 * stream slot for the (node, TID) pair before handing off to the
 * stock net80211 handler.  Returns 0 (no aggregation) when no slot
 * or f/w stream is available.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3768
/*
 * Intercept an inbound ADDBA response: on success create the
 * firmware BA stream pre-allocated in mwl_addba_request; on NAK (or
 * a f/w create failure) destroy the stream and release the slot.
 * Falls through to the stock net80211 handler in all cases.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, TID %d\n",
		    __func__, tap->txa_tid);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d TID %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_tid, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3835
3836static void
3837mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3838{
3839	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3840	struct mwl_bastate *bas;
3841
3842	bas = tap->txa_private;
3843	if (bas != NULL) {
3844		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3845		    __func__, bas->bastream);
3846		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3847		mwl_bastream_free(bas);
3848		tap->txa_private = NULL;
3849	}
3850	sc->sc_addba_stop(ni, tap);
3851}
3852
3853/*
3854 * Setup the rx data structures.  This should only be
3855 * done once or we may get out of sync with the firmware.
3856 */
static int
mwl_startrecv(struct mwl_softc *sc)
{
	/* One-time setup: initialize each rx buffer and link the
	 * descriptors into a circular DMA chain (last wraps to first).
	 * Subsequent calls only refresh the rx filters/mode. */
	if (!sc->sc_recvsetup) {
		struct mwl_rxbuf *bf, *prev;
		struct mwl_rxdesc *ds;

		prev = NULL;
		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			int error = mwl_rxbuf_init(sc, bf);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_RECV,
					"%s: mwl_rxbuf_init failed %d\n",
					__func__, error);
				return error;
			}
			/* link the previous descriptor to this buffer */
			if (prev != NULL) {
				ds = prev->bf_desc;
				ds->pPhysNext = htole32(bf->bf_daddr);
			}
			prev = bf;
		}
		/* close the ring: last descriptor points at the first */
		if (prev != NULL) {
			ds = prev->bf_desc;
			ds->pPhysNext =
			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
		}
		sc->sc_recvsetup = 1;
	}
	mwl_mode_init(sc);		/* set filters, etc. */
	return 0;
}
3889
3890static MWL_HAL_APMODE
3891mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3892{
3893	MWL_HAL_APMODE mode;
3894
3895	if (IEEE80211_IS_CHAN_HT(chan)) {
3896		if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3897			mode = AP_MODE_N_ONLY;
3898		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3899			mode = AP_MODE_AandN;
3900		else if (vap->iv_flags & IEEE80211_F_PUREG)
3901			mode = AP_MODE_GandN;
3902		else
3903			mode = AP_MODE_BandGandN;
3904	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3905		if (vap->iv_flags & IEEE80211_F_PUREG)
3906			mode = AP_MODE_G_ONLY;
3907		else
3908			mode = AP_MODE_MIXED;
3909	} else if (IEEE80211_IS_CHAN_B(chan))
3910		mode = AP_MODE_B_ONLY;
3911	else if (IEEE80211_IS_CHAN_A(chan))
3912		mode = AP_MODE_A_ONLY;
3913	else
3914		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3915	return mode;
3916}
3917
/* Push the AP operating mode for the channel to the firmware vap. */
static int
mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
}
3924
3925/*
3926 * Set/change channels.
3927 */
static int
mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
{
	struct mwl_hal *mh = sc->sc_mh;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	MWL_HAL_CHANNEL hchan;
	int maxtxpow;

	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
	    __func__, chan->ic_freq, chan->ic_flags);

	/*
	 * Convert to a HAL channel description with
	 * the flags constrained to reflect the current
	 * operating mode.
	 */
	mwl_mapchan(&hchan, chan);
	mwl_hal_intrset(mh, 0);		/* disable interrupts */
#if 0
	mwl_draintxq(sc);		/* clear pending tx frames */
#endif
	mwl_hal_setchannel(mh, &hchan);
	/*
	 * Tx power is cap'd by the regulatory setting and
	 * possibly a user-set limit.  We pass the min of
	 * these to the hal to apply them to the cal data
	 * for this channel.
	 * XXX min bound?
	 */
	/* NB: regpower is in dBm, txpowlimit in 0.5 dBm units */
	maxtxpow = 2*chan->ic_maxregpower;
	if (maxtxpow > ic->ic_txpowlimit)
		maxtxpow = ic->ic_txpowlimit;
	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
	/* NB: potentially change mcast/mgt rates */
	mwl_setcurchanrates(sc);

	/*
	 * Update internal state.
	 */
	/* radiotap headers carry the current channel freq/flags */
	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
	if (IEEE80211_IS_CHAN_A(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
	} else {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
	}
	sc->sc_curchan = hchan;
	mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable interrupts */

	return 0;
}
3985
static void
mwl_scan_start(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;

	/* NB: scan start requires no h/w action here; debug trace only */
	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3994
static void
mwl_scan_end(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;

	/* NB: scan end requires no h/w action here; debug trace only */
	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
4003
4004static void
4005mwl_set_channel(struct ieee80211com *ic)
4006{
4007	struct ifnet *ifp = ic->ic_ifp;
4008	struct mwl_softc *sc = ifp->if_softc;
4009
4010	(void) mwl_chan_set(sc, ic->ic_curchan);
4011}
4012
4013/*
4014 * Handle a channel switch request.  We inform the firmware
4015 * and mark the global state to suppress various actions.
4016 * NB: we issue only one request to the fw; we may be called
4017 * multiple times if there are multiple vap's.
4018 */
4019static void
4020mwl_startcsa(struct ieee80211vap *vap)
4021{
4022	struct ieee80211com *ic = vap->iv_ic;
4023	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4024	MWL_HAL_CHANNEL hchan;
4025
4026	if (sc->sc_csapending)
4027		return;
4028
4029	mwl_mapchan(&hchan, ic->ic_csa_newchan);
4030	/* 1 =>'s quiet channel */
4031	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
4032	sc->sc_csapending = 1;
4033}
4034
4035/*
4036 * Plumb any static WEP key for the station.  This is
4037 * necessary as we must propagate the key from the
4038 * global key table of the vap to each sta db entry.
4039 */
4040static void
4041mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4042{
4043	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
4044		IEEE80211_F_PRIVACY &&
4045	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
4046	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
4047		(void) mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac);
4048}
4049
/*
 * Create/refresh the firmware station db entry for a peer node
 * and re-plumb any static WEP key (which the fw call clobbers).
 * aid/staid of 0 are used for the placeholder entry made at AUTH
 * time; pi may be NULL when no peer capabilities are known yet.
 * Returns 0 on success or the mwl_hal_newstation error code.
 */
static int
mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_hal_vap *hvap;
	int error;

	if (vap->iv_opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vap's do not have a f/w vap; instead they piggyback
		 * on an AP vap and we must install the sta db entry and
		 * crypto state using that AP's handle (the WDS vap has none).
		 */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	} else
		hvap = MWL_VAP(vap)->mv_hvap;
	/* pass QoS/HT capability bits and the WME info byte (if any) */
	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
	    aid, staid, pi,
	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
	if (error == 0) {
		/*
		 * Setup security for this station.  For sta mode this is
		 * needed even though do the same thing on transition to
		 * AUTH state because the call to mwl_hal_newstation
		 * clobbers the crypto state we setup.
		 */
		mwl_setanywepkey(vap, ni->ni_macaddr);
	}
	return error;
#undef WME
}
4083
4084static void
4085mwl_setglobalkeys(struct ieee80211vap *vap)
4086{
4087	struct ieee80211_key *wk;
4088
4089	wk = &vap->iv_nw_keys[0];
4090	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
4091		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
4092			(void) mwl_key_set(vap, wk, vap->iv_myaddr);
4093}
4094
4095/*
4096 * Convert a legacy rate set to a firmware bitmask.
4097 */
4098static uint32_t
4099get_rate_bitmap(const struct ieee80211_rateset *rs)
4100{
4101	uint32_t rates;
4102	int i;
4103
4104	rates = 0;
4105	for (i = 0; i < rs->rs_nrates; i++)
4106		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
4107		case 2:	  rates |= 0x001; break;
4108		case 4:	  rates |= 0x002; break;
4109		case 11:  rates |= 0x004; break;
4110		case 22:  rates |= 0x008; break;
4111		case 44:  rates |= 0x010; break;
4112		case 12:  rates |= 0x020; break;
4113		case 18:  rates |= 0x040; break;
4114		case 24:  rates |= 0x080; break;
4115		case 36:  rates |= 0x100; break;
4116		case 48:  rates |= 0x200; break;
4117		case 72:  rates |= 0x400; break;
4118		case 96:  rates |= 0x800; break;
4119		case 108: rates |= 0x1000; break;
4120		}
4121	return rates;
4122}
4123
4124/*
4125 * Construct an HT firmware bitmask from an HT rate set.
4126 */
4127static uint32_t
4128get_htrate_bitmap(const struct ieee80211_htrateset *rs)
4129{
4130	uint32_t rates;
4131	int i;
4132
4133	rates = 0;
4134	for (i = 0; i < rs->rs_nrates; i++) {
4135		if (rs->rs_rates[i] < 16)
4136			rates |= 1<<rs->rs_rates[i];
4137	}
4138	return rates;
4139}
4140
4141/*
4142 * Craft station database entry for station.
4143 * NB: use host byte order here, the hal handles byte swapping.
4144 */
/*
 * Craft station database entry for station.
 * Fills in legacy rate/cap info always and HT parameters when the
 * node negotiated HT, then masks out HT capabilities the local vap
 * configuration does not permit.  Returns pi for caller convenience
 * (so the result can be passed directly to mwl_peerstadb et al).
 * NB: use host byte order here, the hal handles byte swapping.
 */
static MWL_HAL_PEERINFO *
mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
{
	const struct ieee80211vap *vap = ni->ni_vap;

	memset(pi, 0, sizeof(*pi));
	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
	pi->CapInfo = ni->ni_capinfo;
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* HT capabilities, etc */
		pi->HTCapabilitiesInfo = ni->ni_htcap;
		/* XXX pi.HTCapabilitiesInfo */
	        pi->MacHTParamInfo = ni->ni_htparam;
		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
		pi->AddHtInfo.OpMode = ni->ni_htopmode;
		pi->AddHtInfo.stbc = ni->ni_htstbc;

		/* constrain according to local configuration */
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
		/* advertise 40MHz only when the node is actually using it */
		if (ni->ni_chw != 40)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
	}
	return pi;
}
4174
4175/*
4176 * Re-create the local sta db entry for a vap to ensure
4177 * up to date WME state is pushed to the firmware.  Because
4178 * this resets crypto state this must be followed by a
4179 * reload of any keys in the global key table.
4180 */
static int
mwl_localstadb(struct ieee80211vap *vap)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211_node *bss;
	MWL_HAL_PEERINFO pi;
	int error;

	switch (vap->iv_opmode) {
	case IEEE80211_M_STA:
		/*
		 * For a sta vap pass peer info for the bss node once we
		 * have associated (RUN); before that no peer caps exist.
		 */
		bss = vap->iv_bss;
		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
		    vap->iv_state == IEEE80211_S_RUN ?
			mkpeerinfo(&pi, bss) : NULL,
		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
		    bss->ni_ies.wme_ie != NULL ?
			WME(bss->ni_ies.wme_ie)->wme_info : 0);
		/* NB: newstation resets crypto state; reload global keys */
		if (error == 0)
			mwl_setglobalkeys(vap);
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		/* ap-side entry has no peer info, only the WME flag */
		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
		if (error == 0)
			mwl_setglobalkeys(vap);
		break;
	default:
		/* other opmodes need no local sta db entry */
		error = 0;
		break;
	}
	return error;
#undef WME
}
4216
/*
 * 802.11 state machine hook.  Carry out driver/firmware work
 * around net80211's state processing: radar-detection cleanup,
 * per-state pre-work (fw vap start/stop, placeholder sta db
 * entry, CSA/CAC handling), then post-RUN work that needs the
 * updated iv_bss (beacon setup, assoc id, rates, CS mode, and
 * the station-aging timer).  Returns 0 or an errno value.
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    vap->iv_ifp->if_xname, __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	callout_stop(&sc->sc_timer);
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    vap->iv_ifp->if_xname, __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    vap->iv_ifp->if_xname, __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			/* enable fw DWDS support when the first DWDS vap runs */
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    vap->iv_ifp->if_xname, __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		/*
		 * NOTE(review): this arm runs for ANY transition that is
		 * not to RUN or SLEEP, while the matching increment only
		 * happens on the STA/RUN path above -- confirm
		 * sc_ndwdsvaps cannot be decremented more often than it
		 * is incremented (e.g. repeated SCAN/AUTH transitions).
		 */
		mwl_hal_setdwds(mh, 0);
bad:
	return error;
}
4370
4371/*
4372 * Manage station id's; these are separate from AID's
4373 * as AID's may have values out of the range of possible
4374 * station id's acceptable to the firmware.
4375 */
/*
 * Allocate a station id.  Prefer to reuse the AID when it is in
 * the valid range and free; otherwise scan the bit vector for the
 * first free id.  Id 0 is reserved.
 */
static int
allocstaid(struct mwl_softc *sc, int aid)
{
	int staid;

	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
		/* NB: don't use 0 */
		for (staid = 1; staid < MWL_MAXSTAID; staid++)
			if (isclr(sc->sc_staid, staid))
				break;
		/*
		 * NOTE(review): when every id is busy the loop falls
		 * through with staid == MWL_MAXSTAID and setbit below
		 * marks a bit one past the managed range -- confirm
		 * sc_staid is sized to tolerate this (or that callers
		 * bound the number of stations).
		 */
	} else
		staid = aid;
	setbit(sc->sc_staid, staid);
	return staid;
}
4391
/* Release a station id previously handed out by allocstaid. */
static void
delstaid(struct mwl_softc *sc, int staid)
{
	clrbit(sc->sc_staid, staid);
}
4397
4398/*
4399 * Setup driver-specific state for a newly associated node.
4400 * Note that we're called also on a re-associate, the isnew
4401 * param tells us if this is the first time or not.
4402 */
4403static void
4404mwl_newassoc(struct ieee80211_node *ni, int isnew)
4405{
4406	struct ieee80211vap *vap = ni->ni_vap;
4407        struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4408	struct mwl_node *mn = MWL_NODE(ni);
4409	MWL_HAL_PEERINFO pi;
4410	uint16_t aid;
4411	int error;
4412
4413	aid = IEEE80211_AID(ni->ni_associd);
4414	if (isnew) {
4415		mn->mn_staid = allocstaid(sc, aid);
4416		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4417	} else {
4418		mn = MWL_NODE(ni);
4419		/* XXX reset BA stream? */
4420	}
4421	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4422	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4423	error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4424	if (error != 0) {
4425		DPRINTF(sc, MWL_DEBUG_NODE,
4426		    "%s: error %d creating sta db entry\n",
4427		    __func__, error);
4428		/* XXX how to deal with error? */
4429	}
4430}
4431
4432/*
4433 * Periodically poke the firmware to age out station state
4434 * (power save queues, pending tx aggregates).
4435 */
4436static void
4437mwl_agestations(void *arg)
4438{
4439	struct mwl_softc *sc = arg;
4440
4441	mwl_hal_setkeepalive(sc->sc_mh);
4442	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
4443		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
4444}
4445
4446static const struct mwl_hal_channel *
4447findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4448{
4449	int i;
4450
4451	for (i = 0; i < ci->nchannels; i++) {
4452		const struct mwl_hal_channel *hc = &ci->channels[i];
4453		if (hc->ieee == ieee)
4454			return hc;
4455	}
4456	return NULL;
4457}
4458
4459static int
4460mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
4461	int nchan, struct ieee80211_channel chans[])
4462{
4463	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4464	struct mwl_hal *mh = sc->sc_mh;
4465	const MWL_HAL_CHANNELINFO *ci;
4466	int i;
4467
4468	for (i = 0; i < nchan; i++) {
4469		struct ieee80211_channel *c = &chans[i];
4470		const struct mwl_hal_channel *hc;
4471
4472		if (IEEE80211_IS_CHAN_2GHZ(c)) {
4473			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
4474			    IEEE80211_IS_CHAN_HT40(c) ?
4475				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4476		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4477			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
4478			    IEEE80211_IS_CHAN_HT40(c) ?
4479				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4480		} else {
4481			if_printf(ic->ic_ifp,
4482			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
4483			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
4484			return EINVAL;
4485		}
4486		/*
4487		 * Verify channel has cal data and cap tx power.
4488		 */
4489		hc = findhalchannel(ci, c->ic_ieee);
4490		if (hc != NULL) {
4491			if (c->ic_maxpower > 2*hc->maxTxPow)
4492				c->ic_maxpower = 2*hc->maxTxPow;
4493			goto next;
4494		}
4495		if (IEEE80211_IS_CHAN_HT40(c)) {
4496			/*
4497			 * Look for the extension channel since the
4498			 * hal table only has the primary channel.
4499			 */
4500			hc = findhalchannel(ci, c->ic_extieee);
4501			if (hc != NULL) {
4502				if (c->ic_maxpower > 2*hc->maxTxPow)
4503					c->ic_maxpower = 2*hc->maxTxPow;
4504				goto next;
4505			}
4506		}
4507		if_printf(ic->ic_ifp,
4508		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
4509		    __func__, c->ic_ieee, c->ic_extieee,
4510		    c->ic_freq, c->ic_flags);
4511		return EINVAL;
4512	next:
4513		;
4514	}
4515	return 0;
4516}
4517
4518#define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4519#define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4520
4521static void
4522addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow)
4523{
4524	c->ic_freq = freq;
4525	c->ic_flags = flags;
4526	c->ic_ieee = ieee;
4527	c->ic_minpower = 0;
4528	c->ic_maxpower = 2*txpow;
4529	c->ic_maxregpower = txpow;
4530}
4531
4532static const struct ieee80211_channel *
4533findchannel(const struct ieee80211_channel chans[], int nchans,
4534	int freq, int flags)
4535{
4536	const struct ieee80211_channel *c;
4537	int i;
4538
4539	for (i = 0; i < nchans; i++) {
4540		c = &chans[i];
4541		if (c->ic_freq == freq && c->ic_flags == flags)
4542			return c;
4543	}
4544	return NULL;
4545}
4546
/*
 * Append HT40 channel pairs to the channel list.  For each hal
 * entry (which describes the primary channel of a pair) locate
 * the previously-added HT20 extension channel 20MHz above; when
 * found, add an HT40U entry for the primary and an HT40D entry
 * for the extension, cross-linking them via ic_extieee.  Stops
 * when maxchans is reached.
 */
static void
addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	const struct ieee80211_channel *extc;
	const struct mwl_hal_channel *hc;
	int i;

	/* append after the channels already present */
	c = &chans[*nchans];

	/* NB: lookup below matches against the plain HT20 flag set */
	flags &= ~IEEE80211_CHAN_HT;
	for (i = 0; i < ci->nchannels; i++) {
		/*
		 * Each entry defines an HT40 channel pair; find the
		 * extension channel above and the insert the pair.
		 */
		hc = &ci->channels[i];
		extc = findchannel(chans, *nchans, hc->freq+20,
		    flags | IEEE80211_CHAN_HT20);
		if (extc != NULL) {
			if (*nchans >= maxchans)
				break;
			addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U,
			    hc->ieee, hc->maxTxPow);
			c->ic_extieee = extc->ic_ieee;
			c++, (*nchans)++;
			if (*nchans >= maxchans)
				break;
			addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D,
			    extc->ic_ieee, hc->maxTxPow);
			c->ic_extieee = hc->ieee;
			c++, (*nchans)++;
		}
	}
}
4583
/*
 * Append channels from a hal channel table to the channel list,
 * expanding each entry into the multiple net80211 entries it
 * implies: g channels get a separate b-only clone, HT g channels
 * get both g-only and b-only clones, and HT a channels an a-only
 * clone.  The c[-1]/c[0] manipulation clones the just-added entry
 * and then rewrites the flags of one or both copies.  Stops when
 * maxchans is reached.
 */
static void
addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	int i;

	/* append after the channels already present */
	c = &chans[*nchans];

	for (i = 0; i < ci->nchannels; i++) {
		const struct mwl_hal_channel *hc;

		hc = &ci->channels[i];
		if (*nchans >= maxchans)
			break;
		addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
		c++, (*nchans)++;
		if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
			/* g channel have a separate b-only entry */
			if (*nchans >= maxchans)
				break;
			/* duplicate the entry, demote the first copy to b */
			c[0] = c[-1];
			c[-1].ic_flags = IEEE80211_CHAN_B;
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTG) {
			/* HT g channel have a separate g-only entry */
			if (*nchans >= maxchans)
				break;
			/* rewrite prior copy to g-only, new copy to HT20 g */
			c[-1].ic_flags = IEEE80211_CHAN_G;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTA) {
			/* HT a channel have a separate a-only entry */
			if (*nchans >= maxchans)
				break;
			/* rewrite prior copy to a-only, new copy to HT20 a */
			c[-1].ic_flags = IEEE80211_CHAN_A;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
	}
}
4631
4632static void
4633getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
4634	struct ieee80211_channel chans[])
4635{
4636	const MWL_HAL_CHANNELINFO *ci;
4637
4638	/*
4639	 * Use the channel info from the hal to craft the
4640	 * channel list.  Note that we pass back an unsorted
4641	 * list; the caller is required to sort it for us
4642	 * (if desired).
4643	 */
4644	*nchans = 0;
4645	if (mwl_hal_getchannelinfo(sc->sc_mh,
4646	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
4647		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
4648	if (mwl_hal_getchannelinfo(sc->sc_mh,
4649	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
4650		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
4651	if (mwl_hal_getchannelinfo(sc->sc_mh,
4652	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4653		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
4654	if (mwl_hal_getchannelinfo(sc->sc_mh,
4655	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4656		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
4657}
4658
4659static void
4660mwl_getradiocaps(struct ieee80211com *ic,
4661	int maxchans, int *nchans, struct ieee80211_channel chans[])
4662{
4663	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4664
4665	getchannels(sc, maxchans, nchans, chans);
4666}
4667
4668static int
4669mwl_getchannels(struct mwl_softc *sc)
4670{
4671	struct ifnet *ifp = sc->sc_ifp;
4672	struct ieee80211com *ic = ifp->if_l2com;
4673
4674	/*
4675	 * Use the channel info from the hal to craft the
4676	 * channel list for net80211.  Note that we pass up
4677	 * an unsorted list; net80211 will sort it for us.
4678	 */
4679	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
4680	ic->ic_nchans = 0;
4681	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
4682
4683	ic->ic_regdomain.regdomain = SKU_DEBUG;
4684	ic->ic_regdomain.country = CTRY_DEFAULT;
4685	ic->ic_regdomain.location = 'I';
4686	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
4687	ic->ic_regdomain.isocc[1] = ' ';
4688	return (ic->ic_nchans == 0 ? EIO : 0);
4689}
4690#undef IEEE80211_CHAN_HTA
4691#undef IEEE80211_CHAN_HTG
4692
4693#ifdef MWL_DEBUG
/* Debug helper: dump one rx descriptor in human-readable form. */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	/* trailing marker: blank = fw owns, '*' = rx ok, '!' = rx error */
	printf("R[%2u] (DS.V:%p DS.P:%p) NEXT:%08x DATA:%08x RC:%02x%s\n"
	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (const struct mwl_desc *)bf->bf_daddr,
	    le32toh(ds->pPhysNext), le32toh(ds->pPhysBuffData),
	    ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2));
}
4710
/* Debug helper: dump one tx descriptor in human-readable form. */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:%p)\n",
	    ds, (const struct mwl_txdesc *)bf->bf_daddr);
	/* trailing marker: blank = driver owns, '*' or '!' per status bits */
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !");
	printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	/* multi-segment descriptors carry per-segment len/ptr arrays */
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
/* raw hex dump of the descriptor, disabled by default */
{ const uint8_t *cp = (const uint8_t *) ds;
  int i;
  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
	printf("%02x ", cp[i]);
	if (((i+1) % 16) == 0)
		printf("\n");
  }
  printf("\n");
}
#endif
}
4753#endif /* MWL_DEBUG */
4754
4755#if 0
/* Debug helper (compiled out): dump every buffer on a tx q's active list. */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		/* pick up any device writes before reading the descriptor */
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4774#endif
4775
/*
 * Per-second watchdog.  Re-arms itself, counts down the tx timeout
 * timer, and when it expires reports a transmit timeout; a failing
 * keepalive command is used to distinguish "firmware hung".
 */
static void
mwl_watchdog(void *arg)
{
	struct mwl_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
	/* sc_tx_timer == 0: no tx pending; otherwise act only on expiry */
	if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
		return;

	ifp = sc->sc_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) {
		/* nonzero keepalive return => firmware not responding */
		if (mwl_hal_setkeepalive(sc->sc_mh))
			if_printf(ifp, "transmit timeout (firmware hung?)\n");
		else
			if_printf(ifp, "transmit timeout\n");
#if 0
		mwl_reset(ifp);
mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
#endif
		ifp->if_oerrors++;
		sc->sc_stats.mst_watchdog++;
	}
}
4801
4802#ifdef MWL_DIAGAPI
4803/*
4804 * Diagnostic interface to the HAL.  This is used by various
4805 * tools to do things like retrieve register contents for
4806 * debugging.  The mechanism is intentionally opaque so that
4807 * it can change frequently w/o concern for compatiblity.
4808 */
static int
mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	u_int id = md->md_id & MWL_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	/*
	 * NOTE(review): insize/outsize come straight from userland and
	 * are not bounded before being handed to malloc below -- confirm
	 * an upper limit is enforced elsewhere (or that M_NOWAIT failure
	 * is an acceptable backstop).
	 */
	u_int32_t insize = md->md_in_size;
	u_int32_t outsize = md->md_out_size;
	int error = 0;

	if (md->md_id & MWL_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(md->md_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (md->md_id & MWL_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	/* nonzero return from getdiagstate indicates success */
	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
		/* copy out no more than the caller asked for */
		if (outsize < md->md_out_size)
			md->md_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, md->md_out_data,
					md->md_out_size);
	} else {
		error = EINVAL;
	}
bad:
	/* reclaim buffers on all paths; outdata may have been
	 * (re)assigned by the HAL for the DYN case */
	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
4863
/*
 * Reset support for the diag api: reload firmware (when md_id is 0),
 * re-fetch h/w specs, re-setup dma, then reset tx/rx bookkeeping so
 * the driver's ring positions match the freshly booted firmware.
 * NB: the step order matters; each later step depends on the prior.
 */
static int
mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error;

	MWL_LOCK_ASSERT(sc);

	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to load firmware\n");
		return EIO;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		return EIO;
	}
	error = mwl_setupdma(sc);
	if (error != 0) {
		/* NB: mwl_setupdma prints a msg */
		return error;
	}
	/*
	 * Reset tx/rx data structures; after reload we must
	 * re-start the driver's notion of the next xmit/recv.
	 */
	mwl_draintxq(sc);		/* clear pending frames */
	mwl_resettxq(sc);		/* rebuild tx q lists */
	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
	return 0;
}
4894#endif /* MWL_DIAGAPI */
4895
/*
 * Network interface ioctl handler: interface flag changes,
 * driver statistics and diag/reset requests, plus the standard
 * media/address ioctls.  Returns 0 or an errno value.
 */
static int
mwl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, startall;

	switch (cmd) {
	case SIOCSIFFLAGS:
		MWL_LOCK(sc);
		startall = 0;
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call mwl_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			mwl_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid) {
				mwl_init_locked(sc);	/* XXX lose error */
				startall = 1;
			}
		} else
			mwl_stop_locked(ifp, 1);
		MWL_UNLOCK(sc);
		/* NB: kick the vaps only after dropping the softc lock */
		if (startall)
			ieee80211_start_all(ic);
		break;
	case SIOCGMVSTATS:
		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.mst_tx_packets = ifp->if_opackets;
		sc->sc_stats.mst_rx_packets = ifp->if_ipackets;
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsisentcy in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return copyout(&sc->sc_stats,
				ifr->ifr_data, sizeof (sc->sc_stats));
#ifdef MWL_DIAGAPI
	case SIOCGMVDIAG:
		/* XXX check privs */
		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
	case SIOCGMVRESET:
		/* XXX check privs */
		MWL_LOCK(sc);
		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
		MWL_UNLOCK(sc);
		break;
#endif /* MWL_DIAGAPI */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}
4974
4975#ifdef	MWL_DEBUG
4976static int
4977mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4978{
4979	struct mwl_softc *sc = arg1;
4980	int debug, error;
4981
4982	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4983	error = sysctl_handle_int(oidp, &debug, 0, req);
4984	if (error || !req->newptr)
4985		return error;
4986	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4987	sc->sc_debug = debug & 0x00ffffff;
4988	return 0;
4989}
4990#endif /* MWL_DEBUG */
4991
/*
 * Attach the driver's sysctl nodes (debug knob only, and only
 * when compiled with MWL_DEBUG).
 */
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef	MWL_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	/* seed from the module-level default */
	sc->sc_debug = mwl_debug;
	/* combined word: hal bits in the top byte, driver bits below */
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
5005
5006/*
5007 * Announce various information on device/driver attach.
5008 */
static void
mwl_announce(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* fw release number packs four version bytes, major first */
	if_printf(ifp, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
		sc->sc_hwspecs.hwVersion,
		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
		sc->sc_hwspecs.regionCode);
	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;

	if (bootverbose) {
		int i;
		/* report the WME AC -> hw queue mapping */
		for (i = 0; i <= WME_AC_VO; i++) {
			struct mwl_txq *txq = sc->sc_ac2q[i];
			if_printf(ifp, "Use hw queue %u for %s traffic\n",
				txq->qnum, ieee80211_wme_acnames[i]);
		}
	}
	/* note non-default tunable settings even when not bootverbose */
	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
		if_printf(ifp, "using %u rx descriptors\n", mwl_rxdesc);
	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
		if_printf(ifp, "using %u rx buffers\n", mwl_rxbuf);
	if (bootverbose || mwl_txbuf != MWL_TXBUF)
		if_printf(ifp, "using %u tx buffers\n", mwl_txbuf);
	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
		if_printf(ifp, "multi-bss support\n");
#ifdef MWL_TX_NODROP
	if (bootverbose)
		if_printf(ifp, "no tx drop\n");
#endif
}
5044