if_mwl.c revision 268529
1/*-
2 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
3 * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer,
11 *    without modification.
12 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
14 *    redistribution must be conditioned upon including a substantially
15 *    similar Disclaimer requirement for further binary redistribution.
16 *
17 * NO WARRANTY
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
21 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
23 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
26 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGES.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/dev/mwl/if_mwl.c 268529 2014-07-11 13:58:48Z glebius $");
33
34/*
35 * Driver for the Marvell 88W8363 Wireless LAN controller.
36 */
37
38#include "opt_inet.h"
39#include "opt_mwl.h"
40#include "opt_wlan.h"
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/sysctl.h>
45#include <sys/mbuf.h>
46#include <sys/malloc.h>
47#include <sys/lock.h>
48#include <sys/mutex.h>
49#include <sys/kernel.h>
50#include <sys/socket.h>
51#include <sys/sockio.h>
52#include <sys/errno.h>
53#include <sys/callout.h>
54#include <sys/bus.h>
55#include <sys/endian.h>
56#include <sys/kthread.h>
57#include <sys/taskqueue.h>
58
59#include <machine/bus.h>
60
61#include <net/if.h>
62#include <net/if_var.h>
63#include <net/if_dl.h>
64#include <net/if_media.h>
65#include <net/if_types.h>
66#include <net/if_arp.h>
67#include <net/ethernet.h>
68#include <net/if_llc.h>
69
70#include <net/bpf.h>
71
72#include <net80211/ieee80211_var.h>
73#include <net80211/ieee80211_regdomain.h>
74
75#ifdef INET
76#include <netinet/in.h>
77#include <netinet/if_ether.h>
78#endif /* INET */
79
80#include <dev/mwl/if_mwlvar.h>
81#include <dev/mwl/mwldiag.h>
82
83/* idiomatic shorthands: MS = mask+shift, SM = shift+mask */
84#define	MS(v,x)	(((v) & x) >> x##_S)
85#define	SM(v,x)	(((v) << x##_S) & x)
86
87static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
88		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
89		    const uint8_t [IEEE80211_ADDR_LEN],
90		    const uint8_t [IEEE80211_ADDR_LEN]);
91static void	mwl_vap_delete(struct ieee80211vap *);
92static int	mwl_setupdma(struct mwl_softc *);
93static int	mwl_hal_reset(struct mwl_softc *sc);
94static int	mwl_init_locked(struct mwl_softc *);
95static void	mwl_init(void *);
96static void	mwl_stop_locked(struct ifnet *, int);
97static int	mwl_reset(struct ieee80211vap *, u_long);
98static void	mwl_stop(struct ifnet *, int);
99static void	mwl_start(struct ifnet *);
100static int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
101			const struct ieee80211_bpf_params *);
102static int	mwl_media_change(struct ifnet *);
103static void	mwl_watchdog(void *);
104static int	mwl_ioctl(struct ifnet *, u_long, caddr_t);
105static void	mwl_radar_proc(void *, int);
106static void	mwl_chanswitch_proc(void *, int);
107static void	mwl_bawatchdog_proc(void *, int);
108static int	mwl_key_alloc(struct ieee80211vap *,
109			struct ieee80211_key *,
110			ieee80211_keyix *, ieee80211_keyix *);
111static int	mwl_key_delete(struct ieee80211vap *,
112			const struct ieee80211_key *);
113static int	mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *,
114			const uint8_t mac[IEEE80211_ADDR_LEN]);
115static int	mwl_mode_init(struct mwl_softc *);
116static void	mwl_update_mcast(struct ifnet *);
117static void	mwl_update_promisc(struct ifnet *);
118static void	mwl_updateslot(struct ifnet *);
119static int	mwl_beacon_setup(struct ieee80211vap *);
120static void	mwl_beacon_update(struct ieee80211vap *, int);
121#ifdef MWL_HOST_PS_SUPPORT
122static void	mwl_update_ps(struct ieee80211vap *, int);
123static int	mwl_set_tim(struct ieee80211_node *, int);
124#endif
125static int	mwl_dma_setup(struct mwl_softc *);
126static void	mwl_dma_cleanup(struct mwl_softc *);
127static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
128		    const uint8_t [IEEE80211_ADDR_LEN]);
129static void	mwl_node_cleanup(struct ieee80211_node *);
130static void	mwl_node_drain(struct ieee80211_node *);
131static void	mwl_node_getsignal(const struct ieee80211_node *,
132			int8_t *, int8_t *);
133static void	mwl_node_getmimoinfo(const struct ieee80211_node *,
134			struct ieee80211_mimo_info *);
135static int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
136static void	mwl_rx_proc(void *, int);
137static void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
138static int	mwl_tx_setup(struct mwl_softc *, int, int);
139static int	mwl_wme_update(struct ieee80211com *);
140static void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
141static void	mwl_tx_cleanup(struct mwl_softc *);
142static uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
143static int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
144			     struct mwl_txbuf *, struct mbuf *);
145static void	mwl_tx_proc(void *, int);
146static int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
147static void	mwl_draintxq(struct mwl_softc *);
148static void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
149static int	mwl_recv_action(struct ieee80211_node *,
150			const struct ieee80211_frame *,
151			const uint8_t *, const uint8_t *);
152static int	mwl_addba_request(struct ieee80211_node *,
153			struct ieee80211_tx_ampdu *, int dialogtoken,
154			int baparamset, int batimeout);
155static int	mwl_addba_response(struct ieee80211_node *,
156			struct ieee80211_tx_ampdu *, int status,
157			int baparamset, int batimeout);
158static void	mwl_addba_stop(struct ieee80211_node *,
159			struct ieee80211_tx_ampdu *);
160static int	mwl_startrecv(struct mwl_softc *);
161static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
162			struct ieee80211_channel *);
163static int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
164static void	mwl_scan_start(struct ieee80211com *);
165static void	mwl_scan_end(struct ieee80211com *);
166static void	mwl_set_channel(struct ieee80211com *);
167static int	mwl_peerstadb(struct ieee80211_node *,
168			int aid, int staid, MWL_HAL_PEERINFO *pi);
169static int	mwl_localstadb(struct ieee80211vap *);
170static int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
171static int	allocstaid(struct mwl_softc *sc, int aid);
172static void	delstaid(struct mwl_softc *sc, int staid);
173static void	mwl_newassoc(struct ieee80211_node *, int);
174static void	mwl_agestations(void *);
175static int	mwl_setregdomain(struct ieee80211com *,
176			struct ieee80211_regdomain *, int,
177			struct ieee80211_channel []);
178static void	mwl_getradiocaps(struct ieee80211com *, int, int *,
179			struct ieee80211_channel []);
180static int	mwl_getchannels(struct mwl_softc *);
181
182static void	mwl_sysctlattach(struct mwl_softc *);
183static void	mwl_announce(struct mwl_softc *);
184
185SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");
186
187static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
188SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
189	    0, "rx descriptors allocated");
190static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
191SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &mwl_rxbuf,
192	    0, "rx buffers allocated");
193static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
194SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RWTUN, &mwl_txbuf,
195	    0, "tx buffers allocated");
196static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
197SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &mwl_txcoalesce,
198	    0, "tx buffers to send at once");
199static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
200SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RWTUN, &mwl_rxquota,
201	    0, "max rx buffers to process per interrupt");
202static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
203SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RWTUN, &mwl_rxdmalow,
204	    0, "min free rx buffers before restarting traffic");
205
206#ifdef MWL_DEBUG
207static	int mwl_debug = 0;
208SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RWTUN, &mwl_debug,
209	    0, "control debugging printfs");
210enum {
211	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
212	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
213	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
214	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
215	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
216	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
217	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
218	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
219	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
220	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
221	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
222	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
223	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
224	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
225	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
226	MWL_DEBUG_ANY		= 0xffffffff
227};
228#define	IS_BEACON(wh) \
229    ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
230	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
231#define	IFF_DUMPPKTS_RECV(sc, wh) \
232    (((sc->sc_debug & MWL_DEBUG_RECV) && \
233      ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh))) || \
234     (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
235#define	IFF_DUMPPKTS_XMIT(sc) \
236	((sc->sc_debug & MWL_DEBUG_XMIT) || \
237	 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
238#define	DPRINTF(sc, m, fmt, ...) do {				\
239	if (sc->sc_debug & (m))					\
240		printf(fmt, __VA_ARGS__);			\
241} while (0)
242#define	KEYPRINTF(sc, hk, mac) do {				\
243	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
244		mwl_keyprint(sc, __func__, hk, mac);		\
245} while (0)
246static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
247static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
248#else
249#define	IFF_DUMPPKTS_RECV(sc, wh) \
250	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
251#define	IFF_DUMPPKTS_XMIT(sc) \
252	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
253#define	DPRINTF(sc, m, fmt, ...) do {				\
254	(void) sc;						\
255} while (0)
256#define	KEYPRINTF(sc, k, mac) do {				\
257	(void) sc;						\
258} while (0)
259#endif
260
261static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");
262
263/*
264 * Each packet has fixed front matter: a 2-byte length
265 * of the payload, followed by a 4-address 802.11 header
266 * (regardless of the actual header and always w/o any
267 * QoS header).  The payload then follows.
268 */
struct mwltxrec {
	uint16_t fwlen;		/* 2-byte payload length (see comment above) */
	struct ieee80211_frame_addr4 wh;	/* 4-address 802.11 header, no QoS */
} __packed;
273
274/*
275 * Read/Write shorthands for accesses to BAR 0.  Note
276 * that all BAR 1 operations are done in the "hal" and
277 * there should be no reference to them here.
278 */
279#ifdef MWL_DEBUG
/* Read a 32-bit BAR 0 register at byte offset 'off' (debug builds only). */
static __inline uint32_t
RD4(struct mwl_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
}
285#endif
286
/* Write a 32-bit BAR 0 register at byte offset 'off'. */
static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}
292
/*
 * Device attach: allocate the ifnet, attach the HAL (which owns BAR 1),
 * load firmware, fetch h/w specs and channels, set up descriptor DMA,
 * create the deferred-work taskqueue and tx queues, then register all
 * net80211 methods and announce the device.
 *
 * Returns 0 on success or an errno; on failure everything acquired so
 * far is released via the bad/bad1/bad2 exit labels and sc_invalid is
 * set so the interrupt handler ignores the device.
 */
int
mwl_attach(uint16_t devid, struct mwl_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct mwl_hal *mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "cannot if_alloc()\n");
		return ENOSPC;
	}
	ic = ifp->if_l2com;

	/*
	 * Setup the RX free list lock early, so it can be consistently
	 * removed.
	 */
	MWL_RXFREE_INIT(sc);

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	mh = mwl_hal_attach(sc->sc_dev, devid,
	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
	if (mh == NULL) {
		if_printf(ifp, "unable to attach HAL\n");
		error = EIO;
		goto bad;
	}
	sc->sc_mh = mh;
	/*
	 * Load firmware so we can get setup.  We arbitrarily
	 * pick station firmware; we'll re-load firmware as
	 * needed so setting up the wrong mode isn't a big deal.
	 */
	if (mwl_hal_fwload(mh, NULL) != 0) {
		if_printf(ifp, "unable to setup builtin firmware\n");
		error = EIO;
		goto bad1;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		if_printf(ifp, "unable to fetch h/w specs\n");
		error = EIO;
		goto bad1;
	}
	error = mwl_getchannels(sc);
	if (error != 0)
		goto bad1;

	sc->sc_txantenna = 0;		/* h/w default */
	sc->sc_rxantenna = 0;		/* h/w default */
	sc->sc_invalid = 0;		/* ready to go, enable int handling */
	sc->sc_ageinterval = MWL_AGEINTERVAL;

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 * We immediately push the information to the firmware
	 * as otherwise it gets upset.
	 */
	error = mwl_dma_setup(sc);
	if (error != 0) {
		if_printf(ifp, "failed to setup descriptors: %d\n", error);
		goto bad1;
	}
	error = mwl_setupdma(sc);	/* push to firmware */
	if (error != 0)			/* NB: mwl_setupdma prints msg */
		goto bad1;

	callout_init(&sc->sc_timer, CALLOUT_MPSAFE);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);

	/* taskqueue that runs the deferred rx/tx/radar/CSA work below */
	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);

	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = mwl_start;
	ifp->if_ioctl = mwl_ioctl;
	ifp->if_init = mwl_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
#if 0
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
#endif
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_WDS		/* WDS supported */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WME		/* WME/WMM supported */
		| IEEE80211_C_BURST		/* xmit bursting supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
		| IEEE80211_C_DFS		/* DFS supported */
		;

	ic->ic_htcaps =
		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
#if MWL_AGGR_SIZE == 7935
		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#if 0
		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
#endif
		/* s/w capabilities */
		| IEEE80211_HTC_HT		/* HT operation */
		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
		| IEEE80211_HTC_SMPS		/* SMPS available */
		;

	/*
	 * Mark h/w crypto support.
	 * XXX no way to query h/w support.
	 */
	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
			  |  IEEE80211_CRYPTO_AES_CCM
			  |  IEEE80211_CRYPTO_TKIP
			  |  IEEE80211_CRYPTO_TKIPMIC
			  ;
	/*
	 * Transmit requires space in the packet for a special
	 * format transmit record and optional padding between
	 * this record and the payload.  Ask the net80211 layer
	 * to arrange this when encapsulating packets so we can
	 * add it efficiently.
	 */
	ic->ic_headroom = sizeof(struct mwltxrec) -
		sizeof(struct ieee80211_frame);

	/* call MI attach routine. */
	ieee80211_ifattach(ic, sc->sc_hwspecs.macAddr);
	ic->ic_setregdomain = mwl_setregdomain;
	ic->ic_getradiocaps = mwl_getradiocaps;
	/* override default methods */
	ic->ic_raw_xmit = mwl_raw_xmit;
	ic->ic_newassoc = mwl_newassoc;
	ic->ic_updateslot = mwl_updateslot;
	ic->ic_update_mcast = mwl_update_mcast;
	ic->ic_update_promisc = mwl_update_promisc;
	ic->ic_wme.wme_update = mwl_wme_update;

	ic->ic_node_alloc = mwl_node_alloc;
	/* save net80211's default handlers so our overrides can chain */
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = mwl_node_cleanup;
	sc->sc_node_drain = ic->ic_node_drain;
	ic->ic_node_drain = mwl_node_drain;
	ic->ic_node_getsignal = mwl_node_getsignal;
	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;

	ic->ic_scan_start = mwl_scan_start;
	ic->ic_scan_end = mwl_scan_end;
	ic->ic_set_channel = mwl_set_channel;

	/* likewise for the A-MPDU action/ADDBA paths */
	sc->sc_recv_action = ic->ic_recv_action;
	ic->ic_recv_action = mwl_recv_action;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = mwl_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = mwl_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = mwl_addba_stop;

	ic->ic_vap_create = mwl_vap_create;
	ic->ic_vap_delete = mwl_vap_delete;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		MWL_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		MWL_RX_RADIOTAP_PRESENT);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	mwl_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	mwl_announce(sc);
	return 0;
bad2:
	mwl_tx_cleanup(sc);
	mwl_dma_cleanup(sc);
bad1:
	mwl_hal_detach(mh);
bad:
	MWL_RXFREE_DESTROY(sc);
	if_free(ifp);
	sc->sc_invalid = 1;
	return error;
}
542
/*
 * Device detach: stop the device and unwind mwl_attach.  The teardown
 * order matters -- see the inline comment below.  Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	mwl_stop(ifp, 1);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	callout_drain(&sc->sc_watchdog);
	mwl_dma_cleanup(sc);
	MWL_RXFREE_DESTROY(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	if_free(ifp);

	return 0;
}
575
576/*
577 * MAC address handling for multiple BSS on the same radio.
578 * The first vap uses the MAC address from the EEPROM.  For
579 * subsequent vap's we set the U/L bit (bit 1) in the MAC
580 * address and use the next six bits as an index.
581 */
/*
 * Reserve a BSSID "slot" for a new vap and rewrite its MAC address.
 * Slot 0 corresponds to the unmodified EEPROM address; any other slot
 * index is folded into mac[0] along with the U/L bit (see the block
 * comment above).  sc_bssidmask tracks slot usage; sc_nbssid0 counts
 * how many vaps share slot 0 (see reclaim_address).
 */
static void
assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
		/* NB: we only do this if h/w supports multiple bssid */
		/* find the lowest free slot in the 32-bit usage mask */
		for (i = 0; i < 32; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		/*
		 * Slot 0 free means the base EEPROM address itself is
		 * unused, so mac needs no rewrite.
		 * NOTE(review): if all 32 slots are in use i == 32 here
		 * and the shifts below are out of range -- confirm the
		 * vap count is bounded well below 32 by callers.
		 */
		if (i != 0)
			mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	/* slot 0 is shared; count users so reclaim knows when it's free */
	if (i == 0)
		sc->sc_nbssid0++;
}
600
601static void
602reclaim_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN])
603{
604	int i = mac[0] >> 2;
605	if (i != 0 || --sc->sc_nbssid0 == 0)
606		sc->sc_bssidmask &= ~(1<<i);
607}
608
/*
 * Create a vap.  Allocates a h/w vap in the HAL for AP/MBSS/STA modes
 * (WDS and monitor vaps have no h/w vap of their own), assigns a MAC
 * address/BSSID slot unless the caller supplied one, wires up the
 * driver's vap method overrides, and updates the per-mode vap counts
 * used to pick the overall operating mode.  Returns NULL on failure.
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			/* undo the address reservation on failure */
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		/* unsupported operating modes */
		return NULL;
	}

	mvp = (struct mwl_vap *) malloc(sizeof(struct mwl_vap),
	    M_80211_VAP, M_NOWAIT | M_ZERO);
	if (mvp == NULL) {
		if (hvap != NULL) {
			mwl_hal_delvap(hvap);
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
		}
		/* XXX msg */
		return NULL;
	}
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
	if (hvap != NULL)
		IEEE80211_ADDR_COPY(vap->iv_myaddr, mac);
	/* override with driver methods */
	mvp->mv_newstate = vap->iv_newstate;	/* saved for chaining */
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;	/* saved for chaining */
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status);

	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}
748
/*
 * Destroy a vap created by mwl_vap_create.  Interrupts are masked
 * while the vap is torn down (if the parent device is running), the
 * h/w vap, local sta db entry and BSSID slot are released according
 * to the vap's operating mode, and any queued tx frames for the vap
 * are discarded.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ifnet *parent = vap->iv_ic->ic_ifp;
	struct mwl_softc *sc = parent->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (parent->if_drv_flags & IFF_DRV_RUNNING) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps--;
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	/* re-enable interrupts if we masked them above */
	if (parent->if_drv_flags & IFF_DRV_RUNNING)
		mwl_hal_intrset(mh, sc->sc_imask);
}
790
791void
792mwl_suspend(struct mwl_softc *sc)
793{
794	struct ifnet *ifp = sc->sc_ifp;
795
796	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
797		__func__, ifp->if_flags);
798
799	mwl_stop(ifp, 1);
800}
801
802void
803mwl_resume(struct mwl_softc *sc)
804{
805	struct ifnet *ifp = sc->sc_ifp;
806
807	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
808		__func__, ifp->if_flags);
809
810	if (ifp->if_flags & IFF_UP)
811		mwl_init(sc);
812}
813
814void
815mwl_shutdown(void *arg)
816{
817	struct mwl_softc *sc = arg;
818
819	mwl_stop(sc->sc_ifp, 1);
820}
821
822/*
823 * Interrupt handler.  Most of the actual processing is deferred.
824 */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	/* rx ready: defer to the rx taskqueue thread */
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	/* tx done: defer completion processing likewise */
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	/* firmware command completion is handled directly in the hal */
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;				/* no handling */
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;				/* no handling */
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;				/* no handling */
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}
880
881static void
882mwl_radar_proc(void *arg, int pending)
883{
884	struct mwl_softc *sc = arg;
885	struct ifnet *ifp = sc->sc_ifp;
886	struct ieee80211com *ic = ifp->if_l2com;
887
888	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
889	    __func__, pending);
890
891	sc->sc_stats.mst_radardetect++;
892	/* XXX stop h/w BA streams? */
893
894	IEEE80211_LOCK(ic);
895	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
896	IEEE80211_UNLOCK(ic);
897}
898
899static void
900mwl_chanswitch_proc(void *arg, int pending)
901{
902	struct mwl_softc *sc = arg;
903	struct ifnet *ifp = sc->sc_ifp;
904	struct ieee80211com *ic = ifp->if_l2com;
905
906	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
907	    __func__, pending);
908
909	IEEE80211_LOCK(ic);
910	sc->sc_csapending = 0;
911	ieee80211_csa_completeswitch(ic);
912	IEEE80211_UNLOCK(ic);
913}
914
915static void
916mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
917{
918	struct ieee80211_node *ni = sp->data[0];
919
920	/* send DELBA and drop the stream */
921	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
922}
923
/*
 * Deferred BA-watchdog processing: ask the firmware which BA
 * stream(s) stalled and tear them down.
 * NOTE(review): the bitmap encoding appears to be: 0xff = all
 * streams, 0xaa = nothing to do, otherwise a single stream index
 * 0..7 -- confirm against the firmware interface documentation.
 */
static void
mwl_bawatchdog_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_BASTREAM *sp;
	uint8_t bitmap, n;

	sc->sc_stats.mst_bawatchdog++;

	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: could not get bitmap\n", __func__);
		sc->sc_stats.mst_bawatchdog_failed++;
		return;
	}
	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
	if (bitmap == 0xff) {
		n = 0;
		/* disable all ba streams */
		/* NB: bitmap is reused here as the stream index */
		for (bitmap = 0; bitmap < 8; bitmap++) {
			sp = mwl_hal_bastream_lookup(mh, bitmap);
			if (sp != NULL) {
				mwl_bawatchdog(sp);
				n++;
			}
		}
		if (n == 0) {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA streams found\n", __func__);
			sc->sc_stats.mst_bawatchdog_empty++;
		}
	} else if (bitmap != 0xaa) {
		/* disable a single ba stream */
		sp = mwl_hal_bastream_lookup(mh, bitmap);
		if (sp != NULL) {
			mwl_bawatchdog(sp);
		} else {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA stream %d\n", __func__, bitmap);
			sc->sc_stats.mst_bawatchdog_notfound++;
		}
	}
}
968
969/*
970 * Convert net80211 channel to a HAL channel.
971 */
972static void
973mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
974{
975	hc->channel = chan->ic_ieee;
976
977	*(uint32_t *)&hc->channelFlags = 0;
978	if (IEEE80211_IS_CHAN_2GHZ(chan))
979		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
980	else if (IEEE80211_IS_CHAN_5GHZ(chan))
981		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
982	if (IEEE80211_IS_CHAN_HT40(chan)) {
983		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
984		if (IEEE80211_IS_CHAN_HT40U(chan))
985			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
986		else
987			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
988	} else
989		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
990	/* XXX 10MHz channels */
991}
992
993/*
994 * Inform firmware of our tx/rx dma setup.  The BAR 0
995 * writes below are for compatibility with older firmware.
996 * For current firmware we send this information with a
997 * cmd block via mwl_hal_sethwdma.
998 */
static int
mwl_setupdma(struct mwl_softc *sc)
{
	int error, i;

	/* point the rx ring registers at the host descriptor ring */
	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);

	/* NB: the trailing MWL_NUM_ACK_QUEUES queues get no wcb ring */
	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
		struct mwl_txq *txq = &sc->sc_txq[i];
		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
	}
	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;

	/* current firmware takes the setup via a command block */
	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to setup tx/rx dma; hal status %u\n", error);
		/* XXX */
	}
	return error;
}
1024
1025/*
1026 * Inform firmware of tx rate parameters.
1027 * Called after a channel change.
1028 */
1029static int
1030mwl_setcurchanrates(struct mwl_softc *sc)
1031{
1032	struct ifnet *ifp = sc->sc_ifp;
1033	struct ieee80211com *ic = ifp->if_l2com;
1034	const struct ieee80211_rateset *rs;
1035	MWL_HAL_TXRATE rates;
1036
1037	memset(&rates, 0, sizeof(rates));
1038	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1039	/* rate used to send management frames */
1040	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1041	/* rate used to send multicast frames */
1042	rates.McastRate = rates.MgtRate;
1043
1044	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1045}
1046
1047/*
1048 * Inform firmware of tx rate parameters.  Called whenever
1049 * user-settable params change and after a channel change.
1050 */
1051static int
1052mwl_setrates(struct ieee80211vap *vap)
1053{
1054	struct mwl_vap *mvp = MWL_VAP(vap);
1055	struct ieee80211_node *ni = vap->iv_bss;
1056	const struct ieee80211_txparam *tp = ni->ni_txparms;
1057	MWL_HAL_TXRATE rates;
1058
1059	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1060
1061	/*
1062	 * Update the h/w rate map.
1063	 * NB: 0x80 for MCS is passed through unchanged
1064	 */
1065	memset(&rates, 0, sizeof(rates));
1066	/* rate used to send management frames */
1067	rates.MgtRate = tp->mgmtrate;
1068	/* rate used to send multicast frames */
1069	rates.McastRate = tp->mcastrate;
1070
1071	/* while here calculate EAPOL fixed rate cookie */
1072	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
1073
1074	return mwl_hal_settxrate(mvp->mv_hvap,
1075	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
1076		RATE_FIXED : RATE_AUTO, &rates);
1077}
1078
1079/*
1080 * Setup a fixed xmit rate cookie for EAPOL frames.
1081 */
1082static void
1083mwl_seteapolformat(struct ieee80211vap *vap)
1084{
1085	struct mwl_vap *mvp = MWL_VAP(vap);
1086	struct ieee80211_node *ni = vap->iv_bss;
1087	enum ieee80211_phymode mode;
1088	uint8_t rate;
1089
1090	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1091
1092	mode = ieee80211_chan2mode(ni->ni_chan);
1093	/*
1094	 * Use legacy rates when operating a mixed HT+non-HT bss.
1095	 * NB: this may violate POLA for sta and wds vap's.
1096	 */
1097	if (mode == IEEE80211_MODE_11NA &&
1098	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1099		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1100	else if (mode == IEEE80211_MODE_11NG &&
1101	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1102		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1103	else
1104		rate = vap->iv_txparms[mode].mgmtrate;
1105
1106	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1107}
1108
1109/*
1110 * Map SKU+country code to region code for radar bin'ing.
1111 */
1112static int
1113mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1114{
1115	switch (rd->regdomain) {
1116	case SKU_FCC:
1117	case SKU_FCC3:
1118		return DOMAIN_CODE_FCC;
1119	case SKU_CA:
1120		return DOMAIN_CODE_IC;
1121	case SKU_ETSI:
1122	case SKU_ETSI2:
1123	case SKU_ETSI3:
1124		if (rd->country == CTRY_SPAIN)
1125			return DOMAIN_CODE_SPAIN;
1126		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1127			return DOMAIN_CODE_FRANCE;
1128		/* XXX force 1.3.1 radar type */
1129		return DOMAIN_CODE_ETSI_131;
1130	case SKU_JAPAN:
1131		return DOMAIN_CODE_MKK;
1132	case SKU_ROW:
1133		return DOMAIN_CODE_DGT;	/* Taiwan */
1134	case SKU_APAC:
1135	case SKU_APAC2:
1136	case SKU_APAC3:
1137		return DOMAIN_CODE_AUS;	/* Australia */
1138	}
1139	/* XXX KOREA? */
1140	return DOMAIN_CODE_FCC;			/* XXX? */
1141}
1142
/*
 * Push vap-independent configuration to the firmware after a
 * (re)start.  NB: always reports success (returns 1); callers
 * treat non-zero as ok.
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	return 1;
}
1167
/*
 * Bring the hardware/driver up; MWL_LOCK must be held.
 * Returns 0 on success or an errno on failure.
 */
static int
mwl_init_locked(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop_locked(ifp, 0);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		if_printf(ifp, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.  The #if 0 sources are left disabled;
	 * the corresponding handlers in the isr are no-ops.
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	/* mark running before unmasking interrupts */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	mwl_hal_intrset(mh, sc->sc_imask);
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);

	return 0;
}
1228
1229static void
1230mwl_init(void *arg)
1231{
1232	struct mwl_softc *sc = arg;
1233	struct ifnet *ifp = sc->sc_ifp;
1234	struct ieee80211com *ic = ifp->if_l2com;
1235	int error = 0;
1236
1237	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
1238		__func__, ifp->if_flags);
1239
1240	MWL_LOCK(sc);
1241	error = mwl_init_locked(sc);
1242	MWL_UNLOCK(sc);
1243
1244	if (error == 0)
1245		ieee80211_start_all(ic);	/* start all vap's */
1246}
1247
/*
 * Shut the interface down; MWL_LOCK must be held.
 * NB: the 'disable' argument is currently unused here.
 */
static void
mwl_stop_locked(struct ifnet *ifp, int disable)
{
	struct mwl_softc *sc = ifp->if_softc;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	MWL_LOCK_ASSERT(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Shutdown the hardware and driver.
		 */
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		callout_stop(&sc->sc_watchdog);
		sc->sc_tx_timer = 0;
		mwl_draintxq(sc);
	}
}
1267
1268static void
1269mwl_stop(struct ifnet *ifp, int disable)
1270{
1271	struct mwl_softc *sc = ifp->if_softc;
1272
1273	MWL_LOCK(sc);
1274	mwl_stop_locked(ifp, disable);
1275	MWL_UNLOCK(sc);
1276}
1277
/*
 * Re-push per-vap state to the firmware for the given target
 * state.  Returns 0 on success or the beacon setup result when
 * beacons need to be re-established.
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons for beaconing modes only */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_MBSS ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}
1307
1308/*
1309 * Reset the hardware w/o losing operational state.
 * Used to reset or reload hardware state for a vap.
1311 */
static int
mwl_reset(struct ieee80211vap *vap, u_long cmd)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	int error = 0;

	if (hvap != NULL) {			/* WDS, MONITOR, etc. */
		struct ieee80211com *ic = vap->iv_ic;
		struct ifnet *ifp = ic->ic_ifp;
		struct mwl_softc *sc = ifp->if_softc;
		struct mwl_hal *mh = sc->sc_mh;

		/* XXX handle DWDS sta vap change */
		/* XXX do we need to disable interrupts? */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
		error = mwl_reset_vap(vap, vap->iv_state);
		mwl_hal_intrset(mh, sc->sc_imask);
	}
	return error;
}
1332
1333/*
1334 * Allocate a tx buffer for sending a frame.  The
1335 * packet is assumed to have the WME AC stored so
1336 * we can use it to select the appropriate h/w queue.
1337 */
1338static struct mwl_txbuf *
1339mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1340{
1341	struct mwl_txbuf *bf;
1342
1343	/*
1344	 * Grab a TX buffer and associated resources.
1345	 */
1346	MWL_TXQ_LOCK(txq);
1347	bf = STAILQ_FIRST(&txq->free);
1348	if (bf != NULL) {
1349		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1350		txq->nfree--;
1351	}
1352	MWL_TXQ_UNLOCK(txq);
1353	if (bf == NULL)
1354		DPRINTF(sc, MWL_DEBUG_XMIT,
1355		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1356	return bf;
1357}
1358
1359/*
1360 * Return a tx buffer to the queue it came from.  Note there
1361 * are two cases because we must preserve the order of buffers
1362 * as it reflects the fixed order of descriptors in memory
1363 * (the firmware pre-fetches descriptors so we cannot reorder).
1364 */
1365static void
1366mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1367{
1368	bf->bf_m = NULL;
1369	bf->bf_node = NULL;
1370	MWL_TXQ_LOCK(txq);
1371	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1372	txq->nfree++;
1373	MWL_TXQ_UNLOCK(txq);
1374}
1375
1376static void
1377mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
1378{
1379	bf->bf_m = NULL;
1380	bf->bf_node = NULL;
1381	MWL_TXQ_LOCK(txq);
1382	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
1383	txq->nfree++;
1384	MWL_TXQ_UNLOCK(txq);
1385}
1386
/*
 * if_start entry point: drain the interface send queue, handing
 * each frame to the firmware, and batch txstart kicks to avoid
 * poking the firmware once per packet.
 */
static void
mwl_start(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	/* NB: do nothing when down or detaching */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	nqueued = 0;
	for (;;) {
		bf = NULL;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		/*
		 * Grab the node for the destination.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			/* no-drop policy: stall the interface instead */
			sc->sc_stats.mst_tx_qstop++;
			/* XXX blocks other traffic */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			ifp->if_oerrors++;
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host. So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1469
/*
 * Transmit a raw (caller-formed) 802.11 frame.
 * Returns 0 on success or an errno; on error both the node
 * reference and (where noted) the mbuf are released.
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
		ieee80211_free_node(ni);
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		/* XXX blocks other traffic */
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		ieee80211_free_node(ni);
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 * NOTE(review): m is not freed on this path; mwl_tx_start
	 * presumably consumes the mbuf on error -- confirm.
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		ifp->if_oerrors++;
		mwl_puttxbuf_head(txq, bf);

		ieee80211_free_node(ni);
		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host. So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1529
1530static int
1531mwl_media_change(struct ifnet *ifp)
1532{
1533	struct ieee80211vap *vap = ifp->if_softc;
1534	int error;
1535
1536	error = ieee80211_media_change(ifp);
1537	/* NB: only the fixed rate can change and that doesn't need a reset */
1538	if (error == ENETRESET) {
1539		mwl_setrates(vap);
1540		error = 0;
1541	}
1542	return error;
1543}
1544
1545#ifdef MWL_DEBUG
/*
 * Debug helper: dump a HAL key record (key bytes, TKIP MICs,
 * flags) for the given mac address.
 * NOTE(review): ciphers[] is indexed by hk->keyTypeId without a
 * bounds check -- assumes KEY_TYPE_ID_{WEP,TKIP,AES} are 0..2.
 */
static void
mwl_keyprint(struct mwl_softc *sc, const char *tag,
	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	static const char *ciphers[] = {
		"WEP",
		"TKIP",
		"AES-CCM",
	};
	int i, n;

	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
	for (i = 0, n = hk->keyLen; i < n; i++)
		printf(" %02x", hk->key.aes[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
		printf(" %s", "rxmic");
		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
			printf(" %02x", hk->key.tkip.rxMic[i]);
		printf(" txmic");
		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
			printf(" %02x", hk->key.tkip.txMic[i]);
	}
	printf(" flags 0x%x\n", hk->keyFlags);
}
1571#endif
1572
1573/*
1574 * Allocate a key cache slot for a unicast key.  The
1575 * firmware handles key allocation and every station is
1576 * guaranteed key space so we are always successful.
1577 */
1578static int
1579mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
1580	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1581{
1582	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1583
1584	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
1585	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
1586		if (!(&vap->iv_nw_keys[0] <= k &&
1587		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
1588			/* should not happen */
1589			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1590				"%s: bogus group key\n", __func__);
1591			return 0;
1592		}
1593		/* give the caller what they requested */
1594		*keyix = *rxkeyix = k - vap->iv_nw_keys;
1595	} else {
1596		/*
1597		 * Firmware handles key allocation.
1598		 */
1599		*keyix = *rxkeyix = 0;
1600	}
1601	return 1;
1602}
1603
1604/*
1605 * Delete a key entry allocated by mwl_key_alloc.
1606 */
static int
mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	MWL_HAL_KEYVAL hk;
	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	if (hvap == NULL) {
		/* WDS vaps borrow the parent AP's hal vap */
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}

	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
	    __func__, k->wk_keyix);

	/* translate the net80211 cipher to the HAL key type */
	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (k->wk_cipher->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
}
1650
1651static __inline int
1652addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1653{
1654	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1655		if (k->wk_flags & IEEE80211_KEY_XMIT)
1656			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1657		if (k->wk_flags & IEEE80211_KEY_RECV)
1658			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1659		return 1;
1660	} else
1661		return 0;
1662}
1663
1664/*
1665 * Set the key cache contents for the specified key.  Key cache
1666 * slot(s) must already have been allocated by mwl_key_alloc.
1667 */
static int
mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
	const uint8_t mac[IEEE80211_ADDR_LEN])
{
#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
#define	IEEE80211_IS_STATICKEY(k) \
	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
	 (GRPXMIT|IEEE80211_KEY_RECV))
	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	const uint8_t *macaddr;
	MWL_HAL_KEYVAL hk;

	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
		("s/w crypto set?"));

	if (hvap == NULL) {
		/* WDS vaps borrow the parent AP's hal vap */
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}
	/* build the HAL key record from the net80211 key */
	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (cip->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		hk.keyLen = k->wk_keylen;
		if (k->wk_keyix == vap->iv_def_txkey)
			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
		if (!IEEE80211_IS_STATICKEY(k)) {
			/* NB: WEP is never used for the PTK */
			(void) addgroupflags(&hk, k);
		}
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		hk.keyLen = k->wk_keylen;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	/*
	 * NB: tkip mic keys get copied here too; the layout
	 *     just happens to match that in ieee80211_key.
	 */
	memcpy(hk.key.aes, k->wk_key, hk.keyLen);

	/*
	 * Locate address of sta db entry for writing key;
	 * the convention unfortunately is somewhat different
	 * than how net80211, hostapd, and wpa_supplicant think.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		/*
		 * NB: keys plumbed before the sta reaches AUTH state
		 * will be discarded or written to the wrong sta db
		 * entry because iv_bss is meaningless.  This is ok
		 * (right now) because we handle deferred plumbing of
		 * WEP keys when the sta reaches AUTH state.
		 */
		macaddr = vap->iv_bss->ni_bssid;
		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
			/* XXX plumb to local sta db too for static key wep */
			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
		}
	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
	    vap->iv_state != IEEE80211_S_RUN) {
		/*
		 * Prior to RUN state a WDS vap will not have its BSS
		 * node setup so we will plumb the key to the wrong mac
		 * address (it'll be our local address).  Workaround
		 * this for the moment by grabbing the correct address.
		 */
		macaddr = vap->iv_des_bssid;
	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
		macaddr = vap->iv_myaddr;
	else
		macaddr = mac;
	KEYPRINTF(sc, &hk, macaddr);
	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
#undef IEEE80211_IS_STATICKEY
#undef GRPXMIT
}
1772
/*
 * Unaligned little endian access: byte-at-a-time loads avoid
 * alignment faults on strict-alignment platforms.
 */
#define LE_READ_2(p)				\
	((uint16_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8)))
#define LE_READ_4(p)				\
	((uint32_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8) |	\
	  (((const uint8_t *)(p))[2] << 16) |	\
	  (((const uint8_t *)(p))[3] << 24)))
1784
1785/*
1786 * Set the multicast filter contents into the hardware.
1787 * XXX f/w has no support; just defer to the os.
1788 */
static void
mwl_setmcastfilter(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
#if 0
	/*
	 * Disabled: per-address filtering code retained for
	 * reference should firmware support appear.
	 */
	struct ether_multi *enm;
	struct ether_multistep estep;
	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
	uint8_t *mp;
	int nmc;

	mp = macs;
	nmc = 0;
	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
	while (enm != NULL) {
		/* XXX Punt on ranges. */
		if (nmc == MWL_HAL_MCAST_MAX ||
		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
			ifp->if_flags |= IFF_ALLMULTI;
			return;
		}
		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
		mp += IEEE80211_ADDR_LEN, nmc++;
		ETHER_NEXT_MULTI(estep, enm);
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
#else
	/* XXX no mcast filter support; we get everything */
	ifp->if_flags |= IFF_ALLMULTI;
#endif
}
1821
1822static int
1823mwl_mode_init(struct mwl_softc *sc)
1824{
1825	struct ifnet *ifp = sc->sc_ifp;
1826	struct ieee80211com *ic = ifp->if_l2com;
1827	struct mwl_hal *mh = sc->sc_mh;
1828
1829	/*
1830	 * NB: Ignore promisc in hostap mode; it's set by the
1831	 * bridge.  This is wrong but we have no way to
1832	 * identify internal requests (from the bridge)
1833	 * versus external requests such as for tcpdump.
1834	 */
1835	mwl_hal_setpromisc(mh, (ifp->if_flags & IFF_PROMISC) &&
1836	    ic->ic_opmode != IEEE80211_M_HOSTAP);
1837	mwl_setmcastfilter(sc);
1838
1839	return 0;
1840}
1841
1842/*
1843 * Callback from the 802.11 layer after a multicast state change.
1844 */
1845static void
1846mwl_update_mcast(struct ifnet *ifp)
1847{
1848	struct mwl_softc *sc = ifp->if_softc;
1849
1850	mwl_setmcastfilter(sc);
1851}
1852
1853/*
1854 * Callback from the 802.11 layer after a promiscuous mode change.
1855 * Note this interface does not check the operating mode as this
1856 * is an internal callback and we are expected to honor the current
1857 * state (e.g. this is used for setting the interface in promiscuous
1858 * mode when operating in hostap mode to do ACS).
1859 */
1860static void
1861mwl_update_promisc(struct ifnet *ifp)
1862{
1863	struct mwl_softc *sc = ifp->if_softc;
1864
1865	mwl_hal_setpromisc(sc->sc_mh, (ifp->if_flags & IFF_PROMISC) != 0);
1866}
1867
1868/*
1869 * Callback from the 802.11 layer to update the slot time
1870 * based on the current setting.  We use it to notify the
1871 * firmware of ERP changes and the f/w takes care of things
1872 * like slot time and preamble.
1873 */
static void
mwl_updateslot(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_hal *mh = sc->sc_mh;
	int prot;

	/* NB: can be called early; suppress needless cmds */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/*
	 * Calculate the ERP flags.  The firmware will use
	 * this to carry out the appropriate measures.
	 */
	prot = 0;
	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
		if (ic->ic_flags & IEEE80211_F_USEPROT)
			prot |= IEEE80211_ERP_USE_PROTECTION;
		if (ic->ic_flags & IEEE80211_F_USEBARKER)
			prot |= IEEE80211_ERP_LONG_PREAMBLE;
	}

	DPRINTF(sc, MWL_DEBUG_RESET,
	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
	    ic->ic_flags);

	mwl_hal_setgprot(mh, prot);
}
1908
1909/*
1910 * Setup the beacon frame.
1911 */
1912static int
1913mwl_beacon_setup(struct ieee80211vap *vap)
1914{
1915	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1916	struct ieee80211_node *ni = vap->iv_bss;
1917	struct ieee80211_beacon_offsets bo;
1918	struct mbuf *m;
1919
1920	m = ieee80211_beacon_alloc(ni, &bo);
1921	if (m == NULL)
1922		return ENOBUFS;
1923	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1924	m_free(m);
1925
1926	return 0;
1927}
1928
1929/*
1930 * Update the beacon frame in response to a change.
1931 */
static void
mwl_beacon_update(struct ieee80211vap *vap, int item)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	KASSERT(hvap != NULL, ("no beacon"));
	switch (item) {
	case IEEE80211_BEACON_ERP:
		mwl_updateslot(ic->ic_ifp);
		break;
	case IEEE80211_BEACON_HTINFO:
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		break;
	case IEEE80211_BEACON_CAPS:
	case IEEE80211_BEACON_WME:
	case IEEE80211_BEACON_APPIE:
	case IEEE80211_BEACON_CSA:
		/* NB: falls through to the rebuild below */
		break;
	case IEEE80211_BEACON_TIM:
		/* NB: firmware always forms TIM */
		return;
	}
	/* XXX retain beacon frame and update */
	mwl_beacon_setup(vap);
}
1959
1960static void
1961mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1962{
1963	bus_addr_t *paddr = (bus_addr_t*) arg;
1964	KASSERT(error == 0, ("error %u on bus_dma callback", error));
1965	*paddr = segs->ds_addr;
1966}
1967
1968#ifdef MWL_HOST_PS_SUPPORT
1969/*
1970 * Handle power save station occupancy changes.
1971 */
static void
mwl_update_ps(struct ieee80211vap *vap, int nsta)
{
	struct mwl_vap *mvp = MWL_VAP(vap);

	/*
	 * Notify the firmware only on transitions between "no stations
	 * in power save" and "some stations in power save"; other
	 * changes in the count leave the bss ps setting untouched.
	 */
	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
	/* remember the occupancy for the next transition check */
	mvp->mv_last_ps_sta = nsta;
}
1981
1982/*
1983 * Handle associated station power save state changes.
1984 */
1985static int
1986mwl_set_tim(struct ieee80211_node *ni, int set)
1987{
1988	struct ieee80211vap *vap = ni->ni_vap;
1989	struct mwl_vap *mvp = MWL_VAP(vap);
1990
1991	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1992		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1993		    IEEE80211_AID(ni->ni_associd), set);
1994		return 1;
1995	} else
1996		return 0;
1997}
1998#endif /* MWL_HOST_PS_SUPPORT */
1999
2000static int
2001mwl_desc_setup(struct mwl_softc *sc, const char *name,
2002	struct mwl_descdma *dd,
2003	int nbuf, size_t bufsize, int ndesc, size_t descsize)
2004{
2005	struct ifnet *ifp = sc->sc_ifp;
2006	uint8_t *ds;
2007	int error;
2008
2009	DPRINTF(sc, MWL_DEBUG_RESET,
2010	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
2011	    __func__, name, nbuf, (uintmax_t) bufsize,
2012	    ndesc, (uintmax_t) descsize);
2013
2014	dd->dd_name = name;
2015	dd->dd_desc_len = nbuf * ndesc * descsize;
2016
2017	/*
2018	 * Setup DMA descriptor area.
2019	 */
2020	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
2021		       PAGE_SIZE, 0,		/* alignment, bounds */
2022		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2023		       BUS_SPACE_MAXADDR,	/* highaddr */
2024		       NULL, NULL,		/* filter, filterarg */
2025		       dd->dd_desc_len,		/* maxsize */
2026		       1,			/* nsegments */
2027		       dd->dd_desc_len,		/* maxsegsize */
2028		       BUS_DMA_ALLOCNOW,	/* flags */
2029		       NULL,			/* lockfunc */
2030		       NULL,			/* lockarg */
2031		       &dd->dd_dmat);
2032	if (error != 0) {
2033		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
2034		return error;
2035	}
2036
2037	/* allocate descriptors */
2038	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
2039				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2040				 &dd->dd_dmamap);
2041	if (error != 0) {
2042		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
2043			"error %u\n", nbuf * ndesc, dd->dd_name, error);
2044		goto fail1;
2045	}
2046
2047	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
2048				dd->dd_desc, dd->dd_desc_len,
2049				mwl_load_cb, &dd->dd_desc_paddr,
2050				BUS_DMA_NOWAIT);
2051	if (error != 0) {
2052		if_printf(ifp, "unable to map %s descriptors, error %u\n",
2053			dd->dd_name, error);
2054		goto fail2;
2055	}
2056
2057	ds = dd->dd_desc;
2058	memset(ds, 0, dd->dd_desc_len);
2059	DPRINTF(sc, MWL_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
2060	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
2061	    (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
2062
2063	return 0;
2064fail2:
2065	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
2066fail1:
2067	bus_dma_tag_destroy(dd->dd_dmat);
2068	memset(dd, 0, sizeof(*dd));
2069	return error;
2070#undef DS2PHYS
2071}
2072
/*
 * Undo mwl_desc_setup: unload the map, free the descriptor
 * memory, destroy the tag, and clear the state block.
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	/* zero so a later cleanup pass sees nothing left to release */
	memset(dd, 0, sizeof(*dd));
}
2082
2083/*
2084 * Construct a tx q's free list.  The order of entries on
2085 * the list must reflect the physical layout of tx descriptors
2086 * because the firmware pre-fetches descriptors.
2087 *
2088 * XXX might be better to use indices into the buffer array.
2089 */
2090static void
2091mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2092{
2093	struct mwl_txbuf *bf;
2094	int i;
2095
2096	bf = txq->dma.dd_bufptr;
2097	STAILQ_INIT(&txq->free);
2098	for (i = 0; i < mwl_txbuf; i++, bf++)
2099		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2100	txq->nfree = i;
2101}
2102
/* Translate a descriptor's kernel virtual address to its bus address. */
#define	DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2105
/*
 * Allocate the tx descriptor ring and the parallel array of
 * driver tx buffer structures, bind each buffer to its MWL_TXDESC
 * hardware descriptors, and create a dma map per buffer for the
 * mbuf chains attached at transmit time.  Returns 0 or an errno;
 * on error partially-created state is reclaimed by the caller
 * (mwl_dma_setup invokes mwl_dma_cleanup).
 */
static int
mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ifnet *ifp = sc->sc_ifp;
	int error, bsize, i;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;

	error = mwl_desc_setup(sc, "tx", &txq->dma,
			mwl_txbuf, sizeof(struct mwl_txbuf),
			MWL_TXDESC, sizeof(struct mwl_txdesc));
	if (error != 0)
		return error;

	/* allocate and setup tx buffers */
	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
	if (bf == NULL) {
		if_printf(ifp, "malloc of %u tx buffers failed\n",
			mwl_txbuf);
		return ENOMEM;
	}
	txq->dma.dd_bufptr = bf;

	ds = txq->dma.dd_desc;
	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
		bf->bf_desc = ds;
		/* bus address of this buffer's first descriptor */
		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
				&bf->bf_dmamap);
		if (error != 0) {
			if_printf(ifp, "unable to create dmamap for tx "
				"buffer %u, error %u\n", i, error);
			/* NB: caller reclaims partial state on error */
			return error;
		}
	}
	mwl_txq_reset(sc, txq);
	return 0;
}
2145
2146static void
2147mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2148{
2149	struct mwl_txbuf *bf;
2150	int i;
2151
2152	bf = txq->dma.dd_bufptr;
2153	for (i = 0; i < mwl_txbuf; i++, bf++) {
2154		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2155		KASSERT(bf->bf_node == NULL, ("node on free list"));
2156		if (bf->bf_dmamap != NULL)
2157			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2158	}
2159	STAILQ_INIT(&txq->free);
2160	txq->nfree = 0;
2161	if (txq->dma.dd_bufptr != NULL) {
2162		free(txq->dma.dd_bufptr, M_MWLDEV);
2163		txq->dma.dd_bufptr = NULL;
2164	}
2165	if (txq->dma.dd_desc_len != 0)
2166		mwl_desc_cleanup(sc, &txq->dma);
2167}
2168
2169static int
2170mwl_rxdma_setup(struct mwl_softc *sc)
2171{
2172	struct ifnet *ifp = sc->sc_ifp;
2173	int error, jumbosize, bsize, i;
2174	struct mwl_rxbuf *bf;
2175	struct mwl_jumbo *rbuf;
2176	struct mwl_rxdesc *ds;
2177	caddr_t data;
2178
2179	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2180			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2181			1, sizeof(struct mwl_rxdesc));
2182	if (error != 0)
2183		return error;
2184
2185	/*
2186	 * Receive is done to a private pool of jumbo buffers.
2187	 * This allows us to attach to mbuf's and avoid re-mapping
2188	 * memory on each rx we post.  We allocate a large chunk
2189	 * of memory and manage it in the driver.  The mbuf free
2190	 * callback method is used to reclaim frames after sending
2191	 * them up the stack.  By default we allocate 2x the number of
2192	 * rx descriptors configured so we have some slop to hold
2193	 * us while frames are processed.
2194	 */
2195	if (mwl_rxbuf < 2*mwl_rxdesc) {
2196		if_printf(ifp,
2197		    "too few rx dma buffers (%d); increasing to %d\n",
2198		    mwl_rxbuf, 2*mwl_rxdesc);
2199		mwl_rxbuf = 2*mwl_rxdesc;
2200	}
2201	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2202	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2203
2204	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2205		       PAGE_SIZE, 0,		/* alignment, bounds */
2206		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2207		       BUS_SPACE_MAXADDR,	/* highaddr */
2208		       NULL, NULL,		/* filter, filterarg */
2209		       sc->sc_rxmemsize,	/* maxsize */
2210		       1,			/* nsegments */
2211		       sc->sc_rxmemsize,	/* maxsegsize */
2212		       BUS_DMA_ALLOCNOW,	/* flags */
2213		       NULL,			/* lockfunc */
2214		       NULL,			/* lockarg */
2215		       &sc->sc_rxdmat);
2216	if (error != 0) {
2217		if_printf(ifp, "could not create rx DMA tag\n");
2218		return error;
2219	}
2220
2221	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2222				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2223				 &sc->sc_rxmap);
2224	if (error != 0) {
2225		if_printf(ifp, "could not alloc %ju bytes of rx DMA memory\n",
2226		    (uintmax_t) sc->sc_rxmemsize);
2227		return error;
2228	}
2229
2230	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2231				sc->sc_rxmem, sc->sc_rxmemsize,
2232				mwl_load_cb, &sc->sc_rxmem_paddr,
2233				BUS_DMA_NOWAIT);
2234	if (error != 0) {
2235		if_printf(ifp, "could not load rx DMA map\n");
2236		return error;
2237	}
2238
2239	/*
2240	 * Allocate rx buffers and set them up.
2241	 */
2242	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2243	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2244	if (bf == NULL) {
2245		if_printf(ifp, "malloc of %u rx buffers failed\n", bsize);
2246		return error;
2247	}
2248	sc->sc_rxdma.dd_bufptr = bf;
2249
2250	STAILQ_INIT(&sc->sc_rxbuf);
2251	ds = sc->sc_rxdma.dd_desc;
2252	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2253		bf->bf_desc = ds;
2254		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2255		/* pre-assign dma buffer */
2256		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2257		/* NB: tail is intentional to preserve descriptor order */
2258		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2259	}
2260
2261	/*
2262	 * Place remainder of dma memory buffers on the free list.
2263	 */
2264	SLIST_INIT(&sc->sc_rxfree);
2265	for (; i < mwl_rxbuf; i++) {
2266		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2267		rbuf = MWL_JUMBO_DATA2BUF(data);
2268		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2269		sc->sc_nrxfree++;
2270	}
2271	return 0;
2272}
2273#undef DS2PHYS
2274
/*
 * Reclaim all rx dma state.  Each resource is released only if
 * it was actually created, so this is safe to call after a
 * partial mwl_rxdma_setup failure.
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	if (sc->sc_rxmem_paddr != 0) {
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmem_paddr = 0;
	}
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
}
2293
2294static int
2295mwl_dma_setup(struct mwl_softc *sc)
2296{
2297	int error, i;
2298
2299	error = mwl_rxdma_setup(sc);
2300	if (error != 0) {
2301		mwl_rxdma_cleanup(sc);
2302		return error;
2303	}
2304
2305	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2306		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2307		if (error != 0) {
2308			mwl_dma_cleanup(sc);
2309			return error;
2310		}
2311	}
2312	return 0;
2313}
2314
2315static void
2316mwl_dma_cleanup(struct mwl_softc *sc)
2317{
2318	int i;
2319
2320	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2321		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2322	mwl_rxdma_cleanup(sc);
2323}
2324
2325static struct ieee80211_node *
2326mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2327{
2328	struct ieee80211com *ic = vap->iv_ic;
2329	struct mwl_softc *sc = ic->ic_ifp->if_softc;
2330	const size_t space = sizeof(struct mwl_node);
2331	struct mwl_node *mn;
2332
2333	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2334	if (mn == NULL) {
2335		/* XXX stat+msg */
2336		return NULL;
2337	}
2338	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2339	return &mn->mn_node;
2340}
2341
/*
 * Reclaim driver state for a node: remove its firmware station
 * db entry (keyed by our own address in sta mode) and release
 * its station id, then chain to the saved net80211 method.
 */
static void
mwl_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
        struct mwl_softc *sc = ic->ic_ifp->if_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
	    __func__, ni, ni->ni_ic, mn->mn_staid);

	if (mn->mn_staid != 0) {
		struct ieee80211vap *vap = ni->ni_vap;

		if (mn->mn_hvap != NULL) {
			if (vap->iv_opmode == IEEE80211_M_STA)
				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
			else
				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
		}
		/*
		 * NB: legacy WDS peer sta db entry is installed using
		 * the associate ap's hvap; use it again to delete it.
		 * XXX can vap be NULL?
		 */
		else if (vap->iv_opmode == IEEE80211_M_WDS &&
		    MWL_VAP(vap)->mv_ap_hvap != NULL)
			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
			    ni->ni_macaddr);
		/* return the station id for reuse */
		delstaid(sc, mn->mn_staid);
		mn->mn_staid = 0;
	}
	/* chain to the net80211 cleanup method we interposed on */
	sc->sc_node_cleanup(ni);
}
2375
2376/*
2377 * Reclaim rx dma buffers from packets sitting on the ampdu
2378 * reorder queue for a station.  We replace buffers with a
2379 * system cluster (if available).
2380 */
static void
mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
{
	/*
	 * NB: entire implementation is compiled out; it was carried
	 * over from another OS port (pool_cache_get_paddr, MEXTREMOVE,
	 * M_CLUSTER are not FreeBSD APIs) and has not been converted.
	 * Until then this routine is a no-op.
	 */
#if 0
	int i, n, off;
	struct mbuf *m;
	void *cl;

	n = rap->rxa_qframes;
	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
		m = rap->rxa_m[i];
		if (m == NULL)
			continue;
		n--;
		/* our dma buffers have a well-known free routine */
		if ((m->m_flags & M_EXT) == 0 ||
		    m->m_ext.ext_free != mwl_ext_free)
			continue;
		/*
		 * Try to allocate a cluster and move the data.
		 */
		off = m->m_data - m->m_ext.ext_buf;
		if (off + m->m_pkthdr.len > MCLBYTES) {
			/* XXX no AMSDU for now */
			continue;
		}
		cl = pool_cache_get_paddr(&mclpool_cache, 0,
		    &m->m_ext.ext_paddr);
		if (cl != NULL) {
			/*
			 * Copy the existing data to the cluster, remove
			 * the rx dma buffer, and attach the cluster in
			 * its place.  Note we preserve the offset to the
			 * data so frames being bridged can still prepend
			 * their headers without adding another mbuf.
			 */
			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
			MEXTREMOVE(m);
			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
			/* setup mbuf like _MCLGET does */
			m->m_flags |= M_CLUSTER | M_EXT_RW;
			_MOWNERREF(m, M_EXT | M_CLUSTER);
			/* NB: m_data is clobbered by MEXTADDR, adjust */
			m->m_data += off;
		}
	}
#endif
}
2429
2430/*
2431 * Callback to reclaim resources.  We first let the
2432 * net80211 layer do it's thing, then if we are still
2433 * blocked by a lack of rx dma buffers we walk the ampdu
2434 * reorder q's to reclaim buffers by copying to a system
2435 * cluster.
2436 */
2437static void
2438mwl_node_drain(struct ieee80211_node *ni)
2439{
2440	struct ieee80211com *ic = ni->ni_ic;
2441        struct mwl_softc *sc = ic->ic_ifp->if_softc;
2442	struct mwl_node *mn = MWL_NODE(ni);
2443
2444	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2445	    __func__, ni, ni->ni_vap, mn->mn_staid);
2446
2447	/* NB: call up first to age out ampdu q's */
2448	sc->sc_node_drain(ni);
2449
2450	/* XXX better to not check low water mark? */
2451	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2452	    (ni->ni_flags & IEEE80211_NODE_HT)) {
2453		uint8_t tid;
2454		/*
2455		 * Walk the reorder q and reclaim rx dma buffers by copying
2456		 * the packet contents into clusters.
2457		 */
2458		for (tid = 0; tid < WME_NUM_TID; tid++) {
2459			struct ieee80211_rx_ampdu *rap;
2460
2461			rap = &ni->ni_rx_ampdu[tid];
2462			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2463				continue;
2464			if (rap->rxa_qframes)
2465				mwl_ampdu_rxdma_reclaim(rap);
2466		}
2467	}
2468}
2469
/*
 * Return signal/noise for a node.  The rssi comes from the
 * net80211-maintained estimate; the noise floor is a fixed
 * guess pending real hardware data.
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2485
2486/*
2487 * Convert Hardware per-antenna rssi info to common format:
2488 * Let a1, a2, a3 represent the amplitudes per chain
2489 * Let amax represent max[a1, a2, a3]
2490 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2491 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2492 * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2493 * maintain some extra precision.
2494 *
2495 * Values are stored in .5 db format capped at 127.
2496 */
static void
mwl_node_getmimoinfo(const struct ieee80211_node *ni,
	struct ieee80211_mimo_info *mi)
{
#define	CVT(_dst, _src) do {						\
	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
} while (0)
	/* 4*20*log10(idx) table (see the block comment above) */
	static const int8_t logdbtbl[32] = {
	       0,   0,  24,  38,  48,  56,  62,  68,
	      72,  76,  80,  83,  86,  89,  92,  94,
	      96,  98, 100, 102, 104, 106, 107, 109,
	     110, 112, 113, 115, 116, 117, 118, 119
	};
	const struct mwl_node *mn = MWL_NODE_CONST(ni);
	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
	uint32_t rssi_max;

	/* amax = strongest of the three per-chain amplitudes */
	rssi_max = mn->mn_ai.rssi_a;
	if (mn->mn_ai.rssi_b > rssi_max)
		rssi_max = mn->mn_ai.rssi_b;
	if (mn->mn_ai.rssi_c > rssi_max)
		rssi_max = mn->mn_ai.rssi_c;

	/* per-chain rssi in .5 dB units, capped at 127 */
	CVT(mi->rssi[0], mn->mn_ai.rssi_a);
	CVT(mi->rssi[1], mn->mn_ai.rssi_b);
	CVT(mi->rssi[2], mn->mn_ai.rssi_c);

	mi->noise[0] = mn->mn_ai.nf_a;
	mi->noise[1] = mn->mn_ai.nf_b;
	mi->noise[2] = mn->mn_ai.nf_c;
#undef CVT
}
2530
2531static __inline void *
2532mwl_getrxdma(struct mwl_softc *sc)
2533{
2534	struct mwl_jumbo *buf;
2535	void *data;
2536
2537	/*
2538	 * Allocate from jumbo pool.
2539	 */
2540	MWL_RXFREE_LOCK(sc);
2541	buf = SLIST_FIRST(&sc->sc_rxfree);
2542	if (buf == NULL) {
2543		DPRINTF(sc, MWL_DEBUG_ANY,
2544		    "%s: out of rx dma buffers\n", __func__);
2545		sc->sc_stats.mst_rx_nodmabuf++;
2546		data = NULL;
2547	} else {
2548		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2549		sc->sc_nrxfree--;
2550		data = MWL_JUMBO_BUF2DATA(buf);
2551	}
2552	MWL_RXFREE_UNLOCK(sc);
2553	return data;
2554}
2555
2556static __inline void
2557mwl_putrxdma(struct mwl_softc *sc, void *data)
2558{
2559	struct mwl_jumbo *buf;
2560
2561	/* XXX bounds check data */
2562	MWL_RXFREE_LOCK(sc);
2563	buf = MWL_JUMBO_DATA2BUF(data);
2564	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2565	sc->sc_nrxfree++;
2566	MWL_RXFREE_UNLOCK(sc);
2567}
2568
/*
 * (Re)initialize an rx descriptor, attaching a dma buffer if the
 * slot lost one earlier.  Returns ENOMEM when no dma buffer is
 * available; the descriptor is then marked so the firmware skips
 * it until a buffer can be installed.
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2607
2608static void
2609mwl_ext_free(struct mbuf *m, void *data, void *arg)
2610{
2611	struct mwl_softc *sc = arg;
2612
2613	/* XXX bounds check data */
2614	mwl_putrxdma(sc, data);
2615	/*
2616	 * If we were previously blocked by a lack of rx dma buffers
2617	 * check if we now have enough to restart rx interrupt handling.
2618	 * NB: we know we are called at splvm which is above splnet.
2619	 */
2620	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
2621		sc->sc_rxblocked = 0;
2622		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
2623	}
2624}
2625
/* 802.11 Block Ack Request (BAR) frame header layout. */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];	/* frame control */
	u_int8_t	i_dur[2];	/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2633
2634/*
2635 * Like ieee80211_anyhdrsize, but handles BAR frames
2636 * specially so the logic below to piece the 802.11
2637 * header together works.
2638 */
2639static __inline int
2640mwl_anyhdrsize(const void *data)
2641{
2642	const struct ieee80211_frame *wh = data;
2643
2644	if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2645		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2646		case IEEE80211_FC0_SUBTYPE_CTS:
2647		case IEEE80211_FC0_SUBTYPE_ACK:
2648			return sizeof(struct ieee80211_frame_ack);
2649		case IEEE80211_FC0_SUBTYPE_BAR:
2650			return sizeof(struct mwl_frame_bar);
2651		}
2652		return sizeof(struct ieee80211_frame_min);
2653	} else
2654		return ieee80211_hdrsize(data);
2655}
2656
2657static void
2658mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2659{
2660	const struct ieee80211_frame *wh;
2661	struct ieee80211_node *ni;
2662
2663	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2664	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2665	if (ni != NULL) {
2666		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2667		ieee80211_free_node(ni);
2668	}
2669}
2670
2671/*
2672 * Convert hardware signal strength to rssi.  The value
2673 * provided by the device has the noise floor added in;
2674 * we need to compensate for this but we don't have that
2675 * so we use a fixed value.
2676 *
2677 * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2678 * offset is already set as part of the initial gain.  This
2679 * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2680 */
static __inline int
cvtrssi(uint8_t ssi)
{
	int rssi;

	/* fixed offset standing in for the missing per-band noise floor */
	rssi = (int) ssi + 8;
	/* XXX hack guess until we have a real noise floor */
	rssi = (87 - rssi) * 2;	/* NB: .5 dBm units */
	if (rssi < 0)
		return 0;
	if (rssi > 127)
		return 127;
	return rssi;
}
2689
/*
 * Rx processing task: drain up to mwl_rxquota frames from the rx
 * descriptor ring.  Each frame's dma buffer is handed up the stack
 * attached to an mbuf (zero copy, see MEXTADD/mwl_ext_free) and
 * replaced in the ring with a fresh buffer from the jumbo pool; if
 * the pool is exhausted the rx interrupt is disabled until
 * mwl_ext_free replenishes it above the low water mark.
 */
static void
mwl_rx_proc(void *arg, int npending)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((((const struct ieee80211_frame *)wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct mwl_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_qosframe_addr4 *wh4;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			ifp->if_ierrors++;
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free,
		    data, sc, 0, EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		m->m_pkthdr.rcvif = ifp;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh)) {
			if (IEEE80211_DIR_DSTODS(wh)) {
				wh4 = mtod(m,
				    struct ieee80211_qosframe_addr4*);
				*(uint16_t *)wh4->i_qos = ds->QosCtrl;
			} else {
				*(uint16_t *)wh->i_qos = ds->QosCtrl;
			}
		}
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED |
		    IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		ifp->if_ipackets++;

		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	sc->sc_rxnext = bf;

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
	    !IFQ_IS_EMPTY(&ifp->if_snd)) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(ifp);
	}
#undef IEEE80211_DIR_DSTODS
}
2926
2927static void
2928mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
2929{
2930	struct mwl_txbuf *bf, *bn;
2931	struct mwl_txdesc *ds;
2932
2933	MWL_TXQ_LOCK_INIT(sc, txq);
2934	txq->qnum = qnum;
2935	txq->txpri = 0;	/* XXX */
2936#if 0
2937	/* NB: q setup by mwl_txdma_setup XXX */
2938	STAILQ_INIT(&txq->free);
2939#endif
2940	STAILQ_FOREACH(bf, &txq->free, bf_list) {
2941		bf->bf_txq = txq;
2942
2943		ds = bf->bf_desc;
2944		bn = STAILQ_NEXT(bf, bf_list);
2945		if (bn == NULL)
2946			bn = STAILQ_FIRST(&txq->free);
2947		ds->pPhysNext = htole32(bn->bf_daddr);
2948	}
2949	STAILQ_INIT(&txq->active);
2950}
2951
2952/*
2953 * Setup a hardware data transmit queue for the specified
2954 * access control.  We record the mapping from ac's
2955 * to h/w queues for use by mwl_tx_start.
2956 */
static int
mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	struct mwl_txq *txq;

	/* validate both indices; a 0 return indicates failure */
	if (ac >= N(sc->sc_ac2q)) {
		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
			ac, N(sc->sc_ac2q));
		return 0;
	}
	if (mvtype >= MWL_NUM_TX_QUEUES) {
		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
			mvtype, MWL_NUM_TX_QUEUES);
		return 0;
	}
	/* initialize the h/w queue and record the ac -> queue mapping */
	txq = &sc->sc_txq[mvtype];
	mwl_txq_init(sc, txq, mvtype);
	sc->sc_ac2q[ac] = txq;
	return 1;
#undef N
}
2979
2980/*
2981 * Update WME parameters for a transmit queue.
2982 */
static int
mwl_txq_update(struct mwl_softc *sc, int ac)
{
#define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_txq *txq = sc->sc_ac2q[ac];
	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
	struct mwl_hal *mh = sc->sc_mh;
	int aifs, cwmin, cwmax, txoplim;

	/* Convert net80211 log2 contention-window values to window sizes. */
	aifs = wmep->wmep_aifsn;
	/* XXX in sta mode need to pass log values for cwmin/max */
	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */

	/* Hand the EDCA parameters to the firmware for this h/w queue. */
	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
		device_printf(sc->sc_dev, "unable to update hardware queue "
			"parameters for %s traffic!\n",
			ieee80211_wme_acnames[ac]);
		return 0;	/* NB: 0 == failure */
	}
	return 1;		/* NB: 1 == success */
#undef MWL_EXPONENT_TO_VALUE
}
3009
3010/*
3011 * Callback from the 802.11 layer to update WME parameters.
3012 */
3013static int
3014mwl_wme_update(struct ieee80211com *ic)
3015{
3016	struct mwl_softc *sc = ic->ic_ifp->if_softc;
3017
3018	return !mwl_txq_update(sc, WME_AC_BE) ||
3019	    !mwl_txq_update(sc, WME_AC_BK) ||
3020	    !mwl_txq_update(sc, WME_AC_VI) ||
3021	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
3022}
3023
3024/*
3025 * Reclaim resources for a setup queue.
3026 */
static void
mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	/* NB: only the queue lock needs teardown here; descriptor
	 * memory is presumably reclaimed elsewhere — see the dma
	 * teardown path.
	 */
	/* XXX hal work? */
	MWL_TXQ_LOCK_DESTROY(txq);
}
3033
3034/*
3035 * Reclaim all tx queue resources.
3036 */
static void
mwl_tx_cleanup(struct mwl_softc *sc)
{
	int i;

	/* Tear down each of the h/w transmit queues in turn. */
	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
}
3045
/*
 * Map an mbuf chain for DMA.  If the chain requires more than
 * MWL_TXDESC segments it is linearized (m_collapse/m_defrag) and
 * re-mapped.  On success bf->bf_m/bf_segs/bf_nseg describe the
 * (possibly replaced) chain and 0 is returned; on failure the
 * mbuf is freed and an errno is returned.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = MWL_TXDESC+1;	/* NB: marker, checked below */
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_NOWAIT);
#endif
		if (m == NULL) {
			/* NB: m0 is still valid; linearize did not free it */
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* Retry the mapping with the linearized chain. */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;	/* NB: may differ from caller's mbuf after linearize */

	return 0;
}
3107
/*
 * Map a legacy (CCK/OFDM) rate in units of 500kb/s to the
 * firmware rate index.  Unknown rates map to index 0 (1Mb/s).
 */
static __inline int
mwl_cvtlegacyrate(int rate)
{
	static const int legacyrates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	int ix;

	for (ix = 0; ix < (int)(sizeof(legacyrates)/sizeof(legacyrates[0])); ix++)
		if (legacyrates[ix] == rate)
			return ix;
	return 0;
}
3128
3129/*
3130 * Calculate fixed tx rate information per client state;
3131 * this value is suitable for writing to the Format field
3132 * of a tx descriptor.
3133 */
static uint16_t
mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
{
	uint16_t fmt;

	/* Antenna selection and extension-channel placement. */
	fmt = SM(3, EAGLE_TXD_ANTENNA)
	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
		fmt |= EAGLE_TXD_FORMAT_HT
		    /* NB: 0x80 implicitly stripped from ucastrate */
		    | SM(rate, EAGLE_TXD_RATE);
		/* XXX short/long GI may be wrong; re-check */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			/* 40MHz channel; GI chosen from peer's HT caps */
			fmt |= EAGLE_TXD_CHW_40
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		} else {
			/* 20MHz channel; GI chosen from peer's HT caps */
			fmt |= EAGLE_TXD_CHW_20
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		}
	} else {			/* legacy rate */
		fmt |= EAGLE_TXD_FORMAT_LEGACY
		    | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
		    | EAGLE_TXD_CHW_20
		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
	}
	return fmt;
}
3166
3167static int
3168mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
3169    struct mbuf *m0)
3170{
3171#define	IEEE80211_DIR_DSTODS(wh) \
3172	((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
3173	struct ifnet *ifp = sc->sc_ifp;
3174	struct ieee80211com *ic = ifp->if_l2com;
3175	struct ieee80211vap *vap = ni->ni_vap;
3176	int error, iswep, ismcast;
3177	int hdrlen, copyhdrlen, pktlen;
3178	struct mwl_txdesc *ds;
3179	struct mwl_txq *txq;
3180	struct ieee80211_frame *wh;
3181	struct mwltxrec *tr;
3182	struct mwl_node *mn;
3183	uint16_t qos;
3184#if MWL_TXDESC > 1
3185	int i;
3186#endif
3187
3188	wh = mtod(m0, struct ieee80211_frame *);
3189	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
3190	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
3191	hdrlen = ieee80211_anyhdrsize(wh);
3192	copyhdrlen = hdrlen;
3193	pktlen = m0->m_pkthdr.len;
3194	if (IEEE80211_QOS_HAS_SEQ(wh)) {
3195		if (IEEE80211_DIR_DSTODS(wh)) {
3196			qos = *(uint16_t *)
3197			    (((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
3198			copyhdrlen -= sizeof(qos);
3199		} else
3200			qos = *(uint16_t *)
3201			    (((struct ieee80211_qosframe *) wh)->i_qos);
3202	} else
3203		qos = 0;
3204
3205	if (iswep) {
3206		const struct ieee80211_cipher *cip;
3207		struct ieee80211_key *k;
3208
3209		/*
3210		 * Construct the 802.11 header+trailer for an encrypted
3211		 * frame. The only reason this can fail is because of an
3212		 * unknown or unsupported cipher/key type.
3213		 *
3214		 * NB: we do this even though the firmware will ignore
3215		 *     what we've done for WEP and TKIP as we need the
3216		 *     ExtIV filled in for CCMP and this also adjusts
3217		 *     the headers which simplifies our work below.
3218		 */
3219		k = ieee80211_crypto_encap(ni, m0);
3220		if (k == NULL) {
3221			/*
3222			 * This can happen when the key is yanked after the
3223			 * frame was queued.  Just discard the frame; the
3224			 * 802.11 layer counts failures and provides
3225			 * debugging/diagnostics.
3226			 */
3227			m_freem(m0);
3228			return EIO;
3229		}
3230		/*
3231		 * Adjust the packet length for the crypto additions
3232		 * done during encap and any other bits that the f/w
3233		 * will add later on.
3234		 */
3235		cip = k->wk_cipher;
3236		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;
3237
3238		/* packet header may have moved, reset our local pointer */
3239		wh = mtod(m0, struct ieee80211_frame *);
3240	}
3241
3242	if (ieee80211_radiotap_active_vap(vap)) {
3243		sc->sc_tx_th.wt_flags = 0;	/* XXX */
3244		if (iswep)
3245			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3246#if 0
3247		sc->sc_tx_th.wt_rate = ds->DataRate;
3248#endif
3249		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
3250		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
3251
3252		ieee80211_radiotap_tx(vap, m0);
3253	}
3254	/*
3255	 * Copy up/down the 802.11 header; the firmware requires
3256	 * we present a 2-byte payload length followed by a
3257	 * 4-address header (w/o QoS), followed (optionally) by
3258	 * any WEP/ExtIV header (but only filled in for CCMP).
3259	 * We are assured the mbuf has sufficient headroom to
3260	 * prepend in-place by the setup of ic_headroom in
3261	 * mwl_attach.
3262	 */
3263	if (hdrlen < sizeof(struct mwltxrec)) {
3264		const int space = sizeof(struct mwltxrec) - hdrlen;
3265		if (M_LEADINGSPACE(m0) < space) {
3266			/* NB: should never happen */
3267			device_printf(sc->sc_dev,
3268			    "not enough headroom, need %d found %zd, "
3269			    "m_flags 0x%x m_len %d\n",
3270			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
3271			ieee80211_dump_pkt(ic,
3272			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
3273			m_freem(m0);
3274			sc->sc_stats.mst_tx_noheadroom++;
3275			return EIO;
3276		}
3277		M_PREPEND(m0, space, M_NOWAIT);
3278	}
3279	tr = mtod(m0, struct mwltxrec *);
3280	if (wh != (struct ieee80211_frame *) &tr->wh)
3281		ovbcopy(wh, &tr->wh, hdrlen);
3282	/*
3283	 * Note: the "firmware length" is actually the length
3284	 * of the fully formed "802.11 payload".  That is, it's
3285	 * everything except for the 802.11 header.  In particular
3286	 * this includes all crypto material including the MIC!
3287	 */
3288	tr->fwlen = htole16(pktlen - hdrlen);
3289
3290	/*
3291	 * Load the DMA map so any coalescing is done.  This
3292	 * also calculates the number of descriptors we need.
3293	 */
3294	error = mwl_tx_dmasetup(sc, bf, m0);
3295	if (error != 0) {
3296		/* NB: stat collected in mwl_tx_dmasetup */
3297		DPRINTF(sc, MWL_DEBUG_XMIT,
3298		    "%s: unable to setup dma\n", __func__);
3299		return error;
3300	}
3301	bf->bf_node = ni;			/* NB: held reference */
3302	m0 = bf->bf_m;				/* NB: may have changed */
3303	tr = mtod(m0, struct mwltxrec *);
3304	wh = (struct ieee80211_frame *)&tr->wh;
3305
3306	/*
3307	 * Formulate tx descriptor.
3308	 */
3309	ds = bf->bf_desc;
3310	txq = bf->bf_txq;
3311
3312	ds->QosCtrl = qos;			/* NB: already little-endian */
3313#if MWL_TXDESC == 1
3314	/*
3315	 * NB: multiframes should be zero because the descriptors
3316	 *     are initialized to zero.  This should handle the case
3317	 *     where the driver is built with MWL_TXDESC=1 but we are
3318	 *     using firmware with multi-segment support.
3319	 */
3320	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
3321	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
3322#else
3323	ds->multiframes = htole32(bf->bf_nseg);
3324	ds->PktLen = htole16(m0->m_pkthdr.len);
3325	for (i = 0; i < bf->bf_nseg; i++) {
3326		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
3327		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
3328	}
3329#endif
3330	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
3331	ds->Format = 0;
3332	ds->pad = 0;
3333	ds->ack_wcb_addr = 0;
3334
3335	mn = MWL_NODE(ni);
3336	/*
3337	 * Select transmit rate.
3338	 */
3339	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
3340	case IEEE80211_FC0_TYPE_MGT:
3341		sc->sc_stats.mst_tx_mgmt++;
3342		/* fall thru... */
3343	case IEEE80211_FC0_TYPE_CTL:
3344		/* NB: assign to BE q to avoid bursting */
3345		ds->TxPriority = MWL_WME_AC_BE;
3346		break;
3347	case IEEE80211_FC0_TYPE_DATA:
3348		if (!ismcast) {
3349			const struct ieee80211_txparam *tp = ni->ni_txparms;
3350			/*
3351			 * EAPOL frames get forced to a fixed rate and w/o
3352			 * aggregation; otherwise check for any fixed rate
3353			 * for the client (may depend on association state).
3354			 */
3355			if (m0->m_flags & M_EAPOL) {
3356				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
3357				ds->Format = mvp->mv_eapolformat;
3358				ds->pad = htole16(
3359				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
3360			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3361				/* XXX pre-calculate per node */
3362				ds->Format = htole16(
3363				    mwl_calcformat(tp->ucastrate, ni));
3364				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
3365			}
3366			/* NB: EAPOL frames will never have qos set */
3367			if (qos == 0)
3368				ds->TxPriority = txq->qnum;
3369#if MWL_MAXBA > 3
3370			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
3371				ds->TxPriority = mn->mn_ba[3].txq;
3372#endif
3373#if MWL_MAXBA > 2
3374			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
3375				ds->TxPriority = mn->mn_ba[2].txq;
3376#endif
3377#if MWL_MAXBA > 1
3378			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
3379				ds->TxPriority = mn->mn_ba[1].txq;
3380#endif
3381#if MWL_MAXBA > 0
3382			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
3383				ds->TxPriority = mn->mn_ba[0].txq;
3384#endif
3385			else
3386				ds->TxPriority = txq->qnum;
3387		} else
3388			ds->TxPriority = txq->qnum;
3389		break;
3390	default:
3391		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
3392			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
3393		sc->sc_stats.mst_tx_badframetype++;
3394		m_freem(m0);
3395		return EIO;
3396	}
3397
3398	if (IFF_DUMPPKTS_XMIT(sc))
3399		ieee80211_dump_pkt(ic,
3400		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
3401		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);
3402
3403	MWL_TXQ_LOCK(txq);
3404	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
3405	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
3406	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3407
3408	ifp->if_opackets++;
3409	sc->sc_tx_timer = 5;
3410	MWL_TXQ_UNLOCK(txq);
3411
3412	return 0;
3413#undef	IEEE80211_DIR_DSTODS
3414}
3415
/*
 * Map a firmware legacy rate index back to the 802.11 rate in
 * units of 500kb/s.  Out-of-range indices yield 0.
 */
static __inline int
mwl_cvtlegacyrix(int rix)
{
	static const int ieeerates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	const int nrates = (int)(sizeof(ieeerates) / sizeof(ieeerates[0]));

	if (rix < 0 || rix >= nrates)
		return 0;
	return ieeerates[rix];
}
3425
3426/*
3427 * Process completed xmit descriptors from the specified queue.
3428 */
/*
 * Reap completed tx buffers from the given queue: update rate and
 * error statistics, run any tx-complete callbacks, release node
 * references, and return buffers to the free list.  Returns the
 * number of buffers reaped.
 */
static int
mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
{
#define	EAGLE_TXD_STATUS_MCAST \
	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;
	struct ieee80211_node *ni;
	struct mwl_node *an;
	int nreaped;
	uint32_t status;

	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
	for (nreaped = 0;; nreaped++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		ds = bf->bf_desc;
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* Stop at the first descriptor still owned by the f/w. */
		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);

#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
			mwl_printtxbuf(bf, txq->qnum, nreaped);
#endif
		ni = bf->bf_node;
		if (ni != NULL) {
			an = MWL_NODE(ni);
			status = le32toh(ds->Status);
			if (status & EAGLE_TXD_STATUS_OK) {
				uint16_t Format = le16toh(ds->Format);
				uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);

				/* Account per-antenna/retry statistics. */
				sc->sc_stats.mst_ant_tx[txant]++;
				if (status & EAGLE_TXD_STATUS_OK_RETRY)
					sc->sc_stats.mst_tx_retries++;
				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
					sc->sc_stats.mst_tx_mretries++;
				if (txq->qnum >= MWL_WME_AC_VO)
					ic->ic_wme.wme_hipri_traffic++;
				/*
				 * Record the rate the f/w actually used;
				 * legacy indices are converted back to
				 * 802.11 rate codes, HT rates get the
				 * MCS bit set.
				 */
				ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
					ni->ni_txrate = mwl_cvtlegacyrix(
					    ni->ni_txrate);
				} else
					ni->ni_txrate |= IEEE80211_RATE_MCS;
				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
			} else {
				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
					sc->sc_stats.mst_tx_linkerror++;
				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
					sc->sc_stats.mst_tx_xretries++;
				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
					sc->sc_stats.mst_tx_aging++;
				if (bf->bf_m->m_flags & M_FF)
					sc->sc_stats.mst_ff_txerr++;
			}
			/*
			 * Do any tx complete callback.  Note this must
			 * be done before releasing the node reference.
			 * XXX no way to figure out if frame was ACK'd
			 */
			if (bf->bf_m->m_flags & M_TXCB) {
				/* XXX strip fw len in case header inspected */
				m_adj(bf->bf_m, sizeof(uint16_t));
				ieee80211_process_callback(ni, bf->bf_m,
					(status & EAGLE_TXD_STATUS_OK) == 0);
			}
			/*
			 * Reclaim reference to node.
			 *
			 * NB: the node may be reclaimed here if, for example
			 *     this is a DEAUTH message that was sent and the
			 *     node was timed out due to inactivity.
			 */
			ieee80211_free_node(ni);
		}
		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);

		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
	return nreaped;
#undef EAGLE_TXD_STATUS_MCAST
}
3529
3530/*
3531 * Deferred processing of transmit interrupt; special-cased
3532 * for four hardware queues, 0-3.
3533 */
/*
 * Tx interrupt taskqueue handler: reap each of the four h/w
 * queues and, if anything completed, clear the OACTIVE flag and
 * restart output.  npending is the taskqueue count (unused).
 */
static void
mwl_tx_proc(void *arg, int npending)
{
	struct mwl_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	int nreaped;

	/*
	 * Process each active queue.
	 */
	nreaped = 0;
	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);

	if (nreaped != 0) {
		/* Buffers freed: resume output and reset the watchdog. */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->sc_tx_timer = 0;
		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
			/* NB: kick fw; the tx thread may have been preempted */
			mwl_hal_txstart(sc->sc_mh, 0);
			mwl_start(ifp);
		}
	}
}
3564
/*
 * Drain all buffers from a tx queue's active list, releasing
 * DMA mappings, node references, and mbufs.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ifnet *ifp = sc->sc_ifp;
			struct ieee80211com *ic = ifp->if_l2com;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3609
3610/*
3611 * Drain the transmit queues and reclaim resources.
3612 */
static void
mwl_draintxq(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	/* Drain every h/w queue, then resume output and clear watchdog. */
	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_tx_timer = 0;
}
3624
3625#ifdef MWL_DIAGAPI
3626/*
3627 * Reset the transmit queues to a pristine state after a fw download.
3628 */
static void
mwl_resettxq(struct mwl_softc *sc)
{
	int i;

	/* Re-initialize every queue's descriptor ring and buffer lists. */
	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
		mwl_txq_reset(sc, &sc->sc_txq[i]);
}
3637#endif /* MWL_DIAGAPI */
3638
3639/*
3640 * Clear the transmit queues of any frames submitted for the
3641 * specified vap.  This is done when the vap is deleted so we
3642 * don't potentially reference the vap after it is gone.
3643 * Note we cannot remove the frames; we only reclaim the node
3644 * reference.
3645 */
static void
mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
{
	struct mwl_txq *txq;
	struct mwl_txbuf *bf;
	int i;

	/*
	 * Walk every active queue and drop the node reference on any
	 * frame that belongs to the departing vap; the frames themselves
	 * are left for the firmware/reap path to finish.
	 */
	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
		txq = &sc->sc_txq[i];
		MWL_TXQ_LOCK(txq);
		STAILQ_FOREACH(bf, &txq->active, bf_list) {
			struct ieee80211_node *ni = bf->bf_node;
			if (ni != NULL && ni->ni_vap == vap) {
				bf->bf_node = NULL;
				ieee80211_free_node(ni);
			}
		}
		MWL_TXQ_UNLOCK(txq);
	}
}
3666
/*
 * Intercept received 802.11 action frames: HT MIMO power-save
 * announcements are pushed to the firmware sta db; everything
 * else is handed to the saved net80211 handler.
 */
static int
mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
	const uint8_t *frm, const uint8_t *efrm)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	const struct ieee80211_action *ia;

	ia = (const struct ieee80211_action *) frm;
	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
		const struct ieee80211_action_ht_mimopowersave *mps =
		    (const struct ieee80211_action_ht_mimopowersave *) ia;

		/* Tell the firmware about the station's MIMO PS state. */
		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
		    MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
		return 0;
	} else
		return sc->sc_recv_action(ni, wh, frm, efrm);
}
3687
/*
 * ADDBA request hook: reserve a firmware BA stream slot for the
 * (node, TID) pair before letting net80211 send the request.
 * Returns 0 (request suppressed) when no slot/stream is available.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	/* Chain to the saved net80211 handler to send the request. */
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3759
/*
 * ADDBA response hook: on success create the firmware BA stream
 * reserved in mwl_addba_request; on NAK (or create failure)
 * destroy the stream and release the slot.  Returning 0 disables
 * a-mpdu aggregation for this TID.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, TID %d\n",
		    __func__, tap->txa_tid);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d TID %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_tid, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3826
/*
 * ADDBA teardown hook: destroy the firmware BA stream (if any)
 * and release the per-node slot before chaining to net80211.
 */
static void
mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas != NULL) {
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
		    __func__, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	sc->sc_addba_stop(ni, tap);
}
3843
3844/*
3845 * Setup the rx data structures.  This should only be
3846 * done once or we may get out of sync with the firmware.
3847 */
static int
mwl_startrecv(struct mwl_softc *sc)
{
	if (!sc->sc_recvsetup) {
		struct mwl_rxbuf *bf, *prev;
		struct mwl_rxdesc *ds;

		/*
		 * First-time setup: initialize every rx buffer and
		 * chain the descriptors into a circular ring via
		 * their physical next pointers.
		 */
		prev = NULL;
		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			int error = mwl_rxbuf_init(sc, bf);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_RECV,
					"%s: mwl_rxbuf_init failed %d\n",
					__func__, error);
				return error;
			}
			if (prev != NULL) {
				ds = prev->bf_desc;
				ds->pPhysNext = htole32(bf->bf_daddr);
			}
			prev = bf;
		}
		if (prev != NULL) {
			/* Close the ring: last descriptor points at first. */
			ds = prev->bf_desc;
			ds->pPhysNext =
			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
		}
		sc->sc_recvsetup = 1;	/* NB: never torn down; see comment above */
	}
	mwl_mode_init(sc);		/* set filters, etc. */
	return 0;
}
3880
3881static MWL_HAL_APMODE
3882mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3883{
3884	MWL_HAL_APMODE mode;
3885
3886	if (IEEE80211_IS_CHAN_HT(chan)) {
3887		if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3888			mode = AP_MODE_N_ONLY;
3889		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3890			mode = AP_MODE_AandN;
3891		else if (vap->iv_flags & IEEE80211_F_PUREG)
3892			mode = AP_MODE_GandN;
3893		else
3894			mode = AP_MODE_BandGandN;
3895	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3896		if (vap->iv_flags & IEEE80211_F_PUREG)
3897			mode = AP_MODE_G_ONLY;
3898		else
3899			mode = AP_MODE_MIXED;
3900	} else if (IEEE80211_IS_CHAN_B(chan))
3901		mode = AP_MODE_B_ONLY;
3902	else if (IEEE80211_IS_CHAN_A(chan))
3903		mode = AP_MODE_A_ONLY;
3904	else
3905		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3906	return mode;
3907}
3908
/* Push the derived AP mode for the vap's channel to the firmware. */
static int
mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
}
3915
3916/*
3917 * Set/change channels.
3918 */
static int
mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
{
	struct mwl_hal *mh = sc->sc_mh;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	MWL_HAL_CHANNEL hchan;
	int maxtxpow;

	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
	    __func__, chan->ic_freq, chan->ic_flags);

	/*
	 * Convert to a HAL channel description with
	 * the flags constrained to reflect the current
	 * operating mode.
	 */
	mwl_mapchan(&hchan, chan);
	mwl_hal_intrset(mh, 0);		/* disable interrupts */
#if 0
	mwl_draintxq(sc);		/* clear pending tx frames */
#endif
	mwl_hal_setchannel(mh, &hchan);
	/*
	 * Tx power is cap'd by the regulatory setting and
	 * possibly a user-set limit.  We pass the min of
	 * these to the hal to apply them to the cal data
	 * for this channel.
	 * XXX min bound?
	 */
	/* NB: regulatory power is in .5 dBm units, limit in dBm */
	maxtxpow = 2*chan->ic_maxregpower;
	if (maxtxpow > ic->ic_txpowlimit)
		maxtxpow = ic->ic_txpowlimit;
	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
	/* NB: potentially change mcast/mgt rates */
	mwl_setcurchanrates(sc);

	/*
	 * Update internal state.
	 */
	/* Keep the radiotap headers in sync with the new channel. */
	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
	if (IEEE80211_IS_CHAN_A(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
	} else {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
	}
	sc->sc_curchan = hchan;
	mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable interrupts */

	return 0;
}
3976
/* net80211 scan-start callback; nothing to do for this h/w, just log. */
static void
mwl_scan_start(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3985
/* net80211 scan-end callback; nothing to do for this h/w, just log. */
static void
mwl_scan_end(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3994
/* net80211 set-channel callback: program the current channel. */
static void
mwl_set_channel(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;

	(void) mwl_chan_set(sc, ic->ic_curchan);
}
4003
4004/*
4005 * Handle a channel switch request.  We inform the firmware
4006 * and mark the global state to suppress various actions.
4007 * NB: we issue only one request to the fw; we may be called
4008 * multiple times if there are multiple vap's.
4009 */
static void
mwl_startcsa(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct mwl_softc *sc = ic->ic_ifp->if_softc;
	MWL_HAL_CHANNEL hchan;

	/* Only one request to the f/w even with multiple vaps. */
	if (sc->sc_csapending)
		return;

	mwl_mapchan(&hchan, ic->ic_csa_newchan);
	/* 1 =>'s quiet channel */
	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
	sc->sc_csapending = 1;	/* NB: cleared elsewhere when the switch completes */
}
4025
4026/*
4027 * Plumb any static WEP key for the station.  This is
4028 * necessary as we must propagate the key from the
4029 * global key table of the vap to each sta db entry.
4030 */
4031static void
4032mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4033{
4034	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
4035		IEEE80211_F_PRIVACY &&
4036	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
4037	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
4038		(void) mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac);
4039}
4040
4041static int
4042mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
4043{
4044#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
4045	struct ieee80211vap *vap = ni->ni_vap;
4046	struct mwl_hal_vap *hvap;
4047	int error;
4048
4049	if (vap->iv_opmode == IEEE80211_M_WDS) {
4050		/*
4051		 * WDS vap's do not have a f/w vap; instead they piggyback
4052		 * on an AP vap and we must install the sta db entry and
4053		 * crypto state using that AP's handle (the WDS vap has none).
4054		 */
4055		hvap = MWL_VAP(vap)->mv_ap_hvap;
4056	} else
4057		hvap = MWL_VAP(vap)->mv_hvap;
4058	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
4059	    aid, staid, pi,
4060	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
4061	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
4062	if (error == 0) {
4063		/*
4064		 * Setup security for this station.  For sta mode this is
4065		 * needed even though do the same thing on transition to
4066		 * AUTH state because the call to mwl_hal_newstation
4067		 * clobbers the crypto state we setup.
4068		 */
4069		mwl_setanywepkey(vap, ni->ni_macaddr);
4070	}
4071	return error;
4072#undef WME
4073}
4074
4075static void
4076mwl_setglobalkeys(struct ieee80211vap *vap)
4077{
4078	struct ieee80211_key *wk;
4079
4080	wk = &vap->iv_nw_keys[0];
4081	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
4082		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
4083			(void) mwl_key_set(vap, wk, vap->iv_myaddr);
4084}
4085
4086/*
4087 * Convert a legacy rate set to a firmware bitmask.
4088 */
4089static uint32_t
4090get_rate_bitmap(const struct ieee80211_rateset *rs)
4091{
4092	uint32_t rates;
4093	int i;
4094
4095	rates = 0;
4096	for (i = 0; i < rs->rs_nrates; i++)
4097		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
4098		case 2:	  rates |= 0x001; break;
4099		case 4:	  rates |= 0x002; break;
4100		case 11:  rates |= 0x004; break;
4101		case 22:  rates |= 0x008; break;
4102		case 44:  rates |= 0x010; break;
4103		case 12:  rates |= 0x020; break;
4104		case 18:  rates |= 0x040; break;
4105		case 24:  rates |= 0x080; break;
4106		case 36:  rates |= 0x100; break;
4107		case 48:  rates |= 0x200; break;
4108		case 72:  rates |= 0x400; break;
4109		case 96:  rates |= 0x800; break;
4110		case 108: rates |= 0x1000; break;
4111		}
4112	return rates;
4113}
4114
4115/*
4116 * Construct an HT firmware bitmask from an HT rate set.
4117 */
4118static uint32_t
4119get_htrate_bitmap(const struct ieee80211_htrateset *rs)
4120{
4121	uint32_t rates;
4122	int i;
4123
4124	rates = 0;
4125	for (i = 0; i < rs->rs_nrates; i++) {
4126		if (rs->rs_rates[i] < 16)
4127			rates |= 1<<rs->rs_rates[i];
4128	}
4129	return rates;
4130}
4131
4132/*
4133 * Craft station database entry for station.
4134 * NB: use host byte order here, the hal handles byte swapping.
4135 */
4136static MWL_HAL_PEERINFO *
4137mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
4138{
4139	const struct ieee80211vap *vap = ni->ni_vap;
4140
4141	memset(pi, 0, sizeof(*pi));
4142	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
4143	pi->CapInfo = ni->ni_capinfo;
4144	if (ni->ni_flags & IEEE80211_NODE_HT) {
4145		/* HT capabilities, etc */
4146		pi->HTCapabilitiesInfo = ni->ni_htcap;
4147		/* XXX pi.HTCapabilitiesInfo */
4148	        pi->MacHTParamInfo = ni->ni_htparam;
4149		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
4150		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
4151		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
4152		pi->AddHtInfo.OpMode = ni->ni_htopmode;
4153		pi->AddHtInfo.stbc = ni->ni_htstbc;
4154
4155		/* constrain according to local configuration */
4156		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
4157			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
4158		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
4159			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
4160		if (ni->ni_chw != 40)
4161			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
4162	}
4163	return pi;
4164}
4165
4166/*
4167 * Re-create the local sta db entry for a vap to ensure
4168 * up to date WME state is pushed to the firmware.  Because
4169 * this resets crypto state this must be followed by a
4170 * reload of any keys in the global key table.
4171 */
static int
mwl_localstadb(struct ieee80211vap *vap)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211_node *bss;
	MWL_HAL_PEERINFO pi;
	int error;

	switch (vap->iv_opmode) {
	case IEEE80211_M_STA:
		/*
		 * Station: include full peer info for the bss node only
		 * once associated (RUN); earlier the f/w gets a bare entry.
		 */
		bss = vap->iv_bss;
		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
		    vap->iv_state == IEEE80211_S_RUN ?
			mkpeerinfo(&pi, bss) : NULL,
		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
		    bss->ni_ies.wme_ie != NULL ?
			WME(bss->ni_ies.wme_ie)->wme_info : 0);
		if (error == 0)
			/* NB: newstation clobbers crypto state; reload keys */
			mwl_setglobalkeys(vap);
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		/* AP/mesh: entry for ourself, WME state only. */
		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
		if (error == 0)
			mwl_setglobalkeys(vap);
		break;
	default:
		/* other operating modes need no local sta db entry */
		error = 0;
		break;
	}
	return error;
#undef WME
}
4207
/*
 * VAP state change callback.  Performs driver/firmware work that must
 * bracket the net80211 state processing: pre-transition actions before
 * calling the parent method, post-transition actions (which need the
 * updated iv_bss et al.) after it.
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    vap->iv_ifp->if_xname, __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	/* NB: the age timer is restarted below if we end up in RUN */
	callout_stop(&sc->sc_timer);
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    vap->iv_ifp->if_xname, __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    vap->iv_ifp->if_xname, __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			/* NB: first DWDS sta vap enables f/w DWDS support */
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    vap->iv_ifp->if_xname, __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		/*
		 * NOTE(review): reached on any transition to a state other
		 * than RUN/SLEEP; assumes a matching increment happened on
		 * the earlier RUN transition -- verify sc_ndwdsvaps cannot
		 * underflow here.
		 */
		mwl_hal_setdwds(mh, 0);
bad:
	return error;
}
4361
4362/*
4363 * Manage station id's; these are separate from AID's
4364 * as AID's may have values out of the range of possible
4365 * station id's acceptable to the firmware.
4366 */
static int
allocstaid(struct mwl_softc *sc, int aid)
{
	int staid;

	/*
	 * Use the AID directly when it is in range and not already
	 * taken; otherwise fall back to the first free id.
	 */
	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
		/* NB: don't use 0 */
		for (staid = 1; staid < MWL_MAXSTAID; staid++)
			if (isclr(sc->sc_staid, staid))
				break;
		/*
		 * NOTE(review): if every id is taken the loop falls
		 * through with staid == MWL_MAXSTAID and the setbit
		 * below touches past the intended range -- confirm
		 * callers bound the station count.
		 */
	} else
		staid = aid;
	setbit(sc->sc_staid, staid);
	return staid;
}
4382
static void
delstaid(struct mwl_softc *sc, int staid)
{
	/* Return a station id to the free pool. */
	clrbit(sc->sc_staid, staid);
}
4388
4389/*
4390 * Setup driver-specific state for a newly associated node.
4391 * Note that we're called also on a re-associate, the isnew
4392 * param tells us if this is the first time or not.
4393 */
4394static void
4395mwl_newassoc(struct ieee80211_node *ni, int isnew)
4396{
4397	struct ieee80211vap *vap = ni->ni_vap;
4398        struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4399	struct mwl_node *mn = MWL_NODE(ni);
4400	MWL_HAL_PEERINFO pi;
4401	uint16_t aid;
4402	int error;
4403
4404	aid = IEEE80211_AID(ni->ni_associd);
4405	if (isnew) {
4406		mn->mn_staid = allocstaid(sc, aid);
4407		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4408	} else {
4409		mn = MWL_NODE(ni);
4410		/* XXX reset BA stream? */
4411	}
4412	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4413	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4414	error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4415	if (error != 0) {
4416		DPRINTF(sc, MWL_DEBUG_NODE,
4417		    "%s: error %d creating sta db entry\n",
4418		    __func__, error);
4419		/* XXX how to deal with error? */
4420	}
4421}
4422
4423/*
4424 * Periodically poke the firmware to age out station state
4425 * (power save queues, pending tx aggregates).
4426 */
4427static void
4428mwl_agestations(void *arg)
4429{
4430	struct mwl_softc *sc = arg;
4431
4432	mwl_hal_setkeepalive(sc->sc_mh);
4433	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
4434		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
4435}
4436
4437static const struct mwl_hal_channel *
4438findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4439{
4440	int i;
4441
4442	for (i = 0; i < ci->nchannels; i++) {
4443		const struct mwl_hal_channel *hc = &ci->channels[i];
4444		if (hc->ieee == ieee)
4445			return hc;
4446	}
4447	return NULL;
4448}
4449
4450static int
4451mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
4452	int nchan, struct ieee80211_channel chans[])
4453{
4454	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4455	struct mwl_hal *mh = sc->sc_mh;
4456	const MWL_HAL_CHANNELINFO *ci;
4457	int i;
4458
4459	for (i = 0; i < nchan; i++) {
4460		struct ieee80211_channel *c = &chans[i];
4461		const struct mwl_hal_channel *hc;
4462
4463		if (IEEE80211_IS_CHAN_2GHZ(c)) {
4464			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
4465			    IEEE80211_IS_CHAN_HT40(c) ?
4466				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4467		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4468			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
4469			    IEEE80211_IS_CHAN_HT40(c) ?
4470				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4471		} else {
4472			if_printf(ic->ic_ifp,
4473			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
4474			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
4475			return EINVAL;
4476		}
4477		/*
4478		 * Verify channel has cal data and cap tx power.
4479		 */
4480		hc = findhalchannel(ci, c->ic_ieee);
4481		if (hc != NULL) {
4482			if (c->ic_maxpower > 2*hc->maxTxPow)
4483				c->ic_maxpower = 2*hc->maxTxPow;
4484			goto next;
4485		}
4486		if (IEEE80211_IS_CHAN_HT40(c)) {
4487			/*
4488			 * Look for the extension channel since the
4489			 * hal table only has the primary channel.
4490			 */
4491			hc = findhalchannel(ci, c->ic_extieee);
4492			if (hc != NULL) {
4493				if (c->ic_maxpower > 2*hc->maxTxPow)
4494					c->ic_maxpower = 2*hc->maxTxPow;
4495				goto next;
4496			}
4497		}
4498		if_printf(ic->ic_ifp,
4499		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
4500		    __func__, c->ic_ieee, c->ic_extieee,
4501		    c->ic_freq, c->ic_flags);
4502		return EINVAL;
4503	next:
4504		;
4505	}
4506	return 0;
4507}
4508
4509#define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4510#define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4511
4512static void
4513addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow)
4514{
4515	c->ic_freq = freq;
4516	c->ic_flags = flags;
4517	c->ic_ieee = ieee;
4518	c->ic_minpower = 0;
4519	c->ic_maxpower = 2*txpow;
4520	c->ic_maxregpower = txpow;
4521}
4522
4523static const struct ieee80211_channel *
4524findchannel(const struct ieee80211_channel chans[], int nchans,
4525	int freq, int flags)
4526{
4527	const struct ieee80211_channel *c;
4528	int i;
4529
4530	for (i = 0; i < nchans; i++) {
4531		c = &chans[i];
4532		if (c->ic_freq == freq && c->ic_flags == flags)
4533			return c;
4534	}
4535	return NULL;
4536}
4537
static void
addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	const struct ieee80211_channel *extc;
	const struct mwl_hal_channel *hc;
	int i;

	/* append entries after those already in chans[] */
	c = &chans[*nchans];

	/* NB: search for HT20 base entries, so strip HT from the flags */
	flags &= ~IEEE80211_CHAN_HT;
	for (i = 0; i < ci->nchannels; i++) {
		/*
		 * Each entry defines an HT40 channel pair; find the
		 * extension channel above and then insert the pair.
		 */
		hc = &ci->channels[i];
		extc = findchannel(chans, *nchans, hc->freq+20,
		    flags | IEEE80211_CHAN_HT20);
		if (extc != NULL) {
			if (*nchans >= maxchans)
				break;
			/* HT40U entry: primary channel, extension above */
			addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U,
			    hc->ieee, hc->maxTxPow);
			c->ic_extieee = extc->ic_ieee;
			c++, (*nchans)++;
			if (*nchans >= maxchans)
				break;
			/* HT40D entry: extension channel, primary below */
			addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D,
			    extc->ic_ieee, hc->maxTxPow);
			c->ic_extieee = hc->ieee;
			c++, (*nchans)++;
		}
	}
}
4574
/*
 * Populate chans[] from a hal channel table.  For 2.4GHz (G/HTG)
 * and HT (HTG/HTA) channels additional sibling entries are created
 * so the b-only, g-only, and HT20 variants are all present.
 */
static void
addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	int i;

	/* append entries after those already in chans[] */
	c = &chans[*nchans];

	for (i = 0; i < ci->nchannels; i++) {
		const struct mwl_hal_channel *hc;

		hc = &ci->channels[i];
		if (*nchans >= maxchans)
			break;
		addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
		c++, (*nchans)++;
		if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
			/* g channel have a separate b-only entry */
			if (*nchans >= maxchans)
				break;
			/* NB: duplicate the entry, demote the earlier to B */
			c[0] = c[-1];
			c[-1].ic_flags = IEEE80211_CHAN_B;
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTG) {
			/* HT g channel have a separate g-only entry */
			if (*nchans >= maxchans)
				break;
			/* NB: demote earlier entry to G, new entry is HT20 */
			c[-1].ic_flags = IEEE80211_CHAN_G;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTA) {
			/* HT a channel have a separate a-only entry */
			if (*nchans >= maxchans)
				break;
			/* NB: demote earlier entry to A, new entry is HT20 */
			c[-1].ic_flags = IEEE80211_CHAN_A;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
	}
}
4622
4623static void
4624getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
4625	struct ieee80211_channel chans[])
4626{
4627	const MWL_HAL_CHANNELINFO *ci;
4628
4629	/*
4630	 * Use the channel info from the hal to craft the
4631	 * channel list.  Note that we pass back an unsorted
4632	 * list; the caller is required to sort it for us
4633	 * (if desired).
4634	 */
4635	*nchans = 0;
4636	if (mwl_hal_getchannelinfo(sc->sc_mh,
4637	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
4638		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
4639	if (mwl_hal_getchannelinfo(sc->sc_mh,
4640	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
4641		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
4642	if (mwl_hal_getchannelinfo(sc->sc_mh,
4643	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4644		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
4645	if (mwl_hal_getchannelinfo(sc->sc_mh,
4646	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4647		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
4648}
4649
4650static void
4651mwl_getradiocaps(struct ieee80211com *ic,
4652	int maxchans, int *nchans, struct ieee80211_channel chans[])
4653{
4654	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4655
4656	getchannels(sc, maxchans, nchans, chans);
4657}
4658
4659static int
4660mwl_getchannels(struct mwl_softc *sc)
4661{
4662	struct ifnet *ifp = sc->sc_ifp;
4663	struct ieee80211com *ic = ifp->if_l2com;
4664
4665	/*
4666	 * Use the channel info from the hal to craft the
4667	 * channel list for net80211.  Note that we pass up
4668	 * an unsorted list; net80211 will sort it for us.
4669	 */
4670	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
4671	ic->ic_nchans = 0;
4672	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
4673
4674	ic->ic_regdomain.regdomain = SKU_DEBUG;
4675	ic->ic_regdomain.country = CTRY_DEFAULT;
4676	ic->ic_regdomain.location = 'I';
4677	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
4678	ic->ic_regdomain.isocc[1] = ' ';
4679	return (ic->ic_nchans == 0 ? EIO : 0);
4680}
4681#undef IEEE80211_CHAN_HTA
4682#undef IEEE80211_CHAN_HTG
4683
4684#ifdef MWL_DEBUG
/*
 * Dump an rx descriptor for debugging.  While the descriptor is
 * driver-owned a trailing marker is shown: " *" for a frame the
 * f/w flagged ok, " !" otherwise.
 */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("R[%2u] (DS.V:%p DS.P:%p) NEXT:%08x DATA:%08x RC:%02x%s\n"
	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (const struct mwl_desc *)bf->bf_daddr,
	    le32toh(ds->pPhysNext), le32toh(ds->pPhysBuffData),
	    ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2))	;
}
4701
/*
 * Dump a tx descriptor for debugging; with MWL_TXDESC > 1 the
 * per-fragment length/pointer arrays are dumped as well.
 */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:%p)\n",
	    ds, (const struct mwl_txdesc *)bf->bf_daddr);
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !");
	printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
/* raw hex dump of the whole descriptor; normally compiled out */
{ const uint8_t *cp = (const uint8_t *) ds;
  int i;
  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
	printf("%02x ", cp[i]);
	if (((i+1) % 16) == 0)
		printf("\n");
  }
  printf("\n");
}
#endif
}
4744#endif /* MWL_DEBUG */
4745
4746#if 0
/*
 * Debug aid (currently compiled out): dump every buffer on a tx
 * queue's active list.
 */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		/* sync so we read what the device last wrote */
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4765#endif
4766
/*
 * Once-a-second watchdog: re-arm the callout and decrement the
 * tx timeout counter; when it hits zero report a transmit timeout.
 * NOTE(review): sc_tx_timer is presumably armed by the tx path --
 * not visible here.
 */
static void
mwl_watchdog(void *arg)
{
	struct mwl_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
	/* nothing to do unless a timeout just expired */
	if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
		return;

	ifp = sc->sc_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) {
		/* use a keepalive cmd to probe whether the f/w is hung */
		if (mwl_hal_setkeepalive(sc->sc_mh))
			if_printf(ifp, "transmit timeout (firmware hung?)\n");
		else
			if_printf(ifp, "transmit timeout\n");
#if 0
		mwl_reset(ifp);
mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
#endif
		ifp->if_oerrors++;
		sc->sc_stats.mst_watchdog++;
	}
}
4792
4793#ifdef MWL_DIAGAPI
4794/*
4795 * Diagnostic interface to the HAL.  This is used by various
4796 * tools to do things like retrieve register contents for
4797 * debugging.  The mechanism is intentionally opaque so that
4798 * it can change frequently w/o concern for compatiblity.
4799 */
static int
mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	u_int id = md->md_id & MWL_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = md->md_in_size;
	u_int32_t outsize = md->md_out_size;
	int error = 0;

	if (md->md_id & MWL_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(md->md_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (md->md_id & MWL_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
		/* clamp to the caller's buffer and copy results out */
		if (outsize < md->md_out_size)
			md->md_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, md->md_out_data,
					md->md_out_size);
	} else {
		error = EINVAL;
	}
bad:
	/* NB: reclaim only the buffers this routine allocated */
	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
4854
/*
 * Reset the device via the diag ioctl: optionally reload firmware
 * (md_id == 0), refetch h/w specs, and rebuild dma/tx/rx state.
 */
static int
mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error;

	MWL_LOCK_ASSERT(sc);

	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to load firmware\n");
		return EIO;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		return EIO;
	}
	error = mwl_setupdma(sc);
	if (error != 0) {
		/* NB: mwl_setupdma prints a msg */
		return error;
	}
	/*
	 * Reset tx/rx data structures; after reload we must
	 * re-start the driver's notion of the next xmit/recv.
	 */
	mwl_draintxq(sc);		/* clear pending frames */
	mwl_resettxq(sc);		/* rebuild tx q lists */
	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
	return 0;
}
4885#endif /* MWL_DIAGAPI */
4886
/*
 * Network interface ioctl handler: interface flags, driver
 * statistics, diag/reset (MWL_DIAGAPI), media and address requests.
 */
static int
mwl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, startall;

	switch (cmd) {
	case SIOCSIFFLAGS:
		MWL_LOCK(sc);
		startall = 0;
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call mwl_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			mwl_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid) {
				mwl_init_locked(sc);	/* XXX lose error */
				startall = 1;
			}
		} else
			mwl_stop_locked(ifp, 1);
		MWL_UNLOCK(sc);
		/* NB: kick vaps outside the softc lock */
		if (startall)
			ieee80211_start_all(ic);
		break;
	case SIOCGMVSTATS:
		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.mst_tx_packets = ifp->if_opackets;
		sc->sc_stats.mst_rx_packets = ifp->if_ipackets;
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsistency in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return copyout(&sc->sc_stats,
				ifr->ifr_data, sizeof (sc->sc_stats));
#ifdef MWL_DIAGAPI
	case SIOCGMVDIAG:
		/* XXX check privs */
		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
	case SIOCGMVRESET:
		/* XXX check privs */
		MWL_LOCK(sc);
		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
		MWL_UNLOCK(sc);
		break;
#endif /* MWL_DIAGAPI */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}
4965
4966#ifdef	MWL_DEBUG
4967static int
4968mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4969{
4970	struct mwl_softc *sc = arg1;
4971	int debug, error;
4972
4973	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4974	error = sysctl_handle_int(oidp, &debug, 0, req);
4975	if (error || !req->newptr)
4976		return error;
4977	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4978	sc->sc_debug = debug & 0x00ffffff;
4979	return 0;
4980}
4981#endif /* MWL_DEBUG */
4982
/*
 * Attach device sysctl nodes (debug builds only).
 */
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef	MWL_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	/* seed from the global default, then expose a per-device knob */
	sc->sc_debug = mwl_debug;
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
4996
4997/*
4998 * Announce various information on device/driver attach.
4999 */
static void
mwl_announce(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* hardware revision, firmware version and region code */
	if_printf(ifp, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
		sc->sc_hwspecs.hwVersion,
		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
		sc->sc_hwspecs.regionCode);
	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;

	if (bootverbose) {
		int i;
		/* report the WME AC -> h/w queue mapping */
		for (i = 0; i <= WME_AC_VO; i++) {
			struct mwl_txq *txq = sc->sc_ac2q[i];
			if_printf(ifp, "Use hw queue %u for %s traffic\n",
				txq->qnum, ieee80211_wme_acnames[i]);
		}
	}
	/* note any tunables changed from their defaults */
	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
		if_printf(ifp, "using %u rx descriptors\n", mwl_rxdesc);
	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
		if_printf(ifp, "using %u rx buffers\n", mwl_rxbuf);
	if (bootverbose || mwl_txbuf != MWL_TXBUF)
		if_printf(ifp, "using %u tx buffers\n", mwl_txbuf);
	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
		if_printf(ifp, "multi-bss support\n");
#ifdef MWL_TX_NODROP
	if (bootverbose)
		if_printf(ifp, "no tx drop\n");
#endif
}
5035