/* if_mwl.c revision 283291 */
1/*-
2 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
3 * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer,
11 *    without modification.
12 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
14 *    redistribution must be conditioned upon including a substantially
15 *    similar Disclaimer requirement for further binary redistribution.
16 *
17 * NO WARRANTY
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
21 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
23 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
26 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGES.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/dev/mwl/if_mwl.c 283291 2015-05-22 17:05:21Z jkim $");
33
34/*
35 * Driver for the Marvell 88W8363 Wireless LAN controller.
36 */
37
38#include "opt_inet.h"
39#include "opt_mwl.h"
40#include "opt_wlan.h"
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/sysctl.h>
45#include <sys/mbuf.h>
46#include <sys/malloc.h>
47#include <sys/lock.h>
48#include <sys/mutex.h>
49#include <sys/kernel.h>
50#include <sys/socket.h>
51#include <sys/sockio.h>
52#include <sys/errno.h>
53#include <sys/callout.h>
54#include <sys/bus.h>
55#include <sys/endian.h>
56#include <sys/kthread.h>
57#include <sys/taskqueue.h>
58
59#include <machine/bus.h>
60
61#include <net/if.h>
62#include <net/if_var.h>
63#include <net/if_dl.h>
64#include <net/if_media.h>
65#include <net/if_types.h>
66#include <net/if_arp.h>
67#include <net/ethernet.h>
68#include <net/if_llc.h>
69
70#include <net/bpf.h>
71
72#include <net80211/ieee80211_var.h>
73#include <net80211/ieee80211_regdomain.h>
74
75#ifdef INET
76#include <netinet/in.h>
77#include <netinet/if_ether.h>
78#endif /* INET */
79
80#include <dev/mwl/if_mwlvar.h>
81#include <dev/mwl/mwldiag.h>
82
/*
 * Idiomatic register-field shorthands: MS = mask+shift (extract a
 * field), SM = shift+mask (insert a field).  By convention x names
 * the field mask and x##_S its shift amount.
 */
#define	MS(v,x)	(((v) & x) >> x##_S)
#define	SM(v,x)	(((v) << x##_S) & x)
86
87static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
88		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
89		    const uint8_t [IEEE80211_ADDR_LEN],
90		    const uint8_t [IEEE80211_ADDR_LEN]);
91static void	mwl_vap_delete(struct ieee80211vap *);
92static int	mwl_setupdma(struct mwl_softc *);
93static int	mwl_hal_reset(struct mwl_softc *sc);
94static int	mwl_init_locked(struct mwl_softc *);
95static void	mwl_init(void *);
96static void	mwl_stop_locked(struct ifnet *, int);
97static int	mwl_reset(struct ieee80211vap *, u_long);
98static void	mwl_stop(struct ifnet *, int);
99static void	mwl_start(struct ifnet *);
100static int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
101			const struct ieee80211_bpf_params *);
102static int	mwl_media_change(struct ifnet *);
103static void	mwl_watchdog(void *);
104static int	mwl_ioctl(struct ifnet *, u_long, caddr_t);
105static void	mwl_radar_proc(void *, int);
106static void	mwl_chanswitch_proc(void *, int);
107static void	mwl_bawatchdog_proc(void *, int);
108static int	mwl_key_alloc(struct ieee80211vap *,
109			struct ieee80211_key *,
110			ieee80211_keyix *, ieee80211_keyix *);
111static int	mwl_key_delete(struct ieee80211vap *,
112			const struct ieee80211_key *);
113static int	mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *,
114			const uint8_t mac[IEEE80211_ADDR_LEN]);
115static int	mwl_mode_init(struct mwl_softc *);
116static void	mwl_update_mcast(struct ifnet *);
117static void	mwl_update_promisc(struct ifnet *);
118static void	mwl_updateslot(struct ifnet *);
119static int	mwl_beacon_setup(struct ieee80211vap *);
120static void	mwl_beacon_update(struct ieee80211vap *, int);
121#ifdef MWL_HOST_PS_SUPPORT
122static void	mwl_update_ps(struct ieee80211vap *, int);
123static int	mwl_set_tim(struct ieee80211_node *, int);
124#endif
125static int	mwl_dma_setup(struct mwl_softc *);
126static void	mwl_dma_cleanup(struct mwl_softc *);
127static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
128		    const uint8_t [IEEE80211_ADDR_LEN]);
129static void	mwl_node_cleanup(struct ieee80211_node *);
130static void	mwl_node_drain(struct ieee80211_node *);
131static void	mwl_node_getsignal(const struct ieee80211_node *,
132			int8_t *, int8_t *);
133static void	mwl_node_getmimoinfo(const struct ieee80211_node *,
134			struct ieee80211_mimo_info *);
135static int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
136static void	mwl_rx_proc(void *, int);
137static void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
138static int	mwl_tx_setup(struct mwl_softc *, int, int);
139static int	mwl_wme_update(struct ieee80211com *);
140static void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
141static void	mwl_tx_cleanup(struct mwl_softc *);
142static uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
143static int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
144			     struct mwl_txbuf *, struct mbuf *);
145static void	mwl_tx_proc(void *, int);
146static int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
147static void	mwl_draintxq(struct mwl_softc *);
148static void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
149static int	mwl_recv_action(struct ieee80211_node *,
150			const struct ieee80211_frame *,
151			const uint8_t *, const uint8_t *);
152static int	mwl_addba_request(struct ieee80211_node *,
153			struct ieee80211_tx_ampdu *, int dialogtoken,
154			int baparamset, int batimeout);
155static int	mwl_addba_response(struct ieee80211_node *,
156			struct ieee80211_tx_ampdu *, int status,
157			int baparamset, int batimeout);
158static void	mwl_addba_stop(struct ieee80211_node *,
159			struct ieee80211_tx_ampdu *);
160static int	mwl_startrecv(struct mwl_softc *);
161static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
162			struct ieee80211_channel *);
163static int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
164static void	mwl_scan_start(struct ieee80211com *);
165static void	mwl_scan_end(struct ieee80211com *);
166static void	mwl_set_channel(struct ieee80211com *);
167static int	mwl_peerstadb(struct ieee80211_node *,
168			int aid, int staid, MWL_HAL_PEERINFO *pi);
169static int	mwl_localstadb(struct ieee80211vap *);
170static int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
171static int	allocstaid(struct mwl_softc *sc, int aid);
172static void	delstaid(struct mwl_softc *sc, int staid);
173static void	mwl_newassoc(struct ieee80211_node *, int);
174static void	mwl_agestations(void *);
175static int	mwl_setregdomain(struct ieee80211com *,
176			struct ieee80211_regdomain *, int,
177			struct ieee80211_channel []);
178static void	mwl_getradiocaps(struct ieee80211com *, int, int *,
179			struct ieee80211_channel []);
180static int	mwl_getchannels(struct mwl_softc *);
181
182static void	mwl_sysctlattach(struct mwl_softc *);
183static void	mwl_announce(struct mwl_softc *);
184
/* Driver tunables, exported under hw.mwl.* */
SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");

static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
/* NOTE(review): rxdesc is CTLFLAG_RW while the other knobs are RWTUN
 * (settable as loader tunables) -- confirm this asymmetry is intended. */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
	    0, "rx descriptors allocated");
static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &mwl_rxbuf,
	    0, "rx buffers allocated");
static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RWTUN, &mwl_txbuf,
	    0, "tx buffers allocated");
static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &mwl_txcoalesce,
	    0, "tx buffers to send at once");
static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RWTUN, &mwl_rxquota,
	    0, "max rx buffers to process per interrupt");
static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RWTUN, &mwl_rxdmalow,
	    0, "min free rx buffers before restarting traffic");

#ifdef MWL_DEBUG
/* Debug bitmask (hw.mwl.debug); bits defined by the enum below. */
static	int mwl_debug = 0;
SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RWTUN, &mwl_debug,
	    0, "control debugging printfs");
enum {
	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
	MWL_DEBUG_ANY		= 0xffffffff
};
/* true when wh points at a beacon frame */
#define	IS_BEACON(wh) \
    ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
/* dump rx frames when RECV debugging is on (beacons only with RECV_ALL),
 * or when the interface has both IFF_DEBUG and IFF_LINK2 set */
#define	IFF_DUMPPKTS_RECV(sc, wh) \
    (((sc->sc_debug & MWL_DEBUG_RECV) && \
      ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh))) || \
     (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#define	IFF_DUMPPKTS_XMIT(sc) \
	((sc->sc_debug & MWL_DEBUG_XMIT) || \
	 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#define	DPRINTF(sc, m, fmt, ...) do {				\
	if (sc->sc_debug & (m))					\
		printf(fmt, __VA_ARGS__);			\
} while (0)
#define	KEYPRINTF(sc, hk, mac) do {				\
	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
		mwl_keyprint(sc, __func__, hk, mac);		\
} while (0)
static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
#else
/* non-debug build: debug printfs compile away to no-ops */
#define	IFF_DUMPPKTS_RECV(sc, wh) \
	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#define	IFF_DUMPPKTS_XMIT(sc) \
	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#define	DPRINTF(sc, m, fmt, ...) do {				\
	(void) sc;						\
} while (0)
#define	KEYPRINTF(sc, k, mac) do {				\
	(void) sc;						\
} while (0)
#endif
260
261static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");
262
/*
 * Each packet has fixed front matter: a 2-byte length
 * of the payload, followed by a 4-address 802.11 header
 * (regardless of the actual header and always w/o any
 * QoS header).  The payload then follows.
 */
struct mwltxrec {
	uint16_t fwlen;				/* payload length told to the f/w */
	struct ieee80211_frame_addr4 wh;	/* always the 4-address form */
} __packed;
273
274/*
275 * Read/Write shorthands for accesses to BAR 0.  Note
276 * that all BAR 1 operations are done in the "hal" and
277 * there should be no reference to them here.
278 */
279#ifdef MWL_DEBUG
/* 32-bit register read from BAR 0 at byte offset off (debug-only). */
static __inline uint32_t
RD4(struct mwl_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
}
285#endif
286
/* 32-bit register write to BAR 0 at byte offset off. */
static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}
292
/*
 * Device attach: allocate the ifnet, bring up the HAL, load
 * firmware, set up DMA and the h/w tx queues, then register
 * the driver's methods with net80211.  Returns 0 on success
 * or an errno.  On failure the bad2/bad1/bad labels unwind
 * exactly what was acquired, in reverse order.
 */
int
mwl_attach(uint16_t devid, struct mwl_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct mwl_hal *mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "cannot if_alloc()\n");
		return ENOSPC;
	}
	ic = ifp->if_l2com;

	/*
	 * Setup the RX free list lock early, so it can be consistently
	 * removed.
	 */
	MWL_RXFREE_INIT(sc);

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	mh = mwl_hal_attach(sc->sc_dev, devid,
	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
	if (mh == NULL) {
		if_printf(ifp, "unable to attach HAL\n");
		error = EIO;
		goto bad;
	}
	sc->sc_mh = mh;
	/*
	 * Load firmware so we can get setup.  We arbitrarily
	 * pick station firmware; we'll re-load firmware as
	 * needed so setting up the wrong mode isn't a big deal.
	 */
	if (mwl_hal_fwload(mh, NULL) != 0) {
		if_printf(ifp, "unable to setup builtin firmware\n");
		error = EIO;
		goto bad1;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		if_printf(ifp, "unable to fetch h/w specs\n");
		error = EIO;
		goto bad1;
	}
	error = mwl_getchannels(sc);
	if (error != 0)
		goto bad1;

	sc->sc_txantenna = 0;		/* h/w default */
	sc->sc_rxantenna = 0;		/* h/w default */
	sc->sc_invalid = 0;		/* ready to go, enable int handling */
	sc->sc_ageinterval = MWL_AGEINTERVAL;

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 * We immediately push the information to the firmware
	 * as otherwise it gets upset.
	 */
	error = mwl_dma_setup(sc);
	if (error != 0) {
		if_printf(ifp, "failed to setup descriptors: %d\n", error);
		goto bad1;
	}
	error = mwl_setupdma(sc);	/* push to firmware */
	if (error != 0)			/* NB: mwl_setupdma prints msg */
		goto bad1;

	callout_init(&sc->sc_timer, 1);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);

	/* NOTE(review): taskqueue_create(M_NOWAIT) may return NULL but the
	 * result is not checked before use -- confirm this is acceptable. */
	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	/* deferred work run from the taskqueue, scheduled by mwl_intr */
	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);

	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = mwl_start;
	ifp->if_ioctl = mwl_ioctl;
	ifp->if_init = mwl_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
#if 0
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
#endif
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_WDS		/* WDS supported */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WME		/* WME/WMM supported */
		| IEEE80211_C_BURST		/* xmit bursting supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
		| IEEE80211_C_DFS		/* DFS supported */
		;

	ic->ic_htcaps =
		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
#if MWL_AGGR_SIZE == 7935
		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#if 0
		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
#endif
		/* s/w capabilities */
		| IEEE80211_HTC_HT		/* HT operation */
		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
		| IEEE80211_HTC_SMPS		/* SMPS available */
		;

	/*
	 * Mark h/w crypto support.
	 * XXX no way to query h/w support.
	 */
	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
			  |  IEEE80211_CRYPTO_AES_CCM
			  |  IEEE80211_CRYPTO_TKIP
			  |  IEEE80211_CRYPTO_TKIPMIC
			  ;
	/*
	 * Transmit requires space in the packet for a special
	 * format transmit record and optional padding between
	 * this record and the payload.  Ask the net80211 layer
	 * to arrange this when encapsulating packets so we can
	 * add it efficiently.
	 */
	ic->ic_headroom = sizeof(struct mwltxrec) -
		sizeof(struct ieee80211_frame);

	/* call MI attach routine. */
	ieee80211_ifattach(ic, sc->sc_hwspecs.macAddr);
	ic->ic_setregdomain = mwl_setregdomain;
	ic->ic_getradiocaps = mwl_getradiocaps;
	/* override default methods */
	ic->ic_raw_xmit = mwl_raw_xmit;
	ic->ic_newassoc = mwl_newassoc;
	ic->ic_updateslot = mwl_updateslot;
	ic->ic_update_mcast = mwl_update_mcast;
	ic->ic_update_promisc = mwl_update_promisc;
	ic->ic_wme.wme_update = mwl_wme_update;

	/* save the stack's methods we wrap so ours can chain to them */
	ic->ic_node_alloc = mwl_node_alloc;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = mwl_node_cleanup;
	sc->sc_node_drain = ic->ic_node_drain;
	ic->ic_node_drain = mwl_node_drain;
	ic->ic_node_getsignal = mwl_node_getsignal;
	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;

	ic->ic_scan_start = mwl_scan_start;
	ic->ic_scan_end = mwl_scan_end;
	ic->ic_set_channel = mwl_set_channel;

	sc->sc_recv_action = ic->ic_recv_action;
	ic->ic_recv_action = mwl_recv_action;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = mwl_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = mwl_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = mwl_addba_stop;

	ic->ic_vap_create = mwl_vap_create;
	ic->ic_vap_delete = mwl_vap_delete;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		MWL_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		MWL_RX_RADIOTAP_PRESENT);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	mwl_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	mwl_announce(sc);
	return 0;
bad2:
	mwl_dma_cleanup(sc);
bad1:
	mwl_hal_detach(mh);
bad:
	MWL_RXFREE_DESTROY(sc);
	if_free(ifp);
	sc->sc_invalid = 1;	/* tell mwl_intr to ignore the h/w */
	return error;
}
542
/*
 * Reverse of mwl_attach.  The teardown order is significant
 * (see the comment in the body); in particular the 802.11
 * layer is detached first so its callbacks into the driver
 * still find valid state.  Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	mwl_stop(ifp, 1);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	callout_drain(&sc->sc_watchdog);
	mwl_dma_cleanup(sc);
	MWL_RXFREE_DESTROY(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	if_free(ifp);

	return 0;
}
575
/*
 * MAC address handling for multiple BSS on the same radio.
 * The first vap uses the MAC address from the EEPROM.  For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
 */
static void
assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 32; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		/* NOTE(review): if all 32 slots are in use, i == 32 here and
		 * the shifts below (i<<2, 1<<i) overflow -- confirm callers
		 * cannot create that many vaps. */
		if (i != 0)
			mac[0] |= (i << 2)|0x2;	/* U/L bit + index in bits 2-7 */
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	if (i == 0)
		sc->sc_nbssid0++;	/* count vaps sharing the base address */
}
600
601static void
602reclaim_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN])
603{
604	int i = mac[0] >> 2;
605	if (i != 0 || --sc->sc_nbssid0 == 0)
606		sc->sc_bssidmask &= ~(1<<i);
607}
608
/*
 * Create a vap of the requested opmode.  AP/MBSS/STA vaps get a
 * MAC address assigned (unless the caller supplied one) and a
 * backing HAL vap; WDS vaps piggyback on an existing AP vap;
 * monitor vaps need no HAL state.  IBSS/AHDEMO are unsupported.
 * Returns the new vap or NULL, undoing the address assignment
 * on failure.
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		return NULL;
	}

	mvp = (struct mwl_vap *) malloc(sizeof(struct mwl_vap),
	    M_80211_VAP, M_NOWAIT | M_ZERO);
	if (mvp == NULL) {
		if (hvap != NULL) {
			mwl_hal_delvap(hvap);
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
		}
		/* XXX msg */
		return NULL;
	}
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
	if (hvap != NULL)
		IEEE80211_ADDR_COPY(vap->iv_myaddr, mac);
	/* override with driver methods */
	mvp->mv_newstate = vap->iv_newstate;	/* saved so ours can chain */
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status);

	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}
748
/*
 * Destroy a vap created by mwl_vap_create: detach it from
 * net80211, release its HAL vap and MAC address (AP/MBSS/STA
 * only), flush any of its frames from the tx queues and free
 * the driver state.  Interrupts are masked around the teardown
 * when the device is running.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ifnet *parent = vap->iv_ic->ic_ifp;
	struct mwl_softc *sc = parent->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (parent->if_drv_flags & IFF_DRV_RUNNING) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps--;
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	if (parent->if_drv_flags & IFF_DRV_RUNNING)
		mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable interrupts */
}
790
791void
792mwl_suspend(struct mwl_softc *sc)
793{
794	struct ifnet *ifp = sc->sc_ifp;
795
796	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
797		__func__, ifp->if_flags);
798
799	mwl_stop(ifp, 1);
800}
801
802void
803mwl_resume(struct mwl_softc *sc)
804{
805	struct ifnet *ifp = sc->sc_ifp;
806
807	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
808		__func__, ifp->if_flags);
809
810	if (ifp->if_flags & IFF_UP)
811		mwl_init(sc);
812}
813
814void
815mwl_shutdown(void *arg)
816{
817	struct mwl_softc *sc = arg;
818
819	mwl_stop(sc->sc_ifp, 1);
820}
821
/*
 * Interrupt handler.  Most of the actual processing is deferred
 * to the taskqueue; here we just read+clear the ISR and schedule
 * the appropriate tasks.
 */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;				/* nothing to do */
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;				/* nothing to do */
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}
880
881static void
882mwl_radar_proc(void *arg, int pending)
883{
884	struct mwl_softc *sc = arg;
885	struct ifnet *ifp = sc->sc_ifp;
886	struct ieee80211com *ic = ifp->if_l2com;
887
888	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
889	    __func__, pending);
890
891	sc->sc_stats.mst_radardetect++;
892	/* XXX stop h/w BA streams? */
893
894	IEEE80211_LOCK(ic);
895	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
896	IEEE80211_UNLOCK(ic);
897}
898
899static void
900mwl_chanswitch_proc(void *arg, int pending)
901{
902	struct mwl_softc *sc = arg;
903	struct ifnet *ifp = sc->sc_ifp;
904	struct ieee80211com *ic = ifp->if_l2com;
905
906	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
907	    __func__, pending);
908
909	IEEE80211_LOCK(ic);
910	sc->sc_csapending = 0;
911	ieee80211_csa_completeswitch(ic);
912	IEEE80211_UNLOCK(ic);
913}
914
/*
 * Tear down a BA stream flagged by the f/w watchdog.
 * sp->data[0] holds the node and sp->data[1] the aggregation
 * state passed to ieee80211_ampdu_stop (set when the stream
 * was created -- confirm against the bastream setup code).
 */
static void
mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
{
	struct ieee80211_node *ni = sp->data[0];

	/* send DELBA and drop the stream */
	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
}
923
/*
 * Deferred (taskqueue) handler for the BA watchdog interrupt.
 * Queries the firmware for a bitmap identifying stalled BA
 * streams and tears them down.  NOTE(review): 0xff appears to
 * mean "all streams" and 0xaa appears to be a no-op sentinel
 * from the firmware — confirm against the f/w interface spec.
 */
static void
mwl_bawatchdog_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_BASTREAM *sp;
	uint8_t bitmap, n;

	sc->sc_stats.mst_bawatchdog++;

	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: could not get bitmap\n", __func__);
		sc->sc_stats.mst_bawatchdog_failed++;
		return;
	}
	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
	if (bitmap == 0xff) {
		n = 0;
		/* disable all ba streams */
		/* NB: bitmap is reused here as the stream index (0..7) */
		for (bitmap = 0; bitmap < 8; bitmap++) {
			sp = mwl_hal_bastream_lookup(mh, bitmap);
			if (sp != NULL) {
				mwl_bawatchdog(sp);
				n++;
			}
		}
		if (n == 0) {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA streams found\n", __func__);
			sc->sc_stats.mst_bawatchdog_empty++;
		}
	} else if (bitmap != 0xaa) {
		/* disable a single ba stream */
		sp = mwl_hal_bastream_lookup(mh, bitmap);
		if (sp != NULL) {
			mwl_bawatchdog(sp);
		} else {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA stream %d\n", __func__, bitmap);
			sc->sc_stats.mst_bawatchdog_notfound++;
		}
	}
}
968
969/*
970 * Convert net80211 channel to a HAL channel.
971 */
static void
mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
{
	hc->channel = chan->ic_ieee;

	/*
	 * Zero all flag bits at once by punning the flags struct as a
	 * 32-bit word.  NOTE(review): this assumes channelFlags is
	 * exactly 4 bytes and suitably aligned — confirm in the HAL
	 * header if the structure ever changes.
	 */
	*(uint32_t *)&hc->channelFlags = 0;
	if (IEEE80211_IS_CHAN_2GHZ(chan))
		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
	else if (IEEE80211_IS_CHAN_5GHZ(chan))
		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
	if (IEEE80211_IS_CHAN_HT40(chan)) {
		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
		/* record where the extension channel sits for HT40 */
		if (IEEE80211_IS_CHAN_HT40U(chan))
			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
		else
			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
	} else
		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
	/* XXX 10MHz channels */
}
992
993/*
994 * Inform firmware of our tx/rx dma setup.  The BAR 0
995 * writes below are for compatibility with older firmware.
996 * For current firmware we send this information with a
997 * cmd block via mwl_hal_sethwdma.
998 */
static int
mwl_setupdma(struct mwl_softc *sc)
{
	int error, i;

	/* rx descriptor ring base; write both read & write pointers */
	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);

	/* tx WCB ring bases; ACK queues are managed by the f/w itself */
	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
		struct mwl_txq *txq = &sc->sc_txq[i];
		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
	}
	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;

	/* current firmware gets the same info via a command block */
	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to setup tx/rx dma; hal status %u\n", error);
		/* XXX */
	}
	return error;
}
1024
1025/*
1026 * Inform firmware of tx rate parameters.
1027 * Called after a channel change.
1028 */
static int
mwl_setcurchanrates(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	const struct ieee80211_rateset *rs;
	MWL_HAL_TXRATE rates;

	memset(&rates, 0, sizeof(rates));
	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
	/* rate used to send management frames: lowest supported rate */
	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
	/* rate used to send multicast frames: same as mgmt */
	rates.McastRate = rates.MgtRate;

	/* data frames stay under firmware rate control */
	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
}
1046
1047/*
1048 * Inform firmware of tx rate parameters.  Called whenever
1049 * user-settable params change and after a channel change.
1050 */
static int
mwl_setrates(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ieee80211_node *ni = vap->iv_bss;
	const struct ieee80211_txparam *tp = ni->ni_txparms;
	MWL_HAL_TXRATE rates;

	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));

	/*
	 * Update the h/w rate map.
	 * NB: 0x80 for MCS is passed through unchanged
	 */
	memset(&rates, 0, sizeof(rates));
	/* rate used to send management frames */
	rates.MgtRate = tp->mgmtrate;
	/* rate used to send multicast frames */
	rates.McastRate = tp->mcastrate;

	/* while here calculate EAPOL fixed rate cookie */
	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));

	/* fixed mode only when the user pinned a unicast rate */
	return mwl_hal_settxrate(mvp->mv_hvap,
	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
		RATE_FIXED : RATE_AUTO, &rates);
}
1078
1079/*
1080 * Setup a fixed xmit rate cookie for EAPOL frames.
1081 */
static void
mwl_seteapolformat(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ieee80211_node *ni = vap->iv_bss;
	enum ieee80211_phymode mode;
	uint8_t rate;

	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));

	mode = ieee80211_chan2mode(ni->ni_chan);
	/*
	 * Use legacy rates when operating a mixed HT+non-HT bss.
	 * NB: this may violate POLA for sta and wds vap's.
	 */
	if (mode == IEEE80211_MODE_11NA &&
	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
	else if (mode == IEEE80211_MODE_11NG &&
	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
	else
		rate = vap->iv_txparms[mode].mgmtrate;

	/* cookie is stored little-endian for direct use in tx descriptors */
	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
}
1108
1109/*
1110 * Map SKU+country code to region code for radar bin'ing.
1111 */
1112static int
1113mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1114{
1115	switch (rd->regdomain) {
1116	case SKU_FCC:
1117	case SKU_FCC3:
1118		return DOMAIN_CODE_FCC;
1119	case SKU_CA:
1120		return DOMAIN_CODE_IC;
1121	case SKU_ETSI:
1122	case SKU_ETSI2:
1123	case SKU_ETSI3:
1124		if (rd->country == CTRY_SPAIN)
1125			return DOMAIN_CODE_SPAIN;
1126		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1127			return DOMAIN_CODE_FRANCE;
1128		/* XXX force 1.3.1 radar type */
1129		return DOMAIN_CODE_ETSI_131;
1130	case SKU_JAPAN:
1131		return DOMAIN_CODE_MKK;
1132	case SKU_ROW:
1133		return DOMAIN_CODE_DGT;	/* Taiwan */
1134	case SKU_APAC:
1135	case SKU_APAC2:
1136	case SKU_APAC3:
1137		return DOMAIN_CODE_AUS;	/* Australia */
1138	}
1139	/* XXX KOREA? */
1140	return DOMAIN_CODE_FCC;			/* XXX? */
1141}
1142
/*
 * Push vap-independent radio/MAC state to the firmware after a
 * (re)start.  NB: always returns 1 (success); the HAL calls'
 * return values are not checked here.
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	return 1;
}
1167
/*
 * Bring the interface up with the softc lock held: stop any
 * previous state, reset the firmware, start recv, and enable
 * interrupts.  Returns 0 on success or an errno.
 */
static int
mwl_init_locked(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop_locked(ifp, 0);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		if_printf(ifp, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	/* mark running before unmasking interrupts */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	mwl_hal_intrset(mh, sc->sc_imask);
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);

	return 0;
}
1228
/*
 * Public init entry point: take the softc lock, do the real work
 * in mwl_init_locked, then kick all vaps if it succeeded.
 */
static void
mwl_init(void *arg)
{
	struct mwl_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	MWL_LOCK(sc);
	error = mwl_init_locked(sc);
	MWL_UNLOCK(sc);

	if (error == 0)
		ieee80211_start_all(ic);	/* start all vap's */
}
1247
/*
 * Shut down the interface with the softc lock held.  The
 * 'disable' argument is currently unused.  Safe to call when
 * the interface is already stopped.
 */
static void
mwl_stop_locked(struct ifnet *ifp, int disable)
{
	struct mwl_softc *sc = ifp->if_softc;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	MWL_LOCK_ASSERT(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Shutdown the hardware and driver.
		 */
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		callout_stop(&sc->sc_watchdog);
		sc->sc_tx_timer = 0;
		mwl_draintxq(sc);
	}
}
1267
/*
 * Public stop entry point: wrap mwl_stop_locked with the
 * softc lock.
 */
static void
mwl_stop(struct ifnet *ifp, int disable)
{
	struct mwl_softc *sc = ifp->if_softc;

	MWL_LOCK(sc);
	mwl_stop_locked(ifp, disable);
	MWL_UNLOCK(sc);
}
1277
/*
 * Re-push per-vap state to the firmware, typically after a
 * channel change or a hardware reset.  Returns 0 or an errno
 * from beacon setup.
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons for beacon-capable operating modes */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_MBSS ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}
1307
1308/*
1309 * Reset the hardware w/o losing operational state.
 * Used to reset or reload hardware state for a vap.
1311 */
static int
mwl_reset(struct ieee80211vap *vap, u_long cmd)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	int error = 0;

	if (hvap != NULL) {			/* WDS, MONITOR, etc. */
		struct ieee80211com *ic = vap->iv_ic;
		struct ifnet *ifp = ic->ic_ifp;
		struct mwl_softc *sc = ifp->if_softc;
		struct mwl_hal *mh = sc->sc_mh;

		/* XXX handle DWDS sta vap change */
		/* XXX do we need to disable interrupts? */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
		error = mwl_reset_vap(vap, vap->iv_state);
		mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable */
	}
	return error;
}
1332
1333/*
1334 * Allocate a tx buffer for sending a frame.  The
1335 * packet is assumed to have the WME AC stored so
1336 * we can use it to select the appropriate h/w queue.
1337 */
/*
 * Pop a tx buffer from the queue's free list (under the txq
 * lock).  Returns NULL when the queue is exhausted.
 */
static struct mwl_txbuf *
mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;

	/*
	 * Grab a TX buffer and associated resources.
	 */
	MWL_TXQ_LOCK(txq);
	bf = STAILQ_FIRST(&txq->free);
	if (bf != NULL) {
		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
		txq->nfree--;
	}
	MWL_TXQ_UNLOCK(txq);
	if (bf == NULL)
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
	return bf;
}
1358
1359/*
1360 * Return a tx buffer to the queue it came from.  Note there
1361 * are two cases because we must preserve the order of buffers
1362 * as it reflects the fixed order of descriptors in memory
1363 * (the firmware pre-fetches descriptors so we cannot reorder).
1364 */
/* Return an unused tx buffer to the FRONT of the free list. */
static void
mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
{
	bf->bf_m = NULL;
	bf->bf_node = NULL;
	MWL_TXQ_LOCK(txq);
	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
	txq->nfree++;
	MWL_TXQ_UNLOCK(txq);
}
1375
/* Return a completed tx buffer to the TAIL of the free list. */
static void
mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
{
	bf->bf_m = NULL;
	bf->bf_node = NULL;
	MWL_TXQ_LOCK(txq);
	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
	txq->nfree++;
	MWL_TXQ_UNLOCK(txq);
}
1386
/*
 * if_start handler: drain the interface send queue, mapping each
 * frame to the h/w queue chosen by the 802.11 layer's WME
 * classification, and periodically poke the firmware to fetch
 * the queued descriptors.
 */
static void
mwl_start(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	nqueued = 0;
	for (;;) {
		bf = NULL;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		/*
		 * Grab the node for the destination.
		 * NB: the node reference was stashed in rcvif by the
		 * 802.11 layer; we now own it and must free it on error.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			sc->sc_stats.mst_tx_qstop++;
			/* XXX blocks other traffic */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 * NB: on failure mwl_tx_start has consumed the mbuf,
		 * so only the buffer and node ref are recycled here.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host. So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1469
/*
 * Transmit a raw (caller-formed) 802.11 frame, e.g. management
 * frames injected via bpf.  Consumes the mbuf and node reference
 * on all paths.  Returns 0 or an errno.
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
		ieee80211_free_node(ni);
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		/* XXX blocks other traffic */
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		ieee80211_free_node(ni);
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 * NB: mwl_tx_start consumes the mbuf even on failure.
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		mwl_puttxbuf_head(txq, bf);

		ieee80211_free_node(ni);
		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host. So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1529
/*
 * ifmedia change callback: let net80211 process the change and
 * absorb ENETRESET locally since only the fixed tx rate can
 * change, which needs no full reset.
 */
static int
mwl_media_change(struct ifnet *ifp)
{
	struct ieee80211vap *vap = ifp->if_softc;
	int error;

	error = ieee80211_media_change(ifp);
	/* NB: only the fixed rate can change and that doesn't need a reset */
	if (error == ENETRESET) {
		mwl_setrates(vap);
		error = 0;
	}
	return error;
}
1544
1545#ifdef MWL_DEBUG
/*
 * Debug helper: dump a HAL key structure (cipher, key bytes,
 * TKIP MICs, flags) to the console.  NOTE(review): indexing
 * ciphers[] with keyTypeId assumes the KEY_TYPE_ID_* values are
 * 0..2 in WEP/TKIP/AES order — confirm against the HAL header.
 */
static void
mwl_keyprint(struct mwl_softc *sc, const char *tag,
	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	static const char *ciphers[] = {
		"WEP",
		"TKIP",
		"AES-CCM",
	};
	int i, n;

	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
	for (i = 0, n = hk->keyLen; i < n; i++)
		printf(" %02x", hk->key.aes[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
		printf(" %s", "rxmic");
		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
			printf(" %02x", hk->key.tkip.rxMic[i]);
		printf(" txmic");
		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
			printf(" %02x", hk->key.tkip.txMic[i]);
	}
	printf(" flags 0x%x\n", hk->keyFlags);
}
1571#endif
1572
1573/*
1574 * Allocate a key cache slot for a unicast key.  The
1575 * firmware handles key allocation and every station is
1576 * guaranteed key space so we are always successful.
1577 */
/*
 * net80211 key-allocation callback.  Global/WEP keys map to
 * their net80211 index; for unicast keys the firmware does the
 * allocation so index 0 is handed back.  Returns 1 on success,
 * 0 on a bogus group-key request.
 */
static int
mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
{
	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;

	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
		if (!(&vap->iv_nw_keys[0] <= k &&
		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
			/* should not happen */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
				"%s: bogus group key\n", __func__);
			return 0;
		}
		/* give the caller what they requested */
		*keyix = *rxkeyix = k - vap->iv_nw_keys;
	} else {
		/*
		 * Firmware handles key allocation.
		 */
		*keyix = *rxkeyix = 0;
	}
	return 1;
}
1603
1604/*
1605 * Delete a key entry allocated by mwl_key_alloc.
1606 */
static int
mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	MWL_HAL_KEYVAL hk;
	/* keys are reset against the broadcast address */
	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* WDS vaps borrow the parent AP's hal vap */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}

	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
	    __func__, k->wk_keyix);

	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (k->wk_cipher->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
}
1650
1651static __inline int
1652addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1653{
1654	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1655		if (k->wk_flags & IEEE80211_KEY_XMIT)
1656			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1657		if (k->wk_flags & IEEE80211_KEY_RECV)
1658			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1659		return 1;
1660	} else
1661		return 0;
1662}
1663
1664/*
1665 * Set the key cache contents for the specified key.  Key cache
1666 * slot(s) must already have been allocated by mwl_key_alloc.
1667 */
static int
mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
	const uint8_t mac[IEEE80211_ADDR_LEN])
{
#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
#define	IEEE80211_IS_STATICKEY(k) \
	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
	 (GRPXMIT|IEEE80211_KEY_RECV))
	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	const uint8_t *macaddr;
	MWL_HAL_KEYVAL hk;

	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
		("s/w crypto set?"));

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* WDS vaps borrow the parent AP's hal vap */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}
	/* translate the net80211 key into the HAL's representation */
	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (cip->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		hk.keyLen = k->wk_keylen;
		if (k->wk_keyix == vap->iv_def_txkey)
			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
		if (!IEEE80211_IS_STATICKEY(k)) {
			/* NB: WEP is never used for the PTK */
			(void) addgroupflags(&hk, k);
		}
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
		/* key material is key + tx/rx MIC keys, copied below */
		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		hk.keyLen = k->wk_keylen;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	/*
	 * NB: tkip mic keys get copied here too; the layout
	 *     just happens to match that in ieee80211_key.
	 */
	memcpy(hk.key.aes, k->wk_key, hk.keyLen);

	/*
	 * Locate address of sta db entry for writing key;
	 * the convention unfortunately is somewhat different
	 * than how net80211, hostapd, and wpa_supplicant think.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		/*
		 * NB: keys plumbed before the sta reaches AUTH state
		 * will be discarded or written to the wrong sta db
		 * entry because iv_bss is meaningless.  This is ok
		 * (right now) because we handle deferred plumbing of
		 * WEP keys when the sta reaches AUTH state.
		 */
		macaddr = vap->iv_bss->ni_bssid;
		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
			/* XXX plumb to local sta db too for static key wep */
			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
		}
	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
	    vap->iv_state != IEEE80211_S_RUN) {
		/*
		 * Prior to RUN state a WDS vap will not have its BSS
		 * node setup so we will plumb the key to the wrong mac
		 * address (it'll be our local address).  Workaround
		 * this for the moment by grabbing the correct address.
		 */
		macaddr = vap->iv_des_bssid;
	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
		macaddr = vap->iv_myaddr;
	else
		macaddr = mac;
	KEYPRINTF(sc, &hk, macaddr);
	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
#undef IEEE80211_IS_STATICKEY
#undef GRPXMIT
}
1772
1773/* unaligned little endian access */
1774#define LE_READ_2(p)				\
1775	((uint16_t)				\
1776	 ((((const uint8_t *)(p))[0]      ) |	\
1777	  (((const uint8_t *)(p))[1] <<  8)))
1778#define LE_READ_4(p)				\
1779	((uint32_t)				\
1780	 ((((const uint8_t *)(p))[0]      ) |	\
1781	  (((const uint8_t *)(p))[1] <<  8) |	\
1782	  (((const uint8_t *)(p))[2] << 16) |	\
1783	  (((const uint8_t *)(p))[3] << 24)))
1784
1785/*
1786 * Set the multicast filter contents into the hardware.
1787 * XXX f/w has no support; just defer to the os.
1788 */
static void
mwl_setmcastfilter(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
#if 0
	/*
	 * Disabled alternative: walk the ethernet multicast list and
	 * push up to MWL_HAL_MCAST_MAX exact-match addresses to the
	 * firmware, falling back to ALLMULTI on overflow or ranges.
	 */
	struct ether_multi *enm;
	struct ether_multistep estep;
	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
	uint8_t *mp;
	int nmc;

	mp = macs;
	nmc = 0;
	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
	while (enm != NULL) {
		/* XXX Punt on ranges. */
		if (nmc == MWL_HAL_MCAST_MAX ||
		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
			ifp->if_flags |= IFF_ALLMULTI;
			return;
		}
		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
		mp += IEEE80211_ADDR_LEN, nmc++;
		ETHER_NEXT_MULTI(estep, enm);
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
#else
	/* XXX no mcast filter support; we get everything */
	ifp->if_flags |= IFF_ALLMULTI;
#endif
}
1821
/*
 * Push promiscuous-mode and multicast filter state to the
 * hardware.  Always returns 0.
 */
static int
mwl_mode_init(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_hal *mh = sc->sc_mh;

	/*
	 * NB: Ignore promisc in hostap mode; it's set by the
	 * bridge.  This is wrong but we have no way to
	 * identify internal requests (from the bridge)
	 * versus external requests such as for tcpdump.
	 */
	mwl_hal_setpromisc(mh, (ifp->if_flags & IFF_PROMISC) &&
	    ic->ic_opmode != IEEE80211_M_HOSTAP);
	mwl_setmcastfilter(sc);

	return 0;
}
1841
1842/*
1843 * Callback from the 802.11 layer after a multicast state change.
1844 */
static void
mwl_update_mcast(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;

	/* re-push the (stub) multicast filter to the hardware */
	mwl_setmcastfilter(sc);
}
1852
1853/*
1854 * Callback from the 802.11 layer after a promiscuous mode change.
1855 * Note this interface does not check the operating mode as this
1856 * is an internal callback and we are expected to honor the current
1857 * state (e.g. this is used for setting the interface in promiscuous
1858 * mode when operating in hostap mode to do ACS).
1859 */
static void
mwl_update_promisc(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;

	/* mirror the interface's promiscuous flag into the firmware */
	mwl_hal_setpromisc(sc->sc_mh, (ifp->if_flags & IFF_PROMISC) != 0);
}
1867
1868/*
1869 * Callback from the 802.11 layer to update the slot time
1870 * based on the current setting.  We use it to notify the
1871 * firmware of ERP changes and the f/w takes care of things
1872 * like slot time and preamble.
1873 */
static void
mwl_updateslot(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_hal *mh = sc->sc_mh;
	int prot;

	/* NB: can be called early; suppress needless cmds */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/*
	 * Calculate the ERP flags.  The firmware will use
	 * this to carry out the appropriate measures.
	 */
	prot = 0;
	/* ERP only applies on 2.4GHz (11g-capable) channels */
	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
		if (ic->ic_flags & IEEE80211_F_USEPROT)
			prot |= IEEE80211_ERP_USE_PROTECTION;
		if (ic->ic_flags & IEEE80211_F_USEBARKER)
			prot |= IEEE80211_ERP_LONG_PREAMBLE;
	}

	DPRINTF(sc, MWL_DEBUG_RESET,
	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
	    ic->ic_flags);

	mwl_hal_setgprot(mh, prot);
}
1908
1909/*
1910 * Setup the beacon frame.
1911 */
static int
mwl_beacon_setup(struct ieee80211vap *vap)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211_node *ni = vap->iv_bss;
	struct ieee80211_beacon_offsets bo;
	struct mbuf *m;

	/* build the beacon with net80211, hand the bytes to f/w, free it */
	m = ieee80211_beacon_alloc(ni, &bo);
	if (m == NULL)
		return ENOBUFS;
	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
	m_free(m);

	return 0;
}
1928
1929/*
1930 * Update the beacon frame in response to a change.
1931 */
static void
mwl_beacon_update(struct ieee80211vap *vap, int item)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	KASSERT(hvap != NULL, ("no beacon"));
	switch (item) {
	case IEEE80211_BEACON_ERP:
		/* ERP changes also affect slot/preamble settings */
		mwl_updateslot(ic->ic_ifp);
		break;
	case IEEE80211_BEACON_HTINFO:
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		break;
	case IEEE80211_BEACON_CAPS:
	case IEEE80211_BEACON_WME:
	case IEEE80211_BEACON_APPIE:
	case IEEE80211_BEACON_CSA:
		/* handled by rebuilding the frame below */
		break;
	case IEEE80211_BEACON_TIM:
		/* NB: firmware always forms TIM */
		return;
	}
	/* XXX retain beacon frame and update */
	mwl_beacon_setup(vap);
}
1959
/*
 * bus_dmamap_load callback: stash the (single) segment's bus
 * address into the caller-provided bus_addr_t.
 */
static void
mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	KASSERT(error == 0, ("error %u on bus_dma callback", error));
	*paddr = segs->ds_addr;
}
1967
1968#ifdef MWL_HOST_PS_SUPPORT
1969/*
1970 * Handle power save station occupancy changes.
1971 */
static void
mwl_update_ps(struct ieee80211vap *vap, int nsta)
{
	struct mwl_vap *mvp = MWL_VAP(vap);

	/* only notify f/w on transitions into or out of zero ps stations */
	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
	mvp->mv_last_ps_sta = nsta;
}
1981
1982/*
1983 * Handle associated station power save state changes.
1984 */
1985static int
1986mwl_set_tim(struct ieee80211_node *ni, int set)
1987{
1988	struct ieee80211vap *vap = ni->ni_vap;
1989	struct mwl_vap *mvp = MWL_VAP(vap);
1990
1991	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1992		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1993		    IEEE80211_AID(ni->ni_associd), set);
1994		return 1;
1995	} else
1996		return 0;
1997}
1998#endif /* MWL_HOST_PS_SUPPORT */
1999
2000static int
2001mwl_desc_setup(struct mwl_softc *sc, const char *name,
2002	struct mwl_descdma *dd,
2003	int nbuf, size_t bufsize, int ndesc, size_t descsize)
2004{
2005	struct ifnet *ifp = sc->sc_ifp;
2006	uint8_t *ds;
2007	int error;
2008
2009	DPRINTF(sc, MWL_DEBUG_RESET,
2010	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
2011	    __func__, name, nbuf, (uintmax_t) bufsize,
2012	    ndesc, (uintmax_t) descsize);
2013
2014	dd->dd_name = name;
2015	dd->dd_desc_len = nbuf * ndesc * descsize;
2016
2017	/*
2018	 * Setup DMA descriptor area.
2019	 */
2020	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
2021		       PAGE_SIZE, 0,		/* alignment, bounds */
2022		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2023		       BUS_SPACE_MAXADDR,	/* highaddr */
2024		       NULL, NULL,		/* filter, filterarg */
2025		       dd->dd_desc_len,		/* maxsize */
2026		       1,			/* nsegments */
2027		       dd->dd_desc_len,		/* maxsegsize */
2028		       BUS_DMA_ALLOCNOW,	/* flags */
2029		       NULL,			/* lockfunc */
2030		       NULL,			/* lockarg */
2031		       &dd->dd_dmat);
2032	if (error != 0) {
2033		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
2034		return error;
2035	}
2036
2037	/* allocate descriptors */
2038	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
2039				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2040				 &dd->dd_dmamap);
2041	if (error != 0) {
2042		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
2043			"error %u\n", nbuf * ndesc, dd->dd_name, error);
2044		goto fail1;
2045	}
2046
2047	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
2048				dd->dd_desc, dd->dd_desc_len,
2049				mwl_load_cb, &dd->dd_desc_paddr,
2050				BUS_DMA_NOWAIT);
2051	if (error != 0) {
2052		if_printf(ifp, "unable to map %s descriptors, error %u\n",
2053			dd->dd_name, error);
2054		goto fail2;
2055	}
2056
2057	ds = dd->dd_desc;
2058	memset(ds, 0, dd->dd_desc_len);
2059	DPRINTF(sc, MWL_DEBUG_RESET,
2060	    "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n",
2061	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
2062	    (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
2063
2064	return 0;
2065fail2:
2066	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
2067fail1:
2068	bus_dma_tag_destroy(dd->dd_dmat);
2069	memset(dd, 0, sizeof(*dd));
2070	return error;
2071#undef DS2PHYS
2072}
2073
/*
 * Undo mwl_desc_setup: unload the map, free the descriptor memory,
 * and destroy the tag — in that (required) order — then clear dd
 * so a later cleanup pass sees no residual state.
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	memset(dd, 0, sizeof(*dd));
}
2083
2084/*
2085 * Construct a tx q's free list.  The order of entries on
2086 * the list must reflect the physical layout of tx descriptors
2087 * because the firmware pre-fetches descriptors.
2088 *
2089 * XXX might be better to use indices into the buffer array.
2090 */
2091static void
2092mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2093{
2094	struct mwl_txbuf *bf;
2095	int i;
2096
2097	bf = txq->dma.dd_bufptr;
2098	STAILQ_INIT(&txq->free);
2099	for (i = 0; i < mwl_txbuf; i++, bf++)
2100		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2101	txq->nfree = i;
2102}
2103
2104#define	DS2PHYS(_dd, _ds) \
2105	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2106
2107static int
2108mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
2109{
2110	struct ifnet *ifp = sc->sc_ifp;
2111	int error, bsize, i;
2112	struct mwl_txbuf *bf;
2113	struct mwl_txdesc *ds;
2114
2115	error = mwl_desc_setup(sc, "tx", &txq->dma,
2116			mwl_txbuf, sizeof(struct mwl_txbuf),
2117			MWL_TXDESC, sizeof(struct mwl_txdesc));
2118	if (error != 0)
2119		return error;
2120
2121	/* allocate and setup tx buffers */
2122	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
2123	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2124	if (bf == NULL) {
2125		if_printf(ifp, "malloc of %u tx buffers failed\n",
2126			mwl_txbuf);
2127		return ENOMEM;
2128	}
2129	txq->dma.dd_bufptr = bf;
2130
2131	ds = txq->dma.dd_desc;
2132	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
2133		bf->bf_desc = ds;
2134		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
2135		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
2136				&bf->bf_dmamap);
2137		if (error != 0) {
2138			if_printf(ifp, "unable to create dmamap for tx "
2139				"buffer %u, error %u\n", i, error);
2140			return error;
2141		}
2142	}
2143	mwl_txq_reset(sc, txq);
2144	return 0;
2145}
2146
2147static void
2148mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2149{
2150	struct mwl_txbuf *bf;
2151	int i;
2152
2153	bf = txq->dma.dd_bufptr;
2154	for (i = 0; i < mwl_txbuf; i++, bf++) {
2155		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2156		KASSERT(bf->bf_node == NULL, ("node on free list"));
2157		if (bf->bf_dmamap != NULL)
2158			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2159	}
2160	STAILQ_INIT(&txq->free);
2161	txq->nfree = 0;
2162	if (txq->dma.dd_bufptr != NULL) {
2163		free(txq->dma.dd_bufptr, M_MWLDEV);
2164		txq->dma.dd_bufptr = NULL;
2165	}
2166	if (txq->dma.dd_desc_len != 0)
2167		mwl_desc_cleanup(sc, &txq->dma);
2168}
2169
2170static int
2171mwl_rxdma_setup(struct mwl_softc *sc)
2172{
2173	struct ifnet *ifp = sc->sc_ifp;
2174	int error, jumbosize, bsize, i;
2175	struct mwl_rxbuf *bf;
2176	struct mwl_jumbo *rbuf;
2177	struct mwl_rxdesc *ds;
2178	caddr_t data;
2179
2180	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2181			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2182			1, sizeof(struct mwl_rxdesc));
2183	if (error != 0)
2184		return error;
2185
2186	/*
2187	 * Receive is done to a private pool of jumbo buffers.
2188	 * This allows us to attach to mbuf's and avoid re-mapping
2189	 * memory on each rx we post.  We allocate a large chunk
2190	 * of memory and manage it in the driver.  The mbuf free
2191	 * callback method is used to reclaim frames after sending
2192	 * them up the stack.  By default we allocate 2x the number of
2193	 * rx descriptors configured so we have some slop to hold
2194	 * us while frames are processed.
2195	 */
2196	if (mwl_rxbuf < 2*mwl_rxdesc) {
2197		if_printf(ifp,
2198		    "too few rx dma buffers (%d); increasing to %d\n",
2199		    mwl_rxbuf, 2*mwl_rxdesc);
2200		mwl_rxbuf = 2*mwl_rxdesc;
2201	}
2202	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2203	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2204
2205	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2206		       PAGE_SIZE, 0,		/* alignment, bounds */
2207		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2208		       BUS_SPACE_MAXADDR,	/* highaddr */
2209		       NULL, NULL,		/* filter, filterarg */
2210		       sc->sc_rxmemsize,	/* maxsize */
2211		       1,			/* nsegments */
2212		       sc->sc_rxmemsize,	/* maxsegsize */
2213		       BUS_DMA_ALLOCNOW,	/* flags */
2214		       NULL,			/* lockfunc */
2215		       NULL,			/* lockarg */
2216		       &sc->sc_rxdmat);
2217	if (error != 0) {
2218		if_printf(ifp, "could not create rx DMA tag\n");
2219		return error;
2220	}
2221
2222	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2223				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2224				 &sc->sc_rxmap);
2225	if (error != 0) {
2226		if_printf(ifp, "could not alloc %ju bytes of rx DMA memory\n",
2227		    (uintmax_t) sc->sc_rxmemsize);
2228		return error;
2229	}
2230
2231	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2232				sc->sc_rxmem, sc->sc_rxmemsize,
2233				mwl_load_cb, &sc->sc_rxmem_paddr,
2234				BUS_DMA_NOWAIT);
2235	if (error != 0) {
2236		if_printf(ifp, "could not load rx DMA map\n");
2237		return error;
2238	}
2239
2240	/*
2241	 * Allocate rx buffers and set them up.
2242	 */
2243	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2244	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2245	if (bf == NULL) {
2246		if_printf(ifp, "malloc of %u rx buffers failed\n", bsize);
2247		return error;
2248	}
2249	sc->sc_rxdma.dd_bufptr = bf;
2250
2251	STAILQ_INIT(&sc->sc_rxbuf);
2252	ds = sc->sc_rxdma.dd_desc;
2253	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2254		bf->bf_desc = ds;
2255		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2256		/* pre-assign dma buffer */
2257		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2258		/* NB: tail is intentional to preserve descriptor order */
2259		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2260	}
2261
2262	/*
2263	 * Place remainder of dma memory buffers on the free list.
2264	 */
2265	SLIST_INIT(&sc->sc_rxfree);
2266	for (; i < mwl_rxbuf; i++) {
2267		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2268		rbuf = MWL_JUMBO_DATA2BUF(data);
2269		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2270		sc->sc_nrxfree++;
2271	}
2272	return 0;
2273}
2274#undef DS2PHYS
2275
/*
 * Release rx dma state set up by mwl_rxdma_setup.  Each piece is
 * guarded so this is safe to call on a partially-initialized (or
 * already cleaned) softc; teardown order is unload map, free
 * memory, free software state, then the descriptor area itself.
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	if (sc->sc_rxmem_paddr != 0) {
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmem_paddr = 0;
	}
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
}
2294
2295static int
2296mwl_dma_setup(struct mwl_softc *sc)
2297{
2298	int error, i;
2299
2300	error = mwl_rxdma_setup(sc);
2301	if (error != 0) {
2302		mwl_rxdma_cleanup(sc);
2303		return error;
2304	}
2305
2306	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2307		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2308		if (error != 0) {
2309			mwl_dma_cleanup(sc);
2310			return error;
2311		}
2312	}
2313	return 0;
2314}
2315
2316static void
2317mwl_dma_cleanup(struct mwl_softc *sc)
2318{
2319	int i;
2320
2321	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2322		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2323	mwl_rxdma_cleanup(sc);
2324}
2325
2326static struct ieee80211_node *
2327mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2328{
2329	struct ieee80211com *ic = vap->iv_ic;
2330	struct mwl_softc *sc = ic->ic_ifp->if_softc;
2331	const size_t space = sizeof(struct mwl_node);
2332	struct mwl_node *mn;
2333
2334	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2335	if (mn == NULL) {
2336		/* XXX stat+msg */
2337		return NULL;
2338	}
2339	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2340	return &mn->mn_node;
2341}
2342
/*
 * net80211 node cleanup override: remove the station's firmware
 * db entry and release its station id before handing off to the
 * saved net80211 cleanup method.
 */
static void
mwl_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
        struct mwl_softc *sc = ic->ic_ifp->if_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
	    __func__, ni, ni->ni_ic, mn->mn_staid);

	/* staid 0 means no firmware station entry was ever assigned */
	if (mn->mn_staid != 0) {
		struct ieee80211vap *vap = ni->ni_vap;

		if (mn->mn_hvap != NULL) {
			/* sta mode entries are keyed by our own address */
			if (vap->iv_opmode == IEEE80211_M_STA)
				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
			else
				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
		}
		/*
		 * NB: legacy WDS peer sta db entry is installed using
		 * the associate ap's hvap; use it again to delete it.
		 * XXX can vap be NULL?
		 */
		else if (vap->iv_opmode == IEEE80211_M_WDS &&
		    MWL_VAP(vap)->mv_ap_hvap != NULL)
			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
			    ni->ni_macaddr);
		/* return the station id to the allocator */
		delstaid(sc, mn->mn_staid);
		mn->mn_staid = 0;
	}
	/* chain to the original net80211 cleanup method */
	sc->sc_node_cleanup(ni);
}
2376
2377/*
2378 * Reclaim rx dma buffers from packets sitting on the ampdu
2379 * reorder queue for a station.  We replace buffers with a
2380 * system cluster (if available).
2381 */
static void
mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
{
	/*
	 * NB: the entire implementation is compiled out; the code
	 * below predates this driver's port (it references NetBSD-
	 * style pool_cache/MEXTREMOVE APIs) and is kept only as a
	 * sketch of the intended buffer-reclaim logic.  As built,
	 * this function is a no-op.
	 */
#if 0
	int i, n, off;
	struct mbuf *m;
	void *cl;

	n = rap->rxa_qframes;
	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
		m = rap->rxa_m[i];
		if (m == NULL)
			continue;
		n--;
		/* our dma buffers have a well-known free routine */
		if ((m->m_flags & M_EXT) == 0 ||
		    m->m_ext.ext_free != mwl_ext_free)
			continue;
		/*
		 * Try to allocate a cluster and move the data.
		 */
		off = m->m_data - m->m_ext.ext_buf;
		if (off + m->m_pkthdr.len > MCLBYTES) {
			/* XXX no AMSDU for now */
			continue;
		}
		cl = pool_cache_get_paddr(&mclpool_cache, 0,
		    &m->m_ext.ext_paddr);
		if (cl != NULL) {
			/*
			 * Copy the existing data to the cluster, remove
			 * the rx dma buffer, and attach the cluster in
			 * its place.  Note we preserve the offset to the
			 * data so frames being bridged can still prepend
			 * their headers without adding another mbuf.
			 */
			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
			MEXTREMOVE(m);
			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
			/* setup mbuf like _MCLGET does */
			m->m_flags |= M_CLUSTER | M_EXT_RW;
			_MOWNERREF(m, M_EXT | M_CLUSTER);
			/* NB: m_data is clobbered by MEXTADDR, adjust */
			m->m_data += off;
		}
	}
#endif
}
2430
2431/*
2432 * Callback to reclaim resources.  We first let the
 * net80211 layer do its thing, then if we are still
2434 * blocked by a lack of rx dma buffers we walk the ampdu
2435 * reorder q's to reclaim buffers by copying to a system
2436 * cluster.
2437 */
2438static void
2439mwl_node_drain(struct ieee80211_node *ni)
2440{
2441	struct ieee80211com *ic = ni->ni_ic;
2442        struct mwl_softc *sc = ic->ic_ifp->if_softc;
2443	struct mwl_node *mn = MWL_NODE(ni);
2444
2445	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2446	    __func__, ni, ni->ni_vap, mn->mn_staid);
2447
2448	/* NB: call up first to age out ampdu q's */
2449	sc->sc_node_drain(ni);
2450
2451	/* XXX better to not check low water mark? */
2452	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2453	    (ni->ni_flags & IEEE80211_NODE_HT)) {
2454		uint8_t tid;
2455		/*
2456		 * Walk the reorder q and reclaim rx dma buffers by copying
2457		 * the packet contents into clusters.
2458		 */
2459		for (tid = 0; tid < WME_NUM_TID; tid++) {
2460			struct ieee80211_rx_ampdu *rap;
2461
2462			rap = &ni->ni_rx_ampdu[tid];
2463			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2464				continue;
2465			if (rap->rxa_qframes)
2466				mwl_ampdu_rxdma_reclaim(rap);
2467		}
2468	}
2469}
2470
/*
 * Return rssi/noise for a node.  The rssi comes from net80211's
 * tracker; the noise floor is a fixed -95 dBm placeholder in all
 * built configurations (the per-antenna value is compiled out).
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2486
2487/*
2488 * Convert Hardware per-antenna rssi info to common format:
2489 * Let a1, a2, a3 represent the amplitudes per chain
2490 * Let amax represent max[a1, a2, a3]
2491 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2492 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
 * We store a table of 4*20*log10(idx) - the extra factor of 4 is to
 * maintain some extra precision.
2495 *
2496 * Values are stored in .5 db format capped at 127.
2497 */
static void
mwl_node_getmimoinfo(const struct ieee80211_node *ni,
	struct ieee80211_mimo_info *mi)
{
/*
 * CVT converts one chain's raw amplitude to per-chain rssi in
 * .5 dB units: rssi + (20*log10(src) - 20*log10(max))/4, scaled
 * by 2 and capped at 127 (see the table derivation above).
 */
#define	CVT(_dst, _src) do {						\
	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
} while (0)
	/* 4*20*log10(idx) table, idx in [0,31]; entry 1 is 0 by definition */
	static const int8_t logdbtbl[32] = {
	       0,   0,  24,  38,  48,  56,  62,  68,
	      72,  76,  80,  83,  86,  89,  92,  94,
	      96,  98, 100, 102, 104, 106, 107, 109,
	     110, 112, 113, 115, 116, 117, 118, 119
	};
	const struct mwl_node *mn = MWL_NODE_CONST(ni);
	/* composite rssi stashed by mwl_rx_proc in the rsvd1 field */
	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
	uint32_t rssi_max;

	/* find the strongest chain to normalize against */
	rssi_max = mn->mn_ai.rssi_a;
	if (mn->mn_ai.rssi_b > rssi_max)
		rssi_max = mn->mn_ai.rssi_b;
	if (mn->mn_ai.rssi_c > rssi_max)
		rssi_max = mn->mn_ai.rssi_c;

	CVT(mi->rssi[0], mn->mn_ai.rssi_a);
	CVT(mi->rssi[1], mn->mn_ai.rssi_b);
	CVT(mi->rssi[2], mn->mn_ai.rssi_c);

	mi->noise[0] = mn->mn_ai.nf_a;
	mi->noise[1] = mn->mn_ai.nf_b;
	mi->noise[2] = mn->mn_ai.nf_c;
#undef CVT
}
2531
2532static __inline void *
2533mwl_getrxdma(struct mwl_softc *sc)
2534{
2535	struct mwl_jumbo *buf;
2536	void *data;
2537
2538	/*
2539	 * Allocate from jumbo pool.
2540	 */
2541	MWL_RXFREE_LOCK(sc);
2542	buf = SLIST_FIRST(&sc->sc_rxfree);
2543	if (buf == NULL) {
2544		DPRINTF(sc, MWL_DEBUG_ANY,
2545		    "%s: out of rx dma buffers\n", __func__);
2546		sc->sc_stats.mst_rx_nodmabuf++;
2547		data = NULL;
2548	} else {
2549		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2550		sc->sc_nrxfree--;
2551		data = MWL_JUMBO_BUF2DATA(buf);
2552	}
2553	MWL_RXFREE_UNLOCK(sc);
2554	return data;
2555}
2556
2557static __inline void
2558mwl_putrxdma(struct mwl_softc *sc, void *data)
2559{
2560	struct mwl_jumbo *buf;
2561
2562	/* XXX bounds check data */
2563	MWL_RXFREE_LOCK(sc);
2564	buf = MWL_JUMBO_DATA2BUF(data);
2565	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2566	sc->sc_nrxfree++;
2567	MWL_RXFREE_UNLOCK(sc);
2568}
2569
/*
 * (Re)initialize an rx descriptor for use by the firmware: make
 * sure it has a dma buffer attached, fill in the descriptor
 * fields, and hand ownership to the hardware.  Returns ENOMEM
 * (leaving the descriptor owned by the OS so the fw skips it)
 * when no dma buffer is available.
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	/* transfer ownership to the hardware last */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2608
2609static void
2610mwl_ext_free(struct mbuf *m, void *data, void *arg)
2611{
2612	struct mwl_softc *sc = arg;
2613
2614	/* XXX bounds check data */
2615	mwl_putrxdma(sc, data);
2616	/*
2617	 * If we were previously blocked by a lack of rx dma buffers
2618	 * check if we now have enough to restart rx interrupt handling.
2619	 * NB: we know we are called at splvm which is above splnet.
2620	 */
2621	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
2622		sc->sc_rxblocked = 0;
2623		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
2624	}
2625}
2626
/*
 * 802.11 BlockAckReq (BAR) control frame header up through the
 * transmitter address; used only to size BAR frames in
 * mwl_anyhdrsize.
 */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];	/* frame control */
	u_int8_t	i_dur[2];	/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2634
2635/*
2636 * Like ieee80211_anyhdrsize, but handles BAR frames
2637 * specially so the logic below to piece the 802.11
2638 * header together works.
2639 */
2640static __inline int
2641mwl_anyhdrsize(const void *data)
2642{
2643	const struct ieee80211_frame *wh = data;
2644
2645	if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2646		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2647		case IEEE80211_FC0_SUBTYPE_CTS:
2648		case IEEE80211_FC0_SUBTYPE_ACK:
2649			return sizeof(struct ieee80211_frame_ack);
2650		case IEEE80211_FC0_SUBTYPE_BAR:
2651			return sizeof(struct mwl_frame_bar);
2652		}
2653		return sizeof(struct ieee80211_frame_min);
2654	} else
2655		return ieee80211_hdrsize(data);
2656}
2657
2658static void
2659mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2660{
2661	const struct ieee80211_frame *wh;
2662	struct ieee80211_node *ni;
2663
2664	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2665	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2666	if (ni != NULL) {
2667		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2668		ieee80211_free_node(ni);
2669	}
2670}
2671
2672/*
2673 * Convert hardware signal strength to rssi.  The value
2674 * provided by the device has the noise floor added in;
2675 * we need to compensate for this but we don't have that
2676 * so we use a fixed value.
2677 *
2678 * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2679 * offset is already set as part of the initial gain.  This
2680 * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2681 */
2682static __inline int
2683cvtrssi(uint8_t ssi)
2684{
2685	int rssi = (int) ssi + 8;
2686	/* XXX hack guess until we have a real noise floor */
2687	rssi = 2*(87 - rssi);	/* NB: .5 dBm units */
2688	return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi);
2689}
2690
/*
 * Deferred (taskqueue) receive processing.  Walk the rx descriptor
 * ring up to mwl_rxquota frames: for each completed descriptor,
 * replace its dma buffer, re-constitute a full 802.11 header in
 * front of the payload, and dispatch the frame to net80211; then
 * return the descriptor to the firmware.  Processing stops early
 * if the firmware still owns the next descriptor or no replacement
 * dma buffers are available (rx interrupts are then masked until
 * mwl_ext_free refills the pool).
 */
static void
mwl_rx_proc(void *arg, int npending)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((((const struct ieee80211_frame *)wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct mwl_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_qosframe_addr4 *wh4;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* stop at the first descriptor the fw has not completed */
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free,
		    data, sc, 0, EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		m->m_pkthdr.rcvif = ifp;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh)) {
			if (IEEE80211_DIR_DSTODS(wh)) {
				wh4 = mtod(m,
				    struct ieee80211_qosframe_addr4*);
				*(uint16_t *)wh4->i_qos = ds->QosCtrl;
			} else {
				*(uint16_t *)wh->i_qos = ds->QosCtrl;
			}
		}
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED |
		    IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			/* stash per-antenna info for mwl_node_getmimoinfo */
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	/* remember where to resume on the next pass */
	sc->sc_rxnext = bf;

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
	    !IFQ_IS_EMPTY(&ifp->if_snd)) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(ifp);
	}
#undef IEEE80211_DIR_DSTODS
}
2927
/*
 * Initialize a tx queue: set up its lock and link every
 * descriptor's pPhysNext to the next buffer's descriptor (wrapping
 * the last back to the first) so the hardware sees a circular
 * descriptor chain in free-list order.
 */
static void
mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
{
	struct mwl_txbuf *bf, *bn;
	struct mwl_txdesc *ds;

	MWL_TXQ_LOCK_INIT(sc, txq);
	txq->qnum = qnum;
	txq->txpri = 0;	/* XXX */
#if 0
	/* NB: q setup by mwl_txdma_setup XXX */
	STAILQ_INIT(&txq->free);
#endif
	STAILQ_FOREACH(bf, &txq->free, bf_list) {
		bf->bf_txq = txq;

		ds = bf->bf_desc;
		bn = STAILQ_NEXT(bf, bf_list);
		/* last descriptor wraps to the head of the ring */
		if (bn == NULL)
			bn = STAILQ_FIRST(&txq->free);
		ds->pPhysNext = htole32(bn->bf_daddr);
	}
	STAILQ_INIT(&txq->active);
}
2952
2953/*
2954 * Setup a hardware data transmit queue for the specified
2955 * access control.  We record the mapping from ac's
2956 * to h/w queues for use by mwl_tx_start.
2957 */
2958static int
2959mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2960{
2961#define	N(a)	(sizeof(a)/sizeof(a[0]))
2962	struct mwl_txq *txq;
2963
2964	if (ac >= N(sc->sc_ac2q)) {
2965		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2966			ac, N(sc->sc_ac2q));
2967		return 0;
2968	}
2969	if (mvtype >= MWL_NUM_TX_QUEUES) {
2970		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2971			mvtype, MWL_NUM_TX_QUEUES);
2972		return 0;
2973	}
2974	txq = &sc->sc_txq[mvtype];
2975	mwl_txq_init(sc, txq, mvtype);
2976	sc->sc_ac2q[ac] = txq;
2977	return 1;
2978#undef N
2979}
2980
2981/*
2982 * Update WME parameters for a transmit queue.
2983 */
2984static int
2985mwl_txq_update(struct mwl_softc *sc, int ac)
2986{
2987#define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
2988	struct ifnet *ifp = sc->sc_ifp;
2989	struct ieee80211com *ic = ifp->if_l2com;
2990	struct mwl_txq *txq = sc->sc_ac2q[ac];
2991	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
2992	struct mwl_hal *mh = sc->sc_mh;
2993	int aifs, cwmin, cwmax, txoplim;
2994
2995	aifs = wmep->wmep_aifsn;
2996	/* XXX in sta mode need to pass log values for cwmin/max */
2997	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2998	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2999	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */
3000
3001	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
3002		device_printf(sc->sc_dev, "unable to update hardware queue "
3003			"parameters for %s traffic!\n",
3004			ieee80211_wme_acnames[ac]);
3005		return 0;
3006	}
3007	return 1;
3008#undef MWL_EXPONENT_TO_VALUE
3009}
3010
3011/*
3012 * Callback from the 802.11 layer to update WME parameters.
3013 */
3014static int
3015mwl_wme_update(struct ieee80211com *ic)
3016{
3017	struct mwl_softc *sc = ic->ic_ifp->if_softc;
3018
3019	return !mwl_txq_update(sc, WME_AC_BE) ||
3020	    !mwl_txq_update(sc, WME_AC_BK) ||
3021	    !mwl_txq_update(sc, WME_AC_VI) ||
3022	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
3023}
3024
3025/*
3026 * Reclaim resources for a setup queue.
3027 */
3028static void
3029mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
3030{
3031	/* XXX hal work? */
3032	MWL_TXQ_LOCK_DESTROY(txq);
3033}
3034
3035/*
3036 * Reclaim all tx queue resources.
3037 */
3038static void
3039mwl_tx_cleanup(struct mwl_softc *sc)
3040{
3041	int i;
3042
3043	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3044		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
3045}
3046
/*
 * Attach an mbuf chain to a tx buffer's DMA map.  On success the
 * segment list (bf_segs/bf_nseg) is filled in, the map is synced
 * for host->device transfer and bf_m records the (possibly
 * replaced) mbuf; returns 0.  On failure the mbuf chain is freed,
 * the relevant statistic is bumped and an errno is returned.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = MWL_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_NOWAIT);
#endif
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* retry the load with the compacted chain */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3108
static __inline int
mwl_cvtlegacyrate(int rate)
{
	/*
	 * Map a legacy rate in IEEE units (500Kb/s increments,
	 * e.g. 2 = 1Mb/s) to the hardware rate index; unknown
	 * rates map to index 0.
	 */
	static const int legacyrates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	int ix;

	for (ix = 0; ix < (int)(sizeof(legacyrates)/sizeof(legacyrates[0])); ix++)
		if (legacyrates[ix] == rate)
			return ix;
	return 0;
}
3129
3130/*
3131 * Calculate fixed tx rate information per client state;
3132 * this value is suitable for writing to the Format field
3133 * of a tx descriptor.
3134 */
3135static uint16_t
3136mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
3137{
3138	uint16_t fmt;
3139
3140	fmt = SM(3, EAGLE_TXD_ANTENNA)
3141	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
3142		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
3143	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
3144		fmt |= EAGLE_TXD_FORMAT_HT
3145		    /* NB: 0x80 implicitly stripped from ucastrate */
3146		    | SM(rate, EAGLE_TXD_RATE);
3147		/* XXX short/long GI may be wrong; re-check */
3148		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
3149			fmt |= EAGLE_TXD_CHW_40
3150			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
3151			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3152		} else {
3153			fmt |= EAGLE_TXD_CHW_20
3154			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
3155			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3156		}
3157	} else {			/* legacy rate */
3158		fmt |= EAGLE_TXD_FORMAT_LEGACY
3159		    | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
3160		    | EAGLE_TXD_CHW_20
3161		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
3162		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
3163			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
3164	}
3165	return fmt;
3166}
3167
/*
 * Transmit the supplied frame to the given node using the prepared
 * tx buffer.  On success the frame is handed to the firmware (the
 * descriptor is marked FW_OWNED on the h/w queue) and 0 is returned.
 * On failure the mbuf is freed and an errno is returned; the error
 * paths here do not release the caller's node reference.
 */
static int
mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
    struct mbuf *m0)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, iswep, ismcast;
	int hdrlen, copyhdrlen, pktlen;
	struct mwl_txdesc *ds;
	struct mwl_txq *txq;
	struct ieee80211_frame *wh;
	struct mwltxrec *tr;
	struct mwl_node *mn;
	uint16_t qos;
#if MWL_TXDESC > 1
	int i;
#endif

	/* classify the frame and extract any QoS control field */
	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	/* NB(review): copyhdrlen is computed but not consumed below */
	copyhdrlen = hdrlen;
	pktlen = m0->m_pkthdr.len;
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		if (IEEE80211_DIR_DSTODS(wh)) {
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
			copyhdrlen -= sizeof(qos);
		} else
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe *) wh)->i_qos);
	} else
		qos = 0;

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 *
		 * NB: we do this even though the firmware will ignore
		 *     what we've done for WEP and TKIP as we need the
		 *     ExtIV filled in for CCMP and this also adjusts
		 *     the headers which simplifies our work below.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			m_freem(m0);
			return EIO;
		}
		/*
		 * Adjust the packet length for the crypto additions
		 * done during encap and any other bits that the f/w
		 * will add later on.
		 */
		cip = k->wk_cipher;
		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;	/* XXX */
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
#if 0
		sc->sc_tx_th.wt_rate = ds->DataRate;
#endif
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}
	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 * We are assured the mbuf has sufficient headroom to
	 * prepend in-place by the setup of ic_headroom in
	 * mwl_attach.
	 */
	if (hdrlen < sizeof(struct mwltxrec)) {
		const int space = sizeof(struct mwltxrec) - hdrlen;
		if (M_LEADINGSPACE(m0) < space) {
			/* NB: should never happen */
			device_printf(sc->sc_dev,
			    "not enough headroom, need %d found %zd, "
			    "m_flags 0x%x m_len %d\n",
			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
			ieee80211_dump_pkt(ic,
			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
			m_freem(m0);
			sc->sc_stats.mst_tx_noheadroom++;
			return EIO;
		}
		M_PREPEND(m0, space, M_NOWAIT);
	}
	tr = mtod(m0, struct mwltxrec *);
	if (wh != (struct ieee80211_frame *) &tr->wh)
		ovbcopy(wh, &tr->wh, hdrlen);
	/*
	 * Note: the "firmware length" is actually the length
	 * of the fully formed "802.11 payload".  That is, it's
	 * everything except for the 802.11 header.  In particular
	 * this includes all crypto material including the MIC!
	 */
	tr->fwlen = htole16(pktlen - hdrlen);

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = mwl_tx_dmasetup(sc, bf, m0);
	if (error != 0) {
		/* NB: stat collected in mwl_tx_dmasetup */
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: unable to setup dma\n", __func__);
		return error;
	}
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	tr = mtod(m0, struct mwltxrec *);
	wh = (struct ieee80211_frame *)&tr->wh;

	/*
	 * Formulate tx descriptor.
	 */
	ds = bf->bf_desc;
	txq = bf->bf_txq;

	ds->QosCtrl = qos;			/* NB: already little-endian */
#if MWL_TXDESC == 1
	/*
	 * NB: multiframes should be zero because the descriptors
	 *     are initialized to zero.  This should handle the case
	 *     where the driver is built with MWL_TXDESC=1 but we are
	 *     using firmware with multi-segment support.
	 */
	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
#else
	ds->multiframes = htole32(bf->bf_nseg);
	ds->PktLen = htole16(m0->m_pkthdr.len);
	for (i = 0; i < bf->bf_nseg; i++) {
		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
	}
#endif
	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
	ds->Format = 0;
	ds->pad = 0;
	ds->ack_wcb_addr = 0;

	mn = MWL_NODE(ni);
	/*
	 * Select transmit rate.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		sc->sc_stats.mst_tx_mgmt++;
		/* fall thru... */
	case IEEE80211_FC0_TYPE_CTL:
		/* NB: assign to BE q to avoid bursting */
		ds->TxPriority = MWL_WME_AC_BE;
		break;
	case IEEE80211_FC0_TYPE_DATA:
		if (!ismcast) {
			const struct ieee80211_txparam *tp = ni->ni_txparms;
			/*
			 * EAPOL frames get forced to a fixed rate and w/o
			 * aggregation; otherwise check for any fixed rate
			 * for the client (may depend on association state).
			 */
			if (m0->m_flags & M_EAPOL) {
				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
				ds->Format = mvp->mv_eapolformat;
				ds->pad = htole16(
				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
				/* XXX pre-calculate per node */
				ds->Format = htole16(
				    mwl_calcformat(tp->ucastrate, ni));
				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
			}
			/* NB: EAPOL frames will never have qos set */
			if (qos == 0)
				ds->TxPriority = txq->qnum;
#if MWL_MAXBA > 3
			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
				ds->TxPriority = mn->mn_ba[3].txq;
#endif
#if MWL_MAXBA > 2
			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
				ds->TxPriority = mn->mn_ba[2].txq;
#endif
#if MWL_MAXBA > 1
			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
				ds->TxPriority = mn->mn_ba[1].txq;
#endif
#if MWL_MAXBA > 0
			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
				ds->TxPriority = mn->mn_ba[0].txq;
#endif
			else
				ds->TxPriority = txq->qnum;
		} else
			ds->TxPriority = txq->qnum;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		sc->sc_stats.mst_tx_badframetype++;
		m_freem(m0);
		return EIO;
	}

	if (IFF_DUMPPKTS_XMIT(sc))
		ieee80211_dump_pkt(ic,
		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);

	/* queue to the h/w and hand ownership of the descriptor to the f/w */
	MWL_TXQ_LOCK(txq);
	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	sc->sc_tx_timer = 5;
	MWL_TXQ_UNLOCK(txq);

	return 0;
#undef	IEEE80211_DIR_DSTODS
}
3416
static __inline int
mwl_cvtlegacyrix(int rix)
{
	/*
	 * Map a h/w legacy rate index back to the IEEE rate
	 * (500Kb/s units); out-of-range indices yield 0.
	 */
	static const int ieeerates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	const int nrates = (int)(sizeof(ieeerates) / sizeof(ieeerates[0]));

	return (rix >= 0 && rix < nrates) ? ieeerates[rix] : 0;
}
3426
3427/*
3428 * Process completed xmit descriptors from the specified queue.
3429 */
3430static int
3431mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
3432{
3433#define	EAGLE_TXD_STATUS_MCAST \
3434	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
3435	struct ifnet *ifp = sc->sc_ifp;
3436	struct ieee80211com *ic = ifp->if_l2com;
3437	struct mwl_txbuf *bf;
3438	struct mwl_txdesc *ds;
3439	struct ieee80211_node *ni;
3440	struct mwl_node *an;
3441	int nreaped;
3442	uint32_t status;
3443
3444	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
3445	for (nreaped = 0;; nreaped++) {
3446		MWL_TXQ_LOCK(txq);
3447		bf = STAILQ_FIRST(&txq->active);
3448		if (bf == NULL) {
3449			MWL_TXQ_UNLOCK(txq);
3450			break;
3451		}
3452		ds = bf->bf_desc;
3453		MWL_TXDESC_SYNC(txq, ds,
3454		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3455		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
3456			MWL_TXQ_UNLOCK(txq);
3457			break;
3458		}
3459		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
3460		MWL_TXQ_UNLOCK(txq);
3461
3462#ifdef MWL_DEBUG
3463		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
3464			mwl_printtxbuf(bf, txq->qnum, nreaped);
3465#endif
3466		ni = bf->bf_node;
3467		if (ni != NULL) {
3468			an = MWL_NODE(ni);
3469			status = le32toh(ds->Status);
3470			if (status & EAGLE_TXD_STATUS_OK) {
3471				uint16_t Format = le16toh(ds->Format);
3472				uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);
3473
3474				sc->sc_stats.mst_ant_tx[txant]++;
3475				if (status & EAGLE_TXD_STATUS_OK_RETRY)
3476					sc->sc_stats.mst_tx_retries++;
3477				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
3478					sc->sc_stats.mst_tx_mretries++;
3479				if (txq->qnum >= MWL_WME_AC_VO)
3480					ic->ic_wme.wme_hipri_traffic++;
3481				ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
3482				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
3483					ni->ni_txrate = mwl_cvtlegacyrix(
3484					    ni->ni_txrate);
3485				} else
3486					ni->ni_txrate |= IEEE80211_RATE_MCS;
3487				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
3488			} else {
3489				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
3490					sc->sc_stats.mst_tx_linkerror++;
3491				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
3492					sc->sc_stats.mst_tx_xretries++;
3493				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
3494					sc->sc_stats.mst_tx_aging++;
3495				if (bf->bf_m->m_flags & M_FF)
3496					sc->sc_stats.mst_ff_txerr++;
3497			}
3498			/*
3499			 * Do any tx complete callback.  Note this must
3500			 * be done before releasing the node reference.
3501			 * XXX no way to figure out if frame was ACK'd
3502			 */
3503			if (bf->bf_m->m_flags & M_TXCB) {
3504				/* XXX strip fw len in case header inspected */
3505				m_adj(bf->bf_m, sizeof(uint16_t));
3506				ieee80211_process_callback(ni, bf->bf_m,
3507					(status & EAGLE_TXD_STATUS_OK) == 0);
3508			}
3509			/*
3510			 * Reclaim reference to node.
3511			 *
3512			 * NB: the node may be reclaimed here if, for example
3513			 *     this is a DEAUTH message that was sent and the
3514			 *     node was timed out due to inactivity.
3515			 */
3516			ieee80211_free_node(ni);
3517		}
3518		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);
3519
3520		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3521		    BUS_DMASYNC_POSTWRITE);
3522		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3523		m_freem(bf->bf_m);
3524
3525		mwl_puttxbuf_tail(txq, bf);
3526	}
3527	return nreaped;
3528#undef EAGLE_TXD_STATUS_MCAST
3529}
3530
3531/*
3532 * Deferred processing of transmit interrupt; special-cased
3533 * for four hardware queues, 0-3.
3534 */
3535static void
3536mwl_tx_proc(void *arg, int npending)
3537{
3538	struct mwl_softc *sc = arg;
3539	struct ifnet *ifp = sc->sc_ifp;
3540	int nreaped;
3541
3542	/*
3543	 * Process each active queue.
3544	 */
3545	nreaped = 0;
3546	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3547		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3548	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3549		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3550	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3551		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3552	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3553		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3554
3555	if (nreaped != 0) {
3556		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3557		sc->sc_tx_timer = 0;
3558		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
3559			/* NB: kick fw; the tx thread may have been preempted */
3560			mwl_hal_txstart(sc->sc_mh, 0);
3561			mwl_start(ifp);
3562		}
3563	}
3564}
3565
/*
 * Drain (discard) every frame queued on a single tx queue,
 * releasing any held node references and returning the buffers
 * to the queue's free list.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			/* queue empty, done */
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ifnet *ifp = sc->sc_ifp;
			struct ieee80211com *ic = ifp->if_l2com;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3610
3611/*
3612 * Drain the transmit queues and reclaim resources.
3613 */
3614static void
3615mwl_draintxq(struct mwl_softc *sc)
3616{
3617	struct ifnet *ifp = sc->sc_ifp;
3618	int i;
3619
3620	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3621		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3622	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3623	sc->sc_tx_timer = 0;
3624}
3625
3626#ifdef MWL_DIAGAPI
3627/*
3628 * Reset the transmit queues to a pristine state after a fw download.
3629 */
3630static void
3631mwl_resettxq(struct mwl_softc *sc)
3632{
3633	int i;
3634
3635	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3636		mwl_txq_reset(sc, &sc->sc_txq[i]);
3637}
3638#endif /* MWL_DIAGAPI */
3639
3640/*
3641 * Clear the transmit queues of any frames submitted for the
3642 * specified vap.  This is done when the vap is deleted so we
3643 * don't potentially reference the vap after it is gone.
3644 * Note we cannot remove the frames; we only reclaim the node
3645 * reference.
3646 */
3647static void
3648mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3649{
3650	struct mwl_txq *txq;
3651	struct mwl_txbuf *bf;
3652	int i;
3653
3654	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3655		txq = &sc->sc_txq[i];
3656		MWL_TXQ_LOCK(txq);
3657		STAILQ_FOREACH(bf, &txq->active, bf_list) {
3658			struct ieee80211_node *ni = bf->bf_node;
3659			if (ni != NULL && ni->ni_vap == vap) {
3660				bf->bf_node = NULL;
3661				ieee80211_free_node(ni);
3662			}
3663		}
3664		MWL_TXQ_UNLOCK(txq);
3665	}
3666}
3667
3668static int
3669mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
3670	const uint8_t *frm, const uint8_t *efrm)
3671{
3672	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3673	const struct ieee80211_action *ia;
3674
3675	ia = (const struct ieee80211_action *) frm;
3676	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3677	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3678		const struct ieee80211_action_ht_mimopowersave *mps =
3679		    (const struct ieee80211_action_ht_mimopowersave *) ia;
3680
3681		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3682		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3683		    MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
3684		return 0;
3685	} else
3686		return sc->sc_recv_action(ni, wh, frm, efrm);
3687}
3688
/*
 * Intercept net80211's ADDBA request path.  Allocate a firmware
 * BA stream slot for this tid (if one is not already bound to the
 * tap), seed the starting sequence number from the firmware when
 * available, then defer to the saved net80211 handler.  Returning
 * 0 declines a-mpdu aggregation for this tid.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3760
/*
 * Intercept net80211's ADDBA response path.  On a successful
 * exchange instruct the firmware to create the BA stream that was
 * pre-allocated in mwl_addba_request; on failure (or peer NAK)
 * release the pre-allocated stream.  Always defers to the saved
 * net80211 handler afterwards.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, TID %d\n",
		    __func__, tap->txa_tid);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d TID %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_tid, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3827
3828static void
3829mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3830{
3831	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3832	struct mwl_bastate *bas;
3833
3834	bas = tap->txa_private;
3835	if (bas != NULL) {
3836		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3837		    __func__, bas->bastream);
3838		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3839		mwl_bastream_free(bas);
3840		tap->txa_private = NULL;
3841	}
3842	sc->sc_addba_stop(ni, tap);
3843}
3844
3845/*
3846 * Setup the rx data structures.  This should only be
3847 * done once or we may get out of sync with the firmware.
3848 */
3849static int
3850mwl_startrecv(struct mwl_softc *sc)
3851{
3852	if (!sc->sc_recvsetup) {
3853		struct mwl_rxbuf *bf, *prev;
3854		struct mwl_rxdesc *ds;
3855
3856		prev = NULL;
3857		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
3858			int error = mwl_rxbuf_init(sc, bf);
3859			if (error != 0) {
3860				DPRINTF(sc, MWL_DEBUG_RECV,
3861					"%s: mwl_rxbuf_init failed %d\n",
3862					__func__, error);
3863				return error;
3864			}
3865			if (prev != NULL) {
3866				ds = prev->bf_desc;
3867				ds->pPhysNext = htole32(bf->bf_daddr);
3868			}
3869			prev = bf;
3870		}
3871		if (prev != NULL) {
3872			ds = prev->bf_desc;
3873			ds->pPhysNext =
3874			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
3875		}
3876		sc->sc_recvsetup = 1;
3877	}
3878	mwl_mode_init(sc);		/* set filters, etc. */
3879	return 0;
3880}
3881
3882static MWL_HAL_APMODE
3883mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3884{
3885	MWL_HAL_APMODE mode;
3886
3887	if (IEEE80211_IS_CHAN_HT(chan)) {
3888		if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3889			mode = AP_MODE_N_ONLY;
3890		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3891			mode = AP_MODE_AandN;
3892		else if (vap->iv_flags & IEEE80211_F_PUREG)
3893			mode = AP_MODE_GandN;
3894		else
3895			mode = AP_MODE_BandGandN;
3896	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3897		if (vap->iv_flags & IEEE80211_F_PUREG)
3898			mode = AP_MODE_G_ONLY;
3899		else
3900			mode = AP_MODE_MIXED;
3901	} else if (IEEE80211_IS_CHAN_B(chan))
3902		mode = AP_MODE_B_ONLY;
3903	else if (IEEE80211_IS_CHAN_A(chan))
3904		mode = AP_MODE_A_ONLY;
3905	else
3906		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3907	return mode;
3908}
3909
/*
 * Inform the firmware of the AP operating mode implied by the
 * given channel for this vap.
 */
static int
mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
}
3916
3917/*
3918 * Set/change channels.
3919 */
3920static int
3921mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
3922{
3923	struct mwl_hal *mh = sc->sc_mh;
3924	struct ifnet *ifp = sc->sc_ifp;
3925	struct ieee80211com *ic = ifp->if_l2com;
3926	MWL_HAL_CHANNEL hchan;
3927	int maxtxpow;
3928
3929	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
3930	    __func__, chan->ic_freq, chan->ic_flags);
3931
3932	/*
3933	 * Convert to a HAL channel description with
3934	 * the flags constrained to reflect the current
3935	 * operating mode.
3936	 */
3937	mwl_mapchan(&hchan, chan);
3938	mwl_hal_intrset(mh, 0);		/* disable interrupts */
3939#if 0
3940	mwl_draintxq(sc);		/* clear pending tx frames */
3941#endif
3942	mwl_hal_setchannel(mh, &hchan);
3943	/*
3944	 * Tx power is cap'd by the regulatory setting and
3945	 * possibly a user-set limit.  We pass the min of
3946	 * these to the hal to apply them to the cal data
3947	 * for this channel.
3948	 * XXX min bound?
3949	 */
3950	maxtxpow = 2*chan->ic_maxregpower;
3951	if (maxtxpow > ic->ic_txpowlimit)
3952		maxtxpow = ic->ic_txpowlimit;
3953	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
3954	/* NB: potentially change mcast/mgt rates */
3955	mwl_setcurchanrates(sc);
3956
3957	/*
3958	 * Update internal state.
3959	 */
3960	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
3961	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
3962	if (IEEE80211_IS_CHAN_A(chan)) {
3963		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
3964		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
3965	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3966		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
3967		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
3968	} else {
3969		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
3970		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
3971	}
3972	sc->sc_curchan = hchan;
3973	mwl_hal_intrset(mh, sc->sc_imask);
3974
3975	return 0;
3976}
3977
/*
 * Callback from net80211 when a scan begins; currently a
 * no-op other than the debug trace.
 */
static void
mwl_scan_start(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3986
/*
 * Callback from net80211 when a scan completes; currently a
 * no-op other than the debug trace.
 */
static void
mwl_scan_end(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3995
/*
 * Callback from net80211 to switch the radio to ic_curchan.
 */
static void
mwl_set_channel(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;

	/* NB: error deliberately ignored */
	(void) mwl_chan_set(sc, ic->ic_curchan);
}
4004
4005/*
4006 * Handle a channel switch request.  We inform the firmware
4007 * and mark the global state to suppress various actions.
4008 * NB: we issue only one request to the fw; we may be called
4009 * multiple times if there are multiple vap's.
4010 */
4011static void
4012mwl_startcsa(struct ieee80211vap *vap)
4013{
4014	struct ieee80211com *ic = vap->iv_ic;
4015	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4016	MWL_HAL_CHANNEL hchan;
4017
4018	if (sc->sc_csapending)
4019		return;
4020
4021	mwl_mapchan(&hchan, ic->ic_csa_newchan);
4022	/* 1 =>'s quiet channel */
4023	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
4024	sc->sc_csapending = 1;
4025}
4026
4027/*
4028 * Plumb any static WEP key for the station.  This is
4029 * necessary as we must propagate the key from the
4030 * global key table of the vap to each sta db entry.
4031 */
4032static void
4033mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4034{
4035	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
4036		IEEE80211_F_PRIVACY &&
4037	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
4038	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
4039		(void) mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac);
4040}
4041
4042static int
4043mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
4044{
4045#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
4046	struct ieee80211vap *vap = ni->ni_vap;
4047	struct mwl_hal_vap *hvap;
4048	int error;
4049
4050	if (vap->iv_opmode == IEEE80211_M_WDS) {
4051		/*
4052		 * WDS vap's do not have a f/w vap; instead they piggyback
4053		 * on an AP vap and we must install the sta db entry and
4054		 * crypto state using that AP's handle (the WDS vap has none).
4055		 */
4056		hvap = MWL_VAP(vap)->mv_ap_hvap;
4057	} else
4058		hvap = MWL_VAP(vap)->mv_hvap;
4059	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
4060	    aid, staid, pi,
4061	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
4062	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
4063	if (error == 0) {
4064		/*
4065		 * Setup security for this station.  For sta mode this is
4066		 * needed even though do the same thing on transition to
4067		 * AUTH state because the call to mwl_hal_newstation
4068		 * clobbers the crypto state we setup.
4069		 */
4070		mwl_setanywepkey(vap, ni->ni_macaddr);
4071	}
4072	return error;
4073#undef WME
4074}
4075
4076static void
4077mwl_setglobalkeys(struct ieee80211vap *vap)
4078{
4079	struct ieee80211_key *wk;
4080
4081	wk = &vap->iv_nw_keys[0];
4082	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
4083		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
4084			(void) mwl_key_set(vap, wk, vap->iv_myaddr);
4085}
4086
4087/*
4088 * Convert a legacy rate set to a firmware bitmask.
4089 */
4090static uint32_t
4091get_rate_bitmap(const struct ieee80211_rateset *rs)
4092{
4093	uint32_t rates;
4094	int i;
4095
4096	rates = 0;
4097	for (i = 0; i < rs->rs_nrates; i++)
4098		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
4099		case 2:	  rates |= 0x001; break;
4100		case 4:	  rates |= 0x002; break;
4101		case 11:  rates |= 0x004; break;
4102		case 22:  rates |= 0x008; break;
4103		case 44:  rates |= 0x010; break;
4104		case 12:  rates |= 0x020; break;
4105		case 18:  rates |= 0x040; break;
4106		case 24:  rates |= 0x080; break;
4107		case 36:  rates |= 0x100; break;
4108		case 48:  rates |= 0x200; break;
4109		case 72:  rates |= 0x400; break;
4110		case 96:  rates |= 0x800; break;
4111		case 108: rates |= 0x1000; break;
4112		}
4113	return rates;
4114}
4115
4116/*
4117 * Construct an HT firmware bitmask from an HT rate set.
4118 */
4119static uint32_t
4120get_htrate_bitmap(const struct ieee80211_htrateset *rs)
4121{
4122	uint32_t rates;
4123	int i;
4124
4125	rates = 0;
4126	for (i = 0; i < rs->rs_nrates; i++) {
4127		if (rs->rs_rates[i] < 16)
4128			rates |= 1<<rs->rs_rates[i];
4129	}
4130	return rates;
4131}
4132
4133/*
4134 * Craft station database entry for station.
4135 * NB: use host byte order here, the hal handles byte swapping.
4136 */
4137static MWL_HAL_PEERINFO *
4138mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
4139{
4140	const struct ieee80211vap *vap = ni->ni_vap;
4141
4142	memset(pi, 0, sizeof(*pi));
4143	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
4144	pi->CapInfo = ni->ni_capinfo;
4145	if (ni->ni_flags & IEEE80211_NODE_HT) {
4146		/* HT capabilities, etc */
4147		pi->HTCapabilitiesInfo = ni->ni_htcap;
4148		/* XXX pi.HTCapabilitiesInfo */
4149	        pi->MacHTParamInfo = ni->ni_htparam;
4150		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
4151		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
4152		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
4153		pi->AddHtInfo.OpMode = ni->ni_htopmode;
4154		pi->AddHtInfo.stbc = ni->ni_htstbc;
4155
4156		/* constrain according to local configuration */
4157		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
4158			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
4159		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
4160			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
4161		if (ni->ni_chw != 40)
4162			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
4163	}
4164	return pi;
4165}
4166
4167/*
4168 * Re-create the local sta db entry for a vap to ensure
4169 * up to date WME state is pushed to the firmware.  Because
4170 * this resets crypto state this must be followed by a
4171 * reload of any keys in the global key table.
4172 */
static int
mwl_localstadb(struct ieee80211vap *vap)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211_node *bss;
	MWL_HAL_PEERINFO pi;
	int error;

	switch (vap->iv_opmode) {
	case IEEE80211_M_STA:
		/*
		 * Station: install an entry for our bss peer.  Peer info
		 * (rates/HT state) is only meaningful once associated so
		 * it is passed only in RUN state.
		 */
		bss = vap->iv_bss;
		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
		    vap->iv_state == IEEE80211_S_RUN ?
			mkpeerinfo(&pi, bss) : NULL,
		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
		    bss->ni_ies.wme_ie != NULL ?
			WME(bss->ni_ies.wme_ie)->wme_info : 0);
		if (error == 0)
			/* NB: newstation resets crypto state; reload keys */
			mwl_setglobalkeys(vap);
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		/* AP/mesh: install an entry keyed by our own address. */
		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
		if (error == 0)
			mwl_setglobalkeys(vap);
		break;
	default:
		/* other opmodes have no local sta db entry */
		error = 0;
		break;
	}
	return error;
#undef WME
}
4208
/*
 * 802.11 state transition handler.  Per-state firmware setup is
 * done before invoking the net80211 parent method; RUN-state work
 * that needs the updated bss node (iv_bss) is done afterwards.
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    vap->iv_ifp->if_xname, __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	/* NB: the age timer is re-armed below on entry to RUN */
	callout_stop(&sc->sc_timer);
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    vap->iv_ifp->if_xname, __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    vap->iv_ifp->if_xname, __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			/* NB: first DWDS sta vap to reach RUN enables dwds */
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    vap->iv_ifp->if_xname, __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		/*
		 * NOTE(review): this branch runs for any DWDS vap leaving
		 * RUN (or a failed transition to RUN); it presumes a prior
		 * RUN transition incremented sc_ndwdsvaps -- verify the
		 * counter cannot underflow on other transitions.
		 */
		mwl_hal_setdwds(mh, 0);
bad:	/* NB: reached only from the HOSTAP/MBSS beacon-setup failure */
	return error;
}
4362
4363/*
4364 * Manage station id's; these are separate from AID's
4365 * as AID's may have values out of the range of possible
4366 * station id's acceptable to the firmware.
4367 */
/*
 * Allocate a firmware station id.  Prefer the peer's AID when it
 * is in range and free; otherwise take the first free id > 0.
 */
static int
allocstaid(struct mwl_softc *sc, int aid)
{
	int staid;

	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
		/* NB: don't use 0 */
		for (staid = 1; staid < MWL_MAXSTAID; staid++)
			if (isclr(sc->sc_staid, staid))
				break;
		/*
		 * NOTE(review): if every id is in use the loop falls
		 * through with staid == MWL_MAXSTAID and setbit below
		 * touches a bit one past the valid range -- confirm the
		 * bitmap is sized to tolerate this or that the node
		 * limit makes exhaustion impossible.
		 */
	} else
		staid = aid;
	setbit(sc->sc_staid, staid);
	return staid;
}
4383
/* Release a station id previously returned by allocstaid. */
static void
delstaid(struct mwl_softc *sc, int staid)
{
	clrbit(sc->sc_staid, staid);
}
4389
4390/*
4391 * Setup driver-specific state for a newly associated node.
4392 * Note that we're called also on a re-associate, the isnew
4393 * param tells us if this is the first time or not.
4394 */
4395static void
4396mwl_newassoc(struct ieee80211_node *ni, int isnew)
4397{
4398	struct ieee80211vap *vap = ni->ni_vap;
4399        struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4400	struct mwl_node *mn = MWL_NODE(ni);
4401	MWL_HAL_PEERINFO pi;
4402	uint16_t aid;
4403	int error;
4404
4405	aid = IEEE80211_AID(ni->ni_associd);
4406	if (isnew) {
4407		mn->mn_staid = allocstaid(sc, aid);
4408		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4409	} else {
4410		mn = MWL_NODE(ni);
4411		/* XXX reset BA stream? */
4412	}
4413	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4414	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4415	error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4416	if (error != 0) {
4417		DPRINTF(sc, MWL_DEBUG_NODE,
4418		    "%s: error %d creating sta db entry\n",
4419		    __func__, error);
4420		/* XXX how to deal with error? */
4421	}
4422}
4423
4424/*
4425 * Periodically poke the firmware to age out station state
4426 * (power save queues, pending tx aggregates).
4427 */
4428static void
4429mwl_agestations(void *arg)
4430{
4431	struct mwl_softc *sc = arg;
4432
4433	mwl_hal_setkeepalive(sc->sc_mh);
4434	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
4435		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
4436}
4437
4438static const struct mwl_hal_channel *
4439findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4440{
4441	int i;
4442
4443	for (i = 0; i < ci->nchannels; i++) {
4444		const struct mwl_hal_channel *hc = &ci->channels[i];
4445		if (hc->ieee == ieee)
4446			return hc;
4447	}
4448	return NULL;
4449}
4450
4451static int
4452mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
4453	int nchan, struct ieee80211_channel chans[])
4454{
4455	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4456	struct mwl_hal *mh = sc->sc_mh;
4457	const MWL_HAL_CHANNELINFO *ci;
4458	int i;
4459
4460	for (i = 0; i < nchan; i++) {
4461		struct ieee80211_channel *c = &chans[i];
4462		const struct mwl_hal_channel *hc;
4463
4464		if (IEEE80211_IS_CHAN_2GHZ(c)) {
4465			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
4466			    IEEE80211_IS_CHAN_HT40(c) ?
4467				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4468		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4469			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
4470			    IEEE80211_IS_CHAN_HT40(c) ?
4471				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4472		} else {
4473			if_printf(ic->ic_ifp,
4474			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
4475			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
4476			return EINVAL;
4477		}
4478		/*
4479		 * Verify channel has cal data and cap tx power.
4480		 */
4481		hc = findhalchannel(ci, c->ic_ieee);
4482		if (hc != NULL) {
4483			if (c->ic_maxpower > 2*hc->maxTxPow)
4484				c->ic_maxpower = 2*hc->maxTxPow;
4485			goto next;
4486		}
4487		if (IEEE80211_IS_CHAN_HT40(c)) {
4488			/*
4489			 * Look for the extension channel since the
4490			 * hal table only has the primary channel.
4491			 */
4492			hc = findhalchannel(ci, c->ic_extieee);
4493			if (hc != NULL) {
4494				if (c->ic_maxpower > 2*hc->maxTxPow)
4495					c->ic_maxpower = 2*hc->maxTxPow;
4496				goto next;
4497			}
4498		}
4499		if_printf(ic->ic_ifp,
4500		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
4501		    __func__, c->ic_ieee, c->ic_extieee,
4502		    c->ic_freq, c->ic_flags);
4503		return EINVAL;
4504	next:
4505		;
4506	}
4507	return 0;
4508}
4509
4510#define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4511#define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4512
4513static void
4514addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow)
4515{
4516	c->ic_freq = freq;
4517	c->ic_flags = flags;
4518	c->ic_ieee = ieee;
4519	c->ic_minpower = 0;
4520	c->ic_maxpower = 2*txpow;
4521	c->ic_maxregpower = txpow;
4522}
4523
4524static const struct ieee80211_channel *
4525findchannel(const struct ieee80211_channel chans[], int nchans,
4526	int freq, int flags)
4527{
4528	const struct ieee80211_channel *c;
4529	int i;
4530
4531	for (i = 0; i < nchans; i++) {
4532		c = &chans[i];
4533		if (c->ic_freq == freq && c->ic_flags == flags)
4534			return c;
4535	}
4536	return NULL;
4537}
4538
/*
 * Append HT40 channel pairs to the channel list.  Each hal entry
 * names the primary (lower) channel of a pair; an HT40U entry is
 * added for it together with an HT40D entry for the extension
 * channel 20 MHz above.  The extension channel must already exist
 * in the list as an HT20 channel (added by addchannels) or the
 * pair is skipped.
 */
static void
addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	const struct ieee80211_channel *extc;
	const struct mwl_hal_channel *hc;
	int i;

	/* continue from the current end of the list */
	c = &chans[*nchans];

	flags &= ~IEEE80211_CHAN_HT;
	for (i = 0; i < ci->nchannels; i++) {
		/*
		 * Each entry defines an HT40 channel pair; find the
		 * extension channel above and the insert the pair.
		 */
		hc = &ci->channels[i];
		extc = findchannel(chans, *nchans, hc->freq+20,
		    flags | IEEE80211_CHAN_HT20);
		if (extc != NULL) {
			if (*nchans >= maxchans)
				break;
			/* primary channel, extension above */
			addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U,
			    hc->ieee, hc->maxTxPow);
			c->ic_extieee = extc->ic_ieee;
			c++, (*nchans)++;
			if (*nchans >= maxchans)
				break;
			/* extension channel, extension below */
			addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D,
			    extc->ic_ieee, hc->maxTxPow);
			c->ic_extieee = hc->ieee;
			c++, (*nchans)++;
		}
	}
}
4575
/*
 * Append 20 MHz channels from a hal table to the channel list.
 * 2.4 GHz channels (G/HTG) get an extra 11b-only clone and HT
 * channels get an extra legacy (non-HT) clone so net80211 sees
 * every supported operating mode.  The c[-1]/c[0] manipulation
 * duplicates the entry just added and then adjusts the flags of
 * each copy.
 */
static void
addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	int i;

	/* continue from the current end of the list */
	c = &chans[*nchans];

	for (i = 0; i < ci->nchannels; i++) {
		const struct mwl_hal_channel *hc;

		hc = &ci->channels[i];
		if (*nchans >= maxchans)
			break;
		addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
		c++, (*nchans)++;
		if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
			/* g channel have a separate b-only entry */
			if (*nchans >= maxchans)
				break;
			/* clone the new entry, demote the earlier copy to 11b */
			c[0] = c[-1];
			c[-1].ic_flags = IEEE80211_CHAN_B;
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTG) {
			/* HT g channel have a separate g-only entry */
			if (*nchans >= maxchans)
				break;
			c[-1].ic_flags = IEEE80211_CHAN_G;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTA) {
			/* HT a channel have a separate a-only entry */
			if (*nchans >= maxchans)
				break;
			c[-1].ic_flags = IEEE80211_CHAN_A;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
	}
}
4623
4624static void
4625getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
4626	struct ieee80211_channel chans[])
4627{
4628	const MWL_HAL_CHANNELINFO *ci;
4629
4630	/*
4631	 * Use the channel info from the hal to craft the
4632	 * channel list.  Note that we pass back an unsorted
4633	 * list; the caller is required to sort it for us
4634	 * (if desired).
4635	 */
4636	*nchans = 0;
4637	if (mwl_hal_getchannelinfo(sc->sc_mh,
4638	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
4639		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
4640	if (mwl_hal_getchannelinfo(sc->sc_mh,
4641	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
4642		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
4643	if (mwl_hal_getchannelinfo(sc->sc_mh,
4644	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4645		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
4646	if (mwl_hal_getchannelinfo(sc->sc_mh,
4647	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4648		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
4649}
4650
4651static void
4652mwl_getradiocaps(struct ieee80211com *ic,
4653	int maxchans, int *nchans, struct ieee80211_channel chans[])
4654{
4655	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4656
4657	getchannels(sc, maxchans, nchans, chans);
4658}
4659
4660static int
4661mwl_getchannels(struct mwl_softc *sc)
4662{
4663	struct ifnet *ifp = sc->sc_ifp;
4664	struct ieee80211com *ic = ifp->if_l2com;
4665
4666	/*
4667	 * Use the channel info from the hal to craft the
4668	 * channel list for net80211.  Note that we pass up
4669	 * an unsorted list; net80211 will sort it for us.
4670	 */
4671	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
4672	ic->ic_nchans = 0;
4673	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
4674
4675	ic->ic_regdomain.regdomain = SKU_DEBUG;
4676	ic->ic_regdomain.country = CTRY_DEFAULT;
4677	ic->ic_regdomain.location = 'I';
4678	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
4679	ic->ic_regdomain.isocc[1] = ' ';
4680	return (ic->ic_nchans == 0 ? EIO : 0);
4681}
4682#undef IEEE80211_CHAN_HTA
4683#undef IEEE80211_CHAN_HTG
4684
4685#ifdef MWL_DEBUG
/*
 * Dump one rx descriptor (debug aid).  The trailing '*'/'!'
 * marks frames the firmware has returned (RxControl no longer
 * DRIVER_OWN) as ok/errored per the status word.
 */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n"
	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->pPhysNext),
	    le32toh(ds->pPhysBuffData), ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2))/* NB: descriptor fields are little-endian */;
}
4701
/*
 * Dump one tx descriptor (debug aid); with MWL_TXDESC > 1 the
 * scatter/gather arrays are printed too.  NB: descriptor fields
 * are little-endian.
 */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr);
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !",
	printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
{ const uint8_t *cp = (const uint8_t *) ds;
  int i;
  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
	printf("%02x ", cp[i]);
	if (((i+1) % 16) == 0)
		printf("\n");
  }
  printf("\n");
}
#endif
}
4743#endif /* MWL_DEBUG */
4744
4745#if 0
/* Dump every descriptor on a tx queue's active list (debug aid). */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		/* sync so we see what the hardware last wrote */
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4764#endif
4765
4766static void
4767mwl_watchdog(void *arg)
4768{
4769	struct mwl_softc *sc;
4770	struct ifnet *ifp;
4771
4772	sc = arg;
4773	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
4774	if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
4775		return;
4776
4777	ifp = sc->sc_ifp;
4778	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) {
4779		if (mwl_hal_setkeepalive(sc->sc_mh))
4780			if_printf(ifp, "transmit timeout (firmware hung?)\n");
4781		else
4782			if_printf(ifp, "transmit timeout\n");
4783#if 0
4784		mwl_reset(ifp);
4785mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
4786#endif
4787		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
4788		sc->sc_stats.mst_watchdog++;
4789	}
4790}
4791
4792#ifdef MWL_DIAGAPI
4793/*
4794 * Diagnostic interface to the HAL.  This is used by various
4795 * tools to do things like retrieve register contents for
4796 * debugging.  The mechanism is intentionally opaque so that
4797 * it can change frequently w/o concern for compatiblity.
4798 */
static int
mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	u_int id = md->md_id & MWL_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	/*
	 * NOTE(review): in/out sizes come straight from userland and
	 * are handed to malloc unchecked -- consider bounding them.
	 */
	u_int32_t insize = md->md_in_size;
	u_int32_t outsize = md->md_out_size;
	int error = 0;

	if (md->md_id & MWL_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(md->md_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (md->md_id & MWL_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
		/* NB: never copy out more than the caller asked for */
		if (outsize < md->md_out_size)
			md->md_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, md->md_out_data,
					md->md_out_size);
	} else {
		error = EINVAL;
	}
bad:
	/* reclaim buffers on all exit paths (goto-cleanup idiom) */
	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
4853
4854static int
4855mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
4856{
4857	struct mwl_hal *mh = sc->sc_mh;
4858	int error;
4859
4860	MWL_LOCK_ASSERT(sc);
4861
4862	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
4863		device_printf(sc->sc_dev, "unable to load firmware\n");
4864		return EIO;
4865	}
4866	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
4867		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
4868		return EIO;
4869	}
4870	error = mwl_setupdma(sc);
4871	if (error != 0) {
4872		/* NB: mwl_setupdma prints a msg */
4873		return error;
4874	}
4875	/*
4876	 * Reset tx/rx data structures; after reload we must
4877	 * re-start the driver's notion of the next xmit/recv.
4878	 */
4879	mwl_draintxq(sc);		/* clear pending frames */
4880	mwl_resettxq(sc);		/* rebuild tx q lists */
4881	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
4882	return 0;
4883}
4884#endif /* MWL_DIAGAPI */
4885
/*
 * Driver ioctl entry point: interface flag changes, driver
 * statistics/diagnostic requests and the stock media/address
 * ioctls.
 */
static int
mwl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, startall;

	switch (cmd) {
	case SIOCSIFFLAGS:
		MWL_LOCK(sc);
		startall = 0;
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call mwl_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			mwl_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid) {
				mwl_init_locked(sc);	/* XXX lose error */
				startall = 1;
			}
		} else
			mwl_stop_locked(ifp, 1);
		MWL_UNLOCK(sc);
		/* NB: start vaps only after dropping the softc lock */
		if (startall)
			ieee80211_start_all(ic);
		break;
	case SIOCGMVSTATS:
		/* export driver statistics to userland */
		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.mst_tx_packets =
		    ifp->if_get_counter(ifp, IFCOUNTER_OPACKETS);
		sc->sc_stats.mst_rx_packets =
		    ifp->if_get_counter(ifp, IFCOUNTER_IPACKETS);
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsisentcy in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return copyout(&sc->sc_stats,
				ifr->ifr_data, sizeof (sc->sc_stats));
#ifdef MWL_DIAGAPI
	case SIOCGMVDIAG:
		/* XXX check privs */
		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
	case SIOCGMVRESET:
		/* XXX check privs */
		MWL_LOCK(sc);
		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
		MWL_UNLOCK(sc);
		break;
#endif /* MWL_DIAGAPI */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}
4966
4967#ifdef	MWL_DEBUG
4968static int
4969mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4970{
4971	struct mwl_softc *sc = arg1;
4972	int debug, error;
4973
4974	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4975	error = sysctl_handle_int(oidp, &debug, 0, req);
4976	if (error || !req->newptr)
4977		return error;
4978	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4979	sc->sc_debug = debug & 0x00ffffff;
4980	return 0;
4981}
4982#endif /* MWL_DEBUG */
4983
/*
 * Attach driver sysctl nodes; currently only the debug knob,
 * and only when compiled with MWL_DEBUG.
 */
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef	MWL_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	/* seed from the global (tunable) default */
	sc->sc_debug = mwl_debug;
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
4997
4998/*
4999 * Announce various information on device/driver attach.
5000 */
static void
mwl_announce(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* hardware rev, firmware version (four packed bytes), region */
	if_printf(ifp, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
		sc->sc_hwspecs.hwVersion,
		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
		sc->sc_hwspecs.regionCode);
	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;

	if (bootverbose) {
		int i;
		/* report the WME access category -> h/w queue mapping */
		for (i = 0; i <= WME_AC_VO; i++) {
			struct mwl_txq *txq = sc->sc_ac2q[i];
			if_printf(ifp, "Use hw queue %u for %s traffic\n",
				txq->qnum, ieee80211_wme_acnames[i]);
		}
	}
	/* NB: only report values that differ from the compiled defaults */
	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
		if_printf(ifp, "using %u rx descriptors\n", mwl_rxdesc);
	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
		if_printf(ifp, "using %u rx buffers\n", mwl_rxbuf);
	if (bootverbose || mwl_txbuf != MWL_TXBUF)
		if_printf(ifp, "using %u tx buffers\n", mwl_txbuf);
	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
		if_printf(ifp, "multi-bss support\n");
#ifdef MWL_TX_NODROP
	if (bootverbose)
		if_printf(ifp, "no tx drop\n");
#endif
}
5036