if_mwl.c revision 267985
1/*-
2 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
3 * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer,
11 *    without modification.
12 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
14 *    redistribution must be conditioned upon including a substantially
15 *    similar Disclaimer requirement for further binary redistribution.
16 *
17 * NO WARRANTY
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
21 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
23 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
26 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGES.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/dev/mwl/if_mwl.c 267985 2014-06-27 22:05:21Z gjb $");
33
34/*
35 * Driver for the Marvell 88W8363 Wireless LAN controller.
36 */
37
38#include "opt_inet.h"
39#include "opt_mwl.h"
40#include "opt_wlan.h"
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/sysctl.h>
45#include <sys/mbuf.h>
46#include <sys/malloc.h>
47#include <sys/lock.h>
48#include <sys/mutex.h>
49#include <sys/kernel.h>
50#include <sys/socket.h>
51#include <sys/sockio.h>
52#include <sys/errno.h>
53#include <sys/callout.h>
54#include <sys/bus.h>
55#include <sys/endian.h>
56#include <sys/kthread.h>
57#include <sys/taskqueue.h>
58
59#include <machine/bus.h>
60
61#include <net/if.h>
62#include <net/if_var.h>
63#include <net/if_dl.h>
64#include <net/if_media.h>
65#include <net/if_types.h>
66#include <net/if_arp.h>
67#include <net/ethernet.h>
68#include <net/if_llc.h>
69
70#include <net/bpf.h>
71
72#include <net80211/ieee80211_var.h>
73#include <net80211/ieee80211_regdomain.h>
74
75#ifdef INET
76#include <netinet/in.h>
77#include <netinet/if_ether.h>
78#endif /* INET */
79
80#include <dev/mwl/if_mwlvar.h>
81#include <dev/mwl/mwldiag.h>
82
/*
 * Idiomatic shorthands for register-style bit fields:
 * MS = mask then shift down (extract field), SM = shift up then
 * mask (insert field).  NB: x must be a bare identifier so that
 * x##_S can paste the matching shift-count name; the mask uses of
 * x are parenthesized so expression-valued masks expand safely.
 */
#define	MS(v,x)	(((v) & (x)) >> x##_S)
#define	SM(v,x)	(((v) << x##_S) & (x))
86
87static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
88		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
89		    const uint8_t [IEEE80211_ADDR_LEN],
90		    const uint8_t [IEEE80211_ADDR_LEN]);
91static void	mwl_vap_delete(struct ieee80211vap *);
92static int	mwl_setupdma(struct mwl_softc *);
93static int	mwl_hal_reset(struct mwl_softc *sc);
94static int	mwl_init_locked(struct mwl_softc *);
95static void	mwl_init(void *);
96static void	mwl_stop_locked(struct ifnet *, int);
97static int	mwl_reset(struct ieee80211vap *, u_long);
98static void	mwl_stop(struct ifnet *, int);
99static void	mwl_start(struct ifnet *);
100static int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
101			const struct ieee80211_bpf_params *);
102static int	mwl_media_change(struct ifnet *);
103static void	mwl_watchdog(void *);
104static int	mwl_ioctl(struct ifnet *, u_long, caddr_t);
105static void	mwl_radar_proc(void *, int);
106static void	mwl_chanswitch_proc(void *, int);
107static void	mwl_bawatchdog_proc(void *, int);
108static int	mwl_key_alloc(struct ieee80211vap *,
109			struct ieee80211_key *,
110			ieee80211_keyix *, ieee80211_keyix *);
111static int	mwl_key_delete(struct ieee80211vap *,
112			const struct ieee80211_key *);
113static int	mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *,
114			const uint8_t mac[IEEE80211_ADDR_LEN]);
115static int	mwl_mode_init(struct mwl_softc *);
116static void	mwl_update_mcast(struct ifnet *);
117static void	mwl_update_promisc(struct ifnet *);
118static void	mwl_updateslot(struct ifnet *);
119static int	mwl_beacon_setup(struct ieee80211vap *);
120static void	mwl_beacon_update(struct ieee80211vap *, int);
121#ifdef MWL_HOST_PS_SUPPORT
122static void	mwl_update_ps(struct ieee80211vap *, int);
123static int	mwl_set_tim(struct ieee80211_node *, int);
124#endif
125static int	mwl_dma_setup(struct mwl_softc *);
126static void	mwl_dma_cleanup(struct mwl_softc *);
127static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
128		    const uint8_t [IEEE80211_ADDR_LEN]);
129static void	mwl_node_cleanup(struct ieee80211_node *);
130static void	mwl_node_drain(struct ieee80211_node *);
131static void	mwl_node_getsignal(const struct ieee80211_node *,
132			int8_t *, int8_t *);
133static void	mwl_node_getmimoinfo(const struct ieee80211_node *,
134			struct ieee80211_mimo_info *);
135static int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
136static void	mwl_rx_proc(void *, int);
137static void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
138static int	mwl_tx_setup(struct mwl_softc *, int, int);
139static int	mwl_wme_update(struct ieee80211com *);
140static void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
141static void	mwl_tx_cleanup(struct mwl_softc *);
142static uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
143static int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
144			     struct mwl_txbuf *, struct mbuf *);
145static void	mwl_tx_proc(void *, int);
146static int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
147static void	mwl_draintxq(struct mwl_softc *);
148static void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
149static int	mwl_recv_action(struct ieee80211_node *,
150			const struct ieee80211_frame *,
151			const uint8_t *, const uint8_t *);
152static int	mwl_addba_request(struct ieee80211_node *,
153			struct ieee80211_tx_ampdu *, int dialogtoken,
154			int baparamset, int batimeout);
155static int	mwl_addba_response(struct ieee80211_node *,
156			struct ieee80211_tx_ampdu *, int status,
157			int baparamset, int batimeout);
158static void	mwl_addba_stop(struct ieee80211_node *,
159			struct ieee80211_tx_ampdu *);
160static int	mwl_startrecv(struct mwl_softc *);
161static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
162			struct ieee80211_channel *);
163static int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
164static void	mwl_scan_start(struct ieee80211com *);
165static void	mwl_scan_end(struct ieee80211com *);
166static void	mwl_set_channel(struct ieee80211com *);
167static int	mwl_peerstadb(struct ieee80211_node *,
168			int aid, int staid, MWL_HAL_PEERINFO *pi);
169static int	mwl_localstadb(struct ieee80211vap *);
170static int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
171static int	allocstaid(struct mwl_softc *sc, int aid);
172static void	delstaid(struct mwl_softc *sc, int staid);
173static void	mwl_newassoc(struct ieee80211_node *, int);
174static void	mwl_agestations(void *);
175static int	mwl_setregdomain(struct ieee80211com *,
176			struct ieee80211_regdomain *, int,
177			struct ieee80211_channel []);
178static void	mwl_getradiocaps(struct ieee80211com *, int, int *,
179			struct ieee80211_channel []);
180static int	mwl_getchannels(struct mwl_softc *);
181
182static void	mwl_sysctlattach(struct mwl_softc *);
183static void	mwl_announce(struct mwl_softc *);
184
185SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");
186
187static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
188SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
189	    0, "rx descriptors allocated");
190static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
191SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RW, &mwl_rxbuf,
192	    0, "rx buffers allocated");
193TUNABLE_INT("hw.mwl.rxbuf", &mwl_rxbuf);
194static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
195SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RW, &mwl_txbuf,
196	    0, "tx buffers allocated");
197TUNABLE_INT("hw.mwl.txbuf", &mwl_txbuf);
198static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
199SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RW, &mwl_txcoalesce,
200	    0, "tx buffers to send at once");
201TUNABLE_INT("hw.mwl.txcoalesce", &mwl_txcoalesce);
202static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
203SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RW, &mwl_rxquota,
204	    0, "max rx buffers to process per interrupt");
205TUNABLE_INT("hw.mwl.rxquota", &mwl_rxquota);
206static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
207SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RW, &mwl_rxdmalow,
208	    0, "min free rx buffers before restarting traffic");
209TUNABLE_INT("hw.mwl.rxdmalow", &mwl_rxdmalow);
210
#ifdef MWL_DEBUG
/* Debug mask; settable at runtime via hw.mwl.debug and per-softc sc_debug. */
static	int mwl_debug = 0;
SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RW, &mwl_debug,
	    0, "control debugging printfs");
TUNABLE_INT("hw.mwl.debug", &mwl_debug);
/* Bit flags for sc_debug/hw.mwl.debug; OR together to enable areas. */
enum {
	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
	MWL_DEBUG_ANY		= 0xffffffff
};
/* True if wh points at a beacon frame (type MGT, subtype BEACON). */
#define	IS_BEACON(wh) \
    ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
/*
 * Packet-dump predicates: dump rx frames when MWL_DEBUG_RECV is on
 * (beacons only with MWL_DEBUG_RECV_ALL), or when the admin sets both
 * IFF_DEBUG and IFF_LINK2 on the interface; similarly for xmit.
 */
#define	IFF_DUMPPKTS_RECV(sc, wh) \
    (((sc->sc_debug & MWL_DEBUG_RECV) && \
      ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh))) || \
     (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#define	IFF_DUMPPKTS_XMIT(sc) \
	((sc->sc_debug & MWL_DEBUG_XMIT) || \
	 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
/* Conditional printf keyed off the sc_debug mask bit(s) in m. */
#define	DPRINTF(sc, m, fmt, ...) do {				\
	if (sc->sc_debug & (m))					\
		printf(fmt, __VA_ARGS__);			\
} while (0)
#define	KEYPRINTF(sc, hk, mac) do {				\
	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
		mwl_keyprint(sc, __func__, hk, mac);		\
} while (0)
static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
#else
/* Non-debug builds: dumping only via IFF_DEBUG|IFF_LINK2; printfs compile out. */
#define	IFF_DUMPPKTS_RECV(sc, wh) \
	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#define	IFF_DUMPPKTS_XMIT(sc) \
	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#define	DPRINTF(sc, m, fmt, ...) do {				\
	(void) sc;						\
} while (0)
#define	KEYPRINTF(sc, k, mac) do {				\
	(void) sc;						\
} while (0)
#endif
266
/* Malloc tag for the driver's DMA-related allocations. */
static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");

/*
 * Each packet has fixed front matter: a 2-byte length
 * of the payload, followed by a 4-address 802.11 header
 * (regardless of the actual header and always w/o any
 * QoS header).  The payload then follows.
 */
struct mwltxrec {
	uint16_t fwlen;				/* payload length for the f/w */
	struct ieee80211_frame_addr4 wh;	/* 4-address 802.11 header */
} __packed;
279
/*
 * Read/Write shorthands for accesses to BAR 0.  Note
 * that all BAR 1 operations are done in the "hal" and
 * there should be no reference to them here.
 */
#ifdef MWL_DEBUG
/* Read a 32-bit BAR 0 register; only used by debug code. */
static __inline uint32_t
RD4(struct mwl_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
}
#endif

/* Write a 32-bit BAR 0 register. */
static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}
298
/*
 * Device attach: allocate the ifnet, attach the HAL, load firmware,
 * set up DMA state and tx queues, and register driver methods with
 * net80211.  Returns 0 on success or an errno; on failure all state
 * created so far is unwound (bad2/bad1/bad labels, in reverse order
 * of setup) and sc_invalid is set so the ISR ignores the device.
 */
int
mwl_attach(uint16_t devid, struct mwl_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct mwl_hal *mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "cannot if_alloc()\n");
		return ENOSPC;
	}
	ic = ifp->if_l2com;

	/*
	 * Setup the RX free list lock early, so it can be consistently
	 * removed.
	 */
	MWL_RXFREE_INIT(sc);

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	/* NB: the hal owns all BAR 1 accesses; this file touches only BAR 0 */
	mh = mwl_hal_attach(sc->sc_dev, devid,
	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
	if (mh == NULL) {
		if_printf(ifp, "unable to attach HAL\n");
		error = EIO;
		goto bad;
	}
	sc->sc_mh = mh;
	/*
	 * Load firmware so we can get setup.  We arbitrarily
	 * pick station firmware; we'll re-load firmware as
	 * needed so setting up the wrong mode isn't a big deal.
	 */
	if (mwl_hal_fwload(mh, NULL) != 0) {
		if_printf(ifp, "unable to setup builtin firmware\n");
		error = EIO;
		goto bad1;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		if_printf(ifp, "unable to fetch h/w specs\n");
		error = EIO;
		goto bad1;
	}
	error = mwl_getchannels(sc);
	if (error != 0)
		goto bad1;

	sc->sc_txantenna = 0;		/* h/w default */
	sc->sc_rxantenna = 0;		/* h/w default */
	sc->sc_invalid = 0;		/* ready to go, enable int handling */
	sc->sc_ageinterval = MWL_AGEINTERVAL;

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 * We immediately push the information to the firmware
	 * as otherwise it gets upset.
	 */
	error = mwl_dma_setup(sc);
	if (error != 0) {
		if_printf(ifp, "failed to setup descriptors: %d\n", error);
		goto bad1;
	}
	error = mwl_setupdma(sc);	/* push to firmware */
	if (error != 0)			/* NB: mwl_setupdma prints msg */
		goto bad1;

	callout_init(&sc->sc_timer, CALLOUT_MPSAFE);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);

	/*
	 * Deferred interrupt work (rx, radar, channel switch, BA
	 * watchdog, tx completion) runs in a private taskq thread.
	 */
	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);

	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);

	/* wire up ifnet methods and queue limits */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = mwl_start;
	ifp->if_ioctl = mwl_ioctl;
	ifp->if_init = mwl_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
#if 0
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
#endif
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_WDS		/* WDS supported */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WME		/* WME/WMM supported */
		| IEEE80211_C_BURST		/* xmit bursting supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
		| IEEE80211_C_DFS		/* DFS supported */
		;

	ic->ic_htcaps =
		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
#if MWL_AGGR_SIZE == 7935
		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#if 0
		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
#endif
		/* s/w capabilities */
		| IEEE80211_HTC_HT		/* HT operation */
		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
		| IEEE80211_HTC_SMPS		/* SMPS available */
		;

	/*
	 * Mark h/w crypto support.
	 * XXX no way to query h/w support.
	 */
	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
			  |  IEEE80211_CRYPTO_AES_CCM
			  |  IEEE80211_CRYPTO_TKIP
			  |  IEEE80211_CRYPTO_TKIPMIC
			  ;
	/*
	 * Transmit requires space in the packet for a special
	 * format transmit record and optional padding between
	 * this record and the payload.  Ask the net80211 layer
	 * to arrange this when encapsulating packets so we can
	 * add it efficiently.
	 */
	ic->ic_headroom = sizeof(struct mwltxrec) -
		sizeof(struct ieee80211_frame);

	/* call MI attach routine. */
	ieee80211_ifattach(ic, sc->sc_hwspecs.macAddr);
	ic->ic_setregdomain = mwl_setregdomain;
	ic->ic_getradiocaps = mwl_getradiocaps;
	/* override default methods */
	ic->ic_raw_xmit = mwl_raw_xmit;
	ic->ic_newassoc = mwl_newassoc;
	ic->ic_updateslot = mwl_updateslot;
	ic->ic_update_mcast = mwl_update_mcast;
	ic->ic_update_promisc = mwl_update_promisc;
	ic->ic_wme.wme_update = mwl_wme_update;

	/* save the default net80211 handlers we wrap, then install ours */
	ic->ic_node_alloc = mwl_node_alloc;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = mwl_node_cleanup;
	sc->sc_node_drain = ic->ic_node_drain;
	ic->ic_node_drain = mwl_node_drain;
	ic->ic_node_getsignal = mwl_node_getsignal;
	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;

	ic->ic_scan_start = mwl_scan_start;
	ic->ic_scan_end = mwl_scan_end;
	ic->ic_set_channel = mwl_set_channel;

	sc->sc_recv_action = ic->ic_recv_action;
	ic->ic_recv_action = mwl_recv_action;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = mwl_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = mwl_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = mwl_addba_stop;

	ic->ic_vap_create = mwl_vap_create;
	ic->ic_vap_delete = mwl_vap_delete;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		MWL_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		MWL_RX_RADIOTAP_PRESENT);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	mwl_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	mwl_announce(sc);
	return 0;
bad2:
	mwl_dma_cleanup(sc);
bad1:
	mwl_hal_detach(mh);
bad:
	MWL_RXFREE_DESTROY(sc);
	if_free(ifp);
	sc->sc_invalid = 1;	/* tell the ISR to ignore the device */
	return error;
}
548
/*
 * Device detach: stop the device and tear down all attach-time state.
 * The comment below explains why the teardown order is load-bearing;
 * do not reorder these calls.  Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	mwl_stop(ifp, 1);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	callout_drain(&sc->sc_watchdog);
	mwl_dma_cleanup(sc);
	MWL_RXFREE_DESTROY(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	if_free(ifp);

	return 0;
}
581
582/*
583 * MAC address handling for multiple BSS on the same radio.
584 * The first vap uses the MAC address from the EEPROM.  For
585 * subsequent vap's we set the U/L bit (bit 1) in the MAC
586 * address and use the next six bits as an index.
587 */
/*
 * Pick a MAC address for a new vap and record its bssid slot in
 * sc_bssidmask.  Slot 0 keeps the EEPROM address (refcounted via
 * sc_nbssid0); other slots set the U/L bit and encode the slot
 * index in the address, but only when the h/w supports multi-bssid
 * and the caller asked for a cloned bssid.
 */
static void
assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 32; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		/*
		 * NOTE(review): if all 32 slots are in use, i == 32 here
		 * and the 1<<i below is an out-of-range shift; it looks
		 * like callers never create that many vaps -- confirm
		 * before relying on this path.
		 */
		if (i != 0)
			mac[0] |= (i << 2)|0x2;	/* U/L bit + slot index */
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	if (i == 0)
		sc->sc_nbssid0++;	/* refcount sharers of slot 0 */
}
606
607static void
608reclaim_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN])
609{
610	int i = mac[0] >> 2;
611	if (i != 0 || --sc->sc_nbssid0 == 0)
612		sc->sc_bssidmask &= ~(1<<i);
613}
614
/*
 * net80211 vap-create method: allocate driver per-vap state, create
 * a matching hal vap for AP/MBSS/STA modes (WDS/monitor share or use
 * no hal vap), install driver method overrides, then attach the vap
 * and update the vap counters and overall operating mode.
 * Returns the new vap or NULL on failure.
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		/* NB: addresses assigned here must be reclaimed on failure */
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;		/* monitor needs no hal vap */
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		return NULL;		/* unsupported modes */
	}

	mvp = (struct mwl_vap *) malloc(sizeof(struct mwl_vap),
	    M_80211_VAP, M_NOWAIT | M_ZERO);
	if (mvp == NULL) {
		if (hvap != NULL) {
			/* undo the hal vap and address assignment above */
			mwl_hal_delvap(hvap);
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
		}
		/* XXX msg */
		return NULL;
	}
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
	if (hvap != NULL)
		IEEE80211_ADDR_COPY(vap->iv_myaddr, mac);
	/* override with driver methods */
	mvp->mv_newstate = vap->iv_newstate;	/* save for chaining */
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;	/* save for chaining */
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status);

	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}
754
/*
 * net80211 vap-delete method: quiesce the h/w while the vap is torn
 * down, release the hal vap/station state and bssid slot for modes
 * that have them, update the vap counters, and flush any tx frames
 * still queued for this vap before re-enabling interrupts.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ifnet *parent = vap->iv_ic->ic_ifp;
	struct mwl_softc *sc = parent->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (parent->if_drv_flags & IFF_DRV_RUNNING) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps--;
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	if (parent->if_drv_flags & IFF_DRV_RUNNING)
		mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable ints */
}
796
797void
798mwl_suspend(struct mwl_softc *sc)
799{
800	struct ifnet *ifp = sc->sc_ifp;
801
802	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
803		__func__, ifp->if_flags);
804
805	mwl_stop(ifp, 1);
806}
807
808void
809mwl_resume(struct mwl_softc *sc)
810{
811	struct ifnet *ifp = sc->sc_ifp;
812
813	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
814		__func__, ifp->if_flags);
815
816	if (ifp->if_flags & IFF_UP)
817		mwl_init(sc);
818}
819
820void
821mwl_shutdown(void *arg)
822{
823	struct mwl_softc *sc = arg;
824
825	mwl_stop(sc->sc_ifp, 1);
826}
827
828/*
829 * Interrupt handler.  Most of the actual processing is deferred.
830 */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	/* bulk rx/tx/BA-watchdog work is deferred to the taskqueue */
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);	/* complete pending hal command */
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;	/* nothing to do */
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;	/* nothing to do */
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}
886
/*
 * Deferred handler for radar-detect interrupts: count the event and
 * hand it to net80211's DFS machinery for the current channel.
 */
static void
mwl_radar_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
	    __func__, pending);

	sc->sc_stats.mst_radardetect++;
	/* XXX stop h/w BA streams? */

	IEEE80211_LOCK(ic);
	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
	IEEE80211_UNLOCK(ic);
}
904
/*
 * Deferred handler for DFS channel-switch interrupts: clear the
 * pending flag and tell net80211 the CSA has completed.
 */
static void
mwl_chanswitch_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
	    __func__, pending);

	IEEE80211_LOCK(ic);
	sc->sc_csapending = 0;
	ieee80211_csa_completeswitch(ic);
	IEEE80211_UNLOCK(ic);
}
920
/*
 * Tear down one h/w BA stream flagged by the firmware watchdog.
 * NB(review): sp->data[0] is treated as the ieee80211_node and
 *     sp->data[1] as the tx ampdu state — presumably stashed when
 *     the stream was created; confirm against the bastream setup code.
 */
static void
mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
{
	struct ieee80211_node *ni = sp->data[0];

	/* send DELBA and drop the stream */
	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
}
929
930static void
931mwl_bawatchdog_proc(void *arg, int pending)
932{
933	struct mwl_softc *sc = arg;
934	struct mwl_hal *mh = sc->sc_mh;
935	const MWL_HAL_BASTREAM *sp;
936	uint8_t bitmap, n;
937
938	sc->sc_stats.mst_bawatchdog++;
939
940	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
941		DPRINTF(sc, MWL_DEBUG_AMPDU,
942		    "%s: could not get bitmap\n", __func__);
943		sc->sc_stats.mst_bawatchdog_failed++;
944		return;
945	}
946	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
947	if (bitmap == 0xff) {
948		n = 0;
949		/* disable all ba streams */
950		for (bitmap = 0; bitmap < 8; bitmap++) {
951			sp = mwl_hal_bastream_lookup(mh, bitmap);
952			if (sp != NULL) {
953				mwl_bawatchdog(sp);
954				n++;
955			}
956		}
957		if (n == 0) {
958			DPRINTF(sc, MWL_DEBUG_AMPDU,
959			    "%s: no BA streams found\n", __func__);
960			sc->sc_stats.mst_bawatchdog_empty++;
961		}
962	} else if (bitmap != 0xaa) {
963		/* disable a single ba stream */
964		sp = mwl_hal_bastream_lookup(mh, bitmap);
965		if (sp != NULL) {
966			mwl_bawatchdog(sp);
967		} else {
968			DPRINTF(sc, MWL_DEBUG_AMPDU,
969			    "%s: no BA stream %d\n", __func__, bitmap);
970			sc->sc_stats.mst_bawatchdog_notfound++;
971		}
972	}
973}
974
975/*
976 * Convert net80211 channel to a HAL channel.
977 */
978static void
979mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
980{
981	hc->channel = chan->ic_ieee;
982
983	*(uint32_t *)&hc->channelFlags = 0;
984	if (IEEE80211_IS_CHAN_2GHZ(chan))
985		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
986	else if (IEEE80211_IS_CHAN_5GHZ(chan))
987		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
988	if (IEEE80211_IS_CHAN_HT40(chan)) {
989		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
990		if (IEEE80211_IS_CHAN_HT40U(chan))
991			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
992		else
993			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
994	} else
995		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
996	/* XXX 10MHz channels */
997}
998
999/*
1000 * Inform firmware of our tx/rx dma setup.  The BAR 0
1001 * writes below are for compatibility with older firmware.
1002 * For current firmware we send this information with a
1003 * cmd block via mwl_hal_sethwdma.
1004 */
1005static int
1006mwl_setupdma(struct mwl_softc *sc)
1007{
1008	int error, i;
1009
1010	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
1011	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
1012	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);
1013
1014	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
1015		struct mwl_txq *txq = &sc->sc_txq[i];
1016		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
1017		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
1018	}
1019	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
1020	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;
1021
1022	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
1023	if (error != 0) {
1024		device_printf(sc->sc_dev,
1025		    "unable to setup tx/rx dma; hal status %u\n", error);
1026		/* XXX */
1027	}
1028	return error;
1029}
1030
1031/*
1032 * Inform firmware of tx rate parameters.
1033 * Called after a channel change.
1034 */
1035static int
1036mwl_setcurchanrates(struct mwl_softc *sc)
1037{
1038	struct ifnet *ifp = sc->sc_ifp;
1039	struct ieee80211com *ic = ifp->if_l2com;
1040	const struct ieee80211_rateset *rs;
1041	MWL_HAL_TXRATE rates;
1042
1043	memset(&rates, 0, sizeof(rates));
1044	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1045	/* rate used to send management frames */
1046	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1047	/* rate used to send multicast frames */
1048	rates.McastRate = rates.MgtRate;
1049
1050	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1051}
1052
1053/*
1054 * Inform firmware of tx rate parameters.  Called whenever
1055 * user-settable params change and after a channel change.
1056 */
1057static int
1058mwl_setrates(struct ieee80211vap *vap)
1059{
1060	struct mwl_vap *mvp = MWL_VAP(vap);
1061	struct ieee80211_node *ni = vap->iv_bss;
1062	const struct ieee80211_txparam *tp = ni->ni_txparms;
1063	MWL_HAL_TXRATE rates;
1064
1065	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1066
1067	/*
1068	 * Update the h/w rate map.
1069	 * NB: 0x80 for MCS is passed through unchanged
1070	 */
1071	memset(&rates, 0, sizeof(rates));
1072	/* rate used to send management frames */
1073	rates.MgtRate = tp->mgmtrate;
1074	/* rate used to send multicast frames */
1075	rates.McastRate = tp->mcastrate;
1076
1077	/* while here calculate EAPOL fixed rate cookie */
1078	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
1079
1080	return mwl_hal_settxrate(mvp->mv_hvap,
1081	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
1082		RATE_FIXED : RATE_AUTO, &rates);
1083}
1084
1085/*
1086 * Setup a fixed xmit rate cookie for EAPOL frames.
1087 */
1088static void
1089mwl_seteapolformat(struct ieee80211vap *vap)
1090{
1091	struct mwl_vap *mvp = MWL_VAP(vap);
1092	struct ieee80211_node *ni = vap->iv_bss;
1093	enum ieee80211_phymode mode;
1094	uint8_t rate;
1095
1096	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1097
1098	mode = ieee80211_chan2mode(ni->ni_chan);
1099	/*
1100	 * Use legacy rates when operating a mixed HT+non-HT bss.
1101	 * NB: this may violate POLA for sta and wds vap's.
1102	 */
1103	if (mode == IEEE80211_MODE_11NA &&
1104	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1105		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1106	else if (mode == IEEE80211_MODE_11NG &&
1107	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1108		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1109	else
1110		rate = vap->iv_txparms[mode].mgmtrate;
1111
1112	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1113}
1114
1115/*
1116 * Map SKU+country code to region code for radar bin'ing.
1117 */
1118static int
1119mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1120{
1121	switch (rd->regdomain) {
1122	case SKU_FCC:
1123	case SKU_FCC3:
1124		return DOMAIN_CODE_FCC;
1125	case SKU_CA:
1126		return DOMAIN_CODE_IC;
1127	case SKU_ETSI:
1128	case SKU_ETSI2:
1129	case SKU_ETSI3:
1130		if (rd->country == CTRY_SPAIN)
1131			return DOMAIN_CODE_SPAIN;
1132		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1133			return DOMAIN_CODE_FRANCE;
1134		/* XXX force 1.3.1 radar type */
1135		return DOMAIN_CODE_ETSI_131;
1136	case SKU_JAPAN:
1137		return DOMAIN_CODE_MKK;
1138	case SKU_ROW:
1139		return DOMAIN_CODE_DGT;	/* Taiwan */
1140	case SKU_APAC:
1141	case SKU_APAC2:
1142	case SKU_APAC3:
1143		return DOMAIN_CODE_AUS;	/* Australia */
1144	}
1145	/* XXX KOREA? */
1146	return DOMAIN_CODE_FCC;			/* XXX? */
1147}
1148
/*
 * Push vap-independent operating state to the firmware after a
 * device reset.
 * NB(review): the individual hal call results are ignored and this
 *     routine unconditionally reports success (returns 1).
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_hal *mh = sc->sc_mh;

	/* antenna config, radio on, WMM and current channel */
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	/* region code drives the radar detection profile */
	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	return 1;
}
1173
/*
 * Bring the device up: stop any previous state, reset the h/w,
 * start the receive path and enable interrupts.  Returns 0 on
 * success or an errno.  Caller must hold the softc lock.
 */
static int
mwl_init_locked(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop_locked(ifp, 0);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		if_printf(ifp, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.  The #if 0 sources are intentionally
	 * masked off; see mwl_intr for the matching dispatch.
	 * NB(review): MACREQ_A2HRIC_BIT_TX_ACK is spelled with
	 * "MACREQ" in the register header -- not a typo here.
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	/* mark running before unmasking interrupts and arming the watchdog */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	mwl_hal_intrset(mh, sc->sc_imask);
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);

	return 0;
}
1234
1235static void
1236mwl_init(void *arg)
1237{
1238	struct mwl_softc *sc = arg;
1239	struct ifnet *ifp = sc->sc_ifp;
1240	struct ieee80211com *ic = ifp->if_l2com;
1241	int error = 0;
1242
1243	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
1244		__func__, ifp->if_flags);
1245
1246	MWL_LOCK(sc);
1247	error = mwl_init_locked(sc);
1248	MWL_UNLOCK(sc);
1249
1250	if (error == 0)
1251		ieee80211_start_all(ic);	/* start all vap's */
1252}
1253
1254static void
1255mwl_stop_locked(struct ifnet *ifp, int disable)
1256{
1257	struct mwl_softc *sc = ifp->if_softc;
1258
1259	DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
1260		__func__, sc->sc_invalid, ifp->if_flags);
1261
1262	MWL_LOCK_ASSERT(sc);
1263	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1264		/*
1265		 * Shutdown the hardware and driver.
1266		 */
1267		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1268		callout_stop(&sc->sc_watchdog);
1269		sc->sc_tx_timer = 0;
1270		mwl_draintxq(sc);
1271	}
1272}
1273
1274static void
1275mwl_stop(struct ifnet *ifp, int disable)
1276{
1277	struct mwl_softc *sc = ifp->if_softc;
1278
1279	MWL_LOCK(sc);
1280	mwl_stop_locked(ifp, disable);
1281	MWL_UNLOCK(sc);
1282}
1283
/*
 * Push vap-specific state to the firmware for the given target
 * state; re-setup beacons when entering RUN in an ap-like mode.
 * Returns 0 or an error from beacon setup.
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons for beaconing modes entering RUN */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_MBSS ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}
1313
/*
 * Reset the hardware w/o losing operational state.
 * Used to reset or reload hardware state for a vap.
 */
1318static int
1319mwl_reset(struct ieee80211vap *vap, u_long cmd)
1320{
1321	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1322	int error = 0;
1323
1324	if (hvap != NULL) {			/* WDS, MONITOR, etc. */
1325		struct ieee80211com *ic = vap->iv_ic;
1326		struct ifnet *ifp = ic->ic_ifp;
1327		struct mwl_softc *sc = ifp->if_softc;
1328		struct mwl_hal *mh = sc->sc_mh;
1329
1330		/* XXX handle DWDS sta vap change */
1331		/* XXX do we need to disable interrupts? */
1332		mwl_hal_intrset(mh, 0);		/* disable interrupts */
1333		error = mwl_reset_vap(vap, vap->iv_state);
1334		mwl_hal_intrset(mh, sc->sc_imask);
1335	}
1336	return error;
1337}
1338
1339/*
1340 * Allocate a tx buffer for sending a frame.  The
1341 * packet is assumed to have the WME AC stored so
1342 * we can use it to select the appropriate h/w queue.
1343 */
1344static struct mwl_txbuf *
1345mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1346{
1347	struct mwl_txbuf *bf;
1348
1349	/*
1350	 * Grab a TX buffer and associated resources.
1351	 */
1352	MWL_TXQ_LOCK(txq);
1353	bf = STAILQ_FIRST(&txq->free);
1354	if (bf != NULL) {
1355		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1356		txq->nfree--;
1357	}
1358	MWL_TXQ_UNLOCK(txq);
1359	if (bf == NULL)
1360		DPRINTF(sc, MWL_DEBUG_XMIT,
1361		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1362	return bf;
1363}
1364
1365/*
1366 * Return a tx buffer to the queue it came from.  Note there
1367 * are two cases because we must preserve the order of buffers
1368 * as it reflects the fixed order of descriptors in memory
1369 * (the firmware pre-fetches descriptors so we cannot reorder).
1370 */
1371static void
1372mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1373{
1374	bf->bf_m = NULL;
1375	bf->bf_node = NULL;
1376	MWL_TXQ_LOCK(txq);
1377	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1378	txq->nfree++;
1379	MWL_TXQ_UNLOCK(txq);
1380}
1381
1382static void
1383mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
1384{
1385	bf->bf_m = NULL;
1386	bf->bf_node = NULL;
1387	MWL_TXQ_LOCK(txq);
1388	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
1389	txq->nfree++;
1390	MWL_TXQ_UNLOCK(txq);
1391}
1392
/*
 * Transmit dispatch: drain the interface send queue, mapping each
 * frame onto the h/w queue chosen by the 802.11 layer's WME
 * classification, and kick the firmware in batches of
 * mwl_txcoalesce frames to limit doorbell writes.
 */
static void
mwl_start(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	/* NB: do nothing when the interface is down or detaching */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	nqueued = 0;
	for (;;) {
		bf = NULL;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		/*
		 * Grab the node for the destination.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			sc->sc_stats.mst_tx_qstop++;
			/* XXX blocks other traffic */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 * NB(review): on failure only the buffer and node ref
		 * are reclaimed here -- presumably mwl_tx_start frees
		 * the mbuf itself; verify against its error paths.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			ifp->if_oerrors++;
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host. So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1475
/*
 * Transmit a raw (caller-formed) 802.11 frame, e.g. management
 * frames injected by net80211.  Consumes the mbuf and node
 * reference on every error path except mwl_tx_start failure
 * (see note below).  Returns 0 or an errno.
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
		ieee80211_free_node(ni);
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		/* XXX blocks other traffic */
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		ieee80211_free_node(ni);
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 * NB(review): the mbuf is not freed here on failure --
	 * presumably mwl_tx_start consumes it; verify against
	 * its error paths.
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		ifp->if_oerrors++;
		mwl_puttxbuf_head(txq, bf);

		ieee80211_free_node(ni);
		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host. So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1535
1536static int
1537mwl_media_change(struct ifnet *ifp)
1538{
1539	struct ieee80211vap *vap = ifp->if_softc;
1540	int error;
1541
1542	error = ieee80211_media_change(ifp);
1543	/* NB: only the fixed rate can change and that doesn't need a reset */
1544	if (error == ENETRESET) {
1545		mwl_setrates(vap);
1546		error = 0;
1547	}
1548	return error;
1549}
1550
1551#ifdef MWL_DEBUG
/*
 * Debug dump of a key record being plumbed to the firmware.
 * NB(review): ciphers[] is indexed directly by hk->keyTypeId,
 *     assuming KEY_TYPE_ID_{WEP,TKIP,AES} map to 0..2 -- confirm
 *     against the key type definitions in the hal header.
 */
static void
mwl_keyprint(struct mwl_softc *sc, const char *tag,
	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	static const char *ciphers[] = {
		"WEP",
		"TKIP",
		"AES-CCM",
	};
	int i, n;

	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
	for (i = 0, n = hk->keyLen; i < n; i++)
		printf(" %02x", hk->key.aes[i]);
	printf(" mac %s", ether_sprintf(mac));
	/* TKIP carries separate rx/tx MIC keys; dump them too */
	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
		printf(" %s", "rxmic");
		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
			printf(" %02x", hk->key.tkip.rxMic[i]);
		printf(" txmic");
		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
			printf(" %02x", hk->key.tkip.txMic[i]);
	}
	printf(" flags 0x%x\n", hk->keyFlags);
}
1577#endif
1578
1579/*
1580 * Allocate a key cache slot for a unicast key.  The
1581 * firmware handles key allocation and every station is
1582 * guaranteed key space so we are always successful.
1583 */
1584static int
1585mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
1586	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1587{
1588	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1589
1590	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
1591	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
1592		if (!(&vap->iv_nw_keys[0] <= k &&
1593		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
1594			/* should not happen */
1595			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1596				"%s: bogus group key\n", __func__);
1597			return 0;
1598		}
1599		/* give the caller what they requested */
1600		*keyix = *rxkeyix = k - vap->iv_nw_keys;
1601	} else {
1602		/*
1603		 * Firmware handles key allocation.
1604		 */
1605		*keyix = *rxkeyix = 0;
1606	}
1607	return 1;
1608}
1609
1610/*
1611 * Delete a key entry allocated by mwl_key_alloc.
1612 */
1613static int
1614mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
1615{
1616	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1617	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1618	MWL_HAL_KEYVAL hk;
1619	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
1620	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1621
1622	if (hvap == NULL) {
1623		if (vap->iv_opmode != IEEE80211_M_WDS) {
1624			/* XXX monitor mode? */
1625			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1626			    "%s: no hvap for opmode %d\n", __func__,
1627			    vap->iv_opmode);
1628			return 0;
1629		}
1630		hvap = MWL_VAP(vap)->mv_ap_hvap;
1631	}
1632
1633	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
1634	    __func__, k->wk_keyix);
1635
1636	memset(&hk, 0, sizeof(hk));
1637	hk.keyIndex = k->wk_keyix;
1638	switch (k->wk_cipher->ic_cipher) {
1639	case IEEE80211_CIPHER_WEP:
1640		hk.keyTypeId = KEY_TYPE_ID_WEP;
1641		break;
1642	case IEEE80211_CIPHER_TKIP:
1643		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1644		break;
1645	case IEEE80211_CIPHER_AES_CCM:
1646		hk.keyTypeId = KEY_TYPE_ID_AES;
1647		break;
1648	default:
1649		/* XXX should not happen */
1650		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1651		    __func__, k->wk_cipher->ic_cipher);
1652		return 0;
1653	}
1654	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
1655}
1656
1657static __inline int
1658addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1659{
1660	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1661		if (k->wk_flags & IEEE80211_KEY_XMIT)
1662			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1663		if (k->wk_flags & IEEE80211_KEY_RECV)
1664			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1665		return 1;
1666	} else
1667		return 0;
1668}
1669
1670/*
1671 * Set the key cache contents for the specified key.  Key cache
1672 * slot(s) must already have been allocated by mwl_key_alloc.
1673 */
static int
mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
	const uint8_t mac[IEEE80211_ADDR_LEN])
{
#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
#define	IEEE80211_IS_STATICKEY(k) \
	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
	 (GRPXMIT|IEEE80211_KEY_RECV))
	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	const uint8_t *macaddr;
	MWL_HAL_KEYVAL hk;

	/* crypto is done in the firmware; s/w keys never reach here */
	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
		("s/w crypto set?"));

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* WDS vaps use the parent ap's h/w vap state */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}
	/* build the firmware key record from the net80211 key */
	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (cip->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		hk.keyLen = k->wk_keylen;
		if (k->wk_keyix == vap->iv_def_txkey)
			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
		if (!IEEE80211_IS_STATICKEY(k)) {
			/* NB: WEP is never used for the PTK */
			(void) addgroupflags(&hk, k);
		}
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
		/* key material is followed by the two MIC keys */
		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		hk.keyLen = k->wk_keylen;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	/*
	 * NB: tkip mic keys get copied here too; the layout
	 *     just happens to match that in ieee80211_key.
	 */
	memcpy(hk.key.aes, k->wk_key, hk.keyLen);

	/*
	 * Locate address of sta db entry for writing key;
	 * the convention unfortunately is somewhat different
	 * than how net80211, hostapd, and wpa_supplicant think.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		/*
		 * NB: keys plumbed before the sta reaches AUTH state
		 * will be discarded or written to the wrong sta db
		 * entry because iv_bss is meaningless.  This is ok
		 * (right now) because we handle deferred plumbing of
		 * WEP keys when the sta reaches AUTH state.
		 */
		macaddr = vap->iv_bss->ni_bssid;
		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
			/* XXX plumb to local sta db too for static key wep */
			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
		}
	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
	    vap->iv_state != IEEE80211_S_RUN) {
		/*
		 * Prior to RUN state a WDS vap will not have its BSS
		 * node setup so we will plumb the key to the wrong mac
		 * address (it'll be our local address).  Workaround
		 * this for the moment by grabbing the correct address.
		 */
		macaddr = vap->iv_des_bssid;
	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
		macaddr = vap->iv_myaddr;
	else
		macaddr = mac;
	KEYPRINTF(sc, &hk, macaddr);
	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
#undef IEEE80211_IS_STATICKEY
#undef GRPXMIT
}
1778
/* unaligned little endian access */
/* NB: byte-at-a-time reads avoid alignment faults on packed frame data */
#define LE_READ_2(p)				\
	((uint16_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8)))
#define LE_READ_4(p)				\
	((uint32_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8) |	\
	  (((const uint8_t *)(p))[2] << 16) |	\
	  (((const uint8_t *)(p))[3] << 24)))
1790
1791/*
1792 * Set the multicast filter contents into the hardware.
1793 * XXX f/w has no support; just defer to the os.
1794 */
static void
mwl_setmcastfilter(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
#if 0
	/*
	 * NB(review): dead reference code -- uses the NetBSD-style
	 * ether_multi walk (sc->sc_ec, ETHER_FIRST_MULTI) which does
	 * not exist in this FreeBSD driver; kept for when firmware
	 * multicast filtering is implemented.
	 */
	struct ether_multi *enm;
	struct ether_multistep estep;
	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
	uint8_t *mp;
	int nmc;

	mp = macs;
	nmc = 0;
	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
	while (enm != NULL) {
		/* XXX Punt on ranges. */
		if (nmc == MWL_HAL_MCAST_MAX ||
		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
			ifp->if_flags |= IFF_ALLMULTI;
			return;
		}
		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
		mp += IEEE80211_ADDR_LEN, nmc++;
		ETHER_NEXT_MULTI(estep, enm);
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
#else
	/* XXX no mcast filter support; we get everything */
	ifp->if_flags |= IFF_ALLMULTI;
#endif
}
1827
1828static int
1829mwl_mode_init(struct mwl_softc *sc)
1830{
1831	struct ifnet *ifp = sc->sc_ifp;
1832	struct ieee80211com *ic = ifp->if_l2com;
1833	struct mwl_hal *mh = sc->sc_mh;
1834
1835	/*
1836	 * NB: Ignore promisc in hostap mode; it's set by the
1837	 * bridge.  This is wrong but we have no way to
1838	 * identify internal requests (from the bridge)
1839	 * versus external requests such as for tcpdump.
1840	 */
1841	mwl_hal_setpromisc(mh, (ifp->if_flags & IFF_PROMISC) &&
1842	    ic->ic_opmode != IEEE80211_M_HOSTAP);
1843	mwl_setmcastfilter(sc);
1844
1845	return 0;
1846}
1847
1848/*
1849 * Callback from the 802.11 layer after a multicast state change.
1850 */
1851static void
1852mwl_update_mcast(struct ifnet *ifp)
1853{
1854	struct mwl_softc *sc = ifp->if_softc;
1855
1856	mwl_setmcastfilter(sc);
1857}
1858
1859/*
1860 * Callback from the 802.11 layer after a promiscuous mode change.
1861 * Note this interface does not check the operating mode as this
1862 * is an internal callback and we are expected to honor the current
1863 * state (e.g. this is used for setting the interface in promiscuous
1864 * mode when operating in hostap mode to do ACS).
1865 */
1866static void
1867mwl_update_promisc(struct ifnet *ifp)
1868{
1869	struct mwl_softc *sc = ifp->if_softc;
1870
1871	mwl_hal_setpromisc(sc->sc_mh, (ifp->if_flags & IFF_PROMISC) != 0);
1872}
1873
1874/*
1875 * Callback from the 802.11 layer to update the slot time
1876 * based on the current setting.  We use it to notify the
1877 * firmware of ERP changes and the f/w takes care of things
1878 * like slot time and preamble.
1879 */
1880static void
1881mwl_updateslot(struct ifnet *ifp)
1882{
1883	struct mwl_softc *sc = ifp->if_softc;
1884	struct ieee80211com *ic = ifp->if_l2com;
1885	struct mwl_hal *mh = sc->sc_mh;
1886	int prot;
1887
1888	/* NB: can be called early; suppress needless cmds */
1889	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1890		return;
1891
1892	/*
1893	 * Calculate the ERP flags.  The firwmare will use
1894	 * this to carry out the appropriate measures.
1895	 */
1896	prot = 0;
1897	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
1898		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
1899			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
1900		if (ic->ic_flags & IEEE80211_F_USEPROT)
1901			prot |= IEEE80211_ERP_USE_PROTECTION;
1902		if (ic->ic_flags & IEEE80211_F_USEBARKER)
1903			prot |= IEEE80211_ERP_LONG_PREAMBLE;
1904	}
1905
1906	DPRINTF(sc, MWL_DEBUG_RESET,
1907	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
1908	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
1909	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
1910	    ic->ic_flags);
1911
1912	mwl_hal_setgprot(mh, prot);
1913}
1914
1915/*
1916 * Setup the beacon frame.
1917 */
1918static int
1919mwl_beacon_setup(struct ieee80211vap *vap)
1920{
1921	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1922	struct ieee80211_node *ni = vap->iv_bss;
1923	struct ieee80211_beacon_offsets bo;
1924	struct mbuf *m;
1925
1926	m = ieee80211_beacon_alloc(ni, &bo);
1927	if (m == NULL)
1928		return ENOBUFS;
1929	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1930	m_free(m);
1931
1932	return 0;
1933}
1934
1935/*
1936 * Update the beacon frame in response to a change.
1937 */
1938static void
1939mwl_beacon_update(struct ieee80211vap *vap, int item)
1940{
1941	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1942	struct ieee80211com *ic = vap->iv_ic;
1943
1944	KASSERT(hvap != NULL, ("no beacon"));
1945	switch (item) {
1946	case IEEE80211_BEACON_ERP:
1947		mwl_updateslot(ic->ic_ifp);
1948		break;
1949	case IEEE80211_BEACON_HTINFO:
1950		mwl_hal_setnprotmode(hvap,
1951		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1952		break;
1953	case IEEE80211_BEACON_CAPS:
1954	case IEEE80211_BEACON_WME:
1955	case IEEE80211_BEACON_APPIE:
1956	case IEEE80211_BEACON_CSA:
1957		break;
1958	case IEEE80211_BEACON_TIM:
1959		/* NB: firmware always forms TIM */
1960		return;
1961	}
1962	/* XXX retain beacon frame and update */
1963	mwl_beacon_setup(vap);
1964}
1965
1966static void
1967mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1968{
1969	bus_addr_t *paddr = (bus_addr_t*) arg;
1970	KASSERT(error == 0, ("error %u on bus_dma callback", error));
1971	*paddr = segs->ds_addr;
1972}
1973
#ifdef MWL_HOST_PS_SUPPORT
/*
 * Handle power save station occupancy changes.
 * nsta is the number of associated stations currently in power save;
 * the firmware only needs to be told on the 0 <-> non-zero transitions
 * (enable bss power save handling when the first station enters
 * power save, disable when the last one leaves).
 */
static void
mwl_update_ps(struct ieee80211vap *vap, int nsta)
{
	struct mwl_vap *mvp = MWL_VAP(vap);

	/* NB: notify firmware only on the edge transitions */
	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
	mvp->mv_last_ps_sta = nsta;	/* remember for the next edge check */
}
1987
1988/*
1989 * Handle associated station power save state changes.
1990 */
1991static int
1992mwl_set_tim(struct ieee80211_node *ni, int set)
1993{
1994	struct ieee80211vap *vap = ni->ni_vap;
1995	struct mwl_vap *mvp = MWL_VAP(vap);
1996
1997	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1998		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1999		    IEEE80211_AID(ni->ni_associd), set);
2000		return 1;
2001	} else
2002		return 0;
2003}
2004#endif /* MWL_HOST_PS_SUPPORT */
2005
2006static int
2007mwl_desc_setup(struct mwl_softc *sc, const char *name,
2008	struct mwl_descdma *dd,
2009	int nbuf, size_t bufsize, int ndesc, size_t descsize)
2010{
2011	struct ifnet *ifp = sc->sc_ifp;
2012	uint8_t *ds;
2013	int error;
2014
2015	DPRINTF(sc, MWL_DEBUG_RESET,
2016	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
2017	    __func__, name, nbuf, (uintmax_t) bufsize,
2018	    ndesc, (uintmax_t) descsize);
2019
2020	dd->dd_name = name;
2021	dd->dd_desc_len = nbuf * ndesc * descsize;
2022
2023	/*
2024	 * Setup DMA descriptor area.
2025	 */
2026	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
2027		       PAGE_SIZE, 0,		/* alignment, bounds */
2028		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2029		       BUS_SPACE_MAXADDR,	/* highaddr */
2030		       NULL, NULL,		/* filter, filterarg */
2031		       dd->dd_desc_len,		/* maxsize */
2032		       1,			/* nsegments */
2033		       dd->dd_desc_len,		/* maxsegsize */
2034		       BUS_DMA_ALLOCNOW,	/* flags */
2035		       NULL,			/* lockfunc */
2036		       NULL,			/* lockarg */
2037		       &dd->dd_dmat);
2038	if (error != 0) {
2039		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
2040		return error;
2041	}
2042
2043	/* allocate descriptors */
2044	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
2045				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2046				 &dd->dd_dmamap);
2047	if (error != 0) {
2048		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
2049			"error %u\n", nbuf * ndesc, dd->dd_name, error);
2050		goto fail1;
2051	}
2052
2053	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
2054				dd->dd_desc, dd->dd_desc_len,
2055				mwl_load_cb, &dd->dd_desc_paddr,
2056				BUS_DMA_NOWAIT);
2057	if (error != 0) {
2058		if_printf(ifp, "unable to map %s descriptors, error %u\n",
2059			dd->dd_name, error);
2060		goto fail2;
2061	}
2062
2063	ds = dd->dd_desc;
2064	memset(ds, 0, dd->dd_desc_len);
2065	DPRINTF(sc, MWL_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
2066	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
2067	    (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
2068
2069	return 0;
2070fail2:
2071	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
2072fail1:
2073	bus_dma_tag_destroy(dd->dd_dmat);
2074	memset(dd, 0, sizeof(*dd));
2075	return error;
2076#undef DS2PHYS
2077}
2078
/*
 * Undo mwl_desc_setup: unload the DMA map, free the descriptor
 * memory, destroy the tag, and clear the bookkeeping structure so
 * the descdma state is safe to set up again (or to pass here twice
 * only after a fresh setup).
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	memset(dd, 0, sizeof(*dd));
}
2088
2089/*
2090 * Construct a tx q's free list.  The order of entries on
2091 * the list must reflect the physical layout of tx descriptors
2092 * because the firmware pre-fetches descriptors.
2093 *
2094 * XXX might be better to use indices into the buffer array.
2095 */
2096static void
2097mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2098{
2099	struct mwl_txbuf *bf;
2100	int i;
2101
2102	bf = txq->dma.dd_bufptr;
2103	STAILQ_INIT(&txq->free);
2104	for (i = 0; i < mwl_txbuf; i++, bf++)
2105		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2106	txq->nfree = i;
2107}
2108
/*
 * Map a descriptor pointer within a dma area to its bus address:
 * the byte offset from the start of the mapping plus the mapped base.
 */
#define	DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2111
/*
 * Allocate tx descriptors and software tx buffers for a queue and
 * construct its free list.  Each tx buffer is bound to MWL_TXDESC
 * consecutive h/w descriptors and gets its own dmamap used to map
 * mbuf chains at transmit time.
 *
 * Returns 0 or an errno; on failure the caller is expected to unwind
 * partially constructed state via mwl_dma_cleanup.
 */
static int
mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ifnet *ifp = sc->sc_ifp;
	int error, bsize, i;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;

	error = mwl_desc_setup(sc, "tx", &txq->dma,
			mwl_txbuf, sizeof(struct mwl_txbuf),
			MWL_TXDESC, sizeof(struct mwl_txdesc));
	if (error != 0)
		return error;

	/* allocate and setup tx buffers */
	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
	if (bf == NULL) {
		if_printf(ifp, "malloc of %u tx buffers failed\n",
			mwl_txbuf);
		return ENOMEM;
	}
	txq->dma.dd_bufptr = bf;

	/* bind each buffer to its slice of the descriptor area */
	ds = txq->dma.dd_desc;
	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
				&bf->bf_dmamap);
		if (error != 0) {
			if_printf(ifp, "unable to create dmamap for tx "
				"buffer %u, error %u\n", i, error);
			/* NB: caller cleans up via mwl_dma_cleanup */
			return error;
		}
	}
	mwl_txq_reset(sc, txq);
	return 0;
}
2151
/*
 * Reclaim all tx dma resources for a queue: per-buffer dmamaps, the
 * software buffer array, and the descriptor area.  Safe to call on a
 * partially constructed queue (each step checks its own state).
 */
static void
mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i;

	bf = txq->dma.dd_bufptr;
	for (i = 0; i < mwl_txbuf; i++, bf++) {
		/* buffers must have been reclaimed before teardown */
		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
		KASSERT(bf->bf_node == NULL, ("node on free list"));
		if (bf->bf_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
	}
	STAILQ_INIT(&txq->free);
	txq->nfree = 0;
	if (txq->dma.dd_bufptr != NULL) {
		free(txq->dma.dd_bufptr, M_MWLDEV);
		txq->dma.dd_bufptr = NULL;
	}
	/* NB: dd_desc_len != 0 means mwl_desc_setup completed */
	if (txq->dma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &txq->dma);
}
2174
2175static int
2176mwl_rxdma_setup(struct mwl_softc *sc)
2177{
2178	struct ifnet *ifp = sc->sc_ifp;
2179	int error, jumbosize, bsize, i;
2180	struct mwl_rxbuf *bf;
2181	struct mwl_jumbo *rbuf;
2182	struct mwl_rxdesc *ds;
2183	caddr_t data;
2184
2185	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2186			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2187			1, sizeof(struct mwl_rxdesc));
2188	if (error != 0)
2189		return error;
2190
2191	/*
2192	 * Receive is done to a private pool of jumbo buffers.
2193	 * This allows us to attach to mbuf's and avoid re-mapping
2194	 * memory on each rx we post.  We allocate a large chunk
2195	 * of memory and manage it in the driver.  The mbuf free
2196	 * callback method is used to reclaim frames after sending
2197	 * them up the stack.  By default we allocate 2x the number of
2198	 * rx descriptors configured so we have some slop to hold
2199	 * us while frames are processed.
2200	 */
2201	if (mwl_rxbuf < 2*mwl_rxdesc) {
2202		if_printf(ifp,
2203		    "too few rx dma buffers (%d); increasing to %d\n",
2204		    mwl_rxbuf, 2*mwl_rxdesc);
2205		mwl_rxbuf = 2*mwl_rxdesc;
2206	}
2207	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2208	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2209
2210	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2211		       PAGE_SIZE, 0,		/* alignment, bounds */
2212		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2213		       BUS_SPACE_MAXADDR,	/* highaddr */
2214		       NULL, NULL,		/* filter, filterarg */
2215		       sc->sc_rxmemsize,	/* maxsize */
2216		       1,			/* nsegments */
2217		       sc->sc_rxmemsize,	/* maxsegsize */
2218		       BUS_DMA_ALLOCNOW,	/* flags */
2219		       NULL,			/* lockfunc */
2220		       NULL,			/* lockarg */
2221		       &sc->sc_rxdmat);
2222	if (error != 0) {
2223		if_printf(ifp, "could not create rx DMA tag\n");
2224		return error;
2225	}
2226
2227	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2228				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2229				 &sc->sc_rxmap);
2230	if (error != 0) {
2231		if_printf(ifp, "could not alloc %ju bytes of rx DMA memory\n",
2232		    (uintmax_t) sc->sc_rxmemsize);
2233		return error;
2234	}
2235
2236	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2237				sc->sc_rxmem, sc->sc_rxmemsize,
2238				mwl_load_cb, &sc->sc_rxmem_paddr,
2239				BUS_DMA_NOWAIT);
2240	if (error != 0) {
2241		if_printf(ifp, "could not load rx DMA map\n");
2242		return error;
2243	}
2244
2245	/*
2246	 * Allocate rx buffers and set them up.
2247	 */
2248	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2249	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2250	if (bf == NULL) {
2251		if_printf(ifp, "malloc of %u rx buffers failed\n", bsize);
2252		return error;
2253	}
2254	sc->sc_rxdma.dd_bufptr = bf;
2255
2256	STAILQ_INIT(&sc->sc_rxbuf);
2257	ds = sc->sc_rxdma.dd_desc;
2258	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2259		bf->bf_desc = ds;
2260		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2261		/* pre-assign dma buffer */
2262		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2263		/* NB: tail is intentional to preserve descriptor order */
2264		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2265	}
2266
2267	/*
2268	 * Place remainder of dma memory buffers on the free list.
2269	 */
2270	SLIST_INIT(&sc->sc_rxfree);
2271	for (; i < mwl_rxbuf; i++) {
2272		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2273		rbuf = MWL_JUMBO_DATA2BUF(data);
2274		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2275		sc->sc_nrxfree++;
2276	}
2277	return 0;
2278}
2279#undef DS2PHYS
2280
/*
 * Reclaim all rx dma state in the reverse order of mwl_rxdma_setup.
 * Each step is guarded by its own sentinel so this is safe to call
 * on a partially constructed setup (it is the error-unwind path).
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	if (sc->sc_rxmem_paddr != 0) {
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmem_paddr = 0;
	}
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	/* NB: dd_desc_len != 0 means mwl_desc_setup completed */
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
}
2299
2300static int
2301mwl_dma_setup(struct mwl_softc *sc)
2302{
2303	int error, i;
2304
2305	error = mwl_rxdma_setup(sc);
2306	if (error != 0) {
2307		mwl_rxdma_cleanup(sc);
2308		return error;
2309	}
2310
2311	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2312		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2313		if (error != 0) {
2314			mwl_dma_cleanup(sc);
2315			return error;
2316		}
2317	}
2318	return 0;
2319}
2320
2321static void
2322mwl_dma_cleanup(struct mwl_softc *sc)
2323{
2324	int i;
2325
2326	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2327		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2328	mwl_rxdma_cleanup(sc);
2329}
2330
2331static struct ieee80211_node *
2332mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2333{
2334	struct ieee80211com *ic = vap->iv_ic;
2335	struct mwl_softc *sc = ic->ic_ifp->if_softc;
2336	const size_t space = sizeof(struct mwl_node);
2337	struct mwl_node *mn;
2338
2339	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2340	if (mn == NULL) {
2341		/* XXX stat+msg */
2342		return NULL;
2343	}
2344	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2345	return &mn->mn_node;
2346}
2347
/*
 * net80211 node cleanup override: if the station has a firmware
 * station id, remove the firmware station db entry and release the
 * id before chaining to the saved net80211 cleanup method.
 */
static void
mwl_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
        struct mwl_softc *sc = ic->ic_ifp->if_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
	    __func__, ni, ni->ni_ic, mn->mn_staid);

	if (mn->mn_staid != 0) {
		struct ieee80211vap *vap = ni->ni_vap;

		if (mn->mn_hvap != NULL) {
			/* sta mode entries are keyed by our own address */
			if (vap->iv_opmode == IEEE80211_M_STA)
				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
			else
				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
		}
		/*
		 * NB: legacy WDS peer sta db entry is installed using
		 * the associate ap's hvap; use it again to delete it.
		 * XXX can vap be NULL?
		 */
		else if (vap->iv_opmode == IEEE80211_M_WDS &&
		    MWL_VAP(vap)->mv_ap_hvap != NULL)
			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
			    ni->ni_macaddr);
		/* return the station id to the allocator */
		delstaid(sc, mn->mn_staid);
		mn->mn_staid = 0;
	}
	sc->sc_node_cleanup(ni);
}
2381
2382/*
2383 * Reclaim rx dma buffers from packets sitting on the ampdu
2384 * reorder queue for a station.  We replace buffers with a
2385 * system cluster (if available).
2386 */
2387static void
2388mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
2389{
2390#if 0
2391	int i, n, off;
2392	struct mbuf *m;
2393	void *cl;
2394
2395	n = rap->rxa_qframes;
2396	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
2397		m = rap->rxa_m[i];
2398		if (m == NULL)
2399			continue;
2400		n--;
2401		/* our dma buffers have a well-known free routine */
2402		if ((m->m_flags & M_EXT) == 0 ||
2403		    m->m_ext.ext_free != mwl_ext_free)
2404			continue;
2405		/*
2406		 * Try to allocate a cluster and move the data.
2407		 */
2408		off = m->m_data - m->m_ext.ext_buf;
2409		if (off + m->m_pkthdr.len > MCLBYTES) {
2410			/* XXX no AMSDU for now */
2411			continue;
2412		}
2413		cl = pool_cache_get_paddr(&mclpool_cache, 0,
2414		    &m->m_ext.ext_paddr);
2415		if (cl != NULL) {
2416			/*
2417			 * Copy the existing data to the cluster, remove
2418			 * the rx dma buffer, and attach the cluster in
2419			 * its place.  Note we preserve the offset to the
2420			 * data so frames being bridged can still prepend
2421			 * their headers without adding another mbuf.
2422			 */
2423			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
2424			MEXTREMOVE(m);
2425			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
2426			/* setup mbuf like _MCLGET does */
2427			m->m_flags |= M_CLUSTER | M_EXT_RW;
2428			_MOWNERREF(m, M_EXT | M_CLUSTER);
2429			/* NB: m_data is clobbered by MEXTADDR, adjust */
2430			m->m_data += off;
2431		}
2432	}
2433#endif
2434}
2435
2436/*
2437 * Callback to reclaim resources.  We first let the
2438 * net80211 layer do it's thing, then if we are still
2439 * blocked by a lack of rx dma buffers we walk the ampdu
2440 * reorder q's to reclaim buffers by copying to a system
2441 * cluster.
2442 */
2443static void
2444mwl_node_drain(struct ieee80211_node *ni)
2445{
2446	struct ieee80211com *ic = ni->ni_ic;
2447        struct mwl_softc *sc = ic->ic_ifp->if_softc;
2448	struct mwl_node *mn = MWL_NODE(ni);
2449
2450	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2451	    __func__, ni, ni->ni_vap, mn->mn_staid);
2452
2453	/* NB: call up first to age out ampdu q's */
2454	sc->sc_node_drain(ni);
2455
2456	/* XXX better to not check low water mark? */
2457	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2458	    (ni->ni_flags & IEEE80211_NODE_HT)) {
2459		uint8_t tid;
2460		/*
2461		 * Walk the reorder q and reclaim rx dma buffers by copying
2462		 * the packet contents into clusters.
2463		 */
2464		for (tid = 0; tid < WME_NUM_TID; tid++) {
2465			struct ieee80211_rx_ampdu *rap;
2466
2467			rap = &ni->ni_rx_ampdu[tid];
2468			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2469				continue;
2470			if (rap->rxa_qframes)
2471				mwl_ampdu_rxdma_reclaim(rap);
2472		}
2473	}
2474}
2475
/*
 * Return rssi and noise floor for a node.  rssi comes from the
 * net80211 averaged value; there is no per-node noise estimate
 * wired up yet so a fixed -95 dBm is reported.
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2491
2492/*
2493 * Convert Hardware per-antenna rssi info to common format:
2494 * Let a1, a2, a3 represent the amplitudes per chain
2495 * Let amax represent max[a1, a2, a3]
2496 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2497 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2498 * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2499 * maintain some extra precision.
2500 *
2501 * Values are stored in .5 db format capped at 127.
2502 */
2503static void
2504mwl_node_getmimoinfo(const struct ieee80211_node *ni,
2505	struct ieee80211_mimo_info *mi)
2506{
2507#define	CVT(_dst, _src) do {						\
2508	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
2509	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
2510} while (0)
2511	static const int8_t logdbtbl[32] = {
2512	       0,   0,  24,  38,  48,  56,  62,  68,
2513	      72,  76,  80,  83,  86,  89,  92,  94,
2514	      96,  98, 100, 102, 104, 106, 107, 109,
2515	     110, 112, 113, 115, 116, 117, 118, 119
2516	};
2517	const struct mwl_node *mn = MWL_NODE_CONST(ni);
2518	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
2519	uint32_t rssi_max;
2520
2521	rssi_max = mn->mn_ai.rssi_a;
2522	if (mn->mn_ai.rssi_b > rssi_max)
2523		rssi_max = mn->mn_ai.rssi_b;
2524	if (mn->mn_ai.rssi_c > rssi_max)
2525		rssi_max = mn->mn_ai.rssi_c;
2526
2527	CVT(mi->rssi[0], mn->mn_ai.rssi_a);
2528	CVT(mi->rssi[1], mn->mn_ai.rssi_b);
2529	CVT(mi->rssi[2], mn->mn_ai.rssi_c);
2530
2531	mi->noise[0] = mn->mn_ai.nf_a;
2532	mi->noise[1] = mn->mn_ai.nf_b;
2533	mi->noise[2] = mn->mn_ai.nf_c;
2534#undef CVT
2535}
2536
2537static __inline void *
2538mwl_getrxdma(struct mwl_softc *sc)
2539{
2540	struct mwl_jumbo *buf;
2541	void *data;
2542
2543	/*
2544	 * Allocate from jumbo pool.
2545	 */
2546	MWL_RXFREE_LOCK(sc);
2547	buf = SLIST_FIRST(&sc->sc_rxfree);
2548	if (buf == NULL) {
2549		DPRINTF(sc, MWL_DEBUG_ANY,
2550		    "%s: out of rx dma buffers\n", __func__);
2551		sc->sc_stats.mst_rx_nodmabuf++;
2552		data = NULL;
2553	} else {
2554		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2555		sc->sc_nrxfree--;
2556		data = MWL_JUMBO_BUF2DATA(buf);
2557	}
2558	MWL_RXFREE_UNLOCK(sc);
2559	return data;
2560}
2561
/*
 * Return a rx dma buffer (by its data pointer) to the jumbo free
 * pool; inverse of mwl_getrxdma.
 */
static __inline void
mwl_putrxdma(struct mwl_softc *sc, void *data)
{
	struct mwl_jumbo *buf;

	/* XXX bounds check data */
	MWL_RXFREE_LOCK(sc);
	buf = MWL_JUMBO_DATA2BUF(data);
	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
	sc->sc_nrxfree++;
	MWL_RXFREE_UNLOCK(sc);
}
2574
/*
 * (Re)initialize a rx descriptor for posting to the firmware,
 * acquiring a dma buffer first if the rxbuf has none.  If no buffer
 * is available the descriptor is marked so the firmware skips it and
 * ENOMEM is returned; otherwise the descriptor is handed to the
 * firmware (DRIVER_OWN) and 0 is returned.
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	/* ownership transfer must be the last descriptor write */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2613
/*
 * External mbuf storage free callback for rx jumbo buffers: return
 * the dma buffer to the free pool and, if rx was blocked waiting on
 * buffers, re-enable rx interrupts once the pool refills past the
 * low-water mark.
 */
static int
mwl_ext_free(struct mbuf *m, void *data, void *arg)
{
	struct mwl_softc *sc = arg;

	/* XXX bounds check data */
	mwl_putrxdma(sc, data);
	/*
	 * If we were previously blocked by a lack of rx dma buffers
	 * check if we now have enough to restart rx interrupt handling.
	 * NB: we know we are called at splvm which is above splnet.
	 */
	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
		sc->sc_rxblocked = 0;
		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
	}
	return (EXT_FREE_OK);
}
2632
/*
 * 802.11 BlockAckReq (BAR) control frame header; net80211 has no
 * struct for it so define enough here to size headers in
 * mwl_anyhdrsize.
 */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];	/* frame control */
	u_int8_t	i_dur[2];	/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2640
2641/*
2642 * Like ieee80211_anyhdrsize, but handles BAR frames
2643 * specially so the logic below to piece the 802.11
2644 * header together works.
2645 */
2646static __inline int
2647mwl_anyhdrsize(const void *data)
2648{
2649	const struct ieee80211_frame *wh = data;
2650
2651	if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2652		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2653		case IEEE80211_FC0_SUBTYPE_CTS:
2654		case IEEE80211_FC0_SUBTYPE_ACK:
2655			return sizeof(struct ieee80211_frame_ack);
2656		case IEEE80211_FC0_SUBTYPE_BAR:
2657			return sizeof(struct mwl_frame_bar);
2658		}
2659		return sizeof(struct ieee80211_frame_min);
2660	} else
2661		return ieee80211_hdrsize(data);
2662}
2663
/*
 * Handle a TKIP MIC failure reported by the firmware: locate the
 * sending node from the 802.11 header (which follows a 16-bit
 * prefix in the rx buffer) and notify net80211 so countermeasures
 * can run.  Frames from unknown nodes are silently dropped.
 */
static void
mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
{
	const struct ieee80211_frame *wh;
	struct ieee80211_node *ni;

	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
	if (ni != NULL) {
		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
		ieee80211_free_node(ni);	/* release find_rxnode ref */
	}
}
2677
2678/*
2679 * Convert hardware signal strength to rssi.  The value
2680 * provided by the device has the noise floor added in;
2681 * we need to compensate for this but we don't have that
2682 * so we use a fixed value.
2683 *
2684 * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2685 * offset is already set as part of the initial gain.  This
2686 * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2687 */
2688static __inline int
2689cvtrssi(uint8_t ssi)
2690{
2691	int rssi = (int) ssi + 8;
2692	/* XXX hack guess until we have a real noise floor */
2693	rssi = 2*(87 - rssi);	/* NB: .5 dBm units */
2694	return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi);
2695}
2696
/*
 * Rx deferred processing (taskqueue).  Walk the rx descriptor ring
 * processing up to mwl_rxquota frames: for each frame owned by the
 * driver, attach its dma buffer to an mbuf, reconstruct the 802.11
 * header (the firmware delivers a 4-address header plus payload at
 * a fixed offset), and dispatch it to net80211.  Each descriptor is
 * re-armed with a replacement dma buffer; if the buffer pool runs
 * dry, rx interrupts are disabled until mwl_ext_free refills it.
 */
static void
mwl_rx_proc(void *arg, int npending)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((((const struct ieee80211_frame *)wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct mwl_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_qosframe_addr4 *wh4;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX */
	/* resume where the previous pass stopped */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* stop at the first descriptor still owned by the dma engine */
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			ifp->if_ierrors++;
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free,
		    data, sc, 0, EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		m->m_pkthdr.rcvif = ifp;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh)) {
			/* QoS control comes from the descriptor, not the frame */
			if (IEEE80211_DIR_DSTODS(wh)) {
				wh4 = mtod(m,
				    struct ieee80211_qosframe_addr4*);
				*(uint16_t *)wh4->i_qos = ds->QosCtrl;
			} else {
				*(uint16_t *)wh->i_qos = ds->QosCtrl;
			}
		}
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED |
		    IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		ifp->if_ipackets++;

		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	/* remember where to resume on the next pass */
	sc->sc_rxnext = bf;

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
	    !IFQ_IS_EMPTY(&ifp->if_snd)) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(ifp);
	}
#undef IEEE80211_DIR_DSTODS
}
2933
/*
 * Initialize a tx queue: record the h/w queue number and link every
 * descriptor's pPhysNext to the next buffer's descriptors, wrapping
 * the last back to the first so the firmware sees a circular ring.
 */
static void
mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
{
	struct mwl_txbuf *bf, *bn;
	struct mwl_txdesc *ds;

	MWL_TXQ_LOCK_INIT(sc, txq);
	txq->qnum = qnum;
	txq->txpri = 0;	/* XXX */
#if 0
	/* NB: q setup by mwl_txdma_setup XXX */
	STAILQ_INIT(&txq->free);
#endif
	STAILQ_FOREACH(bf, &txq->free, bf_list) {
		bf->bf_txq = txq;

		ds = bf->bf_desc;
		bn = STAILQ_NEXT(bf, bf_list);
		if (bn == NULL)		/* last entry wraps to the head */
			bn = STAILQ_FIRST(&txq->free);
		ds->pPhysNext = htole32(bn->bf_daddr);
	}
	STAILQ_INIT(&txq->active);
}
2958
2959/*
2960 * Setup a hardware data transmit queue for the specified
2961 * access control.  We record the mapping from ac's
2962 * to h/w queues for use by mwl_tx_start.
2963 */
2964static int
2965mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2966{
2967#define	N(a)	(sizeof(a)/sizeof(a[0]))
2968	struct mwl_txq *txq;
2969
2970	if (ac >= N(sc->sc_ac2q)) {
2971		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2972			ac, N(sc->sc_ac2q));
2973		return 0;
2974	}
2975	if (mvtype >= MWL_NUM_TX_QUEUES) {
2976		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2977			mvtype, MWL_NUM_TX_QUEUES);
2978		return 0;
2979	}
2980	txq = &sc->sc_txq[mvtype];
2981	mwl_txq_init(sc, txq, mvtype);
2982	sc->sc_ac2q[ac] = txq;
2983	return 1;
2984#undef N
2985}
2986
2987/*
2988 * Update WME parameters for a transmit queue.
2989 */
2990static int
2991mwl_txq_update(struct mwl_softc *sc, int ac)
2992{
2993#define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
2994	struct ifnet *ifp = sc->sc_ifp;
2995	struct ieee80211com *ic = ifp->if_l2com;
2996	struct mwl_txq *txq = sc->sc_ac2q[ac];
2997	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
2998	struct mwl_hal *mh = sc->sc_mh;
2999	int aifs, cwmin, cwmax, txoplim;
3000
3001	aifs = wmep->wmep_aifsn;
3002	/* XXX in sta mode need to pass log values for cwmin/max */
3003	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
3004	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
3005	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */
3006
3007	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
3008		device_printf(sc->sc_dev, "unable to update hardware queue "
3009			"parameters for %s traffic!\n",
3010			ieee80211_wme_acnames[ac]);
3011		return 0;
3012	}
3013	return 1;
3014#undef MWL_EXPONENT_TO_VALUE
3015}
3016
3017/*
3018 * Callback from the 802.11 layer to update WME parameters.
3019 */
3020static int
3021mwl_wme_update(struct ieee80211com *ic)
3022{
3023	struct mwl_softc *sc = ic->ic_ifp->if_softc;
3024
3025	return !mwl_txq_update(sc, WME_AC_BE) ||
3026	    !mwl_txq_update(sc, WME_AC_BK) ||
3027	    !mwl_txq_update(sc, WME_AC_VI) ||
3028	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
3029}
3030
3031/*
3032 * Reclaim resources for a setup queue.
3033 */
3034static void
3035mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
3036{
3037	/* XXX hal work? */
3038	MWL_TXQ_LOCK_DESTROY(txq);
3039}
3040
3041/*
3042 * Reclaim all tx queue resources.
3043 */
3044static void
3045mwl_tx_cleanup(struct mwl_softc *sc)
3046{
3047	int i;
3048
3049	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3050		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
3051}
3052
/*
 * Map an outbound mbuf chain for DMA, linearizing it to at most
 * MWL_TXDESC segments when the initial load yields too many.
 * On success bf is loaded (bf_segs/bf_nseg/bf_m valid, map synced
 * PREWRITE) and 0 is returned; on any failure the mbuf chain has
 * been freed and an errno is returned -- the caller must not touch
 * m0 after an error.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		/* NB: defer handling; the EFBIG test below linearizes */
		bf->bf_nseg = MWL_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_NOWAIT);
#endif
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* reload the map against the (possibly new) chain */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3114
/*
 * Map an IEEE legacy rate code to the hardware rate index;
 * this is the inverse of mwl_cvtlegacyrix.  Unknown rate
 * codes map to index 0.
 */
static __inline int
mwl_cvtlegacyrate(int rate)
{
	static const int legacyrates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	int i;

	for (i = 0; i < (int)(sizeof(legacyrates)/sizeof(legacyrates[0])); i++)
		if (legacyrates[i] == rate)
			return i;
	return 0;
}
3135
3136/*
3137 * Calculate fixed tx rate information per client state;
3138 * this value is suitable for writing to the Format field
3139 * of a tx descriptor.
3140 */
3141static uint16_t
3142mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
3143{
3144	uint16_t fmt;
3145
3146	fmt = SM(3, EAGLE_TXD_ANTENNA)
3147	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
3148		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
3149	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
3150		fmt |= EAGLE_TXD_FORMAT_HT
3151		    /* NB: 0x80 implicitly stripped from ucastrate */
3152		    | SM(rate, EAGLE_TXD_RATE);
3153		/* XXX short/long GI may be wrong; re-check */
3154		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
3155			fmt |= EAGLE_TXD_CHW_40
3156			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
3157			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3158		} else {
3159			fmt |= EAGLE_TXD_CHW_20
3160			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
3161			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3162		}
3163	} else {			/* legacy rate */
3164		fmt |= EAGLE_TXD_FORMAT_LEGACY
3165		    | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
3166		    | EAGLE_TXD_CHW_20
3167		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
3168		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
3169			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
3170	}
3171	return fmt;
3172}
3173
/*
 * Format and dispatch one frame: apply crypto encap if needed,
 * prepend the firmware tx record (2-byte fw length + 4-address
 * header, struct mwltxrec), DMA-map the mbuf, fill in the tx
 * descriptor, and hand the frame to the firmware.  Returns 0 on
 * success; on error the mbuf has been freed and an errno is
 * returned (caller retains its references to bf and ni).
 */
static int
mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
    struct mbuf *m0)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, iswep, ismcast;
	int hdrlen, copyhdrlen, pktlen;
	struct mwl_txdesc *ds;
	struct mwl_txq *txq;
	struct ieee80211_frame *wh;
	struct mwltxrec *tr;
	struct mwl_node *mn;
	uint16_t qos;
#if MWL_TXDESC > 1
	int i;
#endif

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	copyhdrlen = hdrlen;
	pktlen = m0->m_pkthdr.len;
	/*
	 * Extract the QoS control field (already little-endian in the
	 * frame).  NB: copyhdrlen is computed here but appears unused
	 * below -- retained for now.
	 */
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		if (IEEE80211_DIR_DSTODS(wh)) {
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
			copyhdrlen -= sizeof(qos);
		} else
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe *) wh)->i_qos);
	} else
		qos = 0;

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 *
		 * NB: we do this even though the firmware will ignore
		 *     what we've done for WEP and TKIP as we need the
		 *     ExtIV filled in for CCMP and this also adjusts
		 *     the headers which simplifies our work below.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			m_freem(m0);
			return EIO;
		}
		/*
		 * Adjust the packet length for the crypto additions
		 * done during encap and any other bits that the f/w
		 * will add later on.
		 */
		cip = k->wk_cipher;
		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;	/* XXX */
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
#if 0
		sc->sc_tx_th.wt_rate = ds->DataRate;
#endif
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}
	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 * We are assured the mbuf has sufficient headroom to
	 * prepend in-place by the setup of ic_headroom in
	 * mwl_attach.
	 */
	if (hdrlen < sizeof(struct mwltxrec)) {
		const int space = sizeof(struct mwltxrec) - hdrlen;
		if (M_LEADINGSPACE(m0) < space) {
			/* NB: should never happen */
			device_printf(sc->sc_dev,
			    "not enough headroom, need %d found %zd, "
			    "m_flags 0x%x m_len %d\n",
			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
			ieee80211_dump_pkt(ic,
			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
			m_freem(m0);
			sc->sc_stats.mst_tx_noheadroom++;
			return EIO;
		}
		/*
		 * NB: leading space was verified above, so M_PREPEND
		 * should adjust in place rather than allocate; wh stays
		 * valid for the copy below (TODO confirm for all paths).
		 */
		M_PREPEND(m0, space, M_NOWAIT);
	}
	tr = mtod(m0, struct mwltxrec *);
	if (wh != (struct ieee80211_frame *) &tr->wh)
		ovbcopy(wh, &tr->wh, hdrlen);
	/*
	 * Note: the "firmware length" is actually the length
	 * of the fully formed "802.11 payload".  That is, it's
	 * everything except for the 802.11 header.  In particular
	 * this includes all crypto material including the MIC!
	 */
	tr->fwlen = htole16(pktlen - hdrlen);

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = mwl_tx_dmasetup(sc, bf, m0);
	if (error != 0) {
		/* NB: stat collected in mwl_tx_dmasetup */
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: unable to setup dma\n", __func__);
		return error;
	}
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	tr = mtod(m0, struct mwltxrec *);
	wh = (struct ieee80211_frame *)&tr->wh;

	/*
	 * Formulate tx descriptor.
	 */
	ds = bf->bf_desc;
	txq = bf->bf_txq;

	ds->QosCtrl = qos;			/* NB: already little-endian */
#if MWL_TXDESC == 1
	/*
	 * NB: multiframes should be zero because the descriptors
	 *     are initialized to zero.  This should handle the case
	 *     where the driver is built with MWL_TXDESC=1 but we are
	 *     using firmware with multi-segment support.
	 */
	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
#else
	ds->multiframes = htole32(bf->bf_nseg);
	ds->PktLen = htole16(m0->m_pkthdr.len);
	for (i = 0; i < bf->bf_nseg; i++) {
		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
	}
#endif
	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
	ds->Format = 0;
	ds->pad = 0;
	ds->ack_wcb_addr = 0;

	mn = MWL_NODE(ni);
	/*
	 * Select transmit rate.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		sc->sc_stats.mst_tx_mgmt++;
		/* fall thru... */
	case IEEE80211_FC0_TYPE_CTL:
		/* NB: assign to BE q to avoid bursting */
		ds->TxPriority = MWL_WME_AC_BE;
		break;
	case IEEE80211_FC0_TYPE_DATA:
		if (!ismcast) {
			const struct ieee80211_txparam *tp = ni->ni_txparms;
			/*
			 * EAPOL frames get forced to a fixed rate and w/o
			 * aggregation; otherwise check for any fixed rate
			 * for the client (may depend on association state).
			 */
			if (m0->m_flags & M_EAPOL) {
				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
				ds->Format = mvp->mv_eapolformat;
				ds->pad = htole16(
				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
				/* XXX pre-calculate per node */
				ds->Format = htole16(
				    mwl_calcformat(tp->ucastrate, ni));
				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
			}
			/*
			 * Route the frame to the tx queue of any BA
			 * stream matching its QoS control field, else
			 * to the default queue for this AC.
			 */
			/* NB: EAPOL frames will never have qos set */
			if (qos == 0)
				ds->TxPriority = txq->qnum;
#if MWL_MAXBA > 3
			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
				ds->TxPriority = mn->mn_ba[3].txq;
#endif
#if MWL_MAXBA > 2
			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
				ds->TxPriority = mn->mn_ba[2].txq;
#endif
#if MWL_MAXBA > 1
			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
				ds->TxPriority = mn->mn_ba[1].txq;
#endif
#if MWL_MAXBA > 0
			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
				ds->TxPriority = mn->mn_ba[0].txq;
#endif
			else
				ds->TxPriority = txq->qnum;
		} else
			ds->TxPriority = txq->qnum;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		sc->sc_stats.mst_tx_badframetype++;
		m_freem(m0);
		return EIO;
	}

	if (IFF_DUMPPKTS_XMIT(sc))
		ieee80211_dump_pkt(ic,
		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);

	/* hand the frame to the firmware: mark owned and queue it */
	MWL_TXQ_LOCK(txq);
	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ifp->if_opackets++;
	sc->sc_tx_timer = 5;
	MWL_TXQ_UNLOCK(txq);

	return 0;
#undef	IEEE80211_DIR_DSTODS
}
3422
/*
 * Map a hardware rate index back to the IEEE legacy rate code;
 * inverse of mwl_cvtlegacyrate.  Out-of-range indices (including
 * negative ones) map to 0, matching the bounds-checked table the
 * original used.
 */
static __inline int
mwl_cvtlegacyrix(int rix)
{
	switch (rix) {
	case 0:  return 2;
	case 1:  return 4;
	case 2:  return 11;
	case 3:  return 22;
	case 4:  return 44;
	case 5:  return 12;
	case 6:  return 18;
	case 7:  return 24;
	case 8:  return 36;
	case 9:  return 48;
	case 10: return 72;
	case 11: return 96;
	case 12: return 108;
	default: return 0;
	}
}
3432
3433/*
3434 * Process completed xmit descriptors from the specified queue.
3435 */
3436static int
3437mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
3438{
3439#define	EAGLE_TXD_STATUS_MCAST \
3440	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
3441	struct ifnet *ifp = sc->sc_ifp;
3442	struct ieee80211com *ic = ifp->if_l2com;
3443	struct mwl_txbuf *bf;
3444	struct mwl_txdesc *ds;
3445	struct ieee80211_node *ni;
3446	struct mwl_node *an;
3447	int nreaped;
3448	uint32_t status;
3449
3450	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
3451	for (nreaped = 0;; nreaped++) {
3452		MWL_TXQ_LOCK(txq);
3453		bf = STAILQ_FIRST(&txq->active);
3454		if (bf == NULL) {
3455			MWL_TXQ_UNLOCK(txq);
3456			break;
3457		}
3458		ds = bf->bf_desc;
3459		MWL_TXDESC_SYNC(txq, ds,
3460		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3461		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
3462			MWL_TXQ_UNLOCK(txq);
3463			break;
3464		}
3465		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
3466		MWL_TXQ_UNLOCK(txq);
3467
3468#ifdef MWL_DEBUG
3469		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
3470			mwl_printtxbuf(bf, txq->qnum, nreaped);
3471#endif
3472		ni = bf->bf_node;
3473		if (ni != NULL) {
3474			an = MWL_NODE(ni);
3475			status = le32toh(ds->Status);
3476			if (status & EAGLE_TXD_STATUS_OK) {
3477				uint16_t Format = le16toh(ds->Format);
3478				uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);
3479
3480				sc->sc_stats.mst_ant_tx[txant]++;
3481				if (status & EAGLE_TXD_STATUS_OK_RETRY)
3482					sc->sc_stats.mst_tx_retries++;
3483				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
3484					sc->sc_stats.mst_tx_mretries++;
3485				if (txq->qnum >= MWL_WME_AC_VO)
3486					ic->ic_wme.wme_hipri_traffic++;
3487				ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
3488				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
3489					ni->ni_txrate = mwl_cvtlegacyrix(
3490					    ni->ni_txrate);
3491				} else
3492					ni->ni_txrate |= IEEE80211_RATE_MCS;
3493				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
3494			} else {
3495				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
3496					sc->sc_stats.mst_tx_linkerror++;
3497				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
3498					sc->sc_stats.mst_tx_xretries++;
3499				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
3500					sc->sc_stats.mst_tx_aging++;
3501				if (bf->bf_m->m_flags & M_FF)
3502					sc->sc_stats.mst_ff_txerr++;
3503			}
3504			/*
3505			 * Do any tx complete callback.  Note this must
3506			 * be done before releasing the node reference.
3507			 * XXX no way to figure out if frame was ACK'd
3508			 */
3509			if (bf->bf_m->m_flags & M_TXCB) {
3510				/* XXX strip fw len in case header inspected */
3511				m_adj(bf->bf_m, sizeof(uint16_t));
3512				ieee80211_process_callback(ni, bf->bf_m,
3513					(status & EAGLE_TXD_STATUS_OK) == 0);
3514			}
3515			/*
3516			 * Reclaim reference to node.
3517			 *
3518			 * NB: the node may be reclaimed here if, for example
3519			 *     this is a DEAUTH message that was sent and the
3520			 *     node was timed out due to inactivity.
3521			 */
3522			ieee80211_free_node(ni);
3523		}
3524		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);
3525
3526		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3527		    BUS_DMASYNC_POSTWRITE);
3528		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3529		m_freem(bf->bf_m);
3530
3531		mwl_puttxbuf_tail(txq, bf);
3532	}
3533	return nreaped;
3534#undef EAGLE_TXD_STATUS_MCAST
3535}
3536
3537/*
3538 * Deferred processing of transmit interrupt; special-cased
3539 * for four hardware queues, 0-3.
3540 */
3541static void
3542mwl_tx_proc(void *arg, int npending)
3543{
3544	struct mwl_softc *sc = arg;
3545	struct ifnet *ifp = sc->sc_ifp;
3546	int nreaped;
3547
3548	/*
3549	 * Process each active queue.
3550	 */
3551	nreaped = 0;
3552	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3553		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3554	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3555		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3556	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3557		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3558	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3559		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3560
3561	if (nreaped != 0) {
3562		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3563		sc->sc_tx_timer = 0;
3564		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
3565			/* NB: kick fw; the tx thread may have been preempted */
3566			mwl_hal_txstart(sc->sc_mh, 0);
3567			mwl_start(ifp);
3568		}
3569	}
3570}
3571
/*
 * Discard all frames pending on the given tx queue, reclaiming
 * node references and mbufs and returning buffers to the free
 * list.  Frame contents are dropped, not completed.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		/* pop the head under the lock, process it unlocked */
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ifnet *ifp = sc->sc_ifp;
			struct ieee80211com *ic = ifp->if_l2com;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3616
3617/*
3618 * Drain the transmit queues and reclaim resources.
3619 */
3620static void
3621mwl_draintxq(struct mwl_softc *sc)
3622{
3623	struct ifnet *ifp = sc->sc_ifp;
3624	int i;
3625
3626	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3627		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3628	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3629	sc->sc_tx_timer = 0;
3630}
3631
3632#ifdef MWL_DIAGAPI
3633/*
3634 * Reset the transmit queues to a pristine state after a fw download.
3635 */
3636static void
3637mwl_resettxq(struct mwl_softc *sc)
3638{
3639	int i;
3640
3641	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3642		mwl_txq_reset(sc, &sc->sc_txq[i]);
3643}
3644#endif /* MWL_DIAGAPI */
3645
3646/*
3647 * Clear the transmit queues of any frames submitted for the
3648 * specified vap.  This is done when the vap is deleted so we
3649 * don't potentially reference the vap after it is gone.
3650 * Note we cannot remove the frames; we only reclaim the node
3651 * reference.
3652 */
3653static void
3654mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3655{
3656	struct mwl_txq *txq;
3657	struct mwl_txbuf *bf;
3658	int i;
3659
3660	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3661		txq = &sc->sc_txq[i];
3662		MWL_TXQ_LOCK(txq);
3663		STAILQ_FOREACH(bf, &txq->active, bf_list) {
3664			struct ieee80211_node *ni = bf->bf_node;
3665			if (ni != NULL && ni->ni_vap == vap) {
3666				bf->bf_node = NULL;
3667				ieee80211_free_node(ni);
3668			}
3669		}
3670		MWL_TXQ_UNLOCK(txq);
3671	}
3672}
3673
3674static int
3675mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
3676	const uint8_t *frm, const uint8_t *efrm)
3677{
3678	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3679	const struct ieee80211_action *ia;
3680
3681	ia = (const struct ieee80211_action *) frm;
3682	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3683	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3684		const struct ieee80211_action_ht_mimopowersave *mps =
3685		    (const struct ieee80211_action_ht_mimopowersave *) ia;
3686
3687		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3688		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3689		    MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
3690		return 0;
3691	} else
3692		return sc->sc_recv_action(ni, wh, frm, efrm);
3693}
3694
/*
 * net80211 ADDBA request hook: reserve a hardware BA stream slot
 * for this tid before letting net80211 send the request.  Returns
 * 0 (no aggregation) when no slot or fw stream is available,
 * otherwise chains to the saved net80211 handler.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3766
/*
 * net80211 ADDBA response hook: on success commit the previously
 * reserved BA stream to the firmware; on failure (or NAK from the
 * peer) release the reservation.  Always chains to the saved
 * net80211 handler except when the stream state is inconsistent.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, TID %d\n",
		    __func__, tap->txa_tid);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d TID %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_tid, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3833
3834static void
3835mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3836{
3837	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3838	struct mwl_bastate *bas;
3839
3840	bas = tap->txa_private;
3841	if (bas != NULL) {
3842		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3843		    __func__, bas->bastream);
3844		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3845		mwl_bastream_free(bas);
3846		tap->txa_private = NULL;
3847	}
3848	sc->sc_addba_stop(ni, tap);
3849}
3850
3851/*
3852 * Setup the rx data structures.  This should only be
3853 * done once or we may get out of sync with the firmware.
3854 */
3855static int
3856mwl_startrecv(struct mwl_softc *sc)
3857{
3858	if (!sc->sc_recvsetup) {
3859		struct mwl_rxbuf *bf, *prev;
3860		struct mwl_rxdesc *ds;
3861
3862		prev = NULL;
3863		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
3864			int error = mwl_rxbuf_init(sc, bf);
3865			if (error != 0) {
3866				DPRINTF(sc, MWL_DEBUG_RECV,
3867					"%s: mwl_rxbuf_init failed %d\n",
3868					__func__, error);
3869				return error;
3870			}
3871			if (prev != NULL) {
3872				ds = prev->bf_desc;
3873				ds->pPhysNext = htole32(bf->bf_daddr);
3874			}
3875			prev = bf;
3876		}
3877		if (prev != NULL) {
3878			ds = prev->bf_desc;
3879			ds->pPhysNext =
3880			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
3881		}
3882		sc->sc_recvsetup = 1;
3883	}
3884	mwl_mode_init(sc);		/* set filters, etc. */
3885	return 0;
3886}
3887
3888static MWL_HAL_APMODE
3889mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3890{
3891	MWL_HAL_APMODE mode;
3892
3893	if (IEEE80211_IS_CHAN_HT(chan)) {
3894		if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3895			mode = AP_MODE_N_ONLY;
3896		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3897			mode = AP_MODE_AandN;
3898		else if (vap->iv_flags & IEEE80211_F_PUREG)
3899			mode = AP_MODE_GandN;
3900		else
3901			mode = AP_MODE_BandGandN;
3902	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3903		if (vap->iv_flags & IEEE80211_F_PUREG)
3904			mode = AP_MODE_G_ONLY;
3905		else
3906			mode = AP_MODE_MIXED;
3907	} else if (IEEE80211_IS_CHAN_B(chan))
3908		mode = AP_MODE_B_ONLY;
3909	else if (IEEE80211_IS_CHAN_A(chan))
3910		mode = AP_MODE_A_ONLY;
3911	else
3912		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3913	return mode;
3914}
3915
3916static int
3917mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
3918{
3919	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
3920	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
3921}
3922
3923/*
3924 * Set/change channels.
3925 */
3926static int
3927mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
3928{
3929	struct mwl_hal *mh = sc->sc_mh;
3930	struct ifnet *ifp = sc->sc_ifp;
3931	struct ieee80211com *ic = ifp->if_l2com;
3932	MWL_HAL_CHANNEL hchan;
3933	int maxtxpow;
3934
3935	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
3936	    __func__, chan->ic_freq, chan->ic_flags);
3937
3938	/*
3939	 * Convert to a HAL channel description with
3940	 * the flags constrained to reflect the current
3941	 * operating mode.
3942	 */
3943	mwl_mapchan(&hchan, chan);
3944	mwl_hal_intrset(mh, 0);		/* disable interrupts */
3945#if 0
3946	mwl_draintxq(sc);		/* clear pending tx frames */
3947#endif
3948	mwl_hal_setchannel(mh, &hchan);
3949	/*
3950	 * Tx power is cap'd by the regulatory setting and
3951	 * possibly a user-set limit.  We pass the min of
3952	 * these to the hal to apply them to the cal data
3953	 * for this channel.
3954	 * XXX min bound?
3955	 */
3956	maxtxpow = 2*chan->ic_maxregpower;
3957	if (maxtxpow > ic->ic_txpowlimit)
3958		maxtxpow = ic->ic_txpowlimit;
3959	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
3960	/* NB: potentially change mcast/mgt rates */
3961	mwl_setcurchanrates(sc);
3962
3963	/*
3964	 * Update internal state.
3965	 */
3966	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
3967	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
3968	if (IEEE80211_IS_CHAN_A(chan)) {
3969		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
3970		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
3971	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3972		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
3973		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
3974	} else {
3975		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
3976		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
3977	}
3978	sc->sc_curchan = hchan;
3979	mwl_hal_intrset(mh, sc->sc_imask);
3980
3981	return 0;
3982}
3983
3984static void
3985mwl_scan_start(struct ieee80211com *ic)
3986{
3987	struct ifnet *ifp = ic->ic_ifp;
3988	struct mwl_softc *sc = ifp->if_softc;
3989
3990	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
3991}
3992
3993static void
3994mwl_scan_end(struct ieee80211com *ic)
3995{
3996	struct ifnet *ifp = ic->ic_ifp;
3997	struct mwl_softc *sc = ifp->if_softc;
3998
3999	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
4000}
4001
4002static void
4003mwl_set_channel(struct ieee80211com *ic)
4004{
4005	struct ifnet *ifp = ic->ic_ifp;
4006	struct mwl_softc *sc = ifp->if_softc;
4007
4008	(void) mwl_chan_set(sc, ic->ic_curchan);
4009}
4010
4011/*
4012 * Handle a channel switch request.  We inform the firmware
4013 * and mark the global state to suppress various actions.
4014 * NB: we issue only one request to the fw; we may be called
4015 * multiple times if there are multiple vap's.
4016 */
4017static void
4018mwl_startcsa(struct ieee80211vap *vap)
4019{
4020	struct ieee80211com *ic = vap->iv_ic;
4021	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4022	MWL_HAL_CHANNEL hchan;
4023
4024	if (sc->sc_csapending)
4025		return;
4026
4027	mwl_mapchan(&hchan, ic->ic_csa_newchan);
4028	/* 1 =>'s quiet channel */
4029	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
4030	sc->sc_csapending = 1;
4031}
4032
4033/*
4034 * Plumb any static WEP key for the station.  This is
4035 * necessary as we must propagate the key from the
4036 * global key table of the vap to each sta db entry.
4037 */
4038static void
4039mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4040{
4041	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
4042		IEEE80211_F_PRIVACY &&
4043	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
4044	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
4045		(void) mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac);
4046}
4047
/*
 * Create a firmware station db entry for a peer node.
 * The WME info byte (if the peer supplied a WME ie) and the
 * QOS/HT node flags are passed through to the firmware.
 * Returns 0 on success or the mwl_hal_newstation error code.
 */
static int
mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_hal_vap *hvap;
	int error;

	if (vap->iv_opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vap's do not have a f/w vap; instead they piggyback
		 * on an AP vap and we must install the sta db entry and
		 * crypto state using that AP's handle (the WDS vap has none).
		 */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	} else
		hvap = MWL_VAP(vap)->mv_hvap;
	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
	    aid, staid, pi,
	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
	if (error == 0) {
		/*
		 * Setup security for this station.  For sta mode this is
		 * needed even though do the same thing on transition to
		 * AUTH state because the call to mwl_hal_newstation
		 * clobbers the crypto state we setup.
		 */
		mwl_setanywepkey(vap, ni->ni_macaddr);
	}
	return error;
#undef WME
}
4081
4082static void
4083mwl_setglobalkeys(struct ieee80211vap *vap)
4084{
4085	struct ieee80211_key *wk;
4086
4087	wk = &vap->iv_nw_keys[0];
4088	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
4089		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
4090			(void) mwl_key_set(vap, wk, vap->iv_myaddr);
4091}
4092
4093/*
4094 * Convert a legacy rate set to a firmware bitmask.
4095 */
4096static uint32_t
4097get_rate_bitmap(const struct ieee80211_rateset *rs)
4098{
4099	uint32_t rates;
4100	int i;
4101
4102	rates = 0;
4103	for (i = 0; i < rs->rs_nrates; i++)
4104		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
4105		case 2:	  rates |= 0x001; break;
4106		case 4:	  rates |= 0x002; break;
4107		case 11:  rates |= 0x004; break;
4108		case 22:  rates |= 0x008; break;
4109		case 44:  rates |= 0x010; break;
4110		case 12:  rates |= 0x020; break;
4111		case 18:  rates |= 0x040; break;
4112		case 24:  rates |= 0x080; break;
4113		case 36:  rates |= 0x100; break;
4114		case 48:  rates |= 0x200; break;
4115		case 72:  rates |= 0x400; break;
4116		case 96:  rates |= 0x800; break;
4117		case 108: rates |= 0x1000; break;
4118		}
4119	return rates;
4120}
4121
4122/*
4123 * Construct an HT firmware bitmask from an HT rate set.
4124 */
4125static uint32_t
4126get_htrate_bitmap(const struct ieee80211_htrateset *rs)
4127{
4128	uint32_t rates;
4129	int i;
4130
4131	rates = 0;
4132	for (i = 0; i < rs->rs_nrates; i++) {
4133		if (rs->rs_rates[i] < 16)
4134			rates |= 1<<rs->rs_rates[i];
4135	}
4136	return rates;
4137}
4138
4139/*
4140 * Craft station database entry for station.
4141 * NB: use host byte order here, the hal handles byte swapping.
4142 */
4143static MWL_HAL_PEERINFO *
4144mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
4145{
4146	const struct ieee80211vap *vap = ni->ni_vap;
4147
4148	memset(pi, 0, sizeof(*pi));
4149	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
4150	pi->CapInfo = ni->ni_capinfo;
4151	if (ni->ni_flags & IEEE80211_NODE_HT) {
4152		/* HT capabilities, etc */
4153		pi->HTCapabilitiesInfo = ni->ni_htcap;
4154		/* XXX pi.HTCapabilitiesInfo */
4155	        pi->MacHTParamInfo = ni->ni_htparam;
4156		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
4157		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
4158		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
4159		pi->AddHtInfo.OpMode = ni->ni_htopmode;
4160		pi->AddHtInfo.stbc = ni->ni_htstbc;
4161
4162		/* constrain according to local configuration */
4163		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
4164			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
4165		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
4166			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
4167		if (ni->ni_chw != 40)
4168			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
4169	}
4170	return pi;
4171}
4172
4173/*
4174 * Re-create the local sta db entry for a vap to ensure
4175 * up to date WME state is pushed to the firmware.  Because
4176 * this resets crypto state this must be followed by a
4177 * reload of any keys in the global key table.
4178 */
4179static int
4180mwl_localstadb(struct ieee80211vap *vap)
4181{
4182#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
4183	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
4184	struct ieee80211_node *bss;
4185	MWL_HAL_PEERINFO pi;
4186	int error;
4187
4188	switch (vap->iv_opmode) {
4189	case IEEE80211_M_STA:
4190		bss = vap->iv_bss;
4191		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
4192		    vap->iv_state == IEEE80211_S_RUN ?
4193			mkpeerinfo(&pi, bss) : NULL,
4194		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
4195		    bss->ni_ies.wme_ie != NULL ?
4196			WME(bss->ni_ies.wme_ie)->wme_info : 0);
4197		if (error == 0)
4198			mwl_setglobalkeys(vap);
4199		break;
4200	case IEEE80211_M_HOSTAP:
4201	case IEEE80211_M_MBSS:
4202		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
4203		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
4204		if (error == 0)
4205			mwl_setglobalkeys(vap);
4206		break;
4207	default:
4208		error = 0;
4209		break;
4210	}
4211	return error;
4212#undef WME
4213}
4214
/*
 * 802.11 state machine hook.  Work is done in three phases:
 * pre-transition firmware setup, the net80211 parent method,
 * then post-transition work that needs up-to-date vap state.
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    vap->iv_ifp->if_xname, __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	/* NB: the age timer is restarted below if we reach RUN */
	callout_stop(&sc->sc_timer);
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    vap->iv_ifp->if_xname, __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    vap->iv_ifp->if_xname, __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			/* first DWDS sta vap to reach RUN enables fw DWDS */
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    vap->iv_ifp->if_xname, __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		/*
		 * NOTE(review): this decrement runs for ANY transition
		 * other than RUN/SLEEP on a DWDS vap, not just RUN->down;
		 * looks like it can under-count vs. the increment above —
		 * confirm against the increment path before changing.
		 */
		mwl_hal_setdwds(mh, 0);
bad:
	/* NB: reached directly on successful non-RUN transitions too */
	return error;
}
4368
4369/*
4370 * Manage station id's; these are separate from AID's
4371 * as AID's may have values out of the range of possible
4372 * station id's acceptable to the firmware.
4373 */
4374static int
4375allocstaid(struct mwl_softc *sc, int aid)
4376{
4377	int staid;
4378
4379	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
4380		/* NB: don't use 0 */
4381		for (staid = 1; staid < MWL_MAXSTAID; staid++)
4382			if (isclr(sc->sc_staid, staid))
4383				break;
4384	} else
4385		staid = aid;
4386	setbit(sc->sc_staid, staid);
4387	return staid;
4388}
4389
4390static void
4391delstaid(struct mwl_softc *sc, int staid)
4392{
4393	clrbit(sc->sc_staid, staid);
4394}
4395
4396/*
4397 * Setup driver-specific state for a newly associated node.
4398 * Note that we're called also on a re-associate, the isnew
4399 * param tells us if this is the first time or not.
4400 */
4401static void
4402mwl_newassoc(struct ieee80211_node *ni, int isnew)
4403{
4404	struct ieee80211vap *vap = ni->ni_vap;
4405        struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4406	struct mwl_node *mn = MWL_NODE(ni);
4407	MWL_HAL_PEERINFO pi;
4408	uint16_t aid;
4409	int error;
4410
4411	aid = IEEE80211_AID(ni->ni_associd);
4412	if (isnew) {
4413		mn->mn_staid = allocstaid(sc, aid);
4414		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4415	} else {
4416		mn = MWL_NODE(ni);
4417		/* XXX reset BA stream? */
4418	}
4419	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4420	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4421	error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4422	if (error != 0) {
4423		DPRINTF(sc, MWL_DEBUG_NODE,
4424		    "%s: error %d creating sta db entry\n",
4425		    __func__, error);
4426		/* XXX how to deal with error? */
4427	}
4428}
4429
4430/*
4431 * Periodically poke the firmware to age out station state
4432 * (power save queues, pending tx aggregates).
4433 */
4434static void
4435mwl_agestations(void *arg)
4436{
4437	struct mwl_softc *sc = arg;
4438
4439	mwl_hal_setkeepalive(sc->sc_mh);
4440	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
4441		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
4442}
4443
4444static const struct mwl_hal_channel *
4445findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4446{
4447	int i;
4448
4449	for (i = 0; i < ci->nchannels; i++) {
4450		const struct mwl_hal_channel *hc = &ci->channels[i];
4451		if (hc->ieee == ieee)
4452			return hc;
4453	}
4454	return NULL;
4455}
4456
4457static int
4458mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
4459	int nchan, struct ieee80211_channel chans[])
4460{
4461	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4462	struct mwl_hal *mh = sc->sc_mh;
4463	const MWL_HAL_CHANNELINFO *ci;
4464	int i;
4465
4466	for (i = 0; i < nchan; i++) {
4467		struct ieee80211_channel *c = &chans[i];
4468		const struct mwl_hal_channel *hc;
4469
4470		if (IEEE80211_IS_CHAN_2GHZ(c)) {
4471			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
4472			    IEEE80211_IS_CHAN_HT40(c) ?
4473				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4474		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4475			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
4476			    IEEE80211_IS_CHAN_HT40(c) ?
4477				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4478		} else {
4479			if_printf(ic->ic_ifp,
4480			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
4481			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
4482			return EINVAL;
4483		}
4484		/*
4485		 * Verify channel has cal data and cap tx power.
4486		 */
4487		hc = findhalchannel(ci, c->ic_ieee);
4488		if (hc != NULL) {
4489			if (c->ic_maxpower > 2*hc->maxTxPow)
4490				c->ic_maxpower = 2*hc->maxTxPow;
4491			goto next;
4492		}
4493		if (IEEE80211_IS_CHAN_HT40(c)) {
4494			/*
4495			 * Look for the extension channel since the
4496			 * hal table only has the primary channel.
4497			 */
4498			hc = findhalchannel(ci, c->ic_extieee);
4499			if (hc != NULL) {
4500				if (c->ic_maxpower > 2*hc->maxTxPow)
4501					c->ic_maxpower = 2*hc->maxTxPow;
4502				goto next;
4503			}
4504		}
4505		if_printf(ic->ic_ifp,
4506		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
4507		    __func__, c->ic_ieee, c->ic_extieee,
4508		    c->ic_freq, c->ic_flags);
4509		return EINVAL;
4510	next:
4511		;
4512	}
4513	return 0;
4514}
4515
4516#define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4517#define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4518
4519static void
4520addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow)
4521{
4522	c->ic_freq = freq;
4523	c->ic_flags = flags;
4524	c->ic_ieee = ieee;
4525	c->ic_minpower = 0;
4526	c->ic_maxpower = 2*txpow;
4527	c->ic_maxregpower = txpow;
4528}
4529
4530static const struct ieee80211_channel *
4531findchannel(const struct ieee80211_channel chans[], int nchans,
4532	int freq, int flags)
4533{
4534	const struct ieee80211_channel *c;
4535	int i;
4536
4537	for (i = 0; i < nchans; i++) {
4538		c = &chans[i];
4539		if (c->ic_freq == freq && c->ic_flags == flags)
4540			return c;
4541	}
4542	return NULL;
4543}
4544
/*
 * Append HT40 channel pairs for each hal cal-data entry that has
 * a matching HT20 extension channel 20MHz above it already in the
 * list: one HT40U entry on the primary and one HT40D entry on the
 * extension channel.
 */
static void
addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	const struct ieee80211_channel *extc;
	const struct mwl_hal_channel *hc;
	int i;

	/* c tracks the next free slot; advanced in lockstep with *nchans */
	c = &chans[*nchans];

	flags &= ~IEEE80211_CHAN_HT;
	for (i = 0; i < ci->nchannels; i++) {
		/*
		 * Each entry defines an HT40 channel pair; find the
		 * extension channel above and the insert the pair.
		 */
		hc = &ci->channels[i];
		extc = findchannel(chans, *nchans, hc->freq+20,
		    flags | IEEE80211_CHAN_HT20);
		if (extc != NULL) {
			if (*nchans >= maxchans)
				break;
			/* primary channel, secondary above (HT40U) */
			addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U,
			    hc->ieee, hc->maxTxPow);
			c->ic_extieee = extc->ic_ieee;
			c++, (*nchans)++;
			if (*nchans >= maxchans)
				break;
			/* extension channel, secondary below (HT40D) */
			addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D,
			    extc->ic_ieee, hc->maxTxPow);
			c->ic_extieee = hc->ieee;
			c++, (*nchans)++;
		}
	}
}
4581
/*
 * Append one channel entry per hal cal-data entry, cloning extra
 * entries for the overlapping phy modes: g channels also get a
 * b-only entry, and HT g/a channels also get legacy-only entries.
 * NB: the c[-1]/c[0] juggling rewrites the just-added entry and
 * duplicates it so the pair carries both flag variants.
 */
static void
addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	int i;

	/* c tracks the next free slot; advanced in lockstep with *nchans */
	c = &chans[*nchans];

	for (i = 0; i < ci->nchannels; i++) {
		const struct mwl_hal_channel *hc;

		hc = &ci->channels[i];
		if (*nchans >= maxchans)
			break;
		addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
		c++, (*nchans)++;
		if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
			/* g channel have a separate b-only entry */
			if (*nchans >= maxchans)
				break;
			c[0] = c[-1];
			c[-1].ic_flags = IEEE80211_CHAN_B;
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTG) {
			/* HT g channel have a separate g-only entry */
			if (*nchans >= maxchans)
				break;
			c[-1].ic_flags = IEEE80211_CHAN_G;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTA) {
			/* HT a channel have a separate a-only entry */
			if (*nchans >= maxchans)
				break;
			c[-1].ic_flags = IEEE80211_CHAN_A;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
	}
}
4629
static void
getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
	struct ieee80211_channel chans[])
{
	const MWL_HAL_CHANNELINFO *ci;

	/*
	 * Use the channel info from the hal to craft the
	 * channel list.  Note that we pass back an unsorted
	 * list; the caller is required to sort it for us
	 * (if desired).
	 */
	*nchans = 0;
	/* 20MHz channels first so addht40channels can find HT20 entries */
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
	/* then 40MHz (HT40) pairs in each band */
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
}
4656
4657static void
4658mwl_getradiocaps(struct ieee80211com *ic,
4659	int maxchans, int *nchans, struct ieee80211_channel chans[])
4660{
4661	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4662
4663	getchannels(sc, maxchans, nchans, chans);
4664}
4665
4666static int
4667mwl_getchannels(struct mwl_softc *sc)
4668{
4669	struct ifnet *ifp = sc->sc_ifp;
4670	struct ieee80211com *ic = ifp->if_l2com;
4671
4672	/*
4673	 * Use the channel info from the hal to craft the
4674	 * channel list for net80211.  Note that we pass up
4675	 * an unsorted list; net80211 will sort it for us.
4676	 */
4677	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
4678	ic->ic_nchans = 0;
4679	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
4680
4681	ic->ic_regdomain.regdomain = SKU_DEBUG;
4682	ic->ic_regdomain.country = CTRY_DEFAULT;
4683	ic->ic_regdomain.location = 'I';
4684	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
4685	ic->ic_regdomain.isocc[1] = ' ';
4686	return (ic->ic_nchans == 0 ? EIO : 0);
4687}
4688#undef IEEE80211_CHAN_HTA
4689#undef IEEE80211_CHAN_HTG
4690
4691#ifdef MWL_DEBUG
/*
 * Debug dump of one rx descriptor (and its dma address).
 * The trailing marker shows fw-owned descriptors: " *" when the
 * status has the OK bit, " !" otherwise, "" when driver-owned.
 */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("R[%2u] (DS.V:%p DS.P:%p) NEXT:%08x DATA:%08x RC:%02x%s\n"
	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (const struct mwl_desc *)bf->bf_daddr,
	    le32toh(ds->pPhysNext), le32toh(ds->pPhysBuffData),
	    ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2))<br/>;
}
4708
/*
 * Debug dump of one tx descriptor: dma linkage, packet state,
 * rate/QoS info and (when MWL_TXDESC > 1) the multi-frame
 * scatter arrays.  The disabled block at the end hexdumps the
 * raw descriptor bytes.
 */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:%p)\n",
	    ds, (const struct mwl_txdesc *)bf->bf_daddr);
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !");
	printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
{ const uint8_t *cp = (const uint8_t *) ds;
  int i;
  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
	printf("%02x ", cp[i]);
	if (((i+1) % 16) == 0)
		printf("\n");
  }
  printf("\n");
}
#endif
}
4751#endif /* MWL_DEBUG */
4752
4753#if 0
/*
 * Debug helper (compiled out): dump all active tx buffers on a
 * queue.  Syncs each descriptor from device before printing so
 * the dumped state is current.
 */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4772#endif
4773
4774static void
4775mwl_watchdog(void *arg)
4776{
4777	struct mwl_softc *sc;
4778	struct ifnet *ifp;
4779
4780	sc = arg;
4781	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
4782	if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
4783		return;
4784
4785	ifp = sc->sc_ifp;
4786	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) {
4787		if (mwl_hal_setkeepalive(sc->sc_mh))
4788			if_printf(ifp, "transmit timeout (firmware hung?)\n");
4789		else
4790			if_printf(ifp, "transmit timeout\n");
4791#if 0
4792		mwl_reset(ifp);
4793mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
4794#endif
4795		ifp->if_oerrors++;
4796		sc->sc_stats.mst_watchdog++;
4797	}
4798}
4799
4800#ifdef MWL_DIAGAPI
4801/*
4802 * Diagnostic interface to the HAL.  This is used by various
4803 * tools to do things like retrieve register contents for
4804 * debugging.  The mechanism is intentionally opaque so that
4805 * it can change frequently w/o concern for compatiblity.
4806 */
/*
 * Run one diagnostic request against the hal: optionally copy in
 * a user-supplied input buffer, optionally allocate a dynamic
 * result buffer, invoke the hal, then copy the (possibly
 * truncated) result back out.  Uses goto-based cleanup so all
 * exit paths release the temp buffers.
 */
static int
mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	u_int id = md->md_id & MWL_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	/* NB: sizes come from userland; M_NOWAIT bounds the damage */
	u_int32_t insize = md->md_in_size;
	u_int32_t outsize = md->md_out_size;
	int error = 0;

	if (md->md_id & MWL_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(md->md_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (md->md_id & MWL_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
		/* clamp to what the caller asked for before copyout */
		if (outsize < md->md_out_size)
			md->md_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, md->md_out_data,
					md->md_out_size);
	} else {
		error = EINVAL;
	}
bad:
	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
4861
/*
 * Reload firmware (when md_id == 0), re-fetch h/w specs and
 * re-setup dma, then reset the driver's tx/rx bookkeeping to
 * match the freshly-initialized hardware.
 */
static int
mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error;

	MWL_LOCK_ASSERT(sc);

	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to load firmware\n");
		return EIO;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		return EIO;
	}
	error = mwl_setupdma(sc);
	if (error != 0) {
		/* NB: mwl_setupdma prints a msg */
		return error;
	}
	/*
	 * Reset tx/rx data structures; after reload we must
	 * re-start the driver's notion of the next xmit/recv.
	 */
	mwl_draintxq(sc);		/* clear pending frames */
	mwl_resettxq(sc);		/* rebuild tx q lists */
	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
	return 0;
}
4892#endif /* MWL_DIAGAPI */
4893
/*
 * Network interface ioctl handler: interface flag changes,
 * driver statistics export, the (optional) diagnostic API and
 * the standard media/address requests.
 */
static int
mwl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, startall;

	switch (cmd) {
	case SIOCSIFFLAGS:
		MWL_LOCK(sc);
		startall = 0;
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call mwl_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			mwl_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid) {
				mwl_init_locked(sc);	/* XXX lose error */
				startall = 1;
			}
		} else
			mwl_stop_locked(ifp, 1);
		MWL_UNLOCK(sc);
		if (startall)
			ieee80211_start_all(ic);
		break;
	case SIOCGMVSTATS:
		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.mst_tx_packets = ifp->if_opackets;
		sc->sc_stats.mst_rx_packets = ifp->if_ipackets;
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsisentcy in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return copyout(&sc->sc_stats,
				ifr->ifr_data, sizeof (sc->sc_stats));
#ifdef MWL_DIAGAPI
	case SIOCGMVDIAG:
		/* XXX check privs */
		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
	case SIOCGMVRESET:
		/* XXX check privs */
		MWL_LOCK(sc);
		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
		MWL_UNLOCK(sc);
		break;
#endif /* MWL_DIAGAPI */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}
4972
4973#ifdef	MWL_DEBUG
4974static int
4975mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4976{
4977	struct mwl_softc *sc = arg1;
4978	int debug, error;
4979
4980	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4981	error = sysctl_handle_int(oidp, &debug, 0, req);
4982	if (error || !req->newptr)
4983		return error;
4984	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4985	sc->sc_debug = debug & 0x00ffffff;
4986	return 0;
4987}
4988#endif /* MWL_DEBUG */
4989
/*
 * Attach driver sysctl nodes; currently only the debug knob
 * and only when MWL_DEBUG is compiled in.
 */
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef	MWL_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	/* seed from the tunable/global default */
	sc->sc_debug = mwl_debug;
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
5003
5004/*
5005 * Announce various information on device/driver attach.
5006 */
/*
 * Announce various information on device/driver attach.
 */
static void
mwl_announce(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* fw release number is packed as four version bytes */
	if_printf(ifp, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
		sc->sc_hwspecs.hwVersion,
		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
		sc->sc_hwspecs.regionCode);
	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;

	if (bootverbose) {
		int i;
		for (i = 0; i <= WME_AC_VO; i++) {
			struct mwl_txq *txq = sc->sc_ac2q[i];
			if_printf(ifp, "Use hw queue %u for %s traffic\n",
				txq->qnum, ieee80211_wme_acnames[i]);
		}
	}
	/* only mention tunables when they differ from the defaults */
	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
		if_printf(ifp, "using %u rx descriptors\n", mwl_rxdesc);
	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
		if_printf(ifp, "using %u rx buffers\n", mwl_rxbuf);
	if (bootverbose || mwl_txbuf != MWL_TXBUF)
		if_printf(ifp, "using %u tx buffers\n", mwl_txbuf);
	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
		if_printf(ifp, "multi-bss support\n");
#ifdef MWL_TX_NODROP
	if (bootverbose)
		if_printf(ifp, "no tx drop\n");
#endif
}
5042