if_mwl.c revision 254842
1/*-
2 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
3 * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer,
11 *    without modification.
12 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
14 *    redistribution must be conditioned upon including a substantially
15 *    similar Disclaimer requirement for further binary redistribution.
16 *
17 * NO WARRANTY
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
21 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
23 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
26 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGES.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/dev/mwl/if_mwl.c 254842 2013-08-25 10:57:09Z andre $");
33
34/*
35 * Driver for the Marvell 88W8363 Wireless LAN controller.
36 */
37
38#include "opt_inet.h"
39#include "opt_mwl.h"
40#include "opt_wlan.h"
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/sysctl.h>
45#include <sys/mbuf.h>
46#include <sys/malloc.h>
47#include <sys/lock.h>
48#include <sys/mutex.h>
49#include <sys/kernel.h>
50#include <sys/socket.h>
51#include <sys/sockio.h>
52#include <sys/errno.h>
53#include <sys/callout.h>
54#include <sys/bus.h>
55#include <sys/endian.h>
56#include <sys/kthread.h>
57#include <sys/taskqueue.h>
58
59#include <machine/bus.h>
60
61#include <net/if.h>
62#include <net/if_dl.h>
63#include <net/if_media.h>
64#include <net/if_types.h>
65#include <net/if_arp.h>
66#include <net/ethernet.h>
67#include <net/if_llc.h>
68
69#include <net/bpf.h>
70
71#include <net80211/ieee80211_var.h>
72#include <net80211/ieee80211_regdomain.h>
73
74#ifdef INET
75#include <netinet/in.h>
76#include <netinet/if_ether.h>
77#endif /* INET */
78
79#include <dev/mwl/if_mwlvar.h>
80#include <dev/mwl/mwldiag.h>
81
82/* idiomatic shorthands: MS = mask+shift, SM = shift+mask */
83#define	MS(v,x)	(((v) & x) >> x##_S)
84#define	SM(v,x)	(((v) << x##_S) & x)
85
86static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
87		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
88		    const uint8_t [IEEE80211_ADDR_LEN],
89		    const uint8_t [IEEE80211_ADDR_LEN]);
90static void	mwl_vap_delete(struct ieee80211vap *);
91static int	mwl_setupdma(struct mwl_softc *);
92static int	mwl_hal_reset(struct mwl_softc *sc);
93static int	mwl_init_locked(struct mwl_softc *);
94static void	mwl_init(void *);
95static void	mwl_stop_locked(struct ifnet *, int);
96static int	mwl_reset(struct ieee80211vap *, u_long);
97static void	mwl_stop(struct ifnet *, int);
98static void	mwl_start(struct ifnet *);
99static int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
100			const struct ieee80211_bpf_params *);
101static int	mwl_media_change(struct ifnet *);
102static void	mwl_watchdog(void *);
103static int	mwl_ioctl(struct ifnet *, u_long, caddr_t);
104static void	mwl_radar_proc(void *, int);
105static void	mwl_chanswitch_proc(void *, int);
106static void	mwl_bawatchdog_proc(void *, int);
107static int	mwl_key_alloc(struct ieee80211vap *,
108			struct ieee80211_key *,
109			ieee80211_keyix *, ieee80211_keyix *);
110static int	mwl_key_delete(struct ieee80211vap *,
111			const struct ieee80211_key *);
112static int	mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *,
113			const uint8_t mac[IEEE80211_ADDR_LEN]);
114static int	mwl_mode_init(struct mwl_softc *);
115static void	mwl_update_mcast(struct ifnet *);
116static void	mwl_update_promisc(struct ifnet *);
117static void	mwl_updateslot(struct ifnet *);
118static int	mwl_beacon_setup(struct ieee80211vap *);
119static void	mwl_beacon_update(struct ieee80211vap *, int);
120#ifdef MWL_HOST_PS_SUPPORT
121static void	mwl_update_ps(struct ieee80211vap *, int);
122static int	mwl_set_tim(struct ieee80211_node *, int);
123#endif
124static int	mwl_dma_setup(struct mwl_softc *);
125static void	mwl_dma_cleanup(struct mwl_softc *);
126static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
127		    const uint8_t [IEEE80211_ADDR_LEN]);
128static void	mwl_node_cleanup(struct ieee80211_node *);
129static void	mwl_node_drain(struct ieee80211_node *);
130static void	mwl_node_getsignal(const struct ieee80211_node *,
131			int8_t *, int8_t *);
132static void	mwl_node_getmimoinfo(const struct ieee80211_node *,
133			struct ieee80211_mimo_info *);
134static int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
135static void	mwl_rx_proc(void *, int);
136static void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
137static int	mwl_tx_setup(struct mwl_softc *, int, int);
138static int	mwl_wme_update(struct ieee80211com *);
139static void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
140static void	mwl_tx_cleanup(struct mwl_softc *);
141static uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
142static int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
143			     struct mwl_txbuf *, struct mbuf *);
144static void	mwl_tx_proc(void *, int);
145static int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
146static void	mwl_draintxq(struct mwl_softc *);
147static void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
148static int	mwl_recv_action(struct ieee80211_node *,
149			const struct ieee80211_frame *,
150			const uint8_t *, const uint8_t *);
151static int	mwl_addba_request(struct ieee80211_node *,
152			struct ieee80211_tx_ampdu *, int dialogtoken,
153			int baparamset, int batimeout);
154static int	mwl_addba_response(struct ieee80211_node *,
155			struct ieee80211_tx_ampdu *, int status,
156			int baparamset, int batimeout);
157static void	mwl_addba_stop(struct ieee80211_node *,
158			struct ieee80211_tx_ampdu *);
159static int	mwl_startrecv(struct mwl_softc *);
160static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
161			struct ieee80211_channel *);
162static int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
163static void	mwl_scan_start(struct ieee80211com *);
164static void	mwl_scan_end(struct ieee80211com *);
165static void	mwl_set_channel(struct ieee80211com *);
166static int	mwl_peerstadb(struct ieee80211_node *,
167			int aid, int staid, MWL_HAL_PEERINFO *pi);
168static int	mwl_localstadb(struct ieee80211vap *);
169static int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
170static int	allocstaid(struct mwl_softc *sc, int aid);
171static void	delstaid(struct mwl_softc *sc, int staid);
172static void	mwl_newassoc(struct ieee80211_node *, int);
173static void	mwl_agestations(void *);
174static int	mwl_setregdomain(struct ieee80211com *,
175			struct ieee80211_regdomain *, int,
176			struct ieee80211_channel []);
177static void	mwl_getradiocaps(struct ieee80211com *, int, int *,
178			struct ieee80211_channel []);
179static int	mwl_getchannels(struct mwl_softc *);
180
181static void	mwl_sysctlattach(struct mwl_softc *);
182static void	mwl_announce(struct mwl_softc *);
183
/*
 * Global driver knobs exported under the hw.mwl sysctl tree.  The
 * TUNABLE_INT entries can additionally be preset from loader.conf
 * before the driver attaches.
 */
SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");

static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
	    0, "rx descriptors allocated");
/* NOTE(review): rxdesc has no TUNABLE_INT unlike the knobs below —
 * confirm it is intentionally not settable from loader.conf. */
static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RW, &mwl_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.mwl.rxbuf", &mwl_rxbuf);
static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RW, &mwl_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.mwl.txbuf", &mwl_txbuf);
static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RW, &mwl_txcoalesce,
	    0, "tx buffers to send at once");
TUNABLE_INT("hw.mwl.txcoalesce", &mwl_txcoalesce);
static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RW, &mwl_rxquota,
	    0, "max rx buffers to process per interrupt");
TUNABLE_INT("hw.mwl.rxquota", &mwl_rxquota);
static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RW, &mwl_rxdmalow,
	    0, "min free rx buffers before restarting traffic");
TUNABLE_INT("hw.mwl.rxdmalow", &mwl_rxdmalow);
209
210#ifdef MWL_DEBUG
211static	int mwl_debug = 0;
212SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RW, &mwl_debug,
213	    0, "control debugging printfs");
214TUNABLE_INT("hw.mwl.debug", &mwl_debug);
215enum {
216	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
217	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
218	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
219	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
220	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
221	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
222	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
223	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
224	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
225	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
226	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
227	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
228	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
229	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
230	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
231	MWL_DEBUG_ANY		= 0xffffffff
232};
233#define	IS_BEACON(wh) \
234    ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
235	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
236#define	IFF_DUMPPKTS_RECV(sc, wh) \
237    (((sc->sc_debug & MWL_DEBUG_RECV) && \
238      ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh))) || \
239     (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
240#define	IFF_DUMPPKTS_XMIT(sc) \
241	((sc->sc_debug & MWL_DEBUG_XMIT) || \
242	 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
243#define	DPRINTF(sc, m, fmt, ...) do {				\
244	if (sc->sc_debug & (m))					\
245		printf(fmt, __VA_ARGS__);			\
246} while (0)
247#define	KEYPRINTF(sc, hk, mac) do {				\
248	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
249		mwl_keyprint(sc, __func__, hk, mac);		\
250} while (0)
251static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
252static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
253#else
254#define	IFF_DUMPPKTS_RECV(sc, wh) \
255	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
256#define	IFF_DUMPPKTS_XMIT(sc) \
257	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
258#define	DPRINTF(sc, m, fmt, ...) do {				\
259	(void) sc;						\
260} while (0)
261#define	KEYPRINTF(sc, k, mac) do {				\
262	(void) sc;						\
263} while (0)
264#endif
265
266static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");
267
268/*
269 * Each packet has fixed front matter: a 2-byte length
270 * of the payload, followed by a 4-address 802.11 header
271 * (regardless of the actual header and always w/o any
272 * QoS header).  The payload then follows.
273 */
struct mwltxrec {
	uint16_t fwlen;		/* payload length handed to the firmware */
	/* 4-address 802.11 header, used regardless of the frame's
	 * actual header size and always without a QoS field */
	struct ieee80211_frame_addr4 wh;
} __packed;
278
279/*
280 * Read/Write shorthands for accesses to BAR 0.  Note
281 * that all BAR 1 operations are done in the "hal" and
282 * there should be no reference to them here.
283 */
/* Read the 32-bit BAR 0 register at byte offset 'off'. */
static __inline uint32_t
RD4(struct mwl_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
}
289
/* Write 'val' to the 32-bit BAR 0 register at byte offset 'off'. */
static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}
295
/*
 * Device attach: allocate the ifnet, attach the hal and load firmware,
 * set up DMA descriptors and h/w tx queues, then attach to net80211
 * and install the driver's method overrides.  Returns 0 on success or
 * an errno; on failure, partially-acquired resources are unwound via
 * the bad* labels and sc_invalid is set so the ISR ignores the device.
 */
int
mwl_attach(uint16_t devid, struct mwl_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct mwl_hal *mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "cannot if_alloc()\n");
		return ENOSPC;
	}
	ic = ifp->if_l2com;

	/*
	 * Setup the RX free list lock early, so it can be consistently
	 * removed.
	 */
	MWL_RXFREE_INIT(sc);

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	/* NB: the hal owns BAR 1; this driver only touches BAR 0 */
	mh = mwl_hal_attach(sc->sc_dev, devid,
	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
	if (mh == NULL) {
		if_printf(ifp, "unable to attach HAL\n");
		error = EIO;
		goto bad;
	}
	sc->sc_mh = mh;
	/*
	 * Load firmware so we can get setup.  We arbitrarily
	 * pick station firmware; we'll re-load firmware as
	 * needed so setting up the wrong mode isn't a big deal.
	 */
	if (mwl_hal_fwload(mh, NULL) != 0) {
		if_printf(ifp, "unable to setup builtin firmware\n");
		error = EIO;
		goto bad1;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		if_printf(ifp, "unable to fetch h/w specs\n");
		error = EIO;
		goto bad1;
	}
	error = mwl_getchannels(sc);
	if (error != 0)
		goto bad1;

	sc->sc_txantenna = 0;		/* h/w default */
	sc->sc_rxantenna = 0;		/* h/w default */
	sc->sc_invalid = 0;		/* ready to go, enable int handling */
	sc->sc_ageinterval = MWL_AGEINTERVAL;

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 * We immediately push the information to the firmware
	 * as otherwise it gets upset.
	 */
	error = mwl_dma_setup(sc);
	if (error != 0) {
		if_printf(ifp, "failed to setup descriptors: %d\n", error);
		goto bad1;
	}
	error = mwl_setupdma(sc);	/* push to firmware */
	if (error != 0)			/* NB: mwl_setupdma prints msg */
		goto bad1;

	callout_init(&sc->sc_timer, CALLOUT_MPSAFE);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);

	/* private taskqueue for deferred rx/tx/radar/BA-watchdog work */
	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);

	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = mwl_start;
	ifp->if_ioctl = mwl_ioctl;
	ifp->if_init = mwl_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
#if 0
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
#endif
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_WDS		/* WDS supported */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WME		/* WME/WMM supported */
		| IEEE80211_C_BURST		/* xmit bursting supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
		| IEEE80211_C_DFS		/* DFS supported */
		;

	ic->ic_htcaps =
		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
#if MWL_AGGR_SIZE == 7935
		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#if 0
		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
#endif
		/* s/w capabilities */
		| IEEE80211_HTC_HT		/* HT operation */
		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
		| IEEE80211_HTC_SMPS		/* SMPS available */
		;

	/*
	 * Mark h/w crypto support.
	 * XXX no way to query h/w support.
	 */
	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
			  |  IEEE80211_CRYPTO_AES_CCM
			  |  IEEE80211_CRYPTO_TKIP
			  |  IEEE80211_CRYPTO_TKIPMIC
			  ;
	/*
	 * Transmit requires space in the packet for a special
	 * format transmit record and optional padding between
	 * this record and the payload.  Ask the net80211 layer
	 * to arrange this when encapsulating packets so we can
	 * add it efficiently.
	 */
	ic->ic_headroom = sizeof(struct mwltxrec) -
		sizeof(struct ieee80211_frame);

	/* call MI attach routine. */
	ieee80211_ifattach(ic, sc->sc_hwspecs.macAddr);
	ic->ic_setregdomain = mwl_setregdomain;
	ic->ic_getradiocaps = mwl_getradiocaps;
	/* override default methods */
	ic->ic_raw_xmit = mwl_raw_xmit;
	ic->ic_newassoc = mwl_newassoc;
	ic->ic_updateslot = mwl_updateslot;
	ic->ic_update_mcast = mwl_update_mcast;
	ic->ic_update_promisc = mwl_update_promisc;
	ic->ic_wme.wme_update = mwl_wme_update;

	/* NB: the original net80211 node methods are saved in the softc
	 * so the driver overrides can chain to them */
	ic->ic_node_alloc = mwl_node_alloc;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = mwl_node_cleanup;
	sc->sc_node_drain = ic->ic_node_drain;
	ic->ic_node_drain = mwl_node_drain;
	ic->ic_node_getsignal = mwl_node_getsignal;
	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;

	ic->ic_scan_start = mwl_scan_start;
	ic->ic_scan_end = mwl_scan_end;
	ic->ic_set_channel = mwl_set_channel;

	sc->sc_recv_action = ic->ic_recv_action;
	ic->ic_recv_action = mwl_recv_action;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = mwl_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = mwl_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = mwl_addba_stop;

	ic->ic_vap_create = mwl_vap_create;
	ic->ic_vap_delete = mwl_vap_delete;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		MWL_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		MWL_RX_RADIOTAP_PRESENT);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	mwl_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	mwl_announce(sc);
	return 0;
bad2:
	mwl_dma_cleanup(sc);
bad1:
	mwl_hal_detach(mh);
bad:
	MWL_RXFREE_DESTROY(sc);
	if_free(ifp);
	sc->sc_invalid = 1;
	return error;
}
545
/*
 * Device detach: stop the device, then release resources in an order
 * that is safe with respect to callbacks from the 802.11 layer (see
 * the NB comment below).  Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	mwl_stop(ifp, 1);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	callout_drain(&sc->sc_watchdog);
	mwl_dma_cleanup(sc);
	MWL_RXFREE_DESTROY(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	if_free(ifp);

	return 0;
}
578
579/*
580 * MAC address handling for multiple BSS on the same radio.
581 * The first vap uses the MAC address from the EEPROM.  For
582 * subsequent vap's we set the U/L bit (bit 1) in the MAC
583 * address and use the next six bits as an index.
584 */
/*
 * Assign a BSSID index for a new vap and record it in sc_bssidmask;
 * index 0 (the EEPROM address) is reference counted in sc_nbssid0.
 * When cloning on mbss-capable hardware the index is encoded into
 * mac[0] along with the locally-administered bit (0x2).
 */
static void
assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 32; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		/* NOTE(review): if all 32 indices are in use the loop falls
		 * through with i == 32 and the 1<<i below is undefined —
		 * presumably vap creation never exhausts the mask; verify. */
		if (i != 0)
			mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	if (i == 0)
		sc->sc_nbssid0++;
}
603
604static void
605reclaim_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN])
606{
607	int i = mac[0] >> 2;
608	if (i != 0 || --sc->sc_nbssid0 == 0)
609		sc->sc_bssidmask &= ~(1<<i);
610}
611
/*
 * net80211 vap create method.  Allocates hal vap state for modes that
 * need it (AP/MBSS/STA), allocates the driver vap wrapper, installs
 * the driver's per-vap method overrides, attaches the vap and updates
 * the per-mode vap counts and overall operating mode.  Returns NULL
 * on unsupported opmode, hal vap failure, or allocation failure.
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			/* undo the address assignment done above */
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		/* unsupported operating modes */
		return NULL;
	}

	mvp = (struct mwl_vap *) malloc(sizeof(struct mwl_vap),
	    M_80211_VAP, M_NOWAIT | M_ZERO);
	if (mvp == NULL) {
		/* unwind hal vap + address assignment on failure */
		if (hvap != NULL) {
			mwl_hal_delvap(hvap);
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
		}
		/* XXX msg */
		return NULL;
	}
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
	if (hvap != NULL)
		IEEE80211_ADDR_COPY(vap->iv_myaddr, mac);
	/* override with driver methods; originals are saved for chaining */
	mvp->mv_newstate = vap->iv_newstate;
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	/* host-based power-save handling, only meaningful for AP modes */
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status);

	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}
751
/*
 * net80211 vap delete method.  Quiesces the hardware (if running),
 * detaches the vap from net80211, releases the hal vap and sta db
 * state, updates per-mode vap counts, reclaims the MAC address and
 * flushes any queued tx frames for the vap.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ifnet *parent = vap->iv_ic->ic_ifp;
	struct mwl_softc *sc = parent->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (parent->if_drv_flags & IFF_DRV_RUNNING) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps--;
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	/* re-enable interrupts if we quiesced the h/w above */
	if (parent->if_drv_flags & IFF_DRV_RUNNING)
		mwl_hal_intrset(mh, sc->sc_imask);
}
793
794void
795mwl_suspend(struct mwl_softc *sc)
796{
797	struct ifnet *ifp = sc->sc_ifp;
798
799	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
800		__func__, ifp->if_flags);
801
802	mwl_stop(ifp, 1);
803}
804
805void
806mwl_resume(struct mwl_softc *sc)
807{
808	struct ifnet *ifp = sc->sc_ifp;
809
810	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
811		__func__, ifp->if_flags);
812
813	if (ifp->if_flags & IFF_UP)
814		mwl_init(sc);
815}
816
817void
818mwl_shutdown(void *arg)
819{
820	struct mwl_softc *sc = arg;
821
822	mwl_stop(sc->sc_ifp, 1);
823}
824
825/*
826 * Interrupt handler.  Most of the actual processing is deferred.
827 */
/*
 * Interrupt service routine.  Reads (and thereby clears) the ISR via
 * the hal, then dispatches: rx/tx/BA-watchdog work is deferred to the
 * taskqueue, command completion is handled inline, and several status
 * bits are merely counted or ignored.
 */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	/* rx/tx completion processing is deferred to the taskqueue */
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;	/* NB: event intentionally ignored */
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;	/* NB: unhandled */
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}
883
884static void
885mwl_radar_proc(void *arg, int pending)
886{
887	struct mwl_softc *sc = arg;
888	struct ifnet *ifp = sc->sc_ifp;
889	struct ieee80211com *ic = ifp->if_l2com;
890
891	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
892	    __func__, pending);
893
894	sc->sc_stats.mst_radardetect++;
895	/* XXX stop h/w BA streams? */
896
897	IEEE80211_LOCK(ic);
898	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
899	IEEE80211_UNLOCK(ic);
900}
901
902static void
903mwl_chanswitch_proc(void *arg, int pending)
904{
905	struct mwl_softc *sc = arg;
906	struct ifnet *ifp = sc->sc_ifp;
907	struct ieee80211com *ic = ifp->if_l2com;
908
909	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
910	    __func__, pending);
911
912	IEEE80211_LOCK(ic);
913	sc->sc_csapending = 0;
914	ieee80211_csa_completeswitch(ic);
915	IEEE80211_UNLOCK(ic);
916}
917
/*
 * Tear down a single BA stream: send DELBA and drop the stream.
 * NOTE(review): sp->data[0]/data[1] appear to hold the node and tx
 * ampdu state — presumably stashed when the stream was created;
 * confirm against the BA stream setup path.
 */
static void
mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
{
	struct ieee80211_node *ni = sp->data[0];

	/* send DELBA and drop the stream */
	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
}
926
/*
 * Deferred task for the firmware BA (block ack) watchdog interrupt.
 * The f/w reports stalled h/w BA streams via a bitmap; each flagged
 * stream is torn down with a DELBA (see mwl_bawatchdog).
 */
static void
mwl_bawatchdog_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_BASTREAM *sp;
	uint8_t bitmap, n;

	sc->sc_stats.mst_bawatchdog++;

	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: could not get bitmap\n", __func__);
		sc->sc_stats.mst_bawatchdog_failed++;
		return;
	}
	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
	/* 0xff means "all streams"; walk every possible stream index */
	if (bitmap == 0xff) {
		n = 0;
		/* disable all ba streams */
		/* NB: bitmap is reused as the stream index here */
		for (bitmap = 0; bitmap < 8; bitmap++) {
			sp = mwl_hal_bastream_lookup(mh, bitmap);
			if (sp != NULL) {
				mwl_bawatchdog(sp);
				n++;
			}
		}
		if (n == 0) {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA streams found\n", __func__);
			sc->sc_stats.mst_bawatchdog_empty++;
		}
	} else if (bitmap != 0xaa) {
		/*
		 * NB(review): 0xaa appears to be a f/w "nothing to do"
		 * sentinel (silently ignored) -- confirm with f/w docs.
		 */
		/* disable a single ba stream */
		sp = mwl_hal_bastream_lookup(mh, bitmap);
		if (sp != NULL) {
			mwl_bawatchdog(sp);
		} else {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA stream %d\n", __func__, bitmap);
			sc->sc_stats.mst_bawatchdog_notfound++;
		}
	}
}
971
972/*
973 * Convert net80211 channel to a HAL channel.
974 */
975static void
976mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
977{
978	hc->channel = chan->ic_ieee;
979
980	*(uint32_t *)&hc->channelFlags = 0;
981	if (IEEE80211_IS_CHAN_2GHZ(chan))
982		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
983	else if (IEEE80211_IS_CHAN_5GHZ(chan))
984		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
985	if (IEEE80211_IS_CHAN_HT40(chan)) {
986		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
987		if (IEEE80211_IS_CHAN_HT40U(chan))
988			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
989		else
990			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
991	} else
992		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
993	/* XXX 10MHz channels */
994}
995
996/*
997 * Inform firmware of our tx/rx dma setup.  The BAR 0
998 * writes below are for compatibility with older firmware.
999 * For current firmware we send this information with a
1000 * cmd block via mwl_hal_sethwdma.
1001 */
1002static int
1003mwl_setupdma(struct mwl_softc *sc)
1004{
1005	int error, i;
1006
1007	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
1008	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
1009	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);
1010
1011	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
1012		struct mwl_txq *txq = &sc->sc_txq[i];
1013		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
1014		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
1015	}
1016	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
1017	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;
1018
1019	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
1020	if (error != 0) {
1021		device_printf(sc->sc_dev,
1022		    "unable to setup tx/rx dma; hal status %u\n", error);
1023		/* XXX */
1024	}
1025	return error;
1026}
1027
1028/*
1029 * Inform firmware of tx rate parameters.
1030 * Called after a channel change.
1031 */
1032static int
1033mwl_setcurchanrates(struct mwl_softc *sc)
1034{
1035	struct ifnet *ifp = sc->sc_ifp;
1036	struct ieee80211com *ic = ifp->if_l2com;
1037	const struct ieee80211_rateset *rs;
1038	MWL_HAL_TXRATE rates;
1039
1040	memset(&rates, 0, sizeof(rates));
1041	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1042	/* rate used to send management frames */
1043	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1044	/* rate used to send multicast frames */
1045	rates.McastRate = rates.MgtRate;
1046
1047	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1048}
1049
1050/*
1051 * Inform firmware of tx rate parameters.  Called whenever
1052 * user-settable params change and after a channel change.
1053 */
1054static int
1055mwl_setrates(struct ieee80211vap *vap)
1056{
1057	struct mwl_vap *mvp = MWL_VAP(vap);
1058	struct ieee80211_node *ni = vap->iv_bss;
1059	const struct ieee80211_txparam *tp = ni->ni_txparms;
1060	MWL_HAL_TXRATE rates;
1061
1062	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1063
1064	/*
1065	 * Update the h/w rate map.
1066	 * NB: 0x80 for MCS is passed through unchanged
1067	 */
1068	memset(&rates, 0, sizeof(rates));
1069	/* rate used to send management frames */
1070	rates.MgtRate = tp->mgmtrate;
1071	/* rate used to send multicast frames */
1072	rates.McastRate = tp->mcastrate;
1073
1074	/* while here calculate EAPOL fixed rate cookie */
1075	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
1076
1077	return mwl_hal_settxrate(mvp->mv_hvap,
1078	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
1079		RATE_FIXED : RATE_AUTO, &rates);
1080}
1081
1082/*
1083 * Setup a fixed xmit rate cookie for EAPOL frames.
1084 */
1085static void
1086mwl_seteapolformat(struct ieee80211vap *vap)
1087{
1088	struct mwl_vap *mvp = MWL_VAP(vap);
1089	struct ieee80211_node *ni = vap->iv_bss;
1090	enum ieee80211_phymode mode;
1091	uint8_t rate;
1092
1093	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1094
1095	mode = ieee80211_chan2mode(ni->ni_chan);
1096	/*
1097	 * Use legacy rates when operating a mixed HT+non-HT bss.
1098	 * NB: this may violate POLA for sta and wds vap's.
1099	 */
1100	if (mode == IEEE80211_MODE_11NA &&
1101	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1102		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1103	else if (mode == IEEE80211_MODE_11NG &&
1104	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1105		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1106	else
1107		rate = vap->iv_txparms[mode].mgmtrate;
1108
1109	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1110}
1111
1112/*
1113 * Map SKU+country code to region code for radar bin'ing.
1114 */
1115static int
1116mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1117{
1118	switch (rd->regdomain) {
1119	case SKU_FCC:
1120	case SKU_FCC3:
1121		return DOMAIN_CODE_FCC;
1122	case SKU_CA:
1123		return DOMAIN_CODE_IC;
1124	case SKU_ETSI:
1125	case SKU_ETSI2:
1126	case SKU_ETSI3:
1127		if (rd->country == CTRY_SPAIN)
1128			return DOMAIN_CODE_SPAIN;
1129		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1130			return DOMAIN_CODE_FRANCE;
1131		/* XXX force 1.3.1 radar type */
1132		return DOMAIN_CODE_ETSI_131;
1133	case SKU_JAPAN:
1134		return DOMAIN_CODE_MKK;
1135	case SKU_ROW:
1136		return DOMAIN_CODE_DGT;	/* Taiwan */
1137	case SKU_APAC:
1138	case SKU_APAC2:
1139	case SKU_APAC3:
1140		return DOMAIN_CODE_AUS;	/* Australia */
1141	}
1142	/* XXX KOREA? */
1143	return DOMAIN_CODE_FCC;			/* XXX? */
1144}
1145
/*
 * Push vap-independent state (antennas, radio, wmm, channel,
 * rate adaptation, region code, aggregation knobs) to the f/w.
 * NB: unconditionally returns 1 (success); error returns from
 * the individual hal calls, including mwl_chan_set, are ignored.
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	return 1;
}
1170
/*
 * Bring the interface up with the softc lock held: stop any
 * previous activity, reset the hardware, start the receive
 * path and enable interrupts.  Returns 0 on success or an
 * errno on failure.
 */
static int
mwl_init_locked(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop_locked(ifp, 0);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		if_printf(ifp, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.
	 * NB: MAC_EVENT and QUEUE_EMPTY are deliberately compiled out.
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	mwl_hal_intrset(mh, sc->sc_imask);
	/* arm the once-a-second watchdog */
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);

	return 0;
}
1231
1232static void
1233mwl_init(void *arg)
1234{
1235	struct mwl_softc *sc = arg;
1236	struct ifnet *ifp = sc->sc_ifp;
1237	struct ieee80211com *ic = ifp->if_l2com;
1238	int error = 0;
1239
1240	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
1241		__func__, ifp->if_flags);
1242
1243	MWL_LOCK(sc);
1244	error = mwl_init_locked(sc);
1245	MWL_UNLOCK(sc);
1246
1247	if (error == 0)
1248		ieee80211_start_all(ic);	/* start all vap's */
1249}
1250
/*
 * Stop the interface with the softc lock held: clear RUNNING,
 * cancel the watchdog and drain pending transmits.
 * NB(review): `disable` is unused here -- presumably kept for
 * interface symmetry with other drivers; confirm before removal.
 */
static void
mwl_stop_locked(struct ifnet *ifp, int disable)
{
	struct mwl_softc *sc = ifp->if_softc;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	MWL_LOCK_ASSERT(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Shutdown the hardware and driver.
		 */
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		callout_stop(&sc->sc_watchdog);
		sc->sc_tx_timer = 0;
		mwl_draintxq(sc);
	}
}
1270
1271static void
1272mwl_stop(struct ifnet *ifp, int disable)
1273{
1274	struct mwl_softc *sc = ifp->if_softc;
1275
1276	MWL_LOCK(sc);
1277	mwl_stop_locked(ifp, disable);
1278	MWL_UNLOCK(sc);
1279}
1280
/*
 * Re-push per-vap state to the firmware for the given target
 * state: rates (RUN only), RTS threshold, HT short-GI, HT
 * protection, and beacons for ap-like vaps.  Returns 0 or the
 * result of re-establishing the beacon frame.
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons for ap/mesh/ibss vaps running a bss */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_MBSS ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}
1310
1311/*
1312 * Reset the hardware w/o losing operational state.
1313 * Used to to reset or reload hardware state for a vap.
1314 */
1315static int
1316mwl_reset(struct ieee80211vap *vap, u_long cmd)
1317{
1318	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1319	int error = 0;
1320
1321	if (hvap != NULL) {			/* WDS, MONITOR, etc. */
1322		struct ieee80211com *ic = vap->iv_ic;
1323		struct ifnet *ifp = ic->ic_ifp;
1324		struct mwl_softc *sc = ifp->if_softc;
1325		struct mwl_hal *mh = sc->sc_mh;
1326
1327		/* XXX handle DWDS sta vap change */
1328		/* XXX do we need to disable interrupts? */
1329		mwl_hal_intrset(mh, 0);		/* disable interrupts */
1330		error = mwl_reset_vap(vap, vap->iv_state);
1331		mwl_hal_intrset(mh, sc->sc_imask);
1332	}
1333	return error;
1334}
1335
1336/*
1337 * Allocate a tx buffer for sending a frame.  The
1338 * packet is assumed to have the WME AC stored so
1339 * we can use it to select the appropriate h/w queue.
1340 */
1341static struct mwl_txbuf *
1342mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1343{
1344	struct mwl_txbuf *bf;
1345
1346	/*
1347	 * Grab a TX buffer and associated resources.
1348	 */
1349	MWL_TXQ_LOCK(txq);
1350	bf = STAILQ_FIRST(&txq->free);
1351	if (bf != NULL) {
1352		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1353		txq->nfree--;
1354	}
1355	MWL_TXQ_UNLOCK(txq);
1356	if (bf == NULL)
1357		DPRINTF(sc, MWL_DEBUG_XMIT,
1358		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1359	return bf;
1360}
1361
1362/*
1363 * Return a tx buffer to the queue it came from.  Note there
1364 * are two cases because we must preserve the order of buffers
1365 * as it reflects the fixed order of descriptors in memory
1366 * (the firmware pre-fetches descriptors so we cannot reorder).
1367 */
1368static void
1369mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1370{
1371	bf->bf_m = NULL;
1372	bf->bf_node = NULL;
1373	MWL_TXQ_LOCK(txq);
1374	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1375	txq->nfree++;
1376	MWL_TXQ_UNLOCK(txq);
1377}
1378
1379static void
1380mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
1381{
1382	bf->bf_m = NULL;
1383	bf->bf_node = NULL;
1384	MWL_TXQ_LOCK(txq);
1385	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
1386	txq->nfree++;
1387	MWL_TXQ_UNLOCK(txq);
1388}
1389
/*
 * if_start handler: drain the interface send queue, mapping each
 * frame to the h/w tx queue chosen by its WME AC, and batch-kick
 * the firmware every mwl_txcoalesce frames to limit register pokes.
 */
static void
mwl_start(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	nqueued = 0;
	for (;;) {
		bf = NULL;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		/*
		 * Grab the node for the destination.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			/* flow-control: stall the queue rather than drop */
			sc->sc_stats.mst_tx_qstop++;
			/* XXX blocks other traffic */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
#else
			/* default policy: tail-drop and keep going */
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			ifp->if_oerrors++;
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host. So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1472
/*
 * Transmit a raw (caller-formed) 802.11 frame, e.g. management
 * frames injected by net80211.  Returns 0 on success or an errno;
 * on error the node reference is released here.
 * NB(review): on the mwl_tx_start failure path `m` is not freed
 * here -- presumably mwl_tx_start consumes the mbuf on error;
 * confirm against its definition.
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
		ieee80211_free_node(ni);
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		/* XXX blocks other traffic */
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		ieee80211_free_node(ni);
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		ifp->if_oerrors++;
		mwl_puttxbuf_head(txq, bf);

		ieee80211_free_node(ni);
		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host. So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1532
1533static int
1534mwl_media_change(struct ifnet *ifp)
1535{
1536	struct ieee80211vap *vap = ifp->if_softc;
1537	int error;
1538
1539	error = ieee80211_media_change(ifp);
1540	/* NB: only the fixed rate can change and that doesn't need a reset */
1541	if (error == ENETRESET) {
1542		mwl_setrates(vap);
1543		error = 0;
1544	}
1545	return error;
1546}
1547
#ifdef MWL_DEBUG
/*
 * Debug helper: dump a HAL key (cipher, key bytes, TKIP MICs,
 * flags) and the associated mac address to the console.
 * NB(review): ciphers[] is indexed by hk->keyTypeId -- assumes
 * KEY_TYPE_ID_{WEP,TKIP,AES} are 0..2; confirm in the HAL headers.
 */
static void
mwl_keyprint(struct mwl_softc *sc, const char *tag,
	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	static const char *ciphers[] = {
		"WEP",
		"TKIP",
		"AES-CCM",
	};
	int i, n;

	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
	/* NB: key bytes live in the aes member regardless of cipher */
	for (i = 0, n = hk->keyLen; i < n; i++)
		printf(" %02x", hk->key.aes[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
		printf(" %s", "rxmic");
		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
			printf(" %02x", hk->key.tkip.rxMic[i]);
		printf(" txmic");
		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
			printf(" %02x", hk->key.tkip.txMic[i]);
	}
	printf(" flags 0x%x\n", hk->keyFlags);
}
#endif
1575
1576/*
1577 * Allocate a key cache slot for a unicast key.  The
1578 * firmware handles key allocation and every station is
1579 * guaranteed key space so we are always successful.
1580 */
1581static int
1582mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
1583	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1584{
1585	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1586
1587	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
1588	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
1589		if (!(&vap->iv_nw_keys[0] <= k &&
1590		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
1591			/* should not happen */
1592			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1593				"%s: bogus group key\n", __func__);
1594			return 0;
1595		}
1596		/* give the caller what they requested */
1597		*keyix = *rxkeyix = k - vap->iv_nw_keys;
1598	} else {
1599		/*
1600		 * Firmware handles key allocation.
1601		 */
1602		*keyix = *rxkeyix = 0;
1603	}
1604	return 1;
1605}
1606
1607/*
1608 * Delete a key entry allocated by mwl_key_alloc.
1609 */
1610static int
1611mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
1612{
1613	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1614	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1615	MWL_HAL_KEYVAL hk;
1616	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
1617	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1618
1619	if (hvap == NULL) {
1620		if (vap->iv_opmode != IEEE80211_M_WDS) {
1621			/* XXX monitor mode? */
1622			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1623			    "%s: no hvap for opmode %d\n", __func__,
1624			    vap->iv_opmode);
1625			return 0;
1626		}
1627		hvap = MWL_VAP(vap)->mv_ap_hvap;
1628	}
1629
1630	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
1631	    __func__, k->wk_keyix);
1632
1633	memset(&hk, 0, sizeof(hk));
1634	hk.keyIndex = k->wk_keyix;
1635	switch (k->wk_cipher->ic_cipher) {
1636	case IEEE80211_CIPHER_WEP:
1637		hk.keyTypeId = KEY_TYPE_ID_WEP;
1638		break;
1639	case IEEE80211_CIPHER_TKIP:
1640		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1641		break;
1642	case IEEE80211_CIPHER_AES_CCM:
1643		hk.keyTypeId = KEY_TYPE_ID_AES;
1644		break;
1645	default:
1646		/* XXX should not happen */
1647		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1648		    __func__, k->wk_cipher->ic_cipher);
1649		return 0;
1650	}
1651	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
1652}
1653
1654static __inline int
1655addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1656{
1657	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1658		if (k->wk_flags & IEEE80211_KEY_XMIT)
1659			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1660		if (k->wk_flags & IEEE80211_KEY_RECV)
1661			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1662		return 1;
1663	} else
1664		return 0;
1665}
1666
1667/*
1668 * Set the key cache contents for the specified key.  Key cache
1669 * slot(s) must already have been allocated by mwl_key_alloc.
1670 */
1671static int
1672mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
1673	const uint8_t mac[IEEE80211_ADDR_LEN])
1674{
1675#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
1676/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
1677#define	IEEE80211_IS_STATICKEY(k) \
1678	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
1679	 (GRPXMIT|IEEE80211_KEY_RECV))
1680	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1681	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1682	const struct ieee80211_cipher *cip = k->wk_cipher;
1683	const uint8_t *macaddr;
1684	MWL_HAL_KEYVAL hk;
1685
1686	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
1687		("s/w crypto set?"));
1688
1689	if (hvap == NULL) {
1690		if (vap->iv_opmode != IEEE80211_M_WDS) {
1691			/* XXX monitor mode? */
1692			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1693			    "%s: no hvap for opmode %d\n", __func__,
1694			    vap->iv_opmode);
1695			return 0;
1696		}
1697		hvap = MWL_VAP(vap)->mv_ap_hvap;
1698	}
1699	memset(&hk, 0, sizeof(hk));
1700	hk.keyIndex = k->wk_keyix;
1701	switch (cip->ic_cipher) {
1702	case IEEE80211_CIPHER_WEP:
1703		hk.keyTypeId = KEY_TYPE_ID_WEP;
1704		hk.keyLen = k->wk_keylen;
1705		if (k->wk_keyix == vap->iv_def_txkey)
1706			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
1707		if (!IEEE80211_IS_STATICKEY(k)) {
1708			/* NB: WEP is never used for the PTK */
1709			(void) addgroupflags(&hk, k);
1710		}
1711		break;
1712	case IEEE80211_CIPHER_TKIP:
1713		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1714		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
1715		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
1716		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
1717		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
1718		if (!addgroupflags(&hk, k))
1719			hk.keyFlags |= KEY_FLAG_PAIRWISE;
1720		break;
1721	case IEEE80211_CIPHER_AES_CCM:
1722		hk.keyTypeId = KEY_TYPE_ID_AES;
1723		hk.keyLen = k->wk_keylen;
1724		if (!addgroupflags(&hk, k))
1725			hk.keyFlags |= KEY_FLAG_PAIRWISE;
1726		break;
1727	default:
1728		/* XXX should not happen */
1729		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1730		    __func__, k->wk_cipher->ic_cipher);
1731		return 0;
1732	}
1733	/*
1734	 * NB: tkip mic keys get copied here too; the layout
1735	 *     just happens to match that in ieee80211_key.
1736	 */
1737	memcpy(hk.key.aes, k->wk_key, hk.keyLen);
1738
1739	/*
1740	 * Locate address of sta db entry for writing key;
1741	 * the convention unfortunately is somewhat different
1742	 * than how net80211, hostapd, and wpa_supplicant think.
1743	 */
1744	if (vap->iv_opmode == IEEE80211_M_STA) {
1745		/*
1746		 * NB: keys plumbed before the sta reaches AUTH state
1747		 * will be discarded or written to the wrong sta db
1748		 * entry because iv_bss is meaningless.  This is ok
1749		 * (right now) because we handle deferred plumbing of
1750		 * WEP keys when the sta reaches AUTH state.
1751		 */
1752		macaddr = vap->iv_bss->ni_bssid;
1753		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
1754			/* XXX plumb to local sta db too for static key wep */
1755			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
1756		}
1757	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
1758	    vap->iv_state != IEEE80211_S_RUN) {
1759		/*
1760		 * Prior to RUN state a WDS vap will not it's BSS node
1761		 * setup so we will plumb the key to the wrong mac
1762		 * address (it'll be our local address).  Workaround
1763		 * this for the moment by grabbing the correct address.
1764		 */
1765		macaddr = vap->iv_des_bssid;
1766	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
1767		macaddr = vap->iv_myaddr;
1768	else
1769		macaddr = mac;
1770	KEYPRINTF(sc, &hk, macaddr);
1771	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
1772#undef IEEE80211_IS_STATICKEY
1773#undef GRPXMIT
1774}
1775
/* unaligned little endian access */
/*
 * NB: byte-at-a-time loads, so these are safe on strict-alignment
 * machines; (p) is evaluated multiple times -- pass only
 * side-effect free expressions.
 */
#define LE_READ_2(p)				\
	((uint16_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8)))
#define LE_READ_4(p)				\
	((uint32_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8) |	\
	  (((const uint8_t *)(p))[2] << 16) |	\
	  (((const uint8_t *)(p))[3] << 24)))
1787
1788/*
1789 * Set the multicast filter contents into the hardware.
1790 * XXX f/w has no support; just defer to the os.
1791 */
1792static void
1793mwl_setmcastfilter(struct mwl_softc *sc)
1794{
1795	struct ifnet *ifp = sc->sc_ifp;
1796#if 0
1797	struct ether_multi *enm;
1798	struct ether_multistep estep;
1799	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
1800	uint8_t *mp;
1801	int nmc;
1802
1803	mp = macs;
1804	nmc = 0;
1805	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
1806	while (enm != NULL) {
1807		/* XXX Punt on ranges. */
1808		if (nmc == MWL_HAL_MCAST_MAX ||
1809		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
1810			ifp->if_flags |= IFF_ALLMULTI;
1811			return;
1812		}
1813		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
1814		mp += IEEE80211_ADDR_LEN, nmc++;
1815		ETHER_NEXT_MULTI(estep, enm);
1816	}
1817	ifp->if_flags &= ~IFF_ALLMULTI;
1818	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
1819#else
1820	/* XXX no mcast filter support; we get everything */
1821	ifp->if_flags |= IFF_ALLMULTI;
1822#endif
1823}
1824
1825static int
1826mwl_mode_init(struct mwl_softc *sc)
1827{
1828	struct ifnet *ifp = sc->sc_ifp;
1829	struct ieee80211com *ic = ifp->if_l2com;
1830	struct mwl_hal *mh = sc->sc_mh;
1831
1832	/*
1833	 * NB: Ignore promisc in hostap mode; it's set by the
1834	 * bridge.  This is wrong but we have no way to
1835	 * identify internal requests (from the bridge)
1836	 * versus external requests such as for tcpdump.
1837	 */
1838	mwl_hal_setpromisc(mh, (ifp->if_flags & IFF_PROMISC) &&
1839	    ic->ic_opmode != IEEE80211_M_HOSTAP);
1840	mwl_setmcastfilter(sc);
1841
1842	return 0;
1843}
1844
1845/*
1846 * Callback from the 802.11 layer after a multicast state change.
1847 */
1848static void
1849mwl_update_mcast(struct ifnet *ifp)
1850{
1851	struct mwl_softc *sc = ifp->if_softc;
1852
1853	mwl_setmcastfilter(sc);
1854}
1855
1856/*
1857 * Callback from the 802.11 layer after a promiscuous mode change.
1858 * Note this interface does not check the operating mode as this
1859 * is an internal callback and we are expected to honor the current
1860 * state (e.g. this is used for setting the interface in promiscuous
1861 * mode when operating in hostap mode to do ACS).
1862 */
1863static void
1864mwl_update_promisc(struct ifnet *ifp)
1865{
1866	struct mwl_softc *sc = ifp->if_softc;
1867
1868	mwl_hal_setpromisc(sc->sc_mh, (ifp->if_flags & IFF_PROMISC) != 0);
1869}
1870
1871/*
1872 * Callback from the 802.11 layer to update the slot time
1873 * based on the current setting.  We use it to notify the
1874 * firmware of ERP changes and the f/w takes care of things
1875 * like slot time and preamble.
1876 */
1877static void
1878mwl_updateslot(struct ifnet *ifp)
1879{
1880	struct mwl_softc *sc = ifp->if_softc;
1881	struct ieee80211com *ic = ifp->if_l2com;
1882	struct mwl_hal *mh = sc->sc_mh;
1883	int prot;
1884
1885	/* NB: can be called early; suppress needless cmds */
1886	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1887		return;
1888
1889	/*
1890	 * Calculate the ERP flags.  The firwmare will use
1891	 * this to carry out the appropriate measures.
1892	 */
1893	prot = 0;
1894	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
1895		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
1896			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
1897		if (ic->ic_flags & IEEE80211_F_USEPROT)
1898			prot |= IEEE80211_ERP_USE_PROTECTION;
1899		if (ic->ic_flags & IEEE80211_F_USEBARKER)
1900			prot |= IEEE80211_ERP_LONG_PREAMBLE;
1901	}
1902
1903	DPRINTF(sc, MWL_DEBUG_RESET,
1904	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
1905	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
1906	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
1907	    ic->ic_flags);
1908
1909	mwl_hal_setgprot(mh, prot);
1910}
1911
1912/*
1913 * Setup the beacon frame.
1914 */
1915static int
1916mwl_beacon_setup(struct ieee80211vap *vap)
1917{
1918	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1919	struct ieee80211_node *ni = vap->iv_bss;
1920	struct ieee80211_beacon_offsets bo;
1921	struct mbuf *m;
1922
1923	m = ieee80211_beacon_alloc(ni, &bo);
1924	if (m == NULL)
1925		return ENOBUFS;
1926	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1927	m_free(m);
1928
1929	return 0;
1930}
1931
1932/*
1933 * Update the beacon frame in response to a change.
1934 */
1935static void
1936mwl_beacon_update(struct ieee80211vap *vap, int item)
1937{
1938	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1939	struct ieee80211com *ic = vap->iv_ic;
1940
1941	KASSERT(hvap != NULL, ("no beacon"));
1942	switch (item) {
1943	case IEEE80211_BEACON_ERP:
1944		mwl_updateslot(ic->ic_ifp);
1945		break;
1946	case IEEE80211_BEACON_HTINFO:
1947		mwl_hal_setnprotmode(hvap,
1948		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1949		break;
1950	case IEEE80211_BEACON_CAPS:
1951	case IEEE80211_BEACON_WME:
1952	case IEEE80211_BEACON_APPIE:
1953	case IEEE80211_BEACON_CSA:
1954		break;
1955	case IEEE80211_BEACON_TIM:
1956		/* NB: firmware always forms TIM */
1957		return;
1958	}
1959	/* XXX retain beacon frame and update */
1960	mwl_beacon_setup(vap);
1961}
1962
1963static void
1964mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1965{
1966	bus_addr_t *paddr = (bus_addr_t*) arg;
1967	KASSERT(error == 0, ("error %u on bus_dma callback", error));
1968	*paddr = segs->ds_addr;
1969}
1970
1971#ifdef MWL_HOST_PS_SUPPORT
1972/*
1973 * Handle power save station occupancy changes.
1974 */
1975static void
1976mwl_update_ps(struct ieee80211vap *vap, int nsta)
1977{
1978	struct mwl_vap *mvp = MWL_VAP(vap);
1979
1980	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
1981		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
1982	mvp->mv_last_ps_sta = nsta;
1983}
1984
1985/*
1986 * Handle associated station power save state changes.
1987 */
1988static int
1989mwl_set_tim(struct ieee80211_node *ni, int set)
1990{
1991	struct ieee80211vap *vap = ni->ni_vap;
1992	struct mwl_vap *mvp = MWL_VAP(vap);
1993
1994	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1995		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1996		    IEEE80211_AID(ni->ni_associd), set);
1997		return 1;
1998	} else
1999		return 0;
2000}
2001#endif /* MWL_HOST_PS_SUPPORT */
2002
2003static int
2004mwl_desc_setup(struct mwl_softc *sc, const char *name,
2005	struct mwl_descdma *dd,
2006	int nbuf, size_t bufsize, int ndesc, size_t descsize)
2007{
2008	struct ifnet *ifp = sc->sc_ifp;
2009	uint8_t *ds;
2010	int error;
2011
2012	DPRINTF(sc, MWL_DEBUG_RESET,
2013	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
2014	    __func__, name, nbuf, (uintmax_t) bufsize,
2015	    ndesc, (uintmax_t) descsize);
2016
2017	dd->dd_name = name;
2018	dd->dd_desc_len = nbuf * ndesc * descsize;
2019
2020	/*
2021	 * Setup DMA descriptor area.
2022	 */
2023	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
2024		       PAGE_SIZE, 0,		/* alignment, bounds */
2025		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2026		       BUS_SPACE_MAXADDR,	/* highaddr */
2027		       NULL, NULL,		/* filter, filterarg */
2028		       dd->dd_desc_len,		/* maxsize */
2029		       1,			/* nsegments */
2030		       dd->dd_desc_len,		/* maxsegsize */
2031		       BUS_DMA_ALLOCNOW,	/* flags */
2032		       NULL,			/* lockfunc */
2033		       NULL,			/* lockarg */
2034		       &dd->dd_dmat);
2035	if (error != 0) {
2036		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
2037		return error;
2038	}
2039
2040	/* allocate descriptors */
2041	error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
2042	if (error != 0) {
2043		if_printf(ifp, "unable to create dmamap for %s descriptors, "
2044			"error %u\n", dd->dd_name, error);
2045		goto fail0;
2046	}
2047
2048	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
2049				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2050				 &dd->dd_dmamap);
2051	if (error != 0) {
2052		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
2053			"error %u\n", nbuf * ndesc, dd->dd_name, error);
2054		goto fail1;
2055	}
2056
2057	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
2058				dd->dd_desc, dd->dd_desc_len,
2059				mwl_load_cb, &dd->dd_desc_paddr,
2060				BUS_DMA_NOWAIT);
2061	if (error != 0) {
2062		if_printf(ifp, "unable to map %s descriptors, error %u\n",
2063			dd->dd_name, error);
2064		goto fail2;
2065	}
2066
2067	ds = dd->dd_desc;
2068	memset(ds, 0, dd->dd_desc_len);
2069	DPRINTF(sc, MWL_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
2070	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
2071	    (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
2072
2073	return 0;
2074fail2:
2075	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
2076fail1:
2077	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
2078fail0:
2079	bus_dma_tag_destroy(dd->dd_dmat);
2080	memset(dd, 0, sizeof(*dd));
2081	return error;
2082#undef DS2PHYS
2083}
2084
/*
 * Tear down a descriptor area created by mwl_desc_setup and
 * clear the bookkeeping state.  Teardown order mirrors setup:
 * unload, free memory, destroy map, destroy tag.
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	/* NB: zeroed dd_desc_len is how callers detect "not set up" */
	memset(dd, 0, sizeof(*dd));
}
2095
2096/*
2097 * Construct a tx q's free list.  The order of entries on
2098 * the list must reflect the physical layout of tx descriptors
2099 * because the firmware pre-fetches descriptors.
2100 *
2101 * XXX might be better to use indices into the buffer array.
2102 */
2103static void
2104mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2105{
2106	struct mwl_txbuf *bf;
2107	int i;
2108
2109	bf = txq->dma.dd_bufptr;
2110	STAILQ_INIT(&txq->free);
2111	for (i = 0; i < mwl_txbuf; i++, bf++)
2112		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2113	txq->nfree = i;
2114}
2115
/* Convert a descriptor's kva within a DMA area to its bus address. */
#define	DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2118
2119static int
2120mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
2121{
2122	struct ifnet *ifp = sc->sc_ifp;
2123	int error, bsize, i;
2124	struct mwl_txbuf *bf;
2125	struct mwl_txdesc *ds;
2126
2127	error = mwl_desc_setup(sc, "tx", &txq->dma,
2128			mwl_txbuf, sizeof(struct mwl_txbuf),
2129			MWL_TXDESC, sizeof(struct mwl_txdesc));
2130	if (error != 0)
2131		return error;
2132
2133	/* allocate and setup tx buffers */
2134	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
2135	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2136	if (bf == NULL) {
2137		if_printf(ifp, "malloc of %u tx buffers failed\n",
2138			mwl_txbuf);
2139		return ENOMEM;
2140	}
2141	txq->dma.dd_bufptr = bf;
2142
2143	ds = txq->dma.dd_desc;
2144	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
2145		bf->bf_desc = ds;
2146		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
2147		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
2148				&bf->bf_dmamap);
2149		if (error != 0) {
2150			if_printf(ifp, "unable to create dmamap for tx "
2151				"buffer %u, error %u\n", i, error);
2152			return error;
2153		}
2154	}
2155	mwl_txq_reset(sc, txq);
2156	return 0;
2157}
2158
2159static void
2160mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2161{
2162	struct mwl_txbuf *bf;
2163	int i;
2164
2165	bf = txq->dma.dd_bufptr;
2166	for (i = 0; i < mwl_txbuf; i++, bf++) {
2167		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2168		KASSERT(bf->bf_node == NULL, ("node on free list"));
2169		if (bf->bf_dmamap != NULL)
2170			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2171	}
2172	STAILQ_INIT(&txq->free);
2173	txq->nfree = 0;
2174	if (txq->dma.dd_bufptr != NULL) {
2175		free(txq->dma.dd_bufptr, M_MWLDEV);
2176		txq->dma.dd_bufptr = NULL;
2177	}
2178	if (txq->dma.dd_desc_len != 0)
2179		mwl_desc_cleanup(sc, &txq->dma);
2180}
2181
2182static int
2183mwl_rxdma_setup(struct mwl_softc *sc)
2184{
2185	struct ifnet *ifp = sc->sc_ifp;
2186	int error, jumbosize, bsize, i;
2187	struct mwl_rxbuf *bf;
2188	struct mwl_jumbo *rbuf;
2189	struct mwl_rxdesc *ds;
2190	caddr_t data;
2191
2192	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2193			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2194			1, sizeof(struct mwl_rxdesc));
2195	if (error != 0)
2196		return error;
2197
2198	/*
2199	 * Receive is done to a private pool of jumbo buffers.
2200	 * This allows us to attach to mbuf's and avoid re-mapping
2201	 * memory on each rx we post.  We allocate a large chunk
2202	 * of memory and manage it in the driver.  The mbuf free
2203	 * callback method is used to reclaim frames after sending
2204	 * them up the stack.  By default we allocate 2x the number of
2205	 * rx descriptors configured so we have some slop to hold
2206	 * us while frames are processed.
2207	 */
2208	if (mwl_rxbuf < 2*mwl_rxdesc) {
2209		if_printf(ifp,
2210		    "too few rx dma buffers (%d); increasing to %d\n",
2211		    mwl_rxbuf, 2*mwl_rxdesc);
2212		mwl_rxbuf = 2*mwl_rxdesc;
2213	}
2214	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2215	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2216
2217	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2218		       PAGE_SIZE, 0,		/* alignment, bounds */
2219		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2220		       BUS_SPACE_MAXADDR,	/* highaddr */
2221		       NULL, NULL,		/* filter, filterarg */
2222		       sc->sc_rxmemsize,	/* maxsize */
2223		       1,			/* nsegments */
2224		       sc->sc_rxmemsize,	/* maxsegsize */
2225		       BUS_DMA_ALLOCNOW,	/* flags */
2226		       NULL,			/* lockfunc */
2227		       NULL,			/* lockarg */
2228		       &sc->sc_rxdmat);
2229	error = bus_dmamap_create(sc->sc_rxdmat, BUS_DMA_NOWAIT, &sc->sc_rxmap);
2230	if (error != 0) {
2231		if_printf(ifp, "could not create rx DMA map\n");
2232		return error;
2233	}
2234
2235	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2236				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2237				 &sc->sc_rxmap);
2238	if (error != 0) {
2239		if_printf(ifp, "could not alloc %ju bytes of rx DMA memory\n",
2240		    (uintmax_t) sc->sc_rxmemsize);
2241		return error;
2242	}
2243
2244	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2245				sc->sc_rxmem, sc->sc_rxmemsize,
2246				mwl_load_cb, &sc->sc_rxmem_paddr,
2247				BUS_DMA_NOWAIT);
2248	if (error != 0) {
2249		if_printf(ifp, "could not load rx DMA map\n");
2250		return error;
2251	}
2252
2253	/*
2254	 * Allocate rx buffers and set them up.
2255	 */
2256	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2257	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2258	if (bf == NULL) {
2259		if_printf(ifp, "malloc of %u rx buffers failed\n", bsize);
2260		return error;
2261	}
2262	sc->sc_rxdma.dd_bufptr = bf;
2263
2264	STAILQ_INIT(&sc->sc_rxbuf);
2265	ds = sc->sc_rxdma.dd_desc;
2266	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2267		bf->bf_desc = ds;
2268		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2269		/* pre-assign dma buffer */
2270		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2271		/* NB: tail is intentional to preserve descriptor order */
2272		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2273	}
2274
2275	/*
2276	 * Place remainder of dma memory buffers on the free list.
2277	 */
2278	SLIST_INIT(&sc->sc_rxfree);
2279	for (; i < mwl_rxbuf; i++) {
2280		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2281		rbuf = MWL_JUMBO_DATA2BUF(data);
2282		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2283		sc->sc_nrxfree++;
2284	}
2285	return 0;
2286}
2287#undef DS2PHYS
2288
/*
 * Release rx dma resources.  Each piece is guarded so this is
 * safe to call after a partial mwl_rxdma_setup failure: unload
 * the map, free the jumbo pool memory, destroy the map, free
 * the buffer array, then tear down the descriptor area.
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	if (sc->sc_rxmap != NULL)
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxmap != NULL) {
		bus_dmamap_destroy(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmap = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	/* NB: nonzero dd_desc_len means the descriptor area was set up */
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
}
2309
2310static int
2311mwl_dma_setup(struct mwl_softc *sc)
2312{
2313	int error, i;
2314
2315	error = mwl_rxdma_setup(sc);
2316	if (error != 0) {
2317		mwl_rxdma_cleanup(sc);
2318		return error;
2319	}
2320
2321	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2322		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2323		if (error != 0) {
2324			mwl_dma_cleanup(sc);
2325			return error;
2326		}
2327	}
2328	return 0;
2329}
2330
2331static void
2332mwl_dma_cleanup(struct mwl_softc *sc)
2333{
2334	int i;
2335
2336	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2337		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2338	mwl_rxdma_cleanup(sc);
2339}
2340
2341static struct ieee80211_node *
2342mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2343{
2344	struct ieee80211com *ic = vap->iv_ic;
2345	struct mwl_softc *sc = ic->ic_ifp->if_softc;
2346	const size_t space = sizeof(struct mwl_node);
2347	struct mwl_node *mn;
2348
2349	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2350	if (mn == NULL) {
2351		/* XXX stat+msg */
2352		return NULL;
2353	}
2354	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2355	return &mn->mn_node;
2356}
2357
/*
 * Reclaim driver node state: remove the station from the
 * firmware's station db (via the appropriate hal vap), release
 * the station id, then chain to the saved net80211 cleanup.
 */
static void
mwl_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
        struct mwl_softc *sc = ic->ic_ifp->if_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
	    __func__, ni, ni->ni_ic, mn->mn_staid);

	/* staid 0 means no firmware station db entry was installed */
	if (mn->mn_staid != 0) {
		struct ieee80211vap *vap = ni->ni_vap;

		if (mn->mn_hvap != NULL) {
			/* sta mode installs the entry under our own address */
			if (vap->iv_opmode == IEEE80211_M_STA)
				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
			else
				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
		}
		/*
		 * NB: legacy WDS peer sta db entry is installed using
		 * the associate ap's hvap; use it again to delete it.
		 * XXX can vap be NULL?
		 */
		else if (vap->iv_opmode == IEEE80211_M_WDS &&
		    MWL_VAP(vap)->mv_ap_hvap != NULL)
			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
			    ni->ni_macaddr);
		delstaid(sc, mn->mn_staid);
		mn->mn_staid = 0;
	}
	/* chain to the method saved at attach time */
	sc->sc_node_cleanup(ni);
}
2391
2392/*
2393 * Reclaim rx dma buffers from packets sitting on the ampdu
2394 * reorder queue for a station.  We replace buffers with a
2395 * system cluster (if available).
2396 */
static void
mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
{
	/*
	 * NB: entire body is disabled.  The code below appears to
	 * predate the FreeBSD port (pool_cache_get_paddr, MEXTREMOVE,
	 * M_CLUSTER are not FreeBSD KPIs) and has never been wired up;
	 * the function is currently a no-op.  Kept for reference.
	 */
#if 0
	int i, n, off;
	struct mbuf *m;
	void *cl;

	n = rap->rxa_qframes;
	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
		m = rap->rxa_m[i];
		if (m == NULL)
			continue;
		n--;
		/* our dma buffers have a well-known free routine */
		if ((m->m_flags & M_EXT) == 0 ||
		    m->m_ext.ext_free != mwl_ext_free)
			continue;
		/*
		 * Try to allocate a cluster and move the data.
		 */
		off = m->m_data - m->m_ext.ext_buf;
		if (off + m->m_pkthdr.len > MCLBYTES) {
			/* XXX no AMSDU for now */
			continue;
		}
		cl = pool_cache_get_paddr(&mclpool_cache, 0,
		    &m->m_ext.ext_paddr);
		if (cl != NULL) {
			/*
			 * Copy the existing data to the cluster, remove
			 * the rx dma buffer, and attach the cluster in
			 * its place.  Note we preserve the offset to the
			 * data so frames being bridged can still prepend
			 * their headers without adding another mbuf.
			 */
			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
			MEXTREMOVE(m);
			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
			/* setup mbuf like _MCLGET does */
			m->m_flags |= M_CLUSTER | M_EXT_RW;
			_MOWNERREF(m, M_EXT | M_CLUSTER);
			/* NB: m_data is clobbered by MEXTADDR, adjust */
			m->m_data += off;
		}
	}
#endif
}
2445
2446/*
2447 * Callback to reclaim resources.  We first let the
2448 * net80211 layer do it's thing, then if we are still
2449 * blocked by a lack of rx dma buffers we walk the ampdu
2450 * reorder q's to reclaim buffers by copying to a system
2451 * cluster.
2452 */
2453static void
2454mwl_node_drain(struct ieee80211_node *ni)
2455{
2456	struct ieee80211com *ic = ni->ni_ic;
2457        struct mwl_softc *sc = ic->ic_ifp->if_softc;
2458	struct mwl_node *mn = MWL_NODE(ni);
2459
2460	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2461	    __func__, ni, ni->ni_vap, mn->mn_staid);
2462
2463	/* NB: call up first to age out ampdu q's */
2464	sc->sc_node_drain(ni);
2465
2466	/* XXX better to not check low water mark? */
2467	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2468	    (ni->ni_flags & IEEE80211_NODE_HT)) {
2469		uint8_t tid;
2470		/*
2471		 * Walk the reorder q and reclaim rx dma buffers by copying
2472		 * the packet contents into clusters.
2473		 */
2474		for (tid = 0; tid < WME_NUM_TID; tid++) {
2475			struct ieee80211_rx_ampdu *rap;
2476
2477			rap = &ni->ni_rx_ampdu[tid];
2478			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2479				continue;
2480			if (rap->rxa_qframes)
2481				mwl_ampdu_rxdma_reclaim(rap);
2482		}
2483	}
2484}
2485
/*
 * Return rssi/noise for a node.  Rssi comes from the saved
 * net80211 method; noise is a fixed -95 dBm placeholder (the
 * per-antenna noise-floor path is compiled out).
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2501
2502/*
2503 * Convert Hardware per-antenna rssi info to common format:
2504 * Let a1, a2, a3 represent the amplitudes per chain
2505 * Let amax represent max[a1, a2, a3]
2506 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2507 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2508 * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2509 * maintain some extra precision.
2510 *
2511 * Values are stored in .5 db format capped at 127.
2512 */
static void
mwl_node_getmimoinfo(const struct ieee80211_node *ni,
	struct ieee80211_mimo_info *mi)
{
/*
 * CVT computes a per-chain rssi relative to the strongest chain:
 * rssi + (20*log10(a_i) - 20*log10(amax)), using the 4*20*log10
 * table (hence the >>2), then converts to .5 dB units (<<1)
 * capped at 127.
 */
#define	CVT(_dst, _src) do {						\
	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
} while (0)
	/* 4*20*log10(idx); extra factor of 4 retains precision */
	static const int8_t logdbtbl[32] = {
	       0,   0,  24,  38,  48,  56,  62,  68,
	      72,  76,  80,  83,  86,  89,  92,  94,
	      96,  98, 100, 102, 104, 106, 107, 109,
	     110, 112, 113, 115, 116, 117, 118, 119
	};
	const struct mwl_node *mn = MWL_NODE_CONST(ni);
	/* NB: rsvd1 holds the composite rssi stashed by mwl_rx_proc */
	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
	uint32_t rssi_max;

	/* find the strongest chain to use as the reference */
	rssi_max = mn->mn_ai.rssi_a;
	if (mn->mn_ai.rssi_b > rssi_max)
		rssi_max = mn->mn_ai.rssi_b;
	if (mn->mn_ai.rssi_c > rssi_max)
		rssi_max = mn->mn_ai.rssi_c;

	CVT(mi->rssi[0], mn->mn_ai.rssi_a);
	CVT(mi->rssi[1], mn->mn_ai.rssi_b);
	CVT(mi->rssi[2], mn->mn_ai.rssi_c);

	mi->noise[0] = mn->mn_ai.nf_a;
	mi->noise[1] = mn->mn_ai.nf_b;
	mi->noise[2] = mn->mn_ai.nf_c;
#undef CVT
}
2546
2547static __inline void *
2548mwl_getrxdma(struct mwl_softc *sc)
2549{
2550	struct mwl_jumbo *buf;
2551	void *data;
2552
2553	/*
2554	 * Allocate from jumbo pool.
2555	 */
2556	MWL_RXFREE_LOCK(sc);
2557	buf = SLIST_FIRST(&sc->sc_rxfree);
2558	if (buf == NULL) {
2559		DPRINTF(sc, MWL_DEBUG_ANY,
2560		    "%s: out of rx dma buffers\n", __func__);
2561		sc->sc_stats.mst_rx_nodmabuf++;
2562		data = NULL;
2563	} else {
2564		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2565		sc->sc_nrxfree--;
2566		data = MWL_JUMBO_BUF2DATA(buf);
2567	}
2568	MWL_RXFREE_UNLOCK(sc);
2569	return data;
2570}
2571
2572static __inline void
2573mwl_putrxdma(struct mwl_softc *sc, void *data)
2574{
2575	struct mwl_jumbo *buf;
2576
2577	/* XXX bounds check data */
2578	MWL_RXFREE_LOCK(sc);
2579	buf = MWL_JUMBO_DATA2BUF(data);
2580	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2581	sc->sc_nrxfree++;
2582	MWL_RXFREE_UNLOCK(sc);
2583}
2584
/*
 * (Re)initialize an rx descriptor for posting to the firmware.
 * If the buffer slot is empty, try to refill it from the jumbo
 * pool; on failure the descriptor is marked OS-owned so the
 * firmware skips it.  Fields are written before ownership is
 * flipped to DRIVER_OWN and the descriptor is synced.
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2623
2624static int
2625mwl_ext_free(struct mbuf *m, void *data, void *arg)
2626{
2627	struct mwl_softc *sc = arg;
2628
2629	/* XXX bounds check data */
2630	mwl_putrxdma(sc, data);
2631	/*
2632	 * If we were previously blocked by a lack of rx dma buffers
2633	 * check if we now have enough to restart rx interrupt handling.
2634	 * NB: we know we are called at splvm which is above splnet.
2635	 */
2636	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
2637		sc->sc_rxblocked = 0;
2638		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
2639	}
2640	return (EXT_FREE_OK);
2641}
2642
/*
 * 802.11 BlockAckReq (BAR) control frame header as used for
 * size calculations in mwl_anyhdrsize; the BAR control, sequence
 * control, and FCS fields are intentionally omitted.
 */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];
	u_int8_t	i_dur[2];
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];
	/* ctl, seq, FCS */
} __packed;
2650
2651/*
2652 * Like ieee80211_anyhdrsize, but handles BAR frames
2653 * specially so the logic below to piece the 802.11
2654 * header together works.
2655 */
2656static __inline int
2657mwl_anyhdrsize(const void *data)
2658{
2659	const struct ieee80211_frame *wh = data;
2660
2661	if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2662		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2663		case IEEE80211_FC0_SUBTYPE_CTS:
2664		case IEEE80211_FC0_SUBTYPE_ACK:
2665			return sizeof(struct ieee80211_frame_ack);
2666		case IEEE80211_FC0_SUBTYPE_BAR:
2667			return sizeof(struct mwl_frame_bar);
2668		}
2669		return sizeof(struct ieee80211_frame_min);
2670	} else
2671		return ieee80211_hdrsize(data);
2672}
2673
2674static void
2675mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2676{
2677	const struct ieee80211_frame *wh;
2678	struct ieee80211_node *ni;
2679
2680	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2681	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2682	if (ni != NULL) {
2683		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2684		ieee80211_free_node(ni);
2685	}
2686}
2687
2688/*
2689 * Convert hardware signal strength to rssi.  The value
2690 * provided by the device has the noise floor added in;
2691 * we need to compensate for this but we don't have that
2692 * so we use a fixed value.
2693 *
2694 * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2695 * offset is already set as part of the initial gain.  This
2696 * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2697 */
/*
 * Convert hardware signal strength to an rssi in .5 dBm units,
 * clamped to [0, 127].  See the block comment above: the value
 * includes the noise floor, compensated here with a fixed
 * offset of 8 (good for both 2.4 and 5GHz).
 */
static __inline int
cvtrssi(uint8_t ssi)
{
	int val;

	/* XXX hack guess until we have a real noise floor */
	val = 2 * (87 - ((int) ssi + 8));	/* NB: .5 dBm units */
	if (val < 0)
		return 0;
	if (val > 127)
		return 127;
	return val;
}
2706
/*
 * Rx deferred processing (taskqueue).  Walk up to mwl_rxquota
 * descriptors, reconstructing a full 802.11 header in front of
 * each payload (the firmware provides a partial header behind a
 * 16-bit prefix), attaching the dma buffer to an mbuf zero-copy,
 * and dispatching to net80211.  Each buffer handed up is replaced
 * from the jumbo pool; if the pool runs dry, rx interrupts are
 * disabled until mwl_ext_free refills past the low water mark.
 */
static void
mwl_rx_proc(void *arg, int npending)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((((const struct ieee80211_frame *)wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct mwl_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_qosframe_addr4 *wh4;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX fixed noise floor */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* stop at the first descriptor still owned by the firmware */
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			ifp->if_ierrors++;
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		/* NB: payload starts after a 16-bit prefix + 4-address header */
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free,
		    data, sc, 0, EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		m->m_pkthdr.rcvif = ifp;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh)) {
			if (IEEE80211_DIR_DSTODS(wh)) {
				wh4 = mtod(m,
				    struct ieee80211_qosframe_addr4*);
				*(uint16_t *)wh4->i_qos = ds->QosCtrl;
			} else {
				*(uint16_t *)wh->i_qos = ds->QosCtrl;
			}
		}
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_WEP | IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		ifp->if_ipackets++;

		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			/* stash per-antenna info for mwl_node_getmimoinfo */
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	sc->sc_rxnext = bf;

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
	    !IFQ_IS_EMPTY(&ifp->if_snd)) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(ifp);
	}
#undef IEEE80211_DIR_DSTODS
}
2942
/*
 * Initialize a tx queue: lock, queue number, and the descriptor
 * hardware ring.  Each buffer's descriptor is linked to the next
 * buffer's (wrapping at the end) so the firmware can walk the
 * ring; the free list order from mwl_txdma_setup is preserved.
 */
static void
mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
{
	struct mwl_txbuf *bf, *bn;
	struct mwl_txdesc *ds;

	MWL_TXQ_LOCK_INIT(sc, txq);
	txq->qnum = qnum;
	txq->txpri = 0;	/* XXX */
#if 0
	/* NB: q setup by mwl_txdma_setup XXX */
	STAILQ_INIT(&txq->free);
#endif
	STAILQ_FOREACH(bf, &txq->free, bf_list) {
		bf->bf_txq = txq;

		/* link this descriptor to the next buffer's (wrap at end) */
		ds = bf->bf_desc;
		bn = STAILQ_NEXT(bf, bf_list);
		if (bn == NULL)
			bn = STAILQ_FIRST(&txq->free);
		ds->pPhysNext = htole32(bn->bf_daddr);
	}
	STAILQ_INIT(&txq->active);
}
2967
2968/*
2969 * Setup a hardware data transmit queue for the specified
2970 * access control.  We record the mapping from ac's
2971 * to h/w queues for use by mwl_tx_start.
2972 */
2973static int
2974mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2975{
2976#define	N(a)	(sizeof(a)/sizeof(a[0]))
2977	struct mwl_txq *txq;
2978
2979	if (ac >= N(sc->sc_ac2q)) {
2980		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2981			ac, N(sc->sc_ac2q));
2982		return 0;
2983	}
2984	if (mvtype >= MWL_NUM_TX_QUEUES) {
2985		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2986			mvtype, MWL_NUM_TX_QUEUES);
2987		return 0;
2988	}
2989	txq = &sc->sc_txq[mvtype];
2990	mwl_txq_init(sc, txq, mvtype);
2991	sc->sc_ac2q[ac] = txq;
2992	return 1;
2993#undef N
2994}
2995
2996/*
2997 * Update WME parameters for a transmit queue.
2998 */
2999static int
3000mwl_txq_update(struct mwl_softc *sc, int ac)
3001{
3002#define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
3003	struct ifnet *ifp = sc->sc_ifp;
3004	struct ieee80211com *ic = ifp->if_l2com;
3005	struct mwl_txq *txq = sc->sc_ac2q[ac];
3006	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
3007	struct mwl_hal *mh = sc->sc_mh;
3008	int aifs, cwmin, cwmax, txoplim;
3009
3010	aifs = wmep->wmep_aifsn;
3011	/* XXX in sta mode need to pass log values for cwmin/max */
3012	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
3013	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
3014	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */
3015
3016	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
3017		device_printf(sc->sc_dev, "unable to update hardware queue "
3018			"parameters for %s traffic!\n",
3019			ieee80211_wme_acnames[ac]);
3020		return 0;
3021	}
3022	return 1;
3023#undef MWL_EXPONENT_TO_VALUE
3024}
3025
3026/*
3027 * Callback from the 802.11 layer to update WME parameters.
3028 */
3029static int
3030mwl_wme_update(struct ieee80211com *ic)
3031{
3032	struct mwl_softc *sc = ic->ic_ifp->if_softc;
3033
3034	return !mwl_txq_update(sc, WME_AC_BE) ||
3035	    !mwl_txq_update(sc, WME_AC_BK) ||
3036	    !mwl_txq_update(sc, WME_AC_VI) ||
3037	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
3038}
3039
3040/*
3041 * Reclaim resources for a setup queue.
3042 */
static void
mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	/* XXX hal work? */
	/*
	 * Only the queue lock is torn down here; descriptor/buffer
	 * teardown is presumably handled by the tx DMA cleanup path
	 * (see mwl_txdma_setup reference in mwl_txq_init) — confirm.
	 */
	MWL_TXQ_LOCK_DESTROY(txq);
}
3049
3050/*
3051 * Reclaim all tx queue resources.
3052 */
3053static void
3054mwl_tx_cleanup(struct mwl_softc *sc)
3055{
3056	int i;
3057
3058	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3059		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
3060}
3061
/*
 * DMA map an outbound frame for the given tx buffer.  Chains that
 * need more than MWL_TXDESC segments are linearized and re-loaded.
 * On success the (possibly replaced) mbuf is saved in bf->bf_m and
 * 0 is returned; on any failure the mbuf is freed here and an
 * errno is returned.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = MWL_TXDESC+1;	/* NB: sentinel; handled below */
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_NOWAIT);
#endif
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* re-load the now-linearized chain */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3123
/*
 * Map a legacy rate code (NB: looks like the net80211 500Kb/s
 * encoding, e.g. 2 == 1Mb/s — confirm) onto the hardware rate
 * index.  Unknown codes map to index 0, as before.
 */
static __inline int
mwl_cvtlegacyrate(int rate)
{
	static const int legacyrates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	int ix;

	for (ix = 0; ix < (int)(sizeof(legacyrates)/sizeof(legacyrates[0])); ix++)
		if (legacyrates[ix] == rate)
			return ix;
	return 0;
}
3144
3145/*
3146 * Calculate fixed tx rate information per client state;
3147 * this value is suitable for writing to the Format field
3148 * of a tx descriptor.
3149 */
static uint16_t
mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
{
	uint16_t fmt;

	/* antenna config 3 + extension channel placement from the bss chan */
	fmt = SM(3, EAGLE_TXD_ANTENNA)
	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
		fmt |= EAGLE_TXD_FORMAT_HT
		    /* NB: 0x80 implicitly stripped from ucastrate */
		    | SM(rate, EAGLE_TXD_RATE);
		/* XXX short/long GI may be wrong; re-check */
		/* channel width + guard interval from the peer's HT caps */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			fmt |= EAGLE_TXD_CHW_40
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		} else {
			fmt |= EAGLE_TXD_CHW_20
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		}
	} else {			/* legacy rate */
		fmt |= EAGLE_TXD_FORMAT_LEGACY
		    | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
		    | EAGLE_TXD_CHW_20
		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
	}
	return fmt;
}
3182
/*
 * Prepare and hand a frame to the firmware: encrypt if needed,
 * prepend the 2-byte firmware length record, load DMA, fill in
 * the tx descriptor (rate/priority selection), and queue the
 * buffer on the h/w queue marked firmware-owned.  Returns 0 or
 * an errno; on every error path the mbuf is freed here, but the
 * node reference held by the caller is NOT released.
 */
static int
mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
    struct mbuf *m0)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, iswep, ismcast;
	int hdrlen, copyhdrlen, pktlen;
	struct mwl_txdesc *ds;
	struct mwl_txq *txq;
	struct ieee80211_frame *wh;
	struct mwltxrec *tr;
	struct mwl_node *mn;
	uint16_t qos;
#if MWL_TXDESC > 1
	int i;
#endif

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	copyhdrlen = hdrlen;
	pktlen = m0->m_pkthdr.len;
	/* extract the QoS control field (already little-endian on the wire) */
	/* NB(review): copyhdrlen is computed but never used below —
	 *             looks like leftover; confirm before removing */
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		if (IEEE80211_DIR_DSTODS(wh)) {
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
			copyhdrlen -= sizeof(qos);
		} else
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe *) wh)->i_qos);
	} else
		qos = 0;

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 *
		 * NB: we do this even though the firmware will ignore
		 *     what we've done for WEP and TKIP as we need the
		 *     ExtIV filled in for CCMP and this also adjusts
		 *     the headers which simplifies our work below.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			m_freem(m0);
			return EIO;
		}
		/*
		 * Adjust the packet length for the crypto additions
		 * done during encap and any other bits that the f/w
		 * will add later on.
		 */
		cip = k->wk_cipher;
		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;	/* XXX */
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
#if 0
		sc->sc_tx_th.wt_rate = ds->DataRate;
#endif
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}
	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 * We are assured the mbuf has sufficient headroom to
	 * prepend in-place by the setup of ic_headroom in
	 * mwl_attach.
	 */
	if (hdrlen < sizeof(struct mwltxrec)) {
		const int space = sizeof(struct mwltxrec) - hdrlen;
		if (M_LEADINGSPACE(m0) < space) {
			/* NB: should never happen */
			device_printf(sc->sc_dev,
			    "not enough headroom, need %d found %zd, "
			    "m_flags 0x%x m_len %d\n",
			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
			ieee80211_dump_pkt(ic,
			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
			m_freem(m0);
			sc->sc_stats.mst_tx_noheadroom++;
			return EIO;
		}
		M_PREPEND(m0, space, M_NOWAIT);
	}
	tr = mtod(m0, struct mwltxrec *);
	if (wh != (struct ieee80211_frame *) &tr->wh)
		ovbcopy(wh, &tr->wh, hdrlen);
	/*
	 * Note: the "firmware length" is actually the length
	 * of the fully formed "802.11 payload".  That is, it's
	 * everything except for the 802.11 header.  In particular
	 * this includes all crypto material including the MIC!
	 */
	tr->fwlen = htole16(pktlen - hdrlen);

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = mwl_tx_dmasetup(sc, bf, m0);
	if (error != 0) {
		/* NB: stat collected in mwl_tx_dmasetup */
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: unable to setup dma\n", __func__);
		return error;
	}
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	tr = mtod(m0, struct mwltxrec *);
	wh = (struct ieee80211_frame *)&tr->wh;

	/*
	 * Formulate tx descriptor.
	 */
	ds = bf->bf_desc;
	txq = bf->bf_txq;

	ds->QosCtrl = qos;			/* NB: already little-endian */
#if MWL_TXDESC == 1
	/*
	 * NB: multiframes should be zero because the descriptors
	 *     are initialized to zero.  This should handle the case
	 *     where the driver is built with MWL_TXDESC=1 but we are
	 *     using firmware with multi-segment support.
	 */
	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
#else
	ds->multiframes = htole32(bf->bf_nseg);
	ds->PktLen = htole16(m0->m_pkthdr.len);
	for (i = 0; i < bf->bf_nseg; i++) {
		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
	}
#endif
	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
	ds->Format = 0;
	ds->pad = 0;
	ds->ack_wcb_addr = 0;

	mn = MWL_NODE(ni);
	/*
	 * Select transmit rate.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		sc->sc_stats.mst_tx_mgmt++;
		/* fall thru... */
	case IEEE80211_FC0_TYPE_CTL:
		/* NB: assign to BE q to avoid bursting */
		ds->TxPriority = MWL_WME_AC_BE;
		break;
	case IEEE80211_FC0_TYPE_DATA:
		if (!ismcast) {
			const struct ieee80211_txparam *tp = ni->ni_txparms;
			/*
			 * EAPOL frames get forced to a fixed rate and w/o
			 * aggregation; otherwise check for any fixed rate
			 * for the client (may depend on association state).
			 */
			if (m0->m_flags & M_EAPOL) {
				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
				ds->Format = mvp->mv_eapolformat;
				ds->pad = htole16(
				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
				/* XXX pre-calculate per node */
				ds->Format = htole16(
				    mwl_calcformat(tp->ucastrate, ni));
				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
			}
			/*
			 * Route the frame onto a matching BA stream's
			 * queue if one exists for this tid, else use
			 * the default queue for the frame's AC.
			 */
			/* NB: EAPOL frames will never have qos set */
			if (qos == 0)
				ds->TxPriority = txq->qnum;
#if MWL_MAXBA > 3
			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
				ds->TxPriority = mn->mn_ba[3].txq;
#endif
#if MWL_MAXBA > 2
			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
				ds->TxPriority = mn->mn_ba[2].txq;
#endif
#if MWL_MAXBA > 1
			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
				ds->TxPriority = mn->mn_ba[1].txq;
#endif
#if MWL_MAXBA > 0
			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
				ds->TxPriority = mn->mn_ba[0].txq;
#endif
			else
				ds->TxPriority = txq->qnum;
		} else
			ds->TxPriority = txq->qnum;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		sc->sc_stats.mst_tx_badframetype++;
		m_freem(m0);
		return EIO;
	}

	if (IFF_DUMPPKTS_XMIT(sc))
		ieee80211_dump_pkt(ic,
		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);

	/* hand ownership to the firmware and queue the buffer */
	MWL_TXQ_LOCK(txq);
	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ifp->if_opackets++;
	sc->sc_tx_timer = 5;
	MWL_TXQ_UNLOCK(txq);

	return 0;
#undef	IEEE80211_DIR_DSTODS
}
3431
/*
 * Inverse of mwl_cvtlegacyrate: map a hardware rate index back
 * to the legacy rate code.  Out-of-range indices (including
 * negative ones, which the old unsigned comparison also caught)
 * yield 0.
 */
static __inline int
mwl_cvtlegacyrix(int rix)
{
	static const int ieeerates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };

	if (rix < 0 || rix >= (int)(sizeof(ieeerates)/sizeof(ieeerates[0])))
		return 0;
	return ieeerates[rix];
}
3441
3442/*
3443 * Process completed xmit descriptors from the specified queue.
3444 */
static int
mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
{
/* NB(review): this macro is defined but never referenced below —
 *             appears vestigial; confirm before removing */
#define	EAGLE_TXD_STATUS_MCAST \
	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;
	struct ieee80211_node *ni;
	struct mwl_node *an;
	int nreaped;
	uint32_t status;

	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
	for (nreaped = 0;; nreaped++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		ds = bf->bf_desc;
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* descriptor still owned by the firmware; stop reaping */
		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);

#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
			mwl_printtxbuf(bf, txq->qnum, nreaped);
#endif
		ni = bf->bf_node;
		if (ni != NULL) {
			an = MWL_NODE(ni);	/* NB(review): unused below; confirm */
			status = le32toh(ds->Status);
			if (status & EAGLE_TXD_STATUS_OK) {
				uint16_t Format = le16toh(ds->Format);
				uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);

				/* per-antenna and retry accounting */
				sc->sc_stats.mst_ant_tx[txant]++;
				if (status & EAGLE_TXD_STATUS_OK_RETRY)
					sc->sc_stats.mst_tx_retries++;
				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
					sc->sc_stats.mst_tx_mretries++;
				if (txq->qnum >= MWL_WME_AC_VO)
					ic->ic_wme.wme_hipri_traffic++;
				/* record the rate the f/w actually used */
				ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
					ni->ni_txrate = mwl_cvtlegacyrix(
					    ni->ni_txrate);
				} else
					ni->ni_txrate |= IEEE80211_RATE_MCS;
				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
			} else {
				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
					sc->sc_stats.mst_tx_linkerror++;
				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
					sc->sc_stats.mst_tx_xretries++;
				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
					sc->sc_stats.mst_tx_aging++;
				if (bf->bf_m->m_flags & M_FF)
					sc->sc_stats.mst_ff_txerr++;
			}
			/*
			 * Do any tx complete callback.  Note this must
			 * be done before releasing the node reference.
			 * XXX no way to figure out if frame was ACK'd
			 */
			if (bf->bf_m->m_flags & M_TXCB) {
				/* XXX strip fw len in case header inspected */
				m_adj(bf->bf_m, sizeof(uint16_t));
				ieee80211_process_callback(ni, bf->bf_m,
					(status & EAGLE_TXD_STATUS_OK) == 0);
			}
			/*
			 * Reclaim reference to node.
			 *
			 * NB: the node may be reclaimed here if, for example
			 *     this is a DEAUTH message that was sent and the
			 *     node was timed out due to inactivity.
			 */
			ieee80211_free_node(ni);
		}
		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);

		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
	return nreaped;
#undef EAGLE_TXD_STATUS_MCAST
}
3545
3546/*
3547 * Deferred processing of transmit interrupt; special-cased
3548 * for four hardware queues, 0-3.
3549 */
3550static void
3551mwl_tx_proc(void *arg, int npending)
3552{
3553	struct mwl_softc *sc = arg;
3554	struct ifnet *ifp = sc->sc_ifp;
3555	int nreaped;
3556
3557	/*
3558	 * Process each active queue.
3559	 */
3560	nreaped = 0;
3561	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3562		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3563	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3564		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3565	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3566		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3567	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3568		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3569
3570	if (nreaped != 0) {
3571		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3572		sc->sc_tx_timer = 0;
3573		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
3574			/* NB: kick fw; the tx thread may have been preempted */
3575			mwl_hal_txstart(sc->sc_mh, 0);
3576			mwl_start(ifp);
3577		}
3578	}
3579}
3580
/*
 * Discard every frame still queued on a tx queue: unload DMA,
 * release the node reference (if any), free the mbuf, and return
 * the buffer to the queue's free list.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ifnet *ifp = sc->sc_ifp;
			struct ieee80211com *ic = ifp->if_l2com;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3625
3626/*
3627 * Drain the transmit queues and reclaim resources.
3628 */
3629static void
3630mwl_draintxq(struct mwl_softc *sc)
3631{
3632	struct ifnet *ifp = sc->sc_ifp;
3633	int i;
3634
3635	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3636		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3637	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3638	sc->sc_tx_timer = 0;
3639}
3640
3641#ifdef MWL_DIAGAPI
3642/*
3643 * Reset the transmit queues to a pristine state after a fw download.
3644 */
3645static void
3646mwl_resettxq(struct mwl_softc *sc)
3647{
3648	int i;
3649
3650	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3651		mwl_txq_reset(sc, &sc->sc_txq[i]);
3652}
3653#endif /* MWL_DIAGAPI */
3654
3655/*
3656 * Clear the transmit queues of any frames submitted for the
3657 * specified vap.  This is done when the vap is deleted so we
3658 * don't potentially reference the vap after it is gone.
3659 * Note we cannot remove the frames; we only reclaim the node
3660 * reference.
3661 */
3662static void
3663mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3664{
3665	struct mwl_txq *txq;
3666	struct mwl_txbuf *bf;
3667	int i;
3668
3669	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3670		txq = &sc->sc_txq[i];
3671		MWL_TXQ_LOCK(txq);
3672		STAILQ_FOREACH(bf, &txq->active, bf_list) {
3673			struct ieee80211_node *ni = bf->bf_node;
3674			if (ni != NULL && ni->ni_vap == vap) {
3675				bf->bf_node = NULL;
3676				ieee80211_free_node(ni);
3677			}
3678		}
3679		MWL_TXQ_UNLOCK(txq);
3680	}
3681}
3682
3683static int
3684mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
3685	const uint8_t *frm, const uint8_t *efrm)
3686{
3687	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3688	const struct ieee80211_action *ia;
3689
3690	ia = (const struct ieee80211_action *) frm;
3691	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3692	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3693		const struct ieee80211_action_ht_mimopowersave *mps =
3694		    (const struct ieee80211_action_ht_mimopowersave *) ia;
3695
3696		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3697		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3698		    MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
3699		return 0;
3700	} else
3701		return sc->sc_recv_action(ni, wh, frm, efrm);
3702}
3703
/*
 * Intercept an outbound ADDBA request: reserve a firmware BA
 * stream slot (and allocate the h/w stream) before letting
 * net80211 send the request.  Returns 0 (request suppressed)
 * when no slot or stream is available.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 */
		/* NB: the #if chain probes slots from highest index down */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3775
/*
 * Handle the peer's ADDBA response: on success, commit the
 * pre-allocated firmware BA stream (sized by the negotiated
 * buffer size); on NAK or create failure, destroy the stream
 * and release the slot so no a-mpdu aggregation occurs.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, TID %d\n",
		    __func__, tap->txa_tid);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d TID %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_tid, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3842
3843static void
3844mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3845{
3846	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3847	struct mwl_bastate *bas;
3848
3849	bas = tap->txa_private;
3850	if (bas != NULL) {
3851		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3852		    __func__, bas->bastream);
3853		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3854		mwl_bastream_free(bas);
3855		tap->txa_private = NULL;
3856	}
3857	sc->sc_addba_stop(ni, tap);
3858}
3859
3860/*
3861 * Setup the rx data structures.  This should only be
3862 * done once or we may get out of sync with the firmware.
3863 */
static int
mwl_startrecv(struct mwl_softc *sc)
{
	/* One-shot: link the rx descriptors the first time through only. */
	if (!sc->sc_recvsetup) {
		struct mwl_rxbuf *bf, *prev;
		struct mwl_rxdesc *ds;

		prev = NULL;
		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			int error = mwl_rxbuf_init(sc, bf);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_RECV,
					"%s: mwl_rxbuf_init failed %d\n",
					__func__, error);
				return error;
			}
			/* chain the previous descriptor to this one */
			if (prev != NULL) {
				ds = prev->bf_desc;
				ds->pPhysNext = htole32(bf->bf_daddr);
			}
			prev = bf;
		}
		/* close the ring: last descriptor points at the first */
		if (prev != NULL) {
			ds = prev->bf_desc;
			ds->pPhysNext =
			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
		}
		sc->sc_recvsetup = 1;
	}
	mwl_mode_init(sc);		/* set filters, etc. */
	return 0;
}
3896
3897static MWL_HAL_APMODE
3898mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3899{
3900	MWL_HAL_APMODE mode;
3901
3902	if (IEEE80211_IS_CHAN_HT(chan)) {
3903		if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3904			mode = AP_MODE_N_ONLY;
3905		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3906			mode = AP_MODE_AandN;
3907		else if (vap->iv_flags & IEEE80211_F_PUREG)
3908			mode = AP_MODE_GandN;
3909		else
3910			mode = AP_MODE_BandGandN;
3911	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3912		if (vap->iv_flags & IEEE80211_F_PUREG)
3913			mode = AP_MODE_G_ONLY;
3914		else
3915			mode = AP_MODE_MIXED;
3916	} else if (IEEE80211_IS_CHAN_B(chan))
3917		mode = AP_MODE_B_ONLY;
3918	else if (IEEE80211_IS_CHAN_A(chan))
3919		mode = AP_MODE_A_ONLY;
3920	else
3921		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3922	return mode;
3923}
3924
3925static int
3926mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
3927{
3928	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
3929	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
3930}
3931
3932/*
3933 * Set/change channels.
3934 */
static int
mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
{
	struct mwl_hal *mh = sc->sc_mh;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	MWL_HAL_CHANNEL hchan;
	int maxtxpow;

	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
	    __func__, chan->ic_freq, chan->ic_flags);

	/*
	 * Convert to a HAL channel description with
	 * the flags constrained to reflect the current
	 * operating mode.
	 */
	mwl_mapchan(&hchan, chan);
	mwl_hal_intrset(mh, 0);		/* disable interrupts */
#if 0
	mwl_draintxq(sc);		/* clear pending tx frames */
#endif
	mwl_hal_setchannel(mh, &hchan);
	/*
	 * Tx power is cap'd by the regulatory setting and
	 * possibly a user-set limit.  We pass the min of
	 * these to the hal to apply them to the cal data
	 * for this channel.
	 * XXX min bound?
	 */
	/* NB(review): comparison is done in 2*maxregpower units;
	 *             presumably ic_txpowlimit uses the same .5 dBm
	 *             scale — confirm against net80211 */
	maxtxpow = 2*chan->ic_maxregpower;
	if (maxtxpow > ic->ic_txpowlimit)
		maxtxpow = ic->ic_txpowlimit;
	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
	/* NB: potentially change mcast/mgt rates */
	mwl_setcurchanrates(sc);

	/*
	 * Update internal state.
	 */
	/* refresh the radiotap tx/rx channel headers */
	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
	if (IEEE80211_IS_CHAN_A(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
	} else {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
	}
	sc->sc_curchan = hchan;
	/* interrupts re-enabled only after all state is updated */
	mwl_hal_intrset(mh, sc->sc_imask);

	return 0;
}
3992
3993static void
3994mwl_scan_start(struct ieee80211com *ic)
3995{
3996	struct ifnet *ifp = ic->ic_ifp;
3997	struct mwl_softc *sc = ifp->if_softc;
3998
3999	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
4000}
4001
4002static void
4003mwl_scan_end(struct ieee80211com *ic)
4004{
4005	struct ifnet *ifp = ic->ic_ifp;
4006	struct mwl_softc *sc = ifp->if_softc;
4007
4008	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
4009}
4010
4011static void
4012mwl_set_channel(struct ieee80211com *ic)
4013{
4014	struct ifnet *ifp = ic->ic_ifp;
4015	struct mwl_softc *sc = ifp->if_softc;
4016
4017	(void) mwl_chan_set(sc, ic->ic_curchan);
4018}
4019
4020/*
4021 * Handle a channel switch request.  We inform the firmware
4022 * and mark the global state to suppress various actions.
4023 * NB: we issue only one request to the fw; we may be called
4024 * multiple times if there are multiple vap's.
4025 */
4026static void
4027mwl_startcsa(struct ieee80211vap *vap)
4028{
4029	struct ieee80211com *ic = vap->iv_ic;
4030	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4031	MWL_HAL_CHANNEL hchan;
4032
4033	if (sc->sc_csapending)
4034		return;
4035
4036	mwl_mapchan(&hchan, ic->ic_csa_newchan);
4037	/* 1 =>'s quiet channel */
4038	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
4039	sc->sc_csapending = 1;
4040}
4041
4042/*
4043 * Plumb any static WEP key for the station.  This is
4044 * necessary as we must propagate the key from the
4045 * global key table of the vap to each sta db entry.
4046 */
4047static void
4048mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4049{
4050	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
4051		IEEE80211_F_PRIVACY &&
4052	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
4053	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
4054		(void) mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac);
4055}
4056
4057static int
4058mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
4059{
4060#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
4061	struct ieee80211vap *vap = ni->ni_vap;
4062	struct mwl_hal_vap *hvap;
4063	int error;
4064
4065	if (vap->iv_opmode == IEEE80211_M_WDS) {
4066		/*
4067		 * WDS vap's do not have a f/w vap; instead they piggyback
4068		 * on an AP vap and we must install the sta db entry and
4069		 * crypto state using that AP's handle (the WDS vap has none).
4070		 */
4071		hvap = MWL_VAP(vap)->mv_ap_hvap;
4072	} else
4073		hvap = MWL_VAP(vap)->mv_hvap;
4074	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
4075	    aid, staid, pi,
4076	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
4077	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
4078	if (error == 0) {
4079		/*
4080		 * Setup security for this station.  For sta mode this is
4081		 * needed even though do the same thing on transition to
4082		 * AUTH state because the call to mwl_hal_newstation
4083		 * clobbers the crypto state we setup.
4084		 */
4085		mwl_setanywepkey(vap, ni->ni_macaddr);
4086	}
4087	return error;
4088#undef WME
4089}
4090
4091static void
4092mwl_setglobalkeys(struct ieee80211vap *vap)
4093{
4094	struct ieee80211_key *wk;
4095
4096	wk = &vap->iv_nw_keys[0];
4097	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
4098		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
4099			(void) mwl_key_set(vap, wk, vap->iv_myaddr);
4100}
4101
4102/*
4103 * Convert a legacy rate set to a firmware bitmask.
4104 */
4105static uint32_t
4106get_rate_bitmap(const struct ieee80211_rateset *rs)
4107{
4108	uint32_t rates;
4109	int i;
4110
4111	rates = 0;
4112	for (i = 0; i < rs->rs_nrates; i++)
4113		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
4114		case 2:	  rates |= 0x001; break;
4115		case 4:	  rates |= 0x002; break;
4116		case 11:  rates |= 0x004; break;
4117		case 22:  rates |= 0x008; break;
4118		case 44:  rates |= 0x010; break;
4119		case 12:  rates |= 0x020; break;
4120		case 18:  rates |= 0x040; break;
4121		case 24:  rates |= 0x080; break;
4122		case 36:  rates |= 0x100; break;
4123		case 48:  rates |= 0x200; break;
4124		case 72:  rates |= 0x400; break;
4125		case 96:  rates |= 0x800; break;
4126		case 108: rates |= 0x1000; break;
4127		}
4128	return rates;
4129}
4130
4131/*
4132 * Construct an HT firmware bitmask from an HT rate set.
4133 */
4134static uint32_t
4135get_htrate_bitmap(const struct ieee80211_htrateset *rs)
4136{
4137	uint32_t rates;
4138	int i;
4139
4140	rates = 0;
4141	for (i = 0; i < rs->rs_nrates; i++) {
4142		if (rs->rs_rates[i] < 16)
4143			rates |= 1<<rs->rs_rates[i];
4144	}
4145	return rates;
4146}
4147
/*
 * Craft station database entry for station.
 * NB: use host byte order here, the hal handles byte swapping.
 *
 * Fills *pi from the node's negotiated capabilities and, for HT
 * stations, constrains the advertised HT capabilities by the
 * local vap configuration.  Returns pi for convenient use as a
 * call argument (see mwl_newassoc / mwl_localstadb).
 */
static MWL_HAL_PEERINFO *
mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
{
	const struct ieee80211vap *vap = ni->ni_vap;

	memset(pi, 0, sizeof(*pi));
	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
	pi->CapInfo = ni->ni_capinfo;
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* HT capabilities, etc */
		pi->HTCapabilitiesInfo = ni->ni_htcap;
		/* XXX pi.HTCapabilitiesInfo */
	        pi->MacHTParamInfo = ni->ni_htparam;
		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
		pi->AddHtInfo.OpMode = ni->ni_htopmode;
		pi->AddHtInfo.stbc = ni->ni_htstbc;

		/* constrain according to local configuration */
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
		/* NB: node not operating at 40MHz => no 40MHz capability */
		if (ni->ni_chw != 40)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
	}
	return pi;
}
4181
/*
 * Re-create the local sta db entry for a vap to ensure
 * up to date WME state is pushed to the firmware.  Because
 * this resets crypto state this must be followed by a
 * reload of any keys in the global key table.
 */
static int
mwl_localstadb(struct ieee80211vap *vap)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211_node *bss;
	MWL_HAL_PEERINFO pi;
	int error;

	switch (vap->iv_opmode) {
	case IEEE80211_M_STA:
		bss = vap->iv_bss;
		/* NB: peer info only meaningful once associated (RUN) */
		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
		    vap->iv_state == IEEE80211_S_RUN ?
			mkpeerinfo(&pi, bss) : NULL,
		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
		    bss->ni_ies.wme_ie != NULL ?
			WME(bss->ni_ies.wme_ie)->wme_info : 0);
		/* NB: mwl_hal_newstation resets crypto state; reload keys */
		if (error == 0)
			mwl_setglobalkeys(vap);
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
		if (error == 0)
			mwl_setglobalkeys(vap);
		break;
	default:
		/* other operating modes need no local sta db entry */
		error = 0;
		break;
	}
	return error;
#undef WME
}
4223
/*
 * 802.11 state machine transition handler.  Per-state work is
 * split around the net80211 parent method: radar/f-w vap setup
 * happens before it runs, while work that needs the updated
 * net80211 state (e.g. iv_bss) happens after.
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    vap->iv_ifp->if_xname, __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	callout_stop(&sc->sc_timer);
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    vap->iv_ifp->if_xname, __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    vap->iv_ifp->if_xname, __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			/* NB: first DWDS sta vap enables DWDS globally */
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    vap->iv_ifp->if_xname, __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		/*
		 * NOTE(review): this branch runs on any transition other
		 * than RUN/SLEEP for a DWDS vap; presumably balanced by
		 * the increment in the RUN/STA path above -- verify
		 * sc_ndwdsvaps cannot underflow.
		 */
		mwl_hal_setdwds(mh, 0);
bad:
	return error;
}
4377
/*
 * Manage station id's; these are separate from AID's
 * as AID's may have values out of the range of possible
 * station id's acceptable to the firmware.
 */
/*
 * Allocate a station id: use the AID directly when it is in
 * (0, MWL_MAXSTAID) and not already taken; otherwise scan for
 * the first free id starting at 1 (id 0 is reserved).
 *
 * NOTE(review): if every id in [1, MWL_MAXSTAID) is in use the
 * scan loop falls through with staid == MWL_MAXSTAID and setbit
 * touches one slot past the nominal range -- presumably the
 * firmware's station limit prevents this; verify.
 */
static int
allocstaid(struct mwl_softc *sc, int aid)
{
	int staid;

	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
		/* NB: don't use 0 */
		for (staid = 1; staid < MWL_MAXSTAID; staid++)
			if (isclr(sc->sc_staid, staid))
				break;
	} else
		staid = aid;
	setbit(sc->sc_staid, staid);
	return staid;
}
4398
/*
 * Return a station id to the free pool (see allocstaid).
 */
static void
delstaid(struct mwl_softc *sc, int staid)
{
	clrbit(sc->sc_staid, staid);
}
4404
4405/*
4406 * Setup driver-specific state for a newly associated node.
4407 * Note that we're called also on a re-associate, the isnew
4408 * param tells us if this is the first time or not.
4409 */
4410static void
4411mwl_newassoc(struct ieee80211_node *ni, int isnew)
4412{
4413	struct ieee80211vap *vap = ni->ni_vap;
4414        struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4415	struct mwl_node *mn = MWL_NODE(ni);
4416	MWL_HAL_PEERINFO pi;
4417	uint16_t aid;
4418	int error;
4419
4420	aid = IEEE80211_AID(ni->ni_associd);
4421	if (isnew) {
4422		mn->mn_staid = allocstaid(sc, aid);
4423		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4424	} else {
4425		mn = MWL_NODE(ni);
4426		/* XXX reset BA stream? */
4427	}
4428	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4429	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4430	error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4431	if (error != 0) {
4432		DPRINTF(sc, MWL_DEBUG_NODE,
4433		    "%s: error %d creating sta db entry\n",
4434		    __func__, error);
4435		/* XXX how to deal with error? */
4436	}
4437}
4438
4439/*
4440 * Periodically poke the firmware to age out station state
4441 * (power save queues, pending tx aggregates).
4442 */
4443static void
4444mwl_agestations(void *arg)
4445{
4446	struct mwl_softc *sc = arg;
4447
4448	mwl_hal_setkeepalive(sc->sc_mh);
4449	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
4450		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
4451}
4452
4453static const struct mwl_hal_channel *
4454findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4455{
4456	int i;
4457
4458	for (i = 0; i < ci->nchannels; i++) {
4459		const struct mwl_hal_channel *hc = &ci->channels[i];
4460		if (hc->ieee == ieee)
4461			return hc;
4462	}
4463	return NULL;
4464}
4465
4466static int
4467mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
4468	int nchan, struct ieee80211_channel chans[])
4469{
4470	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4471	struct mwl_hal *mh = sc->sc_mh;
4472	const MWL_HAL_CHANNELINFO *ci;
4473	int i;
4474
4475	for (i = 0; i < nchan; i++) {
4476		struct ieee80211_channel *c = &chans[i];
4477		const struct mwl_hal_channel *hc;
4478
4479		if (IEEE80211_IS_CHAN_2GHZ(c)) {
4480			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
4481			    IEEE80211_IS_CHAN_HT40(c) ?
4482				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4483		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4484			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
4485			    IEEE80211_IS_CHAN_HT40(c) ?
4486				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4487		} else {
4488			if_printf(ic->ic_ifp,
4489			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
4490			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
4491			return EINVAL;
4492		}
4493		/*
4494		 * Verify channel has cal data and cap tx power.
4495		 */
4496		hc = findhalchannel(ci, c->ic_ieee);
4497		if (hc != NULL) {
4498			if (c->ic_maxpower > 2*hc->maxTxPow)
4499				c->ic_maxpower = 2*hc->maxTxPow;
4500			goto next;
4501		}
4502		if (IEEE80211_IS_CHAN_HT40(c)) {
4503			/*
4504			 * Look for the extension channel since the
4505			 * hal table only has the primary channel.
4506			 */
4507			hc = findhalchannel(ci, c->ic_extieee);
4508			if (hc != NULL) {
4509				if (c->ic_maxpower > 2*hc->maxTxPow)
4510					c->ic_maxpower = 2*hc->maxTxPow;
4511				goto next;
4512			}
4513		}
4514		if_printf(ic->ic_ifp,
4515		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
4516		    __func__, c->ic_ieee, c->ic_extieee,
4517		    c->ic_freq, c->ic_flags);
4518		return EINVAL;
4519	next:
4520		;
4521	}
4522	return 0;
4523}
4524
4525#define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4526#define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4527
4528static void
4529addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow)
4530{
4531	c->ic_freq = freq;
4532	c->ic_flags = flags;
4533	c->ic_ieee = ieee;
4534	c->ic_minpower = 0;
4535	c->ic_maxpower = 2*txpow;
4536	c->ic_maxregpower = txpow;
4537}
4538
4539static const struct ieee80211_channel *
4540findchannel(const struct ieee80211_channel chans[], int nchans,
4541	int freq, int flags)
4542{
4543	const struct ieee80211_channel *c;
4544	int i;
4545
4546	for (i = 0; i < nchans; i++) {
4547		c = &chans[i];
4548		if (c->ic_freq == freq && c->ic_flags == flags)
4549			return c;
4550	}
4551	return NULL;
4552}
4553
/*
 * Add HT40 channel pairs derived from the hal's 40MHz channel
 * table.  For each hal entry, the HT20 channel 20MHz above must
 * already be present in chans[] (i.e. the 20MHz channels are
 * added first); then both the HT40U (primary) and HT40D
 * (extension) entries are appended.
 */
static void
addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	const struct ieee80211_channel *extc;
	const struct mwl_hal_channel *hc;
	int i;

	/* NB: c tracks the next free slot, in lock-step with *nchans */
	c = &chans[*nchans];

	flags &= ~IEEE80211_CHAN_HT;
	for (i = 0; i < ci->nchannels; i++) {
		/*
		 * Each entry defines an HT40 channel pair; find the
		 * extension channel above and the insert the pair.
		 */
		hc = &ci->channels[i];
		extc = findchannel(chans, *nchans, hc->freq+20,
		    flags | IEEE80211_CHAN_HT20);
		if (extc != NULL) {
			if (*nchans >= maxchans)
				break;
			/* primary channel, extension above */
			addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U,
			    hc->ieee, hc->maxTxPow);
			c->ic_extieee = extc->ic_ieee;
			c++, (*nchans)++;
			if (*nchans >= maxchans)
				break;
			/* extension channel, primary below */
			addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D,
			    extc->ic_ieee, hc->maxTxPow);
			c->ic_extieee = hc->ieee;
			c++, (*nchans)++;
		}
	}
}
4590
/*
 * Append 20MHz channels from a hal channel table.  Depending on
 * the requested flags each hal entry may expand to multiple
 * net80211 entries: 2.4GHz channels get an extra 11b-only entry,
 * and HT channels get an extra legacy-only entry.  NB: c and
 * *nchans advance in lock-step; c[-1] refers to the entry just
 * added so its flags can be adjusted after copying it forward.
 */
static void
addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	int i;

	c = &chans[*nchans];

	for (i = 0; i < ci->nchannels; i++) {
		const struct mwl_hal_channel *hc;

		hc = &ci->channels[i];
		if (*nchans >= maxchans)
			break;
		addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
		c++, (*nchans)++;
		if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
			/* g channel have a separate b-only entry */
			if (*nchans >= maxchans)
				break;
			/* duplicate previous entry, demote original to 11b */
			c[0] = c[-1];
			c[-1].ic_flags = IEEE80211_CHAN_B;
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTG) {
			/* HT g channel have a separate g-only entry */
			if (*nchans >= maxchans)
				break;
			/* previous becomes plain 11g, new one is HT20 g */
			c[-1].ic_flags = IEEE80211_CHAN_G;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTA) {
			/* HT a channel have a separate a-only entry */
			if (*nchans >= maxchans)
				break;
			/* previous becomes plain 11a, new one is HT20 a */
			c[-1].ic_flags = IEEE80211_CHAN_A;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
	}
}
4638
4639static void
4640getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
4641	struct ieee80211_channel chans[])
4642{
4643	const MWL_HAL_CHANNELINFO *ci;
4644
4645	/*
4646	 * Use the channel info from the hal to craft the
4647	 * channel list.  Note that we pass back an unsorted
4648	 * list; the caller is required to sort it for us
4649	 * (if desired).
4650	 */
4651	*nchans = 0;
4652	if (mwl_hal_getchannelinfo(sc->sc_mh,
4653	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
4654		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
4655	if (mwl_hal_getchannelinfo(sc->sc_mh,
4656	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
4657		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
4658	if (mwl_hal_getchannelinfo(sc->sc_mh,
4659	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4660		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
4661	if (mwl_hal_getchannelinfo(sc->sc_mh,
4662	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4663		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
4664}
4665
4666static void
4667mwl_getradiocaps(struct ieee80211com *ic,
4668	int maxchans, int *nchans, struct ieee80211_channel chans[])
4669{
4670	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4671
4672	getchannels(sc, maxchans, nchans, chans);
4673}
4674
4675static int
4676mwl_getchannels(struct mwl_softc *sc)
4677{
4678	struct ifnet *ifp = sc->sc_ifp;
4679	struct ieee80211com *ic = ifp->if_l2com;
4680
4681	/*
4682	 * Use the channel info from the hal to craft the
4683	 * channel list for net80211.  Note that we pass up
4684	 * an unsorted list; net80211 will sort it for us.
4685	 */
4686	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
4687	ic->ic_nchans = 0;
4688	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
4689
4690	ic->ic_regdomain.regdomain = SKU_DEBUG;
4691	ic->ic_regdomain.country = CTRY_DEFAULT;
4692	ic->ic_regdomain.location = 'I';
4693	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
4694	ic->ic_regdomain.isocc[1] = ' ';
4695	return (ic->ic_nchans == 0 ? EIO : 0);
4696}
4697#undef IEEE80211_CHAN_HTA
4698#undef IEEE80211_CHAN_HTG
4699
4700#ifdef MWL_DEBUG
/*
 * Debug: dump one rx descriptor.  RxControl shows ownership
 * (driver vs firmware); the Status "*" / "!" tag is only shown
 * once the firmware has released the descriptor.
 */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("R[%2u] (DS.V:%p DS.P:%p) NEXT:%08x DATA:%08x RC:%02x%s\n"
	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (const struct mwl_desc *)bf->bf_daddr,
	    le32toh(ds->pPhysNext), le32toh(ds->pPhysBuffData),
	    ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2));
}
4717
/*
 * Debug: dump one tx descriptor, including per-fragment
 * scatter/gather state when multi-descriptor tx is enabled.
 */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:%p)\n",
	    ds, (const struct mwl_txdesc *)bf->bf_daddr);
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !");
	printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
/* raw hex dump of the whole descriptor; normally compiled out */
{ const uint8_t *cp = (const uint8_t *) ds;
  int i;
  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
	printf("%02x ", cp[i]);
	if (((i+1) % 16) == 0)
		printf("\n");
  }
  printf("\n");
}
#endif
}
4760#endif /* MWL_DEBUG */
4761
4762#if 0
/*
 * Debug: dump every buffer on a tx queue's active list.
 * NB: currently compiled out (enclosed in #if 0 at file level).
 */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		/* sync descriptor from DMA memory before inspecting it */
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4781#endif
4782
/*
 * Per-second watchdog: re-arms itself, then checks for a stalled
 * transmitter.  sc_tx_timer is presumably armed by the tx path
 * and cleared on completion (TODO confirm from tx code); here it
 * is counted down and the timeout fires only when it reaches 0.
 */
static void
mwl_watchdog(void *arg)
{
	struct mwl_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
	/* NB: return unless the timer is armed and just expired */
	if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
		return;

	ifp = sc->sc_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) {
		/* use a keepalive command as a probe of firmware liveness */
		if (mwl_hal_setkeepalive(sc->sc_mh))
			if_printf(ifp, "transmit timeout (firmware hung?)\n");
		else
			if_printf(ifp, "transmit timeout\n");
#if 0
		mwl_reset(ifp);
mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
#endif
		ifp->if_oerrors++;
		sc->sc_stats.mst_watchdog++;
	}
}
4808
4809#ifdef MWL_DIAGAPI
4810/*
4811 * Diagnostic interface to the HAL.  This is used by various
4812 * tools to do things like retrieve register contents for
4813 * debugging.  The mechanism is intentionally opaque so that
4814 * it can change frequently w/o concern for compatiblity.
4815 */
/*
 * Handle a diagnostic ioctl: optionally copy a request buffer in
 * from userland, invoke the HAL diag entry point, and copy any
 * result back out.  Buffers are reclaimed via goto-style cleanup.
 */
static int
mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	u_int id = md->md_id & MWL_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = md->md_in_size;
	u_int32_t outsize = md->md_out_size;
	int error = 0;

	if (md->md_id & MWL_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(md->md_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (md->md_id & MWL_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	/* NB: HAL returns nonzero on success here */
	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
		/* never report more than the caller's buffer can hold */
		if (outsize < md->md_out_size)
			md->md_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, md->md_out_data,
					md->md_out_size);
	} else {
		error = EINVAL;
	}
bad:
	/* NB: flags re-checked so only buffers we allocated are freed */
	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
4870
/*
 * Handle a firmware reset/reload ioctl: (re)load firmware when
 * requested (md_id == 0), re-fetch hardware specs, re-setup DMA,
 * and reset the driver's tx/rx bookkeeping to match.
 */
static int
mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error;

	MWL_LOCK_ASSERT(sc);

	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to load firmware\n");
		return EIO;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		return EIO;
	}
	error = mwl_setupdma(sc);
	if (error != 0) {
		/* NB: mwl_setupdma prints a msg */
		return error;
	}
	/*
	 * Reset tx/rx data structures; after reload we must
	 * re-start the driver's notion of the next xmit/recv.
	 */
	mwl_draintxq(sc);		/* clear pending frames */
	mwl_resettxq(sc);		/* rebuild tx q lists */
	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
	return 0;
}
4901#endif /* MWL_DIAGAPI */
4902
/*
 * Network interface ioctl handler: interface flag changes,
 * driver statistics, diagnostic hooks, media and address
 * queries.  NB: the SIOCGMVSTATS/SIOCGMVDIAG paths return
 * directly (no unlock needed; the softc lock is not held there).
 */
static int
mwl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, startall;

	switch (cmd) {
	case SIOCSIFFLAGS:
		MWL_LOCK(sc);
		startall = 0;
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call mwl_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			mwl_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid) {
				mwl_init_locked(sc);	/* XXX lose error */
				startall = 1;
			}
		} else
			mwl_stop_locked(ifp, 1);
		MWL_UNLOCK(sc);
		if (startall)
			ieee80211_start_all(ic);
		break;
	case SIOCGMVSTATS:
		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.mst_tx_packets = ifp->if_opackets;
		sc->sc_stats.mst_rx_packets = ifp->if_ipackets;
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsisentcy in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return copyout(&sc->sc_stats,
				ifr->ifr_data, sizeof (sc->sc_stats));
#ifdef MWL_DIAGAPI
	case SIOCGMVDIAG:
		/* XXX check privs */
		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
	case SIOCGMVRESET:
		/* XXX check privs */
		MWL_LOCK(sc);
		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
		MWL_UNLOCK(sc);
		break;
#endif /* MWL_DIAGAPI */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}
4981
4982#ifdef	MWL_DEBUG
4983static int
4984mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4985{
4986	struct mwl_softc *sc = arg1;
4987	int debug, error;
4988
4989	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4990	error = sysctl_handle_int(oidp, &debug, 0, req);
4991	if (error || !req->newptr)
4992		return error;
4993	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4994	sc->sc_debug = debug & 0x00ffffff;
4995	return 0;
4996}
4997#endif /* MWL_DEBUG */
4998
/*
 * Attach driver sysctl nodes.  Currently only the debug knob,
 * and only when compiled with MWL_DEBUG.
 */
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef	MWL_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	/* seed from the compile-time/tunable default */
	sc->sc_debug = mwl_debug;
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
5012
/*
 * Announce various information on device/driver attach.
 */
static void
mwl_announce(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* firmware release number is packed one byte per component */
	if_printf(ifp, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
		sc->sc_hwspecs.hwVersion,
		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
		sc->sc_hwspecs.regionCode);
	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;

	if (bootverbose) {
		int i;
		/* report the WME access category -> hw queue mapping */
		for (i = 0; i <= WME_AC_VO; i++) {
			struct mwl_txq *txq = sc->sc_ac2q[i];
			if_printf(ifp, "Use hw queue %u for %s traffic\n",
				txq->qnum, ieee80211_wme_acnames[i]);
		}
	}
	/* NB: also report when tunables differ from compiled-in defaults */
	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
		if_printf(ifp, "using %u rx descriptors\n", mwl_rxdesc);
	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
		if_printf(ifp, "using %u rx buffers\n", mwl_rxbuf);
	if (bootverbose || mwl_txbuf != MWL_TXBUF)
		if_printf(ifp, "using %u tx buffers\n", mwl_txbuf);
	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
		if_printf(ifp, "multi-bss support\n");
#ifdef MWL_TX_NODROP
	if (bootverbose)
		if_printf(ifp, "no tx drop\n");
#endif
}
5051