/* if_mwl.c revision 283537 */
/*-
 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/mwl/if_mwl.c 283537 2015-05-25 18:50:26Z glebius $");
33
34/*
35 * Driver for the Marvell 88W8363 Wireless LAN controller.
36 */
37
38#include "opt_inet.h"
39#include "opt_mwl.h"
40#include "opt_wlan.h"
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/sysctl.h>
45#include <sys/mbuf.h>
46#include <sys/malloc.h>
47#include <sys/lock.h>
48#include <sys/mutex.h>
49#include <sys/kernel.h>
50#include <sys/socket.h>
51#include <sys/sockio.h>
52#include <sys/errno.h>
53#include <sys/callout.h>
54#include <sys/bus.h>
55#include <sys/endian.h>
56#include <sys/kthread.h>
57#include <sys/taskqueue.h>
58
59#include <machine/bus.h>
60
61#include <net/if.h>
62#include <net/if_var.h>
63#include <net/if_dl.h>
64#include <net/if_media.h>
65#include <net/if_types.h>
66#include <net/if_arp.h>
67#include <net/ethernet.h>
68#include <net/if_llc.h>
69
70#include <net/bpf.h>
71
72#include <net80211/ieee80211_var.h>
73#include <net80211/ieee80211_regdomain.h>
74
75#ifdef INET
76#include <netinet/in.h>
77#include <netinet/if_ether.h>
78#endif /* INET */
79
80#include <dev/mwl/if_mwlvar.h>
81#include <dev/mwl/mwldiag.h>
82
83/* idiomatic shorthands: MS = mask+shift, SM = shift+mask */
84#define	MS(v,x)	(((v) & x) >> x##_S)
85#define	SM(v,x)	(((v) << x##_S) & x)
86
87static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
88		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
89		    const uint8_t [IEEE80211_ADDR_LEN],
90		    const uint8_t [IEEE80211_ADDR_LEN]);
91static void	mwl_vap_delete(struct ieee80211vap *);
92static int	mwl_setupdma(struct mwl_softc *);
93static int	mwl_hal_reset(struct mwl_softc *sc);
94static int	mwl_init_locked(struct mwl_softc *);
95static void	mwl_init(void *);
96static void	mwl_stop_locked(struct ifnet *, int);
97static int	mwl_reset(struct ieee80211vap *, u_long);
98static void	mwl_stop(struct ifnet *, int);
99static void	mwl_start(struct ifnet *);
100static int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
101			const struct ieee80211_bpf_params *);
102static int	mwl_media_change(struct ifnet *);
103static void	mwl_watchdog(void *);
104static int	mwl_ioctl(struct ifnet *, u_long, caddr_t);
105static void	mwl_radar_proc(void *, int);
106static void	mwl_chanswitch_proc(void *, int);
107static void	mwl_bawatchdog_proc(void *, int);
108static int	mwl_key_alloc(struct ieee80211vap *,
109			struct ieee80211_key *,
110			ieee80211_keyix *, ieee80211_keyix *);
111static int	mwl_key_delete(struct ieee80211vap *,
112			const struct ieee80211_key *);
113static int	mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *,
114			const uint8_t mac[IEEE80211_ADDR_LEN]);
115static int	mwl_mode_init(struct mwl_softc *);
116static void	mwl_update_mcast(struct ifnet *);
117static void	mwl_update_promisc(struct ifnet *);
118static void	mwl_updateslot(struct ifnet *);
119static int	mwl_beacon_setup(struct ieee80211vap *);
120static void	mwl_beacon_update(struct ieee80211vap *, int);
121#ifdef MWL_HOST_PS_SUPPORT
122static void	mwl_update_ps(struct ieee80211vap *, int);
123static int	mwl_set_tim(struct ieee80211_node *, int);
124#endif
125static int	mwl_dma_setup(struct mwl_softc *);
126static void	mwl_dma_cleanup(struct mwl_softc *);
127static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
128		    const uint8_t [IEEE80211_ADDR_LEN]);
129static void	mwl_node_cleanup(struct ieee80211_node *);
130static void	mwl_node_drain(struct ieee80211_node *);
131static void	mwl_node_getsignal(const struct ieee80211_node *,
132			int8_t *, int8_t *);
133static void	mwl_node_getmimoinfo(const struct ieee80211_node *,
134			struct ieee80211_mimo_info *);
135static int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
136static void	mwl_rx_proc(void *, int);
137static void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
138static int	mwl_tx_setup(struct mwl_softc *, int, int);
139static int	mwl_wme_update(struct ieee80211com *);
140static void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
141static void	mwl_tx_cleanup(struct mwl_softc *);
142static uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
143static int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
144			     struct mwl_txbuf *, struct mbuf *);
145static void	mwl_tx_proc(void *, int);
146static int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
147static void	mwl_draintxq(struct mwl_softc *);
148static void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
149static int	mwl_recv_action(struct ieee80211_node *,
150			const struct ieee80211_frame *,
151			const uint8_t *, const uint8_t *);
152static int	mwl_addba_request(struct ieee80211_node *,
153			struct ieee80211_tx_ampdu *, int dialogtoken,
154			int baparamset, int batimeout);
155static int	mwl_addba_response(struct ieee80211_node *,
156			struct ieee80211_tx_ampdu *, int status,
157			int baparamset, int batimeout);
158static void	mwl_addba_stop(struct ieee80211_node *,
159			struct ieee80211_tx_ampdu *);
160static int	mwl_startrecv(struct mwl_softc *);
161static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
162			struct ieee80211_channel *);
163static int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
164static void	mwl_scan_start(struct ieee80211com *);
165static void	mwl_scan_end(struct ieee80211com *);
166static void	mwl_set_channel(struct ieee80211com *);
167static int	mwl_peerstadb(struct ieee80211_node *,
168			int aid, int staid, MWL_HAL_PEERINFO *pi);
169static int	mwl_localstadb(struct ieee80211vap *);
170static int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
171static int	allocstaid(struct mwl_softc *sc, int aid);
172static void	delstaid(struct mwl_softc *sc, int staid);
173static void	mwl_newassoc(struct ieee80211_node *, int);
174static void	mwl_agestations(void *);
175static int	mwl_setregdomain(struct ieee80211com *,
176			struct ieee80211_regdomain *, int,
177			struct ieee80211_channel []);
178static void	mwl_getradiocaps(struct ieee80211com *, int, int *,
179			struct ieee80211_channel []);
180static int	mwl_getchannels(struct mwl_softc *);
181
182static void	mwl_sysctlattach(struct mwl_softc *);
183static void	mwl_announce(struct mwl_softc *);
184
185SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");
186
187static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
188SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
189	    0, "rx descriptors allocated");
190static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
191SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &mwl_rxbuf,
192	    0, "rx buffers allocated");
193static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
194SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RWTUN, &mwl_txbuf,
195	    0, "tx buffers allocated");
196static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
197SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &mwl_txcoalesce,
198	    0, "tx buffers to send at once");
199static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
200SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RWTUN, &mwl_rxquota,
201	    0, "max rx buffers to process per interrupt");
202static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
203SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RWTUN, &mwl_rxdmalow,
204	    0, "min free rx buffers before restarting traffic");
205
206#ifdef MWL_DEBUG
207static	int mwl_debug = 0;
208SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RWTUN, &mwl_debug,
209	    0, "control debugging printfs");
210enum {
211	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
212	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
213	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
214	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
215	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
216	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
217	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
218	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
219	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
220	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
221	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
222	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
223	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
224	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
225	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
226	MWL_DEBUG_ANY		= 0xffffffff
227};
228#define	IS_BEACON(wh) \
229    ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
230	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
231#define	IFF_DUMPPKTS_RECV(sc, wh) \
232    (((sc->sc_debug & MWL_DEBUG_RECV) && \
233      ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh))) || \
234     (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
235#define	IFF_DUMPPKTS_XMIT(sc) \
236	((sc->sc_debug & MWL_DEBUG_XMIT) || \
237	 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
238#define	DPRINTF(sc, m, fmt, ...) do {				\
239	if (sc->sc_debug & (m))					\
240		printf(fmt, __VA_ARGS__);			\
241} while (0)
242#define	KEYPRINTF(sc, hk, mac) do {				\
243	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
244		mwl_keyprint(sc, __func__, hk, mac);		\
245} while (0)
246static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
247static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
248#else
249#define	IFF_DUMPPKTS_RECV(sc, wh) \
250	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
251#define	IFF_DUMPPKTS_XMIT(sc) \
252	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
253#define	DPRINTF(sc, m, fmt, ...) do {				\
254	(void) sc;						\
255} while (0)
256#define	KEYPRINTF(sc, k, mac) do {				\
257	(void) sc;						\
258} while (0)
259#endif
260
261static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");
262
263/*
264 * Each packet has fixed front matter: a 2-byte length
265 * of the payload, followed by a 4-address 802.11 header
266 * (regardless of the actual header and always w/o any
267 * QoS header).  The payload then follows.
268 */
269struct mwltxrec {
270	uint16_t fwlen;
271	struct ieee80211_frame_addr4 wh;
272} __packed;
273
274/*
275 * Read/Write shorthands for accesses to BAR 0.  Note
276 * that all BAR 1 operations are done in the "hal" and
277 * there should be no reference to them here.
278 */
279#ifdef MWL_DEBUG
280static __inline uint32_t
281RD4(struct mwl_softc *sc, bus_size_t off)
282{
283	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
284}
285#endif
286
/* Write a 32-bit register in BAR 0 at byte offset 'off'. */
static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}
292
/*
 * Device attach: allocate the ifnet, attach the HAL and load
 * firmware, fetch h/w specs and channels, set up descriptor DMA,
 * create the driver taskqueue and tx queues, then register net80211
 * capabilities and driver method overrides.
 *
 * Returns 0 on success or an errno; on failure the bad/bad1/bad2
 * labels unwind whatever was acquired, in reverse order.
 */
int
mwl_attach(uint16_t devid, struct mwl_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct mwl_hal *mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "cannot if_alloc()\n");
		return ENOSPC;
	}
	ic = ifp->if_l2com;

	/*
	 * Setup the RX free list lock early, so it can be consistently
	 * removed.
	 */
	MWL_RXFREE_INIT(sc);

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	/* NB: BAR 1 handle/tag; all BAR 1 access is done inside the hal */
	mh = mwl_hal_attach(sc->sc_dev, devid,
	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
	if (mh == NULL) {
		if_printf(ifp, "unable to attach HAL\n");
		error = EIO;
		goto bad;
	}
	sc->sc_mh = mh;
	/*
	 * Load firmware so we can get setup.  We arbitrarily
	 * pick station firmware; we'll re-load firmware as
	 * needed so setting up the wrong mode isn't a big deal.
	 */
	if (mwl_hal_fwload(mh, NULL) != 0) {
		if_printf(ifp, "unable to setup builtin firmware\n");
		error = EIO;
		goto bad1;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		if_printf(ifp, "unable to fetch h/w specs\n");
		error = EIO;
		goto bad1;
	}
	error = mwl_getchannels(sc);
	if (error != 0)
		goto bad1;

	sc->sc_txantenna = 0;		/* h/w default */
	sc->sc_rxantenna = 0;		/* h/w default */
	sc->sc_invalid = 0;		/* ready to go, enable int handling */
	sc->sc_ageinterval = MWL_AGEINTERVAL;

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 * We immediately push the information to the firmware
	 * as otherwise it gets upset.
	 */
	error = mwl_dma_setup(sc);
	if (error != 0) {
		if_printf(ifp, "failed to setup descriptors: %d\n", error);
		goto bad1;
	}
	error = mwl_setupdma(sc);	/* push to firmware */
	if (error != 0)			/* NB: mwl_setupdma prints msg */
		goto bad1;

	callout_init(&sc->sc_timer, 1);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);

	/* private taskqueue; deferred work queued from mwl_intr runs here */
	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);

	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = mwl_start;
	ifp->if_ioctl = mwl_ioctl;
	ifp->if_init = mwl_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
#if 0
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
#endif
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_WDS		/* WDS supported */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WME		/* WME/WMM supported */
		| IEEE80211_C_BURST		/* xmit bursting supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
		| IEEE80211_C_DFS		/* DFS supported */
		;

	ic->ic_htcaps =
		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
#if MWL_AGGR_SIZE == 7935
		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#if 0
		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
#endif
		/* s/w capabilities */
		| IEEE80211_HTC_HT		/* HT operation */
		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
		| IEEE80211_HTC_SMPS		/* SMPS available */
		;

	/*
	 * Mark h/w crypto support.
	 * XXX no way to query h/w support.
	 */
	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
			  |  IEEE80211_CRYPTO_AES_CCM
			  |  IEEE80211_CRYPTO_TKIP
			  |  IEEE80211_CRYPTO_TKIPMIC
			  ;
	/*
	 * Transmit requires space in the packet for a special
	 * format transmit record and optional padding between
	 * this record and the payload.  Ask the net80211 layer
	 * to arrange this when encapsulating packets so we can
	 * add it efficiently.
	 */
	ic->ic_headroom = sizeof(struct mwltxrec) -
		sizeof(struct ieee80211_frame);

	/* call MI attach routine. */
	ieee80211_ifattach(ic, sc->sc_hwspecs.macAddr);
	ic->ic_setregdomain = mwl_setregdomain;
	ic->ic_getradiocaps = mwl_getradiocaps;
	/* override default methods */
	ic->ic_raw_xmit = mwl_raw_xmit;
	ic->ic_newassoc = mwl_newassoc;
	ic->ic_updateslot = mwl_updateslot;
	ic->ic_update_mcast = mwl_update_mcast;
	ic->ic_update_promisc = mwl_update_promisc;
	ic->ic_wme.wme_update = mwl_wme_update;

	/* chain the default node methods so ours can call through */
	ic->ic_node_alloc = mwl_node_alloc;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = mwl_node_cleanup;
	sc->sc_node_drain = ic->ic_node_drain;
	ic->ic_node_drain = mwl_node_drain;
	ic->ic_node_getsignal = mwl_node_getsignal;
	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;

	ic->ic_scan_start = mwl_scan_start;
	ic->ic_scan_end = mwl_scan_end;
	ic->ic_set_channel = mwl_set_channel;

	/* likewise chain the A-MPDU action methods */
	sc->sc_recv_action = ic->ic_recv_action;
	ic->ic_recv_action = mwl_recv_action;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = mwl_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = mwl_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = mwl_addba_stop;

	ic->ic_vap_create = mwl_vap_create;
	ic->ic_vap_delete = mwl_vap_delete;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		MWL_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		MWL_RX_RADIOTAP_PRESENT);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	mwl_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	mwl_announce(sc);
	return 0;
bad2:
	mwl_dma_cleanup(sc);
bad1:
	mwl_hal_detach(mh);
bad:
	MWL_RXFREE_DESTROY(sc);
	if_free(ifp);
	sc->sc_invalid = 1;
	return error;
}
544
/*
 * Device detach: undo mwl_attach.  Stops the device first, then
 * tears everything down in the specific order described below.
 * Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	mwl_stop(ifp, 1);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	callout_drain(&sc->sc_watchdog);
	mwl_dma_cleanup(sc);
	MWL_RXFREE_DESTROY(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	if_free(ifp);

	return 0;
}
577
578/*
579 * MAC address handling for multiple BSS on the same radio.
580 * The first vap uses the MAC address from the EEPROM.  For
581 * subsequent vap's we set the U/L bit (bit 1) in the MAC
582 * address and use the next six bits as an index.
583 */
584static void
585assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
586{
587	int i;
588
589	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
590		/* NB: we only do this if h/w supports multiple bssid */
591		for (i = 0; i < 32; i++)
592			if ((sc->sc_bssidmask & (1<<i)) == 0)
593				break;
594		if (i != 0)
595			mac[0] |= (i << 2)|0x2;
596	} else
597		i = 0;
598	sc->sc_bssidmask |= 1<<i;
599	if (i == 0)
600		sc->sc_nbssid0++;
601}
602
603static void
604reclaim_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN])
605{
606	int i = mac[0] >> 2;
607	if (i != 0 || --sc->sc_nbssid0 == 0)
608		sc->sc_bssidmask &= ~(1<<i);
609}
610
/*
 * Create and attach a vap of the requested opmode.  Allocates a
 * MAC address (unless IEEE80211_CLONE_MACADDR), creates the hal
 * vap where one is needed (AP/MBSS/STA), wires up driver method
 * overrides, and updates the per-mode vap counts and the overall
 * ic_opmode.  Returns NULL on any failure, with any partially
 * acquired address/hal state released.
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			/* give back the address taken above */
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		/* unsupported opmodes */
		return NULL;
	}

	mvp = (struct mwl_vap *) malloc(sizeof(struct mwl_vap),
	    M_80211_VAP, M_NOWAIT | M_ZERO);
	if (mvp == NULL) {
		if (hvap != NULL) {
			mwl_hal_delvap(hvap);
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
		}
		/* XXX msg */
		return NULL;
	}
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
	if (hvap != NULL)
		IEEE80211_ADDR_COPY(vap->iv_myaddr, mac);
	/* override with driver methods */
	mvp->mv_newstate = vap->iv_newstate;
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status);

	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}
750
/*
 * Delete a vap: quiesce interrupts while the vap is torn down,
 * detach it from net80211, release the hal vap and its sta db
 * entry plus the MAC address slot, then drop any frames still
 * queued for the vap and re-enable interrupts.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ifnet *parent = vap->iv_ic->ic_ifp;
	struct mwl_softc *sc = parent->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (parent->if_drv_flags & IFF_DRV_RUNNING) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps--;
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	if (parent->if_drv_flags & IFF_DRV_RUNNING)
		mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable */
}
792
793void
794mwl_suspend(struct mwl_softc *sc)
795{
796	struct ifnet *ifp = sc->sc_ifp;
797
798	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
799		__func__, ifp->if_flags);
800
801	mwl_stop(ifp, 1);
802}
803
804void
805mwl_resume(struct mwl_softc *sc)
806{
807	struct ifnet *ifp = sc->sc_ifp;
808
809	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
810		__func__, ifp->if_flags);
811
812	if (ifp->if_flags & IFF_UP)
813		mwl_init(sc);
814}
815
816void
817mwl_shutdown(void *arg)
818{
819	struct mwl_softc *sc = arg;
820
821	mwl_stop(sc->sc_ifp, 1);
822}
823
824/*
825 * Interrupt handler.  Most of the actual processing is deferred.
826 */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	/* defer rx/tx/BA-watchdog work to the taskqueue */
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;	/* NB: MAC events are currently ignored */
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;	/* NB: no work done here */
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;	/* NB: queue-full indication ignored */
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}
882
883static void
884mwl_radar_proc(void *arg, int pending)
885{
886	struct mwl_softc *sc = arg;
887	struct ifnet *ifp = sc->sc_ifp;
888	struct ieee80211com *ic = ifp->if_l2com;
889
890	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
891	    __func__, pending);
892
893	sc->sc_stats.mst_radardetect++;
894	/* XXX stop h/w BA streams? */
895
896	IEEE80211_LOCK(ic);
897	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
898	IEEE80211_UNLOCK(ic);
899}
900
901static void
902mwl_chanswitch_proc(void *arg, int pending)
903{
904	struct mwl_softc *sc = arg;
905	struct ifnet *ifp = sc->sc_ifp;
906	struct ieee80211com *ic = ifp->if_l2com;
907
908	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
909	    __func__, pending);
910
911	IEEE80211_LOCK(ic);
912	sc->sc_csapending = 0;
913	ieee80211_csa_completeswitch(ic);
914	IEEE80211_UNLOCK(ic);
915}
916
917static void
918mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
919{
920	struct ieee80211_node *ni = sp->data[0];
921
922	/* send DELBA and drop the stream */
923	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
924}
925
926static void
927mwl_bawatchdog_proc(void *arg, int pending)
928{
929	struct mwl_softc *sc = arg;
930	struct mwl_hal *mh = sc->sc_mh;
931	const MWL_HAL_BASTREAM *sp;
932	uint8_t bitmap, n;
933
934	sc->sc_stats.mst_bawatchdog++;
935
936	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
937		DPRINTF(sc, MWL_DEBUG_AMPDU,
938		    "%s: could not get bitmap\n", __func__);
939		sc->sc_stats.mst_bawatchdog_failed++;
940		return;
941	}
942	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
943	if (bitmap == 0xff) {
944		n = 0;
945		/* disable all ba streams */
946		for (bitmap = 0; bitmap < 8; bitmap++) {
947			sp = mwl_hal_bastream_lookup(mh, bitmap);
948			if (sp != NULL) {
949				mwl_bawatchdog(sp);
950				n++;
951			}
952		}
953		if (n == 0) {
954			DPRINTF(sc, MWL_DEBUG_AMPDU,
955			    "%s: no BA streams found\n", __func__);
956			sc->sc_stats.mst_bawatchdog_empty++;
957		}
958	} else if (bitmap != 0xaa) {
959		/* disable a single ba stream */
960		sp = mwl_hal_bastream_lookup(mh, bitmap);
961		if (sp != NULL) {
962			mwl_bawatchdog(sp);
963		} else {
964			DPRINTF(sc, MWL_DEBUG_AMPDU,
965			    "%s: no BA stream %d\n", __func__, bitmap);
966			sc->sc_stats.mst_bawatchdog_notfound++;
967		}
968	}
969}
970
971/*
972 * Convert net80211 channel to a HAL channel.
973 */
974static void
975mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
976{
977	hc->channel = chan->ic_ieee;
978
979	*(uint32_t *)&hc->channelFlags = 0;
980	if (IEEE80211_IS_CHAN_2GHZ(chan))
981		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
982	else if (IEEE80211_IS_CHAN_5GHZ(chan))
983		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
984	if (IEEE80211_IS_CHAN_HT40(chan)) {
985		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
986		if (IEEE80211_IS_CHAN_HT40U(chan))
987			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
988		else
989			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
990	} else
991		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
992	/* XXX 10MHz channels */
993}
994
995/*
996 * Inform firmware of our tx/rx dma setup.  The BAR 0
997 * writes below are for compatibility with older firmware.
998 * For current firmware we send this information with a
999 * cmd block via mwl_hal_sethwdma.
1000 */
1001static int
1002mwl_setupdma(struct mwl_softc *sc)
1003{
1004	int error, i;
1005
1006	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
1007	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
1008	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);
1009
1010	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
1011		struct mwl_txq *txq = &sc->sc_txq[i];
1012		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
1013		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
1014	}
1015	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
1016	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;
1017
1018	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
1019	if (error != 0) {
1020		device_printf(sc->sc_dev,
1021		    "unable to setup tx/rx dma; hal status %u\n", error);
1022		/* XXX */
1023	}
1024	return error;
1025}
1026
1027/*
1028 * Inform firmware of tx rate parameters.
1029 * Called after a channel change.
1030 */
1031static int
1032mwl_setcurchanrates(struct mwl_softc *sc)
1033{
1034	struct ifnet *ifp = sc->sc_ifp;
1035	struct ieee80211com *ic = ifp->if_l2com;
1036	const struct ieee80211_rateset *rs;
1037	MWL_HAL_TXRATE rates;
1038
1039	memset(&rates, 0, sizeof(rates));
1040	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1041	/* rate used to send management frames */
1042	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1043	/* rate used to send multicast frames */
1044	rates.McastRate = rates.MgtRate;
1045
1046	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1047}
1048
1049/*
1050 * Inform firmware of tx rate parameters.  Called whenever
1051 * user-settable params change and after a channel change.
1052 */
1053static int
1054mwl_setrates(struct ieee80211vap *vap)
1055{
1056	struct mwl_vap *mvp = MWL_VAP(vap);
1057	struct ieee80211_node *ni = vap->iv_bss;
1058	const struct ieee80211_txparam *tp = ni->ni_txparms;
1059	MWL_HAL_TXRATE rates;
1060
1061	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1062
1063	/*
1064	 * Update the h/w rate map.
1065	 * NB: 0x80 for MCS is passed through unchanged
1066	 */
1067	memset(&rates, 0, sizeof(rates));
1068	/* rate used to send management frames */
1069	rates.MgtRate = tp->mgmtrate;
1070	/* rate used to send multicast frames */
1071	rates.McastRate = tp->mcastrate;
1072
1073	/* while here calculate EAPOL fixed rate cookie */
1074	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
1075
1076	return mwl_hal_settxrate(mvp->mv_hvap,
1077	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
1078		RATE_FIXED : RATE_AUTO, &rates);
1079}
1080
1081/*
1082 * Setup a fixed xmit rate cookie for EAPOL frames.
1083 */
1084static void
1085mwl_seteapolformat(struct ieee80211vap *vap)
1086{
1087	struct mwl_vap *mvp = MWL_VAP(vap);
1088	struct ieee80211_node *ni = vap->iv_bss;
1089	enum ieee80211_phymode mode;
1090	uint8_t rate;
1091
1092	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1093
1094	mode = ieee80211_chan2mode(ni->ni_chan);
1095	/*
1096	 * Use legacy rates when operating a mixed HT+non-HT bss.
1097	 * NB: this may violate POLA for sta and wds vap's.
1098	 */
1099	if (mode == IEEE80211_MODE_11NA &&
1100	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1101		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1102	else if (mode == IEEE80211_MODE_11NG &&
1103	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1104		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1105	else
1106		rate = vap->iv_txparms[mode].mgmtrate;
1107
1108	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1109}
1110
1111/*
1112 * Map SKU+country code to region code for radar bin'ing.
1113 */
1114static int
1115mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1116{
1117	switch (rd->regdomain) {
1118	case SKU_FCC:
1119	case SKU_FCC3:
1120		return DOMAIN_CODE_FCC;
1121	case SKU_CA:
1122		return DOMAIN_CODE_IC;
1123	case SKU_ETSI:
1124	case SKU_ETSI2:
1125	case SKU_ETSI3:
1126		if (rd->country == CTRY_SPAIN)
1127			return DOMAIN_CODE_SPAIN;
1128		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1129			return DOMAIN_CODE_FRANCE;
1130		/* XXX force 1.3.1 radar type */
1131		return DOMAIN_CODE_ETSI_131;
1132	case SKU_JAPAN:
1133		return DOMAIN_CODE_MKK;
1134	case SKU_ROW:
1135		return DOMAIN_CODE_DGT;	/* Taiwan */
1136	case SKU_APAC:
1137	case SKU_APAC2:
1138	case SKU_APAC3:
1139		return DOMAIN_CODE_AUS;	/* Australia */
1140	}
1141	/* XXX KOREA? */
1142	return DOMAIN_CODE_FCC;			/* XXX? */
1143}
1144
/*
 * Push vap-independent operating state to the firmware after a
 * (re)start: antennas, radio, WMM, channel, rate-adaptation and
 * region settings.  NB: always reports success (returns 1);
 * individual HAL command failures are not propagated.
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	return 1;
}
1169
/*
 * Bring the interface up with the softc lock held: stop any
 * previous state, reset the firmware, start the receive path,
 * and enable interrupts.  Returns 0 or an errno.
 */
static int
mwl_init_locked(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop_locked(ifp, 0);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		if_printf(ifp, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	/* mark running before unmasking interrupts */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	mwl_hal_intrset(mh, sc->sc_imask);
	/* kick the tx watchdog; rearmed from the callout itself */
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);

	return 0;
}
1230
/*
 * if_init entry point: serialize on the softc lock around the
 * real work in mwl_init_locked, then (on success) kick all vaps.
 */
static void
mwl_init(void *arg)
{
	struct mwl_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	MWL_LOCK(sc);
	error = mwl_init_locked(sc);
	MWL_UNLOCK(sc);

	if (error == 0)
		ieee80211_start_all(ic);	/* start all vap's */
}
1249
/*
 * Shut the interface down with the softc lock held.  The
 * "disable" argument is currently unused.
 */
static void
mwl_stop_locked(struct ifnet *ifp, int disable)
{
	struct mwl_softc *sc = ifp->if_softc;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	MWL_LOCK_ASSERT(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Shutdown the hardware and driver.
		 */
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		callout_stop(&sc->sc_watchdog);
		sc->sc_tx_timer = 0;
		/* reclaim any frames still on the tx queues */
		mwl_draintxq(sc);
	}
}
1269
/*
 * Locked wrapper around mwl_stop_locked.
 */
static void
mwl_stop(struct ifnet *ifp, int disable)
{
	struct mwl_softc *sc = ifp->if_softc;

	MWL_LOCK(sc);
	mwl_stop_locked(ifp, disable);
	MWL_UNLOCK(sc);
}
1279
/*
 * Re-push per-vap state to the firmware for the given target
 * state: rates (RUN only), RTS threshold, HT guard interval,
 * HT protection, and (for AP-like vaps in RUN) the beacon.
 * Returns 0 or the mwl_beacon_setup error.
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_MBSS ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}
1309
1310/*
1311 * Reset the hardware w/o losing operational state.
1312 * Used to to reset or reload hardware state for a vap.
1313 */
1314static int
1315mwl_reset(struct ieee80211vap *vap, u_long cmd)
1316{
1317	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1318	int error = 0;
1319
1320	if (hvap != NULL) {			/* WDS, MONITOR, etc. */
1321		struct ieee80211com *ic = vap->iv_ic;
1322		struct ifnet *ifp = ic->ic_ifp;
1323		struct mwl_softc *sc = ifp->if_softc;
1324		struct mwl_hal *mh = sc->sc_mh;
1325
1326		/* XXX handle DWDS sta vap change */
1327		/* XXX do we need to disable interrupts? */
1328		mwl_hal_intrset(mh, 0);		/* disable interrupts */
1329		error = mwl_reset_vap(vap, vap->iv_state);
1330		mwl_hal_intrset(mh, sc->sc_imask);
1331	}
1332	return error;
1333}
1334
1335/*
1336 * Allocate a tx buffer for sending a frame.  The
1337 * packet is assumed to have the WME AC stored so
1338 * we can use it to select the appropriate h/w queue.
1339 */
1340static struct mwl_txbuf *
1341mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1342{
1343	struct mwl_txbuf *bf;
1344
1345	/*
1346	 * Grab a TX buffer and associated resources.
1347	 */
1348	MWL_TXQ_LOCK(txq);
1349	bf = STAILQ_FIRST(&txq->free);
1350	if (bf != NULL) {
1351		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1352		txq->nfree--;
1353	}
1354	MWL_TXQ_UNLOCK(txq);
1355	if (bf == NULL)
1356		DPRINTF(sc, MWL_DEBUG_XMIT,
1357		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1358	return bf;
1359}
1360
1361/*
1362 * Return a tx buffer to the queue it came from.  Note there
1363 * are two cases because we must preserve the order of buffers
1364 * as it reflects the fixed order of descriptors in memory
1365 * (the firmware pre-fetches descriptors so we cannot reorder).
1366 */
1367static void
1368mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1369{
1370	bf->bf_m = NULL;
1371	bf->bf_node = NULL;
1372	MWL_TXQ_LOCK(txq);
1373	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1374	txq->nfree++;
1375	MWL_TXQ_UNLOCK(txq);
1376}
1377
/*
 * Tail variant of mwl_puttxbuf_head; used when the buffer's
 * descriptor has been consumed so it goes to the back of the
 * free list (see ordering note above mwl_puttxbuf_head).
 */
static void
mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
{
	bf->bf_m = NULL;
	bf->bf_node = NULL;
	MWL_TXQ_LOCK(txq);
	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
	txq->nfree++;
	MWL_TXQ_UNLOCK(txq);
}
1388
/*
 * if_start entry point: drain the interface send queue, handing
 * each frame to the h/w queue selected by its WME AC, and poke
 * the firmware every mwl_txcoalesce frames (and once at the end)
 * to begin fetching descriptors.
 */
static void
mwl_start(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	nqueued = 0;
	for (;;) {
		bf = NULL;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		/*
		 * Grab the node for the destination.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			/* no tx buffer: drop the frame either way */
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			sc->sc_stats.mst_tx_qstop++;
			/* XXX blocks other traffic */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			/* NB: mwl_tx_start consumed/freed m on failure */
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host. So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1471
/*
 * Transmit a raw (caller-formed) 802.11 frame.  On all error
 * paths both the mbuf and the node reference are released per
 * the raw_xmit contract.  Returns 0, ENETDOWN, ENOBUFS or EIO.
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
		ieee80211_free_node(ni);
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		/* XXX blocks other traffic */
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		ieee80211_free_node(ni);
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		/* NB: mwl_tx_start consumed/freed m on failure */
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		mwl_puttxbuf_head(txq, bf);

		ieee80211_free_node(ni);
		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host. So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1531
1532static int
1533mwl_media_change(struct ifnet *ifp)
1534{
1535	struct ieee80211vap *vap = ifp->if_softc;
1536	int error;
1537
1538	error = ieee80211_media_change(ifp);
1539	/* NB: only the fixed rate can change and that doesn't need a reset */
1540	if (error == ENETRESET) {
1541		mwl_setrates(vap);
1542		error = 0;
1543	}
1544	return error;
1545}
1546
1547#ifdef MWL_DEBUG
1548static void
1549mwl_keyprint(struct mwl_softc *sc, const char *tag,
1550	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
1551{
1552	static const char *ciphers[] = {
1553		"WEP",
1554		"TKIP",
1555		"AES-CCM",
1556	};
1557	int i, n;
1558
1559	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
1560	for (i = 0, n = hk->keyLen; i < n; i++)
1561		printf(" %02x", hk->key.aes[i]);
1562	printf(" mac %s", ether_sprintf(mac));
1563	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
1564		printf(" %s", "rxmic");
1565		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
1566			printf(" %02x", hk->key.tkip.rxMic[i]);
1567		printf(" txmic");
1568		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
1569			printf(" %02x", hk->key.tkip.txMic[i]);
1570	}
1571	printf(" flags 0x%x\n", hk->keyFlags);
1572}
1573#endif
1574
1575/*
1576 * Allocate a key cache slot for a unicast key.  The
1577 * firmware handles key allocation and every station is
1578 * guaranteed key space so we are always successful.
1579 */
1580static int
1581mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
1582	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1583{
1584	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1585
1586	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
1587	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
1588		if (!(&vap->iv_nw_keys[0] <= k &&
1589		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
1590			/* should not happen */
1591			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1592				"%s: bogus group key\n", __func__);
1593			return 0;
1594		}
1595		/* give the caller what they requested */
1596		*keyix = *rxkeyix = k - vap->iv_nw_keys;
1597	} else {
1598		/*
1599		 * Firmware handles key allocation.
1600		 */
1601		*keyix = *rxkeyix = 0;
1602	}
1603	return 1;
1604}
1605
1606/*
1607 * Delete a key entry allocated by mwl_key_alloc.
1608 */
1609static int
1610mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
1611{
1612	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1613	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1614	MWL_HAL_KEYVAL hk;
1615	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
1616	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1617
1618	if (hvap == NULL) {
1619		if (vap->iv_opmode != IEEE80211_M_WDS) {
1620			/* XXX monitor mode? */
1621			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1622			    "%s: no hvap for opmode %d\n", __func__,
1623			    vap->iv_opmode);
1624			return 0;
1625		}
1626		hvap = MWL_VAP(vap)->mv_ap_hvap;
1627	}
1628
1629	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
1630	    __func__, k->wk_keyix);
1631
1632	memset(&hk, 0, sizeof(hk));
1633	hk.keyIndex = k->wk_keyix;
1634	switch (k->wk_cipher->ic_cipher) {
1635	case IEEE80211_CIPHER_WEP:
1636		hk.keyTypeId = KEY_TYPE_ID_WEP;
1637		break;
1638	case IEEE80211_CIPHER_TKIP:
1639		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1640		break;
1641	case IEEE80211_CIPHER_AES_CCM:
1642		hk.keyTypeId = KEY_TYPE_ID_AES;
1643		break;
1644	default:
1645		/* XXX should not happen */
1646		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1647		    __func__, k->wk_cipher->ic_cipher);
1648		return 0;
1649	}
1650	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
1651}
1652
1653static __inline int
1654addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1655{
1656	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1657		if (k->wk_flags & IEEE80211_KEY_XMIT)
1658			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1659		if (k->wk_flags & IEEE80211_KEY_RECV)
1660			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1661		return 1;
1662	} else
1663		return 0;
1664}
1665
1666/*
1667 * Set the key cache contents for the specified key.  Key cache
1668 * slot(s) must already have been allocated by mwl_key_alloc.
1669 */
1670static int
1671mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
1672	const uint8_t mac[IEEE80211_ADDR_LEN])
1673{
1674#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
1675/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
1676#define	IEEE80211_IS_STATICKEY(k) \
1677	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
1678	 (GRPXMIT|IEEE80211_KEY_RECV))
1679	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1680	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1681	const struct ieee80211_cipher *cip = k->wk_cipher;
1682	const uint8_t *macaddr;
1683	MWL_HAL_KEYVAL hk;
1684
1685	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
1686		("s/w crypto set?"));
1687
1688	if (hvap == NULL) {
1689		if (vap->iv_opmode != IEEE80211_M_WDS) {
1690			/* XXX monitor mode? */
1691			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1692			    "%s: no hvap for opmode %d\n", __func__,
1693			    vap->iv_opmode);
1694			return 0;
1695		}
1696		hvap = MWL_VAP(vap)->mv_ap_hvap;
1697	}
1698	memset(&hk, 0, sizeof(hk));
1699	hk.keyIndex = k->wk_keyix;
1700	switch (cip->ic_cipher) {
1701	case IEEE80211_CIPHER_WEP:
1702		hk.keyTypeId = KEY_TYPE_ID_WEP;
1703		hk.keyLen = k->wk_keylen;
1704		if (k->wk_keyix == vap->iv_def_txkey)
1705			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
1706		if (!IEEE80211_IS_STATICKEY(k)) {
1707			/* NB: WEP is never used for the PTK */
1708			(void) addgroupflags(&hk, k);
1709		}
1710		break;
1711	case IEEE80211_CIPHER_TKIP:
1712		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1713		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
1714		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
1715		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
1716		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
1717		if (!addgroupflags(&hk, k))
1718			hk.keyFlags |= KEY_FLAG_PAIRWISE;
1719		break;
1720	case IEEE80211_CIPHER_AES_CCM:
1721		hk.keyTypeId = KEY_TYPE_ID_AES;
1722		hk.keyLen = k->wk_keylen;
1723		if (!addgroupflags(&hk, k))
1724			hk.keyFlags |= KEY_FLAG_PAIRWISE;
1725		break;
1726	default:
1727		/* XXX should not happen */
1728		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1729		    __func__, k->wk_cipher->ic_cipher);
1730		return 0;
1731	}
1732	/*
1733	 * NB: tkip mic keys get copied here too; the layout
1734	 *     just happens to match that in ieee80211_key.
1735	 */
1736	memcpy(hk.key.aes, k->wk_key, hk.keyLen);
1737
1738	/*
1739	 * Locate address of sta db entry for writing key;
1740	 * the convention unfortunately is somewhat different
1741	 * than how net80211, hostapd, and wpa_supplicant think.
1742	 */
1743	if (vap->iv_opmode == IEEE80211_M_STA) {
1744		/*
1745		 * NB: keys plumbed before the sta reaches AUTH state
1746		 * will be discarded or written to the wrong sta db
1747		 * entry because iv_bss is meaningless.  This is ok
1748		 * (right now) because we handle deferred plumbing of
1749		 * WEP keys when the sta reaches AUTH state.
1750		 */
1751		macaddr = vap->iv_bss->ni_bssid;
1752		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
1753			/* XXX plumb to local sta db too for static key wep */
1754			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
1755		}
1756	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
1757	    vap->iv_state != IEEE80211_S_RUN) {
1758		/*
1759		 * Prior to RUN state a WDS vap will not it's BSS node
1760		 * setup so we will plumb the key to the wrong mac
1761		 * address (it'll be our local address).  Workaround
1762		 * this for the moment by grabbing the correct address.
1763		 */
1764		macaddr = vap->iv_des_bssid;
1765	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
1766		macaddr = vap->iv_myaddr;
1767	else
1768		macaddr = mac;
1769	KEYPRINTF(sc, &hk, macaddr);
1770	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
1771#undef IEEE80211_IS_STATICKEY
1772#undef GRPXMIT
1773}
1774
/*
 * Unaligned little-endian access: assemble 16/32-bit values
 * byte-by-byte so the pointer need not be aligned and host
 * byte order doesn't matter.
 */
#define LE_READ_2(p)				\
	((uint16_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8)))
#define LE_READ_4(p)				\
	((uint32_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8) |	\
	  (((const uint8_t *)(p))[2] << 16) |	\
	  (((const uint8_t *)(p))[3] << 24)))
1786
1787/*
1788 * Set the multicast filter contents into the hardware.
1789 * XXX f/w has no support; just defer to the os.
1790 */
1791static void
1792mwl_setmcastfilter(struct mwl_softc *sc)
1793{
1794	struct ifnet *ifp = sc->sc_ifp;
1795#if 0
1796	struct ether_multi *enm;
1797	struct ether_multistep estep;
1798	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
1799	uint8_t *mp;
1800	int nmc;
1801
1802	mp = macs;
1803	nmc = 0;
1804	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
1805	while (enm != NULL) {
1806		/* XXX Punt on ranges. */
1807		if (nmc == MWL_HAL_MCAST_MAX ||
1808		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
1809			ifp->if_flags |= IFF_ALLMULTI;
1810			return;
1811		}
1812		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
1813		mp += IEEE80211_ADDR_LEN, nmc++;
1814		ETHER_NEXT_MULTI(estep, enm);
1815	}
1816	ifp->if_flags &= ~IFF_ALLMULTI;
1817	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
1818#else
1819	/* XXX no mcast filter support; we get everything */
1820	ifp->if_flags |= IFF_ALLMULTI;
1821#endif
1822}
1823
/*
 * Push interface-mode dependent state (promiscuity, multicast
 * filter) to the hardware.  Always returns 0.
 */
static int
mwl_mode_init(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_hal *mh = sc->sc_mh;

	/*
	 * NB: Ignore promisc in hostap mode; it's set by the
	 * bridge.  This is wrong but we have no way to
	 * identify internal requests (from the bridge)
	 * versus external requests such as for tcpdump.
	 */
	mwl_hal_setpromisc(mh, (ifp->if_flags & IFF_PROMISC) &&
	    ic->ic_opmode != IEEE80211_M_HOSTAP);
	mwl_setmcastfilter(sc);

	return 0;
}
1843
1844/*
1845 * Callback from the 802.11 layer after a multicast state change.
1846 */
1847static void
1848mwl_update_mcast(struct ifnet *ifp)
1849{
1850	struct mwl_softc *sc = ifp->if_softc;
1851
1852	mwl_setmcastfilter(sc);
1853}
1854
1855/*
1856 * Callback from the 802.11 layer after a promiscuous mode change.
1857 * Note this interface does not check the operating mode as this
1858 * is an internal callback and we are expected to honor the current
1859 * state (e.g. this is used for setting the interface in promiscuous
1860 * mode when operating in hostap mode to do ACS).
1861 */
1862static void
1863mwl_update_promisc(struct ifnet *ifp)
1864{
1865	struct mwl_softc *sc = ifp->if_softc;
1866
1867	mwl_hal_setpromisc(sc->sc_mh, (ifp->if_flags & IFF_PROMISC) != 0);
1868}
1869
1870/*
1871 * Callback from the 802.11 layer to update the slot time
1872 * based on the current setting.  We use it to notify the
1873 * firmware of ERP changes and the f/w takes care of things
1874 * like slot time and preamble.
1875 */
1876static void
1877mwl_updateslot(struct ifnet *ifp)
1878{
1879	struct mwl_softc *sc = ifp->if_softc;
1880	struct ieee80211com *ic = ifp->if_l2com;
1881	struct mwl_hal *mh = sc->sc_mh;
1882	int prot;
1883
1884	/* NB: can be called early; suppress needless cmds */
1885	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1886		return;
1887
1888	/*
1889	 * Calculate the ERP flags.  The firwmare will use
1890	 * this to carry out the appropriate measures.
1891	 */
1892	prot = 0;
1893	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
1894		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
1895			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
1896		if (ic->ic_flags & IEEE80211_F_USEPROT)
1897			prot |= IEEE80211_ERP_USE_PROTECTION;
1898		if (ic->ic_flags & IEEE80211_F_USEBARKER)
1899			prot |= IEEE80211_ERP_LONG_PREAMBLE;
1900	}
1901
1902	DPRINTF(sc, MWL_DEBUG_RESET,
1903	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
1904	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
1905	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
1906	    ic->ic_flags);
1907
1908	mwl_hal_setgprot(mh, prot);
1909}
1910
1911/*
1912 * Setup the beacon frame.
1913 */
1914static int
1915mwl_beacon_setup(struct ieee80211vap *vap)
1916{
1917	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1918	struct ieee80211_node *ni = vap->iv_bss;
1919	struct ieee80211_beacon_offsets bo;
1920	struct mbuf *m;
1921
1922	m = ieee80211_beacon_alloc(ni, &bo);
1923	if (m == NULL)
1924		return ENOBUFS;
1925	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1926	m_free(m);
1927
1928	return 0;
1929}
1930
1931/*
1932 * Update the beacon frame in response to a change.
1933 */
1934static void
1935mwl_beacon_update(struct ieee80211vap *vap, int item)
1936{
1937	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1938	struct ieee80211com *ic = vap->iv_ic;
1939
1940	KASSERT(hvap != NULL, ("no beacon"));
1941	switch (item) {
1942	case IEEE80211_BEACON_ERP:
1943		mwl_updateslot(ic->ic_ifp);
1944		break;
1945	case IEEE80211_BEACON_HTINFO:
1946		mwl_hal_setnprotmode(hvap,
1947		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1948		break;
1949	case IEEE80211_BEACON_CAPS:
1950	case IEEE80211_BEACON_WME:
1951	case IEEE80211_BEACON_APPIE:
1952	case IEEE80211_BEACON_CSA:
1953		break;
1954	case IEEE80211_BEACON_TIM:
1955		/* NB: firmware always forms TIM */
1956		return;
1957	}
1958	/* XXX retain beacon frame and update */
1959	mwl_beacon_setup(vap);
1960}
1961
/*
 * bus_dmamap_load callback: stash the (single) segment's bus
 * address through the opaque arg pointer.
 */
static void
mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	KASSERT(error == 0, ("error %u on bus_dma callback", error));
	*paddr = segs->ds_addr;
}
1969
#ifdef MWL_HOST_PS_SUPPORT
/*
 * Handle power save station occupancy changes.  Only notify the
 * firmware on the empty<->non-empty transitions (nsta hitting 0
 * or leaving 0); in-between count changes are suppressed.
 */
static void
mwl_update_ps(struct ieee80211vap *vap, int nsta)
{
	struct mwl_vap *mvp = MWL_VAP(vap);

	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
	mvp->mv_last_ps_sta = nsta;
}
1983
1984/*
1985 * Handle associated station power save state changes.
1986 */
1987static int
1988mwl_set_tim(struct ieee80211_node *ni, int set)
1989{
1990	struct ieee80211vap *vap = ni->ni_vap;
1991	struct mwl_vap *mvp = MWL_VAP(vap);
1992
1993	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1994		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1995		    IEEE80211_AID(ni->ni_associd), set);
1996		return 1;
1997	} else
1998		return 0;
1999}
2000#endif /* MWL_HOST_PS_SUPPORT */
2001
/*
 * Allocate a DMA descriptor area shared with the firmware:
 * create a tag for one contiguous, 32-bit addressable,
 * page-aligned segment of nbuf*ndesc*descsize bytes, allocate
 * and map it coherently, record its bus address (via
 * mwl_load_cb) in dd->dd_desc_paddr, and zero it.
 * Returns 0 or an errno; on failure dd is zeroed so a later
 * mwl_desc_cleanup/re-setup is safe.
 */
static int
mwl_desc_setup(struct mwl_softc *sc, const char *name,
	struct mwl_descdma *dd,
	int nbuf, size_t bufsize, int ndesc, size_t descsize)
{
	struct ifnet *ifp = sc->sc_ifp;
	uint8_t *ds;
	int error;

	DPRINTF(sc, MWL_DEBUG_RESET,
	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
	    __func__, name, nbuf, (uintmax_t) bufsize,
	    ndesc, (uintmax_t) descsize);

	dd->dd_name = name;
	dd->dd_desc_len = nbuf * ndesc * descsize;

	/*
	 * Setup DMA descriptor area.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
		       PAGE_SIZE, 0,		/* alignment, bounds */
		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		       BUS_SPACE_MAXADDR,	/* highaddr */
		       NULL, NULL,		/* filter, filterarg */
		       dd->dd_desc_len,		/* maxsize */
		       1,			/* nsegments */
		       dd->dd_desc_len,		/* maxsegsize */
		       BUS_DMA_ALLOCNOW,	/* flags */
		       NULL,			/* lockfunc */
		       NULL,			/* lockarg */
		       &dd->dd_dmat);
	if (error != 0) {
		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
		return error;
	}

	/* allocate descriptors */
	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
				 &dd->dd_dmamap);
	if (error != 0) {
		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
			"error %u\n", nbuf * ndesc, dd->dd_name, error);
		goto fail1;
	}

	/* record the bus address for programming the hardware */
	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
				dd->dd_desc, dd->dd_desc_len,
				mwl_load_cb, &dd->dd_desc_paddr,
				BUS_DMA_NOWAIT);
	if (error != 0) {
		if_printf(ifp, "unable to map %s descriptors, error %u\n",
			dd->dd_name, error);
		goto fail2;
	}

	ds = dd->dd_desc;
	memset(ds, 0, dd->dd_desc_len);
	DPRINTF(sc, MWL_DEBUG_RESET,
	    "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n",
	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
	    (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);

	return 0;
fail2:
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
fail1:
	bus_dma_tag_destroy(dd->dd_dmat);
	memset(dd, 0, sizeof(*dd));
	/*
	 * NB(review): stray leftover — DS2PHYS is not defined at this
	 * point (it is defined further down in this file), so this
	 * #undef is a harmless no-op; candidate for removal.
	 */
#undef DS2PHYS
}
2075
/*
 * Inverse of mwl_desc_setup: release the descriptor DMA area.
 * NB: order matters — unload the map, free the memory, then
 * destroy the tag; dd is zeroed afterwards so callers can
 * test dd_desc_len to tell whether setup ever completed.
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	memset(dd, 0, sizeof(*dd));
}
2085
2086/*
2087 * Construct a tx q's free list.  The order of entries on
2088 * the list must reflect the physical layout of tx descriptors
2089 * because the firmware pre-fetches descriptors.
2090 *
2091 * XXX might be better to use indices into the buffer array.
2092 */
2093static void
2094mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2095{
2096	struct mwl_txbuf *bf;
2097	int i;
2098
2099	bf = txq->dma.dd_bufptr;
2100	STAILQ_INIT(&txq->free);
2101	for (i = 0; i < mwl_txbuf; i++, bf++)
2102		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2103	txq->nfree = i;
2104}
2105
2106#define	DS2PHYS(_dd, _ds) \
2107	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2108
/*
 * Allocate tx state for a queue: the shared descriptor area
 * (MWL_TXDESC descriptors per buffer, mwl_txbuf buffers), the
 * buffer array, and a dma map per buffer; finally thread the
 * buffers onto the queue's free list.  On error the caller is
 * expected to reclaim partial state via mwl_txdma_cleanup
 * (done by mwl_dma_setup through mwl_dma_cleanup).
 */
static int
mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ifnet *ifp = sc->sc_ifp;
	int error, bsize, i;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;

	error = mwl_desc_setup(sc, "tx", &txq->dma,
			mwl_txbuf, sizeof(struct mwl_txbuf),
			MWL_TXDESC, sizeof(struct mwl_txdesc));
	if (error != 0)
		return error;

	/* allocate and setup tx buffers */
	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
	if (bf == NULL) {
		if_printf(ifp, "malloc of %u tx buffers failed\n",
			mwl_txbuf);
		return ENOMEM;
	}
	txq->dma.dd_bufptr = bf;

	/* bind each buffer to its slice of descriptors + bus address */
	ds = txq->dma.dd_desc;
	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
				&bf->bf_dmamap);
		if (error != 0) {
			if_printf(ifp, "unable to create dmamap for tx "
				"buffer %u, error %u\n", i, error);
			return error;
		}
	}
	mwl_txq_reset(sc, txq);
	return 0;
}
2148
2149static void
2150mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2151{
2152	struct mwl_txbuf *bf;
2153	int i;
2154
2155	bf = txq->dma.dd_bufptr;
2156	for (i = 0; i < mwl_txbuf; i++, bf++) {
2157		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2158		KASSERT(bf->bf_node == NULL, ("node on free list"));
2159		if (bf->bf_dmamap != NULL)
2160			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2161	}
2162	STAILQ_INIT(&txq->free);
2163	txq->nfree = 0;
2164	if (txq->dma.dd_bufptr != NULL) {
2165		free(txq->dma.dd_bufptr, M_MWLDEV);
2166		txq->dma.dd_bufptr = NULL;
2167	}
2168	if (txq->dma.dd_desc_len != 0)
2169		mwl_desc_cleanup(sc, &txq->dma);
2170}
2171
2172static int
2173mwl_rxdma_setup(struct mwl_softc *sc)
2174{
2175	struct ifnet *ifp = sc->sc_ifp;
2176	int error, jumbosize, bsize, i;
2177	struct mwl_rxbuf *bf;
2178	struct mwl_jumbo *rbuf;
2179	struct mwl_rxdesc *ds;
2180	caddr_t data;
2181
2182	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2183			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2184			1, sizeof(struct mwl_rxdesc));
2185	if (error != 0)
2186		return error;
2187
2188	/*
2189	 * Receive is done to a private pool of jumbo buffers.
2190	 * This allows us to attach to mbuf's and avoid re-mapping
2191	 * memory on each rx we post.  We allocate a large chunk
2192	 * of memory and manage it in the driver.  The mbuf free
2193	 * callback method is used to reclaim frames after sending
2194	 * them up the stack.  By default we allocate 2x the number of
2195	 * rx descriptors configured so we have some slop to hold
2196	 * us while frames are processed.
2197	 */
2198	if (mwl_rxbuf < 2*mwl_rxdesc) {
2199		if_printf(ifp,
2200		    "too few rx dma buffers (%d); increasing to %d\n",
2201		    mwl_rxbuf, 2*mwl_rxdesc);
2202		mwl_rxbuf = 2*mwl_rxdesc;
2203	}
2204	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2205	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2206
2207	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2208		       PAGE_SIZE, 0,		/* alignment, bounds */
2209		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2210		       BUS_SPACE_MAXADDR,	/* highaddr */
2211		       NULL, NULL,		/* filter, filterarg */
2212		       sc->sc_rxmemsize,	/* maxsize */
2213		       1,			/* nsegments */
2214		       sc->sc_rxmemsize,	/* maxsegsize */
2215		       BUS_DMA_ALLOCNOW,	/* flags */
2216		       NULL,			/* lockfunc */
2217		       NULL,			/* lockarg */
2218		       &sc->sc_rxdmat);
2219	if (error != 0) {
2220		if_printf(ifp, "could not create rx DMA tag\n");
2221		return error;
2222	}
2223
2224	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2225				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2226				 &sc->sc_rxmap);
2227	if (error != 0) {
2228		if_printf(ifp, "could not alloc %ju bytes of rx DMA memory\n",
2229		    (uintmax_t) sc->sc_rxmemsize);
2230		return error;
2231	}
2232
2233	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2234				sc->sc_rxmem, sc->sc_rxmemsize,
2235				mwl_load_cb, &sc->sc_rxmem_paddr,
2236				BUS_DMA_NOWAIT);
2237	if (error != 0) {
2238		if_printf(ifp, "could not load rx DMA map\n");
2239		return error;
2240	}
2241
2242	/*
2243	 * Allocate rx buffers and set them up.
2244	 */
2245	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2246	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2247	if (bf == NULL) {
2248		if_printf(ifp, "malloc of %u rx buffers failed\n", bsize);
2249		return error;
2250	}
2251	sc->sc_rxdma.dd_bufptr = bf;
2252
2253	STAILQ_INIT(&sc->sc_rxbuf);
2254	ds = sc->sc_rxdma.dd_desc;
2255	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2256		bf->bf_desc = ds;
2257		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2258		/* pre-assign dma buffer */
2259		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2260		/* NB: tail is intentional to preserve descriptor order */
2261		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2262	}
2263
2264	/*
2265	 * Place remainder of dma memory buffers on the free list.
2266	 */
2267	SLIST_INIT(&sc->sc_rxfree);
2268	for (; i < mwl_rxbuf; i++) {
2269		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2270		rbuf = MWL_JUMBO_DATA2BUF(data);
2271		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2272		sc->sc_nrxfree++;
2273	}
2274	return 0;
2275}
2276#undef DS2PHYS
2277
/*
 * Inverse of mwl_rxdma_setup.  Each step is individually
 * guarded so this is safe to call after a partial (failed)
 * setup, in any state.
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	if (sc->sc_rxmem_paddr != 0) {
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmem_paddr = 0;
	}
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	/* dd_desc_len != 0 means mwl_desc_setup completed */
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
}
2296
2297static int
2298mwl_dma_setup(struct mwl_softc *sc)
2299{
2300	int error, i;
2301
2302	error = mwl_rxdma_setup(sc);
2303	if (error != 0) {
2304		mwl_rxdma_cleanup(sc);
2305		return error;
2306	}
2307
2308	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2309		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2310		if (error != 0) {
2311			mwl_dma_cleanup(sc);
2312			return error;
2313		}
2314	}
2315	return 0;
2316}
2317
2318static void
2319mwl_dma_cleanup(struct mwl_softc *sc)
2320{
2321	int i;
2322
2323	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2324		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2325	mwl_rxdma_cleanup(sc);
2326}
2327
2328static struct ieee80211_node *
2329mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2330{
2331	struct ieee80211com *ic = vap->iv_ic;
2332	struct mwl_softc *sc = ic->ic_ifp->if_softc;
2333	const size_t space = sizeof(struct mwl_node);
2334	struct mwl_node *mn;
2335
2336	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2337	if (mn == NULL) {
2338		/* XXX stat+msg */
2339		return NULL;
2340	}
2341	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2342	return &mn->mn_node;
2343}
2344
/*
 * Reclaim driver/firmware state for a node: remove the firmware
 * station db entry (via the node's own hvap, or for legacy WDS
 * via the associated ap's hvap), release the station id, then
 * chain to the previously saved net80211 cleanup method.
 */
static void
mwl_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
        struct mwl_softc *sc = ic->ic_ifp->if_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
	    __func__, ni, ni->ni_ic, mn->mn_staid);

	/* staid == 0 means no firmware station entry was installed */
	if (mn->mn_staid != 0) {
		struct ieee80211vap *vap = ni->ni_vap;

		if (mn->mn_hvap != NULL) {
			/* sta mode entries are keyed by our own address */
			if (vap->iv_opmode == IEEE80211_M_STA)
				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
			else
				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
		}
		/*
		 * NB: legacy WDS peer sta db entry is installed using
		 * the associate ap's hvap; use it again to delete it.
		 * XXX can vap be NULL?
		 */
		else if (vap->iv_opmode == IEEE80211_M_WDS &&
		    MWL_VAP(vap)->mv_ap_hvap != NULL)
			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
			    ni->ni_macaddr);
		delstaid(sc, mn->mn_staid);
		mn->mn_staid = 0;
	}
	sc->sc_node_cleanup(ni);
}
2378
2379/*
2380 * Reclaim rx dma buffers from packets sitting on the ampdu
2381 * reorder queue for a station.  We replace buffers with a
2382 * system cluster (if available).
2383 */
static void
mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
{
/*
 * NB(review): the entire implementation is compiled out (#if 0);
 * the pool_cache/MEXTREMOVE interfaces used below appear to be
 * from a different mbuf implementation and do not build here —
 * this function is currently a no-op.  TODO: reimplement with
 * native mbuf cluster APIs if buffer reclaim is ever needed.
 */
#if 0
	int i, n, off;
	struct mbuf *m;
	void *cl;

	n = rap->rxa_qframes;
	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
		m = rap->rxa_m[i];
		if (m == NULL)
			continue;
		n--;
		/* our dma buffers have a well-known free routine */
		if ((m->m_flags & M_EXT) == 0 ||
		    m->m_ext.ext_free != mwl_ext_free)
			continue;
		/*
		 * Try to allocate a cluster and move the data.
		 */
		off = m->m_data - m->m_ext.ext_buf;
		if (off + m->m_pkthdr.len > MCLBYTES) {
			/* XXX no AMSDU for now */
			continue;
		}
		cl = pool_cache_get_paddr(&mclpool_cache, 0,
		    &m->m_ext.ext_paddr);
		if (cl != NULL) {
			/*
			 * Copy the existing data to the cluster, remove
			 * the rx dma buffer, and attach the cluster in
			 * its place.  Note we preserve the offset to the
			 * data so frames being bridged can still prepend
			 * their headers without adding another mbuf.
			 */
			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
			MEXTREMOVE(m);
			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
			/* setup mbuf like _MCLGET does */
			m->m_flags |= M_CLUSTER | M_EXT_RW;
			_MOWNERREF(m, M_EXT | M_CLUSTER);
			/* NB: m_data is clobbered by MEXTADDR, adjust */
			m->m_data += off;
		}
	}
#endif
}
2432
2433/*
2434 * Callback to reclaim resources.  We first let the
2435 * net80211 layer do it's thing, then if we are still
2436 * blocked by a lack of rx dma buffers we walk the ampdu
2437 * reorder q's to reclaim buffers by copying to a system
2438 * cluster.
2439 */
static void
mwl_node_drain(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
        struct mwl_softc *sc = ic->ic_ifp->if_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
	    __func__, ni, ni->ni_vap, mn->mn_staid);

	/* NB: call up first to age out ampdu q's */
	sc->sc_node_drain(ni);

	/* XXX better to not check low water mark? */
	/* NB: only bother when rx is throttled and the sta is an HT
	 * station with a firmware id (it may hold A-MPDU reorder state) */
	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
	    (ni->ni_flags & IEEE80211_NODE_HT)) {
		uint8_t tid;
		/*
		 * Walk the reorder q and reclaim rx dma buffers by copying
		 * the packet contents into clusters.
		 */
		for (tid = 0; tid < WME_NUM_TID; tid++) {
			struct ieee80211_rx_ampdu *rap;

			rap = &ni->ni_rx_ampdu[tid];
			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
				continue;
			if (rap->rxa_qframes)
				mwl_ampdu_rxdma_reclaim(rap);
		}
	}
}
2472
/*
 * Return rssi/noise estimates for a node.  Rssi is taken from
 * net80211's ic_node_getrssi method; noise is a fixed -95 dBm
 * estimate since no smoothed per-node noise floor is available
 * (see the disabled code below).
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2488
2489/*
2490 * Convert Hardware per-antenna rssi info to common format:
2491 * Let a1, a2, a3 represent the amplitudes per chain
2492 * Let amax represent max[a1, a2, a3]
2493 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2494 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2495 * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2496 * maintain some extra precision.
2497 *
2498 * Values are stored in .5 db format capped at 127.
2499 */
static void
mwl_node_getmimoinfo(const struct ieee80211_node *ni,
	struct ieee80211_mimo_info *mi)
{
/* convert one chain's amplitude to .5 dB rssi, capped at 127 */
#define	CVT(_dst, _src) do {						\
	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
} while (0)
	/* 4*20*log10(idx) table (see the block comment above) */
	static const int8_t logdbtbl[32] = {
	       0,   0,  24,  38,  48,  56,  62,  68,
	      72,  76,  80,  83,  86,  89,  92,  94,
	      96,  98, 100, 102, 104, 106, 107, 109,
	     110, 112, 113, 115, 116, 117, 118, 119
	};
	const struct mwl_node *mn = MWL_NODE_CONST(ni);
	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
	uint32_t rssi_max;

	/* amax = strongest of the three chains */
	rssi_max = mn->mn_ai.rssi_a;
	if (mn->mn_ai.rssi_b > rssi_max)
		rssi_max = mn->mn_ai.rssi_b;
	if (mn->mn_ai.rssi_c > rssi_max)
		rssi_max = mn->mn_ai.rssi_c;

	CVT(mi->rssi[0], mn->mn_ai.rssi_a);
	CVT(mi->rssi[1], mn->mn_ai.rssi_b);
	CVT(mi->rssi[2], mn->mn_ai.rssi_c);

	/* per-chain noise floor as recorded from the rx descriptor */
	mi->noise[0] = mn->mn_ai.nf_a;
	mi->noise[1] = mn->mn_ai.nf_b;
	mi->noise[2] = mn->mn_ai.nf_c;
#undef CVT
}
2533
2534static __inline void *
2535mwl_getrxdma(struct mwl_softc *sc)
2536{
2537	struct mwl_jumbo *buf;
2538	void *data;
2539
2540	/*
2541	 * Allocate from jumbo pool.
2542	 */
2543	MWL_RXFREE_LOCK(sc);
2544	buf = SLIST_FIRST(&sc->sc_rxfree);
2545	if (buf == NULL) {
2546		DPRINTF(sc, MWL_DEBUG_ANY,
2547		    "%s: out of rx dma buffers\n", __func__);
2548		sc->sc_stats.mst_rx_nodmabuf++;
2549		data = NULL;
2550	} else {
2551		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2552		sc->sc_nrxfree--;
2553		data = MWL_JUMBO_BUF2DATA(buf);
2554	}
2555	MWL_RXFREE_UNLOCK(sc);
2556	return data;
2557}
2558
2559static __inline void
2560mwl_putrxdma(struct mwl_softc *sc, void *data)
2561{
2562	struct mwl_jumbo *buf;
2563
2564	/* XXX bounds check data */
2565	MWL_RXFREE_LOCK(sc);
2566	buf = MWL_JUMBO_DATA2BUF(data);
2567	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2568	sc->sc_nrxfree++;
2569	MWL_RXFREE_UNLOCK(sc);
2570}
2571
/*
 * (Re)initialize an rx descriptor: attach a dma buffer from the
 * jumbo pool if the slot has none, reset the descriptor fields,
 * and hand ownership to the firmware.  Returns ENOMEM when no
 * dma buffer is available; the descriptor is then marked
 * OS-owned so the firmware skips it.
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	/* transfer ownership last, after all other fields are set */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2610
/*
 * External-storage free callback for mbufs carrying rx dma
 * buffers: return the buffer to the jumbo pool and, if rx was
 * throttled for lack of buffers, re-enable rx interrupts once
 * the pool recovers past mwl_rxdmalow.
 */
static void
mwl_ext_free(struct mbuf *m, void *data, void *arg)
{
	struct mwl_softc *sc = arg;

	/* XXX bounds check data */
	mwl_putrxdma(sc, data);
	/*
	 * If we were previously blocked by a lack of rx dma buffers
	 * check if we now have enough to restart rx interrupt handling.
	 * NB: we know we are called at splvm which is above splnet.
	 */
	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
		sc->sc_rxblocked = 0;
		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
	}
}
2628
/*
 * 802.11 BAR (Block Ack Request) control frame header as far
 * as mwl_anyhdrsize needs it; the BAR control/sequence fields
 * follow and are not included here.
 */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];	/* frame control */
	u_int8_t	i_dur[2];	/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2636
2637/*
2638 * Like ieee80211_anyhdrsize, but handles BAR frames
2639 * specially so the logic below to piece the 802.11
2640 * header together works.
2641 */
2642static __inline int
2643mwl_anyhdrsize(const void *data)
2644{
2645	const struct ieee80211_frame *wh = data;
2646
2647	if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2648		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2649		case IEEE80211_FC0_SUBTYPE_CTS:
2650		case IEEE80211_FC0_SUBTYPE_ACK:
2651			return sizeof(struct ieee80211_frame_ack);
2652		case IEEE80211_FC0_SUBTYPE_BAR:
2653			return sizeof(struct mwl_frame_bar);
2654		}
2655		return sizeof(struct ieee80211_frame_min);
2656	} else
2657		return ieee80211_hdrsize(data);
2658}
2659
2660static void
2661mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2662{
2663	const struct ieee80211_frame *wh;
2664	struct ieee80211_node *ni;
2665
2666	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2667	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2668	if (ni != NULL) {
2669		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2670		ieee80211_free_node(ni);
2671	}
2672}
2673
2674/*
2675 * Convert hardware signal strength to rssi.  The value
2676 * provided by the device has the noise floor added in;
2677 * we need to compensate for this but we don't have that
2678 * so we use a fixed value.
2679 *
2680 * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2681 * offset is already set as part of the initial gain.  This
2682 * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2683 */
static __inline int
cvtrssi(uint8_t ssi)
{
	int v;

	/* XXX hack guess until we have a real noise floor */
	v = 2 * (87 - ((int)ssi + 8));	/* NB: .5 dBm units */
	if (v < 0)
		return 0;
	if (v > 127)
		return 127;
	return v;
}
2692
/*
 * Deferred (taskqueue) receive processing.  Walk the rx
 * descriptor ring shared with the firmware, re-constituting
 * each frame's 802.11 header in place and handing it to
 * net80211.  Each consumed dma buffer is replaced from the
 * private jumbo pool before dispatch; when the pool runs dry
 * the rx interrupt is masked until mwl_ext_free refills it.
 * At most mwl_rxquota descriptors are processed per call.
 */
static void
mwl_rx_proc(void *arg, int npending)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((((const struct ieee80211_frame *)wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct mwl_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_qosframe_addr4 *wh4;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX */
	bf = sc->sc_rxnext;
	/* resume where the last pass stopped; bound work by mwl_rxquota */
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* stop at the first descriptor still owned by the dma engine */
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free,
		    data, sc, 0, EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		m->m_pkthdr.rcvif = ifp;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh)) {
			if (IEEE80211_DIR_DSTODS(wh)) {
				wh4 = mtod(m,
				    struct ieee80211_qosframe_addr4*);
				*(uint16_t *)wh4->i_qos = ds->QosCtrl;
			} else {
				*(uint16_t *)wh->i_qos = ds->QosCtrl;
			}
		}
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED |
		    IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	/* remember where to resume on the next invocation */
	sc->sc_rxnext = bf;

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
	    !IFQ_IS_EMPTY(&ifp->if_snd)) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(ifp);
	}
#undef IEEE80211_DIR_DSTODS
}
2929
/*
 * One-time tx queue initialization: set up the lock, record the
 * h/w queue number, and link each buffer's descriptor chain to
 * the next buffer's bus address so the firmware sees a circular
 * descriptor list it can pre-fetch.
 */
static void
mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
{
	struct mwl_txbuf *bf, *bn;
	struct mwl_txdesc *ds;

	MWL_TXQ_LOCK_INIT(sc, txq);
	txq->qnum = qnum;
	txq->txpri = 0;	/* XXX */
#if 0
	/* NB: q setup by mwl_txdma_setup XXX */
	STAILQ_INIT(&txq->free);
#endif
	STAILQ_FOREACH(bf, &txq->free, bf_list) {
		bf->bf_txq = txq;

		ds = bf->bf_desc;
		bn = STAILQ_NEXT(bf, bf_list);
		if (bn == NULL)
			bn = STAILQ_FIRST(&txq->free);	/* wrap to head */
		ds->pPhysNext = htole32(bn->bf_daddr);
	}
	STAILQ_INIT(&txq->active);
}
2954
2955/*
2956 * Setup a hardware data transmit queue for the specified
2957 * access control.  We record the mapping from ac's
2958 * to h/w queues for use by mwl_tx_start.
2959 */
static int
mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	struct mwl_txq *txq;

	/* validate the indices before recording the ac -> h/w queue map */
	if (ac >= N(sc->sc_ac2q)) {
		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
			ac, N(sc->sc_ac2q));
		return 0;
	}
	if (mvtype >= MWL_NUM_TX_QUEUES) {
		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
			mvtype, MWL_NUM_TX_QUEUES);
		return 0;
	}
	txq = &sc->sc_txq[mvtype];
	mwl_txq_init(sc, txq, mvtype);
	sc->sc_ac2q[ac] = txq;
	return 1;	/* NB: 1 = success, 0 = bad index */
#undef N
}
2982
2983/*
2984 * Update WME parameters for a transmit queue.
2985 */
2986static int
2987mwl_txq_update(struct mwl_softc *sc, int ac)
2988{
2989#define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
2990	struct ifnet *ifp = sc->sc_ifp;
2991	struct ieee80211com *ic = ifp->if_l2com;
2992	struct mwl_txq *txq = sc->sc_ac2q[ac];
2993	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
2994	struct mwl_hal *mh = sc->sc_mh;
2995	int aifs, cwmin, cwmax, txoplim;
2996
2997	aifs = wmep->wmep_aifsn;
2998	/* XXX in sta mode need to pass log values for cwmin/max */
2999	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
3000	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
3001	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */
3002
3003	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
3004		device_printf(sc->sc_dev, "unable to update hardware queue "
3005			"parameters for %s traffic!\n",
3006			ieee80211_wme_acnames[ac]);
3007		return 0;
3008	}
3009	return 1;
3010#undef MWL_EXPONENT_TO_VALUE
3011}
3012
3013/*
3014 * Callback from the 802.11 layer to update WME parameters.
3015 */
3016static int
3017mwl_wme_update(struct ieee80211com *ic)
3018{
3019	struct mwl_softc *sc = ic->ic_ifp->if_softc;
3020
3021	return !mwl_txq_update(sc, WME_AC_BE) ||
3022	    !mwl_txq_update(sc, WME_AC_BK) ||
3023	    !mwl_txq_update(sc, WME_AC_VI) ||
3024	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
3025}
3026
3027/*
3028 * Reclaim resources for a setup queue.
3029 */
static void
mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	/* XXX hal work? */
	/*
	 * Only the queue lock needs teardown here; descriptor and
	 * buffer DMA state is presumably reclaimed by the txdma
	 * cleanup path — confirm before adding hal teardown.
	 */
	MWL_TXQ_LOCK_DESTROY(txq);
}
3036
3037/*
3038 * Reclaim all tx queue resources.
3039 */
3040static void
3041mwl_tx_cleanup(struct mwl_softc *sc)
3042{
3043	int i;
3044
3045	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3046		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
3047}
3048
/*
 * Map an mbuf chain for DMA tx.  If the chain needs more than
 * MWL_TXDESC segments it is collapsed/defragged once and the
 * load retried.  On success bf_m/bf_segs/bf_nseg are valid and
 * the map is synced for writing; on any failure the mbuf has
 * been freed and an errno-style code is returned.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = MWL_TXDESC+1;	/* NB: overwritten by the reload below */
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_NOWAIT);
#endif
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* retry the load now that the chain fits the segment limit */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3110
/*
 * Map a legacy ieee80211 rate (units of 0.5 Mb/s) to the index
 * the firmware expects in the tx descriptor rate field; rates
 * not in the table map to index 0.
 */
static __inline int
mwl_cvtlegacyrate(int rate)
{
	static const int legacyrates[13] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	int ix;

	for (ix = 0; ix < 13; ix++)
		if (legacyrates[ix] == rate)
			return ix;
	return 0;
}
3131
3132/*
3133 * Calculate fixed tx rate information per client state;
3134 * this value is suitable for writing to the Format field
3135 * of a tx descriptor.
3136 */
static uint16_t
mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
{
	uint16_t fmt;

	/* antenna config + extension-channel placement from the chan flags */
	fmt = SM(3, EAGLE_TXD_ANTENNA)
	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
		fmt |= EAGLE_TXD_FORMAT_HT
		    /* NB: 0x80 implicitly stripped from ucastrate */
		    | SM(rate, EAGLE_TXD_RATE);
		/* XXX short/long GI may be wrong; re-check */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			/* 40MHz: pick GI from the sta's HT40 SGI capability */
			fmt |= EAGLE_TXD_CHW_40
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		} else {
			/* 20MHz: pick GI from the sta's HT20 SGI capability */
			fmt |= EAGLE_TXD_CHW_20
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		}
	} else {			/* legacy rate */
		fmt |= EAGLE_TXD_FORMAT_LEGACY
		    | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
		    | EAGLE_TXD_CHW_20
		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
	}
	return fmt;
}
3169
/*
 * Frame transmit: encrypt (if needed), prepend the firmware tx
 * record, load the mbuf for DMA, fill in the tx descriptor, and
 * hand the frame to the h/w queue.  On success the mbuf and the
 * caller's node reference are owned by the tx buffer; on error
 * the mbuf is freed and an errno-style code is returned.
 */
static int
mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
    struct mbuf *m0)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, iswep, ismcast;
	int hdrlen, copyhdrlen, pktlen;
	struct mwl_txdesc *ds;
	struct mwl_txq *txq;
	struct ieee80211_frame *wh;
	struct mwltxrec *tr;
	struct mwl_node *mn;
	uint16_t qos;
#if MWL_TXDESC > 1
	int i;
#endif

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	copyhdrlen = hdrlen;	/* NB: computed but not referenced below — TODO confirm */
	pktlen = m0->m_pkthdr.len;
	/* extract the QoS control field (already little-endian on the wire) */
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		if (IEEE80211_DIR_DSTODS(wh)) {
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
			copyhdrlen -= sizeof(qos);
		} else
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe *) wh)->i_qos);
	} else
		qos = 0;

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 *
		 * NB: we do this even though the firmware will ignore
		 *     what we've done for WEP and TKIP as we need the
		 *     ExtIV filled in for CCMP and this also adjusts
		 *     the headers which simplifies our work below.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			m_freem(m0);
			return EIO;
		}
		/*
		 * Adjust the packet length for the crypto additions
		 * done during encap and any other bits that the f/w
		 * will add later on.
		 */
		cip = k->wk_cipher;
		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;	/* XXX */
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
#if 0
		sc->sc_tx_th.wt_rate = ds->DataRate;
#endif
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}
	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 * We are assured the mbuf has sufficient headroom to
	 * prepend in-place by the setup of ic_headroom in
	 * mwl_attach.
	 */
	if (hdrlen < sizeof(struct mwltxrec)) {
		const int space = sizeof(struct mwltxrec) - hdrlen;
		if (M_LEADINGSPACE(m0) < space) {
			/* NB: should never happen */
			device_printf(sc->sc_dev,
			    "not enough headroom, need %d found %zd, "
			    "m_flags 0x%x m_len %d\n",
			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
			ieee80211_dump_pkt(ic,
			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
			m_freem(m0);
			sc->sc_stats.mst_tx_noheadroom++;
			return EIO;
		}
		M_PREPEND(m0, space, M_NOWAIT);
	}
	tr = mtod(m0, struct mwltxrec *);
	if (wh != (struct ieee80211_frame *) &tr->wh)
		ovbcopy(wh, &tr->wh, hdrlen);
	/*
	 * Note: the "firmware length" is actually the length
	 * of the fully formed "802.11 payload".  That is, it's
	 * everything except for the 802.11 header.  In particular
	 * this includes all crypto material including the MIC!
	 */
	tr->fwlen = htole16(pktlen - hdrlen);

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = mwl_tx_dmasetup(sc, bf, m0);
	if (error != 0) {
		/* NB: stat collected in mwl_tx_dmasetup */
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: unable to setup dma\n", __func__);
		return error;
	}
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	tr = mtod(m0, struct mwltxrec *);
	wh = (struct ieee80211_frame *)&tr->wh;

	/*
	 * Formulate tx descriptor.
	 */
	ds = bf->bf_desc;
	txq = bf->bf_txq;

	ds->QosCtrl = qos;			/* NB: already little-endian */
#if MWL_TXDESC == 1
	/*
	 * NB: multiframes should be zero because the descriptors
	 *     are initialized to zero.  This should handle the case
	 *     where the driver is built with MWL_TXDESC=1 but we are
	 *     using firmware with multi-segment support.
	 */
	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
#else
	ds->multiframes = htole32(bf->bf_nseg);
	ds->PktLen = htole16(m0->m_pkthdr.len);
	for (i = 0; i < bf->bf_nseg; i++) {
		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
	}
#endif
	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
	ds->Format = 0;
	ds->pad = 0;
	ds->ack_wcb_addr = 0;

	mn = MWL_NODE(ni);
	/*
	 * Select transmit rate.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		sc->sc_stats.mst_tx_mgmt++;
		/* fall thru... */
	case IEEE80211_FC0_TYPE_CTL:
		/* NB: assign to BE q to avoid bursting */
		ds->TxPriority = MWL_WME_AC_BE;
		break;
	case IEEE80211_FC0_TYPE_DATA:
		if (!ismcast) {
			const struct ieee80211_txparam *tp = ni->ni_txparms;
			/*
			 * EAPOL frames get forced to a fixed rate and w/o
			 * aggregation; otherwise check for any fixed rate
			 * for the client (may depend on association state).
			 */
			if (m0->m_flags & M_EAPOL) {
				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
				ds->Format = mvp->mv_eapolformat;
				ds->pad = htole16(
				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
				/* XXX pre-calculate per node */
				ds->Format = htole16(
				    mwl_calcformat(tp->ucastrate, ni));
				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
			}
			/*
			 * Route the frame: frames w/o QoS go to the default
			 * queue; QoS frames matching an active BA stream go
			 * to that stream's queue (slots checked high to low).
			 */
			/* NB: EAPOL frames will never have qos set */
			if (qos == 0)
				ds->TxPriority = txq->qnum;
#if MWL_MAXBA > 3
			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
				ds->TxPriority = mn->mn_ba[3].txq;
#endif
#if MWL_MAXBA > 2
			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
				ds->TxPriority = mn->mn_ba[2].txq;
#endif
#if MWL_MAXBA > 1
			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
				ds->TxPriority = mn->mn_ba[1].txq;
#endif
#if MWL_MAXBA > 0
			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
				ds->TxPriority = mn->mn_ba[0].txq;
#endif
			else
				ds->TxPriority = txq->qnum;
		} else
			ds->TxPriority = txq->qnum;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		sc->sc_stats.mst_tx_badframetype++;
		m_freem(m0);
		return EIO;
	}

	if (IFF_DUMPPKTS_XMIT(sc))
		ieee80211_dump_pkt(ic,
		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);

	MWL_TXQ_LOCK(txq);
	/* hand ownership to the firmware and queue on the active list */
	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	sc->sc_tx_timer = 5;		/* NB: arm watchdog; cleared in mwl_tx_proc */
	MWL_TXQ_UNLOCK(txq);

	return 0;
#undef	IEEE80211_DIR_DSTODS
}
3418
/*
 * Map a firmware legacy rate index back to an ieee80211 rate
 * (units of 0.5 Mb/s); out-of-range indices yield 0.
 */
static __inline int
mwl_cvtlegacyrix(int rix)
{
	static const int ieeerates[13] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };

	if (rix < 0 || rix >= 13)
		return 0;
	return ieeerates[rix];
}
3428
3429/*
3430 * Process completed xmit descriptors from the specified queue.
3431 */
static int
mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
{
/* NB: this macro is defined but not referenced below */
#define	EAGLE_TXD_STATUS_MCAST \
	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;
	struct ieee80211_node *ni;
	struct mwl_node *an;
	int nreaped;
	uint32_t status;

	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
	/*
	 * Reap buffers from the head of the active list until we
	 * hit one still owned by the firmware.  The lock is only
	 * held while inspecting/removing the head so completion
	 * work runs unlocked.
	 */
	for (nreaped = 0;; nreaped++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		ds = bf->bf_desc;
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
			MWL_TXQ_UNLOCK(txq);
			break;		/* f/w still owns it; all done */
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);

#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
			mwl_printtxbuf(bf, txq->qnum, nreaped);
#endif
		ni = bf->bf_node;
		if (ni != NULL) {
			an = MWL_NODE(ni);	/* NB: 'an' is currently unused */
			status = le32toh(ds->Status);
			if (status & EAGLE_TXD_STATUS_OK) {
				uint16_t Format = le16toh(ds->Format);
				uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);

				/* update per-antenna/retry stats and the
				 * node's notion of the current tx rate */
				sc->sc_stats.mst_ant_tx[txant]++;
				if (status & EAGLE_TXD_STATUS_OK_RETRY)
					sc->sc_stats.mst_tx_retries++;
				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
					sc->sc_stats.mst_tx_mretries++;
				if (txq->qnum >= MWL_WME_AC_VO)
					ic->ic_wme.wme_hipri_traffic++;
				ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
					ni->ni_txrate = mwl_cvtlegacyrix(
					    ni->ni_txrate);
				} else
					ni->ni_txrate |= IEEE80211_RATE_MCS;
				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
			} else {
				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
					sc->sc_stats.mst_tx_linkerror++;
				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
					sc->sc_stats.mst_tx_xretries++;
				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
					sc->sc_stats.mst_tx_aging++;
				if (bf->bf_m->m_flags & M_FF)
					sc->sc_stats.mst_ff_txerr++;
			}
			/*
			 * Do any tx complete callback.  Note this must
			 * be done before releasing the node reference.
			 * XXX no way to figure out if frame was ACK'd
			 */
			if (bf->bf_m->m_flags & M_TXCB) {
				/* XXX strip fw len in case header inspected */
				m_adj(bf->bf_m, sizeof(uint16_t));
				ieee80211_process_callback(ni, bf->bf_m,
					(status & EAGLE_TXD_STATUS_OK) == 0);
			}
			/*
			 * Reclaim reference to node.
			 *
			 * NB: the node may be reclaimed here if, for example
			 *     this is a DEAUTH message that was sent and the
			 *     node was timed out due to inactivity.
			 */
			ieee80211_free_node(ni);
		}
		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);

		/* release DMA state and the mbuf, then recycle the buffer */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
	return nreaped;
#undef EAGLE_TXD_STATUS_MCAST
}
3532
3533/*
3534 * Deferred processing of transmit interrupt; special-cased
3535 * for four hardware queues, 0-3.
3536 */
3537static void
3538mwl_tx_proc(void *arg, int npending)
3539{
3540	struct mwl_softc *sc = arg;
3541	struct ifnet *ifp = sc->sc_ifp;
3542	int nreaped;
3543
3544	/*
3545	 * Process each active queue.
3546	 */
3547	nreaped = 0;
3548	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3549		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3550	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3551		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3552	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3553		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3554	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3555		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3556
3557	if (nreaped != 0) {
3558		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3559		sc->sc_tx_timer = 0;
3560		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
3561			/* NB: kick fw; the tx thread may have been preempted */
3562			mwl_hal_txstart(sc->sc_mh, 0);
3563			mwl_start(ifp);
3564		}
3565	}
3566}
3567
/*
 * Discard every frame queued to a h/w transmit queue: unload
 * DMA state, drop the node reference, free the mbuf, and return
 * the buffer to the free list.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ifnet *ifp = sc->sc_ifp;
			struct ieee80211com *ic = ifp->if_l2com;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3612
3613/*
3614 * Drain the transmit queues and reclaim resources.
3615 */
3616static void
3617mwl_draintxq(struct mwl_softc *sc)
3618{
3619	struct ifnet *ifp = sc->sc_ifp;
3620	int i;
3621
3622	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3623		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3624	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3625	sc->sc_tx_timer = 0;
3626}
3627
3628#ifdef MWL_DIAGAPI
3629/*
3630 * Reset the transmit queues to a pristine state after a fw download.
3631 */
3632static void
3633mwl_resettxq(struct mwl_softc *sc)
3634{
3635	int i;
3636
3637	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3638		mwl_txq_reset(sc, &sc->sc_txq[i]);
3639}
3640#endif /* MWL_DIAGAPI */
3641
3642/*
3643 * Clear the transmit queues of any frames submitted for the
3644 * specified vap.  This is done when the vap is deleted so we
3645 * don't potentially reference the vap after it is gone.
3646 * Note we cannot remove the frames; we only reclaim the node
3647 * reference.
3648 */
3649static void
3650mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3651{
3652	struct mwl_txq *txq;
3653	struct mwl_txbuf *bf;
3654	int i;
3655
3656	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3657		txq = &sc->sc_txq[i];
3658		MWL_TXQ_LOCK(txq);
3659		STAILQ_FOREACH(bf, &txq->active, bf_list) {
3660			struct ieee80211_node *ni = bf->bf_node;
3661			if (ni != NULL && ni->ni_vap == vap) {
3662				bf->bf_node = NULL;
3663				ieee80211_free_node(ni);
3664			}
3665		}
3666		MWL_TXQ_UNLOCK(txq);
3667	}
3668}
3669
3670static int
3671mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
3672	const uint8_t *frm, const uint8_t *efrm)
3673{
3674	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3675	const struct ieee80211_action *ia;
3676
3677	ia = (const struct ieee80211_action *) frm;
3678	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3679	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3680		const struct ieee80211_action_ht_mimopowersave *mps =
3681		    (const struct ieee80211_action_ht_mimopowersave *) ia;
3682
3683		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3684		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3685		    MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
3686		return 0;
3687	} else
3688		return sc->sc_recv_action(ni, wh, frm, efrm);
3689}
3690
/*
 * Intercept the ADDBA request path to bind a firmware BA stream
 * to the tid before the request is sent.  If no stream slot or
 * f/w stream is available, return 0 so net80211 attempts no
 * A-MPDU aggregation for this tid.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 */
		/* NB: slots probed highest-index first; the #if chain
		 *     forms one cascaded if/else across MWL_MAXBA slots */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3762
/*
 * Complete the ADDBA handshake: on success tell the firmware to
 * create the BA stream pre-allocated in mwl_addba_request; on
 * failure or a peer NAK, release the stream resources.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, TID %d\n",
		    __func__, tap->txa_tid);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d TID %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_tid, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3829
3830static void
3831mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3832{
3833	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3834	struct mwl_bastate *bas;
3835
3836	bas = tap->txa_private;
3837	if (bas != NULL) {
3838		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3839		    __func__, bas->bastream);
3840		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3841		mwl_bastream_free(bas);
3842		tap->txa_private = NULL;
3843	}
3844	sc->sc_addba_stop(ni, tap);
3845}
3846
3847/*
3848 * Setup the rx data structures.  This should only be
3849 * done once or we may get out of sync with the firmware.
3850 */
static int
mwl_startrecv(struct mwl_softc *sc)
{
	/* NB: one-shot guard; rx descriptor setup must not be re-run */
	if (!sc->sc_recvsetup) {
		struct mwl_rxbuf *bf, *prev;
		struct mwl_rxdesc *ds;

		/*
		 * Initialize each rx buffer and link its descriptor
		 * to the next buffer's DMA address, forming a ring:
		 * the last descriptor is pointed back at the first.
		 */
		prev = NULL;
		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			int error = mwl_rxbuf_init(sc, bf);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_RECV,
					"%s: mwl_rxbuf_init failed %d\n",
					__func__, error);
				return error;
			}
			if (prev != NULL) {
				ds = prev->bf_desc;
				ds->pPhysNext = htole32(bf->bf_daddr);
			}
			prev = bf;
		}
		if (prev != NULL) {
			/* close the ring */
			ds = prev->bf_desc;
			ds->pPhysNext =
			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
		}
		sc->sc_recvsetup = 1;
	}
	mwl_mode_init(sc);		/* set filters, etc. */
	return 0;
}
3883
3884static MWL_HAL_APMODE
3885mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3886{
3887	MWL_HAL_APMODE mode;
3888
3889	if (IEEE80211_IS_CHAN_HT(chan)) {
3890		if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3891			mode = AP_MODE_N_ONLY;
3892		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3893			mode = AP_MODE_AandN;
3894		else if (vap->iv_flags & IEEE80211_F_PUREG)
3895			mode = AP_MODE_GandN;
3896		else
3897			mode = AP_MODE_BandGandN;
3898	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3899		if (vap->iv_flags & IEEE80211_F_PUREG)
3900			mode = AP_MODE_G_ONLY;
3901		else
3902			mode = AP_MODE_MIXED;
3903	} else if (IEEE80211_IS_CHAN_B(chan))
3904		mode = AP_MODE_B_ONLY;
3905	else if (IEEE80211_IS_CHAN_A(chan))
3906		mode = AP_MODE_A_ONLY;
3907	else
3908		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3909	return mode;
3910}
3911
3912static int
3913mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
3914{
3915	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
3916	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
3917}
3918
3919/*
3920 * Set/change channels.
3921 */
static int
mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
{
	struct mwl_hal *mh = sc->sc_mh;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	MWL_HAL_CHANNEL hchan;
	int maxtxpow;

	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
	    __func__, chan->ic_freq, chan->ic_flags);

	/*
	 * Convert to a HAL channel description with
	 * the flags constrained to reflect the current
	 * operating mode.
	 */
	mwl_mapchan(&hchan, chan);
	mwl_hal_intrset(mh, 0);		/* disable interrupts */
#if 0
	mwl_draintxq(sc);		/* clear pending tx frames */
#endif
	mwl_hal_setchannel(mh, &hchan);
	/*
	 * Tx power is cap'd by the regulatory setting and
	 * possibly a user-set limit.  We pass the min of
	 * these to the hal to apply them to the cal data
	 * for this channel.
	 * XXX min bound?
	 */
	/* NB: comparison done in 1/2 dBm units (ic_maxregpower scaled by 2) */
	maxtxpow = 2*chan->ic_maxregpower;
	if (maxtxpow > ic->ic_txpowlimit)
		maxtxpow = ic->ic_txpowlimit;
	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
	/* NB: potentially change mcast/mgt rates */
	mwl_setcurchanrates(sc);

	/*
	 * Update internal state.
	 */
	/* refresh the radiotap tx/rx headers for the new channel */
	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
	if (IEEE80211_IS_CHAN_A(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
	} else {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
	}
	sc->sc_curchan = hchan;
	mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable interrupts */

	return 0;
}
3979
3980static void
3981mwl_scan_start(struct ieee80211com *ic)
3982{
3983	struct ifnet *ifp = ic->ic_ifp;
3984	struct mwl_softc *sc = ifp->if_softc;
3985
3986	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
3987}
3988
3989static void
3990mwl_scan_end(struct ieee80211com *ic)
3991{
3992	struct ifnet *ifp = ic->ic_ifp;
3993	struct mwl_softc *sc = ifp->if_softc;
3994
3995	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
3996}
3997
3998static void
3999mwl_set_channel(struct ieee80211com *ic)
4000{
4001	struct ifnet *ifp = ic->ic_ifp;
4002	struct mwl_softc *sc = ifp->if_softc;
4003
4004	(void) mwl_chan_set(sc, ic->ic_curchan);
4005}
4006
4007/*
4008 * Handle a channel switch request.  We inform the firmware
4009 * and mark the global state to suppress various actions.
4010 * NB: we issue only one request to the fw; we may be called
4011 * multiple times if there are multiple vap's.
4012 */
4013static void
4014mwl_startcsa(struct ieee80211vap *vap)
4015{
4016	struct ieee80211com *ic = vap->iv_ic;
4017	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4018	MWL_HAL_CHANNEL hchan;
4019
4020	if (sc->sc_csapending)
4021		return;
4022
4023	mwl_mapchan(&hchan, ic->ic_csa_newchan);
4024	/* 1 =>'s quiet channel */
4025	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
4026	sc->sc_csapending = 1;
4027}
4028
4029/*
4030 * Plumb any static WEP key for the station.  This is
4031 * necessary as we must propagate the key from the
4032 * global key table of the vap to each sta db entry.
4033 */
static void
mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	/*
	 * Push the default tx key into the firmware entry for this mac,
	 * but only when the vap is doing static WEP: PRIVACY set and WPA
	 * clear, with a default tx key that is actually configured.
	 */
	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
		IEEE80211_F_PRIVACY &&
	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
		(void) mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac);
}
4043
/*
 * Create/update the firmware sta db entry for a peer station.
 * Returns 0 on success or the error from mwl_hal_newstation.
 */
static int
mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_hal_vap *hvap;
	int error;

	if (vap->iv_opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vap's do not have a f/w vap; instead they piggyback
		 * on an AP vap and we must install the sta db entry and
		 * crypto state using that AP's handle (the WDS vap has none).
		 */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	} else
		hvap = MWL_VAP(vap)->mv_hvap;
	/* NB: wme_info byte passed only when the peer advertised a WME ie */
	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
	    aid, staid, pi,
	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
	if (error == 0) {
		/*
		 * Setup security for this station.  For sta mode this is
		 * needed even though do the same thing on transition to
		 * AUTH state because the call to mwl_hal_newstation
		 * clobbers the crypto state we setup.
		 */
		mwl_setanywepkey(vap, ni->ni_macaddr);
	}
	return error;
#undef WME
}
4077
4078static void
4079mwl_setglobalkeys(struct ieee80211vap *vap)
4080{
4081	struct ieee80211_key *wk;
4082
4083	wk = &vap->iv_nw_keys[0];
4084	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
4085		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
4086			(void) mwl_key_set(vap, wk, vap->iv_myaddr);
4087}
4088
4089/*
4090 * Convert a legacy rate set to a firmware bitmask.
4091 */
4092static uint32_t
4093get_rate_bitmap(const struct ieee80211_rateset *rs)
4094{
4095	uint32_t rates;
4096	int i;
4097
4098	rates = 0;
4099	for (i = 0; i < rs->rs_nrates; i++)
4100		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
4101		case 2:	  rates |= 0x001; break;
4102		case 4:	  rates |= 0x002; break;
4103		case 11:  rates |= 0x004; break;
4104		case 22:  rates |= 0x008; break;
4105		case 44:  rates |= 0x010; break;
4106		case 12:  rates |= 0x020; break;
4107		case 18:  rates |= 0x040; break;
4108		case 24:  rates |= 0x080; break;
4109		case 36:  rates |= 0x100; break;
4110		case 48:  rates |= 0x200; break;
4111		case 72:  rates |= 0x400; break;
4112		case 96:  rates |= 0x800; break;
4113		case 108: rates |= 0x1000; break;
4114		}
4115	return rates;
4116}
4117
4118/*
4119 * Construct an HT firmware bitmask from an HT rate set.
4120 */
4121static uint32_t
4122get_htrate_bitmap(const struct ieee80211_htrateset *rs)
4123{
4124	uint32_t rates;
4125	int i;
4126
4127	rates = 0;
4128	for (i = 0; i < rs->rs_nrates; i++) {
4129		if (rs->rs_rates[i] < 16)
4130			rates |= 1<<rs->rs_rates[i];
4131	}
4132	return rates;
4133}
4134
4135/*
4136 * Craft station database entry for station.
4137 * NB: use host byte order here, the hal handles byte swapping.
4138 */
static MWL_HAL_PEERINFO *
mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
{
	const struct ieee80211vap *vap = ni->ni_vap;

	/* NB: returns pi so callers can pass the result inline */
	memset(pi, 0, sizeof(*pi));
	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
	pi->CapInfo = ni->ni_capinfo;
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* HT capabilities, etc */
		pi->HTCapabilitiesInfo = ni->ni_htcap;
		/* XXX pi.HTCapabilitiesInfo */
	        pi->MacHTParamInfo = ni->ni_htparam;
		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
		pi->AddHtInfo.OpMode = ni->ni_htopmode;
		pi->AddHtInfo.stbc = ni->ni_htstbc;

		/* constrain according to local configuration */
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
		/* NB: strip 40MHz cap when the node is not on a 40MHz channel */
		if (ni->ni_chw != 40)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
	}
	return pi;
}
4168
4169/*
4170 * Re-create the local sta db entry for a vap to ensure
4171 * up to date WME state is pushed to the firmware.  Because
4172 * this resets crypto state this must be followed by a
4173 * reload of any keys in the global key table.
4174 */
static int
mwl_localstadb(struct ieee80211vap *vap)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211_node *bss;
	MWL_HAL_PEERINFO pi;
	int error;

	switch (vap->iv_opmode) {
	case IEEE80211_M_STA:
		bss = vap->iv_bss;
		/* NB: peer info only meaningful once we're associated (RUN) */
		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
		    vap->iv_state == IEEE80211_S_RUN ?
			mkpeerinfo(&pi, bss) : NULL,
		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
		    bss->ni_ies.wme_ie != NULL ?
			WME(bss->ni_ies.wme_ie)->wme_info : 0);
		if (error == 0)
			/* NB: newstation clobbers crypto state; reload keys */
			mwl_setglobalkeys(vap);
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
		if (error == 0)
			mwl_setglobalkeys(vap);
		break;
	default:
		/* other modes have no local sta db entry */
		error = 0;
		break;
	}
	return error;
#undef WME
}
4210
/*
 * net80211 state machine hook.  Do driver/firmware work that must
 * bracket the net80211 state change: radar detection bookkeeping,
 * firmware vap start/stop, sta db and beacon setup, etc.
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    vap->iv_ifp->if_xname, __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	/* stop the station-age timer; restarted below on entry to RUN */
	callout_stop(&sc->sc_timer);
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    vap->iv_ifp->if_xname, __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    vap->iv_ifp->if_xname, __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			/* NB: first DWDS sta vap enables DWDS in the f/w */
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    vap->iv_ifp->if_xname, __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		/*
		 * NOTE(review): this decrements sc_ndwdsvaps on any
		 * non-RUN/non-SLEEP transition of a DWDS vap without
		 * checking ostate; verify it pairs with the increment
		 * in the RUN case above.
		 */
		mwl_hal_setdwds(mh, 0);
bad:
	return error;
}
4364
4365/*
4366 * Manage station id's; these are separate from AID's
4367 * as AID's may have values out of the range of possible
4368 * station id's acceptable to the firmware.
4369 */
4370static int
4371allocstaid(struct mwl_softc *sc, int aid)
4372{
4373	int staid;
4374
4375	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
4376		/* NB: don't use 0 */
4377		for (staid = 1; staid < MWL_MAXSTAID; staid++)
4378			if (isclr(sc->sc_staid, staid))
4379				break;
4380	} else
4381		staid = aid;
4382	setbit(sc->sc_staid, staid);
4383	return staid;
4384}
4385
static void
delstaid(struct mwl_softc *sc, int staid)
{
	/* Return the station id to the allocation pool. */
	clrbit(sc->sc_staid, staid);
}
4391
4392/*
4393 * Setup driver-specific state for a newly associated node.
4394 * Note that we're called also on a re-associate, the isnew
4395 * param tells us if this is the first time or not.
4396 */
4397static void
4398mwl_newassoc(struct ieee80211_node *ni, int isnew)
4399{
4400	struct ieee80211vap *vap = ni->ni_vap;
4401        struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4402	struct mwl_node *mn = MWL_NODE(ni);
4403	MWL_HAL_PEERINFO pi;
4404	uint16_t aid;
4405	int error;
4406
4407	aid = IEEE80211_AID(ni->ni_associd);
4408	if (isnew) {
4409		mn->mn_staid = allocstaid(sc, aid);
4410		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4411	} else {
4412		mn = MWL_NODE(ni);
4413		/* XXX reset BA stream? */
4414	}
4415	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4416	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4417	error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4418	if (error != 0) {
4419		DPRINTF(sc, MWL_DEBUG_NODE,
4420		    "%s: error %d creating sta db entry\n",
4421		    __func__, error);
4422		/* XXX how to deal with error? */
4423	}
4424}
4425
4426/*
4427 * Periodically poke the firmware to age out station state
4428 * (power save queues, pending tx aggregates).
4429 */
static void
mwl_agestations(void *arg)
{
	struct mwl_softc *sc = arg;

	/* prod the firmware to age out sta state */
	mwl_hal_setkeepalive(sc->sc_mh);
	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
}
4439
4440static const struct mwl_hal_channel *
4441findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4442{
4443	int i;
4444
4445	for (i = 0; i < ci->nchannels; i++) {
4446		const struct mwl_hal_channel *hc = &ci->channels[i];
4447		if (hc->ieee == ieee)
4448			return hc;
4449	}
4450	return NULL;
4451}
4452
4453static int
4454mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
4455	int nchan, struct ieee80211_channel chans[])
4456{
4457	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4458	struct mwl_hal *mh = sc->sc_mh;
4459	const MWL_HAL_CHANNELINFO *ci;
4460	int i;
4461
4462	for (i = 0; i < nchan; i++) {
4463		struct ieee80211_channel *c = &chans[i];
4464		const struct mwl_hal_channel *hc;
4465
4466		if (IEEE80211_IS_CHAN_2GHZ(c)) {
4467			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
4468			    IEEE80211_IS_CHAN_HT40(c) ?
4469				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4470		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4471			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
4472			    IEEE80211_IS_CHAN_HT40(c) ?
4473				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4474		} else {
4475			if_printf(ic->ic_ifp,
4476			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
4477			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
4478			return EINVAL;
4479		}
4480		/*
4481		 * Verify channel has cal data and cap tx power.
4482		 */
4483		hc = findhalchannel(ci, c->ic_ieee);
4484		if (hc != NULL) {
4485			if (c->ic_maxpower > 2*hc->maxTxPow)
4486				c->ic_maxpower = 2*hc->maxTxPow;
4487			goto next;
4488		}
4489		if (IEEE80211_IS_CHAN_HT40(c)) {
4490			/*
4491			 * Look for the extension channel since the
4492			 * hal table only has the primary channel.
4493			 */
4494			hc = findhalchannel(ci, c->ic_extieee);
4495			if (hc != NULL) {
4496				if (c->ic_maxpower > 2*hc->maxTxPow)
4497					c->ic_maxpower = 2*hc->maxTxPow;
4498				goto next;
4499			}
4500		}
4501		if_printf(ic->ic_ifp,
4502		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
4503		    __func__, c->ic_ieee, c->ic_extieee,
4504		    c->ic_freq, c->ic_flags);
4505		return EINVAL;
4506	next:
4507		;
4508	}
4509	return 0;
4510}
4511
4512#define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4513#define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4514
/* Initialize a net80211 channel entry from hal data. */
static void
addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow)
{
	c->ic_freq = freq;
	c->ic_flags = flags;
	c->ic_ieee = ieee;
	c->ic_minpower = 0;
	c->ic_maxpower = 2*txpow;	/* NB: ic_maxpower is in .5 dBm units */
	c->ic_maxregpower = txpow;
}
4525
4526static const struct ieee80211_channel *
4527findchannel(const struct ieee80211_channel chans[], int nchans,
4528	int freq, int flags)
4529{
4530	const struct ieee80211_channel *c;
4531	int i;
4532
4533	for (i = 0; i < nchans; i++) {
4534		c = &chans[i];
4535		if (c->ic_freq == freq && c->ic_flags == flags)
4536			return c;
4537	}
4538	return NULL;
4539}
4540
/*
 * Append HT40 channel pairs derived from the hal's 40MHz table.
 * For each hal entry with a matching HT20 extension channel already
 * in chans[], add an HT40U entry for the primary and an HT40D entry
 * for the extension; *nchans is advanced accordingly.
 */
static void
addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	const struct ieee80211_channel *extc;
	const struct mwl_hal_channel *hc;
	int i;

	c = &chans[*nchans];

	/* NB: the HT40U/HT40D variants are or'd in below */
	flags &= ~IEEE80211_CHAN_HT;
	for (i = 0; i < ci->nchannels; i++) {
		/*
		 * Each entry defines an HT40 channel pair; find the
		 * extension channel above and then insert the pair.
		 */
		hc = &ci->channels[i];
		extc = findchannel(chans, *nchans, hc->freq+20,
		    flags | IEEE80211_CHAN_HT20);
		if (extc != NULL) {
			if (*nchans >= maxchans)
				break;
			addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U,
			    hc->ieee, hc->maxTxPow);
			c->ic_extieee = extc->ic_ieee;
			c++, (*nchans)++;
			if (*nchans >= maxchans)
				break;
			addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D,
			    extc->ic_ieee, hc->maxTxPow);
			c->ic_extieee = hc->ieee;
			c++, (*nchans)++;
		}
	}
}
4577
/*
 * Append channel entries from a hal channel table.  For 2.4GHz
 * (G/HTG) and HT (HTG/HTA) flavors each hal channel expands into
 * several net80211 entries (b-only, g-only, HT20) by duplicating
 * the just-added entry and rewriting flags in place; the c[-1]/c[0]
 * manipulation below relies on that ordering.
 */
static void
addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	int i;

	c = &chans[*nchans];

	for (i = 0; i < ci->nchannels; i++) {
		const struct mwl_hal_channel *hc;

		hc = &ci->channels[i];
		if (*nchans >= maxchans)
			break;
		addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
		c++, (*nchans)++;
		if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
			/* g channel have a separate b-only entry */
			if (*nchans >= maxchans)
				break;
			/* NB: copy forward, then downgrade the original to b */
			c[0] = c[-1];
			c[-1].ic_flags = IEEE80211_CHAN_B;
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTG) {
			/* HT g channel have a separate g-only entry */
			if (*nchans >= maxchans)
				break;
			c[-1].ic_flags = IEEE80211_CHAN_G;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTA) {
			/* HT a channel have a separate a-only entry */
			if (*nchans >= maxchans)
				break;
			c[-1].ic_flags = IEEE80211_CHAN_A;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
	}
}
4625
4626static void
4627getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
4628	struct ieee80211_channel chans[])
4629{
4630	const MWL_HAL_CHANNELINFO *ci;
4631
4632	/*
4633	 * Use the channel info from the hal to craft the
4634	 * channel list.  Note that we pass back an unsorted
4635	 * list; the caller is required to sort it for us
4636	 * (if desired).
4637	 */
4638	*nchans = 0;
4639	if (mwl_hal_getchannelinfo(sc->sc_mh,
4640	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
4641		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
4642	if (mwl_hal_getchannelinfo(sc->sc_mh,
4643	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
4644		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
4645	if (mwl_hal_getchannelinfo(sc->sc_mh,
4646	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4647		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
4648	if (mwl_hal_getchannelinfo(sc->sc_mh,
4649	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4650		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
4651}
4652
4653static void
4654mwl_getradiocaps(struct ieee80211com *ic,
4655	int maxchans, int *nchans, struct ieee80211_channel chans[])
4656{
4657	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4658
4659	getchannels(sc, maxchans, nchans, chans);
4660}
4661
4662static int
4663mwl_getchannels(struct mwl_softc *sc)
4664{
4665	struct ifnet *ifp = sc->sc_ifp;
4666	struct ieee80211com *ic = ifp->if_l2com;
4667
4668	/*
4669	 * Use the channel info from the hal to craft the
4670	 * channel list for net80211.  Note that we pass up
4671	 * an unsorted list; net80211 will sort it for us.
4672	 */
4673	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
4674	ic->ic_nchans = 0;
4675	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
4676
4677	ic->ic_regdomain.regdomain = SKU_DEBUG;
4678	ic->ic_regdomain.country = CTRY_DEFAULT;
4679	ic->ic_regdomain.location = 'I';
4680	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
4681	ic->ic_regdomain.isocc[1] = ' ';
4682	return (ic->ic_nchans == 0 ? EIO : 0);
4683}
4684#undef IEEE80211_CHAN_HTA
4685#undef IEEE80211_CHAN_HTG
4686
4687#ifdef MWL_DEBUG
/* Debug: dump an rx descriptor (MWL_DEBUG builds only). */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	/*
	 * NB: the swapped status drives only the ok/fail marker;
	 * the STAT field below prints the raw (unswapped) byte.
	 */
	printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n"
	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->pPhysNext),
	    le32toh(ds->pPhysBuffData), ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2))
;
}
4703
/* Debug: dump a tx descriptor (MWL_DEBUG builds only). */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr);
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	/* multi-descriptor (scatter/gather) variant carries arrays */
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
/* raw hex dump of the descriptor, normally compiled out */
{ const uint8_t *cp = (const uint8_t *) ds;
  int i;
  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
	printf("%02x ", cp[i]);
	if (((i+1) % 16) == 0)
		printf("\n");
  }
  printf("\n");
}
#endif
}
4745#endif /* MWL_DEBUG */
4746
4747#if 0
/* Debug: dump all active tx descriptors on a queue (compiled out above). */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		/* sync so we read what the h/w last wrote */
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4766#endif
4767
/*
 * Per-second watchdog: re-arms itself, then checks whether a
 * pending tx timeout (sc_tx_timer) has expired and if so tries
 * to distinguish a hung firmware from a plain transmit timeout.
 */
static void
mwl_watchdog(void *arg)
{
	struct mwl_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
	/* no timeout armed, or it has not yet counted down to zero */
	if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
		return;

	ifp = sc->sc_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) {
		/* NB: a failing keepalive suggests the f/w is wedged */
		if (mwl_hal_setkeepalive(sc->sc_mh))
			if_printf(ifp, "transmit timeout (firmware hung?)\n");
		else
			if_printf(ifp, "transmit timeout\n");
#if 0
		mwl_reset(ifp);
mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
#endif
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		sc->sc_stats.mst_watchdog++;
	}
}
4793
4794#ifdef MWL_DIAGAPI
4795/*
4796 * Diagnostic interface to the HAL.  This is used by various
4797 * tools to do things like retrieve register contents for
4798 * debugging.  The mechanism is intentionally opaque so that
 * it can change frequently w/o concern for compatibility.
4800 */
static int
mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	u_int id = md->md_id & MWL_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = md->md_in_size;
	u_int32_t outsize = md->md_out_size;
	int error = 0;

	/*
	 * NOTE(review): insize/outsize come from userland unbounded;
	 * consider sanity-capping before the M_NOWAIT allocations.
	 */
	if (md->md_id & MWL_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(md->md_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (md->md_id & MWL_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
		/* NB: return no more than the caller asked for */
		if (outsize < md->md_out_size)
			md->md_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, md->md_out_data,
					md->md_out_size);
	} else {
		error = EINVAL;
	}
bad:
	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
4855
4856static int
4857mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
4858{
4859	struct mwl_hal *mh = sc->sc_mh;
4860	int error;
4861
4862	MWL_LOCK_ASSERT(sc);
4863
4864	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
4865		device_printf(sc->sc_dev, "unable to load firmware\n");
4866		return EIO;
4867	}
4868	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
4869		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
4870		return EIO;
4871	}
4872	error = mwl_setupdma(sc);
4873	if (error != 0) {
4874		/* NB: mwl_setupdma prints a msg */
4875		return error;
4876	}
4877	/*
4878	 * Reset tx/rx data structures; after reload we must
4879	 * re-start the driver's notion of the next xmit/recv.
4880	 */
4881	mwl_draintxq(sc);		/* clear pending frames */
4882	mwl_resettxq(sc);		/* rebuild tx q lists */
4883	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
4884	return 0;
4885}
4886#endif /* MWL_DIAGAPI */
4887
/*
 * Driver ioctl entry point: interface flag changes, driver
 * statistics export and (optionally) the diagnostic API.
 */
static int
mwl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, startall;

	switch (cmd) {
	case SIOCSIFFLAGS:
		MWL_LOCK(sc);
		startall = 0;
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call mwl_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			mwl_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid) {
				mwl_init_locked(sc);	/* XXX lose error */
				startall = 1;
			}
		} else
			mwl_stop_locked(ifp, 1);
		MWL_UNLOCK(sc);
		/* NB: start vaps outside the softc lock */
		if (startall)
			ieee80211_start_all(ic);
		break;
	case SIOCGMVSTATS:
		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.mst_tx_packets =
		    ifp->if_get_counter(ifp, IFCOUNTER_OPACKETS);
		sc->sc_stats.mst_rx_packets =
		    ifp->if_get_counter(ifp, IFCOUNTER_IPACKETS);
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsistency in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return copyout(&sc->sc_stats,
				ifr->ifr_data, sizeof (sc->sc_stats));
#ifdef MWL_DIAGAPI
	case SIOCGMVDIAG:
		/* XXX check privs */
		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
	case SIOCGMVRESET:
		/* XXX check privs */
		MWL_LOCK(sc);
		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
		MWL_UNLOCK(sc);
		break;
#endif /* MWL_DIAGAPI */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}
4968
4969#ifdef	MWL_DEBUG
4970static int
4971mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4972{
4973	struct mwl_softc *sc = arg1;
4974	int debug, error;
4975
4976	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4977	error = sysctl_handle_int(oidp, &debug, 0, req);
4978	if (error || !req->newptr)
4979		return error;
4980	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4981	sc->sc_debug = debug & 0x00ffffff;
4982	return 0;
4983}
4984#endif /* MWL_DEBUG */
4985
/* Attach driver sysctl nodes (debug knob on MWL_DEBUG builds only). */
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef	MWL_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	/* seed from the loader/tunable default */
	sc->sc_debug = mwl_debug;
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
4999
5000/*
5001 * Announce various information on device/driver attach.
5002 */
static void
mwl_announce(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* fwReleaseNumber packs four version bytes, major first */
	if_printf(ifp, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
		sc->sc_hwspecs.hwVersion,
		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
		sc->sc_hwspecs.regionCode);
	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;

	if (bootverbose) {
		int i;
		for (i = 0; i <= WME_AC_VO; i++) {
			struct mwl_txq *txq = sc->sc_ac2q[i];
			if_printf(ifp, "Use hw queue %u for %s traffic\n",
				txq->qnum, ieee80211_wme_acnames[i]);
		}
	}
	/* NB: report only when tuned away from the compiled-in defaults */
	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
		if_printf(ifp, "using %u rx descriptors\n", mwl_rxdesc);
	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
		if_printf(ifp, "using %u rx buffers\n", mwl_rxbuf);
	if (bootverbose || mwl_txbuf != MWL_TXBUF)
		if_printf(ifp, "using %u tx buffers\n", mwl_txbuf);
	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
		if_printf(ifp, "multi-bss support\n");
#ifdef MWL_TX_NODROP
	if (bootverbose)
		if_printf(ifp, "no tx drop\n");
#endif
}
5038