if_mwl.c revision 197307
1/*-
2 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
3 * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer,
11 *    without modification.
12 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
14 *    redistribution must be conditioned upon including a substantially
15 *    similar Disclaimer requirement for further binary redistribution.
16 *
17 * NO WARRANTY
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
21 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
23 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
26 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGES.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/dev/mwl/if_mwl.c 197307 2009-09-18 12:25:31Z rpaulo $");
33
34/*
35 * Driver for the Marvell 88W8363 Wireless LAN controller.
36 */
37
38#include "opt_inet.h"
39#include "opt_mwl.h"
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/sysctl.h>
44#include <sys/mbuf.h>
45#include <sys/malloc.h>
46#include <sys/lock.h>
47#include <sys/mutex.h>
48#include <sys/kernel.h>
49#include <sys/socket.h>
50#include <sys/sockio.h>
51#include <sys/errno.h>
52#include <sys/callout.h>
53#include <sys/bus.h>
54#include <sys/endian.h>
55#include <sys/kthread.h>
56#include <sys/taskqueue.h>
57
58#include <machine/bus.h>
59
60#include <net/if.h>
61#include <net/if_dl.h>
62#include <net/if_media.h>
63#include <net/if_types.h>
64#include <net/if_arp.h>
65#include <net/ethernet.h>
66#include <net/if_llc.h>
67
68#include <net/bpf.h>
69
70#include <net80211/ieee80211_var.h>
71#include <net80211/ieee80211_regdomain.h>
72
73#ifdef INET
74#include <netinet/in.h>
75#include <netinet/if_ether.h>
76#endif /* INET */
77
78#include <dev/mwl/if_mwlvar.h>
79#include <dev/mwl/mwldiag.h>
80
81/* idiomatic shorthands: MS = mask+shift, SM = shift+mask */
82#define	MS(v,x)	(((v) & x) >> x##_S)
83#define	SM(v,x)	(((v) << x##_S) & x)
84
85static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
86		    const char name[IFNAMSIZ], int unit, int opmode,
87		    int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
88		    const uint8_t mac[IEEE80211_ADDR_LEN]);
89static void	mwl_vap_delete(struct ieee80211vap *);
90static int	mwl_setupdma(struct mwl_softc *);
91static int	mwl_hal_reset(struct mwl_softc *sc);
92static int	mwl_init_locked(struct mwl_softc *);
93static void	mwl_init(void *);
94static void	mwl_stop_locked(struct ifnet *, int);
95static int	mwl_reset(struct ieee80211vap *, u_long);
96static void	mwl_stop(struct ifnet *, int);
97static void	mwl_start(struct ifnet *);
98static int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
99			const struct ieee80211_bpf_params *);
100static int	mwl_media_change(struct ifnet *);
101static void	mwl_watchdog(struct ifnet *);
102static int	mwl_ioctl(struct ifnet *, u_long, caddr_t);
103static void	mwl_radar_proc(void *, int);
104static void	mwl_chanswitch_proc(void *, int);
105static void	mwl_bawatchdog_proc(void *, int);
106static int	mwl_key_alloc(struct ieee80211vap *,
107			struct ieee80211_key *,
108			ieee80211_keyix *, ieee80211_keyix *);
109static int	mwl_key_delete(struct ieee80211vap *,
110			const struct ieee80211_key *);
111static int	mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *,
112			const uint8_t mac[IEEE80211_ADDR_LEN]);
113static int	mwl_mode_init(struct mwl_softc *);
114static void	mwl_update_mcast(struct ifnet *);
115static void	mwl_update_promisc(struct ifnet *);
116static void	mwl_updateslot(struct ifnet *);
117static int	mwl_beacon_setup(struct ieee80211vap *);
118static void	mwl_beacon_update(struct ieee80211vap *, int);
119#ifdef MWL_HOST_PS_SUPPORT
120static void	mwl_update_ps(struct ieee80211vap *, int);
121static int	mwl_set_tim(struct ieee80211_node *, int);
122#endif
123static int	mwl_dma_setup(struct mwl_softc *);
124static void	mwl_dma_cleanup(struct mwl_softc *);
125static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
126		    const uint8_t [IEEE80211_ADDR_LEN]);
127static void	mwl_node_cleanup(struct ieee80211_node *);
128static void	mwl_node_drain(struct ieee80211_node *);
129static void	mwl_node_getsignal(const struct ieee80211_node *,
130			int8_t *, int8_t *);
131static void	mwl_node_getmimoinfo(const struct ieee80211_node *,
132			struct ieee80211_mimo_info *);
133static int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
134static void	mwl_rx_proc(void *, int);
135static void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
136static int	mwl_tx_setup(struct mwl_softc *, int, int);
137static int	mwl_wme_update(struct ieee80211com *);
138static void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
139static void	mwl_tx_cleanup(struct mwl_softc *);
140static uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
141static int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
142			     struct mwl_txbuf *, struct mbuf *);
143static void	mwl_tx_proc(void *, int);
144static int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
145static void	mwl_draintxq(struct mwl_softc *);
146static void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
147static int	mwl_recv_action(struct ieee80211_node *,
148			const struct ieee80211_frame *,
149			const uint8_t *, const uint8_t *);
150static int	mwl_addba_request(struct ieee80211_node *,
151			struct ieee80211_tx_ampdu *, int dialogtoken,
152			int baparamset, int batimeout);
153static int	mwl_addba_response(struct ieee80211_node *,
154			struct ieee80211_tx_ampdu *, int status,
155			int baparamset, int batimeout);
156static void	mwl_addba_stop(struct ieee80211_node *,
157			struct ieee80211_tx_ampdu *);
158static int	mwl_startrecv(struct mwl_softc *);
159static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
160			struct ieee80211_channel *);
161static int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
162static void	mwl_scan_start(struct ieee80211com *);
163static void	mwl_scan_end(struct ieee80211com *);
164static void	mwl_set_channel(struct ieee80211com *);
165static int	mwl_peerstadb(struct ieee80211_node *,
166			int aid, int staid, MWL_HAL_PEERINFO *pi);
167static int	mwl_localstadb(struct ieee80211vap *);
168static int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
169static int	allocstaid(struct mwl_softc *sc, int aid);
170static void	delstaid(struct mwl_softc *sc, int staid);
171static void	mwl_newassoc(struct ieee80211_node *, int);
172static void	mwl_agestations(void *);
173static int	mwl_setregdomain(struct ieee80211com *,
174			struct ieee80211_regdomain *, int,
175			struct ieee80211_channel []);
176static void	mwl_getradiocaps(struct ieee80211com *, int, int *,
177			struct ieee80211_channel []);
178static int	mwl_getchannels(struct mwl_softc *);
179
180static void	mwl_sysctlattach(struct mwl_softc *);
181static void	mwl_announce(struct mwl_softc *);
182
183SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");
184
185static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
186SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
187	    0, "rx descriptors allocated");
188static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
189SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RW, &mwl_rxbuf,
190	    0, "rx buffers allocated");
191TUNABLE_INT("hw.mwl.rxbuf", &mwl_rxbuf);
192static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
193SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RW, &mwl_txbuf,
194	    0, "tx buffers allocated");
195TUNABLE_INT("hw.mwl.txbuf", &mwl_txbuf);
196static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
197SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RW, &mwl_txcoalesce,
198	    0, "tx buffers to send at once");
199TUNABLE_INT("hw.mwl.txcoalesce", &mwl_txcoalesce);
200static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
201SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RW, &mwl_rxquota,
202	    0, "max rx buffers to process per interrupt");
203TUNABLE_INT("hw.mwl.rxquota", &mwl_rxquota);
204static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
205SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RW, &mwl_rxdmalow,
206	    0, "min free rx buffers before restarting traffic");
207TUNABLE_INT("hw.mwl.rxdmalow", &mwl_rxdmalow);
208
/*
 * Debug support.  With MWL_DEBUG compiled in, hw.mwl.debug is a
 * bitmask (see the enum below) gating DPRINTF output; packet dumps
 * can also be forced at runtime via ifconfig debug + link2.
 * Without MWL_DEBUG all the macros compile away.
 */
#ifdef MWL_DEBUG
static	int mwl_debug = 0;
SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RW, &mwl_debug,
	    0, "control debugging printfs");
TUNABLE_INT("hw.mwl.debug", &mwl_debug);
enum {
	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
	MWL_DEBUG_ANY		= 0xffffffff
};
/* true if frame wh is an 802.11 beacon (mgt type + beacon subtype) */
#define	IS_BEACON(wh) \
    ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
/* dump rx frames when RECV debugging is on (beacons only w/ RECV_ALL) */
#define	IFF_DUMPPKTS_RECV(sc, wh) \
    (((sc->sc_debug & MWL_DEBUG_RECV) && \
      ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh))) || \
     (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
/* dump tx frames when XMIT debugging is on */
#define	IFF_DUMPPKTS_XMIT(sc) \
	((sc->sc_debug & MWL_DEBUG_XMIT) || \
	 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#define	DPRINTF(sc, m, fmt, ...) do {				\
	if (sc->sc_debug & (m))					\
		printf(fmt, __VA_ARGS__);			\
} while (0)
#define	KEYPRINTF(sc, hk, mac) do {				\
	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
		mwl_keyprint(sc, __func__, hk, mac);		\
} while (0)
static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
#else
/* non-debug build: only ifconfig debug+link2 forces packet dumps */
#define	IFF_DUMPPKTS_RECV(sc, wh) \
	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#define	IFF_DUMPPKTS_XMIT(sc) \
	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#define	DPRINTF(sc, m, fmt, ...) do {				\
	(void) sc;						\
} while (0)
#define	KEYPRINTF(sc, k, mac) do {				\
	(void) sc;						\
} while (0)
#endif
264
MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");

/*
 * Each packet has fixed front matter: a 2-byte length
 * of the payload, followed by a 4-address 802.11 header
 * (regardless of the actual header and always w/o any
 * QoS header).  The payload then follows.
 */
struct mwltxrec {
	uint16_t fwlen;				/* payload length for the f/w */
	struct ieee80211_frame_addr4 wh;	/* 4-address 802.11 header */
} __packed;
277
/*
 * Read/Write shorthands for accesses to BAR 0.  Note
 * that all BAR 1 operations are done in the "hal" and
 * there should be no reference to them here.
 */
static __inline uint32_t
RD4(struct mwl_softc *sc, bus_size_t off)
{
	/* 32-bit register read at byte offset off in BAR 0 */
	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
}
288
/* 32-bit register write at byte offset off in BAR 0 */
static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}
294
/*
 * Device attach: allocate the ifnet, attach the HAL, load firmware,
 * set up DMA descriptors, create the taskqueue + tx queues, and
 * register capabilities and method overrides with net80211.
 *
 * Returns 0 on success or an errno.  On failure the bad* labels
 * unwind whatever was acquired and sc_invalid is set so mwl_intr
 * ignores the device (important when the IRQ is shared).
 */
int
mwl_attach(uint16_t devid, struct mwl_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct mwl_hal *mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		return ENOSPC;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	/* NB: the hal owns BAR 1; this file only touches BAR 0 via RD4/WR4 */
	mh = mwl_hal_attach(sc->sc_dev, devid,
	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
	if (mh == NULL) {
		if_printf(ifp, "unable to attach HAL\n");
		error = EIO;
		goto bad;
	}
	sc->sc_mh = mh;
	/*
	 * Load firmware so we can get setup.  We arbitrarily
	 * pick station firmware; we'll re-load firmware as
	 * needed so setting up the wrong mode isn't a big deal.
	 */
	if (mwl_hal_fwload(mh, NULL) != 0) {
		if_printf(ifp, "unable to setup builtin firmware\n");
		error = EIO;
		goto bad1;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		if_printf(ifp, "unable to fetch h/w specs\n");
		error = EIO;
		goto bad1;
	}
	/* build the channel list from the hal's regdomain data */
	error = mwl_getchannels(sc);
	if (error != 0)
		goto bad1;

	sc->sc_txantenna = 0;		/* h/w default */
	sc->sc_rxantenna = 0;		/* h/w default */
	sc->sc_invalid = 0;		/* ready to go, enable int handling */
	sc->sc_ageinterval = MWL_AGEINTERVAL;

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 * We immediately push the information to the firmware
	 * as otherwise it gets upset.
	 */
	error = mwl_dma_setup(sc);
	if (error != 0) {
		if_printf(ifp, "failed to setup descriptors: %d\n", error);
		goto bad1;
	}
	error = mwl_setupdma(sc);	/* push to firmware */
	if (error != 0)			/* NB: mwl_setupdma prints msg */
		goto bad1;

	callout_init(&sc->sc_timer, CALLOUT_MPSAFE);

	/* deferred work (rx, radar, chan switch, BA watchdog) runs here */
	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);

	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);

	/* wire up ifnet entry points */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = mwl_start;
	ifp->if_watchdog = mwl_watchdog;
	ifp->if_ioctl = mwl_ioctl;
	ifp->if_init = mwl_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
#if 0
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
#endif
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_WDS		/* WDS supported */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WME		/* WME/WMM supported */
		| IEEE80211_C_BURST		/* xmit bursting supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
		| IEEE80211_C_DFS		/* DFS supported */
		;

	ic->ic_htcaps =
		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
#if MWL_AGGR_SIZE == 7935
		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#if 0
		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
#endif
		/* s/w capabilities */
		| IEEE80211_HTC_HT		/* HT operation */
		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
		| IEEE80211_HTC_SMPS		/* SMPS available */
		;

	/*
	 * Mark h/w crypto support.
	 * XXX no way to query h/w support.
	 */
	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
			  |  IEEE80211_CRYPTO_AES_CCM
			  |  IEEE80211_CRYPTO_TKIP
			  |  IEEE80211_CRYPTO_TKIPMIC
			  ;
	/*
	 * Transmit requires space in the packet for a special
	 * format transmit record and optional padding between
	 * this record and the payload.  Ask the net80211 layer
	 * to arrange this when encapsulating packets so we can
	 * add it efficiently.
	 */
	ic->ic_headroom = sizeof(struct mwltxrec) -
		sizeof(struct ieee80211_frame);

	/* call MI attach routine. */
	ieee80211_ifattach(ic, sc->sc_hwspecs.macAddr);
	ic->ic_setregdomain = mwl_setregdomain;
	ic->ic_getradiocaps = mwl_getradiocaps;
	/* override default methods */
	ic->ic_raw_xmit = mwl_raw_xmit;
	ic->ic_newassoc = mwl_newassoc;
	ic->ic_updateslot = mwl_updateslot;
	ic->ic_update_mcast = mwl_update_mcast;
	ic->ic_update_promisc = mwl_update_promisc;
	ic->ic_wme.wme_update = mwl_wme_update;

	/* NB: save the net80211 defaults so our hooks can chain to them */
	ic->ic_node_alloc = mwl_node_alloc;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = mwl_node_cleanup;
	sc->sc_node_drain = ic->ic_node_drain;
	ic->ic_node_drain = mwl_node_drain;
	ic->ic_node_getsignal = mwl_node_getsignal;
	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;

	ic->ic_scan_start = mwl_scan_start;
	ic->ic_scan_end = mwl_scan_end;
	ic->ic_set_channel = mwl_set_channel;

	sc->sc_recv_action = ic->ic_recv_action;
	ic->ic_recv_action = mwl_recv_action;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = mwl_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = mwl_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = mwl_addba_stop;

	ic->ic_vap_create = mwl_vap_create;
	ic->ic_vap_delete = mwl_vap_delete;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		MWL_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		MWL_RX_RADIOTAP_PRESENT);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	mwl_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	mwl_announce(sc);
	return 0;
bad2:
	mwl_dma_cleanup(sc);
bad1:
	mwl_hal_detach(mh);
bad:
	if_free(ifp);
	sc->sc_invalid = 1;	/* ISR must ignore the device from now on */
	return error;
}
537
/*
 * Device detach: stop the hardware, then tear down net80211 state,
 * DMA resources, tx queues, and the HAL -- in that order (see the
 * comment below for why the order matters).  Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	mwl_stop(ifp, 1);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	mwl_dma_cleanup(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	if_free(ifp);

	return 0;
}
568
569/*
570 * MAC address handling for multiple BSS on the same radio.
571 * The first vap uses the MAC address from the EEPROM.  For
572 * subsequent vap's we set the U/L bit (bit 1) in the MAC
573 * address and use the next six bits as an index.
574 */
575static void
576assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
577{
578	int i;
579
580	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
581		/* NB: we only do this if h/w supports multiple bssid */
582		for (i = 0; i < 32; i++)
583			if ((sc->sc_bssidmask & (1<<i)) == 0)
584				break;
585		if (i != 0)
586			mac[0] |= (i << 2)|0x2;
587	} else
588		i = 0;
589	sc->sc_bssidmask |= 1<<i;
590	if (i == 0)
591		sc->sc_nbssid0++;
592}
593
594static void
595reclaim_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN])
596{
597	int i = mac[0] >> 2;
598	if (i != 0 || --sc->sc_nbssid0 == 0)
599		sc->sc_bssidmask &= ~(1<<i);
600}
601
/*
 * Create a vap.  AP/mesh/station vaps get a hal vap (and possibly a
 * cloned MAC address); WDS vaps piggy-back on an existing AP vap;
 * monitor vaps need no hal state.  Driver method overrides are
 * installed before ieee80211_vap_attach completes the setup, and
 * the per-mode vap counts are used to derive the overall ic_opmode.
 * Returns the new vap or NULL on failure.
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic,
	const char name[IFNAMSIZ], int unit, int opmode, int flags,
	const uint8_t bssid[IEEE80211_ADDR_LEN],
	const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		/* NB: caller-supplied MACs are not assigned/reclaimed by us */
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		/* unsupported operating modes */
		return NULL;
	}

	mvp = (struct mwl_vap *) malloc(sizeof(struct mwl_vap),
	    M_80211_VAP, M_NOWAIT | M_ZERO);
	if (mvp == NULL) {
		if (hvap != NULL) {
			mwl_hal_delvap(hvap);
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
		}
		/* XXX msg */
		return NULL;
	}
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
	if (hvap != NULL)
		IEEE80211_ADDR_COPY(vap->iv_myaddr, mac);
	/* override with driver methods */
	mvp->mv_newstate = vap->iv_newstate;	/* saved for chaining */
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	/* host-based power save processing for AP-side vaps */
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status);

	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}
741
/*
 * Destroy a vap: quiesce interrupts while the hal vap and its
 * station db entry are torn down, update the per-mode vap counts,
 * reclaim the cloned MAC address, and purge any queued tx frames
 * that reference the vap.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ifnet *parent = vap->iv_ic->ic_ifp;
	struct mwl_softc *sc = parent->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (parent->if_drv_flags & IFF_DRV_RUNNING) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		/* NB: WDS vaps have no hal vap of their own */
		sc->sc_nwdsvaps--;
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	/* re-enable interrupts if we disabled them above */
	if (parent->if_drv_flags & IFF_DRV_RUNNING)
		mwl_hal_intrset(mh, sc->sc_imask);
}
783
784void
785mwl_suspend(struct mwl_softc *sc)
786{
787	struct ifnet *ifp = sc->sc_ifp;
788
789	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
790		__func__, ifp->if_flags);
791
792	mwl_stop(ifp, 1);
793}
794
795void
796mwl_resume(struct mwl_softc *sc)
797{
798	struct ifnet *ifp = sc->sc_ifp;
799
800	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
801		__func__, ifp->if_flags);
802
803	if (ifp->if_flags & IFF_UP)
804		mwl_init(sc);
805}
806
807void
808mwl_shutdown(void *arg)
809{
810	struct mwl_softc *sc = arg;
811
812	mwl_stop(sc->sc_ifp, 1);
813}
814
/*
 * Interrupt handler.  Most of the actual processing is deferred.
 * Reads (and clears) the ISR via the hal and dispatches each cause
 * bit, mostly by queueing tasks on the driver taskqueue.
 */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	/* rx and tx completion are handled in taskqueue context */
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		/* NB: MAC event; currently unhandled */
		;
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		/* NB: tx queue full; currently unhandled */
		;
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}
873
874static void
875mwl_radar_proc(void *arg, int pending)
876{
877	struct mwl_softc *sc = arg;
878	struct ifnet *ifp = sc->sc_ifp;
879	struct ieee80211com *ic = ifp->if_l2com;
880
881	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
882	    __func__, pending);
883
884	sc->sc_stats.mst_radardetect++;
885	/* XXX stop h/w BA streams? */
886
887	IEEE80211_LOCK(ic);
888	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
889	IEEE80211_UNLOCK(ic);
890}
891
892static void
893mwl_chanswitch_proc(void *arg, int pending)
894{
895	struct mwl_softc *sc = arg;
896	struct ifnet *ifp = sc->sc_ifp;
897	struct ieee80211com *ic = ifp->if_l2com;
898
899	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
900	    __func__, pending);
901
902	IEEE80211_LOCK(ic);
903	sc->sc_csapending = 0;
904	ieee80211_csa_completeswitch(ic);
905	IEEE80211_UNLOCK(ic);
906}
907
908static void
909mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
910{
911	struct ieee80211_node *ni = sp->data[0];
912
913	/* send DELBA and drop the stream */
914	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
915}
916
917static void
918mwl_bawatchdog_proc(void *arg, int pending)
919{
920	struct mwl_softc *sc = arg;
921	struct mwl_hal *mh = sc->sc_mh;
922	const MWL_HAL_BASTREAM *sp;
923	uint8_t bitmap, n;
924
925	sc->sc_stats.mst_bawatchdog++;
926
927	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
928		DPRINTF(sc, MWL_DEBUG_AMPDU,
929		    "%s: could not get bitmap\n", __func__);
930		sc->sc_stats.mst_bawatchdog_failed++;
931		return;
932	}
933	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
934	if (bitmap == 0xff) {
935		n = 0;
936		/* disable all ba streams */
937		for (bitmap = 0; bitmap < 8; bitmap++) {
938			sp = mwl_hal_bastream_lookup(mh, bitmap);
939			if (sp != NULL) {
940				mwl_bawatchdog(sp);
941				n++;
942			}
943		}
944		if (n == 0) {
945			DPRINTF(sc, MWL_DEBUG_AMPDU,
946			    "%s: no BA streams found\n", __func__);
947			sc->sc_stats.mst_bawatchdog_empty++;
948		}
949	} else if (bitmap != 0xaa) {
950		/* disable a single ba stream */
951		sp = mwl_hal_bastream_lookup(mh, bitmap);
952		if (sp != NULL) {
953			mwl_bawatchdog(sp);
954		} else {
955			DPRINTF(sc, MWL_DEBUG_AMPDU,
956			    "%s: no BA stream %d\n", __func__, bitmap);
957			sc->sc_stats.mst_bawatchdog_notfound++;
958		}
959	}
960}
961
962/*
963 * Convert net80211 channel to a HAL channel.
964 */
965static void
966mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
967{
968	hc->channel = chan->ic_ieee;
969
970	*(uint32_t *)&hc->channelFlags = 0;
971	if (IEEE80211_IS_CHAN_2GHZ(chan))
972		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
973	else if (IEEE80211_IS_CHAN_5GHZ(chan))
974		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
975	if (IEEE80211_IS_CHAN_HT40(chan)) {
976		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
977		if (IEEE80211_IS_CHAN_HT40U(chan))
978			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
979		else
980			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
981	} else
982		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
983	/* XXX 10MHz channels */
984}
985
986/*
987 * Inform firmware of our tx/rx dma setup.  The BAR 0
988 * writes below are for compatibility with older firmware.
989 * For current firmware we send this information with a
990 * cmd block via mwl_hal_sethwdma.
991 */
992static int
993mwl_setupdma(struct mwl_softc *sc)
994{
995	int error, i;
996
997	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
998	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
999	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);
1000
1001	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
1002		struct mwl_txq *txq = &sc->sc_txq[i];
1003		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
1004		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
1005	}
1006	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
1007	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;
1008
1009	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
1010	if (error != 0) {
1011		device_printf(sc->sc_dev,
1012		    "unable to setup tx/rx dma; hal status %u\n", error);
1013		/* XXX */
1014	}
1015	return error;
1016}
1017
1018/*
1019 * Inform firmware of tx rate parameters.
1020 * Called after a channel change.
1021 */
1022static int
1023mwl_setcurchanrates(struct mwl_softc *sc)
1024{
1025	struct ifnet *ifp = sc->sc_ifp;
1026	struct ieee80211com *ic = ifp->if_l2com;
1027	const struct ieee80211_rateset *rs;
1028	MWL_HAL_TXRATE rates;
1029
1030	memset(&rates, 0, sizeof(rates));
1031	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1032	/* rate used to send management frames */
1033	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1034	/* rate used to send multicast frames */
1035	rates.McastRate = rates.MgtRate;
1036
1037	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1038}
1039
1040/*
1041 * Inform firmware of tx rate parameters.  Called whenever
1042 * user-settable params change and after a channel change.
1043 */
1044static int
1045mwl_setrates(struct ieee80211vap *vap)
1046{
1047	struct mwl_vap *mvp = MWL_VAP(vap);
1048	struct ieee80211_node *ni = vap->iv_bss;
1049	const struct ieee80211_txparam *tp = ni->ni_txparms;
1050	MWL_HAL_TXRATE rates;
1051
1052	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1053
1054	/*
1055	 * Update the h/w rate map.
1056	 * NB: 0x80 for MCS is passed through unchanged
1057	 */
1058	memset(&rates, 0, sizeof(rates));
1059	/* rate used to send management frames */
1060	rates.MgtRate = tp->mgmtrate;
1061	/* rate used to send multicast frames */
1062	rates.McastRate = tp->mcastrate;
1063
1064	/* while here calculate EAPOL fixed rate cookie */
1065	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
1066
1067	return mwl_hal_settxrate(mvp->mv_hvap,
1068	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
1069		RATE_FIXED : RATE_AUTO, &rates);
1070}
1071
1072/*
1073 * Setup a fixed xmit rate cookie for EAPOL frames.
1074 */
1075static void
1076mwl_seteapolformat(struct ieee80211vap *vap)
1077{
1078	struct mwl_vap *mvp = MWL_VAP(vap);
1079	struct ieee80211_node *ni = vap->iv_bss;
1080	enum ieee80211_phymode mode;
1081	uint8_t rate;
1082
1083	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1084
1085	mode = ieee80211_chan2mode(ni->ni_chan);
1086	/*
1087	 * Use legacy rates when operating a mixed HT+non-HT bss.
1088	 * NB: this may violate POLA for sta and wds vap's.
1089	 */
1090	if (mode == IEEE80211_MODE_11NA &&
1091	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1092		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1093	else if (mode == IEEE80211_MODE_11NG &&
1094	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1095		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1096	else
1097		rate = vap->iv_txparms[mode].mgmtrate;
1098
1099	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1100}
1101
1102/*
1103 * Map SKU+country code to region code for radar bin'ing.
1104 */
1105static int
1106mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1107{
1108	switch (rd->regdomain) {
1109	case SKU_FCC:
1110	case SKU_FCC3:
1111		return DOMAIN_CODE_FCC;
1112	case SKU_CA:
1113		return DOMAIN_CODE_IC;
1114	case SKU_ETSI:
1115	case SKU_ETSI2:
1116	case SKU_ETSI3:
1117		if (rd->country == CTRY_SPAIN)
1118			return DOMAIN_CODE_SPAIN;
1119		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1120			return DOMAIN_CODE_FRANCE;
1121		/* XXX force 1.3.1 radar type */
1122		return DOMAIN_CODE_ETSI_131;
1123	case SKU_JAPAN:
1124		return DOMAIN_CODE_MKK;
1125	case SKU_ROW:
1126		return DOMAIN_CODE_DGT;	/* Taiwan */
1127	case SKU_APAC:
1128	case SKU_APAC2:
1129	case SKU_APAC3:
1130		return DOMAIN_CODE_AUS;	/* Australia */
1131	}
1132	/* XXX KOREA? */
1133	return DOMAIN_CODE_FCC;			/* XXX? */
1134}
1135
/*
 * Push global (vap-independent) state to the firmware:
 * antenna config, radio/preamble, WMM, current channel,
 * rate-adaptation mode and region code.
 * Always returns 1 ("success"); the individual hal call
 * results are not checked.
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	return 1;
}
1160
/*
 * Bring the hardware up (with the softc lock held): stop any
 * previous activity, push global state to the firmware, start
 * the receive path, then enable interrupts and mark the
 * interface running.  Returns 0 or an errno (EIO if the
 * firmware reset fails).
 */
static int
mwl_init_locked(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop_locked(ifp, 0);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		if_printf(ifp, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.  MAC_EVENT and QUEUE_EMPTY are
	 * deliberately compiled out (see the #if 0 blocks).
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	mwl_hal_intrset(mh, sc->sc_imask);

	return 0;
}
1220
1221static void
1222mwl_init(void *arg)
1223{
1224	struct mwl_softc *sc = arg;
1225	struct ifnet *ifp = sc->sc_ifp;
1226	struct ieee80211com *ic = ifp->if_l2com;
1227	int error = 0;
1228
1229	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
1230		__func__, ifp->if_flags);
1231
1232	MWL_LOCK(sc);
1233	error = mwl_init_locked(sc);
1234	MWL_UNLOCK(sc);
1235
1236	if (error == 0)
1237		ieee80211_start_all(ic);	/* start all vap's */
1238}
1239
1240static void
1241mwl_stop_locked(struct ifnet *ifp, int disable)
1242{
1243	struct mwl_softc *sc = ifp->if_softc;
1244
1245	DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
1246		__func__, sc->sc_invalid, ifp->if_flags);
1247
1248	MWL_LOCK_ASSERT(sc);
1249	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1250		/*
1251		 * Shutdown the hardware and driver.
1252		 */
1253		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1254		ifp->if_timer = 0;
1255		mwl_draintxq(sc);
1256	}
1257}
1258
1259static void
1260mwl_stop(struct ifnet *ifp, int disable)
1261{
1262	struct mwl_softc *sc = ifp->if_softc;
1263
1264	MWL_LOCK(sc);
1265	mwl_stop_locked(ifp, disable);
1266	MWL_UNLOCK(sc);
1267}
1268
/*
 * (Re)push per-vap state to the firmware: tx rates (when in
 * RUN state), RTS threshold, short GI, and HT protection mode;
 * for ap/mbss/ibss vap's in RUN state also re-setup the beacon.
 * Returns 0 or the error from mwl_beacon_setup.
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_MBSS ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}
1298
1299/*
1300 * Reset the hardware w/o losing operational state.
1301 * Used to to reset or reload hardware state for a vap.
1302 */
1303static int
1304mwl_reset(struct ieee80211vap *vap, u_long cmd)
1305{
1306	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1307	int error = 0;
1308
1309	if (hvap != NULL) {			/* WDS, MONITOR, etc. */
1310		struct ieee80211com *ic = vap->iv_ic;
1311		struct ifnet *ifp = ic->ic_ifp;
1312		struct mwl_softc *sc = ifp->if_softc;
1313		struct mwl_hal *mh = sc->sc_mh;
1314
1315		/* XXX handle DWDS sta vap change */
1316		/* XXX do we need to disable interrupts? */
1317		mwl_hal_intrset(mh, 0);		/* disable interrupts */
1318		error = mwl_reset_vap(vap, vap->iv_state);
1319		mwl_hal_intrset(mh, sc->sc_imask);
1320	}
1321	return error;
1322}
1323
1324/*
1325 * Allocate a tx buffer for sending a frame.  The
1326 * packet is assumed to have the WME AC stored so
1327 * we can use it to select the appropriate h/w queue.
1328 */
1329static struct mwl_txbuf *
1330mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1331{
1332	struct mwl_txbuf *bf;
1333
1334	/*
1335	 * Grab a TX buffer and associated resources.
1336	 */
1337	MWL_TXQ_LOCK(txq);
1338	bf = STAILQ_FIRST(&txq->free);
1339	if (bf != NULL) {
1340		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1341		txq->nfree--;
1342	}
1343	MWL_TXQ_UNLOCK(txq);
1344	if (bf == NULL)
1345		DPRINTF(sc, MWL_DEBUG_XMIT,
1346		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1347	return bf;
1348}
1349
1350/*
1351 * Return a tx buffer to the queue it came from.  Note there
1352 * are two cases because we must preserve the order of buffers
1353 * as it reflects the fixed order of descriptors in memory
1354 * (the firmware pre-fetches descriptors so we cannot reorder).
1355 */
1356static void
1357mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1358{
1359	bf->bf_m = NULL;
1360	bf->bf_node = NULL;
1361	MWL_TXQ_LOCK(txq);
1362	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1363	txq->nfree++;
1364	MWL_TXQ_UNLOCK(txq);
1365}
1366
1367static void
1368mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
1369{
1370	bf->bf_m = NULL;
1371	bf->bf_node = NULL;
1372	MWL_TXQ_LOCK(txq);
1373	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
1374	txq->nfree++;
1375	MWL_TXQ_UNLOCK(txq);
1376}
1377
/*
 * Transmit entry point: drain the interface send queue, map
 * each frame to its WME h/w queue, and hand it to the firmware.
 * Frames are coalesced -- the firmware is only "poked" every
 * mwl_txcoalesce frames and once more at the end of the batch.
 */
static void
mwl_start(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	nqueued = 0;
	for (;;) {
		bf = NULL;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		/*
		 * Grab the node for the destination.
		 * NB: net80211 stashes the node ref in rcvif.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			/* out of buffers: either stall (NODROP) or drop */
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			sc->sc_stats.mst_tx_qstop++;
			/* XXX blocks other traffic */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 * NB: on failure the buffer is recycled to the queue
		 * head (never reached the h/w) and the node ref dropped;
		 * m is not freed here -- presumably mwl_tx_start
		 * reclaims it on error (matches mwl_raw_xmit).
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			ifp->if_oerrors++;
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host. So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1460
/*
 * Transmit a raw (caller-encapsulated) 802.11 frame; entry
 * point for net80211's raw xmit path.  On error the mbuf and
 * node reference are reclaimed.  Returns 0, ENETDOWN, ENOBUFS
 * or EIO.
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
		ieee80211_free_node(ni);
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		/* XXX blocks other traffic */
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		ieee80211_free_node(ni);
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 * NB: m is not freed on this path; presumably mwl_tx_start
	 * reclaims the mbuf on error (the mwl_start path does the
	 * same) -- confirm against mwl_tx_start.
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		ifp->if_oerrors++;
		mwl_puttxbuf_head(txq, bf);

		ieee80211_free_node(ni);
		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host. So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1520
1521static int
1522mwl_media_change(struct ifnet *ifp)
1523{
1524	struct ieee80211vap *vap = ifp->if_softc;
1525	int error;
1526
1527	error = ieee80211_media_change(ifp);
1528	/* NB: only the fixed rate can change and that doesn't need a reset */
1529	if (error == ENETRESET) {
1530		mwl_setrates(vap);
1531		error = 0;
1532	}
1533	return error;
1534}
1535
#ifdef MWL_DEBUG
/*
 * Pretty-print a hal key for debugging: slot index, cipher
 * name, key bytes, peer mac, TKIP rx/tx mic keys and flags.
 * NB: ciphers[] is indexed directly by hk->keyTypeId, which
 * assumes the WEP/TKIP/AES type ids are 0/1/2 -- confirm
 * against the hal's KEY_TYPE_ID_* definitions.
 */
static void
mwl_keyprint(struct mwl_softc *sc, const char *tag,
	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	static const char *ciphers[] = {
		"WEP",
		"TKIP",
		"AES-CCM",
	};
	int i, n;

	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
	/* NB: key bytes dumped via the aes member regardless of cipher */
	for (i = 0, n = hk->keyLen; i < n; i++)
		printf(" %02x", hk->key.aes[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
		printf(" %s", "rxmic");
		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
			printf(" %02x", hk->key.tkip.rxMic[i]);
		printf(" txmic");
		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
			printf(" %02x", hk->key.tkip.txMic[i]);
	}
	printf(" flags 0x%x\n", hk->keyFlags);
}
#endif
1563
1564/*
1565 * Allocate a key cache slot for a unicast key.  The
1566 * firmware handles key allocation and every station is
1567 * guaranteed key space so we are always successful.
1568 */
1569static int
1570mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
1571	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1572{
1573	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1574
1575	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
1576	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
1577		if (!(&vap->iv_nw_keys[0] <= k &&
1578		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
1579			/* should not happen */
1580			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1581				"%s: bogus group key\n", __func__);
1582			return 0;
1583		}
1584		/* give the caller what they requested */
1585		*keyix = *rxkeyix = k - vap->iv_nw_keys;
1586	} else {
1587		/*
1588		 * Firmware handles key allocation.
1589		 */
1590		*keyix = *rxkeyix = 0;
1591	}
1592	return 1;
1593}
1594
1595/*
1596 * Delete a key entry allocated by mwl_key_alloc.
1597 */
1598static int
1599mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
1600{
1601	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1602	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1603	MWL_HAL_KEYVAL hk;
1604	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
1605	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1606
1607	if (hvap == NULL) {
1608		if (vap->iv_opmode != IEEE80211_M_WDS) {
1609			/* XXX monitor mode? */
1610			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1611			    "%s: no hvap for opmode %d\n", __func__,
1612			    vap->iv_opmode);
1613			return 0;
1614		}
1615		hvap = MWL_VAP(vap)->mv_ap_hvap;
1616	}
1617
1618	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
1619	    __func__, k->wk_keyix);
1620
1621	memset(&hk, 0, sizeof(hk));
1622	hk.keyIndex = k->wk_keyix;
1623	switch (k->wk_cipher->ic_cipher) {
1624	case IEEE80211_CIPHER_WEP:
1625		hk.keyTypeId = KEY_TYPE_ID_WEP;
1626		break;
1627	case IEEE80211_CIPHER_TKIP:
1628		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1629		break;
1630	case IEEE80211_CIPHER_AES_CCM:
1631		hk.keyTypeId = KEY_TYPE_ID_AES;
1632		break;
1633	default:
1634		/* XXX should not happen */
1635		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1636		    __func__, k->wk_cipher->ic_cipher);
1637		return 0;
1638	}
1639	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
1640}
1641
1642static __inline int
1643addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1644{
1645	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1646		if (k->wk_flags & IEEE80211_KEY_XMIT)
1647			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1648		if (k->wk_flags & IEEE80211_KEY_RECV)
1649			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1650		return 1;
1651	} else
1652		return 0;
1653}
1654
1655/*
1656 * Set the key cache contents for the specified key.  Key cache
1657 * slot(s) must already have been allocated by mwl_key_alloc.
1658 */
1659static int
1660mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
1661	const uint8_t mac[IEEE80211_ADDR_LEN])
1662{
1663#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
1664/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
1665#define	IEEE80211_IS_STATICKEY(k) \
1666	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
1667	 (GRPXMIT|IEEE80211_KEY_RECV))
1668	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1669	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1670	const struct ieee80211_cipher *cip = k->wk_cipher;
1671	const uint8_t *macaddr;
1672	MWL_HAL_KEYVAL hk;
1673
1674	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
1675		("s/w crypto set?"));
1676
1677	if (hvap == NULL) {
1678		if (vap->iv_opmode != IEEE80211_M_WDS) {
1679			/* XXX monitor mode? */
1680			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1681			    "%s: no hvap for opmode %d\n", __func__,
1682			    vap->iv_opmode);
1683			return 0;
1684		}
1685		hvap = MWL_VAP(vap)->mv_ap_hvap;
1686	}
1687	memset(&hk, 0, sizeof(hk));
1688	hk.keyIndex = k->wk_keyix;
1689	switch (cip->ic_cipher) {
1690	case IEEE80211_CIPHER_WEP:
1691		hk.keyTypeId = KEY_TYPE_ID_WEP;
1692		hk.keyLen = k->wk_keylen;
1693		if (k->wk_keyix == vap->iv_def_txkey)
1694			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
1695		if (!IEEE80211_IS_STATICKEY(k)) {
1696			/* NB: WEP is never used for the PTK */
1697			(void) addgroupflags(&hk, k);
1698		}
1699		break;
1700	case IEEE80211_CIPHER_TKIP:
1701		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1702		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
1703		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
1704		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
1705		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
1706		if (!addgroupflags(&hk, k))
1707			hk.keyFlags |= KEY_FLAG_PAIRWISE;
1708		break;
1709	case IEEE80211_CIPHER_AES_CCM:
1710		hk.keyTypeId = KEY_TYPE_ID_AES;
1711		hk.keyLen = k->wk_keylen;
1712		if (!addgroupflags(&hk, k))
1713			hk.keyFlags |= KEY_FLAG_PAIRWISE;
1714		break;
1715	default:
1716		/* XXX should not happen */
1717		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1718		    __func__, k->wk_cipher->ic_cipher);
1719		return 0;
1720	}
1721	/*
1722	 * NB: tkip mic keys get copied here too; the layout
1723	 *     just happens to match that in ieee80211_key.
1724	 */
1725	memcpy(hk.key.aes, k->wk_key, hk.keyLen);
1726
1727	/*
1728	 * Locate address of sta db entry for writing key;
1729	 * the convention unfortunately is somewhat different
1730	 * than how net80211, hostapd, and wpa_supplicant think.
1731	 */
1732	if (vap->iv_opmode == IEEE80211_M_STA) {
1733		/*
1734		 * NB: keys plumbed before the sta reaches AUTH state
1735		 * will be discarded or written to the wrong sta db
1736		 * entry because iv_bss is meaningless.  This is ok
1737		 * (right now) because we handle deferred plumbing of
1738		 * WEP keys when the sta reaches AUTH state.
1739		 */
1740		macaddr = vap->iv_bss->ni_bssid;
1741		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
1742			/* XXX plumb to local sta db too for static key wep */
1743			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
1744		}
1745	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
1746	    vap->iv_state != IEEE80211_S_RUN) {
1747		/*
1748		 * Prior to RUN state a WDS vap will not it's BSS node
1749		 * setup so we will plumb the key to the wrong mac
1750		 * address (it'll be our local address).  Workaround
1751		 * this for the moment by grabbing the correct address.
1752		 */
1753		macaddr = vap->iv_des_bssid;
1754	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
1755		macaddr = vap->iv_myaddr;
1756	else
1757		macaddr = mac;
1758	KEYPRINTF(sc, &hk, macaddr);
1759	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
1760#undef IEEE80211_IS_STATICKEY
1761#undef GRPXMIT
1762}
1763
/*
 * Unaligned little endian access: assemble 16/32-bit values
 * byte-by-byte so the pointer need not be naturally aligned.
 */
#define LE_READ_2(p)				\
	((uint16_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8)))
#define LE_READ_4(p)				\
	((uint32_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8) |	\
	  (((const uint8_t *)(p))[2] << 16) |	\
	  (((const uint8_t *)(p))[3] << 24)))
1775
1776/*
1777 * Set the multicast filter contents into the hardware.
1778 * XXX f/w has no support; just defer to the os.
1779 */
1780static void
1781mwl_setmcastfilter(struct mwl_softc *sc)
1782{
1783	struct ifnet *ifp = sc->sc_ifp;
1784#if 0
1785	struct ether_multi *enm;
1786	struct ether_multistep estep;
1787	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
1788	uint8_t *mp;
1789	int nmc;
1790
1791	mp = macs;
1792	nmc = 0;
1793	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
1794	while (enm != NULL) {
1795		/* XXX Punt on ranges. */
1796		if (nmc == MWL_HAL_MCAST_MAX ||
1797		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
1798			ifp->if_flags |= IFF_ALLMULTI;
1799			return;
1800		}
1801		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
1802		mp += IEEE80211_ADDR_LEN, nmc++;
1803		ETHER_NEXT_MULTI(estep, enm);
1804	}
1805	ifp->if_flags &= ~IFF_ALLMULTI;
1806	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
1807#else
1808	/* XXX no mcast filter support; we get everything */
1809	ifp->if_flags |= IFF_ALLMULTI;
1810#endif
1811}
1812
1813static int
1814mwl_mode_init(struct mwl_softc *sc)
1815{
1816	struct ifnet *ifp = sc->sc_ifp;
1817	struct ieee80211com *ic = ifp->if_l2com;
1818	struct mwl_hal *mh = sc->sc_mh;
1819
1820	/*
1821	 * NB: Ignore promisc in hostap mode; it's set by the
1822	 * bridge.  This is wrong but we have no way to
1823	 * identify internal requests (from the bridge)
1824	 * versus external requests such as for tcpdump.
1825	 */
1826	mwl_hal_setpromisc(mh, (ifp->if_flags & IFF_PROMISC) &&
1827	    ic->ic_opmode != IEEE80211_M_HOSTAP);
1828	mwl_setmcastfilter(sc);
1829
1830	return 0;
1831}
1832
1833/*
1834 * Callback from the 802.11 layer after a multicast state change.
1835 */
1836static void
1837mwl_update_mcast(struct ifnet *ifp)
1838{
1839	struct mwl_softc *sc = ifp->if_softc;
1840
1841	mwl_setmcastfilter(sc);
1842}
1843
1844/*
1845 * Callback from the 802.11 layer after a promiscuous mode change.
1846 * Note this interface does not check the operating mode as this
1847 * is an internal callback and we are expected to honor the current
1848 * state (e.g. this is used for setting the interface in promiscuous
1849 * mode when operating in hostap mode to do ACS).
1850 */
1851static void
1852mwl_update_promisc(struct ifnet *ifp)
1853{
1854	struct mwl_softc *sc = ifp->if_softc;
1855
1856	mwl_hal_setpromisc(sc->sc_mh, (ifp->if_flags & IFF_PROMISC) != 0);
1857}
1858
1859/*
1860 * Callback from the 802.11 layer to update the slot time
1861 * based on the current setting.  We use it to notify the
1862 * firmware of ERP changes and the f/w takes care of things
1863 * like slot time and preamble.
1864 */
1865static void
1866mwl_updateslot(struct ifnet *ifp)
1867{
1868	struct mwl_softc *sc = ifp->if_softc;
1869	struct ieee80211com *ic = ifp->if_l2com;
1870	struct mwl_hal *mh = sc->sc_mh;
1871	int prot;
1872
1873	/* NB: can be called early; suppress needless cmds */
1874	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1875		return;
1876
1877	/*
1878	 * Calculate the ERP flags.  The firwmare will use
1879	 * this to carry out the appropriate measures.
1880	 */
1881	prot = 0;
1882	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
1883		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
1884			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
1885		if (ic->ic_flags & IEEE80211_F_USEPROT)
1886			prot |= IEEE80211_ERP_USE_PROTECTION;
1887		if (ic->ic_flags & IEEE80211_F_USEBARKER)
1888			prot |= IEEE80211_ERP_LONG_PREAMBLE;
1889	}
1890
1891	DPRINTF(sc, MWL_DEBUG_RESET,
1892	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
1893	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
1894	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
1895	    ic->ic_flags);
1896
1897	mwl_hal_setgprot(mh, prot);
1898}
1899
1900/*
1901 * Setup the beacon frame.
1902 */
1903static int
1904mwl_beacon_setup(struct ieee80211vap *vap)
1905{
1906	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1907	struct ieee80211_node *ni = vap->iv_bss;
1908	struct ieee80211_beacon_offsets bo;
1909	struct mbuf *m;
1910
1911	m = ieee80211_beacon_alloc(ni, &bo);
1912	if (m == NULL)
1913		return ENOBUFS;
1914	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1915	m_free(m);
1916
1917	return 0;
1918}
1919
1920/*
1921 * Update the beacon frame in response to a change.
1922 */
1923static void
1924mwl_beacon_update(struct ieee80211vap *vap, int item)
1925{
1926	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1927	struct ieee80211com *ic = vap->iv_ic;
1928
1929	KASSERT(hvap != NULL, ("no beacon"));
1930	switch (item) {
1931	case IEEE80211_BEACON_ERP:
1932		mwl_updateslot(ic->ic_ifp);
1933		break;
1934	case IEEE80211_BEACON_HTINFO:
1935		mwl_hal_setnprotmode(hvap,
1936		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1937		break;
1938	case IEEE80211_BEACON_CAPS:
1939	case IEEE80211_BEACON_WME:
1940	case IEEE80211_BEACON_APPIE:
1941	case IEEE80211_BEACON_CSA:
1942		break;
1943	case IEEE80211_BEACON_TIM:
1944		/* NB: firmware always forms TIM */
1945		return;
1946	}
1947	/* XXX retain beacon frame and update */
1948	mwl_beacon_setup(vap);
1949}
1950
1951static void
1952mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1953{
1954	bus_addr_t *paddr = (bus_addr_t*) arg;
1955	KASSERT(error == 0, ("error %u on bus_dma callback", error));
1956	*paddr = segs->ds_addr;
1957}
1958
1959#ifdef MWL_HOST_PS_SUPPORT
1960/*
1961 * Handle power save station occupancy changes.
1962 */
1963static void
1964mwl_update_ps(struct ieee80211vap *vap, int nsta)
1965{
1966	struct mwl_vap *mvp = MWL_VAP(vap);
1967
1968	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
1969		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
1970	mvp->mv_last_ps_sta = nsta;
1971}
1972
1973/*
1974 * Handle associated station power save state changes.
1975 */
1976static int
1977mwl_set_tim(struct ieee80211_node *ni, int set)
1978{
1979	struct ieee80211vap *vap = ni->ni_vap;
1980	struct mwl_vap *mvp = MWL_VAP(vap);
1981
1982	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1983		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1984		    IEEE80211_AID(ni->ni_associd), set);
1985		return 1;
1986	} else
1987		return 0;
1988}
1989#endif /* MWL_HOST_PS_SUPPORT */
1990
1991static int
1992mwl_desc_setup(struct mwl_softc *sc, const char *name,
1993	struct mwl_descdma *dd,
1994	int nbuf, size_t bufsize, int ndesc, size_t descsize)
1995{
1996	struct ifnet *ifp = sc->sc_ifp;
1997	uint8_t *ds;
1998	int error;
1999
2000	DPRINTF(sc, MWL_DEBUG_RESET,
2001	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
2002	    __func__, name, nbuf, (uintmax_t) bufsize,
2003	    ndesc, (uintmax_t) descsize);
2004
2005	dd->dd_name = name;
2006	dd->dd_desc_len = nbuf * ndesc * descsize;
2007
2008	/*
2009	 * Setup DMA descriptor area.
2010	 */
2011	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
2012		       PAGE_SIZE, 0,		/* alignment, bounds */
2013		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2014		       BUS_SPACE_MAXADDR,	/* highaddr */
2015		       NULL, NULL,		/* filter, filterarg */
2016		       dd->dd_desc_len,		/* maxsize */
2017		       1,			/* nsegments */
2018		       dd->dd_desc_len,		/* maxsegsize */
2019		       BUS_DMA_ALLOCNOW,	/* flags */
2020		       NULL,			/* lockfunc */
2021		       NULL,			/* lockarg */
2022		       &dd->dd_dmat);
2023	if (error != 0) {
2024		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
2025		return error;
2026	}
2027
2028	/* allocate descriptors */
2029	error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
2030	if (error != 0) {
2031		if_printf(ifp, "unable to create dmamap for %s descriptors, "
2032			"error %u\n", dd->dd_name, error);
2033		goto fail0;
2034	}
2035
2036	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
2037				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2038				 &dd->dd_dmamap);
2039	if (error != 0) {
2040		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
2041			"error %u\n", nbuf * ndesc, dd->dd_name, error);
2042		goto fail1;
2043	}
2044
2045	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
2046				dd->dd_desc, dd->dd_desc_len,
2047				mwl_load_cb, &dd->dd_desc_paddr,
2048				BUS_DMA_NOWAIT);
2049	if (error != 0) {
2050		if_printf(ifp, "unable to map %s descriptors, error %u\n",
2051			dd->dd_name, error);
2052		goto fail2;
2053	}
2054
2055	ds = dd->dd_desc;
2056	memset(ds, 0, dd->dd_desc_len);
2057	DPRINTF(sc, MWL_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
2058	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
2059	    (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
2060
2061	return 0;
2062fail2:
2063	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
2064fail1:
2065	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
2066fail0:
2067	bus_dma_tag_destroy(dd->dd_dmat);
2068	memset(dd, 0, sizeof(*dd));
2069	return error;
2070#undef DS2PHYS
2071}
2072
/*
 * Undo mwl_desc_setup: unload the map, release the descriptor
 * memory, destroy the map and tag (in that order), then clear
 * the descdma state so repeated cleanup is harmless.
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	memset(dd, 0, sizeof(*dd));
}
2083
2084/*
2085 * Construct a tx q's free list.  The order of entries on
2086 * the list must reflect the physical layout of tx descriptors
2087 * because the firmware pre-fetches descriptors.
2088 *
2089 * XXX might be better to use indices into the buffer array.
2090 */
2091static void
2092mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2093{
2094	struct mwl_txbuf *bf;
2095	int i;
2096
2097	bf = txq->dma.dd_bufptr;
2098	STAILQ_INIT(&txq->free);
2099	for (i = 0; i < mwl_txbuf; i++, bf++)
2100		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2101	txq->nfree = i;
2102}
2103
/* Translate a descriptor's kernel VA into its bus (DMA) address. */
#define	DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2106
2107static int
2108mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
2109{
2110	struct ifnet *ifp = sc->sc_ifp;
2111	int error, bsize, i;
2112	struct mwl_txbuf *bf;
2113	struct mwl_txdesc *ds;
2114
2115	error = mwl_desc_setup(sc, "tx", &txq->dma,
2116			mwl_txbuf, sizeof(struct mwl_txbuf),
2117			MWL_TXDESC, sizeof(struct mwl_txdesc));
2118	if (error != 0)
2119		return error;
2120
2121	/* allocate and setup tx buffers */
2122	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
2123	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2124	if (bf == NULL) {
2125		if_printf(ifp, "malloc of %u tx buffers failed\n",
2126			mwl_txbuf);
2127		return ENOMEM;
2128	}
2129	txq->dma.dd_bufptr = bf;
2130
2131	ds = txq->dma.dd_desc;
2132	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
2133		bf->bf_desc = ds;
2134		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
2135		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
2136				&bf->bf_dmamap);
2137		if (error != 0) {
2138			if_printf(ifp, "unable to create dmamap for tx "
2139				"buffer %u, error %u\n", i, error);
2140			return error;
2141		}
2142	}
2143	mwl_txq_reset(sc, txq);
2144	return 0;
2145}
2146
2147static void
2148mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2149{
2150	struct mwl_txbuf *bf;
2151	int i;
2152
2153	bf = txq->dma.dd_bufptr;
2154	for (i = 0; i < mwl_txbuf; i++, bf++) {
2155		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2156		KASSERT(bf->bf_node == NULL, ("node on free list"));
2157		if (bf->bf_dmamap != NULL)
2158			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2159	}
2160	STAILQ_INIT(&txq->free);
2161	txq->nfree = 0;
2162	if (txq->dma.dd_bufptr != NULL) {
2163		free(txq->dma.dd_bufptr, M_MWLDEV);
2164		txq->dma.dd_bufptr = NULL;
2165	}
2166	if (txq->dma.dd_desc_len != 0)
2167		mwl_desc_cleanup(sc, &txq->dma);
2168}
2169
2170static int
2171mwl_rxdma_setup(struct mwl_softc *sc)
2172{
2173	struct ifnet *ifp = sc->sc_ifp;
2174	int error, jumbosize, bsize, i;
2175	struct mwl_rxbuf *bf;
2176	struct mwl_jumbo *rbuf;
2177	struct mwl_rxdesc *ds;
2178	caddr_t data;
2179
2180	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2181			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2182			1, sizeof(struct mwl_rxdesc));
2183	if (error != 0)
2184		return error;
2185
2186	/*
2187	 * Receive is done to a private pool of jumbo buffers.
2188	 * This allows us to attach to mbuf's and avoid re-mapping
2189	 * memory on each rx we post.  We allocate a large chunk
2190	 * of memory and manage it in the driver.  The mbuf free
2191	 * callback method is used to reclaim frames after sending
2192	 * them up the stack.  By default we allocate 2x the number of
2193	 * rx descriptors configured so we have some slop to hold
2194	 * us while frames are processed.
2195	 */
2196	if (mwl_rxbuf < 2*mwl_rxdesc) {
2197		if_printf(ifp,
2198		    "too few rx dma buffers (%d); increasing to %d\n",
2199		    mwl_rxbuf, 2*mwl_rxdesc);
2200		mwl_rxbuf = 2*mwl_rxdesc;
2201	}
2202	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2203	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2204
2205	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2206		       PAGE_SIZE, 0,		/* alignment, bounds */
2207		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2208		       BUS_SPACE_MAXADDR,	/* highaddr */
2209		       NULL, NULL,		/* filter, filterarg */
2210		       sc->sc_rxmemsize,	/* maxsize */
2211		       1,			/* nsegments */
2212		       sc->sc_rxmemsize,	/* maxsegsize */
2213		       BUS_DMA_ALLOCNOW,	/* flags */
2214		       NULL,			/* lockfunc */
2215		       NULL,			/* lockarg */
2216		       &sc->sc_rxdmat);
2217	error = bus_dmamap_create(sc->sc_rxdmat, BUS_DMA_NOWAIT, &sc->sc_rxmap);
2218	if (error != 0) {
2219		if_printf(ifp, "could not create rx DMA map\n");
2220		return error;
2221	}
2222
2223	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2224				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2225				 &sc->sc_rxmap);
2226	if (error != 0) {
2227		if_printf(ifp, "could not alloc %ju bytes of rx DMA memory\n",
2228		    (uintmax_t) sc->sc_rxmemsize);
2229		return error;
2230	}
2231
2232	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2233				sc->sc_rxmem, sc->sc_rxmemsize,
2234				mwl_load_cb, &sc->sc_rxmem_paddr,
2235				BUS_DMA_NOWAIT);
2236	if (error != 0) {
2237		if_printf(ifp, "could not load rx DMA map\n");
2238		return error;
2239	}
2240
2241	/*
2242	 * Allocate rx buffers and set them up.
2243	 */
2244	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2245	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2246	if (bf == NULL) {
2247		if_printf(ifp, "malloc of %u rx buffers failed\n", bsize);
2248		return error;
2249	}
2250	sc->sc_rxdma.dd_bufptr = bf;
2251
2252	STAILQ_INIT(&sc->sc_rxbuf);
2253	ds = sc->sc_rxdma.dd_desc;
2254	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2255		bf->bf_desc = ds;
2256		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2257		/* pre-assign dma buffer */
2258		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2259		/* NB: tail is intentional to preserve descriptor order */
2260		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2261	}
2262
2263	/*
2264	 * Place remainder of dma memory buffers on the free list.
2265	 */
2266	SLIST_INIT(&sc->sc_rxfree);
2267	for (; i < mwl_rxbuf; i++) {
2268		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2269		rbuf = MWL_JUMBO_DATA2BUF(data);
2270		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2271		sc->sc_nrxfree++;
2272	}
2273	MWL_RXFREE_INIT(sc);
2274	return 0;
2275}
2276#undef DS2PHYS
2277
/*
 * Release all rx DMA state: unload and free the jumbo buffer pool,
 * destroy its map, free the rx buffer shadow array, tear down the
 * descriptor area, and destroy the free-list lock.  NULL/zero
 * guards make this safe against partially-built state.
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	if (sc->sc_rxmap != NULL)
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxmap != NULL) {
		bus_dmamap_destroy(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmap = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
	MWL_RXFREE_DESTROY(sc);
}
2299
2300static int
2301mwl_dma_setup(struct mwl_softc *sc)
2302{
2303	int error, i;
2304
2305	error = mwl_rxdma_setup(sc);
2306	if (error != 0) {
2307		mwl_rxdma_cleanup(sc);
2308		return error;
2309	}
2310
2311	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2312		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2313		if (error != 0) {
2314			mwl_dma_cleanup(sc);
2315			return error;
2316		}
2317	}
2318	return 0;
2319}
2320
2321static void
2322mwl_dma_cleanup(struct mwl_softc *sc)
2323{
2324	int i;
2325
2326	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2327		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2328	mwl_rxdma_cleanup(sc);
2329}
2330
2331static struct ieee80211_node *
2332mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2333{
2334	struct ieee80211com *ic = vap->iv_ic;
2335	struct mwl_softc *sc = ic->ic_ifp->if_softc;
2336	const size_t space = sizeof(struct mwl_node);
2337	struct mwl_node *mn;
2338
2339	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2340	if (mn == NULL) {
2341		/* XXX stat+msg */
2342		return NULL;
2343	}
2344	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2345	return &mn->mn_node;
2346}
2347
/*
 * net80211 node cleanup hook: if the node has a hardware station
 * id, remove the firmware station db entry and release the id
 * before chaining to the saved net80211 cleanup method.
 */
static void
mwl_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
        struct mwl_softc *sc = ic->ic_ifp->if_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
	    __func__, ni, ni->ni_ic, mn->mn_staid);

	if (mn->mn_staid != 0) {
		struct ieee80211vap *vap = ni->ni_vap;

		if (mn->mn_hvap != NULL) {
			/* NB: in sta mode the db entry is keyed by our addr */
			if (vap->iv_opmode == IEEE80211_M_STA)
				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
			else
				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
		}
		/*
		 * NB: legacy WDS peer sta db entry is installed using
		 * the associate ap's hvap; use it again to delete it.
		 * XXX can vap be NULL?
		 */
		else if (vap->iv_opmode == IEEE80211_M_WDS &&
		    MWL_VAP(vap)->mv_ap_hvap != NULL)
			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
			    ni->ni_macaddr);
		/* return the station id to the allocator */
		delstaid(sc, mn->mn_staid);
		mn->mn_staid = 0;
	}
	sc->sc_node_cleanup(ni);
}
2381
2382/*
2383 * Reclaim rx dma buffers from packets sitting on the ampdu
2384 * reorder queue for a station.  We replace buffers with a
2385 * system cluster (if available).
2386 */
2387static void
2388mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
2389{
2390#if 0
2391	int i, n, off;
2392	struct mbuf *m;
2393	void *cl;
2394
2395	n = rap->rxa_qframes;
2396	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
2397		m = rap->rxa_m[i];
2398		if (m == NULL)
2399			continue;
2400		n--;
2401		/* our dma buffers have a well-known free routine */
2402		if ((m->m_flags & M_EXT) == 0 ||
2403		    m->m_ext.ext_free != mwl_ext_free)
2404			continue;
2405		/*
2406		 * Try to allocate a cluster and move the data.
2407		 */
2408		off = m->m_data - m->m_ext.ext_buf;
2409		if (off + m->m_pkthdr.len > MCLBYTES) {
2410			/* XXX no AMSDU for now */
2411			continue;
2412		}
2413		cl = pool_cache_get_paddr(&mclpool_cache, 0,
2414		    &m->m_ext.ext_paddr);
2415		if (cl != NULL) {
2416			/*
2417			 * Copy the existing data to the cluster, remove
2418			 * the rx dma buffer, and attach the cluster in
2419			 * its place.  Note we preserve the offset to the
2420			 * data so frames being bridged can still prepend
2421			 * their headers without adding another mbuf.
2422			 */
2423			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
2424			MEXTREMOVE(m);
2425			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
2426			/* setup mbuf like _MCLGET does */
2427			m->m_flags |= M_CLUSTER | M_EXT_RW;
2428			_MOWNERREF(m, M_EXT | M_CLUSTER);
2429			/* NB: m_data is clobbered by MEXTADDR, adjust */
2430			m->m_data += off;
2431		}
2432	}
2433#endif
2434}
2435
2436/*
2437 * Callback to reclaim resources.  We first let the
2438 * net80211 layer do it's thing, then if we are still
2439 * blocked by a lack of rx dma buffers we walk the ampdu
2440 * reorder q's to reclaim buffers by copying to a system
2441 * cluster.
2442 */
2443static void
2444mwl_node_drain(struct ieee80211_node *ni)
2445{
2446	struct ieee80211com *ic = ni->ni_ic;
2447        struct mwl_softc *sc = ic->ic_ifp->if_softc;
2448	struct mwl_node *mn = MWL_NODE(ni);
2449
2450	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2451	    __func__, ni, ni->ni_vap, mn->mn_staid);
2452
2453	/* NB: call up first to age out ampdu q's */
2454	sc->sc_node_drain(ni);
2455
2456	/* XXX better to not check low water mark? */
2457	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2458	    (ni->ni_flags & IEEE80211_NODE_HT)) {
2459		uint8_t tid;
2460		/*
2461		 * Walk the reorder q and reclaim rx dma buffers by copying
2462		 * the packet contents into clusters.
2463		 */
2464		for (tid = 0; tid < WME_NUM_TID; tid++) {
2465			struct ieee80211_rx_ampdu *rap;
2466
2467			rap = &ni->ni_rx_ampdu[tid];
2468			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2469				continue;
2470			if (rap->rxa_qframes)
2471				mwl_ampdu_rxdma_reclaim(rap);
2472		}
2473	}
2474}
2475
/*
 * net80211 signal report hook: rssi comes from the standard
 * net80211 averaging; the noise floor is a fixed placeholder
 * (the smoothed per-antenna value is not yet wired up).
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2491
2492/*
2493 * Convert Hardware per-antenna rssi info to common format:
2494 * Let a1, a2, a3 represent the amplitudes per chain
2495 * Let amax represent max[a1, a2, a3]
2496 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2497 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2498 * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2499 * maintain some extra precision.
2500 *
2501 * Values are stored in .5 db format capped at 127.
2502 */
2503static void
2504mwl_node_getmimoinfo(const struct ieee80211_node *ni,
2505	struct ieee80211_mimo_info *mi)
2506{
2507#define	CVT(_dst, _src) do {						\
2508	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
2509	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
2510} while (0)
2511	static const int8_t logdbtbl[32] = {
2512	       0,   0,  24,  38,  48,  56,  62,  68,
2513	      72,  76,  80,  83,  86,  89,  92,  94,
2514	      96,  98, 100, 102, 104, 106, 107, 109,
2515	     110, 112, 113, 115, 116, 117, 118, 119
2516	};
2517	const struct mwl_node *mn = MWL_NODE_CONST(ni);
2518	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
2519	uint32_t rssi_max;
2520
2521	rssi_max = mn->mn_ai.rssi_a;
2522	if (mn->mn_ai.rssi_b > rssi_max)
2523		rssi_max = mn->mn_ai.rssi_b;
2524	if (mn->mn_ai.rssi_c > rssi_max)
2525		rssi_max = mn->mn_ai.rssi_c;
2526
2527	CVT(mi->rssi[0], mn->mn_ai.rssi_a);
2528	CVT(mi->rssi[1], mn->mn_ai.rssi_b);
2529	CVT(mi->rssi[2], mn->mn_ai.rssi_c);
2530
2531	mi->noise[0] = mn->mn_ai.nf_a;
2532	mi->noise[1] = mn->mn_ai.nf_b;
2533	mi->noise[2] = mn->mn_ai.nf_c;
2534#undef CVT
2535}
2536
2537static __inline void *
2538mwl_getrxdma(struct mwl_softc *sc)
2539{
2540	struct mwl_jumbo *buf;
2541	void *data;
2542
2543	/*
2544	 * Allocate from jumbo pool.
2545	 */
2546	MWL_RXFREE_LOCK(sc);
2547	buf = SLIST_FIRST(&sc->sc_rxfree);
2548	if (buf == NULL) {
2549		DPRINTF(sc, MWL_DEBUG_ANY,
2550		    "%s: out of rx dma buffers\n", __func__);
2551		sc->sc_stats.mst_rx_nodmabuf++;
2552		data = NULL;
2553	} else {
2554		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2555		sc->sc_nrxfree--;
2556		data = MWL_JUMBO_BUF2DATA(buf);
2557	}
2558	MWL_RXFREE_UNLOCK(sc);
2559	return data;
2560}
2561
2562static __inline void
2563mwl_putrxdma(struct mwl_softc *sc, void *data)
2564{
2565	struct mwl_jumbo *buf;
2566
2567	/* XXX bounds check data */
2568	MWL_RXFREE_LOCK(sc);
2569	buf = MWL_JUMBO_DATA2BUF(data);
2570	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2571	sc->sc_nrxfree++;
2572	MWL_RXFREE_UNLOCK(sc);
2573}
2574
/*
 * (Re)initialize an rx descriptor, attaching a dma buffer if the
 * slot doesn't already have one.  When no buffer is available the
 * descriptor is handed to the firmware marked OS-owned so it is
 * skipped; returns ENOMEM in that case, 0 on success.
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	/* hand ownership to the firmware last, then push to memory */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2613
2614static void
2615mwl_ext_free(void *data, void *arg)
2616{
2617	struct mwl_softc *sc = arg;
2618
2619	/* XXX bounds check data */
2620	mwl_putrxdma(sc, data);
2621	/*
2622	 * If we were previously blocked by a lack of rx dma buffers
2623	 * check if we now have enough to restart rx interrupt handling.
2624	 * NB: we know we are called at splvm which is above splnet.
2625	 */
2626	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
2627		sc->sc_rxblocked = 0;
2628		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
2629	}
2630}
2631
/*
 * 802.11 BlockAckReq (BAR) control frame header, up through the
 * transmitter address; the BAR control, sequence, and FCS fields
 * follow on the wire but are not needed for header-size math.
 */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];		/* frame control */
	u_int8_t	i_dur[2];		/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2639
2640/*
2641 * Like ieee80211_anyhdrsize, but handles BAR frames
2642 * specially so the logic below to piece the 802.11
2643 * header together works.
2644 */
2645static __inline int
2646mwl_anyhdrsize(const void *data)
2647{
2648	const struct ieee80211_frame *wh = data;
2649
2650	if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2651		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2652		case IEEE80211_FC0_SUBTYPE_CTS:
2653		case IEEE80211_FC0_SUBTYPE_ACK:
2654			return sizeof(struct ieee80211_frame_ack);
2655		case IEEE80211_FC0_SUBTYPE_BAR:
2656			return sizeof(struct mwl_frame_bar);
2657		}
2658		return sizeof(struct ieee80211_frame_min);
2659	} else
2660		return ieee80211_hdrsize(data);
2661}
2662
2663static void
2664mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2665{
2666	const struct ieee80211_frame *wh;
2667	struct ieee80211_node *ni;
2668
2669	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2670	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2671	if (ni != NULL) {
2672		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2673		ieee80211_free_node(ni);
2674	}
2675}
2676
2677/*
2678 * Convert hardware signal strength to rssi.  The value
2679 * provided by the device has the noise floor added in;
2680 * we need to compensate for this but we don't have that
2681 * so we use a fixed value.
2682 *
2683 * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2684 * offset is already set as part of the initial gain.  This
2685 * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2686 */
2687static __inline int
2688cvtrssi(uint8_t ssi)
2689{
2690	int rssi = (int) ssi + 8;
2691	/* XXX hack guess until we have a real noise floor */
2692	rssi = 2*(87 - rssi);	/* NB: .5 dBm units */
2693	return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi);
2694}
2695
/*
 * Rx deferred processing (taskqueue).  Walk the descriptor ring
 * starting at sc_rxnext, processing at most mwl_rxquota frames:
 * for each firmware-completed descriptor, reconstruct the 802.11
 * header, loan the dma buffer to an mbuf, and dispatch it to
 * net80211.  The descriptor is re-armed with a replacement buffer
 * from the jumbo pool; if the pool is exhausted the rx interrupt
 * is masked until mwl_ext_free refills it past the low-water mark.
 */
static void
mwl_rx_proc(void *arg, int npending)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((((const struct ieee80211_frame *)wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct mwl_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_qosframe_addr4 *wh4;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX fixed noise floor */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* stop at the first descriptor the firmware hasn't finished */
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			ifp->if_ierrors++;
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free,
		    data, sc, 0, EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		m->m_pkthdr.rcvif = ifp;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh)) {
			if (IEEE80211_DIR_DSTODS(wh)) {
				wh4 = mtod(m,
				    struct ieee80211_qosframe_addr4*);
				*(uint16_t *)wh4->i_qos = ds->QosCtrl;
			} else {
				*(uint16_t *)wh->i_qos = ds->QosCtrl;
			}
		}
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_WEP | IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		ifp->if_ipackets++;

		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	sc->sc_rxnext = bf;

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
	    !IFQ_IS_EMPTY(&ifp->if_snd)) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(ifp);
	}
#undef IEEE80211_DIR_DSTODS
}
2931
2932static void
2933mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
2934{
2935	struct mwl_txbuf *bf, *bn;
2936	struct mwl_txdesc *ds;
2937
2938	MWL_TXQ_LOCK_INIT(sc, txq);
2939	txq->qnum = qnum;
2940	txq->txpri = 0;	/* XXX */
2941#if 0
2942	/* NB: q setup by mwl_txdma_setup XXX */
2943	STAILQ_INIT(&txq->free);
2944#endif
2945	STAILQ_FOREACH(bf, &txq->free, bf_list) {
2946		bf->bf_txq = txq;
2947
2948		ds = bf->bf_desc;
2949		bn = STAILQ_NEXT(bf, bf_list);
2950		if (bn == NULL)
2951			bn = STAILQ_FIRST(&txq->free);
2952		ds->pPhysNext = htole32(bn->bf_daddr);
2953	}
2954	STAILQ_INIT(&txq->active);
2955}
2956
2957/*
2958 * Setup a hardware data transmit queue for the specified
2959 * access control.  We record the mapping from ac's
2960 * to h/w queues for use by mwl_tx_start.
2961 */
2962static int
2963mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2964{
2965#define	N(a)	(sizeof(a)/sizeof(a[0]))
2966	struct mwl_txq *txq;
2967
2968	if (ac >= N(sc->sc_ac2q)) {
2969		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2970			ac, N(sc->sc_ac2q));
2971		return 0;
2972	}
2973	if (mvtype >= MWL_NUM_TX_QUEUES) {
2974		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2975			mvtype, MWL_NUM_TX_QUEUES);
2976		return 0;
2977	}
2978	txq = &sc->sc_txq[mvtype];
2979	mwl_txq_init(sc, txq, mvtype);
2980	sc->sc_ac2q[ac] = txq;
2981	return 1;
2982#undef N
2983}
2984
2985/*
2986 * Update WME parameters for a transmit queue.
2987 */
static int
mwl_txq_update(struct mwl_softc *sc, int ac)
{
#define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_txq *txq = sc->sc_ac2q[ac];
	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
	struct mwl_hal *mh = sc->sc_mh;
	int aifs, cwmin, cwmax, txoplim;

	/* pull current channel WME parameters for this AC and convert
	 * the log2-encoded contention window bounds to absolute values */
	aifs = wmep->wmep_aifsn;
	/* XXX in sta mode need to pass log values for cwmin/max */
	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */

	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
		device_printf(sc->sc_dev, "unable to update hardware queue "
			"parameters for %s traffic!\n",
			ieee80211_wme_acnames[ac]);
		return 0;		/* NB: caller maps 0 to an error */
	}
	return 1;
#undef MWL_EXPONENT_TO_VALUE
}
3014
3015/*
3016 * Callback from the 802.11 layer to update WME parameters.
3017 */
3018static int
3019mwl_wme_update(struct ieee80211com *ic)
3020{
3021	struct mwl_softc *sc = ic->ic_ifp->if_softc;
3022
3023	return !mwl_txq_update(sc, WME_AC_BE) ||
3024	    !mwl_txq_update(sc, WME_AC_BK) ||
3025	    !mwl_txq_update(sc, WME_AC_VI) ||
3026	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
3027}
3028
3029/*
3030 * Reclaim resources for a setup queue.
3031 */
static void
mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	/* XXX hal work? */
	/* NB: buffers/descriptors are reclaimed by the dma teardown
	 * path; only the queue lock needs undoing here */
	MWL_TXQ_LOCK_DESTROY(txq);
}
3038
3039/*
3040 * Reclaim all tx queue resources.
3041 */
3042static void
3043mwl_tx_cleanup(struct mwl_softc *sc)
3044{
3045	int i;
3046
3047	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3048		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
3049}
3050
/*
 * Map an outbound mbuf chain for DMA, collapsing it to fit the
 * fixed number of tx descriptors if necessary.  On success the
 * (possibly replaced) mbuf is recorded in bf->bf_m and 0 is
 * returned; on failure the mbuf is freed and an errno returned.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = MWL_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_DONTWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_DONTWAIT);
#endif
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* retry the mapping with the compacted chain */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3112
/*
 * Map an IEEE legacy rate (in 500Kb/s units) to the firmware's
 * rate code.  Unrecognized rates map to code 0 (1Mb/s).
 */
static __inline int
mwl_cvtlegacyrate(int rate)
{
	static const struct {
		int ieee;	/* rate, 500Kb/s units */
		int code;	/* f/w rate code */
	} ratemap[] = {
		{   2,  0 }, {   4,  1 }, {  11,  2 }, {  22,  3 },
		{  44,  4 }, {  12,  5 }, {  18,  6 }, {  24,  7 },
		{  36,  8 }, {  48,  9 }, {  72, 10 }, {  96, 11 },
		{ 108, 12 },
	};
	int i;

	for (i = 0; i < (int)(sizeof(ratemap)/sizeof(ratemap[0])); i++)
		if (ratemap[i].ieee == rate)
			return ratemap[i].code;
	return 0;
}
3133
3134/*
3135 * Calculate fixed tx rate information per client state;
3136 * this value is suitable for writing to the Format field
3137 * of a tx descriptor.
3138 */
static uint16_t
mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
{
	uint16_t fmt;

	/* NB: antenna config is fixed at 3; extension-channel sense
	 * comes from the node's operating channel */
	fmt = SM(3, EAGLE_TXD_ANTENNA)
	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
		fmt |= EAGLE_TXD_FORMAT_HT
		    /* NB: 0x80 implicitly stripped from ucastrate */
		    | SM(rate, EAGLE_TXD_RATE);
		/* XXX short/long GI may be wrong; re-check */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			/* 40MHz channel: honor the peer's SGI40 capability */
			fmt |= EAGLE_TXD_CHW_40
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		} else {
			/* 20MHz channel: honor the peer's SGI20 capability */
			fmt |= EAGLE_TXD_CHW_20
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		}
	} else {			/* legacy rate */
		fmt |= EAGLE_TXD_FORMAT_LEGACY
		    | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
		    | EAGLE_TXD_CHW_20
		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
	}
	return fmt;
}
3171
/*
 * Format a frame for transmit and hand it to the firmware.
 * On success the tx buffer takes ownership of the node reference
 * and the frame is queued to the h/w; on failure the mbuf is freed
 * and an errno is returned (the node reference is left for the
 * caller to reclaim).
 */
static int
mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
    struct mbuf *m0)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, iswep, ismcast;
	int hdrlen, copyhdrlen, pktlen;
	struct mwl_txdesc *ds;
	struct mwl_txq *txq;
	struct ieee80211_frame *wh;
	struct mwltxrec *tr;
	struct mwl_node *mn;
	uint16_t qos;
#if MWL_TXDESC > 1
	int i;
#endif

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	copyhdrlen = hdrlen;
	pktlen = m0->m_pkthdr.len;
	/* extract the QoS control word; the 4-address form carries it
	 * at a different offset and it is excluded from the header copy */
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		if (IEEE80211_DIR_DSTODS(wh)) {
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
			copyhdrlen -= sizeof(qos);
		} else
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe *) wh)->i_qos);
	} else
		qos = 0;

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 *
		 * NB: we do this even though the firmware will ignore
		 *     what we've done for WEP and TKIP as we need the
		 *     ExtIV filled in for CCMP and this also adjusts
		 *     the headers which simplifies our work below.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			m_freem(m0);
			return EIO;
		}
		/*
		 * Adjust the packet length for the crypto additions
		 * done during encap and any other bits that the f/w
		 * will add later on.
		 */
		cip = k->wk_cipher;
		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;	/* XXX */
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
#if 0
		sc->sc_tx_th.wt_rate = ds->DataRate;
#endif
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}
	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 * We are assured the mbuf has sufficient headroom to
	 * prepend in-place by the setup of ic_headroom in
	 * mwl_attach.
	 */
	if (hdrlen < sizeof(struct mwltxrec)) {
		const int space = sizeof(struct mwltxrec) - hdrlen;
		if (M_LEADINGSPACE(m0) < space) {
			/* NB: should never happen */
			device_printf(sc->sc_dev,
			    "not enough headroom, need %d found %zd, "
			    "m_flags 0x%x m_len %d\n",
			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
			ieee80211_dump_pkt(ic,
			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
			m_freem(m0);
			sc->sc_stats.mst_tx_noheadroom++;
			return EIO;
		}
		/* NB: cannot fail; leading space was verified above */
		M_PREPEND(m0, space, M_NOWAIT);
	}
	tr = mtod(m0, struct mwltxrec *);
	if (wh != (struct ieee80211_frame *) &tr->wh)
		ovbcopy(wh, &tr->wh, hdrlen);
	/*
	 * Note: the "firmware length" is actually the length
	 * of the fully formed "802.11 payload".  That is, it's
	 * everything except for the 802.11 header.  In particular
	 * this includes all crypto material including the MIC!
	 */
	tr->fwlen = htole16(pktlen - hdrlen);

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = mwl_tx_dmasetup(sc, bf, m0);
	if (error != 0) {
		/* NB: stat collected in mwl_tx_dmasetup */
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: unable to setup dma\n", __func__);
		return error;
	}
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	tr = mtod(m0, struct mwltxrec *);
	wh = (struct ieee80211_frame *)&tr->wh;

	/*
	 * Formulate tx descriptor.
	 */
	ds = bf->bf_desc;
	txq = bf->bf_txq;

	ds->QosCtrl = qos;			/* NB: already little-endian */
#if MWL_TXDESC == 1
	/*
	 * NB: multiframes should be zero because the descriptors
	 *     are initialized to zero.  This should handle the case
	 *     where the driver is built with MWL_TXDESC=1 but we are
	 *     using firmware with multi-segment support.
	 */
	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
#else
	ds->multiframes = htole32(bf->bf_nseg);
	ds->PktLen = htole16(m0->m_pkthdr.len);
	for (i = 0; i < bf->bf_nseg; i++) {
		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
	}
#endif
	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
	ds->Format = 0;
	ds->pad = 0;
	ds->ack_wcb_addr = 0;

	mn = MWL_NODE(ni);
	/*
	 * Select transmit rate.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		sc->sc_stats.mst_tx_mgmt++;
		/* fall thru... */
	case IEEE80211_FC0_TYPE_CTL:
		/* NB: assign to BE q to avoid bursting */
		ds->TxPriority = MWL_WME_AC_BE;
		break;
	case IEEE80211_FC0_TYPE_DATA:
		if (!ismcast) {
			const struct ieee80211_txparam *tp = ni->ni_txparms;
			/*
			 * EAPOL frames get forced to a fixed rate and w/o
			 * aggregation; otherwise check for any fixed rate
			 * for the client (may depend on association state).
			 */
			if (m0->m_flags & M_EAPOL) {
				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
				ds->Format = mvp->mv_eapolformat;
				ds->pad = htole16(
				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
				/* XXX pre-calculate per node */
				ds->Format = htole16(
				    mwl_calcformat(tp->ucastrate, ni));
				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
			}
			/* route the frame to the queue of any matching
			 * BA stream, else to the AC's default queue */
			/* NB: EAPOL frames will never have qos set */
			if (qos == 0)
				ds->TxPriority = txq->qnum;
#if MWL_MAXBA > 3
			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
				ds->TxPriority = mn->mn_ba[3].txq;
#endif
#if MWL_MAXBA > 2
			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
				ds->TxPriority = mn->mn_ba[2].txq;
#endif
#if MWL_MAXBA > 1
			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
				ds->TxPriority = mn->mn_ba[1].txq;
#endif
#if MWL_MAXBA > 0
			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
				ds->TxPriority = mn->mn_ba[0].txq;
#endif
			else
				ds->TxPriority = txq->qnum;
		} else
			ds->TxPriority = txq->qnum;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		sc->sc_stats.mst_tx_badframetype++;
		m_freem(m0);
		return EIO;
	}

	if (IFF_DUMPPKTS_XMIT(sc))
		ieee80211_dump_pkt(ic,
		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);

	/* hand ownership of the descriptor to the firmware and arm
	 * the watchdog; everything below must be done under the q lock */
	MWL_TXQ_LOCK(txq);
	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ifp->if_opackets++;
	ifp->if_timer = 5;
	MWL_TXQ_UNLOCK(txq);

	return 0;
#undef	IEEE80211_DIR_DSTODS
}
3420
/*
 * Map a legacy rate index from the firmware back to an IEEE rate
 * (in 500Kb/s units).  Out-of-range indices return 0.  Negative
 * indices were previously rejected only by accident (the signed
 * rix was promoted to unsigned for the comparison against the
 * size_t table bound); make the bounds check explicit.
 */
static __inline int
mwl_cvtlegacyrix(int rix)
{
	static const int ieeerates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };

	if (rix < 0 || rix >= (int)(sizeof(ieeerates)/sizeof(ieeerates[0])))
		return 0;
	return ieeerates[rix];
}
3430
3431/*
3432 * Process completed xmit descriptors from the specified queue.
3433 */
static int
mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
{
/* NB: currently unused; kept for reference to the mcast status bits */
#define	EAGLE_TXD_STATUS_MCAST \
	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;
	struct ieee80211_node *ni;
	struct mwl_node *an;
	int nreaped;
	uint32_t status;

	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
	for (nreaped = 0;; nreaped++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		ds = bf->bf_desc;
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* stop at the first descriptor the f/w still owns */
		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);

#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
			mwl_printtxbuf(bf, txq->qnum, nreaped);
#endif
		ni = bf->bf_node;
		if (ni != NULL) {
			an = MWL_NODE(ni);
			status = le32toh(ds->Status);
			if (status & EAGLE_TXD_STATUS_OK) {
				uint16_t Format = le16toh(ds->Format);
				uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);

				sc->sc_stats.mst_ant_tx[txant]++;
				if (status & EAGLE_TXD_STATUS_OK_RETRY)
					sc->sc_stats.mst_tx_retries++;
				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
					sc->sc_stats.mst_tx_mretries++;
				if (txq->qnum >= MWL_WME_AC_VO)
					ic->ic_wme.wme_hipri_traffic++;
				/* record the f/w's actual tx rate; legacy
				 * rate codes are converted back to IEEE
				 * units, HT rates are tagged as MCS */
				ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
					ni->ni_txrate = mwl_cvtlegacyrix(
					    ni->ni_txrate);
				} else
					ni->ni_txrate |= IEEE80211_RATE_MCS;
				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
			} else {
				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
					sc->sc_stats.mst_tx_linkerror++;
				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
					sc->sc_stats.mst_tx_xretries++;
				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
					sc->sc_stats.mst_tx_aging++;
				if (bf->bf_m->m_flags & M_FF)
					sc->sc_stats.mst_ff_txerr++;
			}
			/*
			 * Do any tx complete callback.  Note this must
			 * be done before releasing the node reference.
			 * XXX no way to figure out if frame was ACK'd
			 */
			if (bf->bf_m->m_flags & M_TXCB) {
				/* XXX strip fw len in case header inspected */
				m_adj(bf->bf_m, sizeof(uint16_t));
				ieee80211_process_callback(ni, bf->bf_m,
					(status & EAGLE_TXD_STATUS_OK) == 0);
			}
			/*
			 * Reclaim reference to node.
			 *
			 * NB: the node may be reclaimed here if, for example
			 *     this is a DEAUTH message that was sent and the
			 *     node was timed out due to inactivity.
			 */
			ieee80211_free_node(ni);
		}
		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);

		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
	return nreaped;
#undef EAGLE_TXD_STATUS_MCAST
}
3534
3535/*
3536 * Deferred processing of transmit interrupt; special-cased
3537 * for four hardware queues, 0-3.
3538 */
3539static void
3540mwl_tx_proc(void *arg, int npending)
3541{
3542	struct mwl_softc *sc = arg;
3543	struct ifnet *ifp = sc->sc_ifp;
3544	int nreaped;
3545
3546	/*
3547	 * Process each active queue.
3548	 */
3549	nreaped = 0;
3550	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3551		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3552	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3553		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3554	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3555		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3556	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3557		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3558
3559	if (nreaped != 0) {
3560		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3561		ifp->if_timer = 0;
3562		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
3563			/* NB: kick fw; the tx thread may have been preempted */
3564			mwl_hal_txstart(sc->sc_mh, 0);
3565			mwl_start(ifp);
3566		}
3567	}
3568}
3569
/*
 * Reclaim every frame queued to the given tx queue, releasing
 * node references and mbufs.  Completion status is ignored.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ifnet *ifp = sc->sc_ifp;
			struct ieee80211com *ic = ifp->if_l2com;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3614
3615/*
3616 * Drain the transmit queues and reclaim resources.
3617 */
3618static void
3619mwl_draintxq(struct mwl_softc *sc)
3620{
3621	struct ifnet *ifp = sc->sc_ifp;
3622	int i;
3623
3624	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3625		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3626	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3627	ifp->if_timer = 0;
3628}
3629
3630#ifdef MWL_DIAGAPI
3631/*
3632 * Reset the transmit queues to a pristine state after a fw download.
3633 */
3634static void
3635mwl_resettxq(struct mwl_softc *sc)
3636{
3637	int i;
3638
3639	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3640		mwl_txq_reset(sc, &sc->sc_txq[i]);
3641}
3642#endif /* MWL_DIAGAPI */
3643
3644/*
3645 * Clear the transmit queues of any frames submitted for the
3646 * specified vap.  This is done when the vap is deleted so we
3647 * don't potentially reference the vap after it is gone.
3648 * Note we cannot remove the frames; we only reclaim the node
3649 * reference.
3650 */
static void
mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
{
	struct mwl_txq *txq;
	struct mwl_txbuf *bf;
	int i;

	/* walk every queue's active list and drop the node reference
	 * of any frame belonging to the dying vap; the frames
	 * themselves stay queued (see the block comment above) */
	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
		txq = &sc->sc_txq[i];
		MWL_TXQ_LOCK(txq);
		STAILQ_FOREACH(bf, &txq->active, bf_list) {
			struct ieee80211_node *ni = bf->bf_node;
			if (ni != NULL && ni->ni_vap == vap) {
				bf->bf_node = NULL;
				ieee80211_free_node(ni);
			}
		}
		MWL_TXQ_UNLOCK(txq);
	}
}
3671
3672static int
3673mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
3674	const uint8_t *frm, const uint8_t *efrm)
3675{
3676	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3677	const struct ieee80211_action *ia;
3678
3679	ia = (const struct ieee80211_action *) frm;
3680	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3681	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3682		const struct ieee80211_action_ht_mimopowersave *mps =
3683		    (const struct ieee80211_action_ht_mimopowersave *) ia;
3684
3685		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3686		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3687		    MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
3688		return 0;
3689	} else
3690		return sc->sc_recv_action(ni, wh, frm, efrm);
3691}
3692
/*
 * Intercept an outbound ADDBA request: reserve a firmware BA
 * stream slot for the (node, tid) pair before letting net80211
 * send the request.  Returns 0 (no aggregation) if no slot or
 * f/w stream is available.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 */
		/* NB: the #if chain below forms one if/else ladder;
		 * slots are probed from highest index down */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, WME_AC_TO_TID(tap->txa_ac), ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3764
/*
 * Handle the peer's ADDBA response: on success commit the
 * pre-allocated firmware BA stream, otherwise return the stream
 * resources.  In all cases hand off to the saved net80211 handler.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, AC %d\n",
		    __func__, tap->txa_ac);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d AC %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_ac, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_ac, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d AC %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_ac, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3831
3832static void
3833mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3834{
3835	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3836	struct mwl_bastate *bas;
3837
3838	bas = tap->txa_private;
3839	if (bas != NULL) {
3840		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3841		    __func__, bas->bastream);
3842		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3843		mwl_bastream_free(bas);
3844		tap->txa_private = NULL;
3845	}
3846	sc->sc_addba_stop(ni, tap);
3847}
3848
3849/*
3850 * Setup the rx data structures.  This should only be
3851 * done once or we may get out of sync with the firmware.
3852 */
static int
mwl_startrecv(struct mwl_softc *sc)
{
	/* one-shot: the rx ring is built only once per attach
	 * (see the block comment above) */
	if (!sc->sc_recvsetup) {
		struct mwl_rxbuf *bf, *prev;
		struct mwl_rxdesc *ds;

		prev = NULL;
		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			int error = mwl_rxbuf_init(sc, bf);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_RECV,
					"%s: mwl_rxbuf_init failed %d\n",
					__func__, error);
				return error;
			}
			/* chain each descriptor to its predecessor */
			if (prev != NULL) {
				ds = prev->bf_desc;
				ds->pPhysNext = htole32(bf->bf_daddr);
			}
			prev = bf;
		}
		/* close the ring: last descriptor points at the first */
		if (prev != NULL) {
			ds = prev->bf_desc;
			ds->pPhysNext =
			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
		}
		sc->sc_recvsetup = 1;
	}
	mwl_mode_init(sc);		/* set filters, etc. */
	return 0;
}
3885
3886static MWL_HAL_APMODE
3887mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3888{
3889	MWL_HAL_APMODE mode;
3890
3891	if (IEEE80211_IS_CHAN_HT(chan)) {
3892		if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3893			mode = AP_MODE_N_ONLY;
3894		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3895			mode = AP_MODE_AandN;
3896		else if (vap->iv_flags & IEEE80211_F_PUREG)
3897			mode = AP_MODE_GandN;
3898		else
3899			mode = AP_MODE_BandGandN;
3900	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3901		if (vap->iv_flags & IEEE80211_F_PUREG)
3902			mode = AP_MODE_G_ONLY;
3903		else
3904			mode = AP_MODE_MIXED;
3905	} else if (IEEE80211_IS_CHAN_B(chan))
3906		mode = AP_MODE_B_ONLY;
3907	else if (IEEE80211_IS_CHAN_A(chan))
3908		mode = AP_MODE_A_ONLY;
3909	else
3910		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3911	return mode;
3912}
3913
3914static int
3915mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
3916{
3917	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
3918	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
3919}
3920
3921/*
3922 * Set/change channels.
3923 */
/*
 * Program the radio for a new channel: interrupts are masked,
 * the channel and tx power are pushed to the hal, rates updated,
 * and the cached radiotap/channel state refreshed before
 * interrupts are re-enabled.
 */
static int
mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
{
	struct mwl_hal *mh = sc->sc_mh;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	MWL_HAL_CHANNEL hchan;
	int maxtxpow;

	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
	    __func__, chan->ic_freq, chan->ic_flags);

	/*
	 * Convert to a HAL channel description with
	 * the flags constrained to reflect the current
	 * operating mode.
	 */
	mwl_mapchan(&hchan, chan);
	mwl_hal_intrset(mh, 0);		/* disable interrupts */
#if 0
	mwl_draintxq(sc);		/* clear pending tx frames */
#endif
	mwl_hal_setchannel(mh, &hchan);
	/*
	 * Tx power is cap'd by the regulatory setting and
	 * possibly a user-set limit.  We pass the min of
	 * these to the hal to apply them to the cal data
	 * for this channel.
	 * XXX min bound?
	 */
	maxtxpow = 2*chan->ic_maxregpower;
	if (maxtxpow > ic->ic_txpowlimit)
		maxtxpow = ic->ic_txpowlimit;
	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
	/* NB: potentially change mcast/mgt rates */
	mwl_setcurchanrates(sc);

	/*
	 * Update internal state.
	 */
	/* record channel info in the radiotap headers we prepend */
	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
	if (IEEE80211_IS_CHAN_A(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
	} else {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
	}
	sc->sc_curchan = hchan;
	mwl_hal_intrset(mh, sc->sc_imask);

	return 0;
}
3981
3982static void
3983mwl_scan_start(struct ieee80211com *ic)
3984{
3985	struct ifnet *ifp = ic->ic_ifp;
3986	struct mwl_softc *sc = ifp->if_softc;
3987
3988	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
3989}
3990
3991static void
3992mwl_scan_end(struct ieee80211com *ic)
3993{
3994	struct ifnet *ifp = ic->ic_ifp;
3995	struct mwl_softc *sc = ifp->if_softc;
3996
3997	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
3998}
3999
4000static void
4001mwl_set_channel(struct ieee80211com *ic)
4002{
4003	struct ifnet *ifp = ic->ic_ifp;
4004	struct mwl_softc *sc = ifp->if_softc;
4005
4006	(void) mwl_chan_set(sc, ic->ic_curchan);
4007}
4008
/*
 * Handle a channel switch request.  We inform the firmware
 * and mark the global state to suppress various actions.
 * NB: we issue only one request to the fw; we may be called
 * multiple times if there are multiple vap's.
 */
4015static void
4016mwl_startcsa(struct ieee80211vap *vap)
4017{
4018	struct ieee80211com *ic = vap->iv_ic;
4019	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4020	MWL_HAL_CHANNEL hchan;
4021
4022	if (sc->sc_csapending)
4023		return;
4024
4025	mwl_mapchan(&hchan, ic->ic_csa_newchan);
4026	/* 1 =>'s quiet channel */
4027	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
4028	sc->sc_csapending = 1;
4029}
4030
/*
 * Plumb any static WEP key for the station.  This is
 * necessary as we must propagate the key from the
 * global key table of the vap to each sta db entry.
 */
4036static void
4037mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4038{
4039	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
4040		IEEE80211_F_PRIVACY &&
4041	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
4042	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
4043		(void) mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac);
4044}
4045
4046static int
4047mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
4048{
4049#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
4050	struct ieee80211vap *vap = ni->ni_vap;
4051	struct mwl_hal_vap *hvap;
4052	int error;
4053
4054	if (vap->iv_opmode == IEEE80211_M_WDS) {
4055		/*
4056		 * WDS vap's do not have a f/w vap; instead they piggyback
4057		 * on an AP vap and we must install the sta db entry and
4058		 * crypto state using that AP's handle (the WDS vap has none).
4059		 */
4060		hvap = MWL_VAP(vap)->mv_ap_hvap;
4061	} else
4062		hvap = MWL_VAP(vap)->mv_hvap;
4063	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
4064	    aid, staid, pi,
4065	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
4066	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
4067	if (error == 0) {
4068		/*
4069		 * Setup security for this station.  For sta mode this is
4070		 * needed even though do the same thing on transition to
4071		 * AUTH state because the call to mwl_hal_newstation
4072		 * clobbers the crypto state we setup.
4073		 */
4074		mwl_setanywepkey(vap, ni->ni_macaddr);
4075	}
4076	return error;
4077#undef WME
4078}
4079
4080static void
4081mwl_setglobalkeys(struct ieee80211vap *vap)
4082{
4083	struct ieee80211_key *wk;
4084
4085	wk = &vap->iv_nw_keys[0];
4086	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
4087		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
4088			(void) mwl_key_set(vap, wk, vap->iv_myaddr);
4089}
4090
/*
 * Convert a legacy rate set to a firmware bitmask.
 */
4094static uint32_t
4095get_rate_bitmap(const struct ieee80211_rateset *rs)
4096{
4097	uint32_t rates;
4098	int i;
4099
4100	rates = 0;
4101	for (i = 0; i < rs->rs_nrates; i++)
4102		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
4103		case 2:	  rates |= 0x001; break;
4104		case 4:	  rates |= 0x002; break;
4105		case 11:  rates |= 0x004; break;
4106		case 22:  rates |= 0x008; break;
4107		case 44:  rates |= 0x010; break;
4108		case 12:  rates |= 0x020; break;
4109		case 18:  rates |= 0x040; break;
4110		case 24:  rates |= 0x080; break;
4111		case 36:  rates |= 0x100; break;
4112		case 48:  rates |= 0x200; break;
4113		case 72:  rates |= 0x400; break;
4114		case 96:  rates |= 0x800; break;
4115		case 108: rates |= 0x1000; break;
4116		}
4117	return rates;
4118}
4119
/*
 * Construct an HT firmware bitmask from an HT rate set.
 */
4123static uint32_t
4124get_htrate_bitmap(const struct ieee80211_htrateset *rs)
4125{
4126	uint32_t rates;
4127	int i;
4128
4129	rates = 0;
4130	for (i = 0; i < rs->rs_nrates; i++) {
4131		if (rs->rs_rates[i] < 16)
4132			rates |= 1<<rs->rs_rates[i];
4133	}
4134	return rates;
4135}
4136
/*
 * Craft station database entry for station.
 * NB: use host byte order here, the hal handles byte swapping.
 */
static MWL_HAL_PEERINFO *
mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
{
	const struct ieee80211vap *vap = ni->ni_vap;

	/* start from a clean slate; fields we don't fill stay zero */
	memset(pi, 0, sizeof(*pi));
	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
	pi->CapInfo = ni->ni_capinfo;
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* HT capabilities, etc */
		pi->HTCapabilitiesInfo = ni->ni_htcap;
		/* XXX pi.HTCapabilitiesInfo */
	        pi->MacHTParamInfo = ni->ni_htparam;
		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
		pi->AddHtInfo.OpMode = ni->ni_htopmode;
		pi->AddHtInfo.stbc = ni->ni_htstbc;

		/* constrain according to local configuration */
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
		/* NB: clear 40MHz cap unless the node is operating HT40 */
		if (ni->ni_chw != 40)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
	}
	return pi;
}
4170
/*
 * Re-create the local sta db entry for a vap to ensure
 * up to date WME state is pushed to the firmware.  Because
 * this resets crypto state this must be followed by a
 * reload of any keys in the global key table.
 */
static int
mwl_localstadb(struct ieee80211vap *vap)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211_node *bss;
	MWL_HAL_PEERINFO pi;
	int error;

	switch (vap->iv_opmode) {
	case IEEE80211_M_STA:
		bss = vap->iv_bss;
		/* NB: peer info is only supplied once associated (RUN);
		 * before that a NULL entry is installed */
		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
		    vap->iv_state == IEEE80211_S_RUN ?
			mkpeerinfo(&pi, bss) : NULL,
		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
		    bss->ni_ies.wme_ie != NULL ?
			WME(bss->ni_ies.wme_ie)->wme_info : 0);
		if (error == 0)
			/* newstation clobbers crypto state; reload keys */
			mwl_setglobalkeys(vap);
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
		if (error == 0)
			mwl_setglobalkeys(vap);
		break;
	default:
		/* other vap types need no local sta db entry */
		error = 0;
		break;
	}
	return error;
#undef WME
}
4212
/*
 * 802.11 state machine hook.  Carries out device-specific work
 * around the net80211 state transition: radar/CSA bookkeeping
 * before the parent method runs, and firmware/station setup
 * once net80211 has updated its state (e.g. iv_bss).
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    vap->iv_ifp->if_xname, __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	/* NB: the age timer is restarted below if we enter RUN */
	callout_stop(&sc->sc_timer);
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    vap->iv_ifp->if_xname, __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    vap->iv_ifp->if_xname, __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    vap->iv_ifp->if_xname, __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		/* NOTE(review): this decrements on any non-RUN/SLEEP
		 * transition of a DWDS vap, even if the matching
		 * increment (done on entering RUN) never happened —
		 * confirm sc_ndwdsvaps cannot go negative here. */
		mwl_hal_setdwds(mh, 0);
bad:
	return error;
}
4366
/*
 * Manage station id's; these are separate from AID's
 * as AID's may have values out of the range of possible
 * station id's acceptable to the firmware.
 */
4372static int
4373allocstaid(struct mwl_softc *sc, int aid)
4374{
4375	int staid;
4376
4377	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
4378		/* NB: don't use 0 */
4379		for (staid = 1; staid < MWL_MAXSTAID; staid++)
4380			if (isclr(sc->sc_staid, staid))
4381				break;
4382	} else
4383		staid = aid;
4384	setbit(sc->sc_staid, staid);
4385	return staid;
4386}
4387
/* Release a station id previously handed out by allocstaid. */
static void
delstaid(struct mwl_softc *sc, int staid)
{
	clrbit(sc->sc_staid, staid);
}
4393
/*
 * Setup driver-specific state for a newly associated node.
 * Note that we're called also on a re-associate, the isnew
 * param tells us if this is the first time or not.
 */
4399static void
4400mwl_newassoc(struct ieee80211_node *ni, int isnew)
4401{
4402	struct ieee80211vap *vap = ni->ni_vap;
4403        struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4404	struct mwl_node *mn = MWL_NODE(ni);
4405	MWL_HAL_PEERINFO pi;
4406	uint16_t aid;
4407	int error;
4408
4409	aid = IEEE80211_AID(ni->ni_associd);
4410	if (isnew) {
4411		mn->mn_staid = allocstaid(sc, aid);
4412		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4413	} else {
4414		mn = MWL_NODE(ni);
4415		/* XXX reset BA stream? */
4416	}
4417	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4418	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4419	error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4420	if (error != 0) {
4421		DPRINTF(sc, MWL_DEBUG_NODE,
4422		    "%s: error %d creating sta db entry\n",
4423		    __func__, error);
4424		/* XXX how to deal with error? */
4425	}
4426}
4427
/*
 * Periodically poke the firmware to age out station state
 * (power save queues, pending tx aggregates).
 */
static void
mwl_agestations(void *arg)
{
	/* arg is the softc supplied to callout_reset in mwl_newstate */
	struct mwl_softc *sc = arg;

	mwl_hal_setkeepalive(sc->sc_mh);
	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
}
4441
4442static const struct mwl_hal_channel *
4443findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4444{
4445	int i;
4446
4447	for (i = 0; i < ci->nchannels; i++) {
4448		const struct mwl_hal_channel *hc = &ci->channels[i];
4449		if (hc->ieee == ieee)
4450			return hc;
4451	}
4452	return NULL;
4453}
4454
/*
 * net80211 regdomain hook: validate a proposed channel list
 * against the hal's calibration data and cap each channel's
 * max tx power.  Returns EINVAL if any channel is unsupported.
 */
static int
mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
	int nchan, struct ieee80211_channel chans[])
{
	struct mwl_softc *sc = ic->ic_ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_CHANNELINFO *ci;
	int i;

	for (i = 0; i < nchan; i++) {
		struct ieee80211_channel *c = &chans[i];
		const struct mwl_hal_channel *hc;

		/* select the hal table for the channel's band/width */
		if (IEEE80211_IS_CHAN_2GHZ(c)) {
			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
			    IEEE80211_IS_CHAN_HT40(c) ?
				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
			    IEEE80211_IS_CHAN_HT40(c) ?
				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
		} else {
			if_printf(ic->ic_ifp,
			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
			return EINVAL;
		}
		/*
		 * Verify channel has cal data and cap tx power.
		 */
		hc = findhalchannel(ci, c->ic_ieee);
		if (hc != NULL) {
			if (c->ic_maxpower > 2*hc->maxTxPow)
				c->ic_maxpower = 2*hc->maxTxPow;
			goto next;
		}
		if (IEEE80211_IS_CHAN_HT40(c)) {
			/*
			 * Look for the extension channel since the
			 * hal table only has the primary channel.
			 */
			hc = findhalchannel(ci, c->ic_extieee);
			if (hc != NULL) {
				if (c->ic_maxpower > 2*hc->maxTxPow)
					c->ic_maxpower = 2*hc->maxTxPow;
				goto next;
			}
		}
		/* no cal data under either lookup; reject the list */
		if_printf(ic->ic_ifp,
		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
		    __func__, c->ic_ieee, c->ic_extieee,
		    c->ic_freq, c->ic_flags);
		return EINVAL;
	next:
		;
	}
	return 0;
}
4513
4514#define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4515#define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4516
/*
 * Fill in a net80211 channel entry from hal data; txpow is in
 * dBm while ic_maxpower is kept in .5 dBm units (hence the 2*).
 */
static void
addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow)
{
	c->ic_freq = freq;
	c->ic_flags = flags;
	c->ic_ieee = ieee;
	c->ic_minpower = 0;
	c->ic_maxpower = 2*txpow;
	c->ic_maxregpower = txpow;
}
4527
4528static const struct ieee80211_channel *
4529findchannel(const struct ieee80211_channel chans[], int nchans,
4530	int freq, int flags)
4531{
4532	const struct ieee80211_channel *c;
4533	int i;
4534
4535	for (i = 0; i < nchans; i++) {
4536		c = &chans[i];
4537		if (c->ic_freq == freq && c->ic_flags == flags)
4538			return c;
4539	}
4540	return NULL;
4541}
4542
/*
 * Append HT40 channel pairs derived from the hal's 40MHz table.
 * For each hal entry whose +20MHz extension channel is already
 * present (added earlier as HT20) an HT40U entry and the matching
 * HT40D entry on the extension frequency are appended.
 */
static void
addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	const struct ieee80211_channel *extc;
	const struct mwl_hal_channel *hc;
	int i;

	/* c tracks the next free slot in chans */
	c = &chans[*nchans];

	flags &= ~IEEE80211_CHAN_HT;
	for (i = 0; i < ci->nchannels; i++) {
		/*
		 * Each entry defines an HT40 channel pair; find the
		 * extension channel above and the insert the pair.
		 */
		hc = &ci->channels[i];
		extc = findchannel(chans, *nchans, hc->freq+20,
		    flags | IEEE80211_CHAN_HT20);
		if (extc != NULL) {
			if (*nchans >= maxchans)
				break;
			/* primary channel, 40MHz with extension above */
			addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U,
			    hc->ieee, hc->maxTxPow);
			c->ic_extieee = extc->ic_ieee;
			c++, (*nchans)++;
			if (*nchans >= maxchans)
				break;
			/* extension channel, 40MHz with extension below */
			addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D,
			    extc->ic_ieee, hc->maxTxPow);
			c->ic_extieee = hc->ieee;
			c++, (*nchans)++;
		}
	}
}
4579
/*
 * Append 20MHz channels from a hal channel table.  For 2.4GHz
 * tables each channel is expanded into b/g (and for HTG also an
 * HT20) variants; HTA channels likewise get an a-only twin.
 * NB: after addchan, c points at the next free slot so c[-1] is
 * the entry just added.
 */
static void
addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	int i;

	c = &chans[*nchans];

	for (i = 0; i < ci->nchannels; i++) {
		const struct mwl_hal_channel *hc;

		hc = &ci->channels[i];
		if (*nchans >= maxchans)
			break;
		addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
		c++, (*nchans)++;
		if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
			/* g channel have a separate b-only entry */
			if (*nchans >= maxchans)
				break;
			/* duplicate the entry, demote the first to b-only */
			c[0] = c[-1];
			c[-1].ic_flags = IEEE80211_CHAN_B;
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTG) {
			/* HT g channel have a separate g-only entry */
			if (*nchans >= maxchans)
				break;
			c[-1].ic_flags = IEEE80211_CHAN_G;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTA) {
			/* HT a channel have a separate a-only entry */
			if (*nchans >= maxchans)
				break;
			c[-1].ic_flags = IEEE80211_CHAN_A;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
	}
}
4627
static void
getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
	struct ieee80211_channel chans[])
{
	const MWL_HAL_CHANNELINFO *ci;

	/*
	 * Use the channel info from the hal to craft the
	 * channel list.  Note that we pass back an unsorted
	 * list; the caller is required to sort it for us
	 * (if desired).
	 *
	 * Order of construction: 2.4GHz 20MHz, 5GHz 20MHz, then
	 * the HT40 pairs (which match against the HT20 entries
	 * already added above).
	 */
	*nchans = 0;
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
}
4654
4655static void
4656mwl_getradiocaps(struct ieee80211com *ic,
4657	int maxchans, int *nchans, struct ieee80211_channel chans[])
4658{
4659	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4660
4661	getchannels(sc, maxchans, nchans, chans);
4662}
4663
4664static int
4665mwl_getchannels(struct mwl_softc *sc)
4666{
4667	struct ifnet *ifp = sc->sc_ifp;
4668	struct ieee80211com *ic = ifp->if_l2com;
4669
4670	/*
4671	 * Use the channel info from the hal to craft the
4672	 * channel list for net80211.  Note that we pass up
4673	 * an unsorted list; net80211 will sort it for us.
4674	 */
4675	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
4676	ic->ic_nchans = 0;
4677	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
4678
4679	ic->ic_regdomain.regdomain = SKU_DEBUG;
4680	ic->ic_regdomain.country = CTRY_DEFAULT;
4681	ic->ic_regdomain.location = 'I';
4682	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
4683	ic->ic_regdomain.isocc[1] = ' ';
4684	return (ic->ic_nchans == 0 ? EIO : 0);
4685}
4686#undef IEEE80211_CHAN_HTA
4687#undef IEEE80211_CHAN_HTG
4688
4689#ifdef MWL_DEBUG
/* Debug: dump the contents of an rx descriptor. */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	/* NB: status is used only for the OK/! marker below; the
	 * STAT field itself is printed from ds->Status directly */
	uint32_t status = le32toh(ds->Status);

	printf("R[%2u] (DS.V:%p DS.P:%p) NEXT:%08x DATA:%08x RC:%02x%s\n"
	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (const struct mwl_desc *)bf->bf_daddr,
	    le32toh(ds->pPhysNext), le32toh(ds->pPhysBuffData),
	    ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2));
}
4706
/* Debug: dump the contents of a tx descriptor. */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:%p)\n",
	    ds, (const struct mwl_txdesc *)bf->bf_daddr);
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !",
	    /* NB: marker logic mirrors mwl_printrxbuf */);
	printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	/* multi-segment descriptors carry per-segment len/addr arrays */
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
{ const uint8_t *cp = (const uint8_t *) ds;
  int i;
  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
	printf("%02x ", cp[i]);
	if (((i+1) % 16) == 0)
		printf("\n");
  }
  printf("\n");
}
#endif
}
4749#endif /* MWL_DEBUG */
4750
4751#if 0
/* Debug: dump every active buffer on a tx queue (currently compiled out). */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		/* sync so we see the h/w's view of the descriptor */
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4770#endif
4771
4772static void
4773mwl_watchdog(struct ifnet *ifp)
4774{
4775	struct mwl_softc *sc = ifp->if_softc;
4776
4777	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) {
4778		if (mwl_hal_setkeepalive(sc->sc_mh))
4779			if_printf(ifp, "transmit timeout (firmware hung?)\n");
4780		else
4781			if_printf(ifp, "transmit timeout\n");
4782#if 0
4783		mwl_reset(ifp);
4784mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
4785#endif
4786		ifp->if_oerrors++;
4787		sc->sc_stats.mst_watchdog++;
4788	}
4789}
4790
#ifdef MWL_DIAGAPI
/*
 * Diagnostic interface to the HAL.  This is used by various
 * tools to do things like retrieve register contents for
 * debugging.  The mechanism is intentionally opaque so that
 * it can change frequently w/o concern for compatibility.
 */
static int
mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	u_int id = md->md_id & MWL_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = md->md_in_size;
	u_int32_t outsize = md->md_out_size;
	int error = 0;

	if (md->md_id & MWL_DIAG_IN) {
		/*
		 * Copy in data.
		 * NOTE(review): insize comes from userland and is not
		 * range-checked before malloc — confirm the hal rejects
		 * bogus sizes.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(md->md_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (md->md_id & MWL_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
		/* copy back at most what the caller asked for */
		if (outsize < md->md_out_size)
			md->md_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, md->md_out_data,
					md->md_out_size);
	} else {
		error = EINVAL;
	}
bad:
	/* release only the buffers we allocated ourselves */
	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
4852
/*
 * Diagnostic reset: optionally reload firmware (md_id == 0),
 * refetch h/w specs, re-setup DMA, and reset tx/rx bookkeeping.
 * Caller must hold the softc lock.
 */
static int
mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error;

	MWL_LOCK_ASSERT(sc);

	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to load firmware\n");
		return EIO;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		return EIO;
	}
	error = mwl_setupdma(sc);
	if (error != 0) {
		/* NB: mwl_setupdma prints a msg */
		return error;
	}
	/*
	 * Reset tx/rx data structures; after reload we must
	 * re-start the driver's notion of the next xmit/recv.
	 */
	mwl_draintxq(sc);		/* clear pending frames */
	mwl_resettxq(sc);		/* rebuild tx q lists */
	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
	return 0;
}
4883#endif /* MWL_DIAGAPI */
4884
/*
 * ifnet ioctl entry point: interface flag changes, driver
 * statistics, the diagnostic API (MWL_DIAGAPI), media and
 * address queries.  Unknown commands return EINVAL.
 */
static int
mwl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, startall;

	switch (cmd) {
	case SIOCSIFFLAGS:
		MWL_LOCK(sc);
		startall = 0;
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call mwl_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			mwl_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid) {
				mwl_init_locked(sc);	/* XXX lose error */
				startall = 1;
			}
		} else
			mwl_stop_locked(ifp, 1);
		MWL_UNLOCK(sc);
		/* NB: start vaps outside the softc lock */
		if (startall)
			ieee80211_start_all(ic);
		break;
	case SIOCGMVSTATS:
		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.mst_tx_packets = ifp->if_opackets;
		sc->sc_stats.mst_rx_packets = ifp->if_ipackets;
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsisentcy in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return copyout(&sc->sc_stats,
				ifr->ifr_data, sizeof (sc->sc_stats));
#ifdef MWL_DIAGAPI
	case SIOCGMVDIAG:
		/* XXX check privs */
		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
	case SIOCGMVRESET:
		/* XXX check privs */
		MWL_LOCK(sc);
		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
		MWL_UNLOCK(sc);
		break;
#endif /* MWL_DIAGAPI */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}
4963
4964#ifdef	MWL_DEBUG
4965static int
4966mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4967{
4968	struct mwl_softc *sc = arg1;
4969	int debug, error;
4970
4971	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4972	error = sysctl_handle_int(oidp, &debug, 0, req);
4973	if (error || !req->newptr)
4974		return error;
4975	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4976	sc->sc_debug = debug & 0x00ffffff;
4977	return 0;
4978}
4979#endif /* MWL_DEBUG */
4980
/* Attach driver sysctl nodes (only the debug knob, and only with MWL_DEBUG). */
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef	MWL_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	/* seed from the global tunable */
	sc->sc_debug = mwl_debug;
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
4994
/*
 * Announce various information on device/driver attach.
 */
static void
mwl_announce(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* firmware release is packed one byte per version component */
	if_printf(ifp, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
		sc->sc_hwspecs.hwVersion,
		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
		sc->sc_hwspecs.regionCode);
	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;

	if (bootverbose) {
		int i;
		for (i = 0; i <= WME_AC_VO; i++) {
			struct mwl_txq *txq = sc->sc_ac2q[i];
			if_printf(ifp, "Use hw queue %u for %s traffic\n",
				txq->qnum, ieee80211_wme_acnames[i]);
		}
	}
	/* NB: report tunables only when they differ from the defaults */
	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
		if_printf(ifp, "using %u rx descriptors\n", mwl_rxdesc);
	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
		if_printf(ifp, "using %u rx buffers\n", mwl_rxbuf);
	if (bootverbose || mwl_txbuf != MWL_TXBUF)
		if_printf(ifp, "using %u tx buffers\n", mwl_txbuf);
	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
		if_printf(ifp, "multi-bss support\n");
#ifdef MWL_TX_NODROP
	if (bootverbose)
		if_printf(ifp, "no tx drop\n");
#endif
}
5033