if_mwl.c revision 286413
1/*-
2 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
3 * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer,
11 *    without modification.
12 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
14 *    redistribution must be conditioned upon including a substantially
15 *    similar Disclaimer requirement for further binary redistribution.
16 *
17 * NO WARRANTY
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
21 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
23 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
26 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGES.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/dev/mwl/if_mwl.c 286413 2015-08-07 12:34:20Z glebius $");
33
34/*
35 * Driver for the Marvell 88W8363 Wireless LAN controller.
36 */
37
38#include "opt_inet.h"
39#include "opt_mwl.h"
40#include "opt_wlan.h"
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/sysctl.h>
45#include <sys/mbuf.h>
46#include <sys/malloc.h>
47#include <sys/lock.h>
48#include <sys/mutex.h>
49#include <sys/kernel.h>
50#include <sys/socket.h>
51#include <sys/sockio.h>
52#include <sys/errno.h>
53#include <sys/callout.h>
54#include <sys/bus.h>
55#include <sys/endian.h>
56#include <sys/kthread.h>
57#include <sys/taskqueue.h>
58
59#include <machine/bus.h>
60
61#include <net/if.h>
62#include <net/if_var.h>
63#include <net/if_dl.h>
64#include <net/if_media.h>
65#include <net/if_types.h>
66#include <net/if_arp.h>
67#include <net/ethernet.h>
68#include <net/if_llc.h>
69
70#include <net/bpf.h>
71
72#include <net80211/ieee80211_var.h>
73#include <net80211/ieee80211_regdomain.h>
74
75#ifdef INET
76#include <netinet/in.h>
77#include <netinet/if_ether.h>
78#endif /* INET */
79
80#include <dev/mwl/if_mwlvar.h>
81#include <dev/mwl/mwldiag.h>
82
83/* idiomatic shorthands: MS = mask+shift, SM = shift+mask */
84#define	MS(v,x)	(((v) & x) >> x##_S)
85#define	SM(v,x)	(((v) << x##_S) & x)
86
87static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
88		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
89		    const uint8_t [IEEE80211_ADDR_LEN],
90		    const uint8_t [IEEE80211_ADDR_LEN]);
91static void	mwl_vap_delete(struct ieee80211vap *);
92static int	mwl_setupdma(struct mwl_softc *);
93static int	mwl_hal_reset(struct mwl_softc *sc);
94static int	mwl_init(struct mwl_softc *);
95static void	mwl_parent(struct ieee80211com *);
96static int	mwl_reset(struct ieee80211vap *, u_long);
97static void	mwl_stop(struct mwl_softc *);
98static void	mwl_start(struct mwl_softc *);
99static int	mwl_transmit(struct ieee80211com *, struct mbuf *);
100static int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
101			const struct ieee80211_bpf_params *);
102static int	mwl_media_change(struct ifnet *);
103static void	mwl_watchdog(void *);
104static int	mwl_ioctl(struct ieee80211com *, u_long, void *);
105static void	mwl_radar_proc(void *, int);
106static void	mwl_chanswitch_proc(void *, int);
107static void	mwl_bawatchdog_proc(void *, int);
108static int	mwl_key_alloc(struct ieee80211vap *,
109			struct ieee80211_key *,
110			ieee80211_keyix *, ieee80211_keyix *);
111static int	mwl_key_delete(struct ieee80211vap *,
112			const struct ieee80211_key *);
113static int	mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *,
114			const uint8_t mac[IEEE80211_ADDR_LEN]);
115static int	mwl_mode_init(struct mwl_softc *);
116static void	mwl_update_mcast(struct ieee80211com *);
117static void	mwl_update_promisc(struct ieee80211com *);
118static void	mwl_updateslot(struct ieee80211com *);
119static int	mwl_beacon_setup(struct ieee80211vap *);
120static void	mwl_beacon_update(struct ieee80211vap *, int);
121#ifdef MWL_HOST_PS_SUPPORT
122static void	mwl_update_ps(struct ieee80211vap *, int);
123static int	mwl_set_tim(struct ieee80211_node *, int);
124#endif
125static int	mwl_dma_setup(struct mwl_softc *);
126static void	mwl_dma_cleanup(struct mwl_softc *);
127static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
128		    const uint8_t [IEEE80211_ADDR_LEN]);
129static void	mwl_node_cleanup(struct ieee80211_node *);
130static void	mwl_node_drain(struct ieee80211_node *);
131static void	mwl_node_getsignal(const struct ieee80211_node *,
132			int8_t *, int8_t *);
133static void	mwl_node_getmimoinfo(const struct ieee80211_node *,
134			struct ieee80211_mimo_info *);
135static int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
136static void	mwl_rx_proc(void *, int);
137static void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
138static int	mwl_tx_setup(struct mwl_softc *, int, int);
139static int	mwl_wme_update(struct ieee80211com *);
140static void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
141static void	mwl_tx_cleanup(struct mwl_softc *);
142static uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
143static int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
144			     struct mwl_txbuf *, struct mbuf *);
145static void	mwl_tx_proc(void *, int);
146static int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
147static void	mwl_draintxq(struct mwl_softc *);
148static void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
149static int	mwl_recv_action(struct ieee80211_node *,
150			const struct ieee80211_frame *,
151			const uint8_t *, const uint8_t *);
152static int	mwl_addba_request(struct ieee80211_node *,
153			struct ieee80211_tx_ampdu *, int dialogtoken,
154			int baparamset, int batimeout);
155static int	mwl_addba_response(struct ieee80211_node *,
156			struct ieee80211_tx_ampdu *, int status,
157			int baparamset, int batimeout);
158static void	mwl_addba_stop(struct ieee80211_node *,
159			struct ieee80211_tx_ampdu *);
160static int	mwl_startrecv(struct mwl_softc *);
161static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
162			struct ieee80211_channel *);
163static int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
164static void	mwl_scan_start(struct ieee80211com *);
165static void	mwl_scan_end(struct ieee80211com *);
166static void	mwl_set_channel(struct ieee80211com *);
167static int	mwl_peerstadb(struct ieee80211_node *,
168			int aid, int staid, MWL_HAL_PEERINFO *pi);
169static int	mwl_localstadb(struct ieee80211vap *);
170static int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
171static int	allocstaid(struct mwl_softc *sc, int aid);
172static void	delstaid(struct mwl_softc *sc, int staid);
173static void	mwl_newassoc(struct ieee80211_node *, int);
174static void	mwl_agestations(void *);
175static int	mwl_setregdomain(struct ieee80211com *,
176			struct ieee80211_regdomain *, int,
177			struct ieee80211_channel []);
178static void	mwl_getradiocaps(struct ieee80211com *, int, int *,
179			struct ieee80211_channel []);
180static int	mwl_getchannels(struct mwl_softc *);
181
182static void	mwl_sysctlattach(struct mwl_softc *);
183static void	mwl_announce(struct mwl_softc *);
184
/*
 * Driver knobs exported under hw.mwl.*.  All but rxdesc are CTLFLAG_RWTUN
 * and so may also be set as loader tunables; rxdesc is runtime-only (RW).
 */
SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");

static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
	    0, "rx descriptors allocated");
static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &mwl_rxbuf,
	    0, "rx buffers allocated");
static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RWTUN, &mwl_txbuf,
	    0, "tx buffers allocated");
static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &mwl_txcoalesce,
	    0, "tx buffers to send at once");
static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RWTUN, &mwl_rxquota,
	    0, "max rx buffers to process per interrupt");
static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RWTUN, &mwl_rxdmalow,
	    0, "min free rx buffers before restarting traffic");
205
/*
 * Debug infrastructure: when compiled with MWL_DEBUG, hw.mwl.debug
 * (mirrored into sc->sc_debug) is a bitmask selecting which DPRINTF
 * categories fire.  Without MWL_DEBUG everything compiles away to
 * no-ops that still "use" sc to avoid unused-variable warnings.
 */
#ifdef MWL_DEBUG
static	int mwl_debug = 0;
SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RWTUN, &mwl_debug,
	    0, "control debugging printfs");
/* bit assignments for the debug mask */
enum {
	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
	MWL_DEBUG_ANY		= 0xffffffff
};
/* true if wh is a beacon frame (used to suppress beacon spam in traces) */
#define	IS_BEACON(wh) \
    ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
#define	IFF_DUMPPKTS_RECV(sc, wh) \
    ((sc->sc_debug & MWL_DEBUG_RECV) && \
      ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh)))
#define	IFF_DUMPPKTS_XMIT(sc) \
	(sc->sc_debug & MWL_DEBUG_XMIT)

#define	DPRINTF(sc, m, fmt, ...) do {				\
	if (sc->sc_debug & (m))					\
		printf(fmt, __VA_ARGS__);			\
} while (0)
#define	KEYPRINTF(sc, hk, mac) do {				\
	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
		mwl_keyprint(sc, __func__, hk, mac);		\
} while (0)
static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
#else
#define	IFF_DUMPPKTS_RECV(sc, wh)	0
#define	IFF_DUMPPKTS_XMIT(sc)		0
#define	DPRINTF(sc, m, fmt, ...)	do { (void )sc; } while (0)
#define	KEYPRINTF(sc, k, mac)		do { (void )sc; } while (0)
#endif
253
/* malloc(9) type used for driver DMA bookkeeping allocations */
static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");

/*
 * Each packet has fixed front matter: a 2-byte length
 * of the payload, followed by a 4-address 802.11 header
 * (regardless of the actual header and always w/o any
 * QoS header).  The payload then follows.
 */
struct mwltxrec {
	uint16_t fwlen;			/* payload length reported to the f/w */
	struct ieee80211_frame_addr4 wh; /* always 4-address form, no QoS */
} __packed;
266
267/*
268 * Read/Write shorthands for accesses to BAR 0.  Note
269 * that all BAR 1 operations are done in the "hal" and
270 * there should be no reference to them here.
271 */
#ifdef MWL_DEBUG
/* Read a 32-bit BAR 0 register; used only by debug code. */
static __inline uint32_t
RD4(struct mwl_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
}
#endif
279
/* Write a 32-bit BAR 0 register. */
static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}
285
/*
 * Device attach: bind the HAL, load (station) firmware, fetch h/w
 * specs and channel list, set up tx/rx DMA state and push it to the
 * firmware, create the deferred-work taskqueue and tx queues, then
 * register capabilities and method overrides with net80211.
 *
 * Returns 0 on success or an errno.  On any failure sc->sc_invalid
 * is set so mwl_intr() ignores the (possibly shared) interrupt.
 */
int
mwl_attach(uint16_t devid, struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	/*
	 * Setup the RX free list lock early, so it can be consistently
	 * removed.
	 */
	MWL_RXFREE_INIT(sc);

	mh = mwl_hal_attach(sc->sc_dev, devid,
	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
	if (mh == NULL) {
		device_printf(sc->sc_dev, "unable to attach HAL\n");
		error = EIO;
		goto bad;
	}
	sc->sc_mh = mh;
	/*
	 * Load firmware so we can get setup.  We arbitrarily
	 * pick station firmware; we'll re-load firmware as
	 * needed so setting up the wrong mode isn't a big deal.
	 */
	if (mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to setup builtin firmware\n");
		error = EIO;
		goto bad1;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		error = EIO;
		goto bad1;
	}
	error = mwl_getchannels(sc);
	if (error != 0)
		goto bad1;

	sc->sc_txantenna = 0;		/* h/w default */
	sc->sc_rxantenna = 0;		/* h/w default */
	sc->sc_invalid = 0;		/* ready to go, enable int handling */
	sc->sc_ageinterval = MWL_AGEINTERVAL;

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 * We immediately push the information to the firmware
	 * as otherwise it gets upset.
	 */
	error = mwl_dma_setup(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to setup descriptors: %d\n",
		    error);
		goto bad1;
	}
	error = mwl_setupdma(sc);	/* push to firmware */
	if (error != 0)			/* NB: mwl_setupdma prints msg */
		goto bad1;

	callout_init(&sc->sc_timer, 1);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
	mbufq_init(&sc->sc_snd, ifqmaxlen);

	/*
	 * NOTE(review): taskqueue_create() with M_NOWAIT can return NULL,
	 * which is not checked here before taskqueue_start_threads(); the
	 * error paths below also never taskqueue_free() it — confirm
	 * whether these are acceptable (matches other drivers of this era).
	 */
	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", device_get_nameunit(sc->sc_dev));

	/* deferred work fired from the interrupt handler */
	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);

	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
		device_printf(sc->sc_dev,
		    "unable to setup xmit queue for %s traffic!\n",
		     ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
#if 0
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
#endif
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_WDS		/* WDS supported */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WME		/* WME/WMM supported */
		| IEEE80211_C_BURST		/* xmit bursting supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
		| IEEE80211_C_DFS		/* DFS supported */
		;

	ic->ic_htcaps =
		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
#if MWL_AGGR_SIZE == 7935
		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#if 0
		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
#endif
		/* s/w capabilities */
		| IEEE80211_HTC_HT		/* HT operation */
		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
		| IEEE80211_HTC_SMPS		/* SMPS available */
		;

	/*
	 * Mark h/w crypto support.
	 * XXX no way to query h/w support.
	 */
	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
			  |  IEEE80211_CRYPTO_AES_CCM
			  |  IEEE80211_CRYPTO_TKIP
			  |  IEEE80211_CRYPTO_TKIPMIC
			  ;
	/*
	 * Transmit requires space in the packet for a special
	 * format transmit record and optional padding between
	 * this record and the payload.  Ask the net80211 layer
	 * to arrange this when encapsulating packets so we can
	 * add it efficiently.
	 */
	ic->ic_headroom = sizeof(struct mwltxrec) -
		sizeof(struct ieee80211_frame);

	IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->sc_hwspecs.macAddr);

	/* call MI attach routine. */
	ieee80211_ifattach(ic);
	ic->ic_setregdomain = mwl_setregdomain;
	ic->ic_getradiocaps = mwl_getradiocaps;
	/* override default methods */
	ic->ic_raw_xmit = mwl_raw_xmit;
	ic->ic_newassoc = mwl_newassoc;
	ic->ic_updateslot = mwl_updateslot;
	ic->ic_update_mcast = mwl_update_mcast;
	ic->ic_update_promisc = mwl_update_promisc;
	ic->ic_wme.wme_update = mwl_wme_update;
	ic->ic_transmit = mwl_transmit;
	ic->ic_ioctl = mwl_ioctl;
	ic->ic_parent = mwl_parent;

	/* save net80211 defaults so our wrappers can chain to them */
	ic->ic_node_alloc = mwl_node_alloc;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = mwl_node_cleanup;
	sc->sc_node_drain = ic->ic_node_drain;
	ic->ic_node_drain = mwl_node_drain;
	ic->ic_node_getsignal = mwl_node_getsignal;
	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;

	ic->ic_scan_start = mwl_scan_start;
	ic->ic_scan_end = mwl_scan_end;
	ic->ic_set_channel = mwl_set_channel;

	sc->sc_recv_action = ic->ic_recv_action;
	ic->ic_recv_action = mwl_recv_action;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = mwl_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = mwl_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = mwl_addba_stop;

	ic->ic_vap_create = mwl_vap_create;
	ic->ic_vap_delete = mwl_vap_delete;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		MWL_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		MWL_RX_RADIOTAP_PRESENT);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	mwl_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	mwl_announce(sc);
	return 0;
bad2:
	mwl_dma_cleanup(sc);
bad1:
	mwl_hal_detach(mh);
bad:
	MWL_RXFREE_DESTROY(sc);
	sc->sc_invalid = 1;
	return error;
}
522
/*
 * Device detach: stop the hardware, then tear state down in the
 * specific order documented below.  Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	callout_drain(&sc->sc_watchdog);
	mwl_dma_cleanup(sc);
	MWL_RXFREE_DESTROY(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	mbufq_drain(&sc->sc_snd);

	return 0;
}
553
554/*
555 * MAC address handling for multiple BSS on the same radio.
556 * The first vap uses the MAC address from the EEPROM.  For
557 * subsequent vap's we set the U/L bit (bit 1) in the MAC
558 * address and use the next six bits as an index.
559 */
/*
 * Pick a MAC address for a new vap and record it in sc_bssidmask.
 * When cloning is requested and the h/w supports multiple BSSIDs,
 * the first free index i (1..31) is encoded into mac[0] as
 * (i << 2) | 0x2 (U/L bit set); index 0 leaves the EEPROM address
 * untouched and is reference-counted via sc_nbssid0.
 *
 * NOTE(review): if all 32 indices are in use the loop leaves i == 32
 * and both (i << 2) and (1 << i) misbehave (the latter is undefined
 * for a 32-bit int) — confirm whether exhaustion is prevented by the
 * caller / vap limits.
 */
static void
assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 32; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		if (i != 0)
			mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	if (i == 0)
		sc->sc_nbssid0++;
}
578
579static void
580reclaim_address(struct mwl_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
581{
582	int i = mac[0] >> 2;
583	if (i != 0 || --sc->sc_nbssid0 == 0)
584		sc->sc_bssidmask &= ~(1<<i);
585}
586
/*
 * net80211 vap-create method.  Allocates a MAC address (unless the
 * caller supplied one via IEEE80211_CLONE_MACADDR), creates a HAL
 * vap for AP/MBSS/STA modes (WDS borrows an existing AP vap's handle,
 * monitor needs none), wires up driver method overrides, updates the
 * per-mode vap counts and derives the overall ic_opmode.
 * Returns the new vap or NULL on failure.
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			/* undo the address reservation on failure */
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		return NULL;
	}

	mvp = malloc(sizeof(struct mwl_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	/* override with driver methods; save originals for chaining */
	mvp->mv_newstate = vap->iv_newstate;
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status,
	    mac);

	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}
714
/*
 * net80211 vap-delete method.  Quiesces interrupts while the vap is
 * torn down, releases the HAL vap and station entry, updates the
 * per-mode vap counts and reclaims the vap's MAC address index.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (sc->sc_running) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps--;
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	if (sc->sc_running)	/* re-enable interrupts if we disabled them */
		mwl_hal_intrset(mh, sc->sc_imask);
}
755
/* Power-management suspend hook: stop the hardware under the lock. */
void
mwl_suspend(struct mwl_softc *sc)
{

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
}
764
765void
766mwl_resume(struct mwl_softc *sc)
767{
768	int error = EDOOFUS;
769
770	MWL_LOCK(sc);
771	if (sc->sc_ic.ic_nrunning > 0)
772		error = mwl_init(sc);
773	MWL_UNLOCK(sc);
774
775	if (error == 0)
776		ieee80211_start_all(&sc->sc_ic);	/* start all vap's */
777}
778
/* System shutdown hook: quiesce the hardware. */
void
mwl_shutdown(void *arg)
{
	struct mwl_softc *sc = arg;

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
}
788
789/*
790 * Interrupt handler.  Most of the actual processing is deferred.
791 */
/*
 * Interrupt handler.  Most of the actual processing is deferred.
 * Reads (and thereby clears) the ISR via the HAL and dispatches each
 * cause bit, pushing rx/tx/BA-watchdog/radar/chan-switch work onto
 * the taskqueue.  Bails early if the device is marked invalid or the
 * status is zero (shared IRQ not for us).
 */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;			/* currently unhandled */
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;			/* currently unhandled */
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}
847
/*
 * Deferred radar-detect handler (taskqueue context): bump the stat
 * and notify net80211 DFS machinery about radar on the current channel.
 */
static void
mwl_radar_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
	    __func__, pending);

	sc->sc_stats.mst_radardetect++;
	/* XXX stop h/w BA streams? */

	IEEE80211_LOCK(ic);
	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
	IEEE80211_UNLOCK(ic);
}
864
/*
 * Deferred CSA-complete handler (taskqueue context): clear the
 * pending flag and tell net80211 the channel switch finished.
 */
static void
mwl_chanswitch_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
	    __func__, pending);

	IEEE80211_LOCK(ic);
	sc->sc_csapending = 0;
	ieee80211_csa_completeswitch(ic);
	IEEE80211_UNLOCK(ic);
}
879
/*
 * Tear down a single stuck BA stream: sp->data[0] holds the node,
 * sp->data[1] the tx ampdu state (set when the stream was created).
 */
static void
mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
{
	struct ieee80211_node *ni = sp->data[0];

	/* send DELBA and drop the stream */
	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
}
888
889static void
890mwl_bawatchdog_proc(void *arg, int pending)
891{
892	struct mwl_softc *sc = arg;
893	struct mwl_hal *mh = sc->sc_mh;
894	const MWL_HAL_BASTREAM *sp;
895	uint8_t bitmap, n;
896
897	sc->sc_stats.mst_bawatchdog++;
898
899	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
900		DPRINTF(sc, MWL_DEBUG_AMPDU,
901		    "%s: could not get bitmap\n", __func__);
902		sc->sc_stats.mst_bawatchdog_failed++;
903		return;
904	}
905	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
906	if (bitmap == 0xff) {
907		n = 0;
908		/* disable all ba streams */
909		for (bitmap = 0; bitmap < 8; bitmap++) {
910			sp = mwl_hal_bastream_lookup(mh, bitmap);
911			if (sp != NULL) {
912				mwl_bawatchdog(sp);
913				n++;
914			}
915		}
916		if (n == 0) {
917			DPRINTF(sc, MWL_DEBUG_AMPDU,
918			    "%s: no BA streams found\n", __func__);
919			sc->sc_stats.mst_bawatchdog_empty++;
920		}
921	} else if (bitmap != 0xaa) {
922		/* disable a single ba stream */
923		sp = mwl_hal_bastream_lookup(mh, bitmap);
924		if (sp != NULL) {
925			mwl_bawatchdog(sp);
926		} else {
927			DPRINTF(sc, MWL_DEBUG_AMPDU,
928			    "%s: no BA stream %d\n", __func__, bitmap);
929			sc->sc_stats.mst_bawatchdog_notfound++;
930		}
931	}
932}
933
934/*
935 * Convert net80211 channel to a HAL channel.
936 */
937static void
938mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
939{
940	hc->channel = chan->ic_ieee;
941
942	*(uint32_t *)&hc->channelFlags = 0;
943	if (IEEE80211_IS_CHAN_2GHZ(chan))
944		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
945	else if (IEEE80211_IS_CHAN_5GHZ(chan))
946		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
947	if (IEEE80211_IS_CHAN_HT40(chan)) {
948		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
949		if (IEEE80211_IS_CHAN_HT40U(chan))
950			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
951		else
952			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
953	} else
954		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
955	/* XXX 10MHz channels */
956}
957
958/*
959 * Inform firmware of our tx/rx dma setup.  The BAR 0
960 * writes below are for compatibility with older firmware.
961 * For current firmware we send this information with a
962 * cmd block via mwl_hal_sethwdma.
963 */
static int
mwl_setupdma(struct mwl_softc *sc)
{
	int error, i;

	/* hand the rx descriptor ring base to the h/w via BAR0 registers */
	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);

	/* per-queue tx (WCB) ring bases; the ack queues are excluded */
	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
		struct mwl_txq *txq = &sc->sc_txq[i];
		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
	}
	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;	/* tx buffers per queue */
	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;

	/* current firmware takes the same info through a cmd block */
	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to setup tx/rx dma; hal status %u\n", error);
		/* XXX */
	}
	return error;
}
989
990/*
991 * Inform firmware of tx rate parameters.
992 * Called after a channel change.
993 */
994static int
995mwl_setcurchanrates(struct mwl_softc *sc)
996{
997	struct ieee80211com *ic = &sc->sc_ic;
998	const struct ieee80211_rateset *rs;
999	MWL_HAL_TXRATE rates;
1000
1001	memset(&rates, 0, sizeof(rates));
1002	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1003	/* rate used to send management frames */
1004	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1005	/* rate used to send multicast frames */
1006	rates.McastRate = rates.MgtRate;
1007
1008	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1009}
1010
1011/*
1012 * Inform firmware of tx rate parameters.  Called whenever
1013 * user-settable params change and after a channel change.
1014 */
static int
mwl_setrates(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ieee80211_node *ni = vap->iv_bss;
	const struct ieee80211_txparam *tp = ni->ni_txparms;
	MWL_HAL_TXRATE rates;

	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));

	/*
	 * Update the h/w rate map.
	 * NB: 0x80 for MCS is passed through unchanged
	 */
	memset(&rates, 0, sizeof(rates));
	/* rate used to send management frames */
	rates.MgtRate = tp->mgmtrate;
	/* rate used to send multicast frames */
	rates.McastRate = tp->mcastrate;

	/* while here calculate EAPOL fixed rate cookie */
	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));

	/* fixed rate only if the user pinned a unicast rate */
	return mwl_hal_settxrate(mvp->mv_hvap,
	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
		RATE_FIXED : RATE_AUTO, &rates);
}
1042
1043/*
1044 * Setup a fixed xmit rate cookie for EAPOL frames.
1045 */
1046static void
1047mwl_seteapolformat(struct ieee80211vap *vap)
1048{
1049	struct mwl_vap *mvp = MWL_VAP(vap);
1050	struct ieee80211_node *ni = vap->iv_bss;
1051	enum ieee80211_phymode mode;
1052	uint8_t rate;
1053
1054	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1055
1056	mode = ieee80211_chan2mode(ni->ni_chan);
1057	/*
1058	 * Use legacy rates when operating a mixed HT+non-HT bss.
1059	 * NB: this may violate POLA for sta and wds vap's.
1060	 */
1061	if (mode == IEEE80211_MODE_11NA &&
1062	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1063		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1064	else if (mode == IEEE80211_MODE_11NG &&
1065	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1066		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1067	else
1068		rate = vap->iv_txparms[mode].mgmtrate;
1069
1070	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1071}
1072
1073/*
1074 * Map SKU+country code to region code for radar bin'ing.
1075 */
1076static int
1077mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1078{
1079	switch (rd->regdomain) {
1080	case SKU_FCC:
1081	case SKU_FCC3:
1082		return DOMAIN_CODE_FCC;
1083	case SKU_CA:
1084		return DOMAIN_CODE_IC;
1085	case SKU_ETSI:
1086	case SKU_ETSI2:
1087	case SKU_ETSI3:
1088		if (rd->country == CTRY_SPAIN)
1089			return DOMAIN_CODE_SPAIN;
1090		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1091			return DOMAIN_CODE_FRANCE;
1092		/* XXX force 1.3.1 radar type */
1093		return DOMAIN_CODE_ETSI_131;
1094	case SKU_JAPAN:
1095		return DOMAIN_CODE_MKK;
1096	case SKU_ROW:
1097		return DOMAIN_CODE_DGT;	/* Taiwan */
1098	case SKU_APAC:
1099	case SKU_APAC2:
1100	case SKU_APAC3:
1101		return DOMAIN_CODE_AUS;	/* Australia */
1102	}
1103	/* XXX KOREA? */
1104	return DOMAIN_CODE_FCC;			/* XXX? */
1105}
1106
/*
 * Push vap-independent h/w state to the firmware after a reset:
 * antennas, radio, WMM, current channel and region code, plus
 * various rate-adaptation knobs.  Always returns 1 (success).
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	return 1;
}
1130
/*
 * Bring the device up: quiesce any prior state, push firmware
 * config, start the rx path, and enable interrupts.  Caller
 * holds the softc lock.  Returns 0 or an errno.
 */
static int
mwl_init(struct mwl_softc *sc)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop(sc);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		device_printf(sc->sc_dev, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.  The #if 0 sources are deliberately
	 * masked off; NB: MACREQ_A2HRIC_BIT_TX_ACK is spelled with
	 * the "MACREQ" prefix in the register definitions.
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	sc->sc_running = 1;	/* mark up before unmasking interrupts */
	mwl_hal_intrset(mh, sc->sc_imask);
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);

	return 0;
}
1187
1188static void
1189mwl_stop(struct mwl_softc *sc)
1190{
1191
1192	MWL_LOCK_ASSERT(sc);
1193	if (sc->sc_running) {
1194		/*
1195		 * Shutdown the hardware and driver.
1196		 */
1197		sc->sc_running = 0;
1198		callout_stop(&sc->sc_watchdog);
1199		sc->sc_tx_timer = 0;
1200		mwl_draintxq(sc);
1201	}
1202}
1203
/*
 * Re-push per-vap state (rates, RTS threshold, HT guard interval
 * and protection) to the firmware; when the vap is RUN'ing in an
 * AP-like mode also re-establish the beacon.  Returns 0 or an
 * errno from beacon setup.
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_MBSS ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}
1233
1234/*
1235 * Reset the hardware w/o losing operational state.
1236 * Used to to reset or reload hardware state for a vap.
1237 */
1238static int
1239mwl_reset(struct ieee80211vap *vap, u_long cmd)
1240{
1241	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1242	int error = 0;
1243
1244	if (hvap != NULL) {			/* WDS, MONITOR, etc. */
1245		struct ieee80211com *ic = vap->iv_ic;
1246		struct mwl_softc *sc = ic->ic_softc;
1247		struct mwl_hal *mh = sc->sc_mh;
1248
1249		/* XXX handle DWDS sta vap change */
1250		/* XXX do we need to disable interrupts? */
1251		mwl_hal_intrset(mh, 0);		/* disable interrupts */
1252		error = mwl_reset_vap(vap, vap->iv_state);
1253		mwl_hal_intrset(mh, sc->sc_imask);
1254	}
1255	return error;
1256}
1257
1258/*
1259 * Allocate a tx buffer for sending a frame.  The
1260 * packet is assumed to have the WME AC stored so
1261 * we can use it to select the appropriate h/w queue.
1262 */
1263static struct mwl_txbuf *
1264mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1265{
1266	struct mwl_txbuf *bf;
1267
1268	/*
1269	 * Grab a TX buffer and associated resources.
1270	 */
1271	MWL_TXQ_LOCK(txq);
1272	bf = STAILQ_FIRST(&txq->free);
1273	if (bf != NULL) {
1274		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1275		txq->nfree--;
1276	}
1277	MWL_TXQ_UNLOCK(txq);
1278	if (bf == NULL)
1279		DPRINTF(sc, MWL_DEBUG_XMIT,
1280		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1281	return bf;
1282}
1283
1284/*
1285 * Return a tx buffer to the queue it came from.  Note there
1286 * are two cases because we must preserve the order of buffers
1287 * as it reflects the fixed order of descriptors in memory
1288 * (the firmware pre-fetches descriptors so we cannot reorder).
1289 */
1290static void
1291mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1292{
1293	bf->bf_m = NULL;
1294	bf->bf_node = NULL;
1295	MWL_TXQ_LOCK(txq);
1296	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1297	txq->nfree++;
1298	MWL_TXQ_UNLOCK(txq);
1299}
1300
1301static void
1302mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
1303{
1304	bf->bf_m = NULL;
1305	bf->bf_node = NULL;
1306	MWL_TXQ_LOCK(txq);
1307	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
1308	txq->nfree++;
1309	MWL_TXQ_UNLOCK(txq);
1310}
1311
1312static int
1313mwl_transmit(struct ieee80211com *ic, struct mbuf *m)
1314{
1315	struct mwl_softc *sc = ic->ic_softc;
1316	int error;
1317
1318	MWL_LOCK(sc);
1319	if (!sc->sc_running) {
1320		MWL_UNLOCK(sc);
1321		return (ENXIO);
1322	}
1323	error = mbufq_enqueue(&sc->sc_snd, m);
1324	if (error) {
1325		MWL_UNLOCK(sc);
1326		return (error);
1327	}
1328	mwl_start(sc);
1329	MWL_UNLOCK(sc);
1330	return (0);
1331}
1332
/*
 * Drain the driver send queue, handing each frame to the h/w.
 * Frames are taken on their WME AC queue; the firmware is poked
 * every mwl_txcoalesce frames (and once more at the end) rather
 * than per packet.  Caller holds the softc lock.
 */
static void
mwl_start(struct mwl_softc *sc)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	MWL_LOCK_ASSERT(sc);
	if (!sc->sc_running || sc->sc_invalid)
		return;
	nqueued = 0;
	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		/*
		 * Grab the node for the destination.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			/* out of buffers: drop (or stop, if configured) */
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			sc->sc_stats.mst_tx_qstop++;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			/* setup failed: count the error, recycle the buffer */
			if_inc_counter(ni->ni_vap->iv_ifp,
			    IFCOUNTER_OERRORS, 1);
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host. So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1410
/*
 * Transmit a raw (caller-formed) 802.11 frame.  On any failure
 * the node reference is released; the mbuf is freed on the
 * early-failure paths and consumed by mwl_tx_start otherwise.
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if (!sc->sc_running || sc->sc_invalid) {
		ieee80211_free_node(ni);
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		ieee80211_free_node(ni);
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		/* NB: mwl_tx_start consumed the mbuf on failure */
		mwl_puttxbuf_head(txq, bf);

		ieee80211_free_node(ni);
		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host. So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1466
1467static int
1468mwl_media_change(struct ifnet *ifp)
1469{
1470	struct ieee80211vap *vap = ifp->if_softc;
1471	int error;
1472
1473	error = ieee80211_media_change(ifp);
1474	/* NB: only the fixed rate can change and that doesn't need a reset */
1475	if (error == ENETRESET) {
1476		mwl_setrates(vap);
1477		error = 0;
1478	}
1479	return error;
1480}
1481
1482#ifdef MWL_DEBUG
1483static void
1484mwl_keyprint(struct mwl_softc *sc, const char *tag,
1485	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
1486{
1487	static const char *ciphers[] = {
1488		"WEP",
1489		"TKIP",
1490		"AES-CCM",
1491	};
1492	int i, n;
1493
1494	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
1495	for (i = 0, n = hk->keyLen; i < n; i++)
1496		printf(" %02x", hk->key.aes[i]);
1497	printf(" mac %s", ether_sprintf(mac));
1498	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
1499		printf(" %s", "rxmic");
1500		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
1501			printf(" %02x", hk->key.tkip.rxMic[i]);
1502		printf(" txmic");
1503		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
1504			printf(" %02x", hk->key.tkip.txMic[i]);
1505	}
1506	printf(" flags 0x%x\n", hk->keyFlags);
1507}
1508#endif
1509
1510/*
1511 * Allocate a key cache slot for a unicast key.  The
1512 * firmware handles key allocation and every station is
1513 * guaranteed key space so we are always successful.
1514 */
1515static int
1516mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
1517	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1518{
1519	struct mwl_softc *sc = vap->iv_ic->ic_softc;
1520
1521	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
1522	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
1523		if (!(&vap->iv_nw_keys[0] <= k &&
1524		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
1525			/* should not happen */
1526			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1527				"%s: bogus group key\n", __func__);
1528			return 0;
1529		}
1530		/* give the caller what they requested */
1531		*keyix = *rxkeyix = k - vap->iv_nw_keys;
1532	} else {
1533		/*
1534		 * Firmware handles key allocation.
1535		 */
1536		*keyix = *rxkeyix = 0;
1537	}
1538	return 1;
1539}
1540
1541/*
1542 * Delete a key entry allocated by mwl_key_alloc.
1543 */
1544static int
1545mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
1546{
1547	struct mwl_softc *sc = vap->iv_ic->ic_softc;
1548	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1549	MWL_HAL_KEYVAL hk;
1550	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
1551	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1552
1553	if (hvap == NULL) {
1554		if (vap->iv_opmode != IEEE80211_M_WDS) {
1555			/* XXX monitor mode? */
1556			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1557			    "%s: no hvap for opmode %d\n", __func__,
1558			    vap->iv_opmode);
1559			return 0;
1560		}
1561		hvap = MWL_VAP(vap)->mv_ap_hvap;
1562	}
1563
1564	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
1565	    __func__, k->wk_keyix);
1566
1567	memset(&hk, 0, sizeof(hk));
1568	hk.keyIndex = k->wk_keyix;
1569	switch (k->wk_cipher->ic_cipher) {
1570	case IEEE80211_CIPHER_WEP:
1571		hk.keyTypeId = KEY_TYPE_ID_WEP;
1572		break;
1573	case IEEE80211_CIPHER_TKIP:
1574		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1575		break;
1576	case IEEE80211_CIPHER_AES_CCM:
1577		hk.keyTypeId = KEY_TYPE_ID_AES;
1578		break;
1579	default:
1580		/* XXX should not happen */
1581		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1582		    __func__, k->wk_cipher->ic_cipher);
1583		return 0;
1584	}
1585	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
1586}
1587
1588static __inline int
1589addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1590{
1591	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1592		if (k->wk_flags & IEEE80211_KEY_XMIT)
1593			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1594		if (k->wk_flags & IEEE80211_KEY_RECV)
1595			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1596		return 1;
1597	} else
1598		return 0;
1599}
1600
1601/*
1602 * Set the key cache contents for the specified key.  Key cache
1603 * slot(s) must already have been allocated by mwl_key_alloc.
1604 */
static int
mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
	const uint8_t mac[IEEE80211_ADDR_LEN])
{
#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
#define	IEEE80211_IS_STATICKEY(k) \
	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
	 (GRPXMIT|IEEE80211_KEY_RECV))
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	const uint8_t *macaddr;
	MWL_HAL_KEYVAL hk;

	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
		("s/w crypto set?"));

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* WDS vaps borrow the parent AP's hvap */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}
	/* build the HAL key descriptor from the net80211 key */
	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (cip->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		hk.keyLen = k->wk_keylen;
		if (k->wk_keyix == vap->iv_def_txkey)
			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
		if (!IEEE80211_IS_STATICKEY(k)) {
			/* NB: WEP is never used for the PTK */
			(void) addgroupflags(&hk, k);
		}
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
		/* key material is followed by the two MIC keys */
		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		hk.keyLen = k->wk_keylen;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	/*
	 * NB: tkip mic keys get copied here too; the layout
	 *     just happens to match that in ieee80211_key.
	 */
	memcpy(hk.key.aes, k->wk_key, hk.keyLen);

	/*
	 * Locate address of sta db entry for writing key;
	 * the convention unfortunately is somewhat different
	 * than how net80211, hostapd, and wpa_supplicant think.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		/*
		 * NB: keys plumbed before the sta reaches AUTH state
		 * will be discarded or written to the wrong sta db
		 * entry because iv_bss is meaningless.  This is ok
		 * (right now) because we handle deferred plumbing of
		 * WEP keys when the sta reaches AUTH state.
		 */
		macaddr = vap->iv_bss->ni_bssid;
		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
			/* XXX plumb to local sta db too for static key wep */
			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
		}
	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
	    vap->iv_state != IEEE80211_S_RUN) {
		/*
		 * Prior to RUN state a WDS vap will not have its BSS
		 * node setup so we will plumb the key to the wrong mac
		 * address (it'll be our local address).  Workaround
		 * this for the moment by grabbing the correct address.
		 */
		macaddr = vap->iv_des_bssid;
	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
		macaddr = vap->iv_myaddr;
	else
		macaddr = mac;
	KEYPRINTF(sc, &hk, macaddr);	/* no-op unless MWL_DEBUG */
	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
#undef IEEE80211_IS_STATICKEY
#undef GRPXMIT
}
1709
/* unaligned little endian access */
/* Read a 16-bit little-endian value from a possibly unaligned pointer. */
#define LE_READ_2(p)				\
	((uint16_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8)))
/* Read a 32-bit little-endian value from a possibly unaligned pointer. */
#define LE_READ_4(p)				\
	((uint32_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8) |	\
	  (((const uint8_t *)(p))[2] << 16) |	\
	  (((const uint8_t *)(p))[3] << 24)))
1721
1722/*
1723 * Set the multicast filter contents into the hardware.
1724 * XXX f/w has no support; just defer to the os.
1725 */
/*
 * Set the multicast filter contents into the hardware.
 * XXX f/w has no support; just defer to the os.
 * NOTE(review): the #if 0 body below is dead code from an older
 * port -- it references `ifp' and `sc->sc_ec' which do not exist
 * in this driver; this function is currently a no-op.
 */
static void
mwl_setmcastfilter(struct mwl_softc *sc)
{
#if 0
	struct ether_multi *enm;
	struct ether_multistep estep;
	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
	uint8_t *mp;
	int nmc;

	mp = macs;
	nmc = 0;
	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
	while (enm != NULL) {
		/* XXX Punt on ranges. */
		if (nmc == MWL_HAL_MCAST_MAX ||
		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
			ifp->if_flags |= IFF_ALLMULTI;
			return;
		}
		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
		mp += IEEE80211_ADDR_LEN, nmc++;
		ETHER_NEXT_MULTI(estep, enm);
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
#endif
}
1754
1755static int
1756mwl_mode_init(struct mwl_softc *sc)
1757{
1758	struct ieee80211com *ic = &sc->sc_ic;
1759	struct mwl_hal *mh = sc->sc_mh;
1760
1761	/*
1762	 * NB: Ignore promisc in hostap mode; it's set by the
1763	 * bridge.  This is wrong but we have no way to
1764	 * identify internal requests (from the bridge)
1765	 * versus external requests such as for tcpdump.
1766	 */
1767	mwl_hal_setpromisc(mh, ic->ic_promisc > 0 &&
1768	    ic->ic_opmode != IEEE80211_M_HOSTAP);
1769	mwl_setmcastfilter(sc);
1770
1771	return 0;
1772}
1773
1774/*
1775 * Callback from the 802.11 layer after a multicast state change.
1776 */
1777static void
1778mwl_update_mcast(struct ieee80211com *ic)
1779{
1780	struct mwl_softc *sc = ic->ic_softc;
1781
1782	mwl_setmcastfilter(sc);
1783}
1784
1785/*
1786 * Callback from the 802.11 layer after a promiscuous mode change.
1787 * Note this interface does not check the operating mode as this
1788 * is an internal callback and we are expected to honor the current
1789 * state (e.g. this is used for setting the interface in promiscuous
1790 * mode when operating in hostap mode to do ACS).
1791 */
1792static void
1793mwl_update_promisc(struct ieee80211com *ic)
1794{
1795	struct mwl_softc *sc = ic->ic_softc;
1796
1797	mwl_hal_setpromisc(sc->sc_mh, ic->ic_promisc > 0);
1798}
1799
1800/*
1801 * Callback from the 802.11 layer to update the slot time
1802 * based on the current setting.  We use it to notify the
1803 * firmware of ERP changes and the f/w takes care of things
1804 * like slot time and preamble.
1805 */
1806static void
1807mwl_updateslot(struct ieee80211com *ic)
1808{
1809	struct mwl_softc *sc = ic->ic_softc;
1810	struct mwl_hal *mh = sc->sc_mh;
1811	int prot;
1812
1813	/* NB: can be called early; suppress needless cmds */
1814	if (!sc->sc_running)
1815		return;
1816
1817	/*
1818	 * Calculate the ERP flags.  The firwmare will use
1819	 * this to carry out the appropriate measures.
1820	 */
1821	prot = 0;
1822	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
1823		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
1824			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
1825		if (ic->ic_flags & IEEE80211_F_USEPROT)
1826			prot |= IEEE80211_ERP_USE_PROTECTION;
1827		if (ic->ic_flags & IEEE80211_F_USEBARKER)
1828			prot |= IEEE80211_ERP_LONG_PREAMBLE;
1829	}
1830
1831	DPRINTF(sc, MWL_DEBUG_RESET,
1832	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
1833	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
1834	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
1835	    ic->ic_flags);
1836
1837	mwl_hal_setgprot(mh, prot);
1838}
1839
1840/*
1841 * Setup the beacon frame.
1842 */
1843static int
1844mwl_beacon_setup(struct ieee80211vap *vap)
1845{
1846	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1847	struct ieee80211_node *ni = vap->iv_bss;
1848	struct ieee80211_beacon_offsets bo;
1849	struct mbuf *m;
1850
1851	m = ieee80211_beacon_alloc(ni, &bo);
1852	if (m == NULL)
1853		return ENOBUFS;
1854	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1855	m_free(m);
1856
1857	return 0;
1858}
1859
1860/*
1861 * Update the beacon frame in response to a change.
1862 */
/*
 * Update the beacon frame in response to a change.  Most items
 * fall through the switch and cause a full beacon rebuild below;
 * the TIM is an exception since the firmware forms it itself.
 */
static void
mwl_beacon_update(struct ieee80211vap *vap, int item)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	KASSERT(hvap != NULL, ("no beacon"));
	switch (item) {
	case IEEE80211_BEACON_ERP:
		/* also reprograms slot time/protection in the f/w */
		mwl_updateslot(ic);
		break;
	case IEEE80211_BEACON_HTINFO:
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		break;
	case IEEE80211_BEACON_CAPS:
	case IEEE80211_BEACON_WME:
	case IEEE80211_BEACON_APPIE:
	case IEEE80211_BEACON_CSA:
		/* handled by rebuilding the beacon below */
		break;
	case IEEE80211_BEACON_TIM:
		/* NB: firmware always forms TIM */
		return;
	}
	/* XXX retain beacon frame and update */
	mwl_beacon_setup(vap);
}
1890
1891static void
1892mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1893{
1894	bus_addr_t *paddr = (bus_addr_t*) arg;
1895	KASSERT(error == 0, ("error %u on bus_dma callback", error));
1896	*paddr = segs->ds_addr;
1897}
1898
1899#ifdef MWL_HOST_PS_SUPPORT
1900/*
1901 * Handle power save station occupancy changes.
1902 */
1903static void
1904mwl_update_ps(struct ieee80211vap *vap, int nsta)
1905{
1906	struct mwl_vap *mvp = MWL_VAP(vap);
1907
1908	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
1909		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
1910	mvp->mv_last_ps_sta = nsta;
1911}
1912
1913/*
1914 * Handle associated station power save state changes.
1915 */
1916static int
1917mwl_set_tim(struct ieee80211_node *ni, int set)
1918{
1919	struct ieee80211vap *vap = ni->ni_vap;
1920	struct mwl_vap *mvp = MWL_VAP(vap);
1921
1922	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1923		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1924		    IEEE80211_AID(ni->ni_associd), set);
1925		return 1;
1926	} else
1927		return 0;
1928}
1929#endif /* MWL_HOST_PS_SUPPORT */
1930
/*
 * Allocate a DMA-able descriptor area for nbuf buffers of ndesc
 * descriptors each: create a busdma tag, allocate and map the
 * memory, and zero it.  On failure resources are torn down via
 * the goto cleanup chain and *dd is cleared.  Returns 0 or an
 * errno from busdma.
 */
static int
mwl_desc_setup(struct mwl_softc *sc, const char *name,
	struct mwl_descdma *dd,
	int nbuf, size_t bufsize, int ndesc, size_t descsize)
{
	uint8_t *ds;
	int error;

	DPRINTF(sc, MWL_DEBUG_RESET,
	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
	    __func__, name, nbuf, (uintmax_t) bufsize,
	    ndesc, (uintmax_t) descsize);

	dd->dd_name = name;
	dd->dd_desc_len = nbuf * ndesc * descsize;

	/*
	 * Setup DMA descriptor area.
	 * NB: 32-bit lowaddr because the h/w uses 32-bit bus addresses.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
		       PAGE_SIZE, 0,		/* alignment, bounds */
		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		       BUS_SPACE_MAXADDR,	/* highaddr */
		       NULL, NULL,		/* filter, filterarg */
		       dd->dd_desc_len,		/* maxsize */
		       1,			/* nsegments */
		       dd->dd_desc_len,		/* maxsegsize */
		       BUS_DMA_ALLOCNOW,	/* flags */
		       NULL,			/* lockfunc */
		       NULL,			/* lockarg */
		       &dd->dd_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "cannot allocate %s DMA tag\n", dd->dd_name);
		return error;
	}

	/* allocate descriptors */
	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
				 &dd->dd_dmamap);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to alloc memory for %u %s descriptors, "
			"error %u\n", nbuf * ndesc, dd->dd_name, error);
		goto fail1;
	}

	/* map the memory; mwl_load_cb records the bus address */
	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
				dd->dd_desc, dd->dd_desc_len,
				mwl_load_cb, &dd->dd_desc_paddr,
				BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to map %s descriptors, error %u\n",
			dd->dd_name, error);
		goto fail2;
	}

	ds = dd->dd_desc;
	memset(ds, 0, dd->dd_desc_len);
	DPRINTF(sc, MWL_DEBUG_RESET,
	    "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n",
	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
	    (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);

	return 0;
fail2:
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
fail1:
	bus_dma_tag_destroy(dd->dd_dmat);
	memset(dd, 0, sizeof(*dd));
	return error;
	/* NOTE(review): stray #undef -- DS2PHYS is not defined in this
	 * function (it's defined later at file scope); harmless leftover. */
#undef DS2PHYS
}
2003
/*
 * Undo mwl_desc_setup: unload the DMA map, release the descriptor
 * memory, and destroy the tag, then clear dd so repeated cleanup
 * passes are harmless.
 * NB: the unload/free/destroy ordering is mandated by busdma.
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	memset(dd, 0, sizeof(*dd));
}
2013
2014/*
2015 * Construct a tx q's free list.  The order of entries on
2016 * the list must reflect the physical layout of tx descriptors
2017 * because the firmware pre-fetches descriptors.
2018 *
2019 * XXX might be better to use indices into the buffer array.
2020 */
2021static void
2022mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2023{
2024	struct mwl_txbuf *bf;
2025	int i;
2026
2027	bf = txq->dma.dd_bufptr;
2028	STAILQ_INIT(&txq->free);
2029	for (i = 0; i < mwl_txbuf; i++, bf++)
2030		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2031	txq->nfree = i;
2032}
2033
2034#define	DS2PHYS(_dd, _ds) \
2035	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2036
/*
 * Allocate the tx descriptor ring and the shadow mwl_txbuf array
 * for one h/w transmit queue and bind each buffer to its
 * descriptor block (MWL_TXDESC descriptors per buffer).
 * Returns 0 or an errno; on failure partially-created state is
 * left for the caller to reclaim (see mwl_dma_setup which invokes
 * mwl_dma_cleanup on error).
 */
static int
mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
{
	int error, bsize, i;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;

	/* descriptor ring: mwl_txbuf buffers x MWL_TXDESC descs each */
	error = mwl_desc_setup(sc, "tx", &txq->dma,
			mwl_txbuf, sizeof(struct mwl_txbuf),
			MWL_TXDESC, sizeof(struct mwl_txdesc));
	if (error != 0)
		return error;

	/* allocate and setup tx buffers */
	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
	if (bf == NULL) {
		device_printf(sc->sc_dev, "malloc of %u tx buffers failed\n",
			mwl_txbuf);
		/* NB: descriptor ring reclaimed by caller's cleanup */
		return ENOMEM;
	}
	txq->dma.dd_bufptr = bf;

	ds = txq->dma.dd_desc;
	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
		bf->bf_desc = ds;
		/* bus address of this buffer's first descriptor */
		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
				&bf->bf_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev, "unable to create dmamap for tx "
				"buffer %u, error %u\n", i, error);
			/* NB: maps created so far reclaimed by caller's cleanup */
			return error;
		}
	}
	mwl_txq_reset(sc, txq);
	return 0;
}
2075
2076static void
2077mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2078{
2079	struct mwl_txbuf *bf;
2080	int i;
2081
2082	bf = txq->dma.dd_bufptr;
2083	for (i = 0; i < mwl_txbuf; i++, bf++) {
2084		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2085		KASSERT(bf->bf_node == NULL, ("node on free list"));
2086		if (bf->bf_dmamap != NULL)
2087			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2088	}
2089	STAILQ_INIT(&txq->free);
2090	txq->nfree = 0;
2091	if (txq->dma.dd_bufptr != NULL) {
2092		free(txq->dma.dd_bufptr, M_MWLDEV);
2093		txq->dma.dd_bufptr = NULL;
2094	}
2095	if (txq->dma.dd_desc_len != 0)
2096		mwl_desc_cleanup(sc, &txq->dma);
2097}
2098
2099static int
2100mwl_rxdma_setup(struct mwl_softc *sc)
2101{
2102	int error, jumbosize, bsize, i;
2103	struct mwl_rxbuf *bf;
2104	struct mwl_jumbo *rbuf;
2105	struct mwl_rxdesc *ds;
2106	caddr_t data;
2107
2108	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2109			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2110			1, sizeof(struct mwl_rxdesc));
2111	if (error != 0)
2112		return error;
2113
2114	/*
2115	 * Receive is done to a private pool of jumbo buffers.
2116	 * This allows us to attach to mbuf's and avoid re-mapping
2117	 * memory on each rx we post.  We allocate a large chunk
2118	 * of memory and manage it in the driver.  The mbuf free
2119	 * callback method is used to reclaim frames after sending
2120	 * them up the stack.  By default we allocate 2x the number of
2121	 * rx descriptors configured so we have some slop to hold
2122	 * us while frames are processed.
2123	 */
2124	if (mwl_rxbuf < 2*mwl_rxdesc) {
2125		device_printf(sc->sc_dev,
2126		    "too few rx dma buffers (%d); increasing to %d\n",
2127		    mwl_rxbuf, 2*mwl_rxdesc);
2128		mwl_rxbuf = 2*mwl_rxdesc;
2129	}
2130	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2131	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2132
2133	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2134		       PAGE_SIZE, 0,		/* alignment, bounds */
2135		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2136		       BUS_SPACE_MAXADDR,	/* highaddr */
2137		       NULL, NULL,		/* filter, filterarg */
2138		       sc->sc_rxmemsize,	/* maxsize */
2139		       1,			/* nsegments */
2140		       sc->sc_rxmemsize,	/* maxsegsize */
2141		       BUS_DMA_ALLOCNOW,	/* flags */
2142		       NULL,			/* lockfunc */
2143		       NULL,			/* lockarg */
2144		       &sc->sc_rxdmat);
2145	if (error != 0) {
2146		device_printf(sc->sc_dev, "could not create rx DMA tag\n");
2147		return error;
2148	}
2149
2150	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2151				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2152				 &sc->sc_rxmap);
2153	if (error != 0) {
2154		device_printf(sc->sc_dev, "could not alloc %ju bytes of rx DMA memory\n",
2155		    (uintmax_t) sc->sc_rxmemsize);
2156		return error;
2157	}
2158
2159	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2160				sc->sc_rxmem, sc->sc_rxmemsize,
2161				mwl_load_cb, &sc->sc_rxmem_paddr,
2162				BUS_DMA_NOWAIT);
2163	if (error != 0) {
2164		device_printf(sc->sc_dev, "could not load rx DMA map\n");
2165		return error;
2166	}
2167
2168	/*
2169	 * Allocate rx buffers and set them up.
2170	 */
2171	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2172	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2173	if (bf == NULL) {
2174		device_printf(sc->sc_dev, "malloc of %u rx buffers failed\n", bsize);
2175		return error;
2176	}
2177	sc->sc_rxdma.dd_bufptr = bf;
2178
2179	STAILQ_INIT(&sc->sc_rxbuf);
2180	ds = sc->sc_rxdma.dd_desc;
2181	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2182		bf->bf_desc = ds;
2183		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2184		/* pre-assign dma buffer */
2185		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2186		/* NB: tail is intentional to preserve descriptor order */
2187		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2188	}
2189
2190	/*
2191	 * Place remainder of dma memory buffers on the free list.
2192	 */
2193	SLIST_INIT(&sc->sc_rxfree);
2194	for (; i < mwl_rxbuf; i++) {
2195		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2196		rbuf = MWL_JUMBO_DATA2BUF(data);
2197		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2198		sc->sc_nrxfree++;
2199	}
2200	return 0;
2201}
2202#undef DS2PHYS
2203
/*
 * Undo mwl_rxdma_setup.  Each guard makes the routine idempotent
 * and safe after a partial setup: unload the jumbo-pool map, free
 * the pool memory, free the shadow buffer array, and finally tear
 * down the rx descriptor ring.
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	if (sc->sc_rxmem_paddr != 0) {
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmem_paddr = 0;
	}
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
}
2222
2223static int
2224mwl_dma_setup(struct mwl_softc *sc)
2225{
2226	int error, i;
2227
2228	error = mwl_rxdma_setup(sc);
2229	if (error != 0) {
2230		mwl_rxdma_cleanup(sc);
2231		return error;
2232	}
2233
2234	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2235		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2236		if (error != 0) {
2237			mwl_dma_cleanup(sc);
2238			return error;
2239		}
2240	}
2241	return 0;
2242}
2243
2244static void
2245mwl_dma_cleanup(struct mwl_softc *sc)
2246{
2247	int i;
2248
2249	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2250		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2251	mwl_rxdma_cleanup(sc);
2252}
2253
2254static struct ieee80211_node *
2255mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2256{
2257	struct ieee80211com *ic = vap->iv_ic;
2258	struct mwl_softc *sc = ic->ic_softc;
2259	const size_t space = sizeof(struct mwl_node);
2260	struct mwl_node *mn;
2261
2262	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2263	if (mn == NULL) {
2264		/* XXX stat+msg */
2265		return NULL;
2266	}
2267	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2268	return &mn->mn_node;
2269}
2270
/*
 * Reclaim driver state when a node is reclaimed: delete the
 * station from the firmware station db through the appropriate
 * hal vap, release the h/w station id, then chain to the
 * net80211 cleanup method saved at attach (sc_node_cleanup).
 */
static void
mwl_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
        struct mwl_softc *sc = ic->ic_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
	    __func__, ni, ni->ni_ic, mn->mn_staid);

	if (mn->mn_staid != 0) {
		struct ieee80211vap *vap = ni->ni_vap;

		if (mn->mn_hvap != NULL) {
			/* NB: sta mode entries are keyed by our own address */
			if (vap->iv_opmode == IEEE80211_M_STA)
				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
			else
				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
		}
		/*
		 * NB: legacy WDS peer sta db entry is installed using
		 * the associate ap's hvap; use it again to delete it.
		 * XXX can vap be NULL?
		 */
		else if (vap->iv_opmode == IEEE80211_M_WDS &&
		    MWL_VAP(vap)->mv_ap_hvap != NULL)
			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
			    ni->ni_macaddr);
		/* return the station id for reuse */
		delstaid(sc, mn->mn_staid);
		mn->mn_staid = 0;
	}
	sc->sc_node_cleanup(ni);	/* chain to net80211's method */
}
2304
2305/*
2306 * Reclaim rx dma buffers from packets sitting on the ampdu
2307 * reorder queue for a station.  We replace buffers with a
2308 * system cluster (if available).
2309 */
2310static void
2311mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
2312{
2313#if 0
2314	int i, n, off;
2315	struct mbuf *m;
2316	void *cl;
2317
2318	n = rap->rxa_qframes;
2319	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
2320		m = rap->rxa_m[i];
2321		if (m == NULL)
2322			continue;
2323		n--;
2324		/* our dma buffers have a well-known free routine */
2325		if ((m->m_flags & M_EXT) == 0 ||
2326		    m->m_ext.ext_free != mwl_ext_free)
2327			continue;
2328		/*
2329		 * Try to allocate a cluster and move the data.
2330		 */
2331		off = m->m_data - m->m_ext.ext_buf;
2332		if (off + m->m_pkthdr.len > MCLBYTES) {
2333			/* XXX no AMSDU for now */
2334			continue;
2335		}
2336		cl = pool_cache_get_paddr(&mclpool_cache, 0,
2337		    &m->m_ext.ext_paddr);
2338		if (cl != NULL) {
2339			/*
2340			 * Copy the existing data to the cluster, remove
2341			 * the rx dma buffer, and attach the cluster in
2342			 * its place.  Note we preserve the offset to the
2343			 * data so frames being bridged can still prepend
2344			 * their headers without adding another mbuf.
2345			 */
2346			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
2347			MEXTREMOVE(m);
2348			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
2349			/* setup mbuf like _MCLGET does */
2350			m->m_flags |= M_CLUSTER | M_EXT_RW;
2351			_MOWNERREF(m, M_EXT | M_CLUSTER);
2352			/* NB: m_data is clobbered by MEXTADDR, adjust */
2353			m->m_data += off;
2354		}
2355	}
2356#endif
2357}
2358
2359/*
2360 * Callback to reclaim resources.  We first let the
2361 * net80211 layer do it's thing, then if we are still
2362 * blocked by a lack of rx dma buffers we walk the ampdu
2363 * reorder q's to reclaim buffers by copying to a system
2364 * cluster.
2365 */
2366static void
2367mwl_node_drain(struct ieee80211_node *ni)
2368{
2369	struct ieee80211com *ic = ni->ni_ic;
2370        struct mwl_softc *sc = ic->ic_softc;
2371	struct mwl_node *mn = MWL_NODE(ni);
2372
2373	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2374	    __func__, ni, ni->ni_vap, mn->mn_staid);
2375
2376	/* NB: call up first to age out ampdu q's */
2377	sc->sc_node_drain(ni);
2378
2379	/* XXX better to not check low water mark? */
2380	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2381	    (ni->ni_flags & IEEE80211_NODE_HT)) {
2382		uint8_t tid;
2383		/*
2384		 * Walk the reorder q and reclaim rx dma buffers by copying
2385		 * the packet contents into clusters.
2386		 */
2387		for (tid = 0; tid < WME_NUM_TID; tid++) {
2388			struct ieee80211_rx_ampdu *rap;
2389
2390			rap = &ni->ni_rx_ampdu[tid];
2391			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2392				continue;
2393			if (rap->rxa_qframes)
2394				mwl_ampdu_rxdma_reclaim(rap);
2395		}
2396	}
2397}
2398
/*
 * Return rssi/noise for a node.  RSSI comes from the net80211
 * method saved in ic; the noise floor is a fixed placeholder
 * (smoothed per-antenna data is not wired up yet).
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2414
2415/*
2416 * Convert Hardware per-antenna rssi info to common format:
2417 * Let a1, a2, a3 represent the amplitudes per chain
2418 * Let amax represent max[a1, a2, a3]
2419 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2420 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2421 * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2422 * maintain some extra precision.
2423 *
2424 * Values are stored in .5 db format capped at 127.
2425 */
2426static void
2427mwl_node_getmimoinfo(const struct ieee80211_node *ni,
2428	struct ieee80211_mimo_info *mi)
2429{
2430#define	CVT(_dst, _src) do {						\
2431	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
2432	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
2433} while (0)
2434	static const int8_t logdbtbl[32] = {
2435	       0,   0,  24,  38,  48,  56,  62,  68,
2436	      72,  76,  80,  83,  86,  89,  92,  94,
2437	      96,  98, 100, 102, 104, 106, 107, 109,
2438	     110, 112, 113, 115, 116, 117, 118, 119
2439	};
2440	const struct mwl_node *mn = MWL_NODE_CONST(ni);
2441	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
2442	uint32_t rssi_max;
2443
2444	rssi_max = mn->mn_ai.rssi_a;
2445	if (mn->mn_ai.rssi_b > rssi_max)
2446		rssi_max = mn->mn_ai.rssi_b;
2447	if (mn->mn_ai.rssi_c > rssi_max)
2448		rssi_max = mn->mn_ai.rssi_c;
2449
2450	CVT(mi->rssi[0], mn->mn_ai.rssi_a);
2451	CVT(mi->rssi[1], mn->mn_ai.rssi_b);
2452	CVT(mi->rssi[2], mn->mn_ai.rssi_c);
2453
2454	mi->noise[0] = mn->mn_ai.nf_a;
2455	mi->noise[1] = mn->mn_ai.nf_b;
2456	mi->noise[2] = mn->mn_ai.nf_c;
2457#undef CVT
2458}
2459
2460static __inline void *
2461mwl_getrxdma(struct mwl_softc *sc)
2462{
2463	struct mwl_jumbo *buf;
2464	void *data;
2465
2466	/*
2467	 * Allocate from jumbo pool.
2468	 */
2469	MWL_RXFREE_LOCK(sc);
2470	buf = SLIST_FIRST(&sc->sc_rxfree);
2471	if (buf == NULL) {
2472		DPRINTF(sc, MWL_DEBUG_ANY,
2473		    "%s: out of rx dma buffers\n", __func__);
2474		sc->sc_stats.mst_rx_nodmabuf++;
2475		data = NULL;
2476	} else {
2477		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2478		sc->sc_nrxfree--;
2479		data = MWL_JUMBO_BUF2DATA(buf);
2480	}
2481	MWL_RXFREE_UNLOCK(sc);
2482	return data;
2483}
2484
2485static __inline void
2486mwl_putrxdma(struct mwl_softc *sc, void *data)
2487{
2488	struct mwl_jumbo *buf;
2489
2490	/* XXX bounds check data */
2491	MWL_RXFREE_LOCK(sc);
2492	buf = MWL_JUMBO_DATA2BUF(data);
2493	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2494	sc->sc_nrxfree++;
2495	MWL_RXFREE_UNLOCK(sc);
2496}
2497
/*
 * (Re)initialize an rx descriptor for posting to the firmware.
 * If the buffer slot has no dma memory, try to get one; on
 * failure the descriptor is marked OS-owned so the firmware
 * skips it and ENOMEM is returned.  Otherwise the descriptor is
 * refilled and ownership handed back to the h/w.
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	/* NB: ownership transfer must be the last field written */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2536
2537static void
2538mwl_ext_free(struct mbuf *m, void *data, void *arg)
2539{
2540	struct mwl_softc *sc = arg;
2541
2542	/* XXX bounds check data */
2543	mwl_putrxdma(sc, data);
2544	/*
2545	 * If we were previously blocked by a lack of rx dma buffers
2546	 * check if we now have enough to restart rx interrupt handling.
2547	 * NB: we know we are called at splvm which is above splnet.
2548	 */
2549	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
2550		sc->sc_rxblocked = 0;
2551		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
2552	}
2553}
2554
/*
 * Layout of the fixed leading fields of a BAR (Block Ack Request)
 * control frame; used by mwl_anyhdrsize for the BAR subtype.
 */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];		/* frame control */
	u_int8_t	i_dur[2];		/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2562
2563/*
2564 * Like ieee80211_anyhdrsize, but handles BAR frames
2565 * specially so the logic below to piece the 802.11
2566 * header together works.
2567 */
2568static __inline int
2569mwl_anyhdrsize(const void *data)
2570{
2571	const struct ieee80211_frame *wh = data;
2572
2573	if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2574		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2575		case IEEE80211_FC0_SUBTYPE_CTS:
2576		case IEEE80211_FC0_SUBTYPE_ACK:
2577			return sizeof(struct ieee80211_frame_ack);
2578		case IEEE80211_FC0_SUBTYPE_BAR:
2579			return sizeof(struct mwl_frame_bar);
2580		}
2581		return sizeof(struct ieee80211_frame_min);
2582	} else
2583		return ieee80211_hdrsize(data);
2584}
2585
2586static void
2587mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2588{
2589	const struct ieee80211_frame *wh;
2590	struct ieee80211_node *ni;
2591
2592	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2593	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2594	if (ni != NULL) {
2595		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2596		ieee80211_free_node(ni);
2597	}
2598}
2599
2600/*
2601 * Convert hardware signal strength to rssi.  The value
2602 * provided by the device has the noise floor added in;
2603 * we need to compensate for this but we don't have that
2604 * so we use a fixed value.
2605 *
2606 * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2607 * offset is already set as part of the initial gain.  This
2608 * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2609 */
2610static __inline int
2611cvtrssi(uint8_t ssi)
2612{
2613	int rssi = (int) ssi + 8;
2614	/* XXX hack guess until we have a real noise floor */
2615	rssi = 2*(87 - rssi);	/* NB: .5 dBm units */
2616	return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi);
2617}
2618
/*
 * Deferred receive processing (taskqueue context).  Walks the rx
 * descriptor ring up to mwl_rxquota frames: for each firmware-
 * completed descriptor, replace its jumbo dma buffer, wrap the
 * old buffer in an mbuf (zero-copy via MEXTADD/mwl_ext_free),
 * reconstruct the 802.11 header the firmware partially strips,
 * and hand the frame to net80211.  If the jumbo pool runs dry,
 * rx interrupts are masked until mwl_ext_free refills the pool.
 */
static void
mwl_rx_proc(void *arg, int npending)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((((const struct ieee80211_frame *)wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_qosframe_addr4 *wh4;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX */
	bf = sc->sc_rxnext;		/* resume where we last stopped */
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* stop at the first descriptor the firmware still owns */
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			counter_u64_add(ic->ic_ierrors, 1);
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free,
		    data, sc, 0, EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh)) {
			/* restore the QoS control field stripped by the f/w */
			if (IEEE80211_DIR_DSTODS(wh)) {
				wh4 = mtod(m,
				    struct ieee80211_qosframe_addr4*);
				*(uint16_t *)wh4->i_qos = ds->QosCtrl;
			} else {
				*(uint16_t *)wh->i_qos = ds->QosCtrl;
			}
		}
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED |
		    IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	sc->sc_rxnext = bf;

	if (mbufq_first(&sc->sc_snd) != NULL) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(sc);
	}
#undef IEEE80211_DIR_DSTODS
}
2850
/*
 * One-time init of a tx queue: set up the lock and link every
 * descriptor's pPhysNext to the next buffer's descriptor (the
 * last wraps to the first, forming the circular ring the
 * firmware walks).  The free list itself was built by
 * mwl_txdma_setup/mwl_txq_reset.
 */
static void
mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
{
	struct mwl_txbuf *bf, *bn;
	struct mwl_txdesc *ds;

	MWL_TXQ_LOCK_INIT(sc, txq);
	txq->qnum = qnum;
	txq->txpri = 0;	/* XXX */
#if 0
	/* NB: q setup by mwl_txdma_setup XXX */
	STAILQ_INIT(&txq->free);
#endif
	STAILQ_FOREACH(bf, &txq->free, bf_list) {
		bf->bf_txq = txq;

		ds = bf->bf_desc;
		bn = STAILQ_NEXT(bf, bf_list);
		if (bn == NULL)		/* last entry wraps to the head */
			bn = STAILQ_FIRST(&txq->free);
		ds->pPhysNext = htole32(bn->bf_daddr);
	}
	STAILQ_INIT(&txq->active);
}
2875
2876/*
2877 * Setup a hardware data transmit queue for the specified
2878 * access control.  We record the mapping from ac's
2879 * to h/w queues for use by mwl_tx_start.
2880 */
2881static int
2882mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2883{
2884#define	N(a)	(sizeof(a)/sizeof(a[0]))
2885	struct mwl_txq *txq;
2886
2887	if (ac >= N(sc->sc_ac2q)) {
2888		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2889			ac, N(sc->sc_ac2q));
2890		return 0;
2891	}
2892	if (mvtype >= MWL_NUM_TX_QUEUES) {
2893		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2894			mvtype, MWL_NUM_TX_QUEUES);
2895		return 0;
2896	}
2897	txq = &sc->sc_txq[mvtype];
2898	mwl_txq_init(sc, txq, mvtype);
2899	sc->sc_ac2q[ac] = txq;
2900	return 1;
2901#undef N
2902}
2903
2904/*
2905 * Update WME parameters for a transmit queue.
2906 */
2907static int
2908mwl_txq_update(struct mwl_softc *sc, int ac)
2909{
2910#define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
2911	struct ieee80211com *ic = &sc->sc_ic;
2912	struct mwl_txq *txq = sc->sc_ac2q[ac];
2913	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
2914	struct mwl_hal *mh = sc->sc_mh;
2915	int aifs, cwmin, cwmax, txoplim;
2916
2917	aifs = wmep->wmep_aifsn;
2918	/* XXX in sta mode need to pass log values for cwmin/max */
2919	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2920	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2921	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */
2922
2923	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
2924		device_printf(sc->sc_dev, "unable to update hardware queue "
2925			"parameters for %s traffic!\n",
2926			ieee80211_wme_acnames[ac]);
2927		return 0;
2928	}
2929	return 1;
2930#undef MWL_EXPONENT_TO_VALUE
2931}
2932
2933/*
2934 * Callback from the 802.11 layer to update WME parameters.
2935 */
2936static int
2937mwl_wme_update(struct ieee80211com *ic)
2938{
2939	struct mwl_softc *sc = ic->ic_softc;
2940
2941	return !mwl_txq_update(sc, WME_AC_BE) ||
2942	    !mwl_txq_update(sc, WME_AC_BK) ||
2943	    !mwl_txq_update(sc, WME_AC_VI) ||
2944	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
2945}
2946
2947/*
2948 * Reclaim resources for a setup queue.
2949 */
2950static void
2951mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
2952{
2953	/* XXX hal work? */
2954	MWL_TXQ_LOCK_DESTROY(txq);
2955}
2956
2957/*
2958 * Reclaim all tx queue resources.
2959 */
2960static void
2961mwl_tx_cleanup(struct mwl_softc *sc)
2962{
2963	int i;
2964
2965	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2966		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
2967}
2968
/*
 * DMA-map an outbound mbuf chain into bf->bf_segs.  If the chain
 * needs more than MWL_TXDESC segments it is collapsed/defragged
 * and re-loaded.  On success the map is synced for the device and
 * the mbuf recorded in bf->bf_m; on failure the mbuf is freed and
 * an errno returned (caller must not touch m0 afterwards).
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = MWL_TXDESC+1;	/* NB: marker, handled below */
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_NOWAIT);
#endif
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* retry the load with the linearized chain */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3030
/*
 * Map a legacy IEEE rate code (2 => 1Mb/s, 108 => 54Mb/s) to the
 * rate index used in the tx descriptor Format field; the table
 * mirrors the one in mwl_cvtlegacyrix.  Unknown rates map to 0.
 */
static __inline int
mwl_cvtlegacyrate(int rate)
{
	static const int dot11rates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	int ix;

	for (ix = 0; ix < (int)(sizeof(dot11rates)/sizeof(dot11rates[0])); ix++)
		if (dot11rates[ix] == rate)
			return ix;
	return 0;
}
3051
3052/*
3053 * Calculate fixed tx rate information per client state;
3054 * this value is suitable for writing to the Format field
3055 * of a tx descriptor.
3056 */
3057static uint16_t
3058mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
3059{
3060	uint16_t fmt;
3061
3062	fmt = SM(3, EAGLE_TXD_ANTENNA)
3063	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
3064		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
3065	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
3066		fmt |= EAGLE_TXD_FORMAT_HT
3067		    /* NB: 0x80 implicitly stripped from ucastrate */
3068		    | SM(rate, EAGLE_TXD_RATE);
3069		/* XXX short/long GI may be wrong; re-check */
3070		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
3071			fmt |= EAGLE_TXD_CHW_40
3072			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
3073			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3074		} else {
3075			fmt |= EAGLE_TXD_CHW_20
3076			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
3077			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3078		}
3079	} else {			/* legacy rate */
3080		fmt |= EAGLE_TXD_FORMAT_LEGACY
3081		    | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
3082		    | EAGLE_TXD_CHW_20
3083		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
3084		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
3085			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
3086	}
3087	return fmt;
3088}
3089
3090static int
3091mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
3092    struct mbuf *m0)
3093{
3094#define	IEEE80211_DIR_DSTODS(wh) \
3095	((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
3096	struct ieee80211com *ic = &sc->sc_ic;
3097	struct ieee80211vap *vap = ni->ni_vap;
3098	int error, iswep, ismcast;
3099	int hdrlen, copyhdrlen, pktlen;
3100	struct mwl_txdesc *ds;
3101	struct mwl_txq *txq;
3102	struct ieee80211_frame *wh;
3103	struct mwltxrec *tr;
3104	struct mwl_node *mn;
3105	uint16_t qos;
3106#if MWL_TXDESC > 1
3107	int i;
3108#endif
3109
3110	wh = mtod(m0, struct ieee80211_frame *);
3111	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
3112	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
3113	hdrlen = ieee80211_anyhdrsize(wh);
3114	copyhdrlen = hdrlen;
3115	pktlen = m0->m_pkthdr.len;
3116	if (IEEE80211_QOS_HAS_SEQ(wh)) {
3117		if (IEEE80211_DIR_DSTODS(wh)) {
3118			qos = *(uint16_t *)
3119			    (((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
3120			copyhdrlen -= sizeof(qos);
3121		} else
3122			qos = *(uint16_t *)
3123			    (((struct ieee80211_qosframe *) wh)->i_qos);
3124	} else
3125		qos = 0;
3126
3127	if (iswep) {
3128		const struct ieee80211_cipher *cip;
3129		struct ieee80211_key *k;
3130
3131		/*
3132		 * Construct the 802.11 header+trailer for an encrypted
3133		 * frame. The only reason this can fail is because of an
3134		 * unknown or unsupported cipher/key type.
3135		 *
3136		 * NB: we do this even though the firmware will ignore
3137		 *     what we've done for WEP and TKIP as we need the
3138		 *     ExtIV filled in for CCMP and this also adjusts
3139		 *     the headers which simplifies our work below.
3140		 */
3141		k = ieee80211_crypto_encap(ni, m0);
3142		if (k == NULL) {
3143			/*
3144			 * This can happen when the key is yanked after the
3145			 * frame was queued.  Just discard the frame; the
3146			 * 802.11 layer counts failures and provides
3147			 * debugging/diagnostics.
3148			 */
3149			m_freem(m0);
3150			return EIO;
3151		}
3152		/*
3153		 * Adjust the packet length for the crypto additions
3154		 * done during encap and any other bits that the f/w
3155		 * will add later on.
3156		 */
3157		cip = k->wk_cipher;
3158		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;
3159
3160		/* packet header may have moved, reset our local pointer */
3161		wh = mtod(m0, struct ieee80211_frame *);
3162	}
3163
3164	if (ieee80211_radiotap_active_vap(vap)) {
3165		sc->sc_tx_th.wt_flags = 0;	/* XXX */
3166		if (iswep)
3167			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3168#if 0
3169		sc->sc_tx_th.wt_rate = ds->DataRate;
3170#endif
3171		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
3172		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
3173
3174		ieee80211_radiotap_tx(vap, m0);
3175	}
3176	/*
3177	 * Copy up/down the 802.11 header; the firmware requires
3178	 * we present a 2-byte payload length followed by a
3179	 * 4-address header (w/o QoS), followed (optionally) by
3180	 * any WEP/ExtIV header (but only filled in for CCMP).
3181	 * We are assured the mbuf has sufficient headroom to
3182	 * prepend in-place by the setup of ic_headroom in
3183	 * mwl_attach.
3184	 */
3185	if (hdrlen < sizeof(struct mwltxrec)) {
3186		const int space = sizeof(struct mwltxrec) - hdrlen;
3187		if (M_LEADINGSPACE(m0) < space) {
3188			/* NB: should never happen */
3189			device_printf(sc->sc_dev,
3190			    "not enough headroom, need %d found %zd, "
3191			    "m_flags 0x%x m_len %d\n",
3192			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
3193			ieee80211_dump_pkt(ic,
3194			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
3195			m_freem(m0);
3196			sc->sc_stats.mst_tx_noheadroom++;
3197			return EIO;
3198		}
3199		M_PREPEND(m0, space, M_NOWAIT);
3200	}
3201	tr = mtod(m0, struct mwltxrec *);
3202	if (wh != (struct ieee80211_frame *) &tr->wh)
3203		ovbcopy(wh, &tr->wh, hdrlen);
3204	/*
3205	 * Note: the "firmware length" is actually the length
3206	 * of the fully formed "802.11 payload".  That is, it's
3207	 * everything except for the 802.11 header.  In particular
3208	 * this includes all crypto material including the MIC!
3209	 */
3210	tr->fwlen = htole16(pktlen - hdrlen);
3211
3212	/*
3213	 * Load the DMA map so any coalescing is done.  This
3214	 * also calculates the number of descriptors we need.
3215	 */
3216	error = mwl_tx_dmasetup(sc, bf, m0);
3217	if (error != 0) {
3218		/* NB: stat collected in mwl_tx_dmasetup */
3219		DPRINTF(sc, MWL_DEBUG_XMIT,
3220		    "%s: unable to setup dma\n", __func__);
3221		return error;
3222	}
3223	bf->bf_node = ni;			/* NB: held reference */
3224	m0 = bf->bf_m;				/* NB: may have changed */
3225	tr = mtod(m0, struct mwltxrec *);
3226	wh = (struct ieee80211_frame *)&tr->wh;
3227
3228	/*
3229	 * Formulate tx descriptor.
3230	 */
3231	ds = bf->bf_desc;
3232	txq = bf->bf_txq;
3233
3234	ds->QosCtrl = qos;			/* NB: already little-endian */
3235#if MWL_TXDESC == 1
3236	/*
3237	 * NB: multiframes should be zero because the descriptors
3238	 *     are initialized to zero.  This should handle the case
3239	 *     where the driver is built with MWL_TXDESC=1 but we are
3240	 *     using firmware with multi-segment support.
3241	 */
3242	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
3243	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
3244#else
3245	ds->multiframes = htole32(bf->bf_nseg);
3246	ds->PktLen = htole16(m0->m_pkthdr.len);
3247	for (i = 0; i < bf->bf_nseg; i++) {
3248		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
3249		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
3250	}
3251#endif
3252	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
3253	ds->Format = 0;
3254	ds->pad = 0;
3255	ds->ack_wcb_addr = 0;
3256
3257	mn = MWL_NODE(ni);
3258	/*
3259	 * Select transmit rate.
3260	 */
3261	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
3262	case IEEE80211_FC0_TYPE_MGT:
3263		sc->sc_stats.mst_tx_mgmt++;
3264		/* fall thru... */
3265	case IEEE80211_FC0_TYPE_CTL:
3266		/* NB: assign to BE q to avoid bursting */
3267		ds->TxPriority = MWL_WME_AC_BE;
3268		break;
3269	case IEEE80211_FC0_TYPE_DATA:
3270		if (!ismcast) {
3271			const struct ieee80211_txparam *tp = ni->ni_txparms;
3272			/*
3273			 * EAPOL frames get forced to a fixed rate and w/o
3274			 * aggregation; otherwise check for any fixed rate
3275			 * for the client (may depend on association state).
3276			 */
3277			if (m0->m_flags & M_EAPOL) {
3278				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
3279				ds->Format = mvp->mv_eapolformat;
3280				ds->pad = htole16(
3281				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
3282			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3283				/* XXX pre-calculate per node */
3284				ds->Format = htole16(
3285				    mwl_calcformat(tp->ucastrate, ni));
3286				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
3287			}
3288			/* NB: EAPOL frames will never have qos set */
3289			if (qos == 0)
3290				ds->TxPriority = txq->qnum;
3291#if MWL_MAXBA > 3
3292			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
3293				ds->TxPriority = mn->mn_ba[3].txq;
3294#endif
3295#if MWL_MAXBA > 2
3296			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
3297				ds->TxPriority = mn->mn_ba[2].txq;
3298#endif
3299#if MWL_MAXBA > 1
3300			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
3301				ds->TxPriority = mn->mn_ba[1].txq;
3302#endif
3303#if MWL_MAXBA > 0
3304			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
3305				ds->TxPriority = mn->mn_ba[0].txq;
3306#endif
3307			else
3308				ds->TxPriority = txq->qnum;
3309		} else
3310			ds->TxPriority = txq->qnum;
3311		break;
3312	default:
3313		device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
3314			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
3315		sc->sc_stats.mst_tx_badframetype++;
3316		m_freem(m0);
3317		return EIO;
3318	}
3319
3320	if (IFF_DUMPPKTS_XMIT(sc))
3321		ieee80211_dump_pkt(ic,
3322		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
3323		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);
3324
3325	MWL_TXQ_LOCK(txq);
3326	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
3327	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
3328	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3329
3330	sc->sc_tx_timer = 5;
3331	MWL_TXQ_UNLOCK(txq);
3332
3333	return 0;
3334#undef	IEEE80211_DIR_DSTODS
3335}
3336
/*
 * Inverse of mwl_cvtlegacyrate: map a legacy rate index from a
 * tx descriptor Format field back to the IEEE rate code.
 * Out-of-range indices map to 0.
 */
static __inline int
mwl_cvtlegacyrix(int rix)
{
	static const int ieeerates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	const int nrates = (int)(sizeof(ieeerates) / sizeof(ieeerates[0]));

	return (rix >= 0 && rix < nrates) ? ieeerates[rix] : 0;
}
3346
3347/*
3348 * Process completed xmit descriptors from the specified queue.
3349 */
3350static int
3351mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
3352{
3353#define	EAGLE_TXD_STATUS_MCAST \
3354	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
3355	struct ieee80211com *ic = &sc->sc_ic;
3356	struct mwl_txbuf *bf;
3357	struct mwl_txdesc *ds;
3358	struct ieee80211_node *ni;
3359	struct mwl_node *an;
3360	int nreaped;
3361	uint32_t status;
3362
3363	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
3364	for (nreaped = 0;; nreaped++) {
3365		MWL_TXQ_LOCK(txq);
3366		bf = STAILQ_FIRST(&txq->active);
3367		if (bf == NULL) {
3368			MWL_TXQ_UNLOCK(txq);
3369			break;
3370		}
3371		ds = bf->bf_desc;
3372		MWL_TXDESC_SYNC(txq, ds,
3373		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3374		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
3375			MWL_TXQ_UNLOCK(txq);
3376			break;
3377		}
3378		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
3379		MWL_TXQ_UNLOCK(txq);
3380
3381#ifdef MWL_DEBUG
3382		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
3383			mwl_printtxbuf(bf, txq->qnum, nreaped);
3384#endif
3385		ni = bf->bf_node;
3386		if (ni != NULL) {
3387			an = MWL_NODE(ni);
3388			status = le32toh(ds->Status);
3389			if (status & EAGLE_TXD_STATUS_OK) {
3390				uint16_t Format = le16toh(ds->Format);
3391				uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);
3392
3393				sc->sc_stats.mst_ant_tx[txant]++;
3394				if (status & EAGLE_TXD_STATUS_OK_RETRY)
3395					sc->sc_stats.mst_tx_retries++;
3396				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
3397					sc->sc_stats.mst_tx_mretries++;
3398				if (txq->qnum >= MWL_WME_AC_VO)
3399					ic->ic_wme.wme_hipri_traffic++;
3400				ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
3401				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
3402					ni->ni_txrate = mwl_cvtlegacyrix(
3403					    ni->ni_txrate);
3404				} else
3405					ni->ni_txrate |= IEEE80211_RATE_MCS;
3406				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
3407			} else {
3408				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
3409					sc->sc_stats.mst_tx_linkerror++;
3410				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
3411					sc->sc_stats.mst_tx_xretries++;
3412				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
3413					sc->sc_stats.mst_tx_aging++;
3414				if (bf->bf_m->m_flags & M_FF)
3415					sc->sc_stats.mst_ff_txerr++;
3416			}
3417			if (bf->bf_m->m_flags & M_TXCB)
3418				/* XXX strip fw len in case header inspected */
3419				m_adj(bf->bf_m, sizeof(uint16_t));
3420			ieee80211_tx_complete(ni, bf->bf_m,
3421			    (status & EAGLE_TXD_STATUS_OK) == 0);
3422		} else
3423			m_freem(bf->bf_m);
3424		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);
3425
3426		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3427		    BUS_DMASYNC_POSTWRITE);
3428		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3429
3430		mwl_puttxbuf_tail(txq, bf);
3431	}
3432	return nreaped;
3433#undef EAGLE_TXD_STATUS_MCAST
3434}
3435
3436/*
3437 * Deferred processing of transmit interrupt; special-cased
3438 * for four hardware queues, 0-3.
3439 */
3440static void
3441mwl_tx_proc(void *arg, int npending)
3442{
3443	struct mwl_softc *sc = arg;
3444	int nreaped;
3445
3446	/*
3447	 * Process each active queue.
3448	 */
3449	nreaped = 0;
3450	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3451		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3452	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3453		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3454	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3455		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3456	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3457		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3458
3459	if (nreaped != 0) {
3460		sc->sc_tx_timer = 0;
3461		if (mbufq_first(&sc->sc_snd) != NULL) {
3462			/* NB: kick fw; the tx thread may have been preempted */
3463			mwl_hal_txstart(sc->sc_mh, 0);
3464			mwl_start(sc);
3465		}
3466	}
3467}
3468
/*
 * Drain one transmit queue: pop every buffer off the active
 * list, unload its DMA map, release the node reference (if any)
 * and free the mbuf, then return the buffer to the free list.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ieee80211com *ic = &sc->sc_ic;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			/* NB: skip the 2-byte fwlen prefix when dumping */
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3512
3513/*
3514 * Drain the transmit queues and reclaim resources.
3515 */
3516static void
3517mwl_draintxq(struct mwl_softc *sc)
3518{
3519	int i;
3520
3521	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3522		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3523	sc->sc_tx_timer = 0;
3524}
3525
3526#ifdef MWL_DIAGAPI
3527/*
3528 * Reset the transmit queues to a pristine state after a fw download.
3529 */
3530static void
3531mwl_resettxq(struct mwl_softc *sc)
3532{
3533	int i;
3534
3535	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3536		mwl_txq_reset(sc, &sc->sc_txq[i]);
3537}
3538#endif /* MWL_DIAGAPI */
3539
3540/*
3541 * Clear the transmit queues of any frames submitted for the
3542 * specified vap.  This is done when the vap is deleted so we
3543 * don't potentially reference the vap after it is gone.
3544 * Note we cannot remove the frames; we only reclaim the node
3545 * reference.
3546 */
3547static void
3548mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3549{
3550	struct mwl_txq *txq;
3551	struct mwl_txbuf *bf;
3552	int i;
3553
3554	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3555		txq = &sc->sc_txq[i];
3556		MWL_TXQ_LOCK(txq);
3557		STAILQ_FOREACH(bf, &txq->active, bf_list) {
3558			struct ieee80211_node *ni = bf->bf_node;
3559			if (ni != NULL && ni->ni_vap == vap) {
3560				bf->bf_node = NULL;
3561				ieee80211_free_node(ni);
3562			}
3563		}
3564		MWL_TXQ_UNLOCK(txq);
3565	}
3566}
3567
3568static int
3569mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
3570	const uint8_t *frm, const uint8_t *efrm)
3571{
3572	struct mwl_softc *sc = ni->ni_ic->ic_softc;
3573	const struct ieee80211_action *ia;
3574
3575	ia = (const struct ieee80211_action *) frm;
3576	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3577	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3578		const struct ieee80211_action_ht_mimopowersave *mps =
3579		    (const struct ieee80211_action_ht_mimopowersave *) ia;
3580
3581		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3582		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3583		    MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
3584		return 0;
3585	} else
3586		return sc->sc_recv_action(ni, wh, frm, efrm);
3587}
3588
/*
 * ADDBA request hook: reserve a h/w BA stream slot and allocate
 * a f/w bastream for the TID before letting net80211 send the
 * request.  If no slot or f/w stream is available return 0 so
 * no a-mpdu aggregation will be attempted; otherwise seed the
 * starting sequence number from the firmware and defer to the
 * saved net80211 handler.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 * NB: the #if chain probes slots highest-first and
		 *     falls into the brace block when all are busy.
		 */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3660
/*
 * ADDBA response hook: on success tell the firmware to create
 * the BA stream pre-allocated in mwl_addba_request (tearing it
 * down again if creation fails); on NAK release the stream.
 * In all cases the saved net80211 handler is invoked last.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, TID %d\n",
		    __func__, tap->txa_tid);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d TID %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_tid, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3727
3728static void
3729mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3730{
3731	struct mwl_softc *sc = ni->ni_ic->ic_softc;
3732	struct mwl_bastate *bas;
3733
3734	bas = tap->txa_private;
3735	if (bas != NULL) {
3736		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3737		    __func__, bas->bastream);
3738		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3739		mwl_bastream_free(bas);
3740		tap->txa_private = NULL;
3741	}
3742	sc->sc_addba_stop(ni, tap);
3743}
3744
3745/*
3746 * Setup the rx data structures.  This should only be
3747 * done once or we may get out of sync with the firmware.
3748 */
3749static int
3750mwl_startrecv(struct mwl_softc *sc)
3751{
3752	if (!sc->sc_recvsetup) {
3753		struct mwl_rxbuf *bf, *prev;
3754		struct mwl_rxdesc *ds;
3755
3756		prev = NULL;
3757		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
3758			int error = mwl_rxbuf_init(sc, bf);
3759			if (error != 0) {
3760				DPRINTF(sc, MWL_DEBUG_RECV,
3761					"%s: mwl_rxbuf_init failed %d\n",
3762					__func__, error);
3763				return error;
3764			}
3765			if (prev != NULL) {
3766				ds = prev->bf_desc;
3767				ds->pPhysNext = htole32(bf->bf_daddr);
3768			}
3769			prev = bf;
3770		}
3771		if (prev != NULL) {
3772			ds = prev->bf_desc;
3773			ds->pPhysNext =
3774			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
3775		}
3776		sc->sc_recvsetup = 1;
3777	}
3778	mwl_mode_init(sc);		/* set filters, etc. */
3779	return 0;
3780}
3781
3782static MWL_HAL_APMODE
3783mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3784{
3785	MWL_HAL_APMODE mode;
3786
3787	if (IEEE80211_IS_CHAN_HT(chan)) {
3788		if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3789			mode = AP_MODE_N_ONLY;
3790		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3791			mode = AP_MODE_AandN;
3792		else if (vap->iv_flags & IEEE80211_F_PUREG)
3793			mode = AP_MODE_GandN;
3794		else
3795			mode = AP_MODE_BandGandN;
3796	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3797		if (vap->iv_flags & IEEE80211_F_PUREG)
3798			mode = AP_MODE_G_ONLY;
3799		else
3800			mode = AP_MODE_MIXED;
3801	} else if (IEEE80211_IS_CHAN_B(chan))
3802		mode = AP_MODE_B_ONLY;
3803	else if (IEEE80211_IS_CHAN_A(chan))
3804		mode = AP_MODE_A_ONLY;
3805	else
3806		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3807	return mode;
3808}
3809
/*
 * Push the AP operating mode for the channel to the firmware
 * via the vap's HAL handle.  Returns the HAL status.
 */
static int
mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
}
3816
3817/*
3818 * Set/change channels.
3819 */
3820static int
3821mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
3822{
3823	struct mwl_hal *mh = sc->sc_mh;
3824	struct ieee80211com *ic = &sc->sc_ic;
3825	MWL_HAL_CHANNEL hchan;
3826	int maxtxpow;
3827
3828	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
3829	    __func__, chan->ic_freq, chan->ic_flags);
3830
3831	/*
3832	 * Convert to a HAL channel description with
3833	 * the flags constrained to reflect the current
3834	 * operating mode.
3835	 */
3836	mwl_mapchan(&hchan, chan);
3837	mwl_hal_intrset(mh, 0);		/* disable interrupts */
3838#if 0
3839	mwl_draintxq(sc);		/* clear pending tx frames */
3840#endif
3841	mwl_hal_setchannel(mh, &hchan);
3842	/*
3843	 * Tx power is cap'd by the regulatory setting and
3844	 * possibly a user-set limit.  We pass the min of
3845	 * these to the hal to apply them to the cal data
3846	 * for this channel.
3847	 * XXX min bound?
3848	 */
3849	maxtxpow = 2*chan->ic_maxregpower;
3850	if (maxtxpow > ic->ic_txpowlimit)
3851		maxtxpow = ic->ic_txpowlimit;
3852	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
3853	/* NB: potentially change mcast/mgt rates */
3854	mwl_setcurchanrates(sc);
3855
3856	/*
3857	 * Update internal state.
3858	 */
3859	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
3860	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
3861	if (IEEE80211_IS_CHAN_A(chan)) {
3862		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
3863		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
3864	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3865		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
3866		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
3867	} else {
3868		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
3869		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
3870	}
3871	sc->sc_curchan = hchan;
3872	mwl_hal_intrset(mh, sc->sc_imask);
3873
3874	return 0;
3875}
3876
/*
 * net80211 scan-start callback; no device state to change here,
 * only a debug trace is emitted.
 */
static void
mwl_scan_start(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3884
/*
 * net80211 scan-end callback; no device state to change here,
 * only a debug trace is emitted.
 */
static void
mwl_scan_end(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3892
/*
 * net80211 channel-change callback; program the current channel
 * into the hardware (result intentionally ignored).
 */
static void
mwl_set_channel(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	(void) mwl_chan_set(sc, ic->ic_curchan);
}
3900
3901/*
3902 * Handle a channel switch request.  We inform the firmware
3903 * and mark the global state to suppress various actions.
3904 * NB: we issue only one request to the fw; we may be called
3905 * multiple times if there are multiple vap's.
3906 */
3907static void
3908mwl_startcsa(struct ieee80211vap *vap)
3909{
3910	struct ieee80211com *ic = vap->iv_ic;
3911	struct mwl_softc *sc = ic->ic_softc;
3912	MWL_HAL_CHANNEL hchan;
3913
3914	if (sc->sc_csapending)
3915		return;
3916
3917	mwl_mapchan(&hchan, ic->ic_csa_newchan);
3918	/* 1 =>'s quiet channel */
3919	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
3920	sc->sc_csapending = 1;
3921}
3922
3923/*
3924 * Plumb any static WEP key for the station.  This is
3925 * necessary as we must propagate the key from the
3926 * global key table of the vap to each sta db entry.
3927 */
3928static void
3929mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3930{
3931	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
3932		IEEE80211_F_PRIVACY &&
3933	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
3934	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
3935		(void) mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac);
3936}
3937
/*
 * Create/update the firmware sta db entry for a peer station
 * and (on success) re-plumb any static WEP key for it.  WDS
 * vaps piggyback on their AP vap's f/w handle.  Returns the
 * HAL status from mwl_hal_newstation.
 */
static int
mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_hal_vap *hvap;
	int error;

	if (vap->iv_opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vap's do not have a f/w vap; instead they piggyback
		 * on an AP vap and we must install the sta db entry and
		 * crypto state using that AP's handle (the WDS vap has none).
		 */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	} else
		hvap = MWL_VAP(vap)->mv_hvap;
	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
	    aid, staid, pi,
	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
	if (error == 0) {
		/*
		 * Setup security for this station.  For sta mode this is
		 * needed even though do the same thing on transition to
		 * AUTH state because the call to mwl_hal_newstation
		 * clobbers the crypto state we setup.
		 */
		mwl_setanywepkey(vap, ni->ni_macaddr);
	}
	return error;
#undef WME
}
3971
3972static void
3973mwl_setglobalkeys(struct ieee80211vap *vap)
3974{
3975	struct ieee80211_key *wk;
3976
3977	wk = &vap->iv_nw_keys[0];
3978	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
3979		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
3980			(void) mwl_key_set(vap, wk, vap->iv_myaddr);
3981}
3982
3983/*
3984 * Convert a legacy rate set to a firmware bitmask.
3985 */
3986static uint32_t
3987get_rate_bitmap(const struct ieee80211_rateset *rs)
3988{
3989	uint32_t rates;
3990	int i;
3991
3992	rates = 0;
3993	for (i = 0; i < rs->rs_nrates; i++)
3994		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
3995		case 2:	  rates |= 0x001; break;
3996		case 4:	  rates |= 0x002; break;
3997		case 11:  rates |= 0x004; break;
3998		case 22:  rates |= 0x008; break;
3999		case 44:  rates |= 0x010; break;
4000		case 12:  rates |= 0x020; break;
4001		case 18:  rates |= 0x040; break;
4002		case 24:  rates |= 0x080; break;
4003		case 36:  rates |= 0x100; break;
4004		case 48:  rates |= 0x200; break;
4005		case 72:  rates |= 0x400; break;
4006		case 96:  rates |= 0x800; break;
4007		case 108: rates |= 0x1000; break;
4008		}
4009	return rates;
4010}
4011
4012/*
4013 * Construct an HT firmware bitmask from an HT rate set.
4014 */
4015static uint32_t
4016get_htrate_bitmap(const struct ieee80211_htrateset *rs)
4017{
4018	uint32_t rates;
4019	int i;
4020
4021	rates = 0;
4022	for (i = 0; i < rs->rs_nrates; i++) {
4023		if (rs->rs_rates[i] < 16)
4024			rates |= 1<<rs->rs_rates[i];
4025	}
4026	return rates;
4027}
4028
4029/*
4030 * Craft station database entry for station.
4031 * NB: use host byte order here, the hal handles byte swapping.
4032 */
4033static MWL_HAL_PEERINFO *
4034mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
4035{
4036	const struct ieee80211vap *vap = ni->ni_vap;
4037
4038	memset(pi, 0, sizeof(*pi));
4039	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
4040	pi->CapInfo = ni->ni_capinfo;
4041	if (ni->ni_flags & IEEE80211_NODE_HT) {
4042		/* HT capabilities, etc */
4043		pi->HTCapabilitiesInfo = ni->ni_htcap;
4044		/* XXX pi.HTCapabilitiesInfo */
4045	        pi->MacHTParamInfo = ni->ni_htparam;
4046		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
4047		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
4048		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
4049		pi->AddHtInfo.OpMode = ni->ni_htopmode;
4050		pi->AddHtInfo.stbc = ni->ni_htstbc;
4051
4052		/* constrain according to local configuration */
4053		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
4054			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
4055		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
4056			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
4057		if (ni->ni_chw != 40)
4058			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
4059	}
4060	return pi;
4061}
4062
4063/*
4064 * Re-create the local sta db entry for a vap to ensure
4065 * up to date WME state is pushed to the firmware.  Because
4066 * this resets crypto state this must be followed by a
4067 * reload of any keys in the global key table.
4068 */
4069static int
4070mwl_localstadb(struct ieee80211vap *vap)
4071{
4072#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
4073	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
4074	struct ieee80211_node *bss;
4075	MWL_HAL_PEERINFO pi;
4076	int error;
4077
4078	switch (vap->iv_opmode) {
4079	case IEEE80211_M_STA:
4080		bss = vap->iv_bss;
4081		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
4082		    vap->iv_state == IEEE80211_S_RUN ?
4083			mkpeerinfo(&pi, bss) : NULL,
4084		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
4085		    bss->ni_ies.wme_ie != NULL ?
4086			WME(bss->ni_ies.wme_ie)->wme_info : 0);
4087		if (error == 0)
4088			mwl_setglobalkeys(vap);
4089		break;
4090	case IEEE80211_M_HOSTAP:
4091	case IEEE80211_M_MBSS:
4092		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
4093		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
4094		if (error == 0)
4095			mwl_setglobalkeys(vap);
4096		break;
4097	default:
4098		error = 0;
4099		break;
4100	}
4101	return error;
4102#undef WME
4103}
4104
/*
 * net80211 state change callback.  Performs driver/firmware work
 * that must bracket the net80211 state transition: radar detection
 * teardown and per-target-state setup before, and (for RUN)
 * sta db / beacon / rate / CS-mode setup after.
 * Returns 0 or an errno from the parent method or beacon setup.
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    vap->iv_ifp->if_xname, __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	/* stop the firmware-aging timer; re-armed below on entry to RUN */
	callout_stop(&sc->sc_timer);
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    vap->iv_ifp->if_xname, __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    vap->iv_ifp->if_xname, __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			/* first DWDS sta vap entering RUN enables fw DWDS */
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    vap->iv_ifp->if_xname, __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	/*
	 * NOTE(review): this branch runs for ANY transition that is
	 * neither (successful) RUN nor SLEEP, decrementing the DWDS
	 * refcount taken on entry to RUN above -- presumably such a
	 * transition only happens on the way out of RUN; confirm the
	 * refcount cannot go negative on other paths.
	 */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		mwl_hal_setdwds(mh, 0);
bad:
	return error;
}
4257
4258/*
4259 * Manage station id's; these are separate from AID's
4260 * as AID's may have values out of the range of possible
4261 * station id's acceptable to the firmware.
4262 */
4263static int
4264allocstaid(struct mwl_softc *sc, int aid)
4265{
4266	int staid;
4267
4268	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
4269		/* NB: don't use 0 */
4270		for (staid = 1; staid < MWL_MAXSTAID; staid++)
4271			if (isclr(sc->sc_staid, staid))
4272				break;
4273	} else
4274		staid = aid;
4275	setbit(sc->sc_staid, staid);
4276	return staid;
4277}
4278
/* Release a station id previously obtained from allocstaid(). */
static void
delstaid(struct mwl_softc *sc, int staid)
{
	clrbit(sc->sc_staid, staid);
}
4284
4285/*
4286 * Setup driver-specific state for a newly associated node.
4287 * Note that we're called also on a re-associate, the isnew
4288 * param tells us if this is the first time or not.
4289 */
4290static void
4291mwl_newassoc(struct ieee80211_node *ni, int isnew)
4292{
4293	struct ieee80211vap *vap = ni->ni_vap;
4294        struct mwl_softc *sc = vap->iv_ic->ic_softc;
4295	struct mwl_node *mn = MWL_NODE(ni);
4296	MWL_HAL_PEERINFO pi;
4297	uint16_t aid;
4298	int error;
4299
4300	aid = IEEE80211_AID(ni->ni_associd);
4301	if (isnew) {
4302		mn->mn_staid = allocstaid(sc, aid);
4303		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4304	} else {
4305		mn = MWL_NODE(ni);
4306		/* XXX reset BA stream? */
4307	}
4308	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4309	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4310	error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4311	if (error != 0) {
4312		DPRINTF(sc, MWL_DEBUG_NODE,
4313		    "%s: error %d creating sta db entry\n",
4314		    __func__, error);
4315		/* XXX how to deal with error? */
4316	}
4317}
4318
4319/*
4320 * Periodically poke the firmware to age out station state
4321 * (power save queues, pending tx aggregates).
4322 */
4323static void
4324mwl_agestations(void *arg)
4325{
4326	struct mwl_softc *sc = arg;
4327
4328	mwl_hal_setkeepalive(sc->sc_mh);
4329	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
4330		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
4331}
4332
4333static const struct mwl_hal_channel *
4334findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4335{
4336	int i;
4337
4338	for (i = 0; i < ci->nchannels; i++) {
4339		const struct mwl_hal_channel *hc = &ci->channels[i];
4340		if (hc->ieee == ieee)
4341			return hc;
4342	}
4343	return NULL;
4344}
4345
4346static int
4347mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
4348	int nchan, struct ieee80211_channel chans[])
4349{
4350	struct mwl_softc *sc = ic->ic_softc;
4351	struct mwl_hal *mh = sc->sc_mh;
4352	const MWL_HAL_CHANNELINFO *ci;
4353	int i;
4354
4355	for (i = 0; i < nchan; i++) {
4356		struct ieee80211_channel *c = &chans[i];
4357		const struct mwl_hal_channel *hc;
4358
4359		if (IEEE80211_IS_CHAN_2GHZ(c)) {
4360			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
4361			    IEEE80211_IS_CHAN_HT40(c) ?
4362				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4363		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4364			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
4365			    IEEE80211_IS_CHAN_HT40(c) ?
4366				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4367		} else {
4368			device_printf(sc->sc_dev,
4369			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
4370			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
4371			return EINVAL;
4372		}
4373		/*
4374		 * Verify channel has cal data and cap tx power.
4375		 */
4376		hc = findhalchannel(ci, c->ic_ieee);
4377		if (hc != NULL) {
4378			if (c->ic_maxpower > 2*hc->maxTxPow)
4379				c->ic_maxpower = 2*hc->maxTxPow;
4380			goto next;
4381		}
4382		if (IEEE80211_IS_CHAN_HT40(c)) {
4383			/*
4384			 * Look for the extension channel since the
4385			 * hal table only has the primary channel.
4386			 */
4387			hc = findhalchannel(ci, c->ic_extieee);
4388			if (hc != NULL) {
4389				if (c->ic_maxpower > 2*hc->maxTxPow)
4390					c->ic_maxpower = 2*hc->maxTxPow;
4391				goto next;
4392			}
4393		}
4394		device_printf(sc->sc_dev,
4395		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
4396		    __func__, c->ic_ieee, c->ic_extieee,
4397		    c->ic_freq, c->ic_flags);
4398		return EINVAL;
4399	next:
4400		;
4401	}
4402	return 0;
4403}
4404
4405#define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4406#define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4407
4408static void
4409addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow)
4410{
4411	c->ic_freq = freq;
4412	c->ic_flags = flags;
4413	c->ic_ieee = ieee;
4414	c->ic_minpower = 0;
4415	c->ic_maxpower = 2*txpow;
4416	c->ic_maxregpower = txpow;
4417}
4418
4419static const struct ieee80211_channel *
4420findchannel(const struct ieee80211_channel chans[], int nchans,
4421	int freq, int flags)
4422{
4423	const struct ieee80211_channel *c;
4424	int i;
4425
4426	for (i = 0; i < nchans; i++) {
4427		c = &chans[i];
4428		if (c->ic_freq == freq && c->ic_flags == flags)
4429			return c;
4430	}
4431	return NULL;
4432}
4433
/*
 * Append HT40 channel pairs for every hal channel whose 20MHz
 * extension channel (20MHz above) is already present in chans[]:
 * an HT40U entry for the primary and an HT40D entry for the
 * extension, each cross-referencing the other via ic_extieee.
 *
 * NOTE(review): if maxchans is reached between the two addchan()
 * calls the HT40U entry is left without its HT40D partner --
 * presumably maxchans is sized so this cannot happen; verify.
 */
static void
addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	const struct ieee80211_channel *extc;
	const struct mwl_hal_channel *hc;
	int i;

	/* append after the channels already collected */
	c = &chans[*nchans];

	/* match against the plain HT20 entries added earlier */
	flags &= ~IEEE80211_CHAN_HT;
	for (i = 0; i < ci->nchannels; i++) {
		/*
		 * Each entry defines an HT40 channel pair; find the
		 * extension channel above and the insert the pair.
		 */
		hc = &ci->channels[i];
		extc = findchannel(chans, *nchans, hc->freq+20,
		    flags | IEEE80211_CHAN_HT20);
		if (extc != NULL) {
			if (*nchans >= maxchans)
				break;
			addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U,
			    hc->ieee, hc->maxTxPow);
			c->ic_extieee = extc->ic_ieee;
			c++, (*nchans)++;
			if (*nchans >= maxchans)
				break;
			addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D,
			    extc->ic_ieee, hc->maxTxPow);
			c->ic_extieee = hc->ieee;
			c++, (*nchans)++;
		}
	}
}
4470
/*
 * Append one entry per hal channel with the given flags, plus the
 * companion entries net80211 expects: a b-only clone for each g
 * channel, and a plain-g and HT20 clone for each HT g channel
 * (likewise a-only/HT20 for HT a).  The c[-1]/c[0] juggling below
 * duplicates the just-added entry and then patches the flags of
 * one or both copies.
 */
static void
addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	int i;

	/* append after the channels already collected */
	c = &chans[*nchans];

	for (i = 0; i < ci->nchannels; i++) {
		const struct mwl_hal_channel *hc;

		hc = &ci->channels[i];
		if (*nchans >= maxchans)
			break;
		addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
		c++, (*nchans)++;
		if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
			/* g channel have a separate b-only entry */
			if (*nchans >= maxchans)
				break;
			/* clone, then retag the earlier copy as 11b */
			c[0] = c[-1];
			c[-1].ic_flags = IEEE80211_CHAN_B;
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTG) {
			/* HT g channel have a separate g-only entry */
			if (*nchans >= maxchans)
				break;
			/* retag previous as plain g, clone it as HT20 */
			c[-1].ic_flags = IEEE80211_CHAN_G;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTA) {
			/* HT a channel have a separate a-only entry */
			if (*nchans >= maxchans)
				break;
			/* retag previous as plain a, clone it as HT20 */
			c[-1].ic_flags = IEEE80211_CHAN_A;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
	}
}
4518
4519static void
4520getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
4521	struct ieee80211_channel chans[])
4522{
4523	const MWL_HAL_CHANNELINFO *ci;
4524
4525	/*
4526	 * Use the channel info from the hal to craft the
4527	 * channel list.  Note that we pass back an unsorted
4528	 * list; the caller is required to sort it for us
4529	 * (if desired).
4530	 */
4531	*nchans = 0;
4532	if (mwl_hal_getchannelinfo(sc->sc_mh,
4533	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
4534		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
4535	if (mwl_hal_getchannelinfo(sc->sc_mh,
4536	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
4537		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
4538	if (mwl_hal_getchannelinfo(sc->sc_mh,
4539	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4540		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
4541	if (mwl_hal_getchannelinfo(sc->sc_mh,
4542	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4543		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
4544}
4545
4546static void
4547mwl_getradiocaps(struct ieee80211com *ic,
4548	int maxchans, int *nchans, struct ieee80211_channel chans[])
4549{
4550	struct mwl_softc *sc = ic->ic_softc;
4551
4552	getchannels(sc, maxchans, nchans, chans);
4553}
4554
/*
 * Populate ic_channels/ic_nchans from the hal and install a
 * placeholder (debug SKU) regdomain.  Returns EIO when no
 * channels at all were found (e.g. missing calibration data).
 */
static int
mwl_getchannels(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	/*
	 * Use the channel info from the hal to craft the
	 * channel list for net80211.  Note that we pass up
	 * an unsorted list; net80211 will sort it for us.
	 */
	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
	ic->ic_nchans = 0;
	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);

	/* default regulatory state until configured otherwise */
	ic->ic_regdomain.regdomain = SKU_DEBUG;
	ic->ic_regdomain.country = CTRY_DEFAULT;
	ic->ic_regdomain.location = 'I';
	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
	ic->ic_regdomain.isocc[1] = ' ';
	return (ic->ic_nchans == 0 ? EIO : 0);
}
4576#undef IEEE80211_CHAN_HTA
4577#undef IEEE80211_CHAN_HTG
4578
4579#ifdef MWL_DEBUG
/*
 * Dump an rx descriptor for debugging.
 * NOTE(review): the STAT field prints the raw ds->Status while the
 * byte-swapped local is used only for the ownership/OK decoration
 * -- confirm the intended field width/endianness.
 */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n"
	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->pPhysNext),
	    le32toh(ds->pPhysBuffData), ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2));
}
4595
/*
 * Dump a tx descriptor (and, when multi-descriptor support is
 * compiled in, its scatter/gather arrays) for debugging.
 */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr);
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !",
	printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
{ const uint8_t *cp = (const uint8_t *) ds;
  int i;
  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
	printf("%02x ", cp[i]);
	if (((i+1) % 16) == 0)
		printf("\n");
  }
  printf("\n");
}
#endif
}
4637#endif /* MWL_DEBUG */
4638
4639#if 0
/*
 * Dump every active descriptor on a tx queue (debug aid; this
 * whole function is normally compiled out via the enclosing #if 0).
 */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		/* sync so we observe what the hardware last wrote */
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4658#endif
4659
4660static void
4661mwl_watchdog(void *arg)
4662{
4663	struct mwl_softc *sc = arg;
4664
4665	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
4666	if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
4667		return;
4668
4669	if (sc->sc_running && !sc->sc_invalid) {
4670		if (mwl_hal_setkeepalive(sc->sc_mh))
4671			device_printf(sc->sc_dev,
4672			    "transmit timeout (firmware hung?)\n");
4673		else
4674			device_printf(sc->sc_dev,
4675			    "transmit timeout\n");
4676#if 0
4677		mwl_reset(sc);
4678mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
4679#endif
4680		counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4681		sc->sc_stats.mst_watchdog++;
4682	}
4683}
4684
4685#ifdef MWL_DIAGAPI
4686/*
4687 * Diagnostic interface to the HAL.  This is used by various
4688 * tools to do things like retrieve register contents for
4689 * debugging.  The mechanism is intentionally opaque so that
4690 * it can change frequently w/o concern for compatiblity.
4691 */
4692static int
4693mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
4694{
4695	struct mwl_hal *mh = sc->sc_mh;
4696	u_int id = md->md_id & MWL_DIAG_ID;
4697	void *indata = NULL;
4698	void *outdata = NULL;
4699	u_int32_t insize = md->md_in_size;
4700	u_int32_t outsize = md->md_out_size;
4701	int error = 0;
4702
4703	if (md->md_id & MWL_DIAG_IN) {
4704		/*
4705		 * Copy in data.
4706		 */
4707		indata = malloc(insize, M_TEMP, M_NOWAIT);
4708		if (indata == NULL) {
4709			error = ENOMEM;
4710			goto bad;
4711		}
4712		error = copyin(md->md_in_data, indata, insize);
4713		if (error)
4714			goto bad;
4715	}
4716	if (md->md_id & MWL_DIAG_DYN) {
4717		/*
4718		 * Allocate a buffer for the results (otherwise the HAL
4719		 * returns a pointer to a buffer where we can read the
4720		 * results).  Note that we depend on the HAL leaving this
4721		 * pointer for us to use below in reclaiming the buffer;
4722		 * may want to be more defensive.
4723		 */
4724		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
4725		if (outdata == NULL) {
4726			error = ENOMEM;
4727			goto bad;
4728		}
4729	}
4730	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
4731		if (outsize < md->md_out_size)
4732			md->md_out_size = outsize;
4733		if (outdata != NULL)
4734			error = copyout(outdata, md->md_out_data,
4735					md->md_out_size);
4736	} else {
4737		error = EINVAL;
4738	}
4739bad:
4740	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
4741		free(indata, M_TEMP);
4742	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
4743		free(outdata, M_TEMP);
4744	return error;
4745}
4746
/*
 * Handle a SIOCGMVRESET request: optionally reload firmware
 * (md_id == 0), re-fetch the h/w specs, and reset the driver's
 * dma setup and tx/rx queue state.  Returns 0 or an errno.
 * NB: caller must hold the softc lock.
 */
static int
mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error;

	MWL_LOCK_ASSERT(sc);

	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to load firmware\n");
		return EIO;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		return EIO;
	}
	error = mwl_setupdma(sc);
	if (error != 0) {
		/* NB: mwl_setupdma prints a msg */
		return error;
	}
	/*
	 * Reset tx/rx data structures; after reload we must
	 * re-start the driver's notion of the next xmit/recv.
	 */
	mwl_draintxq(sc);		/* clear pending frames */
	mwl_resettxq(sc);		/* rebuild tx q lists */
	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
	return 0;
}
4777#endif /* MWL_DIAGAPI */
4778
4779static void
4780mwl_parent(struct ieee80211com *ic)
4781{
4782	struct mwl_softc *sc = ic->ic_softc;
4783	int startall = 0;
4784
4785	MWL_LOCK(sc);
4786	if (ic->ic_nrunning > 0) {
4787		if (sc->sc_running) {
4788			/*
4789			 * To avoid rescanning another access point,
4790			 * do not call mwl_init() here.  Instead,
4791			 * only reflect promisc mode settings.
4792			 */
4793			mwl_mode_init(sc);
4794		} else {
4795			/*
4796			 * Beware of being called during attach/detach
4797			 * to reset promiscuous mode.  In that case we
4798			 * will still be marked UP but not RUNNING.
4799			 * However trying to re-init the interface
4800			 * is the wrong thing to do as we've already
4801			 * torn down much of our state.  There's
4802			 * probably a better way to deal with this.
4803			 */
4804			if (!sc->sc_invalid) {
4805				mwl_init(sc);	/* XXX lose error */
4806				startall = 1;
4807			}
4808		}
4809	} else
4810		mwl_stop(sc);
4811	MWL_UNLOCK(sc);
4812	if (startall)
4813		ieee80211_start_all(ic);
4814}
4815
/*
 * Driver-specific ioctls: statistics export and (when MWL_DIAGAPI
 * is configured) the hal diagnostic/reset interface.  Returns 0,
 * a copyout/diag errno, or ENOTTY for unknown commands.
 */
static int
mwl_ioctl(struct ieee80211com *ic, u_long cmd, void *data)
{
	struct mwl_softc *sc = ic->ic_softc;
	struct ifreq *ifr = data;
	int error = 0;

	switch (cmd) {
	case SIOCGMVSTATS:
		/* refresh the hardware counters before export */
		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
#if 0
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.mst_tx_packets =
		    ifp->if_get_counter(ifp, IFCOUNTER_OPACKETS);
		sc->sc_stats.mst_rx_packets =
		    ifp->if_get_counter(ifp, IFCOUNTER_IPACKETS);
#endif
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsistency in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return (copyout(&sc->sc_stats,
				ifr->ifr_data, sizeof (sc->sc_stats)));
#ifdef MWL_DIAGAPI
	case SIOCGMVDIAG:
		/* XXX check privs */
		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
	case SIOCGMVRESET:
		/* XXX check privs */
		MWL_LOCK(sc);
		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
		MWL_UNLOCK(sc);
		break;
#endif /* MWL_DIAGAPI */
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
4858
4859#ifdef	MWL_DEBUG
4860static int
4861mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4862{
4863	struct mwl_softc *sc = arg1;
4864	int debug, error;
4865
4866	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4867	error = sysctl_handle_int(oidp, &debug, 0, req);
4868	if (error || !req->newptr)
4869		return error;
4870	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4871	sc->sc_debug = debug & 0x00ffffff;
4872	return 0;
4873}
4874#endif /* MWL_DEBUG */
4875
/*
 * Attach driver sysctl nodes; currently only the debug mask,
 * and only when MWL_DEBUG is configured (no-op otherwise).
 */
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef	MWL_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	/* seed the per-device mask from the global tunable */
	sc->sc_debug = mwl_debug;
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
4889
4890/*
4891 * Announce various information on device/driver attach.
4892 */
4893static void
4894mwl_announce(struct mwl_softc *sc)
4895{
4896
4897	device_printf(sc->sc_dev, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
4898		sc->sc_hwspecs.hwVersion,
4899		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
4900		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
4901		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
4902		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
4903		sc->sc_hwspecs.regionCode);
4904	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;
4905
4906	if (bootverbose) {
4907		int i;
4908		for (i = 0; i <= WME_AC_VO; i++) {
4909			struct mwl_txq *txq = sc->sc_ac2q[i];
4910			device_printf(sc->sc_dev, "Use hw queue %u for %s traffic\n",
4911				txq->qnum, ieee80211_wme_acnames[i]);
4912		}
4913	}
4914	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
4915		device_printf(sc->sc_dev, "using %u rx descriptors\n", mwl_rxdesc);
4916	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
4917		device_printf(sc->sc_dev, "using %u rx buffers\n", mwl_rxbuf);
4918	if (bootverbose || mwl_txbuf != MWL_TXBUF)
4919		device_printf(sc->sc_dev, "using %u tx buffers\n", mwl_txbuf);
4920	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
4921		device_printf(sc->sc_dev, "multi-bss support\n");
4922#ifdef MWL_TX_NODROP
4923	if (bootverbose)
4924		device_printf(sc->sc_dev, "no tx drop\n");
4925#endif
4926}
4927