1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
5 * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer,
13 *    without modification.
14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
16 *    redistribution must be conditioned upon including a substantially
17 *    similar Disclaimer requirement for further binary redistribution.
18 *
19 * NO WARRANTY
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
23 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
24 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
25 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
28 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGES.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD$");
35
36/*
37 * Driver for the Marvell 88W8363 Wireless LAN controller.
38 */
39
40#include "opt_inet.h"
41#include "opt_mwl.h"
42#include "opt_wlan.h"
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/sysctl.h>
47#include <sys/mbuf.h>
48#include <sys/malloc.h>
49#include <sys/lock.h>
50#include <sys/mutex.h>
51#include <sys/kernel.h>
52#include <sys/socket.h>
53#include <sys/sockio.h>
54#include <sys/errno.h>
55#include <sys/callout.h>
56#include <sys/bus.h>
57#include <sys/endian.h>
58#include <sys/kthread.h>
59#include <sys/taskqueue.h>
60
61#include <machine/bus.h>
62
63#include <net/if.h>
64#include <net/if_var.h>
65#include <net/if_dl.h>
66#include <net/if_media.h>
67#include <net/if_types.h>
68#include <net/if_arp.h>
69#include <net/ethernet.h>
70#include <net/if_llc.h>
71
72#include <net/bpf.h>
73
74#include <net80211/ieee80211_var.h>
75#include <net80211/ieee80211_input.h>
76#include <net80211/ieee80211_regdomain.h>
77
78#ifdef INET
79#include <netinet/in.h>
80#include <netinet/if_ether.h>
81#endif /* INET */
82
83#include <dev/mwl/if_mwlvar.h>
84#include <dev/mwl/mwldiag.h>
85
86static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
87		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
88		    const uint8_t [IEEE80211_ADDR_LEN],
89		    const uint8_t [IEEE80211_ADDR_LEN]);
90static void	mwl_vap_delete(struct ieee80211vap *);
91static int	mwl_setupdma(struct mwl_softc *);
92static int	mwl_hal_reset(struct mwl_softc *sc);
93static int	mwl_init(struct mwl_softc *);
94static void	mwl_parent(struct ieee80211com *);
95static int	mwl_reset(struct ieee80211vap *, u_long);
96static void	mwl_stop(struct mwl_softc *);
97static void	mwl_start(struct mwl_softc *);
98static int	mwl_transmit(struct ieee80211com *, struct mbuf *);
99static int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
100			const struct ieee80211_bpf_params *);
101static int	mwl_media_change(struct ifnet *);
102static void	mwl_watchdog(void *);
103static int	mwl_ioctl(struct ieee80211com *, u_long, void *);
104static void	mwl_radar_proc(void *, int);
105static void	mwl_chanswitch_proc(void *, int);
106static void	mwl_bawatchdog_proc(void *, int);
107static int	mwl_key_alloc(struct ieee80211vap *,
108			struct ieee80211_key *,
109			ieee80211_keyix *, ieee80211_keyix *);
110static int	mwl_key_delete(struct ieee80211vap *,
111			const struct ieee80211_key *);
112static int	mwl_key_set(struct ieee80211vap *,
113			const struct ieee80211_key *);
114static int	_mwl_key_set(struct ieee80211vap *,
115			const struct ieee80211_key *,
116			const uint8_t mac[IEEE80211_ADDR_LEN]);
117static int	mwl_mode_init(struct mwl_softc *);
118static void	mwl_update_mcast(struct ieee80211com *);
119static void	mwl_update_promisc(struct ieee80211com *);
120static void	mwl_updateslot(struct ieee80211com *);
121static int	mwl_beacon_setup(struct ieee80211vap *);
122static void	mwl_beacon_update(struct ieee80211vap *, int);
123#ifdef MWL_HOST_PS_SUPPORT
124static void	mwl_update_ps(struct ieee80211vap *, int);
125static int	mwl_set_tim(struct ieee80211_node *, int);
126#endif
127static int	mwl_dma_setup(struct mwl_softc *);
128static void	mwl_dma_cleanup(struct mwl_softc *);
129static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
130		    const uint8_t [IEEE80211_ADDR_LEN]);
131static void	mwl_node_cleanup(struct ieee80211_node *);
132static void	mwl_node_drain(struct ieee80211_node *);
133static void	mwl_node_getsignal(const struct ieee80211_node *,
134			int8_t *, int8_t *);
135static void	mwl_node_getmimoinfo(const struct ieee80211_node *,
136			struct ieee80211_mimo_info *);
137static int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
138static void	mwl_rx_proc(void *, int);
139static void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
140static int	mwl_tx_setup(struct mwl_softc *, int, int);
141static int	mwl_wme_update(struct ieee80211com *);
142static void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
143static void	mwl_tx_cleanup(struct mwl_softc *);
144static uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
145static int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
146			     struct mwl_txbuf *, struct mbuf *);
147static void	mwl_tx_proc(void *, int);
148static int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
149static void	mwl_draintxq(struct mwl_softc *);
150static void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
151static int	mwl_recv_action(struct ieee80211_node *,
152			const struct ieee80211_frame *,
153			const uint8_t *, const uint8_t *);
154static int	mwl_addba_request(struct ieee80211_node *,
155			struct ieee80211_tx_ampdu *, int dialogtoken,
156			int baparamset, int batimeout);
157static int	mwl_addba_response(struct ieee80211_node *,
158			struct ieee80211_tx_ampdu *, int status,
159			int baparamset, int batimeout);
160static void	mwl_addba_stop(struct ieee80211_node *,
161			struct ieee80211_tx_ampdu *);
162static int	mwl_startrecv(struct mwl_softc *);
163static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
164			struct ieee80211_channel *);
165static int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
166static void	mwl_scan_start(struct ieee80211com *);
167static void	mwl_scan_end(struct ieee80211com *);
168static void	mwl_set_channel(struct ieee80211com *);
169static int	mwl_peerstadb(struct ieee80211_node *,
170			int aid, int staid, MWL_HAL_PEERINFO *pi);
171static int	mwl_localstadb(struct ieee80211vap *);
172static int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
173static int	allocstaid(struct mwl_softc *sc, int aid);
174static void	delstaid(struct mwl_softc *sc, int staid);
175static void	mwl_newassoc(struct ieee80211_node *, int);
176static void	mwl_agestations(void *);
177static int	mwl_setregdomain(struct ieee80211com *,
178			struct ieee80211_regdomain *, int,
179			struct ieee80211_channel []);
180static void	mwl_getradiocaps(struct ieee80211com *, int, int *,
181			struct ieee80211_channel []);
182static int	mwl_getchannels(struct mwl_softc *);
183
184static void	mwl_sysctlattach(struct mwl_softc *);
185static void	mwl_announce(struct mwl_softc *);
186
187SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");
188
189static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
190SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
191	    0, "rx descriptors allocated");
192static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
193SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &mwl_rxbuf,
194	    0, "rx buffers allocated");
195static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
196SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RWTUN, &mwl_txbuf,
197	    0, "tx buffers allocated");
198static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
199SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &mwl_txcoalesce,
200	    0, "tx buffers to send at once");
201static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
202SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RWTUN, &mwl_rxquota,
203	    0, "max rx buffers to process per interrupt");
204static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
205SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RWTUN, &mwl_rxdmalow,
206	    0, "min free rx buffers before restarting traffic");
207
#ifdef MWL_DEBUG
/* Debug mask; bits defined by the enum below, settable via hw.mwl.debug. */
static	int mwl_debug = 0;
SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RWTUN, &mwl_debug,
	    0, "control debugging printfs");
enum {
	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
	MWL_DEBUG_ANY		= 0xffffffff
};
/* true when the frame is a beacon (mgt type + beacon subtype) */
#define	IS_BEACON(wh) \
    ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
/* dump received frames; beacons only when RECV_ALL is also set */
#define	IFF_DUMPPKTS_RECV(sc, wh) \
    ((sc->sc_debug & MWL_DEBUG_RECV) && \
      ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh)))
#define	IFF_DUMPPKTS_XMIT(sc) \
	(sc->sc_debug & MWL_DEBUG_XMIT)

/* conditional debug printf gated on the per-device sc_debug mask */
#define	DPRINTF(sc, m, fmt, ...) do {				\
	if (sc->sc_debug & (m))					\
		printf(fmt, __VA_ARGS__);			\
} while (0)
#define	KEYPRINTF(sc, hk, mac) do {				\
	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
		mwl_keyprint(sc, __func__, hk, mac);		\
} while (0)
static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
#else
/* no-op stubs when MWL_DEBUG is not compiled in */
#define	IFF_DUMPPKTS_RECV(sc, wh)	0
#define	IFF_DUMPPKTS_XMIT(sc)		0
#define	DPRINTF(sc, m, fmt, ...)	do { (void )sc; } while (0)
#define	KEYPRINTF(sc, k, mac)		do { (void )sc; } while (0)
#endif
255
/* malloc(9) type for driver dma bookkeeping allocations */
static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");

/*
 * Each packet has fixed front matter: a 2-byte length
 * of the payload, followed by a 4-address 802.11 header
 * (regardless of the actual header and always w/o any
 * QoS header).  The payload then follows.
 */
struct mwltxrec {
	uint16_t fwlen;			/* payload length for the firmware */
	struct ieee80211_frame_addr4 wh; /* 4-address header template */
} __packed;
268
269/*
270 * Read/Write shorthands for accesses to BAR 0.  Note
271 * that all BAR 1 operations are done in the "hal" and
272 * there should be no reference to them here.
273 */
274#ifdef MWL_DEBUG
275static __inline uint32_t
276RD4(struct mwl_softc *sc, bus_size_t off)
277{
278	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
279}
280#endif
281
/* 32-bit register write to BAR 0 at byte offset off */
static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}
287
/*
 * Device attach: bind the HAL, load firmware, fetch hardware specs,
 * allocate and publish tx/rx dma state, set up taskqueue handlers
 * and h/w tx queues, then register capabilities and driver methods
 * with net80211.  Returns 0 on success or an errno; on failure the
 * bad* labels unwind partial state and sc_invalid is set so the
 * interrupt handler ignores the device.
 */
int
mwl_attach(uint16_t devid, struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	/*
	 * Setup the RX free list lock early, so it can be consistently
	 * removed.
	 */
	MWL_RXFREE_INIT(sc);

	mh = mwl_hal_attach(sc->sc_dev, devid,
	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
	if (mh == NULL) {
		device_printf(sc->sc_dev, "unable to attach HAL\n");
		error = EIO;
		goto bad;
	}
	sc->sc_mh = mh;
	/*
	 * Load firmware so we can get setup.  We arbitrarily
	 * pick station firmware; we'll re-load firmware as
	 * needed so setting up the wrong mode isn't a big deal.
	 */
	if (mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to setup builtin firmware\n");
		error = EIO;
		goto bad1;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		error = EIO;
		goto bad1;
	}
	error = mwl_getchannels(sc);
	if (error != 0)
		goto bad1;

	sc->sc_txantenna = 0;		/* h/w default */
	sc->sc_rxantenna = 0;		/* h/w default */
	sc->sc_invalid = 0;		/* ready to go, enable int handling */
	sc->sc_ageinterval = MWL_AGEINTERVAL;

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 * We immediately push the information to the firmware
	 * as otherwise it gets upset.
	 */
	error = mwl_dma_setup(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to setup descriptors: %d\n",
		    error);
		goto bad1;
	}
	error = mwl_setupdma(sc);	/* push to firmware */
	if (error != 0)			/* NB: mwl_setupdma prints msg */
		goto bad1;

	callout_init(&sc->sc_timer, 1);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
	mbufq_init(&sc->sc_snd, ifqmaxlen);

	/* private taskqueue for deferred rx/tx/event processing */
	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", device_get_nameunit(sc->sc_dev));

	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);

	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
		device_printf(sc->sc_dev,
		    "unable to setup xmit queue for %s traffic!\n",
		     ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
#if 0
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
#endif
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_WDS		/* WDS supported */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WME		/* WME/WMM supported */
		| IEEE80211_C_BURST		/* xmit bursting supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
		| IEEE80211_C_DFS		/* DFS supported */
		;

	ic->ic_htcaps =
		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
#if MWL_AGGR_SIZE == 7935
		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#if 0
		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
#endif
		/* s/w capabilities */
		| IEEE80211_HTC_HT		/* HT operation */
		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
		| IEEE80211_HTC_SMPS		/* SMPS available */
		;

	/*
	 * Mark h/w crypto support.
	 * XXX no way to query h/w support.
	 */
	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
			  |  IEEE80211_CRYPTO_AES_CCM
			  |  IEEE80211_CRYPTO_TKIP
			  |  IEEE80211_CRYPTO_TKIPMIC
			  ;
	/*
	 * Transmit requires space in the packet for a special
	 * format transmit record and optional padding between
	 * this record and the payload.  Ask the net80211 layer
	 * to arrange this when encapsulating packets so we can
	 * add it efficiently.
	 */
	ic->ic_headroom = sizeof(struct mwltxrec) -
		sizeof(struct ieee80211_frame);

	IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->sc_hwspecs.macAddr);

	/* call MI attach routine. */
	ieee80211_ifattach(ic);
	ic->ic_setregdomain = mwl_setregdomain;
	ic->ic_getradiocaps = mwl_getradiocaps;
	/* override default methods */
	ic->ic_raw_xmit = mwl_raw_xmit;
	ic->ic_newassoc = mwl_newassoc;
	ic->ic_updateslot = mwl_updateslot;
	ic->ic_update_mcast = mwl_update_mcast;
	ic->ic_update_promisc = mwl_update_promisc;
	ic->ic_wme.wme_update = mwl_wme_update;
	ic->ic_transmit = mwl_transmit;
	ic->ic_ioctl = mwl_ioctl;
	ic->ic_parent = mwl_parent;

	/* save net80211 defaults we wrap so we can chain to them */
	ic->ic_node_alloc = mwl_node_alloc;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = mwl_node_cleanup;
	sc->sc_node_drain = ic->ic_node_drain;
	ic->ic_node_drain = mwl_node_drain;
	ic->ic_node_getsignal = mwl_node_getsignal;
	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;

	ic->ic_scan_start = mwl_scan_start;
	ic->ic_scan_end = mwl_scan_end;
	ic->ic_set_channel = mwl_set_channel;

	sc->sc_recv_action = ic->ic_recv_action;
	ic->ic_recv_action = mwl_recv_action;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = mwl_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = mwl_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = mwl_addba_stop;

	ic->ic_vap_create = mwl_vap_create;
	ic->ic_vap_delete = mwl_vap_delete;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		MWL_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		MWL_RX_RADIOTAP_PRESENT);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	mwl_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	mwl_announce(sc);
	return 0;
bad2:
	mwl_dma_cleanup(sc);
bad1:
	mwl_hal_detach(mh);
bad:
	MWL_RXFREE_DESTROY(sc);
	sc->sc_invalid = 1;
	return error;
}
524
/*
 * Device detach: stop the hardware under the driver lock, then tear
 * everything down in the order documented below.  Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	callout_drain(&sc->sc_watchdog);
	mwl_dma_cleanup(sc);
	MWL_RXFREE_DESTROY(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	mbufq_drain(&sc->sc_snd);

	return 0;
}
555
556/*
557 * MAC address handling for multiple BSS on the same radio.
558 * The first vap uses the MAC address from the EEPROM.  For
559 * subsequent vap's we set the U/L bit (bit 1) in the MAC
560 * address and use the next six bits as an index.
561 */
static void
assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
		/* NB: we only do this if h/w supports multiple bssid */
		/* find the first free bssid slot */
		for (i = 0; i < 32; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		/*
		 * NOTE(review): if all 32 slots are in use the loop falls
		 * through with i == 32, making the 1<<i below undefined
		 * behavior; confirm callers cannot create that many vaps
		 * before relying on this path.
		 */
		if (i != 0)
			/* encode slot in the high bits, set the U/L bit */
			mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	/* slot 0 (the EEPROM address) is shared and reference counted */
	if (i == 0)
		sc->sc_nbssid0++;
}
580
581static void
582reclaim_address(struct mwl_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
583{
584	int i = mac[0] >> 2;
585	if (i != 0 || --sc->sc_nbssid0 == 0)
586		sc->sc_bssidmask &= ~(1<<i);
587}
588
/*
 * Create a vap.  AP/MBSS/STA modes get a HAL vap (with a possibly
 * cloned MAC address); WDS vaps piggyback on an existing AP vap's
 * HAL state; monitor mode needs no HAL vap; IBSS/AHDEMO are not
 * supported.  Returns the new vap or NULL on failure.
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			/* give back the address allocated above */
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		return NULL;
	}

	mvp = malloc(sizeof(struct mwl_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	/* override with driver methods */
	mvp->mv_newstate = vap->iv_newstate;
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status,
	    mac);

	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}
716
/*
 * Destroy a vap: quiesce interrupts while the vap is detached,
 * release the HAL vap and station db entry for AP/MBSS/STA modes,
 * reclaim the cloned MAC address, flush the vap's queued tx frames,
 * then re-enable interrupts if the device is still running.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (sc->sc_running) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps--;
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	if (sc->sc_running)
		mwl_hal_intrset(mh, sc->sc_imask);
}
757
/* Power-management suspend hook: stop the hardware under the lock. */
void
mwl_suspend(struct mwl_softc *sc)
{

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
}
766
/*
 * Power-management resume hook: re-init the hardware if any vap was
 * running and, on success, restart all vaps.  EDOOFUS is a sentinel
 * so the start_all call is skipped when mwl_init was never invoked.
 */
void
mwl_resume(struct mwl_softc *sc)
{
	int error = EDOOFUS;

	MWL_LOCK(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		error = mwl_init(sc);
	MWL_UNLOCK(sc);

	if (error == 0)
		ieee80211_start_all(&sc->sc_ic);	/* start all vap's */
}
780
/* System shutdown hook: quiesce the hardware. */
void
mwl_shutdown(void *arg)
{
	struct mwl_softc *sc = arg;

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
}
790
791/*
792 * Interrupt handler.  Most of the actual processing is deferred.
793 */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	/* defer the bulk of rx/tx/BA processing to the taskqueue */
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;				/* NB: no handling currently */
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;				/* NB: no handling currently */
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}
849
/*
 * Deferred radar-detect handler: bump the stat and notify net80211's
 * DFS machinery about radar on the current channel.
 */
static void
mwl_radar_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
	    __func__, pending);

	sc->sc_stats.mst_radardetect++;
	/* XXX stop h/w BA streams? */

	IEEE80211_LOCK(ic);
	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
	IEEE80211_UNLOCK(ic);
}
866
/*
 * Deferred DFS channel-switch handler: clear the pending flag and
 * tell net80211 the CSA completed.
 */
static void
mwl_chanswitch_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
	    __func__, pending);

	IEEE80211_LOCK(ic);
	sc->sc_csapending = 0;
	ieee80211_csa_completeswitch(ic);
	IEEE80211_UNLOCK(ic);
}
881
/*
 * Tear down a stuck BA stream: send DELBA and drop the stream.
 * sp->data[0] holds the node, sp->data[1] the tx ampdu state
 * (presumably stashed at stream-create time; defined elsewhere).
 */
static void
mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
{
	struct ieee80211_node *ni = sp->data[0];

	/* send DELBA and drop the stream */
	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
}
890
/*
 * Deferred BA watchdog handler: query the firmware's watchdog bitmap
 * and tear down the indicated BA stream(s).  The firmware uses two
 * sentinel values: 0xff means "all streams"; 0xaa appears to mean
 * "nothing to do" (NOTE(review): assumed from the skip below —
 * confirm against the firmware interface docs).
 */
static void
mwl_bawatchdog_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_BASTREAM *sp;
	uint8_t bitmap, n;

	sc->sc_stats.mst_bawatchdog++;

	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: could not get bitmap\n", __func__);
		sc->sc_stats.mst_bawatchdog_failed++;
		return;
	}
	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
	if (bitmap == 0xff) {
		n = 0;
		/* disable all ba streams */
		for (bitmap = 0; bitmap < 8; bitmap++) {
			sp = mwl_hal_bastream_lookup(mh, bitmap);
			if (sp != NULL) {
				mwl_bawatchdog(sp);
				n++;
			}
		}
		if (n == 0) {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA streams found\n", __func__);
			sc->sc_stats.mst_bawatchdog_empty++;
		}
	} else if (bitmap != 0xaa) {
		/* disable a single ba stream */
		sp = mwl_hal_bastream_lookup(mh, bitmap);
		if (sp != NULL) {
			mwl_bawatchdog(sp);
		} else {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA stream %d\n", __func__, bitmap);
			sc->sc_stats.mst_bawatchdog_notfound++;
		}
	}
}
935
936/*
937 * Convert net80211 channel to a HAL channel.
938 */
939static void
940mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
941{
942	hc->channel = chan->ic_ieee;
943
944	*(uint32_t *)&hc->channelFlags = 0;
945	if (IEEE80211_IS_CHAN_2GHZ(chan))
946		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
947	else if (IEEE80211_IS_CHAN_5GHZ(chan))
948		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
949	if (IEEE80211_IS_CHAN_HT40(chan)) {
950		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
951		if (IEEE80211_IS_CHAN_HT40U(chan))
952			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
953		else
954			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
955	} else
956		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
957	/* XXX 10MHz channels */
958}
959
960/*
961 * Inform firmware of our tx/rx dma setup.  The BAR 0
962 * writes below are for compatibility with older firmware.
963 * For current firmware we send this information with a
964 * cmd block via mwl_hal_sethwdma.
965 */
static int
mwl_setupdma(struct mwl_softc *sc)
{
	int error, i;

	/* rx ring: both read and write registers start at the ring base */
	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);

	/* NB: the trailing MWL_NUM_ACK_QUEUES queues are not programmed */
	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
		struct mwl_txq *txq = &sc->sc_txq[i];
		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
	}
	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;

	/* current firmware learns the layout from this cmd block */
	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to setup tx/rx dma; hal status %u\n", error);
		/* XXX */
	}
	return error;
}
991
992/*
993 * Inform firmware of tx rate parameters.
994 * Called after a channel change.
995 */
996static int
997mwl_setcurchanrates(struct mwl_softc *sc)
998{
999	struct ieee80211com *ic = &sc->sc_ic;
1000	const struct ieee80211_rateset *rs;
1001	MWL_HAL_TXRATE rates;
1002
1003	memset(&rates, 0, sizeof(rates));
1004	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1005	/* rate used to send management frames */
1006	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1007	/* rate used to send multicast frames */
1008	rates.McastRate = rates.MgtRate;
1009
1010	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1011}
1012
1013/*
1014 * Inform firmware of tx rate parameters.  Called whenever
1015 * user-settable params change and after a channel change.
1016 */
1017static int
1018mwl_setrates(struct ieee80211vap *vap)
1019{
1020	struct mwl_vap *mvp = MWL_VAP(vap);
1021	struct ieee80211_node *ni = vap->iv_bss;
1022	const struct ieee80211_txparam *tp = ni->ni_txparms;
1023	MWL_HAL_TXRATE rates;
1024
1025	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1026
1027	/*
1028	 * Update the h/w rate map.
1029	 * NB: 0x80 for MCS is passed through unchanged
1030	 */
1031	memset(&rates, 0, sizeof(rates));
1032	/* rate used to send management frames */
1033	rates.MgtRate = tp->mgmtrate;
1034	/* rate used to send multicast frames */
1035	rates.McastRate = tp->mcastrate;
1036
1037	/* while here calculate EAPOL fixed rate cookie */
1038	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
1039
1040	return mwl_hal_settxrate(mvp->mv_hvap,
1041	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
1042		RATE_FIXED : RATE_AUTO, &rates);
1043}
1044
1045/*
1046 * Setup a fixed xmit rate cookie for EAPOL frames.
1047 */
1048static void
1049mwl_seteapolformat(struct ieee80211vap *vap)
1050{
1051	struct mwl_vap *mvp = MWL_VAP(vap);
1052	struct ieee80211_node *ni = vap->iv_bss;
1053	enum ieee80211_phymode mode;
1054	uint8_t rate;
1055
1056	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1057
1058	mode = ieee80211_chan2mode(ni->ni_chan);
1059	/*
1060	 * Use legacy rates when operating a mixed HT+non-HT bss.
1061	 * NB: this may violate POLA for sta and wds vap's.
1062	 */
1063	if (mode == IEEE80211_MODE_11NA &&
1064	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1065		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1066	else if (mode == IEEE80211_MODE_11NG &&
1067	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1068		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1069	else
1070		rate = vap->iv_txparms[mode].mgmtrate;
1071
1072	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1073}
1074
1075/*
1076 * Map SKU+country code to region code for radar bin'ing.
1077 */
1078static int
1079mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1080{
1081	switch (rd->regdomain) {
1082	case SKU_FCC:
1083	case SKU_FCC3:
1084		return DOMAIN_CODE_FCC;
1085	case SKU_CA:
1086		return DOMAIN_CODE_IC;
1087	case SKU_ETSI:
1088	case SKU_ETSI2:
1089	case SKU_ETSI3:
1090		if (rd->country == CTRY_SPAIN)
1091			return DOMAIN_CODE_SPAIN;
1092		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1093			return DOMAIN_CODE_FRANCE;
1094		/* XXX force 1.3.1 radar type */
1095		return DOMAIN_CODE_ETSI_131;
1096	case SKU_JAPAN:
1097		return DOMAIN_CODE_MKK;
1098	case SKU_ROW:
1099		return DOMAIN_CODE_DGT;	/* Taiwan */
1100	case SKU_APAC:
1101	case SKU_APAC2:
1102	case SKU_APAC3:
1103		return DOMAIN_CODE_AUS;	/* Australia */
1104	}
1105	/* XXX KOREA? */
1106	return DOMAIN_CODE_FCC;			/* XXX? */
1107}
1108
/*
 * Push vap-independent configuration to the firmware after a
 * (re)start: antennas, radio/preamble, WMM, current channel,
 * rate adaptation, optimization level and region code.
 * NB: always returns 1 ("ok"); the status of the individual
 * hal calls is not checked.
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	return 1;
}
1132
/*
 * Bring the hardware/driver up: stop any previous activity,
 * push vap-independent state to the firmware, start the receive
 * path, program the interrupt mask and arm the watchdog.
 * Returns 0 on success or an errno.  Caller holds the softc lock.
 */
static int
mwl_init(struct mwl_softc *sc)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop(sc);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		device_printf(sc->sc_dev, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.
	 * NB: the #if 0 bits (MAC_EVENT, QUEUE_EMPTY) are deliberately
	 * left out of the mask.
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	sc->sc_running = 1;
	mwl_hal_intrset(mh, sc->sc_imask);
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);

	return 0;
}
1189
1190static void
1191mwl_stop(struct mwl_softc *sc)
1192{
1193
1194	MWL_LOCK_ASSERT(sc);
1195	if (sc->sc_running) {
1196		/*
1197		 * Shutdown the hardware and driver.
1198		 */
1199		sc->sc_running = 0;
1200		callout_stop(&sc->sc_watchdog);
1201		sc->sc_tx_timer = 0;
1202		mwl_draintxq(sc);
1203	}
1204}
1205
/*
 * (Re)push per-vap state to the firmware: tx rates, RTS
 * threshold, HT short-GI and n-protection.  For AP-style vaps
 * (hostap/mbss/ibss) in RUN state also rebuild the beacon.
 * Returns 0 or the mwl_beacon_setup error.
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_MBSS ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap, _IEEE80211_MASKSHIFT(
		    ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}
1235
1236/*
1237 * Reset the hardware w/o losing operational state.
1238 * Used to reset or reload hardware state for a vap.
1239 */
1240static int
1241mwl_reset(struct ieee80211vap *vap, u_long cmd)
1242{
1243	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1244	int error = 0;
1245
1246	if (hvap != NULL) {			/* WDS, MONITOR, etc. */
1247		struct ieee80211com *ic = vap->iv_ic;
1248		struct mwl_softc *sc = ic->ic_softc;
1249		struct mwl_hal *mh = sc->sc_mh;
1250
1251		/* XXX handle DWDS sta vap change */
1252		/* XXX do we need to disable interrupts? */
1253		mwl_hal_intrset(mh, 0);		/* disable interrupts */
1254		error = mwl_reset_vap(vap, vap->iv_state);
1255		mwl_hal_intrset(mh, sc->sc_imask);
1256	}
1257	return error;
1258}
1259
1260/*
1261 * Allocate a tx buffer for sending a frame.  The
1262 * packet is assumed to have the WME AC stored so
1263 * we can use it to select the appropriate h/w queue.
1264 */
1265static struct mwl_txbuf *
1266mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1267{
1268	struct mwl_txbuf *bf;
1269
1270	/*
1271	 * Grab a TX buffer and associated resources.
1272	 */
1273	MWL_TXQ_LOCK(txq);
1274	bf = STAILQ_FIRST(&txq->free);
1275	if (bf != NULL) {
1276		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1277		txq->nfree--;
1278	}
1279	MWL_TXQ_UNLOCK(txq);
1280	if (bf == NULL)
1281		DPRINTF(sc, MWL_DEBUG_XMIT,
1282		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1283	return bf;
1284}
1285
1286/*
1287 * Return a tx buffer to the queue it came from.  Note there
1288 * are two cases because we must preserve the order of buffers
1289 * as it reflects the fixed order of descriptors in memory
1290 * (the firmware pre-fetches descriptors so we cannot reorder).
1291 */
1292static void
1293mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1294{
1295	bf->bf_m = NULL;
1296	bf->bf_node = NULL;
1297	MWL_TXQ_LOCK(txq);
1298	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1299	txq->nfree++;
1300	MWL_TXQ_UNLOCK(txq);
1301}
1302
1303static void
1304mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
1305{
1306	bf->bf_m = NULL;
1307	bf->bf_node = NULL;
1308	MWL_TXQ_LOCK(txq);
1309	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
1310	txq->nfree++;
1311	MWL_TXQ_UNLOCK(txq);
1312}
1313
1314static int
1315mwl_transmit(struct ieee80211com *ic, struct mbuf *m)
1316{
1317	struct mwl_softc *sc = ic->ic_softc;
1318	int error;
1319
1320	MWL_LOCK(sc);
1321	if (!sc->sc_running) {
1322		MWL_UNLOCK(sc);
1323		return (ENXIO);
1324	}
1325	error = mbufq_enqueue(&sc->sc_snd, m);
1326	if (error) {
1327		MWL_UNLOCK(sc);
1328		return (error);
1329	}
1330	mwl_start(sc);
1331	MWL_UNLOCK(sc);
1332	return (0);
1333}
1334
/*
 * Drain the software transmit queue: pick the per-AC h/w queue
 * for each frame, grab a tx buffer and hand the frame to
 * mwl_tx_start, poking the firmware every mwl_txcoalesce frames.
 * Caller holds the softc lock.
 */
static void
mwl_start(struct mwl_softc *sc)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	MWL_LOCK_ASSERT(sc);
	if (!sc->sc_running || sc->sc_invalid)
		return;
	nqueued = 0;
	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		/*
		 * Grab the node for the destination.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			/* no buffers: release the frame and node ref */
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			sc->sc_stats.mst_tx_qstop++;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 * NB(review): on failure only the buffer and node ref
		 * are reclaimed here; presumably mwl_tx_start consumed
		 * the mbuf -- verify against its implementation.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			if_inc_counter(ni->ni_vap->iv_ifp,
			    IFCOUNTER_OERRORS, 1);
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host. So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1412
/*
 * Transmit a raw (caller-formed) frame, e.g. management frames
 * injected by net80211 or bpf.  Returns 0 on success or an errno;
 * the mbuf is freed on the paths that fail before handoff.
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if (!sc->sc_running || sc->sc_invalid) {
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 * NB(review): on failure the mbuf is not freed here --
	 * presumably mwl_tx_start consumed it; verify, and note
	 * the node reference is left for the caller to release.
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		mwl_puttxbuf_head(txq, bf);

		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host. So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1465
1466static int
1467mwl_media_change(struct ifnet *ifp)
1468{
1469	struct ieee80211vap *vap;
1470	int error;
1471
1472	/* NB: only the fixed rate can change and that doesn't need a reset */
1473	error = ieee80211_media_change(ifp);
1474	if (error != 0)
1475		return (error);
1476
1477	vap = ifp->if_softc;
1478	mwl_setrates(vap);
1479	return (0);
1480}
1481
1482#ifdef MWL_DEBUG
1483static void
1484mwl_keyprint(struct mwl_softc *sc, const char *tag,
1485	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
1486{
1487	static const char *ciphers[] = {
1488		"WEP",
1489		"TKIP",
1490		"AES-CCM",
1491	};
1492	int i, n;
1493
1494	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
1495	for (i = 0, n = hk->keyLen; i < n; i++)
1496		printf(" %02x", hk->key.aes[i]);
1497	printf(" mac %s", ether_sprintf(mac));
1498	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
1499		printf(" %s", "rxmic");
1500		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
1501			printf(" %02x", hk->key.tkip.rxMic[i]);
1502		printf(" txmic");
1503		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
1504			printf(" %02x", hk->key.tkip.txMic[i]);
1505	}
1506	printf(" flags 0x%x\n", hk->keyFlags);
1507}
1508#endif
1509
1510/*
1511 * Allocate a key cache slot for a unicast key.  The
1512 * firmware handles key allocation and every station is
1513 * guaranteed key space so we are always successful.
1514 */
1515static int
1516mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
1517	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1518{
1519	struct mwl_softc *sc = vap->iv_ic->ic_softc;
1520
1521	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
1522	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
1523		if (!(&vap->iv_nw_keys[0] <= k &&
1524		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
1525			/* should not happen */
1526			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1527				"%s: bogus group key\n", __func__);
1528			return 0;
1529		}
1530		/* give the caller what they requested */
1531		*keyix = *rxkeyix = ieee80211_crypto_get_key_wepidx(vap, k);
1532	} else {
1533		/*
1534		 * Firmware handles key allocation.
1535		 */
1536		*keyix = *rxkeyix = 0;
1537	}
1538	return 1;
1539}
1540
1541/*
1542 * Delete a key entry allocated by mwl_key_alloc.
1543 */
1544static int
1545mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
1546{
1547	struct mwl_softc *sc = vap->iv_ic->ic_softc;
1548	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1549	MWL_HAL_KEYVAL hk;
1550	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
1551	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1552
1553	if (hvap == NULL) {
1554		if (vap->iv_opmode != IEEE80211_M_WDS) {
1555			/* XXX monitor mode? */
1556			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1557			    "%s: no hvap for opmode %d\n", __func__,
1558			    vap->iv_opmode);
1559			return 0;
1560		}
1561		hvap = MWL_VAP(vap)->mv_ap_hvap;
1562	}
1563
1564	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
1565	    __func__, k->wk_keyix);
1566
1567	memset(&hk, 0, sizeof(hk));
1568	hk.keyIndex = k->wk_keyix;
1569	switch (k->wk_cipher->ic_cipher) {
1570	case IEEE80211_CIPHER_WEP:
1571		hk.keyTypeId = KEY_TYPE_ID_WEP;
1572		break;
1573	case IEEE80211_CIPHER_TKIP:
1574		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1575		break;
1576	case IEEE80211_CIPHER_AES_CCM:
1577		hk.keyTypeId = KEY_TYPE_ID_AES;
1578		break;
1579	default:
1580		/* XXX should not happen */
1581		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1582		    __func__, k->wk_cipher->ic_cipher);
1583		return 0;
1584	}
1585	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
1586}
1587
1588static __inline int
1589addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1590{
1591	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1592		if (k->wk_flags & IEEE80211_KEY_XMIT)
1593			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1594		if (k->wk_flags & IEEE80211_KEY_RECV)
1595			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1596		return 1;
1597	} else
1598		return 0;
1599}
1600
1601/*
1602 * Set the key cache contents for the specified key.  Key cache
1603 * slot(s) must already have been allocated by mwl_key_alloc.
1604 */
1605static int
1606mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
1607{
1608	return (_mwl_key_set(vap, k, k->wk_macaddr));
1609}
1610
/*
 * Plumb a key into the firmware key cache.  The subtle part is
 * choosing the sta db (mac) address the key is written against;
 * see the inline comments below.  Returns 1 on success, 0 on
 * failure (no hal vap, unknown cipher, or hal error).
 */
static int
_mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
	const uint8_t mac[IEEE80211_ADDR_LEN])
{
#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
#define	IEEE80211_IS_STATICKEY(k) \
	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
	 (GRPXMIT|IEEE80211_KEY_RECV))
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	const uint8_t *macaddr;
	MWL_HAL_KEYVAL hk;

	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
		("s/w crypto set?"));

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* WDS vaps use the parent ap's hal vap */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}
	/* translate the net80211 key to a hal key record */
	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (cip->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		hk.keyLen = k->wk_keylen;
		if (k->wk_keyix == vap->iv_def_txkey)
			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
		if (!IEEE80211_IS_STATICKEY(k)) {
			/* NB: WEP is never used for the PTK */
			(void) addgroupflags(&hk, k);
		}
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		hk.keyLen = k->wk_keylen;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	/*
	 * NB: tkip mic keys get copied here too; the layout
	 *     just happens to match that in ieee80211_key.
	 */
	memcpy(hk.key.aes, k->wk_key, hk.keyLen);

	/*
	 * Locate address of sta db entry for writing key;
	 * the convention unfortunately is somewhat different
	 * than how net80211, hostapd, and wpa_supplicant think.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		/*
		 * NB: keys plumbed before the sta reaches AUTH state
		 * will be discarded or written to the wrong sta db
		 * entry because iv_bss is meaningless.  This is ok
		 * (right now) because we handle deferred plumbing of
		 * WEP keys when the sta reaches AUTH state.
		 */
		macaddr = vap->iv_bss->ni_bssid;
		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
			/* XXX plumb to local sta db too for static key wep */
			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
		}
	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
	    vap->iv_state != IEEE80211_S_RUN) {
		/*
		 * Prior to RUN state a WDS vap will not it's BSS node
		 * setup so we will plumb the key to the wrong mac
		 * address (it'll be our local address).  Workaround
		 * this for the moment by grabbing the correct address.
		 */
		macaddr = vap->iv_des_bssid;
	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
		macaddr = vap->iv_myaddr;
	else
		macaddr = mac;
	KEYPRINTF(sc, &hk, macaddr);
	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
#undef IEEE80211_IS_STATICKEY
#undef GRPXMIT
}
1715
1716/*
1717 * Set the multicast filter contents into the hardware.
1718 * XXX f/w has no support; just defer to the os.
1719 */
1720static void
1721mwl_setmcastfilter(struct mwl_softc *sc)
1722{
1723#if 0
1724	struct ether_multi *enm;
1725	struct ether_multistep estep;
1726	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
1727	uint8_t *mp;
1728	int nmc;
1729
1730	mp = macs;
1731	nmc = 0;
1732	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
1733	while (enm != NULL) {
1734		/* XXX Punt on ranges. */
1735		if (nmc == MWL_HAL_MCAST_MAX ||
1736		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
1737			ifp->if_flags |= IFF_ALLMULTI;
1738			return;
1739		}
1740		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
1741		mp += IEEE80211_ADDR_LEN, nmc++;
1742		ETHER_NEXT_MULTI(estep, enm);
1743	}
1744	ifp->if_flags &= ~IFF_ALLMULTI;
1745	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
1746#endif
1747}
1748
1749static int
1750mwl_mode_init(struct mwl_softc *sc)
1751{
1752	struct ieee80211com *ic = &sc->sc_ic;
1753	struct mwl_hal *mh = sc->sc_mh;
1754
1755	mwl_hal_setpromisc(mh, ic->ic_promisc > 0);
1756	mwl_setmcastfilter(sc);
1757
1758	return 0;
1759}
1760
1761/*
1762 * Callback from the 802.11 layer after a multicast state change.
1763 */
1764static void
1765mwl_update_mcast(struct ieee80211com *ic)
1766{
1767	struct mwl_softc *sc = ic->ic_softc;
1768
1769	mwl_setmcastfilter(sc);
1770}
1771
1772/*
1773 * Callback from the 802.11 layer after a promiscuous mode change.
1774 * Note this interface does not check the operating mode as this
1775 * is an internal callback and we are expected to honor the current
1776 * state (e.g. this is used for setting the interface in promiscuous
1777 * mode when operating in hostap mode to do ACS).
1778 */
1779static void
1780mwl_update_promisc(struct ieee80211com *ic)
1781{
1782	struct mwl_softc *sc = ic->ic_softc;
1783
1784	mwl_hal_setpromisc(sc->sc_mh, ic->ic_promisc > 0);
1785}
1786
1787/*
1788 * Callback from the 802.11 layer to update the slot time
1789 * based on the current setting.  We use it to notify the
1790 * firmware of ERP changes and the f/w takes care of things
1791 * like slot time and preamble.
1792 */
1793static void
1794mwl_updateslot(struct ieee80211com *ic)
1795{
1796	struct mwl_softc *sc = ic->ic_softc;
1797	struct mwl_hal *mh = sc->sc_mh;
1798	int prot;
1799
1800	/* NB: can be called early; suppress needless cmds */
1801	if (!sc->sc_running)
1802		return;
1803
1804	/*
1805	 * Calculate the ERP flags.  The firwmare will use
1806	 * this to carry out the appropriate measures.
1807	 */
1808	prot = 0;
1809	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
1810		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
1811			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
1812		if (ic->ic_flags & IEEE80211_F_USEPROT)
1813			prot |= IEEE80211_ERP_USE_PROTECTION;
1814		if (ic->ic_flags & IEEE80211_F_USEBARKER)
1815			prot |= IEEE80211_ERP_LONG_PREAMBLE;
1816	}
1817
1818	DPRINTF(sc, MWL_DEBUG_RESET,
1819	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
1820	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
1821	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
1822	    ic->ic_flags);
1823
1824	mwl_hal_setgprot(mh, prot);
1825}
1826
1827/*
1828 * Setup the beacon frame.
1829 */
1830static int
1831mwl_beacon_setup(struct ieee80211vap *vap)
1832{
1833	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1834	struct ieee80211_node *ni = vap->iv_bss;
1835	struct mbuf *m;
1836
1837	m = ieee80211_beacon_alloc(ni);
1838	if (m == NULL)
1839		return ENOBUFS;
1840	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1841	m_free(m);
1842
1843	return 0;
1844}
1845
1846/*
1847 * Update the beacon frame in response to a change.
1848 */
1849static void
1850mwl_beacon_update(struct ieee80211vap *vap, int item)
1851{
1852	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1853	struct ieee80211com *ic = vap->iv_ic;
1854
1855	KASSERT(hvap != NULL, ("no beacon"));
1856	switch (item) {
1857	case IEEE80211_BEACON_ERP:
1858		mwl_updateslot(ic);
1859		break;
1860	case IEEE80211_BEACON_HTINFO:
1861		mwl_hal_setnprotmode(hvap, _IEEE80211_MASKSHIFT(
1862		    ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1863		break;
1864	case IEEE80211_BEACON_CAPS:
1865	case IEEE80211_BEACON_WME:
1866	case IEEE80211_BEACON_APPIE:
1867	case IEEE80211_BEACON_CSA:
1868		break;
1869	case IEEE80211_BEACON_TIM:
1870		/* NB: firmware always forms TIM */
1871		return;
1872	}
1873	/* XXX retain beacon frame and update */
1874	mwl_beacon_setup(vap);
1875}
1876
1877static void
1878mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1879{
1880	bus_addr_t *paddr = (bus_addr_t*) arg;
1881	KASSERT(error == 0, ("error %u on bus_dma callback", error));
1882	*paddr = segs->ds_addr;
1883}
1884
1885#ifdef MWL_HOST_PS_SUPPORT
1886/*
1887 * Handle power save station occupancy changes.
1888 */
1889static void
1890mwl_update_ps(struct ieee80211vap *vap, int nsta)
1891{
1892	struct mwl_vap *mvp = MWL_VAP(vap);
1893
1894	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
1895		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
1896	mvp->mv_last_ps_sta = nsta;
1897}
1898
1899/*
1900 * Handle associated station power save state changes.
1901 */
1902static int
1903mwl_set_tim(struct ieee80211_node *ni, int set)
1904{
1905	struct ieee80211vap *vap = ni->ni_vap;
1906	struct mwl_vap *mvp = MWL_VAP(vap);
1907
1908	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1909		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1910		    IEEE80211_AID(ni->ni_associd), set);
1911		return 1;
1912	} else
1913		return 0;
1914}
1915#endif /* MWL_HOST_PS_SUPPORT */
1916
1917static int
1918mwl_desc_setup(struct mwl_softc *sc, const char *name,
1919	struct mwl_descdma *dd,
1920	int nbuf, size_t bufsize, int ndesc, size_t descsize)
1921{
1922	uint8_t *ds;
1923	int error;
1924
1925	DPRINTF(sc, MWL_DEBUG_RESET,
1926	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
1927	    __func__, name, nbuf, (uintmax_t) bufsize,
1928	    ndesc, (uintmax_t) descsize);
1929
1930	dd->dd_name = name;
1931	dd->dd_desc_len = nbuf * ndesc * descsize;
1932
1933	/*
1934	 * Setup DMA descriptor area.
1935	 */
1936	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
1937		       PAGE_SIZE, 0,		/* alignment, bounds */
1938		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1939		       BUS_SPACE_MAXADDR,	/* highaddr */
1940		       NULL, NULL,		/* filter, filterarg */
1941		       dd->dd_desc_len,		/* maxsize */
1942		       1,			/* nsegments */
1943		       dd->dd_desc_len,		/* maxsegsize */
1944		       BUS_DMA_ALLOCNOW,	/* flags */
1945		       NULL,			/* lockfunc */
1946		       NULL,			/* lockarg */
1947		       &dd->dd_dmat);
1948	if (error != 0) {
1949		device_printf(sc->sc_dev, "cannot allocate %s DMA tag\n", dd->dd_name);
1950		return error;
1951	}
1952
1953	/* allocate descriptors */
1954	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
1955				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
1956				 &dd->dd_dmamap);
1957	if (error != 0) {
1958		device_printf(sc->sc_dev, "unable to alloc memory for %u %s descriptors, "
1959			"error %u\n", nbuf * ndesc, dd->dd_name, error);
1960		goto fail1;
1961	}
1962
1963	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
1964				dd->dd_desc, dd->dd_desc_len,
1965				mwl_load_cb, &dd->dd_desc_paddr,
1966				BUS_DMA_NOWAIT);
1967	if (error != 0) {
1968		device_printf(sc->sc_dev, "unable to map %s descriptors, error %u\n",
1969			dd->dd_name, error);
1970		goto fail2;
1971	}
1972
1973	ds = dd->dd_desc;
1974	memset(ds, 0, dd->dd_desc_len);
1975	DPRINTF(sc, MWL_DEBUG_RESET,
1976	    "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n",
1977	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
1978	    (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
1979
1980	return 0;
1981fail2:
1982	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
1983fail1:
1984	bus_dma_tag_destroy(dd->dd_dmat);
1985	memset(dd, 0, sizeof(*dd));
1986	return error;
1987#undef DS2PHYS
1988}
1989
/*
 * Undo mwl_desc_setup: unload the DMA map, free the descriptor
 * memory, destroy the tag, then clear the bookkeeping state.
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	memset(dd, 0, sizeof(*dd));
}
1999
2000/*
2001 * Construct a tx q's free list.  The order of entries on
2002 * the list must reflect the physical layout of tx descriptors
2003 * because the firmware pre-fetches descriptors.
2004 *
2005 * XXX might be better to use indices into the buffer array.
2006 */
2007static void
2008mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2009{
2010	struct mwl_txbuf *bf;
2011	int i;
2012
2013	bf = txq->dma.dd_bufptr;
2014	STAILQ_INIT(&txq->free);
2015	for (i = 0; i < mwl_txbuf; i++, bf++)
2016		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2017	txq->nfree = i;
2018}
2019
/*
 * Convert a descriptor pointer within a descdma area to the
 * corresponding bus/physical address of that descriptor.
 */
#define	DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2022
/*
 * Allocate tx state for one queue: descriptor dma memory via
 * mwl_desc_setup (MWL_TXDESC descriptors per buffer), a parallel
 * array of mwl_txbuf software state, and one dmamap per buffer
 * for loading outbound mbuf chains.  On success the queue's free
 * list is initialized via mwl_txq_reset.  On failure partial
 * state is left in place; the caller (mwl_dma_setup) reclaims it
 * through mwl_dma_cleanup/mwl_txdma_cleanup.
 *
 * Returns 0 or an errno.
 */
static int
mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
{
	int error, bsize, i;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;

	error = mwl_desc_setup(sc, "tx", &txq->dma,
			mwl_txbuf, sizeof(struct mwl_txbuf),
			MWL_TXDESC, sizeof(struct mwl_txdesc));
	if (error != 0)
		return error;

	/* allocate and setup tx buffers */
	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
	if (bf == NULL) {
		device_printf(sc->sc_dev, "malloc of %u tx buffers failed\n",
			mwl_txbuf);
		return ENOMEM;
	}
	txq->dma.dd_bufptr = bf;

	/* bind each buffer to its MWL_TXDESC contiguous descriptors */
	ds = txq->dma.dd_desc;
	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
				&bf->bf_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev, "unable to create dmamap for tx "
				"buffer %u, error %u\n", i, error);
			/* NB: caller reclaims partial state via mwl_dma_cleanup */
			return error;
		}
	}
	mwl_txq_reset(sc, txq);
	return 0;
}
2061
2062static void
2063mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2064{
2065	struct mwl_txbuf *bf;
2066	int i;
2067
2068	bf = txq->dma.dd_bufptr;
2069	for (i = 0; i < mwl_txbuf; i++, bf++) {
2070		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2071		KASSERT(bf->bf_node == NULL, ("node on free list"));
2072		if (bf->bf_dmamap != NULL)
2073			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2074	}
2075	STAILQ_INIT(&txq->free);
2076	txq->nfree = 0;
2077	if (txq->dma.dd_bufptr != NULL) {
2078		free(txq->dma.dd_bufptr, M_MWLDEV);
2079		txq->dma.dd_bufptr = NULL;
2080	}
2081	if (txq->dma.dd_desc_len != 0)
2082		mwl_desc_cleanup(sc, &txq->dma);
2083}
2084
2085static int
2086mwl_rxdma_setup(struct mwl_softc *sc)
2087{
2088	int error, jumbosize, bsize, i;
2089	struct mwl_rxbuf *bf;
2090	struct mwl_jumbo *rbuf;
2091	struct mwl_rxdesc *ds;
2092	caddr_t data;
2093
2094	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2095			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2096			1, sizeof(struct mwl_rxdesc));
2097	if (error != 0)
2098		return error;
2099
2100	/*
2101	 * Receive is done to a private pool of jumbo buffers.
2102	 * This allows us to attach to mbuf's and avoid re-mapping
2103	 * memory on each rx we post.  We allocate a large chunk
2104	 * of memory and manage it in the driver.  The mbuf free
2105	 * callback method is used to reclaim frames after sending
2106	 * them up the stack.  By default we allocate 2x the number of
2107	 * rx descriptors configured so we have some slop to hold
2108	 * us while frames are processed.
2109	 */
2110	if (mwl_rxbuf < 2*mwl_rxdesc) {
2111		device_printf(sc->sc_dev,
2112		    "too few rx dma buffers (%d); increasing to %d\n",
2113		    mwl_rxbuf, 2*mwl_rxdesc);
2114		mwl_rxbuf = 2*mwl_rxdesc;
2115	}
2116	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2117	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2118
2119	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2120		       PAGE_SIZE, 0,		/* alignment, bounds */
2121		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2122		       BUS_SPACE_MAXADDR,	/* highaddr */
2123		       NULL, NULL,		/* filter, filterarg */
2124		       sc->sc_rxmemsize,	/* maxsize */
2125		       1,			/* nsegments */
2126		       sc->sc_rxmemsize,	/* maxsegsize */
2127		       BUS_DMA_ALLOCNOW,	/* flags */
2128		       NULL,			/* lockfunc */
2129		       NULL,			/* lockarg */
2130		       &sc->sc_rxdmat);
2131	if (error != 0) {
2132		device_printf(sc->sc_dev, "could not create rx DMA tag\n");
2133		return error;
2134	}
2135
2136	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2137				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2138				 &sc->sc_rxmap);
2139	if (error != 0) {
2140		device_printf(sc->sc_dev, "could not alloc %ju bytes of rx DMA memory\n",
2141		    (uintmax_t) sc->sc_rxmemsize);
2142		return error;
2143	}
2144
2145	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2146				sc->sc_rxmem, sc->sc_rxmemsize,
2147				mwl_load_cb, &sc->sc_rxmem_paddr,
2148				BUS_DMA_NOWAIT);
2149	if (error != 0) {
2150		device_printf(sc->sc_dev, "could not load rx DMA map\n");
2151		return error;
2152	}
2153
2154	/*
2155	 * Allocate rx buffers and set them up.
2156	 */
2157	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2158	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2159	if (bf == NULL) {
2160		device_printf(sc->sc_dev, "malloc of %u rx buffers failed\n", bsize);
2161		return error;
2162	}
2163	sc->sc_rxdma.dd_bufptr = bf;
2164
2165	STAILQ_INIT(&sc->sc_rxbuf);
2166	ds = sc->sc_rxdma.dd_desc;
2167	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2168		bf->bf_desc = ds;
2169		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2170		/* pre-assign dma buffer */
2171		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2172		/* NB: tail is intentional to preserve descriptor order */
2173		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2174	}
2175
2176	/*
2177	 * Place remainder of dma memory buffers on the free list.
2178	 */
2179	SLIST_INIT(&sc->sc_rxfree);
2180	for (; i < mwl_rxbuf; i++) {
2181		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2182		rbuf = MWL_JUMBO_DATA2BUF(data);
2183		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2184		sc->sc_nrxfree++;
2185	}
2186	return 0;
2187}
2188#undef DS2PHYS
2189
/*
 * Release all rx state: the jumbo buffer pool mapping/memory,
 * the mwl_rxbuf array, and the rx descriptor dma area.  Each
 * step is guarded so this is safe after a partial setup.
 *
 * NOTE(review): sc_rxdmat (the rx buffer pool tag) is never
 * bus_dma_tag_destroy'd here -- confirm it is released elsewhere
 * on detach.
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	if (sc->sc_rxmem_paddr != 0) {
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmem_paddr = 0;
	}
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
}
2208
2209static int
2210mwl_dma_setup(struct mwl_softc *sc)
2211{
2212	int error, i;
2213
2214	error = mwl_rxdma_setup(sc);
2215	if (error != 0) {
2216		mwl_rxdma_cleanup(sc);
2217		return error;
2218	}
2219
2220	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2221		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2222		if (error != 0) {
2223			mwl_dma_cleanup(sc);
2224			return error;
2225		}
2226	}
2227	return 0;
2228}
2229
2230static void
2231mwl_dma_cleanup(struct mwl_softc *sc)
2232{
2233	int i;
2234
2235	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2236		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2237	mwl_rxdma_cleanup(sc);
2238}
2239
2240static struct ieee80211_node *
2241mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2242{
2243	struct ieee80211com *ic = vap->iv_ic;
2244	struct mwl_softc *sc = ic->ic_softc;
2245	const size_t space = sizeof(struct mwl_node);
2246	struct mwl_node *mn;
2247
2248	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2249	if (mn == NULL) {
2250		/* XXX stat+msg */
2251		return NULL;
2252	}
2253	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2254	return &mn->mn_node;
2255}
2256
/*
 * net80211 callback: reclaim per-node driver state before the
 * node is freed.  If the station has a firmware sta db entry
 * (mn_staid != 0) delete it from the firmware and release the
 * staid, then chain to the saved net80211 cleanup method.
 */
static void
mwl_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
        struct mwl_softc *sc = ic->ic_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
	    __func__, ni, ni->ni_ic, mn->mn_staid);

	if (mn->mn_staid != 0) {
		struct ieee80211vap *vap = ni->ni_vap;

		/* NB: the else-if below pairs with this if, not the outer one */
		if (mn->mn_hvap != NULL) {
			if (vap->iv_opmode == IEEE80211_M_STA)
				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
			else
				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
		}
		/*
		 * NB: legacy WDS peer sta db entry is installed using
		 * the associate ap's hvap; use it again to delete it.
		 * XXX can vap be NULL?
		 */
		else if (vap->iv_opmode == IEEE80211_M_WDS &&
		    MWL_VAP(vap)->mv_ap_hvap != NULL)
			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
			    ni->ni_macaddr);
		delstaid(sc, mn->mn_staid);
		mn->mn_staid = 0;
	}
	sc->sc_node_cleanup(ni);
}
2290
2291/*
2292 * Reclaim rx dma buffers from packets sitting on the ampdu
2293 * reorder queue for a station.  We replace buffers with a
2294 * system cluster (if available).
2295 */
2296static void
2297mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
2298{
2299#if 0
2300	int i, n, off;
2301	struct mbuf *m;
2302	void *cl;
2303
2304	n = rap->rxa_qframes;
2305	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
2306		m = rap->rxa_m[i];
2307		if (m == NULL)
2308			continue;
2309		n--;
2310		/* our dma buffers have a well-known free routine */
2311		if ((m->m_flags & M_EXT) == 0 ||
2312		    m->m_ext.ext_free != mwl_ext_free)
2313			continue;
2314		/*
2315		 * Try to allocate a cluster and move the data.
2316		 */
2317		off = m->m_data - m->m_ext.ext_buf;
2318		if (off + m->m_pkthdr.len > MCLBYTES) {
2319			/* XXX no AMSDU for now */
2320			continue;
2321		}
2322		cl = pool_cache_get_paddr(&mclpool_cache, 0,
2323		    &m->m_ext.ext_paddr);
2324		if (cl != NULL) {
2325			/*
2326			 * Copy the existing data to the cluster, remove
2327			 * the rx dma buffer, and attach the cluster in
2328			 * its place.  Note we preserve the offset to the
2329			 * data so frames being bridged can still prepend
2330			 * their headers without adding another mbuf.
2331			 */
2332			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
2333			MEXTREMOVE(m);
2334			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
2335			/* setup mbuf like _MCLGET does */
2336			m->m_flags |= M_CLUSTER | M_EXT_RW;
2337			_MOWNERREF(m, M_EXT | M_CLUSTER);
2338			/* NB: m_data is clobbered by MEXTADDR, adjust */
2339			m->m_data += off;
2340		}
2341	}
2342#endif
2343}
2344
2345/*
2346 * Callback to reclaim resources.  We first let the
2347 * net80211 layer do it's thing, then if we are still
2348 * blocked by a lack of rx dma buffers we walk the ampdu
2349 * reorder q's to reclaim buffers by copying to a system
2350 * cluster.
2351 */
/*
 * net80211 drain callback for a node.  Chain to the saved
 * net80211 method first to age out ampdu queues; if we are
 * still blocked for lack of rx dma buffers and this is an HT
 * station with a firmware sta entry, walk its per-tid reorder
 * queues and try to reclaim our dma buffers (no-op today, see
 * mwl_ampdu_rxdma_reclaim).
 */
static void
mwl_node_drain(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
        struct mwl_softc *sc = ic->ic_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
	    __func__, ni, ni->ni_vap, mn->mn_staid);

	/* NB: call up first to age out ampdu q's */
	sc->sc_node_drain(ni);

	/* XXX better to not check low water mark? */
	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
	    (ni->ni_flags & IEEE80211_NODE_HT)) {
		uint8_t tid;
		/*
		 * Walk the reorder q and reclaim rx dma buffers by copying
		 * the packet contents into clusters.
		 */
		for (tid = 0; tid < WME_NUM_TID; tid++) {
			struct ieee80211_rx_ampdu *rap;

			rap = &ni->ni_rx_ampdu[tid];
			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
				continue;
			if (rap->rxa_qframes)
				mwl_ampdu_rxdma_reclaim(rap);
		}
	}
}
2384
/*
 * net80211 callback: report rssi and noise floor for a node.
 * RSSI comes from the ic's rssi method; the noise floor is a
 * fixed -95 dBm placeholder (the per-antenna h/w value is not
 * smoothed yet, see the disabled branch).
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2400
2401/*
2402 * Convert Hardware per-antenna rssi info to common format:
2403 * Let a1, a2, a3 represent the amplitudes per chain
2404 * Let amax represent max[a1, a2, a3]
2405 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2406 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2407 * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2408 * maintain some extra precision.
2409 *
2410 * Values are stored in .5 db format capped at 127.
2411 */
/*
 * net80211 callback: fill in per-chain rssi/noise using the
 * formula described above.  logdbtbl[i] = 4*20*log10(i) so the
 * >>2 in CVT recovers dB; results are .5 dB units capped at 127.
 */
static void
mwl_node_getmimoinfo(const struct ieee80211_node *ni,
	struct ieee80211_mimo_info *mi)
{
/* convert one chain: rssi + 20*log10(a/amax), clamped to 127 in .5 dB units */
#define	CVT(_dst, _src) do {						\
	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
} while (0)
	static const int8_t logdbtbl[32] = {
	       0,   0,  24,  38,  48,  56,  62,  68,
	      72,  76,  80,  83,  86,  89,  92,  94,
	      96,  98, 100, 102, 104, 106, 107, 109,
	     110, 112, 113, 115, 116, 117, 118, 119
	};
	const struct mwl_node *mn = MWL_NODE_CONST(ni);
	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
	uint32_t rssi_max;

	/* strongest chain is the reference amplitude (amax) */
	rssi_max = mn->mn_ai.rssi_a;
	if (mn->mn_ai.rssi_b > rssi_max)
		rssi_max = mn->mn_ai.rssi_b;
	if (mn->mn_ai.rssi_c > rssi_max)
		rssi_max = mn->mn_ai.rssi_c;

	CVT(mi->ch[0].rssi[0], mn->mn_ai.rssi_a);
	CVT(mi->ch[1].rssi[0], mn->mn_ai.rssi_b);
	CVT(mi->ch[2].rssi[0], mn->mn_ai.rssi_c);

	mi->ch[0].noise[0] = mn->mn_ai.nf_a;
	mi->ch[1].noise[0] = mn->mn_ai.nf_b;
	mi->ch[2].noise[0] = mn->mn_ai.nf_c;
#undef CVT
}
2445
2446static __inline void *
2447mwl_getrxdma(struct mwl_softc *sc)
2448{
2449	struct mwl_jumbo *buf;
2450	void *data;
2451
2452	/*
2453	 * Allocate from jumbo pool.
2454	 */
2455	MWL_RXFREE_LOCK(sc);
2456	buf = SLIST_FIRST(&sc->sc_rxfree);
2457	if (buf == NULL) {
2458		DPRINTF(sc, MWL_DEBUG_ANY,
2459		    "%s: out of rx dma buffers\n", __func__);
2460		sc->sc_stats.mst_rx_nodmabuf++;
2461		data = NULL;
2462	} else {
2463		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2464		sc->sc_nrxfree--;
2465		data = MWL_JUMBO_BUF2DATA(buf);
2466	}
2467	MWL_RXFREE_UNLOCK(sc);
2468	return data;
2469}
2470
2471static __inline void
2472mwl_putrxdma(struct mwl_softc *sc, void *data)
2473{
2474	struct mwl_jumbo *buf;
2475
2476	/* XXX bounds check data */
2477	MWL_RXFREE_LOCK(sc);
2478	buf = MWL_JUMBO_DATA2BUF(data);
2479	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2480	sc->sc_nrxfree++;
2481	MWL_RXFREE_UNLOCK(sc);
2482}
2483
/*
 * (Re)initialize an rx descriptor for its buffer and hand it
 * back to the firmware.  If the buffer is missing, try to get
 * one from the jumbo pool; on failure mark the descriptor
 * OS-owned so the firmware skips it, and return ENOMEM.
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	/* DRIVER_OWN last: hands the descriptor back to the firmware */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2522
/*
 * mbuf external-storage free callback: return the rx dma buffer
 * to the jumbo pool and, if rx was blocked on buffer exhaustion
 * and the pool has refilled past mwl_rxdmalow, re-enable rx
 * interrupts.
 */
static void
mwl_ext_free(struct mbuf *m)
{
	struct mwl_softc *sc = m->m_ext.ext_arg1;

	/* XXX bounds check data */
	mwl_putrxdma(sc, m->m_ext.ext_buf);
	/*
	 * If we were previously blocked by a lack of rx dma buffers
	 * check if we now have enough to restart rx interrupt handling.
	 * NB: we know we are called at splvm which is above splnet.
	 * NOTE(review): sc_rxblocked/sc_nrxfree are read here without
	 * the rxfree lock -- presumably benign, but worth confirming.
	 */
	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
		sc->sc_rxblocked = 0;
		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
	}
}
2540
/*
 * 802.11 Block Ack Request (BAR) control frame header, used by
 * mwl_anyhdrsize to size BAR frames.
 */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];	/* frame control */
	u_int8_t	i_dur[2];	/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2548
2549/*
2550 * Like ieee80211_anyhdrsize, but handles BAR frames
2551 * specially so the logic below to piece the 802.11
2552 * header together works.
2553 */
2554static __inline int
2555mwl_anyhdrsize(const void *data)
2556{
2557	const struct ieee80211_frame *wh = data;
2558
2559	if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2560		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2561		case IEEE80211_FC0_SUBTYPE_CTS:
2562		case IEEE80211_FC0_SUBTYPE_ACK:
2563			return sizeof(struct ieee80211_frame_ack);
2564		case IEEE80211_FC0_SUBTYPE_BAR:
2565			return sizeof(struct mwl_frame_bar);
2566		}
2567		return sizeof(struct ieee80211_frame_min);
2568	} else
2569		return ieee80211_hdrsize(data);
2570}
2571
2572static void
2573mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2574{
2575	const struct ieee80211_frame *wh;
2576	struct ieee80211_node *ni;
2577
2578	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2579	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2580	if (ni != NULL) {
2581		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2582		ieee80211_free_node(ni);
2583	}
2584}
2585
2586/*
2587 * Convert hardware signal strength to rssi.  The value
2588 * provided by the device has the noise floor added in;
2589 * we need to compensate for this but we don't have that
2590 * so we use a fixed value.
2591 *
2592 * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2593 * offset is already set as part of the initial gain.  This
2594 * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2595 */
2596static __inline int
2597cvtrssi(uint8_t ssi)
2598{
2599	int rssi = (int) ssi + 8;
2600	/* XXX hack guess until we have a real noise floor */
2601	rssi = 2*(87 - rssi);	/* NB: .5 dBm units */
2602	return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi);
2603}
2604
/*
 * Rx processing task.  Walk the rx descriptor ring starting at
 * sc_rxnext, processing up to mwl_rxquota firmware-completed
 * frames: for each, replace its dma buffer from the jumbo pool,
 * wrap the old buffer in an mbuf (zero-copy via m_extadd with
 * mwl_ext_free), reconstruct the 802.11 header the firmware
 * delivered in 4-address form, and dispatch to net80211.  If the
 * pool runs dry, rx interrupts are disabled (sc_rxblocked) until
 * mwl_ext_free refills past the low-water mark.  Finally kick
 * the transmitter in case it was preempted.
 */
static void
mwl_rx_proc(void *arg, int npending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			counter_u64_add(ic->ic_ierrors, 1);
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		m_extadd(m, data, MWL_AGGR_SIZE, mwl_ext_free, sc, NULL, 0,
		    EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh))
			*(uint16_t *)ieee80211_getqos(wh) = ds->QosCtrl;
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED |
		    IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	sc->sc_rxnext = bf;

	if (mbufq_first(&sc->sc_snd) != NULL) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(sc);
	}
}
2825
/*
 * Initialize a tx queue: lock, queue number, and the descriptor
 * ring links.  Each buffer's descriptor is pointed at the next
 * buffer's, with the last wrapping back to the first, forming
 * the circular ring the firmware walks.
 */
static void
mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
{
	struct mwl_txbuf *bf, *bn;
	struct mwl_txdesc *ds;

	MWL_TXQ_LOCK_INIT(sc, txq);
	txq->qnum = qnum;
	txq->txpri = 0;	/* XXX */
#if 0
	/* NB: q setup by mwl_txdma_setup XXX */
	STAILQ_INIT(&txq->free);
#endif
	STAILQ_FOREACH(bf, &txq->free, bf_list) {
		bf->bf_txq = txq;

		ds = bf->bf_desc;
		bn = STAILQ_NEXT(bf, bf_list);
		if (bn == NULL)
			bn = STAILQ_FIRST(&txq->free);
		ds->pPhysNext = htole32(bn->bf_daddr);
	}
	STAILQ_INIT(&txq->active);
}
2850
2851/*
2852 * Setup a hardware data transmit queue for the specified
2853 * access control.  We record the mapping from ac's
2854 * to h/w queues for use by mwl_tx_start.
2855 */
2856static int
2857mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2858{
2859	struct mwl_txq *txq;
2860
2861	if (ac >= nitems(sc->sc_ac2q)) {
2862		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2863			ac, nitems(sc->sc_ac2q));
2864		return 0;
2865	}
2866	if (mvtype >= MWL_NUM_TX_QUEUES) {
2867		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2868			mvtype, MWL_NUM_TX_QUEUES);
2869		return 0;
2870	}
2871	txq = &sc->sc_txq[mvtype];
2872	mwl_txq_init(sc, txq, mvtype);
2873	sc->sc_ac2q[ac] = txq;
2874	return 1;
2875}
2876
2877/*
2878 * Update WME parameters for a transmit queue.
2879 */
/*
 * Push the current net80211 WME parameters for one access
 * category down to the firmware EDCA registers.  cwmin/cwmax
 * are converted from log2 form to actual window values; txop
 * is passed through in its native 32us units.  Returns 1 on
 * success, 0 if the hal update failed.
 */
static int
mwl_txq_update(struct mwl_softc *sc, int ac)
{
/* convert a log2 contention window exponent to the window value */
#define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
	struct ieee80211com *ic = &sc->sc_ic;
	struct chanAccParams chp;
	struct mwl_txq *txq = sc->sc_ac2q[ac];
	struct wmeParams *wmep;
	struct mwl_hal *mh = sc->sc_mh;
	int aifs, cwmin, cwmax, txoplim;

	ieee80211_wme_ic_getparams(ic, &chp);
	wmep = &chp.cap_wmeParams[ac];

	aifs = wmep->wmep_aifsn;
	/* XXX in sta mode need to pass log values for cwmin/max */
	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */

	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
		device_printf(sc->sc_dev, "unable to update hardware queue "
			"parameters for %s traffic!\n",
			ieee80211_wme_acnames[ac]);
		return 0;
	}
	return 1;
#undef MWL_EXPONENT_TO_VALUE
}
2909
2910/*
2911 * Callback from the 802.11 layer to update WME parameters.
2912 */
2913static int
2914mwl_wme_update(struct ieee80211com *ic)
2915{
2916	struct mwl_softc *sc = ic->ic_softc;
2917
2918	return !mwl_txq_update(sc, WME_AC_BE) ||
2919	    !mwl_txq_update(sc, WME_AC_BK) ||
2920	    !mwl_txq_update(sc, WME_AC_VI) ||
2921	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
2922}
2923
2924/*
2925 * Reclaim resources for a setup queue.
2926 */
2927static void
2928mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
2929{
2930	/* XXX hal work? */
2931	MWL_TXQ_LOCK_DESTROY(txq);
2932}
2933
2934/*
2935 * Reclaim all tx queue resources.
2936 */
2937static void
2938mwl_tx_cleanup(struct mwl_softc *sc)
2939{
2940	int i;
2941
2942	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2943		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
2944}
2945
/*
 * Load an outbound mbuf chain into a tx buffer's dma map.  If
 * the chain needs more than MWL_TXDESC segments it is collapsed
 * (or defragged) and reloaded; null chains are discarded.  On
 * success the map is pre-write synced and the (possibly
 * replaced) mbuf is recorded in bf->bf_m.  On any error the
 * mbuf chain is freed.  Returns 0 or an errno.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = MWL_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_NOWAIT);
#endif
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3007
/*
 * Map a legacy net80211 rate (in .5 Mb/s units) to the firmware
 * rate index; unknown rates map to index 0 (1 Mb/s).
 */
static __inline int
mwl_cvtlegacyrate(int rate)
{
	static const int ratetbl[] = {
		2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108
	};
	int i;

	for (i = 0; i < (int)(sizeof(ratetbl)/sizeof(ratetbl[0])); i++)
		if (ratetbl[i] == rate)
			return i;
	return 0;
}
3028
3029/*
3030 * Calculate fixed tx rate information per client state;
3031 * this value is suitable for writing to the Format field
3032 * of a tx descriptor.
3033 */
3034static uint16_t
3035mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
3036{
3037	uint16_t fmt;
3038
3039	fmt = _IEEE80211_SHIFTMASK(3, EAGLE_TXD_ANTENNA)
3040	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
3041		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
3042	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
3043		fmt |= EAGLE_TXD_FORMAT_HT
3044		    /* NB: 0x80 implicitly stripped from ucastrate */
3045		    | _IEEE80211_SHIFTMASK(rate, EAGLE_TXD_RATE);
3046		/* XXX short/long GI may be wrong; re-check */
3047		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
3048			fmt |= EAGLE_TXD_CHW_40
3049			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
3050			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3051		} else {
3052			fmt |= EAGLE_TXD_CHW_20
3053			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
3054			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3055		}
3056	} else {			/* legacy rate */
3057		fmt |= EAGLE_TXD_FORMAT_LEGACY
3058		    | _IEEE80211_SHIFTMASK(mwl_cvtlegacyrate(rate),
3059			EAGLE_TXD_RATE)
3060		    | EAGLE_TXD_CHW_20
3061		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
3062		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
3063			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
3064	}
3065	return fmt;
3066}
3067
/*
 * Prepare a frame for transmission: do crypto encap if needed,
 * prepend the firmware tx record (2-byte payload length + 4-address
 * header), load the DMA map, fill in the tx descriptor, and hand
 * the buffer to the firmware on bf's tx queue.
 *
 * Returns 0 on success; on error the mbuf is freed and an errno
 * is returned.  The caller-held node reference is recorded in bf
 * and reclaimed on tx completion.
 */
static int
mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
    struct mbuf *m0)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, iswep, ismcast;
	int hdrlen, copyhdrlen, pktlen;
	struct mwl_txdesc *ds;
	struct mwl_txq *txq;
	struct ieee80211_frame *wh;
	struct mwltxrec *tr;
	struct mwl_node *mn;
	uint16_t qos;
#if MWL_TXDESC > 1
	int i;
#endif

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	copyhdrlen = hdrlen;
	pktlen = m0->m_pkthdr.len;
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		/* Stash the QoS control field for the descriptor; for
		 * 4-address frames it is not part of the copied header. */
		qos = *(uint16_t *)ieee80211_getqos(wh);
		if (IEEE80211_IS_DSTODS(wh))
			copyhdrlen -= sizeof(qos);
	} else
		qos = 0;

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 *
		 * NB: we do this even though the firmware will ignore
		 *     what we've done for WEP and TKIP as we need the
		 *     ExtIV filled in for CCMP and this also adjusts
		 *     the headers which simplifies our work below.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			m_freem(m0);
			return EIO;
		}
		/*
		 * Adjust the packet length for the crypto additions
		 * done during encap and any other bits that the f/w
		 * will add later on.
		 */
		cip = k->wk_cipher;
		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;	/* XXX */
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
#if 0
		sc->sc_tx_th.wt_rate = ds->DataRate;
#endif
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}
	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 * We are assured the mbuf has sufficient headroom to
	 * prepend in-place by the setup of ic_headroom in
	 * mwl_attach.
	 */
	if (hdrlen < sizeof(struct mwltxrec)) {
		const int space = sizeof(struct mwltxrec) - hdrlen;
		if (M_LEADINGSPACE(m0) < space) {
			/* NB: should never happen */
			device_printf(sc->sc_dev,
			    "not enough headroom, need %d found %zd, "
			    "m_flags 0x%x m_len %d\n",
			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
			ieee80211_dump_pkt(ic,
			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
			m_freem(m0);
			sc->sc_stats.mst_tx_noheadroom++;
			return EIO;
		}
		M_PREPEND(m0, space, M_NOWAIT);
	}
	tr = mtod(m0, struct mwltxrec *);
	if (wh != (struct ieee80211_frame *) &tr->wh)
		ovbcopy(wh, &tr->wh, hdrlen);
	/*
	 * Note: the "firmware length" is actually the length
	 * of the fully formed "802.11 payload".  That is, it's
	 * everything except for the 802.11 header.  In particular
	 * this includes all crypto material including the MIC!
	 */
	tr->fwlen = htole16(pktlen - hdrlen);

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = mwl_tx_dmasetup(sc, bf, m0);
	if (error != 0) {
		/* NB: stat collected in mwl_tx_dmasetup */
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: unable to setup dma\n", __func__);
		return error;
	}
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	tr = mtod(m0, struct mwltxrec *);
	wh = (struct ieee80211_frame *)&tr->wh;

	/*
	 * Formulate tx descriptor.
	 */
	ds = bf->bf_desc;
	txq = bf->bf_txq;

	ds->QosCtrl = qos;			/* NB: already little-endian */
#if MWL_TXDESC == 1
	/*
	 * NB: multiframes should be zero because the descriptors
	 *     are initialized to zero.  This should handle the case
	 *     where the driver is built with MWL_TXDESC=1 but we are
	 *     using firmware with multi-segment support.
	 */
	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
#else
	ds->multiframes = htole32(bf->bf_nseg);
	ds->PktLen = htole16(m0->m_pkthdr.len);
	for (i = 0; i < bf->bf_nseg; i++) {
		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
	}
#endif
	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
	ds->Format = 0;
	ds->pad = 0;
	ds->ack_wcb_addr = 0;

	mn = MWL_NODE(ni);
	/*
	 * Select transmit rate.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		sc->sc_stats.mst_tx_mgmt++;
		/* fall thru... */
	case IEEE80211_FC0_TYPE_CTL:
		/* NB: assign to BE q to avoid bursting */
		ds->TxPriority = MWL_WME_AC_BE;
		break;
	case IEEE80211_FC0_TYPE_DATA:
		if (!ismcast) {
			const struct ieee80211_txparam *tp = ni->ni_txparms;
			/*
			 * EAPOL frames get forced to a fixed rate and w/o
			 * aggregation; otherwise check for any fixed rate
			 * for the client (may depend on association state).
			 */
			if (m0->m_flags & M_EAPOL) {
				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
				ds->Format = mvp->mv_eapolformat;
				ds->pad = htole16(
				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
				/* XXX pre-calculate per node */
				ds->Format = htole16(
				    mwl_calcformat(tp->ucastrate, ni));
				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
			}
			/* NB: EAPOL frames will never have qos set */
			/* Route the frame to a BA stream's tx queue when the
			 * QoS tid matches an active aggregation stream. */
			if (qos == 0)
				ds->TxPriority = txq->qnum;
#if MWL_MAXBA > 3
			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
				ds->TxPriority = mn->mn_ba[3].txq;
#endif
#if MWL_MAXBA > 2
			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
				ds->TxPriority = mn->mn_ba[2].txq;
#endif
#if MWL_MAXBA > 1
			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
				ds->TxPriority = mn->mn_ba[1].txq;
#endif
#if MWL_MAXBA > 0
			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
				ds->TxPriority = mn->mn_ba[0].txq;
#endif
			else
				ds->TxPriority = txq->qnum;
		} else
			ds->TxPriority = txq->qnum;
		break;
	default:
		device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		sc->sc_stats.mst_tx_badframetype++;
		m_freem(m0);
		return EIO;
	}

	if (IFF_DUMPPKTS_XMIT(sc))
		ieee80211_dump_pkt(ic,
		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);

	/* Hand ownership to the firmware and queue the buffer. */
	MWL_TXQ_LOCK(txq);
	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->sc_tx_timer = 5;
	MWL_TXQ_UNLOCK(txq);

	return 0;
}
3307
/*
 * Map a firmware legacy rate index back to the IEEE rate
 * (in 500Kb/s units).  Out-of-range indices map to 0.
 */
static __inline int
mwl_cvtlegacyrix(int rix)
{
	static const int ieeerates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };

	if (rix >= 0 && (size_t)rix < sizeof(ieeerates)/sizeof(ieeerates[0]))
		return ieeerates[rix];
	return 0;
}
3315
3316/*
3317 * Process completed xmit descriptors from the specified queue.
3318 */
3319static int
3320mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
3321{
3322#define	EAGLE_TXD_STATUS_MCAST \
3323	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
3324	struct ieee80211com *ic = &sc->sc_ic;
3325	struct mwl_txbuf *bf;
3326	struct mwl_txdesc *ds;
3327	struct ieee80211_node *ni;
3328	struct mwl_node *an;
3329	int nreaped;
3330	uint32_t status;
3331
3332	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
3333	for (nreaped = 0;; nreaped++) {
3334		MWL_TXQ_LOCK(txq);
3335		bf = STAILQ_FIRST(&txq->active);
3336		if (bf == NULL) {
3337			MWL_TXQ_UNLOCK(txq);
3338			break;
3339		}
3340		ds = bf->bf_desc;
3341		MWL_TXDESC_SYNC(txq, ds,
3342		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3343		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
3344			MWL_TXQ_UNLOCK(txq);
3345			break;
3346		}
3347		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
3348		MWL_TXQ_UNLOCK(txq);
3349
3350#ifdef MWL_DEBUG
3351		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
3352			mwl_printtxbuf(bf, txq->qnum, nreaped);
3353#endif
3354		ni = bf->bf_node;
3355		if (ni != NULL) {
3356			an = MWL_NODE(ni);
3357			status = le32toh(ds->Status);
3358			if (status & EAGLE_TXD_STATUS_OK) {
3359				uint16_t Format = le16toh(ds->Format);
3360				uint8_t txant = _IEEE80211_MASKSHIFT(Format,
3361				    EAGLE_TXD_ANTENNA);
3362
3363				sc->sc_stats.mst_ant_tx[txant]++;
3364				if (status & EAGLE_TXD_STATUS_OK_RETRY)
3365					sc->sc_stats.mst_tx_retries++;
3366				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
3367					sc->sc_stats.mst_tx_mretries++;
3368				if (txq->qnum >= MWL_WME_AC_VO)
3369					ic->ic_wme.wme_hipri_traffic++;
3370				ni->ni_txrate = _IEEE80211_MASKSHIFT(Format,
3371				    EAGLE_TXD_RATE);
3372				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
3373					ni->ni_txrate = mwl_cvtlegacyrix(
3374					    ni->ni_txrate);
3375				} else
3376					ni->ni_txrate |= IEEE80211_RATE_MCS;
3377				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
3378			} else {
3379				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
3380					sc->sc_stats.mst_tx_linkerror++;
3381				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
3382					sc->sc_stats.mst_tx_xretries++;
3383				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
3384					sc->sc_stats.mst_tx_aging++;
3385				if (bf->bf_m->m_flags & M_FF)
3386					sc->sc_stats.mst_ff_txerr++;
3387			}
3388			if (bf->bf_m->m_flags & M_TXCB)
3389				/* XXX strip fw len in case header inspected */
3390				m_adj(bf->bf_m, sizeof(uint16_t));
3391			ieee80211_tx_complete(ni, bf->bf_m,
3392			    (status & EAGLE_TXD_STATUS_OK) == 0);
3393		} else
3394			m_freem(bf->bf_m);
3395		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);
3396
3397		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3398		    BUS_DMASYNC_POSTWRITE);
3399		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3400
3401		mwl_puttxbuf_tail(txq, bf);
3402	}
3403	return nreaped;
3404#undef EAGLE_TXD_STATUS_MCAST
3405}
3406
3407/*
3408 * Deferred processing of transmit interrupt; special-cased
3409 * for four hardware queues, 0-3.
3410 */
3411static void
3412mwl_tx_proc(void *arg, int npending)
3413{
3414	struct mwl_softc *sc = arg;
3415	int nreaped;
3416
3417	/*
3418	 * Process each active queue.
3419	 */
3420	nreaped = 0;
3421	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3422		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3423	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3424		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3425	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3426		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3427	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3428		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3429
3430	if (nreaped != 0) {
3431		sc->sc_tx_timer = 0;
3432		if (mbufq_first(&sc->sc_snd) != NULL) {
3433			/* NB: kick fw; the tx thread may have been preempted */
3434			mwl_hal_txstart(sc->sc_mh, 0);
3435			mwl_start(sc);
3436		}
3437	}
3438}
3439
/*
 * Drain all buffers from a tx queue: unlink each active buffer,
 * unload its DMA map, release the node reference (if any) and
 * free the mbuf, then return the buffer to the free list.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ieee80211com *ic = &sc->sc_ic;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			/* NB: skip the 2-byte fw length prefix when dumping */
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3483
3484/*
3485 * Drain the transmit queues and reclaim resources.
3486 */
3487static void
3488mwl_draintxq(struct mwl_softc *sc)
3489{
3490	int i;
3491
3492	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3493		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3494	sc->sc_tx_timer = 0;
3495}
3496
#ifdef MWL_DIAGAPI
/*
 * Return every transmit queue to its pristine (post-attach)
 * state; required after a firmware download.
 */
static void
mwl_resettxq(struct mwl_softc *sc)
{
	int qnum;

	for (qnum = 0; qnum < MWL_NUM_TX_QUEUES; qnum++)
		mwl_txq_reset(sc, &sc->sc_txq[qnum]);
}
#endif /* MWL_DIAGAPI */
3510
3511/*
3512 * Clear the transmit queues of any frames submitted for the
3513 * specified vap.  This is done when the vap is deleted so we
3514 * don't potentially reference the vap after it is gone.
3515 * Note we cannot remove the frames; we only reclaim the node
3516 * reference.
3517 */
3518static void
3519mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3520{
3521	struct mwl_txq *txq;
3522	struct mwl_txbuf *bf;
3523	int i;
3524
3525	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3526		txq = &sc->sc_txq[i];
3527		MWL_TXQ_LOCK(txq);
3528		STAILQ_FOREACH(bf, &txq->active, bf_list) {
3529			struct ieee80211_node *ni = bf->bf_node;
3530			if (ni != NULL && ni->ni_vap == vap) {
3531				bf->bf_node = NULL;
3532				ieee80211_free_node(ni);
3533			}
3534		}
3535		MWL_TXQ_UNLOCK(txq);
3536	}
3537}
3538
3539static int
3540mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
3541	const uint8_t *frm, const uint8_t *efrm)
3542{
3543	struct mwl_softc *sc = ni->ni_ic->ic_softc;
3544	const struct ieee80211_action *ia;
3545
3546	ia = (const struct ieee80211_action *) frm;
3547	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3548	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3549		const struct ieee80211_action_ht_mimopowersave *mps =
3550		    (const struct ieee80211_action_ht_mimopowersave *) ia;
3551
3552		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3553		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3554		    _IEEE80211_MASKSHIFT(mps->am_control,
3555			IEEE80211_A_HT_MIMOPWRSAVE_MODE));
3556		return 0;
3557	} else
3558		return sc->sc_recv_action(ni, wh, frm, efrm);
3559}
3560
/*
 * Intercept an outbound ADDBA request: allocate a free per-node
 * BA stream slot and a firmware bastream before forwarding the
 * request via the saved net80211 handler.  Returning 0 (without
 * forwarding) declines aggregation for this tid.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 */
		/* NB: slots are probed highest-index first; the chain of
		 *     if/else pairs below collapses per MWL_MAXBA. */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3632
/*
 * Intercept the ADDBA response: on success create the firmware
 * BA stream (reserved earlier in mwl_addba_request) and bind it
 * to the tid; on NAK or create failure release the reserved
 * stream.  Always chains to the saved net80211 handler.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, TID %d\n",
		    __func__, tap->txa_tid);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d TID %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_tid, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3699
3700static void
3701mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3702{
3703	struct mwl_softc *sc = ni->ni_ic->ic_softc;
3704	struct mwl_bastate *bas;
3705
3706	bas = tap->txa_private;
3707	if (bas != NULL) {
3708		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3709		    __func__, bas->bastream);
3710		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3711		mwl_bastream_free(bas);
3712		tap->txa_private = NULL;
3713	}
3714	sc->sc_addba_stop(ni, tap);
3715}
3716
3717/*
3718 * Setup the rx data structures.  This should only be
3719 * done once or we may get out of sync with the firmware.
3720 */
3721static int
3722mwl_startrecv(struct mwl_softc *sc)
3723{
3724	if (!sc->sc_recvsetup) {
3725		struct mwl_rxbuf *bf, *prev;
3726		struct mwl_rxdesc *ds;
3727
3728		prev = NULL;
3729		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
3730			int error = mwl_rxbuf_init(sc, bf);
3731			if (error != 0) {
3732				DPRINTF(sc, MWL_DEBUG_RECV,
3733					"%s: mwl_rxbuf_init failed %d\n",
3734					__func__, error);
3735				return error;
3736			}
3737			if (prev != NULL) {
3738				ds = prev->bf_desc;
3739				ds->pPhysNext = htole32(bf->bf_daddr);
3740			}
3741			prev = bf;
3742		}
3743		if (prev != NULL) {
3744			ds = prev->bf_desc;
3745			ds->pPhysNext =
3746			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
3747		}
3748		sc->sc_recvsetup = 1;
3749	}
3750	mwl_mode_init(sc);		/* set filters, etc. */
3751	return 0;
3752}
3753
3754static MWL_HAL_APMODE
3755mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3756{
3757	MWL_HAL_APMODE mode;
3758
3759	if (IEEE80211_IS_CHAN_HT(chan)) {
3760		if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3761			mode = AP_MODE_N_ONLY;
3762		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3763			mode = AP_MODE_AandN;
3764		else if (vap->iv_flags & IEEE80211_F_PUREG)
3765			mode = AP_MODE_GandN;
3766		else
3767			mode = AP_MODE_BandGandN;
3768	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3769		if (vap->iv_flags & IEEE80211_F_PUREG)
3770			mode = AP_MODE_G_ONLY;
3771		else
3772			mode = AP_MODE_MIXED;
3773	} else if (IEEE80211_IS_CHAN_B(chan))
3774		mode = AP_MODE_B_ONLY;
3775	else if (IEEE80211_IS_CHAN_A(chan))
3776		mode = AP_MODE_A_ONLY;
3777	else
3778		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3779	return mode;
3780}
3781
3782static int
3783mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
3784{
3785	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
3786	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
3787}
3788
3789/*
3790 * Set/change channels.
3791 */
3792static int
3793mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
3794{
3795	struct mwl_hal *mh = sc->sc_mh;
3796	struct ieee80211com *ic = &sc->sc_ic;
3797	MWL_HAL_CHANNEL hchan;
3798	int maxtxpow;
3799
3800	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
3801	    __func__, chan->ic_freq, chan->ic_flags);
3802
3803	/*
3804	 * Convert to a HAL channel description with
3805	 * the flags constrained to reflect the current
3806	 * operating mode.
3807	 */
3808	mwl_mapchan(&hchan, chan);
3809	mwl_hal_intrset(mh, 0);		/* disable interrupts */
3810#if 0
3811	mwl_draintxq(sc);		/* clear pending tx frames */
3812#endif
3813	mwl_hal_setchannel(mh, &hchan);
3814	/*
3815	 * Tx power is cap'd by the regulatory setting and
3816	 * possibly a user-set limit.  We pass the min of
3817	 * these to the hal to apply them to the cal data
3818	 * for this channel.
3819	 * XXX min bound?
3820	 */
3821	maxtxpow = 2*chan->ic_maxregpower;
3822	if (maxtxpow > ic->ic_txpowlimit)
3823		maxtxpow = ic->ic_txpowlimit;
3824	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
3825	/* NB: potentially change mcast/mgt rates */
3826	mwl_setcurchanrates(sc);
3827
3828	/*
3829	 * Update internal state.
3830	 */
3831	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
3832	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
3833	if (IEEE80211_IS_CHAN_A(chan)) {
3834		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
3835		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
3836	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3837		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
3838		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
3839	} else {
3840		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
3841		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
3842	}
3843	sc->sc_curchan = hchan;
3844	mwl_hal_intrset(mh, sc->sc_imask);
3845
3846	return 0;
3847}
3848
/*
 * net80211 scan-start callback; the firmware needs no explicit
 * notification so this only emits a debug trace.
 */
static void
mwl_scan_start(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3856
/*
 * net80211 scan-end callback; nothing to do beyond a debug trace.
 */
static void
mwl_scan_end(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3864
/*
 * net80211 set-channel callback: program the current channel
 * into the hardware (return value intentionally ignored).
 */
static void
mwl_set_channel(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	(void) mwl_chan_set(sc, ic->ic_curchan);
}
3872
3873/*
3874 * Handle a channel switch request.  We inform the firmware
3875 * and mark the global state to suppress various actions.
3876 * NB: we issue only one request to the fw; we may be called
3877 * multiple times if there are multiple vap's.
3878 */
3879static void
3880mwl_startcsa(struct ieee80211vap *vap)
3881{
3882	struct ieee80211com *ic = vap->iv_ic;
3883	struct mwl_softc *sc = ic->ic_softc;
3884	MWL_HAL_CHANNEL hchan;
3885
3886	if (sc->sc_csapending)
3887		return;
3888
3889	mwl_mapchan(&hchan, ic->ic_csa_newchan);
3890	/* 1 =>'s quiet channel */
3891	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
3892	sc->sc_csapending = 1;
3893}
3894
3895/*
3896 * Plumb any static WEP key for the station.  This is
3897 * necessary as we must propagate the key from the
3898 * global key table of the vap to each sta db entry.
3899 */
3900static void
3901mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3902{
3903	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
3904		IEEE80211_F_PRIVACY &&
3905	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
3906	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
3907		(void) _mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey],
3908				    mac);
3909}
3910
/*
 * Create a firmware station db entry for a peer station and
 * (re)plumb any static WEP key.  For WDS vap's the entry is
 * installed through the associated AP vap's f/w handle.
 * Returns the mwl_hal_newstation error code.
 */
static int
mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_hal_vap *hvap;
	int error;

	if (vap->iv_opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vap's do not have a f/w vap; instead they piggyback
		 * on an AP vap and we must install the sta db entry and
		 * crypto state using that AP's handle (the WDS vap has none).
		 */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	} else
		hvap = MWL_VAP(vap)->mv_hvap;
	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
	    aid, staid, pi,
	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
	if (error == 0) {
		/*
		 * Setup security for this station.  For sta mode this is
		 * needed even though do the same thing on transition to
		 * AUTH state because the call to mwl_hal_newstation
		 * clobbers the crypto state we setup.
		 */
		mwl_setanywepkey(vap, ni->ni_macaddr);
	}
	return error;
#undef WME
}
3944
3945static void
3946mwl_setglobalkeys(struct ieee80211vap *vap)
3947{
3948	struct ieee80211_key *wk;
3949
3950	wk = &vap->iv_nw_keys[0];
3951	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
3952		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
3953			(void) _mwl_key_set(vap, wk, vap->iv_myaddr);
3954}
3955
3956/*
3957 * Convert a legacy rate set to a firmware bitmask.
3958 */
3959static uint32_t
3960get_rate_bitmap(const struct ieee80211_rateset *rs)
3961{
3962	uint32_t rates;
3963	int i;
3964
3965	rates = 0;
3966	for (i = 0; i < rs->rs_nrates; i++)
3967		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
3968		case 2:	  rates |= 0x001; break;
3969		case 4:	  rates |= 0x002; break;
3970		case 11:  rates |= 0x004; break;
3971		case 22:  rates |= 0x008; break;
3972		case 44:  rates |= 0x010; break;
3973		case 12:  rates |= 0x020; break;
3974		case 18:  rates |= 0x040; break;
3975		case 24:  rates |= 0x080; break;
3976		case 36:  rates |= 0x100; break;
3977		case 48:  rates |= 0x200; break;
3978		case 72:  rates |= 0x400; break;
3979		case 96:  rates |= 0x800; break;
3980		case 108: rates |= 0x1000; break;
3981		}
3982	return rates;
3983}
3984
3985/*
3986 * Construct an HT firmware bitmask from an HT rate set.
3987 */
3988static uint32_t
3989get_htrate_bitmap(const struct ieee80211_htrateset *rs)
3990{
3991	uint32_t rates;
3992	int i;
3993
3994	rates = 0;
3995	for (i = 0; i < rs->rs_nrates; i++) {
3996		if (rs->rs_rates[i] < 16)
3997			rates |= 1<<rs->rs_rates[i];
3998	}
3999	return rates;
4000}
4001
4002/*
4003 * Craft station database entry for station.
4004 * NB: use host byte order here, the hal handles byte swapping.
4005 */
4006static MWL_HAL_PEERINFO *
4007mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
4008{
4009	const struct ieee80211vap *vap = ni->ni_vap;
4010
4011	memset(pi, 0, sizeof(*pi));
4012	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
4013	pi->CapInfo = ni->ni_capinfo;
4014	if (ni->ni_flags & IEEE80211_NODE_HT) {
4015		/* HT capabilities, etc */
4016		pi->HTCapabilitiesInfo = ni->ni_htcap;
4017		/* XXX pi.HTCapabilitiesInfo */
4018	        pi->MacHTParamInfo = ni->ni_htparam;
4019		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
4020		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
4021		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
4022		pi->AddHtInfo.OpMode = ni->ni_htopmode;
4023		pi->AddHtInfo.stbc = ni->ni_htstbc;
4024
4025		/* constrain according to local configuration */
4026		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
4027			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
4028		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
4029			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
4030		if (ni->ni_chw != 40)
4031			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
4032	}
4033	return pi;
4034}
4035
4036/*
4037 * Re-create the local sta db entry for a vap to ensure
4038 * up to date WME state is pushed to the firmware.  Because
4039 * this resets crypto state this must be followed by a
4040 * reload of any keys in the global key table.
4041 */
4042static int
4043mwl_localstadb(struct ieee80211vap *vap)
4044{
4045#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
4046	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
4047	struct ieee80211_node *bss;
4048	MWL_HAL_PEERINFO pi;
4049	int error;
4050
4051	switch (vap->iv_opmode) {
4052	case IEEE80211_M_STA:
4053		bss = vap->iv_bss;
4054		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
4055		    vap->iv_state == IEEE80211_S_RUN ?
4056			mkpeerinfo(&pi, bss) : NULL,
4057		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
4058		    bss->ni_ies.wme_ie != NULL ?
4059			WME(bss->ni_ies.wme_ie)->wme_info : 0);
4060		if (error == 0)
4061			mwl_setglobalkeys(vap);
4062		break;
4063	case IEEE80211_M_HOSTAP:
4064	case IEEE80211_M_MBSS:
4065		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
4066		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
4067		if (error == 0)
4068			mwl_setglobalkeys(vap);
4069		break;
4070	default:
4071		error = 0;
4072		break;
4073	}
4074	return error;
4075#undef WME
4076}
4077
/*
 * net80211 state machine hook.  Carry out device-specific work
 * before and after the parent (net80211) state change handler
 * runs; most post-RUN work depends on up-to-date iv_bss state.
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    vap->iv_ifp->if_xname, __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	/* stop the station-aging timer; restarted below on entry to RUN */
	callout_stop(&sc->sc_timer);
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    vap->iv_ifp->if_xname, __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    vap->iv_ifp->if_xname, __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			/* first DWDS sta vap enables DWDS in the firmware */
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    vap->iv_ifp->if_xname, __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		/*
		 * NOTE(review): this decrement runs on any transition to
		 * a state other than RUN/SLEEP (and on a failed RUN
		 * transition); looks like it assumes DWDS vaps only leave
		 * RUN once -- confirm sc_ndwdsvaps cannot underflow.
		 */
		mwl_hal_setdwds(mh, 0);
bad:
	return error;
}
4230
4231/*
4232 * Manage station id's; these are separate from AID's
4233 * as AID's may have values out of the range of possible
4234 * station id's acceptable to the firmware.
4235 */
4236static int
4237allocstaid(struct mwl_softc *sc, int aid)
4238{
4239	int staid;
4240
4241	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
4242		/* NB: don't use 0 */
4243		for (staid = 1; staid < MWL_MAXSTAID; staid++)
4244			if (isclr(sc->sc_staid, staid))
4245				break;
4246	} else
4247		staid = aid;
4248	setbit(sc->sc_staid, staid);
4249	return staid;
4250}
4251
/*
 * Return a firmware station id to the free pool.
 */
static void
delstaid(struct mwl_softc *sc, int staid)
{
	clrbit(sc->sc_staid, staid);
}
4257
4258/*
4259 * Setup driver-specific state for a newly associated node.
4260 * Note that we're called also on a re-associate, the isnew
4261 * param tells us if this is the first time or not.
4262 */
4263static void
4264mwl_newassoc(struct ieee80211_node *ni, int isnew)
4265{
4266	struct ieee80211vap *vap = ni->ni_vap;
4267        struct mwl_softc *sc = vap->iv_ic->ic_softc;
4268	struct mwl_node *mn = MWL_NODE(ni);
4269	MWL_HAL_PEERINFO pi;
4270	uint16_t aid;
4271	int error;
4272
4273	aid = IEEE80211_AID(ni->ni_associd);
4274	if (isnew) {
4275		mn->mn_staid = allocstaid(sc, aid);
4276		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4277	} else {
4278		mn = MWL_NODE(ni);
4279		/* XXX reset BA stream? */
4280	}
4281	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4282	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4283	error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4284	if (error != 0) {
4285		DPRINTF(sc, MWL_DEBUG_NODE,
4286		    "%s: error %d creating sta db entry\n",
4287		    __func__, error);
4288		/* XXX how to deal with error? */
4289	}
4290}
4291
4292/*
4293 * Periodically poke the firmware to age out station state
4294 * (power save queues, pending tx aggregates).
4295 */
4296static void
4297mwl_agestations(void *arg)
4298{
4299	struct mwl_softc *sc = arg;
4300
4301	mwl_hal_setkeepalive(sc->sc_mh);
4302	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
4303		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
4304}
4305
4306static const struct mwl_hal_channel *
4307findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4308{
4309	int i;
4310
4311	for (i = 0; i < ci->nchannels; i++) {
4312		const struct mwl_hal_channel *hc = &ci->channels[i];
4313		if (hc->ieee == ieee)
4314			return hc;
4315	}
4316	return NULL;
4317}
4318
4319static int
4320mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
4321	int nchan, struct ieee80211_channel chans[])
4322{
4323	struct mwl_softc *sc = ic->ic_softc;
4324	struct mwl_hal *mh = sc->sc_mh;
4325	const MWL_HAL_CHANNELINFO *ci;
4326	int i;
4327
4328	for (i = 0; i < nchan; i++) {
4329		struct ieee80211_channel *c = &chans[i];
4330		const struct mwl_hal_channel *hc;
4331
4332		if (IEEE80211_IS_CHAN_2GHZ(c)) {
4333			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
4334			    IEEE80211_IS_CHAN_HT40(c) ?
4335				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4336		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4337			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
4338			    IEEE80211_IS_CHAN_HT40(c) ?
4339				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4340		} else {
4341			device_printf(sc->sc_dev,
4342			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
4343			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
4344			return EINVAL;
4345		}
4346		/*
4347		 * Verify channel has cal data and cap tx power.
4348		 */
4349		hc = findhalchannel(ci, c->ic_ieee);
4350		if (hc != NULL) {
4351			if (c->ic_maxpower > 2*hc->maxTxPow)
4352				c->ic_maxpower = 2*hc->maxTxPow;
4353			goto next;
4354		}
4355		if (IEEE80211_IS_CHAN_HT40(c)) {
4356			/*
4357			 * Look for the extension channel since the
4358			 * hal table only has the primary channel.
4359			 */
4360			hc = findhalchannel(ci, c->ic_extieee);
4361			if (hc != NULL) {
4362				if (c->ic_maxpower > 2*hc->maxTxPow)
4363					c->ic_maxpower = 2*hc->maxTxPow;
4364				goto next;
4365			}
4366		}
4367		device_printf(sc->sc_dev,
4368		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
4369		    __func__, c->ic_ieee, c->ic_extieee,
4370		    c->ic_freq, c->ic_flags);
4371		return EINVAL;
4372	next:
4373		;
4374	}
4375	return 0;
4376}
4377
4378#define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4379#define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4380
4381static void
4382addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4383	const MWL_HAL_CHANNELINFO *ci, int flags)
4384{
4385	int i, error;
4386
4387	for (i = 0; i < ci->nchannels; i++) {
4388		const struct mwl_hal_channel *hc = &ci->channels[i];
4389
4390		error = ieee80211_add_channel_ht40(chans, maxchans, nchans,
4391		    hc->ieee, hc->maxTxPow, flags);
4392		if (error != 0 && error != ENOENT)
4393			break;
4394	}
4395}
4396
4397static void
4398addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4399	const MWL_HAL_CHANNELINFO *ci, const uint8_t bands[])
4400{
4401	int i, error;
4402
4403	error = 0;
4404	for (i = 0; i < ci->nchannels && error == 0; i++) {
4405		const struct mwl_hal_channel *hc = &ci->channels[i];
4406
4407		error = ieee80211_add_channel(chans, maxchans, nchans,
4408		    hc->ieee, hc->freq, hc->maxTxPow, 0, bands);
4409	}
4410}
4411
4412static void
4413getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
4414	struct ieee80211_channel chans[])
4415{
4416	const MWL_HAL_CHANNELINFO *ci;
4417	uint8_t bands[IEEE80211_MODE_BYTES];
4418
4419	/*
4420	 * Use the channel info from the hal to craft the
4421	 * channel list.  Note that we pass back an unsorted
4422	 * list; the caller is required to sort it for us
4423	 * (if desired).
4424	 */
4425	*nchans = 0;
4426	if (mwl_hal_getchannelinfo(sc->sc_mh,
4427	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) {
4428		memset(bands, 0, sizeof(bands));
4429		setbit(bands, IEEE80211_MODE_11B);
4430		setbit(bands, IEEE80211_MODE_11G);
4431		setbit(bands, IEEE80211_MODE_11NG);
4432		addchannels(chans, maxchans, nchans, ci, bands);
4433	}
4434	if (mwl_hal_getchannelinfo(sc->sc_mh,
4435	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) {
4436		memset(bands, 0, sizeof(bands));
4437		setbit(bands, IEEE80211_MODE_11A);
4438		setbit(bands, IEEE80211_MODE_11NA);
4439		addchannels(chans, maxchans, nchans, ci, bands);
4440	}
4441	if (mwl_hal_getchannelinfo(sc->sc_mh,
4442	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4443		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
4444	if (mwl_hal_getchannelinfo(sc->sc_mh,
4445	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4446		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
4447}
4448
/*
 * net80211 callback: report the set of channels the radio
 * supports; delegates to getchannels() using the hal's tables.
 */
static void
mwl_getradiocaps(struct ieee80211com *ic,
	int maxchans, int *nchans, struct ieee80211_channel chans[])
{
	struct mwl_softc *sc = ic->ic_softc;

	getchannels(sc, maxchans, nchans, chans);
}
4457
4458static int
4459mwl_getchannels(struct mwl_softc *sc)
4460{
4461	struct ieee80211com *ic = &sc->sc_ic;
4462
4463	/*
4464	 * Use the channel info from the hal to craft the
4465	 * channel list for net80211.  Note that we pass up
4466	 * an unsorted list; net80211 will sort it for us.
4467	 */
4468	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
4469	ic->ic_nchans = 0;
4470	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
4471
4472	ic->ic_regdomain.regdomain = SKU_DEBUG;
4473	ic->ic_regdomain.country = CTRY_DEFAULT;
4474	ic->ic_regdomain.location = 'I';
4475	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
4476	ic->ic_regdomain.isocc[1] = ' ';
4477	return (ic->ic_nchans == 0 ? EIO : 0);
4478}
4479#undef IEEE80211_CHAN_HTA
4480#undef IEEE80211_CHAN_HTG
4481
4482#ifdef MWL_DEBUG
/*
 * Debug: dump a single rx descriptor.  The "*"/"!" suffix marks
 * a firmware-owned descriptor whose status is OK/not-OK.
 */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	/*
	 * NOTE(review): the STAT field prints the raw ds->Status while
	 * the ownership check uses the byte-swapped `status' -- presumably
	 * Status is a single byte so both agree; confirm against the
	 * descriptor layout.
	 */
	printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n"
	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->pPhysNext),
	    le32toh(ds->pPhysBuffData), ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2));
}
4498
/*
 * Debug: dump a single tx descriptor for queue qnum, slot ix.
 * With MWL_TXDESC > 1 the multi-fragment arrays are printed too.
 */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr);
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !",);
	printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
/* raw hex dump of the whole descriptor, normally disabled */
{ const uint8_t *cp = (const uint8_t *) ds;
  int i;
  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
	printf("%02x ", cp[i]);
	if (((i+1) % 16) == 0)
		printf("\n");
  }
  printf("\n");
}
#endif
}
4540#endif /* MWL_DEBUG */
4541
4542#if 0
/*
 * Debug: dump every descriptor on a tx queue's active list.
 * NB: currently compiled out (enclosing #if 0).
 */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		/* sync so we read what the device last wrote */
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4561#endif
4562
/*
 * Per-second watchdog: decrement the tx timer and, when it
 * expires with the device still up, report a transmit timeout.
 * A failing keepalive command suggests the firmware is hung.
 */
static void
mwl_watchdog(void *arg)
{
	struct mwl_softc *sc = arg;

	/* re-arm ourselves first; we run once per second */
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
	if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
		return;

	if (sc->sc_running && !sc->sc_invalid) {
		if (mwl_hal_setkeepalive(sc->sc_mh))
			device_printf(sc->sc_dev,
			    "transmit timeout (firmware hung?)\n");
		else
			device_printf(sc->sc_dev,
			    "transmit timeout\n");
#if 0
		mwl_reset(sc);
mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
#endif
		counter_u64_add(sc->sc_ic.ic_oerrors, 1);
		sc->sc_stats.mst_watchdog++;
	}
}
4587
4588#ifdef MWL_DIAGAPI
4589/*
4590 * Diagnostic interface to the HAL.  This is used by various
4591 * tools to do things like retrieve register contents for
4592 * debugging.  The mechanism is intentionally opaque so that
4593 * it can change frequently w/o concern for compatibility.
4594 */
static int
mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	u_int id = md->md_id & MWL_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = md->md_in_size;
	u_int32_t outsize = md->md_out_size;
	int error = 0;

	if (md->md_id & MWL_DIAG_IN) {
		/*
		 * Copy in data.
		 * NOTE(review): insize is caller-supplied and unvalidated;
		 * presumably malloc/copyin cope with 0 or large values --
		 * confirm no additional bound is needed here.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(md->md_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (md->md_id & MWL_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	/* NB: getdiagstate may rewrite outdata/outsize (see above) */
	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
		if (outsize < md->md_out_size)
			md->md_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, md->md_out_data,
					md->md_out_size);
	} else {
		error = EINVAL;
	}
bad:
	/* reclaim only the buffers we allocated above */
	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
4649
/*
 * Diagnostic reset: optionally reload firmware (md_id == 0),
 * re-fetch h/w specs, re-setup DMA and reset the driver's
 * tx/rx bookkeeping.  Caller holds the softc lock.
 */
static int
mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error;

	MWL_LOCK_ASSERT(sc);

	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to load firmware\n");
		return EIO;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		return EIO;
	}
	error = mwl_setupdma(sc);
	if (error != 0) {
		/* NB: mwl_setupdma prints a msg */
		return error;
	}
	/*
	 * Reset tx/rx data structures; after reload we must
	 * re-start the driver's notion of the next xmit/recv.
	 */
	mwl_draintxq(sc);		/* clear pending frames */
	mwl_resettxq(sc);		/* rebuild tx q lists */
	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
	return 0;
}
4680#endif /* MWL_DIAGAPI */
4681
4682static void
4683mwl_parent(struct ieee80211com *ic)
4684{
4685	struct mwl_softc *sc = ic->ic_softc;
4686	int startall = 0;
4687
4688	MWL_LOCK(sc);
4689	if (ic->ic_nrunning > 0) {
4690		if (sc->sc_running) {
4691			/*
4692			 * To avoid rescanning another access point,
4693			 * do not call mwl_init() here.  Instead,
4694			 * only reflect promisc mode settings.
4695			 */
4696			mwl_mode_init(sc);
4697		} else {
4698			/*
4699			 * Beware of being called during attach/detach
4700			 * to reset promiscuous mode.  In that case we
4701			 * will still be marked UP but not RUNNING.
4702			 * However trying to re-init the interface
4703			 * is the wrong thing to do as we've already
4704			 * torn down much of our state.  There's
4705			 * probably a better way to deal with this.
4706			 */
4707			if (!sc->sc_invalid) {
4708				mwl_init(sc);	/* XXX lose error */
4709				startall = 1;
4710			}
4711		}
4712	} else
4713		mwl_stop(sc);
4714	MWL_UNLOCK(sc);
4715	if (startall)
4716		ieee80211_start_all(ic);
4717}
4718
/*
 * Driver-private ioctl dispatch: statistics retrieval and (when
 * MWL_DIAGAPI is compiled in) the diagnostic/reset interface.
 */
static int
mwl_ioctl(struct ieee80211com *ic, u_long cmd, void *data)
{
	struct mwl_softc *sc = ic->ic_softc;
	struct ifreq *ifr = data;
	int error = 0;

	switch (cmd) {
	case SIOCGMVSTATS:
		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
#if 0
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.mst_tx_packets =
		    ifp->if_get_counter(ifp, IFCOUNTER_OPACKETS);
		sc->sc_stats.mst_rx_packets =
		    ifp->if_get_counter(ifp, IFCOUNTER_IPACKETS);
#endif
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsisentcy in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return (copyout(&sc->sc_stats, ifr_data_get_ptr(ifr),
		    sizeof (sc->sc_stats)));
#ifdef MWL_DIAGAPI
	case SIOCGMVDIAG:
		/* XXX check privs */
		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
	case SIOCGMVRESET:
		/* XXX check privs */
		MWL_LOCK(sc);
		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
		MWL_UNLOCK(sc);
		break;
#endif /* MWL_DIAGAPI */
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
4761
4762#ifdef	MWL_DEBUG
4763static int
4764mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4765{
4766	struct mwl_softc *sc = arg1;
4767	int debug, error;
4768
4769	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4770	error = sysctl_handle_int(oidp, &debug, 0, req);
4771	if (error || !req->newptr)
4772		return error;
4773	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4774	sc->sc_debug = debug & 0x00ffffff;
4775	return 0;
4776}
4777#endif /* MWL_DEBUG */
4778
/*
 * Attach driver sysctl nodes; currently only the debug mask,
 * and only when compiled with MWL_DEBUG.
 */
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef	MWL_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	/* seed from the loader-tunable global default */
	sc->sc_debug = mwl_debug;
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
4792
4793/*
4794 * Announce various information on device/driver attach.
4795 */
/*
 * Announce various information on device/driver attach.
 */
static void
mwl_announce(struct mwl_softc *sc)
{

	/* hw revision and firmware version, one byte per field */
	device_printf(sc->sc_dev, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
		sc->sc_hwspecs.hwVersion,
		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
		sc->sc_hwspecs.regionCode);
	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;

	if (bootverbose) {
		int i;
		/* report the WME AC -> h/w queue mapping */
		for (i = 0; i <= WME_AC_VO; i++) {
			struct mwl_txq *txq = sc->sc_ac2q[i];
			device_printf(sc->sc_dev, "Use hw queue %u for %s traffic\n",
				txq->qnum, ieee80211_wme_acnames[i]);
		}
	}
	/* note non-default tunables even without bootverbose */
	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
		device_printf(sc->sc_dev, "using %u rx descriptors\n", mwl_rxdesc);
	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
		device_printf(sc->sc_dev, "using %u rx buffers\n", mwl_rxbuf);
	if (bootverbose || mwl_txbuf != MWL_TXBUF)
		device_printf(sc->sc_dev, "using %u tx buffers\n", mwl_txbuf);
	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
		device_printf(sc->sc_dev, "multi-bss support\n");
#ifdef MWL_TX_NODROP
	if (bootverbose)
		device_printf(sc->sc_dev, "no tx drop\n");
#endif
}
4830