1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
5 * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer,
13 *    without modification.
14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
16 *    redistribution must be conditioned upon including a substantially
17 *    similar Disclaimer requirement for further binary redistribution.
18 *
19 * NO WARRANTY
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
23 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
24 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
25 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
28 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGES.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: releng/12.0/sys/dev/mwl/if_mwl.c 331797 2018-03-30 18:50:13Z brooks $");
35
36/*
37 * Driver for the Marvell 88W8363 Wireless LAN controller.
38 */
39
40#include "opt_inet.h"
41#include "opt_mwl.h"
42
43#include <sys/param.h>
44#include <sys/systm.h>
45#include <sys/sysctl.h>
46#include <sys/mbuf.h>
47#include <sys/malloc.h>
48#include <sys/lock.h>
49#include <sys/mutex.h>
50#include <sys/kernel.h>
51#include <sys/socket.h>
52#include <sys/sockio.h>
53#include <sys/errno.h>
54#include <sys/callout.h>
55#include <sys/bus.h>
56#include <sys/endian.h>
57#include <sys/kthread.h>
58#include <sys/taskqueue.h>
59
60#include <machine/bus.h>
61
62#include <net/if.h>
63#include <net/if_var.h>
64#include <net/if_dl.h>
65#include <net/if_media.h>
66#include <net/if_types.h>
67#include <net/if_arp.h>
68#include <net/ethernet.h>
69#include <net/if_llc.h>
70
71#include <net/bpf.h>
72
73#include <net80211/ieee80211_var.h>
74#include <net80211/ieee80211_input.h>
75#include <net80211/ieee80211_regdomain.h>
76
77#ifdef INET
78#include <netinet/in.h>
79#include <netinet/if_ether.h>
80#endif /* INET */
81
82#include <dev/mwl/if_mwlvar.h>
83#include <dev/mwl/mwldiag.h>
84
/*
 * Forward declarations for the driver methods installed into the
 * net80211 com/vap structures and for the internal helpers below.
 */
static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	mwl_vap_delete(struct ieee80211vap *);
static int	mwl_setupdma(struct mwl_softc *);
static int	mwl_hal_reset(struct mwl_softc *sc);
static int	mwl_init(struct mwl_softc *);
static void	mwl_parent(struct ieee80211com *);
static int	mwl_reset(struct ieee80211vap *, u_long);
static void	mwl_stop(struct mwl_softc *);
static void	mwl_start(struct mwl_softc *);
static int	mwl_transmit(struct ieee80211com *, struct mbuf *);
static int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
			const struct ieee80211_bpf_params *);
static int	mwl_media_change(struct ifnet *);
static void	mwl_watchdog(void *);
static int	mwl_ioctl(struct ieee80211com *, u_long, void *);
static void	mwl_radar_proc(void *, int);
static void	mwl_chanswitch_proc(void *, int);
static void	mwl_bawatchdog_proc(void *, int);
static int	mwl_key_alloc(struct ieee80211vap *,
			struct ieee80211_key *,
			ieee80211_keyix *, ieee80211_keyix *);
static int	mwl_key_delete(struct ieee80211vap *,
			const struct ieee80211_key *);
static int	mwl_key_set(struct ieee80211vap *,
			const struct ieee80211_key *);
static int	_mwl_key_set(struct ieee80211vap *,
			const struct ieee80211_key *,
			const uint8_t mac[IEEE80211_ADDR_LEN]);
static int	mwl_mode_init(struct mwl_softc *);
static void	mwl_update_mcast(struct ieee80211com *);
static void	mwl_update_promisc(struct ieee80211com *);
static void	mwl_updateslot(struct ieee80211com *);
static int	mwl_beacon_setup(struct ieee80211vap *);
static void	mwl_beacon_update(struct ieee80211vap *, int);
#ifdef MWL_HOST_PS_SUPPORT
static void	mwl_update_ps(struct ieee80211vap *, int);
static int	mwl_set_tim(struct ieee80211_node *, int);
#endif
static int	mwl_dma_setup(struct mwl_softc *);
static void	mwl_dma_cleanup(struct mwl_softc *);
static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	mwl_node_cleanup(struct ieee80211_node *);
static void	mwl_node_drain(struct ieee80211_node *);
static void	mwl_node_getsignal(const struct ieee80211_node *,
			int8_t *, int8_t *);
static void	mwl_node_getmimoinfo(const struct ieee80211_node *,
			struct ieee80211_mimo_info *);
static int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
static void	mwl_rx_proc(void *, int);
static void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
static int	mwl_tx_setup(struct mwl_softc *, int, int);
static int	mwl_wme_update(struct ieee80211com *);
static void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
static void	mwl_tx_cleanup(struct mwl_softc *);
static uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
static int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
			     struct mwl_txbuf *, struct mbuf *);
static void	mwl_tx_proc(void *, int);
static int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
static void	mwl_draintxq(struct mwl_softc *);
static void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
static int	mwl_recv_action(struct ieee80211_node *,
			const struct ieee80211_frame *,
			const uint8_t *, const uint8_t *);
static int	mwl_addba_request(struct ieee80211_node *,
			struct ieee80211_tx_ampdu *, int dialogtoken,
			int baparamset, int batimeout);
static int	mwl_addba_response(struct ieee80211_node *,
			struct ieee80211_tx_ampdu *, int status,
			int baparamset, int batimeout);
static void	mwl_addba_stop(struct ieee80211_node *,
			struct ieee80211_tx_ampdu *);
static int	mwl_startrecv(struct mwl_softc *);
static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
			struct ieee80211_channel *);
static int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
static void	mwl_scan_start(struct ieee80211com *);
static void	mwl_scan_end(struct ieee80211com *);
static void	mwl_set_channel(struct ieee80211com *);
static int	mwl_peerstadb(struct ieee80211_node *,
			int aid, int staid, MWL_HAL_PEERINFO *pi);
static int	mwl_localstadb(struct ieee80211vap *);
static int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static int	allocstaid(struct mwl_softc *sc, int aid);
static void	delstaid(struct mwl_softc *sc, int staid);
static void	mwl_newassoc(struct ieee80211_node *, int);
static void	mwl_agestations(void *);
static int	mwl_setregdomain(struct ieee80211com *,
			struct ieee80211_regdomain *, int,
			struct ieee80211_channel []);
static void	mwl_getradiocaps(struct ieee80211com *, int, int *,
			struct ieee80211_channel []);
static int	mwl_getchannels(struct mwl_softc *);

static void	mwl_sysctlattach(struct mwl_softc *);
static void	mwl_announce(struct mwl_softc *);
185
/*
 * Tunable driver parameters exposed under the hw.mwl sysctl tree.
 * Most are boot-time tunables (CTLFLAG_RWTUN) that size the DMA
 * descriptor/buffer pools or pace interrupt-time work.
 */
SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");

static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
	    0, "rx descriptors allocated");
static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &mwl_rxbuf,
	    0, "rx buffers allocated");
static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RWTUN, &mwl_txbuf,
	    0, "tx buffers allocated");
static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &mwl_txcoalesce,
	    0, "tx buffers to send at once");
static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RWTUN, &mwl_rxquota,
	    0, "max rx buffers to process per interrupt");
static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RWTUN, &mwl_rxdmalow,
	    0, "min free rx buffers before restarting traffic");
206
#ifdef MWL_DEBUG
static	int mwl_debug = 0;
SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RWTUN, &mwl_debug,
	    0, "control debugging printfs");
/* Debug mask bits for mwl_debug / the per-softc sc_debug field. */
enum {
	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
	MWL_DEBUG_ANY		= 0xffffffff
};
/* True when wh points at a beacon frame. */
#define	IS_BEACON(wh) \
    ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
/* Packet-dump predicates; rx beacons are suppressed unless RECV_ALL. */
#define	IFF_DUMPPKTS_RECV(sc, wh) \
    ((sc->sc_debug & MWL_DEBUG_RECV) && \
      ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh)))
#define	IFF_DUMPPKTS_XMIT(sc) \
	(sc->sc_debug & MWL_DEBUG_XMIT)

/* Masked debug printf; compiled away entirely when !MWL_DEBUG. */
#define	DPRINTF(sc, m, fmt, ...) do {				\
	if (sc->sc_debug & (m))					\
		printf(fmt, __VA_ARGS__);			\
} while (0)
#define	KEYPRINTF(sc, hk, mac) do {				\
	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
		mwl_keyprint(sc, __func__, hk, mac);		\
} while (0)
static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
#else
#define	IFF_DUMPPKTS_RECV(sc, wh)	0
#define	IFF_DUMPPKTS_XMIT(sc)		0
#define	DPRINTF(sc, m, fmt, ...)	do { (void )sc; } while (0)
#define	KEYPRINTF(sc, k, mac)		do { (void )sc; } while (0)
#endif
254
/* malloc(9) type used for the driver's DMA descriptor/buffer state */
static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");

/*
 * Each packet has fixed front matter: a 2-byte length
 * of the payload, followed by a 4-address 802.11 header
 * (regardless of the actual header and always w/o any
 * QoS header).  The payload then follows.
 */
struct mwltxrec {
	uint16_t fwlen;				/* payload length for the f/w */
	struct ieee80211_frame_addr4 wh;	/* 4-address 802.11 header */
} __packed;
267
268/*
269 * Read/Write shorthands for accesses to BAR 0.  Note
270 * that all BAR 1 operations are done in the "hal" and
271 * there should be no reference to them here.
272 */
273#ifdef MWL_DEBUG
/* Read a 32-bit register in BAR 0 (debug builds only). */
static __inline uint32_t
RD4(struct mwl_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
}
279#endif
280
/* Write a 32-bit register in BAR 0. */
static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}
286
287int
288mwl_attach(uint16_t devid, struct mwl_softc *sc)
289{
290	struct ieee80211com *ic = &sc->sc_ic;
291	struct mwl_hal *mh;
292	int error = 0;
293
294	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
295
296	/*
297	 * Setup the RX free list lock early, so it can be consistently
298	 * removed.
299	 */
300	MWL_RXFREE_INIT(sc);
301
302	mh = mwl_hal_attach(sc->sc_dev, devid,
303	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
304	if (mh == NULL) {
305		device_printf(sc->sc_dev, "unable to attach HAL\n");
306		error = EIO;
307		goto bad;
308	}
309	sc->sc_mh = mh;
310	/*
311	 * Load firmware so we can get setup.  We arbitrarily
312	 * pick station firmware; we'll re-load firmware as
313	 * needed so setting up the wrong mode isn't a big deal.
314	 */
315	if (mwl_hal_fwload(mh, NULL) != 0) {
316		device_printf(sc->sc_dev, "unable to setup builtin firmware\n");
317		error = EIO;
318		goto bad1;
319	}
320	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
321		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
322		error = EIO;
323		goto bad1;
324	}
325	error = mwl_getchannels(sc);
326	if (error != 0)
327		goto bad1;
328
329	sc->sc_txantenna = 0;		/* h/w default */
330	sc->sc_rxantenna = 0;		/* h/w default */
331	sc->sc_invalid = 0;		/* ready to go, enable int handling */
332	sc->sc_ageinterval = MWL_AGEINTERVAL;
333
334	/*
335	 * Allocate tx+rx descriptors and populate the lists.
336	 * We immediately push the information to the firmware
337	 * as otherwise it gets upset.
338	 */
339	error = mwl_dma_setup(sc);
340	if (error != 0) {
341		device_printf(sc->sc_dev, "failed to setup descriptors: %d\n",
342		    error);
343		goto bad1;
344	}
345	error = mwl_setupdma(sc);	/* push to firmware */
346	if (error != 0)			/* NB: mwl_setupdma prints msg */
347		goto bad1;
348
349	callout_init(&sc->sc_timer, 1);
350	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
351	mbufq_init(&sc->sc_snd, ifqmaxlen);
352
353	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
354		taskqueue_thread_enqueue, &sc->sc_tq);
355	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
356		"%s taskq", device_get_nameunit(sc->sc_dev));
357
358	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
359	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
360	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
361	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);
362
363	/* NB: insure BK queue is the lowest priority h/w queue */
364	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
365		device_printf(sc->sc_dev,
366		    "unable to setup xmit queue for %s traffic!\n",
367		     ieee80211_wme_acnames[WME_AC_BK]);
368		error = EIO;
369		goto bad2;
370	}
371	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
372	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
373	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
374		/*
375		 * Not enough hardware tx queues to properly do WME;
376		 * just punt and assign them all to the same h/w queue.
377		 * We could do a better job of this if, for example,
378		 * we allocate queues when we switch from station to
379		 * AP mode.
380		 */
381		if (sc->sc_ac2q[WME_AC_VI] != NULL)
382			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
383		if (sc->sc_ac2q[WME_AC_BE] != NULL)
384			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
385		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
386		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
387		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
388	}
389	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);
390
391	ic->ic_softc = sc;
392	ic->ic_name = device_get_nameunit(sc->sc_dev);
393	/* XXX not right but it's not used anywhere important */
394	ic->ic_phytype = IEEE80211_T_OFDM;
395	ic->ic_opmode = IEEE80211_M_STA;
396	ic->ic_caps =
397		  IEEE80211_C_STA		/* station mode supported */
398		| IEEE80211_C_HOSTAP		/* hostap mode */
399		| IEEE80211_C_MONITOR		/* monitor mode */
400#if 0
401		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
402		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
403#endif
404		| IEEE80211_C_MBSS		/* mesh point link mode */
405		| IEEE80211_C_WDS		/* WDS supported */
406		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
407		| IEEE80211_C_SHSLOT		/* short slot time supported */
408		| IEEE80211_C_WME		/* WME/WMM supported */
409		| IEEE80211_C_BURST		/* xmit bursting supported */
410		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
411		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
412		| IEEE80211_C_TXFRAG		/* handle tx frags */
413		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
414		| IEEE80211_C_DFS		/* DFS supported */
415		;
416
417	ic->ic_htcaps =
418		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
419		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
420		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
421		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
422		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
423#if MWL_AGGR_SIZE == 7935
424		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
425#else
426		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
427#endif
428#if 0
429		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
430		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
431#endif
432		/* s/w capabilities */
433		| IEEE80211_HTC_HT		/* HT operation */
434		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
435		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
436		| IEEE80211_HTC_SMPS		/* SMPS available */
437		;
438
439	/*
440	 * Mark h/w crypto support.
441	 * XXX no way to query h/w support.
442	 */
443	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
444			  |  IEEE80211_CRYPTO_AES_CCM
445			  |  IEEE80211_CRYPTO_TKIP
446			  |  IEEE80211_CRYPTO_TKIPMIC
447			  ;
448	/*
449	 * Transmit requires space in the packet for a special
450	 * format transmit record and optional padding between
451	 * this record and the payload.  Ask the net80211 layer
452	 * to arrange this when encapsulating packets so we can
453	 * add it efficiently.
454	 */
455	ic->ic_headroom = sizeof(struct mwltxrec) -
456		sizeof(struct ieee80211_frame);
457
458	IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->sc_hwspecs.macAddr);
459
460	/* call MI attach routine. */
461	ieee80211_ifattach(ic);
462	ic->ic_setregdomain = mwl_setregdomain;
463	ic->ic_getradiocaps = mwl_getradiocaps;
464	/* override default methods */
465	ic->ic_raw_xmit = mwl_raw_xmit;
466	ic->ic_newassoc = mwl_newassoc;
467	ic->ic_updateslot = mwl_updateslot;
468	ic->ic_update_mcast = mwl_update_mcast;
469	ic->ic_update_promisc = mwl_update_promisc;
470	ic->ic_wme.wme_update = mwl_wme_update;
471	ic->ic_transmit = mwl_transmit;
472	ic->ic_ioctl = mwl_ioctl;
473	ic->ic_parent = mwl_parent;
474
475	ic->ic_node_alloc = mwl_node_alloc;
476	sc->sc_node_cleanup = ic->ic_node_cleanup;
477	ic->ic_node_cleanup = mwl_node_cleanup;
478	sc->sc_node_drain = ic->ic_node_drain;
479	ic->ic_node_drain = mwl_node_drain;
480	ic->ic_node_getsignal = mwl_node_getsignal;
481	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;
482
483	ic->ic_scan_start = mwl_scan_start;
484	ic->ic_scan_end = mwl_scan_end;
485	ic->ic_set_channel = mwl_set_channel;
486
487	sc->sc_recv_action = ic->ic_recv_action;
488	ic->ic_recv_action = mwl_recv_action;
489	sc->sc_addba_request = ic->ic_addba_request;
490	ic->ic_addba_request = mwl_addba_request;
491	sc->sc_addba_response = ic->ic_addba_response;
492	ic->ic_addba_response = mwl_addba_response;
493	sc->sc_addba_stop = ic->ic_addba_stop;
494	ic->ic_addba_stop = mwl_addba_stop;
495
496	ic->ic_vap_create = mwl_vap_create;
497	ic->ic_vap_delete = mwl_vap_delete;
498
499	ieee80211_radiotap_attach(ic,
500	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
501		MWL_TX_RADIOTAP_PRESENT,
502	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
503		MWL_RX_RADIOTAP_PRESENT);
504	/*
505	 * Setup dynamic sysctl's now that country code and
506	 * regdomain are available from the hal.
507	 */
508	mwl_sysctlattach(sc);
509
510	if (bootverbose)
511		ieee80211_announce(ic);
512	mwl_announce(sc);
513	return 0;
514bad2:
515	mwl_dma_cleanup(sc);
516bad1:
517	mwl_hal_detach(mh);
518bad:
519	MWL_RXFREE_DESTROY(sc);
520	sc->sc_invalid = 1;
521	return error;
522}
523
/*
 * Device detach: stop the hardware and release everything acquired
 * by mwl_attach.  Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	callout_drain(&sc->sc_watchdog);
	mwl_dma_cleanup(sc);
	MWL_RXFREE_DESTROY(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	mbufq_drain(&sc->sc_snd);

	return 0;
}
554
555/*
556 * MAC address handling for multiple BSS on the same radio.
557 * The first vap uses the MAC address from the EEPROM.  For
558 * subsequent vap's we set the U/L bit (bit 1) in the MAC
559 * address and use the next six bits as an index.
560 */
/*
 * Pick the MAC address for a new vap.  Slot 0 is the EEPROM address,
 * shared by all vaps created with it (counted in sc_nbssid0); when
 * cloning is requested and the h/w supports multiple bssids, the
 * first free slot index is encoded into mac[0] bits 2-7 along with
 * the locally-administered bit (0x2).  sc_bssidmask tracks slots
 * in use (paired with reclaim_address).
 *
 * NOTE(review): if all 32 slots are taken the loop leaves i == 32,
 * and both "i << 2" (overflows mac[0]) and "1 << i" (shift >= width
 * is undefined) misbehave -- presumably vap creation is bounded
 * elsewhere; verify callers.
 */
static void
assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 32; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		if (i != 0)
			mac[0] |= (i << 2)|0x2;	/* slot index + U/L bit */
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	if (i == 0)
		sc->sc_nbssid0++;
}
579
580static void
581reclaim_address(struct mwl_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
582{
583	int i = mac[0] >> 2;
584	if (i != 0 || --sc->sc_nbssid0 == 0)
585		sc->sc_bssidmask &= ~(1<<i);
586}
587
/*
 * net80211 vap create method.  Allocate driver vap state, create a
 * companion "hal vap" in the firmware for modes that need one
 * (AP/MBSS/STA), install the driver's method overrides, and update
 * the per-mode vap counters used to derive the overall operating
 * mode.  Returns the new vap or NULL on failure.
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			/* undo the slot allocation on f/w vap failure */
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;		/* monitor mode needs no f/w vap */
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		return NULL;		/* mode not supported */
	}

	mvp = malloc(sizeof(struct mwl_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	/* override with driver methods */
	mvp->mv_newstate = vap->iv_newstate;	/* saved for chaining */
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;	/* saved for chaining */
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status,
	    mac);

	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}
715
/*
 * net80211 vap delete method.  Quiesce interrupts while the vap is
 * torn down, destroy the companion f/w vap (if any), release the
 * MAC address slot, and update the per-mode vap counters.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (sc->sc_running) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps--;	/* NB: no f/w vap to destroy */
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	if (sc->sc_running)
		mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable ints */
}
756
/*
 * Bus suspend hook: quiesce the hardware; state is rebuilt by
 * mwl_resume.
 */
void
mwl_suspend(struct mwl_softc *sc)
{

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
}
765
/*
 * Bus resume hook: re-initialize the hardware if any interface was
 * running, then restart the vaps.  EDOOFUS is a sentinel "didn't
 * init" value so the restart is skipped when nothing was running.
 */
void
mwl_resume(struct mwl_softc *sc)
{
	int error = EDOOFUS;

	MWL_LOCK(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		error = mwl_init(sc);
	MWL_UNLOCK(sc);

	if (error == 0)
		ieee80211_start_all(&sc->sc_ic);	/* start all vap's */
}
779
/*
 * System shutdown hook: stop the hardware.
 */
void
mwl_shutdown(void *arg)
{
	struct mwl_softc *sc = arg;

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
}
789
790/*
791 * Interrupt handler.  Most of the actual processing is deferred.
792 */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

#if !defined(__HAIKU__)
	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;
#else
	/* Haiku: ISR status was latched earlier; fetch it atomically */
	status = atomic_get((int32 *)&sc->sc_intr_status);
#endif

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	/* dispatch the heavy lifting to the taskqueue */
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;				/* nothing to do */
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;				/* nothing to do */
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}
852
/*
 * Deferred (taskqueue) handling of a radar-detect interrupt:
 * count the event and hand it to net80211's DFS machinery.
 */
static void
mwl_radar_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
	    __func__, pending);

	sc->sc_stats.mst_radardetect++;
	/* XXX stop h/w BA streams? */

	IEEE80211_LOCK(ic);
	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
	IEEE80211_UNLOCK(ic);
}
869
/*
 * Deferred handling of the f/w channel-switch-complete interrupt:
 * clear the pending flag and complete the CSA in net80211.
 */
static void
mwl_chanswitch_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
	    __func__, pending);

	IEEE80211_LOCK(ic);
	sc->sc_csapending = 0;
	ieee80211_csa_completeswitch(ic);
	IEEE80211_UNLOCK(ic);
}
884
/*
 * Tear down a wedged BA stream: send DELBA and drop the stream.
 * sp->data[0] holds the node and sp->data[1] the tx A-MPDU state
 * (presumably stashed when the stream was set up -- not visible
 * in this file section; verify against the addba path).
 */
static void
mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
{
	struct ieee80211_node *ni = sp->data[0];

	/* send DELBA and drop the stream */
	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
}
893
/*
 * Deferred processing of a BA (block-ack) watchdog interrupt.
 * Ask the firmware which stream(s) tripped the watchdog and
 * tear them down via mwl_bawatchdog().
 */
static void
mwl_bawatchdog_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_BASTREAM *sp;
	uint8_t bitmap, n;

	sc->sc_stats.mst_bawatchdog++;

	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: could not get bitmap\n", __func__);
		sc->sc_stats.mst_bawatchdog_failed++;
		return;
	}
	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
	if (bitmap == 0xff) {
		n = 0;
		/* disable all ba streams */
		/* NB: bitmap is reused here as the stream index */
		for (bitmap = 0; bitmap < 8; bitmap++) {
			sp = mwl_hal_bastream_lookup(mh, bitmap);
			if (sp != NULL) {
				mwl_bawatchdog(sp);
				n++;
			}
		}
		if (n == 0) {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA streams found\n", __func__);
			sc->sc_stats.mst_bawatchdog_empty++;
		}
	} else if (bitmap != 0xaa) {
		/*
		 * disable a single ba stream
		 * NOTE(review): 0xaa appears to be a firmware "nothing
		 * to do" sentinel -- confirm against the f/w interface.
		 */
		sp = mwl_hal_bastream_lookup(mh, bitmap);
		if (sp != NULL) {
			mwl_bawatchdog(sp);
		} else {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA stream %d\n", __func__, bitmap);
			sc->sc_stats.mst_bawatchdog_notfound++;
		}
	}
}
938
939/*
940 * Convert net80211 channel to a HAL channel.
941 */
942static void
943mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
944{
945	hc->channel = chan->ic_ieee;
946
947	*(uint32_t *)&hc->channelFlags = 0;
948	if (IEEE80211_IS_CHAN_2GHZ(chan))
949		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
950	else if (IEEE80211_IS_CHAN_5GHZ(chan))
951		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
952	if (IEEE80211_IS_CHAN_HT40(chan)) {
953		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
954		if (IEEE80211_IS_CHAN_HT40U(chan))
955			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
956		else
957			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
958	} else
959		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
960	/* XXX 10MHz channels */
961}
962
963/*
964 * Inform firmware of our tx/rx dma setup.  The BAR 0
965 * writes below are for compatibility with older firmware.
966 * For current firmware we send this information with a
967 * cmd block via mwl_hal_sethwdma.
968 */
static int
mwl_setupdma(struct mwl_softc *sc)
{
	int error, i;

	/*
	 * Point the f/w at the rx descriptor ring; the write pointer
	 * starts equal to the read pointer (ring empty).  The BAR 0
	 * register writes are for older firmware (see comment above).
	 */
	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);

	/* publish the base address of each tx (WCB) ring */
	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
		struct mwl_txq *txq = &sc->sc_txq[i];
		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
	}
	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;

	/* current firmware gets the same layout via a command block */
	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to setup tx/rx dma; hal status %u\n", error);
		/* XXX */
	}
	/* returns the hal status (0 on success) */
	return error;
}
994
995/*
996 * Inform firmware of tx rate parameters.
997 * Called after a channel change.
998 */
999static int
1000mwl_setcurchanrates(struct mwl_softc *sc)
1001{
1002	struct ieee80211com *ic = &sc->sc_ic;
1003	const struct ieee80211_rateset *rs;
1004	MWL_HAL_TXRATE rates;
1005
1006	memset(&rates, 0, sizeof(rates));
1007	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1008	/* rate used to send management frames */
1009	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1010	/* rate used to send multicast frames */
1011	rates.McastRate = rates.MgtRate;
1012
1013	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1014}
1015
1016/*
1017 * Inform firmware of tx rate parameters.  Called whenever
1018 * user-settable params change and after a channel change.
1019 */
1020static int
1021mwl_setrates(struct ieee80211vap *vap)
1022{
1023	struct mwl_vap *mvp = MWL_VAP(vap);
1024	struct ieee80211_node *ni = vap->iv_bss;
1025	const struct ieee80211_txparam *tp = ni->ni_txparms;
1026	MWL_HAL_TXRATE rates;
1027
1028	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1029
1030	/*
1031	 * Update the h/w rate map.
1032	 * NB: 0x80 for MCS is passed through unchanged
1033	 */
1034	memset(&rates, 0, sizeof(rates));
1035	/* rate used to send management frames */
1036	rates.MgtRate = tp->mgmtrate;
1037	/* rate used to send multicast frames */
1038	rates.McastRate = tp->mcastrate;
1039
1040	/* while here calculate EAPOL fixed rate cookie */
1041	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
1042
1043	return mwl_hal_settxrate(mvp->mv_hvap,
1044	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
1045		RATE_FIXED : RATE_AUTO, &rates);
1046}
1047
1048/*
1049 * Setup a fixed xmit rate cookie for EAPOL frames.
1050 */
1051static void
1052mwl_seteapolformat(struct ieee80211vap *vap)
1053{
1054	struct mwl_vap *mvp = MWL_VAP(vap);
1055	struct ieee80211_node *ni = vap->iv_bss;
1056	enum ieee80211_phymode mode;
1057	uint8_t rate;
1058
1059	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1060
1061	mode = ieee80211_chan2mode(ni->ni_chan);
1062	/*
1063	 * Use legacy rates when operating a mixed HT+non-HT bss.
1064	 * NB: this may violate POLA for sta and wds vap's.
1065	 */
1066	if (mode == IEEE80211_MODE_11NA &&
1067	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1068		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1069	else if (mode == IEEE80211_MODE_11NG &&
1070	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1071		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1072	else
1073		rate = vap->iv_txparms[mode].mgmtrate;
1074
1075	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1076}
1077
1078/*
1079 * Map SKU+country code to region code for radar bin'ing.
1080 */
1081static int
1082mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1083{
1084	switch (rd->regdomain) {
1085	case SKU_FCC:
1086	case SKU_FCC3:
1087		return DOMAIN_CODE_FCC;
1088	case SKU_CA:
1089		return DOMAIN_CODE_IC;
1090	case SKU_ETSI:
1091	case SKU_ETSI2:
1092	case SKU_ETSI3:
1093		if (rd->country == CTRY_SPAIN)
1094			return DOMAIN_CODE_SPAIN;
1095		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1096			return DOMAIN_CODE_FRANCE;
1097		/* XXX force 1.3.1 radar type */
1098		return DOMAIN_CODE_ETSI_131;
1099	case SKU_JAPAN:
1100		return DOMAIN_CODE_MKK;
1101	case SKU_ROW:
1102		return DOMAIN_CODE_DGT;	/* Taiwan */
1103	case SKU_APAC:
1104	case SKU_APAC2:
1105	case SKU_APAC3:
1106		return DOMAIN_CODE_AUS;	/* Australia */
1107	}
1108	/* XXX KOREA? */
1109	return DOMAIN_CODE_FCC;			/* XXX? */
1110}
1111
/*
 * Push vap-independent operating state to the firmware:
 * antennas, radio/preamble, WMM, current channel, rate
 * adaptation and aggregation settings, and region code.
 * NB: returns 1 (boolean success), not an errno.
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	return 1;
}
1135
/*
 * Bring the device to a running state.  Called with the softc
 * lock held.  Tears down any previous state, pushes the
 * vap-independent firmware state, starts the receive path,
 * then enables interrupts and the watchdog.  Returns 0 on
 * success or an errno.
 */
static int
mwl_init(struct mwl_softc *sc)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop(sc);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		device_printf(sc->sc_dev, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.  The mask is saved in sc_imask so it
	 * can be restored after temporary intrset(0) brackets.
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	sc->sc_running = 1;
	mwl_hal_intrset(mh, sc->sc_imask);
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);

	return 0;
}
1192
1193static void
1194mwl_stop(struct mwl_softc *sc)
1195{
1196
1197	MWL_LOCK_ASSERT(sc);
1198	if (sc->sc_running) {
1199		/*
1200		 * Shutdown the hardware and driver.
1201		 */
1202		sc->sc_running = 0;
1203		callout_stop(&sc->sc_watchdog);
1204		sc->sc_tx_timer = 0;
1205		mwl_draintxq(sc);
1206	}
1207}
1208
/*
 * Re-push vap-specific state to the firmware: rates, RTS
 * threshold, HT short-GI and protection, and (for AP-like
 * vaps in RUN state) beacon state.  Returns 0 or the result
 * of mwl_beacon_setup().
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_MBSS ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap, _IEEE80211_MASKSHIFT(
		    ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}
1238
1239/*
1240 * Reset the hardware w/o losing operational state.
1241 * Used to reset or reload hardware state for a vap.
1242 */
static int
mwl_reset(struct ieee80211vap *vap, u_long cmd)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	int error = 0;

	if (hvap != NULL) {			/* WDS, MONITOR, etc. */
		struct ieee80211com *ic = vap->iv_ic;
		struct mwl_softc *sc = ic->ic_softc;
		struct mwl_hal *mh = sc->sc_mh;

		/* XXX handle DWDS sta vap change */
		/* XXX do we need to disable interrupts? */
		/* NB: the vap reset is bracketed by masking and
		 * restoring the saved interrupt mask (sc_imask). */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
		error = mwl_reset_vap(vap, vap->iv_state);
		mwl_hal_intrset(mh, sc->sc_imask);
	}
	return error;
}
1262
1263/*
1264 * Allocate a tx buffer for sending a frame.  The
1265 * packet is assumed to have the WME AC stored so
1266 * we can use it to select the appropriate h/w queue.
1267 */
static struct mwl_txbuf *
mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;

	/*
	 * Grab a TX buffer and associated resources.
	 * NB: the free list is per-queue and protected by the
	 * queue lock; returns NULL when the queue is exhausted.
	 */
	MWL_TXQ_LOCK(txq);
	bf = STAILQ_FIRST(&txq->free);
	if (bf != NULL) {
		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
		txq->nfree--;
	}
	MWL_TXQ_UNLOCK(txq);
	if (bf == NULL)
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
	return bf;
}
1288
1289/*
1290 * Return a tx buffer to the queue it came from.  Note there
1291 * are two cases because we must preserve the order of buffers
1292 * as it reflects the fixed order of descriptors in memory
1293 * (the firmware pre-fetches descriptors so we cannot reorder).
1294 */
/* Return an unused buffer to the FRONT of the free list (the
 * descriptor was never handed to the firmware, so it must be the
 * next one reused to keep descriptor order intact). */
static void
mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
{
	bf->bf_m = NULL;
	bf->bf_node = NULL;
	MWL_TXQ_LOCK(txq);
	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
	txq->nfree++;
	MWL_TXQ_UNLOCK(txq);
}
1305
/* Return a completed buffer to the END of the free list, preserving
 * the fixed descriptor ordering the firmware pre-fetch depends on. */
static void
mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
{
	bf->bf_m = NULL;
	bf->bf_node = NULL;
	MWL_TXQ_LOCK(txq);
	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
	txq->nfree++;
	MWL_TXQ_UNLOCK(txq);
}
1316
1317static int
1318mwl_transmit(struct ieee80211com *ic, struct mbuf *m)
1319{
1320	struct mwl_softc *sc = ic->ic_softc;
1321	int error;
1322
1323	MWL_LOCK(sc);
1324	if (!sc->sc_running) {
1325		MWL_UNLOCK(sc);
1326		return (ENXIO);
1327	}
1328	error = mbufq_enqueue(&sc->sc_snd, m);
1329	if (error) {
1330		MWL_UNLOCK(sc);
1331		return (error);
1332	}
1333	mwl_start(sc);
1334	MWL_UNLOCK(sc);
1335	return (0);
1336}
1337
/*
 * Drain the driver send queue into the hardware.  Called with
 * the softc lock held.  Frames are mapped to an h/w queue by
 * their WME access category; the firmware is "kicked" every
 * mwl_txcoalesce frames and once more for any remainder.
 */
static void
mwl_start(struct mwl_softc *sc)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	MWL_LOCK_ASSERT(sc);
	if (!sc->sc_running || sc->sc_invalid)
		return;
	nqueued = 0;
	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		/*
		 * Grab the node for the destination.
		 * NB: net80211 stashes the node reference in rcvif.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			/* no buffer: drop (or stop, with MWL_TX_NODROP) */
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			sc->sc_stats.mst_tx_qstop++;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 * On failure mwl_tx_start has consumed the mbuf;
		 * return the buffer and node reference.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			if_inc_counter(ni->ni_vap->iv_ifp,
			    IFCOUNTER_OERRORS, 1);
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host. So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1415
/*
 * Transmit a raw (caller-encapsulated) 802.11 frame.
 * Returns 0 on success or an errno; on error the caller
 * owns the node reference.
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if (!sc->sc_running || sc->sc_invalid) {
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 * NOTE(review): ni is intentionally not freed on this path;
	 * net80211 reclaims the node reference when raw_xmit returns
	 * an error -- confirm against ieee80211_raw_output().
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		mwl_puttxbuf_head(txq, bf);

		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host. So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1468
1469static int
1470mwl_media_change(struct ifnet *ifp)
1471{
1472	struct ieee80211vap *vap;
1473	int error;
1474
1475	/* NB: only the fixed rate can change and that doesn't need a reset */
1476	error = ieee80211_media_change(ifp);
1477	if (error != 0)
1478		return (error);
1479
1480	vap = ifp->if_softc;
1481	mwl_setrates(vap);
1482	return (0);
1483}
1484
1485#ifdef MWL_DEBUG
/*
 * Debug helper: dump a HAL key structure (index, cipher, key
 * material, mac address, TKIP MICs, and flags) to the console.
 */
static void
mwl_keyprint(struct mwl_softc *sc, const char *tag,
	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	static const char *ciphers[] = {
		"WEP",
		"TKIP",
		"AES-CCM",
	};
	int i, n;

	/* NOTE(review): assumes keyTypeId is in [0,2] and matches the
	 * ciphers[] order -- confirm against the KEY_TYPE_ID_* values */
	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
	for (i = 0, n = hk->keyLen; i < n; i++)
		printf(" %02x", hk->key.aes[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
		printf(" %s", "rxmic");
		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
			printf(" %02x", hk->key.tkip.rxMic[i]);
		printf(" txmic");
		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
			printf(" %02x", hk->key.tkip.txMic[i]);
	}
	printf(" flags 0x%x\n", hk->keyFlags);
}
1511#endif
1512
1513/*
1514 * Allocate a key cache slot for a unicast key.  The
1515 * firmware handles key allocation and every station is
1516 * guaranteed key space so we are always successful.
1517 */
static int
mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
{
	struct mwl_softc *sc = vap->iv_ic->ic_softc;

	/* NB: returns 1 on success, 0 on failure (net80211 convention) */
	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
		/* group/global keys must live in the vap's key table */
		if (!(&vap->iv_nw_keys[0] <= k &&
		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
			/* should not happen */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
				"%s: bogus group key\n", __func__);
			return 0;
		}
		/* give the caller what they requested */
		*keyix = *rxkeyix = ieee80211_crypto_get_key_wepidx(vap, k);
	} else {
		/*
		 * Firmware handles key allocation.
		 */
		*keyix = *rxkeyix = 0;
	}
	return 1;
}
1543
1544/*
1545 * Delete a key entry allocated by mwl_key_alloc.
1546 */
static int
mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	MWL_HAL_KEYVAL hk;
	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	/* NB: returns 1 on success, 0 on failure (net80211 convention) */
	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* WDS vaps borrow the parent AP's hal vap */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}

	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
	    __func__, k->wk_keyix);

	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	/* map net80211 cipher to the firmware key type */
	switch (k->wk_cipher->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
}
1590
1591static __inline int
1592addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1593{
1594	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1595		if (k->wk_flags & IEEE80211_KEY_XMIT)
1596			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1597		if (k->wk_flags & IEEE80211_KEY_RECV)
1598			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1599		return 1;
1600	} else
1601		return 0;
1602}
1603
1604/*
1605 * Set the key cache contents for the specified key.  Key cache
1606 * slot(s) must already have been allocated by mwl_key_alloc.
1607 */
static int
mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	/* NB: thin wrapper; the sta mac comes from the key itself */
	return (_mwl_key_set(vap, k, k->wk_macaddr));
}
1613
/*
 * Plumb a key to the firmware.  Translates the net80211 key into
 * a MWL_HAL_KEYVAL and picks the sta db mac address the firmware
 * expects (which differs from the net80211/hostapd convention).
 * Returns 1 on success, 0 on failure (net80211 convention).
 */
static int
_mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
	const uint8_t mac[IEEE80211_ADDR_LEN])
{
#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
#define	IEEE80211_IS_STATICKEY(k) \
	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
	 (GRPXMIT|IEEE80211_KEY_RECV))
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	const uint8_t *macaddr;
	MWL_HAL_KEYVAL hk;

	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
		("s/w crypto set?"));

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* WDS vaps borrow the parent AP's hal vap */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}
	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	/* fill in cipher-specific key type, length, and flags */
	switch (cip->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		hk.keyLen = k->wk_keylen;
		if (k->wk_keyix == vap->iv_def_txkey)
			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
		if (!IEEE80211_IS_STATICKEY(k)) {
			/* NB: WEP is never used for the PTK */
			(void) addgroupflags(&hk, k);
		}
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
		/* key material is followed by the tx/rx MIC keys */
		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		hk.keyLen = k->wk_keylen;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	/*
	 * NB: tkip mic keys get copied here too; the layout
	 *     just happens to match that in ieee80211_key.
	 */
	memcpy(hk.key.aes, k->wk_key, hk.keyLen);

	/*
	 * Locate address of sta db entry for writing key;
	 * the convention unfortunately is somewhat different
	 * than how net80211, hostapd, and wpa_supplicant think.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		/*
		 * NB: keys plumbed before the sta reaches AUTH state
		 * will be discarded or written to the wrong sta db
		 * entry because iv_bss is meaningless.  This is ok
		 * (right now) because we handle deferred plumbing of
		 * WEP keys when the sta reaches AUTH state.
		 */
		macaddr = vap->iv_bss->ni_bssid;
		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
			/* XXX plumb to local sta db too for static key wep */
			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
		}
	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
	    vap->iv_state != IEEE80211_S_RUN) {
		/*
		 * Prior to RUN state a WDS vap will not have its BSS node
		 * setup so we will plumb the key to the wrong mac
		 * address (it'll be our local address).  Workaround
		 * this for the moment by grabbing the correct address.
		 */
		macaddr = vap->iv_des_bssid;
	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
		macaddr = vap->iv_myaddr;
	else
		macaddr = mac;
	KEYPRINTF(sc, &hk, macaddr);
	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
#undef IEEE80211_IS_STATICKEY
#undef GRPXMIT
}
1718
1719/*
1720 * Set the multicast filter contents into the hardware.
1721 * XXX f/w has no support; just defer to the os.
1722 */
static void
mwl_setmcastfilter(struct mwl_softc *sc)
{
	/*
	 * Intentionally a no-op: the #if 0 code below is dead and
	 * references identifiers (ifp, sc_ec, ETHER_FIRST_MULTI) that
	 * no longer exist in this driver; it is kept for reference
	 * should firmware multicast filtering ever be wired up.
	 */
#if 0
	struct ether_multi *enm;
	struct ether_multistep estep;
	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
	uint8_t *mp;
	int nmc;

	mp = macs;
	nmc = 0;
	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
	while (enm != NULL) {
		/* XXX Punt on ranges. */
		if (nmc == MWL_HAL_MCAST_MAX ||
		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
			ifp->if_flags |= IFF_ALLMULTI;
			return;
		}
		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
		mp += IEEE80211_ADDR_LEN, nmc++;
		ETHER_NEXT_MULTI(estep, enm);
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
#endif
}
1751
/*
 * Push receive-mode state (promiscuous flag and multicast
 * filter) to the firmware.  Always succeeds.
 */
static int
mwl_mode_init(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setpromisc(mh, ic->ic_promisc > 0);
	mwl_setmcastfilter(sc);

	return 0;
}
1763
1764/*
1765 * Callback from the 802.11 layer after a multicast state change.
1766 */
static void
mwl_update_mcast(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	/* NB: currently a no-op; see mwl_setmcastfilter */
	mwl_setmcastfilter(sc);
}
1774
1775/*
1776 * Callback from the 802.11 layer after a promiscuous mode change.
1777 * Note this interface does not check the operating mode as this
1778 * is an internal callback and we are expected to honor the current
1779 * state (e.g. this is used for setting the interface in promiscuous
1780 * mode when operating in hostap mode to do ACS).
1781 */
static void
mwl_update_promisc(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	/* ic_promisc is a counter; any positive value enables promisc */
	mwl_hal_setpromisc(sc->sc_mh, ic->ic_promisc > 0);
}
1789
1790/*
1791 * Callback from the 802.11 layer to update the slot time
1792 * based on the current setting.  We use it to notify the
1793 * firmware of ERP changes and the f/w takes care of things
1794 * like slot time and preamble.
1795 */
static void
mwl_updateslot(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	int prot;

	/* NB: can be called early; suppress needless cmds */
	if (!sc->sc_running)
		return;

	/*
	 * Calculate the ERP flags.  The firwmare will use
	 * this to carry out the appropriate measures.
	 * NB: ERP only applies on 2.4GHz (11g-capable) channels.
	 */
	prot = 0;
	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
		/* long slot implies non-ERP stations are present */
		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
		if (ic->ic_flags & IEEE80211_F_USEPROT)
			prot |= IEEE80211_ERP_USE_PROTECTION;
		if (ic->ic_flags & IEEE80211_F_USEBARKER)
			prot |= IEEE80211_ERP_LONG_PREAMBLE;
	}

	DPRINTF(sc, MWL_DEBUG_RESET,
	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
	    ic->ic_flags);

	mwl_hal_setgprot(mh, prot);
}
1829
1830/*
1831 * Setup the beacon frame.
1832 */
static int
mwl_beacon_setup(struct ieee80211vap *vap)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211_node *ni = vap->iv_bss;
	struct mbuf *m;

	/* build the beacon, hand a copy to the f/w, then release it */
	m = ieee80211_beacon_alloc(ni);
	if (m == NULL)
		return ENOBUFS;
	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
	m_free(m);

	return 0;
}
1848
1849/*
1850 * Update the beacon frame in response to a change.
1851 */
static void
mwl_beacon_update(struct ieee80211vap *vap, int item)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	KASSERT(hvap != NULL, ("no beacon"));
	/* NB: all cases except TIM fall through to a full beacon
	 * rebuild below; ERP/HTINFO also push state to the f/w first */
	switch (item) {
	case IEEE80211_BEACON_ERP:
		mwl_updateslot(ic);
		break;
	case IEEE80211_BEACON_HTINFO:
		mwl_hal_setnprotmode(hvap, _IEEE80211_MASKSHIFT(
		    ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		break;
	case IEEE80211_BEACON_CAPS:
	case IEEE80211_BEACON_WME:
	case IEEE80211_BEACON_APPIE:
	case IEEE80211_BEACON_CSA:
		break;
	case IEEE80211_BEACON_TIM:
		/* NB: firmware always forms TIM */
		return;
	}
	/* XXX retain beacon frame and update */
	mwl_beacon_setup(vap);
}
1879
/*
 * bus_dmamap_load callback: record the (single) segment's bus
 * address for the caller.  Load is done with BUS_DMA_NOWAIT so
 * error is checked via assertion only.
 */
static void
mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	KASSERT(error == 0, ("error %u on bus_dma callback", error));
	*paddr = segs->ds_addr;
}
1887
1888#ifdef MWL_HOST_PS_SUPPORT
1889/*
1890 * Handle power save station occupancy changes.
1891 */
static void
mwl_update_ps(struct ieee80211vap *vap, int nsta)
{
	struct mwl_vap *mvp = MWL_VAP(vap);

	/* only notify the f/w on transitions into or out of the
	 * "no stations in power save" state (either count is zero) */
	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
	mvp->mv_last_ps_sta = nsta;
}
1901
1902/*
1903 * Handle associated station power save state changes.
1904 */
1905static int
1906mwl_set_tim(struct ieee80211_node *ni, int set)
1907{
1908	struct ieee80211vap *vap = ni->ni_vap;
1909	struct mwl_vap *mvp = MWL_VAP(vap);
1910
1911	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1912		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1913		    IEEE80211_AID(ni->ni_associd), set);
1914		return 1;
1915	} else
1916		return 0;
1917}
1918#endif /* MWL_HOST_PS_SUPPORT */
1919
1920static int
1921mwl_desc_setup(struct mwl_softc *sc, const char *name,
1922	struct mwl_descdma *dd,
1923	int nbuf, size_t bufsize, int ndesc, size_t descsize)
1924{
1925	uint8_t *ds;
1926	int error;
1927
1928	DPRINTF(sc, MWL_DEBUG_RESET,
1929	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
1930	    __func__, name, nbuf, (uintmax_t) bufsize,
1931	    ndesc, (uintmax_t) descsize);
1932
1933	dd->dd_name = name;
1934	dd->dd_desc_len = nbuf * ndesc * descsize;
1935
1936	/*
1937	 * Setup DMA descriptor area.
1938	 */
1939	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
1940		       PAGE_SIZE, 0,		/* alignment, bounds */
1941		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1942		       BUS_SPACE_MAXADDR,	/* highaddr */
1943		       NULL, NULL,		/* filter, filterarg */
1944		       dd->dd_desc_len,		/* maxsize */
1945		       1,			/* nsegments */
1946		       dd->dd_desc_len,		/* maxsegsize */
1947		       BUS_DMA_ALLOCNOW,	/* flags */
1948		       NULL,			/* lockfunc */
1949		       NULL,			/* lockarg */
1950		       &dd->dd_dmat);
1951	if (error != 0) {
1952		device_printf(sc->sc_dev, "cannot allocate %s DMA tag\n", dd->dd_name);
1953		return error;
1954	}
1955
1956	/* allocate descriptors */
1957	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
1958				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
1959				 &dd->dd_dmamap);
1960	if (error != 0) {
1961		device_printf(sc->sc_dev, "unable to alloc memory for %u %s descriptors, "
1962			"error %u\n", nbuf * ndesc, dd->dd_name, error);
1963		goto fail1;
1964	}
1965
1966	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
1967				dd->dd_desc, dd->dd_desc_len,
1968				mwl_load_cb, &dd->dd_desc_paddr,
1969				BUS_DMA_NOWAIT);
1970	if (error != 0) {
1971		device_printf(sc->sc_dev, "unable to map %s descriptors, error %u\n",
1972			dd->dd_name, error);
1973		goto fail2;
1974	}
1975
1976	ds = dd->dd_desc;
1977	memset(ds, 0, dd->dd_desc_len);
1978	DPRINTF(sc, MWL_DEBUG_RESET,
1979	    "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n",
1980	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
1981	    (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
1982
1983	return 0;
1984fail2:
1985	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
1986fail1:
1987	bus_dma_tag_destroy(dd->dd_dmat);
1988	memset(dd, 0, sizeof(*dd));
1989	return error;
1990#undef DS2PHYS
1991}
1992
/*
 * Release resources acquired by mwl_desc_setup, in reverse
 * order: unload the map, free the memory, destroy the tag.
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	memset(dd, 0, sizeof(*dd));
}
2002
2003/*
2004 * Construct a tx q's free list.  The order of entries on
2005 * the list must reflect the physical layout of tx descriptors
2006 * because the firmware pre-fetches descriptors.
2007 *
2008 * XXX might be better to use indices into the buffer array.
2009 */
2010static void
2011mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2012{
2013	struct mwl_txbuf *bf;
2014	int i;
2015
2016	bf = txq->dma.dd_bufptr;
2017	STAILQ_INIT(&txq->free);
2018	for (i = 0; i < mwl_txbuf; i++, bf++)
2019		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2020	txq->nfree = i;
2021}
2022
/* Translate a descriptor pointer into its bus (DMA) address. */
#define	DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2025
2026static int
2027mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
2028{
2029	int error, bsize, i;
2030	struct mwl_txbuf *bf;
2031	struct mwl_txdesc *ds;
2032
2033	error = mwl_desc_setup(sc, "tx", &txq->dma,
2034			mwl_txbuf, sizeof(struct mwl_txbuf),
2035			MWL_TXDESC, sizeof(struct mwl_txdesc));
2036	if (error != 0)
2037		return error;
2038
2039	/* allocate and setup tx buffers */
2040	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
2041	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2042	if (bf == NULL) {
2043		device_printf(sc->sc_dev, "malloc of %u tx buffers failed\n",
2044			mwl_txbuf);
2045		return ENOMEM;
2046	}
2047	txq->dma.dd_bufptr = bf;
2048
2049	ds = txq->dma.dd_desc;
2050	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
2051		bf->bf_desc = ds;
2052		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
2053		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
2054				&bf->bf_dmamap);
2055		if (error != 0) {
2056			device_printf(sc->sc_dev, "unable to create dmamap for tx "
2057				"buffer %u, error %u\n", i, error);
2058			return error;
2059		}
2060	}
2061	mwl_txq_reset(sc, txq);
2062	return 0;
2063}
2064
2065static void
2066mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2067{
2068	struct mwl_txbuf *bf;
2069	int i;
2070
2071	bf = txq->dma.dd_bufptr;
2072	for (i = 0; i < mwl_txbuf; i++, bf++) {
2073		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2074		KASSERT(bf->bf_node == NULL, ("node on free list"));
2075		if (bf->bf_dmamap != NULL)
2076			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2077	}
2078	STAILQ_INIT(&txq->free);
2079	txq->nfree = 0;
2080	if (txq->dma.dd_bufptr != NULL) {
2081		free(txq->dma.dd_bufptr, M_MWLDEV);
2082		txq->dma.dd_bufptr = NULL;
2083	}
2084	if (txq->dma.dd_desc_len != 0)
2085		mwl_desc_cleanup(sc, &txq->dma);
2086}
2087
2088static int
2089mwl_rxdma_setup(struct mwl_softc *sc)
2090{
2091	int error, jumbosize, bsize, i;
2092	struct mwl_rxbuf *bf;
2093	struct mwl_jumbo *rbuf;
2094	struct mwl_rxdesc *ds;
2095	caddr_t data;
2096
2097	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2098			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2099			1, sizeof(struct mwl_rxdesc));
2100	if (error != 0)
2101		return error;
2102
2103	/*
2104	 * Receive is done to a private pool of jumbo buffers.
2105	 * This allows us to attach to mbuf's and avoid re-mapping
2106	 * memory on each rx we post.  We allocate a large chunk
2107	 * of memory and manage it in the driver.  The mbuf free
2108	 * callback method is used to reclaim frames after sending
2109	 * them up the stack.  By default we allocate 2x the number of
2110	 * rx descriptors configured so we have some slop to hold
2111	 * us while frames are processed.
2112	 */
2113	if (mwl_rxbuf < 2*mwl_rxdesc) {
2114		device_printf(sc->sc_dev,
2115		    "too few rx dma buffers (%d); increasing to %d\n",
2116		    mwl_rxbuf, 2*mwl_rxdesc);
2117		mwl_rxbuf = 2*mwl_rxdesc;
2118	}
2119	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2120	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2121
2122	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2123		       PAGE_SIZE, 0,		/* alignment, bounds */
2124		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2125		       BUS_SPACE_MAXADDR,	/* highaddr */
2126		       NULL, NULL,		/* filter, filterarg */
2127		       sc->sc_rxmemsize,	/* maxsize */
2128		       1,			/* nsegments */
2129		       sc->sc_rxmemsize,	/* maxsegsize */
2130		       BUS_DMA_ALLOCNOW,	/* flags */
2131		       NULL,			/* lockfunc */
2132		       NULL,			/* lockarg */
2133		       &sc->sc_rxdmat);
2134	if (error != 0) {
2135		device_printf(sc->sc_dev, "could not create rx DMA tag\n");
2136		return error;
2137	}
2138
2139	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2140				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2141				 &sc->sc_rxmap);
2142	if (error != 0) {
2143		device_printf(sc->sc_dev, "could not alloc %ju bytes of rx DMA memory\n",
2144		    (uintmax_t) sc->sc_rxmemsize);
2145		return error;
2146	}
2147
2148	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2149				sc->sc_rxmem, sc->sc_rxmemsize,
2150				mwl_load_cb, &sc->sc_rxmem_paddr,
2151				BUS_DMA_NOWAIT);
2152	if (error != 0) {
2153		device_printf(sc->sc_dev, "could not load rx DMA map\n");
2154		return error;
2155	}
2156
2157	/*
2158	 * Allocate rx buffers and set them up.
2159	 */
2160	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2161	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2162	if (bf == NULL) {
2163		device_printf(sc->sc_dev, "malloc of %u rx buffers failed\n", bsize);
2164		return error;
2165	}
2166	sc->sc_rxdma.dd_bufptr = bf;
2167
2168	STAILQ_INIT(&sc->sc_rxbuf);
2169	ds = sc->sc_rxdma.dd_desc;
2170	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2171		bf->bf_desc = ds;
2172		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2173		/* pre-assign dma buffer */
2174		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2175		/* NB: tail is intentional to preserve descriptor order */
2176		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2177	}
2178
2179	/*
2180	 * Place remainder of dma memory buffers on the free list.
2181	 */
2182	SLIST_INIT(&sc->sc_rxfree);
2183	for (; i < mwl_rxbuf; i++) {
2184		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2185		rbuf = MWL_JUMBO_DATA2BUF(data);
2186		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2187		sc->sc_nrxfree++;
2188	}
2189	return 0;
2190}
2191#undef DS2PHYS
2192
/*
 * Reclaim all rx DMA resources.  Each step is guarded so this is
 * safe to call with partially initialized state (e.g. after a failed
 * mwl_rxdma_setup): unload the map, free the jumbo pool, free the
 * rx buffer array, then tear down the descriptor area.
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	if (sc->sc_rxmem_paddr != 0) {		/* map was loaded */
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmem_paddr = 0;
	}
	if (sc->sc_rxmem != NULL) {		/* jumbo pool allocated */
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {	/* rx buffer array */
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	if (sc->sc_rxdma.dd_desc_len != 0)	/* descriptor area */
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
}
2211
2212static int
2213mwl_dma_setup(struct mwl_softc *sc)
2214{
2215	int error, i;
2216
2217	error = mwl_rxdma_setup(sc);
2218	if (error != 0) {
2219		mwl_rxdma_cleanup(sc);
2220		return error;
2221	}
2222
2223	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2224		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2225		if (error != 0) {
2226			mwl_dma_cleanup(sc);
2227			return error;
2228		}
2229	}
2230	return 0;
2231}
2232
2233static void
2234mwl_dma_cleanup(struct mwl_softc *sc)
2235{
2236	int i;
2237
2238	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2239		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2240	mwl_rxdma_cleanup(sc);
2241}
2242
2243static struct ieee80211_node *
2244mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2245{
2246	struct ieee80211com *ic = vap->iv_ic;
2247	struct mwl_softc *sc = ic->ic_softc;
2248	const size_t space = sizeof(struct mwl_node);
2249	struct mwl_node *mn;
2250
2251	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2252	if (mn == NULL) {
2253		/* XXX stat+msg */
2254		return NULL;
2255	}
2256	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2257	return &mn->mn_node;
2258}
2259
2260static void
2261mwl_node_cleanup(struct ieee80211_node *ni)
2262{
2263	struct ieee80211com *ic = ni->ni_ic;
2264        struct mwl_softc *sc = ic->ic_softc;
2265	struct mwl_node *mn = MWL_NODE(ni);
2266
2267	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
2268	    __func__, ni, ni->ni_ic, mn->mn_staid);
2269
2270	if (mn->mn_staid != 0) {
2271		struct ieee80211vap *vap = ni->ni_vap;
2272
2273		if (mn->mn_hvap != NULL) {
2274			if (vap->iv_opmode == IEEE80211_M_STA)
2275				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
2276			else
2277				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
2278		}
2279		/*
2280		 * NB: legacy WDS peer sta db entry is installed using
2281		 * the associate ap's hvap; use it again to delete it.
2282		 * XXX can vap be NULL?
2283		 */
2284		else if (vap->iv_opmode == IEEE80211_M_WDS &&
2285		    MWL_VAP(vap)->mv_ap_hvap != NULL)
2286			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
2287			    ni->ni_macaddr);
2288		delstaid(sc, mn->mn_staid);
2289		mn->mn_staid = 0;
2290	}
2291	sc->sc_node_cleanup(ni);
2292}
2293
2294/*
2295 * Reclaim rx dma buffers from packets sitting on the ampdu
2296 * reorder queue for a station.  We replace buffers with a
2297 * system cluster (if available).
2298 */
static void
mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
{
	/*
	 * NOTE(review): the body below is compiled out; this function is
	 * currently a no-op and callers (mwl_node_drain) treat it as
	 * best-effort.  The disabled code uses pool_cache/MEXTREMOVE
	 * style mbuf APIs that do not match this tree — presumably a
	 * leftover from another OS port; confirm before enabling.
	 */
#if 0
	int i, n, off;
	struct mbuf *m;
	void *cl;

	n = rap->rxa_qframes;
	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
		m = rap->rxa_m[i];
		if (m == NULL)
			continue;
		n--;
		/* our dma buffers have a well-known free routine */
		if ((m->m_flags & M_EXT) == 0 ||
		    m->m_ext.ext_free != mwl_ext_free)
			continue;
		/*
		 * Try to allocate a cluster and move the data.
		 */
		off = m->m_data - m->m_ext.ext_buf;
		if (off + m->m_pkthdr.len > MCLBYTES) {
			/* XXX no AMSDU for now */
			continue;
		}
		cl = pool_cache_get_paddr(&mclpool_cache, 0,
		    &m->m_ext.ext_paddr);
		if (cl != NULL) {
			/*
			 * Copy the existing data to the cluster, remove
			 * the rx dma buffer, and attach the cluster in
			 * its place.  Note we preserve the offset to the
			 * data so frames being bridged can still prepend
			 * their headers without adding another mbuf.
			 */
			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
			MEXTREMOVE(m);
			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
			/* setup mbuf like _MCLGET does */
			m->m_flags |= M_CLUSTER | M_EXT_RW;
			_MOWNERREF(m, M_EXT | M_CLUSTER);
			/* NB: m_data is clobbered by MEXTADDR, adjust */
			m->m_data += off;
		}
	}
#endif
}
2347
2348/*
2349 * Callback to reclaim resources.  We first let the
2350 * net80211 layer do it's thing, then if we are still
2351 * blocked by a lack of rx dma buffers we walk the ampdu
2352 * reorder q's to reclaim buffers by copying to a system
2353 * cluster.
2354 */
2355static void
2356mwl_node_drain(struct ieee80211_node *ni)
2357{
2358	struct ieee80211com *ic = ni->ni_ic;
2359        struct mwl_softc *sc = ic->ic_softc;
2360	struct mwl_node *mn = MWL_NODE(ni);
2361
2362	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2363	    __func__, ni, ni->ni_vap, mn->mn_staid);
2364
2365	/* NB: call up first to age out ampdu q's */
2366	sc->sc_node_drain(ni);
2367
2368	/* XXX better to not check low water mark? */
2369	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2370	    (ni->ni_flags & IEEE80211_NODE_HT)) {
2371		uint8_t tid;
2372		/*
2373		 * Walk the reorder q and reclaim rx dma buffers by copying
2374		 * the packet contents into clusters.
2375		 */
2376		for (tid = 0; tid < WME_NUM_TID; tid++) {
2377			struct ieee80211_rx_ampdu *rap;
2378
2379			rap = &ni->ni_rx_ampdu[tid];
2380			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2381				continue;
2382			if (rap->rxa_qframes)
2383				mwl_ampdu_rxdma_reclaim(rap);
2384		}
2385	}
2386}
2387
/*
 * net80211 getsignal callback: rssi comes from net80211's averaged
 * value; the noise floor is a fixed -95 dBm estimate (the per-antenna
 * noise data is available with MWL_ANT_INFO_SUPPORT but is not yet
 * smoothed, so it stays disabled).
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2403
2404/*
2405 * Convert Hardware per-antenna rssi info to common format:
2406 * Let a1, a2, a3 represent the amplitudes per chain
2407 * Let amax represent max[a1, a2, a3]
2408 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2409 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2410 * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2411 * maintain some extra precision.
2412 *
2413 * Values are stored in .5 db format capped at 127.
2414 */
static void
mwl_node_getmimoinfo(const struct ieee80211_node *ni,
	struct ieee80211_mimo_info *mi)
{
/*
 * CVT maps one chain's raw amplitude to a .5 dB rssi per the block
 * comment above: rssi + (4*20*log10(a) - 4*20*log10(amax))/4, then
 * doubled into .5 dB units and capped at 127.  NB: the overflow
 * behavior of the intermediate assignment depends on the exact type
 * of the destination field — do not change the destination type.
 */
#define	CVT(_dst, _src) do {						\
	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
} while (0)
	/* 4*20*log10(idx), idx in [0,31] (see block comment above) */
	static const int8_t logdbtbl[32] = {
	       0,   0,  24,  38,  48,  56,  62,  68,
	      72,  76,  80,  83,  86,  89,  92,  94,
	      96,  98, 100, 102, 104, 106, 107, 109,
	     110, 112, 113, 115, 116, 117, 118, 119
	};
	const struct mwl_node *mn = MWL_NODE_CONST(ni);
	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
	uint32_t rssi_max;

	/* NOTE(review): logdbtbl has 32 entries; this assumes the
	 * per-chain rssi amplitudes are < 32 — confirm against the
	 * f/w ant-info value range. */
	rssi_max = mn->mn_ai.rssi_a;
	if (mn->mn_ai.rssi_b > rssi_max)
		rssi_max = mn->mn_ai.rssi_b;
	if (mn->mn_ai.rssi_c > rssi_max)
		rssi_max = mn->mn_ai.rssi_c;

	CVT(mi->ch[0].rssi[0], mn->mn_ai.rssi_a);
	CVT(mi->ch[1].rssi[0], mn->mn_ai.rssi_b);
	CVT(mi->ch[2].rssi[0], mn->mn_ai.rssi_c);

	mi->ch[0].noise[0] = mn->mn_ai.nf_a;
	mi->ch[1].noise[0] = mn->mn_ai.nf_b;
	mi->ch[2].noise[0] = mn->mn_ai.nf_c;
#undef CVT
}
2448
2449static __inline void *
2450mwl_getrxdma(struct mwl_softc *sc)
2451{
2452	struct mwl_jumbo *buf;
2453	void *data;
2454
2455	/*
2456	 * Allocate from jumbo pool.
2457	 */
2458	MWL_RXFREE_LOCK(sc);
2459	buf = SLIST_FIRST(&sc->sc_rxfree);
2460	if (buf == NULL) {
2461		DPRINTF(sc, MWL_DEBUG_ANY,
2462		    "%s: out of rx dma buffers\n", __func__);
2463		sc->sc_stats.mst_rx_nodmabuf++;
2464		data = NULL;
2465	} else {
2466		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2467		sc->sc_nrxfree--;
2468		data = MWL_JUMBO_BUF2DATA(buf);
2469	}
2470	MWL_RXFREE_UNLOCK(sc);
2471	return data;
2472}
2473
2474static __inline void
2475mwl_putrxdma(struct mwl_softc *sc, void *data)
2476{
2477	struct mwl_jumbo *buf;
2478
2479	/* XXX bounds check data */
2480	MWL_RXFREE_LOCK(sc);
2481	buf = MWL_JUMBO_DATA2BUF(data);
2482	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2483	sc->sc_nrxfree++;
2484	MWL_RXFREE_UNLOCK(sc);
2485}
2486
/*
 * (Re)initialize an rx descriptor for posting to the firmware.
 * If the buffer has no dma memory attached, try to get one from the
 * jumbo pool; on failure the descriptor is marked OS-owned so the
 * firmware skips it, and ENOMEM is returned.  On success the
 * descriptor is filled in and handed to the firmware (DRIVER_OWN).
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	/* NB: ownership handoff to the firmware happens last */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2525
/*
 * mbuf external-storage free callback for rx jumbo buffers: return
 * the dma buffer to the free pool and, if rx was blocked waiting for
 * buffers, re-enable rx interrupts once the pool refills past the
 * low-water mark.
 */
static void
mwl_ext_free(struct mbuf *m, void *data, void *arg)
{
	struct mwl_softc *sc = arg;

	/* XXX bounds check data */
	mwl_putrxdma(sc, m->m_ext.ext_buf);
	/*
	 * If we were previously blocked by a lack of rx dma buffers
	 * check if we now have enough to restart rx interrupt handling.
	 * NB: we know we are called at splvm which is above splnet.
	 */
	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
		sc->sc_rxblocked = 0;
		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
	}
}
2543
/*
 * 802.11 BlockAckReq (BAR) control frame header, used by
 * mwl_anyhdrsize to size BAR frames.  The trailing BAR control,
 * sequence control and FCS fields are intentionally not represented.
 */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];		/* frame control */
	u_int8_t	i_dur[2];		/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2551
2552/*
2553 * Like ieee80211_anyhdrsize, but handles BAR frames
2554 * specially so the logic below to piece the 802.11
2555 * header together works.
2556 */
2557static __inline int
2558mwl_anyhdrsize(const void *data)
2559{
2560	const struct ieee80211_frame *wh = data;
2561
2562	if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2563		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2564		case IEEE80211_FC0_SUBTYPE_CTS:
2565		case IEEE80211_FC0_SUBTYPE_ACK:
2566			return sizeof(struct ieee80211_frame_ack);
2567		case IEEE80211_FC0_SUBTYPE_BAR:
2568			return sizeof(struct mwl_frame_bar);
2569		}
2570		return sizeof(struct ieee80211_frame_min);
2571	} else
2572		return ieee80211_hdrsize(data);
2573}
2574
2575static void
2576mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2577{
2578	const struct ieee80211_frame *wh;
2579	struct ieee80211_node *ni;
2580
2581	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2582	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2583	if (ni != NULL) {
2584		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2585		ieee80211_free_node(ni);
2586	}
2587}
2588
2589/*
2590 * Convert hardware signal strength to rssi.  The value
2591 * provided by the device has the noise floor added in;
2592 * we need to compensate for this but we don't have that
2593 * so we use a fixed value.
2594 *
2595 * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2596 * offset is already set as part of the initial gain.  This
2597 * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2598 */
2599static __inline int
2600cvtrssi(uint8_t ssi)
2601{
2602	int rssi = (int) ssi + 8;
2603	/* XXX hack guess until we have a real noise floor */
2604	rssi = 2*(87 - rssi);	/* NB: .5 dBm units */
2605	return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi);
2606}
2607
/*
 * Rx processing task.  Walk the rx descriptor ring (up to mwl_rxquota
 * entries) reclaiming completed frames: for each firmware-completed
 * descriptor, replace its dma buffer from the jumbo pool, wrap the
 * old buffer in an mbuf (zero copy), reconstruct the 802.11 header
 * the firmware partially strips, and dispatch into net80211.  If the
 * jumbo pool runs dry, rx interrupts are masked until mwl_ext_free
 * refills the pool past the low-water mark.
 */
static void
mwl_rx_proc(void *arg, int npending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX */
	/* resume from where the previous pass stopped */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* NB: DMA_OWN marks a descriptor the firmware completed */
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			counter_u64_add(ic->ic_ierrors, 1);
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free,
			data, sc, 0, EXT_NET_DRV);
		/* position m_data so the rebuilt header lands at the front */
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh))
			*(uint16_t *)ieee80211_getqos(wh) = ds->QosCtrl;
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED |
		    IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	/* remember where to resume on the next pass */
	sc->sc_rxnext = bf;

	if (mbufq_first(&sc->sc_snd) != NULL) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(sc);
	}
}
2828
2829static void
2830mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
2831{
2832	struct mwl_txbuf *bf, *bn;
2833	struct mwl_txdesc *ds;
2834
2835	MWL_TXQ_LOCK_INIT(sc, txq);
2836	txq->qnum = qnum;
2837	txq->txpri = 0;	/* XXX */
2838#if 0
2839	/* NB: q setup by mwl_txdma_setup XXX */
2840	STAILQ_INIT(&txq->free);
2841#endif
2842	STAILQ_FOREACH(bf, &txq->free, bf_list) {
2843		bf->bf_txq = txq;
2844
2845		ds = bf->bf_desc;
2846		bn = STAILQ_NEXT(bf, bf_list);
2847		if (bn == NULL)
2848			bn = STAILQ_FIRST(&txq->free);
2849		ds->pPhysNext = htole32(bn->bf_daddr);
2850	}
2851	STAILQ_INIT(&txq->active);
2852}
2853
2854/*
2855 * Setup a hardware data transmit queue for the specified
2856 * access control.  We record the mapping from ac's
2857 * to h/w queues for use by mwl_tx_start.
2858 */
2859static int
2860mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2861{
2862	struct mwl_txq *txq;
2863
2864	if (ac >= nitems(sc->sc_ac2q)) {
2865		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2866			ac, nitems(sc->sc_ac2q));
2867		return 0;
2868	}
2869	if (mvtype >= MWL_NUM_TX_QUEUES) {
2870		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2871			mvtype, MWL_NUM_TX_QUEUES);
2872		return 0;
2873	}
2874	txq = &sc->sc_txq[mvtype];
2875	mwl_txq_init(sc, txq, mvtype);
2876	sc->sc_ac2q[ac] = txq;
2877	return 1;
2878}
2879
2880/*
2881 * Update WME parameters for a transmit queue.
2882 */
2883static int
2884mwl_txq_update(struct mwl_softc *sc, int ac)
2885{
2886#define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
2887	struct ieee80211com *ic = &sc->sc_ic;
2888	struct chanAccParams chp;
2889	struct mwl_txq *txq = sc->sc_ac2q[ac];
2890	struct wmeParams *wmep;
2891	struct mwl_hal *mh = sc->sc_mh;
2892	int aifs, cwmin, cwmax, txoplim;
2893
2894	ieee80211_wme_ic_getparams(ic, &chp);
2895	wmep = &chp.cap_wmeParams[ac];
2896
2897	aifs = wmep->wmep_aifsn;
2898	/* XXX in sta mode need to pass log values for cwmin/max */
2899	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2900	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2901	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */
2902
2903	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
2904		device_printf(sc->sc_dev, "unable to update hardware queue "
2905			"parameters for %s traffic!\n",
2906			ieee80211_wme_acnames[ac]);
2907		return 0;
2908	}
2909	return 1;
2910#undef MWL_EXPONENT_TO_VALUE
2911}
2912
2913/*
2914 * Callback from the 802.11 layer to update WME parameters.
2915 */
2916static int
2917mwl_wme_update(struct ieee80211com *ic)
2918{
2919	struct mwl_softc *sc = ic->ic_softc;
2920
2921	return !mwl_txq_update(sc, WME_AC_BE) ||
2922	    !mwl_txq_update(sc, WME_AC_BK) ||
2923	    !mwl_txq_update(sc, WME_AC_VI) ||
2924	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
2925}
2926
2927/*
2928 * Reclaim resources for a setup queue.
2929 */
static void
mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	/* XXX hal work? */
	/* NB: only the per-queue lock needs explicit teardown here */
	MWL_TXQ_LOCK_DESTROY(txq);
}
2936
2937/*
2938 * Reclaim all tx queue resources.
2939 */
2940static void
2941mwl_tx_cleanup(struct mwl_softc *sc)
2942{
2943	int i;
2944
2945	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2946		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
2947}
2948
/*
 * DMA-map an outbound mbuf chain into bf->bf_segs/bf_nseg for tx.
 * If the chain needs more than MWL_TXDESC segments it is linearized
 * (collapsed/defragged) and mapped again.  On success bf->bf_m owns
 * the (possibly replaced) chain; on any failure the chain is freed
 * and an errno is returned.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		/* NB: sentinel value; the EFBIG branch below linearizes */
		bf->bf_nseg = MWL_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_NOWAIT);
#endif
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* retry the mapping with the linearized chain */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3010
3011static __inline int
3012mwl_cvtlegacyrate(int rate)
3013{
3014	switch (rate) {
3015	case 2:	 return 0;
3016	case 4:	 return 1;
3017	case 11: return 2;
3018	case 22: return 3;
3019	case 44: return 4;
3020	case 12: return 5;
3021	case 18: return 6;
3022	case 24: return 7;
3023	case 36: return 8;
3024	case 48: return 9;
3025	case 72: return 10;
3026	case 96: return 11;
3027	case 108:return 12;
3028	}
3029	return 0;
3030}
3031
3032/*
3033 * Calculate fixed tx rate information per client state;
3034 * this value is suitable for writing to the Format field
3035 * of a tx descriptor.
3036 */
3037static uint16_t
3038mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
3039{
3040	uint16_t fmt;
3041
3042	fmt = _IEEE80211_SHIFTMASK(3, EAGLE_TXD_ANTENNA)
3043	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
3044		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
3045	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
3046		fmt |= EAGLE_TXD_FORMAT_HT
3047		    /* NB: 0x80 implicitly stripped from ucastrate */
3048		    | _IEEE80211_SHIFTMASK(rate, EAGLE_TXD_RATE);
3049		/* XXX short/long GI may be wrong; re-check */
3050		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
3051			fmt |= EAGLE_TXD_CHW_40
3052			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
3053			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3054		} else {
3055			fmt |= EAGLE_TXD_CHW_20
3056			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
3057			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3058		}
3059	} else {			/* legacy rate */
3060		fmt |= EAGLE_TXD_FORMAT_LEGACY
3061		    | _IEEE80211_SHIFTMASK(mwl_cvtlegacyrate(rate),
3062			EAGLE_TXD_RATE)
3063		    | EAGLE_TXD_CHW_20
3064		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
3065		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
3066			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
3067	}
3068	return fmt;
3069}
3070
/*
 * Prepare a frame for transmit and hand it to the firmware:
 * encrypt if needed, prepend the 2-byte f/w length + 4-address
 * header the firmware expects, load the DMA map, fill in the tx
 * descriptor, and queue it on the h/w queue bound to bf.
 * Returns 0 on success or an errno; the mbuf is freed on every
 * error path (either here or inside mwl_tx_dmasetup).
 */
static int
mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
    struct mbuf *m0)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, iswep, ismcast;
	int hdrlen, copyhdrlen, pktlen;
	struct mwl_txdesc *ds;
	struct mwl_txq *txq;
	struct ieee80211_frame *wh;
	struct mwltxrec *tr;
	struct mwl_node *mn;
	uint16_t qos;
#if MWL_TXDESC > 1
	int i;
#endif

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	copyhdrlen = hdrlen;
	pktlen = m0->m_pkthdr.len;
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		/* stash the QoS control field; it goes in the descriptor */
		qos = *(uint16_t *)ieee80211_getqos(wh);
		if (IEEE80211_IS_DSTODS(wh))
			copyhdrlen -= sizeof(qos);
	} else
		qos = 0;

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 *
		 * NB: we do this even though the firmware will ignore
		 *     what we've done for WEP and TKIP as we need the
		 *     ExtIV filled in for CCMP and this also adjusts
		 *     the headers which simplifies our work below.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			m_freem(m0);
			return EIO;
		}
		/*
		 * Adjust the packet length for the crypto additions
		 * done during encap and any other bits that the f/w
		 * will add later on.
		 */
		cip = k->wk_cipher;
		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;	/* XXX */
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
#if 0
		sc->sc_tx_th.wt_rate = ds->DataRate;
#endif
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}
	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 * We are assured the mbuf has sufficient headroom to
	 * prepend in-place by the setup of ic_headroom in
	 * mwl_attach.
	 */
	if (hdrlen < sizeof(struct mwltxrec)) {
		const int space = sizeof(struct mwltxrec) - hdrlen;
		if (M_LEADINGSPACE(m0) < space) {
			/* NB: should never happen */
			device_printf(sc->sc_dev,
			    "not enough headroom, need %d found %zd, "
			    "m_flags 0x%x m_len %d\n",
			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
			ieee80211_dump_pkt(ic,
			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
			m_freem(m0);
			sc->sc_stats.mst_tx_noheadroom++;
			return EIO;
		}
		/* NB: cannot fail; leading space was verified above */
		M_PREPEND(m0, space, M_NOWAIT);
	}
	tr = mtod(m0, struct mwltxrec *);
	if (wh != (struct ieee80211_frame *) &tr->wh)
		ovbcopy(wh, &tr->wh, hdrlen);
	/*
	 * Note: the "firmware length" is actually the length
	 * of the fully formed "802.11 payload".  That is, it's
	 * everything except for the 802.11 header.  In particular
	 * this includes all crypto material including the MIC!
	 */
	tr->fwlen = htole16(pktlen - hdrlen);

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = mwl_tx_dmasetup(sc, bf, m0);
	if (error != 0) {
		/* NB: stat collected in mwl_tx_dmasetup */
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: unable to setup dma\n", __func__);
		return error;
	}
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	tr = mtod(m0, struct mwltxrec *);
	wh = (struct ieee80211_frame *)&tr->wh;

	/*
	 * Formulate tx descriptor.
	 */
	ds = bf->bf_desc;
	txq = bf->bf_txq;

	ds->QosCtrl = qos;			/* NB: already little-endian */
#if MWL_TXDESC == 1
	/*
	 * NB: multiframes should be zero because the descriptors
	 *     are initialized to zero.  This should handle the case
	 *     where the driver is built with MWL_TXDESC=1 but we are
	 *     using firmware with multi-segment support.
	 */
	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
#else
	ds->multiframes = htole32(bf->bf_nseg);
	ds->PktLen = htole16(m0->m_pkthdr.len);
	for (i = 0; i < bf->bf_nseg; i++) {
		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
	}
#endif
	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
	ds->Format = 0;
	ds->pad = 0;
	ds->ack_wcb_addr = 0;

	mn = MWL_NODE(ni);
	/*
	 * Select transmit rate.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		sc->sc_stats.mst_tx_mgmt++;
		/* fall thru... */
	case IEEE80211_FC0_TYPE_CTL:
		/* NB: assign to BE q to avoid bursting */
		ds->TxPriority = MWL_WME_AC_BE;
		break;
	case IEEE80211_FC0_TYPE_DATA:
		if (!ismcast) {
			const struct ieee80211_txparam *tp = ni->ni_txparms;
			/*
			 * EAPOL frames get forced to a fixed rate and w/o
			 * aggregation; otherwise check for any fixed rate
			 * for the client (may depend on association state).
			 */
			if (m0->m_flags & M_EAPOL) {
				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
				ds->Format = mvp->mv_eapolformat;
				ds->pad = htole16(
				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
				/* XXX pre-calculate per node */
				ds->Format = htole16(
				    mwl_calcformat(tp->ucastrate, ni));
				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
			}
			/*
			 * Route the frame to the h/w queue of any matching
			 * BA stream so it is aggregated; otherwise fall
			 * back to the queue bound to the tx buffer.
			 * NB: EAPOL frames will never have qos set
			 */
			if (qos == 0)
				ds->TxPriority = txq->qnum;
#if MWL_MAXBA > 3
			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
				ds->TxPriority = mn->mn_ba[3].txq;
#endif
#if MWL_MAXBA > 2
			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
				ds->TxPriority = mn->mn_ba[2].txq;
#endif
#if MWL_MAXBA > 1
			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
				ds->TxPriority = mn->mn_ba[1].txq;
#endif
#if MWL_MAXBA > 0
			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
				ds->TxPriority = mn->mn_ba[0].txq;
#endif
			else
				ds->TxPriority = txq->qnum;
		} else
			ds->TxPriority = txq->qnum;
		break;
	default:
		device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		sc->sc_stats.mst_tx_badframetype++;
		m_freem(m0);
		return EIO;
	}

	if (IFF_DUMPPKTS_XMIT(sc))
		ieee80211_dump_pkt(ic,
		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);

	MWL_TXQ_LOCK(txq);
	/* hand the descriptor to the firmware and arm the watchdog */
	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* NB: cleared in mwl_tx_proc when frames complete */
	sc->sc_tx_timer = 5;
	MWL_TXQ_UNLOCK(txq);

	return 0;
}
3310
/*
 * Map a firmware legacy rate index back to an IEEE rate
 * (in 500Kb/s units); out-of-range indices yield 0.
 */
static __inline int
mwl_cvtlegacyrix(int rix)
{
	static const int ieeerates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	const int nrates = (int)(sizeof(ieeerates) / sizeof(ieeerates[0]));

	if (rix >= 0 && rix < nrates)
		return ieeerates[rix];
	return 0;
}
3318
3319/*
3320 * Process completed xmit descriptors from the specified queue.
3321 */
/*
 * Reap tx descriptors the firmware has completed on the given
 * queue: update statistics and the per-node tx rate, complete
 * the frames back to net80211, and return the tx buffers to the
 * free list.  Returns the number of descriptors reaped.
 */
static int
mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
{
#define	EAGLE_TXD_STATUS_MCAST \
	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
	/* NB: EAGLE_TXD_STATUS_MCAST is not referenced below (XXX unused) */
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;
	struct ieee80211_node *ni;
	struct mwl_node *an;
	int nreaped;
	uint32_t status;

	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
	for (nreaped = 0;; nreaped++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		ds = bf->bf_desc;
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
			/* descriptor still owned by the firmware; stop */
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);

#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
			mwl_printtxbuf(bf, txq->qnum, nreaped);
#endif
		ni = bf->bf_node;
		if (ni != NULL) {
			an = MWL_NODE(ni);	/* XXX an is not used below */
			status = le32toh(ds->Status);
			if (status & EAGLE_TXD_STATUS_OK) {
				uint16_t Format = le16toh(ds->Format);
				uint8_t txant = _IEEE80211_MASKSHIFT(Format,
				    EAGLE_TXD_ANTENNA);

				sc->sc_stats.mst_ant_tx[txant]++;
				if (status & EAGLE_TXD_STATUS_OK_RETRY)
					sc->sc_stats.mst_tx_retries++;
				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
					sc->sc_stats.mst_tx_mretries++;
				if (txq->qnum >= MWL_WME_AC_VO)
					ic->ic_wme.wme_hipri_traffic++;
				/*
				 * Record the rate the f/w actually used;
				 * legacy rate indices are converted back
				 * to IEEE rates, HT rates are tagged MCS.
				 */
				ni->ni_txrate = _IEEE80211_MASKSHIFT(Format,
				    EAGLE_TXD_RATE);
				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
					ni->ni_txrate = mwl_cvtlegacyrix(
					    ni->ni_txrate);
				} else
					ni->ni_txrate |= IEEE80211_RATE_MCS;
				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
			} else {
				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
					sc->sc_stats.mst_tx_linkerror++;
				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
					sc->sc_stats.mst_tx_xretries++;
				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
					sc->sc_stats.mst_tx_aging++;
				if (bf->bf_m->m_flags & M_FF)
					sc->sc_stats.mst_ff_txerr++;
			}
			if (bf->bf_m->m_flags & M_TXCB)
				/* XXX strip fw len in case header inspected */
				m_adj(bf->bf_m, sizeof(uint16_t));
			ieee80211_tx_complete(ni, bf->bf_m,
			    (status & EAGLE_TXD_STATUS_OK) == 0);
		} else
			m_freem(bf->bf_m);
		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);

		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		mwl_puttxbuf_tail(txq, bf);
	}
	return nreaped;
#undef EAGLE_TXD_STATUS_MCAST
}
3409
3410/*
3411 * Deferred processing of transmit interrupt; special-cased
3412 * for four hardware queues, 0-3.
3413 */
3414static void
3415mwl_tx_proc(void *arg, int npending)
3416{
3417	struct mwl_softc *sc = arg;
3418	int nreaped;
3419
3420	/*
3421	 * Process each active queue.
3422	 */
3423	nreaped = 0;
3424	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3425		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3426	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3427		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3428	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3429		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3430	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3431		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3432
3433	if (nreaped != 0) {
3434		sc->sc_tx_timer = 0;
3435		if (mbufq_first(&sc->sc_snd) != NULL) {
3436			/* NB: kick fw; the tx thread may have been preempted */
3437			mwl_hal_txstart(sc->sc_mh, 0);
3438			mwl_start(sc);
3439		}
3440	}
3441}
3442
/*
 * Discard every frame pending on the given tx queue, releasing
 * DMA mappings, node references, and the mbufs, and return the
 * tx buffers to the free list.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ieee80211com *ic = &sc->sc_ic;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			/* NB: skip the 2-byte f/w length prepended on tx */
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3486
3487/*
3488 * Drain the transmit queues and reclaim resources.
3489 */
3490static void
3491mwl_draintxq(struct mwl_softc *sc)
3492{
3493	int i;
3494
3495	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3496		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3497	sc->sc_tx_timer = 0;
3498}
3499
3500#ifdef MWL_DIAGAPI
3501/*
3502 * Reset the transmit queues to a pristine state after a fw download.
3503 */
3504static void
3505mwl_resettxq(struct mwl_softc *sc)
3506{
3507	int i;
3508
3509	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3510		mwl_txq_reset(sc, &sc->sc_txq[i]);
3511}
3512#endif /* MWL_DIAGAPI */
3513
3514/*
3515 * Clear the transmit queues of any frames submitted for the
3516 * specified vap.  This is done when the vap is deleted so we
3517 * don't potentially reference the vap after it is gone.
3518 * Note we cannot remove the frames; we only reclaim the node
3519 * reference.
3520 */
3521static void
3522mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3523{
3524	struct mwl_txq *txq;
3525	struct mwl_txbuf *bf;
3526	int i;
3527
3528	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3529		txq = &sc->sc_txq[i];
3530		MWL_TXQ_LOCK(txq);
3531		STAILQ_FOREACH(bf, &txq->active, bf_list) {
3532			struct ieee80211_node *ni = bf->bf_node;
3533			if (ni != NULL && ni->ni_vap == vap) {
3534				bf->bf_node = NULL;
3535				ieee80211_free_node(ni);
3536			}
3537		}
3538		MWL_TXQ_UNLOCK(txq);
3539	}
3540}
3541
3542static int
3543mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
3544	const uint8_t *frm, const uint8_t *efrm)
3545{
3546	struct mwl_softc *sc = ni->ni_ic->ic_softc;
3547	const struct ieee80211_action *ia;
3548
3549	ia = (const struct ieee80211_action *) frm;
3550	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3551	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3552		const struct ieee80211_action_ht_mimopowersave *mps =
3553		    (const struct ieee80211_action_ht_mimopowersave *) ia;
3554
3555		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3556		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3557		    _IEEE80211_MASKSHIFT(mps->am_control,
3558			IEEE80211_A_HT_MIMOPWRSAVE_MODE));
3559		return 0;
3560	} else
3561		return sc->sc_recv_action(ni, wh, frm, efrm);
3562}
3563
/*
 * Hook for net80211's ADDBA request path: grab a free per-node
 * BA stream slot and a firmware bastream before forwarding the
 * request.  Returning 0 without calling the saved handler
 * suppresses a-mpdu aggregation for this TID.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 * NB: the if/else chains below pick the highest-index
		 *     free slot compiled in via MWL_MAXBA.
		 */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3635
/*
 * Hook for net80211's ADDBA response path: on success tell the
 * firmware to set up the pre-allocated BA stream, otherwise
 * return the stream resources.  In both cases the saved handler
 * is invoked to complete processing.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, TID %d\n",
		    __func__, tap->txa_tid);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d TID %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_tid, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3702
3703static void
3704mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3705{
3706	struct mwl_softc *sc = ni->ni_ic->ic_softc;
3707	struct mwl_bastate *bas;
3708
3709	bas = tap->txa_private;
3710	if (bas != NULL) {
3711		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3712		    __func__, bas->bastream);
3713		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3714		mwl_bastream_free(bas);
3715		tap->txa_private = NULL;
3716	}
3717	sc->sc_addba_stop(ni, tap);
3718}
3719
3720/*
3721 * Setup the rx data structures.  This should only be
3722 * done once or we may get out of sync with the firmware.
3723 */
3724static int
3725mwl_startrecv(struct mwl_softc *sc)
3726{
3727	if (!sc->sc_recvsetup) {
3728		struct mwl_rxbuf *bf, *prev;
3729		struct mwl_rxdesc *ds;
3730
3731		prev = NULL;
3732		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
3733			int error = mwl_rxbuf_init(sc, bf);
3734			if (error != 0) {
3735				DPRINTF(sc, MWL_DEBUG_RECV,
3736					"%s: mwl_rxbuf_init failed %d\n",
3737					__func__, error);
3738				return error;
3739			}
3740			if (prev != NULL) {
3741				ds = prev->bf_desc;
3742				ds->pPhysNext = htole32(bf->bf_daddr);
3743			}
3744			prev = bf;
3745		}
3746		if (prev != NULL) {
3747			ds = prev->bf_desc;
3748			ds->pPhysNext =
3749			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
3750		}
3751		sc->sc_recvsetup = 1;
3752	}
3753	mwl_mode_init(sc);		/* set filters, etc. */
3754	return 0;
3755}
3756
3757static MWL_HAL_APMODE
3758mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3759{
3760	MWL_HAL_APMODE mode;
3761
3762	if (IEEE80211_IS_CHAN_HT(chan)) {
3763		if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3764			mode = AP_MODE_N_ONLY;
3765		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3766			mode = AP_MODE_AandN;
3767		else if (vap->iv_flags & IEEE80211_F_PUREG)
3768			mode = AP_MODE_GandN;
3769		else
3770			mode = AP_MODE_BandGandN;
3771	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3772		if (vap->iv_flags & IEEE80211_F_PUREG)
3773			mode = AP_MODE_G_ONLY;
3774		else
3775			mode = AP_MODE_MIXED;
3776	} else if (IEEE80211_IS_CHAN_B(chan))
3777		mode = AP_MODE_B_ONLY;
3778	else if (IEEE80211_IS_CHAN_A(chan))
3779		mode = AP_MODE_A_ONLY;
3780	else
3781		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3782	return mode;
3783}
3784
3785static int
3786mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
3787{
3788	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
3789	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
3790}
3791
3792/*
3793 * Set/change channels.
3794 */
static int
mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
{
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211com *ic = &sc->sc_ic;
	MWL_HAL_CHANNEL hchan;
	int maxtxpow;

	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
	    __func__, chan->ic_freq, chan->ic_flags);

	/*
	 * Convert to a HAL channel description with
	 * the flags constrained to reflect the current
	 * operating mode.
	 */
	mwl_mapchan(&hchan, chan);
	mwl_hal_intrset(mh, 0);		/* disable interrupts */
#if 0
	mwl_draintxq(sc);		/* clear pending tx frames */
#endif
	mwl_hal_setchannel(mh, &hchan);
	/*
	 * Tx power is cap'd by the regulatory setting and
	 * possibly a user-set limit.  We pass the min of
	 * these to the hal to apply them to the cal data
	 * for this channel.
	 * XXX min bound?
	 * NB: values are carried in half-dBm units, hence the *2 / 2.
	 */
	maxtxpow = 2*chan->ic_maxregpower;
	if (maxtxpow > ic->ic_txpowlimit)
		maxtxpow = ic->ic_txpowlimit;
	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
	/* NB: potentially change mcast/mgt rates */
	mwl_setcurchanrates(sc);

	/*
	 * Update internal state.
	 * Record the new channel in the radiotap headers so
	 * subsequent captured frames are tagged correctly.
	 */
	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
	if (IEEE80211_IS_CHAN_A(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
	} else {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
	}
	sc->sc_curchan = hchan;
	mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable interrupts */

	return 0;
}
3851
static void
mwl_scan_start(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	/* NB: no device programming required; just leave a debug trace */
	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3859
static void
mwl_scan_end(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	/* NB: no device programming required; just leave a debug trace */
	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3867
static void
mwl_set_channel(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	/* NB: error return deliberately ignored; net80211 expects void */
	(void) mwl_chan_set(sc, ic->ic_curchan);
}
3875
3876/*
3877 * Handle a channel switch request.  We inform the firmware
3878 * and mark the global state to suppress various actions.
3879 * NB: we issue only one request to the fw; we may be called
3880 * multiple times if there are multiple vap's.
3881 */
static void
mwl_startcsa(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct mwl_softc *sc = ic->ic_softc;
	MWL_HAL_CHANNEL hchan;

	/* NB: only one f/w request; suppress duplicates from other vaps */
	if (sc->sc_csapending)
		return;

	mwl_mapchan(&hchan, ic->ic_csa_newchan);
	/* 1 =>'s quiet channel */
	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
	sc->sc_csapending = 1;
}
3897
3898/*
3899 * Plumb any static WEP key for the station.  This is
3900 * necessary as we must propagate the key from the
3901 * global key table of the vap to each sta db entry.
3902 */
3903static void
3904mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3905{
3906	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
3907		IEEE80211_F_PRIVACY &&
3908	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
3909	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
3910		(void) _mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey],
3911				    mac);
3912}
3913
/*
 * Create/update the firmware station db entry for a peer.
 * WDS vaps have no f/w vap of their own, so the entry is
 * installed through the companion AP vap's handle.  Any static
 * WEP key is (re)plumbed afterwards since mwl_hal_newstation
 * clobbers the crypto state.
 */
static int
mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_hal_vap *hvap;
	int error;

	if (vap->iv_opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vap's do not have a f/w vap; instead they piggyback
		 * on an AP vap and we must install the sta db entry and
		 * crypto state using that AP's handle (the WDS vap has none).
		 */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	} else
		hvap = MWL_VAP(vap)->mv_hvap;
	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
	    aid, staid, pi,
	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
	if (error == 0) {
		/*
		 * Setup security for this station.  For sta mode this is
		 * needed even though do the same thing on transition to
		 * AUTH state because the call to mwl_hal_newstation
		 * clobbers the crypto state we setup.
		 */
		mwl_setanywepkey(vap, ni->ni_macaddr);
	}
	return error;
#undef WME
}
3947
3948static void
3949mwl_setglobalkeys(struct ieee80211vap *vap)
3950{
3951	struct ieee80211_key *wk;
3952
3953	wk = &vap->iv_nw_keys[0];
3954	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
3955		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
3956			(void) _mwl_key_set(vap, wk, vap->iv_myaddr);
3957}
3958
3959/*
3960 * Convert a legacy rate set to a firmware bitmask.
3961 */
3962static uint32_t
3963get_rate_bitmap(const struct ieee80211_rateset *rs)
3964{
3965	uint32_t rates;
3966	int i;
3967
3968	rates = 0;
3969	for (i = 0; i < rs->rs_nrates; i++)
3970		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
3971		case 2:	  rates |= 0x001; break;
3972		case 4:	  rates |= 0x002; break;
3973		case 11:  rates |= 0x004; break;
3974		case 22:  rates |= 0x008; break;
3975		case 44:  rates |= 0x010; break;
3976		case 12:  rates |= 0x020; break;
3977		case 18:  rates |= 0x040; break;
3978		case 24:  rates |= 0x080; break;
3979		case 36:  rates |= 0x100; break;
3980		case 48:  rates |= 0x200; break;
3981		case 72:  rates |= 0x400; break;
3982		case 96:  rates |= 0x800; break;
3983		case 108: rates |= 0x1000; break;
3984		}
3985	return rates;
3986}
3987
3988/*
3989 * Construct an HT firmware bitmask from an HT rate set.
3990 */
3991static uint32_t
3992get_htrate_bitmap(const struct ieee80211_htrateset *rs)
3993{
3994	uint32_t rates;
3995	int i;
3996
3997	rates = 0;
3998	for (i = 0; i < rs->rs_nrates; i++) {
3999		if (rs->rs_rates[i] < 16)
4000			rates |= 1<<rs->rs_rates[i];
4001	}
4002	return rates;
4003}
4004
4005/*
4006 * Craft station database entry for station.
4007 * NB: use host byte order here, the hal handles byte swapping.
4008 */
static MWL_HAL_PEERINFO *
mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
{
	const struct ieee80211vap *vap = ni->ni_vap;

	memset(pi, 0, sizeof(*pi));
	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
	pi->CapInfo = ni->ni_capinfo;
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* HT capabilities, etc */
		pi->HTCapabilitiesInfo = ni->ni_htcap;
		/* XXX pi.HTCapabilitiesInfo */
	        pi->MacHTParamInfo = ni->ni_htparam;
		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
		pi->AddHtInfo.OpMode = ni->ni_htopmode;
		pi->AddHtInfo.stbc = ni->ni_htstbc;

		/*
		 * Constrain according to local configuration:
		 * strip short-GI and 40MHz capability bits the
		 * local side is not configured to use.
		 */
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
		if (ni->ni_chw != 40)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
	}
	return pi;
}
4038
4039/*
4040 * Re-create the local sta db entry for a vap to ensure
4041 * up to date WME state is pushed to the firmware.  Because
4042 * this resets crypto state this must be followed by a
4043 * reload of any keys in the global key table.
4044 */
4045static int
4046mwl_localstadb(struct ieee80211vap *vap)
4047{
4048#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
4049	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
4050	struct ieee80211_node *bss;
4051	MWL_HAL_PEERINFO pi;
4052	int error;
4053
4054	switch (vap->iv_opmode) {
4055	case IEEE80211_M_STA:
4056		bss = vap->iv_bss;
4057		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
4058		    vap->iv_state == IEEE80211_S_RUN ?
4059			mkpeerinfo(&pi, bss) : NULL,
4060		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
4061		    bss->ni_ies.wme_ie != NULL ?
4062			WME(bss->ni_ies.wme_ie)->wme_info : 0);
4063		if (error == 0)
4064			mwl_setglobalkeys(vap);
4065		break;
4066	case IEEE80211_M_HOSTAP:
4067	case IEEE80211_M_MBSS:
4068		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
4069		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
4070		if (error == 0)
4071			mwl_setglobalkeys(vap);
4072		break;
4073	default:
4074		error = 0;
4075		break;
4076	}
4077	return error;
4078#undef WME
4079}
4080
/*
 * VAP state machine hook; wraps net80211's newstate method
 * (saved in mvp->mv_newstate).  Firmware-specific work is done
 * both before and after the net80211 transition because some
 * actions need the old state and others need up-to-date state
 * such as iv_bss.
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    vap->iv_ifp->if_xname, __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	/* cancel the firmware-prodding age timer; re-armed below on RUN */
	callout_stop(&sc->sc_timer);
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    vap->iv_ifp->if_xname, __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    vap->iv_ifp->if_xname, __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			/* first DWDS sta vap enables DWDS in firmware */
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    vap->iv_ifp->if_xname, __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		/*
		 * NOTE(review): this branch runs for a DWDS vap on ANY
		 * transition that is neither RUN nor SLEEP (and also when
		 * the RUN transition above failed), not only when leaving
		 * RUN.  Verify sc_ndwdsvaps cannot be decremented below
		 * zero by repeated non-RUN transitions.
		 */
		mwl_hal_setdwds(mh, 0);
bad:
	return error;
}
4233
4234/*
4235 * Manage station id's; these are separate from AID's
4236 * as AID's may have values out of the range of possible
4237 * station id's acceptable to the firmware.
4238 */
/*
 * Allocate a firmware station id for a node.  Prefer the AID when
 * it is in range and free; otherwise linearly scan the bitmap for
 * the first free id (id 0 is reserved).
 */
static int
allocstaid(struct mwl_softc *sc, int aid)
{
	int staid;

	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
		/* NB: don't use 0 */
		for (staid = 1; staid < MWL_MAXSTAID; staid++)
			if (isclr(sc->sc_staid, staid))
				break;
		/*
		 * NOTE(review): if every id is taken the loop falls
		 * through with staid == MWL_MAXSTAID and the setbit
		 * below writes one bit past the valid range — confirm
		 * callers bound concurrent stations below MWL_MAXSTAID
		 * (e.g. via the firmware association limit).
		 */
	} else
		staid = aid;
	setbit(sc->sc_staid, staid);
	return staid;
}
4254
/* Release a station id previously obtained from allocstaid. */
static void
delstaid(struct mwl_softc *sc, int staid)
{
	clrbit(sc->sc_staid, staid);
}
4260
4261/*
4262 * Setup driver-specific state for a newly associated node.
4263 * Note that we're called also on a re-associate, the isnew
4264 * param tells us if this is the first time or not.
4265 */
4266static void
4267mwl_newassoc(struct ieee80211_node *ni, int isnew)
4268{
4269	struct ieee80211vap *vap = ni->ni_vap;
4270        struct mwl_softc *sc = vap->iv_ic->ic_softc;
4271	struct mwl_node *mn = MWL_NODE(ni);
4272	MWL_HAL_PEERINFO pi;
4273	uint16_t aid;
4274	int error;
4275
4276	aid = IEEE80211_AID(ni->ni_associd);
4277	if (isnew) {
4278		mn->mn_staid = allocstaid(sc, aid);
4279		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4280	} else {
4281		mn = MWL_NODE(ni);
4282		/* XXX reset BA stream? */
4283	}
4284	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4285	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4286	error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4287	if (error != 0) {
4288		DPRINTF(sc, MWL_DEBUG_NODE,
4289		    "%s: error %d creating sta db entry\n",
4290		    __func__, error);
4291		/* XXX how to deal with error? */
4292	}
4293}
4294
4295/*
4296 * Periodically poke the firmware to age out station state
4297 * (power save queues, pending tx aggregates).
4298 */
4299static void
4300mwl_agestations(void *arg)
4301{
4302	struct mwl_softc *sc = arg;
4303
4304	mwl_hal_setkeepalive(sc->sc_mh);
4305	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
4306		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
4307}
4308
4309static const struct mwl_hal_channel *
4310findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4311{
4312	int i;
4313
4314	for (i = 0; i < ci->nchannels; i++) {
4315		const struct mwl_hal_channel *hc = &ci->channels[i];
4316		if (hc->ieee == ieee)
4317			return hc;
4318	}
4319	return NULL;
4320}
4321
4322static int
4323mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
4324	int nchan, struct ieee80211_channel chans[])
4325{
4326	struct mwl_softc *sc = ic->ic_softc;
4327	struct mwl_hal *mh = sc->sc_mh;
4328	const MWL_HAL_CHANNELINFO *ci;
4329	int i;
4330
4331	for (i = 0; i < nchan; i++) {
4332		struct ieee80211_channel *c = &chans[i];
4333		const struct mwl_hal_channel *hc;
4334
4335		if (IEEE80211_IS_CHAN_2GHZ(c)) {
4336			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
4337			    IEEE80211_IS_CHAN_HT40(c) ?
4338				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4339		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4340			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
4341			    IEEE80211_IS_CHAN_HT40(c) ?
4342				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4343		} else {
4344			device_printf(sc->sc_dev,
4345			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
4346			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
4347			return EINVAL;
4348		}
4349		/*
4350		 * Verify channel has cal data and cap tx power.
4351		 */
4352		hc = findhalchannel(ci, c->ic_ieee);
4353		if (hc != NULL) {
4354			if (c->ic_maxpower > 2*hc->maxTxPow)
4355				c->ic_maxpower = 2*hc->maxTxPow;
4356			goto next;
4357		}
4358		if (IEEE80211_IS_CHAN_HT40(c)) {
4359			/*
4360			 * Look for the extension channel since the
4361			 * hal table only has the primary channel.
4362			 */
4363			hc = findhalchannel(ci, c->ic_extieee);
4364			if (hc != NULL) {
4365				if (c->ic_maxpower > 2*hc->maxTxPow)
4366					c->ic_maxpower = 2*hc->maxTxPow;
4367				goto next;
4368			}
4369		}
4370		device_printf(sc->sc_dev,
4371		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
4372		    __func__, c->ic_ieee, c->ic_extieee,
4373		    c->ic_freq, c->ic_flags);
4374		return EINVAL;
4375	next:
4376		;
4377	}
4378	return 0;
4379}
4380
4381#define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4382#define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4383
4384static void
4385addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4386	const MWL_HAL_CHANNELINFO *ci, int flags)
4387{
4388	int i, error;
4389
4390	for (i = 0; i < ci->nchannels; i++) {
4391		const struct mwl_hal_channel *hc = &ci->channels[i];
4392
4393		error = ieee80211_add_channel_ht40(chans, maxchans, nchans,
4394		    hc->ieee, hc->maxTxPow, flags);
4395		if (error != 0 && error != ENOENT)
4396			break;
4397	}
4398}
4399
4400static void
4401addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4402	const MWL_HAL_CHANNELINFO *ci, const uint8_t bands[])
4403{
4404	int i, error;
4405
4406	error = 0;
4407	for (i = 0; i < ci->nchannels && error == 0; i++) {
4408		const struct mwl_hal_channel *hc = &ci->channels[i];
4409
4410		error = ieee80211_add_channel(chans, maxchans, nchans,
4411		    hc->ieee, hc->freq, hc->maxTxPow, 0, bands);
4412	}
4413}
4414
static void
getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
	struct ieee80211_channel chans[])
{
	const MWL_HAL_CHANNELINFO *ci;
	uint8_t bands[IEEE80211_MODE_BYTES];

	/*
	 * Use the channel info from the hal to craft the
	 * channel list.  Note that we pass back an unsorted
	 * list; the caller is required to sort it for us
	 * (if desired).
	 */
	*nchans = 0;
	/* 2.4GHz 20MHz channels: 11b/11g/11ng */
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) {
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11B);
		setbit(bands, IEEE80211_MODE_11G);
		setbit(bands, IEEE80211_MODE_11NG);
		addchannels(chans, maxchans, nchans, ci, bands);
	}
	/* 5GHz 20MHz channels: 11a/11na */
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) {
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		setbit(bands, IEEE80211_MODE_11NA);
		addchannels(chans, maxchans, nchans, ci, bands);
	}
	/* HT40 channels in both bands (must follow the 20MHz entries) */
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
}
4451
/* net80211 ic_getradiocaps method: report the hal-derived channel list. */
static void
mwl_getradiocaps(struct ieee80211com *ic,
	int maxchans, int *nchans, struct ieee80211_channel chans[])
{
	struct mwl_softc *sc = ic->ic_softc;

	getchannels(sc, maxchans, nchans, chans);
}
4460
4461static int
4462mwl_getchannels(struct mwl_softc *sc)
4463{
4464	struct ieee80211com *ic = &sc->sc_ic;
4465
4466	/*
4467	 * Use the channel info from the hal to craft the
4468	 * channel list for net80211.  Note that we pass up
4469	 * an unsorted list; net80211 will sort it for us.
4470	 */
4471	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
4472	ic->ic_nchans = 0;
4473	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
4474
4475	ic->ic_regdomain.regdomain = SKU_DEBUG;
4476	ic->ic_regdomain.country = CTRY_DEFAULT;
4477	ic->ic_regdomain.location = 'I';
4478	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
4479	ic->ic_regdomain.isocc[1] = ' ';
4480	return (ic->ic_nchans == 0 ? EIO : 0);
4481}
4482#undef IEEE80211_CHAN_HTA
4483#undef IEEE80211_CHAN_HTG
4484
4485#ifdef MWL_DEBUG
/*
 * Dump one rx descriptor for debugging.  A trailing "*"/"!" marks a
 * firmware-owned descriptor whose status is OK/not-OK.
 * NB: the STAT field prints raw ds->Status; the host-order `status`
 * is only used for the OK-flag decoration.
 */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n"
	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->pPhysNext),
	    le32toh(ds->pPhysBuffData), ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2));
}
4501
/*
 * Dump one tx descriptor for debugging.  A trailing "*"/"!" marks a
 * descriptor still owned by the firmware whose low status bits are
 * set/clear.
 */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr);
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !");
	printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	/* multi-segment descriptors carry per-segment lengths/pointers */
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
/* raw hex dump of the descriptor, disabled */
{ const uint8_t *cp = (const uint8_t *) ds;
  int i;
  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
	printf("%02x ", cp[i]);
	if (((i+1) % 16) == 0)
		printf("\n");
  }
  printf("\n");
}
#endif
}
4543#endif /* MWL_DEBUG */
4544
4545#if 0
/*
 * Dump all active descriptors on a tx queue (disabled debug helper;
 * the enclosing #if 0 keeps it out of normal builds).
 */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		/* sync so we read the firmware's view of the descriptor */
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4564#endif
4565
4566static void
4567mwl_watchdog(void *arg)
4568{
4569	struct mwl_softc *sc = arg;
4570
4571	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
4572	if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
4573		return;
4574
4575	if (sc->sc_running && !sc->sc_invalid) {
4576		if (mwl_hal_setkeepalive(sc->sc_mh))
4577			device_printf(sc->sc_dev,
4578			    "transmit timeout (firmware hung?)\n");
4579		else
4580			device_printf(sc->sc_dev,
4581			    "transmit timeout\n");
4582#if 0
4583		mwl_reset(sc);
4584mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
4585#endif
4586		counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4587		sc->sc_stats.mst_watchdog++;
4588	}
4589}
4590
4591#ifdef MWL_DIAGAPI
4592/*
4593 * Diagnostic interface to the HAL.  This is used by various
4594 * tools to do things like retrieve register contents for
4595 * debugging.  The mechanism is intentionally opaque so that
4596 * it can change frequently w/o concern for compatibility.
4597 */
static int
mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	u_int id = md->md_id & MWL_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	/*
	 * NOTE(review): insize/outsize come straight from the (root-only
	 * ioctl) caller and are passed unbounded to malloc below —
	 * confirm an upper bound is acceptable/enforced elsewhere.
	 */
	u_int32_t insize = md->md_in_size;
	u_int32_t outsize = md->md_out_size;
	int error = 0;

	if (md->md_id & MWL_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(md->md_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (md->md_id & MWL_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
		/* copy out at most what the caller asked for */
		if (outsize < md->md_out_size)
			md->md_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, md->md_out_data,
					md->md_out_size);
	} else {
		error = EINVAL;
	}
bad:
	/* only free buffers we allocated (DYN outdata may be HAL-owned) */
	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
4652
4653static int
4654mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
4655{
4656	struct mwl_hal *mh = sc->sc_mh;
4657	int error;
4658
4659	MWL_LOCK_ASSERT(sc);
4660
4661	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
4662		device_printf(sc->sc_dev, "unable to load firmware\n");
4663		return EIO;
4664	}
4665	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
4666		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
4667		return EIO;
4668	}
4669	error = mwl_setupdma(sc);
4670	if (error != 0) {
4671		/* NB: mwl_setupdma prints a msg */
4672		return error;
4673	}
4674	/*
4675	 * Reset tx/rx data structures; after reload we must
4676	 * re-start the driver's notion of the next xmit/recv.
4677	 */
4678	mwl_draintxq(sc);		/* clear pending frames */
4679	mwl_resettxq(sc);		/* rebuild tx q lists */
4680	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
4681	return 0;
4682}
4683#endif /* MWL_DIAGAPI */
4684
4685static void
4686mwl_parent(struct ieee80211com *ic)
4687{
4688	struct mwl_softc *sc = ic->ic_softc;
4689	int startall = 0;
4690
4691	MWL_LOCK(sc);
4692	if (ic->ic_nrunning > 0) {
4693		if (sc->sc_running) {
4694			/*
4695			 * To avoid rescanning another access point,
4696			 * do not call mwl_init() here.  Instead,
4697			 * only reflect promisc mode settings.
4698			 */
4699			mwl_mode_init(sc);
4700		} else {
4701			/*
4702			 * Beware of being called during attach/detach
4703			 * to reset promiscuous mode.  In that case we
4704			 * will still be marked UP but not RUNNING.
4705			 * However trying to re-init the interface
4706			 * is the wrong thing to do as we've already
4707			 * torn down much of our state.  There's
4708			 * probably a better way to deal with this.
4709			 */
4710			if (!sc->sc_invalid) {
4711				mwl_init(sc);	/* XXX lose error */
4712				startall = 1;
4713			}
4714		}
4715	} else
4716		mwl_stop(sc);
4717	MWL_UNLOCK(sc);
4718	if (startall)
4719		ieee80211_start_all(ic);
4720}
4721
/*
 * Driver-private ioctls: statistics export plus (optionally) the
 * diagnostic API.  Unknown commands return ENOTTY so net80211 can
 * fall back to its own handling.
 */
static int
mwl_ioctl(struct ieee80211com *ic, u_long cmd, void *data)
{
	struct mwl_softc *sc = ic->ic_softc;
	struct ifreq *ifr = data;
	int error = 0;

	switch (cmd) {
	case SIOCGMVSTATS:
		/* refresh the h/w counters embedded in sc_stats */
		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
#if 0
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.mst_tx_packets =
		    ifp->if_get_counter(ifp, IFCOUNTER_OPACKETS);
		sc->sc_stats.mst_rx_packets =
		    ifp->if_get_counter(ifp, IFCOUNTER_IPACKETS);
#endif
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsistency in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return (copyout(&sc->sc_stats, ifr_data_get_ptr(ifr),
		    sizeof (sc->sc_stats)));
#ifdef MWL_DIAGAPI
	case SIOCGMVDIAG:
		/* XXX check privs */
		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
	case SIOCGMVRESET:
		/* XXX check privs */
		MWL_LOCK(sc);
		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
		MWL_UNLOCK(sc);
		break;
#endif /* MWL_DIAGAPI */
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
4764
4765#ifdef	MWL_DEBUG
4766static int
4767mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4768{
4769	struct mwl_softc *sc = arg1;
4770	int debug, error;
4771
4772	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4773	error = sysctl_handle_int(oidp, &debug, 0, req);
4774	if (error || !req->newptr)
4775		return error;
4776	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4777	sc->sc_debug = debug & 0x00ffffff;
4778	return 0;
4779}
4780#endif /* MWL_DEBUG */
4781
/* Attach driver sysctl nodes (debug knob only, and only with MWL_DEBUG). */
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef	MWL_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	/* seed from the module-level default */
	sc->sc_debug = mwl_debug;
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
4795
4796/*
4797 * Announce various information on device/driver attach.
4798 */
static void
mwl_announce(struct mwl_softc *sc)
{

	/* fwReleaseNumber packs major.minor.patch.build, one byte each */
	device_printf(sc->sc_dev, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
		sc->sc_hwspecs.hwVersion,
		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
		sc->sc_hwspecs.regionCode);
	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;

	if (bootverbose) {
		int i;
		/* show the WME access-category to h/w queue mapping */
		for (i = 0; i <= WME_AC_VO; i++) {
			struct mwl_txq *txq = sc->sc_ac2q[i];
			device_printf(sc->sc_dev, "Use hw queue %u for %s traffic\n",
				txq->qnum, ieee80211_wme_acnames[i]);
		}
	}
	/* only report tunables when verbose or changed from the default */
	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
		device_printf(sc->sc_dev, "using %u rx descriptors\n", mwl_rxdesc);
	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
		device_printf(sc->sc_dev, "using %u rx buffers\n", mwl_rxbuf);
	if (bootverbose || mwl_txbuf != MWL_TXBUF)
		device_printf(sc->sc_dev, "using %u tx buffers\n", mwl_txbuf);
	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
		device_printf(sc->sc_dev, "multi-bss support\n");
#ifdef MWL_TX_NODROP
	if (bootverbose)
		device_printf(sc->sc_dev, "no tx drop\n");
#endif
}
4833