/* if_mwl.c revision 193240 */
1/*-
2 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
3 * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer,
11 *    without modification.
12 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
14 *    redistribution must be conditioned upon including a substantially
15 *    similar Disclaimer requirement for further binary redistribution.
16 *
17 * NO WARRANTY
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
21 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
23 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
26 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGES.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/dev/mwl/if_mwl.c 193240 2009-06-01 18:07:01Z sam $");
33
34/*
35 * Driver for the Marvell 88W8363 Wireless LAN controller.
36 */
37
38#include "opt_inet.h"
39#include "opt_mwl.h"
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/sysctl.h>
44#include <sys/mbuf.h>
45#include <sys/malloc.h>
46#include <sys/lock.h>
47#include <sys/mutex.h>
48#include <sys/kernel.h>
49#include <sys/socket.h>
50#include <sys/sockio.h>
51#include <sys/errno.h>
52#include <sys/callout.h>
53#include <sys/bus.h>
54#include <sys/endian.h>
55#include <sys/kthread.h>
56#include <sys/taskqueue.h>
57
58#include <machine/bus.h>
59
60#include <net/if.h>
61#include <net/if_dl.h>
62#include <net/if_media.h>
63#include <net/if_types.h>
64#include <net/if_arp.h>
65#include <net/ethernet.h>
66#include <net/if_llc.h>
67
68#include <net/bpf.h>
69
70#include <net80211/ieee80211_var.h>
71#include <net80211/ieee80211_regdomain.h>
72
73#ifdef INET
74#include <netinet/in.h>
75#include <netinet/if_ether.h>
76#endif /* INET */
77
78#include <dev/mwl/if_mwlvar.h>
79#include <dev/mwl/mwldiag.h>
80
/*
 * Idiomatic shorthands for register field access:
 *   MS = mask+shift (extract a field), SM = shift+mask (insert a field).
 * The second argument must be a bare mask macro name X that has a
 * companion shift constant named X_S; it is deliberately left
 * unparenthesized because it is token-pasted below.
 */
#define	MS(v,x)	(((v) & x) >> x##_S)
#define	SM(v,x)	(((v) << x##_S) & x)
84
85static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
86		    const char name[IFNAMSIZ], int unit, int opmode,
87		    int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
88		    const uint8_t mac[IEEE80211_ADDR_LEN]);
89static void	mwl_vap_delete(struct ieee80211vap *);
90static int	mwl_setupdma(struct mwl_softc *);
91static int	mwl_hal_reset(struct mwl_softc *sc);
92static int	mwl_init_locked(struct mwl_softc *);
93static void	mwl_init(void *);
94static void	mwl_stop_locked(struct ifnet *, int);
95static int	mwl_reset(struct ieee80211vap *, u_long);
96static void	mwl_stop(struct ifnet *, int);
97static void	mwl_start(struct ifnet *);
98static int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
99			const struct ieee80211_bpf_params *);
100static int	mwl_media_change(struct ifnet *);
101static void	mwl_watchdog(struct ifnet *);
102static int	mwl_ioctl(struct ifnet *, u_long, caddr_t);
103static void	mwl_radar_proc(void *, int);
104static void	mwl_chanswitch_proc(void *, int);
105static void	mwl_bawatchdog_proc(void *, int);
106static int	mwl_key_alloc(struct ieee80211vap *,
107			struct ieee80211_key *,
108			ieee80211_keyix *, ieee80211_keyix *);
109static int	mwl_key_delete(struct ieee80211vap *,
110			const struct ieee80211_key *);
111static int	mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *,
112			const uint8_t mac[IEEE80211_ADDR_LEN]);
113static int	mwl_mode_init(struct mwl_softc *);
114static void	mwl_update_mcast(struct ifnet *);
115static void	mwl_update_promisc(struct ifnet *);
116static void	mwl_updateslot(struct ifnet *);
117static int	mwl_beacon_setup(struct ieee80211vap *);
118static void	mwl_beacon_update(struct ieee80211vap *, int);
119#ifdef MWL_HOST_PS_SUPPORT
120static void	mwl_update_ps(struct ieee80211vap *, int);
121static int	mwl_set_tim(struct ieee80211_node *, int);
122#endif
123static int	mwl_dma_setup(struct mwl_softc *);
124static void	mwl_dma_cleanup(struct mwl_softc *);
125static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
126		    const uint8_t [IEEE80211_ADDR_LEN]);
127static void	mwl_node_cleanup(struct ieee80211_node *);
128static void	mwl_node_drain(struct ieee80211_node *);
129static void	mwl_node_getsignal(const struct ieee80211_node *,
130			int8_t *, int8_t *);
131static void	mwl_node_getmimoinfo(const struct ieee80211_node *,
132			struct ieee80211_mimo_info *);
133static int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
134static void	mwl_rx_proc(void *, int);
135static void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
136static int	mwl_tx_setup(struct mwl_softc *, int, int);
137static int	mwl_wme_update(struct ieee80211com *);
138static void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
139static void	mwl_tx_cleanup(struct mwl_softc *);
140static uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
141static int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
142			     struct mwl_txbuf *, struct mbuf *);
143static void	mwl_tx_proc(void *, int);
144static int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
145static void	mwl_draintxq(struct mwl_softc *);
146static void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
147static void	mwl_recv_action(struct ieee80211_node *,
148			const uint8_t *, const uint8_t *);
149static int	mwl_addba_request(struct ieee80211_node *,
150			struct ieee80211_tx_ampdu *, int dialogtoken,
151			int baparamset, int batimeout);
152static int	mwl_addba_response(struct ieee80211_node *,
153			struct ieee80211_tx_ampdu *, int status,
154			int baparamset, int batimeout);
155static void	mwl_addba_stop(struct ieee80211_node *,
156			struct ieee80211_tx_ampdu *);
157static int	mwl_startrecv(struct mwl_softc *);
158static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
159			struct ieee80211_channel *);
160static int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
161static void	mwl_scan_start(struct ieee80211com *);
162static void	mwl_scan_end(struct ieee80211com *);
163static void	mwl_set_channel(struct ieee80211com *);
164static int	mwl_peerstadb(struct ieee80211_node *,
165			int aid, int staid, MWL_HAL_PEERINFO *pi);
166static int	mwl_localstadb(struct ieee80211vap *);
167static int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
168static int	allocstaid(struct mwl_softc *sc, int aid);
169static void	delstaid(struct mwl_softc *sc, int staid);
170static void	mwl_newassoc(struct ieee80211_node *, int);
171static void	mwl_agestations(void *);
172static int	mwl_setregdomain(struct ieee80211com *,
173			struct ieee80211_regdomain *, int,
174			struct ieee80211_channel []);
175static void	mwl_getradiocaps(struct ieee80211com *, int, int *,
176			struct ieee80211_channel []);
177static int	mwl_getchannels(struct mwl_softc *);
178
179static void	mwl_sysctlattach(struct mwl_softc *);
180static void	mwl_announce(struct mwl_softc *);
181
182SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");
183
184static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
185SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
186	    0, "rx descriptors allocated");
187static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
188SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RW, &mwl_rxbuf,
189	    0, "rx buffers allocated");
190TUNABLE_INT("hw.mwl.rxbuf", &mwl_rxbuf);
191static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
192SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RW, &mwl_txbuf,
193	    0, "tx buffers allocated");
194TUNABLE_INT("hw.mwl.txbuf", &mwl_txbuf);
195static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
196SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RW, &mwl_txcoalesce,
197	    0, "tx buffers to send at once");
198TUNABLE_INT("hw.mwl.txcoalesce", &mwl_txcoalesce);
199static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
200SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RW, &mwl_rxquota,
201	    0, "max rx buffers to process per interrupt");
202TUNABLE_INT("hw.mwl.rxquota", &mwl_rxquota);
203static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
204SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RW, &mwl_rxdmalow,
205	    0, "min free rx buffers before restarting traffic");
206TUNABLE_INT("hw.mwl.rxdmalow", &mwl_rxdmalow);
207
#ifdef MWL_DEBUG
static	int mwl_debug = 0;
SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RW, &mwl_debug,
	    0, "control debugging printfs");
TUNABLE_INT("hw.mwl.debug", &mwl_debug);
/* Bit values for hw.mwl.debug / sc_debug. */
enum {
	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
	MWL_DEBUG_ANY		= 0xffffffff
};
/*
 * NB: macro arguments are parenthesized so callers may pass
 * arbitrary expressions (e.g. a cast pointer) safely.
 */
#define	IS_BEACON(wh) \
    (((wh)->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
#define	IFF_DUMPPKTS_RECV(sc, wh) \
    ((((sc)->sc_debug & MWL_DEBUG_RECV) && \
      (((sc)->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh))) || \
     ((sc)->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#define	IFF_DUMPPKTS_XMIT(sc) \
	(((sc)->sc_debug & MWL_DEBUG_XMIT) || \
	 ((sc)->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#define	DPRINTF(sc, m, fmt, ...) do {				\
	if ((sc)->sc_debug & (m))				\
		printf(fmt, __VA_ARGS__);			\
} while (0)
#define	KEYPRINTF(sc, hk, mac) do {				\
	if ((sc)->sc_debug & MWL_DEBUG_KEYCACHE)		\
		mwl_keyprint(sc, __func__, hk, mac);		\
} while (0)
static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
#else
/* Non-debug stubs: only IFF_DEBUG+IFF_LINK2 forces packet dumps. */
#define	IFF_DUMPPKTS_RECV(sc, wh) \
	(((sc)->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#define	IFF_DUMPPKTS_XMIT(sc) \
	(((sc)->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#define	DPRINTF(sc, m, fmt, ...) do {				\
	(void) sc;						\
} while (0)
#define	KEYPRINTF(sc, k, mac) do {				\
	(void) sc;						\
} while (0)
#endif
263
MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");

/*
 * Each packet has fixed front matter: a 2-byte length
 * of the payload, followed by a 4-address 802.11 header
 * (regardless of the actual header and always w/o any
 * QoS header).  The payload then follows.
 */
struct mwltxrec {
	uint16_t fwlen;				/* f/w length of the payload */
	struct ieee80211_frame_addr4 wh;	/* 4-address 802.11 header */
} __packed;
276
277/*
278 * Read/Write shorthands for accesses to BAR 0.  Note
279 * that all BAR 1 operations are done in the "hal" and
280 * there should be no reference to them here.
281 */
282static __inline uint32_t
283RD4(struct mwl_softc *sc, bus_size_t off)
284{
285	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
286}
287
/*
 * Write a 32-bit register in BAR 0 at the given byte offset.
 */
static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}
293
/*
 * Device attach: allocate the ifnet, attach the hal, load
 * firmware, size the channel list, set up tx/rx dma and the
 * deferred-work tasks, then hook into net80211.  Returns 0 or
 * an errno; on failure sc_invalid is set so a shared interrupt
 * line does not touch the half-initialized device.
 */
int
mwl_attach(uint16_t devid, struct mwl_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct mwl_hal *mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		return ENOSPC;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	mh = mwl_hal_attach(sc->sc_dev, devid,
	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
	if (mh == NULL) {
		if_printf(ifp, "unable to attach HAL\n");
		error = EIO;
		goto bad;
	}
	sc->sc_mh = mh;
	/*
	 * Load firmware so we can get setup.  We arbitrarily
	 * pick station firmware; we'll re-load firmware as
	 * needed so setting up the wrong mode isn't a big deal.
	 */
	if (mwl_hal_fwload(mh, NULL) != 0) {
		if_printf(ifp, "unable to setup builtin firmware\n");
		error = EIO;
		goto bad1;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		if_printf(ifp, "unable to fetch h/w specs\n");
		error = EIO;
		goto bad1;
	}
	error = mwl_getchannels(sc);
	if (error != 0)
		goto bad1;

	sc->sc_txantenna = 0;		/* h/w default */
	sc->sc_rxantenna = 0;		/* h/w default */
	sc->sc_invalid = 0;		/* ready to go, enable int handling */
	sc->sc_ageinterval = MWL_AGEINTERVAL;

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 * We immediately push the information to the firmware
	 * as otherwise it gets upset.
	 */
	error = mwl_dma_setup(sc);
	if (error != 0) {
		if_printf(ifp, "failed to setup descriptors: %d\n", error);
		goto bad1;
	}
	error = mwl_setupdma(sc);	/* push to firmware */
	if (error != 0)			/* NB: mwl_setupdma prints msg */
		goto bad1;

	callout_init(&sc->sc_timer, CALLOUT_MPSAFE);

	/*
	 * NOTE(review): taskqueue_create is called with M_NOWAIT but
	 * the result is not checked for NULL before use -- confirm
	 * allocation cannot fail here or add a check.
	 */
	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	/* deferred work run from the taskqueue, scheduled by mwl_intr */
	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);

	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);

	/* wire up ifnet methods and queues */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = mwl_start;
	ifp->if_watchdog = mwl_watchdog;
	ifp->if_ioctl = mwl_ioctl;
	ifp->if_init = mwl_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
#if 0
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
#endif
		| IEEE80211_C_WDS		/* WDS supported */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WME		/* WME/WMM supported */
		| IEEE80211_C_BURST		/* xmit bursting supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
		| IEEE80211_C_DFS		/* DFS supported */
		;

	ic->ic_htcaps =
		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
#if MWL_AGGR_SIZE == 7935
		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#if 0
		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
#endif
		/* s/w capabilities */
		| IEEE80211_HTC_HT		/* HT operation */
		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
		| IEEE80211_HTC_SMPS		/* SMPS available */
		;

	/*
	 * Mark h/w crypto support.
	 * XXX no way to query h/w support.
	 */
	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
			  |  IEEE80211_CRYPTO_AES_CCM
			  |  IEEE80211_CRYPTO_TKIP
			  |  IEEE80211_CRYPTO_TKIPMIC
			  ;
	/*
	 * Transmit requires space in the packet for a special
	 * format transmit record and optional padding between
	 * this record and the payload.  Ask the net80211 layer
	 * to arrange this when encapsulating packets so we can
	 * add it efficiently.
	 */
	ic->ic_headroom = sizeof(struct mwltxrec) -
		sizeof(struct ieee80211_frame);

	/* call MI attach routine. */
	ieee80211_ifattach(ic, sc->sc_hwspecs.macAddr);
	ic->ic_setregdomain = mwl_setregdomain;
	ic->ic_getradiocaps = mwl_getradiocaps;
	/* override default methods */
	ic->ic_raw_xmit = mwl_raw_xmit;
	ic->ic_newassoc = mwl_newassoc;
	ic->ic_updateslot = mwl_updateslot;
	ic->ic_update_mcast = mwl_update_mcast;
	ic->ic_update_promisc = mwl_update_promisc;
	ic->ic_wme.wme_update = mwl_wme_update;

	ic->ic_node_alloc = mwl_node_alloc;
	/* NB: save the net80211 defaults so our wrappers can chain to them */
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = mwl_node_cleanup;
	sc->sc_node_drain = ic->ic_node_drain;
	ic->ic_node_drain = mwl_node_drain;
	ic->ic_node_getsignal = mwl_node_getsignal;
	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;

	ic->ic_scan_start = mwl_scan_start;
	ic->ic_scan_end = mwl_scan_end;
	ic->ic_set_channel = mwl_set_channel;

	sc->sc_recv_action = ic->ic_recv_action;
	ic->ic_recv_action = mwl_recv_action;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = mwl_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = mwl_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = mwl_addba_stop;

	ic->ic_vap_create = mwl_vap_create;
	ic->ic_vap_delete = mwl_vap_delete;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		MWL_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		MWL_RX_RADIOTAP_PRESENT);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	mwl_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	mwl_announce(sc);
	return 0;
	/* error unwinding: each label undoes the stages above it */
bad2:
	mwl_dma_cleanup(sc);
bad1:
	mwl_hal_detach(mh);
bad:
	if_free(ifp);
	sc->sc_invalid = 1;
	return error;
}
535
/*
 * Device detach: stop the hardware, then tear down state in the
 * reverse order of attach (see the NB below for why the exact
 * order matters).  Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	mwl_stop(ifp, 1);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	mwl_dma_cleanup(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	if_free(ifp);

	return 0;
}
566
567/*
568 * MAC address handling for multiple BSS on the same radio.
569 * The first vap uses the MAC address from the EEPROM.  For
570 * subsequent vap's we set the U/L bit (bit 1) in the MAC
571 * address and use the next six bits as an index.
572 */
573static void
574assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
575{
576	int i;
577
578	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
579		/* NB: we only do this if h/w supports multiple bssid */
580		for (i = 0; i < 32; i++)
581			if ((sc->sc_bssidmask & (1<<i)) == 0)
582				break;
583		if (i != 0)
584			mac[0] |= (i << 2)|0x2;
585	} else
586		i = 0;
587	sc->sc_bssidmask |= 1<<i;
588	if (i == 0)
589		sc->sc_nbssid0++;
590}
591
592static void
593reclaim_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN])
594{
595	int i = mac[0] >> 2;
596	if (i != 0 || --sc->sc_nbssid0 == 0)
597		sc->sc_bssidmask &= ~(1<<i);
598}
599
/*
 * Create a vap of the requested opmode.  A hal vap (firmware
 * bss) is created for hostap and sta vaps; wds vaps piggyback
 * on an existing ap vap's hal vap and monitor vaps need none.
 * ibss/ahdemo are not supported.  Returns the new vap or NULL
 * on failure (with any assigned MAC address reclaimed).
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic,
	const char name[IFNAMSIZ], int unit, int opmode, int flags,
	const uint8_t bssid[IEEE80211_ADDR_LEN],
	const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
		/* pick a bssid slot unless the caller supplied the address */
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		return NULL;
	}

	mvp = (struct mwl_vap *) malloc(sizeof(struct mwl_vap),
	    M_80211_VAP, M_NOWAIT | M_ZERO);
	if (mvp == NULL) {
		if (hvap != NULL) {
			/* undo hal vap creation and address assignment */
			mwl_hal_delvap(hvap);
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
		}
		/* XXX msg */
		return NULL;
	}
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
	if (hvap != NULL)
		IEEE80211_ADDR_COPY(vap->iv_myaddr, mac);
	/* override with driver methods */
	mvp->mv_newstate = vap->iv_newstate;
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	if (opmode == IEEE80211_M_HOSTAP) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status);

	/* bump per-opmode vap counts used for overall mode selection below */
	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}
736
/*
 * Destroy a vap: detach it from net80211, release the hal vap
 * and sta db entry (hostap/sta), update the per-opmode counts,
 * and flush any of its frames still on the tx queues.  The h/w
 * is quiesced (interrupts masked) around the teardown when the
 * parent interface is running.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ifnet *parent = vap->iv_ic->ic_ifp;
	struct mwl_softc *sc = parent->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (parent->if_drv_flags & IFF_DRV_RUNNING) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		/* NB: wds vaps have no hal vap of their own */
		sc->sc_nwdsvaps--;
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	if (parent->if_drv_flags & IFF_DRV_RUNNING)
		mwl_hal_intrset(mh, sc->sc_imask);
}
777
778void
779mwl_suspend(struct mwl_softc *sc)
780{
781	struct ifnet *ifp = sc->sc_ifp;
782
783	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
784		__func__, ifp->if_flags);
785
786	mwl_stop(ifp, 1);
787}
788
789void
790mwl_resume(struct mwl_softc *sc)
791{
792	struct ifnet *ifp = sc->sc_ifp;
793
794	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
795		__func__, ifp->if_flags);
796
797	if (ifp->if_flags & IFF_UP)
798		mwl_init(sc);
799}
800
801void
802mwl_shutdown(void *arg)
803{
804	struct mwl_softc *sc = arg;
805
806	mwl_stop(sc->sc_ifp, 1);
807}
808
809/*
810 * Interrupt handler.  Most of the actual processing is deferred.
811 */
/*
 * Interrupt handler.  Most of the actual processing is deferred
 * to the taskqueue; here we just read+clear the ISR and fan the
 * cause bits out to the appropriate tasks/counters.
 */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	/* rx and tx completion are handled from the taskqueue */
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;	/* nothing to do */
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;	/* nothing to do */
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}
867
868static void
869mwl_radar_proc(void *arg, int pending)
870{
871	struct mwl_softc *sc = arg;
872	struct ifnet *ifp = sc->sc_ifp;
873	struct ieee80211com *ic = ifp->if_l2com;
874
875	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
876	    __func__, pending);
877
878	sc->sc_stats.mst_radardetect++;
879
880	IEEE80211_LOCK(ic);
881	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
882	IEEE80211_UNLOCK(ic);
883}
884
885static void
886mwl_chanswitch_proc(void *arg, int pending)
887{
888	struct mwl_softc *sc = arg;
889	struct ifnet *ifp = sc->sc_ifp;
890	struct ieee80211com *ic = ifp->if_l2com;
891
892	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
893	    __func__, pending);
894
895	IEEE80211_LOCK(ic);
896	sc->sc_csapending = 0;
897	ieee80211_csa_completeswitch(ic);
898	IEEE80211_UNLOCK(ic);
899}
900
/*
 * Tear down a stalled BA stream flagged by the f/w watchdog.
 * NB: the node and tx ampdu state are read from the stream's
 * data[] slots -- presumably stashed when the stream was
 * created; confirm against the bastream setup code.
 */
static void
mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
{
	struct ieee80211_node *ni = sp->data[0];

	/* send DELBA and drop the stream */
	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
}
909
/*
 * Deferred BA watchdog processing: query the f/w for the stuck
 * stream(s) and tear them down.  A bitmap value of 0xff means
 * all streams; 0xaa is treated as "none" (NOTE(review): sentinel
 * values appear to come from the firmware ABI -- confirm).
 */
static void
mwl_bawatchdog_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_BASTREAM *sp;
	uint8_t bitmap, n;

	sc->sc_stats.mst_bawatchdog++;

	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: could not get bitmap\n", __func__);
		sc->sc_stats.mst_bawatchdog_failed++;
		return;
	}
	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
	if (bitmap == 0xff) {
		n = 0;
		/* disable all ba streams */
		for (bitmap = 0; bitmap < 8; bitmap++) {
			sp = mwl_hal_bastream_lookup(mh, bitmap);
			if (sp != NULL) {
				mwl_bawatchdog(sp);
				n++;
			}
		}
		if (n == 0) {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA streams found\n", __func__);
			sc->sc_stats.mst_bawatchdog_empty++;
		}
	} else if (bitmap != 0xaa) {
		/* disable a single ba stream */
		sp = mwl_hal_bastream_lookup(mh, bitmap);
		if (sp != NULL) {
			mwl_bawatchdog(sp);
		} else {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA stream %d\n", __func__, bitmap);
			sc->sc_stats.mst_bawatchdog_notfound++;
		}
	}
}
954
955/*
956 * Convert net80211 channel to a HAL channel.
957 */
958static void
959mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
960{
961	hc->channel = chan->ic_ieee;
962
963	*(uint32_t *)&hc->channelFlags = 0;
964	if (IEEE80211_IS_CHAN_2GHZ(chan))
965		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
966	else if (IEEE80211_IS_CHAN_5GHZ(chan))
967		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
968	if (IEEE80211_IS_CHAN_HT40(chan)) {
969		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
970		if (IEEE80211_IS_CHAN_HT40U(chan))
971			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
972		else
973			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
974	} else
975		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
976	/* XXX 10MHz channels */
977}
978
979/*
980 * Inform firmware of our tx/rx dma setup.  The BAR 0
981 * writes below are for compatibility with older firmware.
982 * For current firmware we send this information with a
983 * cmd block via mwl_hal_sethwdma.
984 */
985static int
986mwl_setupdma(struct mwl_softc *sc)
987{
988	int error, i;
989
990	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
991	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
992	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);
993
994	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
995		struct mwl_txq *txq = &sc->sc_txq[i];
996		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
997		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
998	}
999	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
1000	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES;
1001
1002	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
1003	if (error != 0) {
1004		device_printf(sc->sc_dev,
1005		    "unable to setup tx/rx dma; hal status %u\n", error);
1006		/* XXX */
1007	}
1008	return error;
1009}
1010
1011/*
1012 * Inform firmware of tx rate parameters.
1013 * Called after a channel change.
1014 */
1015static int
1016mwl_setcurchanrates(struct mwl_softc *sc)
1017{
1018	struct ifnet *ifp = sc->sc_ifp;
1019	struct ieee80211com *ic = ifp->if_l2com;
1020	const struct ieee80211_rateset *rs;
1021	MWL_HAL_TXRATE rates;
1022
1023	memset(&rates, 0, sizeof(rates));
1024	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1025	/* rate used to send management frames */
1026	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1027	/* rate used to send multicast frames */
1028	rates.McastRate = rates.MgtRate;
1029
1030	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1031}
1032
1033/*
1034 * Inform firmware of tx rate parameters.  Called whenever
1035 * user-settable params change and after a channel change.
1036 */
1037static int
1038mwl_setrates(struct ieee80211vap *vap)
1039{
1040	struct mwl_vap *mvp = MWL_VAP(vap);
1041	struct ieee80211_node *ni = vap->iv_bss;
1042	const struct ieee80211_txparam *tp = ni->ni_txparms;
1043	MWL_HAL_TXRATE rates;
1044
1045	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1046
1047	/*
1048	 * Update the h/w rate map.
1049	 * NB: 0x80 for MCS is passed through unchanged
1050	 */
1051	memset(&rates, 0, sizeof(rates));
1052	/* rate used to send management frames */
1053	rates.MgtRate = tp->mgmtrate;
1054	/* rate used to send multicast frames */
1055	rates.McastRate = tp->mcastrate;
1056
1057	/* while here calculate EAPOL fixed rate cookie */
1058	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
1059
1060	return mwl_hal_settxrate(mvp->mv_hvap, RATE_AUTO, &rates);
1061}
1062
1063/*
1064 * Setup a fixed xmit rate cookie for EAPOL frames.
1065 */
1066static void
1067mwl_seteapolformat(struct ieee80211vap *vap)
1068{
1069	struct mwl_vap *mvp = MWL_VAP(vap);
1070	struct ieee80211_node *ni = vap->iv_bss;
1071	enum ieee80211_phymode mode;
1072	uint8_t rate;
1073
1074	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1075
1076	mode = ieee80211_chan2mode(ni->ni_chan);
1077	/*
1078	 * Use legacy rates when operating a mixed HT+non-HT bss.
1079	 * NB: this may violate POLA for sta and wds vap's.
1080	 */
1081	if (mode == IEEE80211_MODE_11NA &&
1082	    (vap->iv_flags_ext & IEEE80211_FEXT_PUREN) == 0)
1083		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1084	else if (mode == IEEE80211_MODE_11NG &&
1085	    (vap->iv_flags_ext & IEEE80211_FEXT_PUREN) == 0)
1086		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1087	else
1088		rate = vap->iv_txparms[mode].mgmtrate;
1089
1090	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1091}
1092
1093/*
1094 * Map SKU+country code to region code for radar bin'ing.
1095 */
1096static int
1097mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1098{
1099	switch (rd->regdomain) {
1100	case SKU_FCC:
1101	case SKU_FCC3:
1102		return DOMAIN_CODE_FCC;
1103	case SKU_CA:
1104		return DOMAIN_CODE_IC;
1105	case SKU_ETSI:
1106	case SKU_ETSI2:
1107	case SKU_ETSI3:
1108		if (rd->country == CTRY_SPAIN)
1109			return DOMAIN_CODE_SPAIN;
1110		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1111			return DOMAIN_CODE_FRANCE;
1112		/* XXX force 1.3.1 radar type */
1113		return DOMAIN_CODE_ETSI_131;
1114	case SKU_JAPAN:
1115		return DOMAIN_CODE_MKK;
1116	case SKU_ROW:
1117		return DOMAIN_CODE_DGT;	/* Taiwan */
1118	case SKU_APAC:
1119	case SKU_APAC2:
1120	case SKU_APAC3:
1121		return DOMAIN_CODE_AUS;	/* Australia */
1122	}
1123	/* XXX KOREA? */
1124	return DOMAIN_CODE_FCC;			/* XXX? */
1125}
1126
/*
 * Push vap-independent state to the firmware.
 * NB: always reports success (returns 1); errors from the
 * individual hal calls are ignored here.
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB(review): 'O' presumably marks outdoor operation -- confirm */
	mwl_hal_setrateadaptmode(mh, ic->ic_regdomain.location == 'O');
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	return 1;
}
1147
/*
 * (Re)initialize the hardware with the softc lock held:
 * stop any previous activity, reset the firmware state,
 * start the receive path and enable interrupts.
 *
 * Returns 0 on success, EIO or the recv-start error otherwise.
 */
static int
mwl_init_locked(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop_locked(ifp, 0);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		if_printf(ifp, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.
	 * NB: MAC_EVENT and QUEUE_EMPTY are deliberately left disabled
	 * (the #if 0 blocks below).
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     ;

	/* mark running before unmasking interrupts */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	mwl_hal_intrset(mh, sc->sc_imask);

	return 0;
}
1206
1207static void
1208mwl_init(void *arg)
1209{
1210	struct mwl_softc *sc = arg;
1211	struct ifnet *ifp = sc->sc_ifp;
1212	struct ieee80211com *ic = ifp->if_l2com;
1213	int error = 0;
1214
1215	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
1216		__func__, ifp->if_flags);
1217
1218	MWL_LOCK(sc);
1219	error = mwl_init_locked(sc);
1220	MWL_UNLOCK(sc);
1221
1222	if (error == 0)
1223		ieee80211_start_all(ic);	/* start all vap's */
1224}
1225
/*
 * Shut down the interface with the softc lock held.
 * NB: the 'disable' argument is currently unused here.
 */
static void
mwl_stop_locked(struct ifnet *ifp, int disable)
{
	struct mwl_softc *sc = ifp->if_softc;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	MWL_LOCK_ASSERT(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Shutdown the hardware and driver.
		 */
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		ifp->if_timer = 0;
		mwl_draintxq(sc);
	}
}
1244
/*
 * Locked wrapper around mwl_stop_locked for external callers.
 */
static void
mwl_stop(struct ifnet *ifp, int disable)
{
	struct mwl_softc *sc = ifp->if_softc;

	MWL_LOCK(sc);
	mwl_stop_locked(ifp, disable);
	MWL_UNLOCK(sc);
}
1254
/*
 * Re-push per-vap state to the firmware after a reset or
 * state change; 'state' is the vap's (target) net80211 state.
 * Returns 0 or the beacon setup status when beacons must be
 * re-established.
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ext &
	    (IEEE80211_FEXT_SHORTGI20|IEEE80211_FEXT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}
1283
1284/*
1285 * Reset the hardware w/o losing operational state.
1286 * Used to to reset or reload hardware state for a vap.
1287 */
1288static int
1289mwl_reset(struct ieee80211vap *vap, u_long cmd)
1290{
1291	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1292	int error = 0;
1293
1294	if (hvap != NULL) {			/* WDS, MONITOR, etc. */
1295		struct ieee80211com *ic = vap->iv_ic;
1296		struct ifnet *ifp = ic->ic_ifp;
1297		struct mwl_softc *sc = ifp->if_softc;
1298		struct mwl_hal *mh = sc->sc_mh;
1299
1300		/* XXX do we need to disable interrupts? */
1301		mwl_hal_intrset(mh, 0);		/* disable interrupts */
1302		error = mwl_reset_vap(vap, vap->iv_state);
1303		mwl_hal_intrset(mh, sc->sc_imask);
1304	}
1305	return error;
1306}
1307
1308/*
1309 * Allocate a tx buffer for sending a frame.  The
1310 * packet is assumed to have the WME AC stored so
1311 * we can use it to select the appropriate h/w queue.
1312 */
1313static struct mwl_txbuf *
1314mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1315{
1316	struct mwl_txbuf *bf;
1317
1318	/*
1319	 * Grab a TX buffer and associated resources.
1320	 */
1321	MWL_TXQ_LOCK(txq);
1322	bf = STAILQ_FIRST(&txq->free);
1323	if (bf != NULL) {
1324		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1325		txq->nfree--;
1326	}
1327	MWL_TXQ_UNLOCK(txq);
1328	if (bf == NULL)
1329		DPRINTF(sc, MWL_DEBUG_XMIT,
1330		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1331	return bf;
1332}
1333
1334/*
1335 * Return a tx buffer to the queue it came from.  Note there
1336 * are two cases because we must preserve the order of buffers
1337 * as it reflects the fixed order of descriptors in memory
1338 * (the firmware pre-fetches descriptors so we cannot reorder).
1339 */
1340static void
1341mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1342{
1343	bf->bf_m = NULL;
1344	bf->bf_node = NULL;
1345	MWL_TXQ_LOCK(txq);
1346	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1347	txq->nfree++;
1348	MWL_TXQ_UNLOCK(txq);
1349}
1350
/*
 * Tail variant of mwl_puttxbuf_head: used when the buffer's
 * descriptor has been consumed, preserving descriptor order.
 */
static void
mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
{
	bf->bf_m = NULL;
	bf->bf_node = NULL;
	MWL_TXQ_LOCK(txq);
	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
	txq->nfree++;
	MWL_TXQ_UNLOCK(txq);
}
1361
/*
 * ifnet transmit start: drain the send queue, map each frame
 * onto the h/w queue chosen by its WME access category and hand
 * it to the firmware, coalescing "tx kick" commands to at most
 * one per mwl_txcoalesce frames.
 */
static void
mwl_start(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	nqueued = 0;
	for (;;) {
		bf = NULL;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		/*
		 * Grab the node for the destination.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			/* flow-control: stop the queue instead of dropping */
			sc->sc_stats.mst_tx_qstop++;
			/* XXX blocks other traffic */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 * NB(review): on failure the mbuf is not freed here;
		 * presumably mwl_tx_start owns and frees it -- confirm.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			ifp->if_oerrors++;
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host. So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1444
/*
 * Transmit a raw (caller-formed) 802.11 frame.
 * Returns 0 on success; ENETDOWN, ENOBUFS or EIO on failure.
 * On the error paths the node reference is released here; the
 * mbuf is freed except after mwl_tx_start failure (see note).
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
		ieee80211_free_node(ni);
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		/* XXX blocks other traffic */
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		ieee80211_free_node(ni);
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 * NB(review): m is not freed on this path; presumably
	 * mwl_tx_start takes ownership on failure -- confirm
	 * (mwl_start follows the same convention).
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		ifp->if_oerrors++;
		mwl_puttxbuf_head(txq, bf);

		ieee80211_free_node(ni);
		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host. So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1504
1505static int
1506mwl_media_change(struct ifnet *ifp)
1507{
1508	struct ieee80211vap *vap = ifp->if_softc;
1509	int error;
1510
1511	error = ieee80211_media_change(ifp);
1512	/* NB: only the fixed rate can change and that doesn't need a reset */
1513	if (error == ENETRESET) {
1514		mwl_setrates(vap);
1515		error = 0;
1516	}
1517	return error;
1518}
1519
#ifdef MWL_DEBUG
/*
 * Debug dump of a HAL key value: index, cipher, key bytes,
 * peer mac, TKIP MIC material and flags.
 */
static void
mwl_keyprint(struct mwl_softc *sc, const char *tag,
	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	/* NB: indexed by KEY_TYPE_ID_* values */
	static const char *ciphers[] = {
		"WEP",
		"TKIP",
		"AES-CCM",
	};
	int i, n;

	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
	/* NB: key bytes overlay for all ciphers; aes[] is the raw view */
	for (i = 0, n = hk->keyLen; i < n; i++)
		printf(" %02x", hk->key.aes[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
		printf(" %s", "rxmic");
		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
			printf(" %02x", hk->key.tkip.rxMic[i]);
		printf(" txmic");
		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
			printf(" %02x", hk->key.tkip.txMic[i]);
	}
	printf(" flags 0x%x\n", hk->keyFlags);
}
#endif
1547
1548/*
1549 * Allocate a key cache slot for a unicast key.  The
1550 * firmware handles key allocation and every station is
1551 * guaranteed key space so we are always successful.
1552 */
1553static int
1554mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
1555	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1556{
1557	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1558
1559	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
1560	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
1561		if (!(&vap->iv_nw_keys[0] <= k &&
1562		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
1563			/* should not happen */
1564			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1565				"%s: bogus group key\n", __func__);
1566			return 0;
1567		}
1568		/* give the caller what they requested */
1569		*keyix = *rxkeyix = k - vap->iv_nw_keys;
1570	} else {
1571		/*
1572		 * Firmware handles key allocation.
1573		 */
1574		*keyix = *rxkeyix = 0;
1575	}
1576	return 1;
1577}
1578
1579/*
1580 * Delete a key entry allocated by mwl_key_alloc.
1581 */
1582static int
1583mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
1584{
1585	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1586	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1587	MWL_HAL_KEYVAL hk;
1588	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
1589	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1590
1591	if (hvap == NULL) {
1592		if (vap->iv_opmode != IEEE80211_M_WDS) {
1593			/* XXX monitor mode? */
1594			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1595			    "%s: no hvap for opmode %d\n", __func__,
1596			    vap->iv_opmode);
1597			return 0;
1598		}
1599		hvap = MWL_VAP(vap)->mv_ap_hvap;
1600	}
1601
1602	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
1603	    __func__, k->wk_keyix);
1604
1605	memset(&hk, 0, sizeof(hk));
1606	hk.keyIndex = k->wk_keyix;
1607	switch (k->wk_cipher->ic_cipher) {
1608	case IEEE80211_CIPHER_WEP:
1609		hk.keyTypeId = KEY_TYPE_ID_WEP;
1610		break;
1611	case IEEE80211_CIPHER_TKIP:
1612		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1613		break;
1614	case IEEE80211_CIPHER_AES_CCM:
1615		hk.keyTypeId = KEY_TYPE_ID_AES;
1616		break;
1617	default:
1618		/* XXX should not happen */
1619		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1620		    __func__, k->wk_cipher->ic_cipher);
1621		return 0;
1622	}
1623	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
1624}
1625
1626static __inline int
1627addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1628{
1629	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1630		if (k->wk_flags & IEEE80211_KEY_XMIT)
1631			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1632		if (k->wk_flags & IEEE80211_KEY_RECV)
1633			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1634		return 1;
1635	} else
1636		return 0;
1637}
1638
1639/*
1640 * Set the key cache contents for the specified key.  Key cache
1641 * slot(s) must already have been allocated by mwl_key_alloc.
1642 */
1643static int
1644mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
1645	const uint8_t mac[IEEE80211_ADDR_LEN])
1646{
1647#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
1648/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
1649#define	IEEE80211_IS_STATICKEY(k) \
1650	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
1651	 (GRPXMIT|IEEE80211_KEY_RECV))
1652	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1653	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1654	const struct ieee80211_cipher *cip = k->wk_cipher;
1655	const uint8_t *macaddr;
1656	MWL_HAL_KEYVAL hk;
1657
1658	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
1659		("s/w crypto set?"));
1660
1661	if (hvap == NULL) {
1662		if (vap->iv_opmode != IEEE80211_M_WDS) {
1663			/* XXX monitor mode? */
1664			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1665			    "%s: no hvap for opmode %d\n", __func__,
1666			    vap->iv_opmode);
1667			return 0;
1668		}
1669		hvap = MWL_VAP(vap)->mv_ap_hvap;
1670	}
1671	memset(&hk, 0, sizeof(hk));
1672	hk.keyIndex = k->wk_keyix;
1673	switch (cip->ic_cipher) {
1674	case IEEE80211_CIPHER_WEP:
1675		hk.keyTypeId = KEY_TYPE_ID_WEP;
1676		hk.keyLen = k->wk_keylen;
1677		if (k->wk_keyix == vap->iv_def_txkey)
1678			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
1679		if (!IEEE80211_IS_STATICKEY(k)) {
1680			/* NB: WEP is never used for the PTK */
1681			(void) addgroupflags(&hk, k);
1682		}
1683		break;
1684	case IEEE80211_CIPHER_TKIP:
1685		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1686		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
1687		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
1688		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
1689		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
1690		if (!addgroupflags(&hk, k))
1691			hk.keyFlags |= KEY_FLAG_PAIRWISE;
1692		break;
1693	case IEEE80211_CIPHER_AES_CCM:
1694		hk.keyTypeId = KEY_TYPE_ID_AES;
1695		hk.keyLen = k->wk_keylen;
1696		if (!addgroupflags(&hk, k))
1697			hk.keyFlags |= KEY_FLAG_PAIRWISE;
1698		break;
1699	default:
1700		/* XXX should not happen */
1701		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1702		    __func__, k->wk_cipher->ic_cipher);
1703		return 0;
1704	}
1705	/*
1706	 * NB: tkip mic keys get copied here too; the layout
1707	 *     just happens to match that in ieee80211_key.
1708	 */
1709	memcpy(hk.key.aes, k->wk_key, hk.keyLen);
1710
1711	/*
1712	 * Locate address of sta db entry for writing key;
1713	 * the convention unfortunately is somewhat different
1714	 * than how net80211, hostapd, and wpa_supplicant think.
1715	 */
1716	if (vap->iv_opmode == IEEE80211_M_STA) {
1717		/*
1718		 * NB: keys plumbed before the sta reaches AUTH state
1719		 * will be discarded or written to the wrong sta db
1720		 * entry because iv_bss is meaningless.  This is ok
1721		 * (right now) because we handle deferred plumbing of
1722		 * WEP keys when the sta reaches AUTH state.
1723		 */
1724		macaddr = vap->iv_bss->ni_bssid;
1725	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
1726	    vap->iv_state != IEEE80211_S_RUN) {
1727		/*
1728		 * Prior to RUN state a WDS vap will not it's BSS node
1729		 * setup so we will plumb the key to the wrong mac
1730		 * address (it'll be our local address).  Workaround
1731		 * this for the moment by grabbing the correct address.
1732		 */
1733		macaddr = vap->iv_des_bssid;
1734	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
1735		macaddr = vap->iv_myaddr;
1736	else
1737		macaddr = mac;
1738	KEYPRINTF(sc, &hk, macaddr);
1739	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
1740#undef IEEE80211_IS_STATICKEY
1741#undef GRPXMIT
1742}
1743
1744/* unaligned little endian access */
1745#define LE_READ_2(p)				\
1746	((uint16_t)				\
1747	 ((((const uint8_t *)(p))[0]      ) |	\
1748	  (((const uint8_t *)(p))[1] <<  8)))
1749#define LE_READ_4(p)				\
1750	((uint32_t)				\
1751	 ((((const uint8_t *)(p))[0]      ) |	\
1752	  (((const uint8_t *)(p))[1] <<  8) |	\
1753	  (((const uint8_t *)(p))[2] << 16) |	\
1754	  (((const uint8_t *)(p))[3] << 24)))
1755
1756/*
1757 * Set the multicast filter contents into the hardware.
1758 * XXX f/w has no support; just defer to the os.
1759 */
1760static void
1761mwl_setmcastfilter(struct mwl_softc *sc)
1762{
1763	struct ifnet *ifp = sc->sc_ifp;
1764#if 0
1765	struct ether_multi *enm;
1766	struct ether_multistep estep;
1767	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
1768	uint8_t *mp;
1769	int nmc;
1770
1771	mp = macs;
1772	nmc = 0;
1773	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
1774	while (enm != NULL) {
1775		/* XXX Punt on ranges. */
1776		if (nmc == MWL_HAL_MCAST_MAX ||
1777		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
1778			ifp->if_flags |= IFF_ALLMULTI;
1779			return;
1780		}
1781		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
1782		mp += IEEE80211_ADDR_LEN, nmc++;
1783		ETHER_NEXT_MULTI(estep, enm);
1784	}
1785	ifp->if_flags &= ~IFF_ALLMULTI;
1786	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
1787#else
1788	/* XXX no mcast filter support; we get everything */
1789	ifp->if_flags |= IFF_ALLMULTI;
1790#endif
1791}
1792
1793static int
1794mwl_mode_init(struct mwl_softc *sc)
1795{
1796	struct ifnet *ifp = sc->sc_ifp;
1797	struct ieee80211com *ic = ifp->if_l2com;
1798	struct mwl_hal *mh = sc->sc_mh;
1799
1800	/*
1801	 * NB: Ignore promisc in hostap mode; it's set by the
1802	 * bridge.  This is wrong but we have no way to
1803	 * identify internal requests (from the bridge)
1804	 * versus external requests such as for tcpdump.
1805	 */
1806	mwl_hal_setpromisc(mh, (ifp->if_flags & IFF_PROMISC) &&
1807	    ic->ic_opmode != IEEE80211_M_HOSTAP);
1808	mwl_setmcastfilter(sc);
1809
1810	return 0;
1811}
1812
1813/*
1814 * Callback from the 802.11 layer after a multicast state change.
1815 */
1816static void
1817mwl_update_mcast(struct ifnet *ifp)
1818{
1819	struct mwl_softc *sc = ifp->if_softc;
1820
1821	mwl_setmcastfilter(sc);
1822}
1823
1824/*
1825 * Callback from the 802.11 layer after a promiscuous mode change.
1826 * Note this interface does not check the operating mode as this
1827 * is an internal callback and we are expected to honor the current
1828 * state (e.g. this is used for setting the interface in promiscuous
1829 * mode when operating in hostap mode to do ACS).
1830 */
1831static void
1832mwl_update_promisc(struct ifnet *ifp)
1833{
1834	struct mwl_softc *sc = ifp->if_softc;
1835
1836	mwl_hal_setpromisc(sc->sc_mh, (ifp->if_flags & IFF_PROMISC) != 0);
1837}
1838
1839/*
1840 * Callback from the 802.11 layer to update the slot time
1841 * based on the current setting.  We use it to notify the
1842 * firmware of ERP changes and the f/w takes care of things
1843 * like slot time and preamble.
1844 */
1845static void
1846mwl_updateslot(struct ifnet *ifp)
1847{
1848	struct mwl_softc *sc = ifp->if_softc;
1849	struct ieee80211com *ic = ifp->if_l2com;
1850	struct mwl_hal *mh = sc->sc_mh;
1851	int prot;
1852
1853	/* NB: can be called early; suppress needless cmds */
1854	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1855		return;
1856
1857	/*
1858	 * Calculate the ERP flags.  The firwmare will use
1859	 * this to carry out the appropriate measures.
1860	 */
1861	prot = 0;
1862	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
1863		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
1864			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
1865		if (ic->ic_flags & IEEE80211_F_USEPROT)
1866			prot |= IEEE80211_ERP_USE_PROTECTION;
1867		if (ic->ic_flags & IEEE80211_F_USEBARKER)
1868			prot |= IEEE80211_ERP_LONG_PREAMBLE;
1869	}
1870
1871	DPRINTF(sc, MWL_DEBUG_RESET,
1872	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
1873	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
1874	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
1875	    ic->ic_flags);
1876
1877	mwl_hal_setgprot(mh, prot);
1878}
1879
1880/*
1881 * Setup the beacon frame.
1882 */
1883static int
1884mwl_beacon_setup(struct ieee80211vap *vap)
1885{
1886	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1887	struct ieee80211_node *ni = vap->iv_bss;
1888	struct ieee80211_beacon_offsets bo;
1889	struct mbuf *m;
1890
1891	m = ieee80211_beacon_alloc(ni, &bo);
1892	if (m == NULL)
1893		return ENOBUFS;
1894	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1895	m_free(m);
1896
1897	return 0;
1898}
1899
1900/*
1901 * Update the beacon frame in response to a change.
1902 */
1903static void
1904mwl_beacon_update(struct ieee80211vap *vap, int item)
1905{
1906	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1907	struct ieee80211com *ic = vap->iv_ic;
1908
1909	KASSERT(hvap != NULL, ("no beacon"));
1910	switch (item) {
1911	case IEEE80211_BEACON_ERP:
1912		mwl_updateslot(ic->ic_ifp);
1913		break;
1914	case IEEE80211_BEACON_HTINFO:
1915		mwl_hal_setnprotmode(hvap,
1916		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1917		break;
1918	case IEEE80211_BEACON_CAPS:
1919	case IEEE80211_BEACON_WME:
1920	case IEEE80211_BEACON_APPIE:
1921	case IEEE80211_BEACON_CSA:
1922		break;
1923	case IEEE80211_BEACON_TIM:
1924		/* NB: firmware always forms TIM */
1925		return;
1926	}
1927	/* XXX retain beacon frame and update */
1928	mwl_beacon_setup(vap);
1929}
1930
/*
 * bus_dmamap_load callback: record the single segment's bus
 * address through the caller-supplied pointer.
 */
static void
mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	KASSERT(error == 0, ("error %u on bus_dma callback", error));
	/* NB: maps are created with nsegments=1 so segs[0] suffices */
	*paddr = segs->ds_addr;
}
1938
#ifdef MWL_HOST_PS_SUPPORT
/*
 * Handle power save station occupancy changes.
 * Only notify the firmware when the count transitions to or
 * from zero (i.e. when bss-wide powersave buffering needs to
 * be switched on or off).
 */
static void
mwl_update_ps(struct ieee80211vap *vap, int nsta)
{
	struct mwl_vap *mvp = MWL_VAP(vap);

	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
	mvp->mv_last_ps_sta = nsta;
}
1952
1953/*
1954 * Handle associated station power save state changes.
1955 */
1956static int
1957mwl_set_tim(struct ieee80211_node *ni, int set)
1958{
1959	struct ieee80211vap *vap = ni->ni_vap;
1960	struct mwl_vap *mvp = MWL_VAP(vap);
1961
1962	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1963		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1964		    IEEE80211_AID(ni->ni_associd), set);
1965		return 1;
1966	} else
1967		return 0;
1968}
1969#endif /* MWL_HOST_PS_SUPPORT */
1970
1971static int
1972mwl_desc_setup(struct mwl_softc *sc, const char *name,
1973	struct mwl_descdma *dd,
1974	int nbuf, size_t bufsize, int ndesc, size_t descsize)
1975{
1976	struct ifnet *ifp = sc->sc_ifp;
1977	uint8_t *ds;
1978	int error;
1979
1980	DPRINTF(sc, MWL_DEBUG_RESET,
1981	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
1982	    __func__, name, nbuf, (uintmax_t) bufsize,
1983	    ndesc, (uintmax_t) descsize);
1984
1985	dd->dd_name = name;
1986	dd->dd_desc_len = nbuf * ndesc * descsize;
1987
1988	/*
1989	 * Setup DMA descriptor area.
1990	 */
1991	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
1992		       PAGE_SIZE, 0,		/* alignment, bounds */
1993		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1994		       BUS_SPACE_MAXADDR,	/* highaddr */
1995		       NULL, NULL,		/* filter, filterarg */
1996		       dd->dd_desc_len,		/* maxsize */
1997		       1,			/* nsegments */
1998		       dd->dd_desc_len,		/* maxsegsize */
1999		       BUS_DMA_ALLOCNOW,	/* flags */
2000		       NULL,			/* lockfunc */
2001		       NULL,			/* lockarg */
2002		       &dd->dd_dmat);
2003	if (error != 0) {
2004		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
2005		return error;
2006	}
2007
2008	/* allocate descriptors */
2009	error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
2010	if (error != 0) {
2011		if_printf(ifp, "unable to create dmamap for %s descriptors, "
2012			"error %u\n", dd->dd_name, error);
2013		goto fail0;
2014	}
2015
2016	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
2017				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2018				 &dd->dd_dmamap);
2019	if (error != 0) {
2020		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
2021			"error %u\n", nbuf * ndesc, dd->dd_name, error);
2022		goto fail1;
2023	}
2024
2025	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
2026				dd->dd_desc, dd->dd_desc_len,
2027				mwl_load_cb, &dd->dd_desc_paddr,
2028				BUS_DMA_NOWAIT);
2029	if (error != 0) {
2030		if_printf(ifp, "unable to map %s descriptors, error %u\n",
2031			dd->dd_name, error);
2032		goto fail2;
2033	}
2034
2035	ds = dd->dd_desc;
2036	memset(ds, 0, dd->dd_desc_len);
2037	DPRINTF(sc, MWL_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
2038	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
2039	    (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
2040
2041	return 0;
2042fail2:
2043	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
2044fail1:
2045	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
2046fail0:
2047	bus_dma_tag_destroy(dd->dd_dmat);
2048	memset(dd, 0, sizeof(*dd));
2049	return error;
2050#undef DS2PHYS
2051}
2052
/*
 * Undo mwl_desc_setup: unload and release the descriptor DMA
 * area and destroy the associated map and tag.  The busdma
 * teardown order (unload, free, destroy map, destroy tag)
 * must not be rearranged.
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	/* zero so dd_desc_len != 0 checks see this as torn down */
	memset(dd, 0, sizeof(*dd));
}
2063
2064/*
2065 * Construct a tx q's free list.  The order of entries on
2066 * the list must reflect the physical layout of tx descriptors
2067 * because the firmware pre-fetches descriptors.
2068 *
2069 * XXX might be better to use indices into the buffer array.
2070 */
2071static void
2072mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2073{
2074	struct mwl_txbuf *bf;
2075	int i;
2076
2077	bf = txq->dma.dd_bufptr;
2078	STAILQ_INIT(&txq->free);
2079	for (i = 0; i < mwl_txbuf; i++, bf++)
2080		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2081	txq->nfree = i;
2082}
2083
2084#define	DS2PHYS(_dd, _ds) \
2085	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2086
2087static int
2088mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
2089{
2090	struct ifnet *ifp = sc->sc_ifp;
2091	int error, bsize, i;
2092	struct mwl_txbuf *bf;
2093	struct mwl_txdesc *ds;
2094
2095	error = mwl_desc_setup(sc, "tx", &txq->dma,
2096			mwl_txbuf, sizeof(struct mwl_txbuf),
2097			MWL_TXDESC, sizeof(struct mwl_txdesc));
2098	if (error != 0)
2099		return error;
2100
2101	/* allocate and setup tx buffers */
2102	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
2103	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2104	if (bf == NULL) {
2105		if_printf(ifp, "malloc of %u tx buffers failed\n",
2106			mwl_txbuf);
2107		return ENOMEM;
2108	}
2109	txq->dma.dd_bufptr = bf;
2110
2111	ds = txq->dma.dd_desc;
2112	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
2113		bf->bf_desc = ds;
2114		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
2115		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
2116				&bf->bf_dmamap);
2117		if (error != 0) {
2118			if_printf(ifp, "unable to create dmamap for tx "
2119				"buffer %u, error %u\n", i, error);
2120			return error;
2121		}
2122	}
2123	mwl_txq_reset(sc, txq);
2124	return 0;
2125}
2126
2127static void
2128mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2129{
2130	struct mwl_txbuf *bf;
2131	int i;
2132
2133	bf = txq->dma.dd_bufptr;
2134	for (i = 0; i < mwl_txbuf; i++, bf++) {
2135		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2136		KASSERT(bf->bf_node == NULL, ("node on free list"));
2137		if (bf->bf_dmamap != NULL)
2138			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2139	}
2140	STAILQ_INIT(&txq->free);
2141	txq->nfree = 0;
2142	if (txq->dma.dd_bufptr != NULL) {
2143		free(txq->dma.dd_bufptr, M_MWLDEV);
2144		txq->dma.dd_bufptr = NULL;
2145	}
2146	if (txq->dma.dd_desc_len != 0)
2147		mwl_desc_cleanup(sc, &txq->dma);
2148}
2149
2150static int
2151mwl_rxdma_setup(struct mwl_softc *sc)
2152{
2153	struct ifnet *ifp = sc->sc_ifp;
2154	int error, jumbosize, bsize, i;
2155	struct mwl_rxbuf *bf;
2156	struct mwl_jumbo *rbuf;
2157	struct mwl_rxdesc *ds;
2158	caddr_t data;
2159
2160	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2161			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2162			1, sizeof(struct mwl_rxdesc));
2163	if (error != 0)
2164		return error;
2165
2166	/*
2167	 * Receive is done to a private pool of jumbo buffers.
2168	 * This allows us to attach to mbuf's and avoid re-mapping
2169	 * memory on each rx we post.  We allocate a large chunk
2170	 * of memory and manage it in the driver.  The mbuf free
2171	 * callback method is used to reclaim frames after sending
2172	 * them up the stack.  By default we allocate 2x the number of
2173	 * rx descriptors configured so we have some slop to hold
2174	 * us while frames are processed.
2175	 */
2176	if (mwl_rxbuf < 2*mwl_rxdesc) {
2177		if_printf(ifp,
2178		    "too few rx dma buffers (%d); increasing to %d\n",
2179		    mwl_rxbuf, 2*mwl_rxdesc);
2180		mwl_rxbuf = 2*mwl_rxdesc;
2181	}
2182	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2183	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2184
2185	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2186		       PAGE_SIZE, 0,		/* alignment, bounds */
2187		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2188		       BUS_SPACE_MAXADDR,	/* highaddr */
2189		       NULL, NULL,		/* filter, filterarg */
2190		       sc->sc_rxmemsize,	/* maxsize */
2191		       1,			/* nsegments */
2192		       sc->sc_rxmemsize,	/* maxsegsize */
2193		       BUS_DMA_ALLOCNOW,	/* flags */
2194		       NULL,			/* lockfunc */
2195		       NULL,			/* lockarg */
2196		       &sc->sc_rxdmat);
2197	error = bus_dmamap_create(sc->sc_rxdmat, BUS_DMA_NOWAIT, &sc->sc_rxmap);
2198	if (error != 0) {
2199		if_printf(ifp, "could not create rx DMA map\n");
2200		return error;
2201	}
2202
2203	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2204				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2205				 &sc->sc_rxmap);
2206	if (error != 0) {
2207		if_printf(ifp, "could not alloc %ju bytes of rx DMA memory\n",
2208		    (uintmax_t) sc->sc_rxmemsize);
2209		return error;
2210	}
2211
2212	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2213				sc->sc_rxmem, sc->sc_rxmemsize,
2214				mwl_load_cb, &sc->sc_rxmem_paddr,
2215				BUS_DMA_NOWAIT);
2216	if (error != 0) {
2217		if_printf(ifp, "could not load rx DMA map\n");
2218		return error;
2219	}
2220
2221	/*
2222	 * Allocate rx buffers and set them up.
2223	 */
2224	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2225	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2226	if (bf == NULL) {
2227		if_printf(ifp, "malloc of %u rx buffers failed\n", bsize);
2228		return error;
2229	}
2230	sc->sc_rxdma.dd_bufptr = bf;
2231
2232	STAILQ_INIT(&sc->sc_rxbuf);
2233	ds = sc->sc_rxdma.dd_desc;
2234	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2235		bf->bf_desc = ds;
2236		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2237		/* pre-assign dma buffer */
2238		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2239		/* NB: tail is intentional to preserve descriptor order */
2240		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2241	}
2242
2243	/*
2244	 * Place remainder of dma memory buffers on the free list.
2245	 */
2246	SLIST_INIT(&sc->sc_rxfree);
2247	for (; i < mwl_rxbuf; i++) {
2248		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2249		rbuf = MWL_JUMBO_DATA2BUF(data);
2250		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2251		sc->sc_nrxfree++;
2252	}
2253	MWL_RXFREE_INIT(sc);
2254	return 0;
2255}
2256#undef DS2PHYS
2257
/*
 * Undo mwl_rxdma_setup.  Tolerates partially-initialized state
 * (each resource is released only if present) because it is also
 * reached on the setup error path via mwl_dma_cleanup.  Teardown
 * order (unload, free memory, destroy map) is busdma-mandated.
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	if (sc->sc_rxmap != NULL)
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxmap != NULL) {
		bus_dmamap_destroy(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmap = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
	MWL_RXFREE_DESTROY(sc);
}
2279
2280static int
2281mwl_dma_setup(struct mwl_softc *sc)
2282{
2283	int error, i;
2284
2285	error = mwl_rxdma_setup(sc);
2286	if (error != 0)
2287		return error;
2288
2289	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2290		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2291		if (error != 0) {
2292			mwl_dma_cleanup(sc);
2293			return error;
2294		}
2295	}
2296	return 0;
2297}
2298
2299static void
2300mwl_dma_cleanup(struct mwl_softc *sc)
2301{
2302	int i;
2303
2304	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2305		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2306	mwl_rxdma_cleanup(sc);
2307}
2308
2309static struct ieee80211_node *
2310mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2311{
2312	struct ieee80211com *ic = vap->iv_ic;
2313	struct mwl_softc *sc = ic->ic_ifp->if_softc;
2314	const size_t space = sizeof(struct mwl_node);
2315	struct mwl_node *mn;
2316
2317	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2318	if (mn == NULL) {
2319		/* XXX stat+msg */
2320		return NULL;
2321	}
2322	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2323	return &mn->mn_node;
2324}
2325
2326static void
2327mwl_node_cleanup(struct ieee80211_node *ni)
2328{
2329	struct ieee80211com *ic = ni->ni_ic;
2330        struct mwl_softc *sc = ic->ic_ifp->if_softc;
2331	struct mwl_node *mn = MWL_NODE(ni);
2332
2333	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
2334	    __func__, ni, ni->ni_ic, mn->mn_staid);
2335
2336	if (mn->mn_staid != 0) {
2337		struct ieee80211vap *vap = ni->ni_vap;
2338
2339		if (mn->mn_hvap != NULL) {
2340			if (vap->iv_opmode == IEEE80211_M_STA)
2341				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
2342			else
2343				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
2344		}
2345		/*
2346		 * NB: legacy WDS peer sta db entry is installed using
2347		 * the associate ap's hvap; use it again to delete it.
2348		 * XXX can vap be NULL?
2349		 */
2350		else if (vap->iv_opmode == IEEE80211_M_WDS &&
2351		    MWL_VAP(vap)->mv_ap_hvap != NULL)
2352			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
2353			    ni->ni_macaddr);
2354		delstaid(sc, mn->mn_staid);
2355		mn->mn_staid = 0;
2356	}
2357	sc->sc_node_cleanup(ni);
2358}
2359
2360/*
2361 * Reclaim rx dma buffers from packets sitting on the ampdu
2362 * reorder queue for a station.  We replace buffers with a
2363 * system cluster (if available).
2364 */
static void
mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
{
	/*
	 * NB: currently a no-op; the code below is compiled out.
	 * It appears to be NetBSD-style mbuf/pool_cache code
	 * (MEXTREMOVE, pool_cache_get_paddr) — TODO: port to
	 * FreeBSD mbuf externals before enabling.
	 */
#if 0
	int i, n, off;
	struct mbuf *m;
	void *cl;

	n = rap->rxa_qframes;
	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
		m = rap->rxa_m[i];
		if (m == NULL)
			continue;
		n--;
		/* our dma buffers have a well-known free routine */
		if ((m->m_flags & M_EXT) == 0 ||
		    m->m_ext.ext_free != mwl_ext_free)
			continue;
		/*
		 * Try to allocate a cluster and move the data.
		 */
		off = m->m_data - m->m_ext.ext_buf;
		if (off + m->m_pkthdr.len > MCLBYTES) {
			/* XXX no AMSDU for now */
			continue;
		}
		cl = pool_cache_get_paddr(&mclpool_cache, 0,
		    &m->m_ext.ext_paddr);
		if (cl != NULL) {
			/*
			 * Copy the existing data to the cluster, remove
			 * the rx dma buffer, and attach the cluster in
			 * its place.  Note we preserve the offset to the
			 * data so frames being bridged can still prepend
			 * their headers without adding another mbuf.
			 */
			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
			MEXTREMOVE(m);
			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
			/* setup mbuf like _MCLGET does */
			m->m_flags |= M_CLUSTER | M_EXT_RW;
			_MOWNERREF(m, M_EXT | M_CLUSTER);
			/* NB: m_data is clobbered by MEXTADDR, adjust */
			m->m_data += off;
		}
	}
#endif
}
2413
2414/*
2415 * Callback to reclaim resources.  We first let the
2416 * net80211 layer do it's thing, then if we are still
2417 * blocked by a lack of rx dma buffers we walk the ampdu
2418 * reorder q's to reclaim buffers by copying to a system
2419 * cluster.
2420 */
2421static void
2422mwl_node_drain(struct ieee80211_node *ni)
2423{
2424	struct ieee80211com *ic = ni->ni_ic;
2425        struct mwl_softc *sc = ic->ic_ifp->if_softc;
2426	struct mwl_node *mn = MWL_NODE(ni);
2427
2428	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2429	    __func__, ni, ni->ni_vap, mn->mn_staid);
2430
2431	/* NB: call up first to age out ampdu q's */
2432	sc->sc_node_drain(ni);
2433
2434	/* XXX better to not check low water mark? */
2435	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2436	    (ni->ni_flags & IEEE80211_NODE_HT)) {
2437		uint8_t tid;
2438		/*
2439		 * Walk the reorder q and reclaim rx dma buffers by copying
2440		 * the packet contents into clusters.
2441		 */
2442		for (tid = 0; tid < WME_NUM_TID; tid++) {
2443			struct ieee80211_rx_ampdu *rap;
2444
2445			rap = &ni->ni_rx_ampdu[tid];
2446			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2447				continue;
2448			if (rap->rxa_qframes)
2449				mwl_ampdu_rxdma_reclaim(rap);
2450		}
2451	}
2452}
2453
/*
 * Return rssi and noise floor for a node.  Rssi comes from the
 * net80211 accumulated value; the noise floor is a fixed guess
 * since smoothed per-antenna data is not yet available.
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2469
2470/*
2471 * Convert Hardware per-antenna rssi info to common format:
2472 * Let a1, a2, a3 represent the amplitudes per chain
2473 * Let amax represent max[a1, a2, a3]
2474 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2475 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2476 * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2477 * maintain some extra precision.
2478 *
2479 * Values are stored in .5 db format capped at 127.
2480 */
static void
mwl_node_getmimoinfo(const struct ieee80211_node *ni,
	struct ieee80211_mimo_info *mi)
{
/* convert one chain: rssi + (20*log10(a) - 20*log10(amax)), clamp, .5dB units */
#define	CVT(_dst, _src) do {						\
	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
} while (0)
	/* 4*20*log10(idx) (see block comment above for the scaling) */
	static const int8_t logdbtbl[32] = {
	       0,   0,  24,  38,  48,  56,  62,  68,
	      72,  76,  80,  83,  86,  89,  92,  94,
	      96,  98, 100, 102, 104, 106, 107, 109,
	     110, 112, 113, 115, 116, 117, 118, 119
	};
	const struct mwl_node *mn = MWL_NODE_CONST(ni);
	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
	uint32_t rssi_max;

	/* amax = max of the per-chain amplitudes */
	rssi_max = mn->mn_ai.rssi_a;
	if (mn->mn_ai.rssi_b > rssi_max)
		rssi_max = mn->mn_ai.rssi_b;
	if (mn->mn_ai.rssi_c > rssi_max)
		rssi_max = mn->mn_ai.rssi_c;

	/* NOTE(review): assumes hw rssi_a/b/c values are < 32 so they
	 * index logdbtbl in bounds — confirm against firmware docs */
	CVT(mi->rssi[0], mn->mn_ai.rssi_a);
	CVT(mi->rssi[1], mn->mn_ai.rssi_b);
	CVT(mi->rssi[2], mn->mn_ai.rssi_c);

	mi->noise[0] = mn->mn_ai.nf_a;
	mi->noise[1] = mn->mn_ai.nf_b;
	mi->noise[2] = mn->mn_ai.nf_c;
#undef CVT
}
2514
2515static __inline void *
2516mwl_getrxdma(struct mwl_softc *sc)
2517{
2518	struct mwl_jumbo *buf;
2519	void *data;
2520
2521	/*
2522	 * Allocate from jumbo pool.
2523	 */
2524	MWL_RXFREE_LOCK(sc);
2525	buf = SLIST_FIRST(&sc->sc_rxfree);
2526	if (buf == NULL) {
2527		DPRINTF(sc, MWL_DEBUG_ANY,
2528		    "%s: out of rx dma buffers\n", __func__);
2529		sc->sc_stats.mst_rx_nodmabuf++;
2530		data = NULL;
2531	} else {
2532		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2533		sc->sc_nrxfree--;
2534		data = MWL_JUMBO_BUF2DATA(buf);
2535	}
2536	MWL_RXFREE_UNLOCK(sc);
2537	return data;
2538}
2539
2540static __inline void
2541mwl_putrxdma(struct mwl_softc *sc, void *data)
2542{
2543	struct mwl_jumbo *buf;
2544
2545	/* XXX bounds check data */
2546	MWL_RXFREE_LOCK(sc);
2547	buf = MWL_JUMBO_DATA2BUF(data);
2548	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2549	sc->sc_nrxfree++;
2550	MWL_RXFREE_UNLOCK(sc);
2551}
2552
/*
 * (Re)initialize an rx descriptor, (re)attaching a dma buffer
 * if needed.  Returns ENOMEM (and marks the descriptor so the
 * firmware skips it) when no dma buffer is available.  The
 * descriptor-field write order matters: ownership (RxControl)
 * is handed to the dma engine last, then synced.
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2591
/*
 * mbuf external-storage free callback: return the rx dma buffer
 * to the free pool and, if rx processing was blocked for lack
 * of buffers, re-enable rx interrupts once the pool has refilled
 * past the mwl_rxdmalow threshold.
 */
static void
mwl_ext_free(void *data, void *arg)
{
	struct mwl_softc *sc = arg;

	/* XXX bounds check data */
	mwl_putrxdma(sc, data);
	/*
	 * If we were previously blocked by a lack of rx dma buffers
	 * check if we now have enough to restart rx interrupt handling.
	 * NB: we know we are called at splvm which is above splnet.
	 */
	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
		sc->sc_rxblocked = 0;
		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
	}
}
2609
/* 802.11 BlockAckReq (BAR) control frame header (leading fields only). */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];	/* frame control */
	u_int8_t	i_dur[2];	/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2617
2618/*
2619 * Like ieee80211_anyhdrsize, but handles BAR frames
2620 * specially so the logic below to piece the 802.11
2621 * header together works.
2622 */
2623static __inline int
2624mwl_anyhdrsize(const void *data)
2625{
2626	const struct ieee80211_frame *wh = data;
2627
2628	if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2629		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2630		case IEEE80211_FC0_SUBTYPE_CTS:
2631		case IEEE80211_FC0_SUBTYPE_ACK:
2632			return sizeof(struct ieee80211_frame_ack);
2633		case IEEE80211_FC0_SUBTYPE_BAR:
2634			return sizeof(struct mwl_frame_bar);
2635		}
2636		return sizeof(struct ieee80211_frame_min);
2637	} else
2638		return ieee80211_hdrsize(data);
2639}
2640
2641static void
2642mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2643{
2644	const struct ieee80211_frame *wh;
2645	struct ieee80211_node *ni;
2646
2647	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2648	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2649	if (ni != NULL) {
2650		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2651		ieee80211_free_node(ni);
2652	}
2653}
2654
2655/*
2656 * Convert hardware signal strength to rssi.  The value
2657 * provided by the device has the noise floor added in;
2658 * we need to compensate for this but we don't have that
2659 * so we use a fixed value.
2660 *
2661 * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2662 * offset is already set as part of the initial gain.  This
2663 * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2664 */
static __inline int
cvtrssi(uint8_t ssi)
{
	int rssi;

	/* XXX hack guess until we have a real noise floor */
	rssi = 2 * (87 - ((int) ssi + 8));	/* NB: .5 dBm units */
	if (rssi < 0)
		return 0;
	if (rssi > 127)
		return 127;
	return rssi;
}
2673
/*
 * Rx processing (taskqueue deferred).  Walk completed rx
 * descriptors (up to mwl_rxquota per pass): for each frame,
 * reconstruct the 802.11 header from the 4-address frame the
 * firmware places at the front of the dma buffer, attach the
 * dma buffer to an mbuf (zero copy), install a replacement
 * buffer in the descriptor, and dispatch to net80211.  If the
 * replacement pool runs dry, rx interrupts are masked until
 * mwl_ext_free refills the pool past mwl_rxdmalow.
 */
static void
mwl_rx_proc(void *arg, int npending)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((((const struct ieee80211_frame *)wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct mwl_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_qosframe_addr4 *wh4;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX fixed noise floor guess */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			ifp->if_ierrors++;
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free,
		    data, sc, 0, EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		m->m_pkthdr.rcvif = ifp;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh)) {
			if (IEEE80211_DIR_DSTODS(wh)) {
				wh4 = mtod(m,
				    struct ieee80211_qosframe_addr4*);
				*(uint16_t *)wh4->i_qos = ds->QosCtrl;
			} else {
				*(uint16_t *)wh->i_qos = ds->QosCtrl;
			}
		}
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_WEP | IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		ifp->if_ipackets++;

		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
#if 0
			if ((ds->Rate & 0x80) && (ds->HtSig2 & 0x8))
#else
			if (ni->ni_flags & IEEE80211_NODE_HT)
#endif
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	sc->sc_rxnext = bf;

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
	    !IFQ_IS_EMPTY(&ifp->if_snd)) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(ifp);
	}
#undef IEEE80211_DIR_DSTODS
}
2913
2914static void
2915mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
2916{
2917	struct mwl_txbuf *bf, *bn;
2918	struct mwl_txdesc *ds;
2919
2920	MWL_TXQ_LOCK_INIT(sc, txq);
2921	txq->qnum = qnum;
2922	txq->txpri = 0;	/* XXX */
2923#if 0
2924	/* NB: q setup by mwl_txdma_setup XXX */
2925	STAILQ_INIT(&txq->free);
2926#endif
2927	STAILQ_FOREACH(bf, &txq->free, bf_list) {
2928		bf->bf_txq = txq;
2929
2930		ds = bf->bf_desc;
2931		bn = STAILQ_NEXT(bf, bf_list);
2932		if (bn == NULL)
2933			bn = STAILQ_FIRST(&txq->free);
2934		ds->pPhysNext = htole32(bn->bf_daddr);
2935	}
2936	STAILQ_INIT(&txq->active);
2937}
2938
2939/*
2940 * Setup a hardware data transmit queue for the specified
2941 * access control.  We record the mapping from ac's
2942 * to h/w queues for use by mwl_tx_start.
2943 */
2944static int
2945mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2946{
2947#define	N(a)	(sizeof(a)/sizeof(a[0]))
2948	struct mwl_txq *txq;
2949
2950	if (ac >= N(sc->sc_ac2q)) {
2951		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2952			ac, N(sc->sc_ac2q));
2953		return 0;
2954	}
2955	if (mvtype >= MWL_NUM_TX_QUEUES) {
2956		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2957			mvtype, MWL_NUM_TX_QUEUES);
2958		return 0;
2959	}
2960	txq = &sc->sc_txq[mvtype];
2961	mwl_txq_init(sc, txq, mvtype);
2962	sc->sc_ac2q[ac] = txq;
2963	return 1;
2964#undef N
2965}
2966
2967/*
2968 * Update WME parameters for a transmit queue.
2969 */
2970static int
2971mwl_txq_update(struct mwl_softc *sc, int ac)
2972{
2973#define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
2974	struct ifnet *ifp = sc->sc_ifp;
2975	struct ieee80211com *ic = ifp->if_l2com;
2976	struct mwl_txq *txq = sc->sc_ac2q[ac];
2977	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
2978	struct mwl_hal *mh = sc->sc_mh;
2979	int aifs, cwmin, cwmax, txoplim;
2980
2981	aifs = wmep->wmep_aifsn;
2982	/* XXX in sta mode need to pass log values for cwmin/max */
2983	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2984	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2985	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */
2986
2987	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
2988		device_printf(sc->sc_dev, "unable to update hardware queue "
2989			"parameters for %s traffic!\n",
2990			ieee80211_wme_acnames[ac]);
2991		return 0;
2992	}
2993	return 1;
2994#undef MWL_EXPONENT_TO_VALUE
2995}
2996
2997/*
2998 * Callback from the 802.11 layer to update WME parameters.
2999 */
3000static int
3001mwl_wme_update(struct ieee80211com *ic)
3002{
3003	struct mwl_softc *sc = ic->ic_ifp->if_softc;
3004
3005	return !mwl_txq_update(sc, WME_AC_BE) ||
3006	    !mwl_txq_update(sc, WME_AC_BK) ||
3007	    !mwl_txq_update(sc, WME_AC_VI) ||
3008	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
3009}
3010
3011/*
3012 * Reclaim resources for a setup queue.
3013 */
static void
mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	/* Only the per-queue lock needs teardown here. */
	/* XXX hal work? */
	MWL_TXQ_LOCK_DESTROY(txq);
}
3020
3021/*
3022 * Reclaim all tx queue resources.
3023 */
3024static void
3025mwl_tx_cleanup(struct mwl_softc *sc)
3026{
3027	int i;
3028
3029	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3030		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
3031}
3032
/*
 * DMA-map an outbound mbuf chain into bf->bf_segs.  If the chain
 * needs more than MWL_TXDESC segments it is linearized (collapse or
 * defrag) and the load retried.  On success the map is pre-write
 * synced and the (possibly replaced) mbuf is recorded in bf->bf_m.
 * On any error the mbuf chain is freed and an errno returned, so the
 * caller must not touch m0 after a non-zero return.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = MWL_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_DONTWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_DONTWAIT);
#endif
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* retry the load with the linearized chain */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3094
static __inline int
mwl_cvtlegacyrate(int rate)
{
	/*
	 * Map an IEEE legacy rate (500Kb/s units, e.g. 2 = 1Mb/s)
	 * to the corresponding h/w rate index; the table is the
	 * inverse of the one in mwl_cvtlegacyrix.  Unknown rates
	 * map to index 0.
	 */
	static const int ieeerates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	int rix;

	for (rix = 0; rix < (int)(sizeof(ieeerates)/sizeof(ieeerates[0])); rix++)
		if (ieeerates[rix] == rate)
			return rix;
	return 0;
}
3115
3116/*
3117 * Calculate fixed tx rate information per client state;
3118 * this value is suitable for writing to the Format field
3119 * of a tx descriptor.
3120 */
static uint16_t
mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
{
	uint16_t fmt;

	/* antenna select + extension-channel placement for HT40 channels */
	fmt = SM(3, EAGLE_TXD_ANTENNA)
	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
	if (rate & 0x80) {		/* HT MCS */
		fmt |= EAGLE_TXD_FORMAT_HT
		    /* NB: 0x80 implicitly stripped from ucastrate */
		    | SM(rate, EAGLE_TXD_RATE);
		/* XXX short/long GI may be wrong; re-check */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			/* 40MHz: honor the peer's short-GI-40 capability */
			fmt |= EAGLE_TXD_CHW_40
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		} else {
			/* 20MHz: honor the peer's short-GI-20 capability */
			fmt |= EAGLE_TXD_CHW_20
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		}
	} else {			/* legacy rate */
		fmt |= EAGLE_TXD_FORMAT_LEGACY
		    | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
		    | EAGLE_TXD_CHW_20
		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
	}
	return fmt;
}
3153
3154static int
3155mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
3156    struct mbuf *m0)
3157{
3158#define	IEEE80211_DIR_DSTODS(wh) \
3159	((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
3160	struct ifnet *ifp = sc->sc_ifp;
3161	struct ieee80211com *ic = ifp->if_l2com;
3162	struct ieee80211vap *vap = ni->ni_vap;
3163	int error, iswep, ismcast;
3164	int hdrlen, copyhdrlen, pktlen;
3165	struct mwl_txdesc *ds;
3166	struct mwl_txq *txq;
3167	struct ieee80211_frame *wh;
3168	struct mwltxrec *tr;
3169	struct mwl_node *mn;
3170	uint16_t qos;
3171#if MWL_TXDESC > 1
3172	int i;
3173#endif
3174
3175	wh = mtod(m0, struct ieee80211_frame *);
3176	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
3177	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
3178	hdrlen = ieee80211_anyhdrsize(wh);
3179	copyhdrlen = hdrlen;
3180	pktlen = m0->m_pkthdr.len;
3181	if (IEEE80211_QOS_HAS_SEQ(wh)) {
3182		if (IEEE80211_DIR_DSTODS(wh)) {
3183			qos = *(uint16_t *)
3184			    (((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
3185			copyhdrlen -= sizeof(qos);
3186		} else
3187			qos = *(uint16_t *)
3188			    (((struct ieee80211_qosframe *) wh)->i_qos);
3189	} else
3190		qos = 0;
3191
3192	if (iswep) {
3193		const struct ieee80211_cipher *cip;
3194		struct ieee80211_key *k;
3195
3196		/*
3197		 * Construct the 802.11 header+trailer for an encrypted
3198		 * frame. The only reason this can fail is because of an
3199		 * unknown or unsupported cipher/key type.
3200		 *
3201		 * NB: we do this even though the firmware will ignore
3202		 *     what we've done for WEP and TKIP as we need the
3203		 *     ExtIV filled in for CCMP and this also adjusts
3204		 *     the headers which simplifies our work below.
3205		 */
3206		k = ieee80211_crypto_encap(ni, m0);
3207		if (k == NULL) {
3208			/*
3209			 * This can happen when the key is yanked after the
3210			 * frame was queued.  Just discard the frame; the
3211			 * 802.11 layer counts failures and provides
3212			 * debugging/diagnostics.
3213			 */
3214			m_freem(m0);
3215			return EIO;
3216		}
3217		/*
3218		 * Adjust the packet length for the crypto additions
3219		 * done during encap and any other bits that the f/w
3220		 * will add later on.
3221		 */
3222		cip = k->wk_cipher;
3223		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;
3224
3225		/* packet header may have moved, reset our local pointer */
3226		wh = mtod(m0, struct ieee80211_frame *);
3227	}
3228
3229	if (ieee80211_radiotap_active_vap(vap)) {
3230		sc->sc_tx_th.wt_flags = 0;	/* XXX */
3231		if (iswep)
3232			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3233#if 0
3234		sc->sc_tx_th.wt_rate = ds->DataRate;
3235#endif
3236		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
3237		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
3238
3239		ieee80211_radiotap_tx(vap, m0);
3240	}
3241	/*
3242	 * Copy up/down the 802.11 header; the firmware requires
3243	 * we present a 2-byte payload length followed by a
3244	 * 4-address header (w/o QoS), followed (optionally) by
3245	 * any WEP/ExtIV header (but only filled in for CCMP).
3246	 * We are assured the mbuf has sufficient headroom to
3247	 * prepend in-place by the setup of ic_headroom in
3248	 * mwl_attach.
3249	 */
3250	if (hdrlen < sizeof(struct mwltxrec)) {
3251		const int space = sizeof(struct mwltxrec) - hdrlen;
3252		if (M_LEADINGSPACE(m0) < space) {
3253			/* NB: should never happen */
3254			device_printf(sc->sc_dev,
3255			    "not enough headroom, need %d found %zd, "
3256			    "m_flags 0x%x m_len %d\n",
3257			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
3258			ieee80211_dump_pkt(ic,
3259			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
3260			m_freem(m0);
3261			sc->sc_stats.mst_tx_noheadroom++;
3262			return EIO;
3263		}
3264		M_PREPEND(m0, space, M_NOWAIT);
3265	}
3266	tr = mtod(m0, struct mwltxrec *);
3267	if (wh != (struct ieee80211_frame *) &tr->wh)
3268		ovbcopy(wh, &tr->wh, hdrlen);
3269	/*
3270	 * Note: the "firmware length" is actually the length
3271	 * of the fully formed "802.11 payload".  That is, it's
3272	 * everything except for the 802.11 header.  In particular
3273	 * this includes all crypto material including the MIC!
3274	 */
3275	tr->fwlen = htole16(pktlen - hdrlen);
3276
3277	/*
3278	 * Load the DMA map so any coalescing is done.  This
3279	 * also calculates the number of descriptors we need.
3280	 */
3281	error = mwl_tx_dmasetup(sc, bf, m0);
3282	if (error != 0) {
3283		/* NB: stat collected in mwl_tx_dmasetup */
3284		DPRINTF(sc, MWL_DEBUG_XMIT,
3285		    "%s: unable to setup dma\n", __func__);
3286		return error;
3287	}
3288	bf->bf_node = ni;			/* NB: held reference */
3289	m0 = bf->bf_m;				/* NB: may have changed */
3290	tr = mtod(m0, struct mwltxrec *);
3291	wh = (struct ieee80211_frame *)&tr->wh;
3292
3293	/*
3294	 * Formulate tx descriptor.
3295	 */
3296	ds = bf->bf_desc;
3297	txq = bf->bf_txq;
3298
3299	ds->QosCtrl = qos;			/* NB: already little-endian */
3300#if MWL_TXDESC == 1
3301	/*
3302	 * NB: multiframes should be zero because the descriptors
3303	 *     are initialized to zero.  This should handle the case
3304	 *     where the driver is built with MWL_TXDESC=1 but we are
3305	 *     using firmware with multi-segment support.
3306	 */
3307	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
3308	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
3309#else
3310	ds->multiframes = htole32(bf->bf_nseg);
3311	ds->PktLen = htole16(m0->m_pkthdr.len);
3312	for (i = 0; i < bf->bf_nseg; i++) {
3313		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
3314		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
3315	}
3316#endif
3317	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
3318	ds->Format = 0;
3319	ds->pad = 0;
3320
3321	mn = MWL_NODE(ni);
3322	/*
3323	 * Select transmit rate.
3324	 */
3325	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
3326	case IEEE80211_FC0_TYPE_MGT:
3327		sc->sc_stats.mst_tx_mgmt++;
3328		/* fall thru... */
3329	case IEEE80211_FC0_TYPE_CTL:
3330		/* NB: assign to BE q to avoid bursting */
3331		ds->TxPriority = MWL_WME_AC_BE;
3332		break;
3333	case IEEE80211_FC0_TYPE_DATA:
3334		if (!ismcast) {
3335			const struct ieee80211_txparam *tp = ni->ni_txparms;
3336			/*
3337			 * EAPOL frames get forced to a fixed rate and w/o
3338			 * aggregation; otherwise check for any fixed rate
3339			 * for the client (may depend on association state).
3340			 */
3341			if (m0->m_flags & M_EAPOL) {
3342				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
3343				ds->Format = mvp->mv_eapolformat;
3344				ds->pad = htole16(
3345				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
3346			} else if (tp != NULL && /* XXX temp dwds WAR */
3347			    tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3348				/* XXX pre-calculate per node */
3349				ds->Format = htole16(
3350				    mwl_calcformat(tp->ucastrate, ni));
3351				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
3352			}
3353			/* NB: EAPOL frames will never have qos set */
3354			if (qos == 0)
3355				ds->TxPriority = txq->qnum;
3356#if MWL_MAXBA > 3
3357			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
3358				ds->TxPriority = mn->mn_ba[3].txq;
3359#endif
3360#if MWL_MAXBA > 2
3361			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
3362				ds->TxPriority = mn->mn_ba[2].txq;
3363#endif
3364#if MWL_MAXBA > 1
3365			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
3366				ds->TxPriority = mn->mn_ba[1].txq;
3367#endif
3368#if MWL_MAXBA > 0
3369			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
3370				ds->TxPriority = mn->mn_ba[0].txq;
3371#endif
3372			else
3373				ds->TxPriority = txq->qnum;
3374		} else
3375			ds->TxPriority = txq->qnum;
3376		break;
3377	default:
3378		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
3379			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
3380		sc->sc_stats.mst_tx_badframetype++;
3381		m_freem(m0);
3382		return EIO;
3383	}
3384
3385	if (IFF_DUMPPKTS_XMIT(sc))
3386		ieee80211_dump_pkt(ic,
3387		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
3388		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);
3389
3390	MWL_TXQ_LOCK(txq);
3391	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
3392	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
3393	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3394
3395	ifp->if_opackets++;
3396	ifp->if_timer = 5;
3397	MWL_TXQ_UNLOCK(txq);
3398
3399	return 0;
3400#undef	IEEE80211_DIR_DSTODS
3401}
3402
static __inline int
mwl_cvtlegacyrix(int rix)
{
	/*
	 * Map a h/w legacy rate index back to an IEEE rate
	 * (500Kb/s units); out-of-range indices yield 0.
	 */
	static const int ieeerates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	const int nrates = (int)(sizeof(ieeerates) / sizeof(ieeerates[0]));

	if (rix < 0 || rix >= nrates)
		return 0;
	return ieeerates[rix];
}
3412
3413/*
3414 * Process completed xmit descriptors from the specified queue.
3415 */
3416static int
3417mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
3418{
3419#define	EAGLE_TXD_STATUS_MCAST \
3420	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
3421	struct ifnet *ifp = sc->sc_ifp;
3422	struct ieee80211com *ic = ifp->if_l2com;
3423	struct mwl_txbuf *bf;
3424	struct mwl_txdesc *ds;
3425	struct ieee80211_node *ni;
3426	struct mwl_node *an;
3427	int nreaped;
3428	uint32_t status;
3429
3430	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
3431	for (nreaped = 0;; nreaped++) {
3432		MWL_TXQ_LOCK(txq);
3433		bf = STAILQ_FIRST(&txq->active);
3434		if (bf == NULL) {
3435			MWL_TXQ_UNLOCK(txq);
3436			break;
3437		}
3438		ds = bf->bf_desc;
3439		MWL_TXDESC_SYNC(txq, ds,
3440		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3441		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
3442			MWL_TXQ_UNLOCK(txq);
3443			break;
3444		}
3445		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
3446		MWL_TXQ_UNLOCK(txq);
3447
3448#ifdef MWL_DEBUG
3449		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
3450			mwl_printtxbuf(bf, txq->qnum, nreaped);
3451#endif
3452		ni = bf->bf_node;
3453		if (ni != NULL) {
3454			an = MWL_NODE(ni);
3455			status = le32toh(ds->Status);
3456			if (status & EAGLE_TXD_STATUS_OK) {
3457				uint16_t Format = le16toh(ds->Format);
3458				uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);
3459
3460				sc->sc_stats.mst_ant_tx[txant]++;
3461				if (status & EAGLE_TXD_STATUS_OK_RETRY)
3462					sc->sc_stats.mst_tx_retries++;
3463				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
3464					sc->sc_stats.mst_tx_mretries++;
3465				if (txq->qnum >= MWL_WME_AC_VO)
3466					ic->ic_wme.wme_hipri_traffic++;
3467				ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
3468				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
3469					ni->ni_txrate = mwl_cvtlegacyrix(
3470					    ni->ni_txrate);
3471				} else
3472					ni->ni_txrate |= IEEE80211_RATE_MCS;
3473				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
3474			} else {
3475				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
3476					sc->sc_stats.mst_tx_linkerror++;
3477				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
3478					sc->sc_stats.mst_tx_xretries++;
3479				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
3480					sc->sc_stats.mst_tx_aging++;
3481				if (bf->bf_m->m_flags & M_FF)
3482					sc->sc_stats.mst_ff_txerr++;
3483			}
3484			/*
3485			 * Do any tx complete callback.  Note this must
3486			 * be done before releasing the node reference.
3487			 * XXX no way to figure out if frame was ACK'd
3488			 */
3489			if (bf->bf_m->m_flags & M_TXCB) {
3490				/* XXX strip fw len in case header inspected */
3491				m_adj(bf->bf_m, sizeof(uint16_t));
3492				ieee80211_process_callback(ni, bf->bf_m,
3493					(status & EAGLE_TXD_STATUS_OK) == 0);
3494			}
3495			/*
3496			 * Reclaim reference to node.
3497			 *
3498			 * NB: the node may be reclaimed here if, for example
3499			 *     this is a DEAUTH message that was sent and the
3500			 *     node was timed out due to inactivity.
3501			 */
3502			ieee80211_free_node(ni);
3503		}
3504		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);
3505
3506		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3507		    BUS_DMASYNC_POSTWRITE);
3508		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3509		m_freem(bf->bf_m);
3510
3511		mwl_puttxbuf_tail(txq, bf);
3512	}
3513	return nreaped;
3514#undef EAGLE_TXD_STATUS_MCAST
3515}
3516
3517/*
3518 * Deferred processing of transmit interrupt; special-cased
3519 * for four hardware queues, 0-3.
3520 */
3521static void
3522mwl_tx_proc(void *arg, int npending)
3523{
3524	struct mwl_softc *sc = arg;
3525	struct ifnet *ifp = sc->sc_ifp;
3526	int nreaped;
3527
3528	/*
3529	 * Process each active queue.
3530	 */
3531	nreaped = 0;
3532	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3533		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3534	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3535		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3536	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3537		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3538	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3539		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3540
3541	if (nreaped != 0) {
3542		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3543		ifp->if_timer = 0;
3544		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
3545			/* NB: kick fw; the tx thread may have been preempted */
3546			mwl_hal_txstart(sc->sc_mh, 0);
3547			mwl_start(ifp);
3548		}
3549	}
3550}
3551
/*
 * Drain all buffers from a tx queue: unload DMA maps, release node
 * references, and free the mbufs.  Unlike mwl_tx_processq no status
 * is examined and no completion callbacks are run.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ifnet *ifp = sc->sc_ifp;
			struct ieee80211com *ic = ifp->if_l2com;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3596
3597/*
3598 * Drain the transmit queues and reclaim resources.
3599 */
3600static void
3601mwl_draintxq(struct mwl_softc *sc)
3602{
3603	struct ifnet *ifp = sc->sc_ifp;
3604	int i;
3605
3606	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3607		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3608	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3609	ifp->if_timer = 0;
3610}
3611
3612#ifdef MWL_DIAGAPI
3613/*
3614 * Reset the transmit queues to a pristine state after a fw download.
3615 */
3616static void
3617mwl_resettxq(struct mwl_softc *sc)
3618{
3619	int i;
3620
3621	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3622		mwl_txq_reset(sc, &sc->sc_txq[i]);
3623}
3624#endif /* MWL_DIAGAPI */
3625
3626/*
3627 * Clear the transmit queues of any frames submitted for the
3628 * specified vap.  This is done when the vap is deleted so we
3629 * don't potentially reference the vap after it is gone.
3630 * Note we cannot remove the frames; we only reclaim the node
3631 * reference.
3632 */
3633static void
3634mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3635{
3636	struct mwl_txq *txq;
3637	struct mwl_txbuf *bf;
3638	int i;
3639
3640	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3641		txq = &sc->sc_txq[i];
3642		MWL_TXQ_LOCK(txq);
3643		STAILQ_FOREACH(bf, &txq->active, bf_list) {
3644			struct ieee80211_node *ni = bf->bf_node;
3645			if (ni != NULL && ni->ni_vap == vap) {
3646				bf->bf_node = NULL;
3647				ieee80211_free_node(ni);
3648			}
3649		}
3650		MWL_TXQ_UNLOCK(txq);
3651	}
3652}
3653
3654static void
3655mwl_recv_action(struct ieee80211_node *ni, const uint8_t *frm, const uint8_t *efrm)
3656{
3657	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3658	const struct ieee80211_action *ia;
3659
3660	ia = (const struct ieee80211_action *) frm;
3661	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3662	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3663		const struct ieee80211_action_ht_mimopowersave *mps =
3664		    (const struct ieee80211_action_ht_mimopowersave *) ia;
3665
3666		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3667		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3668		    MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
3669	} else
3670		sc->sc_recv_action(ni, frm, efrm);
3671}
3672
/*
 * Intercept net80211's ADDBA request path: reserve one of the node's
 * h/w BA stream slots and a firmware stream before letting the stock
 * handler send the request frame.  Returns 0 (no aggregation) if no
 * slot or firmware stream is available.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 * NB: the #if chain forms one cascaded if/else over
		 *     the MWL_MAXBA configured slots.
		 */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(sc->sc_mh,
		    1/* XXX immediate*/, ni->ni_macaddr, tap->txa_ac,
		    ni->ni_htparam, ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3741
/*
 * Intercept net80211's ADDBA response path: on success create the
 * firmware BA stream pre-allocated in mwl_addba_request; on failure
 * (NAK or create error) return the stream resources.  Always defers
 * to the saved net80211 handler at the end.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, AC %d\n",
		    __func__, tap->txa_ac);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(sc->sc_mh, bas->bastream,
		    bufsiz, bufsiz-1, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d AC %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_ac, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_ac, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d AC %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_ac, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3807
3808static void
3809mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3810{
3811	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3812	struct mwl_bastate *bas;
3813
3814	bas = tap->txa_private;
3815	if (bas != NULL) {
3816		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3817		    __func__, bas->bastream);
3818		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3819		mwl_bastream_free(bas);
3820		tap->txa_private = NULL;
3821	}
3822	sc->sc_addba_stop(ni, tap);
3823}
3824
3825/*
3826 * Setup the rx data structures.  This should only be
3827 * done once or we may get out of sync with the firmware.
3828 */
static int
mwl_startrecv(struct mwl_softc *sc)
{
	if (!sc->sc_recvsetup) {
		struct mwl_rxbuf *bf, *prev;
		struct mwl_rxdesc *ds;

		/*
		 * Initialize each rx buffer and chain the descriptors
		 * together via pPhysNext, closing the last back to the
		 * first to form a circular ring for the firmware.
		 */
		prev = NULL;
		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			int error = mwl_rxbuf_init(sc, bf);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_RECV,
					"%s: mwl_rxbuf_init failed %d\n",
					__func__, error);
				return error;
			}
			if (prev != NULL) {
				ds = prev->bf_desc;
				ds->pPhysNext = htole32(bf->bf_daddr);
			}
			prev = bf;
		}
		if (prev != NULL) {
			/* close the ring: last descriptor -> first */
			ds = prev->bf_desc;
			ds->pPhysNext =
			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
		}
		/* NB: done once only; re-doing it would desync the f/w */
		sc->sc_recvsetup = 1;
	}
	mwl_mode_init(sc);		/* set filters, etc. */
	return 0;
}
3861
3862static MWL_HAL_APMODE
3863mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3864{
3865	MWL_HAL_APMODE mode;
3866
3867	if (IEEE80211_IS_CHAN_HT(chan)) {
3868		if (vap->iv_flags_ext & IEEE80211_FEXT_PUREN)
3869			mode = AP_MODE_N_ONLY;
3870		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3871			mode = AP_MODE_AandN;
3872		else if (vap->iv_flags & IEEE80211_F_PUREG)
3873			mode = AP_MODE_GandN;
3874		else
3875			mode = AP_MODE_BandGandN;
3876	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3877		if (vap->iv_flags & IEEE80211_F_PUREG)
3878			mode = AP_MODE_G_ONLY;
3879		else
3880			mode = AP_MODE_MIXED;
3881	} else if (IEEE80211_IS_CHAN_B(chan))
3882		mode = AP_MODE_B_ONLY;
3883	else if (IEEE80211_IS_CHAN_A(chan))
3884		mode = AP_MODE_A_ONLY;
3885	else
3886		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3887	return mode;
3888}
3889
static int
mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
{
	/* push the AP mode implied by vap flags + channel to the hal */
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
}
3896
3897/*
3898 * Set/change channels.
3899 */
static int
mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
{
	struct mwl_hal *mh = sc->sc_mh;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	MWL_HAL_CHANNEL hchan;
	int maxtxpow;

	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
	    __func__, chan->ic_freq, chan->ic_flags);

	/*
	 * Convert to a HAL channel description with
	 * the flags constrained to reflect the current
	 * operating mode.
	 */
	mwl_mapchan(&hchan, chan);
	/* interrupts stay masked until all channel state is pushed */
	mwl_hal_intrset(mh, 0);		/* disable interrupts */
#if 0
	mwl_draintxq(sc);		/* clear pending tx frames */
#endif
	mwl_hal_setchannel(mh, &hchan);
	/*
	 * Tx power is cap'd by the regulatory setting and
	 * possibly a user-set limit.  We pass the min of
	 * these to the hal to apply them to the cal data
	 * for this channel.
	 * XXX min bound?
	 */
	maxtxpow = 2*chan->ic_maxregpower;
	if (maxtxpow > ic->ic_txpowlimit)
		maxtxpow = ic->ic_txpowlimit;
	/* NB: limits above are in 0.5 dBm units; hal takes whole dBm */
	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
	/* NB: potentially change mcast/mgt rates */
	mwl_setcurchanrates(sc);

	/*
	 * Update internal state.
	 */
	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
	if (IEEE80211_IS_CHAN_A(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
	} else {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
	}
	sc->sc_curchan = hchan;
	mwl_hal_intrset(mh, sc->sc_imask);

	return 0;
}
3957
static void
mwl_scan_start(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;

	/* no h/w work is done here; only a debug trace is emitted */
	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3966
static void
mwl_scan_end(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;

	/* no h/w work is done here; only a debug trace is emitted */
	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3975
static void
mwl_set_channel(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;

	/* retune to net80211's current channel; errors are ignored */
	(void) mwl_chan_set(sc, ic->ic_curchan);
}
3984
3985/*
3986 * Handle a channel switch request.  We inform the firmware
3987 * and mark the global state to suppress various actions.
3988 * NB: we issue only one request to the fw; we may be called
3989 * multiple times if there are multiple vap's.
3990 */
static void
mwl_startcsa(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct mwl_softc *sc = ic->ic_ifp->if_softc;
	MWL_HAL_CHANNEL hchan;

	/* only one CSA request goes to the firmware at a time */
	if (sc->sc_csapending)
		return;

	mwl_mapchan(&hchan, ic->ic_csa_newchan);
	/* 1 =>'s quiet channel */
	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
	sc->sc_csapending = 1;
}
4006
4007/*
4008 * Plumb any static WEP key for the station.  This is
4009 * necessary as we must propagate the key from the
4010 * global key table of the vap to each sta db entry.
4011 */
4012static void
4013mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4014{
4015	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
4016		IEEE80211_F_PRIVACY &&
4017	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
4018	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
4019		(void) mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac);
4020}
4021
/*
 * Create/update the firmware station db entry for a peer and
 * (re)plumb any static WEP key.  The AUTH-state caller passes
 * aid/staid of 0 and a NULL peer info.
 */
static int
mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_hal_vap *hvap;
	int error;

	if (vap->iv_opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vap's do not have a f/w vap; instead they piggyback
		 * on an AP vap and we must install the sta db entry and
		 * crypto state using that AP's handle (the WDS vap has none).
		 */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	} else
		hvap = MWL_VAP(vap)->mv_hvap;
	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
	    aid, staid, pi,
	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
	if (error == 0) {
		/*
		 * Setup security for this station.  For sta mode this is
		 * needed even though do the same thing on transition to
		 * AUTH state because the call to mwl_hal_newstation
		 * clobbers the crypto state we setup.
		 */
		mwl_setanywepkey(vap, ni->ni_macaddr);
	}
	return error;
#undef WME
}
4055
4056static void
4057mwl_setglobalkeys(struct ieee80211vap *vap)
4058{
4059	struct ieee80211_key *wk;
4060
4061	wk = &vap->iv_nw_keys[0];
4062	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
4063		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
4064			(void) mwl_key_set(vap, wk, vap->iv_myaddr);
4065}
4066
4067/*
4068 * Re-create the local sta db entry for a vap to ensure
4069 * up to date WME state is pushed to the firmware.  Because
4070 * this resets crypto state this must be followed by a
4071 * reload of any keys in the global key table.
4072 */
4073static int
4074mwl_localstadb(struct ieee80211vap *vap)
4075{
4076#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
4077	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
4078	struct ieee80211_node *bss;
4079	int error;
4080
4081	switch (vap->iv_opmode) {
4082	case IEEE80211_M_STA:
4083		bss = vap->iv_bss;
4084		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
4085		    0, 0, NULL, bss->ni_flags & IEEE80211_NODE_QOS,
4086		    bss->ni_ies.wme_ie != NULL ?
4087			WME(bss->ni_ies.wme_ie)->wme_info : 0);
4088		if (error == 0)
4089			mwl_setglobalkeys(vap);
4090		break;
4091	case IEEE80211_M_HOSTAP:
4092		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
4093		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
4094		if (error == 0)
4095			mwl_setglobalkeys(vap);
4096		break;
4097	default:
4098		error = 0;
4099		break;
4100	}
4101	return error;
4102#undef WME
4103}
4104
/*
 * 802.11 state machine hook.  Carry out device-specific work
 * before and after the net80211 state change: start/stop the
 * firmware vap, manage radar detection, create sta db entries,
 * and on transition to RUN push association/beacon state.
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    vap->iv_ifp->if_xname, __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	/* Stop the aging timer; re-armed below if we reach RUN. */
	callout_stop(&sc->sc_timer);
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    vap->iv_ifp->if_xname, __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    vap->iv_ifp->if_xname, __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    vap->iv_ifp->if_xname, __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	}
bad:
	return error;
}
4251
4252/*
4253 * Convert a legacy rate set to a firmware bitmask.
4254 */
4255static uint32_t
4256get_rate_bitmap(const struct ieee80211_rateset *rs)
4257{
4258	uint32_t rates;
4259	int i;
4260
4261	rates = 0;
4262	for (i = 0; i < rs->rs_nrates; i++)
4263		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
4264		case 2:	  rates |= 0x001; break;
4265		case 4:	  rates |= 0x002; break;
4266		case 11:  rates |= 0x004; break;
4267		case 22:  rates |= 0x008; break;
4268		case 44:  rates |= 0x010; break;
4269		case 12:  rates |= 0x020; break;
4270		case 18:  rates |= 0x040; break;
4271		case 24:  rates |= 0x080; break;
4272		case 36:  rates |= 0x100; break;
4273		case 48:  rates |= 0x200; break;
4274		case 72:  rates |= 0x400; break;
4275		case 96:  rates |= 0x800; break;
4276		case 108: rates |= 0x1000; break;
4277		}
4278	return rates;
4279}
4280
4281/*
4282 * Construct an HT firmware bitmask from an HT rate set.
4283 */
4284static uint32_t
4285get_htrate_bitmap(const struct ieee80211_htrateset *rs)
4286{
4287	uint32_t rates;
4288	int i;
4289
4290	rates = 0;
4291	for (i = 0; i < rs->rs_nrates; i++) {
4292		if (rs->rs_rates[i] < 16)
4293			rates |= 1<<rs->rs_rates[i];
4294	}
4295	return rates;
4296}
4297
4298/*
4299 * Manage station id's; these are separate from AID's
4300 * as AID's may have values out of the range of possible
4301 * station id's acceptable to the firmware.
4302 */
4303static int
4304allocstaid(struct mwl_softc *sc, int aid)
4305{
4306	int staid;
4307
4308	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
4309		/* NB: don't use 0 */
4310		for (staid = 1; staid < MWL_MAXSTAID; staid++)
4311			if (isclr(sc->sc_staid, staid))
4312				break;
4313	} else
4314		staid = aid;
4315	setbit(sc->sc_staid, staid);
4316	return staid;
4317}
4318
/* Return a station id to the free pool. */
static void
delstaid(struct mwl_softc *sc, int staid)
{
	clrbit(sc->sc_staid, staid);
}
4324
4325/*
4326 * Setup driver-specific state for a newly associated node.
4327 * Note that we're called also on a re-associate, the isnew
4328 * param tells us if this is the first time or not.
4329 */
4330static void
4331mwl_newassoc(struct ieee80211_node *ni, int isnew)
4332{
4333	struct ieee80211vap *vap = ni->ni_vap;
4334        struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4335	struct mwl_node *mn = MWL_NODE(ni);
4336	MWL_HAL_PEERINFO pi;
4337	uint16_t aid;
4338	int error;
4339
4340	aid = IEEE80211_AID(ni->ni_associd);
4341	if (isnew) {
4342		mn->mn_staid = allocstaid(sc, aid);
4343		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4344	} else {
4345		mn = MWL_NODE(ni);
4346		/* XXX reset BA stream? */
4347	}
4348	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4349	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4350	/*
4351	 * Craft station database entry for station.
4352	 * NB: use host byte order here, the hal handles byte swapping.
4353	 */
4354	memset(&pi, 0, sizeof(pi));
4355	pi.LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
4356	pi.CapInfo = ni->ni_capinfo;
4357	if (ni->ni_flags & IEEE80211_NODE_HT) {
4358		/* HT capabilities, etc */
4359		pi.HTCapabilitiesInfo = ni->ni_htcap;
4360		/* XXX pi.HTCapabilitiesInfo */
4361	        pi.MacHTParamInfo = ni->ni_htparam;
4362		pi.HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
4363		pi.AddHtInfo.ControlChan = ni->ni_htctlchan;
4364		pi.AddHtInfo.AddChan = ni->ni_ht2ndchan;
4365		pi.AddHtInfo.OpMode = ni->ni_htopmode;
4366		pi.AddHtInfo.stbc = ni->ni_htstbc;
4367
4368		/* constrain according to local configuration */
4369		if ((vap->iv_flags_ext & IEEE80211_FEXT_SHORTGI40) == 0)
4370			pi.HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
4371		if ((vap->iv_flags_ext & IEEE80211_FEXT_SHORTGI20) == 0)
4372			pi.HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
4373		if (ni->ni_chw != 40)
4374			pi.HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
4375	}
4376	error = mwl_peerstadb(ni, aid, mn->mn_staid, &pi);
4377	if (error != 0) {
4378		DPRINTF(sc, MWL_DEBUG_NODE,
4379		    "%s: error %d creating sta db entry\n",
4380		    __func__, error);
4381		/* XXX how to deal with error? */
4382	}
4383}
4384
4385/*
4386 * Periodically poke the firmware to age out station state
4387 * (power save queues, pending tx aggregates).
4388 */
4389static void
4390mwl_agestations(void *arg)
4391{
4392	struct mwl_softc *sc = arg;
4393
4394	mwl_hal_setkeepalive(sc->sc_mh);
4395	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
4396		callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
4397			mwl_agestations, sc);
4398}
4399
4400static const struct mwl_hal_channel *
4401findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4402{
4403	int i;
4404
4405	for (i = 0; i < ci->nchannels; i++) {
4406		const struct mwl_hal_channel *hc = &ci->channels[i];
4407		if (hc->ieee == ieee)
4408			return hc;
4409	}
4410	return NULL;
4411}
4412
4413static int
4414mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
4415	int nchan, struct ieee80211_channel chans[])
4416{
4417	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4418	struct mwl_hal *mh = sc->sc_mh;
4419	const MWL_HAL_CHANNELINFO *ci;
4420	int i;
4421
4422	for (i = 0; i < nchan; i++) {
4423		struct ieee80211_channel *c = &chans[i];
4424		const struct mwl_hal_channel *hc;
4425
4426		if (IEEE80211_IS_CHAN_2GHZ(c)) {
4427			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
4428			    IEEE80211_IS_CHAN_HT40(c) ?
4429				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4430		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4431			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
4432			    IEEE80211_IS_CHAN_HT40(c) ?
4433				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4434		} else {
4435			if_printf(ic->ic_ifp,
4436			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
4437			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
4438			return EINVAL;
4439		}
4440		/*
4441		 * Verify channel has cal data and cap tx power.
4442		 */
4443		hc = findhalchannel(ci, c->ic_ieee);
4444		if (hc != NULL) {
4445			if (c->ic_maxpower > 2*hc->maxTxPow)
4446				c->ic_maxpower = 2*hc->maxTxPow;
4447			goto next;
4448		}
4449		if (IEEE80211_IS_CHAN_HT40(c)) {
4450			/*
4451			 * Look for the extension channel since the
4452			 * hal table only has the primary channel.
4453			 */
4454			hc = findhalchannel(ci, c->ic_extieee);
4455			if (hc != NULL) {
4456				if (c->ic_maxpower > 2*hc->maxTxPow)
4457					c->ic_maxpower = 2*hc->maxTxPow;
4458				goto next;
4459			}
4460		}
4461		if_printf(ic->ic_ifp,
4462		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
4463		    __func__, c->ic_ieee, c->ic_extieee,
4464		    c->ic_freq, c->ic_flags);
4465		return EINVAL;
4466	next:
4467		;
4468	}
4469	return 0;
4470}
4471
4472#define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4473#define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4474
4475static void
4476addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow)
4477{
4478	c->ic_freq = freq;
4479	c->ic_flags = flags;
4480	c->ic_ieee = ieee;
4481	c->ic_minpower = 0;
4482	c->ic_maxpower = 2*txpow;
4483	c->ic_maxregpower = txpow;
4484}
4485
4486static const struct ieee80211_channel *
4487findchannel(const struct ieee80211_channel chans[], int nchans,
4488	int freq, int flags)
4489{
4490	const struct ieee80211_channel *c;
4491	int i;
4492
4493	for (i = 0; i < nchans; i++) {
4494		c = &chans[i];
4495		if (c->ic_freq == freq && c->ic_flags == flags)
4496			return c;
4497	}
4498	return NULL;
4499}
4500
/*
 * Append HT40 channel pairs to the channel list.  Each hal entry
 * names an HT40 primary channel; the matching HT20 extension
 * channel (20MHz above) must already be present in chans[].
 */
static void
addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	const struct ieee80211_channel *extc;
	const struct mwl_hal_channel *hc;
	int i;

	/* Append after the channels already collected. */
	c = &chans[*nchans];

	flags &= ~IEEE80211_CHAN_HT;
	for (i = 0; i < ci->nchannels; i++) {
		/*
		 * Each entry defines an HT40 channel pair; find the
		 * extension channel above and the insert the pair.
		 */
		hc = &ci->channels[i];
		extc = findchannel(chans, *nchans, hc->freq+20,
		    flags | IEEE80211_CHAN_HT20);
		if (extc != NULL) {
			if (*nchans >= maxchans)
				break;
			/* HT40U entry on the primary channel... */
			addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U,
			    hc->ieee, hc->maxTxPow);
			c->ic_extieee = extc->ic_ieee;
			c++, (*nchans)++;
			/*
			 * NB: if the list fills here the HT40U entry above
			 * is left without its HT40D partner.
			 */
			if (*nchans >= maxchans)
				break;
			/* ...and the mirror HT40D entry on the extension. */
			addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D,
			    extc->ic_ieee, hc->maxTxPow);
			c->ic_extieee = hc->ieee;
			c++, (*nchans)++;
		}
	}
}
4537
/*
 * Append 20MHz channels from the hal table to the channel list.
 * For 2.4GHz (HT)G channels a b-only clone is inserted, and for
 * HT channels a legacy (non-HT) clone is inserted, by duplicating
 * the just-added entry and patching flags via c[-1]/c[0].
 */
static void
addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	int i;

	/* Append after the channels already collected. */
	c = &chans[*nchans];

	for (i = 0; i < ci->nchannels; i++) {
		const struct mwl_hal_channel *hc;

		hc = &ci->channels[i];
		if (*nchans >= maxchans)
			break;
		addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
		c++, (*nchans)++;
		if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
			/* g channel have a separate b-only entry */
			if (*nchans >= maxchans)
				break;
			/* duplicate, then demote the earlier copy to b */
			c[0] = c[-1];
			c[-1].ic_flags = IEEE80211_CHAN_B;
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTG) {
			/* HT g channel have a separate g-only entry */
			if (*nchans >= maxchans)
				break;
			/* previous copy becomes plain g; new copy is HT20 g */
			c[-1].ic_flags = IEEE80211_CHAN_G;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTA) {
			/* HT a channel have a separate a-only entry */
			if (*nchans >= maxchans)
				break;
			/* previous copy becomes plain a; new copy is HT20 a */
			c[-1].ic_flags = IEEE80211_CHAN_A;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
	}
}
4585
4586static void
4587getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
4588	struct ieee80211_channel chans[])
4589{
4590	const MWL_HAL_CHANNELINFO *ci;
4591
4592	/*
4593	 * Use the channel info from the hal to craft the
4594	 * channel list.  Note that we pass back an unsorted
4595	 * list; the caller is required to sort it for us
4596	 * (if desired).
4597	 */
4598	*nchans = 0;
4599	if (mwl_hal_getchannelinfo(sc->sc_mh,
4600	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
4601		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
4602	if (mwl_hal_getchannelinfo(sc->sc_mh,
4603	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
4604		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
4605	if (mwl_hal_getchannelinfo(sc->sc_mh,
4606	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4607		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
4608	if (mwl_hal_getchannelinfo(sc->sc_mh,
4609	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4610		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
4611}
4612
4613static void
4614mwl_getradiocaps(struct ieee80211com *ic,
4615	int maxchans, int *nchans, struct ieee80211_channel chans[])
4616{
4617	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4618
4619	getchannels(sc, maxchans, nchans, chans);
4620}
4621
4622static int
4623mwl_getchannels(struct mwl_softc *sc)
4624{
4625	struct ifnet *ifp = sc->sc_ifp;
4626	struct ieee80211com *ic = ifp->if_l2com;
4627
4628	/*
4629	 * Use the channel info from the hal to craft the
4630	 * channel list for net80211.  Note that we pass up
4631	 * an unsorted list; net80211 will sort it for us.
4632	 */
4633	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
4634	ic->ic_nchans = 0;
4635	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
4636
4637	ic->ic_regdomain.regdomain = SKU_DEBUG;
4638	ic->ic_regdomain.country = CTRY_DEFAULT;
4639	ic->ic_regdomain.location = 'I';
4640	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
4641	ic->ic_regdomain.isocc[1] = ' ';
4642	return (ic->ic_nchans == 0 ? EIO : 0);
4643}
4644#undef IEEE80211_CHAN_HTA
4645#undef IEEE80211_CHAN_HTG
4646
4647#ifdef MWL_DEBUG
/*
 * Debug: dump a receive descriptor.  The '*'/'!' suffix marks
 * host-owned descriptors as ok/error per the status word.
 */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	/*
	 * NOTE(review): STAT prints the raw little-endian ds->Status
	 * byte while the ok/error flag uses the swapped copy; presumably
	 * intentional for a one-byte field -- verify.
	 */
	printf("R[%2u] (DS.V:%p DS.P:%p) NEXT:%08x DATA:%08x RC:%02x%s\n"
	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (const struct mwl_desc *)bf->bf_daddr,
	    le32toh(ds->pPhysNext), le32toh(ds->pPhysBuffData),
	    ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2));
}
4664
/*
 * Debug: dump a transmit descriptor, including the per-fragment
 * arrays when multi-descriptor tx (MWL_TXDESC > 1) is configured.
 */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:%p)\n",
	    ds, (const struct mwl_txdesc *)bf->bf_daddr);
	/* Suffix: '*' ok / '!' error for fw-owned (in-flight) descriptors. */
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !");
	printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
/* Disabled: raw hex dump of the whole descriptor. */
{ const uint8_t *cp = (const uint8_t *) ds;
  int i;
  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
	printf("%02x ", cp[i]);
	if (((i+1) % 16) == 0)
		printf("\n");
  }
  printf("\n");
}
#endif
}
4707#endif /* MWL_DEBUG */
4708
4709#if 0
/*
 * Debug (compiled out): dump every active descriptor on a tx queue.
 */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		/* Pull the descriptor out of DMA memory before reading it. */
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4728#endif
4729
/*
 * Transmit watchdog.  Use a keepalive command as a cheap probe:
 * if it fails the firmware is likely wedged.
 */
static void
mwl_watchdog(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) {
		if (mwl_hal_setkeepalive(sc->sc_mh))
			if_printf(ifp, "transmit timeout (firmware hung?)\n");
		else
			if_printf(ifp, "transmit timeout\n");
#if 0
		mwl_reset(ifp);
mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
#endif
		ifp->if_oerrors++;
		sc->sc_stats.mst_watchdog++;
	}
}
4748
4749#ifdef MWL_DIAGAPI
4750/*
4751 * Diagnostic interface to the HAL.  This is used by various
4752 * tools to do things like retrieve register contents for
4753 * debugging.  The mechanism is intentionally opaque so that
4754 * it can change frequently w/o concern for compatiblity.
4755 */
4756static int
4757mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
4758{
4759	struct mwl_hal *mh = sc->sc_mh;
4760	u_int id = md->md_id & MWL_DIAG_ID;
4761	void *indata = NULL;
4762	void *outdata = NULL;
4763	u_int32_t insize = md->md_in_size;
4764	u_int32_t outsize = md->md_out_size;
4765	int error = 0;
4766
4767	if (md->md_id & MWL_DIAG_IN) {
4768		/*
4769		 * Copy in data.
4770		 */
4771		indata = malloc(insize, M_TEMP, M_NOWAIT);
4772		if (indata == NULL) {
4773			error = ENOMEM;
4774			goto bad;
4775		}
4776		error = copyin(md->md_in_data, indata, insize);
4777		if (error)
4778			goto bad;
4779	}
4780	if (md->md_id & MWL_DIAG_DYN) {
4781		/*
4782		 * Allocate a buffer for the results (otherwise the HAL
4783		 * returns a pointer to a buffer where we can read the
4784		 * results).  Note that we depend on the HAL leaving this
4785		 * pointer for us to use below in reclaiming the buffer;
4786		 * may want to be more defensive.
4787		 */
4788		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
4789		if (outdata == NULL) {
4790			error = ENOMEM;
4791			goto bad;
4792		}
4793	}
4794	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
4795		if (outsize < md->md_out_size)
4796			md->md_out_size = outsize;
4797		if (outdata != NULL)
4798			error = copyout(outdata, md->md_out_data,
4799					md->md_out_size);
4800	} else {
4801		error = EINVAL;
4802	}
4803bad:
4804	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
4805		free(indata, M_TEMP);
4806	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
4807		free(outdata, M_TEMP);
4808	return error;
4809}
4810
/*
 * Diagnostic reset: optionally reload firmware (md_id == 0),
 * refetch h/w specs, and rebuild the driver's dma/tx/rx state.
 * Caller must hold the softc lock.
 */
static int
mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error;

	MWL_LOCK_ASSERT(sc);

	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to load firmware\n");
		return EIO;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		return EIO;
	}
	error = mwl_setupdma(sc);
	if (error != 0) {
		/* NB: mwl_setupdma prints a msg */
		return error;
	}
	/*
	 * Reset tx/rx data structures; after reload we must
	 * re-start the driver's notion of the next xmit/recv.
	 */
	mwl_draintxq(sc);		/* clear pending frames */
	mwl_resettxq(sc);		/* rebuild tx q lists */
	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
	return 0;
}
4841#endif /* MWL_DIAGAPI */
4842
/*
 * Network interface ioctl handler: interface flags, driver
 * statistics, the diagnostic API, and standard media/address
 * requests.
 */
static int
mwl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, startall;

	switch (cmd) {
	case SIOCSIFFLAGS:
		MWL_LOCK(sc);
		startall = 0;
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call mwl_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			mwl_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid) {
				mwl_init_locked(sc);	/* XXX lose error */
				startall = 1;
			}
		} else
			mwl_stop_locked(ifp, 1);
		MWL_UNLOCK(sc);
		/* NB: start vaps outside the softc lock */
		if (startall)
			ieee80211_start_all(ic);
		break;
	case SIOCGMVSTATS:
		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.mst_tx_packets = ifp->if_opackets;
		sc->sc_stats.mst_rx_packets = ifp->if_ipackets;
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsisentcy in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return copyout(&sc->sc_stats,
				ifr->ifr_data, sizeof (sc->sc_stats));
#ifdef MWL_DIAGAPI
	case SIOCGMVDIAG:
		/* XXX check privs */
		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
	case SIOCGMVRESET:
		/* XXX check privs */
		MWL_LOCK(sc);
		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
		MWL_UNLOCK(sc);
		break;
#endif /* MWL_DIAGAPI */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}
4921
4922#ifdef	MWL_DEBUG
4923static int
4924mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4925{
4926	struct mwl_softc *sc = arg1;
4927	int debug, error;
4928
4929	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4930	error = sysctl_handle_int(oidp, &debug, 0, req);
4931	if (error || !req->newptr)
4932		return error;
4933	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4934	sc->sc_debug = debug & 0x00ffffff;
4935	return 0;
4936}
4937#endif /* MWL_DEBUG */
4938
4939static void
4940mwl_sysctlattach(struct mwl_softc *sc)
4941{
4942#ifdef	MWL_DEBUG
4943	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
4944	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
4945
4946	sc->sc_debug = mwl_debug;
4947	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
4948		"debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
4949		mwl_sysctl_debug, "I", "control debugging printfs");
4950#endif
4951}
4952
4953/*
4954 * Announce various information on device/driver attach.
4955 */
4956static void
4957mwl_announce(struct mwl_softc *sc)
4958{
4959	struct ifnet *ifp = sc->sc_ifp;
4960
4961	if_printf(ifp, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
4962		sc->sc_hwspecs.hwVersion,
4963		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
4964		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
4965		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
4966		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
4967		sc->sc_hwspecs.regionCode);
4968	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;
4969
4970	if (bootverbose) {
4971		int i;
4972		for (i = 0; i <= WME_AC_VO; i++) {
4973			struct mwl_txq *txq = sc->sc_ac2q[i];
4974			if_printf(ifp, "Use hw queue %u for %s traffic\n",
4975				txq->qnum, ieee80211_wme_acnames[i]);
4976		}
4977	}
4978	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
4979		if_printf(ifp, "using %u rx descriptors\n", mwl_rxdesc);
4980	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
4981		if_printf(ifp, "using %u rx buffers\n", mwl_rxbuf);
4982	if (bootverbose || mwl_txbuf != MWL_TXBUF)
4983		if_printf(ifp, "using %u tx buffers\n", mwl_txbuf);
4984	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
4985		if_printf(ifp, "multi-bss support\n");
4986#ifdef MWL_TX_NODROP
4987	if (bootverbose)
4988		if_printf(ifp, "no tx drop\n");
4989#endif
4990}
4991