/* if_mwl.c revision 288635 */
1329800Smav/*-
2329800Smav * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
3329800Smav * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
4329800Smav * All rights reserved.
5329800Smav *
6329800Smav * Redistribution and use in source and binary forms, with or without
7329800Smav * modification, are permitted provided that the following conditions
8329800Smav * are met:
9329800Smav * 1. Redistributions of source code must retain the above copyright
10329800Smav *    notice, this list of conditions and the following disclaimer,
11329800Smav *    without modification.
12329800Smav * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13329800Smav *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
14329800Smav *    redistribution must be conditioned upon including a substantially
15329800Smav *    similar Disclaimer requirement for further binary redistribution.
16329800Smav *
17329800Smav * NO WARRANTY
18329800Smav * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19329800Smav * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20329800Smav * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
21329800Smav * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
22329800Smav * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
23329800Smav * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24329800Smav * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25329800Smav * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
26329800Smav * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27329800Smav * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28329800Smav * THE POSSIBILITY OF SUCH DAMAGES.
29329800Smav */
30329800Smav
31329800Smav#include <sys/cdefs.h>
32329800Smav__FBSDID("$FreeBSD: head/sys/dev/mwl/if_mwl.c 288635 2015-10-03 21:48:27Z adrian $");
33329800Smav
34329800Smav/*
35329800Smav * Driver for the Marvell 88W8363 Wireless LAN controller.
36329800Smav */
37329800Smav
38329800Smav#include "opt_inet.h"
39329800Smav#include "opt_mwl.h"
40329800Smav#include "opt_wlan.h"
41329800Smav
42329800Smav#include <sys/param.h>
43329800Smav#include <sys/systm.h>
44329800Smav#include <sys/sysctl.h>
45329800Smav#include <sys/mbuf.h>
46329800Smav#include <sys/malloc.h>
47329800Smav#include <sys/lock.h>
48329800Smav#include <sys/mutex.h>
49329800Smav#include <sys/kernel.h>
50346686Smav#include <sys/socket.h>
51346686Smav#include <sys/sockio.h>
52346686Smav#include <sys/errno.h>
53346686Smav#include <sys/callout.h>
54329800Smav#include <sys/bus.h>
55329800Smav#include <sys/endian.h>
56329800Smav#include <sys/kthread.h>
57329800Smav#include <sys/taskqueue.h>
58329800Smav
59329800Smav#include <machine/bus.h>
60329800Smav
61329800Smav#include <net/if.h>
62329800Smav#include <net/if_var.h>
63329800Smav#include <net/if_dl.h>
64329800Smav#include <net/if_media.h>
65329800Smav#include <net/if_types.h>
66329800Smav#include <net/if_arp.h>
67329800Smav#include <net/ethernet.h>
68329800Smav#include <net/if_llc.h>
69329800Smav
70329800Smav#include <net/bpf.h>
71329800Smav
72329800Smav#include <net80211/ieee80211_var.h>
73329800Smav#include <net80211/ieee80211_input.h>
74329800Smav#include <net80211/ieee80211_regdomain.h>
75329800Smav
76329800Smav#ifdef INET
77329800Smav#include <netinet/in.h>
78329800Smav#include <netinet/if_ether.h>
79329800Smav#endif /* INET */
80329800Smav
81346686Smav#include <dev/mwl/if_mwlvar.h>
82346686Smav#include <dev/mwl/mwldiag.h>
83346686Smav
/*
 * Idiomatic shorthands for register/descriptor field access:
 *   MS(v,x) - mask then shift: extract field x from value v
 *   SM(v,x) - shift then mask: position value v into field x
 * NB: x must be a bare macro name (not an expression) because it is
 * token-pasted with _S to name the field's shift-count constant.
 */
#define	MS(v,x)	(((v) & x) >> x##_S)
#define	SM(v,x)	(((v) << x##_S) & x)
87329800Smav
88329800Smavstatic struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
89329800Smav		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
90329800Smav		    const uint8_t [IEEE80211_ADDR_LEN],
91329800Smav		    const uint8_t [IEEE80211_ADDR_LEN]);
92329800Smavstatic void	mwl_vap_delete(struct ieee80211vap *);
93329800Smavstatic int	mwl_setupdma(struct mwl_softc *);
94329800Smavstatic int	mwl_hal_reset(struct mwl_softc *sc);
95329800Smavstatic int	mwl_init(struct mwl_softc *);
96329800Smavstatic void	mwl_parent(struct ieee80211com *);
97329800Smavstatic int	mwl_reset(struct ieee80211vap *, u_long);
98329800Smavstatic void	mwl_stop(struct mwl_softc *);
99329800Smavstatic void	mwl_start(struct mwl_softc *);
100329800Smavstatic int	mwl_transmit(struct ieee80211com *, struct mbuf *);
101329800Smavstatic int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
102329800Smav			const struct ieee80211_bpf_params *);
103329800Smavstatic int	mwl_media_change(struct ifnet *);
104329800Smavstatic void	mwl_watchdog(void *);
105329800Smavstatic int	mwl_ioctl(struct ieee80211com *, u_long, void *);
106329800Smavstatic void	mwl_radar_proc(void *, int);
107329800Smavstatic void	mwl_chanswitch_proc(void *, int);
108329800Smavstatic void	mwl_bawatchdog_proc(void *, int);
109329800Smavstatic int	mwl_key_alloc(struct ieee80211vap *,
110329800Smav			struct ieee80211_key *,
111329800Smav			ieee80211_keyix *, ieee80211_keyix *);
112329800Smavstatic int	mwl_key_delete(struct ieee80211vap *,
113329800Smav			const struct ieee80211_key *);
114329800Smavstatic int	mwl_key_set(struct ieee80211vap *,
115329800Smav			const struct ieee80211_key *);
116329800Smavstatic int	_mwl_key_set(struct ieee80211vap *,
117329800Smav			const struct ieee80211_key *,
118329800Smav			const uint8_t mac[IEEE80211_ADDR_LEN]);
119329800Smavstatic int	mwl_mode_init(struct mwl_softc *);
120329800Smavstatic void	mwl_update_mcast(struct ieee80211com *);
121329800Smavstatic void	mwl_update_promisc(struct ieee80211com *);
122329800Smavstatic void	mwl_updateslot(struct ieee80211com *);
123329800Smavstatic int	mwl_beacon_setup(struct ieee80211vap *);
124329800Smavstatic void	mwl_beacon_update(struct ieee80211vap *, int);
125329800Smav#ifdef MWL_HOST_PS_SUPPORT
126329800Smavstatic void	mwl_update_ps(struct ieee80211vap *, int);
127329800Smavstatic int	mwl_set_tim(struct ieee80211_node *, int);
128329800Smav#endif
129329800Smavstatic int	mwl_dma_setup(struct mwl_softc *);
130329800Smavstatic void	mwl_dma_cleanup(struct mwl_softc *);
131329800Smavstatic struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
132329800Smav		    const uint8_t [IEEE80211_ADDR_LEN]);
133329800Smavstatic void	mwl_node_cleanup(struct ieee80211_node *);
134329800Smavstatic void	mwl_node_drain(struct ieee80211_node *);
135329800Smavstatic void	mwl_node_getsignal(const struct ieee80211_node *,
136329800Smav			int8_t *, int8_t *);
137329800Smavstatic void	mwl_node_getmimoinfo(const struct ieee80211_node *,
138329800Smav			struct ieee80211_mimo_info *);
139329800Smavstatic int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
140329800Smavstatic void	mwl_rx_proc(void *, int);
141329800Smavstatic void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
142329800Smavstatic int	mwl_tx_setup(struct mwl_softc *, int, int);
143329800Smavstatic int	mwl_wme_update(struct ieee80211com *);
144329800Smavstatic void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
145329800Smavstatic void	mwl_tx_cleanup(struct mwl_softc *);
146329800Smavstatic uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
147329800Smavstatic int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
148329800Smav			     struct mwl_txbuf *, struct mbuf *);
149329800Smavstatic void	mwl_tx_proc(void *, int);
150329800Smavstatic int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
151329800Smavstatic void	mwl_draintxq(struct mwl_softc *);
152329800Smavstatic void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
153329800Smavstatic int	mwl_recv_action(struct ieee80211_node *,
154329800Smav			const struct ieee80211_frame *,
155329800Smav			const uint8_t *, const uint8_t *);
156329800Smavstatic int	mwl_addba_request(struct ieee80211_node *,
157329800Smav			struct ieee80211_tx_ampdu *, int dialogtoken,
158329800Smav			int baparamset, int batimeout);
159329800Smavstatic int	mwl_addba_response(struct ieee80211_node *,
160329800Smav			struct ieee80211_tx_ampdu *, int status,
161329800Smav			int baparamset, int batimeout);
162329800Smavstatic void	mwl_addba_stop(struct ieee80211_node *,
163329800Smav			struct ieee80211_tx_ampdu *);
164329800Smavstatic int	mwl_startrecv(struct mwl_softc *);
165329800Smavstatic MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
166329800Smav			struct ieee80211_channel *);
167329800Smavstatic int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
168329800Smavstatic void	mwl_scan_start(struct ieee80211com *);
169329800Smavstatic void	mwl_scan_end(struct ieee80211com *);
170329800Smavstatic void	mwl_set_channel(struct ieee80211com *);
171329800Smavstatic int	mwl_peerstadb(struct ieee80211_node *,
172329800Smav			int aid, int staid, MWL_HAL_PEERINFO *pi);
173329800Smavstatic int	mwl_localstadb(struct ieee80211vap *);
174329800Smavstatic int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
175329800Smavstatic int	allocstaid(struct mwl_softc *sc, int aid);
176329800Smavstatic void	delstaid(struct mwl_softc *sc, int staid);
177329800Smavstatic void	mwl_newassoc(struct ieee80211_node *, int);
178329800Smavstatic void	mwl_agestations(void *);
179329800Smavstatic int	mwl_setregdomain(struct ieee80211com *,
180329800Smav			struct ieee80211_regdomain *, int,
181329800Smav			struct ieee80211_channel []);
182329800Smavstatic void	mwl_getradiocaps(struct ieee80211com *, int, int *,
183329800Smav			struct ieee80211_channel []);
184329800Smavstatic int	mwl_getchannels(struct mwl_softc *);
185329800Smav
186329800Smavstatic void	mwl_sysctlattach(struct mwl_softc *);
187329800Smavstatic void	mwl_announce(struct mwl_softc *);
188329800Smav
/*
 * Run-time tunables under hw.mwl.*.  These size the descriptor/buffer
 * pools and throttle rx/tx processing; they are read by the DMA setup
 * and rx/tx processing paths.
 * NOTE(review): rxdesc is CTLFLAG_RW while all of its siblings are
 * CTLFLAG_RWTUN (settable from loader.conf) — confirm whether rxdesc
 * should also be boot-time tunable.
 */
SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");

static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
	    0, "rx descriptors allocated");
static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &mwl_rxbuf,
	    0, "rx buffers allocated");
static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RWTUN, &mwl_txbuf,
	    0, "tx buffers allocated");
static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &mwl_txcoalesce,
	    0, "tx buffers to send at once");
static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RWTUN, &mwl_rxquota,
	    0, "max rx buffers to process per interrupt");
static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RWTUN, &mwl_rxdmalow,
	    0, "min free rx buffers before restarting traffic");
209329800Smav
#ifdef MWL_DEBUG
/*
 * Debug support: sc->sc_debug is a bitmask of the MWL_DEBUG_* flags
 * below; DPRINTF/KEYPRINTF and the packet-dump predicates compile to
 * no-ops when MWL_DEBUG is not defined.
 */
static	int mwl_debug = 0;
SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RWTUN, &mwl_debug,
	    0, "control debugging printfs");
enum {
	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
	MWL_DEBUG_ANY		= 0xffffffff
};
/* true if frame wh is a management/beacon frame */
#define	IS_BEACON(wh) \
    ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
/* dump received frames, suppressing beacons unless RECV_ALL is set */
#define	IFF_DUMPPKTS_RECV(sc, wh) \
    ((sc->sc_debug & MWL_DEBUG_RECV) && \
      ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh)))
#define	IFF_DUMPPKTS_XMIT(sc) \
	(sc->sc_debug & MWL_DEBUG_XMIT)

#define	DPRINTF(sc, m, fmt, ...) do {				\
	if (sc->sc_debug & (m))					\
		printf(fmt, __VA_ARGS__);			\
} while (0)
#define	KEYPRINTF(sc, hk, mac) do {				\
	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
		mwl_keyprint(sc, __func__, hk, mac);		\
} while (0)
static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
#else
/* non-debug build: all debug hooks vanish, sc still "used" to quiet warnings */
#define	IFF_DUMPPKTS_RECV(sc, wh)	0
#define	IFF_DUMPPKTS_XMIT(sc)		0
#define	DPRINTF(sc, m, fmt, ...)	do { (void )sc; } while (0)
#define	KEYPRINTF(sc, k, mac)		do { (void )sc; } while (0)
#endif
257329800Smav
static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");

/*
 * Each packet has fixed front matter: a 2-byte length
 * of the payload, followed by a 4-address 802.11 header
 * (regardless of the actual header and always w/o any
 * QoS header).  The payload then follows.
 */
struct mwltxrec {
	uint16_t fwlen;				/* payload length, for the f/w */
	struct ieee80211_frame_addr4 wh;	/* 4-address 802.11 header */
} __packed;
270329800Smav
/*
 * Read/Write shorthands for accesses to BAR 0.  Note
 * that all BAR 1 operations are done in the "hal" and
 * there should be no reference to them here.
 */
#ifdef MWL_DEBUG
/* Read a 32-bit BAR 0 register (debug-only; production code never reads). */
static __inline uint32_t
RD4(struct mwl_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
}
#endif

/* Write a 32-bit value to a BAR 0 register. */
static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}
289329800Smav
/*
 * Bus-independent attach: bind the HAL, load firmware, fetch h/w
 * specs and channels, allocate DMA state, create the driver task
 * queue, and attach to net80211.  Returns 0 or an errno; on failure
 * partially-initialized state is unwound through the bad* labels and
 * sc_invalid is set so mwl_intr ignores the device.
 */
int
mwl_attach(uint16_t devid, struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	/*
	 * Setup the RX free list lock early, so it can be consistently
	 * removed.
	 */
	MWL_RXFREE_INIT(sc);

	mh = mwl_hal_attach(sc->sc_dev, devid,
	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
	if (mh == NULL) {
		device_printf(sc->sc_dev, "unable to attach HAL\n");
		error = EIO;
		goto bad;
	}
	sc->sc_mh = mh;
	/*
	 * Load firmware so we can get setup.  We arbitrarily
	 * pick station firmware; we'll re-load firmware as
	 * needed so setting up the wrong mode isn't a big deal.
	 */
	if (mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to setup builtin firmware\n");
		error = EIO;
		goto bad1;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		error = EIO;
		goto bad1;
	}
	error = mwl_getchannels(sc);
	if (error != 0)
		goto bad1;

	sc->sc_txantenna = 0;		/* h/w default */
	sc->sc_rxantenna = 0;		/* h/w default */
	sc->sc_invalid = 0;		/* ready to go, enable int handling */
	sc->sc_ageinterval = MWL_AGEINTERVAL;

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 * We immediately push the information to the firmware
	 * as otherwise it gets upset.
	 */
	error = mwl_dma_setup(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to setup descriptors: %d\n",
		    error);
		goto bad1;
	}
	error = mwl_setupdma(sc);	/* push to firmware */
	if (error != 0)			/* NB: mwl_setupdma prints msg */
		goto bad1;

	callout_init(&sc->sc_timer, 1);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
	mbufq_init(&sc->sc_snd, ifqmaxlen);

	/* NB: &sc->sc_tq is the enqueue context so the thread-backed
	 * taskqueue can locate itself via taskqueue_thread_enqueue */
	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", device_get_nameunit(sc->sc_dev));

	/* deferred work kicked from the interrupt handler */
	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);

	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
		device_printf(sc->sc_dev,
		    "unable to setup xmit queue for %s traffic!\n",
		     ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
#if 0
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
#endif
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_WDS		/* WDS supported */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WME		/* WME/WMM supported */
		| IEEE80211_C_BURST		/* xmit bursting supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
		| IEEE80211_C_DFS		/* DFS supported */
		;

	ic->ic_htcaps =
		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
#if MWL_AGGR_SIZE == 7935
		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#if 0
		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
#endif
		/* s/w capabilities */
		| IEEE80211_HTC_HT		/* HT operation */
		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
		| IEEE80211_HTC_SMPS		/* SMPS available */
		;

	/*
	 * Mark h/w crypto support.
	 * XXX no way to query h/w support.
	 */
	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
			  |  IEEE80211_CRYPTO_AES_CCM
			  |  IEEE80211_CRYPTO_TKIP
			  |  IEEE80211_CRYPTO_TKIPMIC
			  ;
	/*
	 * Transmit requires space in the packet for a special
	 * format transmit record and optional padding between
	 * this record and the payload.  Ask the net80211 layer
	 * to arrange this when encapsulating packets so we can
	 * add it efficiently.
	 */
	ic->ic_headroom = sizeof(struct mwltxrec) -
		sizeof(struct ieee80211_frame);

	IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->sc_hwspecs.macAddr);

	/* call MI attach routine. */
	ieee80211_ifattach(ic);
	ic->ic_setregdomain = mwl_setregdomain;
	ic->ic_getradiocaps = mwl_getradiocaps;
	/* override default methods */
	ic->ic_raw_xmit = mwl_raw_xmit;
	ic->ic_newassoc = mwl_newassoc;
	ic->ic_updateslot = mwl_updateslot;
	ic->ic_update_mcast = mwl_update_mcast;
	ic->ic_update_promisc = mwl_update_promisc;
	ic->ic_wme.wme_update = mwl_wme_update;
	ic->ic_transmit = mwl_transmit;
	ic->ic_ioctl = mwl_ioctl;
	ic->ic_parent = mwl_parent;

	ic->ic_node_alloc = mwl_node_alloc;
	/* NB: save the net80211 defaults so our hooks can chain to them */
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = mwl_node_cleanup;
	sc->sc_node_drain = ic->ic_node_drain;
	ic->ic_node_drain = mwl_node_drain;
	ic->ic_node_getsignal = mwl_node_getsignal;
	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;

	ic->ic_scan_start = mwl_scan_start;
	ic->ic_scan_end = mwl_scan_end;
	ic->ic_set_channel = mwl_set_channel;

	/* intercept A-MPDU action/BA handling, chaining to net80211 */
	sc->sc_recv_action = ic->ic_recv_action;
	ic->ic_recv_action = mwl_recv_action;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = mwl_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = mwl_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = mwl_addba_stop;

	ic->ic_vap_create = mwl_vap_create;
	ic->ic_vap_delete = mwl_vap_delete;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		MWL_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		MWL_RX_RADIOTAP_PRESENT);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	mwl_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	mwl_announce(sc);
	return 0;
bad2:
	mwl_dma_cleanup(sc);
bad1:
	mwl_hal_detach(mh);
bad:
	MWL_RXFREE_DESTROY(sc);
	sc->sc_invalid = 1;
	return error;
}
526
527int
528mwl_detach(struct mwl_softc *sc)
529{
530	struct ieee80211com *ic = &sc->sc_ic;
531
532	MWL_LOCK(sc);
533	mwl_stop(sc);
534	MWL_UNLOCK(sc);
535	/*
536	 * NB: the order of these is important:
537	 * o call the 802.11 layer before detaching the hal to
538	 *   insure callbacks into the driver to delete global
539	 *   key cache entries can be handled
540	 * o reclaim the tx queue data structures after calling
541	 *   the 802.11 layer as we'll get called back to reclaim
542	 *   node state and potentially want to use them
543	 * o to cleanup the tx queues the hal is called, so detach
544	 *   it last
545	 * Other than that, it's straightforward...
546	 */
547	ieee80211_ifdetach(ic);
548	callout_drain(&sc->sc_watchdog);
549	mwl_dma_cleanup(sc);
550	MWL_RXFREE_DESTROY(sc);
551	mwl_tx_cleanup(sc);
552	mwl_hal_detach(sc->sc_mh);
553	mbufq_drain(&sc->sc_snd);
554
555	return 0;
556}
557
558/*
559 * MAC address handling for multiple BSS on the same radio.
560 * The first vap uses the MAC address from the EEPROM.  For
561 * subsequent vap's we set the U/L bit (bit 1) in the MAC
562 * address and use the next six bits as an index.
563 */
564static void
565assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
566{
567	int i;
568
569	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
570		/* NB: we only do this if h/w supports multiple bssid */
571		for (i = 0; i < 32; i++)
572			if ((sc->sc_bssidmask & (1<<i)) == 0)
573				break;
574		if (i != 0)
575			mac[0] |= (i << 2)|0x2;
576	} else
577		i = 0;
578	sc->sc_bssidmask |= 1<<i;
579	if (i == 0)
580		sc->sc_nbssid0++;
581}
582
583static void
584reclaim_address(struct mwl_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
585{
586	int i = mac[0] >> 2;
587	if (i != 0 || --sc->sc_nbssid0 == 0)
588		sc->sc_bssidmask &= ~(1<<i);
589}
590
591static struct ieee80211vap *
592mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
593    enum ieee80211_opmode opmode, int flags,
594    const uint8_t bssid[IEEE80211_ADDR_LEN],
595    const uint8_t mac0[IEEE80211_ADDR_LEN])
596{
597	struct mwl_softc *sc = ic->ic_softc;
598	struct mwl_hal *mh = sc->sc_mh;
599	struct ieee80211vap *vap, *apvap;
600	struct mwl_hal_vap *hvap;
601	struct mwl_vap *mvp;
602	uint8_t mac[IEEE80211_ADDR_LEN];
603
604	IEEE80211_ADDR_COPY(mac, mac0);
605	switch (opmode) {
606	case IEEE80211_M_HOSTAP:
607	case IEEE80211_M_MBSS:
608		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
609			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
610		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
611		if (hvap == NULL) {
612			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
613				reclaim_address(sc, mac);
614			return NULL;
615		}
616		break;
617	case IEEE80211_M_STA:
618		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
619			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
620		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
621		if (hvap == NULL) {
622			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
623				reclaim_address(sc, mac);
624			return NULL;
625		}
626		/* no h/w beacon miss support; always use s/w */
627		flags |= IEEE80211_CLONE_NOBEACONS;
628		break;
629	case IEEE80211_M_WDS:
630		hvap = NULL;		/* NB: we use associated AP vap */
631		if (sc->sc_napvaps == 0)
632			return NULL;	/* no existing AP vap */
633		break;
634	case IEEE80211_M_MONITOR:
635		hvap = NULL;
636		break;
637	case IEEE80211_M_IBSS:
638	case IEEE80211_M_AHDEMO:
639	default:
640		return NULL;
641	}
642
643	mvp = malloc(sizeof(struct mwl_vap), M_80211_VAP, M_WAITOK | M_ZERO);
644	mvp->mv_hvap = hvap;
645	if (opmode == IEEE80211_M_WDS) {
646		/*
647		 * WDS vaps must have an associated AP vap; find one.
648		 * XXX not right.
649		 */
650		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
651			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
652				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
653				break;
654			}
655		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
656	}
657	vap = &mvp->mv_vap;
658	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
659	/* override with driver methods */
660	mvp->mv_newstate = vap->iv_newstate;
661	vap->iv_newstate = mwl_newstate;
662	vap->iv_max_keyix = 0;	/* XXX */
663	vap->iv_key_alloc = mwl_key_alloc;
664	vap->iv_key_delete = mwl_key_delete;
665	vap->iv_key_set = mwl_key_set;
666#ifdef MWL_HOST_PS_SUPPORT
667	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
668		vap->iv_update_ps = mwl_update_ps;
669		mvp->mv_set_tim = vap->iv_set_tim;
670		vap->iv_set_tim = mwl_set_tim;
671	}
672#endif
673	vap->iv_reset = mwl_reset;
674	vap->iv_update_beacon = mwl_beacon_update;
675
676	/* override max aid so sta's cannot assoc when we're out of sta id's */
677	vap->iv_max_aid = MWL_MAXSTAID;
678	/* override default A-MPDU rx parameters */
679	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
680	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;
681
682	/* complete setup */
683	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status,
684	    mac);
685
686	switch (vap->iv_opmode) {
687	case IEEE80211_M_HOSTAP:
688	case IEEE80211_M_MBSS:
689	case IEEE80211_M_STA:
690		/*
691		 * Setup sta db entry for local address.
692		 */
693		mwl_localstadb(vap);
694		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
695		    vap->iv_opmode == IEEE80211_M_MBSS)
696			sc->sc_napvaps++;
697		else
698			sc->sc_nstavaps++;
699		break;
700	case IEEE80211_M_WDS:
701		sc->sc_nwdsvaps++;
702		break;
703	default:
704		break;
705	}
706	/*
707	 * Setup overall operating mode.
708	 */
709	if (sc->sc_napvaps)
710		ic->ic_opmode = IEEE80211_M_HOSTAP;
711	else if (sc->sc_nstavaps)
712		ic->ic_opmode = IEEE80211_M_STA;
713	else
714		ic->ic_opmode = opmode;
715
716	return vap;
717}
718
719static void
720mwl_vap_delete(struct ieee80211vap *vap)
721{
722	struct mwl_vap *mvp = MWL_VAP(vap);
723	struct mwl_softc *sc = vap->iv_ic->ic_softc;
724	struct mwl_hal *mh = sc->sc_mh;
725	struct mwl_hal_vap *hvap = mvp->mv_hvap;
726	enum ieee80211_opmode opmode = vap->iv_opmode;
727
728	/* XXX disallow ap vap delete if WDS still present */
729	if (sc->sc_running) {
730		/* quiesce h/w while we remove the vap */
731		mwl_hal_intrset(mh, 0);		/* disable interrupts */
732	}
733	ieee80211_vap_detach(vap);
734	switch (opmode) {
735	case IEEE80211_M_HOSTAP:
736	case IEEE80211_M_MBSS:
737	case IEEE80211_M_STA:
738		KASSERT(hvap != NULL, ("no hal vap handle"));
739		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
740		mwl_hal_delvap(hvap);
741		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
742			sc->sc_napvaps--;
743		else
744			sc->sc_nstavaps--;
745		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
746		reclaim_address(sc, vap->iv_myaddr);
747		break;
748	case IEEE80211_M_WDS:
749		sc->sc_nwdsvaps--;
750		break;
751	default:
752		break;
753	}
754	mwl_cleartxq(sc, vap);
755	free(mvp, M_80211_VAP);
756	if (sc->sc_running)
757		mwl_hal_intrset(mh, sc->sc_imask);
758}
759
760void
761mwl_suspend(struct mwl_softc *sc)
762{
763
764	MWL_LOCK(sc);
765	mwl_stop(sc);
766	MWL_UNLOCK(sc);
767}
768
769void
770mwl_resume(struct mwl_softc *sc)
771{
772	int error = EDOOFUS;
773
774	MWL_LOCK(sc);
775	if (sc->sc_ic.ic_nrunning > 0)
776		error = mwl_init(sc);
777	MWL_UNLOCK(sc);
778
779	if (error == 0)
780		ieee80211_start_all(&sc->sc_ic);	/* start all vap's */
781}
782
783void
784mwl_shutdown(void *arg)
785{
786	struct mwl_softc *sc = arg;
787
788	MWL_LOCK(sc);
789	mwl_stop(sc);
790	MWL_UNLOCK(sc);
791}
792
793/*
794 * Interrupt handler.  Most of the actual processing is deferred.
795 */
796void
797mwl_intr(void *arg)
798{
799	struct mwl_softc *sc = arg;
800	struct mwl_hal *mh = sc->sc_mh;
801	uint32_t status;
802
803	if (sc->sc_invalid) {
804		/*
805		 * The hardware is not ready/present, don't touch anything.
806		 * Note this can happen early on if the IRQ is shared.
807		 */
808		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
809		return;
810	}
811	/*
812	 * Figure out the reason(s) for the interrupt.
813	 */
814	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
815	if (status == 0)			/* must be a shared irq */
816		return;
817
818	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
819	    __func__, status, sc->sc_imask);
820	if (status & MACREG_A2HRIC_BIT_RX_RDY)
821		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
822	if (status & MACREG_A2HRIC_BIT_TX_DONE)
823		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
824	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
825		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
826	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
827		mwl_hal_cmddone(mh);
828	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
829		;
830	}
831	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
832		/* TKIP ICV error */
833		sc->sc_stats.mst_rx_badtkipicv++;
834	}
835	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
836		/* 11n aggregation queue is empty, re-fill */
837		;
838	}
839	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
840		;
841	}
842	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
843		/* radar detected, process event */
844		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
845	}
846	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
847		/* DFS channel switch */
848		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
849	}
850}
851
852static void
853mwl_radar_proc(void *arg, int pending)
854{
855	struct mwl_softc *sc = arg;
856	struct ieee80211com *ic = &sc->sc_ic;
857
858	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
859	    __func__, pending);
860
861	sc->sc_stats.mst_radardetect++;
862	/* XXX stop h/w BA streams? */
863
864	IEEE80211_LOCK(ic);
865	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
866	IEEE80211_UNLOCK(ic);
867}
868
869static void
870mwl_chanswitch_proc(void *arg, int pending)
871{
872	struct mwl_softc *sc = arg;
873	struct ieee80211com *ic = &sc->sc_ic;
874
875	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
876	    __func__, pending);
877
878	IEEE80211_LOCK(ic);
879	sc->sc_csapending = 0;
880	ieee80211_csa_completeswitch(ic);
881	IEEE80211_UNLOCK(ic);
882}
883
884static void
885mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
886{
887	struct ieee80211_node *ni = sp->data[0];
888
889	/* send DELBA and drop the stream */
890	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
891}
892
/*
 * Deferred BA watchdog processing.  The firmware supplies a
 * bitmap identifying stalled BA stream(s); look them up and
 * tear them down via mwl_bawatchdog.
 */
static void
mwl_bawatchdog_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_BASTREAM *sp;
	uint8_t bitmap, n;

	sc->sc_stats.mst_bawatchdog++;

	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: could not get bitmap\n", __func__);
		sc->sc_stats.mst_bawatchdog_failed++;
		return;
	}
	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
	/*
	 * NOTE(review): 0xff presumably means "all streams" and 0xaa
	 * "nothing to do"; otherwise the value is a single stream
	 * index -- confirm against firmware documentation.
	 */
	if (bitmap == 0xff) {
		n = 0;
		/* disable all ba streams */
		for (bitmap = 0; bitmap < 8; bitmap++) {
			sp = mwl_hal_bastream_lookup(mh, bitmap);
			if (sp != NULL) {
				mwl_bawatchdog(sp);
				n++;
			}
		}
		if (n == 0) {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA streams found\n", __func__);
			sc->sc_stats.mst_bawatchdog_empty++;
		}
	} else if (bitmap != 0xaa) {
		/* disable a single ba stream */
		sp = mwl_hal_bastream_lookup(mh, bitmap);
		if (sp != NULL) {
			mwl_bawatchdog(sp);
		} else {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA stream %d\n", __func__, bitmap);
			sc->sc_stats.mst_bawatchdog_notfound++;
		}
	}
}
937
938/*
939 * Convert net80211 channel to a HAL channel.
940 */
941static void
942mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
943{
944	hc->channel = chan->ic_ieee;
945
946	*(uint32_t *)&hc->channelFlags = 0;
947	if (IEEE80211_IS_CHAN_2GHZ(chan))
948		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
949	else if (IEEE80211_IS_CHAN_5GHZ(chan))
950		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
951	if (IEEE80211_IS_CHAN_HT40(chan)) {
952		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
953		if (IEEE80211_IS_CHAN_HT40U(chan))
954			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
955		else
956			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
957	} else
958		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
959	/* XXX 10MHz channels */
960}
961
962/*
963 * Inform firmware of our tx/rx dma setup.  The BAR 0
964 * writes below are for compatibility with older firmware.
965 * For current firmware we send this information with a
966 * cmd block via mwl_hal_sethwdma.
967 */
968static int
969mwl_setupdma(struct mwl_softc *sc)
970{
971	int error, i;
972
973	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
974	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
975	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);
976
977	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
978		struct mwl_txq *txq = &sc->sc_txq[i];
979		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
980		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
981	}
982	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
983	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;
984
985	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
986	if (error != 0) {
987		device_printf(sc->sc_dev,
988		    "unable to setup tx/rx dma; hal status %u\n", error);
989		/* XXX */
990	}
991	return error;
992}
993
994/*
995 * Inform firmware of tx rate parameters.
996 * Called after a channel change.
997 */
998static int
999mwl_setcurchanrates(struct mwl_softc *sc)
1000{
1001	struct ieee80211com *ic = &sc->sc_ic;
1002	const struct ieee80211_rateset *rs;
1003	MWL_HAL_TXRATE rates;
1004
1005	memset(&rates, 0, sizeof(rates));
1006	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1007	/* rate used to send management frames */
1008	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1009	/* rate used to send multicast frames */
1010	rates.McastRate = rates.MgtRate;
1011
1012	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1013}
1014
1015/*
1016 * Inform firmware of tx rate parameters.  Called whenever
1017 * user-settable params change and after a channel change.
1018 */
1019static int
1020mwl_setrates(struct ieee80211vap *vap)
1021{
1022	struct mwl_vap *mvp = MWL_VAP(vap);
1023	struct ieee80211_node *ni = vap->iv_bss;
1024	const struct ieee80211_txparam *tp = ni->ni_txparms;
1025	MWL_HAL_TXRATE rates;
1026
1027	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1028
1029	/*
1030	 * Update the h/w rate map.
1031	 * NB: 0x80 for MCS is passed through unchanged
1032	 */
1033	memset(&rates, 0, sizeof(rates));
1034	/* rate used to send management frames */
1035	rates.MgtRate = tp->mgmtrate;
1036	/* rate used to send multicast frames */
1037	rates.McastRate = tp->mcastrate;
1038
1039	/* while here calculate EAPOL fixed rate cookie */
1040	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
1041
1042	return mwl_hal_settxrate(mvp->mv_hvap,
1043	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
1044		RATE_FIXED : RATE_AUTO, &rates);
1045}
1046
1047/*
1048 * Setup a fixed xmit rate cookie for EAPOL frames.
1049 */
1050static void
1051mwl_seteapolformat(struct ieee80211vap *vap)
1052{
1053	struct mwl_vap *mvp = MWL_VAP(vap);
1054	struct ieee80211_node *ni = vap->iv_bss;
1055	enum ieee80211_phymode mode;
1056	uint8_t rate;
1057
1058	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1059
1060	mode = ieee80211_chan2mode(ni->ni_chan);
1061	/*
1062	 * Use legacy rates when operating a mixed HT+non-HT bss.
1063	 * NB: this may violate POLA for sta and wds vap's.
1064	 */
1065	if (mode == IEEE80211_MODE_11NA &&
1066	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1067		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1068	else if (mode == IEEE80211_MODE_11NG &&
1069	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1070		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1071	else
1072		rate = vap->iv_txparms[mode].mgmtrate;
1073
1074	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1075}
1076
1077/*
1078 * Map SKU+country code to region code for radar bin'ing.
1079 */
1080static int
1081mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1082{
1083	switch (rd->regdomain) {
1084	case SKU_FCC:
1085	case SKU_FCC3:
1086		return DOMAIN_CODE_FCC;
1087	case SKU_CA:
1088		return DOMAIN_CODE_IC;
1089	case SKU_ETSI:
1090	case SKU_ETSI2:
1091	case SKU_ETSI3:
1092		if (rd->country == CTRY_SPAIN)
1093			return DOMAIN_CODE_SPAIN;
1094		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1095			return DOMAIN_CODE_FRANCE;
1096		/* XXX force 1.3.1 radar type */
1097		return DOMAIN_CODE_ETSI_131;
1098	case SKU_JAPAN:
1099		return DOMAIN_CODE_MKK;
1100	case SKU_ROW:
1101		return DOMAIN_CODE_DGT;	/* Taiwan */
1102	case SKU_APAC:
1103	case SKU_APAC2:
1104	case SKU_APAC3:
1105		return DOMAIN_CODE_AUS;	/* Australia */
1106	}
1107	/* XXX KOREA? */
1108	return DOMAIN_CODE_FCC;			/* XXX? */
1109}
1110
/*
 * Push vap-independent configuration to the firmware after a
 * (re)start: antennas, radio, WMM, current channel, rate
 * adaptation, optimization level and region code.
 * Always returns 1 (success) for use as a boolean by mwl_init.
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	return 1;
}
1134
/*
 * Bring the device up: stop any previous activity, push
 * vap-independent state to the firmware, start the receive
 * path, then enable interrupts and the watchdog.
 * Returns 0 on success or an errno.  Caller holds the softc lock.
 */
static int
mwl_init(struct mwl_softc *sc)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop(sc);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		device_printf(sc->sc_dev, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.  MAC_EVENT and QUEUE_EMPTY are
	 * deliberately compiled out (see matching mwl_intr cases).
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	sc->sc_running = 1;
	mwl_hal_intrset(mh, sc->sc_imask);
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);

	return 0;
}
1191
1192static void
1193mwl_stop(struct mwl_softc *sc)
1194{
1195
1196	MWL_LOCK_ASSERT(sc);
1197	if (sc->sc_running) {
1198		/*
1199		 * Shutdown the hardware and driver.
1200		 */
1201		sc->sc_running = 0;
1202		callout_stop(&sc->sc_watchdog);
1203		sc->sc_tx_timer = 0;
1204		mwl_draintxq(sc);
1205	}
1206}
1207
/*
 * Reload per-vap firmware state: rates (when RUNning), RTS
 * threshold, HT short-GI, HT protection; for ap/mesh/ibss vaps
 * in RUN state also re-setup beacons.
 * Returns 0 or an errno from mwl_beacon_setup.
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_MBSS ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}
1237
1238/*
1239 * Reset the hardware w/o losing operational state.
1240 * Used to to reset or reload hardware state for a vap.
1241 */
1242static int
1243mwl_reset(struct ieee80211vap *vap, u_long cmd)
1244{
1245	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1246	int error = 0;
1247
1248	if (hvap != NULL) {			/* WDS, MONITOR, etc. */
1249		struct ieee80211com *ic = vap->iv_ic;
1250		struct mwl_softc *sc = ic->ic_softc;
1251		struct mwl_hal *mh = sc->sc_mh;
1252
1253		/* XXX handle DWDS sta vap change */
1254		/* XXX do we need to disable interrupts? */
1255		mwl_hal_intrset(mh, 0);		/* disable interrupts */
1256		error = mwl_reset_vap(vap, vap->iv_state);
1257		mwl_hal_intrset(mh, sc->sc_imask);
1258	}
1259	return error;
1260}
1261
1262/*
1263 * Allocate a tx buffer for sending a frame.  The
1264 * packet is assumed to have the WME AC stored so
1265 * we can use it to select the appropriate h/w queue.
1266 */
1267static struct mwl_txbuf *
1268mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1269{
1270	struct mwl_txbuf *bf;
1271
1272	/*
1273	 * Grab a TX buffer and associated resources.
1274	 */
1275	MWL_TXQ_LOCK(txq);
1276	bf = STAILQ_FIRST(&txq->free);
1277	if (bf != NULL) {
1278		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1279		txq->nfree--;
1280	}
1281	MWL_TXQ_UNLOCK(txq);
1282	if (bf == NULL)
1283		DPRINTF(sc, MWL_DEBUG_XMIT,
1284		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1285	return bf;
1286}
1287
1288/*
1289 * Return a tx buffer to the queue it came from.  Note there
1290 * are two cases because we must preserve the order of buffers
1291 * as it reflects the fixed order of descriptors in memory
1292 * (the firmware pre-fetches descriptors so we cannot reorder).
1293 */
1294static void
1295mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1296{
1297	bf->bf_m = NULL;
1298	bf->bf_node = NULL;
1299	MWL_TXQ_LOCK(txq);
1300	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1301	txq->nfree++;
1302	MWL_TXQ_UNLOCK(txq);
1303}
1304
1305static void
1306mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
1307{
1308	bf->bf_m = NULL;
1309	bf->bf_node = NULL;
1310	MWL_TXQ_LOCK(txq);
1311	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
1312	txq->nfree++;
1313	MWL_TXQ_UNLOCK(txq);
1314}
1315
1316static int
1317mwl_transmit(struct ieee80211com *ic, struct mbuf *m)
1318{
1319	struct mwl_softc *sc = ic->ic_softc;
1320	int error;
1321
1322	MWL_LOCK(sc);
1323	if (!sc->sc_running) {
1324		MWL_UNLOCK(sc);
1325		return (ENXIO);
1326	}
1327	error = mbufq_enqueue(&sc->sc_snd, m);
1328	if (error) {
1329		MWL_UNLOCK(sc);
1330		return (error);
1331	}
1332	mwl_start(sc);
1333	MWL_UNLOCK(sc);
1334	return (0);
1335}
1336
/*
 * Drain the send queue: map each frame to its WME h/w queue,
 * hand it to mwl_tx_start, and periodically poke the firmware
 * to fetch the new descriptors.  Caller holds the softc lock.
 */
static void
mwl_start(struct mwl_softc *sc)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	MWL_LOCK_ASSERT(sc);
	if (!sc->sc_running || sc->sc_invalid)
		return;
	nqueued = 0;
	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		/*
		 * Grab the node for the destination.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			/*
			 * Out of buffers: either stop (NODROP, leaving
			 * the rest queued) or drop this frame and keep
			 * going.  The frame/node refs are released
			 * either way.
			 */
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			sc->sc_stats.mst_tx_qstop++;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			if_inc_counter(ni->ni_vap->iv_ifp,
			    IFCOUNTER_OERRORS, 1);
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host. So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1414
/*
 * Transmit a raw (caller-formed) 802.11 frame.  On every error
 * path the node reference is released; the mbuf is freed on the
 * early failures and consumed by mwl_tx_start otherwise.
 * Returns 0, ENETDOWN, ENOBUFS, or EIO.
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if (!sc->sc_running || sc->sc_invalid) {
		ieee80211_free_node(ni);
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		ieee80211_free_node(ni);
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		mwl_puttxbuf_head(txq, bf);

		ieee80211_free_node(ni);
		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host. So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1470
1471static int
1472mwl_media_change(struct ifnet *ifp)
1473{
1474	struct ieee80211vap *vap = ifp->if_softc;
1475	int error;
1476
1477	error = ieee80211_media_change(ifp);
1478	/* NB: only the fixed rate can change and that doesn't need a reset */
1479	if (error == ENETRESET) {
1480		mwl_setrates(vap);
1481		error = 0;
1482	}
1483	return error;
1484}
1485
1486#ifdef MWL_DEBUG
/*
 * Debug dump of a hal key record: index, cipher, key bytes,
 * mac address, TKIP MICs (when applicable), and flags.
 */
static void
mwl_keyprint(struct mwl_softc *sc, const char *tag,
	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	static const char *ciphers[] = {
		"WEP",
		"TKIP",
		"AES-CCM",
	};
	int i, n;

	/* NOTE(review): assumes keyTypeId is always 0..2 -- verify */
	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
	for (i = 0, n = hk->keyLen; i < n; i++)
		printf(" %02x", hk->key.aes[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
		printf(" %s", "rxmic");
		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
			printf(" %02x", hk->key.tkip.rxMic[i]);
		printf(" txmic");
		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
			printf(" %02x", hk->key.tkip.txMic[i]);
	}
	printf(" flags 0x%x\n", hk->keyFlags);
}
1512#endif
1513
1514/*
1515 * Allocate a key cache slot for a unicast key.  The
1516 * firmware handles key allocation and every station is
1517 * guaranteed key space so we are always successful.
1518 */
1519static int
1520mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
1521	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1522{
1523	struct mwl_softc *sc = vap->iv_ic->ic_softc;
1524
1525	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
1526	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
1527		if (!(&vap->iv_nw_keys[0] <= k &&
1528		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
1529			/* should not happen */
1530			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1531				"%s: bogus group key\n", __func__);
1532			return 0;
1533		}
1534		/* give the caller what they requested */
1535		*keyix = *rxkeyix = k - vap->iv_nw_keys;
1536	} else {
1537		/*
1538		 * Firmware handles key allocation.
1539		 */
1540		*keyix = *rxkeyix = 0;
1541	}
1542	return 1;
1543}
1544
1545/*
1546 * Delete a key entry allocated by mwl_key_alloc.
1547 */
1548static int
1549mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
1550{
1551	struct mwl_softc *sc = vap->iv_ic->ic_softc;
1552	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1553	MWL_HAL_KEYVAL hk;
1554	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
1555	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1556
1557	if (hvap == NULL) {
1558		if (vap->iv_opmode != IEEE80211_M_WDS) {
1559			/* XXX monitor mode? */
1560			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1561			    "%s: no hvap for opmode %d\n", __func__,
1562			    vap->iv_opmode);
1563			return 0;
1564		}
1565		hvap = MWL_VAP(vap)->mv_ap_hvap;
1566	}
1567
1568	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
1569	    __func__, k->wk_keyix);
1570
1571	memset(&hk, 0, sizeof(hk));
1572	hk.keyIndex = k->wk_keyix;
1573	switch (k->wk_cipher->ic_cipher) {
1574	case IEEE80211_CIPHER_WEP:
1575		hk.keyTypeId = KEY_TYPE_ID_WEP;
1576		break;
1577	case IEEE80211_CIPHER_TKIP:
1578		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1579		break;
1580	case IEEE80211_CIPHER_AES_CCM:
1581		hk.keyTypeId = KEY_TYPE_ID_AES;
1582		break;
1583	default:
1584		/* XXX should not happen */
1585		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1586		    __func__, k->wk_cipher->ic_cipher);
1587		return 0;
1588	}
1589	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
1590}
1591
1592static __inline int
1593addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1594{
1595	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1596		if (k->wk_flags & IEEE80211_KEY_XMIT)
1597			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1598		if (k->wk_flags & IEEE80211_KEY_RECV)
1599			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1600		return 1;
1601	} else
1602		return 0;
1603}
1604
1605/*
1606 * Set the key cache contents for the specified key.  Key cache
1607 * slot(s) must already have been allocated by mwl_key_alloc.
1608 */
1609static int
1610mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
1611{
1612	return (_mwl_key_set(vap, k, k->wk_macaddr));
1613}
1614
/*
 * Plumb a key into the firmware key cache.  Builds a hal key
 * record for the key's cipher and writes it to the appropriate
 * sta db entry; the target mac address convention differs from
 * net80211/hostapd/wpa_supplicant (see below).
 * Returns 1 on success, 0 on failure (net80211 convention).
 */
static int
_mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
	const uint8_t mac[IEEE80211_ADDR_LEN])
{
#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
#define	IEEE80211_IS_STATICKEY(k) \
	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
	 (GRPXMIT|IEEE80211_KEY_RECV))
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	const uint8_t *macaddr;
	MWL_HAL_KEYVAL hk;

	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
		("s/w crypto set?"));

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* WDS vaps borrow the parent ap's hal vap */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}
	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (cip->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		hk.keyLen = k->wk_keylen;
		if (k->wk_keyix == vap->iv_def_txkey)
			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
		if (!IEEE80211_IS_STATICKEY(k)) {
			/* NB: WEP is never used for the PTK */
			(void) addgroupflags(&hk, k);
		}
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		hk.keyLen = k->wk_keylen;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	/*
	 * NB: tkip mic keys get copied here too; the layout
	 *     just happens to match that in ieee80211_key.
	 */
	memcpy(hk.key.aes, k->wk_key, hk.keyLen);

	/*
	 * Locate address of sta db entry for writing key;
	 * the convention unfortunately is somewhat different
	 * than how net80211, hostapd, and wpa_supplicant think.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		/*
		 * NB: keys plumbed before the sta reaches AUTH state
		 * will be discarded or written to the wrong sta db
		 * entry because iv_bss is meaningless.  This is ok
		 * (right now) because we handle deferred plumbing of
		 * WEP keys when the sta reaches AUTH state.
		 */
		macaddr = vap->iv_bss->ni_bssid;
		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
			/* XXX plumb to local sta db too for static key wep */
			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
		}
	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
	    vap->iv_state != IEEE80211_S_RUN) {
		/*
		 * Prior to RUN state a WDS vap will not have its BSS
		 * node setup so we would plumb the key to the wrong
		 * mac address (it'd be our local address).  Workaround
		 * this for the moment by grabbing the correct address.
		 */
		macaddr = vap->iv_des_bssid;
	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
		macaddr = vap->iv_myaddr;
	else
		macaddr = mac;
	KEYPRINTF(sc, &hk, macaddr);
	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
#undef IEEE80211_IS_STATICKEY
#undef GRPXMIT
}
1719
1720/*
1721 * Set the multicast filter contents into the hardware.
1722 * XXX f/w has no support; just defer to the os.
1723 */
1724static void
1725mwl_setmcastfilter(struct mwl_softc *sc)
1726{
1727#if 0
1728	struct ether_multi *enm;
1729	struct ether_multistep estep;
1730	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
1731	uint8_t *mp;
1732	int nmc;
1733
1734	mp = macs;
1735	nmc = 0;
1736	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
1737	while (enm != NULL) {
1738		/* XXX Punt on ranges. */
1739		if (nmc == MWL_HAL_MCAST_MAX ||
1740		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
1741			ifp->if_flags |= IFF_ALLMULTI;
1742			return;
1743		}
1744		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
1745		mp += IEEE80211_ADDR_LEN, nmc++;
1746		ETHER_NEXT_MULTI(estep, enm);
1747	}
1748	ifp->if_flags &= ~IFF_ALLMULTI;
1749	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
1750#endif
1751}
1752
1753static int
1754mwl_mode_init(struct mwl_softc *sc)
1755{
1756	struct ieee80211com *ic = &sc->sc_ic;
1757	struct mwl_hal *mh = sc->sc_mh;
1758
1759	/*
1760	 * NB: Ignore promisc in hostap mode; it's set by the
1761	 * bridge.  This is wrong but we have no way to
1762	 * identify internal requests (from the bridge)
1763	 * versus external requests such as for tcpdump.
1764	 */
1765	mwl_hal_setpromisc(mh, ic->ic_promisc > 0 &&
1766	    ic->ic_opmode != IEEE80211_M_HOSTAP);
1767	mwl_setmcastfilter(sc);
1768
1769	return 0;
1770}
1771
1772/*
1773 * Callback from the 802.11 layer after a multicast state change.
1774 */
1775static void
1776mwl_update_mcast(struct ieee80211com *ic)
1777{
1778	struct mwl_softc *sc = ic->ic_softc;
1779
1780	mwl_setmcastfilter(sc);
1781}
1782
1783/*
1784 * Callback from the 802.11 layer after a promiscuous mode change.
1785 * Note this interface does not check the operating mode as this
1786 * is an internal callback and we are expected to honor the current
1787 * state (e.g. this is used for setting the interface in promiscuous
1788 * mode when operating in hostap mode to do ACS).
1789 */
1790static void
1791mwl_update_promisc(struct ieee80211com *ic)
1792{
1793	struct mwl_softc *sc = ic->ic_softc;
1794
1795	mwl_hal_setpromisc(sc->sc_mh, ic->ic_promisc > 0);
1796}
1797
1798/*
1799 * Callback from the 802.11 layer to update the slot time
1800 * based on the current setting.  We use it to notify the
1801 * firmware of ERP changes and the f/w takes care of things
1802 * like slot time and preamble.
1803 */
1804static void
1805mwl_updateslot(struct ieee80211com *ic)
1806{
1807	struct mwl_softc *sc = ic->ic_softc;
1808	struct mwl_hal *mh = sc->sc_mh;
1809	int prot;
1810
1811	/* NB: can be called early; suppress needless cmds */
1812	if (!sc->sc_running)
1813		return;
1814
1815	/*
1816	 * Calculate the ERP flags.  The firwmare will use
1817	 * this to carry out the appropriate measures.
1818	 */
1819	prot = 0;
1820	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
1821		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
1822			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
1823		if (ic->ic_flags & IEEE80211_F_USEPROT)
1824			prot |= IEEE80211_ERP_USE_PROTECTION;
1825		if (ic->ic_flags & IEEE80211_F_USEBARKER)
1826			prot |= IEEE80211_ERP_LONG_PREAMBLE;
1827	}
1828
1829	DPRINTF(sc, MWL_DEBUG_RESET,
1830	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
1831	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
1832	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
1833	    ic->ic_flags);
1834
1835	mwl_hal_setgprot(mh, prot);
1836}
1837
1838/*
1839 * Setup the beacon frame.
1840 */
1841static int
1842mwl_beacon_setup(struct ieee80211vap *vap)
1843{
1844	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1845	struct ieee80211_node *ni = vap->iv_bss;
1846	struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off;
1847	struct mbuf *m;
1848
1849	m = ieee80211_beacon_alloc(ni, bo);
1850	if (m == NULL)
1851		return ENOBUFS;
1852	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1853	m_free(m);
1854
1855	return 0;
1856}
1857
1858/*
1859 * Update the beacon frame in response to a change.
1860 */
1861static void
1862mwl_beacon_update(struct ieee80211vap *vap, int item)
1863{
1864	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1865	struct ieee80211com *ic = vap->iv_ic;
1866
1867	KASSERT(hvap != NULL, ("no beacon"));
1868	switch (item) {
1869	case IEEE80211_BEACON_ERP:
1870		mwl_updateslot(ic);
1871		break;
1872	case IEEE80211_BEACON_HTINFO:
1873		mwl_hal_setnprotmode(hvap,
1874		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1875		break;
1876	case IEEE80211_BEACON_CAPS:
1877	case IEEE80211_BEACON_WME:
1878	case IEEE80211_BEACON_APPIE:
1879	case IEEE80211_BEACON_CSA:
1880		break;
1881	case IEEE80211_BEACON_TIM:
1882		/* NB: firmware always forms TIM */
1883		return;
1884	}
1885	/* XXX retain beacon frame and update */
1886	mwl_beacon_setup(vap);
1887}
1888
1889static void
1890mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1891{
1892	bus_addr_t *paddr = (bus_addr_t*) arg;
1893	KASSERT(error == 0, ("error %u on bus_dma callback", error));
1894	*paddr = segs->ds_addr;
1895}
1896
1897#ifdef MWL_HOST_PS_SUPPORT
1898/*
1899 * Handle power save station occupancy changes.
1900 */
1901static void
1902mwl_update_ps(struct ieee80211vap *vap, int nsta)
1903{
1904	struct mwl_vap *mvp = MWL_VAP(vap);
1905
1906	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
1907		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
1908	mvp->mv_last_ps_sta = nsta;
1909}
1910
1911/*
1912 * Handle associated station power save state changes.
1913 */
1914static int
1915mwl_set_tim(struct ieee80211_node *ni, int set)
1916{
1917	struct ieee80211vap *vap = ni->ni_vap;
1918	struct mwl_vap *mvp = MWL_VAP(vap);
1919
1920	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1921		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1922		    IEEE80211_AID(ni->ni_associd), set);
1923		return 1;
1924	} else
1925		return 0;
1926}
1927#endif /* MWL_HOST_PS_SUPPORT */
1928
1929static int
1930mwl_desc_setup(struct mwl_softc *sc, const char *name,
1931	struct mwl_descdma *dd,
1932	int nbuf, size_t bufsize, int ndesc, size_t descsize)
1933{
1934	uint8_t *ds;
1935	int error;
1936
1937	DPRINTF(sc, MWL_DEBUG_RESET,
1938	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
1939	    __func__, name, nbuf, (uintmax_t) bufsize,
1940	    ndesc, (uintmax_t) descsize);
1941
1942	dd->dd_name = name;
1943	dd->dd_desc_len = nbuf * ndesc * descsize;
1944
1945	/*
1946	 * Setup DMA descriptor area.
1947	 */
1948	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
1949		       PAGE_SIZE, 0,		/* alignment, bounds */
1950		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1951		       BUS_SPACE_MAXADDR,	/* highaddr */
1952		       NULL, NULL,		/* filter, filterarg */
1953		       dd->dd_desc_len,		/* maxsize */
1954		       1,			/* nsegments */
1955		       dd->dd_desc_len,		/* maxsegsize */
1956		       BUS_DMA_ALLOCNOW,	/* flags */
1957		       NULL,			/* lockfunc */
1958		       NULL,			/* lockarg */
1959		       &dd->dd_dmat);
1960	if (error != 0) {
1961		device_printf(sc->sc_dev, "cannot allocate %s DMA tag\n", dd->dd_name);
1962		return error;
1963	}
1964
1965	/* allocate descriptors */
1966	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
1967				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
1968				 &dd->dd_dmamap);
1969	if (error != 0) {
1970		device_printf(sc->sc_dev, "unable to alloc memory for %u %s descriptors, "
1971			"error %u\n", nbuf * ndesc, dd->dd_name, error);
1972		goto fail1;
1973	}
1974
1975	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
1976				dd->dd_desc, dd->dd_desc_len,
1977				mwl_load_cb, &dd->dd_desc_paddr,
1978				BUS_DMA_NOWAIT);
1979	if (error != 0) {
1980		device_printf(sc->sc_dev, "unable to map %s descriptors, error %u\n",
1981			dd->dd_name, error);
1982		goto fail2;
1983	}
1984
1985	ds = dd->dd_desc;
1986	memset(ds, 0, dd->dd_desc_len);
1987	DPRINTF(sc, MWL_DEBUG_RESET,
1988	    "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n",
1989	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
1990	    (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
1991
1992	return 0;
1993fail2:
1994	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
1995fail1:
1996	bus_dma_tag_destroy(dd->dd_dmat);
1997	memset(dd, 0, sizeof(*dd));
1998	return error;
1999#undef DS2PHYS
2000}
2001
/*
 * Undo mwl_desc_setup: unload the DMA map, free the descriptor
 * memory and destroy the tag — in that order.  dd is zeroed so
 * callers that gate on dd_desc_len treat it as torn down.
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	memset(dd, 0, sizeof(*dd));
}
2011
2012/*
2013 * Construct a tx q's free list.  The order of entries on
2014 * the list must reflect the physical layout of tx descriptors
2015 * because the firmware pre-fetches descriptors.
2016 *
2017 * XXX might be better to use indices into the buffer array.
2018 */
2019static void
2020mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2021{
2022	struct mwl_txbuf *bf;
2023	int i;
2024
2025	bf = txq->dma.dd_bufptr;
2026	STAILQ_INIT(&txq->free);
2027	for (i = 0; i < mwl_txbuf; i++, bf++)
2028		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2029	txq->nfree = i;
2030}
2031
2032#define	DS2PHYS(_dd, _ds) \
2033	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2034
2035static int
2036mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
2037{
2038	int error, bsize, i;
2039	struct mwl_txbuf *bf;
2040	struct mwl_txdesc *ds;
2041
2042	error = mwl_desc_setup(sc, "tx", &txq->dma,
2043			mwl_txbuf, sizeof(struct mwl_txbuf),
2044			MWL_TXDESC, sizeof(struct mwl_txdesc));
2045	if (error != 0)
2046		return error;
2047
2048	/* allocate and setup tx buffers */
2049	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
2050	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2051	if (bf == NULL) {
2052		device_printf(sc->sc_dev, "malloc of %u tx buffers failed\n",
2053			mwl_txbuf);
2054		return ENOMEM;
2055	}
2056	txq->dma.dd_bufptr = bf;
2057
2058	ds = txq->dma.dd_desc;
2059	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
2060		bf->bf_desc = ds;
2061		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
2062		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
2063				&bf->bf_dmamap);
2064		if (error != 0) {
2065			device_printf(sc->sc_dev, "unable to create dmamap for tx "
2066				"buffer %u, error %u\n", i, error);
2067			return error;
2068		}
2069	}
2070	mwl_txq_reset(sc, txq);
2071	return 0;
2072}
2073
2074static void
2075mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2076{
2077	struct mwl_txbuf *bf;
2078	int i;
2079
2080	bf = txq->dma.dd_bufptr;
2081	for (i = 0; i < mwl_txbuf; i++, bf++) {
2082		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2083		KASSERT(bf->bf_node == NULL, ("node on free list"));
2084		if (bf->bf_dmamap != NULL)
2085			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2086	}
2087	STAILQ_INIT(&txq->free);
2088	txq->nfree = 0;
2089	if (txq->dma.dd_bufptr != NULL) {
2090		free(txq->dma.dd_bufptr, M_MWLDEV);
2091		txq->dma.dd_bufptr = NULL;
2092	}
2093	if (txq->dma.dd_desc_len != 0)
2094		mwl_desc_cleanup(sc, &txq->dma);
2095}
2096
2097static int
2098mwl_rxdma_setup(struct mwl_softc *sc)
2099{
2100	int error, jumbosize, bsize, i;
2101	struct mwl_rxbuf *bf;
2102	struct mwl_jumbo *rbuf;
2103	struct mwl_rxdesc *ds;
2104	caddr_t data;
2105
2106	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2107			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2108			1, sizeof(struct mwl_rxdesc));
2109	if (error != 0)
2110		return error;
2111
2112	/*
2113	 * Receive is done to a private pool of jumbo buffers.
2114	 * This allows us to attach to mbuf's and avoid re-mapping
2115	 * memory on each rx we post.  We allocate a large chunk
2116	 * of memory and manage it in the driver.  The mbuf free
2117	 * callback method is used to reclaim frames after sending
2118	 * them up the stack.  By default we allocate 2x the number of
2119	 * rx descriptors configured so we have some slop to hold
2120	 * us while frames are processed.
2121	 */
2122	if (mwl_rxbuf < 2*mwl_rxdesc) {
2123		device_printf(sc->sc_dev,
2124		    "too few rx dma buffers (%d); increasing to %d\n",
2125		    mwl_rxbuf, 2*mwl_rxdesc);
2126		mwl_rxbuf = 2*mwl_rxdesc;
2127	}
2128	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2129	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2130
2131	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2132		       PAGE_SIZE, 0,		/* alignment, bounds */
2133		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2134		       BUS_SPACE_MAXADDR,	/* highaddr */
2135		       NULL, NULL,		/* filter, filterarg */
2136		       sc->sc_rxmemsize,	/* maxsize */
2137		       1,			/* nsegments */
2138		       sc->sc_rxmemsize,	/* maxsegsize */
2139		       BUS_DMA_ALLOCNOW,	/* flags */
2140		       NULL,			/* lockfunc */
2141		       NULL,			/* lockarg */
2142		       &sc->sc_rxdmat);
2143	if (error != 0) {
2144		device_printf(sc->sc_dev, "could not create rx DMA tag\n");
2145		return error;
2146	}
2147
2148	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2149				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2150				 &sc->sc_rxmap);
2151	if (error != 0) {
2152		device_printf(sc->sc_dev, "could not alloc %ju bytes of rx DMA memory\n",
2153		    (uintmax_t) sc->sc_rxmemsize);
2154		return error;
2155	}
2156
2157	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2158				sc->sc_rxmem, sc->sc_rxmemsize,
2159				mwl_load_cb, &sc->sc_rxmem_paddr,
2160				BUS_DMA_NOWAIT);
2161	if (error != 0) {
2162		device_printf(sc->sc_dev, "could not load rx DMA map\n");
2163		return error;
2164	}
2165
2166	/*
2167	 * Allocate rx buffers and set them up.
2168	 */
2169	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2170	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2171	if (bf == NULL) {
2172		device_printf(sc->sc_dev, "malloc of %u rx buffers failed\n", bsize);
2173		return error;
2174	}
2175	sc->sc_rxdma.dd_bufptr = bf;
2176
2177	STAILQ_INIT(&sc->sc_rxbuf);
2178	ds = sc->sc_rxdma.dd_desc;
2179	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2180		bf->bf_desc = ds;
2181		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2182		/* pre-assign dma buffer */
2183		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2184		/* NB: tail is intentional to preserve descriptor order */
2185		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2186	}
2187
2188	/*
2189	 * Place remainder of dma memory buffers on the free list.
2190	 */
2191	SLIST_INIT(&sc->sc_rxfree);
2192	for (; i < mwl_rxbuf; i++) {
2193		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2194		rbuf = MWL_JUMBO_DATA2BUF(data);
2195		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2196		sc->sc_nrxfree++;
2197	}
2198	return 0;
2199}
2200#undef DS2PHYS
2201
/*
 * Undo mwl_rxdma_setup.  Each step is guarded so the routine is
 * safe to call after a partial setup failure; order matters:
 * unload the map, free the jumbo pool, free the driver-side
 * buffer state, then tear down the descriptor area.
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	if (sc->sc_rxmem_paddr != 0) {
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmem_paddr = 0;
	}
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
}
2220
2221static int
2222mwl_dma_setup(struct mwl_softc *sc)
2223{
2224	int error, i;
2225
2226	error = mwl_rxdma_setup(sc);
2227	if (error != 0) {
2228		mwl_rxdma_cleanup(sc);
2229		return error;
2230	}
2231
2232	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2233		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2234		if (error != 0) {
2235			mwl_dma_cleanup(sc);
2236			return error;
2237		}
2238	}
2239	return 0;
2240}
2241
2242static void
2243mwl_dma_cleanup(struct mwl_softc *sc)
2244{
2245	int i;
2246
2247	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2248		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2249	mwl_rxdma_cleanup(sc);
2250}
2251
2252static struct ieee80211_node *
2253mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2254{
2255	struct ieee80211com *ic = vap->iv_ic;
2256	struct mwl_softc *sc = ic->ic_softc;
2257	const size_t space = sizeof(struct mwl_node);
2258	struct mwl_node *mn;
2259
2260	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2261	if (mn == NULL) {
2262		/* XXX stat+msg */
2263		return NULL;
2264	}
2265	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2266	return &mn->mn_node;
2267}
2268
/*
 * Reclaim driver state for a node: remove the matching entry from
 * the firmware station database (when one was installed) and
 * release its station id, then chain to the saved net80211
 * cleanup method.
 */
static void
mwl_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
        struct mwl_softc *sc = ic->ic_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
	    __func__, ni, ni->ni_ic, mn->mn_staid);

	if (mn->mn_staid != 0) {
		struct ieee80211vap *vap = ni->ni_vap;

		if (mn->mn_hvap != NULL) {
			/* NB: sta-mode db entries are keyed by our address */
			if (vap->iv_opmode == IEEE80211_M_STA)
				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
			else
				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
		}
		/*
		 * NB: legacy WDS peer sta db entry is installed using
		 * the associate ap's hvap; use it again to delete it.
		 * XXX can vap be NULL?
		 */
		else if (vap->iv_opmode == IEEE80211_M_WDS &&
		    MWL_VAP(vap)->mv_ap_hvap != NULL)
			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
			    ni->ni_macaddr);
		/* return the station id for reuse */
		delstaid(sc, mn->mn_staid);
		mn->mn_staid = 0;
	}
	sc->sc_node_cleanup(ni);
}
2302
2303/*
2304 * Reclaim rx dma buffers from packets sitting on the ampdu
2305 * reorder queue for a station.  We replace buffers with a
2306 * system cluster (if available).
2307 */
static void
mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
{
	/*
	 * NB: the reclaim logic below is compiled out; it uses APIs
	 * (pool_cache_get_paddr, MEXTREMOVE, M_CLUSTER) not present
	 * in this tree and is retained only as a reference.  The
	 * function is currently a no-op.
	 */
#if 0
	int i, n, off;
	struct mbuf *m;
	void *cl;

	n = rap->rxa_qframes;
	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
		m = rap->rxa_m[i];
		if (m == NULL)
			continue;
		n--;
		/* our dma buffers have a well-known free routine */
		if ((m->m_flags & M_EXT) == 0 ||
		    m->m_ext.ext_free != mwl_ext_free)
			continue;
		/*
		 * Try to allocate a cluster and move the data.
		 */
		off = m->m_data - m->m_ext.ext_buf;
		if (off + m->m_pkthdr.len > MCLBYTES) {
			/* XXX no AMSDU for now */
			continue;
		}
		cl = pool_cache_get_paddr(&mclpool_cache, 0,
		    &m->m_ext.ext_paddr);
		if (cl != NULL) {
			/*
			 * Copy the existing data to the cluster, remove
			 * the rx dma buffer, and attach the cluster in
			 * its place.  Note we preserve the offset to the
			 * data so frames being bridged can still prepend
			 * their headers without adding another mbuf.
			 */
			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
			MEXTREMOVE(m);
			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
			/* setup mbuf like _MCLGET does */
			m->m_flags |= M_CLUSTER | M_EXT_RW;
			_MOWNERREF(m, M_EXT | M_CLUSTER);
			/* NB: m_data is clobbered by MEXTADDR, adjust */
			m->m_data += off;
		}
	}
#endif
}
2356
2357/*
2358 * Callback to reclaim resources.  We first let the
2359 * net80211 layer do it's thing, then if we are still
2360 * blocked by a lack of rx dma buffers we walk the ampdu
2361 * reorder q's to reclaim buffers by copying to a system
2362 * cluster.
2363 */
2364static void
2365mwl_node_drain(struct ieee80211_node *ni)
2366{
2367	struct ieee80211com *ic = ni->ni_ic;
2368        struct mwl_softc *sc = ic->ic_softc;
2369	struct mwl_node *mn = MWL_NODE(ni);
2370
2371	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2372	    __func__, ni, ni->ni_vap, mn->mn_staid);
2373
2374	/* NB: call up first to age out ampdu q's */
2375	sc->sc_node_drain(ni);
2376
2377	/* XXX better to not check low water mark? */
2378	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2379	    (ni->ni_flags & IEEE80211_NODE_HT)) {
2380		uint8_t tid;
2381		/*
2382		 * Walk the reorder q and reclaim rx dma buffers by copying
2383		 * the packet contents into clusters.
2384		 */
2385		for (tid = 0; tid < WME_NUM_TID; tid++) {
2386			struct ieee80211_rx_ampdu *rap;
2387
2388			rap = &ni->ni_rx_ampdu[tid];
2389			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2390				continue;
2391			if (rap->rxa_qframes)
2392				mwl_ampdu_rxdma_reclaim(rap);
2393		}
2394	}
2395}
2396
/*
 * Return rssi/noise for a node.  Rssi comes from the net80211
 * averaging method; noise is a fixed -95 dBm placeholder (the
 * per-chain noise data path below is disabled pending smoothing).
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2412
2413/*
2414 * Convert Hardware per-antenna rssi info to common format:
2415 * Let a1, a2, a3 represent the amplitudes per chain
2416 * Let amax represent max[a1, a2, a3]
2417 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2418 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2419 * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2420 * maintain some extra precision.
2421 *
2422 * Values are stored in .5 db format capped at 127.
2423 */
static void
mwl_node_getmimoinfo(const struct ieee80211_node *ni,
	struct ieee80211_mimo_info *mi)
{
/* Convert one chain's amplitude to .5 dB units relative to the
 * strongest chain; values above 32 dB are capped at 127. */
#define	CVT(_dst, _src) do {						\
	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
} while (0)
	/* 4*20*log10(idx) table (see block comment above) */
	static const int8_t logdbtbl[32] = {
	       0,   0,  24,  38,  48,  56,  62,  68,
	      72,  76,  80,  83,  86,  89,  92,  94,
	      96,  98, 100, 102, 104, 106, 107, 109,
	     110, 112, 113, 115, 116, 117, 118, 119
	};
	const struct mwl_node *mn = MWL_NODE_CONST(ni);
	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
	uint32_t rssi_max;

	/* find the strongest chain; per-chain values are relative to it */
	rssi_max = mn->mn_ai.rssi_a;
	if (mn->mn_ai.rssi_b > rssi_max)
		rssi_max = mn->mn_ai.rssi_b;
	if (mn->mn_ai.rssi_c > rssi_max)
		rssi_max = mn->mn_ai.rssi_c;

	CVT(mi->rssi[0], mn->mn_ai.rssi_a);
	CVT(mi->rssi[1], mn->mn_ai.rssi_b);
	CVT(mi->rssi[2], mn->mn_ai.rssi_c);

	mi->noise[0] = mn->mn_ai.nf_a;
	mi->noise[1] = mn->mn_ai.nf_b;
	mi->noise[2] = mn->mn_ai.nf_c;
#undef CVT
}
2457
2458static __inline void *
2459mwl_getrxdma(struct mwl_softc *sc)
2460{
2461	struct mwl_jumbo *buf;
2462	void *data;
2463
2464	/*
2465	 * Allocate from jumbo pool.
2466	 */
2467	MWL_RXFREE_LOCK(sc);
2468	buf = SLIST_FIRST(&sc->sc_rxfree);
2469	if (buf == NULL) {
2470		DPRINTF(sc, MWL_DEBUG_ANY,
2471		    "%s: out of rx dma buffers\n", __func__);
2472		sc->sc_stats.mst_rx_nodmabuf++;
2473		data = NULL;
2474	} else {
2475		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2476		sc->sc_nrxfree--;
2477		data = MWL_JUMBO_BUF2DATA(buf);
2478	}
2479	MWL_RXFREE_UNLOCK(sc);
2480	return data;
2481}
2482
2483static __inline void
2484mwl_putrxdma(struct mwl_softc *sc, void *data)
2485{
2486	struct mwl_jumbo *buf;
2487
2488	/* XXX bounds check data */
2489	MWL_RXFREE_LOCK(sc);
2490	buf = MWL_JUMBO_DATA2BUF(data);
2491	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2492	sc->sc_nrxfree++;
2493	MWL_RXFREE_UNLOCK(sc);
2494}
2495
/*
 * (Re)initialize an rx descriptor and hand it back to the firmware.
 * If the buffer has no dma memory attached, try to get one from the
 * jumbo pool; when none is available the descriptor is marked so
 * the firmware skips it (OS_OWN) and ENOMEM is returned.
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2534
2535static void
2536mwl_ext_free(struct mbuf *m, void *data, void *arg)
2537{
2538	struct mwl_softc *sc = arg;
2539
2540	/* XXX bounds check data */
2541	mwl_putrxdma(sc, data);
2542	/*
2543	 * If we were previously blocked by a lack of rx dma buffers
2544	 * check if we now have enough to restart rx interrupt handling.
2545	 * NB: we know we are called at splvm which is above splnet.
2546	 */
2547	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
2548		sc->sc_rxblocked = 0;
2549		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
2550	}
2551}
2552
/*
 * 802.11 BAR (Block Ack Request) control frame header; used by
 * mwl_anyhdrsize below to size BAR frames.
 */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];	/* frame control */
	u_int8_t	i_dur[2];	/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2560
2561/*
2562 * Like ieee80211_anyhdrsize, but handles BAR frames
2563 * specially so the logic below to piece the 802.11
2564 * header together works.
2565 */
2566static __inline int
2567mwl_anyhdrsize(const void *data)
2568{
2569	const struct ieee80211_frame *wh = data;
2570
2571	if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2572		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2573		case IEEE80211_FC0_SUBTYPE_CTS:
2574		case IEEE80211_FC0_SUBTYPE_ACK:
2575			return sizeof(struct ieee80211_frame_ack);
2576		case IEEE80211_FC0_SUBTYPE_BAR:
2577			return sizeof(struct mwl_frame_bar);
2578		}
2579		return sizeof(struct ieee80211_frame_min);
2580	} else
2581		return ieee80211_hdrsize(data);
2582}
2583
2584static void
2585mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2586{
2587	const struct ieee80211_frame *wh;
2588	struct ieee80211_node *ni;
2589
2590	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2591	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2592	if (ni != NULL) {
2593		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2594		ieee80211_free_node(ni);
2595	}
2596}
2597
2598/*
2599 * Convert hardware signal strength to rssi.  The value
2600 * provided by the device has the noise floor added in;
2601 * we need to compensate for this but we don't have that
2602 * so we use a fixed value.
2603 *
2604 * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2605 * offset is already set as part of the initial gain.  This
2606 * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2607 */
static __inline int
cvtrssi(uint8_t ssi)
{
	/* XXX hack guess until we have a real noise floor */
	int adjusted = (int) ssi + 8;
	int rssi = (87 - adjusted) * 2;	/* NB: .5 dBm units */

	if (rssi < 0)
		return 0;
	if (rssi > 127)
		return 127;
	return rssi;
}
2616
/*
 * Rx processing task.  Walk the descriptor ring from sc_rxnext,
 * handling up to mwl_rxquota frames: for each firmware-completed
 * descriptor, replace its dma buffer from the jumbo pool, wrap the
 * old buffer in an mbuf, reconstruct the 802.11 header, and pass
 * the frame to net80211.  If the jumbo pool runs dry, rx interrupts
 * are disabled until mwl_ext_free refills past the low water mark.
 */
static void
mwl_rx_proc(void *arg, int npending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_qosframe_addr4 *wh4;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* stop at the first descriptor the firmware still owns */
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			counter_u64_add(ic->ic_ierrors, 1);
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free,
		    data, sc, 0, EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh)) {
			if (IEEE80211_IS_DSTODS(wh)) {
				wh4 = mtod(m,
				    struct ieee80211_qosframe_addr4*);
				*(uint16_t *)wh4->i_qos = ds->QosCtrl;
			} else {
				*(uint16_t *)wh->i_qos = ds->QosCtrl;
			}
		}
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED |
		    IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	sc->sc_rxnext = bf;

	if (mbufq_first(&sc->sc_snd) != NULL) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(sc);
	}
}
2845
/*
 * Initialize a tx queue: create its lock and link each descriptor's
 * pPhysNext to the next buffer's descriptor, wrapping the last back
 * to the first, so the firmware sees a circular descriptor ring.
 */
static void
mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
{
	struct mwl_txbuf *bf, *bn;
	struct mwl_txdesc *ds;

	MWL_TXQ_LOCK_INIT(sc, txq);
	txq->qnum = qnum;
	txq->txpri = 0;	/* XXX */
#if 0
	/* NB: q setup by mwl_txdma_setup XXX */
	STAILQ_INIT(&txq->free);
#endif
	STAILQ_FOREACH(bf, &txq->free, bf_list) {
		bf->bf_txq = txq;

		ds = bf->bf_desc;
		bn = STAILQ_NEXT(bf, bf_list);
		if (bn == NULL)
			bn = STAILQ_FIRST(&txq->free);	/* wrap to ring start */
		ds->pPhysNext = htole32(bn->bf_daddr);
	}
	STAILQ_INIT(&txq->active);
}
2870
2871/*
2872 * Setup a hardware data transmit queue for the specified
2873 * access control.  We record the mapping from ac's
2874 * to h/w queues for use by mwl_tx_start.
2875 */
2876static int
2877mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2878{
2879	struct mwl_txq *txq;
2880
2881	if (ac >= nitems(sc->sc_ac2q)) {
2882		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2883			ac, nitems(sc->sc_ac2q));
2884		return 0;
2885	}
2886	if (mvtype >= MWL_NUM_TX_QUEUES) {
2887		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2888			mvtype, MWL_NUM_TX_QUEUES);
2889		return 0;
2890	}
2891	txq = &sc->sc_txq[mvtype];
2892	mwl_txq_init(sc, txq, mvtype);
2893	sc->sc_ac2q[ac] = txq;
2894	return 1;
2895}
2896
2897/*
2898 * Update WME parameters for a transmit queue.
2899 */
2900static int
2901mwl_txq_update(struct mwl_softc *sc, int ac)
2902{
2903#define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
2904	struct ieee80211com *ic = &sc->sc_ic;
2905	struct mwl_txq *txq = sc->sc_ac2q[ac];
2906	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
2907	struct mwl_hal *mh = sc->sc_mh;
2908	int aifs, cwmin, cwmax, txoplim;
2909
2910	aifs = wmep->wmep_aifsn;
2911	/* XXX in sta mode need to pass log values for cwmin/max */
2912	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2913	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2914	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */
2915
2916	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
2917		device_printf(sc->sc_dev, "unable to update hardware queue "
2918			"parameters for %s traffic!\n",
2919			ieee80211_wme_acnames[ac]);
2920		return 0;
2921	}
2922	return 1;
2923#undef MWL_EXPONENT_TO_VALUE
2924}
2925
2926/*
2927 * Callback from the 802.11 layer to update WME parameters.
2928 */
2929static int
2930mwl_wme_update(struct ieee80211com *ic)
2931{
2932	struct mwl_softc *sc = ic->ic_softc;
2933
2934	return !mwl_txq_update(sc, WME_AC_BE) ||
2935	    !mwl_txq_update(sc, WME_AC_BK) ||
2936	    !mwl_txq_update(sc, WME_AC_VI) ||
2937	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
2938}
2939
2940/*
2941 * Reclaim resources for a setup queue.
2942 */
static void
mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	/* XXX hal work? */
	/* only the lock is torn down here; DMA buffers are presumably
	 * reclaimed by the txdma cleanup path — confirm before changing */
	MWL_TXQ_LOCK_DESTROY(txq);
}
2949
2950/*
2951 * Reclaim all tx queue resources.
2952 */
2953static void
2954mwl_tx_cleanup(struct mwl_softc *sc)
2955{
2956	int i;
2957
2958	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2959		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
2960}
2961
/*
 * DMA-map an outbound mbuf chain for transmit.  Loads bf's DMA map,
 * filling bf->bf_segs/bf_nseg.  A chain needing more than MWL_TXDESC
 * segments is collapsed (or defragmented) and re-loaded.  On success
 * bf->bf_m holds the (possibly replaced) chain and 0 is returned; on
 * failure the chain is freed and an errno is returned.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		/* flag for the linearize pass below */
		bf->bf_nseg = MWL_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_NOWAIT);
#endif
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* reload with the linearized chain; it must fit now */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	/* flush mbuf data to memory before the h/w reads it */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3023
/*
 * Map an IEEE legacy rate (in 500Kb/s units) to the firmware
 * rate index; unknown rates map to index 0 (1Mb/s).
 */
static __inline int
mwl_cvtlegacyrate(int rate)
{
	static const int legacyrates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	int ix;

	for (ix = 0; ix < (int)(sizeof(legacyrates)/sizeof(legacyrates[0])); ix++) {
		if (legacyrates[ix] == rate)
			return ix;
	}
	return 0;
}
3044
3045/*
3046 * Calculate fixed tx rate information per client state;
3047 * this value is suitable for writing to the Format field
3048 * of a tx descriptor.
3049 */
static uint16_t
mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
{
	uint16_t fmt;

	/* antenna field fixed at 3 (meaning per f/w docs — TODO confirm);
	 * extension channel below/above chosen from the HT40D flag */
	fmt = SM(3, EAGLE_TXD_ANTENNA)
	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
		fmt |= EAGLE_TXD_FORMAT_HT
		    /* NB: 0x80 implicitly stripped from ucastrate */
		    | SM(rate, EAGLE_TXD_RATE);
		/* XXX short/long GI may be wrong; re-check */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			fmt |= EAGLE_TXD_CHW_40
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		} else {
			fmt |= EAGLE_TXD_CHW_20
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		}
	} else {			/* legacy rate */
		fmt |= EAGLE_TXD_FORMAT_LEGACY
		    | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
		    | EAGLE_TXD_CHW_20
		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
	}
	return fmt;
}
3082
/*
 * Build a tx descriptor for a frame and hand it to the firmware.
 * The 802.11 header is converted in place to the f/w "mwltxrec"
 * form (2-byte payload length + 4-address header), the mbuf chain
 * is DMA-mapped, a transmit priority/rate is selected, and the
 * buffer is queued FW_OWNED on its h/w queue.  Returns 0 on
 * success or an errno; on error the mbuf is freed here but the
 * caller's node reference is not released.
 */
static int
mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
    struct mbuf *m0)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, iswep, ismcast;
	int hdrlen, copyhdrlen, pktlen;
	struct mwl_txdesc *ds;
	struct mwl_txq *txq;
	struct ieee80211_frame *wh;
	struct mwltxrec *tr;
	struct mwl_node *mn;
	uint16_t qos;
#if MWL_TXDESC > 1
	int i;
#endif

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	copyhdrlen = hdrlen;
	pktlen = m0->m_pkthdr.len;
	/* extract the QoS control word; for 4-address frames the copy
	 * below stops short of it (copyhdrlen) */
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		if (IEEE80211_IS_DSTODS(wh)) {
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
			copyhdrlen -= sizeof(qos);
		} else
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe *) wh)->i_qos);
	} else
		qos = 0;

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 *
		 * NB: we do this even though the firmware will ignore
		 *     what we've done for WEP and TKIP as we need the
		 *     ExtIV filled in for CCMP and this also adjusts
		 *     the headers which simplifies our work below.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			m_freem(m0);
			return EIO;
		}
		/*
		 * Adjust the packet length for the crypto additions
		 * done during encap and any other bits that the f/w
		 * will add later on.
		 */
		cip = k->wk_cipher;
		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;	/* XXX */
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
#if 0
		sc->sc_tx_th.wt_rate = ds->DataRate;
#endif
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}
	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 * We are assured the mbuf has sufficient headroom to
	 * prepend in-place by the setup of ic_headroom in
	 * mwl_attach.
	 */
	if (hdrlen < sizeof(struct mwltxrec)) {
		const int space = sizeof(struct mwltxrec) - hdrlen;
		if (M_LEADINGSPACE(m0) < space) {
			/* NB: should never happen */
			device_printf(sc->sc_dev,
			    "not enough headroom, need %d found %zd, "
			    "m_flags 0x%x m_len %d\n",
			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
			ieee80211_dump_pkt(ic,
			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
			m_freem(m0);
			sc->sc_stats.mst_tx_noheadroom++;
			return EIO;
		}
		/* NB: cannot fail; leading space was verified above */
		M_PREPEND(m0, space, M_NOWAIT);
	}
	tr = mtod(m0, struct mwltxrec *);
	if (wh != (struct ieee80211_frame *) &tr->wh)
		ovbcopy(wh, &tr->wh, hdrlen);
	/*
	 * Note: the "firmware length" is actually the length
	 * of the fully formed "802.11 payload".  That is, it's
	 * everything except for the 802.11 header.  In particular
	 * this includes all crypto material including the MIC!
	 */
	tr->fwlen = htole16(pktlen - hdrlen);

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = mwl_tx_dmasetup(sc, bf, m0);
	if (error != 0) {
		/* NB: stat collected in mwl_tx_dmasetup */
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: unable to setup dma\n", __func__);
		return error;
	}
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	tr = mtod(m0, struct mwltxrec *);
	wh = (struct ieee80211_frame *)&tr->wh;

	/*
	 * Formulate tx descriptor.
	 */
	ds = bf->bf_desc;
	txq = bf->bf_txq;

	ds->QosCtrl = qos;			/* NB: already little-endian */
#if MWL_TXDESC == 1
	/*
	 * NB: multiframes should be zero because the descriptors
	 *     are initialized to zero.  This should handle the case
	 *     where the driver is built with MWL_TXDESC=1 but we are
	 *     using firmware with multi-segment support.
	 */
	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
#else
	ds->multiframes = htole32(bf->bf_nseg);
	ds->PktLen = htole16(m0->m_pkthdr.len);
	for (i = 0; i < bf->bf_nseg; i++) {
		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
	}
#endif
	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
	ds->Format = 0;
	ds->pad = 0;
	ds->ack_wcb_addr = 0;

	mn = MWL_NODE(ni);
	/*
	 * Select transmit rate.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		sc->sc_stats.mst_tx_mgmt++;
		/* fall thru... */
	case IEEE80211_FC0_TYPE_CTL:
		/* NB: assign to BE q to avoid bursting */
		ds->TxPriority = MWL_WME_AC_BE;
		break;
	case IEEE80211_FC0_TYPE_DATA:
		if (!ismcast) {
			const struct ieee80211_txparam *tp = ni->ni_txparms;
			/*
			 * EAPOL frames get forced to a fixed rate and w/o
			 * aggregation; otherwise check for any fixed rate
			 * for the client (may depend on association state).
			 */
			if (m0->m_flags & M_EAPOL) {
				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
				ds->Format = mvp->mv_eapolformat;
				ds->pad = htole16(
				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
				/* XXX pre-calculate per node */
				ds->Format = htole16(
				    mwl_calcformat(tp->ucastrate, ni));
				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
			}
			/* NB: EAPOL frames will never have qos set */
			/* route QoS traffic through any matching BA stream's
			 * queue; fall back to the frame's assigned queue */
			if (qos == 0)
				ds->TxPriority = txq->qnum;
#if MWL_MAXBA > 3
			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
				ds->TxPriority = mn->mn_ba[3].txq;
#endif
#if MWL_MAXBA > 2
			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
				ds->TxPriority = mn->mn_ba[2].txq;
#endif
#if MWL_MAXBA > 1
			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
				ds->TxPriority = mn->mn_ba[1].txq;
#endif
#if MWL_MAXBA > 0
			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
				ds->TxPriority = mn->mn_ba[0].txq;
#endif
			else
				ds->TxPriority = txq->qnum;
		} else
			ds->TxPriority = txq->qnum;
		break;
	default:
		device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		sc->sc_stats.mst_tx_badframetype++;
		m_freem(m0);
		return EIO;
	}

	if (IFF_DUMPPKTS_XMIT(sc))
		ieee80211_dump_pkt(ic,
		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);

	MWL_TXQ_LOCK(txq);
	/* hand ownership to the firmware and queue on the active list */
	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* arm the tx watchdog; cleared in mwl_tx_proc when frames reap */
	sc->sc_tx_timer = 5;
	MWL_TXQ_UNLOCK(txq);

	return 0;
}
3326
/*
 * Map a firmware legacy rate index back to the IEEE rate
 * (500Kb/s units); out-of-range indices yield 0.
 */
static __inline int
mwl_cvtlegacyrix(int rix)
{
	static const int ieeerates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };

	if (rix >= 0 && (size_t)rix < sizeof(ieeerates) / sizeof(ieeerates[0]))
		return ieeerates[rix];
	return 0;
}
3334
3335/*
3336 * Process completed xmit descriptors from the specified queue.
3337 */
/*
 * Reap completed descriptors from a h/w transmit queue.  Buffers
 * still marked FW_OWNED stop the scan.  For each completed frame
 * update tx statistics and the node's tx rate, complete it via
 * ieee80211_tx_complete (which frees the mbuf and node ref), and
 * return the buffer to the queue's free list.  Returns the number
 * of buffers reaped.
 */
static int
mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
{
/* NB: not currently referenced in the body below */
#define	EAGLE_TXD_STATUS_MCAST \
	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;
	struct ieee80211_node *ni;
	struct mwl_node *an;
	int nreaped;
	uint32_t status;

	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
	for (nreaped = 0;; nreaped++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		ds = bf->bf_desc;
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
			/* firmware still owns this descriptor; stop here */
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);

#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
			mwl_printtxbuf(bf, txq->qnum, nreaped);
#endif
		ni = bf->bf_node;
		if (ni != NULL) {
			an = MWL_NODE(ni);
			status = le32toh(ds->Status);
			if (status & EAGLE_TXD_STATUS_OK) {
				uint16_t Format = le16toh(ds->Format);
				uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);

				sc->sc_stats.mst_ant_tx[txant]++;
				if (status & EAGLE_TXD_STATUS_OK_RETRY)
					sc->sc_stats.mst_tx_retries++;
				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
					sc->sc_stats.mst_tx_mretries++;
				if (txq->qnum >= MWL_WME_AC_VO)
					ic->ic_wme.wme_hipri_traffic++;
				/* record the rate the f/w actually used */
				ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
					ni->ni_txrate = mwl_cvtlegacyrix(
					    ni->ni_txrate);
				} else
					ni->ni_txrate |= IEEE80211_RATE_MCS;
				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
			} else {
				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
					sc->sc_stats.mst_tx_linkerror++;
				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
					sc->sc_stats.mst_tx_xretries++;
				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
					sc->sc_stats.mst_tx_aging++;
				if (bf->bf_m->m_flags & M_FF)
					sc->sc_stats.mst_ff_txerr++;
			}
			if (bf->bf_m->m_flags & M_TXCB)
				/* XXX strip fw len in case header inspected */
				m_adj(bf->bf_m, sizeof(uint16_t));
			ieee80211_tx_complete(ni, bf->bf_m,
			    (status & EAGLE_TXD_STATUS_OK) == 0);
		} else
			m_freem(bf->bf_m);
		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);

		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		mwl_puttxbuf_tail(txq, bf);
	}
	return nreaped;
#undef EAGLE_TXD_STATUS_MCAST
}
3423
3424/*
3425 * Deferred processing of transmit interrupt; special-cased
3426 * for four hardware queues, 0-3.
3427 */
3428static void
3429mwl_tx_proc(void *arg, int npending)
3430{
3431	struct mwl_softc *sc = arg;
3432	int nreaped;
3433
3434	/*
3435	 * Process each active queue.
3436	 */
3437	nreaped = 0;
3438	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3439		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3440	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3441		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3442	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3443		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3444	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3445		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3446
3447	if (nreaped != 0) {
3448		sc->sc_tx_timer = 0;
3449		if (mbufq_first(&sc->sc_snd) != NULL) {
3450			/* NB: kick fw; the tx thread may have been preempted */
3451			mwl_hal_txstart(sc->sc_mh, 0);
3452			mwl_start(sc);
3453		}
3454	}
3455}
3456
/*
 * Discard every frame pending on a h/w transmit queue: unload the
 * DMA mapping, drop the node reference, free the mbuf, and return
 * the buffer to the free list.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ieee80211com *ic = &sc->sc_ic;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3500
3501/*
3502 * Drain the transmit queues and reclaim resources.
3503 */
3504static void
3505mwl_draintxq(struct mwl_softc *sc)
3506{
3507	int i;
3508
3509	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3510		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3511	sc->sc_tx_timer = 0;
3512}
3513
3514#ifdef MWL_DIAGAPI
3515/*
3516 * Reset the transmit queues to a pristine state after a fw download.
3517 */
3518static void
3519mwl_resettxq(struct mwl_softc *sc)
3520{
3521	int i;
3522
3523	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3524		mwl_txq_reset(sc, &sc->sc_txq[i]);
3525}
3526#endif /* MWL_DIAGAPI */
3527
3528/*
3529 * Clear the transmit queues of any frames submitted for the
3530 * specified vap.  This is done when the vap is deleted so we
3531 * don't potentially reference the vap after it is gone.
3532 * Note we cannot remove the frames; we only reclaim the node
3533 * reference.
3534 */
3535static void
3536mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3537{
3538	struct mwl_txq *txq;
3539	struct mwl_txbuf *bf;
3540	int i;
3541
3542	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3543		txq = &sc->sc_txq[i];
3544		MWL_TXQ_LOCK(txq);
3545		STAILQ_FOREACH(bf, &txq->active, bf_list) {
3546			struct ieee80211_node *ni = bf->bf_node;
3547			if (ni != NULL && ni->ni_vap == vap) {
3548				bf->bf_node = NULL;
3549				ieee80211_free_node(ni);
3550			}
3551		}
3552		MWL_TXQ_UNLOCK(txq);
3553	}
3554}
3555
3556static int
3557mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
3558	const uint8_t *frm, const uint8_t *efrm)
3559{
3560	struct mwl_softc *sc = ni->ni_ic->ic_softc;
3561	const struct ieee80211_action *ia;
3562
3563	ia = (const struct ieee80211_action *) frm;
3564	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3565	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3566		const struct ieee80211_action_ht_mimopowersave *mps =
3567		    (const struct ieee80211_action_ht_mimopowersave *) ia;
3568
3569		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3570		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3571		    MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
3572		return 0;
3573	} else
3574		return sc->sc_recv_action(ni, wh, frm, efrm);
3575}
3576
/*
 * Intercept net80211's ADDBA request path.  Before the ADDBA frame
 * goes out, reserve a per-node BA slot and allocate a firmware BA
 * stream for the tid; returning 0 vetoes aggregation (no slot or no
 * f/w stream available).  On success the starting sequence number is
 * fetched from the firmware and the saved net80211 handler is called.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3648
/*
 * Intercept the ADDBA response.  On success, tell the firmware to
 * create the BA stream reserved in mwl_addba_request; on refusal or
 * create failure, destroy the stream and release the slot so no
 * aggregation is attempted.  NB: the firmware sends the BAR itself.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, TID %d\n",
		    __func__, tap->txa_tid);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d TID %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_tid, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3715
3716static void
3717mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3718{
3719	struct mwl_softc *sc = ni->ni_ic->ic_softc;
3720	struct mwl_bastate *bas;
3721
3722	bas = tap->txa_private;
3723	if (bas != NULL) {
3724		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3725		    __func__, bas->bastream);
3726		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3727		mwl_bastream_free(bas);
3728		tap->txa_private = NULL;
3729	}
3730	sc->sc_addba_stop(ni, tap);
3731}
3732
3733/*
3734 * Setup the rx data structures.  This should only be
3735 * done once or we may get out of sync with the firmware.
3736 */
static int
mwl_startrecv(struct mwl_softc *sc)
{
	/* only build the rx ring once; re-doing it would desync the f/w */
	if (!sc->sc_recvsetup) {
		struct mwl_rxbuf *bf, *prev;
		struct mwl_rxdesc *ds;

		prev = NULL;
		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			int error = mwl_rxbuf_init(sc, bf);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_RECV,
					"%s: mwl_rxbuf_init failed %d\n",
					__func__, error);
				return error;
			}
			/* chain each descriptor to the next via pPhysNext */
			if (prev != NULL) {
				ds = prev->bf_desc;
				ds->pPhysNext = htole32(bf->bf_daddr);
			}
			prev = bf;
		}
		/* close the ring: last descriptor points at the first */
		if (prev != NULL) {
			ds = prev->bf_desc;
			ds->pPhysNext =
			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
		}
		sc->sc_recvsetup = 1;
	}
	mwl_mode_init(sc);		/* set filters, etc. */
	return 0;
}
3769
3770static MWL_HAL_APMODE
3771mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3772{
3773	MWL_HAL_APMODE mode;
3774
3775	if (IEEE80211_IS_CHAN_HT(chan)) {
3776		if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3777			mode = AP_MODE_N_ONLY;
3778		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3779			mode = AP_MODE_AandN;
3780		else if (vap->iv_flags & IEEE80211_F_PUREG)
3781			mode = AP_MODE_GandN;
3782		else
3783			mode = AP_MODE_BandGandN;
3784	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3785		if (vap->iv_flags & IEEE80211_F_PUREG)
3786			mode = AP_MODE_G_ONLY;
3787		else
3788			mode = AP_MODE_MIXED;
3789	} else if (IEEE80211_IS_CHAN_B(chan))
3790		mode = AP_MODE_B_ONLY;
3791	else if (IEEE80211_IS_CHAN_A(chan))
3792		mode = AP_MODE_A_ONLY;
3793	else
3794		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3795	return mode;
3796}
3797
3798static int
3799mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
3800{
3801	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
3802	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
3803}
3804
3805/*
3806 * Set/change channels.
3807 */
static int
mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
{
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211com *ic = &sc->sc_ic;
	MWL_HAL_CHANNEL hchan;
	int maxtxpow;

	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
	    __func__, chan->ic_freq, chan->ic_flags);

	/*
	 * Convert to a HAL channel description with
	 * the flags constrained to reflect the current
	 * operating mode.
	 */
	mwl_mapchan(&hchan, chan);
	mwl_hal_intrset(mh, 0);		/* disable interrupts */
#if 0
	mwl_draintxq(sc);		/* clear pending tx frames */
#endif
	mwl_hal_setchannel(mh, &hchan);
	/*
	 * Tx power is cap'd by the regulatory setting and
	 * possibly a user-set limit.  We pass the min of
	 * these to the hal to apply them to the cal data
	 * for this channel.
	 * XXX min bound?
	 */
	/* NB: ic_maxregpower is in dBm, ic_txpowlimit in .5 dBm units */
	maxtxpow = 2*chan->ic_maxregpower;
	if (maxtxpow > ic->ic_txpowlimit)
		maxtxpow = ic->ic_txpowlimit;
	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
	/* NB: potentially change mcast/mgt rates */
	mwl_setcurchanrates(sc);

	/*
	 * Update internal state.
	 */
	/* refresh the radiotap channel fields for tx/rx captures */
	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
	if (IEEE80211_IS_CHAN_A(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
	} else {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
	}
	sc->sc_curchan = hchan;
	mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable interrupts */

	return 0;
}
3864
/* net80211 scan-start callback; the driver only emits a debug trace. */
static void
mwl_scan_start(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3872
/* net80211 scan-end callback; the driver only emits a debug trace. */
static void
mwl_scan_end(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3880
/* net80211 channel-change callback: program the current channel. */
static void
mwl_set_channel(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	/* NB: result intentionally discarded; this callback is void */
	(void) mwl_chan_set(sc, ic->ic_curchan);
}
3888
3889/*
3890 * Handle a channel switch request.  We inform the firmware
3891 * and mark the global state to suppress various actions.
3892 * NB: we issue only one request to the fw; we may be called
3893 * multiple times if there are multiple vap's.
3894 */
3895static void
3896mwl_startcsa(struct ieee80211vap *vap)
3897{
3898	struct ieee80211com *ic = vap->iv_ic;
3899	struct mwl_softc *sc = ic->ic_softc;
3900	MWL_HAL_CHANNEL hchan;
3901
3902	if (sc->sc_csapending)
3903		return;
3904
3905	mwl_mapchan(&hchan, ic->ic_csa_newchan);
3906	/* 1 =>'s quiet channel */
3907	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
3908	sc->sc_csapending = 1;
3909}
3910
3911/*
3912 * Plumb any static WEP key for the station.  This is
3913 * necessary as we must propagate the key from the
3914 * global key table of the vap to each sta db entry.
3915 */
3916static void
3917mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3918{
3919	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
3920		IEEE80211_F_PRIVACY &&
3921	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
3922	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
3923		(void) _mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey],
3924				    mac);
3925}
3926
3927static int
3928mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
3929{
3930#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
3931	struct ieee80211vap *vap = ni->ni_vap;
3932	struct mwl_hal_vap *hvap;
3933	int error;
3934
3935	if (vap->iv_opmode == IEEE80211_M_WDS) {
3936		/*
3937		 * WDS vap's do not have a f/w vap; instead they piggyback
3938		 * on an AP vap and we must install the sta db entry and
3939		 * crypto state using that AP's handle (the WDS vap has none).
3940		 */
3941		hvap = MWL_VAP(vap)->mv_ap_hvap;
3942	} else
3943		hvap = MWL_VAP(vap)->mv_hvap;
3944	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
3945	    aid, staid, pi,
3946	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
3947	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
3948	if (error == 0) {
3949		/*
3950		 * Setup security for this station.  For sta mode this is
3951		 * needed even though do the same thing on transition to
3952		 * AUTH state because the call to mwl_hal_newstation
3953		 * clobbers the crypto state we setup.
3954		 */
3955		mwl_setanywepkey(vap, ni->ni_macaddr);
3956	}
3957	return error;
3958#undef WME
3959}
3960
3961static void
3962mwl_setglobalkeys(struct ieee80211vap *vap)
3963{
3964	struct ieee80211_key *wk;
3965
3966	wk = &vap->iv_nw_keys[0];
3967	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
3968		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
3969			(void) _mwl_key_set(vap, wk, vap->iv_myaddr);
3970}
3971
3972/*
3973 * Convert a legacy rate set to a firmware bitmask.
3974 */
3975static uint32_t
3976get_rate_bitmap(const struct ieee80211_rateset *rs)
3977{
3978	uint32_t rates;
3979	int i;
3980
3981	rates = 0;
3982	for (i = 0; i < rs->rs_nrates; i++)
3983		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
3984		case 2:	  rates |= 0x001; break;
3985		case 4:	  rates |= 0x002; break;
3986		case 11:  rates |= 0x004; break;
3987		case 22:  rates |= 0x008; break;
3988		case 44:  rates |= 0x010; break;
3989		case 12:  rates |= 0x020; break;
3990		case 18:  rates |= 0x040; break;
3991		case 24:  rates |= 0x080; break;
3992		case 36:  rates |= 0x100; break;
3993		case 48:  rates |= 0x200; break;
3994		case 72:  rates |= 0x400; break;
3995		case 96:  rates |= 0x800; break;
3996		case 108: rates |= 0x1000; break;
3997		}
3998	return rates;
3999}
4000
4001/*
4002 * Construct an HT firmware bitmask from an HT rate set.
4003 */
4004static uint32_t
4005get_htrate_bitmap(const struct ieee80211_htrateset *rs)
4006{
4007	uint32_t rates;
4008	int i;
4009
4010	rates = 0;
4011	for (i = 0; i < rs->rs_nrates; i++) {
4012		if (rs->rs_rates[i] < 16)
4013			rates |= 1<<rs->rs_rates[i];
4014	}
4015	return rates;
4016}
4017
4018/*
4019 * Craft station database entry for station.
4020 * NB: use host byte order here, the hal handles byte swapping.
4021 */
4022static MWL_HAL_PEERINFO *
4023mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
4024{
4025	const struct ieee80211vap *vap = ni->ni_vap;
4026
4027	memset(pi, 0, sizeof(*pi));
4028	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
4029	pi->CapInfo = ni->ni_capinfo;
4030	if (ni->ni_flags & IEEE80211_NODE_HT) {
4031		/* HT capabilities, etc */
4032		pi->HTCapabilitiesInfo = ni->ni_htcap;
4033		/* XXX pi.HTCapabilitiesInfo */
4034	        pi->MacHTParamInfo = ni->ni_htparam;
4035		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
4036		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
4037		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
4038		pi->AddHtInfo.OpMode = ni->ni_htopmode;
4039		pi->AddHtInfo.stbc = ni->ni_htstbc;
4040
4041		/* constrain according to local configuration */
4042		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
4043			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
4044		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
4045			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
4046		if (ni->ni_chw != 40)
4047			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
4048	}
4049	return pi;
4050}
4051
4052/*
4053 * Re-create the local sta db entry for a vap to ensure
4054 * up to date WME state is pushed to the firmware.  Because
4055 * this resets crypto state this must be followed by a
4056 * reload of any keys in the global key table.
4057 */
static int
mwl_localstadb(struct ieee80211vap *vap)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211_node *bss;
	MWL_HAL_PEERINFO pi;
	int error;

	switch (vap->iv_opmode) {
	case IEEE80211_M_STA:
		/*
		 * Station mode: install an entry for ourself keyed on
		 * our own address.  Full peer info (rates/HT) is only
		 * meaningful once associated, hence the RUN check.
		 */
		bss = vap->iv_bss;
		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
		    vap->iv_state == IEEE80211_S_RUN ?
			mkpeerinfo(&pi, bss) : NULL,
		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
		    bss->ni_ies.wme_ie != NULL ?
			WME(bss->ni_ies.wme_ie)->wme_info : 0);
		/* NB: newstation clobbers crypto state; re-plumb keys */
		if (error == 0)
			mwl_setglobalkeys(vap);
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		/* AP/mesh: no peer info, just WME capability flag. */
		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
		if (error == 0)
			mwl_setglobalkeys(vap);
		break;
	default:
		/* other modes (e.g. WDS) need no local entry */
		error = 0;
		break;
	}
	return error;
#undef WME
}
4093
/*
 * 802.11 state transition handler.  Performs driver/firmware work
 * before and after dispatching to the net80211 parent method.
 * Returns 0 or an errno from the parent method / beacon setup.
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    vap->iv_ifp->if_xname, __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	/* NB: stops the station-aging timer started on entry to RUN */
	callout_stop(&sc->sc_timer);
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    vap->iv_ifp->if_xname, __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    vap->iv_ifp->if_xname, __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			/* NB: first DWDS vap enables hw DWDS support */
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    vap->iv_ifp->if_xname, __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this is mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		mwl_hal_setdwds(mh, 0);
bad:
	return error;
}
4246
4247/*
4248 * Manage station id's; these are separate from AID's
4249 * as AID's may have values out of the range of possible
4250 * station id's acceptable to the firmware.
4251 */
4252static int
4253allocstaid(struct mwl_softc *sc, int aid)
4254{
4255	int staid;
4256
4257	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
4258		/* NB: don't use 0 */
4259		for (staid = 1; staid < MWL_MAXSTAID; staid++)
4260			if (isclr(sc->sc_staid, staid))
4261				break;
4262	} else
4263		staid = aid;
4264	setbit(sc->sc_staid, staid);
4265	return staid;
4266}
4267
/*
 * Release a station id previously handed out by allocstaid.
 */
static void
delstaid(struct mwl_softc *sc, int staid)
{
	clrbit(sc->sc_staid, staid);
}
4273
4274/*
4275 * Setup driver-specific state for a newly associated node.
4276 * Note that we're called also on a re-associate, the isnew
4277 * param tells us if this is the first time or not.
4278 */
4279static void
4280mwl_newassoc(struct ieee80211_node *ni, int isnew)
4281{
4282	struct ieee80211vap *vap = ni->ni_vap;
4283        struct mwl_softc *sc = vap->iv_ic->ic_softc;
4284	struct mwl_node *mn = MWL_NODE(ni);
4285	MWL_HAL_PEERINFO pi;
4286	uint16_t aid;
4287	int error;
4288
4289	aid = IEEE80211_AID(ni->ni_associd);
4290	if (isnew) {
4291		mn->mn_staid = allocstaid(sc, aid);
4292		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4293	} else {
4294		mn = MWL_NODE(ni);
4295		/* XXX reset BA stream? */
4296	}
4297	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4298	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4299	error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4300	if (error != 0) {
4301		DPRINTF(sc, MWL_DEBUG_NODE,
4302		    "%s: error %d creating sta db entry\n",
4303		    __func__, error);
4304		/* XXX how to deal with error? */
4305	}
4306}
4307
4308/*
4309 * Periodically poke the firmware to age out station state
4310 * (power save queues, pending tx aggregates).
4311 */
4312static void
4313mwl_agestations(void *arg)
4314{
4315	struct mwl_softc *sc = arg;
4316
4317	mwl_hal_setkeepalive(sc->sc_mh);
4318	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
4319		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
4320}
4321
4322static const struct mwl_hal_channel *
4323findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4324{
4325	int i;
4326
4327	for (i = 0; i < ci->nchannels; i++) {
4328		const struct mwl_hal_channel *hc = &ci->channels[i];
4329		if (hc->ieee == ieee)
4330			return hc;
4331	}
4332	return NULL;
4333}
4334
/*
 * Validate a proposed channel list against the hal's calibration
 * data and clamp per-channel tx power.  Returns EINVAL for any
 * channel outside 2.4/5GHz or without cal data.
 */
static int
mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
	int nchan, struct ieee80211_channel chans[])
{
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_CHANNELINFO *ci;
	int i;

	for (i = 0; i < nchan; i++) {
		struct ieee80211_channel *c = &chans[i];
		const struct mwl_hal_channel *hc;

		/*
		 * NOTE(review): the getchannelinfo return value is not
		 * checked; presumably the hal always has tables for
		 * these band/width combinations -- TODO confirm.
		 */
		if (IEEE80211_IS_CHAN_2GHZ(c)) {
			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
			    IEEE80211_IS_CHAN_HT40(c) ?
				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
			    IEEE80211_IS_CHAN_HT40(c) ?
				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
		} else {
			device_printf(sc->sc_dev,
			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
			return EINVAL;
		}
		/*
		 * Verify channel has cal data and cap tx power.
		 */
		hc = findhalchannel(ci, c->ic_ieee);
		if (hc != NULL) {
			if (c->ic_maxpower > 2*hc->maxTxPow)
				c->ic_maxpower = 2*hc->maxTxPow;
			goto next;
		}
		if (IEEE80211_IS_CHAN_HT40(c)) {
			/*
			 * Look for the extension channel since the
			 * hal table only has the primary channel.
			 */
			hc = findhalchannel(ci, c->ic_extieee);
			if (hc != NULL) {
				if (c->ic_maxpower > 2*hc->maxTxPow)
					c->ic_maxpower = 2*hc->maxTxPow;
				goto next;
			}
		}
		device_printf(sc->sc_dev,
		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
		    __func__, c->ic_ieee, c->ic_extieee,
		    c->ic_freq, c->ic_flags);
		return EINVAL;
	next:
		;
	}
	return 0;
}
4393
4394#define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4395#define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4396
4397static void
4398addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow)
4399{
4400	c->ic_freq = freq;
4401	c->ic_flags = flags;
4402	c->ic_ieee = ieee;
4403	c->ic_minpower = 0;
4404	c->ic_maxpower = 2*txpow;
4405	c->ic_maxregpower = txpow;
4406}
4407
4408static const struct ieee80211_channel *
4409findchannel(const struct ieee80211_channel chans[], int nchans,
4410	int freq, int flags)
4411{
4412	const struct ieee80211_channel *c;
4413	int i;
4414
4415	for (i = 0; i < nchans; i++) {
4416		c = &chans[i];
4417		if (c->ic_freq == freq && c->ic_flags == flags)
4418			return c;
4419	}
4420	return NULL;
4421}
4422
/*
 * Append HT40 channel pairs derived from the hal's 40MHz channel
 * table.  Each pair is only added when the matching HT20 extension
 * channel (20MHz above the primary) was already added by a prior
 * addchannels pass.
 */
static void
addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	const struct ieee80211_channel *extc;
	const struct mwl_hal_channel *hc;
	int i;

	/* c tracks the next free slot; advances in step with *nchans */
	c = &chans[*nchans];

	/* NB: HT40U/HT40D are OR'd in per entry below */
	flags &= ~IEEE80211_CHAN_HT;
	for (i = 0; i < ci->nchannels; i++) {
		/*
		 * Each entry defines an HT40 channel pair; find the
		 * extension channel above and the insert the pair.
		 */
		hc = &ci->channels[i];
		extc = findchannel(chans, *nchans, hc->freq+20,
		    flags | IEEE80211_CHAN_HT20);
		if (extc != NULL) {
			if (*nchans >= maxchans)
				break;
			/* primary channel, extension above (HT40U) */
			addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U,
			    hc->ieee, hc->maxTxPow);
			c->ic_extieee = extc->ic_ieee;
			c++, (*nchans)++;
			if (*nchans >= maxchans)
				break;
			/* extension channel, primary below (HT40D) */
			addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D,
			    extc->ic_ieee, hc->maxTxPow);
			c->ic_extieee = hc->ieee;
			c++, (*nchans)++;
		}
	}
}
4459
/*
 * Append 20MHz channels from a hal channel table.  For 2.4GHz
 * entries a b-only twin is emitted alongside each g channel, and
 * for HT entries a legacy-only twin is emitted as well; the twins
 * are created by duplicating the just-added entry (c[-1]) and
 * rewriting its flags.
 */
static void
addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	int i;

	/* c tracks the next free slot; advances in step with *nchans */
	c = &chans[*nchans];

	for (i = 0; i < ci->nchannels; i++) {
		const struct mwl_hal_channel *hc;

		hc = &ci->channels[i];
		if (*nchans >= maxchans)
			break;
		addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
		c++, (*nchans)++;
		if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
			/* g channels have a separate b-only entry */
			if (*nchans >= maxchans)
				break;
			/* copy the new entry forward, demote the old to B */
			c[0] = c[-1];
			c[-1].ic_flags = IEEE80211_CHAN_B;
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTG) {
			/* HT g channels have a separate g-only entry */
			if (*nchans >= maxchans)
				break;
			/* demote prior entry to plain G, new one to HT20 */
			c[-1].ic_flags = IEEE80211_CHAN_G;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTA) {
			/* HT a channels have a separate a-only entry */
			if (*nchans >= maxchans)
				break;
			c[-1].ic_flags = IEEE80211_CHAN_A;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
	}
}
4507
static void
getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
	struct ieee80211_channel chans[])
{
	const MWL_HAL_CHANNELINFO *ci;

	/*
	 * Use the channel info from the hal to craft the
	 * channel list.  Note that we pass back an unsorted
	 * list; the caller is required to sort it for us
	 * (if desired).
	 *
	 * NB: the 20MHz passes must run before the 40MHz passes;
	 * addht40channels pairs each HT40 entry with an HT20
	 * extension channel already present in chans[].
	 */
	*nchans = 0;
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
}
4534
4535static void
4536mwl_getradiocaps(struct ieee80211com *ic,
4537	int maxchans, int *nchans, struct ieee80211_channel chans[])
4538{
4539	struct mwl_softc *sc = ic->ic_softc;
4540
4541	getchannels(sc, maxchans, nchans, chans);
4542}
4543
4544static int
4545mwl_getchannels(struct mwl_softc *sc)
4546{
4547	struct ieee80211com *ic = &sc->sc_ic;
4548
4549	/*
4550	 * Use the channel info from the hal to craft the
4551	 * channel list for net80211.  Note that we pass up
4552	 * an unsorted list; net80211 will sort it for us.
4553	 */
4554	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
4555	ic->ic_nchans = 0;
4556	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
4557
4558	ic->ic_regdomain.regdomain = SKU_DEBUG;
4559	ic->ic_regdomain.country = CTRY_DEFAULT;
4560	ic->ic_regdomain.location = 'I';
4561	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
4562	ic->ic_regdomain.isocc[1] = ' ';
4563	return (ic->ic_nchans == 0 ? EIO : 0);
4564}
4565#undef IEEE80211_CHAN_HTA
4566#undef IEEE80211_CHAN_HTG
4567
4568#ifdef MWL_DEBUG
/*
 * Dump one rx descriptor for debugging; ix is the buffer's index.
 * The trailing marker is " *" when the descriptor completed OK,
 * " !" on error, empty while still owned by the driver.
 */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n"
	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->pPhysNext),
	    le32toh(ds->pPhysBuffData), ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2));
}
4584
/*
 * Dump one tx descriptor for debugging; qnum/ix identify the
 * queue and the buffer's position in it.
 */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr);
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !");
	printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
/* raw hex dump of the descriptor, normally disabled */
{ const uint8_t *cp = (const uint8_t *) ds;
  int i;
  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
	printf("%02x ", cp[i]);
	if (((i+1) % 16) == 0)
		printf("\n");
  }
  printf("\n");
}
#endif
}
4626#endif /* MWL_DEBUG */
4627
4628#if 0
/*
 * Debug aid (normally compiled out): dump every descriptor on a
 * tx queue's active list under the queue lock.
 */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		/* sync so we read what the hardware wrote */
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4647#endif
4648
4649static void
4650mwl_watchdog(void *arg)
4651{
4652	struct mwl_softc *sc = arg;
4653
4654	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
4655	if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
4656		return;
4657
4658	if (sc->sc_running && !sc->sc_invalid) {
4659		if (mwl_hal_setkeepalive(sc->sc_mh))
4660			device_printf(sc->sc_dev,
4661			    "transmit timeout (firmware hung?)\n");
4662		else
4663			device_printf(sc->sc_dev,
4664			    "transmit timeout\n");
4665#if 0
4666		mwl_reset(sc);
4667mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
4668#endif
4669		counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4670		sc->sc_stats.mst_watchdog++;
4671	}
4672}
4673
4674#ifdef MWL_DIAGAPI
4675/*
4676 * Diagnostic interface to the HAL.  This is used by various
4677 * tools to do things like retrieve register contents for
4678 * debugging.  The mechanism is intentionally opaque so that
4679 * it can change frequently w/o concern for compatiblity.
4680 */
static int
mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	u_int id = md->md_id & MWL_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = md->md_in_size;
	u_int32_t outsize = md->md_out_size;
	int error = 0;

	if (md->md_id & MWL_DIAG_IN) {
		/*
		 * Copy in data.
		 * NOTE(review): insize comes from userland unchecked;
		 * a zero or huge value reaches malloc as-is -- TODO
		 * confirm the hal/ioctl layer bounds it.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(md->md_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (md->md_id & MWL_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
		/* never copy out more than the caller asked for */
		if (outsize < md->md_out_size)
			md->md_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, md->md_out_data,
					md->md_out_size);
	} else {
		error = EINVAL;
	}
bad:
	/* release only the buffers we allocated above */
	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
4735
/*
 * Diagnostic firmware reload: re-download firmware (unless md_id
 * is nonzero), re-fetch hw specs, re-plumb DMA, and reset the
 * driver's tx/rx bookkeeping.  Returns 0 or an errno.
 */
static int
mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error;

	MWL_LOCK_ASSERT(sc);

	/* md_id == 0 requests a firmware reload before the reset */
	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to load firmware\n");
		return EIO;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		return EIO;
	}
	error = mwl_setupdma(sc);
	if (error != 0) {
		/* NB: mwl_setupdma prints a msg */
		return error;
	}
	/*
	 * Reset tx/rx data structures; after reload we must
	 * re-start the driver's notion of the next xmit/recv.
	 */
	mwl_draintxq(sc);		/* clear pending frames */
	mwl_resettxq(sc);		/* rebuild tx q lists */
	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
	return 0;
}
4766#endif /* MWL_DIAGAPI */
4767
4768static void
4769mwl_parent(struct ieee80211com *ic)
4770{
4771	struct mwl_softc *sc = ic->ic_softc;
4772	int startall = 0;
4773
4774	MWL_LOCK(sc);
4775	if (ic->ic_nrunning > 0) {
4776		if (sc->sc_running) {
4777			/*
4778			 * To avoid rescanning another access point,
4779			 * do not call mwl_init() here.  Instead,
4780			 * only reflect promisc mode settings.
4781			 */
4782			mwl_mode_init(sc);
4783		} else {
4784			/*
4785			 * Beware of being called during attach/detach
4786			 * to reset promiscuous mode.  In that case we
4787			 * will still be marked UP but not RUNNING.
4788			 * However trying to re-init the interface
4789			 * is the wrong thing to do as we've already
4790			 * torn down much of our state.  There's
4791			 * probably a better way to deal with this.
4792			 */
4793			if (!sc->sc_invalid) {
4794				mwl_init(sc);	/* XXX lose error */
4795				startall = 1;
4796			}
4797		}
4798	} else
4799		mwl_stop(sc);
4800	MWL_UNLOCK(sc);
4801	if (startall)
4802		ieee80211_start_all(ic);
4803}
4804
/*
 * Driver-private ioctl handler: statistics export and (optionally)
 * the diagnostic API.  Unknown commands return ENOTTY so net80211
 * falls back to its own handling.
 */
static int
mwl_ioctl(struct ieee80211com *ic, u_long cmd, void *data)
{
	struct mwl_softc *sc = ic->ic_softc;
	struct ifreq *ifr = data;
	int error = 0;

	switch (cmd) {
	case SIOCGMVSTATS:
		/* refresh firmware counters before exporting */
		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
#if 0
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.mst_tx_packets =
		    ifp->if_get_counter(ifp, IFCOUNTER_OPACKETS);
		sc->sc_stats.mst_rx_packets =
		    ifp->if_get_counter(ifp, IFCOUNTER_IPACKETS);
#endif
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsistency in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return (copyout(&sc->sc_stats,
				ifr->ifr_data, sizeof (sc->sc_stats)));
#ifdef MWL_DIAGAPI
	case SIOCGMVDIAG:
		/* XXX check privs */
		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
	case SIOCGMVRESET:
		/* XXX check privs */
		MWL_LOCK(sc);
		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
		MWL_UNLOCK(sc);
		break;
#endif /* MWL_DIAGAPI */
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
4847
4848#ifdef	MWL_DEBUG
4849static int
4850mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4851{
4852	struct mwl_softc *sc = arg1;
4853	int debug, error;
4854
4855	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4856	error = sysctl_handle_int(oidp, &debug, 0, req);
4857	if (error || !req->newptr)
4858		return error;
4859	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4860	sc->sc_debug = debug & 0x00ffffff;
4861	return 0;
4862}
4863#endif /* MWL_DEBUG */
4864
/*
 * Attach driver sysctl nodes (debug knob only, and only when
 * compiled with MWL_DEBUG).
 */
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef	MWL_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	/* seed from the module-wide default */
	sc->sc_debug = mwl_debug;
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
4878
4879/*
4880 * Announce various information on device/driver attach.
4881 */
static void
mwl_announce(struct mwl_softc *sc)
{

	/* firmware release is packed one byte per version component */
	device_printf(sc->sc_dev, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
		sc->sc_hwspecs.hwVersion,
		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
		sc->sc_hwspecs.regionCode);
	/* cache for later feature checks elsewhere in the driver */
	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;

	if (bootverbose) {
		int i;
		for (i = 0; i <= WME_AC_VO; i++) {
			struct mwl_txq *txq = sc->sc_ac2q[i];
			device_printf(sc->sc_dev, "Use hw queue %u for %s traffic\n",
				txq->qnum, ieee80211_wme_acnames[i]);
		}
	}
	/* report tunables that differ from their compiled-in defaults */
	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
		device_printf(sc->sc_dev, "using %u rx descriptors\n", mwl_rxdesc);
	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
		device_printf(sc->sc_dev, "using %u rx buffers\n", mwl_rxbuf);
	if (bootverbose || mwl_txbuf != MWL_TXBUF)
		device_printf(sc->sc_dev, "using %u tx buffers\n", mwl_txbuf);
	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
		device_printf(sc->sc_dev, "multi-bss support\n");
#ifdef MWL_TX_NODROP
	if (bootverbose)
		device_printf(sc->sc_dev, "no tx drop\n");
#endif
}
4915}
4916