/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/net80211/ieee80211_superg.c 273736 2014-10-27 14:38:00Z hselasky $");

#include "opt_wlan.h"

#ifdef	IEEE80211_SUPPORT_SUPERG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/endian.h>

#include <sys/socket.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_llc.h>
#include <net/if_media.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_input.h>
#include <net80211/ieee80211_phy.h>
#include <net80211/ieee80211_superg.h>

/*
 * Atheros fast-frame encapsulation format.
 * FF max payload:
 * 802.2 + FFHDR + HPAD + 802.3 + 802.2 + 1500 + SPAD + 802.3 + 802.2 + 1500:
 *   8   +   4   +  4   +   14  +   8   + 1500 +  6   +   14  +   8   + 1500
 * = 3066
 */
/* fast frame header is 32-bits */
#define	ATH_FF_PROTO	0x0000003f	/* protocol */
#define	ATH_FF_PROTO_S	0
#define	ATH_FF_FTYPE	0x000000c0	/* frame type */
#define	ATH_FF_FTYPE_S	6
#define	ATH_FF_HLEN32	0x00000300	/* optional hdr length */
#define	ATH_FF_HLEN32_S	8
#define	ATH_FF_SEQNUM	0x001ffc00	/* sequence number */
#define	ATH_FF_SEQNUM_S	10
#define	ATH_FF_OFFSET	0xffe00000	/* offset to 2nd payload */
#define	ATH_FF_OFFSET_S	21

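/*
 * Illustrative only, not used by the code below: assembling a
 * fast-frame header word from the fields above.  The helper name
 * and the #if 0 guard are ours, not part of any driver interface.
 */
#if 0
static __inline uint32_t
ath_ff_mkhdr(u_int proto, u_int ftype, u_int hlen32, u_int seqnum, u_int off)
{
	return (((proto << ATH_FF_PROTO_S) & ATH_FF_PROTO) |
	    ((ftype << ATH_FF_FTYPE_S) & ATH_FF_FTYPE) |
	    ((hlen32 << ATH_FF_HLEN32_S) & ATH_FF_HLEN32) |
	    ((seqnum << ATH_FF_SEQNUM_S) & ATH_FF_SEQNUM) |
	    ((off << ATH_FF_OFFSET_S) & ATH_FF_OFFSET));
}
#endif
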
#define	ATH_FF_MAX_HDR_PAD	4
#define	ATH_FF_MAX_SEP_PAD	6
#define	ATH_FF_MAX_HDR		30

#define	ATH_FF_PROTO_L2TUNNEL	0	/* L2 tunnel protocol */
#define	ATH_FF_ETH_TYPE		0x88bd	/* Ether type for encapsulated frames */
#define	ATH_FF_SNAP_ORGCODE_0	0x00
#define	ATH_FF_SNAP_ORGCODE_1	0x03
#define	ATH_FF_SNAP_ORGCODE_2	0x7f

#define	ATH_FF_TXQMIN	2		/* min txq depth for staging */
#define	ATH_FF_TXQMAX	50		/* maximum # of queued frames allowed */
#define	ATH_FF_STAGEMAX	5		/* max waiting period for staged frame*/

#define	ETHER_HEADER_COPY(dst, src) \
	memcpy(dst, src, sizeof(struct ether_header))

static	int ieee80211_ffppsmin = 2;	/* pps threshold for ff aggregation */
SYSCTL_INT(_net_wlan, OID_AUTO, ffppsmin, CTLFLAG_RW,
	&ieee80211_ffppsmin, 0, "min packet rate before fast-frame staging");
static	int ieee80211_ffagemax = -1;	/* max time frames held on stage q */
SYSCTL_PROC(_net_wlan, OID_AUTO, ffagemax, CTLTYPE_INT | CTLFLAG_RW,
	&ieee80211_ffagemax, 0, ieee80211_sysctl_msecs_ticks, "I",
	"max hold time for fast-frame staging (ms)");

void
ieee80211_superg_attach(struct ieee80211com *ic)
{
	struct ieee80211_superg *sg;

	if (ic->ic_caps & IEEE80211_C_FF) {
		sg = (struct ieee80211_superg *) malloc(
		     sizeof(struct ieee80211_superg), M_80211_VAP,
		     M_NOWAIT | M_ZERO);
		if (sg == NULL) {
			printf("%s: cannot allocate SuperG state block\n",
			    __func__);
			return;
		}
		ic->ic_superg = sg;
	}
	ieee80211_ffagemax = msecs_to_ticks(150);
}

void
ieee80211_superg_detach(struct ieee80211com *ic)
{
	if (ic->ic_superg != NULL) {
		free(ic->ic_superg, M_80211_VAP);
		ic->ic_superg = NULL;
	}
}

void
ieee80211_superg_vattach(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;

	if (ic->ic_superg == NULL)	/* NB: can't do fast-frames w/o state */
		vap->iv_caps &= ~IEEE80211_C_FF;
	if (vap->iv_caps & IEEE80211_C_FF)
		vap->iv_flags |= IEEE80211_F_FF;
	/* NB: we only implement sta mode */
	if (vap->iv_opmode == IEEE80211_M_STA &&
	    (vap->iv_caps & IEEE80211_C_TURBOP))
		vap->iv_flags |= IEEE80211_F_TURBOP;
}

void
ieee80211_superg_vdetach(struct ieee80211vap *vap)
{
}

#define	ATH_OUI_BYTES		0x00, 0x03, 0x7f
/*
 * Add an Atheros vendor-specific information element to a frame.
 */
uint8_t *
ieee80211_add_ath(uint8_t *frm, uint8_t caps, ieee80211_keyix defkeyix)
{
	static const struct ieee80211_ath_ie info = {
		.ath_id		= IEEE80211_ELEMID_VENDOR,
		.ath_len	= sizeof(struct ieee80211_ath_ie) - 2,
		.ath_oui	= { ATH_OUI_BYTES },
		.ath_oui_type	= ATH_OUI_TYPE,
		.ath_oui_subtype= ATH_OUI_SUBTYPE,
		.ath_version	= ATH_OUI_VERSION,
	};
	struct ieee80211_ath_ie *ath = (struct ieee80211_ath_ie *) frm;

	memcpy(frm, &info, sizeof(info));
	ath->ath_capability = caps;
	if (defkeyix != IEEE80211_KEYIX_NONE) {
		ath->ath_defkeyix[0] = (defkeyix & 0xff);
		ath->ath_defkeyix[1] = ((defkeyix >> 8) & 0xff);
	} else {
		ath->ath_defkeyix[0] = 0xff;
		ath->ath_defkeyix[1] = 0x7f;
	}
	return frm + sizeof(info);
}
#undef ATH_OUI_BYTES

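/*
 * Illustrative layout of the element written by ieee80211_add_ath()
 * above; the type/subtype/version bytes take whatever values the
 * ATH_OUI_* macros in ieee80211_superg.h supply and are not repeated
 * here:
 *
 *	[0]	0xdd			vendor-specific element id
 *	[1]	sizeof(ie) - 2		length
 *	[2..4]	00 03 7f		Atheros OUI
 *	[5..7]	OUI type / subtype / version
 *	[8]	capability flags
 *	[9..10]	default tx key index, little-endian; 0xff 0x7f (0x7fff)
 *		when no key index is advertised
 */
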
uint8_t *
ieee80211_add_athcaps(uint8_t *frm, const struct ieee80211_node *bss)
{
	const struct ieee80211vap *vap = bss->ni_vap;

	return ieee80211_add_ath(frm,
	    vap->iv_flags & IEEE80211_F_ATHEROS,
	    ((vap->iv_flags & IEEE80211_F_WPA) == 0 &&
	    bss->ni_authmode != IEEE80211_AUTH_8021X) ?
	    vap->iv_def_txkey : IEEE80211_KEYIX_NONE);
}

void
ieee80211_parse_ath(struct ieee80211_node *ni, uint8_t *ie)
{
	const struct ieee80211_ath_ie *ath =
		(const struct ieee80211_ath_ie *) ie;

	ni->ni_ath_flags = ath->ath_capability;
	ni->ni_ath_defkeyix = LE_READ_2(&ath->ath_defkeyix);
}

int
ieee80211_parse_athparams(struct ieee80211_node *ni, uint8_t *frm,
	const struct ieee80211_frame *wh)
{
	struct ieee80211vap *vap = ni->ni_vap;
	const struct ieee80211_ath_ie *ath;
	u_int len = frm[1];
	int capschanged;
	uint16_t defkeyix;

	if (len < sizeof(struct ieee80211_ath_ie)-2) {
		IEEE80211_DISCARD_IE(vap,
		    IEEE80211_MSG_ELEMID | IEEE80211_MSG_SUPERG,
		    wh, "Atheros", "too short, len %u", len);
		return -1;
	}
	ath = (const struct ieee80211_ath_ie *)frm;
	capschanged = (ni->ni_ath_flags != ath->ath_capability);
	defkeyix = LE_READ_2(ath->ath_defkeyix);
	if (capschanged || defkeyix != ni->ni_ath_defkeyix) {
		ni->ni_ath_flags = ath->ath_capability;
		ni->ni_ath_defkeyix = defkeyix;
		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "ath ie change: new caps 0x%x defkeyix 0x%x",
		    ni->ni_ath_flags, ni->ni_ath_defkeyix);
	}
	if (IEEE80211_ATH_CAP(vap, ni, ATHEROS_CAP_TURBO_PRIME)) {
		uint16_t curflags, newflags;

		/*
		 * Check for turbo mode switch.  Calculate flags
		 * for the new mode and effect the switch.
		 */
		newflags = curflags = vap->iv_ic->ic_bsschan->ic_flags;
		/* NB: BOOST is not in ic_flags, so get it from the ie */
		if (ath->ath_capability & ATHEROS_CAP_BOOST)
			newflags |= IEEE80211_CHAN_TURBO;
		else
			newflags &= ~IEEE80211_CHAN_TURBO;
		if (newflags != curflags)
			ieee80211_dturbo_switch(vap, newflags);
	}
	return capschanged;
}

/*
 * Decap the encapsulated frame pair and dispatch the first
 * for delivery.  The second frame is returned for delivery
 * via the normal path.
 */
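/*
 * For reference (illustrative, mirroring the code below): after the
 * 802.11 header has been converted to an Ethernet header the
 * fast-frame payload looks like
 *
 *	Ethernet header + LLC/SNAP carrying ATH_FF_ETH_TYPE
 *	32-bit Atheros tunnel header + 2 pad bytes
 *	Ethernet header + LLC/SNAP + first payload
 *	pad to a 4-byte boundary
 *	Ethernet header + LLC/SNAP + second payload
 */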
struct mbuf *
ieee80211_ff_decap(struct ieee80211_node *ni, struct mbuf *m)
{
#define	FF_LLC_SIZE	(sizeof(struct ether_header) + sizeof(struct llc))
#define	MS(x,f)	(((x) & f) >> f##_S)
	struct ieee80211vap *vap = ni->ni_vap;
	struct llc *llc;
	uint32_t ath;
	struct mbuf *n;
	int framelen;

	/* NB: we assume caller does this check for us */
	KASSERT(IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF),
	    ("ff not negotiated"));
	/*
	 * Check for fast-frame tunnel encapsulation.
	 */
	if (m->m_pkthdr.len < 3*FF_LLC_SIZE)
		return m;
	if (m->m_len < FF_LLC_SIZE &&
	    (m = m_pullup(m, FF_LLC_SIZE)) == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame",
		    "%s", "m_pullup(llc) failed");
		vap->iv_stats.is_rx_tooshort++;
		return NULL;
	}
	llc = (struct llc *)(mtod(m, uint8_t *) +
	    sizeof(struct ether_header));
	if (llc->llc_snap.ether_type != htons(ATH_FF_ETH_TYPE))
		return m;
	m_adj(m, FF_LLC_SIZE);
	m_copydata(m, 0, sizeof(uint32_t), (caddr_t) &ath);
	if (MS(ath, ATH_FF_PROTO) != ATH_FF_PROTO_L2TUNNEL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame",
		    "unsupported tunnel protocol, header 0x%x", ath);
		vap->iv_stats.is_ff_badhdr++;
		m_freem(m);
		return NULL;
	}
	/* NB: skip header and alignment padding */
	m_adj(m, roundup(sizeof(uint32_t) - 2, 4) + 2);
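	/* (the adjustment above is 6 bytes: 4-byte tunnel header + 2 pad bytes) */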

	vap->iv_stats.is_ff_decap++;

	/*
	 * Decap the first frame, bust it apart from the
	 * second and deliver; then decap the second frame
	 * and return it to the caller for normal delivery.
	 */
	m = ieee80211_decap1(m, &framelen);
	if (m == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame", "%s", "first decap failed");
		vap->iv_stats.is_ff_tooshort++;
		return NULL;
	}
	n = m_split(m, framelen, M_NOWAIT);
	if (n == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame",
		    "%s", "unable to split encapsulated frames");
		vap->iv_stats.is_ff_split++;
		m_freem(m);			/* NB: must reclaim */
		return NULL;
	}
	/* XXX not right for WDS */
	vap->iv_deliver_data(vap, ni, m);	/* 1st of pair */

	/*
	 * Decap second frame.
	 */
	m_adj(n, roundup2(framelen, 4) - framelen);	/* padding */
	n = ieee80211_decap1(n, &framelen);
	if (n == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame", "%s", "second decap failed");
		vap->iv_stats.is_ff_tooshort++;
	}
	/* XXX verify framelen against mbuf contents */
	return n;				/* 2nd delivered by caller */
#undef MS
#undef FF_LLC_SIZE
}

/*
 * Fast frame encapsulation.  There must be two packets
 * chained with m_nextpkt.  We do header adjustment for
 * each, add the tunnel encapsulation, and then concatenate
 * the mbuf chains to form a single frame for transmission.
 */
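/*
 * Illustrative only: ieee80211_ff_check() below marks the leading
 * frame of a pair with M_FF and chains its partner on m_nextpkt, so
 * the transmit encapsulation path is expected to hand such a pair to
 * this routine roughly as
 *
 *	if (m->m_flags & M_FF)
 *		m = ieee80211_ff_encap(vap, m, hdrspace, key);
 *
 * before the merged frame gets its 802.11 header.
 */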
struct mbuf *
ieee80211_ff_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
	struct ieee80211_key *key)
{
	struct mbuf *m2;
	struct ether_header eh1, eh2;
	struct llc *llc;
	struct mbuf *m;
	int pad;

	m2 = m1->m_nextpkt;
	if (m2 == NULL) {
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: only one frame\n", __func__);
		goto bad;
	}
	m1->m_nextpkt = NULL;
	/*
	 * Include fast frame headers in adjusting header layout.
	 */
	KASSERT(m1->m_len >= sizeof(eh1), ("no ethernet header!"));
	ETHER_HEADER_COPY(&eh1, mtod(m1, caddr_t));
	m1 = ieee80211_mbuf_adjust(vap,
		hdrspace + sizeof(struct llc) + sizeof(uint32_t) + 2 +
		    sizeof(struct ether_header),
		key, m1);
	if (m1 == NULL) {
		/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
		m_freem(m2);
		goto bad;
	}

	/*
	 * Copy second frame's Ethernet header out of line
	 * and adjust for encapsulation headers.  Note that
	 * we make room for padding in case there isn't room
	 * at the end of first frame.
	 */
	KASSERT(m2->m_len >= sizeof(eh2), ("no ethernet header!"));
	ETHER_HEADER_COPY(&eh2, mtod(m2, caddr_t));
	m2 = ieee80211_mbuf_adjust(vap,
		ATH_FF_MAX_HDR_PAD + sizeof(struct ether_header),
		NULL, m2);
	if (m2 == NULL) {
		/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
		goto bad;
	}

	/*
	 * Now do tunnel encapsulation.  First, each
	 * frame gets a standard encapsulation.
	 */
	m1 = ieee80211_ff_encap1(vap, m1, &eh1);
	if (m1 == NULL)
		goto bad;
	m2 = ieee80211_ff_encap1(vap, m2, &eh2);
	if (m2 == NULL)
		goto bad;

	/*
	 * Pad leading frame to a 4-byte boundary.  If there
	 * is space at the end of the first frame, put it
	 * there; otherwise prepend to the front of the second
	 * frame.  We know doing the second will always work
	 * because we reserve space above.  We prefer appending
	 * as this typically has better DMA alignment properties.
	 */
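	/*
	 * (Illustrative: a 1537-byte leading frame rounds up to 1540,
	 * so pad below works out to 3.)
	 */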
	for (m = m1; m->m_next != NULL; m = m->m_next)
		;
	pad = roundup2(m1->m_pkthdr.len, 4) - m1->m_pkthdr.len;
	if (pad) {
		if (M_TRAILINGSPACE(m) < pad) {		/* prepend to second */
			m2->m_data -= pad;
			m2->m_len += pad;
			m2->m_pkthdr.len += pad;
		} else {				/* append to first */
			m->m_len += pad;
			m1->m_pkthdr.len += pad;
		}
	}

	/*
	 * Now, stick 'em together and prepend the tunnel headers;
	 * first the Atheros tunnel header (all zero for now) and
	 * then a special fast frame LLC.
	 *
	 * XXX optimize by prepending together
	 */
	m->m_next = m2;			/* NB: last mbuf from above */
	m1->m_pkthdr.len += m2->m_pkthdr.len;
	M_PREPEND(m1, sizeof(uint32_t)+2, M_NOWAIT);
	if (m1 == NULL) {		/* XXX cannot happen */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: no space for tunnel header\n", __func__);
		vap->iv_stats.is_tx_nobuf++;
		return NULL;
	}
	memset(mtod(m1, void *), 0, sizeof(uint32_t)+2);

	M_PREPEND(m1, sizeof(struct llc), M_NOWAIT);
	if (m1 == NULL) {		/* XXX cannot happen */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: no space for llc header\n", __func__);
		vap->iv_stats.is_tx_nobuf++;
		return NULL;
	}
	llc = mtod(m1, struct llc *);
	llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
	llc->llc_control = LLC_UI;
	llc->llc_snap.org_code[0] = ATH_FF_SNAP_ORGCODE_0;
	llc->llc_snap.org_code[1] = ATH_FF_SNAP_ORGCODE_1;
	llc->llc_snap.org_code[2] = ATH_FF_SNAP_ORGCODE_2;
	llc->llc_snap.ether_type = htons(ATH_FF_ETH_TYPE);

	vap->iv_stats.is_ff_encap++;

	return m1;
bad:
	if (m1 != NULL)
		m_freem(m1);
	if (m2 != NULL)
		m_freem(m2);
	return NULL;
}

static void
ff_transmit(struct ieee80211_node *ni, struct mbuf *m)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	int error;

	IEEE80211_TX_LOCK_ASSERT(vap->iv_ic);

	/* encap and xmit */
	m = ieee80211_encap(vap, ni, m);
	if (m != NULL) {
		struct ifnet *ifp = vap->iv_ifp;

		error = ieee80211_parent_xmitpkt(ic, m);
		if (error != 0) {
			/* NB: the parent transmit path reclaims the mbuf */
			ieee80211_free_node(ni);
		} else {
			ifp->if_opackets++;
		}
	} else
		ieee80211_free_node(ni);
}

/*
 * Flush frames to the device.  We walk the linked list the frames
 * were staged on, stopping at the supplied sentinel, which is left
 * unchanged and may be non-NULL.
 */
static void
ff_flush(struct mbuf *head, struct mbuf *last)
{
	struct mbuf *m, *next;
	struct ieee80211_node *ni;
	struct ieee80211vap *vap;

	for (m = head; m != last; m = next) {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;

		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		vap = ni->ni_vap;

		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "%s: flush frame, age %u", __func__, M_AGE_GET(m));
		vap->iv_stats.is_ff_flush++;

		ff_transmit(ni, m);
	}
}

/*
 * Age frames on the staging queue.
 *
 * This is called without the comlock held, but it does all its work
 * behind the comlock.  Because of this, it's possible that the
 * staging queue will be serviced between the function which called
 * it and now; thus simply checking that the queue has work in it
 * may fail.
 *
 * See PR kern/174283 for more details.
 */
void
ieee80211_ff_age(struct ieee80211com *ic, struct ieee80211_stageq *sq,
    int quanta)
{
	struct mbuf *m, *head;
	struct ieee80211_node *ni;
	struct ieee80211_tx_ampdu *tap;

#if 0
	KASSERT(sq->head != NULL, ("stageq empty"));
#endif

	IEEE80211_LOCK(ic);
	head = sq->head;
	while ((m = sq->head) != NULL && M_AGE_GET(m) < quanta) {
		int tid = WME_AC_TO_TID(M_WME_GETAC(m));

		/* clear tap ref to frame */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		tap = &ni->ni_tx_ampdu[tid];
		KASSERT(tap->txa_private == m, ("staging queue empty"));
		tap->txa_private = NULL;

		sq->head = m->m_nextpkt;
		sq->depth--;
	}
	if (m == NULL)
		sq->tail = NULL;
	else
		M_AGE_SUB(m, quanta);
	IEEE80211_UNLOCK(ic);

	IEEE80211_TX_LOCK(ic);
	ff_flush(head, m);
	IEEE80211_TX_UNLOCK(ic);
}

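/*
 * Staging queue aging, for reference (a description of the code
 * above and below, not new behavior): a frame added to an empty
 * queue is stamped with the full ieee80211_ffagemax, otherwise with
 * ffagemax minus the current head's remaining age.  ieee80211_ff_age()
 * flushes leading frames whose stamp has fallen below the quanta and
 * subtracts the quanta only from the frame left at the head, so a
 * frame's stamp starts counting down once it reaches the front.
 */
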
static void
stageq_add(struct ieee80211com *ic, struct ieee80211_stageq *sq, struct mbuf *m)
{
	int age = ieee80211_ffagemax;

	IEEE80211_LOCK_ASSERT(ic);

	if (sq->tail != NULL) {
		sq->tail->m_nextpkt = m;
		age -= M_AGE_GET(sq->head);
	} else
		sq->head = m;
	KASSERT(age >= 0, ("age %d", age));
	M_AGE_SET(m, age);
	m->m_nextpkt = NULL;
	sq->tail = m;
	sq->depth++;
}

static void
stageq_remove(struct ieee80211com *ic, struct ieee80211_stageq *sq, struct mbuf *mstaged)
{
	struct mbuf *m, *mprev;

	IEEE80211_LOCK_ASSERT(ic);

	mprev = NULL;
	for (m = sq->head; m != NULL; m = m->m_nextpkt) {
		if (m == mstaged) {
			if (mprev == NULL)
				sq->head = m->m_nextpkt;
			else
				mprev->m_nextpkt = m->m_nextpkt;
			if (sq->tail == m)
				sq->tail = mprev;
			sq->depth--;
			return;
		}
		mprev = m;
	}
	printf("%s: packet not found\n", __func__);
}

static uint32_t
ff_approx_txtime(struct ieee80211_node *ni,
	const struct mbuf *m1, const struct mbuf *m2)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211vap *vap = ni->ni_vap;
	uint32_t framelen;

	/*
	 * Approximate the frame length to be transmitted. A swag to add
	 * the following maximal values to the mbuf payload:
	 *   - 32: 802.11 encap + CRC
	 *   - 24: encryption overhead (if wep bit)
	 *   - 4 + 6: fast-frame header and padding
	 *   - 16: 2 LLC FF tunnel headers
	 *   - 14: 1 802.3 FF tunnel header (mbuf already accounts for 2nd)
	 */
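	/*
	 * (Worked example, illustrative only: a 1500-byte first frame
	 * with privacy enabled comes to 1500 + 32 + 4 + 6 + 30 + 24 =
	 * 1596 bytes before the partner frame's length is added.)
	 */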
	framelen = m1->m_pkthdr.len + 32 +
	    ATH_FF_MAX_HDR_PAD + ATH_FF_MAX_SEP_PAD + ATH_FF_MAX_HDR;
	if (vap->iv_flags & IEEE80211_F_PRIVACY)
		framelen += 24;
	if (m2 != NULL)
		framelen += m2->m_pkthdr.len;
	return ieee80211_compute_duration(ic->ic_rt, framelen, ni->ni_txrate, 0);
}

/*
 * Check if the supplied frame can be partnered with an existing
 * or pending frame.  Return a reference to any frame that should be
 * sent on return; otherwise return NULL.
 */
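/*
 * For reference (a summary of the code below): the outcomes are
 *	- the frame cannot or should not be aggregated: it is returned
 *	  unchanged, after flushing any staged partner that would push
 *	  the aggregate past the txop limit;
 *	- a staged partner exists: the pair is chained via m_nextpkt,
 *	  marked M_FF and returned for encapsulation;
 *	- no partner yet: the frame is placed on the staging queue and
 *	  NULL is returned.
 */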
struct mbuf *
ieee80211_ff_check(struct ieee80211_node *ni, struct mbuf *m)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211_superg *sg = ic->ic_superg;
	const int pri = M_WME_GETAC(m);
	struct ieee80211_stageq *sq;
	struct ieee80211_tx_ampdu *tap;
	struct mbuf *mstaged;
	uint32_t txtime, limit;

	IEEE80211_TX_UNLOCK_ASSERT(ic);

	/*
	 * Check if the supplied frame can be aggregated.
	 *
	 * NB: we allow EAPOL frames to be aggregated with other ucast traffic.
	 *     Do 802.1x EAPOL frames proceed in the clear? Then they couldn't
	 *     be aggregated with other types of frames when encryption is on?
	 */
	IEEE80211_LOCK(ic);
	tap = &ni->ni_tx_ampdu[WME_AC_TO_TID(pri)];
	mstaged = tap->txa_private;		/* NB: we reuse AMPDU state */
	ieee80211_txampdu_count_packet(tap);

	/*
	 * When not in station mode never aggregate a multicast
	 * frame; this ensures, for example, that a combined frame
	 * does not require multiple encryption keys.
	 */
	if (vap->iv_opmode != IEEE80211_M_STA &&
	    ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost)) {
		/* XXX flush staged frame? */
		IEEE80211_UNLOCK(ic);
		return m;
	}
	/*
	 * If there is no frame to combine with and the pps is
	 * too low, do not attempt to aggregate this frame.
	 */
	if (mstaged == NULL &&
	    ieee80211_txampdu_getpps(tap) < ieee80211_ffppsmin) {
		IEEE80211_UNLOCK(ic);
		return m;
	}
	sq = &sg->ff_stageq[pri];
	/*
	 * Check the txop limit to ensure the aggregate fits.
	 */
	limit = IEEE80211_TXOP_TO_US(
		ic->ic_wme.wme_chanParams.cap_wmeParams[pri].wmep_txopLimit);
	if (limit != 0 &&
	    (txtime = ff_approx_txtime(ni, m, mstaged)) > limit) {
		/*
		 * Aggregate too long, return to the caller for direct
		 * transmission.  In addition, flush any pending frame
		 * before sending this one.
		 */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: txtime %u exceeds txop limit %u\n",
		    __func__, txtime, limit);

		tap->txa_private = NULL;
		if (mstaged != NULL)
			stageq_remove(ic, sq, mstaged);
		IEEE80211_UNLOCK(ic);

		if (mstaged != NULL) {
			IEEE80211_TX_LOCK(ic);
			IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
			    "%s: flush staged frame", __func__);
			/* encap and xmit */
			ff_transmit(ni, mstaged);
			IEEE80211_TX_UNLOCK(ic);
		}
		return m;		/* NB: original frame */
	}
	/*
	 * An aggregation candidate.  If there's a frame to partner
	 * with then combine and return for processing.  Otherwise
	 * save this frame and wait for a partner to show up (or
	 * the frame to be flushed).  Note that staged frames also
	 * hold their node reference.
	 */
	if (mstaged != NULL) {
		tap->txa_private = NULL;
		stageq_remove(ic, sq, mstaged);
		IEEE80211_UNLOCK(ic);

		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "%s: aggregate fast-frame", __func__);
		/*
		 * Release the node reference; we only need
		 * the one already in mstaged.
		 */
		KASSERT(mstaged->m_pkthdr.rcvif == (void *)ni,
		    ("rcvif %p ni %p", mstaged->m_pkthdr.rcvif, ni));
		ieee80211_free_node(ni);

		m->m_nextpkt = NULL;
		mstaged->m_nextpkt = m;
		mstaged->m_flags |= M_FF; /* NB: mark for encap work */
	} else {
		KASSERT(tap->txa_private == NULL,
		    ("txa_private %p", tap->txa_private));
		tap->txa_private = m;

		stageq_add(ic, sq, m);
		IEEE80211_UNLOCK(ic);

		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "%s: stage frame, %u queued", __func__, sq->depth);
		/* NB: mstaged is NULL */
	}
	return mstaged;
}

void
ieee80211_ff_node_init(struct ieee80211_node *ni)
{
	/*
	 * Clean FF state on re-associate.  This handles the case
	 * where a station leaves w/o notifying us and then returns
	 * before node is reaped for inactivity.
	 */
	ieee80211_ff_node_cleanup(ni);
}

void
ieee80211_ff_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211_superg *sg = ic->ic_superg;
	struct ieee80211_tx_ampdu *tap;
	struct mbuf *m, *next_m, *head;
	int tid;

	IEEE80211_LOCK(ic);
	head = NULL;
	for (tid = 0; tid < WME_NUM_TID; tid++) {
		int ac = TID_TO_WME_AC(tid);

		tap = &ni->ni_tx_ampdu[tid];
		m = tap->txa_private;
		if (m != NULL) {
			tap->txa_private = NULL;
			stageq_remove(ic, &sg->ff_stageq[ac], m);
			m->m_nextpkt = head;
			head = m;
		}
	}
	IEEE80211_UNLOCK(ic);

	/*
	 * Free mbufs, taking care to not dereference the mbuf after
	 * we free it (hence grabbing m_nextpkt before we free it.)
	 */
	m = head;
	while (m != NULL) {
		next_m = m->m_nextpkt;
		m_freem(m);
		ieee80211_free_node(ni);
		m = next_m;
	}
}

/*
 * Switch between turbo and non-turbo operating modes.
 * Use the specified channel flags to locate the new
 * channel, update 802.11 state, and then call back into
 * the driver to effect the change.
 */
void
ieee80211_dturbo_switch(struct ieee80211vap *vap, int newflags)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_channel *chan;

	chan = ieee80211_find_channel(ic, ic->ic_bsschan->ic_freq, newflags);
	if (chan == NULL) {		/* XXX should not happen */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: no channel with freq %u flags 0x%x\n",
		    __func__, ic->ic_bsschan->ic_freq, newflags);
		return;
	}

	IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
	    "%s: %s -> %s (freq %u flags 0x%x)\n", __func__,
	    ieee80211_phymode_name[ieee80211_chan2mode(ic->ic_bsschan)],
	    ieee80211_phymode_name[ieee80211_chan2mode(chan)],
	    chan->ic_freq, chan->ic_flags);

	ic->ic_bsschan = chan;
	ic->ic_prevchan = ic->ic_curchan;
	ic->ic_curchan = chan;
	ic->ic_rt = ieee80211_get_ratetable(chan);
	ic->ic_set_channel(ic);
	ieee80211_radiotap_chan_change(ic);
	/* NB: do not need to reset ERP state 'cuz we're in sta mode */
}

/*
 * Return the current ``state'' of an Atheros capability.
 * If associated in station mode report the negotiated
 * setting. Otherwise report the current setting.
 */
static int
getathcap(struct ieee80211vap *vap, int cap)
{
	if (vap->iv_opmode == IEEE80211_M_STA &&
	    vap->iv_state == IEEE80211_S_RUN)
		return IEEE80211_ATH_CAP(vap, vap->iv_bss, cap) != 0;
	else
		return (vap->iv_flags & cap) != 0;
}

static int
superg_ioctl_get80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
	switch (ireq->i_type) {
	case IEEE80211_IOC_FF:
		ireq->i_val = getathcap(vap, IEEE80211_F_FF);
		break;
	case IEEE80211_IOC_TURBOP:
		ireq->i_val = getathcap(vap, IEEE80211_F_TURBOP);
		break;
	default:
		return ENOSYS;
	}
	return 0;
}
IEEE80211_IOCTL_GET(superg, superg_ioctl_get80211);

static int
superg_ioctl_set80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
	switch (ireq->i_type) {
	case IEEE80211_IOC_FF:
		if (ireq->i_val) {
			if ((vap->iv_caps & IEEE80211_C_FF) == 0)
				return EOPNOTSUPP;
			vap->iv_flags |= IEEE80211_F_FF;
		} else
			vap->iv_flags &= ~IEEE80211_F_FF;
		return ENETRESET;
	case IEEE80211_IOC_TURBOP:
		if (ireq->i_val) {
			if ((vap->iv_caps & IEEE80211_C_TURBOP) == 0)
				return EOPNOTSUPP;
			vap->iv_flags |= IEEE80211_F_TURBOP;
		} else
			vap->iv_flags &= ~IEEE80211_F_TURBOP;
		return ENETRESET;
	default:
		return ENOSYS;
	}
	return 0;
}
IEEE80211_IOCTL_SET(superg, superg_ioctl_set80211);

#endif	/* IEEE80211_SUPPORT_SUPERG */
