1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright 2002-2005, Instant802 Networks, Inc.
4 * Copyright 2005-2006, Devicescape Software, Inc.
5 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
6 * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2013-2014  Intel Mobile Communications GmbH
8 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
9 * Copyright (C) 2018-2024 Intel Corporation
10 */
11
12#include <linux/jiffies.h>
13#include <linux/slab.h>
14#include <linux/kernel.h>
15#include <linux/skbuff.h>
16#include <linux/netdevice.h>
17#include <linux/etherdevice.h>
18#include <linux/rcupdate.h>
19#include <linux/export.h>
20#include <linux/kcov.h>
21#include <linux/bitops.h>
22#include <kunit/visibility.h>
23#include <net/mac80211.h>
24#include <net/ieee80211_radiotap.h>
25#include <asm/unaligned.h>
26
27#include "ieee80211_i.h"
28#include "driver-ops.h"
29#include "led.h"
30#include "mesh.h"
31#include "wep.h"
32#include "wpa.h"
33#include "tkip.h"
34#include "wme.h"
35#include "rate.h"
36
37/*
38 * monitor mode reception
39 *
40 * This function cleans up the SKB, i.e. it removes all the stuff
41 * only useful for monitoring.
42 */
43static struct sk_buff *ieee80211_clean_skb(struct sk_buff *skb,
44					   unsigned int present_fcs_len,
45					   unsigned int rtap_space)
46{
47	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
48	struct ieee80211_hdr *hdr;
49	unsigned int hdrlen;
50	__le16 fc;
51
52	if (present_fcs_len)
53		__pskb_trim(skb, skb->len - present_fcs_len);
54	pskb_pull(skb, rtap_space);
55
56	/* After pulling radiotap header, clear all flags that indicate
57	 * info in skb->data.
58	 */
59	status->flag &= ~(RX_FLAG_RADIOTAP_TLV_AT_END |
60			  RX_FLAG_RADIOTAP_LSIG |
61			  RX_FLAG_RADIOTAP_HE_MU |
62			  RX_FLAG_RADIOTAP_HE);
63
64	hdr = (void *)skb->data;
65	fc = hdr->frame_control;
66
67	/*
68	 * Remove the HT-Control field (if present) on management
69	 * frames after we've sent the frame to monitoring. We
70	 * (currently) don't need it, and don't properly parse
71	 * frames with it present, due to the assumption of a
72	 * fixed management header length.
73	 */
74	if (likely(!ieee80211_is_mgmt(fc) || !ieee80211_has_order(fc)))
75		return skb;
76
77	hdrlen = ieee80211_hdrlen(fc);
78	hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_ORDER);
79
80	if (!pskb_may_pull(skb, hdrlen)) {
81		dev_kfree_skb(skb);
82		return NULL;
83	}
84
85	memmove(skb->data + IEEE80211_HT_CTL_LEN, skb->data,
86		hdrlen - IEEE80211_HT_CTL_LEN);
87	pskb_pull(skb, IEEE80211_HT_CTL_LEN);
88
89	return skb;
90}
91
92static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
93				     unsigned int rtap_space)
94{
95	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
96	struct ieee80211_hdr *hdr;
97
98	hdr = (void *)(skb->data + rtap_space);
99
100	if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
101			    RX_FLAG_FAILED_PLCP_CRC |
102			    RX_FLAG_ONLY_MONITOR |
103			    RX_FLAG_NO_PSDU))
104		return true;
105
106	if (unlikely(skb->len < 16 + present_fcs_len + rtap_space))
107		return true;
108
109	if (ieee80211_is_ctl(hdr->frame_control) &&
110	    !ieee80211_is_pspoll(hdr->frame_control) &&
111	    !ieee80211_is_back_req(hdr->frame_control))
112		return true;
113
114	return false;
115}
116
117static int
118ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
119			     struct ieee80211_rx_status *status,
120			     struct sk_buff *skb)
121{
122	int len;
123
124	/* always present fields */
125	len = sizeof(struct ieee80211_radiotap_header) + 8;
126
127	/* allocate extra bitmaps */
128	if (status->chains)
129		len += 4 * hweight8(status->chains);
130
131	if (ieee80211_have_rx_timestamp(status)) {
132		len = ALIGN(len, 8);
133		len += 8;
134	}
135	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
136		len += 1;
137
138	/* antenna field, if we don't have per-chain info */
139	if (!status->chains)
140		len += 1;
141
142	/* padding for RX_FLAGS if necessary */
143	len = ALIGN(len, 2);
144
145	if (status->encoding == RX_ENC_HT) /* HT info */
146		len += 3;
147
148	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
149		len = ALIGN(len, 4);
150		len += 8;
151	}
152
153	if (status->encoding == RX_ENC_VHT) {
154		len = ALIGN(len, 2);
155		len += 12;
156	}
157
158	if (local->hw.radiotap_timestamp.units_pos >= 0) {
159		len = ALIGN(len, 8);
160		len += 12;
161	}
162
163	if (status->encoding == RX_ENC_HE &&
164	    status->flag & RX_FLAG_RADIOTAP_HE) {
165		len = ALIGN(len, 2);
166		len += 12;
167		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12);
168	}
169
170	if (status->encoding == RX_ENC_HE &&
171	    status->flag & RX_FLAG_RADIOTAP_HE_MU) {
172		len = ALIGN(len, 2);
173		len += 12;
174		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12);
175	}
176
177	if (status->flag & RX_FLAG_NO_PSDU)
178		len += 1;
179
180	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
181		len = ALIGN(len, 2);
182		len += 4;
183		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4);
184	}
185
186	if (status->chains) {
187		/* antenna and antenna signal fields */
188		len += 2 * hweight8(status->chains);
189	}
190
191	if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END) {
192		int tlv_offset = 0;
193
194		/*
195		 * The position to look at depends on the existence (or non-
196		 * existence) of other elements, so take that into account...
197		 */
198		if (status->flag & RX_FLAG_RADIOTAP_HE)
199			tlv_offset +=
200				sizeof(struct ieee80211_radiotap_he);
201		if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
202			tlv_offset +=
203				sizeof(struct ieee80211_radiotap_he_mu);
204		if (status->flag & RX_FLAG_RADIOTAP_LSIG)
205			tlv_offset +=
206				sizeof(struct ieee80211_radiotap_lsig);
207
208		/* ensure 4 byte alignment for TLV */
209		len = ALIGN(len, 4);
210
211		/* TLVs until the mac header */
212		len += skb_mac_header(skb) - &skb->data[tlv_offset];
213	}
214
215	return len;
216}
217
218static void __ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata,
219					   int link_id,
220					   struct sta_info *sta,
221					   struct sk_buff *skb)
222{
223	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
224
225	if (link_id >= 0) {
226		status->link_valid = 1;
227		status->link_id = link_id;
228	} else {
229		status->link_valid = 0;
230	}
231
232	skb_queue_tail(&sdata->skb_queue, skb);
233	wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
234	if (sta)
235		sta->deflink.rx_stats.packets++;
236}
237
238static void ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata,
239					 int link_id,
240					 struct sta_info *sta,
241					 struct sk_buff *skb)
242{
243	skb->protocol = 0;
244	__ieee80211_queue_skb_to_iface(sdata, link_id, sta, skb);
245}
246
247static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
248					 struct sk_buff *skb,
249					 int rtap_space)
250{
251	struct {
252		struct ieee80211_hdr_3addr hdr;
253		u8 category;
254		u8 action_code;
255	} __packed __aligned(2) action;
256
257	if (!sdata)
258		return;
259
260	BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);
261
262	if (skb->len < rtap_space + sizeof(action) +
263		       VHT_MUMIMO_GROUPS_DATA_LEN)
264		return;
265
266	if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
267		return;
268
269	skb_copy_bits(skb, rtap_space, &action, sizeof(action));
270
271	if (!ieee80211_is_action(action.hdr.frame_control))
272		return;
273
274	if (action.category != WLAN_CATEGORY_VHT)
275		return;
276
277	if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
278		return;
279
280	if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
281		return;
282
283	skb = skb_copy(skb, GFP_ATOMIC);
284	if (!skb)
285		return;
286
287	ieee80211_queue_skb_to_iface(sdata, -1, NULL, skb);
288}
289
290/*
291 * ieee80211_add_rx_radiotap_header - add radiotap header
292 *
293 * add a radiotap header containing all the fields which the hardware provided.
294 */
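/*
 * Illustrative sketch only (not used by mac80211 itself): a consumer of
 * the header built below, e.g. a hypothetical debug hook, could walk the
 * generated fields with the radiotap iterator from net/wireless/radiotap.c;
 * "rthdr" and "skb" here stand for the header and frame being inspected:
 *
 *	struct ieee80211_radiotap_iterator iter;
 *	u16 freq = 0;
 *	int err;
 *
 *	err = ieee80211_radiotap_iterator_init(&iter, rthdr, skb->len, NULL);
 *	while (!err) {
 *		err = ieee80211_radiotap_iterator_next(&iter);
 *		if (!err && iter.this_arg_index == IEEE80211_RADIOTAP_CHANNEL)
 *			freq = get_unaligned_le16(iter.this_arg);
 *	}
 */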
295static void
296ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
297				 struct sk_buff *skb,
298				 struct ieee80211_rate *rate,
299				 int rtap_len, bool has_fcs)
300{
301	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
302	struct ieee80211_radiotap_header *rthdr;
303	unsigned char *pos;
304	__le32 *it_present;
305	u32 it_present_val;
306	u16 rx_flags = 0;
307	u16 channel_flags = 0;
308	u32 tlvs_len = 0;
309	int mpdulen, chain;
310	unsigned long chains = status->chains;
311	struct ieee80211_radiotap_he he = {};
312	struct ieee80211_radiotap_he_mu he_mu = {};
313	struct ieee80211_radiotap_lsig lsig = {};
314
315	if (status->flag & RX_FLAG_RADIOTAP_HE) {
316		he = *(struct ieee80211_radiotap_he *)skb->data;
317		skb_pull(skb, sizeof(he));
318		WARN_ON_ONCE(status->encoding != RX_ENC_HE);
319	}
320
321	if (status->flag & RX_FLAG_RADIOTAP_HE_MU) {
322		he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data;
323		skb_pull(skb, sizeof(he_mu));
324	}
325
326	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
327		lsig = *(struct ieee80211_radiotap_lsig *)skb->data;
328		skb_pull(skb, sizeof(lsig));
329	}
330
331	if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END) {
		/* skb->data points at the TLVs, all other info was pulled off */
333		tlvs_len = skb_mac_header(skb) - skb->data;
334	}
335
336	mpdulen = skb->len;
337	if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
338		mpdulen += FCS_LEN;
339
340	rthdr = skb_push(skb, rtap_len - tlvs_len);
341	memset(rthdr, 0, rtap_len - tlvs_len);
342	it_present = &rthdr->it_present;
343
344	/* radiotap header, set always present flags */
345	rthdr->it_len = cpu_to_le16(rtap_len);
346	it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
347			 BIT(IEEE80211_RADIOTAP_CHANNEL) |
348			 BIT(IEEE80211_RADIOTAP_RX_FLAGS);
349
350	if (!status->chains)
351		it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);
352
353	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
354		it_present_val |=
355			BIT(IEEE80211_RADIOTAP_EXT) |
356			BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
357		put_unaligned_le32(it_present_val, it_present);
358		it_present++;
359		it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
360				 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
361	}
362
363	if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END)
364		it_present_val |= BIT(IEEE80211_RADIOTAP_TLV);
365
366	put_unaligned_le32(it_present_val, it_present);
367
368	/* This references through an offset into it_optional[] rather
369	 * than via it_present otherwise later uses of pos will cause
370	 * the compiler to think we have walked past the end of the
371	 * struct member.
372	 */
373	pos = (void *)&rthdr->it_optional[it_present + 1 - rthdr->it_optional];
374
375	/* the order of the following fields is important */
376
377	/* IEEE80211_RADIOTAP_TSFT */
378	if (ieee80211_have_rx_timestamp(status)) {
379		/* padding */
380		while ((pos - (u8 *)rthdr) & 7)
381			*pos++ = 0;
382		put_unaligned_le64(
383			ieee80211_calculate_rx_timestamp(local, status,
384							 mpdulen, 0),
385			pos);
386		rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_TSFT));
387		pos += 8;
388	}
389
390	/* IEEE80211_RADIOTAP_FLAGS */
391	if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
392		*pos |= IEEE80211_RADIOTAP_F_FCS;
393	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
394		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
395	if (status->enc_flags & RX_ENC_FLAG_SHORTPRE)
396		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
397	pos++;
398
399	/* IEEE80211_RADIOTAP_RATE */
400	if (!rate || status->encoding != RX_ENC_LEGACY) {
		/*
		 * Without rate information, don't add it. If we have a
		 * non-legacy rate, the MCS information is a separate
		 * radiotap field added below. The byte here is still
		 * needed as padding for the channel, so initialise it to 0.
		 */
407		*pos = 0;
408	} else {
409		int shift = 0;
410		rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_RATE));
411		if (status->bw == RATE_INFO_BW_10)
412			shift = 1;
413		else if (status->bw == RATE_INFO_BW_5)
414			shift = 2;
415		*pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
416	}
417	pos++;
418
419	/* IEEE80211_RADIOTAP_CHANNEL */
	/* TODO: frequency offset in kHz */
421	put_unaligned_le16(status->freq, pos);
422	pos += 2;
423	if (status->bw == RATE_INFO_BW_10)
424		channel_flags |= IEEE80211_CHAN_HALF;
425	else if (status->bw == RATE_INFO_BW_5)
426		channel_flags |= IEEE80211_CHAN_QUARTER;
427
428	if (status->band == NL80211_BAND_5GHZ ||
429	    status->band == NL80211_BAND_6GHZ)
430		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
431	else if (status->encoding != RX_ENC_LEGACY)
432		channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
433	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
434		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
435	else if (rate)
436		channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
437	else
438		channel_flags |= IEEE80211_CHAN_2GHZ;
439	put_unaligned_le16(channel_flags, pos);
440	pos += 2;
441
442	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
443	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
444	    !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
445		*pos = status->signal;
446		rthdr->it_present |=
447			cpu_to_le32(BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL));
448		pos++;
449	}
450
451	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
452
453	if (!status->chains) {
454		/* IEEE80211_RADIOTAP_ANTENNA */
455		*pos = status->antenna;
456		pos++;
457	}
458
459	/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
460
461	/* IEEE80211_RADIOTAP_RX_FLAGS */
462	/* ensure 2 byte alignment for the 2 byte field as required */
463	if ((pos - (u8 *)rthdr) & 1)
464		*pos++ = 0;
465	if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
466		rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
467	put_unaligned_le16(rx_flags, pos);
468	pos += 2;
469
470	if (status->encoding == RX_ENC_HT) {
471		unsigned int stbc;
472
473		rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_MCS));
474		*pos = local->hw.radiotap_mcs_details;
475		if (status->enc_flags & RX_ENC_FLAG_HT_GF)
476			*pos |= IEEE80211_RADIOTAP_MCS_HAVE_FMT;
477		if (status->enc_flags & RX_ENC_FLAG_LDPC)
478			*pos |= IEEE80211_RADIOTAP_MCS_HAVE_FEC;
479		pos++;
480		*pos = 0;
481		if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
482			*pos |= IEEE80211_RADIOTAP_MCS_SGI;
483		if (status->bw == RATE_INFO_BW_40)
484			*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
485		if (status->enc_flags & RX_ENC_FLAG_HT_GF)
486			*pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
487		if (status->enc_flags & RX_ENC_FLAG_LDPC)
488			*pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
489		stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT;
490		*pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
491		pos++;
492		*pos++ = status->rate_idx;
493	}
494
495	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
496		u16 flags = 0;
497
498		/* ensure 4 byte alignment */
499		while ((pos - (u8 *)rthdr) & 3)
500			pos++;
501		rthdr->it_present |=
502			cpu_to_le32(BIT(IEEE80211_RADIOTAP_AMPDU_STATUS));
503		put_unaligned_le32(status->ampdu_reference, pos);
504		pos += 4;
505		if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
506			flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
507		if (status->flag & RX_FLAG_AMPDU_IS_LAST)
508			flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
509		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
510			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
511		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
512			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
513		if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN)
514			flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN;
515		if (status->flag & RX_FLAG_AMPDU_EOF_BIT)
516			flags |= IEEE80211_RADIOTAP_AMPDU_EOF;
517		put_unaligned_le16(flags, pos);
518		pos += 2;
519		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
520			*pos++ = status->ampdu_delimiter_crc;
521		else
522			*pos++ = 0;
523		*pos++ = 0;
524	}
525
526	if (status->encoding == RX_ENC_VHT) {
527		u16 known = local->hw.radiotap_vht_details;
528
529		rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_VHT));
530		put_unaligned_le16(known, pos);
531		pos += 2;
532		/* flags */
533		if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
534			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
535		/* in VHT, STBC is binary */
536		if (status->enc_flags & RX_ENC_FLAG_STBC_MASK)
537			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
538		if (status->enc_flags & RX_ENC_FLAG_BF)
539			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
540		pos++;
541		/* bandwidth */
542		switch (status->bw) {
543		case RATE_INFO_BW_80:
544			*pos++ = 4;
545			break;
546		case RATE_INFO_BW_160:
547			*pos++ = 11;
548			break;
549		case RATE_INFO_BW_40:
550			*pos++ = 1;
551			break;
552		default:
553			*pos++ = 0;
554		}
555		/* MCS/NSS */
556		*pos = (status->rate_idx << 4) | status->nss;
557		pos += 4;
558		/* coding field */
559		if (status->enc_flags & RX_ENC_FLAG_LDPC)
560			*pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
561		pos++;
562		/* group ID */
563		pos++;
564		/* partial_aid */
565		pos += 2;
566	}
567
568	if (local->hw.radiotap_timestamp.units_pos >= 0) {
569		u16 accuracy = 0;
570		u8 flags;
571		u64 ts;
572
573		rthdr->it_present |=
574			cpu_to_le32(BIT(IEEE80211_RADIOTAP_TIMESTAMP));
575
576		/* ensure 8 byte alignment */
577		while ((pos - (u8 *)rthdr) & 7)
578			pos++;
579
580		if (status->flag & RX_FLAG_MACTIME_IS_RTAP_TS64) {
581			flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_64BIT;
582			ts = status->mactime;
583		} else {
584			flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT;
585			ts = status->device_timestamp;
586		}
587
588		put_unaligned_le64(ts, pos);
589		pos += sizeof(u64);
590
591		if (local->hw.radiotap_timestamp.accuracy >= 0) {
592			accuracy = local->hw.radiotap_timestamp.accuracy;
593			flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY;
594		}
595		put_unaligned_le16(accuracy, pos);
596		pos += sizeof(u16);
597
598		*pos++ = local->hw.radiotap_timestamp.units_pos;
599		*pos++ = flags;
600	}
601
602	if (status->encoding == RX_ENC_HE &&
603	    status->flag & RX_FLAG_RADIOTAP_HE) {
604#define HE_PREP(f, val)	le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f)
605
606		if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) {
607			he.data6 |= HE_PREP(DATA6_NSTS,
608					    FIELD_GET(RX_ENC_FLAG_STBC_MASK,
609						      status->enc_flags));
610			he.data3 |= HE_PREP(DATA3_STBC, 1);
611		} else {
612			he.data6 |= HE_PREP(DATA6_NSTS, status->nss);
613		}
614
615#define CHECK_GI(s) \
616	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \
617		     (int)NL80211_RATE_INFO_HE_GI_##s)
618
619		CHECK_GI(0_8);
620		CHECK_GI(1_6);
621		CHECK_GI(3_2);
622
623		he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx);
624		he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm);
625		he.data3 |= HE_PREP(DATA3_CODING,
626				    !!(status->enc_flags & RX_ENC_FLAG_LDPC));
627
628		he.data5 |= HE_PREP(DATA5_GI, status->he_gi);
629
630		switch (status->bw) {
631		case RATE_INFO_BW_20:
632			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
633					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ);
634			break;
635		case RATE_INFO_BW_40:
636			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
637					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ);
638			break;
639		case RATE_INFO_BW_80:
640			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
641					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ);
642			break;
643		case RATE_INFO_BW_160:
644			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
645					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ);
646			break;
647		case RATE_INFO_BW_HE_RU:
648#define CHECK_RU_ALLOC(s) \
649	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \
650		     NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4)
651
652			CHECK_RU_ALLOC(26);
653			CHECK_RU_ALLOC(52);
654			CHECK_RU_ALLOC(106);
655			CHECK_RU_ALLOC(242);
656			CHECK_RU_ALLOC(484);
657			CHECK_RU_ALLOC(996);
658			CHECK_RU_ALLOC(2x996);
659
660			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
661					    status->he_ru + 4);
662			break;
663		default:
664			WARN_ONCE(1, "Invalid SU BW %d\n", status->bw);
665		}
666
667		/* ensure 2 byte alignment */
668		while ((pos - (u8 *)rthdr) & 1)
669			pos++;
670		rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_HE));
671		memcpy(pos, &he, sizeof(he));
672		pos += sizeof(he);
673	}
674
675	if (status->encoding == RX_ENC_HE &&
676	    status->flag & RX_FLAG_RADIOTAP_HE_MU) {
677		/* ensure 2 byte alignment */
678		while ((pos - (u8 *)rthdr) & 1)
679			pos++;
680		rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_HE_MU));
681		memcpy(pos, &he_mu, sizeof(he_mu));
682		pos += sizeof(he_mu);
683	}
684
685	if (status->flag & RX_FLAG_NO_PSDU) {
686		rthdr->it_present |=
687			cpu_to_le32(BIT(IEEE80211_RADIOTAP_ZERO_LEN_PSDU));
688		*pos++ = status->zero_length_psdu_type;
689	}
690
691	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
692		/* ensure 2 byte alignment */
693		while ((pos - (u8 *)rthdr) & 1)
694			pos++;
695		rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_LSIG));
696		memcpy(pos, &lsig, sizeof(lsig));
697		pos += sizeof(lsig);
698	}
699
700	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
701		*pos++ = status->chain_signal[chain];
702		*pos++ = chain;
703	}
704}
705
706static struct sk_buff *
707ieee80211_make_monitor_skb(struct ieee80211_local *local,
708			   struct sk_buff **origskb,
709			   struct ieee80211_rate *rate,
710			   int rtap_space, bool use_origskb)
711{
712	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb);
713	int rt_hdrlen, needed_headroom;
714	struct sk_buff *skb;
715
716	/* room for the radiotap header based on driver features */
717	rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb);
718	needed_headroom = rt_hdrlen - rtap_space;
719
720	if (use_origskb) {
721		/* only need to expand headroom if necessary */
722		skb = *origskb;
723		*origskb = NULL;
724
725		/*
726		 * This shouldn't trigger often because most devices have an
727		 * RX header they pull before we get here, and that should
728		 * be big enough for our radiotap information. We should
729		 * probably export the length to drivers so that we can have
730		 * them allocate enough headroom to start with.
731		 */
732		if (skb_headroom(skb) < needed_headroom &&
733		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
734			dev_kfree_skb(skb);
735			return NULL;
736		}
737	} else {
738		/*
739		 * Need to make a copy and possibly remove radiotap header
740		 * and FCS from the original.
741		 */
742		skb = skb_copy_expand(*origskb, needed_headroom + NET_SKB_PAD,
743				      0, GFP_ATOMIC);
744
745		if (!skb)
746			return NULL;
747	}
748
749	/* prepend radiotap information */
750	ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);
751
752	skb_reset_mac_header(skb);
753	skb->ip_summed = CHECKSUM_UNNECESSARY;
754	skb->pkt_type = PACKET_OTHERHOST;
755	skb->protocol = htons(ETH_P_802_2);
756
757	return skb;
758}
759
760/*
761 * This function copies a received frame to all monitor interfaces and
762 * returns a cleaned-up SKB that no longer includes the FCS nor the
763 * radiotap header the driver might have added.
764 */
765static struct sk_buff *
766ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
767		     struct ieee80211_rate *rate)
768{
769	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
770	struct ieee80211_sub_if_data *sdata;
771	struct sk_buff *monskb = NULL;
772	int present_fcs_len = 0;
773	unsigned int rtap_space = 0;
774	struct ieee80211_sub_if_data *monitor_sdata =
775		rcu_dereference(local->monitor_sdata);
776	bool only_monitor = false;
777	unsigned int min_head_len;
778
779	if (WARN_ON_ONCE(status->flag & RX_FLAG_RADIOTAP_TLV_AT_END &&
780			 !skb_mac_header_was_set(origskb))) {
		/* with this skb there's no way to know where the payload starts */
782		dev_kfree_skb(origskb);
783		return NULL;
784	}
785
786	if (status->flag & RX_FLAG_RADIOTAP_HE)
787		rtap_space += sizeof(struct ieee80211_radiotap_he);
788
789	if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
790		rtap_space += sizeof(struct ieee80211_radiotap_he_mu);
791
792	if (status->flag & RX_FLAG_RADIOTAP_LSIG)
793		rtap_space += sizeof(struct ieee80211_radiotap_lsig);
794
795	if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END)
796		rtap_space += skb_mac_header(origskb) - &origskb->data[rtap_space];
797
798	min_head_len = rtap_space;
799
800	/*
801	 * First, we may need to make a copy of the skb because
802	 *  (1) we need to modify it for radiotap (if not present), and
803	 *  (2) the other RX handlers will modify the skb we got.
804	 *
805	 * We don't need to, of course, if we aren't going to return
806	 * the SKB because it has a bad FCS/PLCP checksum.
807	 */
808
809	if (!(status->flag & RX_FLAG_NO_PSDU)) {
810		if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
811			if (unlikely(origskb->len <= FCS_LEN + rtap_space)) {
812				/* driver bug */
813				WARN_ON(1);
814				dev_kfree_skb(origskb);
815				return NULL;
816			}
817			present_fcs_len = FCS_LEN;
818		}
819
820		/* also consider the hdr->frame_control */
821		min_head_len += 2;
822	}
823
824	/* ensure that the expected data elements are in skb head */
825	if (!pskb_may_pull(origskb, min_head_len)) {
826		dev_kfree_skb(origskb);
827		return NULL;
828	}
829
830	only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space);
831
832	if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
833		if (only_monitor) {
834			dev_kfree_skb(origskb);
835			return NULL;
836		}
837
838		return ieee80211_clean_skb(origskb, present_fcs_len,
839					   rtap_space);
840	}
841
842	ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space);
843
844	list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) {
845		bool last_monitor = list_is_last(&sdata->u.mntr.list,
846						 &local->mon_list);
847
848		if (!monskb)
849			monskb = ieee80211_make_monitor_skb(local, &origskb,
850							    rate, rtap_space,
851							    only_monitor &&
852							    last_monitor);
853
854		if (monskb) {
855			struct sk_buff *skb;
856
857			if (last_monitor) {
858				skb = monskb;
859				monskb = NULL;
860			} else {
861				skb = skb_clone(monskb, GFP_ATOMIC);
862			}
863
864			if (skb) {
865				skb->dev = sdata->dev;
866				dev_sw_netstats_rx_add(skb->dev, skb->len);
867				netif_receive_skb(skb);
868			}
869		}
870
871		if (last_monitor)
872			break;
873	}
874
875	/* this happens if last_monitor was erroneously false */
876	dev_kfree_skb(monskb);
877
878	/* ditto */
879	if (!origskb)
880		return NULL;
881
882	return ieee80211_clean_skb(origskb, present_fcs_len, rtap_space);
883}
884
885static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
886{
887	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
888	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
889	int tid, seqno_idx, security_idx;
890
891	/* does the frame have a qos control field? */
892	if (ieee80211_is_data_qos(hdr->frame_control)) {
893		u8 *qc = ieee80211_get_qos_ctl(hdr);
894		/* frame has qos control */
895		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
896		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
897			status->rx_flags |= IEEE80211_RX_AMSDU;
898
899		seqno_idx = tid;
900		security_idx = tid;
901	} else {
902		/*
903		 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
904		 *
905		 *	Sequence numbers for management frames, QoS data
906		 *	frames with a broadcast/multicast address in the
907		 *	Address 1 field, and all non-QoS data frames sent
908		 *	by QoS STAs are assigned using an additional single
909		 *	modulo-4096 counter, [...]
910		 *
911		 * We also use that counter for non-QoS STAs.
912		 */
913		seqno_idx = IEEE80211_NUM_TIDS;
914		security_idx = 0;
915		if (ieee80211_is_mgmt(hdr->frame_control))
916			security_idx = IEEE80211_NUM_TIDS;
917		tid = 0;
918	}
919
920	rx->seqno_idx = seqno_idx;
921	rx->security_idx = security_idx;
	/* Set skb->priority to the 802.1d tag if the highest-order bit of the
	 * TID is not set. For now, set skb->priority to 0 for the other cases. */
924	rx->skb->priority = (tid > 7) ? 0 : tid;
925}
926
927/**
928 * DOC: Packet alignment
929 *
930 * Drivers always need to pass packets that are aligned to two-byte boundaries
931 * to the stack.
932 *
933 * Additionally, they should, if possible, align the payload data in a way that
934 * guarantees that the contained IP header is aligned to a four-byte
935 * boundary. In the case of regular frames, this simply means aligning the
936 * payload to a four-byte boundary (because either the IP header is directly
937 * contained, or IV/RFC1042 headers that have a length divisible by four are
938 * in front of it).  If the payload data is not properly aligned and the
939 * architecture doesn't support efficient unaligned operations, mac80211
940 * will align the data.
941 *
942 * With A-MSDU frames, however, the payload data address must yield two modulo
943 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
944 * push the IP header further back to a multiple of four again. Thankfully, the
945 * specs were sane enough this time around to require padding each A-MSDU
946 * subframe to a length that is a multiple of four.
947 *
948 * Padding like Atheros hardware adds which is between the 802.11 header and
949 * the payload is not supported; the driver is required to move the 802.11
950 * header to be directly in front of the payload in that case.
951 */
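/*
 * Illustrative driver-side sketch (hypothetical driver code, not part of
 * mac80211): copying the frame out of an arbitrarily-offset DMA buffer
 * into a freshly allocated skb is one simple way to satisfy the two-byte
 * alignment rule above, since dev_alloc_skb() returns at least two-byte
 * aligned data. "frame", "frame_len", "rx_status" and "hw" are assumed
 * driver-local variables:
 *
 *	struct sk_buff *skb = dev_alloc_skb(frame_len);
 *
 *	if (!skb)
 *		return;
 *	skb_put_data(skb, frame, frame_len);
 *	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
 *	ieee80211_rx_irqsafe(hw, skb);
 */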
952static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
953{
954#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
955	WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
956#endif
957}
958
959
960/* rx handlers */
961
962static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
963{
964	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
965
966	if (is_multicast_ether_addr(hdr->addr1))
967		return 0;
968
969	return ieee80211_is_robust_mgmt_frame(skb);
970}
971
972
973static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
974{
975	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
976
977	if (!is_multicast_ether_addr(hdr->addr1))
978		return 0;
979
980	return ieee80211_is_robust_mgmt_frame(skb);
981}
982
983
984/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
985static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
986{
987	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
988	struct ieee80211_mmie *mmie;
989	struct ieee80211_mmie_16 *mmie16;
990
991	if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
992		return -1;
993
994	if (!ieee80211_is_robust_mgmt_frame(skb) &&
995	    !ieee80211_is_beacon(hdr->frame_control))
996		return -1; /* not a robust management frame */
997
998	mmie = (struct ieee80211_mmie *)
999		(skb->data + skb->len - sizeof(*mmie));
1000	if (mmie->element_id == WLAN_EID_MMIE &&
1001	    mmie->length == sizeof(*mmie) - 2)
1002		return le16_to_cpu(mmie->key_id);
1003
1004	mmie16 = (struct ieee80211_mmie_16 *)
1005		(skb->data + skb->len - sizeof(*mmie16));
1006	if (skb->len >= 24 + sizeof(*mmie16) &&
1007	    mmie16->element_id == WLAN_EID_MMIE &&
1008	    mmie16->length == sizeof(*mmie16) - 2)
1009		return le16_to_cpu(mmie16->key_id);
1010
1011	return -1;
1012}
1013
1014static int ieee80211_get_keyid(struct sk_buff *skb)
1015{
1016	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1017	__le16 fc = hdr->frame_control;
1018	int hdrlen = ieee80211_hdrlen(fc);
1019	u8 keyid;
1020
1021	/* WEP, TKIP, CCMP and GCMP */
1022	if (unlikely(skb->len < hdrlen + IEEE80211_WEP_IV_LEN))
1023		return -EINVAL;
1024
1025	skb_copy_bits(skb, hdrlen + 3, &keyid, 1);
1026
1027	keyid >>= 6;
1028
1029	return keyid;
1030}
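
/*
 * For reference, the byte inspected above (at hdrlen + 3) carries the key
 * ID in its two most significant bits for all of WEP, TKIP, CCMP and GCMP,
 * which is why a single copy and a shift by 6 is enough. For CCMP/GCMP the
 * byte is laid out as:
 *
 *	  bits 7-6    bit 5     bits 4-0
 *	 +---------+---------+-----------+
 *	 | Key ID  |  ExtIV  | reserved  |
 *	 +---------+---------+-----------+
 */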
1031
1032static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
1033{
1034	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1035	char *dev_addr = rx->sdata->vif.addr;
1036
1037	if (ieee80211_is_data(hdr->frame_control)) {
1038		if (is_multicast_ether_addr(hdr->addr1)) {
1039			if (ieee80211_has_tods(hdr->frame_control) ||
1040			    !ieee80211_has_fromds(hdr->frame_control))
1041				return RX_DROP_MONITOR;
1042			if (ether_addr_equal(hdr->addr3, dev_addr))
1043				return RX_DROP_MONITOR;
1044		} else {
1045			if (!ieee80211_has_a4(hdr->frame_control))
1046				return RX_DROP_MONITOR;
1047			if (ether_addr_equal(hdr->addr4, dev_addr))
1048				return RX_DROP_MONITOR;
1049		}
1050	}
1051
	/* If there is no established peer link and this is not a peer link
	 * establishment frame, beacon or probe, drop the frame.
	 */
1055
1056	if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
1057		struct ieee80211_mgmt *mgmt;
1058
1059		if (!ieee80211_is_mgmt(hdr->frame_control))
1060			return RX_DROP_MONITOR;
1061
1062		if (ieee80211_is_action(hdr->frame_control)) {
1063			u8 category;
1064
1065			/* make sure category field is present */
1066			if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
1067				return RX_DROP_MONITOR;
1068
1069			mgmt = (struct ieee80211_mgmt *)hdr;
1070			category = mgmt->u.action.category;
1071			if (category != WLAN_CATEGORY_MESH_ACTION &&
1072			    category != WLAN_CATEGORY_SELF_PROTECTED)
1073				return RX_DROP_MONITOR;
1074			return RX_CONTINUE;
1075		}
1076
1077		if (ieee80211_is_probe_req(hdr->frame_control) ||
1078		    ieee80211_is_probe_resp(hdr->frame_control) ||
1079		    ieee80211_is_beacon(hdr->frame_control) ||
1080		    ieee80211_is_auth(hdr->frame_control))
1081			return RX_CONTINUE;
1082
1083		return RX_DROP_MONITOR;
1084	}
1085
1086	return RX_CONTINUE;
1087}
1088
1089static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
1090					      int index)
1091{
1092	struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
1093	struct sk_buff *tail = skb_peek_tail(frames);
1094	struct ieee80211_rx_status *status;
1095
1096	if (tid_agg_rx->reorder_buf_filtered &&
1097	    tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
1098		return true;
1099
1100	if (!tail)
1101		return false;
1102
1103	status = IEEE80211_SKB_RXCB(tail);
1104	if (status->flag & RX_FLAG_AMSDU_MORE)
1105		return false;
1106
1107	return true;
1108}
1109
1110static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
1111					    struct tid_ampdu_rx *tid_agg_rx,
1112					    int index,
1113					    struct sk_buff_head *frames)
1114{
1115	struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
1116	struct sk_buff *skb;
1117	struct ieee80211_rx_status *status;
1118
1119	lockdep_assert_held(&tid_agg_rx->reorder_lock);
1120
1121	if (skb_queue_empty(skb_list))
1122		goto no_frame;
1123
1124	if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1125		__skb_queue_purge(skb_list);
1126		goto no_frame;
1127	}
1128
1129	/* release frames from the reorder ring buffer */
1130	tid_agg_rx->stored_mpdu_num--;
1131	while ((skb = __skb_dequeue(skb_list))) {
1132		status = IEEE80211_SKB_RXCB(skb);
1133		status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
1134		__skb_queue_tail(frames, skb);
1135	}
1136
1137no_frame:
1138	if (tid_agg_rx->reorder_buf_filtered)
1139		tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
1140	tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
1141}
1142
1143static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
1144					     struct tid_ampdu_rx *tid_agg_rx,
1145					     u16 head_seq_num,
1146					     struct sk_buff_head *frames)
1147{
1148	int index;
1149
1150	lockdep_assert_held(&tid_agg_rx->reorder_lock);
1151
1152	while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
1153		index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1154		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
1155						frames);
1156	}
1157}
1158
1159/*
1160 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
1161 * the skb was added to the buffer longer than this time ago, the earlier
1162 * frames that have not yet been received are assumed to be lost and the skb
1163 * can be released for processing. This may also release other skb's from the
1164 * reorder buffer if there are no additional gaps between the frames.
1165 *
1166 * Callers must hold tid_agg_rx->reorder_lock.
1167 */
1168#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
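/* For example, HZ == 250 gives 25 jiffies here, i.e. the timeout works out
 * to 100 ms regardless of the configured CONFIG_HZ value.
 */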
1169
1170static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
1171					  struct tid_ampdu_rx *tid_agg_rx,
1172					  struct sk_buff_head *frames)
1173{
1174	int index, i, j;
1175
1176	lockdep_assert_held(&tid_agg_rx->reorder_lock);
1177
1178	/* release the buffer until next missing frame */
1179	index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1180	if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
1181	    tid_agg_rx->stored_mpdu_num) {
1182		/*
1183		 * No buffers ready to be released, but check whether any
1184		 * frames in the reorder buffer have timed out.
1185		 */
1186		int skipped = 1;
1187		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
1188		     j = (j + 1) % tid_agg_rx->buf_size) {
1189			if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
1190				skipped++;
1191				continue;
1192			}
1193			if (skipped &&
1194			    !time_after(jiffies, tid_agg_rx->reorder_time[j] +
1195					HT_RX_REORDER_BUF_TIMEOUT))
1196				goto set_release_timer;
1197
1198			/* don't leave incomplete A-MSDUs around */
1199			for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
1200			     i = (i + 1) % tid_agg_rx->buf_size)
1201				__skb_queue_purge(&tid_agg_rx->reorder_buf[i]);
1202
1203			ht_dbg_ratelimited(sdata,
1204					   "release an RX reorder frame due to timeout on earlier frames\n");
1205			ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
1206							frames);
1207
1208			/*
1209			 * Increment the head seq# also for the skipped slots.
1210			 */
1211			tid_agg_rx->head_seq_num =
1212				(tid_agg_rx->head_seq_num +
1213				 skipped) & IEEE80211_SN_MASK;
1214			skipped = 0;
1215		}
1216	} else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1217		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
1218						frames);
1219		index =	tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1220	}
1221
1222	if (tid_agg_rx->stored_mpdu_num) {
1223		j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1224
1225		for (; j != (index - 1) % tid_agg_rx->buf_size;
1226		     j = (j + 1) % tid_agg_rx->buf_size) {
1227			if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
1228				break;
1229		}
1230
1231 set_release_timer:
1232
1233		if (!tid_agg_rx->removed)
1234			mod_timer(&tid_agg_rx->reorder_timer,
1235				  tid_agg_rx->reorder_time[j] + 1 +
1236				  HT_RX_REORDER_BUF_TIMEOUT);
1237	} else {
1238		del_timer(&tid_agg_rx->reorder_timer);
1239	}
1240}
1241
1242/*
1243 * As this function belongs to the RX path it must be under
1244 * rcu_read_lock protection. It returns false if the frame
1245 * can be processed immediately, true if it was consumed.
1246 */
1247static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
1248					     struct tid_ampdu_rx *tid_agg_rx,
1249					     struct sk_buff *skb,
1250					     struct sk_buff_head *frames)
1251{
1252	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1253	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1254	u16 mpdu_seq_num = ieee80211_get_sn(hdr);
1255	u16 head_seq_num, buf_size;
1256	int index;
1257	bool ret = true;
1258
1259	spin_lock(&tid_agg_rx->reorder_lock);
1260
1261	/*
1262	 * Offloaded BA sessions have no known starting sequence number so pick
1263	 * one from first Rxed frame for this tid after BA was started.
1264	 */
1265	if (unlikely(tid_agg_rx->auto_seq)) {
1266		tid_agg_rx->auto_seq = false;
1267		tid_agg_rx->ssn = mpdu_seq_num;
1268		tid_agg_rx->head_seq_num = mpdu_seq_num;
1269	}
1270
1271	buf_size = tid_agg_rx->buf_size;
1272	head_seq_num = tid_agg_rx->head_seq_num;
1273
1274	/*
1275	 * If the current MPDU's SN is smaller than the SSN, it shouldn't
1276	 * be reordered.
1277	 */
1278	if (unlikely(!tid_agg_rx->started)) {
1279		if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1280			ret = false;
1281			goto out;
1282		}
1283		tid_agg_rx->started = true;
1284	}
1285
	/* frame with an out-of-date sequence number */
1287	if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1288		dev_kfree_skb(skb);
1289		goto out;
1290	}
1291
	/*
	 * If the frame's sequence number exceeds our buffering window
	 * size, release some previous frames to make room for this one.
	 */
1296	if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
1297		head_seq_num = ieee80211_sn_inc(
1298				ieee80211_sn_sub(mpdu_seq_num, buf_size));
1299		/* release stored frames up to new head to stack */
1300		ieee80211_release_reorder_frames(sdata, tid_agg_rx,
1301						 head_seq_num, frames);
1302	}
1303
1304	/* Now the new frame is always in the range of the reordering buffer */
1305
1306	index = mpdu_seq_num % tid_agg_rx->buf_size;
1307
1308	/* check if we already stored this frame */
1309	if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1310		dev_kfree_skb(skb);
1311		goto out;
1312	}
1313
1314	/*
1315	 * If the current MPDU is in the right order and nothing else
1316	 * is stored we can process it directly, no need to buffer it.
1317	 * If it is first but there's something stored, we may be able
1318	 * to release frames after this one.
1319	 */
1320	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
1321	    tid_agg_rx->stored_mpdu_num == 0) {
1322		if (!(status->flag & RX_FLAG_AMSDU_MORE))
1323			tid_agg_rx->head_seq_num =
1324				ieee80211_sn_inc(tid_agg_rx->head_seq_num);
1325		ret = false;
1326		goto out;
1327	}
1328
1329	/* put the frame in the reordering buffer */
1330	__skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
1331	if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1332		tid_agg_rx->reorder_time[index] = jiffies;
1333		tid_agg_rx->stored_mpdu_num++;
1334		ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
1335	}
1336
1337 out:
1338	spin_unlock(&tid_agg_rx->reorder_lock);
1339	return ret;
1340}
1341
/*
 * Reorder MPDUs from A-MPDUs, keeping them in the reorder buffer.
 * MPDUs that do not need reordering, and buffered MPDUs that become
 * releasable, are added to the "frames" queue for immediate processing.
 */
1346static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
1347				       struct sk_buff_head *frames)
1348{
1349	struct sk_buff *skb = rx->skb;
1350	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1351	struct sta_info *sta = rx->sta;
1352	struct tid_ampdu_rx *tid_agg_rx;
1353	u16 sc;
1354	u8 tid, ack_policy;
1355
1356	if (!ieee80211_is_data_qos(hdr->frame_control) ||
1357	    is_multicast_ether_addr(hdr->addr1))
1358		goto dont_reorder;
1359
1360	/*
1361	 * filter the QoS data rx stream according to
1362	 * STA/TID and check if this STA/TID is on aggregation
1363	 */
1364
1365	if (!sta)
1366		goto dont_reorder;
1367
1368	ack_policy = *ieee80211_get_qos_ctl(hdr) &
1369		     IEEE80211_QOS_CTL_ACK_POLICY_MASK;
1370	tid = ieee80211_get_tid(hdr);
1371
1372	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
1373	if (!tid_agg_rx) {
1374		if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
1375		    !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
1376		    !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
1377			ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
1378					     WLAN_BACK_RECIPIENT,
1379					     WLAN_REASON_QSTA_REQUIRE_SETUP);
1380		goto dont_reorder;
1381	}
1382
1383	/* qos null data frames are excluded */
1384	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
1385		goto dont_reorder;
1386
1387	/* not part of a BA session */
1388	if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
1389		goto dont_reorder;
1390
1391	/* new, potentially un-ordered, ampdu frame - process it */
1392
1393	/* reset session timer */
1394	if (tid_agg_rx->timeout)
1395		tid_agg_rx->last_rx = jiffies;
1396
1397	/* if this mpdu is fragmented - terminate rx aggregation session */
1398	sc = le16_to_cpu(hdr->seq_ctrl);
1399	if (sc & IEEE80211_SCTL_FRAG) {
1400		ieee80211_queue_skb_to_iface(rx->sdata, rx->link_id, NULL, skb);
1401		return;
1402	}
1403
1404	/*
1405	 * No locking needed -- we will only ever process one
1406	 * RX packet at a time, and thus own tid_agg_rx. All
1407	 * other code manipulating it needs to (and does) make
1408	 * sure that we cannot get to it any more before doing
1409	 * anything with it.
1410	 */
1411	if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
1412					     frames))
1413		return;
1414
1415 dont_reorder:
1416	__skb_queue_tail(frames, skb);
1417}
1418
1419static ieee80211_rx_result debug_noinline
1420ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
1421{
1422	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1423	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1424
1425	if (status->flag & RX_FLAG_DUP_VALIDATED)
1426		return RX_CONTINUE;
1427
1428	/*
1429	 * Drop duplicate 802.11 retransmissions
1430	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
1431	 */
1432
1433	if (rx->skb->len < 24)
1434		return RX_CONTINUE;
1435
1436	if (ieee80211_is_ctl(hdr->frame_control) ||
1437	    ieee80211_is_any_nullfunc(hdr->frame_control))
1438		return RX_CONTINUE;
1439
1440	if (!rx->sta)
1441		return RX_CONTINUE;
1442
1443	if (unlikely(is_multicast_ether_addr(hdr->addr1))) {
1444		struct ieee80211_sub_if_data *sdata = rx->sdata;
1445		u16 sn = ieee80211_get_sn(hdr);
1446
1447		if (!ieee80211_is_data_present(hdr->frame_control))
1448			return RX_CONTINUE;
1449
1450		if (!ieee80211_vif_is_mld(&sdata->vif) ||
1451		    sdata->vif.type != NL80211_IFTYPE_STATION)
1452			return RX_CONTINUE;
1453
1454		if (sdata->u.mgd.mcast_seq_last != IEEE80211_SN_MODULO &&
1455		    ieee80211_sn_less_eq(sn, sdata->u.mgd.mcast_seq_last))
1456			return RX_DROP_U_DUP;
1457
1458		sdata->u.mgd.mcast_seq_last = sn;
1459		return RX_CONTINUE;
1460	}
1461
1462	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
1463		     rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
1464		I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
1465		rx->link_sta->rx_stats.num_duplicates++;
1466		return RX_DROP_U_DUP;
1467	} else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1468		rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
1469	}
1470
1471	return RX_CONTINUE;
1472}
1473
1474static ieee80211_rx_result debug_noinline
1475ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
1476{
1477	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1478
1479	/* Drop disallowed frame classes based on STA auth/assoc state;
1480	 * IEEE 802.11, Chap 5.5.
1481	 *
1482	 * mac80211 filters only based on association state, i.e. it drops
1483	 * Class 3 frames from not associated stations. hostapd sends
1484	 * deauth/disassoc frames when needed. In addition, hostapd is
1485	 * responsible for filtering on both auth and assoc states.
1486	 */
1487
1488	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1489		return ieee80211_rx_mesh_check(rx);
1490
1491	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
1492		      ieee80211_is_pspoll(hdr->frame_control)) &&
1493		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
1494		     rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
1495		     (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
1496		/*
1497		 * accept port control frames from the AP even when it's not
1498		 * yet marked ASSOC to prevent a race where we don't set the
1499		 * assoc bit quickly enough before it sends the first frame
1500		 */
1501		if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1502		    ieee80211_is_data_present(hdr->frame_control)) {
1503			unsigned int hdrlen;
1504			__be16 ethertype;
1505
1506			hdrlen = ieee80211_hdrlen(hdr->frame_control);
1507
1508			if (rx->skb->len < hdrlen + 8)
1509				return RX_DROP_MONITOR;
1510
1511			skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
1512			if (ethertype == rx->sdata->control_port_protocol)
1513				return RX_CONTINUE;
1514		}
1515
1516		if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
1517		    cfg80211_rx_spurious_frame(rx->sdata->dev,
1518					       hdr->addr2,
1519					       GFP_ATOMIC))
1520			return RX_DROP_U_SPURIOUS;
1521
1522		return RX_DROP_MONITOR;
1523	}
1524
1525	return RX_CONTINUE;
1526}
1527
1528
1529static ieee80211_rx_result debug_noinline
1530ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1531{
1532	struct ieee80211_local *local;
1533	struct ieee80211_hdr *hdr;
1534	struct sk_buff *skb;
1535
1536	local = rx->local;
1537	skb = rx->skb;
1538	hdr = (struct ieee80211_hdr *) skb->data;
1539
1540	if (!local->pspolling)
1541		return RX_CONTINUE;
1542
1543	if (!ieee80211_has_fromds(hdr->frame_control))
1544		/* this is not from AP */
1545		return RX_CONTINUE;
1546
1547	if (!ieee80211_is_data(hdr->frame_control))
1548		return RX_CONTINUE;
1549
1550	if (!ieee80211_has_moredata(hdr->frame_control)) {
1551		/* AP has no more frames buffered for us */
1552		local->pspolling = false;
1553		return RX_CONTINUE;
1554	}
1555
1556	/* more data bit is set, let's request a new frame from the AP */
1557	ieee80211_send_pspoll(local, rx->sdata);
1558
1559	return RX_CONTINUE;
1560}
1561
1562static void sta_ps_start(struct sta_info *sta)
1563{
1564	struct ieee80211_sub_if_data *sdata = sta->sdata;
1565	struct ieee80211_local *local = sdata->local;
1566	struct ps_data *ps;
1567	int tid;
1568
1569	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
1570	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1571		ps = &sdata->bss->ps;
1572	else
1573		return;
1574
1575	atomic_inc(&ps->num_sta_ps);
1576	set_sta_flag(sta, WLAN_STA_PS_STA);
1577	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
1578		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1579	ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
1580	       sta->sta.addr, sta->sta.aid);
1581
1582	ieee80211_clear_fast_xmit(sta);
1583
1584	for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
1585		struct ieee80211_txq *txq = sta->sta.txq[tid];
1586		struct txq_info *txqi = to_txq_info(txq);
1587
1588		spin_lock(&local->active_txq_lock[txq->ac]);
1589		if (!list_empty(&txqi->schedule_order))
1590			list_del_init(&txqi->schedule_order);
1591		spin_unlock(&local->active_txq_lock[txq->ac]);
1592
1593		if (txq_has_queue(txq))
1594			set_bit(tid, &sta->txq_buffered_tids);
1595		else
1596			clear_bit(tid, &sta->txq_buffered_tids);
1597	}
1598}
1599
1600static void sta_ps_end(struct sta_info *sta)
1601{
1602	ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
1603	       sta->sta.addr, sta->sta.aid);
1604
1605	if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1606		/*
1607		 * Clear the flag only if the other one is still set
1608		 * so that the TX path won't start TX'ing new frames
1609		 * directly ... In the case that the driver flag isn't
1610		 * set ieee80211_sta_ps_deliver_wakeup() will clear it.
1611		 */
1612		clear_sta_flag(sta, WLAN_STA_PS_STA);
1613		ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
1614		       sta->sta.addr, sta->sta.aid);
1615		return;
1616	}
1617
1618	set_sta_flag(sta, WLAN_STA_PS_DELIVER);
1619	clear_sta_flag(sta, WLAN_STA_PS_STA);
1620	ieee80211_sta_ps_deliver_wakeup(sta);
1621}
1622
1623int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
1624{
1625	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1626	bool in_ps;
1627
1628	WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));
1629
1630	/* Don't let the same PS state be set twice */
1631	in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
1632	if ((start && in_ps) || (!start && !in_ps))
1633		return -EINVAL;
1634
1635	if (start)
1636		sta_ps_start(sta);
1637	else
1638		sta_ps_end(sta);
1639
1640	return 0;
1641}
1642EXPORT_SYMBOL(ieee80211_sta_ps_transition);
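
/*
 * Illustrative caller sketch (hypothetical driver that advertises
 * IEEE80211_HW_AP_LINK_PS and learns about PS transitions from its
 * firmware, which reports them as a boolean "sleeping" state):
 *
 *	if (ieee80211_sta_ps_transition(pubsta, sleeping))
 *		wiphy_dbg(hw->wiphy, "PS state for %pM already set\n",
 *			  pubsta->addr);
 */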
1643
1644void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta)
1645{
1646	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1647
1648	if (test_sta_flag(sta, WLAN_STA_SP))
1649		return;
1650
1651	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1652		ieee80211_sta_ps_deliver_poll_response(sta);
1653	else
1654		set_sta_flag(sta, WLAN_STA_PSPOLL);
1655}
1656EXPORT_SYMBOL(ieee80211_sta_pspoll);
1657
1658void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
1659{
1660	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1661	int ac = ieee80211_ac_from_tid(tid);
1662
1663	/*
1664	 * If this AC is not trigger-enabled do nothing unless the
1665	 * driver is calling us after it already checked.
1666	 *
1667	 * NB: This could/should check a separate bitmap of trigger-
1668	 * enabled queues, but for now we only implement uAPSD w/o
1669	 * TSPEC changes to the ACs, so they're always the same.
1670	 */
1671	if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) &&
1672	    tid != IEEE80211_NUM_TIDS)
1673		return;
1674
1675	/* if we are in a service period, do nothing */
1676	if (test_sta_flag(sta, WLAN_STA_SP))
1677		return;
1678
1679	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1680		ieee80211_sta_ps_deliver_uapsd(sta);
1681	else
1682		set_sta_flag(sta, WLAN_STA_UAPSD);
1683}
1684EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger);
1685
1686static ieee80211_rx_result debug_noinline
1687ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
1688{
1689	struct ieee80211_sub_if_data *sdata = rx->sdata;
1690	struct ieee80211_hdr *hdr = (void *)rx->skb->data;
1691	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1692
1693	if (!rx->sta)
1694		return RX_CONTINUE;
1695
1696	if (sdata->vif.type != NL80211_IFTYPE_AP &&
1697	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
1698		return RX_CONTINUE;
1699
	/*
	 * The device handles station powersave, so don't do anything about
	 * uAPSD and PS-Poll frames (the latter shouldn't even be passed up
	 * to mac80211 by the device since they're handled there.)
	 */
1705	if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
1706		return RX_CONTINUE;
1707
1708	/*
1709	 * Don't do anything if the station isn't already asleep. In
1710	 * the uAPSD case, the station will probably be marked asleep,
1711	 * in the PS-Poll case the station must be confused ...
1712	 */
1713	if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
1714		return RX_CONTINUE;
1715
1716	if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
1717		ieee80211_sta_pspoll(&rx->sta->sta);
1718
		/* Free PS Poll skb here instead of returning RX_DROP that would
		 * count as a dropped frame. */
1721		dev_kfree_skb(rx->skb);
1722
1723		return RX_QUEUED;
1724	} else if (!ieee80211_has_morefrags(hdr->frame_control) &&
1725		   !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1726		   ieee80211_has_pm(hdr->frame_control) &&
1727		   (ieee80211_is_data_qos(hdr->frame_control) ||
1728		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
1729		u8 tid = ieee80211_get_tid(hdr);
1730
1731		ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
1732	}
1733
1734	return RX_CONTINUE;
1735}
1736
1737static ieee80211_rx_result debug_noinline
1738ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1739{
1740	struct sta_info *sta = rx->sta;
1741	struct link_sta_info *link_sta = rx->link_sta;
1742	struct sk_buff *skb = rx->skb;
1743	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1744	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1745	int i;
1746
1747	if (!sta || !link_sta)
1748		return RX_CONTINUE;
1749
1750	/*
1751	 * Update last_rx only for IBSS packets which are for the current
1752	 * BSSID and for station already AUTHORIZED to avoid keeping the
1753	 * current IBSS network alive in cases where other STAs start
1754	 * using different BSSID. This will also give the station another
1755	 * chance to restart the authentication/authorization in case
1756	 * something went wrong the first time.
1757	 */
1758	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1759		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1760						NL80211_IFTYPE_ADHOC);
1761		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
1762		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
1763			link_sta->rx_stats.last_rx = jiffies;
1764			if (ieee80211_is_data_present(hdr->frame_control) &&
1765			    !is_multicast_ether_addr(hdr->addr1))
1766				link_sta->rx_stats.last_rate =
1767					sta_stats_encode_rate(status);
1768		}
1769	} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
1770		link_sta->rx_stats.last_rx = jiffies;
1771	} else if (!ieee80211_is_s1g_beacon(hdr->frame_control) &&
1772		   !is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * Mesh beacons will update last_rx if they are found to
		 * match the current local configuration when processed.
		 */
1777		link_sta->rx_stats.last_rx = jiffies;
1778		if (ieee80211_is_data_present(hdr->frame_control))
1779			link_sta->rx_stats.last_rate = sta_stats_encode_rate(status);
1780	}
1781
1782	link_sta->rx_stats.fragments++;
1783
1784	u64_stats_update_begin(&link_sta->rx_stats.syncp);
1785	link_sta->rx_stats.bytes += rx->skb->len;
1786	u64_stats_update_end(&link_sta->rx_stats.syncp);
1787
1788	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
1789		link_sta->rx_stats.last_signal = status->signal;
1790		ewma_signal_add(&link_sta->rx_stats_avg.signal,
1791				-status->signal);
1792	}
1793
1794	if (status->chains) {
1795		link_sta->rx_stats.chains = status->chains;
1796		for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
1797			int signal = status->chain_signal[i];
1798
1799			if (!(status->chains & BIT(i)))
1800				continue;
1801
1802			link_sta->rx_stats.chain_signal_last[i] = signal;
1803			ewma_signal_add(&link_sta->rx_stats_avg.chain_signal[i],
1804					-signal);
1805		}
1806	}
1807
1808	if (ieee80211_is_s1g_beacon(hdr->frame_control))
1809		return RX_CONTINUE;
1810
1811	/*
1812	 * Change STA power saving mode only at the end of a frame
1813	 * exchange sequence, and only for a data or management
1814	 * frame as specified in IEEE 802.11-2016 11.2.3.2
1815	 */
1816	if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
1817	    !ieee80211_has_morefrags(hdr->frame_control) &&
1818	    !is_multicast_ether_addr(hdr->addr1) &&
1819	    (ieee80211_is_mgmt(hdr->frame_control) ||
1820	     ieee80211_is_data(hdr->frame_control)) &&
1821	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1822	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1823	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1824		if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1825			if (!ieee80211_has_pm(hdr->frame_control))
1826				sta_ps_end(sta);
1827		} else {
1828			if (ieee80211_has_pm(hdr->frame_control))
1829				sta_ps_start(sta);
1830		}
1831	}
1832
1833	/* mesh power save support */
1834	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1835		ieee80211_mps_rx_h_sta_process(sta, hdr);
1836
1837	/*
1838	 * Drop (qos-)data::nullfunc frames silently, since they
1839	 * are used only to control station power saving mode.
1840	 */
1841	if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
1842		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1843
1844		/*
1845		 * If we receive a 4-addr nullfunc frame from a STA that has
1846		 * not yet been moved to a 4-addr STA VLAN, send the event to
1847		 * userspace; for older hostapd, drop the frame to the
1848		 * monitor interface instead.
1849		 */
1850		if (ieee80211_has_a4(hdr->frame_control) &&
1851		    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1852		     (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1853		      !rx->sdata->u.vlan.sta))) {
1854			if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
1855				cfg80211_rx_unexpected_4addr_frame(
1856					rx->sdata->dev, sta->sta.addr,
1857					GFP_ATOMIC);
1858			return RX_DROP_M_UNEXPECTED_4ADDR_FRAME;
1859		}
1860		/*
1861		 * Update counter and free packet here to avoid
1862		 * counting this as a dropped packet.
1863		 */
1864		link_sta->rx_stats.packets++;
1865		dev_kfree_skb(rx->skb);
1866		return RX_QUEUED;
1867	}
1868
1869	return RX_CONTINUE;
1870} /* ieee80211_rx_h_sta_process */
1871
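/*
 * Look up a beacon protection (BIGTK) key: try the requested key index
 * (or the first BIGTK slot if idx < 0) and then the other BIGTK index,
 * preferring the per-station GTK entries over the link's own keys.
 */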
1872static struct ieee80211_key *
1873ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx)
1874{
1875	struct ieee80211_key *key = NULL;
1876	int idx2;
1877
1878	/* Make sure key gets set if either BIGTK key index is set so that
1879	 * ieee80211_drop_unencrypted_mgmt() can properly drop both unprotected
1880	 * Beacon frames and Beacon frames that claim to use another BIGTK key
1881	 * index (i.e., a key that we do not have).
1882	 */
1883
1884	if (idx < 0) {
1885		idx = NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS;
1886		idx2 = idx + 1;
1887	} else {
1888		if (idx == NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
1889			idx2 = idx + 1;
1890		else
1891			idx2 = idx - 1;
1892	}
1893
1894	if (rx->link_sta)
1895		key = rcu_dereference(rx->link_sta->gtk[idx]);
1896	if (!key)
1897		key = rcu_dereference(rx->link->gtk[idx]);
1898	if (!key && rx->link_sta)
1899		key = rcu_dereference(rx->link_sta->gtk[idx2]);
1900	if (!key)
1901		key = rcu_dereference(rx->link->gtk[idx2]);
1902
1903	return key;
1904}
1905
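/*
 * Select the key that protects (or should have protected) this frame and
 * run the matching decryption/MIC verification handler. rx->key is left
 * set so that later handlers (e.g. ieee80211_drop_unencrypted_mgmt()) can
 * tell whether protection was expected.
 */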
1906static ieee80211_rx_result debug_noinline
1907ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
1908{
1909	struct sk_buff *skb = rx->skb;
1910	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1911	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1912	int keyidx;
1913	ieee80211_rx_result result = RX_DROP_U_DECRYPT_FAIL;
1914	struct ieee80211_key *sta_ptk = NULL;
1915	struct ieee80211_key *ptk_idx = NULL;
1916	int mmie_keyidx = -1;
1917	__le16 fc;
1918
1919	if (ieee80211_is_ext(hdr->frame_control))
1920		return RX_CONTINUE;
1921
1922	/*
1923	 * Key selection 101
1924	 *
1925	 * There are five types of keys:
1926	 *  - GTK (group keys)
1927	 *  - IGTK (group keys for management frames)
1928	 *  - BIGTK (group keys for Beacon frames)
1929	 *  - PTK (pairwise keys)
1930	 *  - STK (station-to-station pairwise keys)
1931	 *
1932	 * When selecting a key, we have to distinguish between multicast
1933	 * (including broadcast) and unicast frames, the latter can only
1934	 * use PTKs and STKs while the former always use GTKs, IGTKs, and
1935	 * BIGTKs. Unless, of course, actual WEP keys ("pre-RSNA") are used,
1936	 * then unicast frames can also use key indices like GTKs. Hence, if we
1937	 * don't have a PTK/STK we check the key index for a WEP key.
1938	 *
1939	 * Note that in a regular BSS, multicast frames are sent by the
1940	 * AP only, associated stations unicast the frame to the AP first
1941	 * which then multicasts it on their behalf.
1942	 *
1943	 * There is also a slight problem in IBSS mode: GTKs are negotiated
1944	 * with each station; that is something we don't currently handle.
1945	 * The spec seems to expect that one negotiates the same key with
1946	 * every station but there's no such requirement; VLANs could be
1947	 * possible.
1948	 */
1949
1950	/* start without a key */
1951	rx->key = NULL;
1952	fc = hdr->frame_control;
1953
1954	if (rx->sta) {
1955		int keyid = rx->sta->ptk_idx;
1956		sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
1957
1958		if (ieee80211_has_protected(fc) &&
1959		    !(status->flag & RX_FLAG_IV_STRIPPED)) {
1960			keyid = ieee80211_get_keyid(rx->skb);
1961
1962			if (unlikely(keyid < 0))
1963				return RX_DROP_U_NO_KEY_ID;
1964
1965			ptk_idx = rcu_dereference(rx->sta->ptk[keyid]);
1966		}
1967	}
1968
1969	if (!ieee80211_has_protected(fc))
1970		mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
1971
1972	if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
1973		rx->key = ptk_idx ? ptk_idx : sta_ptk;
1974		if ((status->flag & RX_FLAG_DECRYPTED) &&
1975		    (status->flag & RX_FLAG_IV_STRIPPED))
1976			return RX_CONTINUE;
1977		/* Skip decryption if the frame is not protected. */
1978		if (!ieee80211_has_protected(fc))
1979			return RX_CONTINUE;
1980	} else if (mmie_keyidx >= 0 && ieee80211_is_beacon(fc)) {
1981		/* Broadcast/multicast robust management frame / BIP */
1982		if ((status->flag & RX_FLAG_DECRYPTED) &&
1983		    (status->flag & RX_FLAG_IV_STRIPPED))
1984			return RX_CONTINUE;
1985
1986		if (mmie_keyidx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS ||
1987		    mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
1988				   NUM_DEFAULT_BEACON_KEYS) {
1989			if (rx->sdata->dev)
1990				cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
1991							     skb->data,
1992							     skb->len);
1993			return RX_DROP_M_BAD_BCN_KEYIDX;
1994		}
1995
1996		rx->key = ieee80211_rx_get_bigtk(rx, mmie_keyidx);
1997		if (!rx->key)
1998			return RX_CONTINUE; /* Beacon protection not in use */
1999	} else if (mmie_keyidx >= 0) {
2000		/* Broadcast/multicast robust management frame / BIP */
2001		if ((status->flag & RX_FLAG_DECRYPTED) &&
2002		    (status->flag & RX_FLAG_IV_STRIPPED))
2003			return RX_CONTINUE;
2004
2005		if (mmie_keyidx < NUM_DEFAULT_KEYS ||
2006		    mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
2007			return RX_DROP_M_BAD_MGMT_KEYIDX; /* unexpected BIP keyidx */
2008		if (rx->link_sta) {
2009			if (ieee80211_is_group_privacy_action(skb) &&
2010			    test_sta_flag(rx->sta, WLAN_STA_MFP))
2011				return RX_DROP_MONITOR;
2012
2013			rx->key = rcu_dereference(rx->link_sta->gtk[mmie_keyidx]);
2014		}
2015		if (!rx->key)
2016			rx->key = rcu_dereference(rx->link->gtk[mmie_keyidx]);
2017	} else if (!ieee80211_has_protected(fc)) {
2018		/*
2019		 * The frame was not protected, so skip decryption. However, we
2020		 * need to set rx->key if there is a key that could have been
2021		 * used so that the frame may be dropped if encryption would
2022		 * have been expected.
2023		 */
2024		struct ieee80211_key *key = NULL;
2025		int i;
2026
2027		if (ieee80211_is_beacon(fc)) {
2028			key = ieee80211_rx_get_bigtk(rx, -1);
2029		} else if (ieee80211_is_mgmt(fc) &&
2030			   is_multicast_ether_addr(hdr->addr1)) {
2031			key = rcu_dereference(rx->link->default_mgmt_key);
2032		} else {
2033			if (rx->link_sta) {
2034				for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
2035					key = rcu_dereference(rx->link_sta->gtk[i]);
2036					if (key)
2037						break;
2038				}
2039			}
2040			if (!key) {
2041				for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
2042					key = rcu_dereference(rx->link->gtk[i]);
2043					if (key)
2044						break;
2045				}
2046			}
2047		}
2048		if (key)
2049			rx->key = key;
2050		return RX_CONTINUE;
2051	} else {
2052		/*
2053		 * The device doesn't give us the IV so we won't be
2054	 * able to look up the key. That's OK though; we
2055		 * don't need to decrypt the frame, we just won't
2056		 * be able to keep statistics accurate.
2057		 * Except for key threshold notifications, should
2058		 * we somehow allow the driver to tell us which key
2059		 * the hardware used if this flag is set?
2060		 */
2061		if ((status->flag & RX_FLAG_DECRYPTED) &&
2062		    (status->flag & RX_FLAG_IV_STRIPPED))
2063			return RX_CONTINUE;
2064
2065		keyidx = ieee80211_get_keyid(rx->skb);
2066
2067		if (unlikely(keyidx < 0))
2068			return RX_DROP_U_NO_KEY_ID;
2069
2070		/* check per-station GTK first, if multicast packet */
2071		if (is_multicast_ether_addr(hdr->addr1) && rx->link_sta)
2072			rx->key = rcu_dereference(rx->link_sta->gtk[keyidx]);
2073
2074		/* if not found, try default key */
2075		if (!rx->key) {
2076			if (is_multicast_ether_addr(hdr->addr1))
2077				rx->key = rcu_dereference(rx->link->gtk[keyidx]);
2078			if (!rx->key)
2079				rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
2080
2081			/*
2082			 * RSNA-protected unicast frames should always be
2083			 * sent with pairwise or station-to-station keys,
2084			 * but for WEP we allow using a key index as well.
2085			 */
2086			if (rx->key &&
2087			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
2088			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
2089			    !is_multicast_ether_addr(hdr->addr1))
2090				rx->key = NULL;
2091		}
2092	}
2093
2094	if (rx->key) {
2095		if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
2096			return RX_DROP_MONITOR;
2097
2098		/* TODO: add threshold stuff again */
2099	} else {
2100		return RX_DROP_MONITOR;
2101	}
2102
2103	switch (rx->key->conf.cipher) {
2104	case WLAN_CIPHER_SUITE_WEP40:
2105	case WLAN_CIPHER_SUITE_WEP104:
2106		result = ieee80211_crypto_wep_decrypt(rx);
2107		break;
2108	case WLAN_CIPHER_SUITE_TKIP:
2109		result = ieee80211_crypto_tkip_decrypt(rx);
2110		break;
2111	case WLAN_CIPHER_SUITE_CCMP:
2112		result = ieee80211_crypto_ccmp_decrypt(
2113			rx, IEEE80211_CCMP_MIC_LEN);
2114		break;
2115	case WLAN_CIPHER_SUITE_CCMP_256:
2116		result = ieee80211_crypto_ccmp_decrypt(
2117			rx, IEEE80211_CCMP_256_MIC_LEN);
2118		break;
2119	case WLAN_CIPHER_SUITE_AES_CMAC:
2120		result = ieee80211_crypto_aes_cmac_decrypt(rx);
2121		break;
2122	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
2123		result = ieee80211_crypto_aes_cmac_256_decrypt(rx);
2124		break;
2125	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
2126	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
2127		result = ieee80211_crypto_aes_gmac_decrypt(rx);
2128		break;
2129	case WLAN_CIPHER_SUITE_GCMP:
2130	case WLAN_CIPHER_SUITE_GCMP_256:
2131		result = ieee80211_crypto_gcmp_decrypt(rx);
2132		break;
2133	default:
2134		result = RX_DROP_U_BAD_CIPHER;
2135	}
2136
2137	/* the hdr variable is invalid after the decrypt handlers */
2138
2139	/* either the frame has been decrypted or will be dropped */
2140	status->flag |= RX_FLAG_DECRYPTED;
2141
2142	if (unlikely(ieee80211_is_beacon(fc) && RX_RES_IS_UNUSABLE(result) &&
2143		     rx->sdata->dev))
2144		cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2145					     skb->data, skb->len);
2146
2147	return result;
2148}
2149
2150void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache)
2151{
2152	int i;
2153
2154	for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
2155		skb_queue_head_init(&cache->entries[i].skb_list);
2156}
2157
2158void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache)
2159{
2160	int i;
2161
2162	for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
2163		__skb_queue_purge(&cache->entries[i].skb_list);
2164}
2165
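/*
 * Claim the next fragment cache entry (round-robin, discarding whatever
 * incomplete frame it may still hold) and store the first fragment along
 * with its sequence number, RX queue and fragment number.
 */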
2166static inline struct ieee80211_fragment_entry *
2167ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache,
2168			 unsigned int frag, unsigned int seq, int rx_queue,
2169			 struct sk_buff **skb)
2170{
2171	struct ieee80211_fragment_entry *entry;
2172
2173	entry = &cache->entries[cache->next++];
2174	if (cache->next >= IEEE80211_FRAGMENT_MAX)
2175		cache->next = 0;
2176
2177	__skb_queue_purge(&entry->skb_list);
2178
2179	__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
2180	*skb = NULL;
2181	entry->first_frag_time = jiffies;
2182	entry->seq = seq;
2183	entry->rx_queue = rx_queue;
2184	entry->last_frag = frag;
2185	entry->check_sequential_pn = false;
2186	entry->extra_len = 0;
2187
2188	return entry;
2189}
2190
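/*
 * Find the cache entry that this fragment continues: same sequence number
 * and RX queue, a fragment number exactly one past the last one received,
 * and matching frame type and addresses. Entries older than two seconds
 * are purged instead of matched.
 */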
2191static inline struct ieee80211_fragment_entry *
2192ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache,
2193			  unsigned int frag, unsigned int seq,
2194			  int rx_queue, struct ieee80211_hdr *hdr)
2195{
2196	struct ieee80211_fragment_entry *entry;
2197	int i, idx;
2198
2199	idx = cache->next;
2200	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
2201		struct ieee80211_hdr *f_hdr;
2202		struct sk_buff *f_skb;
2203
2204		idx--;
2205		if (idx < 0)
2206			idx = IEEE80211_FRAGMENT_MAX - 1;
2207
2208		entry = &cache->entries[idx];
2209		if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
2210		    entry->rx_queue != rx_queue ||
2211		    entry->last_frag + 1 != frag)
2212			continue;
2213
2214		f_skb = __skb_peek(&entry->skb_list);
2215		f_hdr = (struct ieee80211_hdr *) f_skb->data;
2216
2217		/*
2218		 * Check ftype and addresses are equal, else check next fragment
2219		 */
2220		if (((hdr->frame_control ^ f_hdr->frame_control) &
2221		     cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
2222		    !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
2223		    !ether_addr_equal(hdr->addr2, f_hdr->addr2))
2224			continue;
2225
2226		if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
2227			__skb_queue_purge(&entry->skb_list);
2228			continue;
2229		}
2230		return entry;
2231	}
2232
2233	return NULL;
2234}
2235
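/*
 * CCMP/GCMP protected fragments must carry strictly incrementing packet
 * numbers, so defragmentation has to track and verify the PN for these
 * ciphers.
 */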
2236static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc)
2237{
2238	return rx->key &&
2239		(rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
2240		 rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
2241		 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
2242		 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
2243		ieee80211_has_protected(fc);
2244}
2245
2246static ieee80211_rx_result debug_noinline
2247ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
2248{
2249	struct ieee80211_fragment_cache *cache = &rx->sdata->frags;
2250	struct ieee80211_hdr *hdr;
2251	u16 sc;
2252	__le16 fc;
2253	unsigned int frag, seq;
2254	struct ieee80211_fragment_entry *entry;
2255	struct sk_buff *skb;
2256	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2257
2258	hdr = (struct ieee80211_hdr *)rx->skb->data;
2259	fc = hdr->frame_control;
2260
2261	if (ieee80211_is_ctl(fc) || ieee80211_is_ext(fc))
2262		return RX_CONTINUE;
2263
2264	sc = le16_to_cpu(hdr->seq_ctrl);
2265	frag = sc & IEEE80211_SCTL_FRAG;
2266
2267	if (rx->sta)
2268		cache = &rx->sta->frags;
2269
2270	if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
2271		goto out;
2272
2273	if (is_multicast_ether_addr(hdr->addr1))
2274		return RX_DROP_MONITOR;
2275
2276	I802_DEBUG_INC(rx->local->rx_handlers_fragments);
2277
2278	if (skb_linearize(rx->skb))
2279		return RX_DROP_U_OOM;
2280
2281	/*
2282	 *  skb_linearize() might change skb->data, so previously
2283	 *  cached variables (in this case, hdr) need to be
2284	 *  refreshed with the new data.
2285	 */
2286	hdr = (struct ieee80211_hdr *)rx->skb->data;
2287	seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
2288
2289	if (frag == 0) {
2290		/* This is the first fragment of a new frame. */
2291		entry = ieee80211_reassemble_add(cache, frag, seq,
2292						 rx->seqno_idx, &(rx->skb));
2293		if (requires_sequential_pn(rx, fc)) {
2294			int queue = rx->security_idx;
2295
2296			/* Store CCMP/GCMP PN so that we can verify that the
2297			 * next fragment has a sequential PN value.
2298			 */
2299			entry->check_sequential_pn = true;
2300			entry->is_protected = true;
2301			entry->key_color = rx->key->color;
2302			memcpy(entry->last_pn,
2303			       rx->key->u.ccmp.rx_pn[queue],
2304			       IEEE80211_CCMP_PN_LEN);
2305			BUILD_BUG_ON(offsetof(struct ieee80211_key,
2306					      u.ccmp.rx_pn) !=
2307				     offsetof(struct ieee80211_key,
2308					      u.gcmp.rx_pn));
2309			BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
2310				     sizeof(rx->key->u.gcmp.rx_pn[queue]));
2311			BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
2312				     IEEE80211_GCMP_PN_LEN);
2313		} else if (rx->key &&
2314			   (ieee80211_has_protected(fc) ||
2315			    (status->flag & RX_FLAG_DECRYPTED))) {
2316			entry->is_protected = true;
2317			entry->key_color = rx->key->color;
2318		}
2319		return RX_QUEUED;
2320	}
2321
2322	/* This is a fragment for a frame that should already be pending in
2323	 * the fragment cache. Add this fragment to the end of the pending entry.
2324	 */
2325	entry = ieee80211_reassemble_find(cache, frag, seq,
2326					  rx->seqno_idx, hdr);
2327	if (!entry) {
2328		I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2329		return RX_DROP_MONITOR;
2330	}
2331
2332	/* "The receiver shall discard MSDUs and MMPDUs whose constituent
2333	 *  MPDU PN values are not incrementing in steps of 1."
2334	 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
2335	 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
2336	 */
2337	if (entry->check_sequential_pn) {
2338		int i;
2339		u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
2340
2341		if (!requires_sequential_pn(rx, fc))
2342			return RX_DROP_U_NONSEQ_PN;
2343
2344		/* Prevent mixed key and fragment cache attacks */
2345		if (entry->key_color != rx->key->color)
2346			return RX_DROP_U_BAD_KEY_COLOR;
2347
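		/* The expected PN is the stored PN plus one; increment the
		 * big-endian counter, carrying over from the last byte.
		 */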
2348		memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
2349		for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
2350			pn[i]++;
2351			if (pn[i])
2352				break;
2353		}
2354
2355		rpn = rx->ccm_gcm.pn;
2356		if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
2357			return RX_DROP_U_REPLAY;
2358		memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
2359	} else if (entry->is_protected &&
2360		   (!rx->key ||
2361		    (!ieee80211_has_protected(fc) &&
2362		     !(status->flag & RX_FLAG_DECRYPTED)) ||
2363		    rx->key->color != entry->key_color)) {
2364		/* Drop this as a mixed key or fragment cache attack, even
2365		 * though for TKIP the Michael MIC should protect us, and WEP
2366		 * is a lost cause anyway.
2367		 */
2368		return RX_DROP_U_EXPECT_DEFRAG_PROT;
2369	} else if (entry->is_protected && rx->key &&
2370		   entry->key_color != rx->key->color &&
2371		   (status->flag & RX_FLAG_DECRYPTED)) {
2372		return RX_DROP_U_BAD_KEY_COLOR;
2373	}
2374
2375	skb_pull(rx->skb, ieee80211_hdrlen(fc));
2376	__skb_queue_tail(&entry->skb_list, rx->skb);
2377	entry->last_frag = frag;
2378	entry->extra_len += rx->skb->len;
2379	if (ieee80211_has_morefrags(fc)) {
2380		rx->skb = NULL;
2381		return RX_QUEUED;
2382	}
2383
2384	rx->skb = __skb_dequeue(&entry->skb_list);
2385	if (skb_tailroom(rx->skb) < entry->extra_len) {
2386		I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag);
2387		if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
2388					      GFP_ATOMIC))) {
2389			I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2390			__skb_queue_purge(&entry->skb_list);
2391			return RX_DROP_U_OOM;
2392		}
2393	}
2394	while ((skb = __skb_dequeue(&entry->skb_list))) {
2395		skb_put_data(rx->skb, skb->data, skb->len);
2396		dev_kfree_skb(skb);
2397	}
2398
2399 out:
2400	ieee80211_led_rx(rx->local);
2401	if (rx->sta)
2402		rx->link_sta->rx_stats.packets++;
2403	return RX_CONTINUE;
2404}
2405
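/*
 * 802.1X controlled port: only accept data frames from stations that have
 * already been marked AUTHORIZED.
 */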
2406static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
2407{
2408	if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
2409		return -EACCES;
2410
2411	return 0;
2412}
2413
2414static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
2415{
2416	struct sk_buff *skb = rx->skb;
2417	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2418
2419	/*
2420	 * Pass through unencrypted frames if the hardware has
2421	 * decrypted them already.
2422	 */
2423	if (status->flag & RX_FLAG_DECRYPTED)
2424		return 0;
2425
2426	/* Drop unencrypted frames if key is set. */
2427	if (unlikely(!ieee80211_has_protected(fc) &&
2428		     !ieee80211_is_any_nullfunc(fc) &&
2429		     ieee80211_is_data(fc) && rx->key))
2430		return -EACCES;
2431
2432	return 0;
2433}
2434
2435VISIBLE_IF_MAC80211_KUNIT ieee80211_rx_result
2436ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
2437{
2438	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2439	struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
2440	__le16 fc = mgmt->frame_control;
2441
2442	/*
2443	 * Pass through unencrypted frames if the hardware has
2444	 * decrypted them already.
2445	 */
2446	if (status->flag & RX_FLAG_DECRYPTED)
2447		return RX_CONTINUE;
2448
2449	/* drop unicast protected dual (that wasn't protected) */
2450	if (ieee80211_is_action(fc) &&
2451	    mgmt->u.action.category == WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION)
2452		return RX_DROP_U_UNPROT_DUAL;
2453
2454	if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
2455		if (unlikely(!ieee80211_has_protected(fc) &&
2456			     ieee80211_is_unicast_robust_mgmt_frame(rx->skb))) {
2457			if (ieee80211_is_deauth(fc) ||
2458			    ieee80211_is_disassoc(fc)) {
2459				/*
2460				 * Permit unprotected deauth/disassoc frames
2461				 * during 4-way-HS (key is installed after HS).
2462				 */
2463				if (!rx->key)
2464					return RX_CONTINUE;
2465
2466				cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2467							     rx->skb->data,
2468							     rx->skb->len);
2469			}
2470			return RX_DROP_U_UNPROT_UCAST_MGMT;
2471		}
2472		/* BIP does not use Protected field, so need to check MMIE */
2473		if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
2474			     ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2475			if (ieee80211_is_deauth(fc) ||
2476			    ieee80211_is_disassoc(fc))
2477				cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2478							     rx->skb->data,
2479							     rx->skb->len);
2480			return RX_DROP_U_UNPROT_MCAST_MGMT;
2481		}
2482		if (unlikely(ieee80211_is_beacon(fc) && rx->key &&
2483			     ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2484			cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2485						     rx->skb->data,
2486						     rx->skb->len);
2487			return RX_DROP_U_UNPROT_BEACON;
2488		}
2489		/*
2490		 * When using MFP, Action frames are not allowed prior to
2491		 * having configured keys.
2492		 */
2493		if (unlikely(ieee80211_is_action(fc) && !rx->key &&
2494			     ieee80211_is_robust_mgmt_frame(rx->skb)))
2495			return RX_DROP_U_UNPROT_ACTION;
2496
2497		/* drop unicast public action frames when using MFP */
2498		if (is_unicast_ether_addr(mgmt->da) &&
2499		    ieee80211_is_protected_dual_of_public_action(rx->skb))
2500			return RX_DROP_U_UNPROT_UNICAST_PUB_ACTION;
2501	}
2502
2503	/*
2504	 * Drop robust action frames before assoc regardless of MFP state;
2505	 * after assoc we will also have decided on MFP or not.
2506	 */
2507	if (ieee80211_is_action(fc) &&
2508	    ieee80211_is_robust_mgmt_frame(rx->skb) &&
2509	    (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))
2510		return RX_DROP_U_UNPROT_ROBUST_ACTION;
2511
2512	return RX_CONTINUE;
2513}
2514EXPORT_SYMBOL_IF_MAC80211_KUNIT(ieee80211_drop_unencrypted_mgmt);
2515
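/*
 * Convert the 802.11 data frame to 802.3, rejecting 4-address and
 * multicast combinations that are not valid for this interface type, and
 * report whether the resulting frame uses the control port (EAPOL)
 * protocol.
 */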
2516static ieee80211_rx_result
2517__ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
2518{
2519	struct ieee80211_sub_if_data *sdata = rx->sdata;
2520	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2521	bool check_port_control = false;
2522	struct ethhdr *ehdr;
2523	int ret;
2524
2525	*port_control = false;
2526	if (ieee80211_has_a4(hdr->frame_control) &&
2527	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
2528		return RX_DROP_U_UNEXPECTED_VLAN_4ADDR;
2529
2530	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
2531	    !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
2532		if (!sdata->u.mgd.use_4addr)
2533			return RX_DROP_U_UNEXPECTED_STA_4ADDR;
2534		else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr))
2535			check_port_control = true;
2536	}
2537
2538	if (is_multicast_ether_addr(hdr->addr1) &&
2539	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
2540		return RX_DROP_U_UNEXPECTED_VLAN_MCAST;
2541
2542	ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
2543	if (ret < 0)
2544		return RX_DROP_U_INVALID_8023;
2545
2546	ehdr = (struct ethhdr *) rx->skb->data;
2547	if (ehdr->h_proto == rx->sdata->control_port_protocol)
2548		*port_control = true;
2549	else if (check_port_control)
2550		return RX_DROP_U_NOT_PORT_CONTROL;
2551
2552	return RX_CONTINUE;
2553}
2554
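/*
 * Check whether @addr is one of our addresses: the (possibly MLD) vif
 * address itself or, on an MLO interface, any of its link addresses. If a
 * link address matches and @out_link_id is non-NULL, the link ID is
 * returned through it.
 */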
2555bool ieee80211_is_our_addr(struct ieee80211_sub_if_data *sdata,
2556			   const u8 *addr, int *out_link_id)
2557{
2558	unsigned int link_id;
2559
2560	/* non-MLO, or MLD address replaced by hardware */
2561	if (ether_addr_equal(sdata->vif.addr, addr))
2562		return true;
2563
2564	if (!ieee80211_vif_is_mld(&sdata->vif))
2565		return false;
2566
2567	for (link_id = 0; link_id < ARRAY_SIZE(sdata->vif.link_conf); link_id++) {
2568		struct ieee80211_bss_conf *conf;
2569
2570		conf = rcu_dereference(sdata->vif.link_conf[link_id]);
2571
2572		if (!conf)
2573			continue;
2574		if (ether_addr_equal(conf->addr, addr)) {
2575			if (out_link_id)
2576				*out_link_id = link_id;
2577			return true;
2578		}
2579	}
2580
2581	return false;
2582}
2583
2584/*
2585 * requires that rx->skb is a frame with ethernet header
2586 */
2587static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
2588{
2589	static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
2590		= { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
2591	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2592
2593	/*
2594	 * Allow EAPOL frames to us/the PAE group address regardless of
2595	 * whether the frame was encrypted or not, and always disallow
2596	 * all other destination addresses for them.
2597	 */
2598	if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol))
2599		return ieee80211_is_our_addr(rx->sdata, ehdr->h_dest, NULL) ||
2600		       ether_addr_equal(ehdr->h_dest, pae_group_addr);
2601
2602	if (ieee80211_802_1x_port_control(rx) ||
2603	    ieee80211_drop_unencrypted(rx, fc))
2604		return false;
2605
2606	return true;
2607}
2608
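/*
 * Hand the frame to the host: control port (EAPOL/preauth) frames go via
 * nl80211 when control_port_over_nl80211 is set, everything else is
 * passed to the regular network stack (rewriting an EAPOL frame's PAE
 * group destination address to our own so a bridge won't flood it).
 */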
2609static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
2610						 struct ieee80211_rx_data *rx)
2611{
2612	struct ieee80211_sub_if_data *sdata = rx->sdata;
2613	struct net_device *dev = sdata->dev;
2614
2615	if (unlikely((skb->protocol == sdata->control_port_protocol ||
2616		     (skb->protocol == cpu_to_be16(ETH_P_PREAUTH) &&
2617		      !sdata->control_port_no_preauth)) &&
2618		     sdata->control_port_over_nl80211)) {
2619		struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2620		bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED);
2621
2622		cfg80211_rx_control_port(dev, skb, noencrypt, rx->link_id);
2623		dev_kfree_skb(skb);
2624	} else {
2625		struct ethhdr *ehdr = (void *)skb_mac_header(skb);
2626
2627		memset(skb->cb, 0, sizeof(skb->cb));
2628
2629		/*
2630		 * 802.1X over 802.11 requires that the authenticator address
2631		 * be used for EAPOL frames. However, 802.1X allows the use of
2632		 * the PAE group address instead. If the interface is part of
2633		 * a bridge and we pass the frame with the PAE group address,
2634		 * then the bridge will forward it to the network (even if the
2635		 * client was not associated yet), which isn't supposed to
2636		 * happen.
2637		 * To avoid that, rewrite the destination address to our own
2638		 * address, so that the authenticator (e.g. hostapd) will see
2639		 * the frame, but the bridge won't forward it anywhere else. Note
2640		 * that due to earlier filtering, the only other address can
2641		 * be the PAE group address, unless the hardware allowed them
2642		 * through in 802.3 offloaded mode.
2643		 */
2644		if (unlikely(skb->protocol == sdata->control_port_protocol &&
2645			     !ether_addr_equal(ehdr->h_dest, sdata->vif.addr)))
2646			ether_addr_copy(ehdr->h_dest, sdata->vif.addr);
2647
2648		/* deliver to local stack */
2649		if (rx->list)
2650			list_add_tail(&skb->list, rx->list);
2651		else
2652			netif_receive_skb(skb);
2653	}
2654}
2655
2656/*
2657 * requires that rx->skb is a frame with ethernet header
2658 */
2659static void
2660ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
2661{
2662	struct ieee80211_sub_if_data *sdata = rx->sdata;
2663	struct net_device *dev = sdata->dev;
2664	struct sk_buff *skb, *xmit_skb;
2665	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2666	struct sta_info *dsta;
2667
2668	skb = rx->skb;
2669	xmit_skb = NULL;
2670
2671	dev_sw_netstats_rx_add(dev, skb->len);
2672
2673	if (rx->sta) {
2674		/* The seqno index has the same property as needed
2675		 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
2676		 * for non-QoS-data frames. Here we know it's a data
2677		 * frame, so count MSDUs.
2678		 */
2679		u64_stats_update_begin(&rx->link_sta->rx_stats.syncp);
2680		rx->link_sta->rx_stats.msdu[rx->seqno_idx]++;
2681		u64_stats_update_end(&rx->link_sta->rx_stats.syncp);
2682	}
2683
2684	if ((sdata->vif.type == NL80211_IFTYPE_AP ||
2685	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
2686	    !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
2687	    ehdr->h_proto != rx->sdata->control_port_protocol &&
2688	    (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
2689		if (is_multicast_ether_addr(ehdr->h_dest) &&
2690		    ieee80211_vif_get_num_mcast_if(sdata) != 0) {
2691			/*
2692			 * send multicast frames both to higher layers in
2693			 * local net stack and back to the wireless medium
2694			 */
2695			xmit_skb = skb_copy(skb, GFP_ATOMIC);
2696			if (!xmit_skb)
2697				net_info_ratelimited("%s: failed to clone multicast frame\n",
2698						    dev->name);
2699		} else if (!is_multicast_ether_addr(ehdr->h_dest) &&
2700			   !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) {
2701			dsta = sta_info_get(sdata, ehdr->h_dest);
2702			if (dsta) {
2703				/*
2704				 * The destination station is associated to
2705				 * this AP (in this VLAN), so send the frame
2706				 * directly to it and do not pass it to local
2707				 * net stack.
2708				 */
2709				xmit_skb = skb;
2710				skb = NULL;
2711			}
2712		}
2713	}
2714
2715#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2716	if (skb) {
2717		/* 'align' will only take the values 0 or 2 here since all
2718		 * frames are required to be aligned to 2-byte boundaries
2719		 * when being passed to mac80211; the code here works just
2720		 * as well if that isn't true, but mac80211 assumes it can
2721		 * access fields as 2-byte aligned (e.g. for ether_addr_equal)
2722		 */
2723		int align;
2724
2725		align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3;
2726		if (align) {
2727			if (WARN_ON(skb_headroom(skb) < 3)) {
2728				dev_kfree_skb(skb);
2729				skb = NULL;
2730			} else {
2731				u8 *data = skb->data;
2732				size_t len = skb_headlen(skb);
2733				skb->data -= align;
2734				memmove(skb->data, data, len);
2735				skb_set_tail_pointer(skb, len);
2736			}
2737		}
2738	}
2739#endif
2740
2741	if (skb) {
2742		skb->protocol = eth_type_trans(skb, dev);
2743		ieee80211_deliver_skb_to_local_stack(skb, rx);
2744	}
2745
2746	if (xmit_skb) {
2747		/*
2748		 * Send to wireless media and increase priority by 256 to
2749		 * keep the received priority instead of reclassifying
2750		 * the frame (see cfg80211_classify8021d).
2751		 */
2752		xmit_skb->priority += 256;
2753		xmit_skb->protocol = htons(ETH_P_802_3);
2754		skb_reset_network_header(xmit_skb);
2755		skb_reset_mac_header(xmit_skb);
2756		dev_queue_xmit(xmit_skb);
2757	}
2758}
2759
2760#ifdef CONFIG_MAC80211_MESH
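/*
 * Try to forward a mesh frame through the fast-tx cache: look up the
 * forwarding entry for the (possibly proxied) destination, resolve the
 * next hop and transmit via the 802.3 fast-xmit path. Returns false to
 * fall back to the normal forwarding path.
 */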
2761static bool
2762ieee80211_rx_mesh_fast_forward(struct ieee80211_sub_if_data *sdata,
2763			       struct sk_buff *skb, int hdrlen)
2764{
2765	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2766	struct ieee80211_mesh_fast_tx_key key = {
2767		.type = MESH_FAST_TX_TYPE_FORWARDED
2768	};
2769	struct ieee80211_mesh_fast_tx *entry;
2770	struct ieee80211s_hdr *mesh_hdr;
2771	struct tid_ampdu_tx *tid_tx;
2772	struct sta_info *sta;
2773	struct ethhdr eth;
2774	u8 tid;
2775
2776	mesh_hdr = (struct ieee80211s_hdr *)(skb->data + sizeof(eth));
2777	if ((mesh_hdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6)
2778		ether_addr_copy(key.addr, mesh_hdr->eaddr1);
2779	else if (!(mesh_hdr->flags & MESH_FLAGS_AE))
2780		ether_addr_copy(key.addr, skb->data);
2781	else
2782		return false;
2783
2784	entry = mesh_fast_tx_get(sdata, &key);
2785	if (!entry)
2786		return false;
2787
2788	sta = rcu_dereference(entry->mpath->next_hop);
2789	if (!sta)
2790		return false;
2791
2792	if (skb_linearize(skb))
2793		return false;
2794
2795	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
2796	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
2797	if (tid_tx) {
2798		if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state))
2799			return false;
2800
2801		if (tid_tx->timeout)
2802			tid_tx->last_tx = jiffies;
2803	}
2804
2805	ieee80211_aggr_check(sdata, sta, skb);
2806
2807	if (ieee80211_get_8023_tunnel_proto(skb->data + hdrlen,
2808					    &skb->protocol))
2809		hdrlen += ETH_ALEN;
2810	else
2811		skb->protocol = htons(skb->len - hdrlen);
2812	skb_set_network_header(skb, hdrlen + 2);
2813
2814	skb->dev = sdata->dev;
2815	memcpy(&eth, skb->data, ETH_HLEN - 2);
2816	skb_pull(skb, 2);
2817	__ieee80211_xmit_fast(sdata, sta, &entry->fast_tx, skb, tid_tx,
2818			      eth.h_dest, eth.h_source);
2819	IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
2820	IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
2821
2822	return true;
2823}
2824#endif
2825
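/*
 * Mesh forwarding for frames already converted to 802.3: update MPP proxy
 * paths, decrement the mesh TTL and then forward the frame, deliver it
 * locally, or both (for multicast). The mesh header is stripped before
 * local delivery.
 */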
2826static ieee80211_rx_result
2827ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta,
2828		       struct sk_buff *skb)
2829{
2830#ifdef CONFIG_MAC80211_MESH
2831	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2832	struct ieee80211_local *local = sdata->local;
2833	uint16_t fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA;
2834	struct ieee80211_hdr hdr = {
2835		.frame_control = cpu_to_le16(fc)
2836	};
2837	struct ieee80211_hdr *fwd_hdr;
2838	struct ieee80211s_hdr *mesh_hdr;
2839	struct ieee80211_tx_info *info;
2840	struct sk_buff *fwd_skb;
2841	struct ethhdr *eth;
2842	bool multicast;
2843	int tailroom = 0;
2844	int hdrlen, mesh_hdrlen;
2845	u8 *qos;
2846
2847	if (!ieee80211_vif_is_mesh(&sdata->vif))
2848		return RX_CONTINUE;
2849
2850	if (!pskb_may_pull(skb, sizeof(*eth) + 6))
2851		return RX_DROP_MONITOR;
2852
2853	mesh_hdr = (struct ieee80211s_hdr *)(skb->data + sizeof(*eth));
2854	mesh_hdrlen = ieee80211_get_mesh_hdrlen(mesh_hdr);
2855
2856	if (!pskb_may_pull(skb, sizeof(*eth) + mesh_hdrlen))
2857		return RX_DROP_MONITOR;
2858
2859	eth = (struct ethhdr *)skb->data;
2860	multicast = is_multicast_ether_addr(eth->h_dest);
2861
2862	mesh_hdr = (struct ieee80211s_hdr *)(eth + 1);
2863	if (!mesh_hdr->ttl)
2864		return RX_DROP_MONITOR;
2865
2866	/* frame is in RMC, don't forward */
2867	if (is_multicast_ether_addr(eth->h_dest) &&
2868	    mesh_rmc_check(sdata, eth->h_source, mesh_hdr))
2869		return RX_DROP_MONITOR;
2870
2871	/* forward packet */
2872	if (sdata->crypto_tx_tailroom_needed_cnt)
2873		tailroom = IEEE80211_ENCRYPT_TAILROOM;
2874
2875	if (mesh_hdr->flags & MESH_FLAGS_AE) {
2876		struct mesh_path *mppath;
2877		char *proxied_addr;
2878		bool update = false;
2879
2880		if (multicast)
2881			proxied_addr = mesh_hdr->eaddr1;
2882		else if ((mesh_hdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6)
2883			/* has_a4 already checked in ieee80211_rx_mesh_check */
2884			proxied_addr = mesh_hdr->eaddr2;
2885		else
2886			return RX_DROP_MONITOR;
2887
2888		rcu_read_lock();
2889		mppath = mpp_path_lookup(sdata, proxied_addr);
2890		if (!mppath) {
2891			mpp_path_add(sdata, proxied_addr, eth->h_source);
2892		} else {
2893			spin_lock_bh(&mppath->state_lock);
2894			if (!ether_addr_equal(mppath->mpp, eth->h_source)) {
2895				memcpy(mppath->mpp, eth->h_source, ETH_ALEN);
2896				update = true;
2897			}
2898			mppath->exp_time = jiffies;
2899			spin_unlock_bh(&mppath->state_lock);
2900		}
2901
2902		/* flush fast xmit cache if the address path changed */
2903		if (update)
2904			mesh_fast_tx_flush_addr(sdata, proxied_addr);
2905
2906		rcu_read_unlock();
2907	}
2908
2909	/* Frame has reached destination.  Don't forward */
2910	if (ether_addr_equal(sdata->vif.addr, eth->h_dest))
2911		goto rx_accept;
2912
2913	if (!--mesh_hdr->ttl) {
2914		if (multicast)
2915			goto rx_accept;
2916
2917		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
2918		return RX_DROP_MONITOR;
2919	}
2920
2921	if (!ifmsh->mshcfg.dot11MeshForwarding) {
2922		if (is_multicast_ether_addr(eth->h_dest))
2923			goto rx_accept;
2924
2925		return RX_DROP_MONITOR;
2926	}
2927
2928	skb_set_queue_mapping(skb, ieee802_1d_to_ac[skb->priority]);
2929
2930	if (!multicast &&
2931	    ieee80211_rx_mesh_fast_forward(sdata, skb, mesh_hdrlen))
2932		return RX_QUEUED;
2933
2934	ieee80211_fill_mesh_addresses(&hdr, &hdr.frame_control,
2935				      eth->h_dest, eth->h_source);
2936	hdrlen = ieee80211_hdrlen(hdr.frame_control);
2937	if (multicast) {
2938		int extra_head = sizeof(struct ieee80211_hdr) - sizeof(*eth);
2939
2940		fwd_skb = skb_copy_expand(skb, local->tx_headroom + extra_head +
2941					       IEEE80211_ENCRYPT_HEADROOM,
2942					  tailroom, GFP_ATOMIC);
2943		if (!fwd_skb)
2944			goto rx_accept;
2945	} else {
2946		fwd_skb = skb;
2947		skb = NULL;
2948
2949		if (skb_cow_head(fwd_skb, hdrlen - sizeof(struct ethhdr)))
2950			return RX_DROP_U_OOM;
2951
2952		if (skb_linearize(fwd_skb))
2953			return RX_DROP_U_OOM;
2954	}
2955
2956	fwd_hdr = skb_push(fwd_skb, hdrlen - sizeof(struct ethhdr));
2957	memcpy(fwd_hdr, &hdr, hdrlen - 2);
2958	qos = ieee80211_get_qos_ctl(fwd_hdr);
2959	qos[0] = qos[1] = 0;
2960
2961	skb_reset_mac_header(fwd_skb);
2962	hdrlen += mesh_hdrlen;
2963	if (ieee80211_get_8023_tunnel_proto(fwd_skb->data + hdrlen,
2964					    &fwd_skb->protocol))
2965		hdrlen += ETH_ALEN;
2966	else
2967		fwd_skb->protocol = htons(fwd_skb->len - hdrlen);
2968	skb_set_network_header(fwd_skb, hdrlen + 2);
2969
2970	info = IEEE80211_SKB_CB(fwd_skb);
2971	memset(info, 0, sizeof(*info));
2972	info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
2973	info->control.vif = &sdata->vif;
2974	info->control.jiffies = jiffies;
2975	fwd_skb->dev = sdata->dev;
2976	if (multicast) {
2977		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
2978		memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
2979		/* update power mode indication when forwarding */
2980		ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
2981	} else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
2982		/* mesh power mode flags updated in mesh_nexthop_lookup */
2983		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
2984	} else {
2985		/* unable to resolve next hop */
2986		if (sta)
2987			mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
2988					   hdr.addr3, 0,
2989					   WLAN_REASON_MESH_PATH_NOFORWARD,
2990					   sta->sta.addr);
2991		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
2992		kfree_skb(fwd_skb);
2993		goto rx_accept;
2994	}
2995
2996	IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
2997	ieee80211_add_pending_skb(local, fwd_skb);
2998
2999rx_accept:
3000	if (!skb)
3001		return RX_QUEUED;
3002
3003	ieee80211_strip_8023_mesh_hdr(skb);
3004#endif
3005
3006	return RX_CONTINUE;
3007}
3008
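/*
 * Deaggregate an A-MSDU: convert the outer frame to 802.3, determine
 * which inner DA/SA must match the outer addresses for this interface
 * type, detect the mesh A-MSDU header format once per station, then run
 * each subframe through mesh forwarding, the frame-allowed checks and
 * normal delivery.
 */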
3009static ieee80211_rx_result debug_noinline
3010__ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
3011{
3012	struct net_device *dev = rx->sdata->dev;
3013	struct sk_buff *skb = rx->skb;
3014	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3015	__le16 fc = hdr->frame_control;
3016	struct sk_buff_head frame_list;
3017	ieee80211_rx_result res;
3018	struct ethhdr ethhdr;
3019	const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
3020
3021	if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
3022		check_da = NULL;
3023		check_sa = NULL;
3024	} else switch (rx->sdata->vif.type) {
3025		case NL80211_IFTYPE_AP:
3026		case NL80211_IFTYPE_AP_VLAN:
3027			check_da = NULL;
3028			break;
3029		case NL80211_IFTYPE_STATION:
3030			if (!rx->sta ||
3031			    !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
3032				check_sa = NULL;
3033			break;
3034		case NL80211_IFTYPE_MESH_POINT:
3035			check_sa = NULL;
3036			check_da = NULL;
3037			break;
3038		default:
3039			break;
3040	}
3041
3042	skb->dev = dev;
3043	__skb_queue_head_init(&frame_list);
3044
3045	if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
3046					  rx->sdata->vif.addr,
3047					  rx->sdata->vif.type,
3048					  data_offset, true))
3049		return RX_DROP_U_BAD_AMSDU;
3050
3051	if (rx->sta->amsdu_mesh_control < 0) {
3052		s8 valid = -1;
3053		int i;
3054
3055		for (i = 0; i <= 2; i++) {
3056			if (!ieee80211_is_valid_amsdu(skb, i))
3057				continue;
3058
3059			if (valid >= 0) {
3060				/* ambiguous */
3061				valid = -1;
3062				break;
3063			}
3064
3065			valid = i;
3066		}
3067
3068		rx->sta->amsdu_mesh_control = valid;
3069	}
3070
3071	ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
3072				 rx->sdata->vif.type,
3073				 rx->local->hw.extra_tx_headroom,
3074				 check_da, check_sa,
3075				 rx->sta->amsdu_mesh_control);
3076
3077	while (!skb_queue_empty(&frame_list)) {
3078		rx->skb = __skb_dequeue(&frame_list);
3079
3080		res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb);
3081		switch (res) {
3082		case RX_QUEUED:
3083			continue;
3084		case RX_CONTINUE:
3085			break;
3086		default:
3087			goto free;
3088		}
3089
3090		if (!ieee80211_frame_allowed(rx, fc))
3091			goto free;
3092
3093		ieee80211_deliver_skb(rx);
3094		continue;
3095
3096free:
3097		dev_kfree_skb(rx->skb);
3098	}
3099
3100	return RX_QUEUED;
3101}
3102
3103static ieee80211_rx_result debug_noinline
3104ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
3105{
3106	struct sk_buff *skb = rx->skb;
3107	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3108	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3109	__le16 fc = hdr->frame_control;
3110
3111	if (!(status->rx_flags & IEEE80211_RX_AMSDU))
3112		return RX_CONTINUE;
3113
3114	if (unlikely(!ieee80211_is_data(fc)))
3115		return RX_CONTINUE;
3116
3117	if (unlikely(!ieee80211_is_data_present(fc)))
3118		return RX_DROP_MONITOR;
3119
3120	if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
3121		switch (rx->sdata->vif.type) {
3122		case NL80211_IFTYPE_AP_VLAN:
3123			if (!rx->sdata->u.vlan.sta)
3124				return RX_DROP_U_BAD_4ADDR;
3125			break;
3126		case NL80211_IFTYPE_STATION:
3127			if (!rx->sdata->u.mgd.use_4addr)
3128				return RX_DROP_U_BAD_4ADDR;
3129			break;
3130		case NL80211_IFTYPE_MESH_POINT:
3131			break;
3132		default:
3133			return RX_DROP_U_BAD_4ADDR;
3134		}
3135	}
3136
3137	if (is_multicast_ether_addr(hdr->addr1) || !rx->sta)
3138		return RX_DROP_U_BAD_AMSDU;
3139
3140	if (rx->key) {
3141		/*
3142		 * We should not receive A-MSDUs on pre-HT connections,
3143		 * and HT connections cannot use old ciphers. Thus drop
3144		 * them, as in those cases we couldn't even have SPP
3145		 * A-MSDUs or such.
3146		 */
3147		switch (rx->key->conf.cipher) {
3148		case WLAN_CIPHER_SUITE_WEP40:
3149		case WLAN_CIPHER_SUITE_WEP104:
3150		case WLAN_CIPHER_SUITE_TKIP:
3151			return RX_DROP_U_BAD_AMSDU_CIPHER;
3152		default:
3153			break;
3154		}
3155	}
3156
3157	return __ieee80211_rx_h_amsdu(rx, 0);
3158}
3159
3160static ieee80211_rx_result debug_noinline
3161ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
3162{
3163	struct ieee80211_sub_if_data *sdata = rx->sdata;
3164	struct ieee80211_local *local = rx->local;
3165	struct net_device *dev = sdata->dev;
3166	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
3167	__le16 fc = hdr->frame_control;
3168	ieee80211_rx_result res;
3169	bool port_control;
3170
3171	if (unlikely(!ieee80211_is_data(hdr->frame_control)))
3172		return RX_CONTINUE;
3173
3174	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
3175		return RX_DROP_MONITOR;
3176
3177	/*
3178	 * Send unexpected-4addr-frame event to hostapd. For older versions,
3179	 * also drop the frame to cooked monitor interfaces.
3180	 */
3181	if (ieee80211_has_a4(hdr->frame_control) &&
3182	    sdata->vif.type == NL80211_IFTYPE_AP) {
3183		if (rx->sta &&
3184		    !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
3185			cfg80211_rx_unexpected_4addr_frame(
3186				rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
3187		return RX_DROP_MONITOR;
3188	}
3189
3190	res = __ieee80211_data_to_8023(rx, &port_control);
3191	if (unlikely(res != RX_CONTINUE))
3192		return res;
3193
3194	res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb);
3195	if (res != RX_CONTINUE)
3196		return res;
3197
3198	if (!ieee80211_frame_allowed(rx, fc))
3199		return RX_DROP_MONITOR;
3200
3201	/* directly handle TDLS channel switch requests/responses */
3202	if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto ==
3203						cpu_to_be16(ETH_P_TDLS))) {
3204		struct ieee80211_tdls_data *tf = (void *)rx->skb->data;
3205
3206		if (pskb_may_pull(rx->skb,
3207				  offsetof(struct ieee80211_tdls_data, u)) &&
3208		    tf->payload_type == WLAN_TDLS_SNAP_RFTYPE &&
3209		    tf->category == WLAN_CATEGORY_TDLS &&
3210		    (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
3211		     tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
3212			rx->skb->protocol = cpu_to_be16(ETH_P_TDLS);
3213			__ieee80211_queue_skb_to_iface(sdata, rx->link_id,
3214						       rx->sta, rx->skb);
3215			return RX_QUEUED;
3216		}
3217	}
3218
3219	if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
3220	    unlikely(port_control) && sdata->bss) {
3221		sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
3222				     u.ap);
3223		dev = sdata->dev;
3224		rx->sdata = sdata;
3225	}
3226
3227	rx->skb->dev = dev;
3228
3229	if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
3230	    local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
3231	    !is_multicast_ether_addr(
3232		    ((struct ethhdr *)rx->skb->data)->h_dest) &&
3233	    (!local->scanning &&
3234	     !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)))
3235		mod_timer(&local->dynamic_ps_timer, jiffies +
3236			  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
3237
3238	ieee80211_deliver_skb(rx);
3239
3240	return RX_QUEUED;
3241}
3242
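/*
 * Handle control frames that made it this far: a BlockAck request
 * releases reordered frames up to its starting sequence number (and may
 * trigger a DELBA for an unexpected session); all other control frames
 * are dropped to monitor.
 */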
3243static ieee80211_rx_result debug_noinline
3244ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
3245{
3246	struct sk_buff *skb = rx->skb;
3247	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
3248	struct tid_ampdu_rx *tid_agg_rx;
3249	u16 start_seq_num;
3250	u16 tid;
3251
3252	if (likely(!ieee80211_is_ctl(bar->frame_control)))
3253		return RX_CONTINUE;
3254
3255	if (ieee80211_is_back_req(bar->frame_control)) {
3256		struct {
3257			__le16 control, start_seq_num;
3258		} __packed bar_data;
3259		struct ieee80211_event event = {
3260			.type = BAR_RX_EVENT,
3261		};
3262
3263		if (!rx->sta)
3264			return RX_DROP_MONITOR;
3265
3266		if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
3267				  &bar_data, sizeof(bar_data)))
3268			return RX_DROP_MONITOR;
3269
3270		tid = le16_to_cpu(bar_data.control) >> 12;
3271
3272		if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
3273		    !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
3274			ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
3275					     WLAN_BACK_RECIPIENT,
3276					     WLAN_REASON_QSTA_REQUIRE_SETUP);
3277
3278		tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
3279		if (!tid_agg_rx)
3280			return RX_DROP_MONITOR;
3281
3282		start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
3283		event.u.ba.tid = tid;
3284		event.u.ba.ssn = start_seq_num;
3285		event.u.ba.sta = &rx->sta->sta;
3286
3287		/* reset session timer */
3288		if (tid_agg_rx->timeout)
3289			mod_timer(&tid_agg_rx->session_timer,
3290				  TU_TO_EXP_TIME(tid_agg_rx->timeout));
3291
3292		spin_lock(&tid_agg_rx->reorder_lock);
3293		/* release stored frames up to start of BAR */
3294		ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
3295						 start_seq_num, frames);
3296		spin_unlock(&tid_agg_rx->reorder_lock);
3297
3298		drv_event_callback(rx->local, rx->sdata, &event);
3299
3300		kfree_skb(skb);
3301		return RX_QUEUED;
3302	}
3303
3304	/*
3305	 * After this point, we only want management frames,
3306	 * so we can drop all remaining control frames to
3307	 * cooked monitor interfaces.
3308	 */
3309	return RX_DROP_MONITOR;
3310}
3311
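/*
 * Answer an SA Query request from our current AP by sending an SA Query
 * response carrying the same transaction ID.
 */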
3312static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
3313					   struct ieee80211_mgmt *mgmt,
3314					   size_t len)
3315{
3316	struct ieee80211_local *local = sdata->local;
3317	struct sk_buff *skb;
3318	struct ieee80211_mgmt *resp;
3319
3320	if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
3321		/* Not addressed to our own unicast address */
3322		return;
3323	}
3324
3325	if (!ether_addr_equal(mgmt->sa, sdata->deflink.u.mgd.bssid) ||
3326	    !ether_addr_equal(mgmt->bssid, sdata->deflink.u.mgd.bssid)) {
3327		/* Not from the current AP or not associated yet. */
3328		return;
3329	}
3330
3331	if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
3332		/* Too short SA Query request frame */
3333		return;
3334	}
3335
3336	skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
3337	if (skb == NULL)
3338		return;
3339
3340	skb_reserve(skb, local->hw.extra_tx_headroom);
3341	resp = skb_put_zero(skb, 24);
3342	memcpy(resp->da, mgmt->sa, ETH_ALEN);
3343	memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
3344	memcpy(resp->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
3345	resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3346					  IEEE80211_STYPE_ACTION);
3347	skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
3348	resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
3349	resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
3350	memcpy(resp->u.action.u.sa_query.trans_id,
3351	       mgmt->u.action.u.sa_query.trans_id,
3352	       WLAN_SA_QUERY_TR_ID_LEN);
3353
3354	ieee80211_tx_skb(sdata, skb);
3355}
3356
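/*
 * Software BSS color collision detection: when the device doesn't detect
 * collisions itself, parse the HE Operation element from OBSS beacons and
 * notify if the advertised BSS color matches our own.
 */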
3357static void
3358ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx)
3359{
3360	struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
3361	const struct element *ie;
3362	size_t baselen;
3363
3364	if (!wiphy_ext_feature_isset(rx->local->hw.wiphy,
3365				     NL80211_EXT_FEATURE_BSS_COLOR))
3366		return;
3367
3368	if (ieee80211_hw_check(&rx->local->hw, DETECTS_COLOR_COLLISION))
3369		return;
3370
3371	if (rx->sdata->vif.bss_conf.csa_active)
3372		return;
3373
3374	baselen = mgmt->u.beacon.variable - rx->skb->data;
3375	if (baselen > rx->skb->len)
3376		return;
3377
3378	ie = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION,
3379				    mgmt->u.beacon.variable,
3380				    rx->skb->len - baselen);
3381	if (ie && ie->datalen >= sizeof(struct ieee80211_he_operation) &&
3382	    ie->datalen >= ieee80211_he_oper_size(ie->data + 1)) {
3383		struct ieee80211_bss_conf *bss_conf = &rx->sdata->vif.bss_conf;
3384		const struct ieee80211_he_operation *he_oper;
3385		u8 color;
3386
3387		he_oper = (void *)(ie->data + 1);
3388		if (le32_get_bits(he_oper->he_oper_params,
3389				  IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED))
3390			return;
3391
3392		color = le32_get_bits(he_oper->he_oper_params,
3393				      IEEE80211_HE_OPERATION_BSS_COLOR_MASK);
3394		if (color == bss_conf->he_bss_color.color)
3395			ieee80211_obss_color_collision_notify(&rx->sdata->vif,
3396							      BIT_ULL(color));
3397	}
3398}
3399
3400static ieee80211_rx_result debug_noinline
3401ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
3402{
3403	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3404	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3405
3406	if (ieee80211_is_s1g_beacon(mgmt->frame_control))
3407		return RX_CONTINUE;
3408
3409	/*
3410	 * From here on, look only at management frames.
3411	 * Data and control frames are already handled,
3412	 * and unknown (reserved) frames are useless.
3413	 */
3414	if (rx->skb->len < 24)
3415		return RX_DROP_MONITOR;
3416
3417	if (!ieee80211_is_mgmt(mgmt->frame_control))
3418		return RX_DROP_MONITOR;
3419
3420	/* drop too small action frames */
3421	if (ieee80211_is_action(mgmt->frame_control) &&
3422	    rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
3423		return RX_DROP_U_RUNT_ACTION;
3424
3425	if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
3426	    ieee80211_is_beacon(mgmt->frame_control) &&
3427	    !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
3428		int sig = 0;
3429
3430		/* sw bss color collision detection */
3431		ieee80211_rx_check_bss_color_collision(rx);
3432
3433		if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
3434		    !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
3435			sig = status->signal;
3436
3437		cfg80211_report_obss_beacon_khz(rx->local->hw.wiphy,
3438						rx->skb->data, rx->skb->len,
3439						ieee80211_rx_status_to_khz(status),
3440						sig);
3441		rx->flags |= IEEE80211_RX_BEACON_REPORTED;
3442	}
3443
3444	return ieee80211_drop_unencrypted_mgmt(rx);
3445}
3446
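/*
 * Decide whether a received S1G TWT Setup/Teardown action frame should be
 * queued for further processing: only on AP interfaces whose driver
 * implements TWT responder support, for a known station, and only if the
 * frame is long enough.
 */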
3447static bool
3448ieee80211_process_rx_twt_action(struct ieee80211_rx_data *rx)
3449{
3450	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)rx->skb->data;
3451	struct ieee80211_sub_if_data *sdata = rx->sdata;
3452
3453	/* TWT actions are only supported in AP for the moment */
3454	if (sdata->vif.type != NL80211_IFTYPE_AP)
3455		return false;
3456
3457	if (!rx->local->ops->add_twt_setup)
3458		return false;
3459
3460	if (!sdata->vif.bss_conf.twt_responder)
3461		return false;
3462
3463	if (!rx->sta)
3464		return false;
3465
3466	switch (mgmt->u.action.u.s1g.action_code) {
3467	case WLAN_S1G_TWT_SETUP: {
3468		struct ieee80211_twt_setup *twt;
3469
3470		if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE +
3471				   1 + /* action code */
3472				   sizeof(struct ieee80211_twt_setup) +
3473				   2 /* TWT req_type agrt */)
3474			break;
3475
3476		twt = (void *)mgmt->u.action.u.s1g.variable;
3477		if (twt->element_id != WLAN_EID_S1G_TWT)
3478			break;
3479
3480		if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE +
3481				   4 + /* action code + token + tlv */
3482				   twt->length)
3483			break;
3484
3485		return true; /* queue the frame */
3486	}
3487	case WLAN_S1G_TWT_TEARDOWN:
3488		if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE + 2)
3489			break;
3490
3491		return true; /* queue the frame */
3492	default:
3493		break;
3494	}
3495
3496	return false;
3497}
3498
3499static ieee80211_rx_result debug_noinline
3500ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
3501{
3502	struct ieee80211_local *local = rx->local;
3503	struct ieee80211_sub_if_data *sdata = rx->sdata;
3504	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3505	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3506	int len = rx->skb->len;
3507
3508	if (!ieee80211_is_action(mgmt->frame_control))
3509		return RX_CONTINUE;
3510
3511	if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
3512	    mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
3513	    mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
3514		return RX_DROP_U_ACTION_UNKNOWN_SRC;
3515
3516	switch (mgmt->u.action.category) {
3517	case WLAN_CATEGORY_HT:
3518		/* reject HT action frames from stations not supporting HT */
3519		if (!rx->link_sta->pub->ht_cap.ht_supported)
3520			goto invalid;
3521
3522		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3523		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3524		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3525		    sdata->vif.type != NL80211_IFTYPE_AP &&
3526		    sdata->vif.type != NL80211_IFTYPE_ADHOC)
3527			break;
3528
3529		/* verify action & smps_control/chanwidth are present */
3530		if (len < IEEE80211_MIN_ACTION_SIZE + 2)
3531			goto invalid;
3532
3533		switch (mgmt->u.action.u.ht_smps.action) {
3534		case WLAN_HT_ACTION_SMPS: {
3535			struct ieee80211_supported_band *sband;
3536			enum ieee80211_smps_mode smps_mode;
3537			struct sta_opmode_info sta_opmode = {};
3538
3539			if (sdata->vif.type != NL80211_IFTYPE_AP &&
3540			    sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
3541				goto handled;
3542
3543			/* convert the SMPS control field to our SMPS mode */
3544			switch (mgmt->u.action.u.ht_smps.smps_control) {
3545			case WLAN_HT_SMPS_CONTROL_DISABLED:
3546				smps_mode = IEEE80211_SMPS_OFF;
3547				break;
3548			case WLAN_HT_SMPS_CONTROL_STATIC:
3549				smps_mode = IEEE80211_SMPS_STATIC;
3550				break;
3551			case WLAN_HT_SMPS_CONTROL_DYNAMIC:
3552				smps_mode = IEEE80211_SMPS_DYNAMIC;
3553				break;
3554			default:
3555				goto invalid;
3556			}
3557
3558			/* if no change do nothing */
3559			if (rx->link_sta->pub->smps_mode == smps_mode)
3560				goto handled;
3561			rx->link_sta->pub->smps_mode = smps_mode;
3562			sta_opmode.smps_mode =
3563				ieee80211_smps_mode_to_smps_mode(smps_mode);
3564			sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED;
3565
3566			sband = rx->local->hw.wiphy->bands[status->band];
3567
3568			rate_control_rate_update(local, sband, rx->sta, 0,
3569						 IEEE80211_RC_SMPS_CHANGED);
3570			cfg80211_sta_opmode_change_notify(sdata->dev,
3571							  rx->sta->addr,
3572							  &sta_opmode,
3573							  GFP_ATOMIC);
3574			goto handled;
3575		}
3576		case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
3577			struct ieee80211_supported_band *sband;
3578			u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
3579			enum ieee80211_sta_rx_bandwidth max_bw, new_bw;
3580			struct sta_opmode_info sta_opmode = {};
3581
3582			/* If it doesn't support 40 MHz it can't change ... */
3583			if (!(rx->link_sta->pub->ht_cap.cap &
3584					IEEE80211_HT_CAP_SUP_WIDTH_20_40))
3585				goto handled;
3586
3587			if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
3588				max_bw = IEEE80211_STA_RX_BW_20;
3589			else
3590				max_bw = ieee80211_sta_cap_rx_bw(rx->link_sta);
3591
3592			/* set cur_max_bandwidth and recalc sta bw */
3593			rx->link_sta->cur_max_bandwidth = max_bw;
3594			new_bw = ieee80211_sta_cur_vht_bw(rx->link_sta);
3595
3596			if (rx->link_sta->pub->bandwidth == new_bw)
3597				goto handled;
3598
3599			rx->link_sta->pub->bandwidth = new_bw;
3600			sband = rx->local->hw.wiphy->bands[status->band];
3601			sta_opmode.bw =
3602				ieee80211_sta_rx_bw_to_chan_width(rx->link_sta);
3603			sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED;
3604
3605			rate_control_rate_update(local, sband, rx->sta, 0,
3606						 IEEE80211_RC_BW_CHANGED);
3607			cfg80211_sta_opmode_change_notify(sdata->dev,
3608							  rx->sta->addr,
3609							  &sta_opmode,
3610							  GFP_ATOMIC);
3611			goto handled;
3612		}
3613		default:
3614			goto invalid;
3615		}
3616
3617		break;
3618	case WLAN_CATEGORY_PUBLIC:
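		/*
		 * Of the public action frames, only extended channel switch
		 * announcements from our own AP are handled here; everything
		 * else falls through to the later handlers (e.g. delivery
		 * to userspace).
		 */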
3619		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3620			goto invalid;
3621		if (sdata->vif.type != NL80211_IFTYPE_STATION)
3622			break;
3623		if (!rx->sta)
3624			break;
3625		if (!ether_addr_equal(mgmt->bssid, sdata->deflink.u.mgd.bssid))
3626			break;
3627		if (mgmt->u.action.u.ext_chan_switch.action_code !=
3628				WLAN_PUB_ACTION_EXT_CHANSW_ANN)
3629			break;
3630		if (len < offsetof(struct ieee80211_mgmt,
3631				   u.action.u.ext_chan_switch.variable))
3632			goto invalid;
3633		goto queue;
3634	case WLAN_CATEGORY_VHT:
3635		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3636		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3637		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3638		    sdata->vif.type != NL80211_IFTYPE_AP &&
3639		    sdata->vif.type != NL80211_IFTYPE_ADHOC)
3640			break;
3641
3642		/* verify action code is present */
3643		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3644			goto invalid;
3645
3646		switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
3647		case WLAN_VHT_ACTION_OPMODE_NOTIF: {
3648			/* verify opmode is present */
3649			if (len < IEEE80211_MIN_ACTION_SIZE + 2)
3650				goto invalid;
3651			goto queue;
3652		}
3653		case WLAN_VHT_ACTION_GROUPID_MGMT: {
3654			if (len < IEEE80211_MIN_ACTION_SIZE + 25)
3655				goto invalid;
3656			goto queue;
3657		}
3658		default:
3659			break;
3660		}
3661		break;
3662	case WLAN_CATEGORY_BACK:
3663		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3664		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3665		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3666		    sdata->vif.type != NL80211_IFTYPE_AP &&
3667		    sdata->vif.type != NL80211_IFTYPE_ADHOC)
3668			break;
3669
3670		/* verify action_code is present */
3671		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3672			break;
3673
3674		switch (mgmt->u.action.u.addba_req.action_code) {
3675		case WLAN_ACTION_ADDBA_REQ:
3676			if (len < (IEEE80211_MIN_ACTION_SIZE +
3677				   sizeof(mgmt->u.action.u.addba_req)))
3678				goto invalid;
3679			break;
3680		case WLAN_ACTION_ADDBA_RESP:
3681			if (len < (IEEE80211_MIN_ACTION_SIZE +
3682				   sizeof(mgmt->u.action.u.addba_resp)))
3683				goto invalid;
3684			break;
3685		case WLAN_ACTION_DELBA:
3686			if (len < (IEEE80211_MIN_ACTION_SIZE +
3687				   sizeof(mgmt->u.action.u.delba)))
3688				goto invalid;
3689			break;
3690		default:
3691			goto invalid;
3692		}
3693
3694		goto queue;
3695	case WLAN_CATEGORY_SPECTRUM_MGMT:
3696		/* verify action_code is present */
3697		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3698			break;
3699
3700		switch (mgmt->u.action.u.measurement.action_code) {
3701		case WLAN_ACTION_SPCT_MSR_REQ:
3702			if (status->band != NL80211_BAND_5GHZ)
3703				break;
3704
3705			if (len < (IEEE80211_MIN_ACTION_SIZE +
3706				   sizeof(mgmt->u.action.u.measurement)))
3707				break;
3708
3709			if (sdata->vif.type != NL80211_IFTYPE_STATION)
3710				break;
3711
3712			ieee80211_process_measurement_req(sdata, mgmt, len);
3713			goto handled;
3714		case WLAN_ACTION_SPCT_CHL_SWITCH: {
3715			u8 *bssid;
3716			if (len < (IEEE80211_MIN_ACTION_SIZE +
3717				   sizeof(mgmt->u.action.u.chan_switch)))
3718				break;
3719
3720			if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3721			    sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3722			    sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
3723				break;
3724
3725			if (sdata->vif.type == NL80211_IFTYPE_STATION)
3726				bssid = sdata->deflink.u.mgd.bssid;
3727			else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
3728				bssid = sdata->u.ibss.bssid;
3729			else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
3730				bssid = mgmt->sa;
3731			else
3732				break;
3733
3734			if (!ether_addr_equal(mgmt->bssid, bssid))
3735				break;
3736
3737			goto queue;
3738			}
3739		}
3740		break;
3741	case WLAN_CATEGORY_SELF_PROTECTED:
3742		if (len < (IEEE80211_MIN_ACTION_SIZE +
3743			   sizeof(mgmt->u.action.u.self_prot.action_code)))
3744			break;
3745
3746		switch (mgmt->u.action.u.self_prot.action_code) {
3747		case WLAN_SP_MESH_PEERING_OPEN:
3748		case WLAN_SP_MESH_PEERING_CLOSE:
3749		case WLAN_SP_MESH_PEERING_CONFIRM:
3750			if (!ieee80211_vif_is_mesh(&sdata->vif))
3751				goto invalid;
3752			if (sdata->u.mesh.user_mpm)
3753				/* userspace handles this frame */
3754				break;
3755			goto queue;
3756		case WLAN_SP_MGK_INFORM:
3757		case WLAN_SP_MGK_ACK:
3758			if (!ieee80211_vif_is_mesh(&sdata->vif))
3759				goto invalid;
3760			break;
3761		}
3762		break;
3763	case WLAN_CATEGORY_MESH_ACTION:
3764		if (len < (IEEE80211_MIN_ACTION_SIZE +
3765			   sizeof(mgmt->u.action.u.mesh_action.action_code)))
3766			break;
3767
3768		if (!ieee80211_vif_is_mesh(&sdata->vif))
3769			break;
3770		if (mesh_action_is_path_sel(mgmt) &&
3771		    !mesh_path_sel_is_hwmp(sdata))
3772			break;
3773		goto queue;
3774	case WLAN_CATEGORY_S1G:
3775		if (len < offsetofend(typeof(*mgmt),
3776				      u.action.u.s1g.action_code))
3777			break;
3778
3779		switch (mgmt->u.action.u.s1g.action_code) {
3780		case WLAN_S1G_TWT_SETUP:
3781		case WLAN_S1G_TWT_TEARDOWN:
3782			if (ieee80211_process_rx_twt_action(rx))
3783				goto queue;
3784			break;
3785		default:
3786			break;
3787		}
3788		break;
3789	case WLAN_CATEGORY_PROTECTED_EHT:
3790		if (len < offsetofend(typeof(*mgmt),
3791				      u.action.u.ttlm_req.action_code))
3792			break;
3793
3794		switch (mgmt->u.action.u.ttlm_req.action_code) {
3795		case WLAN_PROTECTED_EHT_ACTION_TTLM_REQ:
3796			if (sdata->vif.type != NL80211_IFTYPE_STATION)
3797				break;
3798
3799			if (len < offsetofend(typeof(*mgmt),
3800					      u.action.u.ttlm_req))
3801				goto invalid;
3802			goto queue;
3803		case WLAN_PROTECTED_EHT_ACTION_TTLM_RES:
3804			if (sdata->vif.type != NL80211_IFTYPE_STATION)
3805				break;
3806
3807			if (len < offsetofend(typeof(*mgmt),
3808					      u.action.u.ttlm_res))
3809				goto invalid;
3810			goto queue;
3811		default:
3812			break;
3813		}
3814		break;
3815	}
3816
3817	return RX_CONTINUE;
3818
3819 invalid:
3820	status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
3821	/* the frame will be returned to the sender by a later handler */
3822	return RX_CONTINUE;
3823
3824 handled:
3825	if (rx->sta)
3826		rx->link_sta->rx_stats.packets++;
3827	dev_kfree_skb(rx->skb);
3828	return RX_QUEUED;
3829
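	/*
	 * "queue" hands the frame to the interface, where it is processed
	 * from the interface's work outside the RX path.
	 */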
3830 queue:
3831	ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb);
3832	return RX_QUEUED;
3833}
3834
3835static ieee80211_rx_result debug_noinline
3836ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
3837{
3838	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3839	struct cfg80211_rx_info info = {
3840		.freq = ieee80211_rx_status_to_khz(status),
3841		.buf = rx->skb->data,
3842		.len = rx->skb->len,
3843		.link_id = rx->link_id,
3844		.have_link_id = rx->link_id >= 0,
3845	};
3846
3847	/* skip known-bad action frames and return them in the next handler */
3848	if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
3849		return RX_CONTINUE;
3850
3851	/*
3852	 * Getting here means the kernel doesn't know how to handle
3853	 * the frame, but maybe userspace does ... also include
3854	 * returned frames, so userspace can register for those and
3855	 * learn whether frames it transmitted were processed or returned.
3856	 */
3857
3858	if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
3859	    !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
3860		info.sig_dbm = status->signal;
3861
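	/*
	 * For timing measurement / FTM frames, also pass the hardware
	 * RX timestamp and the ACK TX timestamp to userspace.
	 */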
3862	if (ieee80211_is_timing_measurement(rx->skb) ||
3863	    ieee80211_is_ftm(rx->skb)) {
3864		info.rx_tstamp = ktime_to_ns(skb_hwtstamps(rx->skb)->hwtstamp);
3865		info.ack_tstamp = ktime_to_ns(status->ack_tx_hwtstamp);
3866	}
3867
3868	if (cfg80211_rx_mgmt_ext(&rx->sdata->wdev, &info)) {
3869		if (rx->sta)
3870			rx->link_sta->rx_stats.packets++;
3871		dev_kfree_skb(rx->skb);
3872		return RX_QUEUED;
3873	}
3874
3875	return RX_CONTINUE;
3876}
3877
3878static ieee80211_rx_result debug_noinline
3879ieee80211_rx_h_action_post_userspace(struct ieee80211_rx_data *rx)
3880{
3881	struct ieee80211_sub_if_data *sdata = rx->sdata;
3882	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3883	int len = rx->skb->len;
3884
3885	if (!ieee80211_is_action(mgmt->frame_control))
3886		return RX_CONTINUE;
3887
3888	switch (mgmt->u.action.category) {
3889	case WLAN_CATEGORY_SA_QUERY:
3890		if (len < (IEEE80211_MIN_ACTION_SIZE +
3891			   sizeof(mgmt->u.action.u.sa_query)))
3892			break;
3893
3894		switch (mgmt->u.action.u.sa_query.action) {
3895		case WLAN_ACTION_SA_QUERY_REQUEST:
3896			if (sdata->vif.type != NL80211_IFTYPE_STATION)
3897				break;
3898			ieee80211_process_sa_query_req(sdata, mgmt, len);
3899			goto handled;
3900		}
3901		break;
3902	}
3903
3904	return RX_CONTINUE;
3905
3906 handled:
3907	if (rx->sta)
3908		rx->link_sta->rx_stats.packets++;
3909	dev_kfree_skb(rx->skb);
3910	return RX_QUEUED;
3911}
3912
3913static ieee80211_rx_result debug_noinline
3914ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
3915{
3916	struct ieee80211_local *local = rx->local;
3917	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3918	struct sk_buff *nskb;
3919	struct ieee80211_sub_if_data *sdata = rx->sdata;
3920	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3921
3922	if (!ieee80211_is_action(mgmt->frame_control))
3923		return RX_CONTINUE;
3924
3925	/*
3926	 * For AP mode, hostapd is responsible for handling any action
3927	 * frames that we didn't handle, including returning unknown
3928	 * ones. For all other modes we will return them to the sender,
3929	 * setting the 0x80 bit in the action category, as required by
3930	 * 802.11-2012 9.24.4.
3931	 * Newer versions of hostapd use the management frame
3932	 * registration mechanisms, but older ones still rely on cooked
3933	 * monitor interfaces, so push all frames there.
3934	 */
3935	if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
3936	    (sdata->vif.type == NL80211_IFTYPE_AP ||
3937	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
3938		return RX_DROP_MONITOR;
3939
3940	if (is_multicast_ether_addr(mgmt->da))
3941		return RX_DROP_MONITOR;
3942
3943	/* do not return rejected action frames */
3944	if (mgmt->u.action.category & 0x80)
3945		return RX_DROP_U_REJECTED_ACTION_RESPONSE;
3946
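	/*
	 * Build the "returned" frame: copy the original with enough
	 * headroom for the driver, set the 0x80 (error) bit in the
	 * category and swap the addresses so it goes back to the sender.
	 */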
3947	nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
3948			       GFP_ATOMIC);
3949	if (nskb) {
3950		struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
3951
3952		nmgmt->u.action.category |= 0x80;
3953		memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
3954		memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
3955
3956		memset(nskb->cb, 0, sizeof(nskb->cb));
3957
3958		if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
3959			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
3960
3961			info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
3962				      IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
3963				      IEEE80211_TX_CTL_NO_CCK_RATE;
3964			if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
3965				info->hw_queue =
3966					local->hw.offchannel_tx_hw_queue;
3967		}
3968
3969		__ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, -1,
3970					    status->band);
3971	}
3972	dev_kfree_skb(rx->skb);
3973	return RX_QUEUED;
3974}
3975
3976static ieee80211_rx_result debug_noinline
3977ieee80211_rx_h_ext(struct ieee80211_rx_data *rx)
3978{
3979	struct ieee80211_sub_if_data *sdata = rx->sdata;
3980	struct ieee80211_hdr *hdr = (void *)rx->skb->data;
3981
3982	if (!ieee80211_is_ext(hdr->frame_control))
3983		return RX_CONTINUE;
3984
3985	if (sdata->vif.type != NL80211_IFTYPE_STATION)
3986		return RX_DROP_MONITOR;
3987
3988	/* for now only beacons are ext, so queue them */
3989	ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb);
3990
3991	return RX_QUEUED;
3992}
3993
3994static ieee80211_rx_result debug_noinline
3995ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
3996{
3997	struct ieee80211_sub_if_data *sdata = rx->sdata;
3998	struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
3999	__le16 stype;
4000
4001	stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
4002
4003	if (!ieee80211_vif_is_mesh(&sdata->vif) &&
4004	    sdata->vif.type != NL80211_IFTYPE_ADHOC &&
4005	    sdata->vif.type != NL80211_IFTYPE_OCB &&
4006	    sdata->vif.type != NL80211_IFTYPE_STATION)
4007		return RX_DROP_MONITOR;
4008
4009	switch (stype) {
4010	case cpu_to_le16(IEEE80211_STYPE_AUTH):
4011	case cpu_to_le16(IEEE80211_STYPE_BEACON):
4012	case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
4013		/* process for all: mesh, mlme, ibss */
4014		break;
4015	case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
4016		if (is_multicast_ether_addr(mgmt->da) &&
4017		    !is_broadcast_ether_addr(mgmt->da))
4018			return RX_DROP_MONITOR;
4019
4020		/* process only for station/IBSS */
4021		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
4022		    sdata->vif.type != NL80211_IFTYPE_ADHOC)
4023			return RX_DROP_MONITOR;
4024		break;
4025	case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
4026	case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
4027	case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
4028		if (is_multicast_ether_addr(mgmt->da) &&
4029		    !is_broadcast_ether_addr(mgmt->da))
4030			return RX_DROP_MONITOR;
4031
4032		/* process only for station */
4033		if (sdata->vif.type != NL80211_IFTYPE_STATION)
4034			return RX_DROP_MONITOR;
4035		break;
4036	case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
4037		/* process only for ibss and mesh */
4038		if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
4039		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
4040			return RX_DROP_MONITOR;
4041		break;
4042	default:
4043		return RX_DROP_MONITOR;
4044	}
4045
4046	ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb);
4047
4048	return RX_QUEUED;
4049}
4050
4051static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
4052					struct ieee80211_rate *rate,
4053					ieee80211_rx_result reason)
4054{
4055	struct ieee80211_sub_if_data *sdata;
4056	struct ieee80211_local *local = rx->local;
4057	struct sk_buff *skb = rx->skb, *skb2;
4058	struct net_device *prev_dev = NULL;
4059	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4060	int needed_headroom;
4061
4062	/*
4063	 * If cooked monitor has been processed already, then
4064	 * don't do it again. If not, set the flag.
4065	 */
4066	if (rx->flags & IEEE80211_RX_CMNTR)
4067		goto out_free_skb;
4068	rx->flags |= IEEE80211_RX_CMNTR;
4069
4070	/* If there are no cooked monitor interfaces, just free the SKB */
4071	if (!local->cooked_mntrs)
4072		goto out_free_skb;
4073
4074	/* room for the radiotap header based on driver features */
4075	needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb);
4076
4077	if (skb_headroom(skb) < needed_headroom &&
4078	    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
4079		goto out_free_skb;
4080
4081	/* prepend radiotap information */
4082	ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
4083					 false);
4084
4085	skb_reset_mac_header(skb);
4086	skb->ip_summed = CHECKSUM_UNNECESSARY;
4087	skb->pkt_type = PACKET_OTHERHOST;
4088	skb->protocol = htons(ETH_P_802_2);
4089
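	/*
	 * Deliver a clone to every cooked-monitor interface; the last
	 * one gets the original skb so we avoid one extra copy.
	 */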
4090	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
4091		if (!ieee80211_sdata_running(sdata))
4092			continue;
4093
4094		if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
4095		    !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES))
4096			continue;
4097
4098		if (prev_dev) {
4099			skb2 = skb_clone(skb, GFP_ATOMIC);
4100			if (skb2) {
4101				skb2->dev = prev_dev;
4102				netif_receive_skb(skb2);
4103			}
4104		}
4105
4106		prev_dev = sdata->dev;
4107		dev_sw_netstats_rx_add(sdata->dev, skb->len);
4108	}
4109
4110	if (prev_dev) {
4111		skb->dev = prev_dev;
4112		netif_receive_skb(skb);
4113		return;
4114	}
4115
4116 out_free_skb:
4117	kfree_skb_reason(skb, (__force u32)reason);
4118}
4119
4120static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
4121					 ieee80211_rx_result res)
4122{
4123	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
4124	struct ieee80211_supported_band *sband;
4125	struct ieee80211_rate *rate = NULL;
4126
4127	if (res == RX_QUEUED) {
4128		I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
4129		return;
4130	}
4131
4132	if (res != RX_CONTINUE) {
4133		I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
4134		if (rx->sta)
4135			rx->link_sta->rx_stats.dropped++;
4136	}
4137
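	/* Frames dropped as unusable are freed immediately with their
	 * drop reason; other drop results still go to cooked monitor.
	 */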
4138	if (u32_get_bits((__force u32)res, SKB_DROP_REASON_SUBSYS_MASK) ==
4139			SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE) {
4140		kfree_skb_reason(rx->skb, (__force u32)res);
4141		return;
4142	}
4143
4144	sband = rx->local->hw.wiphy->bands[status->band];
4145	if (status->encoding == RX_ENC_LEGACY)
4146		rate = &sband->bitrates[status->rate_idx];
4147
4148	ieee80211_rx_cooked_monitor(rx, rate, res);
4149}
4150
4151static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
4152				  struct sk_buff_head *frames)
4153{
4154	ieee80211_rx_result res = RX_DROP_MONITOR;
4155	struct sk_buff *skb;
4156
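/*
 * Run one RX handler and stop processing this frame unless it
 * returns RX_CONTINUE.
 */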
4157#define CALL_RXH(rxh)			\
4158	do {				\
4159		res = rxh(rx);		\
4160		if (res != RX_CONTINUE)	\
4161			goto rxh_next;  \
4162	} while (0)
4163
4164	/* Lock here to avoid hitting all of the data used in the RX
4165	 * path (e.g. key data, station data, ...) concurrently when
4166	 * a frame is released from the reorder buffer due to timeout
4167	 * from the timer, potentially concurrently with RX from the
4168	 * driver.
4169	 */
4170	spin_lock_bh(&rx->local->rx_path_lock);
4171
4172	while ((skb = __skb_dequeue(frames))) {
4173		/*
4174		 * all the other fields are valid across frames
4175		 * that belong to an A-MPDU since they are on the
4176		 * same TID from the same station
4177		 */
4178		rx->skb = skb;
4179
4180		if (WARN_ON_ONCE(!rx->link))
4181			goto rxh_next;
4182
4183		CALL_RXH(ieee80211_rx_h_check_more_data);
4184		CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll);
4185		CALL_RXH(ieee80211_rx_h_sta_process);
4186		CALL_RXH(ieee80211_rx_h_decrypt);
4187		CALL_RXH(ieee80211_rx_h_defragment);
4188		CALL_RXH(ieee80211_rx_h_michael_mic_verify);
4189		/* must be after MMIC verify so header is counted in MPDU mic */
4190		CALL_RXH(ieee80211_rx_h_amsdu);
4191		CALL_RXH(ieee80211_rx_h_data);
4192
4193		/* special treatment -- needs the queue */
4194		res = ieee80211_rx_h_ctrl(rx, frames);
4195		if (res != RX_CONTINUE)
4196			goto rxh_next;
4197
4198		CALL_RXH(ieee80211_rx_h_mgmt_check);
4199		CALL_RXH(ieee80211_rx_h_action);
4200		CALL_RXH(ieee80211_rx_h_userspace_mgmt);
4201		CALL_RXH(ieee80211_rx_h_action_post_userspace);
4202		CALL_RXH(ieee80211_rx_h_action_return);
4203		CALL_RXH(ieee80211_rx_h_ext);
4204		CALL_RXH(ieee80211_rx_h_mgmt);
4205
4206 rxh_next:
4207		ieee80211_rx_handlers_result(rx, res);
4208
4209#undef CALL_RXH
4210	}
4211
4212	spin_unlock_bh(&rx->local->rx_path_lock);
4213}
4214
4215static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
4216{
4217	struct sk_buff_head reorder_release;
4218	ieee80211_rx_result res = RX_DROP_MONITOR;
4219
4220	__skb_queue_head_init(&reorder_release);
4221
4222#define CALL_RXH(rxh)			\
4223	do {				\
4224		res = rxh(rx);		\
4225		if (res != RX_CONTINUE)	\
4226			goto rxh_next;  \
4227	} while (0)
4228
4229	CALL_RXH(ieee80211_rx_h_check_dup);
4230	CALL_RXH(ieee80211_rx_h_check);
4231
4232	ieee80211_rx_reorder_ampdu(rx, &reorder_release);
4233
4234	ieee80211_rx_handlers(rx, &reorder_release);
4235	return;
4236
4237 rxh_next:
4238	ieee80211_rx_handlers_result(rx, res);
4239
4240#undef CALL_RXH
4241}
4242
4243static bool
4244ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id)
4245{
4246	return !!(sta->valid_links & BIT(link_id));
4247}
4248
4249static bool ieee80211_rx_data_set_link(struct ieee80211_rx_data *rx,
4250				       u8 link_id)
4251{
4252	rx->link_id = link_id;
4253	rx->link = rcu_dereference(rx->sdata->link[link_id]);
4254
4255	if (!rx->sta)
4256		return rx->link;
4257
4258	if (!ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta, link_id))
4259		return false;
4260
4261	rx->link_sta = rcu_dereference(rx->sta->link[link_id]);
4262
4263	return rx->link && rx->link_sta;
4264}
4265
4266static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx,
4267				      struct sta_info *sta, int link_id)
4268{
4269	rx->link_id = link_id;
4270	rx->sta = sta;
4271
4272	if (sta) {
4273		rx->local = sta->sdata->local;
4274		if (!rx->sdata)
4275			rx->sdata = sta->sdata;
4276		rx->link_sta = &sta->deflink;
4277	} else {
4278		rx->link_sta = NULL;
4279	}
4280
4281	if (link_id < 0)
4282		rx->link = &rx->sdata->deflink;
4283	else if (!ieee80211_rx_data_set_link(rx, link_id))
4284		return false;
4285
4286	return true;
4287}
4288
4289/*
4290 * This function makes calls into the RX path, therefore
4291 * it has to be invoked under RCU read lock.
4292 */
4293void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
4294{
4295	struct sk_buff_head frames;
4296	struct ieee80211_rx_data rx = {
4297		/* This is OK -- must be QoS data frame */
4298		.security_idx = tid,
4299		.seqno_idx = tid,
4300	};
4301	struct tid_ampdu_rx *tid_agg_rx;
4302	int link_id = -1;
4303
4304	/* FIXME: statistics won't be right with this */
4305	if (sta->sta.valid_links)
4306		link_id = ffs(sta->sta.valid_links) - 1;
4307
4308	if (!ieee80211_rx_data_set_sta(&rx, sta, link_id))
4309		return;
4310
4311	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
4312	if (!tid_agg_rx)
4313		return;
4314
4315	__skb_queue_head_init(&frames);
4316
4317	spin_lock(&tid_agg_rx->reorder_lock);
4318	ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
4319	spin_unlock(&tid_agg_rx->reorder_lock);
4320
4321	if (!skb_queue_empty(&frames)) {
4322		struct ieee80211_event event = {
4323			.type = BA_FRAME_TIMEOUT,
4324			.u.ba.tid = tid,
4325			.u.ba.sta = &sta->sta,
4326		};
4327		drv_event_callback(rx.local, rx.sdata, &event);
4328	}
4329
4330	ieee80211_rx_handlers(&rx, &frames);
4331}
4332
4333void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
4334					  u16 ssn, u64 filtered,
4335					  u16 received_mpdus)
4336{
4337	struct ieee80211_local *local;
4338	struct sta_info *sta;
4339	struct tid_ampdu_rx *tid_agg_rx;
4340	struct sk_buff_head frames;
4341	struct ieee80211_rx_data rx = {
4342		/* This is OK -- must be QoS data frame */
4343		.security_idx = tid,
4344		.seqno_idx = tid,
4345	};
4346	int i, diff;
4347
4348	if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS))
4349		return;
4350
4351	__skb_queue_head_init(&frames);
4352
4353	sta = container_of(pubsta, struct sta_info, sta);
4354
4355	local = sta->sdata->local;
4356	WARN_ONCE(local->hw.max_rx_aggregation_subframes > 64,
4357		  "RX BA marker can't support max_rx_aggregation_subframes %u > 64\n",
4358		  local->hw.max_rx_aggregation_subframes);
4359
4360	if (!ieee80211_rx_data_set_sta(&rx, sta, -1))
4361		return;
4362
4363	rcu_read_lock();
4364	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
4365	if (!tid_agg_rx)
4366		goto out;
4367
4368	spin_lock_bh(&tid_agg_rx->reorder_lock);
4369
4370	if (received_mpdus >= IEEE80211_SN_MODULO >> 1) {
4371		int release;
4372
4373		/* release all frames in the reorder buffer */
4374		release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) %
4375			   IEEE80211_SN_MODULO;
4376		ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx,
4377						 release, &frames);
4378		/* update ssn to match received ssn */
4379		tid_agg_rx->head_seq_num = ssn;
4380	} else {
4381		ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn,
4382						 &frames);
4383	}
4384
4385	/* Handle the case where the received SSN is behind the MAC's SSN;
4386	 * it can be less than tid_agg_rx->buf_size behind and still be valid. */
4387	diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK;
4388	if (diff >= tid_agg_rx->buf_size) {
4389		tid_agg_rx->reorder_buf_filtered = 0;
4390		goto release;
4391	}
4392	filtered = filtered >> diff;
4393	ssn += diff;
4394
4395	/* update bitmap */
4396	for (i = 0; i < tid_agg_rx->buf_size; i++) {
4397		int index = (ssn + i) % tid_agg_rx->buf_size;
4398
4399		tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
4400		if (filtered & BIT_ULL(i))
4401			tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index);
4402	}
4403
4404	/* now process also frames that the filter marking released */
4405	ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
4406
4407release:
4408	spin_unlock_bh(&tid_agg_rx->reorder_lock);
4409
4410	ieee80211_rx_handlers(&rx, &frames);
4411
4412 out:
4413	rcu_read_unlock();
4414}
4415EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames);
4416
4417/* main receive path */
4418
4419static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
4420{
4421	return ether_addr_equal(raddr, addr) ||
4422	       is_broadcast_ether_addr(raddr);
4423}
4424
4425static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
4426{
4427	struct ieee80211_sub_if_data *sdata = rx->sdata;
4428	struct sk_buff *skb = rx->skb;
4429	struct ieee80211_hdr *hdr = (void *)skb->data;
4430	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4431	u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
4432	bool multicast = is_multicast_ether_addr(hdr->addr1) ||
4433			 ieee80211_is_s1g_beacon(hdr->frame_control);
4434
4435	switch (sdata->vif.type) {
4436	case NL80211_IFTYPE_STATION:
4437		if (!bssid && !sdata->u.mgd.use_4addr)
4438			return false;
4439		if (ieee80211_is_first_frag(hdr->seq_ctrl) &&
4440		    ieee80211_is_robust_mgmt_frame(skb) && !rx->sta)
4441			return false;
4442		if (multicast)
4443			return true;
4444		return ieee80211_is_our_addr(sdata, hdr->addr1, &rx->link_id);
4445	case NL80211_IFTYPE_ADHOC:
4446		if (!bssid)
4447			return false;
4448		if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
4449		    ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) ||
4450		    !is_valid_ether_addr(hdr->addr2))
4451			return false;
4452		if (ieee80211_is_beacon(hdr->frame_control))
4453			return true;
4454		if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid))
4455			return false;
4456		if (!multicast &&
4457		    !ether_addr_equal(sdata->vif.addr, hdr->addr1))
4458			return false;
4459		if (!rx->sta) {
4460			int rate_idx;
4461			if (status->encoding != RX_ENC_LEGACY)
4462				rate_idx = 0; /* TODO: HT/VHT rates */
4463			else
4464				rate_idx = status->rate_idx;
4465			ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
4466						 BIT(rate_idx));
4467		}
4468		return true;
4469	case NL80211_IFTYPE_OCB:
4470		if (!bssid)
4471			return false;
4472		if (!ieee80211_is_data_present(hdr->frame_control))
4473			return false;
4474		if (!is_broadcast_ether_addr(bssid))
4475			return false;
4476		if (!multicast &&
4477		    !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
4478			return false;
4479		if (!rx->sta) {
4480			int rate_idx;
4481			if (status->encoding != RX_ENC_LEGACY)
4482				rate_idx = 0; /* TODO: HT rates */
4483			else
4484				rate_idx = status->rate_idx;
4485			ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2,
4486						BIT(rate_idx));
4487		}
4488		return true;
4489	case NL80211_IFTYPE_MESH_POINT:
4490		if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
4491			return false;
4492		if (multicast)
4493			return true;
4494		return ether_addr_equal(sdata->vif.addr, hdr->addr1);
4495	case NL80211_IFTYPE_AP_VLAN:
4496	case NL80211_IFTYPE_AP:
4497		if (!bssid)
4498			return ieee80211_is_our_addr(sdata, hdr->addr1,
4499						     &rx->link_id);
4500
4501		if (!is_broadcast_ether_addr(bssid) &&
4502		    !ieee80211_is_our_addr(sdata, bssid, NULL)) {
4503			/*
4504			 * Accept public action frames even when the
4505			 * BSSID doesn't match, this is used for P2P
4506			 * and location updates. Note that mac80211
4507			 * itself never looks at these frames.
4508			 */
4509			if (!multicast &&
4510			    !ieee80211_is_our_addr(sdata, hdr->addr1,
4511						   &rx->link_id))
4512				return false;
4513			if (ieee80211_is_public_action(hdr, skb->len))
4514				return true;
4515			return ieee80211_is_beacon(hdr->frame_control);
4516		}
4517
4518		if (!ieee80211_has_tods(hdr->frame_control)) {
4519			/* ignore data frames to TDLS-peers */
4520			if (ieee80211_is_data(hdr->frame_control))
4521				return false;
4522			/* ignore action frames to TDLS-peers */
4523			if (ieee80211_is_action(hdr->frame_control) &&
4524			    !is_broadcast_ether_addr(bssid) &&
4525			    !ether_addr_equal(bssid, hdr->addr1))
4526				return false;
4527		}
4528
4529		/*
4530		 * 802.11-2016 Table 9-26 says that for data frames, A1 must be
4531		 * the BSSID - we've checked that already but may have accepted
4532		 * the wildcard (ff:ff:ff:ff:ff:ff).
4533		 *
4534		 * It also says:
4535		 *	The BSSID of the Data frame is determined as follows:
4536		 *	a) If the STA is contained within an AP or is associated
4537		 *	   with an AP, the BSSID is the address currently in use
4538		 *	   by the STA contained in the AP.
4539		 *
4540		 * So we should not accept data frames with an address that's
4541		 * multicast.
4542		 *
4543		 * Accepting it also opens a security problem because stations
4544		 * could encrypt it with the GTK and inject traffic that way.
4545		 */
4546		if (ieee80211_is_data(hdr->frame_control) && multicast)
4547			return false;
4548
4549		return true;
4550	case NL80211_IFTYPE_P2P_DEVICE:
4551		return ieee80211_is_public_action(hdr, skb->len) ||
4552		       ieee80211_is_probe_req(hdr->frame_control) ||
4553		       ieee80211_is_probe_resp(hdr->frame_control) ||
4554		       ieee80211_is_beacon(hdr->frame_control);
4555	case NL80211_IFTYPE_NAN:
4556		/* Currently no frames on NAN interface are allowed */
4557		return false;
4558	default:
4559		break;
4560	}
4561
4562	WARN_ON_ONCE(1);
4563	return false;
4564}
4565
4566void ieee80211_check_fast_rx(struct sta_info *sta)
4567{
4568	struct ieee80211_sub_if_data *sdata = sta->sdata;
4569	struct ieee80211_local *local = sdata->local;
4570	struct ieee80211_key *key;
4571	struct ieee80211_fast_rx fastrx = {
4572		.dev = sdata->dev,
4573		.vif_type = sdata->vif.type,
4574		.control_port_protocol = sdata->control_port_protocol,
4575	}, *old, *new = NULL;
4576	u32 offload_flags;
4577	bool set_offload = false;
4578	bool assign = false;
4579	bool offload;
4580
4581	/* use sparse to check that we don't return without updating */
4582	__acquire(check_fast_rx);
4583
4584	BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header));
4585	BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN);
4586	ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header);
4587	ether_addr_copy(fastrx.vif_addr, sdata->vif.addr);
4588
4589	fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS);
4590
4591	/* fast-rx doesn't do reordering */
4592	if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
4593	    !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER))
4594		goto clear;
4595
4596	switch (sdata->vif.type) {
4597	case NL80211_IFTYPE_STATION:
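		/*
		 * For a TDLS peer, frames come directly (no DS bits) with
		 * DA in addr1 and SA in addr2; for frames from the AP we
		 * expect From-DS with DA in addr1 and SA in addr3.
		 */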
4598		if (sta->sta.tdls) {
4599			fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
4600			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
4601			fastrx.expected_ds_bits = 0;
4602		} else {
4603			fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
4604			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3);
4605			fastrx.expected_ds_bits =
4606				cpu_to_le16(IEEE80211_FCTL_FROMDS);
4607		}
4608
4609		if (sdata->u.mgd.use_4addr && !sta->sta.tdls) {
4610			fastrx.expected_ds_bits |=
4611				cpu_to_le16(IEEE80211_FCTL_TODS);
4612			fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
4613			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
4614		}
4615
4616		if (!sdata->u.mgd.powersave)
4617			break;
4618
4619		/* software powersave is a huge mess, avoid all of it */
4620		if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
4621			goto clear;
4622		if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
4623		    !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
4624			goto clear;
4625		break;
4626	case NL80211_IFTYPE_AP_VLAN:
4627	case NL80211_IFTYPE_AP:
4628		/* parallel-rx requires this, at least with calls to
4629		 * ieee80211_sta_ps_transition()
4630		 */
4631		if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
4632			goto clear;
4633		fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
4634		fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
4635		fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS);
4636
4637		fastrx.internal_forward =
4638			!(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
4639			(sdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
4640			 !sdata->u.vlan.sta);
4641
4642		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
4643		    sdata->u.vlan.sta) {
4644			fastrx.expected_ds_bits |=
4645				cpu_to_le16(IEEE80211_FCTL_FROMDS);
4646			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
4647			fastrx.internal_forward = 0;
4648		}
4649
4650		break;
4651	case NL80211_IFTYPE_MESH_POINT:
4652		fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_FROMDS |
4653						      IEEE80211_FCTL_TODS);
4654		fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
4655		fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
4656		break;
4657	default:
4658		goto clear;
4659	}
4660
4661	if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
4662		goto clear;
4663
4664	rcu_read_lock();
4665	key = rcu_dereference(sta->ptk[sta->ptk_idx]);
4666	if (!key)
4667		key = rcu_dereference(sdata->default_unicast_key);
4668	if (key) {
4669		switch (key->conf.cipher) {
4670		case WLAN_CIPHER_SUITE_TKIP:
4671			/* we don't want to deal with MMIC in fast-rx */
4672			goto clear_rcu;
4673		case WLAN_CIPHER_SUITE_CCMP:
4674		case WLAN_CIPHER_SUITE_CCMP_256:
4675		case WLAN_CIPHER_SUITE_GCMP:
4676		case WLAN_CIPHER_SUITE_GCMP_256:
4677			break;
4678		default:
4679			/* We also don't want to deal with
4680			 * WEP or cipher scheme.
4681			 */
4682			goto clear_rcu;
4683		}
4684
4685		fastrx.key = true;
4686		fastrx.icv_len = key->conf.icv_len;
4687	}
4688
4689	assign = true;
4690 clear_rcu:
4691	rcu_read_unlock();
4692 clear:
4693	__release(check_fast_rx);
4694
4695	if (assign)
4696		new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL);
4697
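	/*
	 * Keep the driver's rx decapsulation offload state in sync:
	 * enable it only when fast-rx is usable for this station and
	 * the interface has decap offload enabled, and notify the
	 * driver whenever the flag changes.
	 */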
4698	offload_flags = get_bss_sdata(sdata)->vif.offload_flags;
4699	offload = offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED;
4700
4701	if (assign && offload)
4702		set_offload = !test_and_set_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD);
4703	else
4704		set_offload = test_and_clear_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD);
4705
4706	if (set_offload)
4707		drv_sta_set_decap_offload(local, sdata, &sta->sta, assign);
4708
4709	spin_lock_bh(&sta->lock);
4710	old = rcu_dereference_protected(sta->fast_rx, true);
4711	rcu_assign_pointer(sta->fast_rx, new);
4712	spin_unlock_bh(&sta->lock);
4713
4714	if (old)
4715		kfree_rcu(old, rcu_head);
4716}
4717
4718void ieee80211_clear_fast_rx(struct sta_info *sta)
4719{
4720	struct ieee80211_fast_rx *old;
4721
4722	spin_lock_bh(&sta->lock);
4723	old = rcu_dereference_protected(sta->fast_rx, true);
4724	RCU_INIT_POINTER(sta->fast_rx, NULL);
4725	spin_unlock_bh(&sta->lock);
4726
4727	if (old)
4728		kfree_rcu(old, rcu_head);
4729}
4730
4731void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
4732{
4733	struct ieee80211_local *local = sdata->local;
4734	struct sta_info *sta;
4735
4736	lockdep_assert_wiphy(local->hw.wiphy);
4737
4738	list_for_each_entry(sta, &local->sta_list, list) {
4739		if (sdata != sta->sdata &&
4740		    (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
4741			continue;
4742		ieee80211_check_fast_rx(sta);
4743	}
4744}
4745
4746void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
4747{
4748	struct ieee80211_local *local = sdata->local;
4749
4750	lockdep_assert_wiphy(local->hw.wiphy);
4751
4752	__ieee80211_check_fast_rx_iface(sdata);
4753}
4754
4755static void ieee80211_rx_8023(struct ieee80211_rx_data *rx,
4756			      struct ieee80211_fast_rx *fast_rx,
4757			      int orig_len)
4758{
4759	struct ieee80211_sta_rx_stats *stats;
4760	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
4761	struct sta_info *sta = rx->sta;
4762	struct link_sta_info *link_sta;
4763	struct sk_buff *skb = rx->skb;
4764	void *sa = skb->data + ETH_ALEN;
4765	void *da = skb->data;
4766
4767	if (rx->link_id >= 0) {
4768		link_sta = rcu_dereference(sta->link[rx->link_id]);
4769		if (WARN_ON_ONCE(!link_sta)) {
4770			dev_kfree_skb(rx->skb);
4771			return;
4772		}
4773	} else {
4774		link_sta = &sta->deflink;
4775	}
4776
4777	stats = &link_sta->rx_stats;
4778	if (fast_rx->uses_rss)
4779		stats = this_cpu_ptr(link_sta->pcpu_rx_stats);
4780
4781	/* statistics part of ieee80211_rx_h_sta_process() */
4782	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
4783		stats->last_signal = status->signal;
4784		if (!fast_rx->uses_rss)
4785			ewma_signal_add(&link_sta->rx_stats_avg.signal,
4786					-status->signal);
4787	}
4788
4789	if (status->chains) {
4790		int i;
4791
4792		stats->chains = status->chains;
4793		for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
4794			int signal = status->chain_signal[i];
4795
4796			if (!(status->chains & BIT(i)))
4797				continue;
4798
4799			stats->chain_signal_last[i] = signal;
4800			if (!fast_rx->uses_rss)
4801				ewma_signal_add(&link_sta->rx_stats_avg.chain_signal[i],
4802						-signal);
4803		}
4804	}
4805	/* end of statistics */
4806
4807	stats->last_rx = jiffies;
4808	stats->last_rate = sta_stats_encode_rate(status);
4809
4810	stats->fragments++;
4811	stats->packets++;
4812
4813	skb->dev = fast_rx->dev;
4814
4815	dev_sw_netstats_rx_add(fast_rx->dev, skb->len);
4816
4817	/* The seqno index has the same property as needed
4818	 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
4819	 * for non-QoS-data frames. Here we know it's a data
4820	 * frame, so count MSDUs.
4821	 */
4822	u64_stats_update_begin(&stats->syncp);
4823	stats->msdu[rx->seqno_idx]++;
4824	stats->bytes += orig_len;
4825	u64_stats_update_end(&stats->syncp);
4826
4827	if (fast_rx->internal_forward) {
4828		struct sk_buff *xmit_skb = NULL;
4829		if (is_multicast_ether_addr(da)) {
4830			xmit_skb = skb_copy(skb, GFP_ATOMIC);
4831		} else if (!ether_addr_equal(da, sa) &&
4832			   sta_info_get(rx->sdata, da)) {
4833			xmit_skb = skb;
4834			skb = NULL;
4835		}
4836
4837		if (xmit_skb) {
4838			/*
4839			 * Send to wireless media and increase priority by 256
4840			 * to keep the received priority instead of
4841			 * reclassifying the frame (see cfg80211_classify8021d).
4842			 */
4843			xmit_skb->priority += 256;
4844			xmit_skb->protocol = htons(ETH_P_802_3);
4845			skb_reset_network_header(xmit_skb);
4846			skb_reset_mac_header(xmit_skb);
4847			dev_queue_xmit(xmit_skb);
4848		}
4849
4850		if (!skb)
4851			return;
4852	}
4853
4854	/* deliver to local stack */
4855	skb->protocol = eth_type_trans(skb, fast_rx->dev);
4856	ieee80211_deliver_skb_to_local_stack(skb, rx);
4857}
4858
4859static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
4860				     struct ieee80211_fast_rx *fast_rx)
4861{
4862	struct sk_buff *skb = rx->skb;
4863	struct ieee80211_hdr *hdr = (void *)skb->data;
4864	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4865	ieee80211_rx_result res;
4866	int orig_len = skb->len;
4867	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
4868	int snap_offs = hdrlen;
4869	struct {
4870		u8 snap[sizeof(rfc1042_header)];
4871		__be16 proto;
4872	} *payload __aligned(2);
4873	struct {
4874		u8 da[ETH_ALEN];
4875		u8 sa[ETH_ALEN];
4876	} addrs __aligned(2);
4877	struct ieee80211_sta_rx_stats *stats;
4878
4879	/* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
4880	 * to a common data structure; drivers can implement that per queue
4881	 * but we don't have that information in mac80211
4882	 */
4883	if (!(status->flag & RX_FLAG_DUP_VALIDATED))
4884		return false;
4885
4886#define FAST_RX_CRYPT_FLAGS	(RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED)
4887
4888	/* If using encryption, we also need to have:
4889	 *  - PN_VALIDATED: similar, but the implementation is tricky
4890	 *  - DECRYPTED: necessary for PN_VALIDATED
4891	 */
4892	if (fast_rx->key &&
4893	    (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS)
4894		return false;
4895
4896	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
4897		return false;
4898
4899	if (unlikely(ieee80211_is_frag(hdr)))
4900		return false;
4901
4902	/* Since our interface address cannot be multicast, this
4903	 * implicitly also rejects multicast frames without the
4904	 * explicit check.
4905	 *
4906	 * We shouldn't get any *data* frames not addressed to us
4907	 * (AP mode will accept multicast *management* frames), but
4908	 * punting here will make it go through the full checks in
4909	 * ieee80211_accept_frame().
4910	 */
4911	if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1))
4912		return false;
4913
4914	if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
4915					      IEEE80211_FCTL_TODS)) !=
4916	    fast_rx->expected_ds_bits)
4917		return false;
4918
4919	/* assign the key to drop unencrypted frames (later)
4920	 * and strip the IV/MIC if necessary
4921	 */
4922	if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) {
4923		/* GCMP header length is the same */
4924		snap_offs += IEEE80211_CCMP_HDR_LEN;
4925	}
4926
4927	if (!ieee80211_vif_is_mesh(&rx->sdata->vif) &&
4928	    !(status->rx_flags & IEEE80211_RX_AMSDU)) {
4929		if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
4930			return false;
4931
4932		payload = (void *)(skb->data + snap_offs);
4933
4934		if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr))
4935			return false;
4936
4937		/* Don't handle these here since they require special code.
4938		 * Accept AARP and IPX even though they should come with a
4939		 * bridge-tunnel header - but if we get them this way then
4940		 * there's little point in discarding them.
4941		 */
4942		if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) ||
4943			     payload->proto == fast_rx->control_port_protocol))
4944			return false;
4945	}
4946
4947	/* after this point, don't punt to the slowpath! */
4948
4949	if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) &&
4950	    pskb_trim(skb, skb->len - fast_rx->icv_len))
4951		goto drop;
4952
4953	if (rx->key && !ieee80211_has_protected(hdr->frame_control))
4954		goto drop;
4955
4956	if (status->rx_flags & IEEE80211_RX_AMSDU) {
4957		if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) !=
4958		    RX_QUEUED)
4959			goto drop;
4960
4961		return true;
4962	}
4963
4964	/* do the header conversion - first grab the addresses */
4965	ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
4966	ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
4967	if (ieee80211_vif_is_mesh(&rx->sdata->vif)) {
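		/*
		 * For mesh, keep the LLC/SNAP header and write the remaining
		 * payload length into the two bytes in front of it, so that
		 * ieee80211_rx_mesh_data() sees an 802.3-style frame.
		 */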
4968		skb_pull(skb, snap_offs - 2);
4969		put_unaligned_be16(skb->len - 2, skb->data);
4970	} else {
4971		skb_postpull_rcsum(skb, skb->data + snap_offs,
4972				   sizeof(rfc1042_header) + 2);
4973
4974		/* remove the SNAP but leave the ethertype */
4975		skb_pull(skb, snap_offs + sizeof(rfc1042_header));
4976	}
4977	/* push the addresses in front */
4978	memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs));
4979
4980	res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb);
4981	switch (res) {
4982	case RX_QUEUED:
4983		return true;
4984	case RX_CONTINUE:
4985		break;
4986	default:
4987		goto drop;
4988	}
4989
4990	ieee80211_rx_8023(rx, fast_rx, orig_len);
4991
4992	return true;
4993 drop:
4994	dev_kfree_skb(skb);
4995
4996	if (fast_rx->uses_rss)
4997		stats = this_cpu_ptr(rx->link_sta->pcpu_rx_stats);
4998	else
4999		stats = &rx->link_sta->rx_stats;
5000
5001	stats->dropped++;
5002	return true;
5003}
5004
5005/*
5006 * This function returns whether or not the SKB
5007 * was destined for RX processing; if consume is
5008 * true, that is equivalent to whether or not
5009 * the skb was consumed.
5010 */
5011static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
5012					    struct sk_buff *skb, bool consume)
5013{
5014	struct ieee80211_local *local = rx->local;
5015	struct ieee80211_sub_if_data *sdata = rx->sdata;
5016	struct ieee80211_hdr *hdr = (void *)skb->data;
5017	struct link_sta_info *link_sta = rx->link_sta;
5018	struct ieee80211_link_data *link = rx->link;
5019
5020	rx->skb = skb;
5021
5022	/* See if we can do fast-rx; if we have to copy we already lost,
5023	 * so punt in that case. We should never have to deliver a data
5024	 * frame to multiple interfaces anyway.
5025	 *
5026	 * We skip the ieee80211_accept_frame() call and do the necessary
5027	 * checking inside ieee80211_invoke_fast_rx().
5028	 */
5029	if (consume && rx->sta) {
5030		struct ieee80211_fast_rx *fast_rx;
5031
5032		fast_rx = rcu_dereference(rx->sta->fast_rx);
5033		if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx))
5034			return true;
5035	}
5036
5037	if (!ieee80211_accept_frame(rx))
5038		return false;
5039
5040	if (!consume) {
5041		struct skb_shared_hwtstamps *shwt;
5042
5043		rx->skb = skb_copy(skb, GFP_ATOMIC);
5044		if (!rx->skb) {
5045			if (net_ratelimit())
5046				wiphy_debug(local->hw.wiphy,
5047					"failed to copy skb for %s\n",
5048					sdata->name);
5049			return true;
5050		}
5051
5052		/* skb_copy() does not copy the hw timestamps, so copy it
5053		 * explicitly
5054		 */
5055		shwt = skb_hwtstamps(rx->skb);
5056		shwt->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
5057
5058		/* Update the hdr pointer to the new skb for translation below */
5059		hdr = (struct ieee80211_hdr *)rx->skb->data;
5060	}
5061
5062	if (unlikely(rx->sta && rx->sta->sta.mlo) &&
5063	    is_unicast_ether_addr(hdr->addr1) &&
5064	    !ieee80211_is_probe_resp(hdr->frame_control) &&
5065	    !ieee80211_is_beacon(hdr->frame_control)) {
5066		/* translate to MLD addresses */
5067		if (ether_addr_equal(link->conf->addr, hdr->addr1))
5068			ether_addr_copy(hdr->addr1, rx->sdata->vif.addr);
5069		if (ether_addr_equal(link_sta->addr, hdr->addr2))
5070			ether_addr_copy(hdr->addr2, rx->sta->addr);
5071		/* translate A3 only if it's the BSSID */
5072		if (!ieee80211_has_tods(hdr->frame_control) &&
5073		    !ieee80211_has_fromds(hdr->frame_control)) {
5074			if (ether_addr_equal(link_sta->addr, hdr->addr3))
5075				ether_addr_copy(hdr->addr3, rx->sta->addr);
5076			else if (ether_addr_equal(link->conf->addr, hdr->addr3))
5077				ether_addr_copy(hdr->addr3, rx->sdata->vif.addr);
5078		}
5079		/* not needed for A4 since it can only carry the SA */
5080	}
5081
5082	ieee80211_invoke_rx_handlers(rx);
5083	return true;
5084}
5085
5086static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
5087				       struct ieee80211_sta *pubsta,
5088				       struct sk_buff *skb,
5089				       struct list_head *list)
5090{
5091	struct ieee80211_local *local = hw_to_local(hw);
5092	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
5093	struct ieee80211_fast_rx *fast_rx;
5094	struct ieee80211_rx_data rx;
5095	struct sta_info *sta;
5096	int link_id = -1;
5097
5098	memset(&rx, 0, sizeof(rx));
5099	rx.skb = skb;
5100	rx.local = local;
5101	rx.list = list;
5102	rx.link_id = -1;
5103
5104	I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
5105
5106	/* drop frame if too short for header */
5107	if (skb->len < sizeof(struct ethhdr))
5108		goto drop;
5109
5110	if (!pubsta)
5111		goto drop;
5112
5113	if (status->link_valid)
5114		link_id = status->link_id;
5115
5116	/*
5117	 * TODO: Should the frame be dropped if the right link_id is not
5118	 * available? Or is it fine to proceed with the frame processing
5119	 * as-is, since the frame is already in 802.3 format, the link_id
5120	 * is only used for statistics, and updating the stats on the
5121	 * deflink is good enough?
5122	 */
5123	sta = container_of(pubsta, struct sta_info, sta);
5124	if (!ieee80211_rx_data_set_sta(&rx, sta, link_id))
5125		goto drop;
5126
5127	fast_rx = rcu_dereference(rx.sta->fast_rx);
5128	if (!fast_rx)
5129		goto drop;
5130
5131	ieee80211_rx_8023(&rx, fast_rx, skb->len);
5132	return;
5133
5134drop:
5135	dev_kfree_skb(skb);
5136}
5137
5138static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
5139				       struct sk_buff *skb, bool consume)
5140{
5141	struct link_sta_info *link_sta;
5142	struct ieee80211_hdr *hdr = (void *)skb->data;
5143	struct sta_info *sta;
5144	int link_id = -1;
5145
5146	/*
5147	 * Look up the link station first, since a station
5148	 * might have a link address that is identical to
5149	 * the MLD address; that way we'll have the link
5150	 * information if it's needed.
5151	 */
5152	link_sta = link_sta_info_get_bss(rx->sdata, hdr->addr2);
5153	if (link_sta) {
5154		sta = link_sta->sta;
5155		link_id = link_sta->link_id;
5156	} else {
5157		struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
5158
5159		sta = sta_info_get_bss(rx->sdata, hdr->addr2);
5160		if (status->link_valid)
5161			link_id = status->link_id;
5162	}
5163
5164	if (!ieee80211_rx_data_set_sta(rx, sta, link_id))
5165		return false;
5166
5167	return ieee80211_prepare_and_rx_handle(rx, skb, consume);
5168}
5169
5170/*
5171 * This is the actual Rx frame handler. As it belongs to the Rx path it must
5172 * be called with rcu_read_lock protection.
5173 */
5174static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
5175					 struct ieee80211_sta *pubsta,
5176					 struct sk_buff *skb,
5177					 struct list_head *list)
5178{
5179	struct ieee80211_local *local = hw_to_local(hw);
5180	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
5181	struct ieee80211_sub_if_data *sdata;
5182	struct ieee80211_hdr *hdr;
5183	__le16 fc;
5184	struct ieee80211_rx_data rx;
5185	struct ieee80211_sub_if_data *prev;
5186	struct rhlist_head *tmp;
5187	int err = 0;
5188
5189	fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
5190	memset(&rx, 0, sizeof(rx));
5191	rx.skb = skb;
5192	rx.local = local;
5193	rx.list = list;
5194	rx.link_id = -1;
5195
5196	if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
5197		I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
5198
5199	if (ieee80211_is_mgmt(fc)) {
5200		/* drop frame if too short for header */
5201		if (skb->len < ieee80211_hdrlen(fc))
5202			err = -ENOBUFS;
5203		else
5204			err = skb_linearize(skb);
5205	} else {
5206		err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
5207	}
5208
5209	if (err) {
5210		dev_kfree_skb(skb);
5211		return;
5212	}
5213
5214	hdr = (struct ieee80211_hdr *)skb->data;
5215	ieee80211_parse_qos(&rx);
5216	ieee80211_verify_alignment(&rx);
5217
5218	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
5219		     ieee80211_is_beacon(hdr->frame_control) ||
5220		     ieee80211_is_s1g_beacon(hdr->frame_control)))
5221		ieee80211_scan_rx(local, skb);
5222
5223	if (ieee80211_is_data(fc)) {
5224		struct sta_info *sta, *prev_sta;
5225		int link_id = -1;
5226
5227		if (status->link_valid)
5228			link_id = status->link_id;
5229
5230		if (pubsta) {
5231			sta = container_of(pubsta, struct sta_info, sta);
5232			if (!ieee80211_rx_data_set_sta(&rx, sta, link_id))
5233				goto out;
5234
5235			/*
5236			 * For an MLO connection, fetch the link_id using addr2
5237			 * when the driver does not pass a link_id in the status.
5238			 * When the address translation has already been performed
5239			 * by the driver/hw, a valid link_id must be passed in
5240			 * the status.
5241			 */
5242
5243			if (!status->link_valid && pubsta->mlo) {
5244				struct link_sta_info *link_sta;
5245
5246				link_sta = link_sta_info_get_bss(rx.sdata,
5247								 hdr->addr2);
5248				if (!link_sta)
5249					goto out;
5250
5251				ieee80211_rx_data_set_link(&rx, link_sta->link_id);
5252			}
5253
5254			if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
5255				return;
5256			goto out;
5257		}
5258
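		/*
		 * More than one station may match the transmitter address
		 * (one per interface); give copies to all but the last
		 * match and let the last one consume the original skb.
		 * MLO stations are not handled here unless the driver
		 * passed a valid link ID.
		 */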
5259		prev_sta = NULL;
5260
5261		for_each_sta_info(local, hdr->addr2, sta, tmp) {
5262			if (!prev_sta) {
5263				prev_sta = sta;
5264				continue;
5265			}
5266
5267			rx.sdata = prev_sta->sdata;
5268			if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id))
5269				goto out;
5270
5271			if (!status->link_valid && prev_sta->sta.mlo)
5272				continue;
5273
5274			ieee80211_prepare_and_rx_handle(&rx, skb, false);
5275
5276			prev_sta = sta;
5277		}
5278
5279		if (prev_sta) {
5280			rx.sdata = prev_sta->sdata;
5281			if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id))
5282				goto out;
5283
5284			if (!status->link_valid && prev_sta->sta.mlo)
5285				goto out;
5286
5287			if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
5288				return;
5289			goto out;
5290		}
5291	}
5292
5293	prev = NULL;
5294
5295	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
5296		if (!ieee80211_sdata_running(sdata))
5297			continue;
5298
5299		if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
5300		    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
5301			continue;
5302
5303		/*
5304		 * The frame is destined for this interface; defer handling
5305		 * by one iteration so the last matching interface is handled
5306		 * after the loop and can consume the SKB without an extra copy.
5307		 */

		if (!prev) {
			prev = sdata;
			continue;
		}

		rx.sdata = prev;
		ieee80211_rx_for_interface(&rx, skb, false);

		prev = sdata;
	}

	if (prev) {
		rx.sdata = prev;

		if (ieee80211_rx_for_interface(&rx, skb, true))
			return;
	}

 out:
	dev_kfree_skb(skb);
}

/*
 * This is the receive path handler. It is called by a low-level driver when an
 * 802.11 MPDU is received from the hardware.
 */
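/*
 * Illustrative usage sketch only (not from any real driver), mirroring what
 * ieee80211_rx_napi() below does: call with the RCU read lock held and BHs
 * disabled, then deliver the resulting list:
 *
 *	LIST_HEAD(list);
 *
 *	rcu_read_lock();
 *	ieee80211_rx_list(hw, pubsta, skb, &list);
 *	rcu_read_unlock();
 *	netif_receive_skb_list(&list);
 */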
void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
		       struct sk_buff *skb, struct list_head *list)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_rate *rate = NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	WARN_ON_ONCE(softirq_count() == 0);

	if (WARN_ON(status->band >= NUM_NL80211_BANDS))
		goto drop;

	sband = local->hw.wiphy->bands[status->band];
	if (WARN_ON(!sband))
		goto drop;

	/*
	 * If we're suspending, it is possible (although not too likely)
	 * that we'd be receiving frames after having already partially
	 * quiesced the stack. We can't process such frames then, since
	 * that might, for example, cause stations to be added or other
	 * driver callbacks to be invoked.
	 */
	if (unlikely(local->quiescing || local->suspended))
		goto drop;

	/* We might be in a HW reconfig; prevent Rx for the same reason */
	if (unlikely(local->in_reconfig))
		goto drop;

	/*
	 * The same applies when we're not even started yet,
	 * but that case is worth a warning.
	 */
	if (WARN_ON(!local->started))
		goto drop;

	if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
		/*
		 * Validate the rate, unless a PLCP error means that
		 * we probably can't have a valid rate here anyway.
		 */

		switch (status->encoding) {
		case RX_ENC_HT:
			/*
			 * rate_idx is MCS index, which can be [0-76]
			 * as documented on:
			 *
			 * https://wireless.wiki.kernel.org/en/developers/Documentation/ieee80211/802.11n
			 *
			 * Anything else would be some sort of driver or
			 * hardware error. The driver should catch hardware
			 * errors.
			 */
			if (WARN(status->rate_idx > 76,
				 "Rate marked as an HT rate but passed "
				 "status->rate_idx is not "
				 "an MCS index [0-76]: %d (0x%02x)\n",
				 status->rate_idx,
				 status->rate_idx))
				goto drop;
			break;
		case RX_ENC_VHT:
			if (WARN_ONCE(status->rate_idx > 11 ||
				      !status->nss ||
				      status->nss > 8,
				      "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
				      status->rate_idx, status->nss))
				goto drop;
			break;
		case RX_ENC_HE:
			if (WARN_ONCE(status->rate_idx > 11 ||
				      !status->nss ||
				      status->nss > 8,
				      "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n",
				      status->rate_idx, status->nss))
				goto drop;
			break;
		case RX_ENC_EHT:
			if (WARN_ONCE(status->rate_idx > 15 ||
				      !status->nss ||
				      status->nss > 8 ||
				      status->eht.gi > NL80211_RATE_INFO_EHT_GI_3_2,
				      "Rate marked as an EHT rate but data is invalid: MCS:%d, NSS:%d, GI:%d\n",
				      status->rate_idx, status->nss, status->eht.gi))
				goto drop;
			break;
		default:
			WARN_ON_ONCE(1);
			fallthrough;
		case RX_ENC_LEGACY:
			if (WARN_ON(status->rate_idx >= sband->n_bitrates))
				goto drop;
			rate = &sband->bitrates[status->rate_idx];
		}
	}
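
	/*
	 * Illustrative sketch only (not taken from any real driver): the
	 * checks above expect the driver to have filled in the rate info
	 * in struct ieee80211_rx_status, e.g. for an HT MCS 7 frame
	 * roughly:
	 *
	 *	status->encoding = RX_ENC_HT;
	 *	status->rate_idx = 7;	(the MCS index, valid range 0-76)
	 *
	 * while for legacy rates, rate_idx indexes the band's bitrates
	 * table as used just above.
	 */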

	if (WARN_ON_ONCE(status->link_id >= IEEE80211_LINK_UNSPECIFIED))
		goto drop;

	status->rx_flags = 0;

	kcov_remote_start_common(skb_get_kcov_handle(skb));

	/*
	 * Frames with a failed FCS/PLCP checksum are not returned;
	 * all other frames are returned with the radiotap header
	 * removed if one was previously present.
	 * Frames shorter than 16 bytes are also dropped.
	 */
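	/*
	 * Descriptive note: RX_FLAG_8023 indicates the driver/hardware
	 * already decapsulated the frame to 802.3 (Ethernet) format, so
	 * monitor/radiotap handling is skipped and the frame takes the
	 * 802.3 fast path below.
	 */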
	if (!(status->flag & RX_FLAG_8023))
		skb = ieee80211_rx_monitor(local, skb, rate);
	if (skb) {
		if ((status->flag & RX_FLAG_8023) ||
			ieee80211_is_data_present(hdr->frame_control))
			ieee80211_tpt_led_trig_rx(local, skb->len);

		if (status->flag & RX_FLAG_8023)
			__ieee80211_rx_handle_8023(hw, pubsta, skb, list);
		else
			__ieee80211_rx_handle_packet(hw, pubsta, skb, list);
	}

	kcov_remote_stop();
	return;
 drop:
	kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_rx_list);

void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
		       struct sk_buff *skb, struct napi_struct *napi)
{
	struct sk_buff *tmp;
	LIST_HEAD(list);

	/*
	 * key references and virtual interfaces are protected using RCU
	 * and this requires that we are in a read-side RCU section during
	 * receive processing
	 */
	rcu_read_lock();
	ieee80211_rx_list(hw, pubsta, skb, &list);
	rcu_read_unlock();

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}
EXPORT_SYMBOL(ieee80211_rx_napi);

/* This is a version of the RX handler that can be called from hard IRQ
 * context. It queues the skb and schedules the RX tasklet. */
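/*
 * Illustrative only (hypothetical driver interrupt handler, not from any
 * real driver):
 *
 *	static irqreturn_t mydrv_isr(int irq, void *data)
 *	{
 *		struct ieee80211_hw *hw = data;
 *		struct sk_buff *skb = mydrv_fetch_rx_frame(hw);
 *
 *		if (skb)
 *			ieee80211_rx_irqsafe(hw, skb);
 *		return IRQ_HANDLED;
 *	}
 */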
void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_local *local = hw_to_local(hw);

	BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));

	skb->pkt_type = IEEE80211_RX_MSG;
	skb_queue_tail(&local->skb_queue, skb);
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_rx_irqsafe);
