1// SPDX-License-Identifier: ISC
2/*
3 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4 */
5#include <linux/sched.h>
6#include <linux/of.h>
7#include "mt76.h"
8
/* Initializer helpers for the static channel tables below. Each expands
 * to an ieee80211_channel initializer for the given hardware channel
 * index (_idx) and center frequency in MHz (_freq); max_power is set to
 * 30 dBm for every channel.
 */
#define CHAN2G(_idx, _freq) {			\
	.band = NL80211_BAND_2GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN5G(_idx, _freq) {			\
	.band = NL80211_BAND_5GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN6G(_idx, _freq) {			\
	.band = NL80211_BAND_6GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}
29
/* Template 2.4 GHz channel list (channels 1-14). mt76_init_sband()
 * duplicates this into per-device writable memory.
 */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};
46
/* Template 5 GHz channel list, grouped by sub-band (blank lines mark
 * group boundaries). Duplicated per device by mt76_init_sband().
 */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	/* UNII-1: 36-48 */
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	/* UNII-2A: 52-64 */
	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	/* UNII-2C: 100-144 */
	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	/* UNII-3/4: 149-177 */
	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
	CHAN5G(177, 5885),
};
80
/* Template 6 GHz channel list (PSC and non-PSC channels 1-233), grouped
 * by UNII sub-band. Duplicated per device by mt76_init_sband().
 */
static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};
146
/* Throughput-to-blink-interval map for the mac80211 throughput LED
 * trigger (registered in mt76_led_init()): higher traffic levels map to
 * shorter blink_time values, i.e. faster blinking.
 */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput =   0 * 1024, .blink_time = 334 },
	{ .throughput =   1 * 1024, .blink_time = 260 },
	{ .throughput =   5 * 1024, .blink_time = 220 },
	{ .throughput =  10 * 1024, .blink_time = 190 },
	{ .throughput =  20 * 1024, .blink_time = 170 },
	{ .throughput =  50 * 1024, .blink_time = 150 },
	{ .throughput =  70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time =  80 },
	{ .throughput = 300 * 1024, .blink_time =  50 },
};
159
/* Shared legacy rate table: four CCK rates (1/2/5.5/11 Mbit/s) followed
 * by eight OFDM rates (6..54 Mbit/s); the second macro argument is the
 * bitrate in 100 kbit/s units. The 5/6 GHz sbands are registered with
 * "rates + 4" to skip the CCK entries. The first argument is presumably
 * a hardware rate index (CCK_RATE/OFDM_RATE come from mt76.h) -- note
 * the OFDM indices are intentionally non-monotonic.
 */
struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9,  240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8,  480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);
175
/* SAR power-limit frequency ranges (MHz) covering the 2.4, 5 and 6 GHz
 * bands; exposed to userspace via mt76_sar_capa below. A per-range
 * power table is allocated per phy in mt76_phy_init() (phy->frp).
 */
static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
	{ .start_freq = 5945, .end_freq = 6165, },
	{ .start_freq = 6165, .end_freq = 6405, },
	{ .start_freq = 6405, .end_freq = 6525, },
	{ .start_freq = 6525, .end_freq = 6705, },
	{ .start_freq = 6705, .end_freq = 6865, },
	{ .start_freq = 6865, .end_freq = 7125, },
};
189
/* SAR capability advertised on every wiphy (see mt76_phy_init()):
 * power-based limits over the ranges defined above.
 */
static const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};
195
/* Register a LED classdev for @phy, honoring the optional "led" node in
 * the device tree. Returns 0 on success (including when no LED hooks
 * are set or registration is disabled by DT), or a negative errno from
 * led_classdev_register().
 */
static int mt76_led_init(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw = phy->hw;
	struct device_node *np = dev->dev->of_node;

	/* Nothing to register unless the driver provided LED hooks */
	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
		return 0;

	np = of_get_child_by_name(np, "led");
	if (np) {
		/* status = "disabled" in DT suppresses registration */
		if (!of_device_is_available(np)) {
			of_node_put(np);
			dev_info(dev->dev,
				"led registration was explicitly disabled by dts\n");
			return 0;
		}

		/* Pin/polarity overrides are only read for the primary phy */
		if (phy == &dev->phy) {
			int led_pin;

			if (!of_property_read_u32(np, "led-sources", &led_pin))
				phy->leds.pin = led_pin;

			phy->leds.al =
				of_property_read_bool(np, "led-active-low");
		}

		of_node_put(np);
	}

	snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
		 wiphy_name(hw->wiphy));

	phy->leds.cdev.name = phy->leds.name;
	/* Blink according to the throughput table above */
	phy->leds.cdev.default_trigger =
		ieee80211_create_tpt_led_trigger(hw,
					IEEE80211_TPT_LEDTRIG_FL_RADIO,
					mt76_tpt_blink,
					ARRAY_SIZE(mt76_tpt_blink));

	dev_info(dev->dev,
		"registering led '%s'\n", phy->leds.name);

	return led_classdev_register(dev->dev, &phy->leds.cdev);
}
242
243static void mt76_led_cleanup(struct mt76_phy *phy)
244{
245	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
246		return;
247
248	led_classdev_unregister(&phy->leds.cdev);
249}
250
/* Derive HT (and optionally VHT) spatial-stream capabilities for
 * @sband from the number of antennas in phy->antenna_mask. Called both
 * at sband setup and whenever the antenna mask changes (see
 * mt76_set_stream_caps()), so capability bits are cleared as well as
 * set.
 */
static void
mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	/* TX STBC needs at least two chains */
	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	/* Advertise MCS 0-7 per available stream, clear the rest */
	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;

	/* Build the per-stream VHT MCS map: MCS 0-9 for each available
	 * stream, "not supported" for the remaining ones (2 bits each).
	 */
	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
		vht_cap->vht_mcs.tx_highest |=
				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
292
/* Refresh the stream capabilities of every band the phy supports, e.g.
 * after phy->antenna_mask changed. The 2.4 GHz band never advertises
 * VHT; @vht only applies to 5/6 GHz.
 */
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
	if (phy->cap.has_2ghz)
		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
	if (phy->cap.has_5ghz)
		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
	if (phy->cap.has_6ghz)
		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
303
/* Populate @msband: copy the const channel template @chan into writable
 * device-managed memory, allocate per-channel state, wire up the rate
 * table, and fill in HT/VHT capabilities as requested. Returns 0 or
 * -ENOMEM. Allocations are devm-managed, so no explicit cleanup path
 * is needed on failure.
 */
static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates,
		bool ht, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	/* Writable copy of the channel template (flags get modified by
	 * regulatory code later).
	 */
	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	/* Per-channel survey/state storage, indexed like sband->channels */
	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	if (!ht)
		return 0;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	/* Per-stream MCS masks depend on the antenna mask */
	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}
360
361static int
362mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
363		   int n_rates)
364{
365	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
366
367	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
368			       ARRAY_SIZE(mt76_channels_2ghz), rates,
369			       n_rates, true, false);
370}
371
372static int
373mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
374		   int n_rates, bool vht)
375{
376	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
377
378	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
379			       ARRAY_SIZE(mt76_channels_5ghz), rates,
380			       n_rates, true, vht);
381}
382
383static int
384mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
385		   int n_rates)
386{
387	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
388
389	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
390			       ARRAY_SIZE(mt76_channels_6ghz), rates,
391			       n_rates, false, false);
392}
393
394static void
395mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
396		 enum nl80211_band band)
397{
398	struct ieee80211_supported_band *sband = &msband->sband;
399	bool found = false;
400	int i;
401
402	if (!sband)
403		return;
404
405	for (i = 0; i < sband->n_channels; i++) {
406		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
407			continue;
408
409		found = true;
410		break;
411	}
412
413	if (found) {
414		phy->chandef.chan = &sband->channels[0];
415		phy->chan_state = &msband->chan[0];
416		return;
417	}
418
419	sband->n_channels = 0;
420	phy->hw->wiphy->bands[band] = NULL;
421}
422
/* Common ieee80211_hw/wiphy setup shared by mt76_register_device() and
 * mt76_register_phy(): feature flags, antenna info, SAR capability and
 * the per-range SAR power table. Returns 0 or -ENOMEM.
 */
static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	INIT_LIST_HEAD(&phy->tx_list);
	spin_lock_init(&phy->tx_lock);

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	wiphy->available_antennas_tx = phy->antenna_mask;
	wiphy->available_antennas_rx = phy->antenna_mask;

	/* SAR limits: one mt76_freq_range_power entry per advertised
	 * frequency range (see mt76_sar_freq_ranges above).
	 */
	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	/* Drivers may have set a hardware-specific limit already */
	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);

	/* Software A-MSDU aggregation only when the driver does not
	 * offload it and fragment lists are usable.
	 */
	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
	    hw->max_tx_fragments > 1) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}
481
/* Allocate an extra (secondary-band) phy plus @size bytes of driver
 * private data, carried inside a fresh ieee80211_hw. The layout of
 * hw->priv is: [struct mt76_phy, 8-byte aligned][driver priv].
 * Returns NULL on allocation failure. The caller registers the phy
 * with mt76_register_phy().
 */
struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops, u8 band_idx)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
	/* Driver private data lives right after the aligned phy struct */
	phy->priv = hw->priv + phy_size;
	phy->band_idx = band_idx;

	/* Keep in sync with the interface modes in mt76_alloc_device() */
	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
515
/* Register a secondary phy allocated with mt76_alloc_phy(): initialize
 * the hw, set up the supported bands, optionally register LEDs, apply
 * DT frequency limits and finally register with mac80211. @rates must
 * start with the 4 CCK entries; 5/6 GHz use rates + 4. Returns 0 or a
 * negative errno.
 */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	/* Skip the CCK rates on 5/6 GHz */
	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	/* Apply DT limits, then drop bands left without usable channels */
	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	ret = ieee80211_register_hw(phy->hw);
	if (ret)
		return ret;

	set_bit(MT76_STATE_REGISTERED, &phy->state);
	phy->dev->phys[phy->band_idx] = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);
564
/* Tear down a phy registered via mt76_register_phy(). Safe to call on
 * a phy that never completed registration (checked via the
 * MT76_STATE_REGISTERED bit). LED cleanup and tx-status flushing must
 * happen before ieee80211_unregister_hw().
 */
void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(phy);
	/* Flush all pending tx status entries before unregistering */
	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phys[phy->band_idx] = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);
579
/* Create a page pool for @q. Only rx queues and the WED tx-free queue
 * get a pool; other queues return 0 without allocating. Returns 0 on
 * success or a negative errno from page_pool_create().
 */
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
	bool is_qrx = mt76_queue_is_rx(dev, q);
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.nid = NUMA_NO_NODE,
		.dev = dev->dma_dev,
	};
	/* Index into dev->q_rx for rx queues, -1 otherwise */
	int idx = is_qrx ? q - dev->q_rx : -1;

	/* Allocate page_pools just for rx/wed_tx_free queues */
	if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
		return 0;

	/* Data queues get a larger pool than control/status queues */
	switch (idx) {
	case MT_RXQ_MAIN:
	case MT_RXQ_BAND1:
	case MT_RXQ_BAND2:
		pp_params.pool_size = 256;
		break;
	default:
		pp_params.pool_size = 16;
		break;
	}

	if (mt76_is_mmio(dev)) {
		/* rely on page_pool for DMA mapping */
		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pp_params.dma_dir = DMA_FROM_DEVICE;
		pp_params.max_len = PAGE_SIZE;
		pp_params.offset = 0;
		/* NAPI is available just for rx queues */
		if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
			pp_params.napi = &dev->napi[idx];
	}

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		/* Leave a clean NULL rather than an ERR_PTR in the queue */
		q->page_pool = NULL;
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_create_page_pool);
628
/* Allocate the main mt76_dev plus @size bytes of driver private data
 * inside a fresh ieee80211_hw, and initialize all common locks, lists,
 * queues and the primary phy (band MT_BAND0). Returns NULL on failure;
 * on success the caller proceeds to mt76_register_device().
 */
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;
	/* Default: DMA through the same device; drivers may override */
	dev->dma_dev = pdev;

	/* The primary phy is embedded in the device itself */
	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;
	phy->band_idx = MT_BAND0;
	dev->phys[phy->band_idx] = phy;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	spin_lock_init(&dev->wed_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	/* MCU command/response infrastructure */
	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	/* Keep in sync with the interface modes in mt76_alloc_phy() */
	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	spin_lock_init(&dev->rx_token_lock);
	idr_init(&dev->rx_token);

	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	spin_lock_init(&dev->sta_poll_lock);

	INIT_LIST_HEAD(&dev->txwi_cache);
	INIT_LIST_HEAD(&dev->rxwi_cache);
	dev->token_size = dev->drv->token_size;

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
705
/* Register the primary phy of @dev with mac80211: set up the hw, the
 * supported bands, DT frequency limits, LEDs and the tx worker thread.
 * @rates must start with the 4 CCK entries; 5/6 GHz use rates + 4.
 * Returns 0 or a negative errno.
 */
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	/* Catch-all wcid used for frames without a station entry */
	mt76_wcid_init(&dev->global_wcid);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	/* Skip the CCK rates on 5/6 GHz */
	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	/* Apply DT limits, then drop bands left without usable channels */
	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	set_bit(MT76_STATE_REGISTERED, &phy->state);
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);
759
/* Tear down the primary phy of @dev. Safe to call when registration
 * never completed (checked via MT76_STATE_REGISTERED). LED cleanup,
 * tx-status flushing and global-wcid cleanup must precede
 * ieee80211_unregister_hw().
 */
void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(&dev->phy);
	/* Flush all pending tx status entries before unregistering */
	mt76_tx_status_check(dev, true);
	mt76_wcid_cleanup(dev, &dev->global_wcid);
	ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);
774
775void mt76_free_device(struct mt76_dev *dev)
776{
777	mt76_worker_teardown(&dev->tx_worker);
778	if (dev->wq) {
779		destroy_workqueue(dev->wq);
780		dev->wq = NULL;
781	}
782	ieee80211_free_hw(dev->hw);
783}
784EXPORT_SYMBOL_GPL(mt76_free_device);
785
/* Finalize the A-MSDU currently being collected for rx queue @q: detach
 * it from phy->rx_amsdu[q] and either queue it for further rx
 * processing or drop it when the first subframe fails validation.
 */
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			/* Decrypted frames with the IV still present carry
			 * 8 extra bytes before the payload.
			 */
			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		/* An RFC 1042 LLC/SNAP header where the DA should be means
		 * a spoofed/mis-parsed A-MSDU: drop the whole burst.
		 */
		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}
821
/* Collect consecutive A-MSDU subframes for rx queue @q into a single
 * skb frag_list, releasing the assembled burst whenever a subframe
 * starts a new A-MSDU (or the frame is not part of one at all).
 */
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	/* Flush the pending A-MSDU if this frame does not continue it:
	 * not an A-MSDU subframe, explicitly the first subframe, or a
	 * different sequence number.
	 */
	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		/* Start a new burst; tail points into the head skb's
		 * frag_list so subsequent subframes chain onto it.
		 */
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	/* Last subframe (or standalone MSDU): release immediately */
	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}
844
/* Driver entry point for received frames: drop frames for phys that are
 * not running, account testmode rx statistics, and feed the frame into
 * the per-queue A-MSDU reassembly. The skb's cb must already hold a
 * valid struct mt76_rx_status.
 */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	/* Testmode rx counters: total packets and FCS errors per queue */
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
866
867bool mt76_has_tx_pending(struct mt76_phy *phy)
868{
869	struct mt76_queue *q;
870	int i;
871
872	for (i = 0; i < __MT_TXQ_MAX; i++) {
873		q = phy->q_tx[i];
874		if (q && q->queued)
875			return true;
876	}
877
878	return false;
879}
880EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
881
882static struct mt76_channel_state *
883mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
884{
885	struct mt76_sband *msband;
886	int idx;
887
888	if (c->band == NL80211_BAND_2GHZ)
889		msband = &phy->sband_2g;
890	else if (c->band == NL80211_BAND_6GHZ)
891		msband = &phy->sband_6g;
892	else
893		msband = &phy->sband_5g;
894
895	idx = c - &msband->sband.channels[0];
896	return &msband->chan[idx];
897}
898
899void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
900{
901	struct mt76_channel_state *state = phy->chan_state;
902
903	state->cc_active += ktime_to_us(ktime_sub(time,
904						  phy->survey_time));
905	phy->survey_time = time;
906}
907EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
908
/* Refresh channel survey statistics: let the driver update its
 * hardware counters, account active time, and fold in software rx
 * airtime when the driver uses MT_DRV_SW_RX_AIRTIME accounting.
 */
void mt76_update_survey(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(phy);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(phy, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = phy->chan_state;

		/* Move the accumulated rx airtime (written under cc_lock
		 * by mt76_airtime_report) into the channel state.
		 */
		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
930
/* Switch @phy to the channel configured in hw->conf: wait briefly for
 * pending tx to drain, close out survey accounting on the old channel,
 * then update chandef, channel state and DFS state bookkeeping.
 */
void mt76_set_channel(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw = phy->hw;
	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
	int timeout = HZ / 5;

	/* Give tx up to 200ms to drain before switching */
	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(phy);

	/* A frequency or width change invalidates the DFS CAC state */
	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
	    phy->chandef.width != chandef->width)
		phy->dfs_state = MT_DFS_STATE_UNKNOWN;

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);

	/* Remember the operating channel; off-channel visits (e.g. scan)
	 * do not update it.
	 */
	if (!offchannel)
		phy->main_chan = chandef->chan;

	/* Reset stats when temporarily visiting a foreign channel */
	if (chandef->chan != phy->main_chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
}
EXPORT_SYMBOL_GPL(mt76_set_channel);
956
/* mac80211 .get_survey callback: report channel statistics for the
 * channel at global index @idx, where the index space is the
 * concatenation of the 2.4 GHz, 5 GHz and 6 GHz channel lists.
 * Returns 0 on success or -ENOENT past the last channel.
 */
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int ret = 0;

	mutex_lock(&dev->mutex);
	/* Refresh hardware counters once per survey dump (idx == 0) */
	if (idx == 0 && dev->drv->update_survey)
		mt76_update_survey(phy);

	/* Resolve the global index into (band, per-band index).
	 * Order: 2 GHz, then 5 GHz, then 6 GHz.
	 */
	if (idx >= phy->sband_2g.sband.n_channels +
		   phy->sband_5g.sband.n_channels) {
		idx -= (phy->sband_2g.sband.n_channels +
			phy->sband_5g.sband.n_channels);
		sband = &phy->sband_6g;
	} else if (idx >= phy->sband_2g.sband.n_channels) {
		idx -= phy->sband_2g.sband.n_channels;
		sband = &phy->sband_5g;
	} else {
		sband = &phy->sband_2g;
	}

	if (idx >= sband->sband.n_channels) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	/* Drivers declare which extra fields they actually fill */
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	/* Channel-state counters are in microseconds; survey wants ms */
	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	/* cc_bss_rx/cc_tx are updated under cc_lock elsewhere */
	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
1021
/* Initialize software PN (replay) tracking for @wcid from @key's
 * current rx sequence numbers. Only CCMP keys are tracked; for other
 * ciphers (or key removal) PN checking is disabled.
 */
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	wcid->rx_check_pn = true;

	/* data frame */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}

	/* robust management frame: tid -1 retrieves the management PN;
	 * note i == IEEE80211_NUM_TIDS here, so the management PN is
	 * deliberately stored in the extra slot past the TID entries
	 * (matching the security_idx used in mt76_check_ccmp_pn()).
	 */
	ieee80211_get_key_rx_seq(key, -1, &seq);
	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));

}
EXPORT_SYMBOL(mt76_wcid_key_setup);
1050
/* Combine per-chain signal strengths (dBm) into a single value: take
 * the strongest chain and add a small bonus (up to 3 dB) depending on
 * how close the other chains are -- an approximation of power
 * summation in the log domain. Chains not set in @chain_mask and
 * positive (invalid) readings are skipped. Returns -128 when no valid
 * chain exists.
 */
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
{
	int signal = -128;
	u8 chains;

	for (chains = chain_mask; chains; chains >>= 1, chain_signal++) {
		int cur, diff;

		cur = *chain_signal;
		if (!(chains & BIT(0)) ||
		    cur > 0)
			continue;

		/* Keep the stronger value in "signal" */
		if (cur > signal)
			swap(cur, signal);

		/* Bonus: +3 dB for equal chains, less as they diverge */
		diff = signal - cur;
		if (diff == 0)
			signal += 3;
		else if (diff <= 2)
			signal += 2;
		else if (diff <= 6)
			signal += 1;
	}

	return signal;
}
EXPORT_SYMBOL(mt76_rx_signal);
1079
1080static void
1081mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
1082		struct ieee80211_hw **hw,
1083		struct ieee80211_sta **sta)
1084{
1085	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1086	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1087	struct mt76_rx_status mstat;
1088
1089	mstat = *((struct mt76_rx_status *)skb->cb);
1090	memset(status, 0, sizeof(*status));
1091
1092	status->flag = mstat.flag;
1093	status->freq = mstat.freq;
1094	status->enc_flags = mstat.enc_flags;
1095	status->encoding = mstat.encoding;
1096	status->bw = mstat.bw;
1097	if (status->encoding == RX_ENC_EHT) {
1098		status->eht.ru = mstat.eht.ru;
1099		status->eht.gi = mstat.eht.gi;
1100	} else {
1101		status->he_ru = mstat.he_ru;
1102		status->he_gi = mstat.he_gi;
1103		status->he_dcm = mstat.he_dcm;
1104	}
1105	status->rate_idx = mstat.rate_idx;
1106	status->nss = mstat.nss;
1107	status->band = mstat.band;
1108	status->signal = mstat.signal;
1109	status->chains = mstat.chains;
1110	status->ampdu_reference = mstat.ampdu_ref;
1111	status->device_timestamp = mstat.timestamp;
1112	status->mactime = mstat.timestamp;
1113	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1114	if (status->signal <= -128)
1115		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1116
1117	if (ieee80211_is_beacon(hdr->frame_control) ||
1118	    ieee80211_is_probe_resp(hdr->frame_control))
1119		status->boottime_ns = ktime_get_boottime_ns();
1120
1121	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1122	BUILD_BUG_ON(sizeof(status->chain_signal) !=
1123		     sizeof(mstat.chain_signal));
1124	memcpy(status->chain_signal, mstat.chain_signal,
1125	       sizeof(mstat.chain_signal));
1126
1127	*sta = wcid_to_sta(mstat.wcid);
1128	*hw = mt76_phy_hw(dev, mstat.phy_idx);
1129}
1130
/* Software CCMP replay check: compare the frame's PN (status->iv)
 * against the last PN accepted for the same replay counter and mark
 * replayed frames monitor-only so mac80211 drops them from the data
 * path. On success the stored PN is advanced, and PN_VALIDATED is set
 * when the IV was stripped by hardware.
 */
static void
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return;

	if (status->flag & RX_FLAG_ONLY_MONITOR)
		return;

	/* Only stations with software PN tracking enabled (CCMP keys,
	 * see mt76_wcid_key_setup) are checked.
	 */
	if (!wcid || !wcid->rx_check_pn)
		return;

	/* One replay counter per TID for data frames */
	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	/* PN must be strictly increasing: equal or smaller is a replay */
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0) {
		status->flag |= RX_FLAG_ONLY_MONITOR;
		return;
	}

	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;
}
1188
1189static void
1190mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
1191		    int len)
1192{
1193	struct mt76_wcid *wcid = status->wcid;
1194	struct ieee80211_rx_status info = {
1195		.enc_flags = status->enc_flags,
1196		.rate_idx = status->rate_idx,
1197		.encoding = status->encoding,
1198		.band = status->band,
1199		.nss = status->nss,
1200		.bw = status->bw,
1201	};
1202	struct ieee80211_sta *sta;
1203	u32 airtime;
1204	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1205
1206	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
1207	spin_lock(&dev->cc_lock);
1208	dev->cur_cc_bss_rx += airtime;
1209	spin_unlock(&dev->cc_lock);
1210
1211	if (!wcid || !wcid->sta)
1212		return;
1213
1214	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1215	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
1216}
1217
1218static void
1219mt76_airtime_flush_ampdu(struct mt76_dev *dev)
1220{
1221	struct mt76_wcid *wcid;
1222	int wcid_idx;
1223
1224	if (!dev->rx_ampdu_len)
1225		return;
1226
1227	wcid_idx = dev->rx_ampdu_status.wcid_idx;
1228	if (wcid_idx < ARRAY_SIZE(dev->wcid))
1229		wcid = rcu_dereference(dev->wcid[wcid_idx]);
1230	else
1231		wcid = NULL;
1232	dev->rx_ampdu_status.wcid = wcid;
1233
1234	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1235
1236	dev->rx_ampdu_len = 0;
1237	dev->rx_ampdu_ref = 0;
1238}
1239
/* Account RX airtime for one received frame.
 *
 * Frames that are part of an A-MPDU are batched and reported together
 * (via mt76_airtime_flush_ampdu()) when a frame from a different A-MPDU
 * arrives; all other frames are reported immediately.
 */
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	/* Only needed when the driver accounts RX airtime in software */
	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		if (status->flag & RX_FLAG_8023)
			return;

		/* Frame from an unknown station: only count it if it is
		 * addressed to us
		 */
		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	/* Not an A-MPDU subframe, or a new A-MPDU started: flush the
	 * previous batch first
	 */
	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			/* First subframe: snapshot the rate info for the
			 * batch; store the wcid index (0xff = none) rather
			 * than the pointer, since the station may be
			 * removed before the batch is flushed
			 */
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}
1279
/* Per-frame station bookkeeping on the RX path: wcid resolution for
 * PS-Poll, airtime accounting, RSSI averaging, and powersave/UAPSD
 * state tracking based on the frame's PM bit.
 */
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->phy_idx);
	/* PS-Poll frames may arrive without a resolved wcid; look the
	 * station up by its transmitter address
	 */
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	/* signal is non-positive (dBm); average the magnitude */
	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	/* Only evaluate the PM bit of complete mgmt/data frames */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	/* No powersave state change */
	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	/* The flag is set before the driver callback on PS entry and
	 * cleared after it on PS exit, so MT_WCID_FLAG_PS is always set
	 * while dev->drv->sta_ps() runs
	 */
	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);

	if (dev->drv->sta_ps)
		dev->drv->sta_ps(dev, sta, ps);

	if (!ps)
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	ieee80211_sta_ps_transition(sta, ps);
}
1347
/* Deliver a batch of fully processed RX frames to mac80211.
 *
 * Frames are converted and collected into a local list under
 * dev->rx_lock, then handed to the network stack outside the lock -
 * via GRO when called from a NAPI context, otherwise through
 * netif_receive_skb_list().
 */
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		/* A-MSDU subframes are chained on frag_list; detach the
		 * chain and deliver each skb individually
		 */
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		mt76_check_ccmp_pn(skb);
		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}
1387
1388void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1389			   struct napi_struct *napi)
1390{
1391	struct sk_buff_head frames;
1392	struct sk_buff *skb;
1393
1394	__skb_queue_head_init(&frames);
1395
1396	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1397		mt76_check_sta(dev, skb);
1398		if (mtk_wed_device_active(&dev->mmio.wed))
1399			__skb_queue_tail(&frames, skb);
1400		else
1401			mt76_rx_aggr_reorder(skb, &frames);
1402	}
1403
1404	mt76_rx_complete(dev, &frames, napi);
1405}
1406EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1407
/* Allocate and publish driver state for a new station.
 *
 * Calls the driver's sta_add hook, binds the station's TX queues to its
 * wcid index and publishes the wcid via RCU so the RX/status paths can
 * find it.  Called for the NOTEXIST -> NONE transition from
 * mt76_sta_state().
 */
static int
mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct mt76_dev *dev = phy->dev;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	/* Point each of the station's TX queues at its wcid slot */
	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid->idx;
	}

	ewma_signal_init(&wcid->rssi);
	if (phy->band_idx == MT_BAND1)
		mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
	wcid->phy_idx = phy->band_idx;
	/* NOTE(review): the wcid is published via RCU before
	 * mt76_wcid_init() sets up tx_list/pktid below - confirm no
	 * concurrent path touches those fields in that window
	 */
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);

	mt76_wcid_init(wcid);
out:
	mutex_unlock(&dev->mutex);

	return ret;
}
1445
/* Tear down driver state for a station; counterpart of mt76_sta_add().
 * Caller must hold dev->mutex (see mt76_sta_remove()).
 */
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int i, idx = wcid->idx;

	/* Stop RX aggregation reordering on all TIDs first */
	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
		mt76_rx_aggr_stop(dev, wcid, i);

	if (dev->drv->sta_remove)
		dev->drv->sta_remove(dev, vif, sta);

	/* Free any TX frames/status entries still tied to this wcid */
	mt76_wcid_cleanup(dev, wcid);

	mt76_wcid_mask_clear(dev->wcid_mask, idx);
	mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1464
/* Serialized wrapper around __mt76_sta_remove() */
static void
mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	mutex_lock(&dev->mutex);
	__mt76_sta_remove(dev, vif, sta);
	mutex_unlock(&dev->mutex);
}
1473
1474int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1475		   struct ieee80211_sta *sta,
1476		   enum ieee80211_sta_state old_state,
1477		   enum ieee80211_sta_state new_state)
1478{
1479	struct mt76_phy *phy = hw->priv;
1480	struct mt76_dev *dev = phy->dev;
1481
1482	if (old_state == IEEE80211_STA_NOTEXIST &&
1483	    new_state == IEEE80211_STA_NONE)
1484		return mt76_sta_add(phy, vif, sta);
1485
1486	if (old_state == IEEE80211_STA_AUTH &&
1487	    new_state == IEEE80211_STA_ASSOC &&
1488	    dev->drv->sta_assoc)
1489		dev->drv->sta_assoc(dev, vif, sta);
1490
1491	if (old_state == IEEE80211_STA_NONE &&
1492	    new_state == IEEE80211_STA_NOTEXIST)
1493		mt76_sta_remove(dev, vif, sta);
1494
1495	return 0;
1496}
1497EXPORT_SYMBOL_GPL(mt76_sta_state);
1498
1499void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1500			     struct ieee80211_sta *sta)
1501{
1502	struct mt76_phy *phy = hw->priv;
1503	struct mt76_dev *dev = phy->dev;
1504	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1505
1506	mutex_lock(&dev->mutex);
1507	spin_lock_bh(&dev->status_lock);
1508	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
1509	spin_unlock_bh(&dev->status_lock);
1510	mutex_unlock(&dev->mutex);
1511}
1512EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1513
1514void mt76_wcid_init(struct mt76_wcid *wcid)
1515{
1516	INIT_LIST_HEAD(&wcid->tx_list);
1517	skb_queue_head_init(&wcid->tx_pending);
1518
1519	INIT_LIST_HEAD(&wcid->list);
1520	idr_init(&wcid->pktid);
1521}
1522EXPORT_SYMBOL_GPL(mt76_wcid_init);
1523
/* Release all TX state still associated with a wcid: status-tracked
 * frames, the packet-id allocator and frames still queued for
 * transmission.  Collected skbs are returned via ieee80211_free_txskb().
 */
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	struct mt76_phy *phy = dev->phys[wcid->phy_idx];
	struct ieee80211_hw *hw;
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* Pull all frames awaiting TX status for this wcid (-1 = any
	 * packet id)
	 */
	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, -1, &list);
	mt76_tx_status_unlock(dev, &list);

	idr_destroy(&wcid->pktid);

	spin_lock_bh(&phy->tx_lock);

	/* Detach the wcid from the phy's TX scheduling list */
	if (!list_empty(&wcid->tx_list))
		list_del_init(&wcid->tx_list);

	/* Move not-yet-transmitted frames onto the same local list;
	 * tx_pending.lock nests inside phy->tx_lock here
	 */
	spin_lock(&wcid->tx_pending.lock);
	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
	spin_unlock(&wcid->tx_pending.lock);

	spin_unlock_bh(&phy->tx_lock);

	while ((skb = __skb_dequeue(&list)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
1554
1555int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1556		     int *dbm)
1557{
1558	struct mt76_phy *phy = hw->priv;
1559	int n_chains = hweight16(phy->chainmask);
1560	int delta = mt76_tx_power_nss_delta(n_chains);
1561
1562	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1563
1564	return 0;
1565}
1566EXPORT_SYMBOL_GPL(mt76_get_txpower);
1567
1568int mt76_init_sar_power(struct ieee80211_hw *hw,
1569			const struct cfg80211_sar_specs *sar)
1570{
1571	struct mt76_phy *phy = hw->priv;
1572	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
1573	int i;
1574
1575	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
1576		return -EINVAL;
1577
1578	for (i = 0; i < sar->num_sub_specs; i++) {
1579		u32 index = sar->sub_specs[i].freq_range_index;
1580		/* SAR specifies power limitaton in 0.25dbm */
1581		s32 power = sar->sub_specs[i].power >> 1;
1582
1583		if (power > 127 || power < -127)
1584			power = 127;
1585
1586		phy->frp[index].range = &capa->freq_ranges[index];
1587		phy->frp[index].power = power;
1588	}
1589
1590	return 0;
1591}
1592EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1593
1594int mt76_get_sar_power(struct mt76_phy *phy,
1595		       struct ieee80211_channel *chan,
1596		       int power)
1597{
1598	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1599	int freq, i;
1600
1601	if (!capa || !phy->frp)
1602		return power;
1603
1604	if (power > 127 || power < -127)
1605		power = 127;
1606
1607	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1608	for (i = 0 ; i < capa->num_freq_ranges; i++) {
1609		if (phy->frp[i].range &&
1610		    freq >= phy->frp[i].range->start_freq &&
1611		    freq < phy->frp[i].range->end_freq) {
1612			power = min_t(int, phy->frp[i].power, power);
1613			break;
1614		}
1615	}
1616
1617	return power;
1618}
1619EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1620
1621static void
1622__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1623{
1624	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
1625		ieee80211_csa_finish(vif, 0);
1626}
1627
/* Finalize a pending channel switch on all active interfaces once
 * mt76_csa_check() has flagged completion in dev->csa_complete.
 */
void mt76_csa_finish(struct mt76_dev *dev)
{
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);
1640
1641static void
1642__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1643{
1644	struct mt76_dev *dev = priv;
1645
1646	if (!vif->bss_conf.csa_active)
1647		return;
1648
1649	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
1650}
1651
/* Poll all active interfaces for an expired CSA beacon countdown; the
 * result is accumulated in dev->csa_complete by __mt76_csa_check().
 */
void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);
1659
/* mac80211 .set_tim callback.  Intentionally a no-op that reports
 * success; TIM updates are presumably handled when beacons are
 * regenerated elsewhere - this stub only needs to exist.
 */
int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);
1666
/* Re-insert the 8-byte CCMP header that the hardware stripped on RX.
 *
 * The 802.11 header is moved 8 bytes forward and the CCMP header is
 * rebuilt behind it from the PN saved in status->iv.
 */
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	/* Make room in front, then move the 802.11 header down */
	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	/* CCMP header layout: PN0 PN1 rsvd (keyid | ExtIV) PN2..PN5 */
	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6); /* 0x20 = ExtIV bit */
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	/* The IV is present again */
	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1689
1690int mt76_get_rate(struct mt76_dev *dev,
1691		  struct ieee80211_supported_band *sband,
1692		  int idx, bool cck)
1693{
1694	int i, offset = 0, len = sband->n_bitrates;
1695
1696	if (cck) {
1697		if (sband != &dev->phy.sband_2g.sband)
1698			return 0;
1699
1700		idx &= ~BIT(2); /* short preamble */
1701	} else if (sband == &dev->phy.sband_2g.sband) {
1702		offset = 4;
1703	}
1704
1705	for (i = offset; i < len; i++) {
1706		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
1707			return i;
1708	}
1709
1710	return 0;
1711}
1712EXPORT_SYMBOL_GPL(mt76_get_rate);
1713
/* mac80211 .sw_scan_start callback: flag the phy as scanning so other
 * paths (e.g. mt76_phy_dfs_state()) can take it into account.
 */
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);
1722
/* mac80211 .sw_scan_complete callback: clear the scanning flag set by
 * mt76_sw_scan().
 */
void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1730
1731int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
1732{
1733	struct mt76_phy *phy = hw->priv;
1734	struct mt76_dev *dev = phy->dev;
1735
1736	mutex_lock(&dev->mutex);
1737	*tx_ant = phy->antenna_mask;
1738	*rx_ant = phy->antenna_mask;
1739	mutex_unlock(&dev->mutex);
1740
1741	return 0;
1742}
1743EXPORT_SYMBOL_GPL(mt76_get_antenna);
1744
1745struct mt76_queue *
1746mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1747		int ring_base, void *wed, u32 flags)
1748{
1749	struct mt76_queue *hwq;
1750	int err;
1751
1752	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1753	if (!hwq)
1754		return ERR_PTR(-ENOMEM);
1755
1756	hwq->flags = flags;
1757	hwq->wed = wed;
1758
1759	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1760	if (err < 0)
1761		return ERR_PTR(err);
1762
1763	return hwq;
1764}
1765EXPORT_SYMBOL_GPL(mt76_init_queue);
1766
1767u16 mt76_calculate_default_rate(struct mt76_phy *phy,
1768				struct ieee80211_vif *vif, int rateidx)
1769{
1770	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
1771	struct cfg80211_chan_def *chandef = mvif->ctx ?
1772					    &mvif->ctx->def :
1773					    &phy->chandef;
1774	int offset = 0;
1775
1776	if (chandef->chan->band != NL80211_BAND_2GHZ)
1777		offset = 4;
1778
1779	/* pick the lowest rate for hidden nodes */
1780	if (rateidx < 0)
1781		rateidx = 0;
1782
1783	rateidx += offset;
1784	if (rateidx >= ARRAY_SIZE(mt76_rates))
1785		rateidx = offset;
1786
1787	return mt76_rates[rateidx].hw_value;
1788}
1789EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);
1790
/* Accumulate one station's TX rate statistics into the ethtool data
 * array.  Counters are added starting at wi->initial_stat_idx in a
 * fixed order (PHY mode, bandwidth, MCS, NSS); the number of slots
 * written is recorded in wi->worker_stat_count and must stay in sync
 * with the stat names exported by the driver.
 */
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht)
{
	int i, ei = wi->initial_stat_idx;
	u64 *data = wi->data;

	wi->sta_count++;

	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
	if (eht) {
		/* Three extra PHY mode slots for EHT-capable drivers */
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
	}

	/* Without EHT, the last bandwidth bucket is not exported */
	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
		data[ei++] += stats->tx_bw[i];

	/* MCS 0..13 with EHT, 0..11 otherwise */
	for (i = 0; i < (eht ? 14 : 12); i++)
		data[ei++] += stats->tx_mcs[i];

	for (i = 0; i < 4; i++)
		data[ei++] += stats->tx_nss[i];

	wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
1826
/* Append page-pool statistics (summed over all RX queues) to the
 * ethtool data array and advance *index accordingly.  Compiles to a
 * no-op when CONFIG_PAGE_POOL_STATS is not set.
 */
void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	int i;

	mt76_for_each_q_rx(dev, i)
		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);

	page_pool_ethtool_stats_get(data, &stats);
	*index += page_pool_ethtool_stats_get_count();
#endif
}
EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
1841
1842enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
1843{
1844	struct ieee80211_hw *hw = phy->hw;
1845	struct mt76_dev *dev = phy->dev;
1846
1847	if (dev->region == NL80211_DFS_UNSET ||
1848	    test_bit(MT76_SCANNING, &phy->state))
1849		return MT_DFS_STATE_DISABLED;
1850
1851	if (!hw->conf.radar_enabled) {
1852		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
1853		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
1854			return MT_DFS_STATE_ACTIVE;
1855
1856		return MT_DFS_STATE_DISABLED;
1857	}
1858
1859	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
1860		return MT_DFS_STATE_CAC;
1861
1862	return MT_DFS_STATE_ACTIVE;
1863}
1864EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
1865