// SPDX-License-Identifier: ISC
/* Copyright (C) 2019 MediaTek Inc.
 *
 * Author: Ryder Lee <ryder.lee@mediatek.com>
 *         Roy Luo <royluo@google.com>
 *         Felix Fietkau <nbd@nbd.name>
 *         Lorenzo Bianconi <lorenzo@kernel.org>
 */

#include <linux/devcoredump.h>
#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7615.h"
#include "../trace.h"
#include "../dma.h"
#include "mt7615_trace.h"
#include "mac.h"
#include "mcu.h"

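/* Convert a hardware RCPI field to dBm: rssi = rcpi / 2 - 110.
 * Worked example: an RCPI of 160 maps to (160 - 220) / 2 = -30 dBm.
 */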
#define to_rssi(field, rxv)		((FIELD_GET(field, rxv) - 220) / 2)

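/* Per-domain DFS radar detection parameters (ETSI/FCC/JP). The column layout
 * is defined by struct mt7615_dfs_radar_spec; the values appear to be
 * vendor-provided pulse thresholds and pattern timing constants.
 */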
static const struct mt7615_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0, 17,  990, 5010, 1, 1 },
		[6] =  { 1, 0,  9, 32, 28, 0, 27,  615, 5010, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0, 27,  240,  445, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0, 42,  240,  510, 1, 1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 14, 2490, 3343, 0, 0, 12, 32, 28 },
		[10] = { 1, 1,  0,  0,  0, 0, 14, 2490, 3343, 0, 0, 15, 32, 24 },
		[11] = { 1, 1,  0,  0,  0, 0, 14,  823, 2510, 0, 0, 18, 32, 28 },
		[12] = { 1, 1,  0,  0,  0, 0, 14,  823, 2510, 0, 0, 27, 32, 24 },
	},
};

static const struct mt7615_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  9,  32, 28, 0, 13, 508, 3076, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 17, 140,  240, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 22, 190,  510, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 32, 190,  510, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 13, 323,  343, 1, 32 },
	},
};

static const struct mt7615_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8, 32, 28, 0, 13,  508, 3076, 1,  1 },
		[1] =  { 1, 0, 12, 32, 28, 0, 17,  140,  240, 1,  1 },
		[2] =  { 1, 0,  8, 32, 28, 0, 22,  190,  510, 1,  1 },
		[3] =  { 1, 0,  6, 32, 28, 0, 32,  190,  510, 1,  1 },
		[4] =  { 1, 0,  9, 32, 28, 0, 13,  323,  343, 1, 32 },
		[13] = { 1, 0, 8,  32, 28, 0, 14, 3836, 3856, 1,  1 },
		[14] = { 1, 0, 8,  32, 28, 0, 14, 3990, 4010, 1,  1 },
	},
};

static enum mt76_cipher_type
mt7615_mac_get_cipher(int cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return MT_CIPHER_WEP40;
	case WLAN_CIPHER_SUITE_WEP104:
		return MT_CIPHER_WEP104;
	case WLAN_CIPHER_SUITE_TKIP:
		return MT_CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		return MT_CIPHER_BIP_CMAC_128;
	case WLAN_CIPHER_SUITE_CCMP:
		return MT_CIPHER_AES_CCMP;
	case WLAN_CIPHER_SUITE_CCMP_256:
		return MT_CIPHER_CCMP_256;
	case WLAN_CIPHER_SUITE_GCMP:
		return MT_CIPHER_GCMP;
	case WLAN_CIPHER_SUITE_GCMP_256:
		return MT_CIPHER_GCMP_256;
	case WLAN_CIPHER_SUITE_SMS4:
		return MT_CIPHER_WAPI;
	default:
		return MT_CIPHER_NONE;
	}
}

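/* Resolve the mt76_wcid a received frame belongs to. Group-addressed frames
 * are accounted to the per-vif (self) wcid so they are reported against the
 * interface rather than the transmitting station's entry.
 */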
static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
					    u8 idx, bool unicast)
{
	struct mt7615_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= MT7615_WTBL_SIZE)
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7615_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

void mt7615_mac_reset_counters(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;
	int i;

	for (i = 0; i < 4; i++) {
		mt76_rr(dev, MT_TX_AGG_CNT(0, i));
		mt76_rr(dev, MT_TX_AGG_CNT(1, i));
	}

	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));
	phy->mt76->survey_time = ktime_get_boottime();

	/* reset airtime counters */
	mt76_rr(dev, MT_MIB_SDR9(0));
	mt76_rr(dev, MT_MIB_SDR9(1));

	mt76_rr(dev, MT_MIB_SDR36(0));
	mt76_rr(dev, MT_MIB_SDR36(1));

	mt76_rr(dev, MT_MIB_SDR37(0));
	mt76_rr(dev, MT_MIB_SDR37(1));

	mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}

void mt7615_mac_set_timing(struct mt7615_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 val, reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	int sifs, offset;
	bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (is_5ghz)
		sifs = 16;
	else
		sifs = 10;

	if (ext_phy) {
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       coverage_class);
		mt76_set(dev, MT_ARB_SCR,
			 MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
	} else {
		struct mt7615_phy *phy_ext = mt7615_ext_phy(dev);

		if (phy_ext)
			coverage_class = max_t(s16, phy_ext->coverage_class,
					       coverage_class);
		mt76_set(dev, MT_ARB_SCR,
			 MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);
	}
	udelay(1);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
	mt76_wr(dev, MT_TMAC_CDTR, cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR, ofdm + reg_offset);

	mt76_wr(dev, MT_TMAC_ICR(ext_phy),
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	if (phy->slottime < 20 || is_5ghz)
		val = MT7615_CFEND_RATE_DEFAULT;
	else
		val = MT7615_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR(ext_phy), MT_AGG_ACR_CFEND_RATE, val);
	if (ext_phy)
		mt76_clear(dev, MT_ARB_SCR,
			   MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
	else
		mt76_clear(dev, MT_ARB_SCR,
			   MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);
}

static void
mt7615_get_status_freq_info(struct mt7615_dev *dev, struct mt76_phy *mphy,
			    struct mt76_rx_status *status, u8 chfreq)
{
	if (!test_bit(MT76_HW_SCANNING, &mphy->state) &&
	    !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) &&
	    !test_bit(MT76_STATE_ROC, &mphy->state)) {
		status->freq = mphy->chandef.chan->center_freq;
		status->band = mphy->chandef.chan->band;
		return;
	}

	status->band = chfreq <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
	status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
}

static void mt7615_mac_fill_tm_rx(struct mt7615_phy *phy, __le32 *rxv)
{
#ifdef CONFIG_NL80211_TESTMODE
	u32 rxv1 = le32_to_cpu(rxv[0]);
	u32 rxv3 = le32_to_cpu(rxv[2]);
	u32 rxv4 = le32_to_cpu(rxv[3]);
	u32 rxv5 = le32_to_cpu(rxv[4]);
	u8 cbw = FIELD_GET(MT_RXV1_FRAME_MODE, rxv1);
	u8 mode = FIELD_GET(MT_RXV1_TX_MODE, rxv1);
	s16 foe = FIELD_GET(MT_RXV5_FOE, rxv5);
	u32 foe_const = (BIT(cbw + 1) & 0xf) * 10000;

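	/* Normalize the 12-bit frequency-offset estimate before reporting it
	 * via testmode: the CCK path masks off bit 11 and rescales, while the
	 * OFDM path sign-extends the value and weights it by a bandwidth
	 * dependent constant (BIT(cbw + 1) & 0xf gives 2/4/8 for 20/40/80 MHz).
	 */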
	if (!mode) {
		/* CCK */
		foe &= ~BIT(11);
		foe *= 1000;
		foe >>= 11;
	} else {
		if (foe > 2048)
			foe -= 4096;

		foe = (foe * foe_const) >> 15;
	}

	phy->test.last_freq_offset = foe;
	phy->test.last_rcpi[0] = FIELD_GET(MT_RXV4_RCPI0, rxv4);
	phy->test.last_rcpi[1] = FIELD_GET(MT_RXV4_RCPI1, rxv4);
	phy->test.last_rcpi[2] = FIELD_GET(MT_RXV4_RCPI2, rxv4);
	phy->test.last_rcpi[3] = FIELD_GET(MT_RXV4_RCPI3, rxv4);
	phy->test.last_ib_rssi[0] = FIELD_GET(MT_RXV3_IB_RSSI, rxv3);
	phy->test.last_wb_rssi[0] = FIELD_GET(MT_RXV3_WB_RSSI, rxv3);
#endif
}

/* The HW does not translate the mac header to 802.3 for mesh point */
static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt7615_sta *msta = (struct mt7615_sta *)status->wcid;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	if (le32_get_bits(rxd[1], MT_RXD1_NORMAL_ADDR_TYPE) !=
	    MT_RXD1_NORMAL_U2M)
		return -EINVAL;

	if (!(le32_to_cpu(rxd[0]) & MT_RXD0_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[4], MT_RXD4_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[6], MT_RXD6_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		break;
	}

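	/* Strip the ethernet header but keep its last two bytes (h_proto) in
	 * place so an LLC/SNAP header can be rebuilt in front of them;
	 * protocols without a SNAP encapsulation drop those two bytes again.
	 */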
	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[7],
		       IEEE80211_HT_CTL_LEN);

	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[6], MT_RXD6_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	status->flag &= ~(RX_FLAG_RADIOTAP_HE | RX_FLAG_RADIOTAP_HE_MU);
	return 0;
}

static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7615_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	struct mt7615_phy *phy2;
	__le32 *rxd = (__le32 *)skb->data;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
	u32 csum_status = *(u32 *)skb->cb;
	bool unicast, hdr_trans, remove_pad, insert_ccmp_hdr = false;
	u16 hdr_gap;
	int phy_idx;
	int i, idx;
	u8 chfreq, amsdu_info, qos_ctl = 0;
	u16 seq_ctrl = 0;
	__le16 fc = 0;

	memset(status, 0, sizeof(*status));

	chfreq = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);

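	/* Map the frame to a phy: 0 = main, 1 = ext. If both phys operate on
	 * the same channel the owner is ambiguous (-1) and is resolved later
	 * from the per-chain RSSI in RXD group 3.
	 */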
	phy2 = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
	if (!phy2)
		phy_idx = 0;
	else if (phy2->chfreq == phy->chfreq)
		phy_idx = -1;
	else if (phy->chfreq == chfreq)
		phy_idx = 0;
	else if (phy2->chfreq == chfreq)
		phy_idx = 1;
	else
		phy_idx = -1;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd1 & MT_RXD1_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd2 & MT_RXD2_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
	status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		struct mt7615_sta *msta;

		msta = container_of(status->wcid, struct mt7615_sta, wcid);
		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&msta->wcid.poll_list))
			list_add_tail(&msta->wcid.poll_list,
				      &dev->mt76.sta_poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);
	}

	if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	rxd += 4;
	if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD4_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD6_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
			      MT_RXD2_NORMAL_NON_AMPDU))) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
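				/* new A-MPDU: bump the reference, skipping
				 * the value 0 on wraparound
				 */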
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg5 = le32_to_cpu(rxd[5]);

		/*
		 * If both PHYs are on the same channel and we don't have a WCID,
		 * we need to figure out which PHY this packet was received on.
		 * On the primary PHY, the noise value for the chains belonging to the
		 * second PHY will be set to the noise value of the last packet from
		 * that PHY.
		 */
		if (phy_idx < 0) {
			int first_chain = ffs(phy2->mt76->chainmask) - 1;

			phy_idx = ((rxdg5 >> (first_chain * 8)) & 0xff) == 0;
		}
	}

	if (phy_idx == 1 && phy2) {
		mphy = dev->mt76.phys[MT_BAND1];
		phy = phy2;
		status->phy_idx = phy_idx;
	}

	if (!mt7615_firmware_offload(dev) && chfreq != phy->chfreq)
		return -EINVAL;

	mt7615_get_status_freq_info(dev, mphy, status, chfreq);
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (!sband->channels)
		return -EINVAL;

	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg0 = le32_to_cpu(rxd[0]);
		u32 rxdg1 = le32_to_cpu(rxd[1]);
		u32 rxdg3 = le32_to_cpu(rxd[3]);
		u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
		bool cck = false;

		i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
		switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
		case MT_PHY_TYPE_CCK:
			cck = true;
			fallthrough;
		case MT_PHY_TYPE_OFDM:
			i = mt76_get_rate(&dev->mt76, sband, i, cck);
			break;
		case MT_PHY_TYPE_HT_GF:
		case MT_PHY_TYPE_HT:
			status->encoding = RX_ENC_HT;
			if (i > 31)
				return -EINVAL;
			break;
		case MT_PHY_TYPE_VHT:
			status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1;
			status->encoding = RX_ENC_VHT;
			break;
		default:
			return -EINVAL;
		}
		status->rate_idx = i;

		switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) {
		case MT_PHY_BW_20:
			break;
		case MT_PHY_BW_40:
			status->bw = RATE_INFO_BW_40;
			break;
		case MT_PHY_BW_80:
			status->bw = RATE_INFO_BW_80;
			break;
		case MT_PHY_BW_160:
			status->bw = RATE_INFO_BW_160;
			break;
		default:
			return -EINVAL;
		}

		if (rxdg0 & MT_RXV1_HT_SHORT_GI)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (rxdg0 & MT_RXV1_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3);
		status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
		status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
		status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);

		mt7615_mac_fill_tm_rx(mphy->priv, rxd);

		rxd += 6;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	amsdu_info = FIELD_GET(MT_RXD1_NORMAL_PAYLOAD_FORMAT, rxd1);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD1_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD1_LAST_AMSDU_FRAME;
	}

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		if (mt7615_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/*
			 * When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field. This happens either when the LLC-SNAP
			 * pattern did not match, or if a VLAN header was
			 * detected.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (insert_ccmp_hdr && !hdr_trans) {
		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

		mt76_insert_ccmp_hdr(skb, key_id);
	}

	if (!hdr_trans) {
		hdr = (struct ieee80211_hdr *)skb->data;
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *ieee80211_get_qos_ctl(hdr);
		}
	} else {
		status->flag |= RX_FLAG_8023;
	}

	if (!status->wcid || !ieee80211_is_data_qos(fc))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}

static u16
mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
		       struct mt76_phy *mphy,
		       const struct ieee80211_tx_rate *rate,
		       bool stbc, u8 *bw)
{
	u8 phy, nss, rate_idx;
	u16 rateval = 0;

	*bw = 0;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		rate_idx = ieee80211_rate_get_vht_mcs(rate);
		nss = ieee80211_rate_get_vht_nss(rate);
		phy = MT_PHY_TYPE_VHT;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
		else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			*bw = 2;
		else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
			*bw = 3;
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
	} else {
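		/* Legacy CCK/OFDM: the rate table packs the PHY type into the
		 * high byte of hw_value and the hardware rate index into the
		 * low byte.
		 */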
		const struct ieee80211_rate *r;
		int band = mphy->chandef.chan->band;
		u16 val;

		nss = 1;
		r = &mphy->hw->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
	}

	if (stbc && nss == 1) {
		nss++;
		rateval |= MT_TX_RATE_STBC;
	}

	rateval |= (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		    FIELD_PREP(MT_TX_RATE_MODE, phy) |
		    FIELD_PREP(MT_TX_RATE_NSS, nss - 1));

	return rateval;
}

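/* Fill the hardware TX descriptor (TXWI) for a frame: queue mapping, header
 * info, security, fixed-rate overrides and sequence-number handling. USB
 * targets use a longer descriptor (MT_USB_TXD_SIZE) with an extra word.
 */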
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
			  struct sk_buff *skb, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta, int pid,
			  struct ieee80211_key_conf *key,
			  enum mt76_txq_id qid, bool beacon)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	struct ieee80211_vif *vif = info->control.vif;
	bool is_mmio = mt76_is_mmio(&dev->mt76);
	u32 val, sz_txd = is_mmio ? MT_TXD_SIZE : MT_USB_TXD_SIZE;
	struct mt76_phy *mphy = &dev->mphy;
	__le16 fc = hdr->frame_control;
	int tx_count = 8;
	u16 seqno = 0;

	if (vif) {
		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;

		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
	}

	if (sta) {
		struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;

		tx_count = msta->rate_count;
	}

	if (phy_idx && dev->mt76.phys[MT_BAND1])
		mphy = dev->mt76.phys[MT_BAND1];

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = phy_idx ? MT_LMAC_BCN1 : MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
		q_idx = phy_idx ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0;
	} else {
		p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
		q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
			mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
	      FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID,
			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
	      FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
	txwi[1] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
	      FIELD_PREP(MT_TXD2_MULTICAST, multicast);
	if (key) {
		if (multicast && ieee80211_is_robust_mgmt_frame(skb) &&
		    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
			val |= MT_TXD2_BIP;
			txwi[3] = 0;
		} else {
			txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME);
		}
	} else {
		txwi[3] = 0;
	}
	txwi[2] = cpu_to_le32(val);

	if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
		txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

	txwi[4] = 0;
	txwi[6] = 0;

	if (rate->idx >= 0 && rate->count &&
	    !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
		u8 bw;
		u16 rateval = mt7615_mac_tx_rate_val(dev, mphy, rate, stbc,
						     &bw);

		txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_BW, bw) |
		      FIELD_PREP(MT_TXD6_TX_RATE, rateval);
		txwi[6] |= cpu_to_le32(val);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			txwi[6] |= cpu_to_le32(MT_TXD6_SGI);

		if (info->flags & IEEE80211_TX_CTL_LDPC)
			txwi[6] |= cpu_to_le32(MT_TXD6_LDPC);

		if (!(rate->flags & (IEEE80211_TX_RC_MCS |
				     IEEE80211_TX_RC_VHT_MCS)))
			txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

		tx_count = rate->count;
	}

	if (!ieee80211_is_beacon(fc)) {
		struct ieee80211_hw *hw = mt76_hw(dev);

		val = MT_TXD5_TX_STATUS_HOST | FIELD_PREP(MT_TXD5_PID, pid);
		if (!ieee80211_hw_check(hw, SUPPORTS_PS))
			val |= MT_TXD5_SW_POWER_MGMT;
		txwi[5] = cpu_to_le32(val);
	} else {
		txwi[5] = 0;
		/* use maximum tx count for beacons */
		tx_count = 0x1f;
	}

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val |= MT_TXD3_SN_VALID |
		       FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
	}

	txwi[3] |= cpu_to_le32(val);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);

	val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) |
	      FIELD_PREP(MT_TXD7_SPE_IDX, 0x18);
	txwi[7] = cpu_to_le32(val);
	if (!is_mmio) {
		val = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) |
		      FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype);
		txwi[8] = cpu_to_le32(val);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt7615_mac_write_txwi);

bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

void mt7615_mac_sta_poll(struct mt7615_dev *dev)
{
	static const u8 ac_to_tid[4] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	static const u8 hw_queue_map[] = {
		[IEEE80211_AC_BK] = 0,
		[IEEE80211_AC_BE] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};
	struct ieee80211_sta *sta;
	struct mt7615_sta *msta;
	u32 addr, tx_time[4], rx_time[4];
	struct list_head sta_poll_list;
	int i;

	INIT_LIST_HEAD(&sta_poll_list);
	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	while (!list_empty(&sta_poll_list)) {
		bool clear = false;

		msta = list_first_entry(&sta_poll_list, struct mt7615_sta,
					wcid.poll_list);

		spin_lock_bh(&dev->mt76.sta_poll_lock);
		list_del_init(&msta->wcid.poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

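		/* Per-AC airtime counters live at WTBL DW19: two u32s per AC,
		 * TX time at +0 and RX time at +4. When either counter gets
		 * close to overflow (BIT(30)), clear them in hardware below.
		 */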
		addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4;

		for (i = 0; i < 4; i++, addr += 8) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;
		}

		if (clear) {
			mt7615_mac_wtbl_update(dev, msta->wcid.idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < 4; i++) {
			u32 tx_cur = tx_time[i];
			u32 rx_cur = rx_time[hw_queue_map[i]];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}
	}
}
EXPORT_SYMBOL_GPL(mt7615_mac_sta_poll);

static void
mt7615_mac_update_rate_desc(struct mt7615_phy *phy, struct mt7615_sta *sta,
			    struct ieee80211_tx_rate *probe_rate,
			    struct ieee80211_tx_rate *rates,
			    struct mt7615_rate_desc *rd)
{
	struct mt7615_dev *dev = phy->dev;
	struct mt76_phy *mphy = phy->mt76;
	struct ieee80211_tx_rate *ref;
	bool rateset, stbc = false;
	int n_rates = sta->n_rates;
	u8 bw, bw_prev;
	int i, j;

	for (i = n_rates; i < 4; i++)
		rates[i] = rates[n_rates - 1];

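	/* Rate sets are double-buffered: BIT(0) of rate_set_tsf tracks the
	 * set currently programmed, so build the update in the other one.
	 */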
	rateset = !(sta->rate_set_tsf & BIT(0));
	memcpy(sta->rateset[rateset].rates, rates,
	       sizeof(sta->rateset[rateset].rates));
	if (probe_rate) {
		sta->rateset[rateset].probe_rate = *probe_rate;
		ref = &sta->rateset[rateset].probe_rate;
	} else {
		sta->rateset[rateset].probe_rate.idx = -1;
		ref = &sta->rateset[rateset].rates[0];
	}

	rates = sta->rateset[rateset].rates;
	for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
		/*
		 * We don't support switching between short and long GI
		 * within the rate set. For accurate tx status reporting, we
		 * need to make sure that flags match.
		 * For improved performance, avoid duplicate entries by
		 * decrementing the MCS index if necessary
		 */
		if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
			rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;

		for (j = 0; j < i; j++) {
			if (rates[i].idx != rates[j].idx)
				continue;
			if ((rates[i].flags ^ rates[j].flags) &
			    (IEEE80211_TX_RC_40_MHZ_WIDTH |
			     IEEE80211_TX_RC_80_MHZ_WIDTH |
			     IEEE80211_TX_RC_160_MHZ_WIDTH))
				continue;

			if (!rates[i].idx)
				continue;

			rates[i].idx--;
		}
	}

	rd->val[0] = mt7615_mac_tx_rate_val(dev, mphy, &rates[0], stbc, &bw);
	bw_prev = bw;

	if (probe_rate) {
		rd->probe_val = mt7615_mac_tx_rate_val(dev, mphy, probe_rate,
						       stbc, &bw);
		if (bw)
			rd->bw_idx = 1;
		else
			bw_prev = 0;
	} else {
		rd->probe_val = rd->val[0];
	}

	rd->val[1] = mt7615_mac_tx_rate_val(dev, mphy, &rates[1], stbc, &bw);
	if (bw_prev) {
		rd->bw_idx = 3;
		bw_prev = bw;
	}

	rd->val[2] = mt7615_mac_tx_rate_val(dev, mphy, &rates[2], stbc, &bw);
	if (bw_prev) {
		rd->bw_idx = 5;
		bw_prev = bw;
	}

	rd->val[3] = mt7615_mac_tx_rate_val(dev, mphy, &rates[3], stbc, &bw);
	if (bw_prev)
		rd->bw_idx = 7;

	rd->rateset = rateset;
	rd->bw = bw;
}

static int
mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta,
			     struct ieee80211_tx_rate *probe_rate,
			     struct ieee80211_tx_rate *rates)
{
	struct mt7615_dev *dev = phy->dev;
	struct mt7615_wtbl_rate_desc *wrd;

	if (work_pending(&dev->rate_work))
		return -EBUSY;

	wrd = kzalloc(sizeof(*wrd), GFP_ATOMIC);
	if (!wrd)
		return -ENOMEM;

	wrd->sta = sta;
	mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates,
				    &wrd->rate);
	list_add_tail(&wrd->node, &dev->wrd_head);
	queue_work(dev->mt76.wq, &dev->rate_work);

	return 0;
}

u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid)
{
	u32 addr, val, val2;
	u8 offset;

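	/* Per-TID sequence numbers are packed as 12-bit fields starting at
	 * WTBL DW11; locate the containing dword and merge in bits from the
	 * following dword when the field straddles a 32-bit boundary.
	 */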
	addr = mt7615_mac_wtbl_addr(dev, wcid) + 11 * 4;

	offset = tid * 12;
	addr += 4 * (offset / 32);
	offset %= 32;

	val = mt76_rr(dev, addr);
	val >>= offset;

	if (offset > 20) {
		addr += 4;
		val2 = mt76_rr(dev, addr);
		val |= val2 << (32 - offset);
	}

	return val & GENMASK(11, 0);
}

void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
			  struct ieee80211_tx_rate *probe_rate,
			  struct ieee80211_tx_rate *rates)
{
	int wcid = sta->wcid.idx, n_rates = sta->n_rates;
	struct mt7615_dev *dev = phy->dev;
	struct mt7615_rate_desc rd;
	u32 w5, w27, addr;
	u16 idx = sta->vif->mt76.omac_idx;

	if (!mt76_is_mmio(&dev->mt76)) {
		mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates);
		return;
	}

	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
		return;

	memset(&rd, 0, sizeof(struct mt7615_rate_desc));
	mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates, &rd);

	addr = mt7615_mac_wtbl_addr(dev, wcid);
	w27 = mt76_rr(dev, addr + 27 * 4);
	w27 &= ~MT_WTBL_W27_CC_BW_SEL;
	w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rd.bw);

	w5 = mt76_rr(dev, addr + 5 * 4);
	w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE |
		MT_WTBL_W5_MPDU_OK_COUNT |
		MT_WTBL_W5_MPDU_FAIL_COUNT |
		MT_WTBL_W5_RATE_IDX);
	w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rd.bw) |
	      FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE,
			 rd.bw_idx ? rd.bw_idx - 1 : 7);

	mt76_wr(dev, MT_WTBL_RIUCR0, w5);

	mt76_wr(dev, MT_WTBL_RIUCR1,
		FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rd.probe_val) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rd.val[0]) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rd.val[1]));

	mt76_wr(dev, MT_WTBL_RIUCR2,
		FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rd.val[1] >> 8) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rd.val[1]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rd.val[2]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rd.val[2]));

	mt76_wr(dev, MT_WTBL_RIUCR3,
		FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rd.val[2] >> 4) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rd.val[3]) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rd.val[3]));

	mt76_wr(dev, MT_WTBL_UPDATE,
		FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
		MT_WTBL_UPDATE_RATE_UPDATE |
		MT_WTBL_UPDATE_TX_COUNT_CLEAR);

	mt76_wr(dev, addr + 27 * 4, w27);

	idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
	addr = idx > 1 ? MT_LPON_TCR2(idx) : MT_LPON_TCR0(idx);

	mt76_rmw(dev, addr, MT_LPON_TCR_MODE, MT_LPON_TCR_READ); /* TSF read */
	sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0);
	sta->rate_set_tsf |= rd.rateset;

	if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
		mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates;
	sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
	sta->rate_probe = !!probe_rate;
}
EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);

void mt7615_mac_enable_rtscts(struct mt7615_dev *dev,
			      struct ieee80211_vif *vif, bool enable)
{
	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
	u32 addr;

	addr = mt7615_mac_wtbl_addr(dev, mvif->sta.wcid.idx) + 3 * 4;

	if (enable)
		mt76_set(dev, addr, MT_WTBL_W3_RTS);
	else
		mt76_clear(dev, addr, MT_WTBL_W3_RTS);
}
EXPORT_SYMBOL_GPL(mt7615_mac_enable_rtscts);

static int
mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key,
			   enum mt76_cipher_type cipher, u16 cipher_mask)
{
	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
	u8 data[32] = {};

	if (key->keylen > sizeof(data))
		return -EINVAL;

	mt76_rr_copy(dev, addr, data, sizeof(data));
	if (cipher == MT_CIPHER_TKIP) {
		/* Rx/Tx MIC keys are swapped */
		memcpy(data, key->key, 16);
		memcpy(data + 16, key->key + 24, 8);
		memcpy(data + 24, key->key + 16, 8);
	} else {
		if (cipher_mask == BIT(cipher))
			memcpy(data, key->key, key->keylen);
		else if (cipher != MT_CIPHER_BIP_CMAC_128)
			memcpy(data, key->key, 16);
		if (cipher == MT_CIPHER_BIP_CMAC_128)
			memcpy(data + 16, key->key, 16);
	}

	mt76_wr_copy(dev, addr, data, sizeof(data));

	return 0;
}

static int
mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
			  enum mt76_cipher_type cipher, u16 cipher_mask,
			  int keyidx)
{
	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;

	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
		return -ETIMEDOUT;

	w0 = mt76_rr(dev, addr);
	w1 = mt76_rr(dev, addr + 4);

	if (cipher_mask)
		w0 |= MT_WTBL_W0_RX_KEY_VALID;
	else
		w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | MT_WTBL_W0_KEY_IDX);
	if (cipher_mask & BIT(MT_CIPHER_BIP_CMAC_128))
		w0 |= MT_WTBL_W0_RX_IK_VALID;
	else
		w0 &= ~MT_WTBL_W0_RX_IK_VALID;

	if (cipher != MT_CIPHER_BIP_CMAC_128 || cipher_mask == BIT(cipher)) {
		w0 &= ~MT_WTBL_W0_KEY_IDX;
		w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
	}

	mt76_wr(dev, MT_WTBL_RICR0, w0);
	mt76_wr(dev, MT_WTBL_RICR1, w1);

	if (!mt7615_mac_wtbl_update(dev, wcid->idx,
				    MT_WTBL_UPDATE_RXINFO_UPDATE))
		return -ETIMEDOUT;

	return 0;
}

static void
mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
			      enum mt76_cipher_type cipher, u16 cipher_mask)
{
	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);

	if (cipher == MT_CIPHER_BIP_CMAC_128 &&
	    cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128))
		return;

	mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
		 FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
}

int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
			      struct mt76_wcid *wcid,
			      struct ieee80211_key_conf *key)
{
	enum mt76_cipher_type cipher;
	u16 cipher_mask = wcid->cipher;
	int err;

	cipher = mt7615_mac_get_cipher(key->cipher);
	if (cipher == MT_CIPHER_NONE)
		return -EOPNOTSUPP;

	cipher_mask |= BIT(cipher);
	mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask);
	err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask);
	if (err < 0)
		return err;

	err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask,
					key->keyidx);
	if (err < 0)
		return err;

	wcid->cipher = cipher_mask;

	return 0;
}

int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
			    struct mt76_wcid *wcid,
			    struct ieee80211_key_conf *key)
{
	int err;

	spin_lock_bh(&dev->mt76.lock);
	err = __mt7615_mac_wtbl_set_key(dev, wcid, key);
	spin_unlock_bh(&dev->mt76.lock);

	return err;
}

static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
			    struct ieee80211_tx_info *info, __le32 *txs_data)
{
	struct ieee80211_supported_band *sband;
	struct mt7615_rate_set *rs;
	struct mt76_phy *mphy;
	int first_idx = 0, last_idx;
	int i, idx, count;
	bool fixed_rate, ack_timeout;
	bool ampdu, cck = false;
	bool rs_idx;
	u32 rate_set_tsf;
	u32 final_rate, final_rate_flags, final_nss, txs;

	txs = le32_to_cpu(txs_data[1]);
	ampdu = txs & MT_TXS1_AMPDU;

	txs = le32_to_cpu(txs_data[3]);
	count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
	last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs);

	txs = le32_to_cpu(txs_data[0]);
	fixed_rate = txs & MT_TXS0_FIXED_RATE;
	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;

	if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
		return false;

	if (txs & MT_TXS0_QUEUE_TIMEOUT)
		return false;

	if (!ack_timeout)
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
		info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;

	first_idx = max_t(int, 0, last_idx - (count - 1) / MT7615_RATE_RETRY);

	if (fixed_rate) {
		info->status.rates[0].count = count;
		i = 0;
		goto out;
	}

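	/* Pick the rate set that was live when the frame was queued: if the
	 * TXS timestamp is within 1000000 TSF ticks (~1 s, TSF counts
	 * microseconds) of the last rate-set update the freshly written set
	 * applied, otherwise the previous one; BIT(0) of rate_set_tsf then
	 * selects between the two buffers.
	 */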
	rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
	rs_idx = !((u32)(le32_get_bits(txs_data[4], MT_TXS4_F0_TIMESTAMP) -
			 rate_set_tsf) < 1000000);
	rs_idx ^= rate_set_tsf & BIT(0);
	rs = &sta->rateset[rs_idx];

	if (!first_idx && rs->probe_rate.idx >= 0) {
		info->status.rates[0] = rs->probe_rate;

		spin_lock_bh(&dev->mt76.lock);
		if (sta->rate_probe) {
			struct mt7615_phy *phy = &dev->phy;

			if (sta->wcid.phy_idx && dev->mt76.phys[MT_BAND1])
				phy = dev->mt76.phys[MT_BAND1]->priv;

			mt7615_mac_set_rates(phy, sta, NULL, sta->rates);
		}
		spin_unlock_bh(&dev->mt76.lock);
	} else {
		info->status.rates[0] = rs->rates[first_idx / 2];
	}
	info->status.rates[0].count = 0;

	for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
		struct ieee80211_tx_rate *cur_rate;
		int cur_count;

		cur_rate = &rs->rates[idx / 2];
		cur_count = min_t(int, MT7615_RATE_RETRY, count);
		count -= cur_count;

		if (idx && (cur_rate->idx != info->status.rates[i].idx ||
			    cur_rate->flags != info->status.rates[i].flags)) {
			i++;
			if (i == ARRAY_SIZE(info->status.rates)) {
				i--;
				break;
			}

			info->status.rates[i] = *cur_rate;
			info->status.rates[i].count = 0;
		}

		info->status.rates[i].count += cur_count;
	}

out:
	final_rate_flags = info->status.rates[i].flags;

	switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = &dev->mphy;
		if (sta->wcid.phy_idx && dev->mt76.phys[MT_BAND1])
			mphy = dev->mt76.phys[MT_BAND1];

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else
			sband = &mphy->sband_2g.sband;
		final_rate &= MT_TX_RATE_IDX;
		final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
					   cck);
		final_rate_flags = 0;
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		final_rate_flags |= IEEE80211_TX_RC_MCS;
		final_rate &= MT_TX_RATE_IDX;
		if (final_rate > 31)
			return false;
		break;
	case MT_PHY_TYPE_VHT:
		final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate);

		if ((final_rate & MT_TX_RATE_STBC) && final_nss)
			final_nss--;

		final_rate_flags |= IEEE80211_TX_RC_VHT_MCS;
		final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4);
		break;
	default:
		return false;
	}

	info->status.rates[i].idx = final_rate;
	info->status.rates[i].flags = final_rate_flags;

	return true;
}

static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
				   struct mt7615_sta *sta, int pid,
				   __le32 *txs_data)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (pid < MT_PACKET_ID_FIRST)
		return false;

	trace_mac_txdone(mdev, sta->wcid.idx, pid);

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
	if (skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
			info->status.rates[0].count = 0;
			info->status.rates[0].idx = -1;
		}

		mt76_tx_status_skb_done(mdev, skb, &list);
	}
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_sta *sta = NULL;
	struct mt7615_sta *msta = NULL;
	struct mt76_wcid *wcid;
	struct mt76_phy *mphy = &dev->mt76.phy;
	__le32 *txs_data = data;
	u8 wcidx;
	u8 pid;

	pid = le32_get_bits(txs_data[0], MT_TXS0_PID);
	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);

	if (pid == MT_PACKET_ID_NO_ACK)
		return;

	if (wcidx >= MT7615_WTBL_SIZE)
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7615_sta, wcid);
	sta = wcid_to_sta(wcid);

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	if (list_empty(&msta->wcid.poll_list))
		list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
		goto out;

	if (wcidx >= MT7615_WTBL_STA || !sta)
		goto out;

	if (wcid->phy_idx && dev->mt76.phys[MT_BAND1])
		mphy = dev->mt76.phys[MT_BAND1];

	if (mt7615_fill_txs(dev, msta, &info, txs_data)) {
		spin_lock_bh(&dev->mt76.rx_lock);
		ieee80211_tx_status_noskb(mphy->hw, sta, &info);
		spin_unlock_bh(&dev->mt76.rx_lock);
	}

out:
	rcu_read_unlock();
}

static void
mt7615_txwi_free(struct mt7615_dev *dev, struct mt76_txwi_cache *txwi)
{
	struct mt76_dev *mdev = &dev->mt76;
	__le32 *txwi_data;
	u32 val;
	u8 wcid;

	mt76_connac_txp_skb_unmap(mdev, txwi);
	if (!txwi->skb)
		goto out;

	txwi_data = (__le32 *)mt76_get_txwi_ptr(mdev, txwi);
	val = le32_to_cpu(txwi_data[1]);
	wcid = FIELD_GET(MT_TXD1_WLAN_IDX, val);
	mt76_tx_complete_skb(mdev, wcid, txwi->skb);

out:
	txwi->skb = NULL;
	mt76_put_txwi(mdev, txwi);
}

static void
mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;

	trace_mac_tx_free(dev, token);
	txwi = mt76_token_put(mdev, token);
	if (!txwi)
		return;

	mt7615_txwi_free(dev, txwi);
}

static void mt7615_mac_tx_free(struct mt7615_dev *dev, void *data, int len)
{
	struct mt76_connac_tx_free *free = data;
	void *tx_token = data + sizeof(*free);
	void *end = data + len;
	u8 i, count;

	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	if (is_mt7615(&dev->mt76)) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	} else {
		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
	}

	count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_ID_CNT);
	if (is_mt7615(&dev->mt76)) {
		__le16 *token = tx_token;

		if (WARN_ON_ONCE((void *)&token[count] > end))
			return;

		for (i = 0; i < count; i++)
			mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i]));
	} else {
		__le32 *token = tx_token;

		if (WARN_ON_ONCE((void *)&token[count] > end))
			return;

		for (i = 0; i < count; i++)
			mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i]));
	}

	rcu_read_lock();
	mt7615_mac_sta_poll(dev);
	rcu_read_unlock();

	mt76_worker_schedule(&dev->mt76.tx_worker);
}

bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7615_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		for (rxd++; rxd + 7 <= end; rxd += 7)
			mt7615_mac_add_txs(dev, rxd);
		return false;
	default:
		return true;
	}
}
EXPORT_SYMBOL_GPL(mt7615_rx_check);

void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;
	u16 flag;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG);
	if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
		type = PKT_TYPE_NORMAL_MCU;

	switch (type) {
	case PKT_TYPE_TXS:
		for (rxd++; rxd + 7 <= end; rxd += 7)
			mt7615_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_TXRX_NOTIFY:
		mt7615_mac_tx_free(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7615_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_NORMAL_MCU:
	case PKT_TYPE_NORMAL:
		if (!mt7615_mac_fill_rx(dev, skb)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}
EXPORT_SYMBOL_GPL(mt7615_queue_rx_skb);

static void
mt7615_mac_set_sensitivity(struct mt7615_phy *phy, int val, bool ofdm)
{
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;

	if (is_mt7663(&dev->mt76)) {
		if (ofdm)
			mt76_rmw(dev, MT7663_WF_PHY_MIN_PRI_PWR(ext_phy),
				 MT_WF_PHY_PD_OFDM_MASK(0),
				 MT_WF_PHY_PD_OFDM(0, val));
		else
			mt76_rmw(dev, MT7663_WF_PHY_RXTD_CCK_PD(ext_phy),
				 MT_WF_PHY_PD_CCK_MASK(ext_phy),
				 MT_WF_PHY_PD_CCK(ext_phy, val));
		return;
	}

	if (ofdm)
		mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy),
			 MT_WF_PHY_PD_OFDM_MASK(ext_phy),
			 MT_WF_PHY_PD_OFDM(ext_phy, val));
	else
		mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy),
			 MT_WF_PHY_PD_CCK_MASK(ext_phy),
			 MT_WF_PHY_PD_CCK(ext_phy, val));
}

static void
mt7615_mac_set_default_sensitivity(struct mt7615_phy *phy)
{
	/* ofdm */
	mt7615_mac_set_sensitivity(phy, 0x13c, true);
	/* cck */
	mt7615_mac_set_sensitivity(phy, 0x92, false);

	phy->ofdm_sensitivity = -98;
	phy->cck_sensitivity = -110;
	phy->last_cca_adj = jiffies;
}

void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable)
{
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 reg, mask;

	mt7615_mutex_acquire(dev);

	if (phy->scs_en == enable)
		goto out;

	if (is_mt7663(&dev->mt76)) {
		reg = MT7663_WF_PHY_MIN_PRI_PWR(ext_phy);
		mask = MT_WF_PHY_PD_BLK(0);
	} else {
		reg = MT_WF_PHY_MIN_PRI_PWR(ext_phy);
		mask = MT_WF_PHY_PD_BLK(ext_phy);
	}

	if (enable) {
		mt76_set(dev, reg, mask);
		if (is_mt7622(&dev->mt76)) {
			mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7 << 8);
			mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7);
		}
	} else {
		mt76_clear(dev, reg, mask);
	}

	mt7615_mac_set_default_sensitivity(phy);
	phy->scs_en = enable;

out:
	mt7615_mutex_release(dev);
}

void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy)
{
	u32 rxtd, reg;

	if (is_mt7663(&dev->mt76))
		reg = MT7663_WF_PHY_R0_PHYMUX_5;
	else
		reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy);

	if (ext_phy)
		rxtd = MT_WF_PHY_RXTD2(10);
	else
		rxtd = MT_WF_PHY_RXTD(12);

	mt76_set(dev, rxtd, BIT(18) | BIT(29));
	mt76_set(dev, reg, 0x5 << 12);
}

void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 reg;

	if (is_mt7663(&dev->mt76))
		reg = MT7663_WF_PHY_R0_PHYMUX_5;
	else
		reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy);

	/* reset PD and MDRDY counters */
	mt76_clear(dev, reg, GENMASK(22, 20));
	mt76_set(dev, reg, BIT(22) | BIT(20));
}

static void
mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy,
			      u32 rts_err_rate, bool ofdm)
{
	struct mt7615_dev *dev = phy->dev;
	int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck;
	bool ext_phy = phy != &dev->phy;
	s16 def_th = ofdm ? -98 : -110;
	bool update = false;
	s8 *sensitivity;
	int signal;

	sensitivity = ofdm ? &phy->ofdm_sensitivity : &phy->cck_sensitivity;
	signal = mt76_get_min_avg_rssi(&dev->mt76, ext_phy);
	if (!signal) {
		mt7615_mac_set_default_sensitivity(phy);
		return;
	}

	signal = min(signal, -72);
	if (false_cca > 500) {
		if (rts_err_rate > MT_FRAC(40, 100))
			return;

		/* decrease coverage */
		if (*sensitivity == def_th && signal > -90) {
			*sensitivity = -90;
			update = true;
		} else if (*sensitivity + 2 < signal) {
			*sensitivity += 2;
			update = true;
		}
	} else if ((false_cca > 0 && false_cca < 50) ||
		   rts_err_rate > MT_FRAC(60, 100)) {
		/* increase coverage */
		if (*sensitivity - 2 >= def_th) {
			*sensitivity -= 2;
			update = true;
		}
	}

	if (*sensitivity > signal) {
		*sensitivity = signal;
		update = true;
	}

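	/* Map the dBm threshold to the register encoding: OFDM uses
	 * val = 2 * dBm + 512, CCK uses val = dBm + 256. Worked check against
	 * the defaults above: 2 * (-98) + 512 = 0x13c and -110 + 256 = 0x92.
	 */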
	if (update) {
		u16 val = ofdm ? *sensitivity * 2 + 512 : *sensitivity + 256;

		mt7615_mac_set_sensitivity(phy, val, ofdm);
		phy->last_cca_adj = jiffies;
	}
}

static void
mt7615_mac_scs_check(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	u32 val, rts_err_rate = 0;
	u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm;
	bool ext_phy = phy != &dev->phy;

	if (!phy->scs_en)
		return;

	if (is_mt7663(&dev->mt76))
		val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS0(ext_phy));
	else
		val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS0(ext_phy));
	pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val);
	pd_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val);

	if (is_mt7663(&dev->mt76))
		val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS5(ext_phy));
	else
		val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS5(ext_phy));
	mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val);
	mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val);

	phy->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
	phy->false_cca_cck = pd_cck - mdrdy_cck;
	mt7615_mac_cca_stats_reset(phy);

	if (mib->rts_cnt + mib->rts_retries_cnt)
		rts_err_rate = MT_FRAC(mib->rts_retries_cnt,
				       mib->rts_cnt + mib->rts_retries_cnt);

	/* cck */
	mt7615_mac_adjust_sensitivity(phy, rts_err_rate, false);
	/* ofdm */
	mt7615_mac_adjust_sensitivity(phy, rts_err_rate, true);

	if (time_after(jiffies, phy->last_cca_adj + 10 * HZ))
		mt7615_mac_set_default_sensitivity(phy);
}

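/* Estimate the noise floor from an 11-bucket histogram exposed via the RXTD
 * registers: each bucket counts samples at the corresponding power level
 * (nf_power[], in -dBm), and the result is the count-weighted average.
 */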
static u8
mt7615_phy_get_nf(struct mt7615_dev *dev, int idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	u32 reg, val, sum = 0, n = 0;
	int i;

	if (is_mt7663(&dev->mt76))
		reg = MT7663_WF_PHY_RXTD(20);
	else
		reg = idx ? MT_WF_PHY_RXTD2(17) : MT_WF_PHY_RXTD(20);

	for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
		val = mt76_rr(dev, reg);
		sum += val * nf_power[i];
		n += val;
	}

	if (!n)
		return 0;

	return sum / n;
}

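/* Accumulate the per-channel survey counters and track the noise
 * floor. phy->noise holds a running average in Q4 fixed point (1/16 dB
 * units): the update phy->noise += nf - (phy->noise >> 4) is an
 * exponential moving average with weight 1/16 per sample, and the
 * reported value is recovered as -(phy->noise >> 4).
 */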
static void
mt7615_phy_update_channel(struct mt76_phy *mphy, int idx)
{
	struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76);
	struct mt7615_phy *phy = mphy->priv;
	struct mt76_channel_state *state;
	u64 busy_time, tx_time, rx_time, obss_time;
	u32 obss_reg = idx ? MT_WF_RMAC_MIB_TIME6 : MT_WF_RMAC_MIB_TIME5;
	int nf;

	busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
				   MT_MIB_SDR9_BUSY_MASK);
	tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
				 MT_MIB_SDR36_TXTIME_MASK);
	rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
				 MT_MIB_SDR37_RXTIME_MASK);
	obss_time = mt76_get_field(dev, obss_reg, MT_MIB_OBSSTIME_MASK);

	nf = mt7615_phy_get_nf(dev, idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state = mphy->chan_state;
	state->cc_busy += busy_time;
	state->cc_tx += tx_time;
	state->cc_rx += rx_time + obss_time;
	state->cc_bss_rx += rx_time;
	state->noise = -(phy->noise >> 4);
}

static void mt7615_update_survey(struct mt7615_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1];
	ktime_t cur_time;

	/* MT7615 can only update both phys simultaneously
	 * since some registers are shared across bands.
	 */

	mt7615_phy_update_channel(&mdev->phy, 0);
	if (mphy_ext)
		mt7615_phy_update_channel(mphy_ext, 1);

	cur_time = ktime_get_boottime();

	mt76_update_survey_active_time(&mdev->phy, cur_time);
	if (mphy_ext)
		mt76_update_survey_active_time(mphy_ext, cur_time);

	/* reset obss airtime */
	mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}

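/* mac80211-facing survey update. The device may be runtime-suspended,
 * so wake it first and skip this round if the wake fails; the
 * power-save timer is re-armed once the registers have been read.
 */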
void mt7615_update_channel(struct mt76_phy *mphy)
{
	struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76);

	if (mt76_connac_pm_wake(&dev->mphy, &dev->pm))
		return;

	mt7615_update_survey(dev);
	mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
}
EXPORT_SYMBOL_GPL(mt7615_update_channel);

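/* Harvest the MIB counters: FCS errors, the A-MPDU error rate
 * (aggr_per, in per mille: 1000 * (MPDUs sent - MPDUs acked) / MPDUs
 * sent), BA miss / ACK failure / RTS counters and the TX aggregation
 * size histogram (two 16-bit buckets per MT_TX_AGG_CNT register).
 * Called from every fifth run of mt7615_mac_work() below, accumulating
 * into struct mib_stats.
 */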
static void
mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	bool ext_phy = phy != &dev->phy;
	int i, aggr = 0;
	u32 val, val2;

	mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
					   MT_MIB_SDR3_FCS_ERR_MASK);

	val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy),
			     MT_MIB_AMPDU_MPDU_COUNT);
	if (val) {
		val2 = mt76_get_field(dev, MT_MIB_SDR15(ext_phy),
				      MT_MIB_AMPDU_ACK_COUNT);
		mib->aggr_per = 1000 * (val - val2) / val;
	}

	for (i = 0; i < 4; i++) {
		val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
		mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
		mib->ack_fail_cnt += FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK,
					       val);

		val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
		mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
		mib->rts_retries_cnt += FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK,
						  val);

		val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
		phy->mt76->aggr_stats[aggr++] += val & 0xffff;
		phy->mt76->aggr_stats[aggr++] += val >> 16;
	}
}

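/* Runtime-PM wake path: reclaim device ownership from the firmware via
 * mt7615_mcu_set_drv_ctrl(), then flush everything that queued up
 * while the device slept: dequeue deferred TX skbs, kick the RX NAPI
 * contexts (or the SDIO txrx worker), reap completed MCU commands and
 * re-arm the periodic MAC work before letting mac80211 transmit again.
 */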
void mt7615_pm_wake_work(struct work_struct *work)
{
	struct mt7615_dev *dev;
	struct mt76_phy *mphy;

	dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
						pm.wake_work);
	mphy = dev->phy.mt76;

	if (!mt7615_mcu_set_drv_ctrl(dev)) {
		struct mt76_dev *mdev = &dev->mt76;
		int i;

		if (mt76_is_sdio(mdev)) {
			mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
			mt76_worker_schedule(&mdev->sdio.txrx_worker);
		} else {
			local_bh_disable();
			mt76_for_each_q_rx(mdev, i)
				napi_schedule(&mdev->napi[i]);
			local_bh_enable();
			mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
			mt76_queue_tx_cleanup(dev, mdev->q_mcu[MT_MCUQ_WM],
					      false);
		}

		if (test_bit(MT76_STATE_RUNNING, &mphy->state)) {
			unsigned long timeout;

			timeout = mt7615_get_macwork_timeout(dev);
			ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
						     timeout);
		}
	}

	ieee80211_wake_queues(mphy->hw);
	wake_up(&dev->pm.wait);
}

void mt7615_pm_power_save_work(struct work_struct *work)
{
	struct mt7615_dev *dev;
	unsigned long delta;

	dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
						pm.ps_work.work);

	delta = dev->pm.idle_timeout;
	if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) ||
	    test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
		goto out;

	if (mutex_is_locked(&dev->mt76.mutex))
		/* if the mt76 mutex is held we should not put the device
		 * to sleep, since we are currently accessing the device
		 * register map; wait for the next power-save trigger
		 * instead.
		 */
		goto out;

	if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
		delta = dev->pm.last_activity + delta - jiffies;
		goto out;
	}

	if (!mt7615_mcu_set_fw_ctrl(dev))
		return;
out:
	queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
}

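/* Periodic per-phy maintenance: survey counters are polled on every
 * run, while MIB statistics and the smart carrier sense check only run
 * on every fifth invocation. The work re-arms itself with the timeout
 * returned by mt7615_get_macwork_timeout().
 */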
void mt7615_mac_work(struct work_struct *work)
{
	struct mt7615_phy *phy;
	struct mt76_phy *mphy;
	unsigned long timeout;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mt7615_mutex_acquire(phy->dev);

	mt7615_update_survey(phy->dev);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7615_mac_update_mib_stats(phy);
		mt7615_mac_scs_check(phy);
	}

	mt7615_mutex_release(phy->dev);

	mt76_tx_status_check(mphy->dev, false);

	timeout = mt7615_get_macwork_timeout(phy->dev);
	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, timeout);
}

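/* Release every outstanding TX token together with its txwi and DMA
 * mappings. Meant for teardown and restart paths where in-flight
 * frames will no longer be completed by the hardware.
 */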
void mt7615_tx_token_put(struct mt7615_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id)
		mt7615_txwi_free(dev, txwi);
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}
EXPORT_SYMBOL_GPL(mt7615_tx_token_put);

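/* Radar detection runs on up to two RDD chains: the chain matching the
 * phy, plus chain 1 for 160 MHz and 80+80 MHz channels. phy->rdd_state
 * tracks the active chains as a bitmask so they can be stopped again.
 */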
static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;

	if (phy->rdd_state & BIT(0))
		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
					MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
					MT_RX_SEL0, 0);
}

static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
{
	int err;

	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
				      MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
				       MT_RX_SEL0, 1);
}

static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	/* start CAC */
	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, ext_phy,
				      MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7615_dfs_start_rdd(dev, ext_phy);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(ext_phy);

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		err = mt7615_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}

static int
mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
{
	const struct mt7615_dfs_radar_spec *radar_specs;
	struct mt7615_dev *dev = phy->dev;
	int err, i, lpn = 500;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		lpn = 8;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	/* avoid FCC radar detection in non-FCC region */
	err = mt7615_mcu_set_fcc5_lpn(dev, lpn);
	if (err < 0)
		return err;

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7615_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7615_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

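/* Drive the per-phy DFS state machine (no-op on MT7663):
 *
 *   DISABLED -> CAC:   program region-specific radar thresholds and
 *                      start the detector(s) on a radar channel
 *   CAC -> ACTIVE:     end the Channel Availability Check so the
 *                      channel can be used
 *   any -> DISABLED:   switch the radio back to normal RX and stop
 *                      the detectors
 */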
int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	enum mt76_dfs_state dfs_state, prev_state;
	int err;

	if (is_mt7663(&dev->mt76))
		return 0;

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);
	if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
	    dfs_state < MT_DFS_STATE_CAC)
		dfs_state = MT_DFS_STATE_ACTIVE;

	if (prev_state == dfs_state)
		return 0;

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7615_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7615_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
				      ext_phy, MT_RX_SEL0, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, ext_phy,
				      MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7615_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}

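/* Toggle hardware beacon filtering (offload firmware only). The
 * per-phy n_beacon_vif refcount tracks beaconing interfaces (AP, mesh,
 * IBSS): while at least one exists, other BSS' beacons must still be
 * received, so filtering stays off; once the last one is gone, the RX
 * filter drops foreign beacons and the vif is flagged with
 * IEEE80211_VIF_BEACON_FILTER.
 */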
int mt7615_mac_set_beacon_filter(struct mt7615_phy *phy,
				 struct ieee80211_vif *vif,
				 bool enable)
{
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	if (!mt7615_firmware_offload(dev))
		return -EOPNOTSUPP;

	switch (vif->type) {
	case NL80211_IFTYPE_MONITOR:
		return 0;
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		if (enable)
			phy->n_beacon_vif++;
		else
			phy->n_beacon_vif--;
		fallthrough;
	default:
		break;
	}

	err = mt7615_mcu_set_bss_pm(dev, vif, !phy->n_beacon_vif);
	if (err)
		return err;

	if (phy->n_beacon_vif) {
		vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
		mt76_clear(dev, MT_WF_RFCR(ext_phy),
			   MT_WF_RFCR_DROP_OTHER_BEACON);
	} else {
		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
		mt76_set(dev, MT_WF_RFCR(ext_phy),
			 MT_WF_RFCR_DROP_OTHER_BEACON);
	}

	return 0;
}

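/* Assemble a firmware coredump. Crash-log fragments arrive as MCU
 * events queued on coredump.msg_list; this work keeps rescheduling
 * itself while fragments are still trickling in (last_activity newer
 * than 4 * MT76_CONNAC_COREDUMP_TIMEOUT) and then concatenates the
 * payloads, minus their MCU RX descriptors, into a single buffer
 * handed to the devcoredump framework.
 */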
void mt7615_coredump_work(struct work_struct *work)
{
	struct mt7615_dev *dev;
	char *dump, *data;

	dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
						coredump.work.work);

	if (time_is_after_jiffies(dev->coredump.last_activity +
				  4 * MT76_CONNAC_COREDUMP_TIMEOUT)) {
		queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
				   MT76_CONNAC_COREDUMP_TIMEOUT);
		return;
	}

	dump = vzalloc(MT76_CONNAC_COREDUMP_SZ);
	data = dump;

	while (true) {
		struct sk_buff *skb;

		spin_lock_bh(&dev->mt76.lock);
		skb = __skb_dequeue(&dev->coredump.msg_list);
		spin_unlock_bh(&dev->mt76.lock);

		if (!skb)
			break;

		skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
		if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
			dev_kfree_skb(skb);
			continue;
		}

		memcpy(data, skb->data, skb->len);
		data += skb->len;

		dev_kfree_skb(skb);
	}

	if (dump)
		dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
			      GFP_KERNEL);
}
