// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020  Realtek Corporation
 */

#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "sar.h"
#include "txrx.h"
#include "util.h"

static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
			     const struct rtw89_ra_report *report)
{
	u32 bit_rate = report->bit_rate;

	/* lower than ofdm, do not aggregate */
	if (bit_rate < 550)
		return 1;

	/* avoid AMSDU for legacy rate */
	if (report->might_fallback_legacy)
		return 1;

	/* lower than 20M vht 2ss mcs8, make it small */
	if (bit_rate < 1800)
		return 1200;

	/* lower than 40M vht 2ss mcs9, make it medium */
	if (bit_rate < 4000)
		return 2600;

	/* not yet 80M vht 2ss mcs8/9, make it twice regular packet size */
	if (bit_rate < 7000)
		return 3500;

	return rtwdev->chip->max_amsdu_limit;
}

static u64 get_mcs_ra_mask(u16 mcs_map, u8 highest_mcs, u8 gap)
{
	u64 ra_mask = 0;
	u8 mcs_cap;
	int i, nss;

	for (i = 0, nss = 12; i < 4; i++, mcs_map >>= 2, nss += 12) {
		mcs_cap = mcs_map & 0x3;
		switch (mcs_cap) {
		case 2:
			ra_mask |= GENMASK_ULL(highest_mcs, 0) << nss;
			break;
		case 1:
			ra_mask |= GENMASK_ULL(highest_mcs - gap, 0) << nss;
			break;
		case 0:
			ra_mask |= GENMASK_ULL(highest_mcs - gap * 2, 0) << nss;
			break;
		default:
			break;
		}
	}

	return ra_mask;
}
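
/* Illustration (HE caller: highest_mcs == 11, gap == 2): each 2-bit field
 * of mcs_map selects the top MCS tier of one spatial stream -- 2 enables
 * MCS 0-11, 1 enables MCS 0-9, 0 enables MCS 0-7, and 3 means the stream
 * is unsupported (the default case adds no bits).  Every stream occupies
 * 12 bits of the rate mask starting at bit 12 (bits 0-11 hold the legacy
 * rates), hence nss begins at 12 and advances by 12 per stream.
 */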

static u64 get_he_ra_mask(struct ieee80211_sta *sta)
{
	struct ieee80211_sta_he_cap cap = sta->deflink.he_cap;
	u16 mcs_map;

	switch (sta->deflink.bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		if (cap.he_cap_elem.phy_cap_info[0] &
		    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
			mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80p80);
		else
			mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_160);
		break;
	default:
		mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80);
	}

	/* MCS11, MCS9, MCS7 */
	return get_mcs_ra_mask(mcs_map, 11, 2);
}

#define RA_FLOOR_TABLE_SIZE	7
#define RA_FLOOR_UP_GAP		3
static u64 rtw89_phy_ra_mask_rssi(struct rtw89_dev *rtwdev, u8 rssi,
				  u8 ratr_state)
{
	u8 rssi_lv_t[RA_FLOOR_TABLE_SIZE] = {30, 44, 48, 52, 56, 60, 100};
	u8 rssi_lv = 0;
	u8 i;

	rssi >>= 1;
	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
		if (i >= ratr_state)
			rssi_lv_t[i] += RA_FLOOR_UP_GAP;
		if (rssi < rssi_lv_t[i]) {
			rssi_lv = i;
			break;
		}
	}
	if (rssi_lv == 0)
		return 0xffffffffffffffffULL;
	else if (rssi_lv == 1)
		return 0xfffffffffffffff0ULL;
	else if (rssi_lv == 2)
		return 0xffffffffffffefe0ULL;
	else if (rssi_lv == 3)
		return 0xffffffffffffcfc0ULL;
	else if (rssi_lv == 4)
		return 0xffffffffffff8f80ULL;
	else if (rssi_lv >= 5)
		return 0xffffffffffff0f00ULL;

	return 0xffffffffffffffffULL;
}
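
/* Sketch of the scheme above: the table maps the (halved) averaged RSSI to
 * a rate-floor level, and each level's return value clears progressively
 * more low-rate bits from the candidate mask (CCK first, then the lowest
 * OFDM and the lowest MCS entries of each spatial-stream group).
 * Thresholds at or above the current ratr_state are raised by
 * RA_FLOOR_UP_GAP, which adds hysteresis so a link hovering near a
 * boundary does not bounce between adjacent floors.
 */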

static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak)
{
	if ((ra_mask & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)) == 0)
		ra_mask |= (ra_mask_bak & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));

	if (ra_mask == 0)
		ra_mask |= (ra_mask_bak & (RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));

	return ra_mask;
}

static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta)
{
	struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct cfg80211_bitrate_mask *mask = &rtwsta->mask;
	enum nl80211_band band;
	u64 cfg_mask;

	if (!rtwsta->use_cfg_mask)
		return -1; /* -1 is an all-ones mask: no restriction */

	switch (chan->band_type) {
	case RTW89_BAND_2G:
		band = NL80211_BAND_2GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_2GHZ].legacy,
					   RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES);
		break;
	case RTW89_BAND_5G:
		band = NL80211_BAND_5GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_5GHZ].legacy,
					   RA_MASK_OFDM_RATES);
		break;
	case RTW89_BAND_6G:
		band = NL80211_BAND_6GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_6GHZ].legacy,
					   RA_MASK_OFDM_RATES);
		break;
	default:
		rtw89_warn(rtwdev, "unhandled band type %d\n", chan->band_type);
		return -1; /* likewise unrestricted on an unknown band */
	}

	if (sta->deflink.he_cap.has_he) {
		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[0],
					    RA_MASK_HE_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[1],
					    RA_MASK_HE_2SS_RATES);
	} else if (sta->deflink.vht_cap.vht_supported) {
		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0],
					    RA_MASK_VHT_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1],
					    RA_MASK_VHT_2SS_RATES);
	} else if (sta->deflink.ht_cap.ht_supported) {
		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0],
					    RA_MASK_HT_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1],
					    RA_MASK_HT_2SS_RATES);
	}

	return cfg_mask;
}

static const u64
rtw89_ra_mask_ht_rates[4] = {RA_MASK_HT_1SS_RATES, RA_MASK_HT_2SS_RATES,
			     RA_MASK_HT_3SS_RATES, RA_MASK_HT_4SS_RATES};
static const u64
rtw89_ra_mask_vht_rates[4] = {RA_MASK_VHT_1SS_RATES, RA_MASK_VHT_2SS_RATES,
			      RA_MASK_VHT_3SS_RATES, RA_MASK_VHT_4SS_RATES};
static const u64
rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES,
			     RA_MASK_HE_3SS_RATES, RA_MASK_HE_4SS_RATES};

static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
				struct rtw89_sta *rtwsta,
				bool *fix_giltf_en, u8 *fix_giltf)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct cfg80211_bitrate_mask *mask = &rtwsta->mask;
	u8 band = chan->band_type;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	u8 he_gi = mask->control[nl_band].he_gi;
	u8 he_ltf = mask->control[nl_band].he_ltf;

	if (!rtwsta->use_cfg_mask)
		return;

	if (he_ltf == 2 && he_gi == 2) {
		*fix_giltf = RTW89_GILTF_LGI_4XHE32;
	} else if (he_ltf == 2 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_SGI_4XHE08;
	} else if (he_ltf == 1 && he_gi == 1) {
		*fix_giltf = RTW89_GILTF_2XHE16;
	} else if (he_ltf == 1 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_2XHE08;
	} else if (he_ltf == 0 && he_gi == 1) {
		*fix_giltf = RTW89_GILTF_1XHE16;
	} else if (he_ltf == 0 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_1XHE08;
	} else {
		*fix_giltf_en = false;
		return;
	}

	*fix_giltf_en = true;
}
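
/* The (he_ltf, he_gi) pairs above follow the nl80211 encoding, where he_gi
 * 0/1/2 means a 0.8/1.6/3.2 us guard interval and he_ltf 0/1/2 means
 * 1x/2x/4x HE-LTF; e.g. he_ltf == 2 && he_gi == 2 is 4x LTF with 3.2 us
 * GI, hence RTW89_GILTF_LGI_4XHE32.  Combinations not listed are invalid
 * for HE SU PPDUs, so fixing GI/LTF is disabled for them.
 */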

static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
				    struct ieee80211_sta *sta, bool csi)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
	struct rtw89_ra_info *ra = &rtwsta->ra;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwsta->rtwvif);
	const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
	u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi);
	u64 ra_mask = 0;
	u64 ra_mask_bak;
	u8 mode = 0;
	u8 csi_mode = RTW89_RA_RPT_MODE_LEGACY;
	u8 bw_mode = 0;
	u8 stbc_en = 0;
	u8 ldpc_en = 0;
	u8 fix_giltf = 0;
	u8 i;
	bool sgi = false;
	bool fix_giltf_en = false;

	memset(ra, 0, sizeof(*ra));
	/* Set the ra mask from sta's capability */
	if (sta->deflink.he_cap.has_he) {
		mode |= RTW89_RA_MODE_HE;
		csi_mode = RTW89_RA_RPT_MODE_HE;
		ra_mask |= get_he_ra_mask(sta);
		high_rate_masks = rtw89_ra_mask_he_rates;
		if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[2] &
		    IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
			stbc_en = 1;
		if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[1] &
		    IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
			ldpc_en = 1;
		rtw89_phy_ra_gi_ltf(rtwdev, rtwsta, &fix_giltf_en, &fix_giltf);
	} else if (sta->deflink.vht_cap.vht_supported) {
		u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);

		mode |= RTW89_RA_MODE_VHT;
		csi_mode = RTW89_RA_RPT_MODE_VHT;
		/* MCS9, MCS8, MCS7 */
		ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1);
		high_rate_masks = rtw89_ra_mask_vht_rates;
		if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
			stbc_en = 1;
		if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
			ldpc_en = 1;
	} else if (sta->deflink.ht_cap.ht_supported) {
		mode |= RTW89_RA_MODE_HT;
		csi_mode = RTW89_RA_RPT_MODE_HT;
		ra_mask |= ((u64)sta->deflink.ht_cap.mcs.rx_mask[3] << 48) |
			   ((u64)sta->deflink.ht_cap.mcs.rx_mask[2] << 36) |
			   (sta->deflink.ht_cap.mcs.rx_mask[1] << 24) |
			   (sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
		high_rate_masks = rtw89_ra_mask_ht_rates;
		if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
			stbc_en = 1;
		if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
			ldpc_en = 1;
	}

	switch (chan->band_type) {
	case RTW89_BAND_2G:
		ra_mask |= sta->deflink.supp_rates[NL80211_BAND_2GHZ];
		if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0xf)
			mode |= RTW89_RA_MODE_CCK;
		if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0xff0)
			mode |= RTW89_RA_MODE_OFDM;
		break;
	case RTW89_BAND_5G:
		ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4;
		mode |= RTW89_RA_MODE_OFDM;
		break;
	case RTW89_BAND_6G:
		ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_6GHZ] << 4;
		mode |= RTW89_RA_MODE_OFDM;
		break;
	default:
		rtw89_err(rtwdev, "Unknown band type\n");
		break;
	}

	ra_mask_bak = ra_mask;

	if (mode >= RTW89_RA_MODE_HT) {
		u64 mask = 0;

		for (i = 0; i < rtwdev->hal.tx_nss; i++)
			mask |= high_rate_masks[i];
		if (mode & RTW89_RA_MODE_OFDM)
			mask |= RA_MASK_SUBOFDM_RATES;
		if (mode & RTW89_RA_MODE_CCK)
			mask |= RA_MASK_SUBCCK_RATES;
		ra_mask &= mask;
	} else if (mode & RTW89_RA_MODE_OFDM) {
		ra_mask &= (RA_MASK_OFDM_RATES | RA_MASK_SUBCCK_RATES);
	}

	if (mode != RTW89_RA_MODE_CCK)
		ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0);

	ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak);
	ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta);

	switch (sta->deflink.bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		bw_mode = RTW89_CHANNEL_WIDTH_160;
		sgi = sta->deflink.vht_cap.vht_supported &&
		      (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
		break;
	case IEEE80211_STA_RX_BW_80:
		bw_mode = RTW89_CHANNEL_WIDTH_80;
		sgi = sta->deflink.vht_cap.vht_supported &&
		      (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
		break;
	case IEEE80211_STA_RX_BW_40:
		bw_mode = RTW89_CHANNEL_WIDTH_40;
		sgi = sta->deflink.ht_cap.ht_supported &&
		      (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
		break;
	default:
		bw_mode = RTW89_CHANNEL_WIDTH_20;
		sgi = sta->deflink.ht_cap.ht_supported &&
		      (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
		break;
	}

	if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[3] &
	    IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM)
		ra->dcm_cap = 1;

	if (rate_pattern->enable && !vif->p2p) {
		ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta);
		ra_mask &= rate_pattern->ra_mask;
		mode = rate_pattern->ra_mode;
	}

	ra->bw_cap = bw_mode;
	ra->er_cap = rtwsta->er_cap;
	ra->mode_ctrl = mode;
	ra->macid = rtwsta->mac_id;
	ra->stbc_cap = stbc_en;
	ra->ldpc_cap = ldpc_en;
	ra->ss_num = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
	ra->en_sgi = sgi;
	ra->ra_mask = ra_mask;
	ra->fix_giltf_en = fix_giltf_en;
	ra->fix_giltf = fix_giltf;

	if (!csi)
		return;

	ra->fixed_csi_rate_en = false;
	ra->ra_csi_rate_en = true;
	ra->cr_tbl_sel = false;
	ra->band_num = rtwvif->phy_idx;
	ra->csi_bw = bw_mode;
	ra->csi_gi_ltf = RTW89_GILTF_LGI_4XHE32;
	ra->csi_mcs_ss_idx = 5;
	ra->csi_mode = csi_mode;
}

void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
			     u32 changed)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_ra_info *ra = &rtwsta->ra;

	rtw89_phy_ra_sta_update(rtwdev, sta, false);

	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED)
		ra->upd_mask = 1;
	if (changed & (IEEE80211_RC_BW_CHANGED | IEEE80211_RC_NSS_CHANGED))
		ra->upd_bw_nss_mask = 1;

	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra update: macid = %d, bw = %d, nss = %d, gi = %d %d\n",
		    ra->macid,
		    ra->bw_cap,
		    ra->ss_num,
		    ra->en_sgi,
		    ra->giltf);

	rtw89_fw_h2c_ra(rtwdev, ra, false);
}

static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next,
				 u16 rate_base, u64 ra_mask, u8 ra_mode,
				 u32 rate_ctrl, u32 ctrl_skip, bool force)
{
	u8 n, c;

	if (rate_ctrl == ctrl_skip)
		return true;

	n = hweight32(rate_ctrl);
	if (n == 0)
		return true;

	if (force && n != 1)
		return false;

	if (next->enable)
		return false;

	c = __fls(rate_ctrl);
	next->rate = rate_base + c;
	next->ra_mode = ra_mode;
	next->ra_mask = ra_mask;
	next->enable = true;

	return true;
}
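
/* Usage sketch, as read from the logic above: with force == true (the
 * HT/VHT/HE callers), exactly one bit in rate_ctrl fixes the rate to
 * rate_base + bit index; zero bits or rate_ctrl == ctrl_skip means "no
 * pattern requested" (true without enabling), and multiple bits cannot be
 * fixed (false aborts the caller).  For legacy rates, ctrl_skip is the
 * all-rates mask, so a fully populated bitmap counts as "unset".
 */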

#define RTW89_HW_RATE_BY_CHIP_GEN(rate) \
	{ \
		[RTW89_CHIP_AX] = RTW89_HW_RATE_ ## rate, \
		[RTW89_CHIP_BE] = RTW89_HW_RATE_V1_ ## rate, \
	}

void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
				struct ieee80211_vif *vif,
				const struct cfg80211_bitrate_mask *mask)
{
	struct ieee80211_supported_band *sband;
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	struct rtw89_phy_rate_pattern next_pattern = {0};
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	static const u16 hw_rate_he[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS4_MCS0),
	};
	static const u16 hw_rate_vht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS4_MCS0),
	};
	static const u16 hw_rate_ht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS8),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS16),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS24),
	};
	u8 band = chan->band_type;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
	u8 tx_nss = rtwdev->hal.tx_nss;
	u8 i;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_he[i][chip_gen],
					  RA_MASK_HE_RATES, RTW89_RA_MODE_HE,
					  mask->control[nl_band].he_mcs[i],
					  0, true))
			goto out;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_vht[i][chip_gen],
					  RA_MASK_VHT_RATES, RTW89_RA_MODE_VHT,
					  mask->control[nl_band].vht_mcs[i],
					  0, true))
			goto out;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_ht[i][chip_gen],
					  RA_MASK_HT_RATES, RTW89_RA_MODE_HT,
					  mask->control[nl_band].ht_mcs[i],
					  0, true))
			goto out;

	/* legacy cannot be empty for nl80211_parse_tx_bitrate_mask(), and
	 * ieee80211_set_bitrate_mask() requires at least one basic rate, so
	 * the decision just depends on whether all bitrates are set or not.
	 */
	sband = rtwdev->hw->wiphy->bands[nl_band];
	if (band == RTW89_BAND_2G) {
		if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_CCK1,
					  RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES,
					  RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM,
					  mask->control[nl_band].legacy,
					  BIT(sband->n_bitrates) - 1, false))
			goto out;
	} else {
		if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_OFDM6,
					  RA_MASK_OFDM_RATES, RTW89_RA_MODE_OFDM,
					  mask->control[nl_band].legacy,
					  BIT(sband->n_bitrates) - 1, false))
			goto out;
	}

	if (!next_pattern.enable)
		goto out;

	rtwvif->rate_pattern = next_pattern;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
#if defined(__linux__)
		    "configure pattern: rate 0x%x, mask 0x%llx, mode 0x%x\n",
#elif defined(__FreeBSD__)
		    "configure pattern: rate 0x%x, mask 0x%jx, mode 0x%x\n",
#endif
		    next_pattern.rate,
#if defined(__FreeBSD__)
		    (uintmax_t)
#endif
		    next_pattern.ra_mask,
		    next_pattern.ra_mode);
	return;

out:
	rtwvif->rate_pattern.enable = false;
	rtw89_debug(rtwdev, RTW89_DBG_RA, "unset rate pattern\n");
}

static void rtw89_phy_ra_updata_sta_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;

	rtw89_phy_ra_updata_sta(rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
}

void rtw89_phy_ra_update(struct rtw89_dev *rtwdev)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_ra_updata_sta_iter,
					  rtwdev);
}

void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_ra_info *ra = &rtwsta->ra;
	u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi) >> RSSI_FACTOR;
	bool csi = rtw89_sta_has_beamformer_cap(sta);

	rtw89_phy_ra_sta_update(rtwdev, sta, csi);

	if (rssi > 40)
		ra->init_rate_lv = 1;
	else if (rssi > 20)
		ra->init_rate_lv = 2;
	else if (rssi > 1)
		ra->init_rate_lv = 3;
	else
		ra->init_rate_lv = 0;
	ra->upd_all = 1;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra assoc: macid = %d, mode = %d, bw = %d, nss = %d, lv = %d\n",
		    ra->macid,
		    ra->mode_ctrl,
		    ra->bw_cap,
		    ra->ss_num,
		    ra->init_rate_lv);
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra assoc: dcm = %d, er = %d, ldpc = %d, stbc = %d, gi = %d %d\n",
		    ra->dcm_cap,
		    ra->er_cap,
		    ra->ldpc_cap,
		    ra->stbc_cap,
		    ra->en_sgi,
		    ra->giltf);

	rtw89_fw_h2c_ra(rtwdev, ra, csi);
}

u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
		      const struct rtw89_chan *chan,
		      enum rtw89_bandwidth dbw)
{
	enum rtw89_bandwidth cbw = chan->band_width;
	u8 pri_ch = chan->primary_channel;
	u8 central_ch = chan->channel;
	u8 txsc_idx = 0;
	u8 tmp = 0;

	if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
		return txsc_idx;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		txsc_idx = pri_ch > central_ch ? 1 : 2;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			if (pri_ch > central_ch)
				txsc_idx = (pri_ch - central_ch) >> 1;
			else
				txsc_idx = ((central_ch - pri_ch) >> 1) + 1;
		} else {
			txsc_idx = pri_ch > central_ch ? 9 : 10;
		}
		break;
	case RTW89_CHANNEL_WIDTH_160:
		if (pri_ch > central_ch)
			tmp = (pri_ch - central_ch) >> 1;
		else
			tmp = ((central_ch - pri_ch) >> 1) + 1;

		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			txsc_idx = tmp;
		} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
			if (tmp == 1 || tmp == 3)
				txsc_idx = 9;
			else if (tmp == 5 || tmp == 7)
				txsc_idx = 11;
			else if (tmp == 2 || tmp == 4)
				txsc_idx = 10;
			else if (tmp == 6 || tmp == 8)
				txsc_idx = 12;
			else
				return 0xff;
		} else {
			txsc_idx = pri_ch > central_ch ? 13 : 14;
		}
		break;
	case RTW89_CHANNEL_WIDTH_80_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			if (pri_ch > central_ch)
				txsc_idx = (10 - (pri_ch - central_ch)) >> 1;
			else
				txsc_idx = ((central_ch - pri_ch) >> 1) + 5;
		} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
			txsc_idx = pri_ch > central_ch ? 10 : 12;
		} else {
			txsc_idx = 14;
		}
		break;
	default:
		break;
	}

	return txsc_idx;
}
EXPORT_SYMBOL(rtw89_phy_get_txsc);
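
/* Worked example of the mapping above: for an 80 MHz channel with center
 * channel 42 and primary 20 MHz channel 36, dbw == 20 MHz gives
 * txsc_idx = ((42 - 36) >> 1) + 1 = 4, while dbw == 40 MHz gives
 * txsc_idx = 10 since the primary channel lies below the center.
 */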

static bool rtw89_phy_check_swsi_busy(struct rtw89_dev *rtwdev)
{
	return !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_W_BUSY_V1) ||
	       !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_R_BUSY_V1);
}

u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
		      u32 addr, u32 mask)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 val, direct_addr;

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	val = rtw89_phy_read32_mask(rtwdev, direct_addr, mask);

	return val;
}
EXPORT_SYMBOL(rtw89_phy_read_rf);

static u32 rtw89_phy_read_rf_a(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path rf_path, u32 addr, u32 mask)
{
	bool busy;
	bool done;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
				       1, 30, false, rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "read rf busy swsi\n");
		return INV_RF_DATA;
	}

	mask &= RFREG_MASK;

	val = FIELD_PREP(B_SWSI_READ_ADDR_PATH_V1, rf_path) |
	      FIELD_PREP(B_SWSI_READ_ADDR_ADDR_V1, addr);
	rtw89_phy_write32_mask(rtwdev, R_SWSI_READ_ADDR_V1, B_SWSI_READ_ADDR_V1, val);
	udelay(2);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done, 1,
				       30, false, rtwdev, R_SWSI_V1,
				       B_SWSI_R_DATA_DONE_V1);
	if (ret) {
		rtw89_err(rtwdev, "read swsi busy\n");
		return INV_RF_DATA;
	}

	return rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, mask);
}

u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			 u32 addr, u32 mask)
{
	bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	if (ad_sel)
		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
	else
		return rtw89_phy_read_rf_a(rtwdev, rf_path, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v1);

bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			u32 addr, u32 mask, u32 data)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 direct_addr;

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	rtw89_phy_write32_mask(rtwdev, direct_addr, mask, data);

	/* delay to ensure the write takes effect */
	udelay(1);

	return true;
}
EXPORT_SYMBOL(rtw89_phy_write_rf);

static bool rtw89_phy_write_rf_a(struct rtw89_dev *rtwdev,
				 enum rtw89_rf_path rf_path, u32 addr, u32 mask,
				 u32 data)
{
	u8 bit_shift;
	u32 val;
	bool busy, b_msk_en = false;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
				       1, 30, false, rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "write rf busy swsi\n");
		return false;
	}

	data &= RFREG_MASK;
	mask &= RFREG_MASK;

	if (mask != RFREG_MASK) {
		b_msk_en = true;
		rtw89_phy_write32_mask(rtwdev, R_SWSI_BIT_MASK_V1, RFREG_MASK,
				       mask);
		bit_shift = __ffs(mask);
		data = (data << bit_shift) & RFREG_MASK;
	}

	val = FIELD_PREP(B_SWSI_DATA_BIT_MASK_EN_V1, b_msk_en) |
	      FIELD_PREP(B_SWSI_DATA_PATH_V1, rf_path) |
	      FIELD_PREP(B_SWSI_DATA_ADDR_V1, addr) |
	      FIELD_PREP(B_SWSI_DATA_VAL_V1, data);

	rtw89_phy_write32_mask(rtwdev, R_SWSI_DATA_V1, MASKDWORD, val);

	return true;
}

bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			   u32 addr, u32 mask, u32 data)
{
	bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	if (ad_sel)
		return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
	else
		return rtw89_phy_write_rf_a(rtwdev, rf_path, addr, mask, data);
}
EXPORT_SYMBOL(rtw89_phy_write_rf_v1);

static bool rtw89_chip_rf_v1(struct rtw89_dev *rtwdev)
{
	return rtwdev->chip->ops->write_rf == rtw89_phy_write_rf_v1;
}

static void rtw89_phy_bb_reset(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	chip->ops->bb_reset(rtwdev, phy_idx);
}

static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev,
				    const struct rtw89_reg2_def *reg,
				    enum rtw89_rf_path rf_path,
				    void *extra_data)
{
	if (reg->addr == 0xfe)
		mdelay(50);
	else if (reg->addr == 0xfd)
		mdelay(5);
	else if (reg->addr == 0xfc)
		mdelay(1);
	else if (reg->addr == 0xfb)
		udelay(50);
	else if (reg->addr == 0xfa)
		udelay(5);
	else if (reg->addr == 0xf9)
		udelay(1);
	else
		rtw89_phy_write32(rtwdev, reg->addr, reg->data);
}

union rtw89_phy_bb_gain_arg {
	u32 addr;
	struct {
		union {
			u8 type;
			struct {
				u8 rxsc_start:4;
				u8 bw:4;
			};
		};
		u8 path;
		u8 gain_band;
		u8 cfg_type;
	};
} __packed;
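
/* Decoding sketch (assuming a little-endian host, which this overlay
 * relies on): for arg.addr == 0x03020100, the low byte 0x00 is type (or
 * the rxsc_start/bw nibbles), followed by path == 0x01,
 * gain_band == 0x02 and cfg_type == 0x03.
 */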

static void
rtw89_phy_cfg_bb_gain_error(struct rtw89_dev *rtwdev,
			    union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
	u8 type = arg.type;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	int i;

	switch (type) {
	case 0:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_gain[gband][path][i] = data & 0xff;
		break;
	case 1:
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_gain[gband][path][i] = data & 0xff;
		break;
	case 2:
		for (i = 0; i < 2; i++, data >>= 8)
			gain->tia_gain[gband][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain error {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
		break;
	}
}

enum rtw89_phy_bb_rxsc_start_idx {
	RTW89_BB_RXSC_START_IDX_FULL = 0,
	RTW89_BB_RXSC_START_IDX_20 = 1,
	RTW89_BB_RXSC_START_IDX_20_1 = 5,
	RTW89_BB_RXSC_START_IDX_40 = 9,
	RTW89_BB_RXSC_START_IDX_80 = 13,
};

static void
rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev *rtwdev,
			  union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
	u8 rxsc_start = arg.rxsc_start;
	u8 bw = arg.bw;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	u8 rxsc;
	s8 ofst;
	int i;

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_20:
		gain->rpl_ofst_20[gband][path] = (s8)data;
		break;
	case RTW89_CHANNEL_WIDTH_40:
		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
			gain->rpl_ofst_40[gband][path][0] = (s8)data;
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
			for (i = 0; i < 2; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_40[gband][path][rxsc] = ofst;
			}
		}
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
			gain->rpl_ofst_80[gband][path][0] = (s8)data;
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_80[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
			for (i = 0; i < 2; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_80[gband][path][rxsc] = ofst;
			}
		}
		break;
	case RTW89_CHANNEL_WIDTH_160:
		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
			gain->rpl_ofst_160[gband][path][0] = (s8)data;
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20_1) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20_1 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_80) {
			for (i = 0; i < 2; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_80 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		}
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb rpl ofst {0x%x:0x%x} with unknown bw: %d\n",
			   arg.addr, data, bw);
		break;
	}
}

static void
rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev *rtwdev,
			     union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
	u8 type = arg.type;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	int i;

	switch (type) {
	case 0:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
		break;
	case 1:
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain bypass {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
		break;
	}
}

static void
rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
			    union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
	u8 type = arg.type;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	int i;

	switch (type) {
	case 0:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_op1db[gband][path][i] = data & 0xff;
		break;
	case 1:
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_op1db[gband][path][i] = data & 0xff;
		break;
	case 2:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
		break;
	case 3:
		for (i = 4; i < 8; i++, data >>= 8)
			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain op1db {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
		break;
	}
}

static void rtw89_phy_config_bb_gain(struct rtw89_dev *rtwdev,
				     const struct rtw89_reg2_def *reg,
				     enum rtw89_rf_path rf_path,
				     void *extra_data)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	union rtw89_phy_bb_gain_arg arg = { .addr = reg->addr };
	struct rtw89_efuse *efuse = &rtwdev->efuse;

	if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR)
		return;

	if (arg.path >= chip->rf_path_num)
		return;

	if (arg.addr >= 0xf9 && arg.addr <= 0xfe) {
		rtw89_warn(rtwdev, "bb gain table with flow ctrl\n");
		return;
	}

	switch (arg.cfg_type) {
	case 0:
		rtw89_phy_cfg_bb_gain_error(rtwdev, arg, reg->data);
		break;
	case 1:
		rtw89_phy_cfg_bb_rpl_ofst(rtwdev, arg, reg->data);
		break;
	case 2:
		rtw89_phy_cfg_bb_gain_bypass(rtwdev, arg, reg->data);
		break;
	case 3:
		rtw89_phy_cfg_bb_gain_op1db(rtwdev, arg, reg->data);
		break;
	case 4:
		/* This cfg_type is only used by rfe_type >= 50 with eFEM */
		if (efuse->rfe_type < 50)
			break;
		fallthrough;
	default:
		rtw89_warn(rtwdev,
			   "bb gain {0x%x:0x%x} with unknown cfg type: %d\n",
			   arg.addr, reg->data, arg.cfg_type);
		break;
	}
}

static void
rtw89_phy_config_rf_reg_store(struct rtw89_dev *rtwdev,
			      const struct rtw89_reg2_def *reg,
			      enum rtw89_rf_path rf_path,
			      struct rtw89_fw_h2c_rf_reg_info *info)
{
	u16 idx = info->curr_idx % RTW89_H2C_RF_PAGE_SIZE;
	u8 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE;

	if (page >= RTW89_H2C_RF_PAGE_NUM) {
		rtw89_warn(rtwdev, "RF parameters exceed size. path=%d, idx=%d\n",
			   rf_path, info->curr_idx);
		return;
	}

	info->rtw89_phy_config_rf_h2c[page][idx] =
		cpu_to_le32((reg->addr << 20) | reg->data);
	info->curr_idx++;
}
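
/* Each stored word packs the RF register offset above a 20-bit register
 * value: (addr << 20) | data, where data is expected to fit within
 * RFREG_MASK.  For example, addr 0x18 with data 0x12345 is stored as
 * cpu_to_le32(0x01812345).
 */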

static int rtw89_phy_config_rf_reg_fw(struct rtw89_dev *rtwdev,
				      struct rtw89_fw_h2c_rf_reg_info *info)
{
	u16 remain = info->curr_idx;
	u16 len = 0;
	u8 i;
	int ret = 0;

	if (remain > RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE) {
		rtw89_warn(rtwdev,
			   "rf reg h2c total len %d larger than %d\n",
			   remain, RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < RTW89_H2C_RF_PAGE_NUM && remain; i++, remain -= len) {
		len = remain > RTW89_H2C_RF_PAGE_SIZE ? RTW89_H2C_RF_PAGE_SIZE : remain;
		ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len * 4, i);
		if (ret)
			goto out;
	}
out:
	info->curr_idx = 0;

	return ret;
}
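
/* E.g. if curr_idx holds one full page plus a few entries, the loop above
 * issues two H2C commands: the first carrying RTW89_H2C_RF_PAGE_SIZE
 * 32-bit entries (hence len * 4 bytes), the second carrying the remainder.
 */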

static void rtw89_phy_config_rf_reg_noio(struct rtw89_dev *rtwdev,
					 const struct rtw89_reg2_def *reg,
					 enum rtw89_rf_path rf_path,
					 void *extra_data)
{
	u32 addr = reg->addr;

	if (addr == 0xfe || addr == 0xfd || addr == 0xfc || addr == 0xfb ||
	    addr == 0xfa || addr == 0xf9)
		return;

	if (rtw89_chip_rf_v1(rtwdev) && addr < 0x100)
		return;

	rtw89_phy_config_rf_reg_store(rtwdev, reg, rf_path,
				      (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
}

static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev,
				    const struct rtw89_reg2_def *reg,
				    enum rtw89_rf_path rf_path,
				    void *extra_data)
{
	if (reg->addr == 0xfe) {
		mdelay(50);
	} else if (reg->addr == 0xfd) {
		mdelay(5);
	} else if (reg->addr == 0xfc) {
		mdelay(1);
	} else if (reg->addr == 0xfb) {
		udelay(50);
	} else if (reg->addr == 0xfa) {
		udelay(5);
	} else if (reg->addr == 0xf9) {
		udelay(1);
	} else {
		rtw89_write_rf(rtwdev, rf_path, reg->addr, 0xfffff, reg->data);
		rtw89_phy_config_rf_reg_store(rtwdev, reg, rf_path,
					      (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
	}
}

void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
				const struct rtw89_reg2_def *reg,
				enum rtw89_rf_path rf_path,
				void *extra_data)
{
	rtw89_write_rf(rtwdev, rf_path, reg->addr, RFREG_MASK, reg->data);

	if (reg->addr < 0x100)
		return;

	rtw89_phy_config_rf_reg_store(rtwdev, reg, rf_path,
				      (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
}
EXPORT_SYMBOL(rtw89_phy_config_rf_reg_v1);

static int rtw89_phy_sel_headline(struct rtw89_dev *rtwdev,
				  const struct rtw89_phy_table *table,
				  u32 *headline_size, u32 *headline_idx,
				  u8 rfe, u8 cv)
{
	const struct rtw89_reg2_def *reg;
	u32 headline;
	u32 compare, target;
	u8 rfe_para, cv_para;
	u8 cv_max = 0;
	bool case_matched = false;
	u32 i;

	for (i = 0; i < table->n_regs; i++) {
		reg = &table->regs[i];
		headline = get_phy_headline(reg->addr);
		if (headline != PHY_HEADLINE_VALID)
			break;
	}
	*headline_size = i;
	if (*headline_size == 0)
		return 0;

	/* case 1: RFE match, CV match */
	compare = get_phy_compare(rfe, cv);
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		target = get_phy_target(reg->addr);
		if (target == compare) {
			*headline_idx = i;
			return 0;
		}
	}

	/* case 2: RFE match, CV don't care */
	compare = get_phy_compare(rfe, PHY_COND_DONT_CARE);
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		target = get_phy_target(reg->addr);
		if (target == compare) {
			*headline_idx = i;
			return 0;
		}
	}

	/* case 3: RFE match, CV max in table */
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		rfe_para = get_phy_cond_rfe(reg->addr);
		cv_para = get_phy_cond_cv(reg->addr);
		if (rfe_para == rfe) {
			if (cv_para >= cv_max) {
				cv_max = cv_para;
				*headline_idx = i;
				case_matched = true;
			}
		}
	}

	if (case_matched)
		return 0;

	/* case 4: RFE don't care, CV max in table */
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		rfe_para = get_phy_cond_rfe(reg->addr);
		cv_para = get_phy_cond_cv(reg->addr);
		if (rfe_para == PHY_COND_DONT_CARE) {
			if (cv_para >= cv_max) {
				cv_max = cv_para;
				*headline_idx = i;
				case_matched = true;
			}
		}
	}

	if (case_matched)
		return 0;

	return -EINVAL;
}

static void rtw89_phy_init_reg(struct rtw89_dev *rtwdev,
			       const struct rtw89_phy_table *table,
			       void (*config)(struct rtw89_dev *rtwdev,
					      const struct rtw89_reg2_def *reg,
					      enum rtw89_rf_path rf_path,
					      void *data),
			       void *extra_data)
{
	const struct rtw89_reg2_def *reg;
	enum rtw89_rf_path rf_path = table->rf_path;
	u8 rfe = rtwdev->efuse.rfe_type;
	u8 cv = rtwdev->hal.cv;
	u32 i;
	u32 headline_size = 0, headline_idx = 0;
	u32 target = 0, cfg_target;
	u8 cond;
	bool is_matched = true;
	bool target_found = false;
	int ret;

	ret = rtw89_phy_sel_headline(rtwdev, table, &headline_size,
				     &headline_idx, rfe, cv);
	if (ret) {
		rtw89_err(rtwdev, "invalid PHY package: %d/%d\n", rfe, cv);
		return;
	}

	cfg_target = get_phy_target(table->regs[headline_idx].addr);
	for (i = headline_size; i < table->n_regs; i++) {
		reg = &table->regs[i];
		cond = get_phy_cond(reg->addr);
		switch (cond) {
		case PHY_COND_BRANCH_IF:
		case PHY_COND_BRANCH_ELIF:
			target = get_phy_target(reg->addr);
			break;
		case PHY_COND_BRANCH_ELSE:
			is_matched = false;
			if (!target_found) {
				rtw89_warn(rtwdev, "failed to load CR %x/%x\n",
					   reg->addr, reg->data);
				return;
			}
			break;
		case PHY_COND_BRANCH_END:
			is_matched = true;
			target_found = false;
			break;
		case PHY_COND_CHECK:
			if (target_found) {
				is_matched = false;
				break;
			}

			if (target == cfg_target) {
				is_matched = true;
				target_found = true;
			} else {
				is_matched = false;
				target_found = false;
			}
			break;
		default:
			if (is_matched)
				config(rtwdev, reg, rf_path, extra_data);
			break;
		}
	}
}
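
/* Walkthrough of the conditional encoding consumed above, as read from the
 * code: a BRANCH_IF/ELIF entry latches that branch's condition value, the
 * CHECK entry that follows compares it with cfg_target taken from the
 * selected headline, and plain register entries are applied only while the
 * first matching branch is active.  BRANCH_ELSE stops applying writes
 * (reaching it with no prior match is reported as a failed CR load), and
 * BRANCH_END returns to unconditional application.
 */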

void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_phy_table *bb_table;
	const struct rtw89_phy_table *bb_gain_table;

	bb_table = elm_info->bb_tbl ? elm_info->bb_tbl : chip->bb_table;
	rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, NULL);
	rtw89_chip_init_txpwr_unit(rtwdev, RTW89_PHY_0);

	bb_gain_table = elm_info->bb_gain ? elm_info->bb_gain : chip->bb_gain_table;
	if (bb_gain_table)
		rtw89_phy_init_reg(rtwdev, bb_gain_table,
				   rtw89_phy_config_bb_gain, NULL);
	rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0);
}

static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32(rtwdev, 0x8080, 0x4);
	udelay(1);
	return rtw89_phy_read32(rtwdev, 0x8080);
}

void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio)
{
	void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg,
		       enum rtw89_rf_path rf_path, void *data);
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_phy_table *rf_table;
	struct rtw89_fw_h2c_rf_reg_info *rf_reg_info;
	u8 path;

	rf_reg_info = kzalloc(sizeof(*rf_reg_info), GFP_KERNEL);
	if (!rf_reg_info)
		return;

	for (path = RF_PATH_A; path < chip->rf_path_num; path++) {
		rf_table = elm_info->rf_radio[path] ?
			   elm_info->rf_radio[path] : chip->rf_table[path];
		rf_reg_info->rf_path = rf_table->rf_path;
		if (noio)
			config = rtw89_phy_config_rf_reg_noio;
		else
			config = rf_table->config ? rf_table->config :
				 rtw89_phy_config_rf_reg;
		rtw89_phy_init_reg(rtwdev, rf_table, config, (void *)rf_reg_info);
		if (rtw89_phy_config_rf_reg_fw(rtwdev, rf_reg_info))
			rtw89_warn(rtwdev, "rf path %d reg h2c config failed\n",
				   rf_reg_info->rf_path);
	}
	kfree(rf_reg_info);
}

static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_phy_table *nctl_table;
	u32 val;
	int ret;

	/* IQK/DPK clock & reset */
	rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x3);
	rtw89_phy_write32_set(rtwdev, R_GNT_BT_WGT_EN, 0x1);
	rtw89_phy_write32_set(rtwdev, R_P0_PATH_RST, 0x8000000);
	if (chip->chip_id != RTL8851B)
		rtw89_phy_write32_set(rtwdev, R_P1_PATH_RST, 0x8000000);
	if (chip->chip_id == RTL8852B)
		rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x2);

	/* check 0x8080 */
	rtw89_phy_write32(rtwdev, R_NCTL_CFG, 0x8);

	ret = read_poll_timeout(rtw89_phy_nctl_poll, val, val == 0x4, 10,
				1000, false, rtwdev);
	if (ret)
#if defined(__linux__)
		rtw89_err(rtwdev, "failed to poll nctl block\n");
#elif defined(__FreeBSD__)
		rtw89_err(rtwdev, "failed to poll nctl block: ret %d val %#06x\n", ret, val);
#endif

	nctl_table = elm_info->rf_nctl ? elm_info->rf_nctl : chip->nctl_table;
	rtw89_phy_init_reg(rtwdev, nctl_table, rtw89_phy_config_bb_reg, NULL);

	if (chip->nctl_post_table)
		rtw89_rfk_parser(rtwdev, chip->nctl_post_table);
}

static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
{
	u32 phy_page = addr >> 8;
	u32 ofst = 0;

	switch (phy_page) {
	case 0x6:
	case 0x7:
	case 0x8:
	case 0x9:
	case 0xa:
	case 0xb:
	case 0xc:
	case 0xd:
	case 0x19:
	case 0x1a:
	case 0x1b:
		ofst = 0x2000;
		break;
	default:
		/* warning case */
		ofst = 0;
		break;
	}

	if (phy_page >= 0x40 && phy_page <= 0x4f)
		ofst = 0x2000;

	return ofst;
}

void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			   u32 data, enum rtw89_phy_idx phy_idx)
{
	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
	rtw89_phy_write32_mask(rtwdev, addr, mask, data);
}
EXPORT_SYMBOL(rtw89_phy_write32_idx);

u32 rtw89_phy_read32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			 enum rtw89_phy_idx phy_idx)
{
	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
	return rtw89_phy_read32_mask(rtwdev, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read32_idx);

void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			    u32 val)
{
	rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0);

	if (!rtwdev->dbcc_en)
		return;

	rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
}

void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
			      const struct rtw89_phy_reg3_tbl *tbl)
{
	const struct rtw89_reg3_def *reg3;
	int i;

	for (i = 0; i < tbl->size; i++) {
		reg3 = &tbl->reg3[i];
		rtw89_phy_write32_mask(rtwdev, reg3->addr, reg3->mask, reg3->data);
	}
}
EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl);

static const u8 rtw89_rs_idx_num[] = {
	[RTW89_RS_CCK] = RTW89_RATE_CCK_NUM,
	[RTW89_RS_OFDM] = RTW89_RATE_OFDM_NUM,
	[RTW89_RS_MCS] = RTW89_RATE_MCS_NUM,
	[RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_NUM,
	[RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_NUM,
};

static const u8 rtw89_rs_nss_num[] = {
	[RTW89_RS_CCK] = 1,
	[RTW89_RS_OFDM] = 1,
	[RTW89_RS_MCS] = RTW89_NSS_NUM,
	[RTW89_RS_HEDCM] = RTW89_NSS_HEDCM_NUM,
	[RTW89_RS_OFFSET] = 1,
};

static const u8 _byr_of_rs[] = {
	[RTW89_RS_CCK] = offsetof(struct rtw89_txpwr_byrate, cck),
	[RTW89_RS_OFDM] = offsetof(struct rtw89_txpwr_byrate, ofdm),
	[RTW89_RS_MCS] = offsetof(struct rtw89_txpwr_byrate, mcs),
	[RTW89_RS_HEDCM] = offsetof(struct rtw89_txpwr_byrate, hedcm),
	[RTW89_RS_OFFSET] = offsetof(struct rtw89_txpwr_byrate, offset),
};

#define _byr_seek(rs, raw) ((s8 *)(raw) + _byr_of_rs[rs])
#define _byr_idx(rs, nss, idx) ((nss) * rtw89_rs_idx_num[rs] + (idx))
#define _byr_chk(rs, nss, idx) \
	((nss) < rtw89_rs_nss_num[rs] && (idx) < rtw89_rs_idx_num[rs])
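
/* Example: for rs == RTW89_RS_MCS, nss == 1, idx == 3, _byr_chk() verifies
 * both values against rtw89_rs_nss_num/rtw89_rs_idx_num, _byr_seek()
 * points at the mcs[] member of struct rtw89_txpwr_byrate, and _byr_idx()
 * yields 1 * RTW89_RATE_MCS_NUM + 3, i.e. MCS3 of the second spatial
 * stream.
 */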

void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
				 const struct rtw89_txpwr_table *tbl)
{
	const struct rtw89_txpwr_byrate_cfg *cfg = tbl->data;
	const struct rtw89_txpwr_byrate_cfg *end = cfg + tbl->size;
	s8 *byr;
	u32 data;
	u8 i, idx;

	for (; cfg < end; cfg++) {
		byr = _byr_seek(cfg->rs, &rtwdev->byr[cfg->band]);
		data = cfg->data;

		for (i = 0; i < cfg->len; i++, data >>= 8) {
			idx = _byr_idx(cfg->rs, cfg->nss, (cfg->shf + i));
			byr[idx] = (s8)(data & 0xff);
		}
	}
}
EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);

#define _phy_txpwr_rf_to_mac(rtwdev, txpwr_rf)				\
({									\
	const struct rtw89_chip_info *__c = (rtwdev)->chip;		\
	(txpwr_rf) >> (__c->txpwr_factor_rf - __c->txpwr_factor_mac);	\
})
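
/* A hypothetical conversion to illustrate the macro, assuming
 * txpwr_factor_rf == 3 (0.125 dB steps) and txpwr_factor_mac == 1
 * (0.5 dB steps): the shift is 2, so an RF-unit value of 40 (5 dB)
 * becomes 10 in MAC units (still 5 dB).
 */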

static
s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band,
			       const struct rtw89_rate_desc *rate_desc)
{
	s8 *byr;
	u8 idx;

	if (rate_desc->rs == RTW89_RS_CCK)
		band = RTW89_BAND_2G;

	if (!_byr_chk(rate_desc->rs, rate_desc->nss, rate_desc->idx)) {
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "[TXPWR] unknown byrate desc rs=%d nss=%d idx=%d\n",
			    rate_desc->rs, rate_desc->nss, rate_desc->idx);

		return 0;
	}

	byr = _byr_seek(rate_desc->rs, &rtwdev->byr[band]);
	idx = _byr_idx(rate_desc->rs, rate_desc->nss, rate_desc->idx);

	return _phy_txpwr_rf_to_mac(rtwdev, byr[idx]);
}

static u8 rtw89_channel_6g_to_idx(struct rtw89_dev *rtwdev, u8 channel_6g)
{
	switch (channel_6g) {
	case 1 ... 29:
		return (channel_6g - 1) / 2;
	case 33 ... 61:
		return (channel_6g - 3) / 2;
	case 65 ... 93:
		return (channel_6g - 5) / 2;
	case 97 ... 125:
		return (channel_6g - 7) / 2;
	case 129 ... 157:
		return (channel_6g - 9) / 2;
	case 161 ... 189:
		return (channel_6g - 11) / 2;
	case 193 ... 221:
		return (channel_6g - 13) / 2;
	case 225 ... 253:
		return (channel_6g - 15) / 2;
	default:
		rtw89_warn(rtwdev, "unknown 6g channel: %d\n", channel_6g);
		return 0;
	}
}
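
/* Example: 6 GHz channels are odd-numbered in blocks of fifteen (1..29,
 * 33..61, ...), and each block maps onto consecutive indexes; channel 5
 * yields (5 - 1) / 2 = 2 and channel 33, the start of the second block,
 * yields (33 - 3) / 2 = 15.
 */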

static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 band, u8 channel)
{
	if (band == RTW89_BAND_6G)
		return rtw89_channel_6g_to_idx(rtwdev, channel);

	switch (channel) {
	case 1 ... 14:
		return channel - 1;
	case 36 ... 64:
		return (channel - 36) / 2;
	case 100 ... 144:
		return ((channel - 100) / 2) + 15;
	case 149 ... 177:
		return ((channel - 149) / 2) + 38;
	default:
		rtw89_warn(rtwdev, "unknown channel: %d\n", channel);
		return 0;
	}
}

s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
			      u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch)
{
	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
	const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
	u8 regd = rtw89_regd_get(rtwdev, band);
	u8 reg6 = regulatory->reg_6ghz_power;
	s8 lmt = 0, sar;

	switch (band) {
	case RTW89_BAND_2G:
		lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_5G:
		lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_6G:
		lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][RTW89_WW]
				       [RTW89_REG_6GHZ_POWER_DFLT]
				       [ch_idx];
		break;
	default:
		rtw89_warn(rtwdev, "unknown band type: %d\n", band);
		return 0;
	}

	lmt = _phy_txpwr_rf_to_mac(rtwdev, lmt);
	sar = rtw89_query_sar(rtwdev);

	return min(lmt, sar);
}
EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit);

#define __fill_txpwr_limit_nonbf_bf(ptr, band, bw, ntx, rs, ch)		\
	do {								\
		u8 __i;							\
		for (__i = 0; __i < RTW89_BF_NUM; __i++)		\
			ptr[__i] = rtw89_phy_read_txpwr_limit(rtwdev,	\
							      band,	\
							      bw, ntx,	\
							      rs, __i,	\
							      (ch));	\
	} while (0)

static void rtw89_phy_fill_txpwr_limit_20m(struct rtw89_dev *rtwdev,
					   struct rtw89_txpwr_limit *lmt,
					   u8 band, u8 ntx, u8 ch)
{
	__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch);
}

static void rtw89_phy_fill_txpwr_limit_40m(struct rtw89_dev *rtwdev,
					   struct rtw89_txpwr_limit *lmt,
					   u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_CCK, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, pri_ch);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch);
}

static void rtw89_phy_fill_txpwr_limit_80m(struct rtw89_dev *rtwdev,
					   struct rtw89_txpwr_limit *lmt,
					   u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	s8 val_0p5_n[RTW89_BF_NUM];
	s8 val_0p5_p[RTW89_BF_NUM];
	u8 i;

	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, pri_ch);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
				    RTW89_CHANNEL_WIDTH_80,
				    ntx, RTW89_RS_MCS, ch);

	__fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);
}

static void rtw89_phy_fill_txpwr_limit_160m(struct rtw89_dev *rtwdev,
					    struct rtw89_txpwr_limit *lmt,
					    u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	s8 val_0p5_n[RTW89_BF_NUM];
	s8 val_0p5_p[RTW89_BF_NUM];
	s8 val_2p5_n[RTW89_BF_NUM];
	s8 val_2p5_p[RTW89_BF_NUM];
	u8 i;

	/* fill ofdm section */
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, pri_ch);

	/* fill mcs 20m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 14);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 10);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[4], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[5], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[6], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 10);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[7], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 14);

	/* fill mcs 40m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 12);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[2], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[3], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 12);

	/* fill mcs 80m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
				    RTW89_CHANNEL_WIDTH_80,
				    ntx, RTW89_RS_MCS, ch - 8);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[1], band,
				    RTW89_CHANNEL_WIDTH_80,
				    ntx, RTW89_RS_MCS, ch + 8);

	/* fill mcs 160m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_160m, band,
				    RTW89_CHANNEL_WIDTH_160,
				    ntx, RTW89_RS_MCS, ch);

	/* fill mcs 40m 0p5 section */
	__fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);

	/* fill mcs 40m 2p5 section */
	__fill_txpwr_limit_nonbf_bf(val_2p5_n, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 8);
	__fill_txpwr_limit_nonbf_bf(val_2p5_p, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 8);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_2p5[i] = min_t(s8, val_2p5_n[i], val_2p5_p[i]);
}
1875
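/* Fill one TX power limit page for the given bandwidth. The helpers above
 * index sub-channels as offsets from the center channel number, where a
 * step of 4 corresponds to 20 MHz.
 */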
static
void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev,
				const struct rtw89_chan *chan,
				struct rtw89_txpwr_limit *lmt,
				u8 ntx)
{
	u8 band = chan->band_type;
	u8 pri_ch = chan->primary_channel;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;

	memset(lmt, 0, sizeof(*lmt));

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_20:
		rtw89_phy_fill_txpwr_limit_20m(rtwdev, lmt, band, ntx, ch);
		break;
	case RTW89_CHANNEL_WIDTH_40:
		rtw89_phy_fill_txpwr_limit_40m(rtwdev, lmt, band, ntx, ch,
					       pri_ch);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rtw89_phy_fill_txpwr_limit_80m(rtwdev, lmt, band, ntx, ch,
					       pri_ch);
		break;
	case RTW89_CHANNEL_WIDTH_160:
		rtw89_phy_fill_txpwr_limit_160m(rtwdev, lmt, band, ntx, ch,
						pri_ch);
		break;
	}
}

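/* Look up the per-RU TX power limit for the given band/RU/path/channel.
 * A zero regd-specific entry falls back to the worldwide (RTW89_WW)
 * table; the result is converted to MAC units and clamped by SAR.
 */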
static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
					u8 ru, u8 ntx, u8 ch)
{
	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
	const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
	u8 regd = rtw89_regd_get(rtwdev, band);
	u8 reg6 = regulatory->reg_6ghz_power;
	s8 lmt_ru = 0, sar;

	switch (band) {
	case RTW89_BAND_2G:
		lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][regd][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_5G:
		lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][regd][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_6G:
		lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][regd][reg6][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][RTW89_WW]
					     [RTW89_REG_6GHZ_POWER_DFLT]
					     [ch_idx];
		break;
	default:
		rtw89_warn(rtwdev, "unknown band type: %d\n", band);
		return 0;
	}

	lmt_ru = _phy_txpwr_rf_to_mac(rtwdev, lmt_ru);
	sar = rtw89_query_sar(rtwdev);

	return min(lmt_ru, sar);
}

static void
rtw89_phy_fill_txpwr_limit_ru_20m(struct rtw89_dev *rtwdev,
				  struct rtw89_txpwr_limit_ru *lmt_ru,
				  u8 band, u8 ntx, u8 ch)
{
	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch);
	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch);
	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch);
}

static void
rtw89_phy_fill_txpwr_limit_ru_40m(struct rtw89_dev *rtwdev,
				  struct rtw89_txpwr_limit_ru *lmt_ru,
				  u8 band, u8 ntx, u8 ch)
{
	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch - 2);
	lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch + 2);
	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch - 2);
	lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch + 2);
	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch - 2);
	lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch + 2);
}

static void
rtw89_phy_fill_txpwr_limit_ru_80m(struct rtw89_dev *rtwdev,
				  struct rtw89_txpwr_limit_ru *lmt_ru,
				  u8 band, u8 ntx, u8 ch)
{
	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch - 6);
	lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch - 2);
	lmt_ru->ru26[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch + 2);
	lmt_ru->ru26[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch + 6);
	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch - 6);
	lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch - 2);
	lmt_ru->ru52[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch + 2);
	lmt_ru->ru52[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch + 6);
	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch - 6);
	lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch - 2);
	lmt_ru->ru106[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch + 2);
	lmt_ru->ru106[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch + 6);
}

static void
rtw89_phy_fill_txpwr_limit_ru_160m(struct rtw89_dev *rtwdev,
				   struct rtw89_txpwr_limit_ru *lmt_ru,
				   u8 band, u8 ntx, u8 ch)
{
	static const int ofst[] = { -14, -10, -6, -2, 2, 6, 10, 14 };
	int i;

#if defined(__linux__)
	static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM);
#elif defined(__FreeBSD__)
	rtw89_static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM);
#endif
	for (i = 0; i < RTW89_RU_SEC_NUM; i++) {
		lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
								RTW89_RU26,
								ntx,
								ch + ofst[i]);
		lmt_ru->ru52[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
								RTW89_RU52,
								ntx,
								ch + ofst[i]);
		lmt_ru->ru106[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
								 RTW89_RU106,
								 ntx,
								 ch + ofst[i]);
	}
}

static
void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev,
				   const struct rtw89_chan *chan,
				   struct rtw89_txpwr_limit_ru *lmt_ru,
				   u8 ntx)
{
	u8 band = chan->band_type;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;

	memset(lmt_ru, 0, sizeof(*lmt_ru));

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_20:
		rtw89_phy_fill_txpwr_limit_ru_20m(rtwdev, lmt_ru, band, ntx,
						  ch);
		break;
	case RTW89_CHANNEL_WIDTH_40:
		rtw89_phy_fill_txpwr_limit_ru_40m(rtwdev, lmt_ru, band, ntx,
						  ch);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rtw89_phy_fill_txpwr_limit_ru_80m(rtwdev, lmt_ru, band, ntx,
						  ch);
		break;
	case RTW89_CHANNEL_WIDTH_160:
		rtw89_phy_fill_txpwr_limit_ru_160m(rtwdev, lmt_ru, band, ntx,
						   ch);
		break;
	}
}

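/* Program the by-rate TX power table: values are read per rate descriptor
 * and packed four s8 entries per 32-bit register write, which is why each
 * rate section size must be a multiple of four (see the BUILD_BUG_ONs).
 */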
void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev,
				const struct rtw89_chan *chan,
				enum rtw89_phy_idx phy_idx)
{
	u8 max_nss_num = rtwdev->chip->rf_path_num;
	static const u8 rs[] = {
		RTW89_RS_CCK,
		RTW89_RS_OFDM,
		RTW89_RS_MCS,
		RTW89_RS_HEDCM,
	};
	struct rtw89_rate_desc cur;
	u8 band = chan->band_type;
	u8 ch = chan->channel;
	u32 addr, val;
	s8 v[4] = {};
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr byrate with ch=%d\n", ch);

	BUILD_BUG_ON(rtw89_rs_idx_num[RTW89_RS_CCK] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num[RTW89_RS_OFDM] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num[RTW89_RS_MCS] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num[RTW89_RS_HEDCM] % 4);

	addr = R_AX_PWR_BY_RATE;
	for (cur.nss = 0; cur.nss < max_nss_num; cur.nss++) {
		for (i = 0; i < ARRAY_SIZE(rs); i++) {
			if (cur.nss >= rtw89_rs_nss_num[rs[i]])
				continue;

			cur.rs = rs[i];
			for (cur.idx = 0; cur.idx < rtw89_rs_idx_num[rs[i]];
			     cur.idx++) {
				v[cur.idx % 4] =
					rtw89_phy_read_txpwr_byrate(rtwdev,
								    band,
								    &cur);

				if ((cur.idx + 1) % 4)
					continue;

				val = FIELD_PREP(GENMASK(7, 0), v[0]) |
				      FIELD_PREP(GENMASK(15, 8), v[1]) |
				      FIELD_PREP(GENMASK(23, 16), v[2]) |
				      FIELD_PREP(GENMASK(31, 24), v[3]);

				rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr,
							val);
				addr += 4;
			}
		}
	}
}
EXPORT_SYMBOL(rtw89_phy_set_txpwr_byrate);

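/* Program the per-rate-section TX power offsets: five values, one nibble
 * each, packed into bits 19:0 of R_AX_PWR_RATE_OFST_CTRL.
 */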
void rtw89_phy_set_txpwr_offset(struct rtw89_dev *rtwdev,
				const struct rtw89_chan *chan,
				enum rtw89_phy_idx phy_idx)
{
	struct rtw89_rate_desc desc = {
		.nss = RTW89_NSS_1,
		.rs = RTW89_RS_OFFSET,
	};
	u8 band = chan->band_type;
	s8 v[RTW89_RATE_OFFSET_NUM] = {};
	u32 val;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n");

	for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_NUM; desc.idx++)
		v[desc.idx] = rtw89_phy_read_txpwr_byrate(rtwdev, band, &desc);

	BUILD_BUG_ON(RTW89_RATE_OFFSET_NUM != 5);
	val = FIELD_PREP(GENMASK(3, 0), v[0]) |
	      FIELD_PREP(GENMASK(7, 4), v[1]) |
	      FIELD_PREP(GENMASK(11, 8), v[2]) |
	      FIELD_PREP(GENMASK(15, 12), v[3]) |
	      FIELD_PREP(GENMASK(19, 16), v[4]);

	rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL,
				     GENMASK(19, 0), val);
}
EXPORT_SYMBOL(rtw89_phy_set_txpwr_offset);

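/* Stream a filled limit page into hardware for each TX path, walking the
 * struct as raw s8 bytes, four per 32-bit write. The BUILD_BUG_ON checks
 * that the struct layout matches the hardware page size.
 */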
void rtw89_phy_set_txpwr_limit(struct rtw89_dev *rtwdev,
			       const struct rtw89_chan *chan,
			       enum rtw89_phy_idx phy_idx)
{
	u8 max_ntx_num = rtwdev->chip->rf_path_num;
	struct rtw89_txpwr_limit lmt;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;
	const s8 *ptr;
	u32 addr, val;
	u8 i, j;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw);

	BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit) !=
		     RTW89_TXPWR_LMT_PAGE_SIZE);

	addr = R_AX_PWR_LMT;
	for (i = 0; i < max_ntx_num; i++) {
		rtw89_phy_fill_txpwr_limit(rtwdev, chan, &lmt, i);

		ptr = (s8 *)&lmt;
		for (j = 0; j < RTW89_TXPWR_LMT_PAGE_SIZE;
		     j += 4, addr += 4, ptr += 4) {
			val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
			      FIELD_PREP(GENMASK(15, 8), ptr[1]) |
			      FIELD_PREP(GENMASK(23, 16), ptr[2]) |
			      FIELD_PREP(GENMASK(31, 24), ptr[3]);

			rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
		}
	}
}
EXPORT_SYMBOL(rtw89_phy_set_txpwr_limit);

void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
				  const struct rtw89_chan *chan,
				  enum rtw89_phy_idx phy_idx)
{
	u8 max_ntx_num = rtwdev->chip->rf_path_num;
	struct rtw89_txpwr_limit_ru lmt_ru;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;
	const s8 *ptr;
	u32 addr, val;
	u8 i, j;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw);

	BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ru) !=
		     RTW89_TXPWR_LMT_RU_PAGE_SIZE);

	addr = R_AX_PWR_RU_LMT;
	for (i = 0; i < max_ntx_num; i++) {
		rtw89_phy_fill_txpwr_limit_ru(rtwdev, chan, &lmt_ru, i);

		ptr = (s8 *)&lmt_ru;
		for (j = 0; j < RTW89_TXPWR_LMT_RU_PAGE_SIZE;
		     j += 4, addr += 4, ptr += 4) {
			val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
			      FIELD_PREP(GENMASK(15, 8), ptr[1]) |
			      FIELD_PREP(GENMASK(23, 16), ptr[2]) |
			      FIELD_PREP(GENMASK(31, 24), ptr[3]);

			rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
		}
	}
}
EXPORT_SYMBOL(rtw89_phy_set_txpwr_limit_ru);

struct rtw89_phy_iter_ra_data {
	struct rtw89_dev *rtwdev;
	struct sk_buff *c2h;
};

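/* Parse a rate-adaptation C2H report for the station matching mac_id and
 * translate the hardware rate code into a struct rate_info. format_v1
 * (BE-generation chips) carries extra MSBs of rate/bw/mode in separate
 * fields.
 */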
static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_phy_iter_ra_data *ra_data = (struct rtw89_phy_iter_ra_data *)data;
	struct rtw89_dev *rtwdev = ra_data->rtwdev;
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	const struct rtw89_c2h_ra_rpt *c2h =
		(const struct rtw89_c2h_ra_rpt *)ra_data->c2h->data;
	struct rtw89_ra_report *ra_report = &rtwsta->ra_report;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	bool format_v1 = chip->chip_gen == RTW89_CHIP_BE;
	u8 mode, rate, bw, giltf, mac_id;
	u16 legacy_bitrate;
	bool valid;
	u8 mcs = 0;
	u8 t;

	mac_id = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MACID);
	if (mac_id != rtwsta->mac_id)
		return;

	rate = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MCSNSS);
	bw = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW);
	giltf = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_GILTF);
	mode = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL);

	if (format_v1) {
		t = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MCSNSS_B7);
		rate |= u8_encode_bits(t, BIT(7));
		t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW_B2);
		bw |= u8_encode_bits(t, BIT(2));
		t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL_B2);
		mode |= u8_encode_bits(t, BIT(2));
	}

	if (mode == RTW89_RA_RPT_MODE_LEGACY) {
		valid = rtw89_ra_report_to_bitrate(rtwdev, rate, &legacy_bitrate);
		if (!valid)
			return;
	}

	memset(&ra_report->txrate, 0, sizeof(ra_report->txrate));

	switch (mode) {
	case RTW89_RA_RPT_MODE_LEGACY:
		ra_report->txrate.legacy = legacy_bitrate;
		break;
	case RTW89_RA_RPT_MODE_HT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_MCS;
		if (RTW89_CHK_FW_FEATURE(OLD_HT_RA_FORMAT, &rtwdev->fw))
			rate = RTW89_MK_HT_RATE(FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate),
						FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate));
		else
			rate = FIELD_GET(RTW89_RA_RATE_MASK_HT_MCS, rate);
		ra_report->txrate.mcs = rate;
		if (giltf)
			ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		mcs = ra_report->txrate.mcs & 0x07;
		break;
	case RTW89_RA_RPT_MODE_VHT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
		ra_report->txrate.mcs = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS);
		ra_report->txrate.nss = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1;
		if (giltf)
			ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		mcs = ra_report->txrate.mcs;
		break;
	case RTW89_RA_RPT_MODE_HE:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_HE_MCS;
		ra_report->txrate.mcs = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS);
		ra_report->txrate.nss = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1;
		if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08)
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_0_8;
		else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16)
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_1_6;
		else
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_3_2;
		mcs = ra_report->txrate.mcs;
		break;
	}

	ra_report->txrate.bw = rtw89_hw_to_rate_info_bw(bw);
	ra_report->bit_rate = cfg80211_calculate_bitrate(&ra_report->txrate);
	ra_report->hw_rate = format_v1 ?
			     u16_encode_bits(mode, RTW89_HW_RATE_V1_MASK_MOD) |
			     u16_encode_bits(rate, RTW89_HW_RATE_V1_MASK_VAL) :
			     u16_encode_bits(mode, RTW89_HW_RATE_MASK_MOD) |
			     u16_encode_bits(rate, RTW89_HW_RATE_MASK_VAL);
	ra_report->might_fallback_legacy = mcs <= 2;
	sta->deflink.agg.max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report);
	rtwsta->max_agg_wait = sta->deflink.agg.max_rc_amsdu_len / 1500 - 1;
}

static void
rtw89_phy_c2h_ra_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	struct rtw89_phy_iter_ra_data ra_data;

	ra_data.rtwdev = rtwdev;
	ra_data.c2h = c2h;
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_c2h_ra_rpt_iter,
					  &ra_data);
}

static
void (* const rtw89_phy_c2h_ra_handler[])(struct rtw89_dev *rtwdev,
					  struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_FUNC_STS_RPT] = rtw89_phy_c2h_ra_rpt,
	[RTW89_PHY_C2H_FUNC_MU_GPTBL_RPT] = NULL,
	[RTW89_PHY_C2H_FUNC_TXSTS] = NULL,
};

void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			  u32 len, u8 class, u8 func)
{
	void (*handler)(struct rtw89_dev *rtwdev,
			struct sk_buff *c2h, u32 len) = NULL;

	switch (class) {
	case RTW89_PHY_C2H_CLASS_RA:
		if (func < RTW89_PHY_C2H_FUNC_RA_MAX)
			handler = rtw89_phy_c2h_ra_handler[func];
		break;
	case RTW89_PHY_C2H_CLASS_DM:
		if (func == RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY)
			return;
		fallthrough;
	default:
		rtw89_info(rtwdev, "c2h class %d not supported\n", class);
		return;
	}
	if (!handler) {
		rtw89_info(rtwdev, "c2h class %d func %d not supported\n", class,
			   func);
		return;
	}
	handler(rtwdev, skb, len);
}

static u8 rtw89_phy_cfo_get_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo)
{
	const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
	u32 reg_mask;

	if (sc_xo)
		reg_mask = xtal->sc_xo_mask;
	else
		reg_mask = xtal->sc_xi_mask;

	return (u8)rtw89_read32_mask(rtwdev, xtal->xcap_reg, reg_mask);
}

static void rtw89_phy_cfo_set_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo,
				       u8 val)
{
	const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
	u32 reg_mask;

	if (sc_xo)
		reg_mask = xtal->sc_xo_mask;
	else
		reg_mask = xtal->sc_xi_mask;

	rtw89_write32_mask(rtwdev, xtal->xcap_reg, reg_mask, val);
}

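/* Write the crystal capacitance code. 8852A/8851B expose it through a
 * PHY xcap register; other chips go through the XTAL SI interface. The
 * value is read back so the cached crystal_cap reflects what took effect.
 */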
static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
					  u8 crystal_cap, bool force)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 sc_xi_val, sc_xo_val;

	if (!force && cfo->crystal_cap == crystal_cap)
		return;
	crystal_cap = clamp_t(u8, crystal_cap, 0, 127);
	if (chip->chip_id == RTL8852A || chip->chip_id == RTL8851B) {
		rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
		rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
		sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true);
		sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false);
	} else {
		rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO,
					crystal_cap, XTAL_SC_XO_MASK);
		rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI,
					crystal_cap, XTAL_SC_XI_MASK);
		rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO, &sc_xo_val);
		rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI, &sc_xi_val);
	}
	cfo->crystal_cap = sc_xi_val;
	cfo->x_cap_ofst = (s8)((int)cfo->crystal_cap - cfo->def_x_cap);

	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xi=0x%x\n", sc_xi_val);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xo=0x%x\n", sc_xo_val);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Get xcap_ofst=%d\n",
		    cfo->x_cap_ofst);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set xcap OK\n");
}

static void rtw89_phy_cfo_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	u8 cap;

	cfo->def_x_cap = cfo->crystal_cap_default & B_AX_XTAL_SC_MASK;
	cfo->is_adjust = false;
	if (cfo->crystal_cap == cfo->def_x_cap)
		return;
	cap = cfo->crystal_cap;
	cap += (cap > cfo->def_x_cap ? -1 : 1);
	rtw89_phy_cfo_set_crystal_cap(rtwdev, cap, false);
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "(0x%x) approach to dflt_val=(0x%x)\n", cfo->crystal_cap,
		    cfo->def_x_cap);
}

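/* Apply digital CFO compensation on top of crystal tuning: convert the
 * averaged CFO into the DCFO register's step units (hence the /625),
 * bias by the current B_DCFO value in the direction of the error, and
 * invert the sign on 8852A CBV silicon.
 */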
static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo)
{
	const struct rtw89_reg_def *dcfo_comp = rtwdev->chip->dcfo_comp;
	bool is_linked = rtwdev->total_sta_assoc > 0;
	s32 cfo_avg_312;
	s32 dcfo_comp_val;
	int sign;

	if (!is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: is_linked=%d\n",
			    is_linked);
		return;
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: curr_cfo=%d\n", curr_cfo);
	if (curr_cfo == 0)
		return;
	dcfo_comp_val = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO);
	sign = curr_cfo > 0 ? 1 : -1;
	cfo_avg_312 = curr_cfo / 625 + sign * dcfo_comp_val;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "avg_cfo_312=%d step\n", cfo_avg_312);
	if (rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV)
		cfo_avg_312 = -cfo_avg_312;
	rtw89_phy_set_phy_regs(rtwdev, dcfo_comp->addr, dcfo_comp->mask,
			       cfo_avg_312);
}

static void rtw89_dcfo_comp_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	rtw89_phy_set_phy_regs(rtwdev, R_DCFO_OPT, B_DCFO_OPT_EN, 1);
	rtw89_phy_set_phy_regs(rtwdev, R_DCFO_WEIGHT, B_DCFO_WEIGHT_MSK, 8);

	if (chip->cfo_hw_comp)
		rtw89_write32_mask(rtwdev, R_AX_PWR_UL_CTRL2,
				   B_AX_PWR_UL_CFO_MASK, 0x6);
	else
		rtw89_write32_clr(rtwdev, R_AX_PWR_UL_CTRL2, B_AX_PWR_UL_CFO_MASK);
}

static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_efuse *efuse = &rtwdev->efuse;

	cfo->crystal_cap_default = efuse->xtal_cap & B_AX_XTAL_SC_MASK;
	cfo->crystal_cap = cfo->crystal_cap_default;
	cfo->def_x_cap = cfo->crystal_cap;
	cfo->x_cap_ub = min_t(int, cfo->def_x_cap + CFO_BOUND, 0x7f);
	cfo->x_cap_lb = max_t(int, cfo->def_x_cap - CFO_BOUND, 0x1);
	cfo->is_adjust = false;
	cfo->divergence_lock_en = false;
	cfo->x_cap_ofst = 0;
	cfo->lock_cnt = 0;
	cfo->rtw89_multi_cfo_mode = RTW89_TP_BASED_AVG_MODE;
	cfo->apply_compensation = false;
	cfo->residual_cfo_acc = 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Default xcap=%x\n",
		    cfo->crystal_cap_default);
	rtw89_phy_cfo_set_crystal_cap(rtwdev, cfo->crystal_cap_default, true);
	rtw89_phy_set_phy_regs(rtwdev, R_DCFO, B_DCFO, 1);
	rtw89_dcfo_comp_init(rtwdev);
	cfo->cfo_timer_ms = 2000;
	cfo->cfo_trig_by_timer_en = false;
	cfo->phy_cfo_trk_cnt = 0;
	cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
	cfo->cfo_ul_ofdma_acc_mode = RTW89_CFO_UL_OFDMA_ACC_ENABLE;
}

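/* Nudge the crystal cap toward zero CFO with hysteresis: tracking turns
 * on above CFO_TRK_ENABLE_TH and off below CFO_TRK_STOP_TH, and the step
 * size (1/3/5/7) grows with the magnitude of the residual CFO.
 */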
static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
					     s32 curr_cfo)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	s8 crystal_cap = cfo->crystal_cap;
	s32 cfo_abs = abs(curr_cfo);
	int sign;

	if (!cfo->is_adjust) {
		if (cfo_abs > CFO_TRK_ENABLE_TH)
			cfo->is_adjust = true;
	} else {
		if (cfo_abs < CFO_TRK_STOP_TH)
			cfo->is_adjust = false;
	}
	if (!cfo->is_adjust) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Stop CFO tracking\n");
		return;
	}
	sign = curr_cfo > 0 ? 1 : -1;
	if (cfo_abs > CFO_TRK_STOP_TH_4)
		crystal_cap += 7 * sign;
	else if (cfo_abs > CFO_TRK_STOP_TH_3)
		crystal_cap += 5 * sign;
	else if (cfo_abs > CFO_TRK_STOP_TH_2)
		crystal_cap += 3 * sign;
	else if (cfo_abs > CFO_TRK_STOP_TH_1)
		crystal_cap += 1 * sign;
	else
		return;
	rtw89_phy_cfo_set_crystal_cap(rtwdev, (u8)crystal_cap, false);
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "X_cap{Curr,Default}={0x%x,0x%x}\n",
		    cfo->crystal_cap, cfo->def_x_cap);
}

static s32 rtw89_phy_average_cfo_calc(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	s32 cfo_khz_all = 0;
	s32 cfo_cnt_all = 0;
	s32 cfo_all_avg = 0;
	u8 i;

	if (rtwdev->total_sta_assoc != 1)
		return 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "one_entry_only\n");
	for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
		if (cfo->cfo_cnt[i] == 0)
			continue;
		cfo_khz_all += cfo->cfo_tail[i];
		cfo_cnt_all += cfo->cfo_cnt[i];
		cfo_all_avg = phy_div(cfo_khz_all, cfo_cnt_all);
		cfo->pre_cfo_avg[i] = cfo->cfo_avg[i];
		cfo->dcfo_avg = phy_div(cfo_khz_all << chip->dcfo_comp_sft,
					cfo_cnt_all);
	}
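	/* note: the loop above has run to completion here, so this prints
	 * CFO_TRACK_MAX_USER rather than the macid of a specific entry
	 */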
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "CFO track for macid = %d\n", i);
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "Total cfo=%dK, pkt_cnt=%d, avg_cfo=%dK\n",
		    cfo_khz_all, cfo_cnt_all, cfo_all_avg);
	return cfo_all_avg;
}

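/* Derive a single compensation target from multiple stations using one of
 * three policies: packet-weighted average, per-entry average, or (the
 * default) a throughput-weighted average clamped into the intersection of
 * all stations' tolerance windows. Per-entry throughput weighting is not
 * hooked up yet, as the "need tp for each entry" notes below indicate.
 */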
static s32 rtw89_phy_multi_sta_cfo_calc(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	s32 target_cfo = 0;
	s32 cfo_khz_all = 0;
	s32 cfo_khz_all_tp_wgt = 0;
	s32 cfo_avg = 0;
	s32 max_cfo_lb = BIT(31);
	s32 min_cfo_ub = GENMASK(30, 0);
	u16 cfo_cnt_all = 0;
	u8 active_entry_cnt = 0;
	u8 sta_cnt = 0;
	u32 tp_all = 0;
	u8 i;
	u8 cfo_tol = 0;

	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Multi entry cfo_trk\n");
	if (cfo->rtw89_multi_cfo_mode == RTW89_PKT_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt based avg mode\n");
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			if (cfo->cfo_cnt[i] == 0)
				continue;
			cfo_khz_all += cfo->cfo_tail[i];
			cfo_cnt_all += cfo->cfo_cnt[i];
			cfo_avg = phy_div(cfo_khz_all, (s32)cfo_cnt_all);
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "Msta cfo=%d, pkt_cnt=%d, avg_cfo=%d\n",
				    cfo_khz_all, cfo_cnt_all, cfo_avg);
			target_cfo = cfo_avg;
		}
	} else if (cfo->rtw89_multi_cfo_mode == RTW89_ENTRY_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Entry based avg mode\n");
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			if (cfo->cfo_cnt[i] == 0)
				continue;
			cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i],
						  (s32)cfo->cfo_cnt[i]);
			cfo_khz_all += cfo->cfo_avg[i];
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "Macid=%d, cfo_avg=%d\n", i,
				    cfo->cfo_avg[i]);
		}
		sta_cnt = rtwdev->total_sta_assoc;
		cfo_avg = phy_div(cfo_khz_all, (s32)sta_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO,
			    "Msta cfo_acc=%d, ent_cnt=%d, avg_cfo=%d\n",
			    cfo_khz_all, sta_cnt, cfo_avg);
		target_cfo = cfo_avg;
	} else if (cfo->rtw89_multi_cfo_mode == RTW89_TP_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "TP based avg mode\n");
		cfo_tol = cfo->sta_cfo_tolerance;
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			sta_cnt++;
			if (cfo->cfo_cnt[i] != 0) {
				cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i],
							  (s32)cfo->cfo_cnt[i]);
				active_entry_cnt++;
			} else {
				cfo->cfo_avg[i] = cfo->pre_cfo_avg[i];
			}
			max_cfo_lb = max(cfo->cfo_avg[i] - cfo_tol, max_cfo_lb);
			min_cfo_ub = min(cfo->cfo_avg[i] + cfo_tol, min_cfo_ub);
			cfo_khz_all += cfo->cfo_avg[i];
			/* need tp for each entry */
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "[%d] cfo_avg=%d, tp=tbd\n",
				    i, cfo->cfo_avg[i]);
			if (sta_cnt >= rtwdev->total_sta_assoc)
				break;
		}
		tp_all = stats->rx_throughput; /* need tp for each entry */
		cfo_avg = phy_div(cfo_khz_all_tp_wgt, (s32)tp_all);

		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Assoc sta cnt=%d\n",
			    sta_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Active sta cnt=%d\n",
			    active_entry_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO,
			    "Msta cfo with tp_wgt=%d, avg_cfo=%d\n",
			    cfo_khz_all_tp_wgt, cfo_avg);
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "cfo_lb=%d,cfo_ub=%d\n",
			    max_cfo_lb, min_cfo_ub);
		if (max_cfo_lb <= min_cfo_ub) {
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "cfo win_size=%d\n",
				    min_cfo_ub - max_cfo_lb);
			target_cfo = clamp(cfo_avg, max_cfo_lb, min_cfo_ub);
		} else {
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "No intersection of cfo tolerance windows\n");
			target_cfo = phy_div(cfo_khz_all, (s32)sta_cnt);
		}
		for (i = 0; i < CFO_TRACK_MAX_USER; i++)
			cfo->pre_cfo_avg[i] = cfo->cfo_avg[i];
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Target cfo=%d\n", target_cfo);
	return target_cfo;
}

static void rtw89_phy_cfo_statistics_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;

	memset(&cfo->cfo_tail, 0, sizeof(cfo->cfo_tail));
	memset(&cfo->cfo_cnt, 0, sizeof(cfo->cfo_cnt));
	cfo->packet_count = 0;
	cfo->packet_count_pre = 0;
	cfo->cfo_avg_pre = 0;
}

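/* Periodic CFO tracking: bail out when nothing changed, pick the single-
 * or multi-station estimator, and when the crystal cap diverges to either
 * bound, lock tracking for CFO_PERIOD_CNT rounds while stepping the cap
 * back toward its default.
 */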
static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	s32 new_cfo = 0;
	bool x_cap_update = false;
	u8 pre_x_cap = cfo->crystal_cap;
	u8 dcfo_comp_sft = rtwdev->chip->dcfo_comp_sft;

	cfo->dcfo_avg = 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "CFO:total_sta_assoc=%d\n",
		    rtwdev->total_sta_assoc);
	if (rtwdev->total_sta_assoc == 0) {
		rtw89_phy_cfo_reset(rtwdev);
		return;
	}
	if (cfo->packet_count == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt = 0\n");
		return;
	}
	if (cfo->packet_count == cfo->packet_count_pre) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt doesn't change\n");
		return;
	}
	if (rtwdev->total_sta_assoc == 1)
		new_cfo = rtw89_phy_average_cfo_calc(rtwdev);
	else
		new_cfo = rtw89_phy_multi_sta_cfo_calc(rtwdev);
	if (new_cfo == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n");
		return;
	}
	if (cfo->divergence_lock_en) {
		cfo->lock_cnt++;
		if (cfo->lock_cnt > CFO_PERIOD_CNT) {
			cfo->divergence_lock_en = false;
			cfo->lock_cnt = 0;
		} else {
			rtw89_phy_cfo_reset(rtwdev);
		}
		return;
	}
	if (cfo->crystal_cap >= cfo->x_cap_ub ||
	    cfo->crystal_cap <= cfo->x_cap_lb) {
		cfo->divergence_lock_en = true;
		rtw89_phy_cfo_reset(rtwdev);
		return;
	}

	rtw89_phy_cfo_crystal_cap_adjust(rtwdev, new_cfo);
	cfo->cfo_avg_pre = new_cfo;
	cfo->dcfo_avg_pre = cfo->dcfo_avg;
	x_cap_update = cfo->crystal_cap != pre_x_cap;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap_up=%d\n", x_cap_update);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap: D:%x C:%x->%x, ofst=%d\n",
		    cfo->def_x_cap, pre_x_cap, cfo->crystal_cap,
		    cfo->x_cap_ofst);
	if (x_cap_update) {
		if (cfo->dcfo_avg > 0)
			cfo->dcfo_avg -= CFO_SW_COMP_FINE_TUNE << dcfo_comp_sft;
		else
			cfo->dcfo_avg += CFO_SW_COMP_FINE_TUNE << dcfo_comp_sft;
	}
	rtw89_dcfo_comp(rtwdev, cfo->dcfo_avg);
	rtw89_phy_cfo_statistics_reset(rtwdev);
}

void rtw89_phy_cfo_track_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						cfo_track_work.work);
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;

	mutex_lock(&rtwdev->mutex);
	if (!cfo->cfo_trig_by_timer_en)
		goto out;
	rtw89_leave_ps_mode(rtwdev);
	rtw89_phy_cfo_dm(rtwdev);
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work,
				     msecs_to_jiffies(cfo->cfo_timer_ms));
out:
	mutex_unlock(&rtwdev->mutex);
}

static void rtw89_phy_cfo_start_work(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;

	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work,
				     msecs_to_jiffies(cfo->cfo_timer_ms));
}

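/* Watchdog-driven CFO state machine: NORMAL enters ENHANCE (timer-driven
 * compensation) when TX throughput exceeds CFO_TP_UPPER; ENHANCE moves to
 * HOLD once UL-OFDMA accumulation kicks in and the tracking count expires;
 * both fall back to NORMAL when throughput drops below CFO_TP_LOWER.
 */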
void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	bool is_ul_ofdma = false, ofdma_acc_en = false;

	if (stats->rx_tf_periodic > CFO_TF_CNT_TH)
		is_ul_ofdma = true;
	if (cfo->cfo_ul_ofdma_acc_mode == RTW89_CFO_UL_OFDMA_ACC_ENABLE &&
	    is_ul_ofdma)
		ofdma_acc_en = true;

	switch (cfo->phy_cfo_status) {
	case RTW89_PHY_DCFO_STATE_NORMAL:
		if (stats->tx_throughput >= CFO_TP_UPPER) {
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_ENHANCE;
			cfo->cfo_trig_by_timer_en = true;
			cfo->cfo_timer_ms = CFO_COMP_PERIOD;
			rtw89_phy_cfo_start_work(rtwdev);
		}
		break;
	case RTW89_PHY_DCFO_STATE_ENHANCE:
		if (stats->tx_throughput <= CFO_TP_LOWER)
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
		else if (ofdma_acc_en &&
			 cfo->phy_cfo_trk_cnt >= CFO_PERIOD_CNT)
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_HOLD;
		else
			cfo->phy_cfo_trk_cnt++;

		if (cfo->phy_cfo_status == RTW89_PHY_DCFO_STATE_NORMAL) {
			cfo->phy_cfo_trk_cnt = 0;
			cfo->cfo_trig_by_timer_en = false;
		}
		break;
	case RTW89_PHY_DCFO_STATE_HOLD:
		if (stats->tx_throughput <= CFO_TP_LOWER) {
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
			cfo->phy_cfo_trk_cnt = 0;
			cfo->cfo_trig_by_timer_en = false;
		} else {
			cfo->phy_cfo_trk_cnt++;
		}
		break;
	default:
		cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
		cfo->phy_cfo_trk_cnt = 0;
		break;
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "[CFO]WatchDog tp=%d,state=%d,timer_en=%d,trk_cnt=%d,thermal=%ld\n",
		    stats->tx_throughput, cfo->phy_cfo_status,
		    cfo->cfo_trig_by_timer_en, cfo->phy_cfo_trk_cnt,
		    ewma_thermal_read(&rtwdev->phystat.avg_thermal[0]));
	if (cfo->cfo_trig_by_timer_en)
		return;
	rtw89_phy_cfo_dm(rtwdev);
}

void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
			 struct rtw89_rx_phy_ppdu *phy_ppdu)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	u8 macid = phy_ppdu->mac_id;

	if (macid >= CFO_TRACK_MAX_USER) {
		rtw89_warn(rtwdev, "mac_id %d is out of range\n", macid);
		return;
	}

	cfo->cfo_tail[macid] += cfo_val;
	cfo->cfo_cnt[macid]++;
	cfo->packet_count++;
}

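/* Snapshot UL trigger-based (TB) control defaults at association time: the
 * configured triangular TX shaping index, and whether dynamic band-edge
 * control applies (5G/6G at >= 40 MHz, except on later 8852B cuts).
 */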
void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!chip->support_ul_tb_ctrl)
		return;

	rtwvif->def_tri_idx =
		rtw89_phy_read32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG);

	if (chip->chip_id == RTL8852B && rtwdev->hal.cv > CHIP_CBV)
		rtwvif->dyn_tb_bedge_en = false;
	else if (chan->band_type >= RTW89_BAND_5G &&
		 chan->band_width >= RTW89_CHANNEL_WIDTH_40)
		rtwvif->dyn_tb_bedge_en = true;
	else
		rtwvif->dyn_tb_bedge_en = false;

	rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
		    "[ULTB] def_if_bandedge=%d, def_tri_idx=%d\n",
		    ul_tb_info->def_if_bandedge, rtwvif->def_tri_idx);
	rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
		    "[ULTB] dyn_tb_bedge_en=%d, dyn_tb_tri_en=%d\n",
		    rtwvif->dyn_tb_bedge_en, ul_tb_info->dyn_tb_tri_en);
}

struct rtw89_phy_ul_tb_check_data {
	bool valid;
	bool high_tf_client;
	bool low_tf_client;
	bool dyn_tb_bedge_en;
	u8 def_tri_idx;
};

static
void rtw89_phy_ul_tb_ctrl_check(struct rtw89_dev *rtwdev,
				struct rtw89_vif *rtwvif,
				struct rtw89_phy_ul_tb_check_data *ul_tb_data)
{
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);

	if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION)
		return;

	if (!vif->cfg.assoc)
		return;

	if (stats->rx_tf_periodic > UL_TB_TF_CNT_L2H_TH)
		ul_tb_data->high_tf_client = true;
	else if (stats->rx_tf_periodic < UL_TB_TF_CNT_H2L_TH)
		ul_tb_data->low_tf_client = true;

	ul_tb_data->valid = true;
	ul_tb_data->def_tri_idx = rtwvif->def_tri_idx;
	ul_tb_data->dyn_tb_bedge_en = rtwvif->dyn_tb_bedge_en;
}

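/* Adapt band-edge and triangular TX shaping to trigger-frame load: a heavy
 * UL-OFDMA client gets both turned off, a light one gets the defaults
 * restored. Only runs with a single associated station.
 */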
void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;
	struct rtw89_phy_ul_tb_check_data ul_tb_data = {};
	struct rtw89_vif *rtwvif;

	if (!chip->support_ul_tb_ctrl)
		return;

	if (rtwdev->total_sta_assoc != 1)
		return;

	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		rtw89_phy_ul_tb_ctrl_check(rtwdev, rtwvif, &ul_tb_data);

	if (!ul_tb_data.valid)
		return;

	if (ul_tb_data.dyn_tb_bedge_en) {
		if (ul_tb_data.high_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, 0);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Turn off if_bandedge\n");
		} else if (ul_tb_data.low_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN,
					       ul_tb_info->def_if_bandedge);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Set to default if_bandedge = %d\n",
				    ul_tb_info->def_if_bandedge);
		}
	}

	if (ul_tb_info->dyn_tb_tri_en) {
		if (ul_tb_data.high_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT,
					       B_TXSHAPE_TRIANGULAR_CFG, 0);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Turn off Tx triangle\n");
		} else if (ul_tb_data.low_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT,
					       B_TXSHAPE_TRIANGULAR_CFG,
					       ul_tb_data.def_tri_idx);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Set to default tx_shap_idx = %d\n",
				    ul_tb_data.def_tri_idx);
		}
	}
}

static void rtw89_phy_ul_tb_info_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!chip->support_ul_tb_ctrl)
		return;

	ul_tb_info->dyn_tb_tri_en = true;
	ul_tb_info->def_if_bandedge =
		rtw89_phy_read32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN);
}

static
void rtw89_phy_antdiv_sts_instance_reset(struct rtw89_antdiv_stats *antdiv_sts)
{
	ewma_rssi_init(&antdiv_sts->cck_rssi_avg);
	ewma_rssi_init(&antdiv_sts->ofdm_rssi_avg);
	ewma_rssi_init(&antdiv_sts->non_legacy_rssi_avg);
	antdiv_sts->pkt_cnt_cck = 0;
	antdiv_sts->pkt_cnt_ofdm = 0;
	antdiv_sts->pkt_cnt_non_legacy = 0;
	antdiv_sts->evm = 0;
}

static void rtw89_phy_antdiv_sts_instance_add(struct rtw89_dev *rtwdev,
					      struct rtw89_rx_phy_ppdu *phy_ppdu,
					      struct rtw89_antdiv_stats *stats)
{
	if (rtw89_get_data_rate_mode(rtwdev, phy_ppdu->rate) == DATA_RATE_MODE_NON_HT) {
		if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6) {
			ewma_rssi_add(&stats->cck_rssi_avg, phy_ppdu->rssi_avg);
			stats->pkt_cnt_cck++;
		} else {
			ewma_rssi_add(&stats->ofdm_rssi_avg, phy_ppdu->rssi_avg);
			stats->pkt_cnt_ofdm++;
			stats->evm += phy_ppdu->ofdm.evm_min;
		}
	} else {
		ewma_rssi_add(&stats->non_legacy_rssi_avg, phy_ppdu->rssi_avg);
		stats->pkt_cnt_non_legacy++;
		stats->evm += phy_ppdu->ofdm.evm_min;
	}
}

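/* Report the RSSI average of whichever modulation class (CCK, OFDM or
 * non-legacy) saw the most packets in this window.
 */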
static u8 rtw89_phy_antdiv_sts_instance_get_rssi(struct rtw89_antdiv_stats *stats)
{
	if (stats->pkt_cnt_non_legacy >= stats->pkt_cnt_cck &&
	    stats->pkt_cnt_non_legacy >= stats->pkt_cnt_ofdm)
		return ewma_rssi_read(&stats->non_legacy_rssi_avg);
	else if (stats->pkt_cnt_ofdm >= stats->pkt_cnt_cck &&
		 stats->pkt_cnt_ofdm >= stats->pkt_cnt_non_legacy)
		return ewma_rssi_read(&stats->ofdm_rssi_avg);
	else
		return ewma_rssi_read(&stats->cck_rssi_avg);
}

static u8 rtw89_phy_antdiv_sts_instance_get_evm(struct rtw89_antdiv_stats *stats)
{
	return phy_div(stats->evm, stats->pkt_cnt_non_legacy + stats->pkt_cnt_ofdm);
}

void rtw89_phy_antdiv_parse(struct rtw89_dev *rtwdev,
			    struct rtw89_rx_phy_ppdu *phy_ppdu)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;

	if (!hal->ant_diversity || hal->ant_diversity_fixed)
		return;

	rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->target_stats);

	if (!antdiv->get_stats)
		return;

	if (hal->antenna_rx == RF_A)
		rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->main_stats);
	else if (hal->antenna_rx == RF_B)
		rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->aux_stats);
}

static void rtw89_phy_antdiv_reg_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_ANT_TRAIN_EN,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_TX_ANT_SEL,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_TRSW_TX_EXTEND,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_HW_ANTSW_DIS_BY_GNT_BT,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_BT_FORCE_ANTIDX_EN,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_RFSW_CTRL_ANT0_BASE, B_RFSW_CTRL_ANT_MAPPING,
			      0x0100, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_BTG_TRX,
			      0x1, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_HW_CTRL,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_2G,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_5G,
			      0x0, RTW89_PHY_0);
}

static void rtw89_phy_antdiv_sts_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;

	rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->main_stats);
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->aux_stats);
}

static void rtw89_phy_antdiv_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;

	if (!hal->ant_diversity)
		return;

	antdiv->get_stats = false;
	antdiv->rssi_pre = 0;
	rtw89_phy_antdiv_sts_reset(rtwdev);
	rtw89_phy_antdiv_reg_init(rtwdev);
}

static void rtw89_phy_stat_thermal_update(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
	int i;
	u8 th;

	for (i = 0; i < rtwdev->chip->rf_path_num; i++) {
		th = rtw89_chip_get_thermal(rtwdev, i);
		if (th)
			ewma_thermal_add(&phystat->avg_thermal[i], th);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "path(%d) thermal cur=%u avg=%ld\n", i, th,
			    ewma_thermal_read(&phystat->avg_thermal[i]));
	}
}

struct rtw89_phy_iter_rssi_data {
	struct rtw89_dev *rtwdev;
	struct rtw89_phy_ch_info *ch_info;
	bool rssi_changed;
};

static void rtw89_phy_stat_rssi_update_iter(void *data,
					    struct ieee80211_sta *sta)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_phy_iter_rssi_data *rssi_data =
					(struct rtw89_phy_iter_rssi_data *)data;
	struct rtw89_phy_ch_info *ch_info = rssi_data->ch_info;
	unsigned long rssi_curr;

	rssi_curr = ewma_rssi_read(&rtwsta->avg_rssi);

	if (rssi_curr < ch_info->rssi_min) {
		ch_info->rssi_min = rssi_curr;
		ch_info->rssi_min_macid = rtwsta->mac_id;
	}

	if (rtwsta->prev_rssi == 0) {
		rtwsta->prev_rssi = rssi_curr;
	} else if (abs((int)rtwsta->prev_rssi - (int)rssi_curr) > (3 << RSSI_FACTOR)) {
		rtwsta->prev_rssi = rssi_curr;
		rssi_data->rssi_changed = true;
	}
}

static void rtw89_phy_stat_rssi_update(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_iter_rssi_data rssi_data = {0};

	rssi_data.rtwdev = rtwdev;
	rssi_data.ch_info = &rtwdev->ch_info;
	rssi_data.ch_info->rssi_min = U8_MAX;
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_stat_rssi_update_iter,
					  &rssi_data);
	if (rssi_data.rssi_changed)
		rtw89_btc_ntfy_wl_sta(rtwdev);
}

static void rtw89_phy_stat_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
	int i;

	for (i = 0; i < rtwdev->chip->rf_path_num; i++)
		ewma_thermal_init(&phystat->avg_thermal[i]);

	rtw89_phy_stat_thermal_update(rtwdev);

	memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
	memset(&phystat->last_pkt_stat, 0, sizeof(phystat->last_pkt_stat));
}

void rtw89_phy_stat_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;

	rtw89_phy_stat_thermal_update(rtwdev);
	rtw89_phy_stat_rssi_update(rtwdev);

	phystat->last_pkt_stat = phystat->cur_pkt_stat;
	memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
}

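/* CCX counters tick in units of (CCX_US_BASE_RATIO << ccx_unit_idx)
 * microseconds; these helpers convert between microseconds and counter
 * values via the equivalent shift.
 */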
static u16 rtw89_phy_ccx_us_to_idx(struct rtw89_dev *rtwdev, u32 time_us)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;

	return time_us >> (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
}

static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev, u16 idx)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;

	return idx << (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
}

static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;

	env->ccx_manual_ctrl = false;
	env->ccx_ongoing = false;
	env->ccx_rac_lv = RTW89_RAC_RELEASE;
	env->ccx_period = 0;
	env->ccx_unit_idx = RTW89_CCX_32_US;

	rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_CCX_EN_MSK, 1);
	rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_CCX_TRIG_OPT_MSK, 1);
	rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_MEASUREMENT_TRIG_MSK, 1);
	rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_CCX_EDCCA_OPT_MSK,
			       RTW89_CCX_EDCCA_BW20_0);
}

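/* Scale a raw CCX counter into a ratio relative to 'score' (e.g. PERCENT
 * or PERMIL), rounding to nearest and clamping to score - 1.
 */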
static u16 rtw89_phy_ccx_get_report(struct rtw89_dev *rtwdev, u16 report,
				    u16 score)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	u32 numer = 0;
	u16 ret = 0;

	numer = report * score + (env->ccx_period >> 1);
	if (env->ccx_period)
		ret = numer / env->ccx_period;

	return ret >= score ? score - 1 : ret;
}

static void rtw89_phy_ccx_ms_to_period_unit(struct rtw89_dev *rtwdev,
					    u16 time_ms, u32 *period,
					    u32 *unit_idx)
{
	u32 idx;
	u8 quotient;

	if (time_ms >= CCX_MAX_PERIOD)
		time_ms = CCX_MAX_PERIOD;

	quotient = CCX_MAX_PERIOD_UNIT * time_ms / CCX_MAX_PERIOD;

	if (quotient < 4)
		idx = RTW89_CCX_4_US;
	else if (quotient < 8)
		idx = RTW89_CCX_8_US;
	else if (quotient < 16)
		idx = RTW89_CCX_16_US;
	else
		idx = RTW89_CCX_32_US;

	*unit_idx = idx;
	*period = (time_ms * MS_TO_4US_RATIO) >> idx;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "[Trigger Time] period:%d, unit_idx:%d\n",
		    *period, *unit_idx);
}

static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "lv:(%d)->(0)\n", env->ccx_rac_lv);

	env->ccx_ongoing = false;
	env->ccx_rac_lv = RTW89_RAC_RELEASE;
	env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
}

static bool rtw89_phy_ifs_clm_th_update_check(struct rtw89_dev *rtwdev,
					      struct rtw89_ccx_para_info *para)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	bool is_update = env->ifs_clm_app != para->ifs_clm_app;
	u8 i = 0;
	u16 *ifs_th_l = env->ifs_clm_th_l;
	u16 *ifs_th_h = env->ifs_clm_th_h;
	u32 ifs_th0_us = 0, ifs_th_times = 0;
	u32 ifs_th_h_us[RTW89_IFS_CLM_NUM] = {0};

	if (!is_update)
		goto ifs_update_finished;

	switch (para->ifs_clm_app) {
	case RTW89_IFS_CLM_INIT:
	case RTW89_IFS_CLM_BACKGROUND:
	case RTW89_IFS_CLM_ACS:
	case RTW89_IFS_CLM_DBG:
	case RTW89_IFS_CLM_DIG:
	case RTW89_IFS_CLM_TDMA_DIG:
		ifs_th0_us = IFS_CLM_TH0_UPPER;
		ifs_th_times = IFS_CLM_TH_MUL;
		break;
	case RTW89_IFS_CLM_DBG_MANUAL:
		ifs_th0_us = para->ifs_clm_manual_th0;
		ifs_th_times = para->ifs_clm_manual_th_times;
		break;
	default:
		break;
	}

	/* Set sampling threshold for 4 different regions, unit in idx_cnt.
	 * low[i] = high[i-1] + 1
	 * high[i] = high[i-1] * ifs_th_times
	 */
	ifs_th_l[IFS_CLM_TH_START_IDX] = 0;
	ifs_th_h_us[IFS_CLM_TH_START_IDX] = ifs_th0_us;
	ifs_th_h[IFS_CLM_TH_START_IDX] = rtw89_phy_ccx_us_to_idx(rtwdev,
								 ifs_th0_us);
	for (i = 1; i < RTW89_IFS_CLM_NUM; i++) {
		ifs_th_l[i] = ifs_th_h[i - 1] + 1;
		ifs_th_h_us[i] = ifs_th_h_us[i - 1] * ifs_th_times;
		ifs_th_h[i] = rtw89_phy_ccx_us_to_idx(rtwdev, ifs_th_h_us[i]);
	}

ifs_update_finished:
	if (!is_update)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "No need to update IFS_TH\n");

	return is_update;
}

static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	u8 i = 0;

	rtw89_phy_set_phy_regs(rtwdev, R_IFS_T1, B_IFS_T1_TH_LOW_MSK,
			       env->ifs_clm_th_l[0]);
	rtw89_phy_set_phy_regs(rtwdev, R_IFS_T2, B_IFS_T2_TH_LOW_MSK,
			       env->ifs_clm_th_l[1]);
	rtw89_phy_set_phy_regs(rtwdev, R_IFS_T3, B_IFS_T3_TH_LOW_MSK,
			       env->ifs_clm_th_l[2]);
	rtw89_phy_set_phy_regs(rtwdev, R_IFS_T4, B_IFS_T4_TH_LOW_MSK,
			       env->ifs_clm_th_l[3]);

	rtw89_phy_set_phy_regs(rtwdev, R_IFS_T1, B_IFS_T1_TH_HIGH_MSK,
			       env->ifs_clm_th_h[0]);
	rtw89_phy_set_phy_regs(rtwdev, R_IFS_T2, B_IFS_T2_TH_HIGH_MSK,
			       env->ifs_clm_th_h[1]);
	rtw89_phy_set_phy_regs(rtwdev, R_IFS_T3, B_IFS_T3_TH_HIGH_MSK,
			       env->ifs_clm_th_h[2]);
	rtw89_phy_set_phy_regs(rtwdev, R_IFS_T4, B_IFS_T4_TH_HIGH_MSK,
			       env->ifs_clm_th_h[3]);

	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Update IFS_T%d_th{low, high} : {%d, %d}\n",
			    i + 1, env->ifs_clm_th_l[i], env->ifs_clm_th_h[i]);
}

static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	struct rtw89_ccx_para_info para = {0};

	env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
	env->ifs_clm_mntr_time = 0;

	para.ifs_clm_app = RTW89_IFS_CLM_INIT;
	if (rtw89_phy_ifs_clm_th_update_check(rtwdev, &para))
		rtw89_phy_ifs_clm_set_th_reg(rtwdev);

	rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, B_IFS_COLLECT_EN,
			       true);
	rtw89_phy_set_phy_regs(rtwdev, R_IFS_T1, B_IFS_T1_EN_MSK, true);
	rtw89_phy_set_phy_regs(rtwdev, R_IFS_T2, B_IFS_T2_EN_MSK, true);
	rtw89_phy_set_phy_regs(rtwdev, R_IFS_T3, B_IFS_T3_EN_MSK, true);
	rtw89_phy_set_phy_regs(rtwdev, R_IFS_T4, B_IFS_T4_EN_MSK, true);
}

static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev,
				     enum rtw89_env_racing_lv level)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	int ret = 0;

	if (level >= RTW89_RAC_MAX_NUM) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "[WARNING] Wrong LV=%d\n", level);
		return -EINVAL;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "ccx_ongoing=%d, level:(%d)->(%d)\n", env->ccx_ongoing,
		    env->ccx_rac_lv, level);

	if (env->ccx_ongoing) {
		if (level <= env->ccx_rac_lv)
			ret = -EINVAL;
		else
			env->ccx_ongoing = false;
	}

	if (ret == 0)
		env->ccx_rac_lv = level;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "ccx racing success=%d\n",
		    !ret);

	return ret;
}

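/* Restart a measurement round: pulsing the clear/trigger bits low then
 * high resets the IFS counters and re-arms the CCX measurement.
 */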
3450static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev)
3451{
3452	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
3453
3454	rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, B_IFS_COUNTER_CLR_MSK, 0);
3455	rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_MEASUREMENT_TRIG_MSK, 0);
3456	rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, B_IFS_COUNTER_CLR_MSK, 1);
3457	rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_MEASUREMENT_TRIG_MSK, 1);
3458
3459	env->ccx_ongoing = true;
3460}
3461
3462static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev)
3463{
3464	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
3465	u8 i = 0;
3466	u32 res = 0;
3467
3468	env->ifs_clm_tx_ratio =
3469		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_tx, PERCENT);
3470	env->ifs_clm_edcca_excl_cca_ratio =
3471		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_edcca_excl_cca,
3472					 PERCENT);
3473	env->ifs_clm_cck_fa_ratio =
3474		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERCENT);
3475	env->ifs_clm_ofdm_fa_ratio =
3476		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERCENT);
3477	env->ifs_clm_cck_cca_excl_fa_ratio =
3478		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckcca_excl_fa,
3479					 PERCENT);
3480	env->ifs_clm_ofdm_cca_excl_fa_ratio =
3481		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmcca_excl_fa,
3482					 PERCENT);
3483	env->ifs_clm_cck_fa_permil =
3484		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERMIL);
3485	env->ifs_clm_ofdm_fa_permil =
3486		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERMIL);
3487
	for (i = 0; i < RTW89_IFS_CLM_NUM; i++) {
		if (env->ifs_clm_his[i] > ENV_MNTR_IFSCLM_HIS_MAX) {
			env->ifs_clm_ifs_avg[i] = ENV_MNTR_FAIL_DWORD;
		} else {
			env->ifs_clm_ifs_avg[i] =
				rtw89_phy_ccx_idx_to_us(rtwdev,
							env->ifs_clm_avg[i]);
		}

		res = rtw89_phy_ccx_idx_to_us(rtwdev, env->ifs_clm_cca[i]);
		res += env->ifs_clm_his[i] >> 1;
		if (env->ifs_clm_his[i])
			res /= env->ifs_clm_his[i];
		else
			res = 0;
		env->ifs_clm_cca_avg[i] = res;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM ratio {Tx, EDCCA_exclu_cca} = {%d, %d}\n",
		    env->ifs_clm_tx_ratio, env->ifs_clm_edcca_excl_cca_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA ratio {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_fa_ratio, env->ifs_clm_ofdm_fa_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA permil {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM CCA_exclu_FA ratio {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_cca_excl_fa_ratio,
		    env->ifs_clm_ofdm_cca_excl_fa_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "Time:[his, ifs_avg(us), cca_avg(us)]\n");
	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "T%d:[%d, %d, %d]\n",
			    i + 1, env->ifs_clm_his[i], env->ifs_clm_ifs_avg[i],
			    env->ifs_clm_cca_avg[i]);
}

static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	u8 i = 0;

	if (rtw89_phy_read32_mask(rtwdev, R_IFSCNT, B_IFSCNT_DONE_MSK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Get IFS_CLM report Fail\n");
		return false;
	}

	env->ifs_clm_tx =
		rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_TX_CNT,
				      B_IFS_CLM_TX_CNT_MSK);
	env->ifs_clm_edcca_excl_cca =
		rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_TX_CNT,
				      B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK);
	env->ifs_clm_cckcca_excl_fa =
		rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_CCA,
				      B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK);
	env->ifs_clm_ofdmcca_excl_fa =
		rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_CCA,
				      B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK);
	env->ifs_clm_cckfa =
		rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_FA,
				      B_IFS_CLM_CCK_FA_MSK);
	env->ifs_clm_ofdmfa =
		rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_FA,
				      B_IFS_CLM_OFDM_FA_MSK);

	env->ifs_clm_his[0] =
		rtw89_phy_read32_mask(rtwdev, R_IFS_HIS, B_IFS_T1_HIS_MSK);
	env->ifs_clm_his[1] =
		rtw89_phy_read32_mask(rtwdev, R_IFS_HIS, B_IFS_T2_HIS_MSK);
	env->ifs_clm_his[2] =
		rtw89_phy_read32_mask(rtwdev, R_IFS_HIS, B_IFS_T3_HIS_MSK);
	env->ifs_clm_his[3] =
		rtw89_phy_read32_mask(rtwdev, R_IFS_HIS, B_IFS_T4_HIS_MSK);

	env->ifs_clm_avg[0] =
		rtw89_phy_read32_mask(rtwdev, R_IFS_AVG_L, B_IFS_T1_AVG_MSK);
	env->ifs_clm_avg[1] =
		rtw89_phy_read32_mask(rtwdev, R_IFS_AVG_L, B_IFS_T2_AVG_MSK);
	env->ifs_clm_avg[2] =
		rtw89_phy_read32_mask(rtwdev, R_IFS_AVG_H, B_IFS_T3_AVG_MSK);
	env->ifs_clm_avg[3] =
		rtw89_phy_read32_mask(rtwdev, R_IFS_AVG_H, B_IFS_T4_AVG_MSK);

	env->ifs_clm_cca[0] =
		rtw89_phy_read32_mask(rtwdev, R_IFS_CCA_L, B_IFS_T1_CCA_MSK);
	env->ifs_clm_cca[1] =
		rtw89_phy_read32_mask(rtwdev, R_IFS_CCA_L, B_IFS_T2_CCA_MSK);
	env->ifs_clm_cca[2] =
		rtw89_phy_read32_mask(rtwdev, R_IFS_CCA_H, B_IFS_T3_CCA_MSK);
	env->ifs_clm_cca[3] =
		rtw89_phy_read32_mask(rtwdev, R_IFS_CCA_H, B_IFS_T4_CCA_MSK);

	env->ifs_clm_total_ifs =
		rtw89_phy_read32_mask(rtwdev, R_IFSCNT, B_IFSCNT_TOTAL_CNT_MSK);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "IFS-CLM total_ifs = %d\n",
		    env->ifs_clm_total_ifs);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "{Tx, EDCCA_exclu_cca} = {%d, %d}\n",
		    env->ifs_clm_tx, env->ifs_clm_edcca_excl_cca);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA{CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cckfa, env->ifs_clm_ofdmfa);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM CCA_exclu_FA{CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cckcca_excl_fa, env->ifs_clm_ofdmcca_excl_fa);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Time:[his, avg, cca]\n");
	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "T%d:[%d, %d, %d]\n", i + 1, env->ifs_clm_his[i],
			    env->ifs_clm_avg[i], env->ifs_clm_cca[i]);

	rtw89_phy_ifs_clm_get_utility(rtwdev);

	return true;
}

static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
				 struct rtw89_ccx_para_info *para)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	u32 period = 0;
	u32 unit_idx = 0;

	if (para->mntr_time == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "[WARN] MNTR_TIME is 0\n");
		return -EINVAL;
	}

	if (rtw89_phy_ccx_racing_ctrl(rtwdev, para->rac_lv))
		return -EINVAL;

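	/* Convert the monitor time into a hardware (period, unit) pair
	 * only when it changed; caching ifs_clm_mntr_time avoids redundant
	 * register writes on every watchdog pass.
	 */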
	if (para->mntr_time != env->ifs_clm_mntr_time) {
		rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time,
						&period, &unit_idx);
		rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER,
				       B_IFS_CLM_PERIOD_MSK, period);
		rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER,
				       B_IFS_CLM_COUNTER_UNIT_MSK, unit_idx);

		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Update IFS-CLM time ((%d)) -> ((%d))\n",
			    env->ifs_clm_mntr_time, para->mntr_time);

		env->ifs_clm_mntr_time = para->mntr_time;
		env->ccx_period = (u16)period;
		env->ccx_unit_idx = (u8)unit_idx;
	}

	if (rtw89_phy_ifs_clm_th_update_check(rtwdev, para)) {
		env->ifs_clm_app = para->ifs_clm_app;
		rtw89_phy_ifs_clm_set_th_reg(rtwdev);
	}

	return 0;
}

void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	struct rtw89_ccx_para_info para = {0};
	u8 chk_result = RTW89_PHY_ENV_MON_CCX_FAIL;

	env->ccx_watchdog_result = RTW89_PHY_ENV_MON_CCX_FAIL;
	if (env->ccx_manual_ctrl) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "CCX in manual ctrl\n");
		return;
	}

	/* only ifs_clm for now */
	if (rtw89_phy_ifs_clm_get_result(rtwdev))
		env->ccx_watchdog_result |= RTW89_PHY_ENV_MON_IFS_CLM;

	rtw89_phy_ccx_racing_release(rtwdev);
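	/* Re-arm a 1900 ms background measurement, presumably sized to
	 * complete before the next watchdog pass collects the result.
	 */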
	para.mntr_time = 1900;
	para.rac_lv = RTW89_RAC_LV_1;
	para.ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;

	if (rtw89_phy_ifs_clm_set(rtwdev, &para) == 0)
		chk_result |= RTW89_PHY_ENV_MON_IFS_CLM;
	if (chk_result)
		rtw89_phy_ccx_trigger(rtwdev);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "get_result=0x%x, chk_result:0x%x\n",
		    env->ccx_watchdog_result, chk_result);
}

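/* The phy-status bitmap registers have no slot for the reserved page, so
 * valid pages above RTW89_RSVD_9 shift down by one to keep the register
 * layout contiguous (e.g. assuming RTW89_RSVD_9 is 9, page 10 maps to
 * bitmap slot 9).
 */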
static bool rtw89_physts_ie_page_valid(enum rtw89_phy_status_bitmap *ie_page)
{
	if (*ie_page >= RTW89_PHYSTS_BITMAP_NUM ||
	    *ie_page == RTW89_RSVD_9)
		return false;
	else if (*ie_page > RTW89_RSVD_9)
		*ie_page -= 1;

	return true;
}

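/* Each IE bitmap page occupies one 32-bit register, so the page index is
 * scaled by 4 bytes (<< 2) from the start address.
 */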
static u32 rtw89_phy_get_ie_bitmap_addr(enum rtw89_phy_status_bitmap ie_page)
{
	static const u8 ie_page_shift = 2;

	return R_PHY_STS_BITMAP_ADDR_START + (ie_page << ie_page_shift);
}

static u32 rtw89_physts_get_ie_bitmap(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_status_bitmap ie_page)
{
	u32 addr;

	if (!rtw89_physts_ie_page_valid(&ie_page))
		return 0;

	addr = rtw89_phy_get_ie_bitmap_addr(ie_page);

	return rtw89_phy_read32(rtwdev, addr);
}

static void rtw89_physts_set_ie_bitmap(struct rtw89_dev *rtwdev,
				       enum rtw89_phy_status_bitmap ie_page,
				       u32 val)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 addr;

	if (!rtw89_physts_ie_page_valid(&ie_page))
		return;

	if (chip->chip_id == RTL8852A)
		val &= B_PHY_STS_BITMAP_MSK_52A;

	addr = rtw89_phy_get_ie_bitmap_addr(ie_page);
	rtw89_phy_write32(rtwdev, addr, val);
}

static void rtw89_physts_enable_ie_bitmap(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_status_bitmap bitmap,
					  enum rtw89_phy_status_ie_type ie,
					  bool enable)
{
	u32 val = rtw89_physts_get_ie_bitmap(rtwdev, bitmap);

	if (enable)
		val |= BIT(ie);
	else
		val &= ~BIT(ie);

	rtw89_physts_set_ie_bitmap(rtwdev, bitmap, val);
}

static void rtw89_physts_enable_fail_report(struct rtw89_dev *rtwdev,
					    bool enable,
					    enum rtw89_phy_idx phy_idx)
{
	if (enable) {
		rtw89_phy_write32_clr(rtwdev, R_PLCP_HISTOGRAM,
				      B_STS_DIS_TRIG_BY_FAIL);
		rtw89_phy_write32_clr(rtwdev, R_PLCP_HISTOGRAM,
				      B_STS_DIS_TRIG_BY_BRK);
	} else {
		rtw89_phy_write32_set(rtwdev, R_PLCP_HISTOGRAM,
				      B_STS_DIS_TRIG_BY_FAIL);
		rtw89_phy_write32_set(rtwdev, R_PLCP_HISTOGRAM,
				      B_STS_DIS_TRIG_BY_BRK);
	}
}

static void rtw89_physts_parsing_init(struct rtw89_dev *rtwdev)
{
	u8 i;

	rtw89_physts_enable_fail_report(rtwdev, false, RTW89_PHY_0);

	for (i = 0; i < RTW89_PHYSTS_BITMAP_NUM; i++) {
		if (i >= RTW89_CCK_PKT)
			rtw89_physts_enable_ie_bitmap(rtwdev, i,
						      RTW89_PHYSTS_IE09_FTR_0,
						      true);
		if ((i >= RTW89_CCK_BRK && i <= RTW89_VHT_MU) ||
		    (i >= RTW89_RSVD_9 && i <= RTW89_CCK_PKT))
			continue;
		rtw89_physts_enable_ie_bitmap(rtwdev, i,
					      RTW89_PHYSTS_IE24_OFDM_TD_PATH_A,
					      true);
	}
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_VHT_PKT,
				      RTW89_PHYSTS_IE13_DL_MU_DEF, true);
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_HE_PKT,
				      RTW89_PHYSTS_IE13_DL_MU_DEF, true);

	/* force IE01 for channel index, only channel field is valid */
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_CCK_PKT,
				      RTW89_PHYSTS_IE01_CMN_OFDM, true);
}

static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_dig_info *dig = &rtwdev->dig;
	const struct rtw89_phy_dig_gain_cfg *cfg;
	const char *msg;
	u8 i;
	s8 gain_base;
	s8 *gain_arr;
	u32 tmp;

	switch (type) {
	case RTW89_DIG_GAIN_LNA_G:
		gain_arr = dig->lna_gain_g;
		gain_base = LNA0_GAIN;
		cfg = chip->dig_table->cfg_lna_g;
		msg = "lna_gain_g";
		break;
	case RTW89_DIG_GAIN_TIA_G:
		gain_arr = dig->tia_gain_g;
		gain_base = TIA0_GAIN_G;
		cfg = chip->dig_table->cfg_tia_g;
		msg = "tia_gain_g";
		break;
	case RTW89_DIG_GAIN_LNA_A:
		gain_arr = dig->lna_gain_a;
		gain_base = LNA0_GAIN;
		cfg = chip->dig_table->cfg_lna_a;
		msg = "lna_gain_a";
		break;
	case RTW89_DIG_GAIN_TIA_A:
		gain_arr = dig->tia_gain_a;
		gain_base = TIA0_GAIN_A;
		cfg = chip->dig_table->cfg_tia_a;
		msg = "tia_gain_a";
		break;
	default:
		return;
	}

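	/* Each table entry is a signed gain packed above DIG_GAIN_SHIFT;
	 * sign_extend32() restores the sign of the field (assuming
	 * U4_MAX_BIT is the sign-bit index of a 4-bit value, i.e. 3, so
	 * e.g. raw 0xc decodes as -4), and gain_base steps by DIG_GAIN
	 * per index.
	 */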
	for (i = 0; i < cfg->size; i++) {
		tmp = rtw89_phy_read32_mask(rtwdev, cfg->table[i].addr,
					    cfg->table[i].mask);
		tmp >>= DIG_GAIN_SHIFT;
		gain_arr[i] = sign_extend32(tmp, U4_MAX_BIT) + gain_base;
		gain_base += DIG_GAIN;

		rtw89_debug(rtwdev, RTW89_DBG_DIG, "%s[%d]=%d\n",
			    msg, i, gain_arr[i]);
	}
}

static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u32 tmp;
	u8 i;

	if (!rtwdev->hal.support_igi)
		return;

	tmp = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PKPW,
				    B_PATH0_IB_PKPW_MSK);
	dig->ib_pkpwr = sign_extend32(tmp >> DIG_GAIN_SHIFT, U8_MAX_BIT);
	dig->ib_pbk = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PBK,
					    B_PATH0_IB_PBK_MSK);
	rtw89_debug(rtwdev, RTW89_DBG_DIG, "ib_pkpwr=%d, ib_pbk=%d\n",
		    dig->ib_pkpwr, dig->ib_pbk);

	for (i = RTW89_DIG_GAIN_LNA_G; i < RTW89_DIG_GAIN_MAX; i++)
		rtw89_phy_dig_read_gain_table(rtwdev, i);
}

static const u8 rssi_nolink = 22;
static const u8 igi_rssi_th[IGI_RSSI_TH_NUM] = {68, 84, 90, 98, 104};
static const u16 fa_th_2g[FA_TH_NUM] = {22, 44, 66, 88};
static const u16 fa_th_5g[FA_TH_NUM] = {4, 8, 12, 16};
static const u16 fa_th_nolink[FA_TH_NUM] = {196, 352, 440, 528};

static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info;
	struct rtw89_dig_info *dig = &rtwdev->dig;
	bool is_linked = rtwdev->total_sta_assoc > 0;

	if (is_linked) {
		dig->igi_rssi = ch_info->rssi_min >> 1;
	} else {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "RSSI update : NO Link\n");
		dig->igi_rssi = rssi_nolink;
	}
}

static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	bool is_linked = rtwdev->total_sta_assoc > 0;
	const u16 *fa_th_src = NULL;

	switch (chan->band_type) {
	case RTW89_BAND_2G:
		dig->lna_gain = dig->lna_gain_g;
		dig->tia_gain = dig->tia_gain_g;
		fa_th_src = is_linked ? fa_th_2g : fa_th_nolink;
		dig->force_gaincode_idx_en = false;
		dig->dyn_pd_th_en = true;
		break;
	case RTW89_BAND_5G:
	default:
		dig->lna_gain = dig->lna_gain_a;
		dig->tia_gain = dig->tia_gain_a;
		fa_th_src = is_linked ? fa_th_5g : fa_th_nolink;
		dig->force_gaincode_idx_en = true;
		dig->dyn_pd_th_en = true;
		break;
	}
	memcpy(dig->fa_th, fa_th_src, sizeof(dig->fa_th));
	memcpy(dig->igi_rssi_th, igi_rssi_th, sizeof(dig->igi_rssi_th));
}

static const u8 pd_low_th_offset = 20, dynamic_igi_min = 0x20;
static const u8 igi_max_performance_mode = 0x5a;
static const u8 dynamic_pd_threshold_max;

static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	dig->cur_gaincode.lna_idx = LNA_IDX_MAX;
	dig->cur_gaincode.tia_idx = TIA_IDX_MAX;
	dig->cur_gaincode.rxb_idx = RXB_IDX_MAX;
	dig->force_gaincode.lna_idx = LNA_IDX_MAX;
	dig->force_gaincode.tia_idx = TIA_IDX_MAX;
	dig->force_gaincode.rxb_idx = RXB_IDX_MAX;

	dig->dyn_igi_max = igi_max_performance_mode;
	dig->dyn_igi_min = dynamic_igi_min;
	dig->dyn_pd_th_max = dynamic_pd_threshold_max;
	dig->pd_low_th_ofst = pd_low_th_offset;
	dig->is_linked_pre = false;
}

static void rtw89_phy_dig_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_dig_update_gain_para(rtwdev);
	rtw89_phy_dig_reset(rtwdev);
}

static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u8 lna_idx;

	if (rssi < dig->igi_rssi_th[0])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX6;
	else if (rssi < dig->igi_rssi_th[1])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX5;
	else if (rssi < dig->igi_rssi_th[2])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX4;
	else if (rssi < dig->igi_rssi_th[3])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX3;
	else if (rssi < dig->igi_rssi_th[4])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX2;
	else
		lna_idx = RTW89_DIG_GAIN_LNA_IDX1;

	return lna_idx;
}

static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u8 tia_idx;

	if (rssi < dig->igi_rssi_th[0])
		tia_idx = RTW89_DIG_GAIN_TIA_IDX1;
	else
		tia_idx = RTW89_DIG_GAIN_TIA_IDX0;

	return tia_idx;
}

#define IB_PBK_BASE 110
#define WB_RSSI_BASE 10
static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
					struct rtw89_agc_gaincode_set *set)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	s8 lna_gain = dig->lna_gain[set->lna_idx];
	s8 tia_gain = dig->tia_gain[set->tia_idx];
	s32 wb_rssi = rssi + lna_gain + tia_gain;
	s32 rxb_idx_tmp = IB_PBK_BASE + WB_RSSI_BASE;
	u8 rxb_idx;

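	/* rxb_idx ~ (IB_PBK_BASE + WB_RSSI_BASE) + ib_pkpwr - ib_pbk -
	 * (rssi + lna_gain + tia_gain), clamped to the RXB index range.
	 * Illustrative numbers only: ib_pkpwr = 6, ib_pbk = 110 and
	 * wb_rssi = 5 give 120 + 6 - 110 - 5 = 11.
	 */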
	rxb_idx_tmp += dig->ib_pkpwr - dig->ib_pbk - wb_rssi;
	rxb_idx = clamp_t(s32, rxb_idx_tmp, RXB_IDX_MIN, RXB_IDX_MAX);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "wb_rssi=%03d, rxb_idx_tmp=%03d\n",
		    wb_rssi, rxb_idx_tmp);

	return rxb_idx;
}

static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
					   struct rtw89_agc_gaincode_set *set)
{
	set->lna_idx = rtw89_phy_dig_lna_idx_by_rssi(rtwdev, rssi);
	set->tia_idx = rtw89_phy_dig_tia_idx_by_rssi(rtwdev, rssi);
	set->rxb_idx = rtw89_phy_dig_rxb_idx_by_rssi(rtwdev, rssi, set);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "final_rssi=%03d, (lna,tia,rxb)=(%d,%d,%02d)\n",
		    rssi, set->lna_idx, set->tia_idx, set->rxb_idx);
}

#define IGI_OFFSET_MAX 25
#define IGI_OFFSET_MUL 2
static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	enum rtw89_dig_noisy_level noisy_lv;
	u8 igi_offset = dig->fa_rssi_ofst;
	u16 fa_ratio = 0;

	fa_ratio = env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil;

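	/* fa_ratio is the combined CCK + OFDM false-alarm rate in permil;
	 * each noisy level above 0 adds IGI_OFFSET_MUL to the IGI offset,
	 * capped at IGI_OFFSET_MAX. E.g. with fa_th = {22, 44, 66, 88},
	 * fa_ratio = 50 selects NOISY_LEVEL2 and (assuming the level enums
	 * count 0..4) adds 4.
	 */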
	if (fa_ratio < dig->fa_th[0])
		noisy_lv = RTW89_DIG_NOISY_LEVEL0;
	else if (fa_ratio < dig->fa_th[1])
		noisy_lv = RTW89_DIG_NOISY_LEVEL1;
	else if (fa_ratio < dig->fa_th[2])
		noisy_lv = RTW89_DIG_NOISY_LEVEL2;
	else if (fa_ratio < dig->fa_th[3])
		noisy_lv = RTW89_DIG_NOISY_LEVEL3;
	else
		noisy_lv = RTW89_DIG_NOISY_LEVEL_MAX;

	if (noisy_lv == RTW89_DIG_NOISY_LEVEL0 && igi_offset < 2)
		igi_offset = 0;
	else
		igi_offset += noisy_lv * IGI_OFFSET_MUL;

	igi_offset = min_t(u8, igi_offset, IGI_OFFSET_MAX);
	dig->fa_rssi_ofst = igi_offset;

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "fa_th: [+6 (%d) +4 (%d) +2 (%d) 0 (%d) -2 ]\n",
		    dig->fa_th[3], dig->fa_th[2], dig->fa_th[1], dig->fa_th[0]);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "fa(CCK,OFDM,ALL)=(%d,%d,%d) permil, noisy_lv=%d, ofst=%d\n",
		    env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil,
		    env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil,
		    noisy_lv, igi_offset);
}

static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev, u8 lna_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_lna_init.addr,
			       dig_regs->p0_lna_init.mask, lna_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_lna_init.addr,
			       dig_regs->p1_lna_init.mask, lna_idx);
}

static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev, u8 tia_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_tia_init.addr,
			       dig_regs->p0_tia_init.mask, tia_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_tia_init.addr,
			       dig_regs->p1_tia_init.mask, tia_idx);
}

static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, u8 rxb_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_rxb_init.addr,
			       dig_regs->p0_rxb_init.mask, rxb_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_rxb_init.addr,
			       dig_regs->p1_rxb_init.mask, rxb_idx);
}

static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
				     const struct rtw89_agc_gaincode_set set)
{
	rtw89_phy_dig_set_lna_idx(rtwdev, set.lna_idx);
	rtw89_phy_dig_set_tia_idx(rtwdev, set.tia_idx);
	rtw89_phy_dig_set_rxb_idx(rtwdev, set.rxb_idx);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "Set (lna,tia,rxb)=((%d,%d,%02d))\n",
		    set.lna_idx, set.tia_idx, set.rxb_idx);
}

static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev,
						   bool enable)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_p20_pagcugc_en.addr,
			       dig_regs->p0_p20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_s20_pagcugc_en.addr,
			       dig_regs->p0_s20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_p20_pagcugc_en.addr,
			       dig_regs->p1_p20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_s20_pagcugc_en.addr,
			       dig_regs->p1_s20_pagcugc_en.mask, enable);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable);
}

static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	if (!rtwdev->hal.support_igi)
		return;

	if (dig->force_gaincode_idx_en) {
		rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "Force gaincode index enabled.\n");
	} else {
		rtw89_phy_dig_gaincode_by_rssi(rtwdev, dig->igi_fa_rssi,
					       &dig->cur_gaincode);
		rtw89_phy_dig_set_igi_cr(rtwdev, dig->cur_gaincode);
	}
}

static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
				    bool enable)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
	enum rtw89_bandwidth cbw = chan->band_width;
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u8 final_rssi = 0, under_region = dig->pd_low_th_ofst;
	u8 ofdm_cca_th;
	s8 cck_cca_th;
	u32 pd_val = 0;

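	/* under_region accumulates the detection backoff for the sub-band
	 * filter and the channel bandwidth; the OFDM PD lower bound then
	 * becomes (ofdm_cca_th - under_region - PD_TH_MIN_RSSI) >> 1. The
	 * >> 1 suggests a 2 dB register step, but that is an assumption.
	 */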
	under_region += PD_TH_SB_FLTR_CMP_VAL;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		under_region += PD_TH_BW40_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		under_region += PD_TH_BW80_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_160:
		under_region += PD_TH_BW160_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_20:
		fallthrough;
	default:
		under_region += PD_TH_BW20_CMP_VAL;
		break;
	}

	dig->dyn_pd_th_max = dig->igi_rssi;

	final_rssi = min_t(u8, rssi, dig->igi_rssi);
	ofdm_cca_th = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region,
			      PD_TH_MAX_RSSI + under_region);

	if (enable) {
		pd_val = (ofdm_cca_th - under_region - PD_TH_MIN_RSSI) >> 1;
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "igi=%d, ofdm_ccaTH=%d, backoff=%d, PD_low=%d\n",
			    final_rssi, ofdm_cca_th, under_region, pd_val);
	} else {
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "Dynamic PD th disabled, Set PD_low_bd=0\n");
	}

	rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
			       dig_regs->pd_lower_bound_mask, pd_val);
	rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
			       dig_regs->pd_spatial_reuse_en, enable);

	if (!rtwdev->hal.support_cckpd)
		return;

	cck_cca_th = max_t(s8, final_rssi - under_region, CCKPD_TH_MIN_RSSI);
	pd_val = (u32)(cck_cca_th - IGI_RSSI_MAX);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "igi=%d, cck_ccaTH=%d, backoff=%d, cck_PD_low=((%d))dB\n",
		    final_rssi, cck_cca_th, under_region, pd_val);

	rtw89_phy_write32_mask(rtwdev, R_BMODE_PDTH_EN_V1,
			       B_BMODE_PDTH_LIMIT_EN_MSK_V1, enable);
	rtw89_phy_write32_mask(rtwdev, R_BMODE_PDTH_V1,
			       B_BMODE_PDTH_LOWER_BOUND_MSK_V1, pd_val);
}

void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	dig->bypass_dig = false;
	rtw89_phy_dig_para_reset(rtwdev);
	rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
	rtw89_phy_dig_dyn_pd_th(rtwdev, rssi_nolink, false);
	rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
	rtw89_phy_dig_update_para(rtwdev);
}

#define IGI_RSSI_MIN 10
void rtw89_phy_dig(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	bool is_linked = rtwdev->total_sta_assoc > 0;

	if (unlikely(dig->bypass_dig)) {
		dig->bypass_dig = false;
		return;
	}

	if (!dig->is_linked_pre && is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n");
		rtw89_phy_dig_update_para(rtwdev);
	} else if (dig->is_linked_pre && !is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n");
		rtw89_phy_dig_update_para(rtwdev);
	}
	dig->is_linked_pre = is_linked;

	rtw89_phy_dig_igi_offset_by_env(rtwdev);
	rtw89_phy_dig_update_rssi_info(rtwdev);

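	/* Keep the IGI inside a window of [igi_rssi - 10, + IGI_OFFSET_MAX]:
	 * e.g. igi_rssi = 40 gives dyn_igi_min = 30 and dyn_igi_max = 55,
	 * and fa_rssi_ofst = 8 yields igi_fa_rssi = 38 (illustrative
	 * numbers only).
	 */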
	dig->dyn_igi_min = (dig->igi_rssi > IGI_RSSI_MIN) ?
			    dig->igi_rssi - IGI_RSSI_MIN : 0;
	dig->dyn_igi_max = dig->dyn_igi_min + IGI_OFFSET_MAX;
	dig->igi_fa_rssi = dig->dyn_igi_min + dig->fa_rssi_ofst;

	dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
				 dig->dyn_igi_max);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "rssi=%03d, dyn(max,min)=(%d,%d), final_rssi=%d\n",
		    dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min,
		    dig->igi_fa_rssi);

	rtw89_phy_dig_config_igi(rtwdev);

	rtw89_phy_dig_dyn_pd_th(rtwdev, dig->igi_fa_rssi, dig->dyn_pd_th_en);

	if (dig->dyn_pd_th_en && dig->igi_fa_rssi > dig->dyn_pd_th_max)
		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, true);
	else
		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
}

static void rtw89_phy_tx_path_div_sta_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct rtw89_hal *hal = &rtwdev->hal;
	bool *done = data;
	u8 rssi_a, rssi_b;
	u32 candidate;

	if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION || sta->tdls)
		return;

	if (*done)
		return;

	*done = true;

	rssi_a = ewma_rssi_read(&rtwsta->rssi[RF_PATH_A]);
	rssi_b = ewma_rssi_read(&rtwsta->rssi[RF_PATH_B]);

	if (rssi_a > rssi_b + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_A;
	else if (rssi_b > rssi_a + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_B;
	else
		return;

	if (hal->antenna_tx == candidate)
		return;

	hal->antenna_tx = candidate;
	rtw89_fw_h2c_txpath_cmac_tbl(rtwdev, rtwsta);

	if (hal->antenna_tx == RF_A) {
		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x12);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x11);
	} else if (hal->antenna_tx == RF_B) {
		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x11);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x12);
	}
}

void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	bool done = false;

	if (!hal->tx_path_diversity)
		return;

	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_tx_path_div_sta_iter,
					  &done);
}

#define ANTDIV_MAIN 0
#define ANTDIV_AUX 1

static void rtw89_phy_antdiv_set_ant(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 default_ant, optional_ant;

	if (!hal->ant_diversity || hal->antenna_tx == 0)
		return;

	if (hal->antenna_tx == RF_B) {
		default_ant = ANTDIV_AUX;
		optional_ant = ANTDIV_MAIN;
	} else {
		default_ant = ANTDIV_MAIN;
		optional_ant = ANTDIV_AUX;
	}

	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_CGCS_CTRL,
			      default_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ORI,
			      default_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ALT,
			      optional_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_TX_ORI,
			      default_ant, RTW89_PHY_0);
}

static void rtw89_phy_swap_hal_antenna(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;

	hal->antenna_rx = hal->antenna_rx == RF_A ? RF_B : RF_A;
	hal->antenna_tx = hal->antenna_rx;
}

static void rtw89_phy_antdiv_decision_state(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;
	bool no_change = false;
	u8 main_rssi, aux_rssi;
	u8 main_evm, aux_evm;
	u32 candidate;

	antdiv->get_stats = false;
	antdiv->training_count = 0;

	main_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->main_stats);
	main_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->main_stats);
	aux_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->aux_stats);
	aux_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->aux_stats);

	if (main_evm > aux_evm + ANTDIV_EVM_DIFF_TH)
		candidate = RF_A;
	else if (aux_evm > main_evm + ANTDIV_EVM_DIFF_TH)
		candidate = RF_B;
	else if (main_rssi > aux_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_A;
	else if (aux_rssi > main_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_B;
	else
		no_change = true;

	if (no_change) {
		/* swap back from training antenna to original */
		rtw89_phy_swap_hal_antenna(rtwdev);
		return;
	}

	hal->antenna_tx = candidate;
	hal->antenna_rx = candidate;
}

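/* Training alternates between the two antennas: even passes (re)start
 * statistics collection on the current antenna for ANTDIV_TRAINNING_INTVL,
 * odd passes stop collecting, swap to the other antenna and wait
 * ANTDIV_DELAY before the next sample window.
 */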
static void rtw89_phy_antdiv_training_state(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	u64 state_period;

	if (antdiv->training_count % 2 == 0) {
		if (antdiv->training_count == 0)
			rtw89_phy_antdiv_sts_reset(rtwdev);

		antdiv->get_stats = true;
		state_period = msecs_to_jiffies(ANTDIV_TRAINNING_INTVL);
	} else {
		antdiv->get_stats = false;
		state_period = msecs_to_jiffies(ANTDIV_DELAY);

		rtw89_phy_swap_hal_antenna(rtwdev);
		rtw89_phy_antdiv_set_ant(rtwdev);
	}

	antdiv->training_count++;
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work,
				     state_period);
}

void rtw89_phy_antdiv_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						antdiv_work.work);
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;

	mutex_lock(&rtwdev->mutex);

	if (antdiv->training_count <= ANTDIV_TRAINNING_CNT) {
		rtw89_phy_antdiv_training_state(rtwdev);
	} else {
		rtw89_phy_antdiv_decision_state(rtwdev);
		rtw89_phy_antdiv_set_ant(rtwdev);
	}

	mutex_unlock(&rtwdev->mutex);
}

void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 rssi, rssi_pre;

	if (!hal->ant_diversity || hal->ant_diversity_fixed)
		return;

	rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->target_stats);
	rssi_pre = antdiv->rssi_pre;
	antdiv->rssi_pre = rssi;
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);

	if (abs((int)rssi - (int)rssi_pre) < ANTDIV_RSSI_DIFF_TH)
		return;

	antdiv->training_count = 0;
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work, 0);
}

static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_ccx_top_setting_init(rtwdev);
	rtw89_phy_ifs_clm_setting_init(rtwdev);
}

void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	rtw89_phy_stat_init(rtwdev);

	rtw89_chip_bb_sethw(rtwdev);

	rtw89_phy_env_monitor_init(rtwdev);
	rtw89_physts_parsing_init(rtwdev);
	rtw89_phy_dig_init(rtwdev);
	rtw89_phy_cfo_init(rtwdev);
	rtw89_phy_ul_tb_info_init(rtwdev);
	rtw89_phy_antdiv_init(rtwdev);
	rtw89_chip_rfe_gpio(rtwdev);
	rtw89_phy_antdiv_set_ant(rtwdev);

	rtw89_phy_init_rf_nctl(rtwdev);
	rtw89_chip_rfk_init(rtwdev);
	rtw89_load_txpwr_table(rtwdev, chip->byr_table);
	rtw89_chip_set_txpwr_ctrl(rtwdev);
	rtw89_chip_power_trim(rtwdev);
	rtw89_chip_cfg_txrx_path(rtwdev);
}

void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
	u8 bss_color;

	if (!vif->bss_conf.he_support || !vif->cfg.assoc)
		return;

	bss_color = vif->bss_conf.he_bss_color.color;

	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_VLD0, 0x1,
			      phy_idx);
	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_TGT,
			      bss_color, phy_idx);
	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_STAID,
			      vif->cfg.aid, phy_idx);
}

static void
_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data);
}

static void
_rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
}

static void
_rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_set(rtwdev, def->addr, def->mask);
}

static void
_rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_clr(rtwdev, def->addr, def->mask);
}

static void
_rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	udelay(def->data);
}

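/* Dispatch table indexed by rtw89_reg5_def::flag; the static_assert below
 * keeps it in sync with RTW89_RFK_F_NUM.
 */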
static void
(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = {
	[RTW89_RFK_F_WRF] = _rfk_write_rf,
	[RTW89_RFK_F_WM] = _rfk_write32_mask,
	[RTW89_RFK_F_WS] = _rfk_write32_set,
	[RTW89_RFK_F_WC] = _rfk_write32_clr,
	[RTW89_RFK_F_DELAY] = _rfk_delay,
};

#if defined(__linux__)
static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);
#elif defined(__FreeBSD__)
rtw89_static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);
#endif

void
rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
{
	const struct rtw89_reg5_def *p = tbl->defs;
	const struct rtw89_reg5_def *end = tbl->defs + tbl->size;

	for (; p < end; p++)
		_rfk_handler[p->flag](rtwdev, p);
}
EXPORT_SYMBOL(rtw89_rfk_parser);

#define RTW89_TSSI_FAST_MODE_NUM 4

static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_flat[RTW89_TSSI_FAST_MODE_NUM] = {
	{0xD934, 0xff0000},
	{0xD934, 0xff000000},
	{0xD938, 0xff},
	{0xD934, 0xff00},
};

static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_level[RTW89_TSSI_FAST_MODE_NUM] = {
	{0xD930, 0xff0000},
	{0xD930, 0xff000000},
	{0xD934, 0xff},
	{0xD930, 0xff00},
};

static
void rtw89_phy_tssi_ctrl_set_fast_mode_cfg(struct rtw89_dev *rtwdev,
					   enum rtw89_mac_idx mac_idx,
					   enum rtw89_tssi_bandedge_cfg bandedge_cfg,
					   u32 val)
{
	const struct rtw89_reg_def *regs;
	u32 reg;
	int i;

	if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
		regs = rtw89_tssi_fastmode_regs_flat;
	else
		regs = rtw89_tssi_fastmode_regs_level;

	for (i = 0; i < RTW89_TSSI_FAST_MODE_NUM; i++) {
		reg = rtw89_mac_reg_by_idx(regs[i].addr, mac_idx);
		rtw89_write32_mask(rtwdev, reg, regs[i].mask, val);
	}
}

static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_flat[RTW89_TSSI_SBW_NUM] = {
	{0xD91C, 0xff000000},
	{0xD920, 0xff},
	{0xD920, 0xff00},
	{0xD920, 0xff0000},
	{0xD920, 0xff000000},
	{0xD924, 0xff},
	{0xD924, 0xff00},
	{0xD914, 0xff000000},
	{0xD918, 0xff},
	{0xD918, 0xff00},
	{0xD918, 0xff0000},
	{0xD918, 0xff000000},
	{0xD91C, 0xff},
	{0xD91C, 0xff00},
	{0xD91C, 0xff0000},
};

static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_level[RTW89_TSSI_SBW_NUM] = {
	{0xD910, 0xff},
	{0xD910, 0xff00},
	{0xD910, 0xff0000},
	{0xD910, 0xff000000},
	{0xD914, 0xff},
	{0xD914, 0xff00},
	{0xD914, 0xff0000},
	{0xD908, 0xff},
	{0xD908, 0xff00},
	{0xD908, 0xff0000},
	{0xD908, 0xff000000},
	{0xD90C, 0xff},
	{0xD90C, 0xff00},
	{0xD90C, 0xff0000},
	{0xD90C, 0xff000000},
};

void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
					  enum rtw89_mac_idx mac_idx,
					  enum rtw89_tssi_bandedge_cfg bandedge_cfg)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_reg_def *regs;
	const u32 *data;
	u32 reg;
	int i;

	if (bandedge_cfg >= RTW89_TSSI_CFG_NUM)
		return;

	if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
		regs = rtw89_tssi_bandedge_regs_flat;
	else
		regs = rtw89_tssi_bandedge_regs_level;

	data = chip->tssi_dbw_table->data[bandedge_cfg];

	for (i = 0; i < RTW89_TSSI_SBW_NUM; i++) {
		reg = rtw89_mac_reg_by_idx(regs[i].addr, mac_idx);
		rtw89_write32_mask(rtwdev, reg, regs[i].mask, data[i]);
	}

	reg = rtw89_mac_reg_by_idx(R_AX_BANDEDGE_CFG, mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_BANDEDGE_CFG_IDX_MASK, bandedge_cfg);

	rtw89_phy_tssi_ctrl_set_fast_mode_cfg(rtwdev, mac_idx, bandedge_cfg,
					      data[RTW89_TSSI_SBW20]);
}
EXPORT_SYMBOL(rtw89_phy_tssi_ctrl_set_bandedge_cfg);

static
const u8 rtw89_ch_base_table[16] = {1, 0xff,
				    36, 100, 132, 149, 0xff,
				    1, 33, 65, 97, 129, 161, 193, 225, 0xff};
#define RTW89_CH_BASE_IDX_2G		0
#define RTW89_CH_BASE_IDX_5G_FIRST	2
#define RTW89_CH_BASE_IDX_5G_LAST	5
#define RTW89_CH_BASE_IDX_6G_FIRST	7
#define RTW89_CH_BASE_IDX_6G_LAST	14

#define RTW89_CH_BASE_IDX_MASK		GENMASK(7, 4)
#define RTW89_CH_OFFSET_MASK		GENMASK(3, 0)

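/* A chan_idx packs a base-channel table index into bits 7:4 and half the
 * distance from that base channel into bits 3:0 (2 GHz stores the channel
 * number directly). E.g. 5 GHz channel 153: base idx 5 (channel 149),
 * offset (153 - 149) >> 1 = 2, so chan_idx = 0x52; decoding reverses this.
 */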
u8 rtw89_encode_chan_idx(struct rtw89_dev *rtwdev, u8 central_ch, u8 band)
{
	u8 chan_idx;
	u8 last, first;
	u8 idx;

	switch (band) {
	case RTW89_BAND_2G:
		chan_idx = FIELD_PREP(RTW89_CH_BASE_IDX_MASK, RTW89_CH_BASE_IDX_2G) |
			   FIELD_PREP(RTW89_CH_OFFSET_MASK, central_ch);
		return chan_idx;
	case RTW89_BAND_5G:
		first = RTW89_CH_BASE_IDX_5G_FIRST;
		last = RTW89_CH_BASE_IDX_5G_LAST;
		break;
	case RTW89_BAND_6G:
		first = RTW89_CH_BASE_IDX_6G_FIRST;
		last = RTW89_CH_BASE_IDX_6G_LAST;
		break;
	default:
		rtw89_warn(rtwdev, "Unsupported band %d\n", band);
		return 0;
	}

	for (idx = last; idx >= first; idx--)
		if (central_ch >= rtw89_ch_base_table[idx])
			break;

	if (idx < first) {
		rtw89_warn(rtwdev, "Unknown band %d channel %d\n", band, central_ch);
		return 0;
	}

	chan_idx = FIELD_PREP(RTW89_CH_BASE_IDX_MASK, idx) |
		   FIELD_PREP(RTW89_CH_OFFSET_MASK,
			      (central_ch - rtw89_ch_base_table[idx]) >> 1);
	return chan_idx;
}
EXPORT_SYMBOL(rtw89_encode_chan_idx);

void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
			   u8 *ch, enum nl80211_band *band)
{
	u8 idx, offset;

	idx = FIELD_GET(RTW89_CH_BASE_IDX_MASK, chan_idx);
	offset = FIELD_GET(RTW89_CH_OFFSET_MASK, chan_idx);

	if (idx == RTW89_CH_BASE_IDX_2G) {
		*band = NL80211_BAND_2GHZ;
		*ch = offset;
		return;
	}

	*band = idx <= RTW89_CH_BASE_IDX_5G_LAST ? NL80211_BAND_5GHZ : NL80211_BAND_6GHZ;
	*ch = rtw89_ch_base_table[idx] + (offset << 1);
}
EXPORT_SYMBOL(rtw89_decode_chan_idx);

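/* During a scan the EDCCA/PPDU levels are raised to EDCCA_DEFAULT (249),
 * which appears to effectively disable EDCCA deferral; the original
 * register value is backed up and restored once the scan ends.
 */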
#define EDCCA_DEFAULT 249
void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan)
{
	u32 reg = rtwdev->chip->edcca_lvl_reg;
	struct rtw89_hal *hal = &rtwdev->hal;
	u32 val;

	if (scan) {
		hal->edcca_bak = rtw89_phy_read32(rtwdev, reg);
		val = hal->edcca_bak;
		u32p_replace_bits(&val, EDCCA_DEFAULT, B_SEG0R_EDCCA_LVL_A_MSK);
		u32p_replace_bits(&val, EDCCA_DEFAULT, B_SEG0R_EDCCA_LVL_P_MSK);
		u32p_replace_bits(&val, EDCCA_DEFAULT, B_SEG0R_PPDU_LVL_MSK);
		rtw89_phy_write32(rtwdev, reg, val);
	} else {
		rtw89_phy_write32(rtwdev, reg, hal->edcca_bak);
	}
}