1// SPDX-License-Identifier: ISC
2/* Copyright (C) 2023 MediaTek Inc. */
3
4#include <linux/module.h>
5#if defined(__FreeBSD__)
6#include <linux/delay.h>
7#endif
8
9#include "mt792x.h"
10#include "mt792x_regs.h"
11
12void mt792x_mac_work(struct work_struct *work)
13{
14	struct mt792x_phy *phy;
15	struct mt76_phy *mphy;
16
17	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
18					       mac_work.work);
19	phy = mphy->priv;
20
21	mt792x_mutex_acquire(phy->dev);
22
23	mt76_update_survey(mphy);
24	if (++mphy->mac_work_count == 2) {
25		mphy->mac_work_count = 0;
26
27		mt792x_mac_update_mib_stats(phy);
28	}
29
30	mt792x_mutex_release(phy->dev);
31
32	mt76_tx_status_check(mphy->dev, false);
33	ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work,
34				     MT792x_WATCHDOG_TIME);
35}
36EXPORT_SYMBOL_GPL(mt792x_mac_work);
37
/* Program band-0 MAC timing: PLCP/CCA timeouts, SIFS/SLOT/RIFS/EIFS and
 * the CF-End rate, adjusted for the current band and coverage class.
 * (The "timeing" spelling is part of the exported symbol name and must
 * not be changed.)
 */
void mt792x_mac_set_timeing(struct mt792x_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt792x_dev *dev = phy->dev;
	u32 val, reg_offset;
	/* base PLCP/CCA timeout values for CCK and OFDM preambles */
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	bool is_2ghz = phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ;
	/* 802.11 SIFS: 10 us on 2.4 GHz (DSSS), 16 us otherwise (OFDM) */
	int sifs = is_2ghz ? 10 : 16, offset;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	/* quiesce TX/RX arbitration while timing registers are rewritten */
	mt76_set(dev, MT_ARB_SCR(0),
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	/* coverage class stretches both timeouts by 3 units per step */
	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(0), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(0), ofdm + reg_offset);
	mt76_wr(dev, MT_TMAC_ICR0(0),
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	/* short slot (or non-2GHz band) -> default CF-End rate, else 11b */
	if (phy->slottime < 20 || !is_2ghz)
		val = MT792x_CFEND_RATE_DEFAULT;
	else
		val = MT792x_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR0(0), MT_AGG_ACR_CFEND_RATE, val);
	/* re-enable TX/RX arbitration */
	mt76_clear(dev, MT_ARB_SCR(0),
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}
EXPORT_SYMBOL_GPL(mt792x_mac_set_timeing);
79
/* Fold the hardware MIB counters into the driver's software totals.
 * Several of these registers appear to be cleared by the read itself
 * (mt792x_mac_reset_counters resets them with bare reads), hence the
 * "+=" accumulation throughout.
 */
void mt792x_mac_update_mib_stats(struct mt792x_phy *phy)
{
	struct mt76_mib_stats *mib = &phy->mib;
	struct mt792x_dev *dev = phy->dev;
	int i, aggr0 = 0, aggr1;
	u32 val;

	/* error/retry counters */
	mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(0),
					   MT_MIB_SDR3_FCS_ERR_MASK);
	mib->ack_fail_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR3(0),
					    MT_MIB_ACK_FAIL_COUNT_MASK);
	mib->ba_miss_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR2(0),
					   MT_MIB_BA_FAIL_COUNT_MASK);
	mib->rts_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR0(0),
				       MT_MIB_RTS_COUNT_MASK);
	mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0),
					       MT_MIB_RTS_FAIL_COUNT_MASK);

	/* TX MPDU/A-MPDU counters */
	mib->tx_ampdu_cnt += mt76_rr(dev, MT_MIB_SDR12(0));
	mib->tx_mpdu_attempts_cnt += mt76_rr(dev, MT_MIB_SDR14(0));
	mib->tx_mpdu_success_cnt += mt76_rr(dev, MT_MIB_SDR15(0));

	/* beamformed TX packet counters (ebf = explicit, ibf = implicit) */
	val = mt76_rr(dev, MT_MIB_SDR32(0));
	mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR9_EBF_CNT_MASK, val);
	mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR9_IBF_CNT_MASK, val);

	/* beamformed TX PPDU counters */
	val = mt76_rr(dev, MT_ETBF_TX_APP_CNT(0));
	mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, val);
	mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, val);

	/* received beamforming feedback, split by PHY mode */
	val = mt76_rr(dev, MT_ETBF_RX_FB_CNT(0));
	mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, val);
	mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, val);
	mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, val);
	mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, val);

	/* RX counters */
	mib->rx_mpdu_cnt += mt76_rr(dev, MT_MIB_SDR5(0));
	mib->rx_ampdu_cnt += mt76_rr(dev, MT_MIB_SDR22(0));
	mib->rx_ampdu_bytes_cnt += mt76_rr(dev, MT_MIB_SDR23(0));
	mib->rx_ba_cnt += mt76_rr(dev, MT_MIB_SDR31(0));

	/* per-queue A-MSDU packing histogram plus a grand total */
	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		val = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += val;
		mib->tx_amsdu_cnt += val;
	}

	/* TX aggregation histogram: each register packs two 16-bit
	 * buckets; the CNT2 buckets land in the upper half of the
	 * table starting at aggr0 + 8.
	 */
	for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) {
		u32 val2;

		val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
		val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i));

		phy->mt76->aggr_stats[aggr0++] += val & 0xffff;
		phy->mt76->aggr_stats[aggr0++] += val >> 16;
		phy->mt76->aggr_stats[aggr1++] += val2 & 0xffff;
		phy->mt76->aggr_stats[aggr1++] += val2 >> 16;
	}
}
EXPORT_SYMBOL_GPL(mt792x_mac_update_mib_stats);
140
141struct mt76_wcid *mt792x_rx_get_wcid(struct mt792x_dev *dev, u16 idx,
142				     bool unicast)
143{
144	struct mt792x_sta *sta;
145	struct mt76_wcid *wcid;
146
147	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
148		return NULL;
149
150	wcid = rcu_dereference(dev->mt76.wcid[idx]);
151	if (unicast || !wcid)
152		return wcid;
153
154	if (!wcid->sta)
155		return NULL;
156
157	sta = container_of(wcid, struct mt792x_sta, wcid);
158	if (!sta->vif)
159		return NULL;
160
161	return &sta->vif->sta.wcid;
162}
163EXPORT_SYMBOL_GPL(mt792x_rx_get_wcid);
164
165static void
166mt792x_mac_rssi_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
167{
168	struct sk_buff *skb = priv;
169	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
170	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
171	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
172
173	if (status->signal > 0)
174		return;
175
176	if (!ether_addr_equal(vif->addr, hdr->addr1))
177		return;
178
179	ewma_rssi_add(&mvif->rssi, -status->signal);
180}
181
182void mt792x_mac_assoc_rssi(struct mt792x_dev *dev, struct sk_buff *skb)
183{
184	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
185
186	if (!ieee80211_is_assoc_resp(hdr->frame_control) &&
187	    !ieee80211_is_auth(hdr->frame_control))
188		return;
189
190	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
191		IEEE80211_IFACE_ITER_RESUME_ALL,
192		mt792x_mac_rssi_iter, skb);
193}
194EXPORT_SYMBOL_GPL(mt792x_mac_assoc_rssi);
195
196void mt792x_mac_reset_counters(struct mt792x_phy *phy)
197{
198	struct mt792x_dev *dev = phy->dev;
199	int i;
200
201	for (i = 0; i < 4; i++) {
202		mt76_rr(dev, MT_TX_AGG_CNT(0, i));
203		mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
204	}
205
206	dev->mt76.phy.survey_time = ktime_get_boottime();
207	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));
208
209	/* reset airtime counters */
210	mt76_rr(dev, MT_MIB_SDR9(0));
211	mt76_rr(dev, MT_MIB_SDR36(0));
212	mt76_rr(dev, MT_MIB_SDR37(0));
213
214	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
215	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
216}
217EXPORT_SYMBOL_GPL(mt792x_mac_reset_counters);
218
/* Noise-floor readout is not implemented for this chip family; always
 * report 0, which leaves phy->noise unchanged once it is initialized
 * (the caller skips the EWMA update when nf == 0).
 */
static u8
mt792x_phy_get_nf(struct mt792x_phy *phy, int idx)
{
	return 0;
}
224
225static void
226mt792x_phy_update_channel(struct mt76_phy *mphy, int idx)
227{
228	struct mt792x_dev *dev = container_of(mphy->dev, struct mt792x_dev, mt76);
229	struct mt792x_phy *phy = (struct mt792x_phy *)mphy->priv;
230	struct mt76_channel_state *state;
231	u64 busy_time, tx_time, rx_time, obss_time;
232	int nf;
233
234	busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
235				   MT_MIB_SDR9_BUSY_MASK);
236	tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
237				 MT_MIB_SDR36_TXTIME_MASK);
238	rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
239				 MT_MIB_SDR37_RXTIME_MASK);
240	obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx),
241				   MT_MIB_OBSSTIME_MASK);
242
243	nf = mt792x_phy_get_nf(phy, idx);
244	if (!phy->noise)
245		phy->noise = nf << 4;
246	else if (nf)
247		phy->noise += nf - (phy->noise >> 4);
248
249	state = mphy->chan_state;
250	state->cc_busy += busy_time;
251	state->cc_tx += tx_time;
252	state->cc_rx += rx_time + obss_time;
253	state->cc_bss_rx += rx_time;
254	state->noise = -(phy->noise >> 4);
255}
256
257void mt792x_update_channel(struct mt76_phy *mphy)
258{
259	struct mt792x_dev *dev = container_of(mphy->dev, struct mt792x_dev, mt76);
260
261	if (mt76_connac_pm_wake(mphy, &dev->pm))
262		return;
263
264	mt792x_phy_update_channel(mphy, 0);
265	/* reset obss airtime */
266	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
267	mt76_connac_power_save_sched(mphy, &dev->pm);
268}
269EXPORT_SYMBOL_GPL(mt792x_update_channel);
270
271void mt792x_reset(struct mt76_dev *mdev)
272{
273	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
274	struct mt76_connac_pm *pm = &dev->pm;
275
276	if (!dev->hw_init_done)
277		return;
278
279	if (dev->hw_full_reset)
280		return;
281
282	if (pm->suspended)
283		return;
284
285	queue_work(dev->mt76.wq, &dev->reset_work);
286}
287EXPORT_SYMBOL_GPL(mt792x_reset);
288
/* One-time per-band MAC setup: duplicate-detection limits, RX airtime
 * accounting, MIB TX/RX duration reporting, max RX frame length and
 * RCPI (signal strength) reporting mode.
 */
void mt792x_mac_init_band(struct mt792x_dev *dev, u8 band)
{
	u32 mask, set;

	mt76_rmw_field(dev, MT_TMAC_CTCR0(band),
		       MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
	mt76_set(dev, MT_TMAC_CTCR0(band),
		 MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
		 MT_TMAC_CTCR0_INS_DDLMT_EN);

	/* enable RX airtime accounting in both MIB blocks */
	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);

	/* enable MIB tx-rx time reporting */
	mt76_set(dev, MT_MIB_SCR1(band), MT_MIB_TXDUR_EN);
	mt76_set(dev, MT_MIB_SCR1(band), MT_MIB_RXDUR_EN);

	mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 1536);
	/* disable rx rate report by default due to hw issues */
	mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);

	/* filter out non-resp frames and get instantaneous signal reporting */
	mask = MT_WTBLOFF_TOP_RSCR_RCPI_MODE | MT_WTBLOFF_TOP_RSCR_RCPI_PARAM;
	set = FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_MODE, 0) |
	      FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_PARAM, 0x3);
	mt76_rmw(dev, MT_WTBLOFF_TOP_RSCR(band), mask, set);
}
EXPORT_SYMBOL_GPL(mt792x_mac_init_band);
317
/* Runtime-PM wake worker: hand firmware ownership back to the driver
 * and restart the TX/RX machinery that was quiesced while asleep.
 */
void mt792x_pm_wake_work(struct work_struct *work)
{
	struct mt792x_dev *dev;
	struct mt76_phy *mphy;

	dev = (struct mt792x_dev *)container_of(work, struct mt792x_dev,
						pm.wake_work);
	mphy = dev->phy.mt76;

	/* mt792x_mcu_drv_pmctrl() returns 0 when the wake succeeded */
	if (!mt792x_mcu_drv_pmctrl(dev)) {
		struct mt76_dev *mdev = &dev->mt76;
		int i;

		if (mt76_is_sdio(mdev)) {
			/* SDIO: a single worker services both TX and RX */
			mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
			mt76_worker_schedule(&mdev->sdio.txrx_worker);
		} else {
			/* other buses: kick per-queue RX NAPI, then flush
			 * the TX backlog accumulated while sleeping
			 */
			local_bh_disable();
			mt76_for_each_q_rx(mdev, i)
				napi_schedule(&mdev->napi[i]);
			local_bh_enable();
			mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
			mt76_connac_tx_cleanup(mdev);
		}
		/* re-arm the periodic MAC watchdog if the PHY is running */
		if (test_bit(MT76_STATE_RUNNING, &mphy->state))
			ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
						     MT792x_WATCHDOG_TIME);
	}

	/* unblock mac80211 queues and anyone sleeping on the wake event */
	ieee80211_wake_queues(mphy->hw);
	wake_up(&dev->pm.wait);
}
EXPORT_SYMBOL_GPL(mt792x_pm_wake_work);
351
/* Runtime-PM idle worker: hand device ownership to the firmware once
 * the link has been idle for pm.idle_timeout; otherwise re-arm itself
 * for the remaining idle window.
 */
void mt792x_pm_power_save_work(struct work_struct *work)
{
	struct mt792x_dev *dev;
	unsigned long delta;
	struct mt76_phy *mphy;

	dev = (struct mt792x_dev *)container_of(work, struct mt792x_dev,
						pm.ps_work.work);
	mphy = dev->phy.mt76;

	delta = dev->pm.idle_timeout;
	/* never power down while scanning or after a firmware assert */
	if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
	    test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) ||
	    dev->fw_assert)
		goto out;

	if (mutex_is_locked(&dev->mt76.mutex))
		/* if mt76 mutex is held we should not put the device
		 * to sleep since we are currently accessing device
		 * register map. We need to wait for the next power_save
		 * trigger.
		 */
		goto out;

	if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
		/* recent activity: retry when the idle window expires */
		delta = dev->pm.last_activity + delta - jiffies;
		goto out;
	}

	/* mt792x_mcu_fw_pmctrl() returns 0 once the fw owns the device */
	if (!mt792x_mcu_fw_pmctrl(dev)) {
		/* asleep: stop the MAC watchdog until the next wake */
		cancel_delayed_work_sync(&mphy->mac_work);
		return;
	}
out:
	queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
}
EXPORT_SYMBOL_GPL(mt792x_pm_power_save_work);
389