// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2023  Realtek Corporation
 */

#include <linux/pci.h>

#include "mac.h"
#include "pci.h"
#include "reg.h"

enum pcie_rxbd_mode {
	PCIE_RXBD_NORM = 0,
	PCIE_RXBD_SEP,
	PCIE_RXBD_EXT,
};

#define PL0_TMR_SCALE_ASIC 1
#define PL0_TMR_ANA_172US 0x800
#define PL0_TMR_MAC_1MS 0x27100
#define PL0_TMR_AUX_1MS 0x1E848

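/* Program the ASPM L1 entry delay in PCI config space, then enable or
 * disable L1 entry control on the MAC side.
 */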
static void rtw89_pci_aspm_set_be(struct rtw89_dev *rtwdev, bool enable)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;
	u8 value = 0;
	int ret;

	ret = pci_read_config_byte(pdev, RTW89_PCIE_ASPM_CTRL, &value);
	if (ret)
		rtw89_warn(rtwdev, "failed to read ASPM Delay\n");

	u8p_replace_bits(&value, PCIE_L1DLY_16US, RTW89_L1DLY_MASK);

	ret = pci_write_config_byte(pdev, RTW89_PCIE_ASPM_CTRL, value);
	if (ret)
		rtw89_warn(rtwdev, "failed to write ASPM Delay\n");

	if (enable)
		rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
				  B_BE_ASPM_CTRL_L1);
	else
		rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
				  B_BE_ASPM_CTRL_L1);
}

static void rtw89_pci_l1ss_set_be(struct rtw89_dev *rtwdev, bool enable)
{
	if (enable)
		rtw89_write32_set(rtwdev, R_BE_PCIE_MIX_CFG,
				  B_BE_L1SUB_ENABLE);
	else
		rtw89_write32_clr(rtwdev, R_BE_PCIE_MIX_CFG,
				  B_BE_L1SUB_ENABLE);
}

static void rtw89_pci_clkreq_set_be(struct rtw89_dev *rtwdev, bool enable)
{
	rtw89_write32_mask(rtwdev, R_BE_PCIE_LAT_CTRL, B_BE_CLK_REQ_LAT_MASK,
			   PCIE_CLKDLY_HW_V1_0);

	if (enable)
		rtw89_write32_set(rtwdev, R_BE_L1_CLK_CTRL,
				  B_BE_CLK_PM_EN);
	else
		rtw89_write32_clr(rtwdev, R_BE_L1_CLK_CTRL,
				  B_BE_CLK_PM_EN);
}

static void _patch_pcie_power_wake_be(struct rtw89_dev *rtwdev, bool power_up)
{
	if (power_up)
		rtw89_write32_set(rtwdev, R_BE_HCI_OPT_CTRL, BIT_WAKE_CTRL_V1);
	else
		rtw89_write32_clr(rtwdev, R_BE_HCI_OPT_CTRL, BIT_WAKE_CTRL_V1);
}

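/* Configure the PCIe I/O recovery watchdog timers.  When I/O recovery is
 * enabled in the PCI info, every watchdog gets a timeout derived from the
 * configured timer value (or the per-domain default scaled for ASIC);
 * otherwise all of the watchdogs are disabled.
 */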
static void rtw89_pci_set_io_rcy_be(struct rtw89_dev *rtwdev)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 scale = PL0_TMR_SCALE_ASIC;
	u32 val32;

	if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) {
		val32 = info->io_rcy_tmr == MAC_AX_IO_RCY_ANA_TMR_DEF ?
			PL0_TMR_ANA_172US : info->io_rcy_tmr;
		val32 /= scale;

		rtw89_write32(rtwdev, R_BE_AON_WDT_TMR, val32);
		rtw89_write32(rtwdev, R_BE_MDIO_WDT_TMR, val32);
		rtw89_write32(rtwdev, R_BE_LA_MODE_WDT_TMR, val32);
		rtw89_write32(rtwdev, R_BE_WDT_AR_TMR, val32);
		rtw89_write32(rtwdev, R_BE_WDT_AW_TMR, val32);
		rtw89_write32(rtwdev, R_BE_WDT_W_TMR, val32);
		rtw89_write32(rtwdev, R_BE_WDT_B_TMR, val32);
		rtw89_write32(rtwdev, R_BE_WDT_R_TMR, val32);

		val32 = info->io_rcy_tmr == MAC_AX_IO_RCY_ANA_TMR_DEF ?
			PL0_TMR_MAC_1MS : info->io_rcy_tmr;
		val32 /= scale;
		rtw89_write32(rtwdev, R_BE_WLAN_WDT_TMR, val32);
		rtw89_write32(rtwdev, R_BE_AXIDMA_WDT_TMR, val32);

		val32 = info->io_rcy_tmr == MAC_AX_IO_RCY_ANA_TMR_DEF ?
			PL0_TMR_AUX_1MS : info->io_rcy_tmr;
		val32 /= scale;
		rtw89_write32(rtwdev, R_BE_LOCAL_WDT_TMR, val32);
	} else {
		rtw89_write32_clr(rtwdev, R_BE_WLAN_WDT, B_BE_WLAN_WDT_ENABLE);
		rtw89_write32_clr(rtwdev, R_BE_AXIDMA_WDT, B_BE_AXIDMA_WDT_ENABLE);
		rtw89_write32_clr(rtwdev, R_BE_AON_WDT, B_BE_AON_WDT_ENABLE);
		rtw89_write32_clr(rtwdev, R_BE_LOCAL_WDT, B_BE_LOCAL_WDT_ENABLE);
		rtw89_write32_clr(rtwdev, R_BE_MDIO_WDT, B_BE_MDIO_WDT_ENABLE);
		rtw89_write32_clr(rtwdev, R_BE_LA_MODE_WDT, B_BE_LA_MODE_WDT_ENABLE);
		rtw89_write32_clr(rtwdev, R_BE_WDT_AR, B_BE_WDT_AR_ENABLE);
		rtw89_write32_clr(rtwdev, R_BE_WDT_AW, B_BE_WDT_AW_ENABLE);
		rtw89_write32_clr(rtwdev, R_BE_WDT_W, B_BE_WDT_W_ENABLE);
		rtw89_write32_clr(rtwdev, R_BE_WDT_B, B_BE_WDT_B_ENABLE);
		rtw89_write32_clr(rtwdev, R_BE_WDT_R, B_BE_WDT_R_ENABLE);
	}
}

static void rtw89_pci_ctrl_wpdma_pcie_be(struct rtw89_dev *rtwdev, bool en)
{
	if (en)
		rtw89_write32_clr(rtwdev, R_BE_HAXI_DMA_STOP1, B_BE_STOP_WPDMA);
	else
		rtw89_write32_set(rtwdev, R_BE_HAXI_DMA_STOP1, B_BE_STOP_WPDMA);
}

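/* Control TX DMA, RX DMA and the AXI master independently.  Each argument
 * is tri-state: ENABLE turns the function on, DISABLE turns it off, and
 * IGNORE leaves the current setting untouched.  When I/O is enabled, the
 * HAXI master watchdog timeout selector is also programmed.
 */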
static void rtw89_pci_ctrl_trxdma_pcie_be(struct rtw89_dev *rtwdev,
					  enum mac_ax_pcie_func_ctrl tx_en,
					  enum mac_ax_pcie_func_ctrl rx_en,
					  enum mac_ax_pcie_func_ctrl io_en)
{
	u32 val;

	val = rtw89_read32(rtwdev, R_BE_HAXI_INIT_CFG1);

	if (tx_en == MAC_AX_PCIE_ENABLE)
		val |= B_BE_TXDMA_EN;
	else if (tx_en == MAC_AX_PCIE_DISABLE)
		val &= ~B_BE_TXDMA_EN;

	if (rx_en == MAC_AX_PCIE_ENABLE)
		val |= B_BE_RXDMA_EN;
	else if (rx_en == MAC_AX_PCIE_DISABLE)
		val &= ~B_BE_RXDMA_EN;

	if (io_en == MAC_AX_PCIE_ENABLE)
		val &= ~B_BE_STOP_AXI_MST;
	else if (io_en == MAC_AX_PCIE_DISABLE)
		val |= B_BE_STOP_AXI_MST;

	rtw89_write32(rtwdev, R_BE_HAXI_INIT_CFG1, val);

	if (io_en == MAC_AX_PCIE_ENABLE)
		rtw89_write32_mask(rtwdev, R_BE_HAXI_MST_WDT_TIMEOUT_SEL_V1,
				   B_BE_HAXI_MST_WDT_TIMEOUT_SEL_MASK, 4);
}

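/* Reset the read/write pointers of every TX/RX buffer descriptor ring and
 * rewind the host RX index of RXQ0/RPQ0 to the last descriptor.
 */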
static void rtw89_pci_clr_idx_all_be(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_rx_ring *rx_ring;
	u32 val;

	val = B_BE_CLR_CH0_IDX | B_BE_CLR_CH1_IDX | B_BE_CLR_CH2_IDX |
	      B_BE_CLR_CH3_IDX | B_BE_CLR_CH4_IDX | B_BE_CLR_CH5_IDX |
	      B_BE_CLR_CH6_IDX | B_BE_CLR_CH7_IDX | B_BE_CLR_CH8_IDX |
	      B_BE_CLR_CH9_IDX | B_BE_CLR_CH10_IDX | B_BE_CLR_CH11_IDX |
	      B_BE_CLR_CH12_IDX | B_BE_CLR_CH13_IDX | B_BE_CLR_CH14_IDX;
	rtw89_write32(rtwdev, R_BE_TXBD_RWPTR_CLR1, val);

	rtw89_write32(rtwdev, R_BE_RXBD_RWPTR_CLR1_V1,
		      B_BE_CLR_RXQ0_IDX | B_BE_CLR_RPQ0_IDX);

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];
	rtw89_write16(rtwdev, R_BE_RXQ0_RXBD_IDX_V1, rx_ring->bd_ring.len - 1);

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
	rtw89_write16(rtwdev, R_BE_RPQ0_RXBD_IDX_V1, rx_ring->bd_ring.len - 1);
}

static int rtw89_pci_poll_txdma_ch_idle_be(struct rtw89_dev *rtwdev)
{
	u32 val;

	return read_poll_timeout(rtw89_read32, val, (val & DMA_BUSY1_CHECK_BE) == 0,
				 10, 1000, false, rtwdev, R_BE_HAXI_DMA_BUSY1);
}

static int rtw89_pci_poll_rxdma_ch_idle_be(struct rtw89_dev *rtwdev)
{
	u32 check;
	u32 val;

	check = B_BE_RXQ0_BUSY_V1 | B_BE_RPQ0_BUSY_V1;

	return read_poll_timeout(rtw89_read32, val, (val & check) == 0,
				 10, 1000, false, rtwdev, R_BE_HAXI_DMA_BUSY1);
}

static int rtw89_pci_poll_dma_all_idle_be(struct rtw89_dev *rtwdev)
{
	int ret;

	ret = rtw89_pci_poll_txdma_ch_idle_be(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "txdma ch busy\n");
		return ret;
	}

	ret = rtw89_pci_poll_rxdma_ch_idle_be(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "rxdma ch busy\n");
		return ret;
	}

	return 0;
}

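/* Apply the RX buffer descriptor mode, TX/RX DMA burst sizes, multi-tag
 * number and WD DMA interval settings taken from the per-chip PCI info.
 */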
static void rtw89_pci_mode_op_be(struct rtw89_dev *rtwdev)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 val32_init1, val32_rxapp, val32_exp;

	val32_init1 = rtw89_read32(rtwdev, R_BE_HAXI_INIT_CFG1);
	val32_rxapp = rtw89_read32(rtwdev, R_BE_RX_APPEND_MODE);
	val32_exp = rtw89_read32(rtwdev, R_BE_HAXI_EXP_CTRL_V1);

	if (info->rxbd_mode == MAC_AX_RXBD_PKT) {
		val32_init1 = u32_replace_bits(val32_init1, PCIE_RXBD_NORM,
					       B_BE_RXQ_RXBD_MODE_MASK);
	} else if (info->rxbd_mode == MAC_AX_RXBD_SEP) {
		val32_init1 = u32_replace_bits(val32_init1, PCIE_RXBD_SEP,
					       B_BE_RXQ_RXBD_MODE_MASK);
		val32_rxapp = u32_replace_bits(val32_rxapp, 0,
					       B_BE_APPEND_LEN_MASK);
	}

	val32_init1 = u32_replace_bits(val32_init1, info->tx_burst,
				       B_BE_MAX_TXDMA_MASK);
	val32_init1 = u32_replace_bits(val32_init1, info->rx_burst,
				       B_BE_MAX_RXDMA_MASK);
	val32_exp = u32_replace_bits(val32_exp, info->multi_tag_num,
				     B_BE_MAX_TAG_NUM_MASK);
	val32_init1 = u32_replace_bits(val32_init1, info->wd_dma_idle_intvl,
				       B_BE_CFG_WD_PERIOD_IDLE_MASK);
	val32_init1 = u32_replace_bits(val32_init1, info->wd_dma_act_intvl,
				       B_BE_CFG_WD_PERIOD_ACTIVE_MASK);

	rtw89_write32(rtwdev, R_BE_HAXI_INIT_CFG1, val32_init1);
	rtw89_write32(rtwdev, R_BE_RX_APPEND_MODE, val32_rxapp);
	rtw89_write32(rtwdev, R_BE_HAXI_EXP_CTRL_V1, val32_exp);
}

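/* Trigger a buffer descriptor RAM reset and wait for the self-clearing
 * bit to be cleared by hardware.
 */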
static int rtw89_pci_rst_bdram_be(struct rtw89_dev *rtwdev)
{
	u32 val;

	rtw89_write32_set(rtwdev, R_BE_HAXI_INIT_CFG1, B_BE_SET_BDRAM_BOUND);

	return read_poll_timeout(rtw89_read32, val, !(val & B_BE_SET_BDRAM_BOUND),
				 50, 500000, false, rtwdev, R_BE_HAXI_INIT_CFG1);
}

static void rtw89_pci_debounce_be(struct rtw89_dev *rtwdev)
{
	u32 val32;

	val32 = rtw89_read32(rtwdev, R_BE_SYS_PAGE_CLK_GATED);
	val32 = u32_replace_bits(val32, 0, B_BE_PCIE_PRST_DEBUNC_PERIOD_MASK);
	val32 |= B_BE_SYM_PRST_DEBUNC_SEL;
	rtw89_write32(rtwdev, R_BE_SYS_PAGE_CLK_GATED, val32);
}

static void rtw89_pci_ldo_low_pwr_be(struct rtw89_dev *rtwdev)
{
	rtw89_write32_set(rtwdev, R_BE_SYS_PW_CTRL, B_BE_PSUS_OFF_CAPC_EN);
	rtw89_write32_set(rtwdev, R_BE_SYS_PAGE_CLK_GATED,
			  B_BE_SOP_OFFPOOBS_PC | B_BE_CPHY_AUXCLK_OP |
			  B_BE_CPHY_POWER_READY_CHK);
	rtw89_write32_clr(rtwdev, R_BE_SYS_SDIO_CTRL, B_BE_PCIE_FORCE_IBX_EN |
						      B_BE_PCIE_DIS_L2_RTK_PERST |
						      B_BE_PCIE_DIS_L2__CTRL_LDO_HCI);
	rtw89_write32_clr(rtwdev, R_BE_L1_2_CTRL_HCILDO, B_BE_PCIE_DIS_L1_2_CTRL_HCILDO);
}

static void rtw89_pci_pcie_setting_be(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_hal *hal = &rtwdev->hal;

	rtw89_write32_set(rtwdev, R_BE_PCIE_FRZ_CLK, B_BE_PCIE_EN_AUX_CLK);
	rtw89_write32_clr(rtwdev, R_BE_PCIE_PS_CTRL, B_BE_CMAC_EXIT_L1_EN);

	if (chip->chip_id == RTL8922A && hal->cv == CHIP_CAV)
		return;

	rtw89_write32_set(rtwdev, R_BE_EFUSE_CTRL_2_V1, B_BE_R_SYM_AUTOLOAD_WITH_PMC_SEL);
	rtw89_write32_set(rtwdev, R_BE_PCIE_LAT_CTRL, B_BE_SYM_AUX_CLK_SEL);
}

static void rtw89_pci_ser_setting_be(struct rtw89_dev *rtwdev)
{
	u32 val32;

	rtw89_write32(rtwdev, R_BE_PL1_DBG_INFO, 0x0);
	rtw89_write32_set(rtwdev, R_BE_FWS1IMR, B_BE_PCIE_SER_TIMEOUT_INDIC_EN);
	rtw89_write32_set(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_SER_PL1_EN);
	rtw89_write32_mask(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_TIMER_UNIT_MASK, 1);

	val32 = rtw89_read32(rtwdev, R_BE_REG_PL1_MASK);
	val32 |= B_BE_SER_PMU_IMR | B_BE_SER_L1SUB_IMR | B_BE_SER_PM_MASTER_IMR |
		 B_BE_SER_LTSSM_IMR | B_BE_SER_PM_CLK_MASK | B_BE_SER_PCLKREQ_ACK_MASK;
	rtw89_write32(rtwdev, R_BE_REG_PL1_MASK, val32);
}

static void rtw89_pci_ctrl_txdma_ch_be(struct rtw89_dev *rtwdev, bool enable)
{
	u32 mask_all;
	u32 val;

	mask_all = B_BE_STOP_CH0 | B_BE_STOP_CH1 | B_BE_STOP_CH2 |
		   B_BE_STOP_CH3 | B_BE_STOP_CH4 | B_BE_STOP_CH5 |
		   B_BE_STOP_CH6 | B_BE_STOP_CH7 | B_BE_STOP_CH8 |
		   B_BE_STOP_CH9 | B_BE_STOP_CH10 | B_BE_STOP_CH11;

	val = rtw89_read32(rtwdev, R_BE_HAXI_DMA_STOP1);
	val |= B_BE_STOP_CH13 | B_BE_STOP_CH14;

	if (enable)
		val &= ~mask_all;
	else
		val |= mask_all;

	rtw89_write32(rtwdev, R_BE_HAXI_DMA_STOP1, val);
}

static void rtw89_pci_ctrl_txdma_fw_ch_be(struct rtw89_dev *rtwdev, bool enable)
{
	u32 val = rtw89_read32(rtwdev, R_BE_HAXI_DMA_STOP1);

	if (enable)
		val &= ~B_BE_STOP_CH12;
	else
		val |= B_BE_STOP_CH12;

	rtw89_write32(rtwdev, R_BE_HAXI_DMA_STOP1, val);
}

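/* HCI pre-init for BE generation chips: set up the I/O recovery watchdogs,
 * assert the power-wake patch, stop DMA while all ring indexes are cleared,
 * reconfigure descriptor/DMA modes, reset BD RAM, apply debounce/LDO/SER
 * settings, and finally re-enable TRX DMA with only the firmware TX DMA
 * channel left running.
 */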
static int rtw89_pci_ops_mac_pre_init_be(struct rtw89_dev *rtwdev)
{
	int ret;

	rtw89_pci_set_io_rcy_be(rtwdev);
	_patch_pcie_power_wake_be(rtwdev, true);
	rtw89_pci_ctrl_wpdma_pcie_be(rtwdev, false);
	rtw89_pci_ctrl_trxdma_pcie_be(rtwdev, MAC_AX_PCIE_DISABLE,
				      MAC_AX_PCIE_DISABLE, MAC_AX_PCIE_DISABLE);
	rtw89_pci_clr_idx_all_be(rtwdev);

	ret = rtw89_pci_poll_dma_all_idle_be(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n");
		return ret;
	}

	rtw89_pci_mode_op_be(rtwdev);
	rtw89_pci_ops_reset(rtwdev);

	ret = rtw89_pci_rst_bdram_be(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]pcie rst bdram\n");
		return ret;
	}

	rtw89_pci_debounce_be(rtwdev);
	rtw89_pci_ldo_low_pwr_be(rtwdev);
	rtw89_pci_pcie_setting_be(rtwdev);
	rtw89_pci_ser_setting_be(rtwdev);

	rtw89_pci_ctrl_txdma_ch_be(rtwdev, false);
	rtw89_pci_ctrl_txdma_fw_ch_be(rtwdev, true);
	rtw89_pci_ctrl_trxdma_pcie_be(rtwdev, MAC_AX_PCIE_ENABLE,
				      MAC_AX_PCIE_ENABLE, MAC_AX_PCIE_ENABLE);

	return 0;
}

static int rtw89_pci_ops_mac_pre_deinit_be(struct rtw89_dev *rtwdev)
{
	u32 val;

	_patch_pcie_power_wake_be(rtwdev, false);

	val = rtw89_read32_mask(rtwdev, R_BE_IC_PWR_STATE, B_BE_WLMAC_PWR_STE_MASK);
	if (val == 0)
		return 0;

	rtw89_pci_ctrl_trxdma_pcie_be(rtwdev, MAC_AX_PCIE_DISABLE,
				      MAC_AX_PCIE_DISABLE, MAC_AX_PCIE_DISABLE);
	rtw89_pci_clr_idx_all_be(rtwdev);

	return 0;
}

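/* Configure PCIe LTR (Latency Tolerance Reporting) for BE chips: validate
 * the current register contents, then program the hardware decision logic,
 * idle/active/disable latency indexes and RX page thresholds.
 */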
int rtw89_pci_ltr_set_v2(struct rtw89_dev *rtwdev, bool en)
{
	u32 ctrl0, cfg0, cfg1, dec_ctrl, idle_ltcy, act_ltcy, dis_ltcy;

	ctrl0 = rtw89_read32(rtwdev, R_BE_LTR_CTRL_0);
	if (rtw89_pci_ltr_is_err_reg_val(ctrl0))
		return -EINVAL;
	cfg0 = rtw89_read32(rtwdev, R_BE_LTR_CFG_0);
	if (rtw89_pci_ltr_is_err_reg_val(cfg0))
		return -EINVAL;
	cfg1 = rtw89_read32(rtwdev, R_BE_LTR_CFG_1);
	if (rtw89_pci_ltr_is_err_reg_val(cfg1))
		return -EINVAL;
	dec_ctrl = rtw89_read32(rtwdev, R_BE_LTR_DECISION_CTRL_V1);
	if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl))
		return -EINVAL;
	idle_ltcy = rtw89_read32(rtwdev, R_BE_LTR_LATENCY_IDX3_V1);
	if (rtw89_pci_ltr_is_err_reg_val(idle_ltcy))
		return -EINVAL;
	act_ltcy = rtw89_read32(rtwdev, R_BE_LTR_LATENCY_IDX1_V1);
	if (rtw89_pci_ltr_is_err_reg_val(act_ltcy))
		return -EINVAL;
	dis_ltcy = rtw89_read32(rtwdev, R_BE_LTR_LATENCY_IDX0_V1);
	if (rtw89_pci_ltr_is_err_reg_val(dis_ltcy))
		return -EINVAL;

	if (en) {
		dec_ctrl |= B_BE_ENABLE_LTR_CTL_DECISION | B_BE_LTR_HW_DEC_EN_V1;
		ctrl0 |= B_BE_LTR_HW_EN;
	} else {
		dec_ctrl &= ~(B_BE_ENABLE_LTR_CTL_DECISION | B_BE_LTR_HW_DEC_EN_V1 |
			      B_BE_LTR_EN_PORT_V1_MASK);
		ctrl0 &= ~B_BE_LTR_HW_EN;
	}

	dec_ctrl = u32_replace_bits(dec_ctrl, PCI_LTR_SPC_500US,
				    B_BE_LTR_SPACE_IDX_MASK);
	cfg0 = u32_replace_bits(cfg0, PCI_LTR_IDLE_TIMER_3_2MS,
				B_BE_LTR_IDLE_TIMER_IDX_MASK);
	cfg1 = u32_replace_bits(cfg1, 0xC0, B_BE_LTR_CMAC0_RX_USE_PG_TH_MASK);
	cfg1 = u32_replace_bits(cfg1, 0xC0, B_BE_LTR_CMAC1_RX_USE_PG_TH_MASK);
	cfg0 = u32_replace_bits(cfg0, 1, B_BE_LTR_IDX_ACTIVE_MASK);
	cfg0 = u32_replace_bits(cfg0, 3, B_BE_LTR_IDX_IDLE_MASK);
	dec_ctrl = u32_replace_bits(dec_ctrl, 0, B_BE_LTR_IDX_DISABLE_V1_MASK);

	rtw89_write32(rtwdev, R_BE_LTR_LATENCY_IDX3_V1, 0x90039003);
	rtw89_write32(rtwdev, R_BE_LTR_LATENCY_IDX1_V1, 0x880b880b);
	rtw89_write32(rtwdev, R_BE_LTR_LATENCY_IDX0_V1, 0);
	rtw89_write32(rtwdev, R_BE_LTR_DECISION_CTRL_V1, dec_ctrl);
	rtw89_write32(rtwdev, R_BE_LTR_CFG_0, cfg0);
	rtw89_write32(rtwdev, R_BE_LTR_CFG_1, cfg1);
	rtw89_write32(rtwdev, R_BE_LTR_CTRL_0, ctrl0);

	return 0;
}
EXPORT_SYMBOL(rtw89_pci_ltr_set_v2);

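/* Set up RX interrupt mitigation: a 1 ms timer unit, a packet-count
 * threshold of half the RX BD ring size (capped at U8_MAX) and a timer
 * count of 2.
 */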
static void rtw89_pci_configure_mit_be(struct rtw89_dev *rtwdev)
{
	u32 cnt;
	u32 val;

	rtw89_write32_mask(rtwdev, R_BE_PCIE_MIT0_TMR,
			   B_BE_PCIE_MIT0_RX_TMR_MASK, BE_MIT0_TMR_UNIT_1MS);

	val = rtw89_read32(rtwdev, R_BE_PCIE_MIT0_CNT);
	cnt = min_t(u32, U8_MAX, RTW89_PCI_RXBD_NUM_MAX / 2);
	val = u32_replace_bits(val, cnt, B_BE_PCIE_RX_MIT0_CNT_MASK);
	val = u32_replace_bits(val, 2, B_BE_PCIE_RX_MIT0_TMR_CNT_MASK);
	rtw89_write32(rtwdev, R_BE_PCIE_MIT0_CNT, val);
}

static int rtw89_pci_ops_mac_post_init_be(struct rtw89_dev *rtwdev)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	int ret;

	ret = info->ltr_set(rtwdev, true);
	if (ret) {
		rtw89_err(rtwdev, "pci ltr set fail\n");
		return ret;
	}

	rtw89_pci_ctrl_trxdma_pcie_be(rtwdev, MAC_AX_PCIE_IGNORE,
				      MAC_AX_PCIE_IGNORE, MAC_AX_PCIE_ENABLE);
	rtw89_pci_ctrl_wpdma_pcie_be(rtwdev, true);
	rtw89_pci_ctrl_txdma_ch_be(rtwdev, true);
	rtw89_pci_ctrl_txdma_fw_ch_be(rtwdev, true);
	rtw89_pci_configure_mit_be(rtwdev);

	return 0;
}

static int rtw89_pci_poll_io_idle_be(struct rtw89_dev *rtwdev)
{
	u32 sts;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_read32, sts,
				       !(sts & B_BE_HAXI_MST_BUSY),
				       10, 1000, false, rtwdev,
				       R_BE_HAXI_DMA_BUSY1);
	if (ret) {
		rtw89_err(rtwdev, "pci dmach busy1 0x%X\n", sts);
		return ret;
	}

	return 0;
}

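/* Stop PCIe DMA for a level-1 SER reset and wait for the HAXI master to
 * go idle; if the first poll times out, toggle HCI DMA TRX once and poll
 * again.
 */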
static int rtw89_pci_lv1rst_stop_dma_be(struct rtw89_dev *rtwdev)
{
	int ret;

	rtw89_pci_ctrl_dma_all(rtwdev, false);
	ret = rtw89_pci_poll_io_idle_be(rtwdev);
	if (!ret)
		return 0;

	rtw89_debug(rtwdev, RTW89_DBG_HCI,
		    "[PCIe] poll_io_idle fail; reset hci dma trx\n");

	rtw89_mac_ctrl_hci_dma_trx(rtwdev, false);
	rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);

	return rtw89_pci_poll_io_idle_be(rtwdev);
}

static int rtw89_pci_lv1rst_start_dma_be(struct rtw89_dev *rtwdev)
{
	int ret;

	rtw89_mac_ctrl_hci_dma_trx(rtwdev, false);
	rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
	rtw89_pci_clr_idx_all(rtwdev);

	ret = rtw89_pci_rst_bdram_be(rtwdev);
	if (ret)
		return ret;

	rtw89_pci_ctrl_dma_all(rtwdev, true);
	return 0;
}

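/* System suspend: assert B_BE_R_DIS_PRST under the write-lock bit, freeze
 * PCIe registers over reset and mask the PM-master SER interrupt.  Resume
 * reverses the sequence and clears any pending PL1 SER status before
 * re-arming it.
 */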
static int __maybe_unused rtw89_pci_suspend_be(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw89_dev *rtwdev = hw->priv;

	rtw89_write32_set(rtwdev, R_BE_RSV_CTRL, B_BE_WLOCK_1C_BIT6);
	rtw89_write32_set(rtwdev, R_BE_RSV_CTRL, B_BE_R_DIS_PRST);
	rtw89_write32_clr(rtwdev, R_BE_RSV_CTRL, B_BE_WLOCK_1C_BIT6);
	rtw89_write32_set(rtwdev, R_BE_PCIE_FRZ_CLK, B_BE_PCIE_FRZ_REG_RST);
	rtw89_write32_clr(rtwdev, R_BE_REG_PL1_MASK, B_BE_SER_PM_MASTER_IMR);
	return 0;
}

static int __maybe_unused rtw89_pci_resume_be(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw89_dev *rtwdev = hw->priv;
	u32 polling;
	int ret;

	rtw89_write32_set(rtwdev, R_BE_RSV_CTRL, B_BE_WLOCK_1C_BIT6);
	rtw89_write32_clr(rtwdev, R_BE_RSV_CTRL, B_BE_R_DIS_PRST);
	rtw89_write32_clr(rtwdev, R_BE_RSV_CTRL, B_BE_WLOCK_1C_BIT6);
	rtw89_write32_clr(rtwdev, R_BE_PCIE_FRZ_CLK, B_BE_PCIE_FRZ_REG_RST);
	rtw89_write32_clr(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_SER_PL1_EN);

	ret = read_poll_timeout_atomic(rtw89_read32, polling, !polling, 1, 1000,
				       false, rtwdev, R_BE_REG_PL1_ISR);
	if (ret)
		rtw89_warn(rtwdev, "[ERR] PCIE SER clear polling fail\n");

	rtw89_write32_set(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_SER_PL1_EN);
	rtw89_write32_set(rtwdev, R_BE_REG_PL1_MASK, B_BE_SER_PM_MASTER_IMR);

	return 0;
}

SIMPLE_DEV_PM_OPS(rtw89_pm_ops_be, rtw89_pci_suspend_be, rtw89_pci_resume_be);
EXPORT_SYMBOL(rtw89_pm_ops_be);

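/* Generation-specific PCI operations and interrupt bits for BE chips,
 * hooked up through struct rtw89_pci_gen_def.
 */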
const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
	.isr_rdu = B_BE_RDU_CH1_INT | B_BE_RDU_CH0_INT,
	.isr_halt_c2h = B_BE_HALT_C2H_INT,
	.isr_wdt_timeout = B_BE_WDT_TIMEOUT_INT,
	.isr_clear_rpq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RPQ0_ISR_V1},
	.isr_clear_rxq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RX0P2_ISR_V1},

	.mac_pre_init = rtw89_pci_ops_mac_pre_init_be,
	.mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_be,
	.mac_post_init = rtw89_pci_ops_mac_post_init_be,

	.clr_idx_all = rtw89_pci_clr_idx_all_be,
	.rst_bdram = rtw89_pci_rst_bdram_be,

	.lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_be,
	.lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_be,

	.ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_be,
	.ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_be,
	.poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle_be,

	.aspm_set = rtw89_pci_aspm_set_be,
	.clkreq_set = rtw89_pci_clkreq_set_be,
	.l1ss_set = rtw89_pci_l1ss_set_be,
};
EXPORT_SYMBOL(rtw89_pci_gen_be);