// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for the following SoCs
 * Tegra194
 * Tegra234
 *
 * Copyright (C) 2019-2022 NVIDIA Corporation.
 *
 * Author: Vidya Sagar <vidyas@nvidia.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/random.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/types.h>
#include "pcie-designware.h"
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include "../../pci.h"

#define TEGRA194_DWC_IP_VER			0x490A
#define TEGRA234_DWC_IP_VER			0x562A

#define APPL_PINMUX				0x0
#define APPL_PINMUX_PEX_RST			BIT(0)
#define APPL_PINMUX_CLKREQ_OVERRIDE_EN		BIT(2)
#define APPL_PINMUX_CLKREQ_OVERRIDE		BIT(3)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN	BIT(4)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE	BIT(5)

#define APPL_CTRL				0x4
#define APPL_CTRL_SYS_PRE_DET_STATE		BIT(6)
#define APPL_CTRL_LTSSM_EN			BIT(7)
#define APPL_CTRL_HW_HOT_RST_EN			BIT(20)
#define APPL_CTRL_HW_HOT_RST_MODE_MASK		GENMASK(1, 0)
#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT		22
#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST	0x1
#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN	0x2

#define APPL_INTR_EN_L0_0			0x8
#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN	BIT(0)
#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN	BIT(4)
#define APPL_INTR_EN_L0_0_INT_INT_EN		BIT(8)
#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN	BIT(15)
#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN	BIT(19)
#define APPL_INTR_EN_L0_0_SYS_INTR_EN		BIT(30)
#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN	BIT(31)

#define APPL_INTR_STATUS_L0			0xC
#define APPL_INTR_STATUS_L0_LINK_STATE_INT	BIT(0)
#define APPL_INTR_STATUS_L0_INT_INT		BIT(8)
#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT	BIT(15)
#define APPL_INTR_STATUS_L0_PEX_RST_INT		BIT(16)
#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT	BIT(18)

#define APPL_INTR_EN_L1_0_0				0x1C
#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN	BIT(1)
#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN		BIT(3)
#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN	BIT(30)

#define APPL_INTR_STATUS_L1_0_0				0x20
#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED	BIT(1)
#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED	BIT(3)
#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE		BIT(30)

#define APPL_INTR_STATUS_L1_1			0x2C
#define APPL_INTR_STATUS_L1_2			0x30
#define APPL_INTR_STATUS_L1_3			0x34
#define APPL_INTR_STATUS_L1_6			0x3C
#define APPL_INTR_STATUS_L1_7			0x40
#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED	BIT(1)

#define APPL_INTR_EN_L1_8_0			0x44
#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN		BIT(2)
#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN	BIT(3)
#define APPL_INTR_EN_L1_8_INTX_EN		BIT(11)
#define APPL_INTR_EN_L1_8_AER_INT_EN		BIT(15)

#define APPL_INTR_STATUS_L1_8_0			0x4C
#define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK	GENMASK(11, 6)
#define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS	BIT(2)
#define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS	BIT(3)

#define APPL_INTR_STATUS_L1_9			0x54
#define APPL_INTR_STATUS_L1_10			0x58
#define APPL_INTR_STATUS_L1_11			0x64
#define APPL_INTR_STATUS_L1_13			0x74
#define APPL_INTR_STATUS_L1_14			0x78
#define APPL_INTR_STATUS_L1_15			0x7C
#define APPL_INTR_STATUS_L1_17			0x88

#define APPL_INTR_EN_L1_18				0x90
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT		BIT(2)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR		BIT(1)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)

#define APPL_INTR_STATUS_L1_18				0x94
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT	BIT(2)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR	BIT(1)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)

#define APPL_MSI_CTRL_1				0xAC

#define APPL_MSI_CTRL_2				0xB0

#define APPL_LEGACY_INTX			0xB8

#define APPL_LTR_MSG_1				0xC4
#define LTR_MSG_REQ				BIT(15)
#define LTR_NOSNOOP_MSG_REQ			BIT(31)

#define APPL_LTR_MSG_2				0xC8
#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE	BIT(3)

#define APPL_LINK_STATUS			0xCC
#define APPL_LINK_STATUS_RDLH_LINK_UP		BIT(0)

#define APPL_DEBUG				0xD0
#define APPL_DEBUG_PM_LINKST_IN_L2_LAT		BIT(21)
#define APPL_DEBUG_PM_LINKST_IN_L0		0x11
#define APPL_DEBUG_LTSSM_STATE_MASK		GENMASK(8, 3)
#define APPL_DEBUG_LTSSM_STATE_SHIFT		3
#define LTSSM_STATE_PRE_DETECT			5

#define APPL_RADM_STATUS			0xE4
#define APPL_PM_XMT_TURNOFF_STATE		BIT(0)

#define APPL_DM_TYPE				0x100
#define APPL_DM_TYPE_MASK			GENMASK(3, 0)
#define APPL_DM_TYPE_RP				0x4
#define APPL_DM_TYPE_EP				0x0

#define APPL_CFG_BASE_ADDR			0x104
#define APPL_CFG_BASE_ADDR_MASK			GENMASK(31, 12)

#define APPL_CFG_IATU_DMA_BASE_ADDR		0x108
#define APPL_CFG_IATU_DMA_BASE_ADDR_MASK	GENMASK(31, 18)

#define APPL_CFG_MISC				0x110
#define APPL_CFG_MISC_SLV_EP_MODE		BIT(14)
#define APPL_CFG_MISC_ARCACHE_MASK		GENMASK(13, 10)
#define APPL_CFG_MISC_ARCACHE_SHIFT		10
#define APPL_CFG_MISC_ARCACHE_VAL		3

#define APPL_CFG_SLCG_OVERRIDE			0x114
#define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER	BIT(0)

#define APPL_CAR_RESET_OVRD				0x12C
#define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N	BIT(0)

#define IO_BASE_IO_DECODE				BIT(0)
#define IO_BASE_IO_DECODE_BIT8				BIT(8)

#define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE		BIT(0)
#define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE	BIT(16)

#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF	0x718
#define CFG_TIMER_CTRL_ACK_NAK_SHIFT	(19)

#define N_FTS_VAL					52
#define FTS_VAL						52

#define GEN3_EQ_CONTROL_OFF			0x8a8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT	8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK	GENMASK(23, 8)
#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK	GENMASK(3, 0)

#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT	0x8D0
#define AMBA_ERROR_RESPONSE_CRS_SHIFT		3
#define AMBA_ERROR_RESPONSE_CRS_MASK		GENMASK(1, 0)
#define AMBA_ERROR_RESPONSE_CRS_OKAY		0
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF	1
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001	2

#define MSIX_ADDR_MATCH_LOW_OFF			0x940
#define MSIX_ADDR_MATCH_LOW_OFF_EN		BIT(0)
#define MSIX_ADDR_MATCH_LOW_OFF_MASK		GENMASK(31, 2)

#define MSIX_ADDR_MATCH_HIGH_OFF		0x944
#define MSIX_ADDR_MATCH_HIGH_OFF_MASK		GENMASK(31, 0)

#define PORT_LOGIC_MSIX_DOORBELL			0x948

#define CAP_SPCIE_CAP_OFF			0x154
#define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK	GENMASK(3, 0)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK	GENMASK(11, 8)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT	8

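/* Timeout and debounce values below are in microseconds */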
#define PME_ACK_TIMEOUT 10000

#define LTSSM_TIMEOUT 50000	/* 50ms */

#define GEN3_GEN4_EQ_PRESET_INIT	5

#define GEN1_CORE_CLK_FREQ	62500000
#define GEN2_CORE_CLK_FREQ	125000000
#define GEN3_CORE_CLK_FREQ	250000000
#define GEN4_CORE_CLK_FREQ	500000000

#define LTR_MSG_TIMEOUT		(100 * 1000)

#define PERST_DEBOUNCE_TIME	(5 * 1000)

#define EP_STATE_DISABLED	0
#define EP_STATE_ENABLED	1

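/*
 * Core clock rate to use for each negotiated link speed, indexed by the
 * PCI_EXP_LNKSTA_CLS (current link speed) field
 */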
static const unsigned int pcie_gen_freq[] = {
	GEN1_CORE_CLK_FREQ,	/* PCI_EXP_LNKSTA_CLS == 0; undefined */
	GEN1_CORE_CLK_FREQ,
	GEN2_CORE_CLK_FREQ,
	GEN3_CORE_CLK_FREQ,
	GEN4_CORE_CLK_FREQ
};

struct tegra_pcie_dw_of_data {
	u32 version;
	enum dw_pcie_device_mode mode;
	bool has_msix_doorbell_access_fix;
	bool has_sbr_reset_fix;
	bool has_l1ss_exit_fix;
	bool has_ltr_req_fix;
	u32 cdm_chk_int_en_bit;
	u32 gen4_preset_vec;
	u8 n_fts[2];
};

struct tegra_pcie_dw {
	struct device *dev;
	struct resource *appl_res;
	struct resource *dbi_res;
	struct resource *atu_dma_res;
	void __iomem *appl_base;
	struct clk *core_clk;
	struct reset_control *core_apb_rst;
	struct reset_control *core_rst;
	struct dw_pcie pci;
	struct tegra_bpmp *bpmp;

	struct tegra_pcie_dw_of_data *of_data;

	bool supports_clkreq;
	bool enable_cdm_check;
	bool enable_srns;
	bool link_state;
	bool update_fc_fixup;
	bool enable_ext_refclk;
	u8 init_link_width;
	u32 msi_ctrl_int;
	u32 num_lanes;
	u32 cid;
	u32 cfg_link_cap_l1sub;
	u32 ras_des_cap;
	u32 pcie_cap_base;
	u32 aspm_cmrt;
	u32 aspm_pwr_on_t;
	u32 aspm_l0s_enter_lat;

	struct regulator *pex_ctl_supply;
	struct regulator *slot_ctl_3v3;
	struct regulator *slot_ctl_12v;

	unsigned int phy_count;
	struct phy **phys;

	struct dentry *debugfs;

	/* Endpoint mode specific */
	struct gpio_desc *pex_rst_gpiod;
	struct gpio_desc *pex_refclk_sel_gpiod;
	unsigned int pex_rst_irq;
	int ep_state;
	unsigned long link_status;	/* bitmap used with the bitops helpers */
	struct icc_path *icc_path;
};

static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
{
	return container_of(pci, struct tegra_pcie_dw, pci);
}

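/* Accessors for the Tegra-specific application (APPL) logic registers */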
static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
			       const u32 reg)
{
	writel_relaxed(value, pcie->appl_base + reg);
}

static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
{
	return readl_relaxed(pcie->appl_base + reg);
}

struct tegra_pcie_soc {
	enum dw_pcie_device_mode mode;
};

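/*
 * Translate the negotiated link speed and width into an interconnect
 * bandwidth request and scale the core clock to match the link rate.
 */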
static void tegra_pcie_icc_set(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val, speed, width;

	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);

	speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, val);
	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);

	val = width * PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]);

	if (icc_set_bw(pcie->icc_path, Mbps_to_icc(val), 0))
		dev_err(pcie->dev, "can't set bw[%u]\n", val);

	if (speed >= ARRAY_SIZE(pcie_gen_freq))
		speed = 0;

	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]);
}

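/*
 * If the Link Bandwidth Management Status bit is set and the link has come
 * up narrower than it initially trained, treat the link as bad and retrain
 * it at Gen-1 speed.
 */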
static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 current_link_width;
	u16 val;

	/*
	 * NOTE: Since this scenario is uncommon and the link is not stable
	 * anyway, don't wait to confirm that the link is really
	 * transitioning to Gen-2 speed.
	 */
	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
	if (val & PCI_EXP_LNKSTA_LBMS) {
		current_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
		if (pcie->init_link_width > current_link_width) {
			dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL2);
			val &= ~PCI_EXP_LNKCTL2_TLS;
			val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL2, val);

			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL);
			val |= PCI_EXP_LNKCTL_RL;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL, val);
		}
	}
}

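/*
 * Interrupt statuses are reported in two levels: APPL_INTR_STATUS_L0
 * indicates which group fired, and the corresponding APPL_INTR_STATUS_L1_*
 * register identifies the exact event.
 */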
static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	u32 val, status_l0, status_l1;
	u16 val_w;

	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
		if (!pcie->of_data->has_sbr_reset_fix &&
		    status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
			/* SBR & Surprise Link Down WAR */
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
			udelay(1);
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);

			val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
			val |= PORT_LOGIC_SPEED_CHANGE;
			dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
		}
	}

	if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
		if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);
			apply_bad_link_workaround(pp);
		}
		if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						  PCI_EXP_LNKSTA);
			val_w |= PCI_EXP_LNKSTA_LBMS;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKSTA, val_w);

			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);

			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						  PCI_EXP_LNKSTA);
			dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
				PCI_EXP_LNKSTA_CLS);
		}
	}

	if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
			dev_info(pci->dev, "CDM check complete\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
		}
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
			dev_err(pci->dev, "CDM comparison mismatch\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
		}
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
			dev_err(pci->dev, "CDM Logic error\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
		}
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
		dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);
	}

	return IRQ_HANDLED;
}

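/*
 * A hot reset has completed: clear all stale interrupt statuses and
 * re-enable the LTSSM.
 */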
static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
{
	u32 val;

	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
	appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);
}

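/*
 * Threaded endpoint IRQ handler: report link-up to the DWC endpoint core,
 * update interconnect bandwidth and core clock, and, once the host sets Bus
 * Master Enable, send an LTR message upstream (skipped on hardware with the
 * LTR request fix, or when the endpoint doesn't advertise L1SS).
 */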
static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie_ep *ep = &pcie->pci.ep;
	struct dw_pcie *pci = &pcie->pci;
	u32 val;

	if (test_and_clear_bit(0, &pcie->link_status))
		dw_pcie_ep_linkup(ep);

	tegra_pcie_icc_set(pcie);

	if (pcie->of_data->has_ltr_req_fix)
		return IRQ_HANDLED;

	/* If EP doesn't advertise L1SS, just return */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
		return IRQ_HANDLED;

	/* Check if BME is set to '1' */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	if (val & PCI_COMMAND_MASTER) {
		ktime_t timeout;

		/* 110us for both snoop and no-snoop */
		val = FIELD_PREP(PCI_LTR_VALUE_MASK, 110) |
		      FIELD_PREP(PCI_LTR_SCALE_MASK, 2) |
		      LTR_MSG_REQ |
		      FIELD_PREP(PCI_LTR_NOSNOOP_VALUE, 110) |
		      FIELD_PREP(PCI_LTR_NOSNOOP_SCALE, 2) |
		      LTR_NOSNOOP_MSG_REQ;
		appl_writel(pcie, val, APPL_LTR_MSG_1);

		/* Send LTR upstream */
		val = appl_readl(pcie, APPL_LTR_MSG_2);
		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
		appl_writel(pcie, val, APPL_LTR_MSG_2);

		timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
		for (;;) {
			val = appl_readl(pcie, APPL_LTR_MSG_2);
			if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
				break;
			if (ktime_after(ktime_get(), timeout))
				break;
			usleep_range(1000, 1100);
		}
		if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
			dev_err(pcie->dev, "Failed to send LTR message\n");
	}

	return IRQ_HANDLED;
}

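/*
 * Hard endpoint IRQ handler: acknowledge link-state and BME-change events
 * and wake the IRQ thread to do the work that may sleep.
 */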
static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	int spurious = 1;
	u32 status_l0, status_l1, link_status;

	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);

		if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
			pex_ep_event_hot_rst_done(pcie);

		if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
			link_status = appl_readl(pcie, APPL_LINK_STATUS);
			if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
				dev_dbg(pcie->dev, "Link is up with Host\n");
				set_bit(0, &pcie->link_status);
				return IRQ_WAKE_THREAD;
			}
		}

		spurious = 0;
	}

	if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);

		if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
			return IRQ_WAKE_THREAD;

		spurious = 0;
	}

	if (spurious) {
		dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
			 status_l0);
		appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
	}

	return IRQ_HANDLED;
}

static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
				     int size, u32 *val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	/*
	 * This is an endpoint-mode-specific register that happens to appear
	 * even when the controller is operating in Root Port mode, and the
	 * system hangs when it is accessed with the link in the ASPM-L1
	 * state. So skip accessing it altogether.
	 */
	if (!pcie->of_data->has_msix_doorbell_access_fix &&
	    !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
		*val = 0x00000000;
		return PCIBIOS_SUCCESSFUL;
	}

	return pci_generic_config_read(bus, devfn, where, size, val);
}

static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
				     int size, u32 val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	/*
	 * This is an endpoint-mode-specific register that happens to appear
	 * even when the controller is operating in Root Port mode, and the
	 * system hangs when it is accessed with the link in the ASPM-L1
	 * state. So skip accessing it altogether.
	 */
	if (!pcie->of_data->has_msix_doorbell_access_fix &&
	    !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
		return PCIBIOS_SUCCESSFUL;

	return pci_generic_config_write(bus, devfn, where, size, val);
}

static struct pci_ops tegra_pci_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = tegra_pcie_dw_rd_own_conf,
	.write = tegra_pcie_dw_wr_own_conf,
};

#if defined(CONFIG_PCIEASPM)
static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
	val &= ~PCI_L1SS_CAP_ASPM_L1_1;
	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}

static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
	val &= ~PCI_L1SS_CAP_ASPM_L1_2;
	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}

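/*
 * Program the RAS DES event counter to the given Group-5 (ASPM) event and
 * return its current count.
 */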
static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
				PCIE_RAS_DES_EVENT_COUNTER_CONTROL);
	val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
	val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
	val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
				PCIE_RAS_DES_EVENT_COUNTER_DATA);

	return val;
}

static int aspm_state_cnt(struct seq_file *s, void *data)
{
	struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
				     dev_get_drvdata(s->private);
	u32 val;

	seq_printf(s, "Tx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));

	seq_printf(s, "Rx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));

	seq_printf(s, "Link L1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));

	seq_printf(s, "Link L1.1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));

	seq_printf(s, "Link L1.2 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));

	/* Clear all counters */
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL,
			   EVENT_COUNTER_ALL_CLEAR);

	/* Re-enable counting */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);

	return 0;
}

static void init_host_aspm(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val;

	val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
	pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;

	pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci,
							PCI_EXT_CAP_ID_VNDR);

	/* Enable ASPM counters */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);

	/* Program T_cmrt and T_pwr_on values */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
	val |= (pcie->aspm_cmrt << 8);
	val |= (pcie->aspm_pwr_on_t << 19);
	dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);

	/* Program L0s and L1 entrance latencies */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
	val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
	val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT);
	val |= PORT_AFR_ENTER_ASPM;
	dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
}

static void init_debugfs(struct tegra_pcie_dw *pcie)
{
	debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
				    aspm_state_cnt);
}
#else
static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
#endif

static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;
	u16 val_w;

	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	if (!pcie->of_data->has_sbr_reset_fix) {
		val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
		val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
		appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
	}

	if (pcie->enable_cdm_check) {
		val = appl_readl(pcie, APPL_INTR_EN_L0_0);
		val |= pcie->of_data->cdm_chk_int_en_bit;
		appl_writel(pcie, val, APPL_INTR_EN_L0_0);

		val = appl_readl(pcie, APPL_INTR_EN_L1_18);
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
		appl_writel(pcie, val, APPL_INTR_EN_L1_18);
	}

	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKSTA);
	pcie->init_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val_w);

	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKCTL);
	val_w |= PCI_EXP_LNKCTL_LBMIE;
	dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
			   val_w);
}

static void tegra_pcie_enable_intx_interrupts(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	/* Enable INTX interrupt generation */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
	val |= APPL_INTR_EN_L0_0_INT_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
	val |= APPL_INTR_EN_L1_8_INTX_EN;
	val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
	val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
	if (IS_ENABLED(CONFIG_PCIEAER))
		val |= APPL_INTR_EN_L1_8_AER_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
}

static void tegra_pcie_enable_msi_interrupts(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	/* Enable MSI interrupt generation */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
	val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
}

static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	/* Clear interrupt statuses before enabling interrupts */
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);

	tegra_pcie_enable_system_interrupts(pp);
	tegra_pcie_enable_intx_interrupts(pp);
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_enable_msi_interrupts(pp);
}

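/*
 * Program the per-lane TX equalization presets advertised for the 8 GT/s
 * and 16 GT/s data rates, then set up the preset request vectors used
 * during Gen3/Gen4 link equalization.
 */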
static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val, offset, i;

	/* Program init preset */
	for (i = 0; i < pcie->num_lanes; i++) {
		val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));
		val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			   CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
		dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);

		offset = dw_pcie_find_ext_capability(pci,
						     PCI_EXT_CAP_ID_PL_16GT) +
				PCI_PL_16GT_LE_CTRL;
		val = dw_pcie_readb_dbi(pci, offset + i);
		val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
		dw_pcie_writeb_dbi(pci, offset + i, val);
	}

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
	val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
	val |= (pcie->of_data->gen4_preset_vec <<
		GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}

static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;
	u16 val_16;

	pp->bridge->ops = &tegra_pci_ops;

	if (!pcie->pcie_cap_base)
		pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
							      PCI_CAP_ID_EXP);

	val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
	val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
	dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);

	val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
	dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/*
	 * Return 0xFFFF0001 as the response to config reads that complete
	 * with CRS (Configuration Request Retry Status)
	 */
	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
	val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
	val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
		AMBA_ERROR_RESPONSE_CRS_SHIFT);
	dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);

	/* Clear Slot Clock Configuration bit if SRNS configuration */
	if (pcie->enable_srns) {
		val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKSTA);
		val_16 &= ~PCI_EXP_LNKSTA_SLC;
		dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
				   val_16);
	}

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	/* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
	if (!pcie->supports_clkreq) {
		disable_aspm_l11(pcie);
		disable_aspm_l12(pcie);
	}

	if (!pcie->of_data->has_l1ss_exit_fix) {
		val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
		val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
		dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
	}

	if (pcie->update_fc_fixup) {
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

	return 0;
}

static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	struct dw_pcie_rp *pp = &pci->pp;
	u32 val, offset, tmp;
	bool retry = true;

	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
		enable_irq(pcie->pex_rst_irq);
		return 0;
	}

retry_link:
	/* Assert RST */
	val = appl_readl(pcie, APPL_PINMUX);
	val &= ~APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);

	usleep_range(100, 200);

	/* Enable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	/* De-assert RST */
	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);

	msleep(100);

	if (dw_pcie_wait_for_link(pci)) {
		if (!retry)
			return 0;
		/*
		 * Some endpoints can't bring the link up if the root port
		 * has the Data Link Feature (DLF) enabled. Refer to PCIe
		 * spec rev 4.0 ver 1.0, sec 3.4.2 & 7.7.4, for more
		 * information on Scaled Flow Control and DLF. So, confirm
		 * that this is indeed the case here and attempt link-up once
		 * again with DLF disabled.
		 */
		val = appl_readl(pcie, APPL_DEBUG);
		val &= APPL_DEBUG_LTSSM_STATE_MASK;
		val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
		tmp = appl_readl(pcie, APPL_LINK_STATUS);
		tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
		if (!(val == 0x11 && !tmp)) {
			/* Link is down for all good reasons */
			return 0;
		}

		dev_info(pci->dev, "Link is down in DLL\n");
		dev_info(pci->dev, "Trying again with DLFE disabled\n");
		/* Disable LTSSM */
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~APPL_CTRL_LTSSM_EN;
		appl_writel(pcie, val, APPL_CTRL);

		reset_control_assert(pcie->core_rst);
		reset_control_deassert(pcie->core_rst);

		offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
		val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
		val &= ~PCI_DLF_EXCHANGE_ENABLE;
		dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);

		tegra_pcie_dw_host_init(pp);
		dw_pcie_setup_rc(pp);

		retry = false;
		goto retry_link;
	}

	tegra_pcie_icc_set(pcie);

	tegra_pcie_enable_interrupts(pp);

	return 0;
}

static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	disable_irq(pcie->pex_rst_irq);
}

static const struct dw_pcie_ops tegra_dw_pcie_ops = {
	.link_up = tegra_pcie_dw_link_up,
	.start_link = tegra_pcie_dw_start_link,
	.stop_link = tegra_pcie_dw_stop_link,
};

static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
	.init = tegra_pcie_dw_host_init,
};

static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
{
	unsigned int phy_count = pcie->phy_count;

	while (phy_count--) {
		phy_power_off(pcie->phys[phy_count]);
		phy_exit(pcie->phys[phy_count]);
	}
}

static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
{
	unsigned int i;
	int ret;

	for (i = 0; i < pcie->phy_count; i++) {
		ret = phy_init(pcie->phys[i]);
		if (ret < 0)
			goto phy_power_off;

		ret = phy_power_on(pcie->phys[i]);
		if (ret < 0)
			goto phy_exit;
	}

	return 0;

phy_power_off:
	while (i--) {
		phy_power_off(pcie->phys[i]);
phy_exit:
		phy_exit(pcie->phys[i]);
	}

	return ret;
}

static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct device_node *np = pcie->dev->of_node;
	int ret;

	pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	if (!pcie->dbi_res) {
		dev_err(pcie->dev, "Failed to find \"dbi\" region\n");
		return -ENODEV;
	}

	ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
	if (ret < 0) {
		dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
		return ret;
	}

	ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
				   &pcie->aspm_pwr_on_t);
	if (ret < 0)
		dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
			 ret);

	ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
				   &pcie->aspm_l0s_enter_lat);
	if (ret < 0)
		dev_info(pcie->dev,
			 "Failed to read ASPM L0s Entrance latency: %d\n", ret);

	ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
		return ret;
	}

	ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
	if (ret) {
		dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
		return ret;
	}

	ret = of_property_count_strings(np, "phy-names");
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to find PHY entries: %d\n",
			ret);
		return ret;
	}
	pcie->phy_count = ret;

	if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
		pcie->update_fc_fixup = true;

	/* RP using an external REFCLK is supported only in Tegra234 */
	if (pcie->of_data->version == TEGRA194_DWC_IP_VER) {
		if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
			pcie->enable_ext_refclk = true;
	} else {
		pcie->enable_ext_refclk =
			of_property_read_bool(pcie->dev->of_node,
					      "nvidia,enable-ext-refclk");
	}

	pcie->supports_clkreq =
		of_property_read_bool(pcie->dev->of_node, "supports-clkreq");

	pcie->enable_cdm_check =
		of_property_read_bool(np, "snps,enable-cdm-check");

	if (pcie->of_data->version == TEGRA234_DWC_IP_VER)
		pcie->enable_srns =
			of_property_read_bool(np, "nvidia,enable-srns");

	if (pcie->of_data->mode == DW_PCIE_RC_TYPE)
		return 0;

	/* Endpoint mode specific DT entries */
	pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
	if (IS_ERR(pcie->pex_rst_gpiod)) {
		int err = PTR_ERR(pcie->pex_rst_gpiod);
		const char *level = KERN_ERR;

		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, pcie->dev,
			   dev_fmt("Failed to get PERST GPIO: %d\n"),
			   err);
		return err;
	}

	pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
						    "nvidia,refclk-select",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
		int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
		const char *level = KERN_ERR;

		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, pcie->dev,
			   dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
			   err);
		pcie->pex_refclk_sel_gpiod = NULL;
	}

	return 0;
}

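/*
 * The BPMP firmware owns the UPHY and the PCIe controller power states;
 * changes are requested by sending MRQ_UPHY messages to it.
 */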
static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
					  bool enable)
{
	struct mrq_uphy_response resp;
	struct tegra_bpmp_message msg;
	struct mrq_uphy_request req;

	/*
	 * Controller-5 doesn't need to have its state set by BPMP-FW in
	 * Tegra194
	 */
	if (pcie->of_data->version == TEGRA194_DWC_IP_VER && pcie->cid == 5)
		return 0;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
	req.controller_state.pcie_controller = pcie->cid;
	req.controller_state.enable = enable;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_UPHY;
	msg.tx.data = &req;
	msg.tx.size = sizeof(req);
	msg.rx.data = &resp;
	msg.rx.size = sizeof(resp);

	return tegra_bpmp_transfer(pcie->bpmp, &msg);
}

static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
					 bool enable)
{
	struct mrq_uphy_response resp;
	struct tegra_bpmp_message msg;
	struct mrq_uphy_request req;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	if (enable) {
		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
		req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
	} else {
		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
		req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
	}

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_UPHY;
	msg.tx.data = &req;
	msg.tx.size = sizeof(req);
	msg.rx.data = &resp;
	msg.rx.size = sizeof(resp);

	return tegra_bpmp_transfer(pcie->bpmp, &msg);
}

static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie_rp *pp = &pcie->pci.pp;
	struct pci_bus *child, *root_bus = NULL;
	struct pci_dev *pdev;

	/*
	 * With some endpoints, the link doesn't go into the L2 state unless
	 * they are in D0. So, make sure that the immediate downstream devices
	 * are in D0 before sending PME_TurnOff to put the link into L2.
	 * This is as per PCI Express Base r4.0 v1.0 September 27-2017,
	 * 5.2 Link State Power Management (Page #428).
	 */

	list_for_each_entry(child, &pp->bridge->bus->children, node) {
		/* Bring downstream devices to D0 if they are not already in */
		if (child->parent == pp->bridge->bus) {
			root_bus = child;
			break;
		}
	}

	if (!root_bus) {
		dev_err(pcie->dev, "Failed to find downstream devices\n");
		return;
	}

	list_for_each_entry(pdev, &root_bus->devices, bus_list) {
		if (PCI_SLOT(pdev->devfn) == 0) {
			if (pci_set_power_state(pdev, PCI_D0))
				dev_err(pcie->dev,
					"Failed to transition %s to D0 state\n",
					dev_name(&pdev->dev));
		}
	}
}

static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
{
	pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
	if (IS_ERR(pcie->slot_ctl_3v3)) {
		if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
			return PTR_ERR(pcie->slot_ctl_3v3);

		pcie->slot_ctl_3v3 = NULL;
	}

	pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
	if (IS_ERR(pcie->slot_ctl_12v)) {
		if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
			return PTR_ERR(pcie->slot_ctl_12v);

		pcie->slot_ctl_12v = NULL;
	}

	return 0;
}

static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
{
	int ret;

	if (pcie->slot_ctl_3v3) {
		ret = regulator_enable(pcie->slot_ctl_3v3);
		if (ret < 0) {
			dev_err(pcie->dev,
				"Failed to enable 3.3V slot supply: %d\n", ret);
			return ret;
		}
	}

	if (pcie->slot_ctl_12v) {
		ret = regulator_enable(pcie->slot_ctl_12v);
		if (ret < 0) {
			dev_err(pcie->dev,
				"Failed to enable 12V slot supply: %d\n", ret);
			goto fail_12v_enable;
		}
	}

	/*
	 * According to PCI Express Card Electromechanical Specification
	 * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
	 * should be a minimum of 100ms.
	 */
	if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
		msleep(100);

	return 0;

fail_12v_enable:
	if (pcie->slot_ctl_3v3)
		regulator_disable(pcie->slot_ctl_3v3);
	return ret;
}

static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
{
	if (pcie->slot_ctl_12v)
		regulator_disable(pcie->slot_ctl_12v);
	if (pcie->slot_ctl_3v3)
		regulator_disable(pcie->slot_ctl_3v3);
}

static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
					bool en_hw_hot_rst)
{
	int ret;
	u32 val;

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
	if (ret) {
		dev_err(pcie->dev,
			"Failed to enable controller %u: %d\n", pcie->cid, ret);
		return ret;
	}

	if (pcie->enable_ext_refclk) {
		ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
		if (ret) {
			dev_err(pcie->dev, "Failed to init UPHY: %d\n", ret);
			goto fail_pll_init;
		}
	}

	ret = tegra_pcie_enable_slot_regulators(pcie);
	if (ret < 0)
		goto fail_slot_reg_en;

	ret = regulator_enable(pcie->pex_ctl_supply);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
		goto fail_reg_en;
	}

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
		goto fail_core_clk;
	}

	ret = reset_control_deassert(pcie->core_apb_rst);
	if (ret) {
		dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
			ret);
		goto fail_core_apb_rst;
	}

	if (en_hw_hot_rst || pcie->of_data->has_sbr_reset_fix) {
		/* Enable HW_HOT_RST mode */
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN <<
			APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= APPL_CTRL_HW_HOT_RST_EN;
		appl_writel(pcie, val, APPL_CTRL);
	}

	ret = tegra_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
		goto fail_phy;
	}

	/* Update CFG base address */
	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	/* Configure this core for RP mode operation */
	appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);

	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	if (pcie->enable_srns || pcie->enable_ext_refclk) {
		/*
		 * When the Tegra PCIe RP is using an external clock, it cannot
		 * supply the same clock to its downstream hierarchy. Hence,
		 * gate the PCIe RP REFCLK output pads when the RP & EP are
		 * using separate clocks or the RP is using an external REFCLK.
		 */
		val = appl_readl(pcie, APPL_PINMUX);
		val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
		val &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
		appl_writel(pcie, val, APPL_PINMUX);
	}

	if (!pcie->supports_clkreq) {
		val = appl_readl(pcie, APPL_PINMUX);
		val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
		val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
		appl_writel(pcie, val, APPL_PINMUX);
	}

	/* Update iATU_DMA base address */
	appl_writel(pcie,
		    pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	reset_control_deassert(pcie->core_rst);

	return ret;

fail_phy:
	reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
	clk_disable_unprepare(pcie->core_clk);
fail_core_clk:
	regulator_disable(pcie->pex_ctl_supply);
fail_reg_en:
	tegra_pcie_disable_slot_regulators(pcie);
fail_slot_reg_en:
	if (pcie->enable_ext_refclk)
		tegra_pcie_bpmp_set_pll_state(pcie, false);
fail_pll_init:
	tegra_pcie_bpmp_set_ctrl_state(pcie, false);

	return ret;
}

static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
{
	int ret;

	ret = reset_control_assert(pcie->core_rst);
	if (ret)
		dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret);

	tegra_pcie_disable_phy(pcie);

	ret = reset_control_assert(pcie->core_apb_rst);
	if (ret)
		dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);

	clk_disable_unprepare(pcie->core_clk);

	ret = regulator_disable(pcie->pex_ctl_supply);
	if (ret)
		dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);

	tegra_pcie_disable_slot_regulators(pcie);

	if (pcie->enable_ext_refclk) {
		ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
		if (ret)
			dev_err(pcie->dev, "Failed to deinit UPHY: %d\n", ret);
	}

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
	if (ret)
		dev_err(pcie->dev, "Failed to disable controller %u: %d\n",
			pcie->cid, ret);
}

static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	int ret;

	ret = tegra_pcie_config_controller(pcie, false);
	if (ret < 0)
		return ret;

	pp->ops = &tegra_pcie_dw_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
		goto fail_host_init;
	}

	return 0;

fail_host_init:
	tegra_pcie_unconfig_controller(pcie);
	return ret;
}

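/*
 * Request PME_Turn_Off and poll for the link to settle in L2. Returns 0 if
 * the link is already down or enters L2, or a negative error on timeout.
 */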
static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
{
	u32 val;

	if (!tegra_pcie_dw_link_up(&pcie->pci))
		return 0;

	val = appl_readl(pcie, APPL_RADM_STATUS);
	val |= APPL_PM_XMT_TURNOFF_STATE;
	appl_writel(pcie, val, APPL_RADM_STATUS);

	return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
				 val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
				 1, PME_ACK_TIMEOUT);
}

static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
{
	u32 data;
	int err;

	if (!tegra_pcie_dw_link_up(&pcie->pci)) {
		dev_dbg(pcie->dev, "PCIe link is not up...!\n");
		return;
	}

	/*
	 * The PCIe controller exits from L2 only if reset is applied, so the
	 * controller doesn't handle interrupts. But in cases where L2 entry
	 * fails, PERST# is asserted, which can trigger a surprise link down
	 * AER. However, this function is called from suspend_noirq(), so the
	 * AER interrupt will not be processed. Disable all interrupts to
	 * avoid such a scenario.
	 */
	appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0);

	if (tegra_pcie_try_link_l2(pcie)) {
		dev_info(pcie->dev, "Link didn't transition to L2 state\n");
		/*
		 * The TX lane clock frequency resets to Gen-1 only if the
		 * link is in the L2 or Detect state. So assert PERST# to the
		 * endpoint to force the RP to go into the Detect state.
		 */
		data = appl_readl(pcie, APPL_PINMUX);
		data &= ~APPL_PINMUX_PEX_RST;
		appl_writel(pcie, data, APPL_PINMUX);

		/*
		 * Some cards do not go to detect state even after de-asserting
		 * PERST#. So, de-assert LTSSM to bring link to detect state.
		 */
		data = readl(pcie->appl_base + APPL_CTRL);
		data &= ~APPL_CTRL_LTSSM_EN;
		writel(data, pcie->appl_base + APPL_CTRL);

		err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
						data,
						((data &
						APPL_DEBUG_LTSSM_STATE_MASK) >>
						APPL_DEBUG_LTSSM_STATE_SHIFT) ==
						LTSSM_STATE_PRE_DETECT,
						1, LTSSM_TIMEOUT);
		if (err)
			dev_info(pcie->dev, "Link didn't go to detect state\n");
	}
	/*
	 * DBI registers may not be accessible after this, as PLL-E may be
	 * down depending on how CLKREQ is pulled by the endpoint.
	 */
	data = appl_readl(pcie, APPL_PINMUX);
	data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
	/* Cut REFCLK to slot */
	data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
	data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
	appl_writel(pcie, data, APPL_PINMUX);
}

static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
{
	tegra_pcie_downstream_dev_to_D0(pcie);
	dw_pcie_host_deinit(&pcie->pci.pp);
	tegra_pcie_dw_pme_turnoff(pcie);
	tegra_pcie_unconfig_controller(pcie);
}

static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
{
	struct device *dev = pcie->dev;
	char *name;
	int ret;

	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
			ret);
		goto fail_pm_get_sync;
	}

	ret = pinctrl_pm_select_default_state(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
		goto fail_pm_get_sync;
	}

	ret = tegra_pcie_init_controller(pcie);
	if (ret < 0) {
		dev_err(dev, "Failed to initialize controller: %d\n", ret);
		goto fail_pm_get_sync;
	}

	pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
	if (!pcie->link_state) {
		ret = -ENOMEDIUM;
		goto fail_host_init;
	}

	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
	if (!name) {
		ret = -ENOMEM;
		goto fail_host_init;
	}

	pcie->debugfs = debugfs_create_dir(name, NULL);
	init_debugfs(pcie);

	return ret;

fail_host_init:
	tegra_pcie_deinit_controller(pcie);
fail_pm_get_sync:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	return ret;
}


static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
{
	u32 val;
	int ret;

	if (pcie->ep_state == EP_STATE_DISABLED)
		return;

	/* Disable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val &= ~APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
				 ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
				 APPL_DEBUG_LTSSM_STATE_SHIFT) ==
				 LTSSM_STATE_PRE_DETECT,
				 1, LTSSM_TIMEOUT);
	if (ret)
		dev_err(pcie->dev, "Failed to go to the Detect state: %d\n",
			ret);

	reset_control_assert(pcie->core_rst);

	tegra_pcie_disable_phy(pcie);

	reset_control_assert(pcie->core_apb_rst);

	clk_disable_unprepare(pcie->core_clk);

	pm_runtime_put_sync(pcie->dev);

	if (pcie->enable_ext_refclk) {
		ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
		if (ret)
			dev_err(pcie->dev, "Failed to turn off UPHY: %d\n",
				ret);
	}

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
	if (ret)
		dev_err(pcie->dev, "Failed to disable controller %u: %d\n",
			pcie->cid, ret);

	pcie->ep_state = EP_STATE_DISABLED;
	dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
}
1742
static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_ep *ep = &pci->ep;
	struct device *dev = pcie->dev;
	u32 val;
	int ret;
	u16 val_16;

	if (pcie->ep_state == EP_STATE_ENABLED)
		return;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
			ret);
		return;
	}

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable controller %u: %d\n",
			pcie->cid, ret);
		goto fail_set_ctrl_state;
	}

	if (pcie->enable_ext_refclk) {
		ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
		if (ret) {
			dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n",
				ret);
			goto fail_pll_init;
		}
	}

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(dev, "Failed to enable core clock: %d\n", ret);
		goto fail_core_clk_enable;
	}

	ret = reset_control_deassert(pcie->core_apb_rst);
	if (ret) {
		dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
		goto fail_core_apb_rst;
	}

	ret = tegra_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(dev, "Failed to enable PHY: %d\n", ret);
		goto fail_phy;
	}

	/* Clear any stale interrupt statuses */
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);

	/* configure this core for EP mode operation */
	val = appl_readl(pcie, APPL_DM_TYPE);
	val &= ~APPL_DM_TYPE_MASK;
	val |= APPL_DM_TYPE_EP;
	appl_writel(pcie, val, APPL_DM_TYPE);

	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_SYS_PRE_DET_STATE;
	val |= APPL_CTRL_HW_HOT_RST_EN;
	appl_writel(pcie, val, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= APPL_CFG_MISC_SLV_EP_MODE;
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
	appl_writel(pcie, val, APPL_PINMUX);

	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	appl_writel(pcie, pcie->atu_dma_res->start &
		    APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
	val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
	val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);

	reset_control_deassert(pcie->core_rst);

	if (pcie->update_fc_fixup) {
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	/* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
	if (!pcie->supports_clkreq) {
		disable_aspm_l11(pcie);
		disable_aspm_l12(pcie);
	}

	if (!pcie->of_data->has_l1ss_exit_fix) {
		val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
		val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
		dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
	}

	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
						      PCI_CAP_ID_EXP);

	/* Clear Slot Clock Configuration bit if SRNS configuration */
	if (pcie->enable_srns) {
		val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKSTA);
		val_16 &= ~PCI_EXP_LNKSTA_SLC;
		dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
				   val_16);
	}

	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

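	/*
	 * Program the MSI-X address-match window to the reserved MSI memory
	 * region; writing a vector number to this region (see
	 * tegra_pcie_ep_raise_msix_irq()) makes the controller generate the
	 * corresponding MSI-X message.
	 */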
	val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
	val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
	val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);

	ret = dw_pcie_ep_init_complete(ep);
	if (ret) {
		dev_err(dev, "Failed to complete initialization: %d\n", ret);
		goto fail_init_complete;
	}

	dw_pcie_ep_init_notify(ep);

	/* Program the private control to allow sending LTR upstream */
	if (pcie->of_data->has_ltr_req_fix) {
		val = appl_readl(pcie, APPL_LTR_MSG_2);
		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
		appl_writel(pcie, val, APPL_LTR_MSG_2);
	}

	/* Enable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	pcie->ep_state = EP_STATE_ENABLED;
	dev_dbg(dev, "Initialization of endpoint is completed\n");

	return;

fail_init_complete:
	reset_control_assert(pcie->core_rst);
	tegra_pcie_disable_phy(pcie);
fail_phy:
	reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
	clk_disable_unprepare(pcie->core_clk);
fail_core_clk_enable:
	tegra_pcie_bpmp_set_pll_state(pcie, false);
fail_pll_init:
	tegra_pcie_bpmp_set_ctrl_state(pcie, false);
fail_set_ctrl_state:
	pm_runtime_put_sync(dev);
}

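/*
 * Threaded handler for PERST# edges: a logical high on the reset GPIO means
 * PERST# is asserted, so tear the endpoint down; otherwise bring it up.
 */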
static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;

	if (gpiod_get_value(pcie->pex_rst_gpiod))
		pex_ep_event_pex_rst_assert(pcie);
	else
		pex_ep_event_pex_rst_deassert(pcie);

	return IRQ_HANDLED;
}

static int tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	/* Tegra194 supports only INTA */
	if (irq > 1)
		return -EINVAL;

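	/*
	 * Pulse the INTx trigger: assert, hold for at least 1 ms, then
	 * deassert so that both the assert and deassert INTx messages are
	 * emitted on the link.
	 */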
	appl_writel(pcie, 1, APPL_LEGACY_INTX);
	usleep_range(1000, 2000);
	appl_writel(pcie, 0, APPL_LEGACY_INTX);
	return 0;
}

static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	if (unlikely(irq > 31))
		return -EINVAL;

	appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);

	return 0;
}

static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	struct dw_pcie_ep *ep = &pcie->pci.ep;

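	/*
	 * Writing the vector number into the reserved MSI page hits the
	 * MSIX_ADDR_MATCH window programmed in
	 * pex_ep_event_pex_rst_deassert(), causing the controller to send
	 * the corresponding MSI-X message.
	 */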
	writel(irq, ep->msi_mem);

	return 0;
}

static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				   unsigned int type, u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	switch (type) {
	case PCI_IRQ_INTX:
		return tegra_pcie_ep_raise_intx_irq(pcie, interrupt_num);

	case PCI_IRQ_MSI:
		return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);

	case PCI_IRQ_MSIX:
		return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);

	default:
		dev_err(pci->dev, "Unknown IRQ type\n");
		return -EPERM;
	}

	return 0;
}

static const struct pci_epc_features tegra_pcie_epc_features = {
	.linkup_notifier = true,
	.core_init_notifier = true,
	.msi_capable = false,
	.msix_capable = false,
	.bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M,
			.only_64bit = true, },
	.bar[BAR_1] = { .type = BAR_RESERVED, },
	.bar[BAR_2] = { .type = BAR_RESERVED, },
	.bar[BAR_3] = { .type = BAR_RESERVED, },
	.bar[BAR_4] = { .type = BAR_RESERVED, },
	.bar[BAR_5] = { .type = BAR_RESERVED, },
};

static const struct pci_epc_features*
tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
	return &tegra_pcie_epc_features;
}

static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.raise_irq = tegra_pcie_ep_raise_irq,
	.get_features = tegra_pcie_ep_get_features,
};

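/*
 * Set up endpoint mode: route PERST# to a threaded IRQ (kept disabled here
 * until the endpoint is ready to be started) and register with the DWC
 * endpoint core.
 */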
static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
				struct platform_device *pdev)
{
	struct dw_pcie *pci = &pcie->pci;
	struct device *dev = pcie->dev;
	struct dw_pcie_ep *ep;
	char *name;
	int ret;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	ep->page_size = SZ_64K;

	ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
	if (ret < 0) {
		dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
			ret);
		return ret;
	}

	ret = gpiod_to_irq(pcie->pex_rst_gpiod);
	if (ret < 0) {
		dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
		return ret;
	}
	pcie->pex_rst_irq = (unsigned int)ret;

	name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
			      pcie->cid);
	if (!name) {
		dev_err(dev, "Failed to create PERST IRQ string\n");
		return -ENOMEM;
	}

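	/*
	 * Suppress the automatic enable at request time; the PERST IRQ is
	 * enabled later, once the endpoint is ready to react to PERST#
	 * transitions.
	 */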
	irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);

	pcie->ep_state = EP_STATE_DISABLED;

	ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
					tegra_pcie_ep_pex_rst_irq,
					IRQF_TRIGGER_RISING |
					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					name, (void *)pcie);
	if (ret < 0) {
		dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
		return ret;
	}

	pm_runtime_enable(dev);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
			ret);
		pm_runtime_disable(dev);
		return ret;
	}

	return 0;
}

static int tegra_pcie_dw_probe(struct platform_device *pdev)
{
	const struct tegra_pcie_dw_of_data *data;
	struct device *dev = &pdev->dev;
	struct resource *atu_dma_res;
	struct tegra_pcie_dw *pcie;
	struct dw_pcie_rp *pp;
	struct dw_pcie *pci;
	struct phy **phys;
	char *name;
	int ret;
	u32 i;

	data = of_device_get_match_data(dev);

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = &pcie->pci;
	pci->dev = &pdev->dev;
	pci->ops = &tegra_dw_pcie_ops;
	pcie->dev = &pdev->dev;
	pcie->of_data = (struct tegra_pcie_dw_of_data *)data;
	pci->n_fts[0] = pcie->of_data->n_fts[0];
	pci->n_fts[1] = pcie->of_data->n_fts[1];
	pp = &pci->pp;
	pp->num_vectors = MAX_MSI_IRQS;

	ret = tegra_pcie_dw_parse_dt(pcie);
	if (ret < 0) {
		const char *level = KERN_ERR;

		if (ret == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, dev,
			   dev_fmt("Failed to parse device tree: %d\n"),
			   ret);
		return ret;
	}

	ret = tegra_pcie_get_slot_regulators(pcie);
	if (ret < 0) {
		const char *level = KERN_ERR;

		if (ret == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, dev,
			   dev_fmt("Failed to get slot regulators: %d\n"),
			   ret);
		return ret;
	}

	if (pcie->pex_refclk_sel_gpiod)
		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);

	pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
	if (IS_ERR(pcie->pex_ctl_supply)) {
		ret = PTR_ERR(pcie->pex_ctl_supply);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get regulator: %ld\n",
				PTR_ERR(pcie->pex_ctl_supply));
		return ret;
	}

	pcie->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(pcie->core_clk)) {
		dev_err(dev, "Failed to get core clock: %ld\n",
			PTR_ERR(pcie->core_clk));
		return PTR_ERR(pcie->core_clk);
	}

	pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						      "appl");
	if (!pcie->appl_res) {
		dev_err(dev, "Failed to find \"appl\" region\n");
		return -ENODEV;
	}

	pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
	if (IS_ERR(pcie->appl_base))
		return PTR_ERR(pcie->appl_base);

	pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
	if (IS_ERR(pcie->core_apb_rst)) {
		dev_err(dev, "Failed to get APB reset: %ld\n",
			PTR_ERR(pcie->core_apb_rst));
		return PTR_ERR(pcie->core_apb_rst);
	}

	phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
	if (!phys)
		return -ENOMEM;

	for (i = 0; i < pcie->phy_count; i++) {
		name = kasprintf(GFP_KERNEL, "p2u-%u", i);
		if (!name) {
			dev_err(dev, "Failed to create P2U string\n");
			return -ENOMEM;
		}
		phys[i] = devm_phy_get(dev, name);
		kfree(name);
		if (IS_ERR(phys[i])) {
			ret = PTR_ERR(phys[i]);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to get PHY: %d\n", ret);
			return ret;
		}
	}

	pcie->phys = phys;

	atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "atu_dma");
	if (!atu_dma_res) {
		dev_err(dev, "Failed to find \"atu_dma\" region\n");
		return -ENODEV;
	}
	pcie->atu_dma_res = atu_dma_res;

	pci->atu_size = resource_size(atu_dma_res);
	pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
	if (IS_ERR(pci->atu_base))
		return PTR_ERR(pci->atu_base);

	pcie->core_rst = devm_reset_control_get(dev, "core");
	if (IS_ERR(pcie->core_rst)) {
		dev_err(dev, "Failed to get core reset: %ld\n",
			PTR_ERR(pcie->core_rst));
		return PTR_ERR(pcie->core_rst);
	}

	pp->irq = platform_get_irq_byname(pdev, "intr");
	if (pp->irq < 0)
		return pp->irq;

	pcie->bpmp = tegra_bpmp_get(dev);
	if (IS_ERR(pcie->bpmp))
		return PTR_ERR(pcie->bpmp);

	platform_set_drvdata(pdev, pcie);

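	/*
	 * "write" interconnect path, used to request memory bandwidth
	 * matching the negotiated link speed and width.
	 */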
	pcie->icc_path = devm_of_icc_get(&pdev->dev, "write");
	ret = PTR_ERR_OR_ZERO(pcie->icc_path);
	if (ret) {
		tegra_bpmp_put(pcie->bpmp);
		dev_err_probe(&pdev->dev, ret, "failed to get write interconnect\n");
		return ret;
	}

	switch (pcie->of_data->mode) {
	case DW_PCIE_RC_TYPE:
		ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
				       IRQF_SHARED, "tegra-pcie-intr", pcie);
		if (ret) {
			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
				ret);
			goto fail;
		}

		ret = tegra_pcie_config_rp(pcie);
		if (ret && ret != -ENOMEDIUM)
			goto fail;
		else
			return 0;
		break;

	case DW_PCIE_EP_TYPE:
		ret = devm_request_threaded_irq(dev, pp->irq,
						tegra_pcie_ep_hard_irq,
						tegra_pcie_ep_irq_thread,
						IRQF_SHARED | IRQF_ONESHOT,
						"tegra-pcie-ep-intr", pcie);
		if (ret) {
			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
				ret);
			goto fail;
		}

		ret = tegra_pcie_config_ep(pcie, pdev);
		if (ret < 0)
			goto fail;
		else
			return 0;
		break;

	default:
		dev_err(dev, "Invalid PCIe device type %d\n",
			pcie->of_data->mode);
		ret = -EINVAL;
	}

fail:
	tegra_bpmp_put(pcie->bpmp);
	return ret;
}

static void tegra_pcie_dw_remove(struct platform_device *pdev)
{
	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);

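	/*
	 * If the link never came up in RC mode, tegra_pcie_config_rp()
	 * already tore the controller down, so there is nothing to undo
	 * here.
	 */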
	if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
		if (!pcie->link_state)
			return;

		debugfs_remove_recursive(pcie->debugfs);
		tegra_pcie_deinit_controller(pcie);
		pm_runtime_put_sync(pcie->dev);
	} else {
		disable_irq(pcie->pex_rst_irq);
		pex_ep_event_pex_rst_assert(pcie);
	}

	pm_runtime_disable(pcie->dev);
	tegra_bpmp_put(pcie->bpmp);
	if (pcie->pex_refclk_sel_gpiod)
		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);
}

static int tegra_pcie_dw_suspend_late(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
	u32 val;

	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
		dev_err(dev, "Failed to suspend as Tegra PCIe is in EP mode\n");
		return -EPERM;
	}

	if (!pcie->link_state)
		return 0;

	/* Enable HW_HOT_RST mode */
	if (!pcie->of_data->has_sbr_reset_fix) {
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= APPL_CTRL_HW_HOT_RST_EN;
		appl_writel(pcie, val, APPL_CTRL);
	}

	return 0;
}

static int tegra_pcie_dw_suspend_noirq(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);

	if (!pcie->link_state)
		return 0;

	tegra_pcie_downstream_dev_to_D0(pcie);
	tegra_pcie_dw_pme_turnoff(pcie);
	tegra_pcie_unconfig_controller(pcie);

	return 0;
}

static int tegra_pcie_dw_resume_noirq(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
	int ret;

	if (!pcie->link_state)
		return 0;

	ret = tegra_pcie_config_controller(pcie, true);
	if (ret < 0)
		return ret;

	ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
	if (ret < 0) {
		dev_err(dev, "Failed to init host: %d\n", ret);
		goto fail_host_init;
	}

	dw_pcie_setup_rc(&pcie->pci.pp);

	ret = tegra_pcie_dw_start_link(&pcie->pci);
	if (ret < 0)
		goto fail_host_init;

	return 0;

fail_host_init:
	tegra_pcie_unconfig_controller(pcie);
	return ret;
}

static int tegra_pcie_dw_resume_early(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
	u32 val;

	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
		dev_err(dev, "Suspend is not supported in EP mode\n");
		return -ENOTSUPP;
	}

	if (!pcie->link_state)
		return 0;

	/* Disable HW_HOT_RST mode */
	if (!pcie->of_data->has_sbr_reset_fix) {
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
		       APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
		val &= ~APPL_CTRL_HW_HOT_RST_EN;
		appl_writel(pcie, val, APPL_CTRL);
	}

	return 0;
}

static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
{
	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);

	if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
		if (!pcie->link_state)
			return;

		debugfs_remove_recursive(pcie->debugfs);
		tegra_pcie_downstream_dev_to_D0(pcie);

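		/* Quiesce interrupts before powering the controller down */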
		disable_irq(pcie->pci.pp.irq);
		if (IS_ENABLED(CONFIG_PCI_MSI))
			disable_irq(pcie->pci.pp.msi_irq[0]);

		tegra_pcie_dw_pme_turnoff(pcie);
		tegra_pcie_unconfig_controller(pcie);
		pm_runtime_put_sync(pcie->dev);
	} else {
		disable_irq(pcie->pex_rst_irq);
		pex_ep_event_pex_rst_assert(pcie);
	}
}

static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_rc_of_data = {
	.version = TEGRA194_DWC_IP_VER,
	.mode = DW_PCIE_RC_TYPE,
	.cdm_chk_int_en_bit = BIT(19),
	/* Gen4 - 5, 6, 8 and 9 presets enabled */
	.gen4_preset_vec = 0x360,
	.n_fts = { 52, 52 },
};

static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_ep_of_data = {
	.version = TEGRA194_DWC_IP_VER,
	.mode = DW_PCIE_EP_TYPE,
	.cdm_chk_int_en_bit = BIT(19),
	/* Gen4 - 5, 6, 8 and 9 presets enabled */
	.gen4_preset_vec = 0x360,
	.n_fts = { 52, 52 },
};

static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_rc_of_data = {
	.version = TEGRA234_DWC_IP_VER,
	.mode = DW_PCIE_RC_TYPE,
	.has_msix_doorbell_access_fix = true,
	.has_sbr_reset_fix = true,
	.has_l1ss_exit_fix = true,
	.cdm_chk_int_en_bit = BIT(18),
	/* Gen4 - 6, 8 and 9 presets enabled */
	.gen4_preset_vec = 0x340,
	.n_fts = { 52, 80 },
};

static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_ep_of_data = {
	.version = TEGRA234_DWC_IP_VER,
	.mode = DW_PCIE_EP_TYPE,
	.has_l1ss_exit_fix = true,
	.has_ltr_req_fix = true,
	.cdm_chk_int_en_bit = BIT(18),
	/* Gen4 - 6, 8 and 9 presets enabled */
	.gen4_preset_vec = 0x340,
	.n_fts = { 52, 80 },
};

static const struct of_device_id tegra_pcie_dw_of_match[] = {
	{
		.compatible = "nvidia,tegra194-pcie",
		.data = &tegra194_pcie_dw_rc_of_data,
	},
	{
		.compatible = "nvidia,tegra194-pcie-ep",
		.data = &tegra194_pcie_dw_ep_of_data,
	},
	{
		.compatible = "nvidia,tegra234-pcie",
		.data = &tegra234_pcie_dw_rc_of_data,
	},
	{
		.compatible = "nvidia,tegra234-pcie-ep",
		.data = &tegra234_pcie_dw_ep_of_data,
	},
	{}
};

static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
	.suspend_late = tegra_pcie_dw_suspend_late,
	.suspend_noirq = tegra_pcie_dw_suspend_noirq,
	.resume_noirq = tegra_pcie_dw_resume_noirq,
	.resume_early = tegra_pcie_dw_resume_early,
};

static struct platform_driver tegra_pcie_dw_driver = {
	.probe = tegra_pcie_dw_probe,
	.remove_new = tegra_pcie_dw_remove,
	.shutdown = tegra_pcie_dw_shutdown,
	.driver = {
		.name	= "tegra194-pcie",
		.pm = &tegra_pcie_dw_pm_ops,
		.of_match_table = tegra_pcie_dw_of_match,
	},
};
module_platform_driver(tegra_pcie_dw_driver);

MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);

MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
MODULE_LICENSE("GPL v2");