// SPDX-License-Identifier: GPL-2.0-only
/*
 * UFS Host Controller driver for Exynos specific extensions
 *
 * Copyright (C) 2014-2015 Samsung Electronics Co., Ltd.
 * Author: Seungwon Jeon  <essuuj@gmail.com>
 * Author: Alim Akhtar <alim.akhtar@samsung.com>
 *
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/ufshci.h>
#include <ufs/unipro.h>

#include "ufs-exynos.h"

/*
 * Exynos's Vendor specific registers for UFSHCI
 */
#define HCI_TXPRDT_ENTRY_SIZE	0x00
#define PRDT_PREFETCH_EN	BIT(31)
#define PRDT_SET_SIZE(x)	((x) & 0x1F)
#define HCI_RXPRDT_ENTRY_SIZE	0x04
#define HCI_1US_TO_CNT_VAL	0x0C
#define CNT_VAL_1US_MASK	0x3FF
#define HCI_UTRL_NEXUS_TYPE	0x40
#define HCI_UTMRL_NEXUS_TYPE	0x44
#define HCI_SW_RST		0x50
#define UFS_LINK_SW_RST		BIT(0)
#define UFS_UNIPRO_SW_RST	BIT(1)
#define UFS_SW_RST_MASK		(UFS_UNIPRO_SW_RST | UFS_LINK_SW_RST)
#define HCI_DATA_REORDER	0x60
#define HCI_UNIPRO_APB_CLK_CTRL	0x68
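/*
 * UNIPRO_APB_CLK() keeps the upper bits of the current register value (v)
 * and replaces the low nibble with the 4-bit APB clock pre-divider (x);
 * see exynos_ufs_set_unipro_pclk_div() below.
 */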
#define UNIPRO_APB_CLK(v, x)	(((v) & ~0xF) | ((x) & 0xF))
#define HCI_AXIDMA_RWDATA_BURST_LEN	0x6C
#define HCI_GPIO_OUT		0x70
#define HCI_ERR_EN_PA_LAYER	0x78
#define HCI_ERR_EN_DL_LAYER	0x7C
#define HCI_ERR_EN_N_LAYER	0x80
#define HCI_ERR_EN_T_LAYER	0x84
#define HCI_ERR_EN_DME_LAYER	0x88
#define HCI_CLKSTOP_CTRL	0xB0
#define REFCLKOUT_STOP		BIT(4)
#define MPHY_APBCLK_STOP	BIT(3)
#define REFCLK_STOP		BIT(2)
#define UNIPRO_MCLK_STOP	BIT(1)
#define UNIPRO_PCLK_STOP	BIT(0)
#define CLK_STOP_MASK		(REFCLKOUT_STOP | REFCLK_STOP |\
				 UNIPRO_MCLK_STOP | MPHY_APBCLK_STOP |\
				 UNIPRO_PCLK_STOP)
#define HCI_MISC		0xB4
#define REFCLK_CTRL_EN		BIT(7)
#define UNIPRO_PCLK_CTRL_EN	BIT(6)
#define UNIPRO_MCLK_CTRL_EN	BIT(5)
#define HCI_CORECLK_CTRL_EN	BIT(4)
#define CLK_CTRL_EN_MASK	(REFCLK_CTRL_EN |\
				 UNIPRO_PCLK_CTRL_EN |\
				 UNIPRO_MCLK_CTRL_EN)
/* Device fatal error */
#define DFES_ERR_EN		BIT(31)
#define DFES_DEF_L2_ERRS	(UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF |\
				 UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
#define DFES_DEF_L3_ERRS	(UIC_NETWORK_UNSUPPORTED_HEADER_TYPE |\
				 UIC_NETWORK_BAD_DEVICEID_ENC |\
				 UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING)
#define DFES_DEF_L4_ERRS	(UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE |\
				 UIC_TRANSPORT_UNKNOWN_CPORTID |\
				 UIC_TRANSPORT_NO_CONNECTION_RX |\
				 UIC_TRANSPORT_BAD_TC)

/* FSYS UFS Shareability */
#define UFS_WR_SHARABLE		BIT(2)
#define UFS_RD_SHARABLE		BIT(1)
#define UFS_SHARABLE		(UFS_WR_SHARABLE | UFS_RD_SHARABLE)
#define UFS_SHAREABILITY_OFFSET	0x710

/* Multi-host registers */
#define MHCTRL			0xC4
#define MHCTRL_EN_VH_MASK	(0xE)
#define MHCTRL_EN_VH(vh)	((vh) << 1)
#define PH2VH_MBOX		0xD8

#define MH_MSG_MASK		(0xFF)

#define MH_MSG(id, msg)		(((id) << 8) | ((msg) & 0xFF))
#define MH_MSG_PH_READY		0x1
#define MH_MSG_VH_READY		0x2

#define ALLOW_INQUIRY		BIT(25)
#define ALLOW_MODE_SELECT	BIT(24)
#define ALLOW_MODE_SENSE	BIT(23)
#define ALLOW_PRE_FETCH		GENMASK(22, 21)
#define ALLOW_READ_CMD_ALL	GENMASK(20, 18)	/* read_6/10/16 */
#define ALLOW_READ_BUFFER	BIT(17)
#define ALLOW_READ_CAPACITY	GENMASK(16, 15)
#define ALLOW_REPORT_LUNS	BIT(14)
#define ALLOW_REQUEST_SENSE	BIT(13)
#define ALLOW_SYNCHRONIZE_CACHE	GENMASK(8, 7)
#define ALLOW_TEST_UNIT_READY	BIT(6)
#define ALLOW_UNMAP		BIT(5)
#define ALLOW_VERIFY		BIT(4)
#define ALLOW_WRITE_CMD_ALL	GENMASK(3, 1)	/* write_6/10/16 */

#define ALLOW_TRANS_VH_DEFAULT	(ALLOW_INQUIRY | ALLOW_MODE_SELECT | \
				 ALLOW_MODE_SENSE | ALLOW_PRE_FETCH | \
				 ALLOW_READ_CMD_ALL | ALLOW_READ_BUFFER | \
				 ALLOW_READ_CAPACITY | ALLOW_REPORT_LUNS | \
				 ALLOW_REQUEST_SENSE | ALLOW_SYNCHRONIZE_CACHE | \
				 ALLOW_TEST_UNIT_READY | ALLOW_UNMAP | \
				 ALLOW_VERIFY | ALLOW_WRITE_CMD_ALL)

#define HCI_MH_ALLOWABLE_TRAN_OF_VH		0x30C
#define HCI_MH_IID_IN_TASK_TAG			0x308

#define PH_READY_TIMEOUT_MS			(5 * MSEC_PER_SEC)

enum {
	UNIPRO_L1_5 = 0,	/* PHY Adapter */
	UNIPRO_L2,	/* Data Link */
	UNIPRO_L3,	/* Network */
	UNIPRO_L4,	/* Transport */
	UNIPRO_DME,	/* DME */
};
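
/*
 * Each index above selects the matching HCI_ERR_EN_*_LAYER register in
 * exynos_ufs_config_intr(): L1.5 -> PA, L2 -> DL, L3 -> N, L4 -> T,
 * DME -> DME.
 */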

/*
 * UNIPRO registers
 */
#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0	0x78B8
#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1	0x78BC
#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2	0x78C0

/*
 * UFS Protector registers
 */
#define UFSPRSECURITY	0x010
#define NSSMU		BIT(14)
#define UFSPSBEGIN0	0x200
#define UFSPSEND0	0x204
#define UFSPSLUN0	0x208
#define UFSPSCTRL0	0x20C

#define CNTR_DIV_VAL 40

static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en);
static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en);

static inline void exynos_ufs_enable_auto_ctrl_hcc(struct exynos_ufs *ufs)
{
	exynos_ufs_auto_ctrl_hcc(ufs, true);
}

static inline void exynos_ufs_disable_auto_ctrl_hcc(struct exynos_ufs *ufs)
{
	exynos_ufs_auto_ctrl_hcc(ufs, false);
}

static inline void exynos_ufs_disable_auto_ctrl_hcc_save(
					struct exynos_ufs *ufs, u32 *val)
{
	*val = hci_readl(ufs, HCI_MISC);
	exynos_ufs_auto_ctrl_hcc(ufs, false);
}

static inline void exynos_ufs_auto_ctrl_hcc_restore(
					struct exynos_ufs *ufs, u32 *val)
{
	hci_writel(ufs, *val, HCI_MISC);
}

static inline void exynos_ufs_gate_clks(struct exynos_ufs *ufs)
{
	exynos_ufs_ctrl_clkstop(ufs, true);
}

static inline void exynos_ufs_ungate_clks(struct exynos_ufs *ufs)
{
	exynos_ufs_ctrl_clkstop(ufs, false);
}

static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
{
	return 0;
}

static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
{
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;

	/* IO Coherency setting */
	if (ufs->sysreg) {
		return regmap_update_bits(ufs->sysreg,
					  ufs->shareability_reg_offset,
					  UFS_SHARABLE, UFS_SHARABLE);
	}

	attr->tx_dif_p_nsec = 3200000;

	return 0;
}

static int exynosauto_ufs_post_hce_enable(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;

	/* Enable Virtual Host #1 */
	ufshcd_rmwl(hba, MHCTRL_EN_VH_MASK, MHCTRL_EN_VH(1), MHCTRL);
	/* Default VH Transfer permissions */
	hci_writel(ufs, ALLOW_TRANS_VH_DEFAULT, HCI_MH_ALLOWABLE_TRAN_OF_VH);
	/* The IID is carried in TASKTAG[7:5] rather than in the UCD's IID field */
	hci_writel(ufs, 0x1, HCI_MH_IID_IN_TASK_TAG);

	return 0;
}

static int exynosauto_ufs_pre_link(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	int i;
	u32 tx_line_reset_period, rx_line_reset_period;

	rx_line_reset_period = (RX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC;
	tx_line_reset_period = (TX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC;

	ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
	for_each_ufs_rx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD, i),
			       DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD_EN, i), 0x0);

		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE2, i),
			       (rx_line_reset_period >> 16) & 0xFF);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE1, i),
			       (rx_line_reset_period >> 8) & 0xFF);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE0, i),
			       (rx_line_reset_period) & 0xFF);

		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x2f, i), 0x79);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x84, i), 0x1);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x25, i), 0xf6);
	}

	for_each_ufs_tx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD, i),
			       DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
		/* Keep VND_TX_LINERESET_PVALUE from affecting VND_TX_CLK_PRD */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD_EN, i),
			       0x02);

		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE2, i),
			       (tx_line_reset_period >> 16) & 0xFF);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE1, i),
			       (tx_line_reset_period >> 8) & 0xFF);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE0, i),
			       (tx_line_reset_period) & 0xFF);

		/* TX PWM Gear Capability / PWM_G1_ONLY */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x04, i), 0x1);
	}

	ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0);

	ufshcd_dme_set(hba, UIC_ARG_MIB(0xa011), 0x8000);

	return 0;
}

static int exynosauto_ufs_pre_pwr_change(struct exynos_ufs *ufs,
					 struct ufs_pa_layer_attr *pwr)
{
	struct ufs_hba *hba = ufs->hba;

	/* User data carried by PACP_PWR_req and delivered to the remote DME */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000);

	return 0;
}

static int exynosauto_ufs_post_pwr_change(struct exynos_ufs *ufs,
					  struct ufs_pa_layer_attr *pwr)
{
	struct ufs_hba *hba = ufs->hba;
	u32 enabled_vh;

	enabled_vh = ufshcd_readl(hba, MHCTRL) & MHCTRL_EN_VH_MASK;

	/* Send physical host ready message to virtual hosts */
	ufshcd_writel(hba, MH_MSG(enabled_vh, MH_MSG_PH_READY), PH2VH_MBOX);

	return 0;
}

static int exynos7_ufs_pre_link(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	u32 val = ufs->drv_data->uic_attr->pa_dbg_option_suite;
	int i;

	exynos_ufs_enable_ov_tm(hba);
	for_each_ufs_tx_lane(ufs, i)
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x297, i), 0x17);
	for_each_ufs_rx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x362, i), 0xff);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x363, i), 0x00);
	}
	exynos_ufs_disable_ov_tm(hba);

	for_each_ufs_tx_lane(ufs, i)
		ufshcd_dme_set(hba,
			UIC_ARG_MIB_SEL(TX_HIBERN8_CONTROL, i), 0x0);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_TXPHY_CFGUPDT), 0x1);
	udelay(1);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val | (1 << 12));
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_RESET_PHY), 0x1);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_LINE_RESET), 0x1);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_LINE_RESET_REQ), 0x1);
	udelay(1600);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val);

	return 0;
}

static int exynos7_ufs_post_link(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	int i;

	exynos_ufs_enable_ov_tm(hba);
	for_each_ufs_tx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x28b, i), 0x83);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x29a, i), 0x07);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x277, i),
			TX_LINERESET_N(exynos_ufs_calc_time_cntr(ufs, 200000)));
	}
	exynos_ufs_disable_ov_tm(hba);

	exynos_ufs_enable_dbg_mode(hba);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0xbb8);
	exynos_ufs_disable_dbg_mode(hba);

	return 0;
}

static int exynos7_ufs_pre_pwr_change(struct exynos_ufs *ufs,
						struct ufs_pa_layer_attr *pwr)
{
	unipro_writel(ufs, 0x22, UNIPRO_DBG_FORCE_DME_CTRL_STATE);

	return 0;
}

static int exynos7_ufs_post_pwr_change(struct exynos_ufs *ufs,
						struct ufs_pa_layer_attr *pwr)
{
	struct ufs_hba *hba = ufs->hba;
	int lanes = max_t(u32, pwr->lane_rx, pwr->lane_tx);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_RXPHY_CFGUPDT), 0x1);

	if (lanes == 1) {
		exynos_ufs_enable_dbg_mode(hba);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 0x1);
		exynos_ufs_disable_dbg_mode(hba);
	}

	return 0;
}

/*
 * exynos_ufs_auto_ctrl_hcc - HCI core clock control by h/w
 * Control should be disabled in the following cases:
 * - before a host controller S/W reset
 * - while accessing the UFS protector's registers
 */
static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en)
{
	u32 misc = hci_readl(ufs, HCI_MISC);

	if (en)
		hci_writel(ufs, misc | HCI_CORECLK_CTRL_EN, HCI_MISC);
	else
		hci_writel(ufs, misc & ~HCI_CORECLK_CTRL_EN, HCI_MISC);
}

static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en)
{
	u32 ctrl = hci_readl(ufs, HCI_CLKSTOP_CTRL);
	u32 misc = hci_readl(ufs, HCI_MISC);

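	/*
	 * Ordering matters: when gating, enable the clock-control bits in
	 * HCI_MISC before asserting the stop bits; when ungating, release
	 * the stop bits first and only then clear the control bits.
	 */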
	if (en) {
		hci_writel(ufs, misc | CLK_CTRL_EN_MASK, HCI_MISC);
		hci_writel(ufs, ctrl | CLK_STOP_MASK, HCI_CLKSTOP_CTRL);
	} else {
		hci_writel(ufs, ctrl & ~CLK_STOP_MASK, HCI_CLKSTOP_CTRL);
		hci_writel(ufs, misc & ~CLK_CTRL_EN_MASK, HCI_MISC);
	}
}

static int exynos_ufs_get_clk_info(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct list_head *head = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	unsigned long pclk_rate;
	u32 f_min, f_max;
	u8 div = 0;
	int ret = 0;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR(clki->clk)) {
			if (!strcmp(clki->name, "core_clk"))
				ufs->clk_hci_core = clki->clk;
			else if (!strcmp(clki->name, "sclk_unipro_main"))
				ufs->clk_unipro_main = clki->clk;
		}
	}

	if (!ufs->clk_hci_core || !ufs->clk_unipro_main) {
		dev_err(hba->dev, "failed to get clk info\n");
		ret = -EINVAL;
		goto out;
	}

	ufs->mclk_rate = clk_get_rate(ufs->clk_unipro_main);
	pclk_rate = clk_get_rate(ufs->clk_hci_core);
	f_min = ufs->pclk_avail_min;
	f_max = ufs->pclk_avail_max;

	if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) {
		do {
			pclk_rate /= (div + 1);

			if (pclk_rate <= f_max)
				break;
			div++;
		} while (pclk_rate >= f_min);
	}

	if (unlikely(pclk_rate < f_min || pclk_rate > f_max)) {
		dev_err(hba->dev, "pclk rate %lu is out of the available range\n",
			pclk_rate);
		ret = -EINVAL;
		goto out;
	}

	ufs->pclk_rate = pclk_rate;
	ufs->pclk_div = div;

out:
	return ret;
}

static void exynos_ufs_set_unipro_pclk_div(struct exynos_ufs *ufs)
{
	if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) {
		u32 val;

		val = hci_readl(ufs, HCI_UNIPRO_APB_CLK_CTRL);
		hci_writel(ufs, UNIPRO_APB_CLK(val, ufs->pclk_div),
			   HCI_UNIPRO_APB_CLK_CTRL);
	}
}

static void exynos_ufs_set_pwm_clk_div(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;

	ufshcd_dme_set(hba,
		UIC_ARG_MIB(CMN_PWM_CLK_CTRL), attr->cmn_pwm_clk_ctrl);
}

static void exynos_ufs_calc_pwm_clk_div(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
	const unsigned int div = 30, mult = 20;
	const unsigned long pwm_min = 3 * 1000 * 1000;
	const unsigned long pwm_max = 9 * 1000 * 1000;
	const int divs[] = {32, 16, 8, 4};
	unsigned long clk = 0, _clk, clk_period;
	int i = 0, clk_idx = -1;

	clk_period = UNIPRO_PCLK_PERIOD(ufs);
	for (i = 0; i < ARRAY_SIZE(divs); i++) {
		_clk = NSEC_PER_SEC * mult / (clk_period * divs[i] * div);
		if (_clk >= pwm_min && _clk <= pwm_max) {
			if (_clk > clk) {
				clk_idx = i;
				clk = _clk;
			}
		}
	}

	if (clk_idx == -1) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(CMN_PWM_CLK_CTRL), &clk_idx);
		dev_err(hba->dev,
			"failed to decide pwm clock divider, will not change\n");
	}

	attr->cmn_pwm_clk_ctrl = clk_idx & PWM_CLK_CTRL_MASK;
}

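/*
 * exynos_ufs_calc_time_cntr() - convert a period in ns into PCLK ticks.
 *
 * The division is done in fixed point with a scale factor of 10 ("precise")
 * so the truncated fraction of the clock period is partially recovered.
 * Illustrative sketch, assuming UNIPRO_PCLK_PERIOD() is the integer pclk
 * period in ns and a hypothetical 150 MHz pclk: clk_period = 6 ns and
 * fraction = 6 (0.6 ns scaled by 10), so a 200000 ns period becomes
 * 200000 * 10 / 66 ~= 30303 ticks, close to the exact 30000.
 */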
long exynos_ufs_calc_time_cntr(struct exynos_ufs *ufs, long period)
{
	const int precise = 10;
	long pclk_rate = ufs->pclk_rate;
	long clk_period, fraction;

	clk_period = UNIPRO_PCLK_PERIOD(ufs);
	fraction = ((NSEC_PER_SEC % pclk_rate) * precise) / pclk_rate;

	return (period * precise) / ((clk_period * precise) + fraction);
}

static void exynos_ufs_specify_phy_time_attr(struct exynos_ufs *ufs)
{
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
	struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg;

	t_cfg->tx_linereset_p =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_p_nsec);
	t_cfg->tx_linereset_n =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_n_nsec);
	t_cfg->tx_high_z_cnt =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_high_z_cnt_nsec);
	t_cfg->tx_base_n_val =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_base_unit_nsec);
	t_cfg->tx_gran_n_val =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_gran_unit_nsec);
	t_cfg->tx_sleep_cnt =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_sleep_cnt);

	t_cfg->rx_linereset =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_dif_p_nsec);
	t_cfg->rx_hibern8_wait =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_hibern8_wait_nsec);
	t_cfg->rx_base_n_val =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_base_unit_nsec);
	t_cfg->rx_gran_n_val =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_gran_unit_nsec);
	t_cfg->rx_sleep_cnt =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_sleep_cnt);
	t_cfg->rx_stall_cnt =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_stall_cnt);
}

static void exynos_ufs_config_phy_time_attr(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg;
	int i;

	exynos_ufs_set_pwm_clk_div(ufs);

	exynos_ufs_enable_ov_tm(hba);

	for_each_ufs_rx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_FILLER_ENABLE, i),
				ufs->drv_data->uic_attr->rx_filler_enable);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_LINERESET_VAL, i),
				RX_LINERESET(t_cfg->rx_linereset));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_07_00, i),
				RX_BASE_NVAL_L(t_cfg->rx_base_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_15_08, i),
				RX_BASE_NVAL_H(t_cfg->rx_base_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_07_00, i),
				RX_GRAN_NVAL_L(t_cfg->rx_gran_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_10_08, i),
				RX_GRAN_NVAL_H(t_cfg->rx_gran_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_SLEEP_CNT_TIMER, i),
				RX_OV_SLEEP_CNT(t_cfg->rx_sleep_cnt));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_STALL_CNT_TIMER, i),
				RX_OV_STALL_CNT(t_cfg->rx_stall_cnt));
	}

	for_each_ufs_tx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_LINERESET_P_VAL, i),
				TX_LINERESET_P(t_cfg->tx_linereset_p));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_07_00, i),
				TX_HIGH_Z_CNT_L(t_cfg->tx_high_z_cnt));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_11_08, i),
				TX_HIGH_Z_CNT_H(t_cfg->tx_high_z_cnt));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_07_00, i),
				TX_BASE_NVAL_L(t_cfg->tx_base_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_15_08, i),
				TX_BASE_NVAL_H(t_cfg->tx_base_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_07_00, i),
				TX_GRAN_NVAL_L(t_cfg->tx_gran_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_10_08, i),
				TX_GRAN_NVAL_H(t_cfg->tx_gran_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_OV_SLEEP_CNT_TIMER, i),
				TX_OV_H8_ENTER_EN |
				TX_OV_SLEEP_CNT(t_cfg->tx_sleep_cnt));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_MIN_ACTIVATETIME, i),
				ufs->drv_data->uic_attr->tx_min_activatetime);
	}

	exynos_ufs_disable_ov_tm(hba);
}

static void exynos_ufs_config_phy_cap_attr(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
	int i;

	exynos_ufs_enable_ov_tm(hba);

	for_each_ufs_rx_lane(ufs, i) {
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G1_SYNC_LENGTH_CAP, i),
				attr->rx_hs_g1_sync_len_cap);
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G2_SYNC_LENGTH_CAP, i),
				attr->rx_hs_g2_sync_len_cap);
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G3_SYNC_LENGTH_CAP, i),
				attr->rx_hs_g3_sync_len_cap);
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G1_PREP_LENGTH_CAP, i),
				attr->rx_hs_g1_prep_sync_len_cap);
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G2_PREP_LENGTH_CAP, i),
				attr->rx_hs_g2_prep_sync_len_cap);
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G3_PREP_LENGTH_CAP, i),
				attr->rx_hs_g3_prep_sync_len_cap);
	}

	if (attr->rx_adv_fine_gran_sup_en == 0) {
		for_each_ufs_rx_lane(ufs, i) {
			ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP, i), 0);

			if (attr->rx_min_actv_time_cap)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(
					RX_MIN_ACTIVATETIME_CAPABILITY, i),
					attr->rx_min_actv_time_cap);

			if (attr->rx_hibern8_time_cap)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAP, i),
						attr->rx_hibern8_time_cap);
		}
	} else if (attr->rx_adv_fine_gran_sup_en == 1) {
		for_each_ufs_rx_lane(ufs, i) {
			if (attr->rx_adv_fine_gran_step)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP,
						i), RX_ADV_FINE_GRAN_STEP(
						attr->rx_adv_fine_gran_step));

			if (attr->rx_adv_min_actv_time_cap)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(
						RX_ADV_MIN_ACTIVATETIME_CAP, i),
						attr->rx_adv_min_actv_time_cap);

			if (attr->rx_adv_hibern8_time_cap)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(RX_ADV_HIBERN8TIME_CAP,
						i),
						attr->rx_adv_hibern8_time_cap);
		}
	}

	exynos_ufs_disable_ov_tm(hba);
}

static void exynos_ufs_establish_connt(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	enum {
		DEV_ID		= 0x00,
		PEER_DEV_ID	= 0x01,
		PEER_CPORT_ID	= 0x00,
		TRAFFIC_CLASS	= 0x00,
	};

	/* allow cport attributes to be set */
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_IDLE);

	/* local unipro attributes */
	ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID), DEV_ID);
	ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID_VALID), true);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID), PEER_DEV_ID);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID), PEER_CPORT_ID);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS), CPORT_DEF_FLAGS);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS), TRAFFIC_CLASS);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_CONNECTED);
}

static void exynos_ufs_config_smu(struct exynos_ufs *ufs)
{
	u32 reg, val;

	exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val);

	/* disable encryption by default */
	reg = ufsp_readl(ufs, UFSPRSECURITY);
	ufsp_writel(ufs, reg | NSSMU, UFSPRSECURITY);
	ufsp_writel(ufs, 0x0, UFSPSBEGIN0);
	ufsp_writel(ufs, 0xffffffff, UFSPSEND0);
	ufsp_writel(ufs, 0xff, UFSPSLUN0);
	ufsp_writel(ufs, 0xf1, UFSPSCTRL0);

	exynos_ufs_auto_ctrl_hcc_restore(ufs, &val);
}

static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs,
					struct ufs_pa_layer_attr *pwr)
{
	struct ufs_hba *hba = ufs->hba;
	u8 g = max_t(u32, pwr->gear_rx, pwr->gear_tx);
	u32 mask, sync_len;
	enum {
		SYNC_LEN_G1 = 80 * 1000, /* 80us */
		SYNC_LEN_G2 = 40 * 1000, /* 40us */
		SYNC_LEN_G3 = 20 * 1000, /* 20us */
	};
	int i;

	if (g == 1)
		sync_len = SYNC_LEN_G1;
	else if (g == 2)
		sync_len = SYNC_LEN_G2;
	else if (g == 3)
		sync_len = SYNC_LEN_G3;
	else
		return;

	mask = exynos_ufs_calc_time_cntr(ufs, sync_len);
	mask = (mask >> 8) & 0xff;
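	/*
	 * Only the upper byte ([15:8]) of the computed tick count is kept
	 * and programmed below as the RX sync-pattern mask length on each
	 * RX lane.
	 */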

	exynos_ufs_enable_ov_tm(hba);

	for_each_ufs_rx_lane(ufs, i)
		ufshcd_dme_set(hba,
			UIC_ARG_MIB_SEL(RX_SYNC_MASK_LENGTH, i), mask);

	exynos_ufs_disable_ov_tm(hba);
}

static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	struct phy *generic_phy = ufs->phy;
	struct ufs_host_params host_params;
	int ret;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	ufshcd_init_host_params(&host_params);

	ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
	if (ret) {
		pr_err("%s: failed to determine capabilities\n", __func__);
		goto out;
	}

	if (ufs->drv_data->pre_pwr_change)
		ufs->drv_data->pre_pwr_change(ufs, dev_req_params);

	if (ufshcd_is_hs_mode(dev_req_params)) {
		exynos_ufs_config_sync_pattern_mask(ufs, dev_req_params);

		switch (dev_req_params->hs_rate) {
		case PA_HS_MODE_A:
		case PA_HS_MODE_B:
			phy_calibrate(generic_phy);
			break;
		}
	}

	/* Set the three timeout values for traffic class #0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(DL_FC0PROTTIMEOUTVAL), 8064);
	ufshcd_dme_set(hba, UIC_ARG_MIB(DL_TC0REPLAYTIMEOUTVAL), 28224);
	ufshcd_dme_set(hba, UIC_ARG_MIB(DL_AFC0REQTIMEOUTVAL), 20160);

	return 0;
out:
	return ret;
}

#define PWR_MODE_STR_LEN	64
static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba,
				struct ufs_pa_layer_attr *pwr_req)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	struct phy *generic_phy = ufs->phy;
	int gear = max_t(u32, pwr_req->gear_rx, pwr_req->gear_tx);
	int lanes = max_t(u32, pwr_req->lane_rx, pwr_req->lane_tx);
	char pwr_str[PWR_MODE_STR_LEN] = "";

	/* let default be PWM Gear 1, Lane 1 */
	if (!gear)
		gear = 1;

	if (!lanes)
		lanes = 1;

	if (ufs->drv_data->post_pwr_change)
		ufs->drv_data->post_pwr_change(ufs, pwr_req);

	if (ufshcd_is_hs_mode(pwr_req)) {
		switch (pwr_req->hs_rate) {
		case PA_HS_MODE_A:
		case PA_HS_MODE_B:
			phy_calibrate(generic_phy);
			break;
		}

		snprintf(pwr_str, PWR_MODE_STR_LEN, "%s series_%s G_%d L_%d",
			"FAST",	pwr_req->hs_rate == PA_HS_MODE_A ? "A" : "B",
			gear, lanes);
	} else {
		snprintf(pwr_str, PWR_MODE_STR_LEN, "%s G_%d L_%d",
			"SLOW", gear, lanes);
	}

	dev_info(hba->dev, "Power mode changed to: %s\n", pwr_str);

	return 0;
}

static void exynos_ufs_specify_nexus_t_xfer_req(struct ufs_hba *hba,
						int tag, bool is_scsi_cmd)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	u32 type;

	type = hci_readl(ufs, HCI_UTRL_NEXUS_TYPE);

	if (is_scsi_cmd)
		hci_writel(ufs, type | (1 << tag), HCI_UTRL_NEXUS_TYPE);
	else
		hci_writel(ufs, type & ~(1 << tag), HCI_UTRL_NEXUS_TYPE);
}

static void exynos_ufs_specify_nexus_t_tm_req(struct ufs_hba *hba,
						int tag, u8 func)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	u32 type;

	type = hci_readl(ufs, HCI_UTMRL_NEXUS_TYPE);

	switch (func) {
	case UFS_ABORT_TASK:
	case UFS_QUERY_TASK:
		hci_writel(ufs, type | (1 << tag), HCI_UTMRL_NEXUS_TYPE);
		break;
	case UFS_ABORT_TASK_SET:
	case UFS_CLEAR_TASK_SET:
	case UFS_LOGICAL_RESET:
	case UFS_QUERY_TASK_SET:
		hci_writel(ufs, type & ~(1 << tag), HCI_UTMRL_NEXUS_TYPE);
		break;
	}
}

static int exynos_ufs_phy_init(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct phy *generic_phy = ufs->phy;
	int ret = 0;

	if (ufs->avail_ln_rx == 0 || ufs->avail_ln_tx == 0) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES),
			&ufs->avail_ln_rx);
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES),
			&ufs->avail_ln_tx);
		WARN(ufs->avail_ln_rx != ufs->avail_ln_tx,
			"available data lanes are not equal (rx:%d, tx:%d)\n",
			ufs->avail_ln_rx, ufs->avail_ln_tx);
	}

	phy_set_bus_width(generic_phy, ufs->avail_ln_rx);
	ret = phy_init(generic_phy);
	if (ret) {
		dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
			__func__, ret);
		return ret;
	}

	ret = phy_power_on(generic_phy);
	if (ret)
		goto out_exit_phy;

	return 0;

out_exit_phy:
	phy_exit(generic_phy);

	return ret;
}

static void exynos_ufs_config_unipro(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD),
		DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTRAILINGCLOCKS),
			ufs->drv_data->uic_attr->tx_trailingclks);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE),
			ufs->drv_data->uic_attr->pa_dbg_option_suite);
}

static void exynos_ufs_config_intr(struct exynos_ufs *ufs, u32 errs, u8 index)
{
	switch (index) {
	case UNIPRO_L1_5:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_PA_LAYER);
		break;
	case UNIPRO_L2:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_DL_LAYER);
		break;
	case UNIPRO_L3:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_N_LAYER);
		break;
	case UNIPRO_L4:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_T_LAYER);
		break;
	case UNIPRO_DME:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_DME_LAYER);
		break;
	}
}

static int exynos_ufs_setup_clocks(struct ufs_hba *hba, bool on,
				   enum ufs_notify_change_status status)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	if (!ufs)
		return 0;

	if (on && status == PRE_CHANGE) {
		if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
			exynos_ufs_disable_auto_ctrl_hcc(ufs);
		exynos_ufs_ungate_clks(ufs);
	} else if (!on && status == POST_CHANGE) {
		exynos_ufs_gate_clks(ufs);
		if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
			exynos_ufs_enable_auto_ctrl_hcc(ufs);
	}

	return 0;
}

static int exynos_ufs_pre_link(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	/* hci */
	exynos_ufs_config_intr(ufs, DFES_DEF_L2_ERRS, UNIPRO_L2);
	exynos_ufs_config_intr(ufs, DFES_DEF_L3_ERRS, UNIPRO_L3);
	exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4);
	exynos_ufs_set_unipro_pclk_div(ufs);

	/* unipro */
	exynos_ufs_config_unipro(ufs);

	/* m-phy */
	exynos_ufs_phy_init(ufs);
	if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) {
		exynos_ufs_config_phy_time_attr(ufs);
		exynos_ufs_config_phy_cap_attr(ufs);
	}

	exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);

	if (ufs->drv_data->pre_link)
		ufs->drv_data->pre_link(ufs);

	return 0;
}

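/*
 * Program the tick count the interrupt-aggregation timer treats as 1 us:
 * IATOVAL_NSEC / CNTR_DIV_VAL is converted to PCLK ticks and written to
 * HCI_1US_TO_CNT_VAL (assuming the quotient works out to 1000 ns, as the
 * register name suggests).
 */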
static void exynos_ufs_fit_aggr_timeout(struct exynos_ufs *ufs)
{
	u32 val;

	val = exynos_ufs_calc_time_cntr(ufs, IATOVAL_NSEC / CNTR_DIV_VAL);
	hci_writel(ufs, val & CNT_VAL_1US_MASK, HCI_1US_TO_CNT_VAL);
}

static int exynos_ufs_post_link(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	struct phy *generic_phy = ufs->phy;
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;

	exynos_ufs_establish_connt(ufs);
	exynos_ufs_fit_aggr_timeout(ufs);

	hci_writel(ufs, 0xa, HCI_DATA_REORDER);
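	/*
	 * PRDT entry size: 12 here is presumably log2 of the entry size,
	 * i.e. 4 KiB, in line with the SZ_4K max_segment_size set in
	 * exynos_ufs_hce_enable_notify().
	 */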
	hci_writel(ufs, PRDT_SET_SIZE(12), HCI_TXPRDT_ENTRY_SIZE);
	hci_writel(ufs, PRDT_SET_SIZE(12), HCI_RXPRDT_ENTRY_SIZE);
	hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
	hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
	hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN);

	if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)
		ufshcd_dme_set(hba,
			UIC_ARG_MIB(T_DBG_SKIP_INIT_HIBERN8_EXIT), true);

	if (attr->pa_granularity) {
		exynos_ufs_enable_dbg_mode(hba);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_GRANULARITY),
				attr->pa_granularity);
		exynos_ufs_disable_dbg_mode(hba);

		if (attr->pa_tactivate)
			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
					attr->pa_tactivate);
		if (attr->pa_hibern8time &&
		    !(ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER))
			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
					attr->pa_hibern8time);
	}

	if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
		if (!attr->pa_granularity)
			ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
					&attr->pa_granularity);
		if (!attr->pa_hibern8time)
			ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
					&attr->pa_hibern8time);
		/*
		 * Do not wait for the HIBERN8 time when exiting hibernation.
		 */
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 0);

		if (attr->pa_granularity < 1 || attr->pa_granularity > 6) {
			/* Valid range for granularity: 1 ~ 6 */
			dev_warn(hba->dev,
				"%s: pa_granularity %d is invalid, assuming backwards compatibility\n",
				__func__,
				attr->pa_granularity);
			attr->pa_granularity = 6;
		}
	}

	phy_calibrate(generic_phy);

	if (ufs->drv_data->post_link)
		ufs->drv_data->post_link(ufs);

	return 0;
}

static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs)
{
	struct device_node *np = dev->of_node;
	struct exynos_ufs_uic_attr *attr;
	int ret = 0;

	ufs->drv_data = device_get_match_data(dev);

	if (ufs->drv_data && ufs->drv_data->uic_attr) {
		attr = ufs->drv_data->uic_attr;
	} else {
		dev_err(dev, "failed to get uic attributes\n");
		ret = -EINVAL;
		goto out;
	}

	ufs->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg");
	if (IS_ERR(ufs->sysreg))
		ufs->sysreg = NULL;
	else {
		if (of_property_read_u32_index(np, "samsung,sysreg", 1,
					       &ufs->shareability_reg_offset)) {
			dev_warn(dev, "can't get an offset from sysreg, using the default value\n");
			ufs->shareability_reg_offset = UFS_SHAREABILITY_OFFSET;
		}
	}

	ufs->pclk_avail_min = PCLK_AVAIL_MIN;
	ufs->pclk_avail_max = PCLK_AVAIL_MAX;

	attr->rx_adv_fine_gran_sup_en = RX_ADV_FINE_GRAN_SUP_EN;
	attr->rx_adv_fine_gran_step = RX_ADV_FINE_GRAN_STEP_VAL;
	attr->rx_adv_min_actv_time_cap = RX_ADV_MIN_ACTV_TIME_CAP;
	attr->pa_granularity = PA_GRANULARITY_VAL;
	attr->pa_tactivate = PA_TACTIVATE_VAL;
	attr->pa_hibern8time = PA_HIBERN8TIME_VAL;

out:
	return ret;
}

static inline void exynos_ufs_priv_init(struct ufs_hba *hba,
					struct exynos_ufs *ufs)
{
	ufs->hba = hba;
	ufs->opts = ufs->drv_data->opts;
	ufs->rx_sel_idx = PA_MAXDATALANES;
	if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX)
		ufs->rx_sel_idx = 0;
	hba->priv = (void *)ufs;
	hba->quirks = ufs->drv_data->quirks;
}

static int exynos_ufs_init(struct ufs_hba *hba)
{
	struct device *dev = hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct exynos_ufs *ufs;
	int ret;

	ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
	if (!ufs)
		return -ENOMEM;

	/* exynos-specific hci */
	ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci");
	if (IS_ERR(ufs->reg_hci)) {
		dev_err(dev, "cannot ioremap for hci vendor register\n");
		return PTR_ERR(ufs->reg_hci);
	}

	/* unipro */
	ufs->reg_unipro = devm_platform_ioremap_resource_byname(pdev, "unipro");
	if (IS_ERR(ufs->reg_unipro)) {
		dev_err(dev, "cannot ioremap for unipro register\n");
		return PTR_ERR(ufs->reg_unipro);
	}

	/* ufs protector */
	ufs->reg_ufsp = devm_platform_ioremap_resource_byname(pdev, "ufsp");
	if (IS_ERR(ufs->reg_ufsp)) {
		dev_err(dev, "cannot ioremap for ufs protector register\n");
		return PTR_ERR(ufs->reg_ufsp);
	}

	ret = exynos_ufs_parse_dt(dev, ufs);
	if (ret) {
		dev_err(dev, "failed to get dt info\n");
		goto out;
	}

	ufs->phy = devm_phy_get(dev, "ufs-phy");
	if (IS_ERR(ufs->phy)) {
		ret = PTR_ERR(ufs->phy);
		dev_err(dev, "failed to get ufs-phy\n");
		goto out;
	}

	exynos_ufs_priv_init(hba, ufs);

	if (ufs->drv_data->drv_init) {
		ret = ufs->drv_data->drv_init(dev, ufs);
		if (ret) {
			dev_err(dev, "failed to init drv-data\n");
			goto out;
		}
	}

	ret = exynos_ufs_get_clk_info(ufs);
	if (ret)
		goto out;
	exynos_ufs_specify_phy_time_attr(ufs);
	exynos_ufs_config_smu(ufs);
	return 0;

out:
	hba->priv = NULL;
	return ret;
}

static int exynos_ufs_host_reset(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	unsigned long timeout = jiffies + msecs_to_jiffies(1);
	u32 val;
	int ret = 0;

	exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val);

	hci_writel(ufs, UFS_SW_RST_MASK, HCI_SW_RST);

	do {
		if (!(hci_readl(ufs, HCI_SW_RST) & UFS_SW_RST_MASK))
			goto out;
	} while (time_before(jiffies, timeout));

	dev_err(hba->dev, "host sw-reset timed out\n");
	ret = -ETIMEDOUT;

out:
	exynos_ufs_auto_ctrl_hcc_restore(ufs, &val);
	return ret;
}

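/*
 * Toggle the device hardware reset line, wired to bit 0 of the vendor
 * HCI_GPIO_OUT register: drive it low for at least 5 us, then back high.
 */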
static void exynos_ufs_dev_hw_reset(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	hci_writel(ufs, 0 << 0, HCI_GPIO_OUT);
	udelay(5);
	hci_writel(ufs, 1 << 0, HCI_GPIO_OUT);
}

static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;

	if (!enter) {
		if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
			exynos_ufs_disable_auto_ctrl_hcc(ufs);
		exynos_ufs_ungate_clks(ufs);

		if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
			static const unsigned int granularity_tbl[] = {
				1, 4, 8, 16, 32, 100
			};
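			/*
			 * PA_Granularity 1..6 selects {1, 4, 8, 16, 32, 100} us
			 * units per the UniPro attribute definition, so h8_time
			 * below is the minimum hibern8 dwell time in us.
			 */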
			int h8_time = attr->pa_hibern8time *
				granularity_tbl[attr->pa_granularity - 1];
			unsigned long us;
			s64 delta;

			do {
				delta = h8_time - ktime_us_delta(ktime_get(),
							ufs->entry_hibern8_t);
				if (delta <= 0)
					break;

				us = min_t(s64, delta, USEC_PER_MSEC);
				if (us >= 10)
					usleep_range(us, us + 10);
			} while (1);
		}
	}
}

static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	if (!enter) {
		u32 cur_mode = 0;
		u32 pwrmode;

		if (ufshcd_is_hs_mode(&ufs->dev_req_params))
			pwrmode = FAST_MODE;
		else
			pwrmode = SLOW_MODE;

		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &cur_mode);
		if (cur_mode != (pwrmode << 4 | pwrmode)) {
			dev_warn(hba->dev, "%s: power mode change\n", __func__);
			hba->pwr_info.pwr_rx = (cur_mode >> 4) & 0xf;
			hba->pwr_info.pwr_tx = cur_mode & 0xf;
			ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
		}

		if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB))
			exynos_ufs_establish_connt(ufs);
	} else {
		ufs->entry_hibern8_t = ktime_get();
		exynos_ufs_gate_clks(ufs);
		if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
			exynos_ufs_enable_auto_ctrl_hcc(ufs);
	}
}

static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	int ret = 0;

	switch (status) {
	case PRE_CHANGE:
		/*
		 * The maximum segment size must be set after scsi_host_alloc()
		 * has been called and before LUN scanning starts
		 * (ufshcd_async_scan()). Note: this callback may also be called
		 * from other functions than ufshcd_init().
		 */
		hba->host->max_segment_size = SZ_4K;

		if (ufs->drv_data->pre_hce_enable) {
			ret = ufs->drv_data->pre_hce_enable(ufs);
			if (ret)
				return ret;
		}

		ret = exynos_ufs_host_reset(hba);
		if (ret)
			return ret;
		exynos_ufs_dev_hw_reset(hba);
		break;
	case POST_CHANGE:
		exynos_ufs_calc_pwm_clk_div(ufs);
		if (!(ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL))
			exynos_ufs_enable_auto_ctrl_hcc(ufs);

		if (ufs->drv_data->post_hce_enable)
			ret = ufs->drv_data->post_hce_enable(ufs);

		break;
	}

	return ret;
}

static int exynos_ufs_link_startup_notify(struct ufs_hba *hba,
					  enum ufs_notify_change_status status)
{
	int ret = 0;

	switch (status) {
	case PRE_CHANGE:
		ret = exynos_ufs_pre_link(hba);
		break;
	case POST_CHANGE:
		ret = exynos_ufs_post_link(hba);
		break;
	}

	return ret;
}

static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba,
				enum ufs_notify_change_status status,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	int ret = 0;

	switch (status) {
	case PRE_CHANGE:
		ret = exynos_ufs_pre_pwr_mode(hba, dev_max_params,
					      dev_req_params);
		break;
	case POST_CHANGE:
		ret = exynos_ufs_post_pwr_mode(hba, dev_req_params);
		break;
	}

	return ret;
}

static void exynos_ufs_hibern8_notify(struct ufs_hba *hba,
				     enum uic_cmd_dme enter,
				     enum ufs_notify_change_status notify)
{
	switch ((u8)notify) {
	case PRE_CHANGE:
		exynos_ufs_pre_hibern8(hba, enter);
		break;
	case POST_CHANGE:
		exynos_ufs_post_hibern8(hba, enter);
		break;
	}
}

static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
	enum ufs_notify_change_status status)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	if (status == PRE_CHANGE)
		return 0;

	if (!ufshcd_is_link_active(hba))
		phy_power_off(ufs->phy);

	return 0;
}

static int exynos_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	if (!ufshcd_is_link_active(hba))
		phy_power_on(ufs->phy);

	exynos_ufs_config_smu(ufs);

	return 0;
}

static int exynosauto_ufs_vh_link_startup_notify(struct ufs_hba *hba,
						 enum ufs_notify_change_status status)
{
	if (status == POST_CHANGE) {
		ufshcd_set_link_active(hba);
		ufshcd_set_ufs_dev_active(hba);
	}

	return 0;
}

static int exynosauto_ufs_vh_wait_ph_ready(struct ufs_hba *hba)
{
	u32 mbox;
	ktime_t start, stop;

	start = ktime_get();
	stop = ktime_add(start, ms_to_ktime(PH_READY_TIMEOUT_MS));

	do {
		mbox = ufshcd_readl(hba, PH2VH_MBOX);
		/*
		 * TODO: Mailbox message protocols between the PH and VHs are
		 * not implemented yet. This will be supported later.
		 */
		if ((mbox & MH_MSG_MASK) == MH_MSG_PH_READY)
			return 0;

		usleep_range(40, 50);
	} while (ktime_before(ktime_get(), stop));

	return -ETIME;
}

static int exynosauto_ufs_vh_init(struct ufs_hba *hba)
{
	struct device *dev = hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct exynos_ufs *ufs;
	int ret;

	ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
	if (!ufs)
		return -ENOMEM;

	/* exynos-specific hci */
	ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci");
	if (IS_ERR(ufs->reg_hci)) {
		dev_err(dev, "cannot ioremap for hci vendor register\n");
		return PTR_ERR(ufs->reg_hci);
	}

	ret = exynosauto_ufs_vh_wait_ph_ready(hba);
	if (ret)
		return ret;

	ufs->drv_data = device_get_match_data(dev);
	if (!ufs->drv_data)
		return -ENODEV;

	exynos_ufs_priv_init(hba, ufs);

	return 0;
}

static int fsd_ufs_pre_link(struct exynos_ufs *ufs)
{
	int i;
	struct ufs_hba *hba = ufs->hba;

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD),
		       DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x201), 0x12);
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);

	for_each_ufs_tx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xAA, i),
			       DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8F, i), 0x3F);
	}

	for_each_ufs_rx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x12, i),
			       DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x5C, i), 0x38);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0F, i), 0x0);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x65, i), 0x1);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x69, i), 0x1);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x21, i), 0x0);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x22, i), 0x0);
	}

	ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_AUTOMODE_THLD), 0x4E20);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), 0x2e820183);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0);

	exynos_ufs_establish_connt(ufs);

	return 0;
}

static void exynos_ufs_config_scsi_dev(struct scsi_device *sdev)
{
	blk_queue_update_dma_alignment(sdev->request_queue, SZ_4K - 1);
}

static int fsd_ufs_post_link(struct exynos_ufs *ufs)
{
	int i;
	struct ufs_hba *hba = ufs->hba;
	u32 hw_cap_min_tactivate;
	u32 peer_rx_min_actv_time_cap;
	u32 max_rx_hibern8_time_cap;

	ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0x8F, 4),
			&hw_cap_min_tactivate); /* HW Capability of MIN_TACTIVATE */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
			&peer_rx_min_actv_time_cap);    /* PA_TActivate */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
			&max_rx_hibern8_time_cap);      /* PA_Hibern8Time */

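	/*
	 * If the device's advertised PA_TActivate already meets the local HW
	 * minimum, bump the peer value by one more unit, presumably to keep
	 * some margin above the host's minimum TActivate.
	 */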
	if (peer_rx_min_actv_time_cap >= hw_cap_min_tactivate)
		ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
					peer_rx_min_actv_time_cap + 1);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), max_rx_hibern8_time_cap + 1);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), 0x01);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0xFA);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), 0x00);

	ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);

	for_each_ufs_rx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x35, i), 0x05);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x73, i), 0x01);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x41, i), 0x02);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x42, i), 0xAC);
	}

	ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);

	return 0;
}

static int fsd_ufs_pre_pwr_change(struct exynos_ufs *ufs,
					struct ufs_pa_layer_attr *pwr)
{
	struct ufs_hba *hba = ufs->hba;

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), 0x1);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), 0x1);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000);

	unipro_writel(ufs, 12000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0);
	unipro_writel(ufs, 32000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1);
	unipro_writel(ufs, 16000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2);

	return 0;
}

static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
	.name				= "exynos_ufs",
	.init				= exynos_ufs_init,
	.hce_enable_notify		= exynos_ufs_hce_enable_notify,
	.link_startup_notify		= exynos_ufs_link_startup_notify,
	.pwr_change_notify		= exynos_ufs_pwr_change_notify,
	.setup_clocks			= exynos_ufs_setup_clocks,
	.setup_xfer_req			= exynos_ufs_specify_nexus_t_xfer_req,
	.setup_task_mgmt		= exynos_ufs_specify_nexus_t_tm_req,
	.hibern8_notify			= exynos_ufs_hibern8_notify,
	.suspend			= exynos_ufs_suspend,
	.resume				= exynos_ufs_resume,
	.config_scsi_dev		= exynos_ufs_config_scsi_dev,
};

static const struct ufs_hba_variant_ops ufs_hba_exynosauto_vh_ops = {
	.name				= "exynosauto_ufs_vh",
	.init				= exynosauto_ufs_vh_init,
	.link_startup_notify		= exynosauto_ufs_vh_link_startup_notify,
};

static int exynos_ufs_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;
	const struct ufs_hba_variant_ops *vops = &ufs_hba_exynos_ops;
	const struct exynos_ufs_drv_data *drv_data =
		device_get_match_data(dev);

	if (drv_data && drv_data->vops)
		vops = drv_data->vops;

	err = ufshcd_pltfrm_init(pdev, vops);
	if (err)
		dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);

	return err;
}

static void exynos_ufs_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	pm_runtime_get_sync(&pdev->dev);
	ufshcd_remove(hba);

	phy_power_off(ufs->phy);
	phy_exit(ufs->phy);
}

static struct exynos_ufs_uic_attr exynos7_uic_attr = {
	.tx_trailingclks		= 0x10,
	.tx_dif_p_nsec			= 3000000,	/* unit: ns */
	.tx_dif_n_nsec			= 1000000,	/* unit: ns */
	.tx_high_z_cnt_nsec		= 20000,	/* unit: ns */
	.tx_base_unit_nsec		= 100000,	/* unit: ns */
	.tx_gran_unit_nsec		= 4000,		/* unit: ns */
	.tx_sleep_cnt			= 1000,		/* unit: ns */
	.tx_min_activatetime		= 0xa,
	.rx_filler_enable		= 0x2,
	.rx_dif_p_nsec			= 1000000,	/* unit: ns */
	.rx_hibern8_wait_nsec		= 4000000,	/* unit: ns */
	.rx_base_unit_nsec		= 100000,	/* unit: ns */
	.rx_gran_unit_nsec		= 4000,		/* unit: ns */
	.rx_sleep_cnt			= 1280,		/* unit: ns */
	.rx_stall_cnt			= 320,		/* unit: ns */
	.rx_hs_g1_sync_len_cap		= SYNC_LEN_COARSE(0xf),
	.rx_hs_g2_sync_len_cap		= SYNC_LEN_COARSE(0xf),
	.rx_hs_g3_sync_len_cap		= SYNC_LEN_COARSE(0xf),
	.rx_hs_g1_prep_sync_len_cap	= PREP_LEN(0xf),
	.rx_hs_g2_prep_sync_len_cap	= PREP_LEN(0xf),
	.rx_hs_g3_prep_sync_len_cap	= PREP_LEN(0xf),
	.pa_dbg_option_suite		= 0x30103,
};

static const struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
	.uic_attr		= &exynos7_uic_attr,
	.quirks			= UFSHCD_QUIRK_PRDT_BYTE_GRAN |
				  UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
				  UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
				  UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
	.opts			= EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
				  EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
				  EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
	.drv_init		= exynosauto_ufs_drv_init,
	.post_hce_enable	= exynosauto_ufs_post_hce_enable,
	.pre_link		= exynosauto_ufs_pre_link,
	.pre_pwr_change		= exynosauto_ufs_pre_pwr_change,
	.post_pwr_change	= exynosauto_ufs_post_pwr_change,
};

static const struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = {
	.vops			= &ufs_hba_exynosauto_vh_ops,
	.quirks			= UFSHCD_QUIRK_PRDT_BYTE_GRAN |
				  UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
				  UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
				  UFSHCI_QUIRK_BROKEN_HCE |
				  UFSHCD_QUIRK_BROKEN_UIC_CMD |
				  UFSHCD_QUIRK_SKIP_PH_CONFIGURATION |
				  UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
	.opts			= EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
};

static const struct exynos_ufs_drv_data exynos_ufs_drvs = {
	.uic_attr		= &exynos7_uic_attr,
	.quirks			= UFSHCD_QUIRK_PRDT_BYTE_GRAN |
				  UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
				  UFSHCI_QUIRK_BROKEN_HCE |
				  UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
				  UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
				  UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
				  UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
	.opts			= EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
				  EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
				  EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
				  EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB |
				  EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER,
	.drv_init		= exynos7_ufs_drv_init,
	.pre_link		= exynos7_ufs_pre_link,
	.post_link		= exynos7_ufs_post_link,
	.pre_pwr_change		= exynos7_ufs_pre_pwr_change,
	.post_pwr_change	= exynos7_ufs_post_pwr_change,
};

static struct exynos_ufs_uic_attr fsd_uic_attr = {
	.tx_trailingclks		= 0x10,
	.tx_dif_p_nsec			= 3000000,	/* unit: ns */
	.tx_dif_n_nsec			= 1000000,	/* unit: ns */
	.tx_high_z_cnt_nsec		= 20000,	/* unit: ns */
	.tx_base_unit_nsec		= 100000,	/* unit: ns */
	.tx_gran_unit_nsec		= 4000,		/* unit: ns */
	.tx_sleep_cnt			= 1000,		/* unit: ns */
	.tx_min_activatetime		= 0xa,
	.rx_filler_enable		= 0x2,
	.rx_dif_p_nsec			= 1000000,	/* unit: ns */
	.rx_hibern8_wait_nsec		= 4000000,	/* unit: ns */
	.rx_base_unit_nsec		= 100000,	/* unit: ns */
	.rx_gran_unit_nsec		= 4000,		/* unit: ns */
	.rx_sleep_cnt			= 1280,		/* unit: ns */
	.rx_stall_cnt			= 320,		/* unit: ns */
	.rx_hs_g1_sync_len_cap		= SYNC_LEN_COARSE(0xf),
	.rx_hs_g2_sync_len_cap		= SYNC_LEN_COARSE(0xf),
	.rx_hs_g3_sync_len_cap		= SYNC_LEN_COARSE(0xf),
	.rx_hs_g1_prep_sync_len_cap	= PREP_LEN(0xf),
	.rx_hs_g2_prep_sync_len_cap	= PREP_LEN(0xf),
	.rx_hs_g3_prep_sync_len_cap	= PREP_LEN(0xf),
	.pa_dbg_option_suite		= 0x2E820183,
};

static const struct exynos_ufs_drv_data fsd_ufs_drvs = {
	.uic_attr               = &fsd_uic_attr,
	.quirks                 = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
				  UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
				  UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
				  UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
				  UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR,
	.opts                   = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
				  EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
				  EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
				  EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
	.pre_link               = fsd_ufs_pre_link,
	.post_link              = fsd_ufs_post_link,
	.pre_pwr_change         = fsd_ufs_pre_pwr_change,
};

static const struct of_device_id exynos_ufs_of_match[] = {
	{ .compatible = "samsung,exynos7-ufs",
	  .data	      = &exynos_ufs_drvs },
	{ .compatible = "samsung,exynosautov9-ufs",
	  .data	      = &exynosauto_ufs_drvs },
	{ .compatible = "samsung,exynosautov9-ufs-vh",
	  .data	      = &exynosauto_ufs_vh_drvs },
	{ .compatible = "tesla,fsd-ufs",
	  .data       = &fsd_ufs_drvs },
	{},
};

static const struct dev_pm_ops exynos_ufs_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
	.prepare	 = ufshcd_suspend_prepare,
	.complete	 = ufshcd_resume_complete,
};

static struct platform_driver exynos_ufs_pltform = {
	.probe	= exynos_ufs_probe,
	.remove_new = exynos_ufs_remove,
	.driver	= {
		.name	= "exynos-ufshc",
		.pm	= &exynos_ufs_pm_ops,
		.of_match_table = exynos_ufs_of_match,
	},
};
module_platform_driver(exynos_ufs_pltform);

MODULE_AUTHOR("Alim Akhtar <alim.akhtar@samsung.com>");
MODULE_AUTHOR("Seungwon Jeon  <essuuj@gmail.com>");
MODULE_DESCRIPTION("Exynos UFS HCI Driver");
MODULE_LICENSE("GPL v2");