/*	$NetBSD: intel_dpio_phy.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $	*/

/*
 * Copyright © 2014-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_dpio_phy.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $");

#include "display/intel_dp.h"

#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_sideband.h"

/**
 * DOC: DPIO
 *
 * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
 * ports. DPIO is the name given to such a display PHY. These PHYs
 * don't follow the standard programming model using direct MMIO
 * registers; instead their registers must be accessed through IOSF
 * sideband. VLV has one such PHY for driving ports B and C, and CHV
 * adds another PHY for driving port D. Each PHY responds to a specific
 * IOSF-SB port.
 *
 * Each display PHY is made up of one or two channels. Each channel
 * houses a common lane part which contains the PLL and other common
 * logic. CH0 common lane also contains the IOSF-SB logic for the
 * Common Register Interface (CRI), i.e. the DPIO registers. The CRI clock
 * must be running when any DPIO registers are accessed.
 *
 * In addition to having their own registers, the PHYs are also
 * controlled through some dedicated signals from the display
 * controller. These include PLL reference clock enable, PLL enable,
 * and CRI clock selection, for example.
 *
 * Each channel also has two splines (also called data lanes), and
 * each spline is made up of one Physical Access Coding Sub-Layer
 * (PCS) block and two TX lanes. So each channel has two PCS blocks
 * and four TX lanes. The TX lanes are used as DP lanes or TMDS
 * data/clock pairs depending on the output type.
 *
 * Additionally the PHY also contains an AUX lane with AUX blocks
 * for each channel. This is used for DP AUX communication, but
 * this fact isn't really relevant for the driver since AUX is
 * controlled from the display controller side. No DPIO registers
 * need to be accessed during AUX communication.
 *
 * Generally on VLV/CHV the common lane corresponds to the pipe and
 * the spline (PCS/TX) corresponds to the port.
 *
 * For dual channel PHY (VLV/CHV):
 *
 *  pipe A == CMN/PLL/REF CH0
 *
 *  pipe B == CMN/PLL/REF CH1
 *
 *  port B == PCS/TX CH0
 *
 *  port C == PCS/TX CH1
 *
 * This is especially important when we cross the streams
 * i.e. drive port B with pipe B, or port C with pipe A.
 *
 * For single channel PHY (CHV):
 *
 *  pipe C == CMN/PLL/REF CH0
 *
 *  port D == PCS/TX CH0
 *
 * On BXT the entire PHY channel corresponds to the port. That means
 * the PLL is also now associated with the port rather than the pipe,
 * and so the clock needs to be routed to the appropriate transcoder.
 * Port A PLL is directly connected to transcoder EDP and port B/C
 * PLLs can be routed to any transcoder A/B/C.
 *
 * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
 * digital port D (CHV) or port A (BXT). ::
 *
 *
 *     Dual channel PHY (VLV/CHV/BXT)
 *     ---------------------------------
 *     |      CH0      |      CH1      |
 *     |  CMN/PLL/REF  |  CMN/PLL/REF  |
 *     |---------------|---------------| Display PHY
 *     | PCS01 | PCS23 | PCS01 | PCS23 |
 *     |-------|-------|-------|-------|
 *     |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
 *     ---------------------------------
 *     |     DDI0      |     DDI1      | DP/HDMI ports
 *     ---------------------------------
 *
 *     Single channel PHY (CHV/BXT)
 *     -----------------
 *     |      CH0      |
 *     |  CMN/PLL/REF  |
 *     |---------------| Display PHY
 *     | PCS01 | PCS23 |
 *     |-------|-------|
 *     |TX0|TX1|TX2|TX3|
 *     -----------------
 *     |     DDI2      | DP/HDMI port
 *     -----------------
 */
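
/*
 * Callers that need to touch the BXT/GLK PHY registers for a given port
 * first resolve the PHY/channel pair with bxt_port_to_phy_channel() below.
 * A minimal, illustrative sketch (the results follow from the
 * bxt_ddi_phy_info/glk_ddi_phy_info tables in this file):
 *
 *	enum dpio_phy phy;
 *	enum dpio_channel ch;
 *
 *	bxt_port_to_phy_channel(dev_priv, PORT_B, &phy, &ch);
 *
 * which yields phy == DPIO_PHY0 and ch == DPIO_CH0 on both BXT and GLK.
 */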

/**
 * struct bxt_ddi_phy_info - Holds info for a Broxton DDI phy
 */
struct bxt_ddi_phy_info {
	/**
	 * @dual_channel: true if this phy has a second channel.
	 */
	bool dual_channel;

	/**
	 * @rcomp_phy: If -1, indicates this phy has its own rcomp resistor.
	 * Otherwise the GRC value will be copied from the phy indicated by
	 * this field.
	 */
	enum dpio_phy rcomp_phy;

	/**
	 * @reset_delay: delay in us to wait before setting the common reset
	 * bit in BXT_PHY_CTL_FAMILY, which effectively enables the phy.
	 */
	int reset_delay;

	/**
	 * @pwron_mask: Mask with the appropriate bit set that would cause the
	 * punit to power this phy if written to BXT_P_CR_GT_DISP_PWRON.
	 */
	u32 pwron_mask;

	/**
	 * @channel: struct containing per channel information.
	 */
	struct {
		/**
		 * @channel.port: which port maps to this channel.
		 */
		enum port port;
	} channel[2];
};

static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
	[DPIO_PHY0] = {
		.dual_channel = true,
		.rcomp_phy = DPIO_PHY1,
		.pwron_mask = BIT(0),

		.channel = {
			[DPIO_CH0] = { .port = PORT_B },
			[DPIO_CH1] = { .port = PORT_C },
		}
	},
	[DPIO_PHY1] = {
		.dual_channel = false,
		.rcomp_phy = -1,
		.pwron_mask = BIT(1),

		.channel = {
			[DPIO_CH0] = { .port = PORT_A },
		}
	},
};

static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
	[DPIO_PHY0] = {
		.dual_channel = false,
		.rcomp_phy = DPIO_PHY1,
		.pwron_mask = BIT(0),
		.reset_delay = 20,

		.channel = {
			[DPIO_CH0] = { .port = PORT_B },
		}
	},
	[DPIO_PHY1] = {
		.dual_channel = false,
		.rcomp_phy = -1,
		.pwron_mask = BIT(3),
		.reset_delay = 20,

		.channel = {
			[DPIO_CH0] = { .port = PORT_A },
		}
	},
	[DPIO_PHY2] = {
		.dual_channel = false,
		.rcomp_phy = DPIO_PHY1,
		.pwron_mask = BIT(1),
		.reset_delay = 20,

		.channel = {
			[DPIO_CH0] = { .port = PORT_C },
		}
	},
};

static const struct bxt_ddi_phy_info *
bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
{
	if (IS_GEMINILAKE(dev_priv)) {
		*count = ARRAY_SIZE(glk_ddi_phy_info);
		return glk_ddi_phy_info;
	} else {
		*count = ARRAY_SIZE(bxt_ddi_phy_info);
		return bxt_ddi_phy_info;
	}
}

static const struct bxt_ddi_phy_info *
bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
	int count;
	const struct bxt_ddi_phy_info *phy_list =
		bxt_get_phy_list(dev_priv, &count);

	return &phy_list[phy];
}

void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
			     enum dpio_phy *phy, enum dpio_channel *ch)
{
	const struct bxt_ddi_phy_info *phy_info, *phys;
	int i, count;

	phys = bxt_get_phy_list(dev_priv, &count);

	for (i = 0; i < count; i++) {
		phy_info = &phys[i];

		if (port == phy_info->channel[DPIO_CH0].port) {
			*phy = i;
			*ch = DPIO_CH0;
			return;
		}

		if (phy_info->dual_channel &&
		    port == phy_info->channel[DPIO_CH1].port) {
			*phy = i;
			*ch = DPIO_CH1;
			return;
		}
	}

	WARN(1, "PHY not found for PORT %c", port_name(port));
	*phy = DPIO_PHY0;
	*ch = DPIO_CH0;
}

void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
				  enum port port, u32 margin, u32 scale,
				  u32 enable, u32 deemphasis)
{
	u32 val;
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/*
	 * While we write to the group register to program all lanes at once, we
	 * can only read the lane registers, so we pick lanes 0/1 for that.
	 */
	val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
	val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
	I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);

	val = I915_READ(BXT_PORT_TX_DW2_LN0(phy, ch));
	val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
	val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT;
	I915_WRITE(BXT_PORT_TX_DW2_GRP(phy, ch), val);

	val = I915_READ(BXT_PORT_TX_DW3_LN0(phy, ch));
	val &= ~SCALE_DCOMP_METHOD;
	if (enable)
		val |= SCALE_DCOMP_METHOD;

	if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
		DRM_ERROR("Disabled scaling while uniqetrangenmethod was set");

	I915_WRITE(BXT_PORT_TX_DW3_GRP(phy, ch), val);

	val = I915_READ(BXT_PORT_TX_DW4_LN0(phy, ch));
	val &= ~DE_EMPHASIS;
	val |= deemphasis << DEEMPH_SHIFT;
	I915_WRITE(BXT_PORT_TX_DW4_GRP(phy, ch), val);

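	/*
	 * Re-assert the swing calc init bits that were cleared above so the
	 * PHY latches the new values, analogous to the "Start swing
	 * calculation" step in the CHV sequence further down.
	 */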
	val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
	val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
	I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
}

bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
			    enum dpio_phy phy)
{
	const struct bxt_ddi_phy_info *phy_info;

	phy_info = bxt_get_phy_info(dev_priv, phy);

	if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
		return false;

	if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
	     (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
		DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
				 phy);

		return false;
	}

	if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
		DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
				 phy);

		return false;
	}

	return true;
}

static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
	u32 val = I915_READ(BXT_PORT_REF_DW6(phy));

	return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
}

static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
				  enum dpio_phy phy)
{
	if (intel_de_wait_for_set(dev_priv, BXT_PORT_REF_DW3(phy),
				  GRC_DONE, 10))
		DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
}

static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
			      enum dpio_phy phy)
{
	const struct bxt_ddi_phy_info *phy_info;
	u32 val;

	phy_info = bxt_get_phy_info(dev_priv, phy);

	if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
		/* Still read out the GRC value for state verification */
		if (phy_info->rcomp_phy != -1)
			dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);

		if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
			DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
					 "won't reprogram it\n", phy);
			return;
		}

		DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
				 "force reprogramming it\n", phy);
	}

	val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
	val |= phy_info->pwron_mask;
	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);

	/*
	 * The PHY registers start out inaccessible and respond to reads with
	 * all 1s.  Eventually they become accessible as they power up, then
	 * the reserved bit will give the default 0.  Poll on the reserved bit
	 * becoming 0 to find when the PHY is accessible.
	 * The flag should get set in 100us according to the HW team, but
	 * use 1ms due to occasional timeouts observed with that.
	 */
	if (intel_wait_for_register_fw(&dev_priv->uncore,
				       BXT_PORT_CL1CM_DW0(phy),
				       PHY_RESERVED | PHY_POWER_GOOD,
				       PHY_POWER_GOOD,
				       1))
		DRM_ERROR("timeout during PHY%d power on\n", phy);

	/* Program PLL Rcomp code offset */
	val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
	val &= ~IREF0RC_OFFSET_MASK;
	val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
	I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);

	val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
	val &= ~IREF1RC_OFFSET_MASK;
	val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
	I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);

	/* Program power gating */
	val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
	val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
		SUS_CLK_CONFIG;
	I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);

	if (phy_info->dual_channel) {
		val = I915_READ(BXT_PORT_CL2CM_DW6(phy));
		val |= DW6_OLDO_DYN_PWR_DOWN_EN;
		I915_WRITE(BXT_PORT_CL2CM_DW6(phy), val);
	}

	if (phy_info->rcomp_phy != -1) {
		u32 grc_code;

		bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy);

		/*
		 * This PHY isn't connected to an RCOMP resistor, so copy over
		 * the corresponding calibrated value from the rcomp phy, and
		 * disable the automatic calibration on this PHY.
		 */
		val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv,
							  phy_info->rcomp_phy);
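		/*
		 * Replicate the calibrated code into the fast, slow and
		 * nominal fields of PORT_REF_DW6.
		 */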
		grc_code = val << GRC_CODE_FAST_SHIFT |
			   val << GRC_CODE_SLOW_SHIFT |
			   val;
		I915_WRITE(BXT_PORT_REF_DW6(phy), grc_code);

		val = I915_READ(BXT_PORT_REF_DW8(phy));
		val |= GRC_DIS | GRC_RDY_OVRD;
		I915_WRITE(BXT_PORT_REF_DW8(phy), val);
	}

	if (phy_info->reset_delay)
		udelay(phy_info->reset_delay);

	val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
	val |= COMMON_RESET_DIS;
	I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
}

void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
	const struct bxt_ddi_phy_info *phy_info;
	u32 val;

	phy_info = bxt_get_phy_info(dev_priv, phy);

	val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
	val &= ~COMMON_RESET_DIS;
	I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);

	val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
	val &= ~phy_info->pwron_mask;
	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
}

void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
	const struct bxt_ddi_phy_info *phy_info =
		bxt_get_phy_info(dev_priv, phy);
	enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
	bool was_enabled;

	lockdep_assert_held(&dev_priv->power_domains.lock);

	was_enabled = true;
	if (rcomp_phy != -1)
		was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);

	/*
	 * We need to copy the GRC calibration value from rcomp_phy,
	 * so make sure it's powered up.
	 */
	if (!was_enabled)
		_bxt_ddi_phy_init(dev_priv, rcomp_phy);

	_bxt_ddi_phy_init(dev_priv, phy);

	if (!was_enabled)
		bxt_ddi_phy_uninit(dev_priv, rcomp_phy);
}

static bool __printf(6, 7)
__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
		       i915_reg_t reg, u32 mask, u32 expected,
		       const char *reg_fmt, ...)
{
	struct va_format vaf;
	va_list args;
	u32 val;

	val = I915_READ(reg);
	if ((val & mask) == expected)
		return true;

	va_start(args, reg_fmt);
	vaf.fmt = reg_fmt;
	vaf.va = &args;

	DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
			 "current %08x, expected %08x (mask %08x)\n",
			 phy, &vaf, reg.reg, val, (val & ~mask) | expected,
			 mask);

	va_end(args);

	return false;
}

bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
			      enum dpio_phy phy)
{
	const struct bxt_ddi_phy_info *phy_info;
	u32 mask;
	bool ok;

	phy_info = bxt_get_phy_info(dev_priv, phy);

#define _CHK(reg, mask, exp, fmt, ...)					\
	__phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt,	\
			       ## __VA_ARGS__)

	if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
		return false;

	ok = true;

	/* PLL Rcomp code offset */
	ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
		    IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
		    "BXT_PORT_CL1CM_DW9(%d)", phy);
	ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
		    IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
		    "BXT_PORT_CL1CM_DW10(%d)", phy);

	/* Power gating */
	mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
	ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
		    "BXT_PORT_CL1CM_DW28(%d)", phy);

	if (phy_info->dual_channel)
		ok &= _CHK(BXT_PORT_CL2CM_DW6(phy),
			   DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
			   "BXT_PORT_CL2CM_DW6(%d)", phy);

	if (phy_info->rcomp_phy != -1) {
		u32 grc_code = dev_priv->bxt_phy_grc;

		grc_code = grc_code << GRC_CODE_FAST_SHIFT |
			   grc_code << GRC_CODE_SLOW_SHIFT |
			   grc_code;
		mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
		       GRC_CODE_NOM_MASK;
		ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code,
			   "BXT_PORT_REF_DW6(%d)", phy);

		mask = GRC_DIS | GRC_RDY_OVRD;
		ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask,
			    "BXT_PORT_REF_DW8(%d)", phy);
	}

	return ok;
#undef _CHK
}

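/*
 * Map a lane count to the mask of PHY TX lanes that want the latency
 * optimization bit set; the mask is consumed by
 * bxt_ddi_phy_set_lane_optim_mask() below.
 */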
u8
bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count)
{
	switch (lane_count) {
	case 1:
		return 0;
	case 2:
		return BIT(2) | BIT(0);
	case 4:
		return BIT(3) | BIT(2) | BIT(0);
	default:
		MISSING_CASE(lane_count);

		return 0;
	}
}

void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
				     u8 lane_lat_optim_mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum port port = encoder->port;
	enum dpio_phy phy;
	enum dpio_channel ch;
	int lane;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	for (lane = 0; lane < 4; lane++) {
		u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));

		/*
		 * Note that on CHV this flag is called UPAR, but has
		 * the same function.
		 */
		val &= ~LATENCY_OPTIM;
		if (lane_lat_optim_mask & BIT(lane))
			val |= LATENCY_OPTIM;

		I915_WRITE(BXT_PORT_TX_DW14_LN(phy, ch, lane), val);
	}
}

u8
bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum port port = encoder->port;
	enum dpio_phy phy;
	enum dpio_channel ch;
	int lane;
	u8 mask;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	mask = 0;
	for (lane = 0; lane < 4; lane++) {
		u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));

		if (val & LATENCY_OPTIM)
			mask |= BIT(lane);
	}

	return mask;
}

void chv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 deemph_reg_value, u32 margin_reg_value,
			      bool uniq_trans_scale)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dport = enc_to_dig_port(encoder);
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;
	int i;

	vlv_dpio_get(dev_priv);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
	}

	/* Program swing deemph */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;

		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/*
	 * The document said it needs to set bit 27 for ch0 and bit 26
	 * for ch1. Might be a typo in the doc.
	 * For now, for this unique transition scale selection, set bit
	 * 27 for ch0 and ch1.
	 */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		if (uniq_trans_scale)
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		else
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	vlv_dpio_put(dev_priv);
}

void chv_data_lane_soft_reset(struct intel_encoder *encoder,
			      const struct intel_crtc_state *crtc_state,
			      bool reset)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;
	u32 val;

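	/*
	 * Assert or deassert the per-lane TX resets first, then request the
	 * matching PCS clock soft reset; the PCS23 instances are only touched
	 * when more than two lanes are in use.
	 */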
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	if (reset)
		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	else
		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
		if (reset)
			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
		else
			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
	}

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	if (reset)
		val &= ~DPIO_PCS_CLK_SOFT_RESET;
	else
		val |= DPIO_PCS_CLK_SOFT_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
		val |= CHV_PCS_REQ_SOFTRESET_EN;
		if (reset)
			val &= ~DPIO_PCS_CLK_SOFT_RESET;
		else
			val |= DPIO_PCS_CLK_SOFT_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
	}
}

void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dport = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = crtc->pipe;
	unsigned int lane_mask =
		intel_dp_unused_lane_mask(crtc_state->lane_count);
	u32 val;

	/*
	 * Must trick the second common lane into life.
	 * Otherwise we can't even access the PLL.
	 */
	if (ch == DPIO_CH0 && pipe == PIPE_B)
		dport->release_cl2_override =
			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);

	chv_phy_powergate_lanes(encoder, true, lane_mask);

	vlv_dpio_get(dev_priv);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, crtc_state, true);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
		if (pipe != PIPE_B)
			val &= ~CHV_PCS_USEDCLKCHANNEL;
		else
			val |= CHV_PCS_USEDCLKCHANNEL;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
	}

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	vlv_dpio_put(dev_priv);
}

void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = crtc->pipe;
	int data, i, stagger;
	u32 val;

	vlv_dpio_get(dev_priv);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	/* Program Tx lane latency optimal setting */
	for (i = 0; i < crtc_state->lane_count; i++) {
		/* Set the upar bit */
		if (crtc_state->lane_count == 1)
			data = 0x0;
		else
			data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming */
	if (crtc_state->port_clock > 270000)
		stagger = 0x18;
	else if (crtc_state->port_clock > 135000)
		stagger = 0xd;
	else if (crtc_state->port_clock > 67500)
		stagger = 0x7;
	else if (crtc_state->port_clock > 33750)
		stagger = 0x4;
	else
		stagger = 0x2;

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val |= DPIO_TX2_STAGGER_MASK(0x1f);
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(6) |
		       DPIO_TX2_STAGGER_MULT(0));

	if (crtc_state->lane_count > 2) {
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
			       DPIO_LANESTAGGER_STRAP(stagger) |
			       DPIO_LANESTAGGER_STRAP_OVRD |
			       DPIO_TX1_STAGGER_MASK(0x1f) |
			       DPIO_TX1_STAGGER_MULT(7) |
			       DPIO_TX2_STAGGER_MULT(5));
	}

	/* Deassert data lane reset */
	chv_data_lane_soft_reset(encoder, crtc_state, false);

	vlv_dpio_put(dev_priv);
}

void chv_phy_release_cl2_override(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	if (dport->release_cl2_override) {
		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
		dport->release_cl2_override = false;
	}
}

void chv_phy_post_pll_disable(struct intel_encoder *encoder,
			      const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe;
	u32 val;

	vlv_dpio_get(dev_priv);

	/* disable left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	vlv_dpio_put(dev_priv);

	/*
	 * Leave the power down bit cleared for at least one
	 * lane so that chv_phy_powergate_ch() will power
	 * on something when the channel is otherwise unused.
	 * When the port is off and the override is removed
	 * the lanes power down anyway, so otherwise it doesn't
	 * really matter what the state of power down bits is
	 * after this.
	 */
	chv_phy_powergate_lanes(encoder, false, 0x0);
}

void vlv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 demph_reg_value, u32 preemph_reg_value,
			      u32 uniqtranscale_reg_value, u32 tx3_demph)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_digital_port *dport = enc_to_dig_port(encoder);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;

	vlv_dpio_get(dev_priv);

	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);

	if (tx3_demph)
		vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);

	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);

	vlv_dpio_put(dev_priv);
}

void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dport = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	enum pipe pipe = crtc->pipe;

	/* Program Tx lane resets to default */
	vlv_dpio_get(dev_priv);

	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);

	vlv_dpio_put(dev_priv);
}

void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	enum pipe pipe = crtc->pipe;
	u32 val;

	vlv_dpio_get(dev_priv);

	/* Enable clock channels for this port */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
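	/* The value just read is discarded; the register is fully reprogrammed below. */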
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);

	/* Program lane clock */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	vlv_dpio_put(dev_priv);
}

void vlv_phy_reset_lanes(struct intel_encoder *encoder,
			 const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *dport = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	enum pipe pipe = crtc->pipe;

	vlv_dpio_get(dev_priv);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
	vlv_dpio_put(dev_priv);
}