/*	$NetBSD: intel_runtime_pm.c,v 1.5 2018/08/27 07:30:37 riastradh Exp $	*/

/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_runtime_pm.c,v 1.5 2018/08/27 07:30:37 riastradh Exp $");

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains, there's a sizeable amount of indirection required. This file
 * provides generic functions to the driver for grabbing and releasing
 * references for abstract power domains. It then maps those to the actual
 * power wells present for a given platform.
 */
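
/*
 * A minimal usage sketch of the abstraction described above (illustrative
 * only; the domain chosen here is arbitrary): callers grab a reference to
 * the innermost power domain they need, touch the hardware, and release the
 * reference symmetrically. Which power wells actually toggle in response is
 * decided by the per-platform power well tables later in this file.
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_VGA);
 *	... access registers behind the VGA power domain ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
 */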

#define GEN9_ENABLE_DC5(dev) 0
#define SKL_ENABLE_DC6(dev) IS_SKYLAKE(dev)

#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			 \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
	     i--)							 \
		if ((power_well)->domains & (domain_mask))

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
				    int power_well_id);

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}
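
/*
 * Illustrative sketch (an assumption, not from the original source): a
 * typical hardware state readout caller checks the domain before touching
 * registers, holding the relevant modeset locks as required above.
 *
 *	if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
 *		... read out the pipe A hardware state ...
 *	else
 *		... mark pipe A as disabled in the software state ...
 */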

/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This function controls the state
 * of this little hack. While the initial power domain state is enabled,
 * runtime pm is effectively disabled.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}

static inline void
touch_vga_msr(struct drm_device *dev)
{
#ifdef __NetBSD__
	const bus_addr_t vgabase = 0x3c0;
	const bus_space_tag_t iot = dev->pdev->pd_pa.pa_iot;
	bus_space_handle_t ioh;
	uint8_t msr;
	int error;

	error = bus_space_map(iot, vgabase, 0x10, 0, &ioh);
	if (error) {
		device_printf(dev->pdev->pd_dev,
		    "unable to map VGA registers: %d\n", error);
	} else {
		CTASSERT(vgabase <= VGA_MSR_READ);
		msr = bus_space_read_1(iot, ioh, VGA_MSR_READ - vgabase);
		bus_space_write_1(iot, ioh, VGA_MSR_READ - vgabase, msr);
		bus_space_unmap(iot, ioh, 0x10);
	}
#else
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	touch_vga_msr(dev);

	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (power_well->data == SKL_DISP_PW_2) {
		touch_vga_msr(dev);

		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}

	if (power_well->data == SKL_DISP_PW_1) {
		if (!dev_priv->power_domains.initializing)
			intel_prepare_ddi(dev);
		gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
	}
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}

	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_A_E_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_B_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_C_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_D_POWER_DOMAINS |		\
	SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) |		\
	BIT(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) |	\
	BIT(POWER_DOMAIN_INIT))

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
	WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be enabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled to enable DC9.\n");
	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");

	 /*
	  * TODO: check for the following to verify the conditions to enter DC9
	  * state are satisfied:
	  * 1] Check relevant display engine registers to verify if mode set
	  * disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
	WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be disabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled.\n");

	 /*
	  * TODO: check for the following to verify DC9 state was indeed
	  * entered before programming to disable it:
	  * 1] Check relevant display engine registers to verify if mode
	  *  set disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}

void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	val = I915_READ(DC_STATE_EN);
	val |= DC_STATE_EN_DC9;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_DC9;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void gen9_set_dc_state_debugmask_memory_up(
			struct drm_i915_private *dev_priv)
{
	uint32_t val;

	/* The below bit doesn't need to be cleared ever afterwards */
	val = I915_READ(DC_STATE_DEBUG);
	if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
		val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
		I915_WRITE(DC_STATE_DEBUG, val);
		POSTING_READ(DC_STATE_DEBUG);
	}
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);

	WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	WARN_ONCE(dev_priv->pm.suspended,
		  "DC5 cannot be enabled if the platform is runtime-suspended.\n");

	assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);
	/*
	 * During initialization, the firmware may not be loaded yet.
	 * We still want to make sure that the DC enabling flag is cleared.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	WARN_ONCE(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
	WARN_ONCE(dev_priv->pm.suspended,
		"Disabling of DC5 while platform is runtime-suspended should never happen.\n");
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state_debugmask_memory_up(dev_priv);

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
	val |= DC_STATE_EN_UPTO_DC5;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void gen9_disable_dc5(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_disable_dc5(dev_priv);

	DRM_DEBUG_KMS("Disabling DC5\n");

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC5;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
{
	/*
	 * During initialization, the firmware may not be loaded yet.
	 * We still want to make sure that the DC enabling flag is cleared.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	assert_csr_loaded(dev_priv);
	WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be disabled.\n");
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state_debugmask_memory_up(dev_priv);

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
	val |= DC_STATE_EN_UPTO_DC6;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_disable_dc6(dev_priv);

	DRM_DEBUG_KMS("Disabling DC6\n");

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC6;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void skl_set_power_well(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->data) {
	case SKL_DISP_PW_1:
		if (wait_for((I915_READ(SKL_FUSE_STATUS) &
			SKL_FUSE_PG0_DIST_STATUS), 1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_DDI_A_E:
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case SKL_DISP_PW_MISC_IO:
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->data);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->data);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->data);
	is_enabled = tmp & state_mask;

	if (enable) {
		if (!enable_requested) {
			WARN((tmp & state_mask) &&
				!I915_READ(HSW_PWR_WELL_BIOS),
				"Invalid for power well status to be enabled, "
				"unless done by the BIOS, when request is to disable!\n");
			if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
				power_well->data == SKL_DISP_PW_2) {
				if (SKL_ENABLE_DC6(dev)) {
					skl_disable_dc6(dev_priv);
					/*
					 * DDI buffer programming unnecessary during driver-load/resume
					 * as it's already done during modeset initialization then.
					 * It's also invalid here as encoder list is still uninitialized.
					 */
					if (!dev_priv->power_domains.initializing)
						intel_prepare_ddi(dev);
				} else {
					gen9_disable_dc5(dev_priv);
				}
			}
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				state_mask), 1))
				DRM_ERROR("%s enable timeout\n",
					power_well->name);
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			if (IS_SKYLAKE(dev) &&
				(power_well->data == SKL_DISP_PW_1) &&
				(intel_csr_load_status_get(dev_priv) == FW_LOADED))
				DRM_DEBUG_KMS("Not Disabling PW1, dmc will handle\n");
			else {
				I915_WRITE(HSW_PWR_WELL_DRIVER,	tmp & ~req_mask);
				POSTING_READ(HSW_PWR_WELL_DRIVER);
				DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
			}

			if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
				power_well->data == SKL_DISP_PW_2) {
				enum csr_state state;
				/* TODO: wait for a completion event or
				 * similar here instead of busy
				 * waiting using wait_for function.
				 */
				wait_for((state = intel_csr_load_status_get(dev_priv)) !=
						FW_UNINITIALIZED, 1000);
				if (state != FW_LOADED)
					DRM_DEBUG("CSR firmware not ready (%d)\n",
							state);
				else
					if (SKL_ENABLE_DC6(dev))
						skl_enable_dc6(dev_priv);
					else
						gen9_enable_dc5(dev_priv);
			}
		}
	}

	if (check_fuse_status) {
		if (power_well->data == SKL_DISP_PW_1) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG1_DIST_STATUS), 1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->data == SKL_DISP_PW_2) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG2_DIST_STATUS), 1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
		SKL_POWER_WELL_STATE(power_well->data);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}

static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}

static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states; anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	enum i915_pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv->dev, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	i915_redisable_vga_power_on(dev_priv->dev);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_power_sequencer_reset(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum i915_pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 int power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;
	u32 tmp;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10))
		WARN(phy_status != tmp,
		     "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
		     tmp, phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum i915_pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d did not power up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum i915_pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}

bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}

static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum i915_pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states; anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum i915_pipe pipe = power_well->data;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++)
			intel_power_well_enable(dev_priv, power_well);
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}

/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN_ON(!power_domains->domain_use_count[domain]);
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN_ON(!power_well->count);

		if (!--power_well->count && i915.disable_power_well)
			intel_power_well_disable(dev_priv, power_well);
	}

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}
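
/*
 * A hedged usage sketch for the get/put pair above (illustrative only):
 * every successful intel_display_power_get() must be balanced by exactly
 * one intel_display_power_put(), including on error paths.
 * do_something_with_the_transcoder() is a hypothetical helper, named here
 * purely for the sake of the example.
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_TRANSCODER_A);
 *	ret = do_something_with_the_transcoder(dev_priv);
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_TRANSCODER_A);
 *	return ret;
 */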
1497
1498#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
1499	BIT(POWER_DOMAIN_PIPE_A) |			\
1500	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
1501	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
1502	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
1503	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
1504	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
1505	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
1506	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
1507	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
1508	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
1509	BIT(POWER_DOMAIN_PORT_CRT) |			\
1510	BIT(POWER_DOMAIN_PLLS) |			\
1511	BIT(POWER_DOMAIN_AUX_A) |			\
1512	BIT(POWER_DOMAIN_AUX_B) |			\
1513	BIT(POWER_DOMAIN_AUX_C) |			\
1514	BIT(POWER_DOMAIN_AUX_D) |			\
1515	BIT(POWER_DOMAIN_GMBUS) |			\
1516	BIT(POWER_DOMAIN_INIT))
1517#define HSW_DISPLAY_POWER_DOMAINS (				\
1518	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
1519	BIT(POWER_DOMAIN_INIT))
1520
1521#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
1522	HSW_ALWAYS_ON_POWER_DOMAINS |			\
1523	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
1524#define BDW_DISPLAY_POWER_DOMAINS (				\
1525	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
1526	BIT(POWER_DOMAIN_INIT))
1527
1528#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
1529#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK
1530
1531#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
1532	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
1533	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
1534	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
1535	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
1536	BIT(POWER_DOMAIN_PORT_CRT) |		\
1537	BIT(POWER_DOMAIN_AUX_B) |		\
1538	BIT(POWER_DOMAIN_AUX_C) |		\
1539	BIT(POWER_DOMAIN_INIT))
1540
1541#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
1542	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
1543	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
1544	BIT(POWER_DOMAIN_AUX_B) |		\
1545	BIT(POWER_DOMAIN_INIT))
1546
1547#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
1548	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
1549	BIT(POWER_DOMAIN_AUX_B) |		\
1550	BIT(POWER_DOMAIN_INIT))
1551
1552#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
1553	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
1554	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
1555	BIT(POWER_DOMAIN_AUX_C) |		\
1556	BIT(POWER_DOMAIN_INIT))
1557
1558#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
1559	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
1560	BIT(POWER_DOMAIN_AUX_C) |		\
1561	BIT(POWER_DOMAIN_INIT))
1562
1563#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
1564	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
1565	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
1566	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
1567	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
1568	BIT(POWER_DOMAIN_AUX_B) |		\
1569	BIT(POWER_DOMAIN_AUX_C) |		\
1570	BIT(POWER_DOMAIN_INIT))
1571
1572#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
1573	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
1574	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
1575	BIT(POWER_DOMAIN_AUX_D) |		\
1576	BIT(POWER_DOMAIN_INIT))
1577
1578static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
1579	.sync_hw = i9xx_always_on_power_well_noop,
1580	.enable = i9xx_always_on_power_well_noop,
1581	.disable = i9xx_always_on_power_well_noop,
1582	.is_enabled = i9xx_always_on_power_well_enabled,
1583};
1584
1585static const struct i915_power_well_ops chv_pipe_power_well_ops = {
1586	.sync_hw = chv_pipe_power_well_sync_hw,
1587	.enable = chv_pipe_power_well_enable,
1588	.disable = chv_pipe_power_well_disable,
1589	.is_enabled = chv_pipe_power_well_enabled,
1590};
1591
1592static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
1593	.sync_hw = vlv_power_well_sync_hw,
1594	.enable = chv_dpio_cmn_power_well_enable,
1595	.disable = chv_dpio_cmn_power_well_disable,
1596	.is_enabled = vlv_power_well_enabled,
1597};
1598
1599static struct i915_power_well i9xx_always_on_power_well[] = {
1600	{
1601		.name = "always-on",
1602		.always_on = 1,
1603		.domains = POWER_DOMAIN_MASK,
1604		.ops = &i9xx_always_on_power_well_ops,
1605	},
1606};
1607
1608static const struct i915_power_well_ops hsw_power_well_ops = {
1609	.sync_hw = hsw_power_well_sync_hw,
1610	.enable = hsw_power_well_enable,
1611	.disable = hsw_power_well_disable,
1612	.is_enabled = hsw_power_well_enabled,
1613};
1614
1615static const struct i915_power_well_ops skl_power_well_ops = {
1616	.sync_hw = skl_power_well_sync_hw,
1617	.enable = skl_power_well_enable,
1618	.disable = skl_power_well_disable,
1619	.is_enabled = skl_power_well_enabled,
1620};
1621
1622static struct i915_power_well hsw_power_wells[] = {
1623	{
1624		.name = "always-on",
1625		.always_on = 1,
1626		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
1627		.ops = &i9xx_always_on_power_well_ops,
1628	},
1629	{
1630		.name = "display",
1631		.domains = HSW_DISPLAY_POWER_DOMAINS,
1632		.ops = &hsw_power_well_ops,
1633	},
1634};
1635
1636static struct i915_power_well bdw_power_wells[] = {
1637	{
1638		.name = "always-on",
1639		.always_on = 1,
1640		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
1641		.ops = &i9xx_always_on_power_well_ops,
1642	},
1643	{
1644		.name = "display",
1645		.domains = BDW_DISPLAY_POWER_DOMAINS,
1646		.ops = &hsw_power_well_ops,
1647	},
1648};
1649
1650static const struct i915_power_well_ops vlv_display_power_well_ops = {
1651	.sync_hw = vlv_power_well_sync_hw,
1652	.enable = vlv_display_power_well_enable,
1653	.disable = vlv_display_power_well_disable,
1654	.is_enabled = vlv_power_well_enabled,
1655};
1656
1657static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
1658	.sync_hw = vlv_power_well_sync_hw,
1659	.enable = vlv_dpio_cmn_power_well_enable,
1660	.disable = vlv_dpio_cmn_power_well_disable,
1661	.is_enabled = vlv_power_well_enabled,
1662};
1663
1664static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
1665	.sync_hw = vlv_power_well_sync_hw,
1666	.enable = vlv_power_well_enable,
1667	.disable = vlv_power_well_disable,
1668	.is_enabled = vlv_power_well_enabled,
1669};
1670
1671static struct i915_power_well vlv_power_wells[] = {
1672	{
1673		.name = "always-on",
1674		.always_on = 1,
1675		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
1676		.ops = &i9xx_always_on_power_well_ops,
1677	},
1678	{
1679		.name = "display",
1680		.domains = VLV_DISPLAY_POWER_DOMAINS,
1681		.data = PUNIT_POWER_WELL_DISP2D,
1682		.ops = &vlv_display_power_well_ops,
1683	},
1684	{
1685		.name = "dpio-tx-b-01",
1686		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1687			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
1688			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1689			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1690		.ops = &vlv_dpio_power_well_ops,
1691		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
1692	},
1693	{
1694		.name = "dpio-tx-b-23",
1695		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1696			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
1697			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1698			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1699		.ops = &vlv_dpio_power_well_ops,
1700		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
1701	},
1702	{
1703		.name = "dpio-tx-c-01",
1704		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1705			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
1706			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1707			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1708		.ops = &vlv_dpio_power_well_ops,
1709		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
1710	},
1711	{
1712		.name = "dpio-tx-c-23",
1713		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1714			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
1715			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1716			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1717		.ops = &vlv_dpio_power_well_ops,
1718		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
1719	},
1720	{
1721		.name = "dpio-common",
1722		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
1723		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
1724		.ops = &vlv_dpio_cmn_power_well_ops,
1725	},
1726};
1727
1728static struct i915_power_well chv_power_wells[] = {
1729	{
1730		.name = "always-on",
1731		.always_on = 1,
1732		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
1733		.ops = &i9xx_always_on_power_well_ops,
1734	},
1735	{
1736		.name = "display",
1737		/*
1738		 * The pipe A power well is the new disp2d well. Pipe B and C
1739		 * power wells don't actually exist, and the pipe A well is
1740		 * required for any pipe to work.
1741		 */
1742		.domains = VLV_DISPLAY_POWER_DOMAINS,
1743		.data = PIPE_A,
1744		.ops = &chv_pipe_power_well_ops,
1745	},
1746	{
1747		.name = "dpio-common-bc",
1748		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
1749		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
1750		.ops = &chv_dpio_cmn_power_well_ops,
1751	},
1752	{
1753		.name = "dpio-common-d",
1754		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
1755		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
1756		.ops = &chv_dpio_cmn_power_well_ops,
1757	},
1758};
1759
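/*
 * Report whether the power well identified by @power_well_id is
 * currently enabled, by asking the well's ->is_enabled hook. The id
 * must exist on the running platform: the result of
 * lookup_power_well() is dereferenced without a NULL check.
 */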
1760bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
1761				    int power_well_id)
1762{
1763	struct i915_power_well *power_well;
1764	bool ret;
1765
1766	power_well = lookup_power_well(dev_priv, power_well_id);
1767	ret = power_well->ops->is_enabled(dev_priv, power_well);
1768
1769	return ret;
1770}
1771
1772static struct i915_power_well skl_power_wells[] = {
1773	{
1774		.name = "always-on",
1775		.always_on = 1,
1776		.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
1777		.ops = &i9xx_always_on_power_well_ops,
1778	},
1779	{
1780		.name = "power well 1",
1781		.domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
1782		.ops = &skl_power_well_ops,
1783		.data = SKL_DISP_PW_1,
1784	},
1785	{
1786		.name = "MISC IO power well",
1787		.domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
1788		.ops = &skl_power_well_ops,
1789		.data = SKL_DISP_PW_MISC_IO,
1790	},
1791	{
1792		.name = "power well 2",
1793		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
1794		.ops = &skl_power_well_ops,
1795		.data = SKL_DISP_PW_2,
1796	},
1797	{
1798		.name = "DDI A/E power well",
1799		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
1800		.ops = &skl_power_well_ops,
1801		.data = SKL_DISP_PW_DDI_A_E,
1802	},
1803	{
1804		.name = "DDI B power well",
1805		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
1806		.ops = &skl_power_well_ops,
1807		.data = SKL_DISP_PW_DDI_B,
1808	},
1809	{
1810		.name = "DDI C power well",
1811		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
1812		.ops = &skl_power_well_ops,
1813		.data = SKL_DISP_PW_DDI_C,
1814	},
1815	{
1816		.name = "DDI D power well",
1817		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
1818		.ops = &skl_power_well_ops,
1819		.data = SKL_DISP_PW_DDI_D,
1820	},
1821};
1822
1823static struct i915_power_well bxt_power_wells[] = {
1824	{
1825		.name = "always-on",
1826		.always_on = 1,
1827		.domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
1828		.ops = &i9xx_always_on_power_well_ops,
1829	},
1830	{
1831		.name = "power well 1",
1832		.domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
1833		.ops = &skl_power_well_ops,
1834		.data = SKL_DISP_PW_1,
1835	},
1836	{
1837		.name = "power well 2",
1838		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
1839		.ops = &skl_power_well_ops,
1840		.data = SKL_DISP_PW_2,
1841	},
1842};
1843
1844static int
1845sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
1846				   int disable_power_well)
1847{
1848	if (disable_power_well >= 0)
1849		return !!disable_power_well;
1850
1851	if (IS_SKYLAKE(dev_priv)) {
1852		DRM_DEBUG_KMS("Disabling display power well support\n");
1853		return 0;
1854	}
1855
1856	return 1;
1857}
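
/*
 * Resulting i915.disable_power_well semantics (a summary of the logic
 * above; the module-parameter spelling is the usual one):
 *
 *	negative: auto -- the platform default, which is 0 on Skylake
 *	          and 1 everywhere else
 *	0:        power wells are always kept enabled
 *	nonzero:  unused power wells may be disabled
 */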
1858
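/*
 * Bind a statically sized power well array to the domain bookkeeping;
 * __power_wells must be a true array (not a pointer) for ARRAY_SIZE
 * to produce the element count.
 */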
1859#define set_power_wells(power_domains, __power_wells) ({		\
1860	(power_domains)->power_wells = (__power_wells);			\
1861	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
1862})
1863
1864/**
1865 * intel_power_domains_init - initializes the power domain structures
1866 * @dev_priv: i915 device instance
1867 *
1868 * Initializes the power domain structures for @dev_priv depending upon the
1869 * supported platform.
1870 */
1871int intel_power_domains_init(struct drm_i915_private *dev_priv)
1872{
1873	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1874
1875	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
1876						     i915.disable_power_well);
1877
1878	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);
1879
1880#ifdef __NetBSD__
1881	linux_mutex_init(&power_domains->lock);
1882#else
1883	mutex_init(&power_domains->lock);
1884#endif
1885
1886	/*
1887	 * The enabling order will be from lower to higher indexed wells;
1888	 * the disabling order is reversed.
1889	 */
1890	if (IS_HASWELL(dev_priv->dev)) {
1891		set_power_wells(power_domains, hsw_power_wells);
1892	} else if (IS_BROADWELL(dev_priv->dev)) {
1893		set_power_wells(power_domains, bdw_power_wells);
1894	} else if (IS_SKYLAKE(dev_priv->dev)) {
1895		set_power_wells(power_domains, skl_power_wells);
1896	} else if (IS_BROXTON(dev_priv->dev)) {
1897		set_power_wells(power_domains, bxt_power_wells);
1898	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
1899		set_power_wells(power_domains, chv_power_wells);
1900	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
1901		set_power_wells(power_domains, vlv_power_wells);
1902	} else {
1903		set_power_wells(power_domains, i9xx_always_on_power_well);
1904	}
1905
1906	return 0;
1907}
1908
1909static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
1910{
1911	struct drm_device *dev = dev_priv->dev;
1912	struct device *device = dev->dev;
1913
1914	if (!HAS_RUNTIME_PM(dev))
1915		return;
1916
1917	if (!intel_enable_rc6(dev))
1918		return;
1919
1920	/* First, make sure we're not suspended. */
1921	pm_runtime_get_sync(device);
1922}
1923
1924/**
1925 * intel_power_domains_fini - finalizes the power domain structures
1926 * @dev_priv: i915 device instance
1927 *
1928 * Finalizes the power domain structures for @dev_priv depending upon the
1929 * supported platform. This function also disables runtime pm and ensures that
1930 * the device stays powered up so that the driver can be reloaded.
1931 */
1932void intel_power_domains_fini(struct drm_i915_private *dev_priv)
1933{
1934	intel_runtime_pm_disable(dev_priv);
1935
1936	/* The i915.ko module is still not prepared to be loaded when
1937	 * the power well is not enabled, so just enable it in case
1938	 * we're going to unload/reload. */
1939	intel_display_set_init_power(dev_priv, true);
1940
1941#ifdef __NetBSD__
1942	linux_mutex_destroy(&dev_priv->power_domains.lock);
1943#else
1944	mutex_destroy(&dev_priv->power_domains.lock);
1945#endif
1946}
1947
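/*
 * Re-synchronize software state with the hardware after a resume: each
 * well's ->sync_hw hook reconciles any state the firmware may have
 * changed, and hw_enabled is re-read from the hardware.
 */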
1948static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
1949{
1950	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1951	struct i915_power_well *power_well;
1952	int i;
1953
1954	mutex_lock(&power_domains->lock);
1955	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
1956		power_well->ops->sync_hw(dev_priv, power_well);
1957		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
1958								     power_well);
1959	}
1960	mutex_unlock(&power_domains->lock);
1961}
1962
1963static void chv_phy_control_init(struct drm_i915_private *dev_priv)
1964{
1965	struct i915_power_well *cmn_bc =
1966		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
1967	struct i915_power_well *cmn_d =
1968		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
1969
1970	/*
1971	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
1972	 * workaround never ever read DISPLAY_PHY_CONTROL, and
1973	 * instead maintain a shadow copy ourselves. Use the actual
1974	 * power well state and lane status to reconstruct the
1975	 * expected initial value.
1976	 */
1977	dev_priv->chv_phy_control =
1978		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
1979		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
1980		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
1981		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
1982		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
1983
1984	/*
1985	 * If all lanes are disabled we leave the override disabled
1986	 * with all power down bits cleared to match the state we
1987	 * would use after disabling the port. Otherwise enable the
1988	 * override and set the lane powerdown bits according to the
1989	 * current lane status.
1990	 */
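	/*
	 * Worked example (illustrative): a PORTB ready mask of 0x3 is
	 * not "all lanes ready" (0xf), so the CH0 override is enabled
	 * and 0x3 is recorded in the powerdown override field; a fully
	 * ready 0xf is canonicalized to 0x0 and the override stays off.
	 */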
1991	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
1992		uint32_t status = I915_READ(DPLL(PIPE_A));
1993		unsigned int mask;
1994
1995		mask = status & DPLL_PORTB_READY_MASK;
1996		if (mask == 0xf)
1997			mask = 0x0;
1998		else
1999			dev_priv->chv_phy_control |=
2000				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
2001
2002		dev_priv->chv_phy_control |=
2003			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
2004
2005		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
2006		if (mask == 0xf)
2007			mask = 0x0;
2008		else
2009			dev_priv->chv_phy_control |=
2010				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
2011
2012		dev_priv->chv_phy_control |=
2013			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
2014
2015		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
2016
2017		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
2018	} else {
2019		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
2020	}
2021
2022	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
2023		uint32_t status = I915_READ(DPIO_PHY_STATUS);
2024		unsigned int mask;
2025
2026		mask = status & DPLL_PORTD_READY_MASK;
2027
2028		if (mask == 0xf)
2029			mask = 0x0;
2030		else
2031			dev_priv->chv_phy_control |=
2032				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
2033
2034		dev_priv->chv_phy_control |=
2035			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
2036
2037		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
2038
2039		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
2040	} else {
2041		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
2042	}
2043
2044	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
2045
2046	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
2047		      dev_priv->chv_phy_control);
2048}
2049
2050static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
2051{
2052	struct i915_power_well *cmn =
2053		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
2054	struct i915_power_well *disp2d =
2055		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
2056
2057	/* If the display might already be active, skip this. */
2058	if (cmn->ops->is_enabled(dev_priv, cmn) &&
2059	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
2060	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
2061		return;
2062
2063	DRM_DEBUG_KMS("toggling display PHY side reset\n");
2064
2065	/* cmnlane needs DPLL registers */
2066	disp2d->ops->enable(dev_priv, disp2d);
2067
2068	/*
2069	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
2070	 * Need to assert and de-assert PHY SB reset by gating the
2071	 * common lane power, then un-gating it.
2072	 * Simply un-gating isn't enough of a reset to get the
2073	 * ports and lanes running.
2074	 */
2075	cmn->ops->disable(dev_priv, cmn);
2076}
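
/*
 * Note that the common lane well is left gated here; it is re-enabled
 * later, when the first power domain reference that needs it is taken,
 * which completes the assert/de-assert cycle described above.
 */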
2077
2078/**
2079 * intel_power_domains_init_hw - initialize hardware power domain state
2080 * @dev_priv: i915 device instance
2081 *
2082 * This function initializes the hardware power domain state and enables all
2083 * power domains using intel_display_set_init_power().
2084 */
2085void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
2086{
2087	struct drm_device *dev = dev_priv->dev;
2088	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2089
2090	power_domains->initializing = true;
2091
2092	if (IS_CHERRYVIEW(dev)) {
2093		mutex_lock(&power_domains->lock);
2094		chv_phy_control_init(dev_priv);
2095		mutex_unlock(&power_domains->lock);
2096	} else if (IS_VALLEYVIEW(dev)) {
2097		mutex_lock(&power_domains->lock);
2098		vlv_cmnlane_wa(dev_priv);
2099		mutex_unlock(&power_domains->lock);
2100	}
2101
2102	/* For now, we need the power well to be always enabled. */
2103	intel_display_set_init_power(dev_priv, true);
2104	intel_power_domains_resume(dev_priv);
2105	power_domains->initializing = false;
2106}
2107
2108/**
2109 * intel_runtime_pm_get - grab a runtime pm reference
2110 * @dev_priv: i915 device instance
2111 *
2112 * This function grabs a device-level runtime pm reference (mostly used for GEM
2113 * code to ensure the GTT or GT is on) and ensures that it is powered up.
2114 *
2115 * Any runtime pm reference obtained by this function must have a symmetric
2116 * call to intel_runtime_pm_put() to release the reference again.
2117 */
2118void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
2119{
2120	struct drm_device *dev = dev_priv->dev;
2121	struct device *device = dev->dev;
2122
2123	if (!HAS_RUNTIME_PM(dev))
2124		return;
2125
2126	pm_runtime_get_sync(device);
2127	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
2128}
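
/*
 * A minimal usage sketch (the register read is only illustrative);
 * between the get and the put the device is guaranteed to be awake:
 *
 *	intel_runtime_pm_get(dev_priv);
 *	tmp = I915_READ(GEN6_RC_STATE);
 *	intel_runtime_pm_put(dev_priv);
 */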
2129
2130/**
2131 * intel_runtime_pm_get_noresume - grab a runtime pm reference
2132 * @dev_priv: i915 device instance
2133 *
2134 * This function grabs a device-level runtime pm reference (mostly used for GEM
2135 * code to ensure the GTT or GT is on).
2136 *
2137 * It will _not_ power up the device but instead only check that it's powered
2138 * on.  Therefore it is only valid to call this function from contexts where
2139 * the device is known to be powered up and where trying to power it up would
2140 * result in hilarity and deadlocks. That pretty much means only the system
2141 * suspend/resume code where this is used to grab runtime pm references for
2142 * deferred setup in work items.
2143 *
2144 * Any runtime pm reference obtained by this function must have a symmetric
2145 * call to intel_runtime_pm_put() to release the reference again.
2146 */
2147void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
2148{
2149	struct drm_device *dev = dev_priv->dev;
2150	struct device *device = dev->dev;
2151
2152	if (!HAS_RUNTIME_PM(dev))
2153		return;
2154
2155	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
2156	pm_runtime_get_noresume(device);
2157}
2158
2159/**
2160 * intel_runtime_pm_put - release a runtime pm reference
2161 * @dev_priv: i915 device instance
2162 *
2163 * This function drops the device-level runtime pm reference obtained by
2164 * intel_runtime_pm_get() and might power down the corresponding
2165 * hardware block right away if this is the last reference.
2166 */
2167void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
2168{
2169	struct drm_device *dev = dev_priv->dev;
2170	struct device *device = dev->dev;
2171
2172	if (!HAS_RUNTIME_PM(dev))
2173		return;
2174
2175	pm_runtime_mark_last_busy(device);
2176	pm_runtime_put_autosuspend(device);
2177}
2178
2179/**
2180 * intel_runtime_pm_enable - enable runtime pm
2181 * @dev_priv: i915 device instance
2182 *
2183 * This function enables runtime pm at the end of the driver load sequence.
2184 *
2185 * Note that this function does not currently enable runtime pm for the
2186 * subordinate display power domains. That is only done on the first modeset
2187 * using intel_display_set_init_power().
2188 */
2189void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
2190{
2191	struct drm_device *dev = dev_priv->dev;
2192	struct device *device = dev->dev;
2193
2194	if (!HAS_RUNTIME_PM(dev))
2195		return;
2196
2197	/*
2198	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
2199	 * requirement.
2200	 */
2201	if (!intel_enable_rc6(dev)) {
2202		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
2203		return;
2204	}
2205
2206	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
2207	pm_runtime_mark_last_busy(device);
2208	pm_runtime_use_autosuspend(device);
2209
2210	pm_runtime_put_autosuspend(device);
2211}
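
/*
 * The trailing pm_runtime_put_autosuspend() drops a reference so the
 * device may autosuspend once the 10s delay elapses; the matching
 * pm_runtime_get_sync() in intel_runtime_pm_disable() above takes the
 * device back out of runtime suspend before unload.
 */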
2212
2213