/*	$NetBSD: intel_runtime_pm.c,v 1.8 2020/02/14 04:35:19 riastradh Exp $	*/

/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_runtime_pm.c,v 1.8 2020/02/14 04:35:19 riastradh Exp $");

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include "i915_drv.h"
#include "intel_drv.h"

#include <linux/nbsd-namespace.h>

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */
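
/*
 * Illustrative sketch (not part of the original file): a caller that needs
 * the hardware behind some domain powered up brackets its register access
 * with a symmetric get/put pair, e.g.
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_B);
 *	... read/write pipe B registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_B);
 *
 * Which power well(s) this actually turns on is platform-specific; the
 * domain-to-well mapping lives in the power well tables further down.
 */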

#define GEN9_ENABLE_DC5(dev) 0
#define SKL_ENABLE_DC6(dev) (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))

#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			 \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
	     i--)							 \
		if ((power_well)->domains & (domain_mask))
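
/*
 * Illustrative use of the iterators above (a sketch, mirroring how the
 * enabled-check below walks the wells): visit every well backing a given
 * domain, highest-level well first:
 *
 *	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
 *		if (!power_well->hw_enabled)
 *			... the domain is not fully powered ...
 *	}
 */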

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
				    int power_well_id);

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}
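
/*
 * Illustrative use (a sketch, not taken verbatim from the driver): a
 * hardware state readout helper would typically bail out early when the
 * backing domain is off, holding the modeset locks as required above:
 *
 *	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
 *		return false;
 */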

/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This function controls the state
 * of this little hack. While the initial power domain state is enabled, runtime
 * pm is effectively disabled.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}
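
/*
 * Sketch of the intended call pattern (inferred from the comment above, not
 * copied from the load/resume paths themselves): force everything on while
 * the rest of the hardware state is brought up, then drop the hack so
 * runtime pm can work again:
 *
 *	intel_display_set_init_power(dev_priv, true);
 *	... driver load / resume work that assumes full power ...
 *	intel_display_set_init_power(dev_priv, false);
 */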

static inline void
touch_vga_msr(struct drm_device *dev)
{
#ifdef __NetBSD__
	const bus_addr_t vgabase = 0x3c0;
	const bus_space_tag_t iot = dev->pdev->pd_pa.pa_iot;
	bus_space_handle_t ioh;
	uint8_t msr;
	int error;

	error = bus_space_map(iot, vgabase, 0x10, 0, &ioh);
	if (error) {
		device_printf(dev->pdev->pd_dev,
		    "unable to map VGA registers: %d\n", error);
	} else {
		DRMCTASSERT(vgabase <= VGA_MSR_READ);
		msr = bus_space_read_1(iot, ioh, VGA_MSR_READ - vgabase);
		bus_space_write_1(iot, ioh, VGA_MSR_WRITE - vgabase, msr);
		bus_space_unmap(iot, ioh, 0x10);
	}
#else
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	touch_vga_msr(dev);

	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * See the comment in hsw_power_well_post_enable(): touching the VGA
	 * MSR register after the well comes back up keeps vgacon working
	 * without triggering unclaimed register interrupts.
	 */
	if (power_well->data == SKL_DISP_PW_2) {
		touch_vga_msr(dev);

		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}

	if (power_well->data == SKL_DISP_PW_1) {
		if (!dev_priv->power_domains.initializing)
			intel_prepare_ddi(dev);
		gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
	}
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}

	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_A_E_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_B_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_C_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_D_POWER_DOMAINS |		\
	SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) |		\
	BIT(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) |	\
	BIT(POWER_DOMAIN_INIT))

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
	WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be enabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled to enable DC9.\n");
	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if the mode set
	 * disable sequence was followed.
	 * 2] Check if the display uninitialize sequence was initiated.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
	WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be disabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if the mode
	 * set disable sequence was followed.
	 * 2] Check if the display uninitialize sequence was initiated.
	 */
}

void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	val = I915_READ(DC_STATE_EN);
	val |= DC_STATE_EN_DC9;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_DC9;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void gen9_set_dc_state_debugmask_memory_up(
			struct drm_i915_private *dev_priv)
{
	uint32_t val;

	/* This bit never needs to be cleared again afterwards. */
	val = I915_READ(DC_STATE_DEBUG);
	if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
		val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
		I915_WRITE(DC_STATE_DEBUG, val);
		POSTING_READ(DC_STATE_DEBUG);
	}
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);

	WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
		  "Platform doesn't support DC5.\n");
	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	WARN_ONCE(dev_priv->pm.suspended,
		  "DC5 cannot be enabled if the platform is runtime-suspended.\n");

	assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);
	/*
	 * During initialization, the firmware may not be loaded yet.
	 * We still want to make sure that the DC enabling flag is cleared.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	WARN_ONCE(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
	WARN_ONCE(dev_priv->pm.suspended,
		"Disabling DC5 while the platform is runtime-suspended should never happen.\n");
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state_debugmask_memory_up(dev_priv);

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
	val |= DC_STATE_EN_UPTO_DC5;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void gen9_disable_dc5(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_disable_dc5(dev_priv);

	DRM_DEBUG_KMS("Disabling DC5\n");

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC5;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
		  "Platform doesn't support DC6.\n");
	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
{
	/*
	 * During initialization, the firmware may not be loaded yet.
	 * We still want to make sure that the DC enabling flag is cleared.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	assert_csr_loaded(dev_priv);
	WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be disabled.\n");
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state_debugmask_memory_up(dev_priv);

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
	val |= DC_STATE_EN_UPTO_DC6;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_disable_dc6(dev_priv);

	DRM_DEBUG_KMS("Disabling DC6\n");

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC6;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void skl_set_power_well(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->data) {
	case SKL_DISP_PW_1:
		if (wait_for((I915_READ(SKL_FUSE_STATUS) &
			SKL_FUSE_PG0_DIST_STATUS), 1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_DDI_A_E:
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case SKL_DISP_PW_MISC_IO:
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->data);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->data);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->data);
	is_enabled = tmp & state_mask;

	if (enable) {
		if (!enable_requested) {
			WARN((tmp & state_mask) &&
				!I915_READ(HSW_PWR_WELL_BIOS),
				"Invalid for power well status to be enabled, unless done by the BIOS, when request is to disable!\n");
			if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
				power_well->data == SKL_DISP_PW_2) {
				if (SKL_ENABLE_DC6(dev)) {
					skl_disable_dc6(dev_priv);
					/*
					 * DDI buffer programming is unnecessary
					 * during driver load/resume, as it's
					 * already done during modeset
					 * initialization then. It's also invalid
					 * here, as the encoder list is still
					 * uninitialized.
					 */
					if (!dev_priv->power_domains.initializing)
						intel_prepare_ddi(dev);
				} else {
					gen9_disable_dc5(dev_priv);
				}
			}
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				state_mask), 1))
				DRM_ERROR("%s enable timeout\n",
					power_well->name);
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
				(power_well->data == SKL_DISP_PW_1) &&
				(intel_csr_load_status_get(dev_priv) == FW_LOADED))
				DRM_DEBUG_KMS("Not disabling PW1; DMC will handle it\n");
			else {
				I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
				POSTING_READ(HSW_PWR_WELL_DRIVER);
				DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
			}

			if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
				power_well->data == SKL_DISP_PW_2) {
				enum csr_state state;
				/*
				 * TODO: wait for a completion event or
				 * similar here instead of busy
				 * waiting using wait_for().
				 */
				wait_for((state = intel_csr_load_status_get(dev_priv)) !=
						FW_UNINITIALIZED, 1000);
				if (state != FW_LOADED)
					DRM_DEBUG("CSR firmware not ready (%d)\n",
							state);
				else if (SKL_ENABLE_DC6(dev))
					skl_enable_dc6(dev_priv);
				else
					gen9_enable_dc5(dev_priv);
			}
		}
	}

	if (check_fuse_status) {
		if (power_well->data == SKL_DISP_PW_1) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG1_DIST_STATUS), 1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->data == SKL_DISP_PW_2) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG2_DIST_STATUS), 1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
		SKL_POWER_WELL_STATE(power_well->data);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}

static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}

static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}
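
/*
 * Note (added commentary): vlv_set_power_well() above is a simple
 * request/ack handshake with the punit: write the desired state into
 * PUNIT_REG_PWRGT_CTRL, then poll PUNIT_REG_PWRGT_STATUS until the field
 * selected by PUNIT_PWRGT_MASK() for this well reports that state.
 */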

static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	enum i915_pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv->dev, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be initialized explicitly anyway.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	i915_redisable_vga_power_on(dev_priv->dev);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_power_sequencer_reset(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum i915_pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 int power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;
	u32 tmp;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (i.e. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10))
		WARN(phy_status != tmp,
		     "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
		     tmp, phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum i915_pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d is not powered up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum i915_pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (i.e. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}

bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}

static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum i915_pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum i915_pipe pipe = power_well->data;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++)
			intel_power_well_enable(dev_priv, power_well);
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}

/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to put the reference for
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN_ON(!power_domains->domain_use_count[domain]);
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN_ON(!power_well->count);

		if (!--power_well->count && i915.disable_power_well)
			intel_power_well_disable(dev_priv, power_well);
	}

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}
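
/*
 * Note (added commentary): whether the last intel_display_power_put()
 * actually turns a well off is gated by the i915.disable_power_well module
 * parameter checked in the loop above; with it cleared, wells are left
 * enabled, which can help when debugging power-well issues.
 */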

#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))

static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
1674
1675static struct i915_power_well vlv_power_wells[] = {
1676	{
1677		.name = "always-on",
1678		.always_on = 1,
1679		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
1680		.ops = &i9xx_always_on_power_well_ops,
1681	},
1682	{
1683		.name = "display",
1684		.domains = VLV_DISPLAY_POWER_DOMAINS,
1685		.data = PUNIT_POWER_WELL_DISP2D,
1686		.ops = &vlv_display_power_well_ops,
1687	},
1688	{
1689		.name = "dpio-tx-b-01",
1690		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1691			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
1692			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1693			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1694		.ops = &vlv_dpio_power_well_ops,
1695		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
1696	},
1697	{
1698		.name = "dpio-tx-b-23",
1699		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1700			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
1701			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1702			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1703		.ops = &vlv_dpio_power_well_ops,
1704		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
1705	},
1706	{
1707		.name = "dpio-tx-c-01",
1708		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1709			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
1710			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1711			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1712		.ops = &vlv_dpio_power_well_ops,
1713		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
1714	},
1715	{
1716		.name = "dpio-tx-c-23",
1717		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1718			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
1719			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1720			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1721		.ops = &vlv_dpio_power_well_ops,
1722		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
1723	},
1724	{
1725		.name = "dpio-common",
1726		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
1727		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
1728		.ops = &vlv_dpio_cmn_power_well_ops,
1729	},
1730};
1731
1732static struct i915_power_well chv_power_wells[] = {
1733	{
1734		.name = "always-on",
1735		.always_on = 1,
1736		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
1737		.ops = &i9xx_always_on_power_well_ops,
1738	},
1739	{
1740		.name = "display",
1741		/*
1742		 * Pipe A power well is the new disp2d well. Pipe B and C
1743		 * power wells don't actually exist. Pipe A power well is
1744		 * required for any pipe to work.
1745		 */
1746		.domains = VLV_DISPLAY_POWER_DOMAINS,
1747		.data = PIPE_A,
1748		.ops = &chv_pipe_power_well_ops,
1749	},
1750	{
1751		.name = "dpio-common-bc",
1752		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
1753		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
1754		.ops = &chv_dpio_cmn_power_well_ops,
1755	},
1756	{
1757		.name = "dpio-common-d",
1758		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
1759		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
1760		.ops = &chv_dpio_cmn_power_well_ops,
1761	},
1762};
1763
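/*
 * Look up the power well identified by @power_well_id and report whether
 * the hardware currently has it powered on, as seen by the well's own
 * ->is_enabled() hook.
 */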
1764bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
1765				    int power_well_id)
1766{
1767	struct i915_power_well *power_well;
1768	bool ret;
1769
1770	power_well = lookup_power_well(dev_priv, power_well_id);
1771	ret = power_well->ops->is_enabled(dev_priv, power_well);
1772
1773	return ret;
1774}
1775
1776static struct i915_power_well skl_power_wells[] = {
1777	{
1778		.name = "always-on",
1779		.always_on = 1,
1780		.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
1781		.ops = &i9xx_always_on_power_well_ops,
1782	},
1783	{
1784		.name = "power well 1",
1785		.domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
1786		.ops = &skl_power_well_ops,
1787		.data = SKL_DISP_PW_1,
1788	},
1789	{
1790		.name = "MISC IO power well",
1791		.domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
1792		.ops = &skl_power_well_ops,
1793		.data = SKL_DISP_PW_MISC_IO,
1794	},
1795	{
1796		.name = "power well 2",
1797		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
1798		.ops = &skl_power_well_ops,
1799		.data = SKL_DISP_PW_2,
1800	},
1801	{
1802		.name = "DDI A/E power well",
1803		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
1804		.ops = &skl_power_well_ops,
1805		.data = SKL_DISP_PW_DDI_A_E,
1806	},
1807	{
1808		.name = "DDI B power well",
1809		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
1810		.ops = &skl_power_well_ops,
1811		.data = SKL_DISP_PW_DDI_B,
1812	},
1813	{
1814		.name = "DDI C power well",
1815		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
1816		.ops = &skl_power_well_ops,
1817		.data = SKL_DISP_PW_DDI_C,
1818	},
1819	{
1820		.name = "DDI D power well",
1821		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
1822		.ops = &skl_power_well_ops,
1823		.data = SKL_DISP_PW_DDI_D,
1824	},
1825};
1826
1827static struct i915_power_well bxt_power_wells[] = {
1828	{
1829		.name = "always-on",
1830		.always_on = 1,
1831		.domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
1832		.ops = &i9xx_always_on_power_well_ops,
1833	},
1834	{
1835		.name = "power well 1",
1836		.domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
1837		.ops = &skl_power_well_ops,
1838		.data = SKL_DISP_PW_1,
1839	},
1840	{
1841		.name = "power well 2",
1842		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
1843		.ops = &skl_power_well_ops,
1844		.data = SKL_DISP_PW_2,
1845	}
1846};
1847
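/*
 * Sanitize the i915.disable_power_well module option: a non-negative value
 * is an explicit user choice and is simply normalized to 0/1, while a
 * negative value selects the per-platform default, which turns power well
 * support off on Skylake/Kabylake and leaves it on everywhere else.
 */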
1848static int
1849sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
1850				   int disable_power_well)
1851{
1852	if (disable_power_well >= 0)
1853		return !!disable_power_well;
1854
1855	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
1856		DRM_DEBUG_KMS("Disabling display power well support\n");
1857		return 0;
1858	}
1859
1860	return 1;
1861}
1862
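/* Record a platform's power well array and its length in one statement. */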
1863#define set_power_wells(power_domains, __power_wells) ({		\
1864	(power_domains)->power_wells = (__power_wells);			\
1865	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
1866})
1867
1868/**
1869 * intel_power_domains_init - initializes the power domain structures
1870 * @dev_priv: i915 device instance
1871 *
1872 * Initializes the power domain structures for @dev_priv depending upon the
1873 * supported platform.
1874 */
1875int intel_power_domains_init(struct drm_i915_private *dev_priv)
1876{
1877	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1878
1879	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
1880						     i915.disable_power_well);
1881
1882	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);
1883
1884	mutex_init(&power_domains->lock);
1885
1886	/*
1887	 * The enabling order will be from lower to higher indexed wells,
1888	 * the disabling order is reversed.
1889	 */
1890	if (IS_HASWELL(dev_priv->dev)) {
1891		set_power_wells(power_domains, hsw_power_wells);
1892	} else if (IS_BROADWELL(dev_priv->dev)) {
1893		set_power_wells(power_domains, bdw_power_wells);
1894	} else if (IS_SKYLAKE(dev_priv->dev) || IS_KABYLAKE(dev_priv->dev)) {
1895		set_power_wells(power_domains, skl_power_wells);
1896	} else if (IS_BROXTON(dev_priv->dev)) {
1897		set_power_wells(power_domains, bxt_power_wells);
1898	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
1899		set_power_wells(power_domains, chv_power_wells);
1900	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
1901		set_power_wells(power_domains, vlv_power_wells);
1902	} else {
1903		set_power_wells(power_domains, i9xx_always_on_power_well);
1904	}
1905
1906	return 0;
1907}
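
/*
 * A rough sketch of how the functions in this file fit together over a
 * hypothetical driver lifetime (error handling and unrelated setup omitted):
 *
 *	intel_power_domains_init(dev_priv);	   builds the power well tables
 *	intel_power_domains_init_hw(dev_priv);	   syncs and powers up the hw
 *	intel_runtime_pm_enable(dev_priv);	   last step of driver load
 *	...
 *	intel_power_domains_fini(dev_priv);	   on unload; keeps power on
 */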
1908
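/*
 * Counterpart of intel_runtime_pm_enable(): if runtime pm was enabled,
 * grab a reference so the device is resumed and stays powered from here on.
 */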
1909static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
1910{
1911	struct drm_device *dev = dev_priv->dev;
1912	struct device *device = dev->dev;
1913
1914	if (!HAS_RUNTIME_PM(dev))
1915		return;
1916
1917	if (!intel_enable_rc6(dev))
1918		return;
1919
1920	/* Make sure we're not suspended, and stay that way from here on. */
1921	pm_runtime_get_sync(device);
1922}
1923
1924/**
1925 * intel_power_domains_fini - finalizes the power domain structures
1926 * @dev_priv: i915 device instance
1927 *
1928 * Finalizes the power domain structures for @dev_priv depending upon the
1929 * supported platform. This function also disables runtime pm and ensures that
1930 * the device stays powered up so that the driver can be reloaded.
1931 */
1932void intel_power_domains_fini(struct drm_i915_private *dev_priv)
1933{
1934	intel_runtime_pm_disable(dev_priv);
1935
1936	/* The i915.ko module cannot yet cope with being loaded when the
1937	 * power well is not enabled, so just leave it enabled in case
1938	 * we're going to unload and reload. */
1939	intel_display_set_init_power(dev_priv, true);
1940
1941	mutex_destroy(&dev_priv->power_domains.lock);
1942}
1943
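/*
 * Walk all power wells, give each one a chance to sync its software state
 * with the hardware via ->sync_hw(), and cache the current hardware state
 * in ->hw_enabled.
 */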
1944static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
1945{
1946	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1947	struct i915_power_well *power_well;
1948	int i;
1949
1950	mutex_lock(&power_domains->lock);
1951	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
1952		power_well->ops->sync_hw(dev_priv, power_well);
1953		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
1954								     power_well);
1955	}
1956	mutex_unlock(&power_domains->lock);
1957}
1958
1959static void chv_phy_control_init(struct drm_i915_private *dev_priv)
1960{
1961	struct i915_power_well *cmn_bc =
1962		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
1963	struct i915_power_well *cmn_d =
1964		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
1965
1966	/*
1967	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
1968	 * workaround never ever read DISPLAY_PHY_CONTROL, and
1969	 * instead maintain a shadow copy ourselves. Use the actual
1970	 * power well state and lane status to reconstruct the
1971	 * expected initial value.
1972	 */
1973	dev_priv->chv_phy_control =
1974		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
1975		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
1976		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
1977		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
1978		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
1979
1980	/*
1981	 * If all lanes are disabled we leave the override disabled
1982	 * with all power down bits cleared to match the state we
1983	 * would use after disabling the port. Otherwise enable the
1984	 * override and set the lane powerdown bits according to the
1985	 * current lane status.
1986	 */
1987	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
1988		uint32_t status = I915_READ(DPLL(PIPE_A));
1989		unsigned int mask;
1990
1991		mask = status & DPLL_PORTB_READY_MASK;
1992		if (mask == 0xf)
1993			mask = 0x0;
1994		else
1995			dev_priv->chv_phy_control |=
1996				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
1997
1998		dev_priv->chv_phy_control |=
1999			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
2000
2001		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
2002		if (mask == 0xf)
2003			mask = 0x0;
2004		else
2005			dev_priv->chv_phy_control |=
2006				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
2007
2008		dev_priv->chv_phy_control |=
2009			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
2010
2011		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
2012
2013		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
2014	} else {
2015		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
2016	}
2017
2018	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
2019		uint32_t status = I915_READ(DPIO_PHY_STATUS);
2020		unsigned int mask;
2021
2022		mask = status & DPLL_PORTD_READY_MASK;
2023
2024		if (mask == 0xf)
2025			mask = 0x0;
2026		else
2027			dev_priv->chv_phy_control |=
2028				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
2029
2030		dev_priv->chv_phy_control |=
2031			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
2032
2033		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
2034
2035		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
2036	} else {
2037		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
2038	}
2039
2040	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
2041
2042	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
2043		      dev_priv->chv_phy_control);
2044}
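
/*
 * The chv_phy_control shadow above is an instance of the usual pattern for
 * registers that cannot be read back safely: cache the last value written
 * and do every update as read-modify-write on the cache.  A minimal sketch
 * of the idea, with hypothetical names that are not part of this driver:
 *
 *	static void foo_update_ctl(struct foo *f, uint32_t clear, uint32_t set)
 *	{
 *		f->ctl_shadow = (f->ctl_shadow & ~clear) | set;
 *		foo_write(f, FOO_CTL, f->ctl_shadow);	   never read FOO_CTL
 *	}
 */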
2045
2046static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
2047{
2048	struct i915_power_well *cmn =
2049		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
2050	struct i915_power_well *disp2d =
2051		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
2052
2053	/* If the display might already be active, skip this. */
2054	if (cmn->ops->is_enabled(dev_priv, cmn) &&
2055	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
2056	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
2057		return;
2058
2059	DRM_DEBUG_KMS("toggling display PHY side reset\n");
2060
2061	/* cmnlane needs DPLL registers */
2062	disp2d->ops->enable(dev_priv, disp2d);
2063
2064	/*
2065	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
2066	 * Need to assert and de-assert PHY SB reset by gating the
2067	 * common lane power, then ungating it.
2068	 * Simply ungating does not reset the PHY thoroughly enough to get
2069	 * ports and lanes running.
2070	 */
2071	cmn->ops->disable(dev_priv, cmn);
2072}
2073
2074/**
2075 * intel_power_domains_init_hw - initialize hardware power domain state
2076 * @dev_priv: i915 device instance
2077 *
2078 * This function initializes the hardware power domain state and enables all
2079 * power domains using intel_display_set_init_power().
2080 */
2081void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
2082{
2083	struct drm_device *dev = dev_priv->dev;
2084	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2085
2086	power_domains->initializing = true;
2087
2088	if (IS_CHERRYVIEW(dev)) {
2089		mutex_lock(&power_domains->lock);
2090		chv_phy_control_init(dev_priv);
2091		mutex_unlock(&power_domains->lock);
2092	} else if (IS_VALLEYVIEW(dev)) {
2093		mutex_lock(&power_domains->lock);
2094		vlv_cmnlane_wa(dev_priv);
2095		mutex_unlock(&power_domains->lock);
2096	}
2097
2098	/* For now, we need the power well to be always enabled. */
2099	intel_display_set_init_power(dev_priv, true);
2100	intel_power_domains_resume(dev_priv);
2101	power_domains->initializing = false;
2102}
2103
2104/**
2105 * intel_runtime_pm_get - grab a runtime pm reference
2106 * @dev_priv: i915 device instance
2107 *
2108 * This function grabs a device-level runtime pm reference (mostly used for GEM
2109 * code to ensure the GTT or GT is on) and ensures that it is powered up.
2110 *
2111 * Any runtime pm reference obtained by this function must have a symmetric
2112 * call to intel_runtime_pm_put() to release the reference again.
2113 */
2114void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
2115{
2116	struct drm_device *dev = dev_priv->dev;
2117	struct device *device = dev->dev;
2118
2119	if (!HAS_RUNTIME_PM(dev))
2120		return;
2121
2122	pm_runtime_get_sync(device);
2123	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
2124}
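
/*
 * A typical (hypothetical) caller brackets its hardware access with a
 * get/put pair:
 *
 *	intel_runtime_pm_get(dev_priv);
 *	...access the hardware; it is guaranteed to be powered up...
 *	intel_runtime_pm_put(dev_priv);
 */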
2125
2126/**
2127 * intel_runtime_pm_get_noresume - grab a runtime pm reference
2128 * @dev_priv: i915 device instance
2129 *
2130 * This function grabs a device-level runtime pm reference (mostly used for GEM
2131 * code to ensure the GTT or GT is on).
2132 *
2133 * It will _not_ power up the device but instead only check that it's powered
2134 * on.  Therefore it is only valid to call this function from contexts where
2135 * the device is known to be powered up and where trying to power it up would
2136 * result in hilarity and deadlocks. That pretty much means only the system
2137 * suspend/resume code where this is used to grab runtime pm references for
2138 * delayed setup down in work items.
2139 *
2140 * Any runtime pm reference obtained by this function must have a symmetric
2141 * call to intel_runtime_pm_put() to release the reference again.
2142 */
2143void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
2144{
2145	struct drm_device *dev = dev_priv->dev;
2146	struct device *device = dev->dev;
2147
2148	if (!HAS_RUNTIME_PM(dev))
2149		return;
2150
2151	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
2152	pm_runtime_get_noresume(device);
2153}
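
/*
 * For example (hypothetical), suspend code that defers setup to a work
 * item while the device is known to be awake might do:
 *
 *	intel_runtime_pm_get_noresume(dev_priv);     device already powered
 *	schedule_work(&work);	   the work item calls intel_runtime_pm_put()
 */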
2154
2155/**
2156 * intel_runtime_pm_put - release a runtime pm reference
2157 * @dev_priv: i915 device instance
2158 *
2159 * This function drops the device-level runtime pm reference obtained by
2160 * intel_runtime_pm_get() and might power down the corresponding
2161 * hardware block right away if this is the last reference.
2162 */
2163void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
2164{
2165	struct drm_device *dev = dev_priv->dev;
2166	struct device *device = dev->dev;
2167
2168	if (!HAS_RUNTIME_PM(dev))
2169		return;
2170
2171	pm_runtime_mark_last_busy(device);
2172	pm_runtime_put_autosuspend(device);
2173}
2174
2175/**
2176 * intel_runtime_pm_enable - enable runtime pm
2177 * @dev_priv: i915 device instance
2178 *
2179 * This function enables runtime pm at the end of the driver load sequence.
2180 *
2181 * Note that this function does not currently enable runtime pm for the
2182 * subordinate display power domains. That is only done on the first modeset
2183 * using intel_display_set_init_power().
2184 */
2185void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
2186{
2187	struct drm_device *dev = dev_priv->dev;
2188	struct device *device = dev->dev;
2189
2190	if (!HAS_RUNTIME_PM(dev))
2191		return;
2192
2193	/*
2194	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
2195	 * requirement.
2196	 */
2197	if (!intel_enable_rc6(dev)) {
2198		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
2199		return;
2200	}
2201
2202	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
2203	pm_runtime_mark_last_busy(device);
2204	pm_runtime_use_autosuspend(device);
2205
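	/*
	 * Drop a reference so the device may autosuspend once it has been
	 * idle for the delay set above; intel_runtime_pm_disable() grabs a
	 * reference back on teardown.
	 */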
2206	pm_runtime_put_autosuspend(device);
2207}
2208