intel_pm.c revision 277487
1277487Skib/*
2277487Skib * Copyright �� 2012 Intel Corporation
3277487Skib *
4277487Skib * Permission is hereby granted, free of charge, to any person obtaining a
5277487Skib * copy of this software and associated documentation files (the "Software"),
6277487Skib * to deal in the Software without restriction, including without limitation
7277487Skib * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8277487Skib * and/or sell copies of the Software, and to permit persons to whom the
9277487Skib * Software is furnished to do so, subject to the following conditions:
10277487Skib *
11277487Skib * The above copyright notice and this permission notice (including the next
12277487Skib * paragraph) shall be included in all copies or substantial portions of the
13277487Skib * Software.
14277487Skib *
15277487Skib * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16277487Skib * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17277487Skib * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18277487Skib * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19277487Skib * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20277487Skib * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21277487Skib * IN THE SOFTWARE.
22277487Skib *
23277487Skib * Authors:
24277487Skib *    Eugeni Dodonov <eugeni.dodonov@intel.com>
25277487Skib *
26277487Skib */
27277487Skib
28277487Skib#include <sys/cdefs.h>
29277487Skib__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/intel_pm.c 277487 2015-01-21 16:10:37Z kib $");
30277487Skib
31277487Skib#include <dev/drm2/drmP.h>
32277487Skib#include <dev/drm2/drm.h>
33277487Skib#include <dev/drm2/i915/i915_drm.h>
34277487Skib#include <dev/drm2/i915/i915_drv.h>
35277487Skib#include <dev/drm2/i915/intel_drv.h>
36277487Skib#include <sys/kdb.h>
37277487Skib
/*
 * Device registered for the IPS (Intelligent Power Sharing) "mch" query
 * interfaces; set/cleared under mchdev_lock.
 */
static struct drm_i915_private *i915_mch_dev;
/*
 * Lock protecting IPS related data structures
 *   - i915_mch_dev
 *   - dev_priv->max_delay
 *   - dev_priv->min_delay
 *   - dev_priv->fmax
 *   - dev_priv->gpu_busy
 */
static struct mtx mchdev_lock;
/* Initialize the mutex automatically at boot (mutex(9) SYSINIT hook). */
MTX_SYSINIT(mchdev, &mchdev_lock, "mchdev", MTX_DEF);
49277487Skib
50277487Skib/* FBC, or Frame Buffer Compression, is a technique employed to compress the
51277487Skib * framebuffer contents in-memory, aiming at reducing the required bandwidth
52277487Skib * during in-memory transfers and, therefore, reduce the power packet.
53277487Skib *
54277487Skib * The benefits of FBC are mostly visible with solid backgrounds and
55277487Skib * variation-less patterns.
56277487Skib *
57277487Skib * FBC-related functionality can be enabled by the means of the
58277487Skib * i915.i915_enable_fbc parameter
59277487Skib */
60277487Skib
/*
 * Disable 8xx-style framebuffer compression.
 *
 * Clears FBC_CTL_EN in FBC_CONTROL and then polls FBC_STATUS until the
 * compressor reports idle (wait_for with a 10 unit timeout — presumably
 * milliseconds; TODO confirm against wait_for()'s definition).  A no-op
 * if compression is already disabled.
 */
static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;	/* already off, nothing to wait for */

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}
82277487Skib
83277487Skibstatic void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
84277487Skib{
85277487Skib	struct drm_device *dev = crtc->dev;
86277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
87277487Skib	struct drm_framebuffer *fb = crtc->fb;
88277487Skib	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
89277487Skib	struct drm_i915_gem_object *obj = intel_fb->obj;
90277487Skib	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
91277487Skib	int cfb_pitch;
92277487Skib	int plane, i;
93277487Skib	u32 fbc_ctl, fbc_ctl2;
94277487Skib
95277487Skib	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
96277487Skib	if (fb->pitches[0] < cfb_pitch)
97277487Skib		cfb_pitch = fb->pitches[0];
98277487Skib
99277487Skib	/* FBC_CTL wants 64B units */
100277487Skib	cfb_pitch = (cfb_pitch / 64) - 1;
101277487Skib	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
102277487Skib
103277487Skib	/* Clear old tags */
104277487Skib	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
105277487Skib		I915_WRITE(FBC_TAG + (i * 4), 0);
106277487Skib
107277487Skib	/* Set it up... */
108277487Skib	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
109277487Skib	fbc_ctl2 |= plane;
110277487Skib	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
111277487Skib	I915_WRITE(FBC_FENCE_OFF, crtc->y);
112277487Skib
113277487Skib	/* enable it... */
114277487Skib	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
115277487Skib	if (IS_I945GM(dev))
116277487Skib		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
117277487Skib	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
118277487Skib	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
119277487Skib	fbc_ctl |= obj->fence_reg;
120277487Skib	I915_WRITE(FBC_CONTROL, fbc_ctl);
121277487Skib
122277487Skib	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
123277487Skib		      cfb_pitch, crtc->y, intel_crtc->plane);
124277487Skib}
125277487Skib
126277487Skibstatic bool i8xx_fbc_enabled(struct drm_device *dev)
127277487Skib{
128277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
129277487Skib
130277487Skib	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
131277487Skib}
132277487Skib
/*
 * Enable G4x-style framebuffer compression (DPFC) on the given CRTC.
 *
 * Programs the plane/fence selection, the recompression stall watermark
 * and timer (@interval), and the fence Y offset, then sets DPFC_CTL_EN.
 */
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	/* Plane, self-refresh, 1x compression limit and the CPU fence. */
	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
159277487Skib
160277487Skibstatic void g4x_disable_fbc(struct drm_device *dev)
161277487Skib{
162277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
163277487Skib	u32 dpfc_ctl;
164277487Skib
165277487Skib	/* Disable compression */
166277487Skib	dpfc_ctl = I915_READ(DPFC_CONTROL);
167277487Skib	if (dpfc_ctl & DPFC_CTL_EN) {
168277487Skib		dpfc_ctl &= ~DPFC_CTL_EN;
169277487Skib		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
170277487Skib
171277487Skib		DRM_DEBUG_KMS("disabled FBC\n");
172277487Skib	}
173277487Skib}
174277487Skib
175277487Skibstatic bool g4x_fbc_enabled(struct drm_device *dev)
176277487Skib{
177277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
178277487Skib
179277487Skib	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
180277487Skib}
181277487Skib
/*
 * Make the SNB blitter notify the FBC hardware of front-buffer writes.
 *
 * Uses the ECOSKPD register's write-enable convention: the FBC_NOTIFY
 * bit shifted by GEN6_BLITTER_LOCK_SHIFT appears to act as the unlock
 * key for the corresponding low bit — NOTE(review): inferred from the
 * write pattern below; confirm against the SNB register documentation.
 * The whole sequence runs under forcewake so the GT registers are awake.
 */
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	/* Unlock the FBC_NOTIFY bit ... */
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	/* ... set it ... */
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	/* ... and re-lock it. */
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	/* Posting read flushes the writes before we drop forcewake. */
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}
201277487Skib
/*
 * Enable Ironlake-style framebuffer compression (ILK DPFC) on a CRTC.
 *
 * Programs plane, compression limit, persistent mode, fence, the
 * recompression watermark/timer (@interval), the fence Y offset and the
 * render-tracking base address, then sets DPFC_CTL_EN.  On gen6 it also
 * programs the SNB CPU-fence registers and pokes the blitter so that it
 * notifies FBC of writes.
 */
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	/* Preserve only the reserved bits of the current control value. */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	/* Set persistent mode for front-buffer rendering, ala X. */
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		/* SNB tracks CPU writes via a separate fence register. */
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
239277487Skib
240277487Skibstatic void ironlake_disable_fbc(struct drm_device *dev)
241277487Skib{
242277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
243277487Skib	u32 dpfc_ctl;
244277487Skib
245277487Skib	/* Disable compression */
246277487Skib	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
247277487Skib	if (dpfc_ctl & DPFC_CTL_EN) {
248277487Skib		dpfc_ctl &= ~DPFC_CTL_EN;
249277487Skib		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
250277487Skib
251277487Skib		DRM_DEBUG_KMS("disabled FBC\n");
252277487Skib	}
253277487Skib}
254277487Skib
255277487Skibstatic bool ironlake_fbc_enabled(struct drm_device *dev)
256277487Skib{
257277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
258277487Skib
259277487Skib	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
260277487Skib}
261277487Skib
262277487Skibbool intel_fbc_enabled(struct drm_device *dev)
263277487Skib{
264277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
265277487Skib
266277487Skib	if (!dev_priv->display.fbc_enabled)
267277487Skib		return false;
268277487Skib
269277487Skib	return dev_priv->display.fbc_enabled(dev);
270277487Skib}
271277487Skib
/*
 * Deferred FBC-enable task, run from dev_priv->tq (scheduled by
 * intel_enable_fbc()).  @arg is the struct intel_fbc_work allocated by
 * the scheduler; this function always frees it on exit.
 *
 * Under DRM_LOCK it verifies the work is still the current one
 * (dev_priv->fbc_work) and that the CRTC still scans out the same fb;
 * only then does it enable FBC and record the plane/fb/y used, so the
 * cached cfb_* state always matches what the hardware was programmed
 * with.  Stale or cancelled work falls through and is simply freed.
 */
static void intel_fbc_work_fn(void *arg, int pending)
{
	struct intel_fbc_work *work = arg;
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_LOCK(dev);
	if (work == dev_priv->fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc,
						     work->interval);

			/* Remember what we programmed, for intel_update_fbc. */
			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->cfb_fb = work->crtc->fb->base.id;
			dev_priv->cfb_y = work->crtc->y;
		}

		dev_priv->fbc_work = NULL;
	}
	DRM_UNLOCK(dev);

	free(work, DRM_MEM_KMS);
}
298277487Skib
/*
 * Cancel any pending deferred FBC enable.
 *
 * If the timeout task had not started yet, taskqueue_cancel_timeout()
 * returns 0 and we free the work here; otherwise the task is (or was)
 * running and will free itself — clearing dev_priv->fbc_work makes the
 * running task's "is this still current?" check fail so it does nothing
 * else.  Caller provides the synchronisation (see comment below).
 */
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	u_int pending;

	if (dev_priv->fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->fbc_work->task,
	    &pending) == 0)
		/* tasklet was killed before being run, clean up */
		free(dev_priv->fbc_work, DRM_MEM_KMS);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that is no longer
	 * necessary to run.
	 */
	dev_priv->fbc_work = NULL;
}
324277487Skib
/*
 * Schedule a deferred FBC enable for @crtc.
 *
 * Cancels any previously pending enable, then queues intel_fbc_work_fn
 * on dev_priv->tq with a 50ms delay (see the rationale below).  The
 * M_WAITOK allocation may sleep, so this must not be called from a
 * non-sleepable context.  No-op if the chipset has no enable_fbc hook.
 */
void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	/* Only one pending enable at a time. */
	intel_cancel_fbc_work(dev_priv);

	work = malloc(sizeof(*work), DRM_MEM_KMS, M_WAITOK | M_ZERO);

	work->crtc = crtc;
	work->fb = crtc->fb;
	work->interval = interval;
	TIMEOUT_TASK_INIT(dev_priv->tq, &work->task, 0, intel_fbc_work_fn,
	    work);

	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 */
	taskqueue_enqueue_timeout(dev_priv->tq, &work->task,
	    msecs_to_jiffies(50));
}
362277487Skib
363277487Skibvoid intel_disable_fbc(struct drm_device *dev)
364277487Skib{
365277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
366277487Skib
367277487Skib	intel_cancel_fbc_work(dev_priv);
368277487Skib
369277487Skib	if (!dev_priv->display.disable_fbc)
370277487Skib		return;
371277487Skib
372277487Skib	dev_priv->display.disable_fbc(dev);
373277487Skib	dev_priv->cfb_plane = -1;
374277487Skib}
375277487Skib
376277487Skib/**
377277487Skib * intel_update_fbc - enable/disable FBC as needed
378277487Skib * @dev: the drm_device
379277487Skib *
380277487Skib * Set up the framebuffer compression hardware at mode set time.  We
381277487Skib * enable it if possible:
382277487Skib *   - plane A only (on pre-965)
383277487Skib *   - no pixel mulitply/line duplication
384277487Skib *   - no alpha buffer discard
385277487Skib *   - no dual wide
386277487Skib *   - framebuffer <= 2048 in width, 1536 in height
387277487Skib *
388277487Skib * We can't assume that any compression will take place (worst case),
389277487Skib * so the compressed buffer has to be the same size as the uncompressed
390277487Skib * one.  It also must reside (along with the line length buffer) in
391277487Skib * stolen memory.
392277487Skib *
393277487Skib * We need to enable/disable FBC on a global basis.
394277487Skib */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int enable_fbc;

	DRM_DEBUG_KMS("\n");

	/* FBC is pointless (and disabled) without powersaving enabled. */
	if (!i915_powersave)
		return;

	if (!I915_HAS_FBC(dev))
		return;

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	/* Find the single enabled CRTC with a framebuffer; bail if >1. */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (tmp_crtc->enabled && tmp_crtc->fb) {
			if (crtc) {
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->fb == NULL) {
		DRM_DEBUG_KMS("no output, disabling\n");
		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	/* Module parameter: <0 means per-chip default (off for gen<=6). */
	enable_fbc = i915_enable_fbc;
	if (enable_fbc < 0) {
		DRM_DEBUG_KMS("fbc set to per-chip default\n");
		enable_fbc = 1;
		if (INTEL_INFO(dev)->gen <= 6)
			enable_fbc = 0;
	}
	if (!enable_fbc) {
		DRM_DEBUG_KMS("fbc disabled per module param\n");
		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
		goto out_disable;
	}
	/* Worst case: no compression, so the fb must fit the CFB whole. */
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
		DRM_DEBUG_KMS("framebuffer too large, disabling "
			      "compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}
	/* Hardware limit: 2048x1536 max (see block comment above). */
	if ((crtc->mode.hdisplay > 2048) ||
	    (crtc->mode.vdisplay > 1536)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	/* 915GM/945GM can only compress plane A. */
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (kdb_active)
		goto out_disable;

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->cfb_plane == intel_crtc->plane &&
	    dev_priv->cfb_fb == fb->base.id &&
	    dev_priv->cfb_y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	/* Schedule the (deferred) enable with a 500-unit interval. */
	intel_enable_fbc(crtc, 500);
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
}
543277487Skib
/*
 * Read FSB and memory clock frequencies (MHz) from CLKCFG and the DDR3
 * strap on Pineview, caching them in dev_priv->fsb_freq /
 * dev_priv->mem_freq / dev_priv->is_ddr3 for the CxSR latency lookup.
 * Note: an unrecognized CLKCFG field leaves the cached value untouched
 * (the switches have no default case).
 */
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq =  667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
582277487Skib
/*
 * Decode memory and FSB frequencies (MHz) from the DDRMPLL1 / CSIPLL0
 * PLL registers on Ironlake, caching them in dev_priv->mem_freq and
 * dev_priv->fsb_freq (0 on an unknown encoding).  Also derives the IPS
 * parameters r_t (= mem_freq) and the c_m band from the FSB speed.
 */
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	/* Bucket the FSB speed into the three c_m bands used by IPS. */
	if (dev_priv->fsb_freq == 3200) {
		dev_priv->c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->c_m = 1;
	} else {
		dev_priv->c_m = 2;
	}
}
650277487Skib
/*
 * CxSR latency table, keyed by the first four columns:
 *   { is_desktop, is_ddr3, fsb_freq, mem_freq, ... }
 * (field order confirmed by the matching in intel_get_cxsr_latency()).
 * The remaining four columns are latency values — presumably display /
 * cursor latencies for SR on/off; TODO confirm against struct
 * cxsr_latency's field names in the header.
 */
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
688277487Skib
689277487Skibstatic const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
690277487Skib							 int is_ddr3,
691277487Skib							 int fsb,
692277487Skib							 int mem)
693277487Skib{
694277487Skib	const struct cxsr_latency *latency;
695277487Skib	int i;
696277487Skib
697277487Skib	if (fsb == 0 || mem == 0)
698277487Skib		return NULL;
699277487Skib
700277487Skib	for (i = 0; i < DRM_ARRAY_SIZE(cxsr_latency_table); i++) {
701277487Skib		latency = &cxsr_latency_table[i];
702277487Skib		if (is_desktop == latency->is_desktop &&
703277487Skib		    is_ddr3 == latency->is_ddr3 &&
704277487Skib		    fsb == latency->fsb_freq && mem == latency->mem_freq)
705277487Skib			return latency;
706277487Skib	}
707277487Skib
708277487Skib	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
709277487Skib
710277487Skib	return NULL;
711277487Skib}
712277487Skib
713277487Skibstatic void pineview_disable_cxsr(struct drm_device *dev)
714277487Skib{
715277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
716277487Skib
717277487Skib	/* deactivate cxsr */
718277487Skib	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
719277487Skib}
720277487Skib
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000; /* 5 us, in nanoseconds */
736277487Skib
737277487Skibstatic int i9xx_get_fifo_size(struct drm_device *dev, int plane)
738277487Skib{
739277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
740277487Skib	uint32_t dsparb = I915_READ(DSPARB);
741277487Skib	int size;
742277487Skib
743277487Skib	size = dsparb & 0x7f;
744277487Skib	if (plane)
745277487Skib		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
746277487Skib
747277487Skib	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
748277487Skib		      plane ? "B" : "A", size);
749277487Skib
750277487Skib	return size;
751277487Skib}
752277487Skib
753277487Skibstatic int i85x_get_fifo_size(struct drm_device *dev, int plane)
754277487Skib{
755277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
756277487Skib	uint32_t dsparb = I915_READ(DSPARB);
757277487Skib	int size;
758277487Skib
759277487Skib	size = dsparb & 0x1ff;
760277487Skib	if (plane)
761277487Skib		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
762277487Skib	size >>= 1; /* Convert to cachelines */
763277487Skib
764277487Skib	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
765277487Skib		      plane ? "B" : "A", size);
766277487Skib
767277487Skib	return size;
768277487Skib}
769277487Skib
770277487Skibstatic int i845_get_fifo_size(struct drm_device *dev, int plane)
771277487Skib{
772277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
773277487Skib	uint32_t dsparb = I915_READ(DSPARB);
774277487Skib	int size;
775277487Skib
776277487Skib	size = dsparb & 0x7f;
777277487Skib	size >>= 2; /* Convert to cachelines */
778277487Skib
779277487Skib	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
780277487Skib		      plane ? "B" : "A",
781277487Skib		      size);
782277487Skib
783277487Skib	return size;
784277487Skib}
785277487Skib
786277487Skibstatic int i830_get_fifo_size(struct drm_device *dev, int plane)
787277487Skib{
788277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
789277487Skib	uint32_t dsparb = I915_READ(DSPARB);
790277487Skib	int size;
791277487Skib
792277487Skib	size = dsparb & 0x7f;
793277487Skib	size >>= 1; /* Convert to cachelines */
794277487Skib
795277487Skib	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
796277487Skib		      plane ? "B" : "A", size);
797277487Skib
798277487Skib	return size;
799277487Skib}
800277487Skib
801277487Skib/* Pineview has different values for various configs */
802277487Skibstatic const struct intel_watermark_params pineview_display_wm = {
803277487Skib	PINEVIEW_DISPLAY_FIFO,
804277487Skib	PINEVIEW_MAX_WM,
805277487Skib	PINEVIEW_DFT_WM,
806277487Skib	PINEVIEW_GUARD_WM,
807277487Skib	PINEVIEW_FIFO_LINE_SIZE
808277487Skib};
809277487Skibstatic const struct intel_watermark_params pineview_display_hplloff_wm = {
810277487Skib	PINEVIEW_DISPLAY_FIFO,
811277487Skib	PINEVIEW_MAX_WM,
812277487Skib	PINEVIEW_DFT_HPLLOFF_WM,
813277487Skib	PINEVIEW_GUARD_WM,
814277487Skib	PINEVIEW_FIFO_LINE_SIZE
815277487Skib};
816277487Skibstatic const struct intel_watermark_params pineview_cursor_wm = {
817277487Skib	PINEVIEW_CURSOR_FIFO,
818277487Skib	PINEVIEW_CURSOR_MAX_WM,
819277487Skib	PINEVIEW_CURSOR_DFT_WM,
820277487Skib	PINEVIEW_CURSOR_GUARD_WM,
821277487Skib	PINEVIEW_FIFO_LINE_SIZE,
822277487Skib};
823277487Skibstatic const struct intel_watermark_params pineview_cursor_hplloff_wm = {
824277487Skib	PINEVIEW_CURSOR_FIFO,
825277487Skib	PINEVIEW_CURSOR_MAX_WM,
826277487Skib	PINEVIEW_CURSOR_DFT_WM,
827277487Skib	PINEVIEW_CURSOR_GUARD_WM,
828277487Skib	PINEVIEW_FIFO_LINE_SIZE
829277487Skib};
830277487Skibstatic const struct intel_watermark_params g4x_wm_info = {
831277487Skib	G4X_FIFO_SIZE,
832277487Skib	G4X_MAX_WM,
833277487Skib	G4X_MAX_WM,
834277487Skib	2,
835277487Skib	G4X_FIFO_LINE_SIZE,
836277487Skib};
837277487Skibstatic const struct intel_watermark_params g4x_cursor_wm_info = {
838277487Skib	I965_CURSOR_FIFO,
839277487Skib	I965_CURSOR_MAX_WM,
840277487Skib	I965_CURSOR_DFT_WM,
841277487Skib	2,
842277487Skib	G4X_FIFO_LINE_SIZE,
843277487Skib};
844277487Skibstatic const struct intel_watermark_params valleyview_wm_info = {
845277487Skib	VALLEYVIEW_FIFO_SIZE,
846277487Skib	VALLEYVIEW_MAX_WM,
847277487Skib	VALLEYVIEW_MAX_WM,
848277487Skib	2,
849277487Skib	G4X_FIFO_LINE_SIZE,
850277487Skib};
851277487Skibstatic const struct intel_watermark_params valleyview_cursor_wm_info = {
852277487Skib	I965_CURSOR_FIFO,
853277487Skib	VALLEYVIEW_CURSOR_MAX_WM,
854277487Skib	I965_CURSOR_DFT_WM,
855277487Skib	2,
856277487Skib	G4X_FIFO_LINE_SIZE,
857277487Skib};
858277487Skibstatic const struct intel_watermark_params i965_cursor_wm_info = {
859277487Skib	I965_CURSOR_FIFO,
860277487Skib	I965_CURSOR_MAX_WM,
861277487Skib	I965_CURSOR_DFT_WM,
862277487Skib	2,
863277487Skib	I915_FIFO_LINE_SIZE,
864277487Skib};
865277487Skibstatic const struct intel_watermark_params i945_wm_info = {
866277487Skib	I945_FIFO_SIZE,
867277487Skib	I915_MAX_WM,
868277487Skib	1,
869277487Skib	2,
870277487Skib	I915_FIFO_LINE_SIZE
871277487Skib};
872277487Skibstatic const struct intel_watermark_params i915_wm_info = {
873277487Skib	I915_FIFO_SIZE,
874277487Skib	I915_MAX_WM,
875277487Skib	1,
876277487Skib	2,
877277487Skib	I915_FIFO_LINE_SIZE
878277487Skib};
879277487Skibstatic const struct intel_watermark_params i855_wm_info = {
880277487Skib	I855GM_FIFO_SIZE,
881277487Skib	I915_MAX_WM,
882277487Skib	1,
883277487Skib	2,
884277487Skib	I830_FIFO_LINE_SIZE
885277487Skib};
886277487Skibstatic const struct intel_watermark_params i830_wm_info = {
887277487Skib	I830_FIFO_SIZE,
888277487Skib	I915_MAX_WM,
889277487Skib	1,
890277487Skib	2,
891277487Skib	I830_FIFO_LINE_SIZE
892277487Skib};
893277487Skib
894277487Skibstatic const struct intel_watermark_params ironlake_display_wm_info = {
895277487Skib	ILK_DISPLAY_FIFO,
896277487Skib	ILK_DISPLAY_MAXWM,
897277487Skib	ILK_DISPLAY_DFTWM,
898277487Skib	2,
899277487Skib	ILK_FIFO_LINE_SIZE
900277487Skib};
901277487Skibstatic const struct intel_watermark_params ironlake_cursor_wm_info = {
902277487Skib	ILK_CURSOR_FIFO,
903277487Skib	ILK_CURSOR_MAXWM,
904277487Skib	ILK_CURSOR_DFTWM,
905277487Skib	2,
906277487Skib	ILK_FIFO_LINE_SIZE
907277487Skib};
908277487Skibstatic const struct intel_watermark_params ironlake_display_srwm_info = {
909277487Skib	ILK_DISPLAY_SR_FIFO,
910277487Skib	ILK_DISPLAY_MAX_SRWM,
911277487Skib	ILK_DISPLAY_DFT_SRWM,
912277487Skib	2,
913277487Skib	ILK_FIFO_LINE_SIZE
914277487Skib};
915277487Skibstatic const struct intel_watermark_params ironlake_cursor_srwm_info = {
916277487Skib	ILK_CURSOR_SR_FIFO,
917277487Skib	ILK_CURSOR_MAX_SRWM,
918277487Skib	ILK_CURSOR_DFT_SRWM,
919277487Skib	2,
920277487Skib	ILK_FIFO_LINE_SIZE
921277487Skib};
922277487Skib
923277487Skibstatic const struct intel_watermark_params sandybridge_display_wm_info = {
924277487Skib	SNB_DISPLAY_FIFO,
925277487Skib	SNB_DISPLAY_MAXWM,
926277487Skib	SNB_DISPLAY_DFTWM,
927277487Skib	2,
928277487Skib	SNB_FIFO_LINE_SIZE
929277487Skib};
930277487Skibstatic const struct intel_watermark_params sandybridge_cursor_wm_info = {
931277487Skib	SNB_CURSOR_FIFO,
932277487Skib	SNB_CURSOR_MAXWM,
933277487Skib	SNB_CURSOR_DFTWM,
934277487Skib	2,
935277487Skib	SNB_FIFO_LINE_SIZE
936277487Skib};
937277487Skibstatic const struct intel_watermark_params sandybridge_display_srwm_info = {
938277487Skib	SNB_DISPLAY_SR_FIFO,
939277487Skib	SNB_DISPLAY_MAX_SRWM,
940277487Skib	SNB_DISPLAY_DFT_SRWM,
941277487Skib	2,
942277487Skib	SNB_FIFO_LINE_SIZE
943277487Skib};
944277487Skibstatic const struct intel_watermark_params sandybridge_cursor_srwm_info = {
945277487Skib	SNB_CURSOR_SR_FIFO,
946277487Skib	SNB_CURSOR_MAX_SRWM,
947277487Skib	SNB_CURSOR_DFT_SRWM,
948277487Skib	2,
949277487Skib	SNB_FIFO_LINE_SIZE
950277487Skib};
951277487Skib
952277487Skib
953277487Skib/**
954277487Skib * intel_calculate_wm - calculate watermark level
955277487Skib * @clock_in_khz: pixel clock
956277487Skib * @wm: chip FIFO params
957277487Skib * @pixel_size: display pixel size
958277487Skib * @latency_ns: memory latency for the platform
959277487Skib *
960277487Skib * Calculate the watermark level (the level at which the display plane will
961277487Skib * start fetching from memory again).  Each chip has a different display
962277487Skib * FIFO size and allocation, so the caller needs to figure that out and pass
963277487Skib * in the correct intel_watermark_params structure.
964277487Skib *
965277487Skib * As the pixel clock runs, the FIFO will be drained at a rate that depends
966277487Skib * on the pixel size.  When it reaches the watermark level, it'll start
967277487Skib * fetching FIFO line sized based chunks from memory until the FIFO fills
968277487Skib * past the watermark point.  If the FIFO drains completely, a FIFO underrun
969277487Skib * will occur, and a display engine hang could result.
970277487Skib */
971277487Skibstatic unsigned long intel_calculate_wm(unsigned long clock_in_khz,
972277487Skib					const struct intel_watermark_params *wm,
973277487Skib					int fifo_size,
974277487Skib					int pixel_size,
975277487Skib					unsigned long latency_ns)
976277487Skib{
977277487Skib	long entries_required, wm_size;
978277487Skib
979277487Skib	/*
980277487Skib	 * Note: we need to make sure we don't overflow for various clock &
981277487Skib	 * latency values.
982277487Skib	 * clocks go from a few thousand to several hundred thousand.
983277487Skib	 * latency is usually a few thousand
984277487Skib	 */
985277487Skib	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
986277487Skib		1000;
987277487Skib	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
988277487Skib
989277487Skib	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
990277487Skib
991277487Skib	wm_size = fifo_size - (entries_required + wm->guard_size);
992277487Skib
993277487Skib	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
994277487Skib
995277487Skib	/* Don't promote wm_size to unsigned... */
996277487Skib	if (wm_size > (long)wm->max_wm)
997277487Skib		wm_size = wm->max_wm;
998277487Skib	if (wm_size <= 0)
999277487Skib		wm_size = wm->default_wm;
1000277487Skib	return wm_size;
1001277487Skib}
1002277487Skib
1003277487Skibstatic struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
1004277487Skib{
1005277487Skib	struct drm_crtc *crtc, *enabled = NULL;
1006277487Skib
1007277487Skib	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1008277487Skib		if (crtc->enabled && crtc->fb) {
1009277487Skib			if (enabled)
1010277487Skib				return NULL;
1011277487Skib			enabled = crtc;
1012277487Skib		}
1013277487Skib	}
1014277487Skib
1015277487Skib	return enabled;
1016277487Skib}
1017277487Skib
1018277487Skibstatic void pineview_update_wm(struct drm_device *dev)
1019277487Skib{
1020277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1021277487Skib	struct drm_crtc *crtc;
1022277487Skib	const struct cxsr_latency *latency;
1023277487Skib	u32 reg;
1024277487Skib	unsigned long wm;
1025277487Skib
1026277487Skib	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
1027277487Skib					 dev_priv->fsb_freq, dev_priv->mem_freq);
1028277487Skib	if (!latency) {
1029277487Skib		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
1030277487Skib		pineview_disable_cxsr(dev);
1031277487Skib		return;
1032277487Skib	}
1033277487Skib
1034277487Skib	crtc = single_enabled_crtc(dev);
1035277487Skib	if (crtc) {
1036277487Skib		int clock = crtc->mode.clock;
1037277487Skib		int pixel_size = crtc->fb->bits_per_pixel / 8;
1038277487Skib
1039277487Skib		/* Display SR */
1040277487Skib		wm = intel_calculate_wm(clock, &pineview_display_wm,
1041277487Skib					pineview_display_wm.fifo_size,
1042277487Skib					pixel_size, latency->display_sr);
1043277487Skib		reg = I915_READ(DSPFW1);
1044277487Skib		reg &= ~DSPFW_SR_MASK;
1045277487Skib		reg |= wm << DSPFW_SR_SHIFT;
1046277487Skib		I915_WRITE(DSPFW1, reg);
1047277487Skib		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
1048277487Skib
1049277487Skib		/* cursor SR */
1050277487Skib		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
1051277487Skib					pineview_display_wm.fifo_size,
1052277487Skib					pixel_size, latency->cursor_sr);
1053277487Skib		reg = I915_READ(DSPFW3);
1054277487Skib		reg &= ~DSPFW_CURSOR_SR_MASK;
1055277487Skib		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
1056277487Skib		I915_WRITE(DSPFW3, reg);
1057277487Skib
1058277487Skib		/* Display HPLL off SR */
1059277487Skib		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
1060277487Skib					pineview_display_hplloff_wm.fifo_size,
1061277487Skib					pixel_size, latency->display_hpll_disable);
1062277487Skib		reg = I915_READ(DSPFW3);
1063277487Skib		reg &= ~DSPFW_HPLL_SR_MASK;
1064277487Skib		reg |= wm & DSPFW_HPLL_SR_MASK;
1065277487Skib		I915_WRITE(DSPFW3, reg);
1066277487Skib
1067277487Skib		/* cursor HPLL off SR */
1068277487Skib		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
1069277487Skib					pineview_display_hplloff_wm.fifo_size,
1070277487Skib					pixel_size, latency->cursor_hpll_disable);
1071277487Skib		reg = I915_READ(DSPFW3);
1072277487Skib		reg &= ~DSPFW_HPLL_CURSOR_MASK;
1073277487Skib		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
1074277487Skib		I915_WRITE(DSPFW3, reg);
1075277487Skib		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
1076277487Skib
1077277487Skib		/* activate cxsr */
1078277487Skib		I915_WRITE(DSPFW3,
1079277487Skib			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
1080277487Skib		DRM_DEBUG_KMS("Self-refresh is enabled\n");
1081277487Skib	} else {
1082277487Skib		pineview_disable_cxsr(dev);
1083277487Skib		DRM_DEBUG_KMS("Self-refresh is disabled\n");
1084277487Skib	}
1085277487Skib}
1086277487Skib
1087277487Skibstatic bool g4x_compute_wm0(struct drm_device *dev,
1088277487Skib			    int plane,
1089277487Skib			    const struct intel_watermark_params *display,
1090277487Skib			    int display_latency_ns,
1091277487Skib			    const struct intel_watermark_params *cursor,
1092277487Skib			    int cursor_latency_ns,
1093277487Skib			    int *plane_wm,
1094277487Skib			    int *cursor_wm)
1095277487Skib{
1096277487Skib	struct drm_crtc *crtc;
1097277487Skib	int htotal, hdisplay, clock, pixel_size;
1098277487Skib	int line_time_us, line_count;
1099277487Skib	int entries, tlb_miss;
1100277487Skib
1101277487Skib	crtc = intel_get_crtc_for_plane(dev, plane);
1102277487Skib	if (crtc->fb == NULL || !crtc->enabled) {
1103277487Skib		*cursor_wm = cursor->guard_size;
1104277487Skib		*plane_wm = display->guard_size;
1105277487Skib		return false;
1106277487Skib	}
1107277487Skib
1108277487Skib	htotal = crtc->mode.htotal;
1109277487Skib	hdisplay = crtc->mode.hdisplay;
1110277487Skib	clock = crtc->mode.clock;
1111277487Skib	pixel_size = crtc->fb->bits_per_pixel / 8;
1112277487Skib
1113277487Skib	/* Use the small buffer method to calculate plane watermark */
1114277487Skib	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1115277487Skib	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
1116277487Skib	if (tlb_miss > 0)
1117277487Skib		entries += tlb_miss;
1118277487Skib	entries = DIV_ROUND_UP(entries, display->cacheline_size);
1119277487Skib	*plane_wm = entries + display->guard_size;
1120277487Skib	if (*plane_wm > (int)display->max_wm)
1121277487Skib		*plane_wm = display->max_wm;
1122277487Skib
1123277487Skib	/* Use the large buffer method to calculate cursor watermark */
1124277487Skib	line_time_us = ((htotal * 1000) / clock);
1125277487Skib	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
1126277487Skib	entries = line_count * 64 * pixel_size;
1127277487Skib	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
1128277487Skib	if (tlb_miss > 0)
1129277487Skib		entries += tlb_miss;
1130277487Skib	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1131277487Skib	*cursor_wm = entries + cursor->guard_size;
1132277487Skib	if (*cursor_wm > (int)cursor->max_wm)
1133277487Skib		*cursor_wm = (int)cursor->max_wm;
1134277487Skib
1135277487Skib	return true;
1136277487Skib}
1137277487Skib
1138277487Skib/*
1139277487Skib * Check the wm result.
1140277487Skib *
1141277487Skib * If any calculated watermark values is larger than the maximum value that
1142277487Skib * can be programmed into the associated watermark register, that watermark
1143277487Skib * must be disabled.
1144277487Skib */
1145277487Skibstatic bool g4x_check_srwm(struct drm_device *dev,
1146277487Skib			   int display_wm, int cursor_wm,
1147277487Skib			   const struct intel_watermark_params *display,
1148277487Skib			   const struct intel_watermark_params *cursor)
1149277487Skib{
1150277487Skib	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
1151277487Skib		      display_wm, cursor_wm);
1152277487Skib
1153277487Skib	if (display_wm > display->max_wm) {
1154277487Skib		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
1155277487Skib			      display_wm, display->max_wm);
1156277487Skib		return false;
1157277487Skib	}
1158277487Skib
1159277487Skib	if (cursor_wm > cursor->max_wm) {
1160277487Skib		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
1161277487Skib			      cursor_wm, cursor->max_wm);
1162277487Skib		return false;
1163277487Skib	}
1164277487Skib
1165277487Skib	if (!(display_wm || cursor_wm)) {
1166277487Skib		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
1167277487Skib		return false;
1168277487Skib	}
1169277487Skib
1170277487Skib	return true;
1171277487Skib}
1172277487Skib
1173277487Skibstatic bool g4x_compute_srwm(struct drm_device *dev,
1174277487Skib			     int plane,
1175277487Skib			     int latency_ns,
1176277487Skib			     const struct intel_watermark_params *display,
1177277487Skib			     const struct intel_watermark_params *cursor,
1178277487Skib			     int *display_wm, int *cursor_wm)
1179277487Skib{
1180277487Skib	struct drm_crtc *crtc;
1181277487Skib	int hdisplay, htotal, pixel_size, clock;
1182277487Skib	unsigned long line_time_us;
1183277487Skib	int line_count, line_size;
1184277487Skib	int small, large;
1185277487Skib	int entries;
1186277487Skib
1187277487Skib	if (!latency_ns) {
1188277487Skib		*display_wm = *cursor_wm = 0;
1189277487Skib		return false;
1190277487Skib	}
1191277487Skib
1192277487Skib	crtc = intel_get_crtc_for_plane(dev, plane);
1193277487Skib	hdisplay = crtc->mode.hdisplay;
1194277487Skib	htotal = crtc->mode.htotal;
1195277487Skib	clock = crtc->mode.clock;
1196277487Skib	pixel_size = crtc->fb->bits_per_pixel / 8;
1197277487Skib
1198277487Skib	line_time_us = (htotal * 1000) / clock;
1199277487Skib	line_count = (latency_ns / line_time_us + 1000) / 1000;
1200277487Skib	line_size = hdisplay * pixel_size;
1201277487Skib
1202277487Skib	/* Use the minimum of the small and large buffer method for primary */
1203277487Skib	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1204277487Skib	large = line_count * line_size;
1205277487Skib
1206277487Skib	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1207277487Skib	*display_wm = entries + display->guard_size;
1208277487Skib
1209277487Skib	/* calculate the self-refresh watermark for display cursor */
1210277487Skib	entries = line_count * pixel_size * 64;
1211277487Skib	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1212277487Skib	*cursor_wm = entries + cursor->guard_size;
1213277487Skib
1214277487Skib	return g4x_check_srwm(dev,
1215277487Skib			      *display_wm, *cursor_wm,
1216277487Skib			      display, cursor);
1217277487Skib}
1218277487Skib
1219277487Skibstatic bool vlv_compute_drain_latency(struct drm_device *dev,
1220277487Skib				     int plane,
1221277487Skib				     int *plane_prec_mult,
1222277487Skib				     int *plane_dl,
1223277487Skib				     int *cursor_prec_mult,
1224277487Skib				     int *cursor_dl)
1225277487Skib{
1226277487Skib	struct drm_crtc *crtc;
1227277487Skib	int clock, pixel_size;
1228277487Skib	int entries;
1229277487Skib
1230277487Skib	crtc = intel_get_crtc_for_plane(dev, plane);
1231277487Skib	if (crtc->fb == NULL || !crtc->enabled)
1232277487Skib		return false;
1233277487Skib
1234277487Skib	clock = crtc->mode.clock;	/* VESA DOT Clock */
1235277487Skib	pixel_size = crtc->fb->bits_per_pixel / 8;	/* BPP */
1236277487Skib
1237277487Skib	entries = (clock / 1000) * pixel_size;
1238277487Skib	*plane_prec_mult = (entries > 256) ?
1239277487Skib		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
1240277487Skib	*plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
1241277487Skib						     pixel_size);
1242277487Skib
1243277487Skib	entries = (clock / 1000) * 4;	/* BPP is always 4 for cursor */
1244277487Skib	*cursor_prec_mult = (entries > 256) ?
1245277487Skib		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
1246277487Skib	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
1247277487Skib
1248277487Skib	return true;
1249277487Skib}
1250277487Skib
1251277487Skib/*
1252277487Skib * Update drain latency registers of memory arbiter
1253277487Skib *
1254277487Skib * Valleyview SoC has a new memory arbiter and needs drain latency registers
1255277487Skib * to be programmed. Each plane has a drain latency multiplier and a drain
1256277487Skib * latency value.
1257277487Skib */
1258277487Skib
1259277487Skibstatic void vlv_update_drain_latency(struct drm_device *dev)
1260277487Skib{
1261277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1262277487Skib	int planea_prec, planea_dl, planeb_prec, planeb_dl;
1263277487Skib	int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
1264277487Skib	int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
1265277487Skib							either 16 or 32 */
1266277487Skib
1267277487Skib	/* For plane A, Cursor A */
1268277487Skib	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
1269277487Skib				      &cursor_prec_mult, &cursora_dl)) {
1270277487Skib		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1271277487Skib			DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
1272277487Skib		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1273277487Skib			DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
1274277487Skib
1275277487Skib		I915_WRITE(VLV_DDL1, cursora_prec |
1276277487Skib				(cursora_dl << DDL_CURSORA_SHIFT) |
1277277487Skib				planea_prec | planea_dl);
1278277487Skib	}
1279277487Skib
1280277487Skib	/* For plane B, Cursor B */
1281277487Skib	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
1282277487Skib				      &cursor_prec_mult, &cursorb_dl)) {
1283277487Skib		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1284277487Skib			DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
1285277487Skib		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1286277487Skib			DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
1287277487Skib
1288277487Skib		I915_WRITE(VLV_DDL2, cursorb_prec |
1289277487Skib				(cursorb_dl << DDL_CURSORB_SHIFT) |
1290277487Skib				planeb_prec | planeb_dl);
1291277487Skib	}
1292277487Skib}
1293277487Skib
1294277487Skib#define single_plane_enabled(mask) ((mask) != 0 && powerof2(mask))
1295277487Skib
1296277487Skibstatic void valleyview_update_wm(struct drm_device *dev)
1297277487Skib{
1298277487Skib	static const int sr_latency_ns = 12000;
1299277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1300277487Skib	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1301277487Skib	int plane_sr, cursor_sr;
1302277487Skib	unsigned int enabled = 0;
1303277487Skib
1304277487Skib	vlv_update_drain_latency(dev);
1305277487Skib
1306277487Skib	if (g4x_compute_wm0(dev, 0,
1307277487Skib			    &valleyview_wm_info, latency_ns,
1308277487Skib			    &valleyview_cursor_wm_info, latency_ns,
1309277487Skib			    &planea_wm, &cursora_wm))
1310277487Skib		enabled |= 1;
1311277487Skib
1312277487Skib	if (g4x_compute_wm0(dev, 1,
1313277487Skib			    &valleyview_wm_info, latency_ns,
1314277487Skib			    &valleyview_cursor_wm_info, latency_ns,
1315277487Skib			    &planeb_wm, &cursorb_wm))
1316277487Skib		enabled |= 2;
1317277487Skib
1318277487Skib	plane_sr = cursor_sr = 0;
1319277487Skib	if (single_plane_enabled(enabled) &&
1320277487Skib	    g4x_compute_srwm(dev, ffs(enabled) - 1,
1321277487Skib			     sr_latency_ns,
1322277487Skib			     &valleyview_wm_info,
1323277487Skib			     &valleyview_cursor_wm_info,
1324277487Skib			     &plane_sr, &cursor_sr))
1325277487Skib		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
1326277487Skib	else
1327277487Skib		I915_WRITE(FW_BLC_SELF_VLV,
1328277487Skib			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
1329277487Skib
1330277487Skib	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1331277487Skib		      planea_wm, cursora_wm,
1332277487Skib		      planeb_wm, cursorb_wm,
1333277487Skib		      plane_sr, cursor_sr);
1334277487Skib
1335277487Skib	I915_WRITE(DSPFW1,
1336277487Skib		   (plane_sr << DSPFW_SR_SHIFT) |
1337277487Skib		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1338277487Skib		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
1339277487Skib		   planea_wm);
1340277487Skib	I915_WRITE(DSPFW2,
1341277487Skib		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
1342277487Skib		   (cursora_wm << DSPFW_CURSORA_SHIFT));
1343277487Skib	I915_WRITE(DSPFW3,
1344277487Skib		   (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
1345277487Skib}
1346277487Skib
1347277487Skibstatic void g4x_update_wm(struct drm_device *dev)
1348277487Skib{
1349277487Skib	static const int sr_latency_ns = 12000;
1350277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1351277487Skib	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1352277487Skib	int plane_sr, cursor_sr;
1353277487Skib	unsigned int enabled = 0;
1354277487Skib
1355277487Skib	if (g4x_compute_wm0(dev, 0,
1356277487Skib			    &g4x_wm_info, latency_ns,
1357277487Skib			    &g4x_cursor_wm_info, latency_ns,
1358277487Skib			    &planea_wm, &cursora_wm))
1359277487Skib		enabled |= 1;
1360277487Skib
1361277487Skib	if (g4x_compute_wm0(dev, 1,
1362277487Skib			    &g4x_wm_info, latency_ns,
1363277487Skib			    &g4x_cursor_wm_info, latency_ns,
1364277487Skib			    &planeb_wm, &cursorb_wm))
1365277487Skib		enabled |= 2;
1366277487Skib
1367277487Skib	plane_sr = cursor_sr = 0;
1368277487Skib	if (single_plane_enabled(enabled) &&
1369277487Skib	    g4x_compute_srwm(dev, ffs(enabled) - 1,
1370277487Skib			     sr_latency_ns,
1371277487Skib			     &g4x_wm_info,
1372277487Skib			     &g4x_cursor_wm_info,
1373277487Skib			     &plane_sr, &cursor_sr))
1374277487Skib		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1375277487Skib	else
1376277487Skib		I915_WRITE(FW_BLC_SELF,
1377277487Skib			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
1378277487Skib
1379277487Skib	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1380277487Skib		      planea_wm, cursora_wm,
1381277487Skib		      planeb_wm, cursorb_wm,
1382277487Skib		      plane_sr, cursor_sr);
1383277487Skib
1384277487Skib	I915_WRITE(DSPFW1,
1385277487Skib		   (plane_sr << DSPFW_SR_SHIFT) |
1386277487Skib		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1387277487Skib		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
1388277487Skib		   planea_wm);
1389277487Skib	I915_WRITE(DSPFW2,
1390277487Skib		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
1391277487Skib		   (cursora_wm << DSPFW_CURSORA_SHIFT));
1392277487Skib	/* HPLL off in SR has some issues on G4x... disable it */
1393277487Skib	I915_WRITE(DSPFW3,
1394277487Skib		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
1395277487Skib		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1396277487Skib}
1397277487Skib
1398277487Skibstatic void i965_update_wm(struct drm_device *dev)
1399277487Skib{
1400277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1401277487Skib	struct drm_crtc *crtc;
1402277487Skib	int srwm = 1;
1403277487Skib	int cursor_sr = 16;
1404277487Skib
1405277487Skib	/* Calc sr entries for one plane configs */
1406277487Skib	crtc = single_enabled_crtc(dev);
1407277487Skib	if (crtc) {
1408277487Skib		/* self-refresh has much higher latency */
1409277487Skib		static const int sr_latency_ns = 12000;
1410277487Skib		int clock = crtc->mode.clock;
1411277487Skib		int htotal = crtc->mode.htotal;
1412277487Skib		int hdisplay = crtc->mode.hdisplay;
1413277487Skib		int pixel_size = crtc->fb->bits_per_pixel / 8;
1414277487Skib		unsigned long line_time_us;
1415277487Skib		int entries;
1416277487Skib
1417277487Skib		line_time_us = ((htotal * 1000) / clock);
1418277487Skib
1419277487Skib		/* Use ns/us then divide to preserve precision */
1420277487Skib		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1421277487Skib			pixel_size * hdisplay;
1422277487Skib		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
1423277487Skib		srwm = I965_FIFO_SIZE - entries;
1424277487Skib		if (srwm < 0)
1425277487Skib			srwm = 1;
1426277487Skib		srwm &= 0x1ff;
1427277487Skib		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
1428277487Skib			      entries, srwm);
1429277487Skib
1430277487Skib		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1431277487Skib			pixel_size * 64;
1432277487Skib		entries = DIV_ROUND_UP(entries,
1433277487Skib					  i965_cursor_wm_info.cacheline_size);
1434277487Skib		cursor_sr = i965_cursor_wm_info.fifo_size -
1435277487Skib			(entries + i965_cursor_wm_info.guard_size);
1436277487Skib
1437277487Skib		if (cursor_sr > i965_cursor_wm_info.max_wm)
1438277487Skib			cursor_sr = i965_cursor_wm_info.max_wm;
1439277487Skib
1440277487Skib		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
1441277487Skib			      "cursor %d\n", srwm, cursor_sr);
1442277487Skib
1443277487Skib		if (IS_CRESTLINE(dev))
1444277487Skib			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1445277487Skib	} else {
1446277487Skib		/* Turn off self refresh if both pipes are enabled */
1447277487Skib		if (IS_CRESTLINE(dev))
1448277487Skib			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
1449277487Skib				   & ~FW_BLC_SELF_EN);
1450277487Skib	}
1451277487Skib
1452277487Skib	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
1453277487Skib		      srwm);
1454277487Skib
1455277487Skib	/* 965 has limitations... */
1456277487Skib	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
1457277487Skib		   (8 << 16) | (8 << 8) | (8 << 0));
1458277487Skib	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
1459277487Skib	/* update cursor SR watermark */
1460277487Skib	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1461277487Skib}
1462277487Skib
1463277487Skibstatic void i9xx_update_wm(struct drm_device *dev)
1464277487Skib{
1465277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1466277487Skib	const struct intel_watermark_params *wm_info;
1467277487Skib	uint32_t fwater_lo;
1468277487Skib	uint32_t fwater_hi;
1469277487Skib	int cwm, srwm = 1;
1470277487Skib	int fifo_size;
1471277487Skib	int planea_wm, planeb_wm;
1472277487Skib	struct drm_crtc *crtc, *enabled = NULL;
1473277487Skib
1474277487Skib	if (IS_I945GM(dev))
1475277487Skib		wm_info = &i945_wm_info;
1476277487Skib	else if (!IS_GEN2(dev))
1477277487Skib		wm_info = &i915_wm_info;
1478277487Skib	else
1479277487Skib		wm_info = &i855_wm_info;
1480277487Skib
1481277487Skib	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1482277487Skib	crtc = intel_get_crtc_for_plane(dev, 0);
1483277487Skib	if (crtc->enabled && crtc->fb) {
1484277487Skib		planea_wm = intel_calculate_wm(crtc->mode.clock,
1485277487Skib					       wm_info, fifo_size,
1486277487Skib					       crtc->fb->bits_per_pixel / 8,
1487277487Skib					       latency_ns);
1488277487Skib		enabled = crtc;
1489277487Skib	} else
1490277487Skib		planea_wm = fifo_size - wm_info->guard_size;
1491277487Skib
1492277487Skib	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1493277487Skib	crtc = intel_get_crtc_for_plane(dev, 1);
1494277487Skib	if (crtc->enabled && crtc->fb) {
1495277487Skib		planeb_wm = intel_calculate_wm(crtc->mode.clock,
1496277487Skib					       wm_info, fifo_size,
1497277487Skib					       crtc->fb->bits_per_pixel / 8,
1498277487Skib					       latency_ns);
1499277487Skib		if (enabled == NULL)
1500277487Skib			enabled = crtc;
1501277487Skib		else
1502277487Skib			enabled = NULL;
1503277487Skib	} else
1504277487Skib		planeb_wm = fifo_size - wm_info->guard_size;
1505277487Skib
1506277487Skib	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1507277487Skib
1508277487Skib	/*
1509277487Skib	 * Overlay gets an aggressive default since video jitter is bad.
1510277487Skib	 */
1511277487Skib	cwm = 2;
1512277487Skib
1513277487Skib	/* Play safe and disable self-refresh before adjusting watermarks. */
1514277487Skib	if (IS_I945G(dev) || IS_I945GM(dev))
1515277487Skib		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
1516277487Skib	else if (IS_I915GM(dev))
1517277487Skib		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
1518277487Skib
1519277487Skib	/* Calc sr entries for one plane configs */
1520277487Skib	if (HAS_FW_BLC(dev) && enabled) {
1521277487Skib		/* self-refresh has much higher latency */
1522277487Skib		static const int sr_latency_ns = 6000;
1523277487Skib		int clock = enabled->mode.clock;
1524277487Skib		int htotal = enabled->mode.htotal;
1525277487Skib		int hdisplay = enabled->mode.hdisplay;
1526277487Skib		int pixel_size = enabled->fb->bits_per_pixel / 8;
1527277487Skib		unsigned long line_time_us;
1528277487Skib		int entries;
1529277487Skib
1530277487Skib		line_time_us = (htotal * 1000) / clock;
1531277487Skib
1532277487Skib		/* Use ns/us then divide to preserve precision */
1533277487Skib		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1534277487Skib			pixel_size * hdisplay;
1535277487Skib		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
1536277487Skib		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
1537277487Skib		srwm = wm_info->fifo_size - entries;
1538277487Skib		if (srwm < 0)
1539277487Skib			srwm = 1;
1540277487Skib
1541277487Skib		if (IS_I945G(dev) || IS_I945GM(dev))
1542277487Skib			I915_WRITE(FW_BLC_SELF,
1543277487Skib				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
1544277487Skib		else if (IS_I915GM(dev))
1545277487Skib			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
1546277487Skib	}
1547277487Skib
1548277487Skib	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
1549277487Skib		      planea_wm, planeb_wm, cwm, srwm);
1550277487Skib
1551277487Skib	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
1552277487Skib	fwater_hi = (cwm & 0x1f);
1553277487Skib
1554277487Skib	/* Set request length to 8 cachelines per fetch */
1555277487Skib	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
1556277487Skib	fwater_hi = fwater_hi | (1 << 8);
1557277487Skib
1558277487Skib	I915_WRITE(FW_BLC, fwater_lo);
1559277487Skib	I915_WRITE(FW_BLC2, fwater_hi);
1560277487Skib
1561277487Skib	if (HAS_FW_BLC(dev)) {
1562277487Skib		if (enabled) {
1563277487Skib			if (IS_I945G(dev) || IS_I945GM(dev))
1564277487Skib				I915_WRITE(FW_BLC_SELF,
1565277487Skib					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
1566277487Skib			else if (IS_I915GM(dev))
1567277487Skib				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
1568277487Skib			DRM_DEBUG_KMS("memory self refresh enabled\n");
1569277487Skib		} else
1570277487Skib			DRM_DEBUG_KMS("memory self refresh disabled\n");
1571277487Skib	}
1572277487Skib}
1573277487Skib
1574277487Skibstatic void i830_update_wm(struct drm_device *dev)
1575277487Skib{
1576277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1577277487Skib	struct drm_crtc *crtc;
1578277487Skib	uint32_t fwater_lo;
1579277487Skib	int planea_wm;
1580277487Skib
1581277487Skib	crtc = single_enabled_crtc(dev);
1582277487Skib	if (crtc == NULL)
1583277487Skib		return;
1584277487Skib
1585277487Skib	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
1586277487Skib				       dev_priv->display.get_fifo_size(dev, 0),
1587277487Skib				       crtc->fb->bits_per_pixel / 8,
1588277487Skib				       latency_ns);
1589277487Skib	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1590277487Skib	fwater_lo |= (3<<8) | planea_wm;
1591277487Skib
1592277487Skib	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
1593277487Skib
1594277487Skib	I915_WRITE(FW_BLC, fwater_lo);
1595277487Skib}
1596277487Skib
1597277487Skib#define ILK_LP0_PLANE_LATENCY		700
1598277487Skib#define ILK_LP0_CURSOR_LATENCY		1300
1599277487Skib
1600277487Skib/*
1601277487Skib * Check the wm result.
1602277487Skib *
1603277487Skib * If any calculated watermark values is larger than the maximum value that
1604277487Skib * can be programmed into the associated watermark register, that watermark
1605277487Skib * must be disabled.
1606277487Skib */
1607277487Skibstatic bool ironlake_check_srwm(struct drm_device *dev, int level,
1608277487Skib				int fbc_wm, int display_wm, int cursor_wm,
1609277487Skib				const struct intel_watermark_params *display,
1610277487Skib				const struct intel_watermark_params *cursor)
1611277487Skib{
1612277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1613277487Skib
1614277487Skib	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
1615277487Skib		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
1616277487Skib
1617277487Skib	if (fbc_wm > SNB_FBC_MAX_SRWM) {
1618277487Skib		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
1619277487Skib			      fbc_wm, SNB_FBC_MAX_SRWM, level);
1620277487Skib
1621277487Skib		/* fbc has it's own way to disable FBC WM */
1622277487Skib		I915_WRITE(DISP_ARB_CTL,
1623277487Skib			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
1624277487Skib		return false;
1625277487Skib	}
1626277487Skib
1627277487Skib	if (display_wm > display->max_wm) {
1628277487Skib		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
1629277487Skib			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
1630277487Skib		return false;
1631277487Skib	}
1632277487Skib
1633277487Skib	if (cursor_wm > cursor->max_wm) {
1634277487Skib		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
1635277487Skib			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
1636277487Skib		return false;
1637277487Skib	}
1638277487Skib
1639277487Skib	if (!(fbc_wm || display_wm || cursor_wm)) {
1640277487Skib		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
1641277487Skib		return false;
1642277487Skib	}
1643277487Skib
1644277487Skib	return true;
1645277487Skib}
1646277487Skib
1647277487Skib/*
1648277487Skib * Compute watermark values of WM[1-3],
1649277487Skib */
1650277487Skibstatic bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1651277487Skib				  int latency_ns,
1652277487Skib				  const struct intel_watermark_params *display,
1653277487Skib				  const struct intel_watermark_params *cursor,
1654277487Skib				  int *fbc_wm, int *display_wm, int *cursor_wm)
1655277487Skib{
1656277487Skib	struct drm_crtc *crtc;
1657277487Skib	unsigned long line_time_us;
1658277487Skib	int hdisplay, htotal, pixel_size, clock;
1659277487Skib	int line_count, line_size;
1660277487Skib	int small, large;
1661277487Skib	int entries;
1662277487Skib
1663277487Skib	if (!latency_ns) {
1664277487Skib		*fbc_wm = *display_wm = *cursor_wm = 0;
1665277487Skib		return false;
1666277487Skib	}
1667277487Skib
1668277487Skib	crtc = intel_get_crtc_for_plane(dev, plane);
1669277487Skib	hdisplay = crtc->mode.hdisplay;
1670277487Skib	htotal = crtc->mode.htotal;
1671277487Skib	clock = crtc->mode.clock;
1672277487Skib	pixel_size = crtc->fb->bits_per_pixel / 8;
1673277487Skib
1674277487Skib	line_time_us = (htotal * 1000) / clock;
1675277487Skib	line_count = (latency_ns / line_time_us + 1000) / 1000;
1676277487Skib	line_size = hdisplay * pixel_size;
1677277487Skib
1678277487Skib	/* Use the minimum of the small and large buffer method for primary */
1679277487Skib	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1680277487Skib	large = line_count * line_size;
1681277487Skib
1682277487Skib	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1683277487Skib	*display_wm = entries + display->guard_size;
1684277487Skib
1685277487Skib	/*
1686277487Skib	 * Spec says:
1687277487Skib	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
1688277487Skib	 */
1689277487Skib	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
1690277487Skib
1691277487Skib	/* calculate the self-refresh watermark for display cursor */
1692277487Skib	entries = line_count * pixel_size * 64;
1693277487Skib	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1694277487Skib	*cursor_wm = entries + cursor->guard_size;
1695277487Skib
1696277487Skib	return ironlake_check_srwm(dev, level,
1697277487Skib				   *fbc_wm, *display_wm, *cursor_wm,
1698277487Skib				   display, cursor);
1699277487Skib}
1700277487Skib
1701277487Skibstatic void ironlake_update_wm(struct drm_device *dev)
1702277487Skib{
1703277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1704277487Skib	int fbc_wm, plane_wm, cursor_wm;
1705277487Skib	unsigned int enabled;
1706277487Skib
1707277487Skib	enabled = 0;
1708277487Skib	if (g4x_compute_wm0(dev, 0,
1709277487Skib			    &ironlake_display_wm_info,
1710277487Skib			    ILK_LP0_PLANE_LATENCY,
1711277487Skib			    &ironlake_cursor_wm_info,
1712277487Skib			    ILK_LP0_CURSOR_LATENCY,
1713277487Skib			    &plane_wm, &cursor_wm)) {
1714277487Skib		I915_WRITE(WM0_PIPEA_ILK,
1715277487Skib			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1716277487Skib		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1717277487Skib			      " plane %d, " "cursor: %d\n",
1718277487Skib			      plane_wm, cursor_wm);
1719277487Skib		enabled |= 1;
1720277487Skib	}
1721277487Skib
1722277487Skib	if (g4x_compute_wm0(dev, 1,
1723277487Skib			    &ironlake_display_wm_info,
1724277487Skib			    ILK_LP0_PLANE_LATENCY,
1725277487Skib			    &ironlake_cursor_wm_info,
1726277487Skib			    ILK_LP0_CURSOR_LATENCY,
1727277487Skib			    &plane_wm, &cursor_wm)) {
1728277487Skib		I915_WRITE(WM0_PIPEB_ILK,
1729277487Skib			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1730277487Skib		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1731277487Skib			      " plane %d, cursor: %d\n",
1732277487Skib			      plane_wm, cursor_wm);
1733277487Skib		enabled |= 2;
1734277487Skib	}
1735277487Skib
1736277487Skib	/*
1737277487Skib	 * Calculate and update the self-refresh watermark only when one
1738277487Skib	 * display plane is used.
1739277487Skib	 */
1740277487Skib	I915_WRITE(WM3_LP_ILK, 0);
1741277487Skib	I915_WRITE(WM2_LP_ILK, 0);
1742277487Skib	I915_WRITE(WM1_LP_ILK, 0);
1743277487Skib
1744277487Skib	if (!single_plane_enabled(enabled))
1745277487Skib		return;
1746277487Skib	enabled = ffs(enabled) - 1;
1747277487Skib
1748277487Skib	/* WM1 */
1749277487Skib	if (!ironlake_compute_srwm(dev, 1, enabled,
1750277487Skib				   ILK_READ_WM1_LATENCY() * 500,
1751277487Skib				   &ironlake_display_srwm_info,
1752277487Skib				   &ironlake_cursor_srwm_info,
1753277487Skib				   &fbc_wm, &plane_wm, &cursor_wm))
1754277487Skib		return;
1755277487Skib
1756277487Skib	I915_WRITE(WM1_LP_ILK,
1757277487Skib		   WM1_LP_SR_EN |
1758277487Skib		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1759277487Skib		   (fbc_wm << WM1_LP_FBC_SHIFT) |
1760277487Skib		   (plane_wm << WM1_LP_SR_SHIFT) |
1761277487Skib		   cursor_wm);
1762277487Skib
1763277487Skib	/* WM2 */
1764277487Skib	if (!ironlake_compute_srwm(dev, 2, enabled,
1765277487Skib				   ILK_READ_WM2_LATENCY() * 500,
1766277487Skib				   &ironlake_display_srwm_info,
1767277487Skib				   &ironlake_cursor_srwm_info,
1768277487Skib				   &fbc_wm, &plane_wm, &cursor_wm))
1769277487Skib		return;
1770277487Skib
1771277487Skib	I915_WRITE(WM2_LP_ILK,
1772277487Skib		   WM2_LP_EN |
1773277487Skib		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1774277487Skib		   (fbc_wm << WM1_LP_FBC_SHIFT) |
1775277487Skib		   (plane_wm << WM1_LP_SR_SHIFT) |
1776277487Skib		   cursor_wm);
1777277487Skib
1778277487Skib	/*
1779277487Skib	 * WM3 is unsupported on ILK, probably because we don't have latency
1780277487Skib	 * data for that power state
1781277487Skib	 */
1782277487Skib}
1783277487Skib
1784277487Skibstatic void sandybridge_update_wm(struct drm_device *dev)
1785277487Skib{
1786277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1787277487Skib	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
1788277487Skib	u32 val;
1789277487Skib	int fbc_wm, plane_wm, cursor_wm;
1790277487Skib	unsigned int enabled;
1791277487Skib
1792277487Skib	enabled = 0;
1793277487Skib	if (g4x_compute_wm0(dev, 0,
1794277487Skib			    &sandybridge_display_wm_info, latency,
1795277487Skib			    &sandybridge_cursor_wm_info, latency,
1796277487Skib			    &plane_wm, &cursor_wm)) {
1797277487Skib		val = I915_READ(WM0_PIPEA_ILK);
1798277487Skib		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1799277487Skib		I915_WRITE(WM0_PIPEA_ILK, val |
1800277487Skib			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1801277487Skib		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1802277487Skib			      " plane %d, " "cursor: %d\n",
1803277487Skib			      plane_wm, cursor_wm);
1804277487Skib		enabled |= 1;
1805277487Skib	}
1806277487Skib
1807277487Skib	if (g4x_compute_wm0(dev, 1,
1808277487Skib			    &sandybridge_display_wm_info, latency,
1809277487Skib			    &sandybridge_cursor_wm_info, latency,
1810277487Skib			    &plane_wm, &cursor_wm)) {
1811277487Skib		val = I915_READ(WM0_PIPEB_ILK);
1812277487Skib		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1813277487Skib		I915_WRITE(WM0_PIPEB_ILK, val |
1814277487Skib			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1815277487Skib		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1816277487Skib			      " plane %d, cursor: %d\n",
1817277487Skib			      plane_wm, cursor_wm);
1818277487Skib		enabled |= 2;
1819277487Skib	}
1820277487Skib
1821277487Skib	if ((dev_priv->num_pipe == 3) &&
1822277487Skib	    g4x_compute_wm0(dev, 2,
1823277487Skib			    &sandybridge_display_wm_info, latency,
1824277487Skib			    &sandybridge_cursor_wm_info, latency,
1825277487Skib			    &plane_wm, &cursor_wm)) {
1826277487Skib		val = I915_READ(WM0_PIPEC_IVB);
1827277487Skib		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1828277487Skib		I915_WRITE(WM0_PIPEC_IVB, val |
1829277487Skib			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1830277487Skib		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
1831277487Skib			      " plane %d, cursor: %d\n",
1832277487Skib			      plane_wm, cursor_wm);
1833277487Skib		enabled |= 3;
1834277487Skib	}
1835277487Skib
1836277487Skib	/*
1837277487Skib	 * Calculate and update the self-refresh watermark only when one
1838277487Skib	 * display plane is used.
1839277487Skib	 *
1840277487Skib	 * SNB support 3 levels of watermark.
1841277487Skib	 *
1842277487Skib	 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
1843277487Skib	 * and disabled in the descending order
1844277487Skib	 *
1845277487Skib	 */
1846277487Skib	I915_WRITE(WM3_LP_ILK, 0);
1847277487Skib	I915_WRITE(WM2_LP_ILK, 0);
1848277487Skib	I915_WRITE(WM1_LP_ILK, 0);
1849277487Skib
1850277487Skib	if (!single_plane_enabled(enabled) ||
1851277487Skib	    dev_priv->sprite_scaling_enabled)
1852277487Skib		return;
1853277487Skib	enabled = ffs(enabled) - 1;
1854277487Skib
1855277487Skib	/* WM1 */
1856277487Skib	if (!ironlake_compute_srwm(dev, 1, enabled,
1857277487Skib				   SNB_READ_WM1_LATENCY() * 500,
1858277487Skib				   &sandybridge_display_srwm_info,
1859277487Skib				   &sandybridge_cursor_srwm_info,
1860277487Skib				   &fbc_wm, &plane_wm, &cursor_wm))
1861277487Skib		return;
1862277487Skib
1863277487Skib	I915_WRITE(WM1_LP_ILK,
1864277487Skib		   WM1_LP_SR_EN |
1865277487Skib		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1866277487Skib		   (fbc_wm << WM1_LP_FBC_SHIFT) |
1867277487Skib		   (plane_wm << WM1_LP_SR_SHIFT) |
1868277487Skib		   cursor_wm);
1869277487Skib
1870277487Skib	/* WM2 */
1871277487Skib	if (!ironlake_compute_srwm(dev, 2, enabled,
1872277487Skib				   SNB_READ_WM2_LATENCY() * 500,
1873277487Skib				   &sandybridge_display_srwm_info,
1874277487Skib				   &sandybridge_cursor_srwm_info,
1875277487Skib				   &fbc_wm, &plane_wm, &cursor_wm))
1876277487Skib		return;
1877277487Skib
1878277487Skib	I915_WRITE(WM2_LP_ILK,
1879277487Skib		   WM2_LP_EN |
1880277487Skib		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1881277487Skib		   (fbc_wm << WM1_LP_FBC_SHIFT) |
1882277487Skib		   (plane_wm << WM1_LP_SR_SHIFT) |
1883277487Skib		   cursor_wm);
1884277487Skib
1885277487Skib	/* WM3 */
1886277487Skib	if (!ironlake_compute_srwm(dev, 3, enabled,
1887277487Skib				   SNB_READ_WM3_LATENCY() * 500,
1888277487Skib				   &sandybridge_display_srwm_info,
1889277487Skib				   &sandybridge_cursor_srwm_info,
1890277487Skib				   &fbc_wm, &plane_wm, &cursor_wm))
1891277487Skib		return;
1892277487Skib
1893277487Skib	I915_WRITE(WM3_LP_ILK,
1894277487Skib		   WM3_LP_EN |
1895277487Skib		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1896277487Skib		   (fbc_wm << WM1_LP_FBC_SHIFT) |
1897277487Skib		   (plane_wm << WM1_LP_SR_SHIFT) |
1898277487Skib		   cursor_wm);
1899277487Skib}
1900277487Skib
1901277487Skibstatic void
1902277487Skibhaswell_update_linetime_wm(struct drm_device *dev, int pipe,
1903277487Skib				 struct drm_display_mode *mode)
1904277487Skib{
1905277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
1906277487Skib	u32 temp;
1907277487Skib
1908277487Skib	temp = I915_READ(PIPE_WM_LINETIME(pipe));
1909277487Skib	temp &= ~PIPE_WM_LINETIME_MASK;
1910277487Skib
1911277487Skib	/* The WM are computed with base on how long it takes to fill a single
1912277487Skib	 * row at the given clock rate, multiplied by 8.
1913277487Skib	 * */
1914277487Skib	temp |= PIPE_WM_LINETIME_TIME(
1915277487Skib		((mode->crtc_hdisplay * 1000) / mode->clock) * 8);
1916277487Skib
1917277487Skib	/* IPS watermarks are only used by pipe A, and are ignored by
1918277487Skib	 * pipes B and C.  They are calculated similarly to the common
1919277487Skib	 * linetime values, except that we are using CD clock frequency
1920277487Skib	 * in MHz instead of pixel rate for the division.
1921277487Skib	 *
1922277487Skib	 * This is a placeholder for the IPS watermark calculation code.
1923277487Skib	 */
1924277487Skib
1925277487Skib	I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
1926277487Skib}
1927277487Skib
1928277487Skibstatic bool
1929277487Skibsandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
1930277487Skib			      uint32_t sprite_width, int pixel_size,
1931277487Skib			      const struct intel_watermark_params *display,
1932277487Skib			      int display_latency_ns, int *sprite_wm)
1933277487Skib{
1934277487Skib	struct drm_crtc *crtc;
1935277487Skib	int clock;
1936277487Skib	int entries, tlb_miss;
1937277487Skib
1938277487Skib	crtc = intel_get_crtc_for_plane(dev, plane);
1939277487Skib	if (crtc->fb == NULL || !crtc->enabled) {
1940277487Skib		*sprite_wm = display->guard_size;
1941277487Skib		return false;
1942277487Skib	}
1943277487Skib
1944277487Skib	clock = crtc->mode.clock;
1945277487Skib
1946277487Skib	/* Use the small buffer method to calculate the sprite watermark */
1947277487Skib	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1948277487Skib	tlb_miss = display->fifo_size*display->cacheline_size -
1949277487Skib		sprite_width * 8;
1950277487Skib	if (tlb_miss > 0)
1951277487Skib		entries += tlb_miss;
1952277487Skib	entries = DIV_ROUND_UP(entries, display->cacheline_size);
1953277487Skib	*sprite_wm = entries + display->guard_size;
1954277487Skib	if (*sprite_wm > (int)display->max_wm)
1955277487Skib		*sprite_wm = display->max_wm;
1956277487Skib
1957277487Skib	return true;
1958277487Skib}
1959277487Skib
1960277487Skibstatic bool
1961277487Skibsandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
1962277487Skib				uint32_t sprite_width, int pixel_size,
1963277487Skib				const struct intel_watermark_params *display,
1964277487Skib				int latency_ns, int *sprite_wm)
1965277487Skib{
1966277487Skib	struct drm_crtc *crtc;
1967277487Skib	unsigned long line_time_us;
1968277487Skib	int clock;
1969277487Skib	int line_count, line_size;
1970277487Skib	int small, large;
1971277487Skib	int entries;
1972277487Skib
1973277487Skib	if (!latency_ns) {
1974277487Skib		*sprite_wm = 0;
1975277487Skib		return false;
1976277487Skib	}
1977277487Skib
1978277487Skib	crtc = intel_get_crtc_for_plane(dev, plane);
1979277487Skib	clock = crtc->mode.clock;
1980277487Skib	if (!clock) {
1981277487Skib		*sprite_wm = 0;
1982277487Skib		return false;
1983277487Skib	}
1984277487Skib
1985277487Skib	line_time_us = (sprite_width * 1000) / clock;
1986277487Skib	if (!line_time_us) {
1987277487Skib		*sprite_wm = 0;
1988277487Skib		return false;
1989277487Skib	}
1990277487Skib
1991277487Skib	line_count = (latency_ns / line_time_us + 1000) / 1000;
1992277487Skib	line_size = sprite_width * pixel_size;
1993277487Skib
1994277487Skib	/* Use the minimum of the small and large buffer method for primary */
1995277487Skib	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1996277487Skib	large = line_count * line_size;
1997277487Skib
1998277487Skib	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1999277487Skib	*sprite_wm = entries + display->guard_size;
2000277487Skib
2001277487Skib	return *sprite_wm > 0x3ff ? false : true;
2002277487Skib}
2003277487Skib
2004277487Skibstatic void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
2005277487Skib					 uint32_t sprite_width, int pixel_size)
2006277487Skib{
2007277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2008277487Skib	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
2009277487Skib	u32 val;
2010277487Skib	int sprite_wm, reg;
2011277487Skib	int ret;
2012277487Skib
2013277487Skib	switch (pipe) {
2014277487Skib	case 0:
2015277487Skib		reg = WM0_PIPEA_ILK;
2016277487Skib		break;
2017277487Skib	case 1:
2018277487Skib		reg = WM0_PIPEB_ILK;
2019277487Skib		break;
2020277487Skib	case 2:
2021277487Skib		reg = WM0_PIPEC_IVB;
2022277487Skib		break;
2023277487Skib	default:
2024277487Skib		return; /* bad pipe */
2025277487Skib	}
2026277487Skib
2027277487Skib	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
2028277487Skib					    &sandybridge_display_wm_info,
2029277487Skib					    latency, &sprite_wm);
2030277487Skib	if (!ret) {
2031277487Skib		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
2032277487Skib			      pipe);
2033277487Skib		return;
2034277487Skib	}
2035277487Skib
2036277487Skib	val = I915_READ(reg);
2037277487Skib	val &= ~WM0_PIPE_SPRITE_MASK;
2038277487Skib	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
2039277487Skib	DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
2040277487Skib
2041277487Skib
2042277487Skib	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2043277487Skib					      pixel_size,
2044277487Skib					      &sandybridge_display_srwm_info,
2045277487Skib					      SNB_READ_WM1_LATENCY() * 500,
2046277487Skib					      &sprite_wm);
2047277487Skib	if (!ret) {
2048277487Skib		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
2049277487Skib			      pipe);
2050277487Skib		return;
2051277487Skib	}
2052277487Skib	I915_WRITE(WM1S_LP_ILK, sprite_wm);
2053277487Skib
2054277487Skib	/* Only IVB has two more LP watermarks for sprite */
2055277487Skib	if (!IS_IVYBRIDGE(dev))
2056277487Skib		return;
2057277487Skib
2058277487Skib	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2059277487Skib					      pixel_size,
2060277487Skib					      &sandybridge_display_srwm_info,
2061277487Skib					      SNB_READ_WM2_LATENCY() * 500,
2062277487Skib					      &sprite_wm);
2063277487Skib	if (!ret) {
2064277487Skib		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
2065277487Skib			      pipe);
2066277487Skib		return;
2067277487Skib	}
2068277487Skib	I915_WRITE(WM2S_LP_IVB, sprite_wm);
2069277487Skib
2070277487Skib	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2071277487Skib					      pixel_size,
2072277487Skib					      &sandybridge_display_srwm_info,
2073277487Skib					      SNB_READ_WM3_LATENCY() * 500,
2074277487Skib					      &sprite_wm);
2075277487Skib	if (!ret) {
2076277487Skib		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
2077277487Skib			      pipe);
2078277487Skib		return;
2079277487Skib	}
2080277487Skib	I915_WRITE(WM3S_LP_IVB, sprite_wm);
2081277487Skib}
2082277487Skib
2083277487Skib/**
2084277487Skib * intel_update_watermarks - update FIFO watermark values based on current modes
2085277487Skib *
2086277487Skib * Calculate watermark values for the various WM regs based on current mode
2087277487Skib * and plane configuration.
2088277487Skib *
2089277487Skib * There are several cases to deal with here:
2090277487Skib *   - normal (i.e. non-self-refresh)
2091277487Skib *   - self-refresh (SR) mode
2092277487Skib *   - lines are large relative to FIFO size (buffer can hold up to 2)
2093277487Skib *   - lines are small relative to FIFO size (buffer can hold more than 2
2094277487Skib *     lines), so need to account for TLB latency
2095277487Skib *
2096277487Skib *   The normal calculation is:
2097277487Skib *     watermark = dotclock * bytes per pixel * latency
2098277487Skib *   where latency is platform & configuration dependent (we assume pessimal
2099277487Skib *   values here).
2100277487Skib *
2101277487Skib *   The SR calculation is:
2102277487Skib *     watermark = (trunc(latency/line time)+1) * surface width *
2103277487Skib *       bytes per pixel
2104277487Skib *   where
2105277487Skib *     line time = htotal / dotclock
2106277487Skib *     surface width = hdisplay for normal plane and 64 for cursor
2107277487Skib *   and latency is assumed to be high, as above.
2108277487Skib *
2109277487Skib * The final value programmed to the register should always be rounded up,
2110277487Skib * and include an extra 2 entries to account for clock crossings.
2111277487Skib *
2112277487Skib * We don't use the sprite, so we can ignore that.  And on Crestline we have
2113277487Skib * to set the non-SR watermarks to 8.
2114277487Skib */
2115277487Skibvoid intel_update_watermarks(struct drm_device *dev)
2116277487Skib{
2117277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2118277487Skib
2119277487Skib	if (dev_priv->display.update_wm)
2120277487Skib		dev_priv->display.update_wm(dev);
2121277487Skib}
2122277487Skib
2123277487Skibvoid intel_update_linetime_watermarks(struct drm_device *dev,
2124277487Skib		int pipe, struct drm_display_mode *mode)
2125277487Skib{
2126277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2127277487Skib
2128277487Skib	if (dev_priv->display.update_linetime_wm)
2129277487Skib		dev_priv->display.update_linetime_wm(dev, pipe, mode);
2130277487Skib}
2131277487Skib
2132277487Skibvoid intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
2133277487Skib				    uint32_t sprite_width, int pixel_size)
2134277487Skib{
2135277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2136277487Skib
2137277487Skib	if (dev_priv->display.update_sprite_wm)
2138277487Skib		dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
2139277487Skib						   pixel_size);
2140277487Skib}
2141277487Skib
2142277487Skibstatic struct drm_i915_gem_object *
2143277487Skibintel_alloc_context_page(struct drm_device *dev)
2144277487Skib{
2145277487Skib	struct drm_i915_gem_object *ctx;
2146277487Skib	int ret;
2147277487Skib
2148277487Skib	DRM_LOCK_ASSERT(dev);
2149277487Skib
2150277487Skib	ctx = i915_gem_alloc_object(dev, 4096);
2151277487Skib	if (!ctx) {
2152277487Skib		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
2153277487Skib		return NULL;
2154277487Skib	}
2155277487Skib
2156277487Skib	ret = i915_gem_object_pin(ctx, 4096, true);
2157277487Skib	if (ret) {
2158277487Skib		DRM_ERROR("failed to pin power context: %d\n", ret);
2159277487Skib		goto err_unref;
2160277487Skib	}
2161277487Skib
2162277487Skib	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
2163277487Skib	if (ret) {
2164277487Skib		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
2165277487Skib		goto err_unpin;
2166277487Skib	}
2167277487Skib
2168277487Skib	return ctx;
2169277487Skib
2170277487Skiberr_unpin:
2171277487Skib	i915_gem_object_unpin(ctx);
2172277487Skiberr_unref:
2173277487Skib	drm_gem_object_unreference(&ctx->base);
2174277487Skib	DRM_UNLOCK(dev);
2175277487Skib	return NULL;
2176277487Skib}
2177277487Skib
2178277487Skibbool ironlake_set_drps(struct drm_device *dev, u8 val)
2179277487Skib{
2180277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2181277487Skib	u16 rgvswctl;
2182277487Skib
2183277487Skib	rgvswctl = I915_READ16(MEMSWCTL);
2184277487Skib	if (rgvswctl & MEMCTL_CMD_STS) {
2185277487Skib		DRM_DEBUG("gpu busy, RCS change rejected\n");
2186277487Skib		return false; /* still busy with another command */
2187277487Skib	}
2188277487Skib
2189277487Skib	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
2190277487Skib		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
2191277487Skib	I915_WRITE16(MEMSWCTL, rgvswctl);
2192277487Skib	POSTING_READ16(MEMSWCTL);
2193277487Skib
2194277487Skib	rgvswctl |= MEMCTL_CMD_STS;
2195277487Skib	I915_WRITE16(MEMSWCTL, rgvswctl);
2196277487Skib
2197277487Skib	return true;
2198277487Skib}
2199277487Skib
2200277487Skibvoid ironlake_enable_drps(struct drm_device *dev)
2201277487Skib{
2202277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2203277487Skib	u32 rgvmodectl = I915_READ(MEMMODECTL);
2204277487Skib	u8 fmax, fmin, fstart, vstart;
2205277487Skib
2206277487Skib	/* Enable temp reporting */
2207277487Skib	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
2208277487Skib	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
2209277487Skib
2210277487Skib	/* 100ms RC evaluation intervals */
2211277487Skib	I915_WRITE(RCUPEI, 100000);
2212277487Skib	I915_WRITE(RCDNEI, 100000);
2213277487Skib
2214277487Skib	/* Set max/min thresholds to 90ms and 80ms respectively */
2215277487Skib	I915_WRITE(RCBMAXAVG, 90000);
2216277487Skib	I915_WRITE(RCBMINAVG, 80000);
2217277487Skib
2218277487Skib	I915_WRITE(MEMIHYST, 1);
2219277487Skib
2220277487Skib	/* Set up min, max, and cur for interrupt handling */
2221277487Skib	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
2222277487Skib	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
2223277487Skib	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
2224277487Skib		MEMMODE_FSTART_SHIFT;
2225277487Skib
2226277487Skib	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
2227277487Skib		PXVFREQ_PX_SHIFT;
2228277487Skib
2229277487Skib	dev_priv->fmax = fmax; /* IPS callback will increase this */
2230277487Skib	dev_priv->fstart = fstart;
2231277487Skib
2232277487Skib	dev_priv->max_delay = fstart;
2233277487Skib	dev_priv->min_delay = fmin;
2234277487Skib	dev_priv->cur_delay = fstart;
2235277487Skib
2236277487Skib	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
2237277487Skib			 fmax, fmin, fstart);
2238277487Skib
2239277487Skib	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
2240277487Skib
2241277487Skib	/*
2242277487Skib	 * Interrupts will be enabled in ironlake_irq_postinstall
2243277487Skib	 */
2244277487Skib
2245277487Skib	I915_WRITE(VIDSTART, vstart);
2246277487Skib	POSTING_READ(VIDSTART);
2247277487Skib
2248277487Skib	rgvmodectl |= MEMMODE_SWMODE_EN;
2249277487Skib	I915_WRITE(MEMMODECTL, rgvmodectl);
2250277487Skib
2251277487Skib	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
2252277487Skib		DRM_ERROR("stuck trying to change perf mode\n");
2253277487Skib	pause("915dsp", 1);
2254277487Skib
2255277487Skib	ironlake_set_drps(dev, fstart);
2256277487Skib
2257277487Skib	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
2258277487Skib		I915_READ(0x112e0);
2259277487Skib	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
2260277487Skib	dev_priv->last_count2 = I915_READ(0x112f4);
2261277487Skib	nanotime(&dev_priv->last_time2);
2262277487Skib}
2263277487Skib
/*
 * Tear down Ironlake dynamic render P-state (DRPS) support: mask and ack
 * the memory-event interrupts, return the GPU to its starting frequency,
 * and hand rate control back to hardware.  Inverse of the enable path
 * that programmed MEMINTREN/MEMSWCTL earlier in this file.
 */
void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->fstart);
	/* ~1 tick settle delay between steps, matching the enable path. */
	pause("915dsp", 1);
	/* Setting MEMCTL_CMD_STS appears to hand control back to hardware;
	 * NOTE(review): confirm against the PRM. */
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	pause("915dsp", 1);

}
2284277487Skib
2285277487Skibvoid gen6_set_rps(struct drm_device *dev, u8 val)
2286277487Skib{
2287277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2288277487Skib	u32 swreq;
2289277487Skib
2290277487Skib	swreq = (val & 0x3ff) << 25;
2291277487Skib	I915_WRITE(GEN6_RPNSWREQ, swreq);
2292277487Skib}
2293277487Skib
/*
 * Disable gen6 render P-state (RPS) support: cancel any pending software
 * frequency request, mask and disable all PM interrupts, and clear the
 * cached interrupt state shared with the rps work item.
 */
void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Bit 31 cancels the software turbo request. */
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	I915_WRITE(GEN6_PMIER, 0);
	/* Complete PM interrupt masking here doesn't race with the rps work
	 * item again unmasking PM interrupts because that is using a different
	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */

	/* pm_iir is shared with the interrupt handler; clear it under the
	 * same lock it uses. */
	mtx_lock(&dev_priv->rps_lock);
	dev_priv->pm_iir = 0;
	mtx_unlock(&dev_priv->rps_lock);

	/* Writing PMIIR back to itself acks all currently pending bits. */
	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
2312277487Skib
2313277487Skibint intel_enable_rc6(const struct drm_device *dev)
2314277487Skib{
2315277487Skib	/*
2316277487Skib	 * Respect the kernel parameter if it is set
2317277487Skib	 */
2318277487Skib	if (i915_enable_rc6 >= 0)
2319277487Skib		return i915_enable_rc6;
2320277487Skib
2321277487Skib	/*
2322277487Skib	 * Disable RC6 on Ironlake
2323277487Skib	 */
2324277487Skib	if (INTEL_INFO(dev)->gen == 5)
2325277487Skib		return 0;
2326277487Skib
2327277487Skib	/* Sorry Haswell, no RC6 for you for now. */
2328277487Skib	if (IS_HASWELL(dev))
2329277487Skib		return 0;
2330277487Skib
2331277487Skib	/*
2332277487Skib	 * Disable rc6 on Sandybridge
2333277487Skib	 */
2334277487Skib	if (INTEL_INFO(dev)->gen == 6) {
2335277487Skib		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
2336277487Skib		return INTEL_RC6_ENABLE;
2337277487Skib	}
2338277487Skib	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
2339277487Skib	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
2340277487Skib}
2341277487Skib
/*
 * Bring up gen6 RC6 and RPS (render P-states / turbo).  Programs the RC
 * state thresholds, enables the RC6 states selected by intel_enable_rc6(),
 * configures the RP up/down evaluation windows, loads the min-frequency
 * table via the PCU mailbox, queries overclock support, and finally
 * unmasks the PM interrupts that drive frequency changes.
 *
 * The exact register write order below is a hardware-mandated sequence;
 * do not reorder.
 */
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_ring_buffer *ring;
	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
	u32 pcu_mbox, rc6_mask = 0;
	u32 gtfifodbg;
	int cur_freq, min_freq, max_freq;
	int rc6_mode;
	int i;

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);
	DRM_LOCK(dev_priv->dev);

	/* Clear the DBG now so we don't confuse earlier errors */
	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	/* Keep the GT awake for the whole programming sequence. */
	gen6_gt_force_wake_get(dev_priv);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	/* Per-ring idle limits. */
	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* Translate the RC6 policy into GEN6_RC_CONTROL enable bits. */
	rc6_mode = intel_enable_rc6(dev_priv->dev);
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	if (rc6_mode & INTEL_RC6p_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

	if (rc6_mode & INTEL_RC6pp_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;

	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
			(rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
			(rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
			(rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	/* Initial frequency request and video frequency. */
	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(10) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN6_FREQUENCY(12));

	/* RP evaluation windows and thresholds for up/down clocking. */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   18 << 24 |
		   6 << 16);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
	I915_WRITE(GEN6_RP_UP_EI, 100000);
	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	/* Load the minimum-frequency table through the PCU mailbox:
	 * wait idle, write data + command, wait for completion. */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");

	I915_WRITE(GEN6_PCODE_DATA, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY |
		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");

	/* Frequencies below are in the hardware's RP units. */
	min_freq = (rp_state_cap & 0xff0000) >> 16;
	max_freq = rp_state_cap & 0xff;
	cur_freq = (gt_perf_status & 0xff00) >> 8;

	/* Check for overclock support */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
	if (pcu_mbox & (1<<31)) { /* OC supported */
		max_freq = pcu_mbox & 0xff;
		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
	}

	/* In units of 100MHz */
	dev_priv->max_delay = max_freq;
	dev_priv->min_delay = min_freq;
	dev_priv->cur_delay = cur_freq;

	/* requires MSI enabled */
	I915_WRITE(GEN6_PMIER,
		   GEN6_PM_MBOX_EVENT |
		   GEN6_PM_THERMAL_EVENT |
		   GEN6_PM_RP_DOWN_TIMEOUT |
		   GEN6_PM_RP_UP_THRESHOLD |
		   GEN6_PM_RP_DOWN_THRESHOLD |
		   GEN6_PM_RP_UP_EI_EXPIRED |
		   GEN6_PM_RP_DOWN_EI_EXPIRED);
	/* Stale pm_iir would indicate a race with the interrupt handler. */
	mtx_lock(&dev_priv->rps_lock);
	if (dev_priv->pm_iir != 0)
		printf("KMS: pm_iir %x\n", dev_priv->pm_iir);
	I915_WRITE(GEN6_PMIMR, 0);
	mtx_unlock(&dev_priv->rps_lock);
	/* enable all PM interrupts */
	I915_WRITE(GEN6_PMINTRMSK, 0);

	gen6_gt_force_wake_put(dev_priv);
	DRM_UNLOCK(dev_priv->dev);
}
2487277487Skib
2488277487Skibvoid gen6_update_ring_freq(struct drm_i915_private *dev_priv)
2489277487Skib{
2490277487Skib	int min_freq = 15;
2491277487Skib	int gpu_freq, ia_freq, max_ia_freq;
2492277487Skib	int scaling_factor = 180;
2493277487Skib	uint64_t tsc_freq;
2494277487Skib
2495277487Skib#if 0
2496277487Skib	max_ia_freq = cpufreq_quick_get_max(0);
2497277487Skib	/*
2498277487Skib	 * Default to measured freq if none found, PCU will ensure we don't go
2499277487Skib	 * over
2500277487Skib	 */
2501277487Skib	if (!max_ia_freq)
2502277487Skib		max_ia_freq = tsc_khz;
2503277487Skib
2504277487Skib	/* Convert from kHz to MHz */
2505277487Skib	max_ia_freq /= 1000;
2506277487Skib#else
2507277487Skib	tsc_freq = atomic_load_acq_64(&tsc_freq);
2508277487Skib	max_ia_freq = tsc_freq / 1000 / 1000;
2509277487Skib#endif
2510277487Skib
2511277487Skib	DRM_LOCK(dev_priv->dev);
2512277487Skib
2513277487Skib	/*
2514277487Skib	 * For each potential GPU frequency, load a ring frequency we'd like
2515277487Skib	 * to use for memory access.  We do this by specifying the IA frequency
2516277487Skib	 * the PCU should use as a reference to determine the ring frequency.
2517277487Skib	 */
2518277487Skib	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
2519277487Skib	     gpu_freq--) {
2520277487Skib		int diff = dev_priv->max_delay - gpu_freq;
2521277487Skib		int d;
2522277487Skib
2523277487Skib		/*
2524277487Skib		 * For GPU frequencies less than 750MHz, just use the lowest
2525277487Skib		 * ring freq.
2526277487Skib		 */
2527277487Skib		if (gpu_freq < min_freq)
2528277487Skib			ia_freq = 800;
2529277487Skib		else
2530277487Skib			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
2531277487Skib		d = 100;
2532277487Skib		ia_freq = (ia_freq + d / 2) / d;
2533277487Skib
2534277487Skib		I915_WRITE(GEN6_PCODE_DATA,
2535277487Skib			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
2536277487Skib			   gpu_freq);
2537277487Skib		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
2538277487Skib			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
2539277487Skib		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
2540277487Skib			      GEN6_PCODE_READY) == 0, 10)) {
2541277487Skib			DRM_ERROR("pcode write of freq table timed out\n");
2542277487Skib			continue;
2543277487Skib		}
2544277487Skib	}
2545277487Skib
2546277487Skib	DRM_UNLOCK(dev_priv->dev);
2547277487Skib}
2548277487Skib
2549277487Skibstatic void ironlake_teardown_rc6(struct drm_device *dev)
2550277487Skib{
2551277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2552277487Skib
2553277487Skib	if (dev_priv->renderctx) {
2554277487Skib		i915_gem_object_unpin(dev_priv->renderctx);
2555277487Skib		drm_gem_object_unreference(&dev_priv->renderctx->base);
2556277487Skib		dev_priv->renderctx = NULL;
2557277487Skib	}
2558277487Skib
2559277487Skib	if (dev_priv->pwrctx) {
2560277487Skib		i915_gem_object_unpin(dev_priv->pwrctx);
2561277487Skib		drm_gem_object_unreference(&dev_priv->pwrctx->base);
2562277487Skib		dev_priv->pwrctx = NULL;
2563277487Skib	}
2564277487Skib}
2565277487Skib
/*
 * Disable Ironlake RC6: if a power context is active (PWRCTXA non-zero),
 * force the GPU awake, clear the power context register, restore
 * RSTDBYCTL, and finally free the context pages.
 */
void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		/* Wait up to 50ms for the render standby state to report ON. */
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}

	ironlake_teardown_rc6(dev);
}
2585277487Skib
2586277487Skibstatic int ironlake_setup_rc6(struct drm_device *dev)
2587277487Skib{
2588277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
2589277487Skib
2590277487Skib	if (dev_priv->renderctx == NULL)
2591277487Skib		dev_priv->renderctx = intel_alloc_context_page(dev);
2592277487Skib	if (!dev_priv->renderctx)
2593277487Skib		return -ENOMEM;
2594277487Skib
2595277487Skib	if (dev_priv->pwrctx == NULL)
2596277487Skib		dev_priv->pwrctx = intel_alloc_context_page(dev);
2597277487Skib	if (!dev_priv->pwrctx) {
2598277487Skib		ironlake_teardown_rc6(dev);
2599277487Skib		return -ENOMEM;
2600277487Skib	}
2601277487Skib
2602277487Skib	return 0;
2603277487Skib}
2604277487Skib
/*
 * Enable RC6 on Ironlake: allocate the render/power context pages, emit a
 * MI_SET_CONTEXT sequence so the hardware can save render state into the
 * render context, then point PWRCTXA at the power context and allow RC6
 * entry.  Bails out silently (with cleanup) on any failure.
 */
void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
	int ret;

	/* rc6 disabled by default due to repeated reports of hanging during
	 * boot and resume.
	 */
	if (!intel_enable_rc6(dev))
		return;

	DRM_LOCK(dev);
	ret = ironlake_setup_rc6(dev);
	if (ret) {
		DRM_UNLOCK(dev);
		return;
	}

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
	ret = intel_ring_begin(ring, 6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		DRM_UNLOCK(dev);
		return;
	}

	/* Emit exactly the 6 dwords reserved above; order is significant. */
	intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			MI_RESTORE_INHIBIT);
	intel_ring_emit(ring, MI_SUSPEND_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_advance(ring);

	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush, combined with MI_FLUSH above, it should be
	 * safe to assume that renderctx is valid
	 */
	ret = intel_wait_ring_idle(ring);
	if (ret) {
		DRM_ERROR("failed to enable ironlake power power savings\n");
		ironlake_teardown_rc6(dev);
		DRM_UNLOCK(dev);
		return;
	}

	/* Point the hardware at the power context and permit RC6 entry. */
	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
	DRM_UNLOCK(dev);
}
2664277487Skib
2665277487Skibstatic unsigned long intel_pxfreq(u32 vidfreq)
2666277487Skib{
2667277487Skib	unsigned long freq;
2668277487Skib	int div = (vidfreq & 0x3f0000) >> 16;
2669277487Skib	int post = (vidfreq & 0x3000) >> 12;
2670277487Skib	int pre = (vidfreq & 0x7);
2671277487Skib
2672277487Skib	if (!pre)
2673277487Skib		return 0;
2674277487Skib
2675277487Skib	freq = ((div * 133333) / ((1<<post) * pre));
2676277487Skib
2677277487Skib	return freq;
2678277487Skib}
2679277487Skib
/*
 * Chipset power coefficients used by i915_chipset_val().  An entry is
 * selected by matching (i, t) against (dev_priv->c_m, dev_priv->r_t);
 * m and c are then used as slope and intercept in the power estimate
 * ((m * diff) + c).  Field meanings beyond that are empirical —
 * NOTE(review): presumably i is a mode flag and t a memory clock; confirm
 * against the hardware documentation.
 */
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;	/* slope */
	u16 c;	/* intercept */
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
2693277487Skib
/*
 * Estimate chipset power draw from the DMIEC/DDREC/CSIEC energy counters,
 * using the slope/intercept pair from cparams[] that matches this device.
 * Caches the result in dev_priv->chipset_power and rate-limits sampling
 * to avoid dividing by a zero/near-zero time delta.
 */
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	diff1 = now - dev_priv->last_time1;
	/*
	 * sysctl(8) reads the value of sysctl twice in rapid
	 * succession.  There is high chance that it happens in the
	 * same timer tick.  Use the cached value to not divide by
	 * zero and give the hw a chance to gather more samples.
	 */
	if (diff1 <= 10)
		return (dev_priv->chipset_power);

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	/* NOTE(review): ~0UL is 32 bits on 32-bit kernels while diff is
	 * u64; the wrap correction assumes the sum fits in unsigned long. */
	if (total_count < dev_priv->last_count1) {
		diff = ~0UL - dev_priv->last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->last_count1;
	}

	/* Find the slope/intercept pair for this device; m and c stay 0
	 * (yielding ret 0) if no entry matches. */
	for (i = 0; i < DRM_ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->c_m &&
		    cparams[i].t == dev_priv->r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	/* Linear estimate on the per-millisecond counter rate. */
	diff = diff / diff1;
	ret = ((m * diff) + c);
	ret = ret / 10;

	dev_priv->last_count1 = total_count;
	dev_priv->last_time1 = now;

	dev_priv->chipset_power = ret;
	return (ret);
}
2744277487Skib
2745277487Skibunsigned long i915_mch_val(struct drm_i915_private *dev_priv)
2746277487Skib{
2747277487Skib	unsigned long m, x, b;
2748277487Skib	u32 tsfs;
2749277487Skib
2750277487Skib	tsfs = I915_READ(TSFS);
2751277487Skib
2752277487Skib	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
2753277487Skib	x = I915_READ8(I915_TR1);
2754277487Skib
2755277487Skib	b = tsfs & TSFS_INTR_MASK;
2756277487Skib
2757277487Skib	return ((m * x) / 127) - b;
2758277487Skib}
2759277487Skib
/*
 * Map a 7-bit PXVID code (0..127) to a voltage.  The caller masks the
 * register value with 0x7f, which exactly matches the 128 entries below.
 * Mobile parts use the vm column, desktop parts the vd column; both are
 * in .1 mV units per the table comments.
 */
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	static const struct v_table {
		u16 vd; /* in .1 mil */
		u16 vm; /* in .1 mil */
	} v_table[] = {
		{ 0, 0, },
		{ 375, 0, },
		{ 500, 0, },
		{ 625, 0, },
		{ 750, 0, },
		{ 875, 0, },
		{ 1000, 0, },
		{ 1125, 0, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4250, 3125, },
		{ 4375, 3250, },
		{ 4500, 3375, },
		{ 4625, 3500, },
		{ 4750, 3625, },
		{ 4875, 3750, },
		{ 5000, 3875, },
		{ 5125, 4000, },
		{ 5250, 4125, },
		{ 5375, 4250, },
		{ 5500, 4375, },
		{ 5625, 4500, },
		{ 5750, 4625, },
		{ 5875, 4750, },
		{ 6000, 4875, },
		{ 6125, 5000, },
		{ 6250, 5125, },
		{ 6375, 5250, },
		{ 6500, 5375, },
		{ 6625, 5500, },
		{ 6750, 5625, },
		{ 6875, 5750, },
		{ 7000, 5875, },
		{ 7125, 6000, },
		{ 7250, 6125, },
		{ 7375, 6250, },
		{ 7500, 6375, },
		{ 7625, 6500, },
		{ 7750, 6625, },
		{ 7875, 6750, },
		{ 8000, 6875, },
		{ 8125, 7000, },
		{ 8250, 7125, },
		{ 8375, 7250, },
		{ 8500, 7375, },
		{ 8625, 7500, },
		{ 8750, 7625, },
		{ 8875, 7750, },
		{ 9000, 7875, },
		{ 9125, 8000, },
		{ 9250, 8125, },
		{ 9375, 8250, },
		{ 9500, 8375, },
		{ 9625, 8500, },
		{ 9750, 8625, },
		{ 9875, 8750, },
		{ 10000, 8875, },
		{ 10125, 9000, },
		{ 10250, 9125, },
		{ 10375, 9250, },
		{ 10500, 9375, },
		{ 10625, 9500, },
		{ 10750, 9625, },
		{ 10875, 9750, },
		{ 11000, 9875, },
		{ 11125, 10000, },
		{ 11250, 10125, },
		{ 11375, 10250, },
		{ 11500, 10375, },
		{ 11625, 10500, },
		{ 11750, 10625, },
		{ 11875, 10750, },
		{ 12000, 10875, },
		{ 12125, 11000, },
		{ 12250, 11125, },
		{ 12375, 11250, },
		{ 12500, 11375, },
		{ 12625, 11500, },
		{ 12750, 11625, },
		{ 12875, 11750, },
		{ 13000, 11875, },
		{ 13125, 12000, },
		{ 13250, 12125, },
		{ 13375, 12250, },
		{ 13500, 12375, },
		{ 13625, 12500, },
		{ 13750, 12625, },
		{ 13875, 12750, },
		{ 14000, 12875, },
		{ 14125, 13000, },
		{ 14250, 13125, },
		{ 14375, 13250, },
		{ 14500, 13375, },
		{ 14625, 13500, },
		{ 14750, 13625, },
		{ 14875, 13750, },
		{ 15000, 13875, },
		{ 15125, 14000, },
		{ 15250, 14125, },
		{ 15375, 14250, },
		{ 15500, 14375, },
		{ 15625, 14500, },
		{ 15750, 14625, },
		{ 15875, 14750, },
		{ 16000, 14875, },
		{ 16125, 15000, },
	};
	if (dev_priv->info->is_mobile)
		return v_table[pxvid].vm;
	else
		return v_table[pxvid].vd;
}
2900277487Skib
/*
 * Sample the GFXEC energy counter and update dev_priv->gfx_power with the
 * average rate since the previous sample.  Only meaningful on gen5
 * (Ironlake); a no-op elsewhere.
 */
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct timespec now, diff1;
	u64 diff;
	unsigned long diffms;
	u32 count;

	if (dev_priv->info->gen != 5)
		return;

	/* Elapsed wall time since the previous sample. */
	nanotime(&now);
	diff1 = now;
	timespecsub(&diff1, &dev_priv->last_time2);

	/* Don't divide by 0 */
	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	/* Counter wrapped; NOTE(review): ~0UL is 32 bits on 32-bit kernels,
	 * which matches the u32 counter but not the u64 diff. */
	if (count < dev_priv->last_count2) {
		diff = ~0UL - dev_priv->last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->last_count2;
	}

	dev_priv->last_count2 = count;
	dev_priv->last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = diff / (diffms * 10);
	dev_priv->gfx_power = diff;
}
2937277487Skib
/*
 * Estimate total graphics power (mW): a correction term computed from the
 * current P-state voltage and the MCH thermal value, added to the sampled
 * counter-based gfx_power from i915_update_gfx_val().
 */
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	/* Voltage ID for the currently selected P-state (bits 31:24). */
	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	/* Refresh the counter-based component before summing. */
	i915_update_gfx_val(dev_priv);

	return dev_priv->gfx_power + state2;
}
2972277487Skib
2973277487Skib/**
2974277487Skib * i915_read_mch_val - return value for IPS use
2975277487Skib *
2976277487Skib * Calculate and return a value for the IPS driver to use when deciding whether
2977277487Skib * we have thermal and power headroom to increase CPU or GPU power budget.
2978277487Skib */
2979277487Skibunsigned long i915_read_mch_val(void)
2980277487Skib{
2981277487Skib	struct drm_i915_private *dev_priv;
2982277487Skib	unsigned long chipset_val, graphics_val, ret = 0;
2983277487Skib
2984277487Skib	mtx_lock(&mchdev_lock);
2985277487Skib	if (!i915_mch_dev)
2986277487Skib		goto out_unlock;
2987277487Skib	dev_priv = i915_mch_dev;
2988277487Skib
2989277487Skib	chipset_val = i915_chipset_val(dev_priv);
2990277487Skib	graphics_val = i915_gfx_val(dev_priv);
2991277487Skib
2992277487Skib	ret = chipset_val + graphics_val;
2993277487Skib
2994277487Skibout_unlock:
2995277487Skib	mtx_unlock(&mchdev_lock);
2996277487Skib
2997277487Skib	return ret;
2998277487Skib}
2999277487Skib
3000277487Skib/**
3001277487Skib * i915_gpu_raise - raise GPU frequency limit
3002277487Skib *
3003277487Skib * Raise the limit; IPS indicates we have thermal headroom.
3004277487Skib */
3005277487Skibbool i915_gpu_raise(void)
3006277487Skib{
3007277487Skib	struct drm_i915_private *dev_priv;
3008277487Skib	bool ret = true;
3009277487Skib
3010277487Skib	mtx_lock(&mchdev_lock);
3011277487Skib	if (!i915_mch_dev) {
3012277487Skib		ret = false;
3013277487Skib		goto out_unlock;
3014277487Skib	}
3015277487Skib	dev_priv = i915_mch_dev;
3016277487Skib
3017277487Skib	if (dev_priv->max_delay > dev_priv->fmax)
3018277487Skib		dev_priv->max_delay--;
3019277487Skib
3020277487Skibout_unlock:
3021277487Skib	mtx_unlock(&mchdev_lock);
3022277487Skib
3023277487Skib	return ret;
3024277487Skib}
3025277487Skib
3026277487Skib/**
3027277487Skib * i915_gpu_lower - lower GPU frequency limit
3028277487Skib *
3029277487Skib * IPS indicates we're close to a thermal limit, so throttle back the GPU
3030277487Skib * frequency maximum.
3031277487Skib */
3032277487Skibbool i915_gpu_lower(void)
3033277487Skib{
3034277487Skib	struct drm_i915_private *dev_priv;
3035277487Skib	bool ret = true;
3036277487Skib
3037277487Skib	mtx_lock(&mchdev_lock);
3038277487Skib	if (!i915_mch_dev) {
3039277487Skib		ret = false;
3040277487Skib		goto out_unlock;
3041277487Skib	}
3042277487Skib	dev_priv = i915_mch_dev;
3043277487Skib
3044277487Skib	if (dev_priv->max_delay < dev_priv->min_delay)
3045277487Skib		dev_priv->max_delay++;
3046277487Skib
3047277487Skibout_unlock:
3048277487Skib	mtx_unlock(&mchdev_lock);
3049277487Skib
3050277487Skib	return ret;
3051277487Skib}
3052277487Skib
3053277487Skib/**
3054277487Skib * i915_gpu_busy - indicate GPU business to IPS
3055277487Skib *
3056277487Skib * Tell the IPS driver whether or not the GPU is busy.
3057277487Skib */
3058277487Skibbool i915_gpu_busy(void)
3059277487Skib{
3060277487Skib	struct drm_i915_private *dev_priv;
3061277487Skib	bool ret = false;
3062277487Skib
3063277487Skib	mtx_lock(&mchdev_lock);
3064277487Skib	if (!i915_mch_dev)
3065277487Skib		goto out_unlock;
3066277487Skib	dev_priv = i915_mch_dev;
3067277487Skib
3068277487Skib	ret = dev_priv->busy;
3069277487Skib
3070277487Skibout_unlock:
3071277487Skib	mtx_unlock(&mchdev_lock);
3072277487Skib
3073277487Skib	return ret;
3074277487Skib}
3075277487Skib
3076277487Skib/**
3077277487Skib * i915_gpu_turbo_disable - disable graphics turbo
3078277487Skib *
3079277487Skib * Disable graphics turbo by resetting the max frequency and setting the
3080277487Skib * current frequency to the default.
3081277487Skib */
3082277487Skibbool i915_gpu_turbo_disable(void)
3083277487Skib{
3084277487Skib	struct drm_i915_private *dev_priv;
3085277487Skib	bool ret = true;
3086277487Skib
3087277487Skib	mtx_lock(&mchdev_lock);
3088277487Skib	if (!i915_mch_dev) {
3089277487Skib		ret = false;
3090277487Skib		goto out_unlock;
3091277487Skib	}
3092277487Skib	dev_priv = i915_mch_dev;
3093277487Skib
3094277487Skib	dev_priv->max_delay = dev_priv->fstart;
3095277487Skib
3096277487Skib	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
3097277487Skib		ret = false;
3098277487Skib
3099277487Skibout_unlock:
3100277487Skib	mtx_unlock(&mchdev_lock);
3101277487Skib
3102277487Skib	return ret;
3103277487Skib}
3104277487Skib
/*
 * Register this device as the global IPS provider (i915_mch_dev) so the
 * i915_read_mch_val()/i915_gpu_*() callbacks above have a device to act on.
 */
void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	mtx_lock(&mchdev_lock);
	i915_mch_dev = dev_priv;
	dev_priv->mchdev_lock = &mchdev_lock;
	mtx_unlock(&mchdev_lock);

	/* Linux pings the IPS driver here; not ported to FreeBSD. */
#if 0
	ips_ping_for_i915_load();
#endif
}
3116277487Skib
/*
 * Unregister the global IPS provider; subsequent i915_gpu_*() calls will
 * see a NULL i915_mch_dev and fail gracefully.
 */
void intel_gpu_ips_teardown(void)
{
	mtx_lock(&mchdev_lock);
	i915_mch_dev = NULL;
	mtx_unlock(&mchdev_lock);
}
3123277487Skib
/*
 * intel_init_emon - program the energy monitor (EMON) unit.
 *
 * Disables the unit, loads the event and P-state energy-weight tables
 * (empirically determined magic values), then re-enables PMON with the
 * event selection 0x80000019.  The resulting counters are presumably
 * consumed by the IPS power-estimation helpers — confirm against callers.
 */
void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		/* Weight = vid^2 * (freq/1000) * 255 / (127^2 * 900),
		 * scaled to fit a byte; >0xff indicates bogus fuse data. */
		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	/* Pack the 16 byte-weights into four 32-bit PXW registers. */
	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	/* Cache the fuse value (masked) — name suggests a correction
	 * factor for the power math; confirm against users of ->corr. */
	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
3194277487Skib
/*
 * ironlake_init_clock_gating - Ironlake (gen5) clock-gating setup.
 *
 * Disables the clock gates that must stay off for FBC and CxSR, applies
 * the spec-mandated chicken bits, and clears the LP watermark registers.
 * The raw register offsets quoted in the comments below come from the
 * original spec notes.
 */
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	/* Required for FBC */
	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
		DPFCRUNIT_CLOCK_GATE_DISABLE |
		DPFDUNIT_CLOCK_GATE_DISABLE;
	/* Required for CxSR */
	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	I915_WRITE(ILK_DSPCLK_GATE,
		   (I915_READ(ILK_DSPCLK_GATE) |
		    ILK_DPARB_CLK_GATE));
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));
	/* Clear the LP watermarks. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
		I915_WRITE(ILK_DSPCLK_GATE,
			   I915_READ(ILK_DSPCLK_GATE) |
			   ILK_DPFC_DIS1 |
			   ILK_DPFC_DIS2 |
			   ILK_CLK_FBC);
	}

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);
}
3263277487Skib
/*
 * gen6_init_clock_gating - Sandy Bridge (gen6) clock-gating setup.
 *
 * Applies the gen6 chicken bits and clock-gating workarounds, clears
 * the LP watermarks, and disables trickle feed on every display plane.
 */
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* Clear the LP watermarks. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set.  Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* Bspec says we need to always set all mask bits. */
	I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
		   _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE  |
		   ILK_DPFD_CLK_GATE);

	/* Disable trickle feed on each pipe's plane and flush the write. */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
3333277487Skib
3334277487Skibstatic void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
3335277487Skib{
3336277487Skib	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
3337277487Skib
3338277487Skib	reg &= ~GEN7_FF_SCHED_MASK;
3339277487Skib	reg |= GEN7_FF_TS_SCHED_HW;
3340277487Skib	reg |= GEN7_FF_VS_SCHED_HW;
3341277487Skib	reg |= GEN7_FF_DS_SCHED_HW;
3342277487Skib
3343277487Skib	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
3344277487Skib}
3345277487Skib
/*
 * ivybridge_init_clock_gating - Ivy Bridge (gen7) clock-gating setup.
 *
 * Applies the named IVB workarounds (Wa*), clears the LP watermarks,
 * disables plane trickle feed, and switches the fixed-function units to
 * hardware scheduling.
 */
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/* Clear the LP watermarks. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
	I915_WRITE(GEN7_L3CNTLREG1,
			GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
			GEN7_WA_L3_CHICKEN_MODE);

	/* This is required by WaCatErrorRejectionIssue */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* Disable trickle feed on each pipe's plane and flush the write. */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	gen7_setup_fixed_func_scheduler(dev_priv);

	/* WaDisable4x2SubspanOptimization */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
}
3397277487Skib
/*
 * valleyview_init_clock_gating - Valley View clock-gating setup.
 *
 * NOTE(review): this path reuses the Ivy Bridge workarounds verbatim
 * (the inline comments below still say "IVB"); confirm they apply to
 * VLV before changing anything here.
 */
static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/* Clear the LP watermarks. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
	I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);

	/* This is required by WaCatErrorRejectionIssue */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* Disable trickle feed on each pipe's plane and flush the write. */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	/* WaDisable4x2SubspanOptimization (same masked bit as IVB). */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
}
3444277487Skib
3445277487Skibstatic void g4x_init_clock_gating(struct drm_device *dev)
3446277487Skib{
3447277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
3448277487Skib	uint32_t dspclk_gate;
3449277487Skib
3450277487Skib	I915_WRITE(RENCLK_GATE_D1, 0);
3451277487Skib	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
3452277487Skib		   GS_UNIT_CLOCK_GATE_DISABLE |
3453277487Skib		   CL_UNIT_CLOCK_GATE_DISABLE);
3454277487Skib	I915_WRITE(RAMCLK_GATE_D, 0);
3455277487Skib	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
3456277487Skib		OVRUNIT_CLOCK_GATE_DISABLE |
3457277487Skib		OVCUNIT_CLOCK_GATE_DISABLE;
3458277487Skib	if (IS_GM45(dev))
3459277487Skib		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
3460277487Skib	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
3461277487Skib}
3462277487Skib
/*
 * crestline_init_clock_gating - Crestline (965GM) clock-gating setup.
 *
 * Ungates the RCC unit and zeroes the remaining gating controls,
 * including the 16-bit DEUC register.
 */
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	/* DEUC is a 16-bit register, hence the I915_WRITE16. */
	I915_WRITE16(DEUC, 0);
}
3473277487Skib
/*
 * broadwater_init_clock_gating - Broadwater (965G) clock-gating setup.
 *
 * Disables clock gating for the listed render units and clears the
 * second render gating register.
 */
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}
3485277487Skib
3486277487Skibstatic void gen3_init_clock_gating(struct drm_device *dev)
3487277487Skib{
3488277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
3489277487Skib	u32 dstate = I915_READ(D_STATE);
3490277487Skib
3491277487Skib	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
3492277487Skib		DSTATE_DOT_CLOCK_GATING;
3493277487Skib	I915_WRITE(D_STATE, dstate);
3494277487Skib
3495277487Skib	if (IS_PINEVIEW(dev))
3496277487Skib		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
3497277487Skib}
3498277487Skib
/* i85x_init_clock_gating - i85x setup: only the SV unit is ungated. */
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
3505277487Skib
/* i830_init_clock_gating - i830 setup: only the overlay unit is ungated. */
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
3512277487Skib
/* ibx_init_clock_gating - Ibex Peak PCH clock-gating setup. */
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
3524277487Skib
/* cpt_init_clock_gating - Cougar Point PCH clock-gating setup. */
static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* Without this, mode sets may fail silently on FDI */
	for_each_pipe(pipe)
		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
3542277487Skib
3543277487Skibvoid intel_init_clock_gating(struct drm_device *dev)
3544277487Skib{
3545277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
3546277487Skib
3547277487Skib	dev_priv->display.init_clock_gating(dev);
3548277487Skib
3549277487Skib	if (dev_priv->display.init_pch_clock_gating)
3550277487Skib		dev_priv->display.init_pch_clock_gating(dev);
3551277487Skib}
3552277487Skib
3553277487Skibstatic void gen6_sanitize_pm(struct drm_device *dev)
3554277487Skib{
3555277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
3556277487Skib	u32 limits, delay, old;
3557277487Skib
3558277487Skib	gen6_gt_force_wake_get(dev_priv);
3559277487Skib
3560277487Skib	old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
3561277487Skib	/* Make sure we continue to get interrupts
3562277487Skib	 * until we hit the minimum or maximum frequencies.
3563277487Skib	 */
3564277487Skib	limits &= ~(0x3f << 16 | 0x3f << 24);
3565277487Skib	delay = dev_priv->cur_delay;
3566277487Skib	if (delay < dev_priv->max_delay)
3567277487Skib		limits |= (dev_priv->max_delay & 0x3f) << 24;
3568277487Skib	if (delay > dev_priv->min_delay)
3569277487Skib		limits |= (dev_priv->min_delay & 0x3f) << 16;
3570277487Skib
3571277487Skib	if (old != limits) {
3572277487Skib		DRM_ERROR("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS expected %08x, was %08x\n",
3573277487Skib			  limits, old);
3574277487Skib		I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
3575277487Skib	}
3576277487Skib
3577277487Skib	gen6_gt_force_wake_put(dev_priv);
3578277487Skib}
3579277487Skib
3580277487Skibvoid intel_sanitize_pm(struct drm_device *dev)
3581277487Skib{
3582277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
3583277487Skib
3584277487Skib	if (dev_priv->display.sanitize_pm)
3585277487Skib		dev_priv->display.sanitize_pm(dev);
3586277487Skib}
3587277487Skib
3588277487Skib/* Starting with Haswell, we have different power wells for
3589277487Skib * different parts of the GPU. This attempts to enable them all.
3590277487Skib */
3591277487Skibstatic void intel_init_power_wells(struct drm_device *dev)
3592277487Skib{
3593277487Skib	struct drm_i915_private *dev_priv = dev->dev_private;
3594277487Skib	unsigned long power_wells[] = {
3595277487Skib		HSW_PWR_WELL_CTL1,
3596277487Skib		HSW_PWR_WELL_CTL2,
3597277487Skib		HSW_PWR_WELL_CTL4
3598277487Skib	};
3599277487Skib	int i;
3600277487Skib
3601277487Skib	if (!IS_HASWELL(dev))
3602277487Skib		return;
3603277487Skib
3604277487Skib	DRM_LOCK(dev);
3605277487Skib
3606277487Skib	for (i = 0; i < DRM_ARRAY_SIZE(power_wells); i++) {
3607277487Skib		int well = I915_READ(power_wells[i]);
3608277487Skib
3609277487Skib		if ((well & HSW_PWR_WELL_STATE) == 0) {
3610277487Skib			I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
3611277487Skib			if (wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20))
3612277487Skib				DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
3613277487Skib		}
3614277487Skib	}
3615277487Skib
3616277487Skibprintf("XXXKIB HACK: HSW RC OFF\n");
3617277487Skib	I915_WRITE(GEN6_RC_STATE, 0);
3618277487Skib	I915_WRITE(GEN6_RC_CONTROL, 0);
3619277487Skib	DRM_UNLOCK(dev);
3620277487Skib}
3621277487Skib
/*
 * intel_init_pm - set up chip-specific power management functions.
 *
 * Populates dev_priv->display with FBC hooks, memory-frequency data,
 * forcewake hooks, clock-gating hooks, watermark-update hooks and the
 * PM sanitizer, selected by hardware generation, then enables the
 * Haswell power wells.  A NULL update_wm means CxSR/watermark updates
 * are disabled for this device.
 */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Frame-buffer compression callbacks, by hardware family. */
	if (I915_HAS_FBC(dev)) {
		if (HAS_PCH_SPLIT(dev)) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = ironlake_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else if (IS_CRESTLINE(dev)) {
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;
		}
		/* 855GM needs testing */
	}

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
		dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;

		/* IVB configs may use multi-threaded forcewake */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
			u32	ecobus;

			/* A small trick here - if the bios hasn't configured MT forcewake,
			 * and if the device is in RC6, then force_wake_mt_get will not wake
			 * the device and the ECOBUS read will return zero. Which will be
			 * (correctly) interpreted by the test below as MT forcewake being
			 * disabled.
			 */
			DRM_LOCK(dev);
			__gen6_gt_force_wake_mt_get(dev_priv);
			ecobus = I915_READ_NOTRACE(ECOBUS);
			__gen6_gt_force_wake_mt_put(dev_priv);
			DRM_UNLOCK(dev);

			if (ecobus & FORCEWAKE_MT_ENABLE) {
				DRM_DEBUG_KMS("Using MT version of forcewake\n");
				dev_priv->display.force_wake_get =
					__gen6_gt_force_wake_mt_get;
				dev_priv->display.force_wake_put =
					__gen6_gt_force_wake_mt_put;
			}
		}

		/* South-bridge (PCH) clock gating, by PCH type. */
		if (HAS_PCH_IBX(dev))
			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
		else if (HAS_PCH_CPT(dev))
			dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;

		/* Watermark + clock-gating hooks by GPU generation; a zero
		 * latency readout disables CxSR (update_wm = NULL). */
		if (IS_GEN5(dev)) {
			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
				dev_priv->display.update_wm = ironlake_update_wm;
			else {
				DRM_DEBUG_KMS("Failed to get proper latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		} else if (IS_GEN6(dev)) {
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
		} else if (IS_IVYBRIDGE(dev)) {
			/* FIXME: detect B0+ stepping and use auto training */
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
		} else if (IS_HASWELL(dev)) {
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
				dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
		} else
			dev_priv->display.update_wm = NULL;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
		dev_priv->display.force_wake_get = vlv_force_wake_get;
		dev_priv->display.force_wake_put = vlv_force_wake_put;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			pineview_disable_cxsr(dev);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_I865G(dev)) {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		dev_priv->display.get_fifo_size = i830_get_fifo_size;
	} else if (IS_I85X(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	} else {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
		if (IS_845G(dev))
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		else
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
	}

	/* We attempt to init the necessary power wells early in the initialization
	 * time, so the subsystems that expect power to be enabled can work.
	 */
	intel_init_power_wells(dev);
}
3788277487Skib
3789